Source-Changes-HG archive
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]
[src/trunk]: src/sys/arch/news68k/news68k cosmetics (typo, KNF etc.)
details: https://anonhg.NetBSD.org/src/rev/c521290d386a
branches: trunk
changeset: 519362:c521290d386a
user: tsutsui <tsutsui%NetBSD.org@localhost>
date: Sun Dec 16 03:41:57 2001 +0000
description:
cosmetics (typo, KNF etc.)
diffstat:
sys/arch/news68k/news68k/pmap.c | 229 ++++++++++++++++++++-------------------
1 file changed, 118 insertions(+), 111 deletions(-)
diffs (truncated from 652 to 300 lines):
diff -r 9797835b8b2b -r c521290d386a sys/arch/news68k/news68k/pmap.c
--- a/sys/arch/news68k/news68k/pmap.c Sun Dec 16 03:09:19 2001 +0000
+++ b/sys/arch/news68k/news68k/pmap.c Sun Dec 16 03:41:57 2001 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.25 2001/12/13 04:39:52 chs Exp $ */
+/* $NetBSD: pmap.c,v 1.26 2001/12/16 03:41:57 tsutsui Exp $ */
/*-
* Copyright (c) 1999 The NetBSD Foundation, Inc.
@@ -159,11 +159,7 @@
#define PDB_PVDUMP 0x8000
int debugmap = 0;
-#if 1
int pmapdebug = PDB_PARANOIA;
-#else
-int pmapdebug = 0xffff;
-#endif
#define PMAP_DPRINTF(l, x) if (pmapdebug & (l)) printf x
@@ -262,8 +258,10 @@
char *pmap_attributes; /* reference and modify bits */
TAILQ_HEAD(pv_page_list, pv_page) pv_page_freelist;
int pv_nfree;
+
+#ifdef CACHE_HAVE_VAC
int pmap_aliasmask; /* seperation at which VA aliasing ok */
-
+#endif
#if defined(M68040)
int protostfree; /* prototype (default) free ST map */
#endif
@@ -372,13 +370,13 @@
void
pmap_init()
{
- vaddr_t addr, addr2;
- vsize_t s;
- struct pv_entry *pv;
- char *attr;
- int rv;
- int npages;
- int bank;
+ vaddr_t addr, addr2;
+ vsize_t s;
+ struct pv_entry *pv;
+ char *attr;
+ int rv;
+ int npages;
+ int bank;
PMAP_DPRINTF(PDB_FOLLOW, ("pmap_init()\n"));
@@ -395,18 +393,16 @@
*/
addr = (vaddr_t) intiobase;
if (uvm_map(kernel_map, &addr,
- intiotop_phys - intiobase_phys,
- NULL, UVM_UNKNOWN_OFFSET, 0,
- UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE,
- UVM_INH_NONE, UVM_ADV_RANDOM,
- UVM_FLAG_FIXED)) != 0)
+ intiotop_phys - intiobase_phys,
+ NULL, UVM_UNKNOWN_OFFSET, 0,
+ UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE,
+ UVM_INH_NONE, UVM_ADV_RANDOM, UVM_FLAG_FIXED)) != 0)
goto bogons;
addr = (vaddr_t) Sysmap;
if (uvm_map(kernel_map, &addr, MAX_PTSIZE,
- NULL, UVM_UNKNOWN_OFFSET, 0,
- UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE,
- UVM_INH_NONE, UVM_ADV_RANDOM,
- UVM_FLAG_FIXED)) != 0) {
+ NULL, UVM_UNKNOWN_OFFSET, 0,
+ UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE,
+ UVM_INH_NONE, UVM_ADV_RANDOM, UVM_FLAG_FIXED)) != 0) {
/*
* If this fails, it is probably because the static
* portion of the kernel page table isn't big enough
@@ -469,7 +465,7 @@
* Allocate physical memory for kernel PT pages and their management.
* We need 1 PT page per possible task plus some slop.
*/
- npages = min(atop(MAX_KPTSIZE), maxproc+16);
+ npages = min(atop(MAX_KPTSIZE), maxproc + 16);
s = ptoa(npages) + round_page(npages * sizeof(struct kpt_page));
/*
@@ -478,8 +474,8 @@
*/
addr = 0;
rv = uvm_map(kernel_map, &addr, s, NULL, UVM_UNKNOWN_OFFSET, 0,
- UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
- UVM_ADV_RANDOM, UVM_FLAG_NOMERGE));
+ UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
+ UVM_ADV_RANDOM, UVM_FLAG_NOMERGE));
if (rv != 0 || (addr + s) >= (vaddr_t)Sysmap)
panic("pmap_init: kernel PT too small");
uvm_unmap(kernel_map, addr, addr + s);
@@ -707,7 +703,7 @@
spa += NBPG;
}
pmap_update(pmap_kernel());
- return (va);
+ return va;
}
/*
@@ -723,12 +719,12 @@
pmap_t pmap;
PMAP_DPRINTF(PDB_FOLLOW|PDB_CREATE,
- ("pmap_create\n"));
+ ("pmap_create()\n"));
pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
memset(pmap, 0, sizeof(*pmap));
pmap_pinit(pmap);
- return (pmap);
+ return pmap;
}
/*
@@ -811,8 +807,7 @@
(vaddr_t)pmap->pm_ptab + MAX_PTSIZE);
uvm_km_pgremove(uvm.kernel_object, (vaddr_t)pmap->pm_ptab,
(vaddr_t)pmap->pm_ptab + MAX_PTSIZE);
- uvm_km_free_wakeup(pt_map, (vaddr_t)pmap->pm_ptab,
- MAX_PTSIZE);
+ uvm_km_free_wakeup(pt_map, (vaddr_t)pmap->pm_ptab, MAX_PTSIZE);
}
KASSERT(pmap->pm_stab == Segtabzero);
}
@@ -824,10 +819,10 @@
*/
void
pmap_reference(pmap)
- pmap_t pmap;
+ pmap_t pmap;
{
+
PMAP_DPRINTF(PDB_FOLLOW, ("pmap_reference(%p)\n", pmap));
-
simple_lock(&pmap->pm_lock);
pmap->pm_count++;
simple_unlock(&pmap->pm_lock);
@@ -995,7 +990,7 @@
void
pmap_page_protect(pg, prot)
struct vm_page *pg;
- vm_prot_t prot;
+ vm_prot_t prot;
{
paddr_t pa = VM_PAGE_TO_PHYS(pg);
struct pv_entry *pv;
@@ -1035,11 +1030,12 @@
pte, PRM_TFLUSH|PRM_CFLUSH);
else {
pv = pv->pv_next;
-#ifdef DEBUG
- if (pmapdebug & PDB_PARANOIA)
- printf("%s wired mapping for %lx not removed\n",
- "pmap_page_protect:", pa);
-#endif
+ PMAP_DPRINTF(PDB_PARANOIA,
+ ("%s wired mapping for %lx not removed\n",
+ "pmap_page_protect:", pa));
+ PMAP_DPRINTF(PDB_PARANOIA,
+ ("vm wired count %d\n",
+ PHYS_TO_VM_PAGE(pa)->wired_count));
if (pv == NULL)
break;
}
@@ -1055,9 +1051,9 @@
*/
void
pmap_protect(pmap, sva, eva, prot)
- pmap_t pmap;
- vaddr_t sva, eva;
- vm_prot_t prot;
+ pmap_t pmap;
+ vaddr_t sva, eva;
+ vm_prot_t prot;
{
vaddr_t nssva;
pt_entry_t *pte;
@@ -1177,8 +1173,8 @@
* For user mapping, allocate kernel VM resources if necessary.
*/
if (pmap->pm_ptab == NULL)
- pmap->pm_ptab = (pt_entry_t *)
- uvm_km_valloc_wait(pt_map, MAX_PTSIZE);
+ pmap->pm_ptab =
+ (pt_entry_t *) uvm_km_valloc_wait(pt_map, MAX_PTSIZE);
/*
* Segment table entry not valid, we need a new PT page
@@ -1425,6 +1421,14 @@
return 0;
}
+/*
+ * pmap_kenter_pa: [ INTERFACE ]
+ *
+ * Enter a va -> pa mapping into the kernel pmap without any
+ * physical->virtual tracking.
+ *
+ * Note: no locking is necessary in this function.
+ */
void
pmap_kenter_pa(va, pa, prot)
vaddr_t va;
@@ -1442,7 +1446,7 @@
* Segment table entry not valid, we need a new PT page
*/
- if (!pmap_ste_v(pmap, va)) {
+ if (!pmap_ste_v(pmap, va)) {
s = splvm();
pmap_enter_ptpage(pmap, va);
splx(s);
@@ -1481,22 +1485,28 @@
*pte = npte;
}
+/*
+ * pmap_kremove: [ INTERFACE ]
+ *
+ * Remove a mapping entered with pmap_kenter_pa() starting at va,
+ * for len bytes (assumed to be page rounded).
+ *
+ */
void
pmap_kremove(va, len)
vaddr_t va;
vsize_t len;
{
struct pmap *pmap = pmap_kernel();
- vaddr_t sva, eva, nssva;
+ vaddr_t eva, nssva;
pt_entry_t *pte;
PMAP_DPRINTF(PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT,
("pmap_kremove(%lx, %lx)\n", va, len));
- sva = va;
eva = va + len;
- while (sva < eva) {
- nssva = m68k_trunc_seg(sva) + NBSEG;
+ while (va < eva) {
+ nssva = m68k_trunc_seg(va) + NBSEG;
if (nssva == 0 || nssva > eva)
nssva = eva;
@@ -1505,8 +1515,8 @@
* skip to the next segment boundary.
*/
- if (!pmap_ste_v(pmap, sva)) {
- sva = nssva;
+ if (!pmap_ste_v(pmap, va)) {
+ va = nssva;
continue;
}
@@ -1514,8 +1524,8 @@
* Invalidate every valid mapping within this segment.
*/
- pte = pmap_pte(pmap, sva);
- while (sva < nssva) {
+ pte = pmap_pte(pmap, va);
+ while (va < nssva) {
if (pmap_pte_v(pte)) {
#ifdef DEBUG
struct pv_entry *pv;
@@ -1525,7 +1535,7 @@
s = splvm();
while (pv->pv_pmap != NULL) {
KASSERT(pv->pv_pmap != pmap_kernel() ||
- pv->pv_va != sva);
+ pv->pv_va != va);
pv = pv->pv_next;
if (pv == NULL) {
break;
@@ -1545,10 +1555,10 @@
*/
*pte = PG_NV;
- TBIS(sva);
+ TBIS(va);
}
pte++;
- sva += NBPG;
+ va += NBPG;
}
}
}
@@ -1562,8 +1572,8 @@
Home |
Main Index |
Thread Index |
Old Index