Source-Changes-HG archive
[src/trunk]: src/sys/arch/mips/mips ANSIfy and some KNF.
details: https://anonhg.NetBSD.org/src/rev/9351b9c681cc
branches: trunk
changeset: 586315:9351b9c681cc
user: tsutsui <tsutsui%NetBSD.org@localhost>
date: Tue Dec 13 16:32:33 2005 +0000
description:
ANSIfy and some KNF.
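For context: "ANSIfy" here means converting old-style K&R function definitions to ANSI C prototype form, and KNF (NetBSD's Kernel Normal Form) covers style points visible throughout the diff, such as returning values without parentheses, a blank line in place of an empty local-declaration block, and continuation lines indented by four spaces. The sketch below illustrates the pattern with a hypothetical function (the names are not from pmap.c):

/*
 * Hypothetical example, not taken from pmap.c: the K&R form that
 * "ANSIfy" replaces.  Parameter types are declared separately and the
 * return value is parenthesized.
 */
static unsigned long
page_offset_knr(addr, pagesize)
	unsigned long addr, pagesize;
{
	return (addr & (pagesize - 1));
}

/*
 * The same function after ANSIfication and KNF cleanup: parameter
 * types in the definition itself, a blank line for the empty
 * local-declaration block, and "return" without parentheses.
 */
static unsigned long
page_offset_ansi(unsigned long addr, unsigned long pagesize)
{

	return addr & (pagesize - 1);
}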
diffstat:
sys/arch/mips/mips/pmap.c | 212 ++++++++++++++++++---------------------------
1 files changed, 83 insertions(+), 129 deletions(-)
diffs (truncated from 635 to 300 lines):
diff -r f953180e7756 -r 9351b9c681cc sys/arch/mips/mips/pmap.c
--- a/sys/arch/mips/mips/pmap.c Tue Dec 13 16:25:59 2005 +0000
+++ b/sys/arch/mips/mips/pmap.c Tue Dec 13 16:32:33 2005 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.163 2005/12/13 15:41:50 tsutsui Exp $ */
+/* $NetBSD: pmap.c,v 1.164 2005/12/13 16:32:33 tsutsui Exp $ */
/*-
* Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
@@ -74,7 +74,7 @@
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.163 2005/12/13 15:41:50 tsutsui Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.164 2005/12/13 16:32:33 tsutsui Exp $");
/*
* Manages physical address maps.
@@ -215,7 +215,7 @@
boolean_t pmap_initialized = FALSE;
#define PAGE_IS_MANAGED(pa) \
- (pmap_initialized == TRUE && vm_physseg_find(atop(pa), NULL) != -1)
+ (pmap_initialized == TRUE && vm_physseg_find(atop(pa), NULL) != -1)
#define PMAP_IS_ACTIVE(pm) \
((pm) == pmap_kernel() || \
@@ -310,8 +310,8 @@
buf_setvalimit(bufsz);
Sysmapsize = (VM_PHYS_SIZE + (ubc_nwins << ubc_winshift) +
- bufsz + 16 * NCARGS + PAGER_MAP_SIZE) / NBPG +
- (maxproc * UPAGES) + nkmempages;
+ bufsz + 16 * NCARGS + PAGER_MAP_SIZE) / NBPG +
+ (maxproc * UPAGES) + nkmempages;
#ifdef SYSVSHM
Sysmapsize += shminfo.shmall;
@@ -423,9 +423,7 @@
* from the kernel virtual address range defined by pmap_virtual_space().
*/
vaddr_t
-pmap_steal_memory(size, vstartp, vendp)
- vsize_t size;
- vaddr_t *vstartp, *vendp;
+pmap_steal_memory(vsize_t size, vaddr_t *vstartp, vaddr_t *vendp)
{
int bank, x;
u_int npgs;
@@ -471,7 +469,7 @@
va = MIPS_PHYS_TO_KSEG0(pa);
memset((caddr_t)va, 0, size);
- return (va);
+ return va;
}
/*
@@ -570,8 +568,8 @@
}
} while (mem == NULL);
- pmap->pm_segtab = stp = (struct segtab *)
- MIPS_PHYS_TO_KSEG0(VM_PAGE_TO_PHYS(mem));
+ pmap->pm_segtab = stp =
+ (struct segtab *)MIPS_PHYS_TO_KSEG0(VM_PAGE_TO_PHYS(mem));
i = NBPG / sizeof(struct segtab);
while (--i != 0) {
stp++;
@@ -587,7 +585,7 @@
pmap->pm_asid = PMAP_ASID_RESERVED;
pmap->pm_asidgen = pmap_asid_generation;
- return (pmap);
+ return pmap;
}
/*
@@ -596,8 +594,7 @@
* no valid mappings.
*/
void
-pmap_destroy(pmap)
- pmap_t pmap;
+pmap_destroy(pmap_t pmap)
{
int count;
@@ -626,7 +623,7 @@
continue;
#ifdef PARANOIADIAG
for (j = 0; j < NPTEPG; j++) {
- if ((pte+j)->pt_entry)
+ if ((pte + j)->pt_entry)
panic("pmap_destroy: segmap not empty");
}
#endif
@@ -641,8 +638,7 @@
* cause problems on machines without VCED/VCEI.
*/
if (mips_cache_virtual_alias)
- mips_dcache_inv_range((vaddr_t)pte,
- PAGE_SIZE);
+ mips_dcache_inv_range((vaddr_t)pte, PAGE_SIZE);
#endif /* MIPS3_PLUS */
uvm_pagefree(PHYS_TO_VM_PAGE(MIPS_KSEG0_TO_PHYS(pte)));
@@ -660,8 +656,7 @@
* Add a reference to the specified pmap.
*/
void
-pmap_reference(pmap)
- pmap_t pmap;
+pmap_reference(pmap_t pmap)
{
#ifdef DEBUG
@@ -679,8 +674,7 @@
* Make a new pmap (vmspace) active for the given process.
*/
void
-pmap_activate(l)
- struct lwp *l;
+pmap_activate(struct lwp *l)
{
pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap;
@@ -695,9 +689,9 @@
* Make a previously active pmap (vmspace) inactive.
*/
void
-pmap_deactivate(l)
- struct lwp *l;
+pmap_deactivate(struct lwp *l)
{
+
/* Nothing to do. */
}
@@ -708,9 +702,7 @@
* rounded to the page size.
*/
void
-pmap_remove(pmap, sva, eva)
- pmap_t pmap;
- vaddr_t sva, eva;
+pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva)
{
struct vm_page *pg;
vaddr_t nssva;
@@ -767,7 +759,7 @@
asid = (MIPS_HAS_R4K_MMU) ? (asid & 0xff) : (asid & 0xfc0) >> 6;
if (asid != pmap->pm_asid) {
panic("inconsistency for active TLB flush: %d <-> %d",
- asid, pmap->pm_asid);
+ asid, pmap->pm_asid);
}
}
#endif
@@ -818,9 +810,7 @@
* Lower the permission for all mappings to a given page.
*/
void
-pmap_page_protect(pg, prot)
- struct vm_page *pg;
- vm_prot_t prot;
+pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
{
pv_entry_t pv;
vaddr_t va;
@@ -847,7 +837,7 @@
for (; pv; pv = pv->pv_next) {
va = pv->pv_va;
pmap_protect(pv->pv_pmap, va, va + PAGE_SIZE,
- prot);
+ prot);
pmap_update(pv->pv_pmap);
}
}
@@ -858,7 +848,7 @@
pv = pg->mdpage.pvh_list;
while (pv->pv_pmap != NULL) {
pmap_remove(pv->pv_pmap, pv->pv_va,
- pv->pv_va + PAGE_SIZE);
+ pv->pv_va + PAGE_SIZE);
}
pmap_update(pv->pv_pmap);
}
@@ -869,10 +859,7 @@
* specified range of this map as requested.
*/
void
-pmap_protect(pmap, sva, eva, prot)
- pmap_t pmap;
- vaddr_t sva, eva;
- vm_prot_t prot;
+pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
{
vaddr_t nssva;
pt_entry_t *pte;
@@ -930,7 +917,7 @@
asid = (MIPS_HAS_R4K_MMU) ? (asid & 0xff) : (asid & 0xfc0) >> 6;
if (asid != pmap->pm_asid) {
panic("inconsistency for active TLB update: %d <-> %d",
- asid, pmap->pm_asid);
+ asid, pmap->pm_asid);
}
}
#endif
@@ -974,10 +961,7 @@
* XXXJRT -- need a version for each cache type.
*/
void
-pmap_procwr(p, va, len)
- struct proc *p;
- vaddr_t va;
- size_t len;
+pmap_procwr(struct proc *p, vaddr_t va, size_t len)
{
#ifdef MIPS1
pmap_t pmap;
@@ -987,11 +971,13 @@
if (MIPS_HAS_R4K_MMU) {
#ifdef MIPS3_PLUS /* XXX mmu XXX */
- /* XXX
- shouldn't need to do this for physical d$?
- should need to do this for virtual i$ if prot == EXEC?
+ /*
+ * XXX
+ * shouldn't need to do this for physical d$?
+ * should need to do this for virtual i$ if prot == EXEC?
*/
- if (p == curlwp->l_proc && mips_pdcache_way_mask < PAGE_SIZE) /* XXX check icache mask too? */
+ if (p == curlwp->l_proc && mips_pdcache_way_mask < PAGE_SIZE)
+ /* XXX check icache mask too? */
mips_icache_sync_range(va, len);
else
mips_icache_sync_range_index(va, len);
@@ -1029,12 +1015,10 @@
* Return RO protection of page.
*/
int
-pmap_is_page_ro(pmap, va, entry)
- pmap_t pmap;
- vaddr_t va;
- int entry;
+pmap_is_page_ro(pmap_t pmap, vaddr_t va, int entry)
{
- return (entry & mips_pg_ro_bit());
+
+ return entry & mips_pg_ro_bit();
}
#if defined(MIPS3_PLUS) && !defined(MIPS3_NO_PV_UNCACHED) /* XXX mmu XXX */
@@ -1074,8 +1058,7 @@
pte->pt_entry = entry;
MachTLBUpdate(pv->pv_va, entry);
}
- }
- else {
+ } else {
pte = pmap_segmap(pv->pv_pmap, pv->pv_va);
if (pte == NULL)
@@ -1107,12 +1090,7 @@
* insert this page into the given map NOW.
*/
int
-pmap_enter(pmap, va, pa, prot, flags)
- pmap_t pmap;
- vaddr_t va;
- paddr_t pa;
- vm_prot_t prot;
- int flags;
+pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
{
pt_entry_t *pte;
u_int npte;
@@ -1258,10 +1236,11 @@
panic("pmap_enter: cannot allocate segmap");
}
- pmap_segmap(pmap, va) = pte = (pt_entry_t *)
- MIPS_PHYS_TO_KSEG0(VM_PAGE_TO_PHYS(mem));
+ pmap_segmap(pmap, va) = pte =
+ (pt_entry_t *)MIPS_PHYS_TO_KSEG0(VM_PAGE_TO_PHYS(mem));
#ifdef PARANOIADIAG
- { int i;
+ {
+ int i;
for (i = 0; i < NPTEPG; i++) {
if ((pte+i)->pt_entry)
panic("pmap_enter: new segmap not empty");
@@ -1308,7 +1287,7 @@
asid = (MIPS_HAS_R4K_MMU) ? (asid & 0xff) : (asid & 0xfc0) >> 6;
if (asid != pmap->pm_asid) {
panic("inconsistency for active TLB update: %d <-> %d",
- asid, pmap->pm_asid);
+ asid, pmap->pm_asid);