Source-Changes-HG archive
[src/trunk]: src/sys/arch/mips/mips whitespace cleanup.
details: https://anonhg.NetBSD.org/src/rev/32f25a4bd83a
branches: trunk
changeset: 764659:32f25a4bd83a
user: matt <matt%NetBSD.org@localhost>
date: Fri Apr 29 22:18:53 2011 +0000
description:
whitespace cleanup.
diffstat:
sys/arch/mips/mips/pmap.c | 25 +++++++++++++------------
1 files changed, 13 insertions(+), 12 deletions(-)
diffs (102 lines):
diff -r ae52dfa75581 -r 32f25a4bd83a sys/arch/mips/mips/pmap.c
--- a/sys/arch/mips/mips/pmap.c Fri Apr 29 22:18:16 2011 +0000
+++ b/sys/arch/mips/mips/pmap.c Fri Apr 29 22:18:53 2011 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.198 2011/03/15 07:31:33 matt Exp $ */
+/* $NetBSD: pmap.c,v 1.199 2011/04/29 22:18:53 matt Exp $ */
/*-
* Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
@@ -67,7 +67,7 @@
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.198 2011/03/15 07:31:33 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.199 2011/04/29 22:18:53 matt Exp $");
/*
* Manages physical address maps.
@@ -271,7 +271,7 @@
#endif
},
};
-struct pmap *const kernel_pmap_ptr = &kernel_pmap_store.kernel_pmap;
+struct pmap * const kernel_pmap_ptr = &kernel_pmap_store.kernel_pmap;
paddr_t mips_avail_start; /* PA of first available physical page */
paddr_t mips_avail_end; /* PA of last available physical page */
@@ -660,8 +660,8 @@
printf("%s: seg %u: %#"PRIxPADDR" %#"PRIxPADDR" %#"PRIxPADDR" %#"PRIxPADDR"\n",
__func__, bank,
- seg->avail_start, seg->start,
- seg->avail_end, seg->end);
+ seg->avail_start, seg->start,
+ seg->avail_end, seg->end);
if (seg->avail_start != seg->start
|| seg->avail_start >= seg->avail_end) {
@@ -773,11 +773,11 @@
* Disable sosend_loan() in src/sys/kern/uipc_socket.c
* on MIPS3 CPUs to avoid possible virtual cache aliases
* and uncached mappings in pmap_enter_pv().
- *
+ *
* Ideally, read only shared mapping won't cause aliases
* so pmap_enter_pv() should handle any shared read only
* mappings without uncached ops like ARM pmap.
- *
+ *
* On the other hand, R4000 and R4400 have the virtual
* coherency exceptions which will happen even on read only
* mappings, so we always have to disable sosend_loan()
@@ -1491,11 +1491,12 @@
if (!MIPS_HAS_R4K_MMU
&& pg != NULL
&& prot == (VM_PROT_READ | VM_PROT_EXECUTE)) {
+ struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
PMAP_COUNT(enter_exec_mapping);
if (!PG_MD_EXECPAGE_P(md)) {
mips_icache_sync_range(MIPS_PHYS_TO_KSEG0(pa),
PAGE_SIZE);
- pmap_set_page_attributes(pg, PG_MD_EXECPAGE);
+ pmap_set_mdpage_attributes(md, PG_MD_EXECPAGE);
PMAP_COUNT(exec_syncicache_entry);
}
}
@@ -1695,7 +1696,7 @@
(void)PG_MD_PVLIST_LOCK(md, false);
pv_entry_t pv = &md->pvh_first;
if (pv->pv_pmap == NULL) {
- pv->pv_va = va;
+ pv->pv_va = va;
} else if (PG_MD_CACHED_P(md)
&& mips_cache_badalias(pv->pv_va, va)) {
mips_dcache_wbinv_range(va, PAGE_SIZE);
@@ -2238,7 +2239,7 @@
/*
* To allocate a PV, we have to release the PVLIST lock
* so get the page generation. We allocate the PV, and
- * then reacquire the lock.
+ * then reacquire the lock.
*/
PG_MD_PVLIST_UNLOCK(md);
#endif
@@ -2496,7 +2497,7 @@
if (MIPS_CACHE_VIRTUAL_ALIAS)
mips_dcache_inv_range(va, PAGE_SIZE);
#endif
- struct vm_page *pg = PHYS_TO_VM_PAGE(pa);
+ struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
KASSERT(pg != NULL);
pmap_clear_mdpage_attributes(VM_PAGE_TO_MD(pg), PG_MD_POOLPAGE);
uvm_pagefree(pg);
@@ -2623,7 +2624,7 @@
KASSERT(pv->pv_pmap == NULL);
pv->pv_va = va;
if (PG_MD_CACHED_P(md) && mips_cache_badalias(last_va, va))
- mips_dcache_wbinv_range_index(last_va, PAGE_SIZE);
+ mips_dcache_wbinv_range_index(last_va, PAGE_SIZE);
PG_MD_PVLIST_UNLOCK(md);
}
#endif
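
For context on the mips_cache_badalias() checks visible in the hunks above: a virtually indexed data cache can hold one physical page in two different cache lines when the page is mapped at two virtual addresses whose index bits differ, which is why the pmap writes back and invalidates the conflicting range. Below is a minimal standalone sketch of that alias test, assuming a fixed cache way size; the mask, constants, and names are illustrative, not the actual NetBSD definitions:

#include <stdbool.h>
#include <stdint.h>

#define PAGE_SIZE	4096u
/* Assumed way size of a virtually indexed cache (e.g. 16KB, 2-way). */
#define CACHE_WAY_SIZE	8192u

/*
 * Two virtual mappings of one physical page alias badly when the
 * index-selecting bits above the page offset differ: the page can
 * then occupy two cache lines at once, and stores through one
 * mapping stay invisible through the other until a writeback.
 */
static bool
cache_badalias(uintptr_t va1, uintptr_t va2)
{
	const uintptr_t alias_mask =
	    (CACHE_WAY_SIZE - 1) & ~(uintptr_t)(PAGE_SIZE - 1);

	return ((va1 ^ va2) & alias_mask) != 0;
}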
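
The hunk at line 1491 also illustrates the lazy icache-sync scheme behind PG_MD_EXECPAGE: the instruction cache is synced for a page only the first time it is entered with an executable protection, and the attribute set via pmap_set_mdpage_attributes() records that the sync already happened. A rough sketch of that pattern follows, with simplified stand-ins; the struct, flag, and helpers here are hypothetical, not the pmap.c API:

#include <stddef.h>
#include <stdint.h>

#define MD_EXECPAGE	0x01	/* icache already synced for this page */
#define PAGE_SIZE	4096u

struct page_md {
	unsigned attrs;		/* per-page attribute bits */
};

/* Stand-in for mips_icache_sync_range(); a real port flushes here. */
static void
icache_sync_range(uintptr_t va, size_t len)
{
	(void)va;
	(void)len;
}

/*
 * First executable mapping of a page: sync the icache once and mark
 * the page, so later executable mappings skip the expensive sync.
 */
static void
enter_exec_mapping(struct page_md *md, uintptr_t kva)
{
	if ((md->attrs & MD_EXECPAGE) == 0) {
		icache_sync_range(kva, PAGE_SIZE);
		md->attrs |= MD_EXECPAGE;
	}
}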