Source-Changes-HG archive
[src/trunk]: src/sys/arch/ia64/ia64 Remove null lines.
details: https://anonhg.NetBSD.org/src/rev/0df91323adc6
branches: trunk
changeset: 785653:0df91323adc6
user: kiyohara <kiyohara%NetBSD.org@localhost>
date: Sun Mar 24 06:56:14 2013 +0000
description:
Remove null lines.
Remove trailing TABs and whitespace.
s/^ /\t/.
diffstat:
sys/arch/ia64/ia64/pmap.c | 715 ++++++++++++++++++++-------------------------
1 files changed, 313 insertions(+), 402 deletions(-)
diffs (truncated from 1360 to 300 lines):
diff -r f18f465cb00c -r 0df91323adc6 sys/arch/ia64/ia64/pmap.c
--- a/sys/arch/ia64/ia64/pmap.c Sun Mar 24 06:27:52 2013 +0000
+++ b/sys/arch/ia64/ia64/pmap.c Sun Mar 24 06:56:14 2013 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.30 2013/03/24 06:27:52 kiyohara Exp $ */
+/* $NetBSD: pmap.c,v 1.31 2013/03/24 06:56:14 kiyohara Exp $ */
/*-
@@ -85,7 +85,7 @@
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.30 2013/03/24 06:27:52 kiyohara Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.31 2013/03/24 06:56:14 kiyohara Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@@ -130,7 +130,7 @@
struct ia64_bucket *pmap_vhpt_bucket;
int pmap_vhpt_nbuckets;
-kmutex_t pmap_vhptlock; /* VHPT collision chain lock */
+kmutex_t pmap_vhptlock; /* VHPT collision chain lock */
int pmap_vhpt_inserts;
int pmap_vhpt_resident;
@@ -219,20 +219,20 @@
#endif /* MULTIPROCESSOR || LOCKDEBUG */
-#define pmap_accessed(lpte) ((lpte)->pte & PTE_ACCESSED)
-#define pmap_dirty(lpte) ((lpte)->pte & PTE_DIRTY)
-#define pmap_managed(lpte) ((lpte)->pte & PTE_MANAGED)
-#define pmap_ppn(lpte) ((lpte)->pte & PTE_PPN_MASK)
-#define pmap_present(lpte) ((lpte)->pte & PTE_PRESENT)
-#define pmap_prot(lpte) (((lpte)->pte & PTE_PROT_MASK) >> 56)
-#define pmap_wired(lpte) ((lpte)->pte & PTE_WIRED)
+#define pmap_accessed(lpte) ((lpte)->pte & PTE_ACCESSED)
+#define pmap_dirty(lpte) ((lpte)->pte & PTE_DIRTY)
+#define pmap_managed(lpte) ((lpte)->pte & PTE_MANAGED)
+#define pmap_ppn(lpte) ((lpte)->pte & PTE_PPN_MASK)
+#define pmap_present(lpte) ((lpte)->pte & PTE_PRESENT)
+#define pmap_prot(lpte) (((lpte)->pte & PTE_PROT_MASK) >> 56)
+#define pmap_wired(lpte) ((lpte)->pte & PTE_WIRED)
-#define pmap_clear_accessed(lpte) (lpte)->pte &= ~PTE_ACCESSED
-#define pmap_clear_dirty(lpte) (lpte)->pte &= ~PTE_DIRTY
-#define pmap_clear_present(lpte) (lpte)->pte &= ~PTE_PRESENT
-#define pmap_clear_wired(lpte) (lpte)->pte &= ~PTE_WIRED
+#define pmap_clear_accessed(lpte) (lpte)->pte &= ~PTE_ACCESSED
+#define pmap_clear_dirty(lpte) (lpte)->pte &= ~PTE_DIRTY
+#define pmap_clear_present(lpte) (lpte)->pte &= ~PTE_PRESENT
+#define pmap_clear_wired(lpte) (lpte)->pte &= ~PTE_WIRED
-#define pmap_set_wired(lpte) (lpte)->pte |= PTE_WIRED
+#define pmap_set_wired(lpte) (lpte)->pte |= PTE_WIRED
/*
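
The retabbed macros above are simple bit tests and read-modify-write
updates on the 64-bit pte word of a struct ia64_lpte. A minimal,
self-contained sketch of that pattern; the struct and the PTE_* bit
positions below are stand-ins, not the real <machine/pte.h> values:

    #include <stdint.h>
    #include <stdio.h>

    struct ia64_lpte { uint64_t pte; };          /* simplified stand-in */
    #define PTE_PRESENT (1UL << 0)               /* illustrative bits only */
    #define PTE_DIRTY   (1UL << 1)
    #define PTE_WIRED   (1UL << 2)

    #define pmap_present(lpte)     ((lpte)->pte & PTE_PRESENT)
    #define pmap_dirty(lpte)       ((lpte)->pte & PTE_DIRTY)
    #define pmap_clear_dirty(lpte) ((lpte)->pte &= ~PTE_DIRTY)
    #define pmap_set_wired(lpte)   ((lpte)->pte |= PTE_WIRED)

    int
    main(void)
    {
        struct ia64_lpte l = { .pte = PTE_PRESENT | PTE_DIRTY };

        if (pmap_present(&l) && pmap_dirty(&l))
            pmap_clear_dirty(&l);    /* e.g. after cleaning the page */
        pmap_set_wired(&l);
        printf("pte = %#lx\n", (unsigned long)l.pte);
        return 0;
    }
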
@@ -323,29 +323,14 @@
size = round_page(size);
npgs = atop(size);
-#if 0
- printf("PSM: size 0x%lx (npgs 0x%x)\n", size, npgs);
-#endif
-
for (lcv = 0; lcv < vm_nphysseg; lcv++) {
if (uvm.page_init_done == true)
panic("pmap_steal_memory: called _after_ bootstrap");
-#if 0
- printf(" bank %d: avail_start 0x%lx, start 0x%lx, "
- "avail_end 0x%lx\n", lcv, VM_PHYSMEM_PTR(lcv)->avail_start,
- VM_PHYSMEM_PTR(lcv)->start, VM_PHYSMEM_PTR(lcv)->avail_end);
-#endif
-
if (VM_PHYSMEM_PTR(lcv)->avail_start != VM_PHYSMEM_PTR(lcv)->start ||
VM_PHYSMEM_PTR(lcv)->avail_start >= VM_PHYSMEM_PTR(lcv)->avail_end)
continue;
-#if 0
- printf(" avail_end - avail_start = 0x%lx\n",
- VM_PHYSMEM_PTR(lcv)->avail_end - VM_PHYSMEM_PTR(lcv)->avail_start);
-#endif
-
if ((VM_PHYSMEM_PTR(lcv)->avail_end - VM_PHYSMEM_PTR(lcv)->avail_start)
< npgs)
continue;
@@ -357,11 +342,11 @@
VM_PHYSMEM_PTR(lcv)->avail_start += npgs;
VM_PHYSMEM_PTR(lcv)->start += npgs;
-
/*
* Have we used up this segment?
*/
- if (VM_PHYSMEM_PTR(lcv)->avail_start == VM_PHYSMEM_PTR(lcv)->end) {
+ if (VM_PHYSMEM_PTR(lcv)->avail_start ==
+ VM_PHYSMEM_PTR(lcv)->end) {
if (vm_nphysseg == 1)
panic("pmap_steal_memory: out of memory!");
@@ -385,7 +370,6 @@
panic("pmap_steal_memory: no memory to steal");
}
-
/*
* pmap_steal_vhpt_memory: Derived from alpha/pmap.c:pmap_steal_memory()
* Note: This function is not visible outside the pmap module.
@@ -393,35 +377,21 @@
* Assumptions: size is always a power of 2.
* Returns: Allocated memory at a naturally aligned address
*/
-
static vaddr_t
pmap_steal_vhpt_memory(vsize_t size)
{
int lcv, npgs, x;
vaddr_t va;
paddr_t pa;
-
paddr_t vhpt_start = 0, start1, start2, end1, end2;
size = round_page(size);
npgs = atop(size);
-#if 1
- printf("VHPTPSM: size 0x%lx (npgs 0x%x)\n", size, npgs);
-#endif
-
for (lcv = 0; lcv < vm_nphysseg; lcv++) {
if (uvm.page_init_done == true)
panic("pmap_vhpt_steal_memory: called _after_ bootstrap");
-#if 1
- printf(" lcv %d: avail_start 0x%lx, start 0x%lx, "
- "avail_end 0x%lx\n", lcv, VM_PHYSMEM_PTR(lcv)->avail_start,
- VM_PHYSMEM_PTR(lcv)->start, VM_PHYSMEM_PTR(lcv)->avail_end);
- printf(" avail_end - avail_start = 0x%lx\n",
- VM_PHYSMEM_PTR(lcv)->avail_end - VM_PHYSMEM_PTR(lcv)->avail_start);
-#endif
-
if (VM_PHYSMEM_PTR(lcv)->avail_start != VM_PHYSMEM_PTR(lcv)->start || /* XXX: ??? */
VM_PHYSMEM_PTR(lcv)->avail_start >= VM_PHYSMEM_PTR(lcv)->avail_end)
continue;
@@ -436,7 +406,6 @@
start2 = vhpt_start + npgs;
end2 = VM_PHYSMEM_PTR(lcv)->avail_end;
-
/* Case 1: Doesn't fit. skip this segment */
if (start2 > end2) {
@@ -449,44 +418,40 @@
* - Re-insert fragments via uvm_page_physload();
*/
- /*
+ /*
* We _fail_ on a vhpt request which exhausts memory.
*/
if (start1 == end1 &&
start2 == end2 &&
vm_nphysseg == 1) {
-#ifdef DEBUG
- printf("pmap_vhpt_steal_memory: out of memory!");
+#ifdef DEBUG
+ printf("pmap_vhpt_steal_memory: out of memory!");
#endif
- return -1;
- }
+ return -1;
+ }
/* Remove this segment from the list. */
vm_nphysseg--;
- // physmem -= end2 - start1;
- for (x = lcv; x < vm_nphysseg; x++) {
+ for (x = lcv; x < vm_nphysseg; x++)
/* structure copy */
VM_PHYSMEM_PTR_SWAP(x, x + 1);
- }
/* Case 2: Perfect fit - skip segment reload. */
if (start1 == end1 && start2 == end2) break;
- /* Case 3: Left unfit - reload it.
- */
+ /* Case 3: Left unfit - reload it.
+ */
- if (start1 != end1) {
+ if (start1 != end1)
uvm_page_physload(start1, end1, start1, end1,
VM_FREELIST_DEFAULT);
- }
-
+
/* Case 4: Right unfit - reload it. */
- if (start2 != end2) {
+ if (start2 != end2)
uvm_page_physload(start2, end2, start2, end2,
VM_FREELIST_DEFAULT);
- }
/* Case 5: Both unfit - Redundant, isn't it ? */
break;
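
Unlike plain pmap_steal_memory(), the VHPT variant needs a naturally
aligned block, so it may have to take it out of the middle of a segment:
the whole segment is unloaded, and any leftover pieces on either side of
the stolen block are handed back with uvm_page_physload(). A sketch of
just the split arithmetic behind cases 1 through 5, with invented names:

    #include <stdint.h>
    #include <stdio.h>

    /* Given a segment [seg_start, seg_end) and a naturally aligned block
     * of npgs pages stolen at block_start, report the leftover fragments.
     * Illustrative only; pmap.c re-inserts them via uvm_page_physload(). */
    static void
    split_segment(uintptr_t seg_start, uintptr_t seg_end,
        uintptr_t block_start, uintptr_t npgs)
    {
        uintptr_t start1 = seg_start,          end1 = block_start;
        uintptr_t start2 = block_start + npgs, end2 = seg_end;

        if (start2 > end2) {                /* Case 1: doesn't fit */
            printf("does not fit\n");
            return;
        }
        if (start1 == end1 && start2 == end2) {
            printf("perfect fit\n");        /* Case 2: nothing left over */
            return;
        }
        if (start1 != end1)                 /* Case 3: left fragment */
            printf("reload left  [%#lx, %#lx)\n",
                (unsigned long)start1, (unsigned long)end1);
        if (start2 != end2)                 /* Case 4: right fragment */
            printf("reload right [%#lx, %#lx)\n",
                (unsigned long)start2, (unsigned long)end2);
        /* Case 5: both fragments, already handled by cases 3 and 4. */
    }

    int
    main(void)
    {
        split_segment(0x100, 0x400, 0x200, 0x80);
        return 0;
    }
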
@@ -512,10 +477,6 @@
return va;
}
-
-
-
-
/*
* pmap_bootstrap:
*
@@ -603,7 +564,6 @@
*/
mutex_init(&pmap_rid_lock, MUTEX_DEFAULT, IPL_VM);
-
/*
* Compute the number of pages kmem_map will have.
*/
@@ -627,13 +587,13 @@
* Allocate some memory for initial kernel 'page tables'.
*/
ia64_kptdir = (void *)uvm_pageboot_alloc((nkpt + 1) * PAGE_SIZE);
- for (i = 0; i < nkpt; i++) {
- ia64_kptdir[i] = (void*)( (vaddr_t)ia64_kptdir + PAGE_SIZE * (i + 1));
- }
+ for (i = 0; i < nkpt; i++)
+ ia64_kptdir[i] =
+ (void*)((vaddr_t)ia64_kptdir + PAGE_SIZE * (i + 1));
kernel_vm_end = nkpt * PAGE_SIZE * NKPTEPG + VM_MIN_KERNEL_ADDRESS -
- VM_GATEWAY_SIZE;
-
+ VM_GATEWAY_SIZE;
+
/*
* Initialize the pmap pools and list.
*/
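
The reflowed loop above builds the initial kernel 'page tables' out of
one contiguous boot allocation: the first page is a directory whose slot
i points at the (i+1)-th page of the block. A stand-alone sketch of that
arithmetic, using malloc() in place of uvm_pageboot_alloc() and an 8 KB
page size picked purely for illustration:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SIZE 8192          /* assumed page size, example only */

    int
    main(void)
    {
        int nkpt = 4;               /* example leaf-page count */
        /* One directory page plus nkpt leaf pages, contiguous. */
        void **kptdir = malloc((size_t)(nkpt + 1) * PAGE_SIZE);
        if (kptdir == NULL)
            return 1;

        /* Directory slot i points at the (i+1)-th page of the block. */
        for (int i = 0; i < nkpt; i++)
            kptdir[i] = (void *)((uintptr_t)kptdir + PAGE_SIZE * (i + 1));

        printf("dir %p, slot 0 -> %p\n", (void *)kptdir, kptdir[0]);
        free(kptdir);
        return 0;
    }
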
@@ -643,19 +603,19 @@
/* XXX: Need to convert ia64_kptdir[][] to a pool. ????*/
- /* The default pool allocator uses uvm_km_alloc & friends.
- * XXX: We should be using regular vm_alloced mem for regular, non-kernel ptesl
+ /* The default pool allocator uses uvm_km_alloc & friends.
+ * XXX: We should be using regular vm_alloced mem for regular,
+ * non-kernel ptesl
*/
pool_init(&pmap_ia64_lpte_pool, sizeof (struct ia64_lpte),
- sizeof(void *), 0, 0, "ptpl", NULL, IPL_NONE);
+ sizeof(void *), 0, 0, "ptpl", NULL, IPL_NONE);
pool_init(&pmap_pv_pool, sizeof (struct pv_entry), sizeof(void *),
0, 0, "pvpl", &pmap_pv_page_allocator, IPL_NONE);
TAILQ_INIT(&pmap_all_pmaps);
-
/*
* Figure out a useful size for the VHPT, based on the size of
* physical memory and try to locate a region which is large
@@ -674,10 +634,10 @@
while (size < physmem * 32) {
pmap_vhpt_log2size++;
size <<= 1;
- }
- }
- else
- if (pmap_vhpt_log2size < 15) pmap_vhpt_log2size = 15;
+ }
+ } else
+ if (pmap_vhpt_log2size < 15)
+ pmap_vhpt_log2size = 15;
if (pmap_vhpt_log2size > 61) pmap_vhpt_log2size = 61;
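
The sizing policy in this hunk: when pmap_vhpt_log2size is not preset,
grow the table until it reaches roughly 32 bytes per physical page, and
in all cases clamp the exponent to the range [15, 61]. A stand-alone
rendering of that computation; the starting value and the physmem page
count are assumptions here, since the initialization lies outside the
quoted hunk:

    #include <stdio.h>

    int
    main(void)
    {
        unsigned long physmem = 1UL << 20;  /* example: 1M pages */
        int log2size = 0;                   /* 0: auto-size (assumed) */
        unsigned long size;

        if (log2size == 0) {
            log2size = 15;                  /* floor: 32 KB (assumed) */
            size = 1UL << log2size;
            while (size < physmem * 32) {   /* ~32 bytes per page */
                log2size++;
                size <<= 1;
            }
        } else if (log2size < 15)
            log2size = 15;
        if (log2size > 61)                  /* ceiling from pmap.c */
            log2size = 61;

        printf("VHPT log2size = %d (%lu bytes)\n",
            log2size, 1UL << log2size);
        return 0;
    }
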
@@ -690,7 +650,7 @@
/* allocate size bytes aligned at size */
/* #ifdef MULTIPROCESSOR, then (size * MAXCPU) bytes */
- base = pmap_steal_vhpt_memory(size);
+ base = pmap_steal_vhpt_memory(size);
if (!base) {
/* Can't fit, try next smaller size. */
@@ -732,8 +692,7 @@
pte[i].tag = 1UL << 63; /* Invalid tag */
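
The last visible line stamps each VHPT entry with a tag whose bit 63 is
set. On ia64 the translation tags the CPU computes have that bit clear,
so a slot initialized this way can never match a lookup, which is what
the 'Invalid tag' comment refers to. A tiny illustration of the
convention, with a simplified entry layout invented for the example:

    #include <stdint.h>
    #include <stdio.h>

    struct vhpt_entry { uint64_t pte, itir, tag; };  /* simplified layout */
    #define VHPT_TAG_INVALID (1UL << 63)

    int
    main(void)
    {
        struct vhpt_entry vhpt[4];

        /* Mark every slot invalid: a tag with bit 63 set never matches
         * a hardware-computed translation tag. */
        for (unsigned i = 0; i < 4; i++)
            vhpt[i].tag = VHPT_TAG_INVALID;

        uint64_t lookup_tag = 0x1234;       /* example hardware tag */
        printf("slot 0 matches: %d\n", vhpt[0].tag == lookup_tag);
        return 0;
    }
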