Source-Changes-HG archive
[src/trunk]: src/sys/uvm Revert vm_physseg allocation changes. A report says...
details: https://anonhg.NetBSD.org/src/rev/82a15bd24565
branches: trunk
changeset: 759026:82a15bd24565
user: uebayasi <uebayasi%NetBSD.org@localhost>
date: Thu Nov 25 04:45:30 2010 +0000
description:
Revert vm_physseg allocation changes. A report says that they cause
panics when mplayer is used under heavy load.
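For context on what is being backed out: the reverted change handed out struct vm_physseg entries from a freelist over a static store and tracked them through an array of pointers (vm_physmem_ptrs[], vm_physmem_store[], vm_physmem_freelist), while the restored code keeps the segments in a plain static array walked with a running count (vm_physmem[] and vm_nphysseg). A minimal, compilable sketch of the two layouts follows; all names and the segment type are made up for illustration, and it assumes NetBSD's <sys/queue.h> for the SIMPLEQ macros seen in the diff.

/*
 * Simplified model (not the NetBSD sources) of the two bookkeeping schemes.
 * The real code uses struct vm_physseg, VM_PHYSSEG_MAX and paddr_t fields.
 */
#include <sys/queue.h>

#define SEG_MAX 16                        /* stands in for VM_PHYSSEG_MAX */

struct seg_model {
	unsigned long start, end;         /* stands in for the paddr_t fields */
	SIMPLEQ_ENTRY(seg_model) list;
};

/* Scheme being reverted: pointers into a static store, handed out from a
 * freelist and kept in an array of pointers. */
SIMPLEQ_HEAD(seg_freelist, seg_model);
static struct seg_model seg_store[SEG_MAX];
static struct seg_model *seg_ptrs[SEG_MAX];
static struct seg_freelist seg_free = SIMPLEQ_HEAD_INITIALIZER(seg_free);

/* Scheme being restored: a plain array walked with a running count
 * (vm_physmem[] and vm_nphysseg, with vm_nphysmem as an alias). */
static struct seg_model physmem_model[SEG_MAX];
static int nsegs_model = 0;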
diffstat:
sys/uvm/uvm_page.c | 319 ++++++++++++++++----------------------------------
sys/uvm/uvm_page.h | 20 +--
sys/uvm/uvm_pglist.c | 40 ++----
3 files changed, 120 insertions(+), 259 deletions(-)
diffs (truncated from 625 to 300 lines):
diff -r 43c05078c01b -r 82a15bd24565 sys/uvm/uvm_page.c
--- a/sys/uvm/uvm_page.c Thu Nov 25 04:33:30 2010 +0000
+++ b/sys/uvm/uvm_page.c Thu Nov 25 04:45:30 2010 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_page.c,v 1.166 2010/11/14 15:18:07 uebayasi Exp $ */
+/* $NetBSD: uvm_page.c,v 1.167 2010/11/25 04:45:30 uebayasi Exp $ */
/*
* Copyright (c) 2010 The NetBSD Foundation, Inc.
@@ -97,7 +97,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.166 2010/11/14 15:18:07 uebayasi Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.167 2010/11/25 04:45:30 uebayasi Exp $");
#include "opt_ddb.h"
#include "opt_uvmhist.h"
@@ -125,13 +125,9 @@
* physical memory config is stored in vm_physmem.
*/
-SIMPLEQ_HEAD(vm_physseg_freelist, vm_physseg);
-
-struct vm_physseg *vm_physmem_ptrs[VM_PHYSSEG_MAX];
-int vm_nphysmem = 0;
-static struct vm_physseg vm_physmem_store[VM_PHYSSEG_MAX];
-static struct vm_physseg_freelist vm_physmem_freelist =
- SIMPLEQ_HEAD_INITIALIZER(vm_physmem_freelist);
+struct vm_physseg vm_physmem[VM_PHYSSEG_MAX]; /* XXXCDC: uvm.physmem */
+int vm_nphysseg = 0; /* XXXCDC: uvm.nphysseg */
+#define vm_nphysmem vm_nphysseg
/*
* Some supported CPUs in a given architecture don't support all
@@ -185,19 +181,6 @@
static void uvm_pageinsert(struct uvm_object *, struct vm_page *);
static void uvm_pageremove(struct uvm_object *, struct vm_page *);
-static struct vm_physseg *uvm_physseg_alloc(
- struct vm_physseg_freelist * const, struct vm_physseg **, int,
- const paddr_t, const paddr_t);
-#if 0
-static void uvm_physseg_free(struct vm_physseg_freelist *,
- struct vm_physseg **, struct vm_physseg *);
-#endif
-static void uvm_physseg_init(void);
-static void uvm_physseg_insert(struct vm_physseg *,
- struct vm_physseg **, int);
-#if 0
-static void uvm_physseg_remove(struct vm_physseg **, struct vm_physseg *);
-#endif
/*
* per-object tree of pages
@@ -701,6 +684,7 @@
panic("uvm_page_physget: out of memory!");
vm_nphysmem--;
for (x = lcv ; x < vm_nphysmem ; x++)
+ /* structure copy */
VM_PHYSMEM_PTR_SWAP(x, x + 1);
}
return (true);
@@ -718,6 +702,7 @@
panic("uvm_page_physget: out of memory!");
vm_nphysmem--;
for (x = lcv ; x < vm_nphysmem ; x++)
+ /* structure copy */
VM_PHYSMEM_PTR_SWAP(x, x + 1);
}
return (true);
@@ -748,6 +733,7 @@
panic("uvm_page_physget: out of memory!");
vm_nphysmem--;
for (x = lcv ; x < vm_nphysmem ; x++)
+ /* structure copy */
VM_PHYSMEM_PTR_SWAP(x, x + 1);
}
return (true);
@@ -782,18 +768,31 @@
uvm_page_physload(paddr_t start, paddr_t end, paddr_t avail_start,
paddr_t avail_end, int free_list)
{
- struct vm_physseg *seg;
- int lcv;
+ int preload, lcv;
+ psize_t npages;
+ struct vm_page *pgs;
+ struct vm_physseg *ps;
+ if (uvmexp.pagesize == 0)
+ panic("uvm_page_physload: page size not set!");
if (free_list >= VM_NFREELIST || free_list < VM_FREELIST_DEFAULT)
panic("uvm_page_physload: bad free list %d", free_list);
+ if (start >= end)
+ panic("uvm_page_physload: start >= end");
- seg = uvm_physseg_alloc(&vm_physmem_freelist, vm_physmem_ptrs,
- vm_nphysmem, start, end);
- KASSERT(seg != NULL);
+ /*
+ * do we have room?
+ */
- seg->avail_start = avail_start;
- seg->avail_end = avail_end;
+ if (vm_nphysmem == VM_PHYSSEG_MAX) {
+ printf("uvm_page_physload: unable to load physical memory "
+ "segment\n");
+ printf("\t%d segments allocated, ignoring 0x%llx -> 0x%llx\n",
+ VM_PHYSSEG_MAX, (long long)start, (long long)end);
+ printf("\tincrease VM_PHYSSEG_MAX\n");
+ return;
+ }
+
/*
* check to see if this is a "preload" (i.e. uvm_page_init hasn't been
* called yet, so malloc is not available).
@@ -803,188 +802,112 @@
if (VM_PHYSMEM_PTR(lcv)->pgs)
break;
}
- if (lcv == vm_nphysmem) {
- seg->pgs = NULL;
- seg->lastpg = NULL;
- seg->free_list = free_list;
- } else {
- panic("uvm_page_physload: "
- "tried to add RAM after uvm_page_init");
- }
- vm_nphysmem++;
-}
+ preload = (lcv == vm_nphysmem);
-#if 0
-void
-uvm_page_physunload(void *cookie)
-{
- struct vm_physseg *seg = cookie;
-
- panic("memory unload is not supported yet");
-
- uvm_physseg_free(&vm_physmem_freelist, vm_physmem_ptrs, seg);
- vm_nphysmem--;
-}
-#endif
+ /*
+ * if VM is already running, attempt to malloc() vm_page structures
+ */
-int uvm_physseg_inited;
-
-static struct vm_physseg *
-uvm_physseg_alloc(struct vm_physseg_freelist *freelist,
- struct vm_physseg **segs, int nsegs,
- const paddr_t start, const paddr_t end)
-{
- struct vm_physseg *ps;
-
- if (uvmexp.pagesize == 0)
- panic("uvm_page_physload: page size not set!");
- if (start >= end)
- panic("uvm_page_physload: start >= end");
- if (nsegs == VM_PHYSSEG_MAX)
- panic("uvm_page_physload: unable to load physical memory "
- "segment\n"
- "\t%d segments allocated, ignoring 0x%llx -> 0x%llx\n"
- "\tincrease VM_PHYSSEG_MAX\n",
- VM_PHYSSEG_MAX, (long long)start, (long long)end);
-
- if (uvm_physseg_inited == 0) {
- uvm_physseg_inited = 1;
- uvm_physseg_init();
+ if (!preload) {
+ panic("uvm_page_physload: tried to add RAM after vm_mem_init");
+ } else {
+ pgs = NULL;
+ npages = 0;
}
- ps = SIMPLEQ_FIRST(freelist);
- KASSERT(ps != NULL);
- SIMPLEQ_REMOVE_HEAD(freelist, list);
-
- ps->start = start;
- ps->end = end;
- uvm_physseg_insert(ps, segs, nsegs);
- return ps;
-}
-
-#if 0
-void
-uvm_physseg_free(struct vm_physseg_freelist *freelist,
- struct vm_physseg **segs, struct vm_physseg *seg)
-{
-
- uvm_physseg_remove(segs, seg);
- SIMPLEQ_INSERT_TAIL(freelist, seg, list);
-}
-#endif
-
-static void
-uvm_physseg_init(void)
-{
- int lcv;
-
- for (lcv = 0; lcv < VM_PHYSSEG_MAX; lcv++) {
- SIMPLEQ_INSERT_TAIL(&vm_physmem_freelist,
- &vm_physmem_store[lcv], list);
- }
-}
-
-static void
-uvm_physseg_insert(struct vm_physseg *ps,
- struct vm_physseg **segs, int nsegs)
-{
+ /*
+ * now insert us in the proper place in vm_physmem[]
+ */
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_RANDOM)
/* random: put it at the end (easy!) */
- segs[nsegs] = ps;
+ ps = VM_PHYSMEM_PTR(vm_nphysmem);
#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
{
- int lcv;
int x;
/* sort by address for binary search */
- for (lcv = 0 ; lcv < nsegs ; lcv++)
- if (ps->start < segs[lcv]->start)
+ for (lcv = 0 ; lcv < vm_nphysmem ; lcv++)
+ if (start < VM_PHYSMEM_PTR(lcv)->start)
break;
+ ps = VM_PHYSMEM_PTR(lcv);
/* move back other entries, if necessary ... */
- for (x = nsegs ; x > lcv ; x--)
- segs[x] = segs[x - 1];
- segs[lcv] = ps;
+ for (x = vm_nphysmem ; x > lcv ; x--)
+ /* structure copy */
+ VM_PHYSMEM_PTR_SWAP(x, x - 1);
}
#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
{
- int lcv;
int x;
/* sort by largest segment first */
- for (lcv = 0 ; lcv < nsegs ; lcv++)
- if ((ps->end - ps->start) >
- (segs[lcv]->end - segs[lcv]->start))
+ for (lcv = 0 ; lcv < vm_nphysmem ; lcv++)
+ if ((end - start) >
+ (VM_PHYSMEM_PTR(lcv)->end - VM_PHYSMEM_PTR(lcv)->start))
break;
+ ps = VM_PHYSMEM_PTR(lcv);
/* move back other entries, if necessary ... */
- for (x = nsegs ; x > lcv ; x--)
- segs[x] = segs[x - 1];
- segs[lcv] = ps;
+ for (x = vm_nphysmem ; x > lcv ; x--)
+ /* structure copy */
+ VM_PHYSMEM_PTR_SWAP(x, x - 1);
}
#else
panic("uvm_page_physload: unknown physseg strategy selected!");
#endif
+
+ ps->start = start;
+ ps->end = end;
+ ps->avail_start = avail_start;
+ ps->avail_end = avail_end;
+ if (preload) {
+ ps->pgs = NULL;
+ } else {
+ ps->pgs = pgs;
+ ps->lastpg = pgs + npages;
+ }
+ ps->free_list = free_list;
+ vm_nphysmem++;
+
+ if (!preload) {
+ uvmpdpol_reinit();
+ }
}
-#if 0
-static void
-uvm_physseg_remove(struct vm_physseg **segs, struct vm_physseg *seg)
-{
- struct vm_physseg **segp;
+/*
+ * when VM_PHYSSEG_MAX is 1, we can simplify these functions
+ */
- for (segp = segs; segp < segs + VM_PHYSSEG_MAX; segp++)
- if (*segp == seg)
- break;
- if (segp == segs + VM_PHYSSEG_MAX)
- panic("unknown segment: %p", seg);