Source-Changes-HG archive
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]
[src/uebayasi-xip]: src/sys To simplify things, revert global vm_page_md hash...
details: https://anonhg.NetBSD.org/src/rev/5de38a52a70b
branches: uebayasi-xip
changeset: 751729:5de38a52a70b
user: uebayasi <uebayasi%NetBSD.org@localhost>
date: Wed Jul 07 14:29:37 2010 +0000
description:
To simplify things, revert global vm_page_md hash and allocate struct
vm_page [] for XIP physical segments.
diffstat:
sys/miscfs/genfs/genfs_io.c | 19 ++-
sys/uvm/uvm_fault.c | 8 +-
sys/uvm/uvm_page.c | 225 ++++++-------------------------------------
sys/uvm/uvm_page.h | 16 +--
4 files changed, 56 insertions(+), 212 deletions(-)
diffs (truncated from 457 to 300 lines):
diff -r 1358bbeb37ba -r 5de38a52a70b sys/miscfs/genfs/genfs_io.c
--- a/sys/miscfs/genfs/genfs_io.c Tue Jul 06 07:20:26 2010 +0000
+++ b/sys/miscfs/genfs/genfs_io.c Wed Jul 07 14:29:37 2010 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: genfs_io.c,v 1.36.2.11 2010/07/06 07:20:27 uebayasi Exp $ */
+/* $NetBSD: genfs_io.c,v 1.36.2.12 2010/07/07 14:29:39 uebayasi Exp $ */
/*
* Copyright (c) 1982, 1986, 1989, 1993
@@ -31,7 +31,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: genfs_io.c,v 1.36.2.11 2010/07/06 07:20:27 uebayasi Exp $");
+__KERNEL_RCSID(0, "$NetBSD: genfs_io.c,v 1.36.2.12 2010/07/07 14:29:39 uebayasi Exp $");
#include "opt_direct_page.h"
#include "opt_xip.h"
@@ -828,11 +828,18 @@
KASSERT(error == 0);
UVMHIST_LOG(ubchist, "xip VOP_BMAP: lbn=%ld blkno=%ld run=%d", (long)lbn, (long)blkno, run, 0);
+ /*
+ * XIP page metadata assignment
+ * - Unallocated block is redirected to the dedicated zero'ed
+ * page.
+ * - Assume that struct vm_page *[] array of this segment is
+ * allocated and linearly ordered by physical address.
+ */
if (blkno < 0) {
static ONCE_DECL(xip_zero_page_inited);
RUN_ONCE(&xip_zero_page_inited, xip_zero_page_init);
- phys_addr = xip_zero_page->phys_addr;
+ pps[i] = xip_zero_page;
} else {
struct vm_physseg *seg;
@@ -842,10 +849,12 @@
phys_addr = pmap_phys_address(seg->start) +
(blkno << dev_bshift) +
(off - (lbn << fs_bshift));
+ pps[i] = seg->pgs +
+ ((phys_addr >> PAGE_SHIFT) - seg->start);
+ KASSERT(pps[i]->phys_addr == phys_addr);
+ KASSERT((pps[i]->flags & PG_DIRECT) != 0);
}
- pps[i] = uvm_phys_to_vm_page_direct(phys_addr);
-
UVMHIST_LOG(ubchist, "xip pgs %d => phys_addr=0x%lx (%p)",
i,
(long)phys_addr,
diff -r 1358bbeb37ba -r 5de38a52a70b sys/uvm/uvm_fault.c
--- a/sys/uvm/uvm_fault.c Tue Jul 06 07:20:26 2010 +0000
+++ b/sys/uvm/uvm_fault.c Wed Jul 07 14:29:37 2010 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_fault.c,v 1.166.2.8 2010/06/09 15:29:58 uebayasi Exp $ */
+/* $NetBSD: uvm_fault.c,v 1.166.2.9 2010/07/07 14:29:37 uebayasi Exp $ */
/*
*
@@ -39,7 +39,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_fault.c,v 1.166.2.8 2010/06/09 15:29:58 uebayasi Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_fault.c,v 1.166.2.9 2010/07/07 14:29:37 uebayasi Exp $");
#include "opt_uvmhist.h"
#include "opt_direct_page.h"
@@ -1657,9 +1657,9 @@
* - at this point uobjpage could be PG_WANTED (handle later)
*/
- KASSERT(uvm_pageisdirect_p(uobjpage) || uobj == NULL ||
+ KASSERT(uobj == NULL || uvm_pageisdirect_p(uobjpage) ||
uobj == uobjpage->uobject);
- KASSERT(uvm_pageisdirect_p(uobjpage) || uobj == NULL ||
+ KASSERT(uobj == NULL || uvm_pageisdirect_p(uobjpage) ||
!UVM_OBJ_IS_CLEAN(uobjpage->uobject) ||
(uobjpage->flags & PG_CLEAN) != 0);
diff -r 1358bbeb37ba -r 5de38a52a70b sys/uvm/uvm_page.c
--- a/sys/uvm/uvm_page.c Tue Jul 06 07:20:26 2010 +0000
+++ b/sys/uvm/uvm_page.c Wed Jul 07 14:29:37 2010 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_page.c,v 1.153.2.39 2010/05/31 13:26:38 uebayasi Exp $ */
+/* $NetBSD: uvm_page.c,v 1.153.2.40 2010/07/07 14:29:38 uebayasi Exp $ */
/*
* Copyright (c) 2010 The NetBSD Foundation, Inc.
@@ -97,7 +97,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.153.2.39 2010/05/31 13:26:38 uebayasi Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.153.2.40 2010/07/07 14:29:38 uebayasi Exp $");
#include "opt_ddb.h"
#include "opt_uvmhist.h"
@@ -195,10 +195,6 @@
static void uvm_pageinsert(struct uvm_object *, struct vm_page *);
static void uvm_pageremove(struct uvm_object *, struct vm_page *);
-#ifdef DIRECT_PAGE
-static void vm_page_direct_mdpage_insert(paddr_t);
-static void vm_page_direct_mdpage_remove(paddr_t);
-#endif
/*
* per-object tree of pages
@@ -780,7 +776,7 @@
static struct vm_physseg *
uvm_page_physload_common(struct vm_physseg_freelist * const,
struct vm_physseg **, int,
- const paddr_t, const paddr_t, const paddr_t, const paddr_t);
+ const paddr_t, const paddr_t);
static void
uvm_page_physunload_common(struct vm_physseg_freelist *,
struct vm_physseg **, struct vm_physseg *);
@@ -804,7 +800,7 @@
panic("uvm_page_physload: bad free list %d", free_list);
seg = uvm_page_physload_common(&vm_physmem_freelist, vm_physmem_ptrs,
- vm_nphysmem, start, end, avail_start, avail_end);
+ vm_nphysmem, start, end);
KASSERT(seg != NULL);
seg->avail_start = avail_start;
@@ -848,15 +844,38 @@
paddr_t avail_end, int prot, int flags)
{
struct vm_physseg *seg;
+ int i;
seg = uvm_page_physload_common(&vm_physdev_freelist, vm_physdev_ptrs,
- vm_nphysdev, start, end, avail_start, avail_end);
+ vm_nphysdev, start, end);
KASSERT(seg != NULL);
seg->prot = prot;
seg->flags = flags; /* XXXUEBS BUS_SPACE_MAP_* */
- for (paddr_t pf = start; pf < end; pf++)
- vm_page_direct_mdpage_insert(pf);
+
+ /*
+ * XIP page metadata
+ * - Only "phys_addr" and "vm_page_md" (== "PV" management) are used.
+ * - No "pageq" operation is done.
+ */
+ seg->pgs = kmem_zalloc(sizeof(struct vm_page) * (end - start),
+ KM_SLEEP);
+ KASSERT(seg->pgs != NULL);
+ seg->endpg = seg->pgs + (end - start);
+ seg->start = start;
+ seg->end = end;
+
+ for (i = 0; i < end - start; i++) {
+ struct vm_page *pg = seg->pgs + i;
+ paddr_t paddr = (start + i) << PAGE_SHIFT;
+
+ pg->phys_addr = paddr;
+ pg->flags |= PG_DIRECT;
+#ifdef __HAVE_VM_PAGE_MD
+ VM_MDPAGE_INIT(&pg->mdpage, paddr);
+#endif
+ }
+
vm_nphysdev++;
return seg;
}
@@ -866,8 +885,7 @@
{
struct vm_physseg *seg = cookie;
- for (paddr_t pf = seg->start; pf < seg->end; pf++)
- vm_page_direct_mdpage_remove(pf);
+ kmem_free(seg->pgs, sizeof(struct vm_page) * (seg->end - seg->start));
uvm_page_physunload_common(&vm_physdev_freelist, vm_physdev_ptrs, seg);
vm_nphysdev--;
}
@@ -876,8 +894,7 @@
static struct vm_physseg *
uvm_page_physload_common(struct vm_physseg_freelist *freelist,
struct vm_physseg **segs, int nsegs,
- const paddr_t start, const paddr_t end,
- const paddr_t avail_start, const paddr_t avail_end)
+ const paddr_t start, const paddr_t end)
{
struct vm_physseg *ps;
static int uvm_page_physseg_inited;
@@ -1156,52 +1173,13 @@
}
}
-
-#ifdef DIRECT_PAGE
-/*
- * Device pages don't have struct vm_page objects for various reasons:
- *
- * - Device pages are volatile; not paging involved. Which means we have
- * much less state information to keep for each page.
- *
- * - Volatile, directly memory-mappable devices (framebuffers, audio devices,
- * etc.) only need physical address and attribute (protection and some
- * accelaration specific to physical bus) common to all the pages.
- * Allocating vm_page objects to keep such information is wasteful.
- *
- * - Per-page MD information is only used for XIP vnodes' copy-on-write from
- * a device page to anon.
- */
-
-/* Assume struct vm_page * is aligned to 4 bytes. */
-/* XXXUEBS Consider to improve this. */
-#define VM_PAGE_DIRECT_MAGIC 0x2
-#define VM_PAGE_DIRECT_MAGIC_MASK 0x3
-#define VM_PAGE_DIRECT_MAGIC_SHIFT 2
-
-struct vm_page *
-uvm_phys_to_vm_page_direct(paddr_t pa)
-{
- paddr_t pf = pa >> PAGE_SHIFT;
- uintptr_t cookie = pf << VM_PAGE_DIRECT_MAGIC_SHIFT;
- return (void *)(cookie | VM_PAGE_DIRECT_MAGIC);
-}
-
-static inline paddr_t
-VM_PAGE_DIRECT_TO_PHYS(const struct vm_page *pg)
-{
- uintptr_t cookie = (uintptr_t)pg & ~VM_PAGE_DIRECT_MAGIC_MASK;
- paddr_t pf = cookie >> VM_PAGE_DIRECT_MAGIC_SHIFT;
- return pf << PAGE_SHIFT;
-}
-
bool
uvm_pageisdirect_p(const struct vm_page *pg)
{
- return ((uintptr_t)pg & VM_PAGE_DIRECT_MAGIC_MASK) == VM_PAGE_DIRECT_MAGIC;
+ KASSERT(pg != NULL);
+ return (pg->flags & PG_DIRECT) != 0;
}
-#endif
/*
@@ -1219,7 +1197,7 @@
#ifdef DIRECT_PAGE
psi = vm_physseg_find_direct(pf, &off);
if (psi != -1)
- return(uvm_phys_to_vm_page_direct(pa));
+ return(&vm_physdev_ptrs[psi]->pgs[off]);
#endif
psi = vm_physseg_find(pf, &off);
if (psi != -1)
@@ -1231,142 +1209,9 @@
uvm_vm_page_to_phys(const struct vm_page *pg)
{
-#ifdef DIRECT_PAGE
- if (uvm_pageisdirect_p(pg)) {
- return VM_PAGE_DIRECT_TO_PHYS(pg);
- }
-#endif
return pg->phys_addr;
}
-
-#ifdef __HAVE_VM_PAGE_MD
-#ifdef XIP
-/*
- * Device page's mdpage lookup.
- *
- * - Needed when promoting an XIP vnode page and invalidating its old mapping.
- *
- * - Hashing code is based on sys/arch/x86/x86/pmap.c.
- *
- * XXX Consider to allocate slots on-demand.
- */
-
-static struct vm_page_md *vm_page_direct_mdpage_lookup(struct vm_page *);
-
-struct vm_page_md *
-uvm_vm_page_to_md(struct vm_page *pg)
-{
-
- return uvm_pageisdirect_p(pg) ?
- vm_page_direct_mdpage_lookup(pg) : &pg->mdpage;
-}
-
-struct vm_page_direct_mdpage_entry {
- struct vm_page_md mde_mdpage;
- SLIST_ENTRY(vm_page_direct_mdpage_entry) mde_hash;
- paddr_t mde_pf;
-};
-
-/*
- * These can be optimized depending on the size of XIP'ed executables' .data
- * segments. If page size is 4K and .data is 1M, .data spans across 256
Home |
Main Index |
Thread Index |
Old Index