Source-Changes-HG archive
[src/uebayasi-xip]: src/sys/uvm Introduce uvm_page_physload_device(). This r...
details: https://anonhg.NetBSD.org/src/rev/7c3bb638dda5
branches: uebayasi-xip
changeset: 751592:7c3bb638dda5
user: uebayasi <uebayasi@NetBSD.org>
date: Tue Feb 23 07:44:25 2010 +0000
description:
Introduce uvm_page_physload_device(). This registers the physical
address range of a device, similar to what uvm_page_physload() does for
memory. For now, it is meant to be called by MD code; the design will
have to be reconsidered when we manage mmap'able character devices.
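For illustration, a minimal sketch of what such an MD call might look
like (the device name, addresses, and free list below are hypothetical,
not part of this change):

	/* hypothetical MD attach code registering an XIP flash window */
	#include <uvm/uvm_extern.h>

	#define FLASH_PA_START	0x10000000	/* assumed device base */
	#define FLASH_PA_END	0x11000000	/* assumed device end */

	void
	flash_attach_xip(void)
	{

		/*
		 * Arguments mirror uvm_page_physload(): start, end,
		 * avail_start, avail_end as page frame numbers, plus a
		 * free list (ignored for device segments, which return
		 * early before free_list is assigned).
		 */
		uvm_page_physload_device(atop(FLASH_PA_START),
		    atop(FLASH_PA_END), atop(FLASH_PA_START),
		    atop(FLASH_PA_END), VM_FREELIST_DEFAULT);
	}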
Expose the paddr_t -> struct vm_page * conversion function for device
pages, uvm_phys_to_vm_page_device(). This will be called by the XIP
vnode pager, which already knows whether a given vnode is backed by
device pages (and knows its physical address base), so it doesn't need
to look up device segments; it can make a cookie directly, as sketched
below.
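For illustration, a rough sketch of such a caller (the helper name and
its vnode-to-physical-address arithmetic are assumptions, not part of
this change):

	/*
	 * Hypothetical XIP pager path: the pager knows the device's
	 * physical base for this vnode, so it computes the page's
	 * physical address and converts it to a vm_page cookie without
	 * a segment lookup.
	 */
	struct vm_page *
	xip_offset_to_page(paddr_t devbase, voff_t off)
	{

		return uvm_phys_to_vm_page_device(devbase +
		    trunc_page(off));
	}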
diffstat:
sys/uvm/uvm_extern.h | 8 ++-
sys/uvm/uvm_page.c | 124 ++++++++++++++++++++++++++++++++------------------
sys/uvm/uvm_page.h | 6 +-
3 files changed, 86 insertions(+), 52 deletions(-)
diffs (truncated from 328 to 300 lines):
diff -r e5dcb1feae08 -r 7c3bb638dda5 sys/uvm/uvm_extern.h
--- a/sys/uvm/uvm_extern.h Tue Feb 23 07:12:08 2010 +0000
+++ b/sys/uvm/uvm_extern.h Tue Feb 23 07:44:25 2010 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_extern.h,v 1.161 2009/11/21 17:45:02 rmind Exp $ */
+/* $NetBSD: uvm_extern.h,v 1.161.2.1 2010/02/23 07:44:25 uebayasi Exp $ */
/*
*
@@ -721,8 +721,10 @@
void uvm_pagerealloc(struct vm_page *,
struct uvm_object *, voff_t);
/* Actually, uvm_page_physload takes PF#s which need their own type */
-void uvm_page_physload(paddr_t, paddr_t, paddr_t,
- paddr_t, int);
+void uvm_page_physload(paddr_t, paddr_t,
+ paddr_t, paddr_t, int);
+void uvm_page_physload_device(paddr_t, paddr_t,
+ paddr_t, paddr_t, int);
void uvm_setpagesize(void);
/* uvm_pager.c */
diff -r e5dcb1feae08 -r 7c3bb638dda5 sys/uvm/uvm_page.c
--- a/sys/uvm/uvm_page.c Tue Feb 23 07:12:08 2010 +0000
+++ b/sys/uvm/uvm_page.c Tue Feb 23 07:44:25 2010 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_page.c,v 1.153.2.12 2010/02/12 04:33:05 uebayasi Exp $ */
+/* $NetBSD: uvm_page.c,v 1.153.2.13 2010/02/23 07:44:25 uebayasi Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -71,7 +71,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.153.2.12 2010/02/12 04:33:05 uebayasi Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.153.2.13 2010/02/23 07:44:25 uebayasi Exp $");
#include "opt_ddb.h"
#include "opt_uvmhist.h"
@@ -165,6 +165,10 @@
static void uvm_pageinsert(struct uvm_object *, struct vm_page *);
static void uvm_pageremove(struct uvm_object *, struct vm_page *);
+static void vm_page_device_mdpage_insert(paddr_t);
+#if 0
+static void vm_page_device_mdpage_remove(paddr_t);
+#endif
/*
* per-object tree of pages
@@ -449,7 +453,7 @@
paddr = ptoa(vm_physmem[lcv].start);
for (i = 0 ; i < n ; i++, paddr += PAGE_SIZE) {
#ifdef __HAVE_VM_PAGE_MD
- VM_MDPAGE_INIT(&vm_physmem[lcv].pgs[i]);
+ VM_MDPAGE_INIT(&vm_physmem[lcv].pgs[i].mdpage, paddr);
#endif
if (atop(paddr) >= vm_physmem[lcv].avail_start &&
atop(paddr) <= vm_physmem[lcv].avail_end) {
@@ -738,10 +742,38 @@
* => we are limited to VM_PHYSSEG_MAX physical memory segments
*/
+static void
+uvm_page_physload_common(struct vm_physseg * const, const int,
+ const paddr_t, const paddr_t, const paddr_t, const paddr_t, const int);
+
void
uvm_page_physload(paddr_t start, paddr_t end, paddr_t avail_start,
paddr_t avail_end, int free_list)
{
+
+ uvm_page_physload_common(vm_physmem, vm_nphysmem, start, end,
+ avail_start, avail_end, free_list);
+ vm_nphysmem++;
+}
+
+void
+uvm_page_physload_device(paddr_t start, paddr_t end, paddr_t avail_start,
+ paddr_t avail_end, int free_list)
+{
+
+ uvm_page_physload_common(vm_physdev, vm_nphysdev, start, end,
+ avail_start, avail_end, free_list);
+ vm_nphysdev++;
+
+ for (paddr_t pf = start; pf < end; pf++)
+ vm_page_device_mdpage_insert(pf);
+}
+
+static void
+uvm_page_physload_common(struct vm_physseg * const segs, const int nsegs,
+ const paddr_t start, const paddr_t end,
+ const paddr_t avail_start, const paddr_t avail_end, const int free_list)
+{
int preload, lcv;
psize_t npages;
struct vm_page *pgs;
@@ -758,7 +790,7 @@
* do we have room?
*/
- if (vm_nphysmem == VM_PHYSSEG_MAX) {
+ if (nsegs == VM_PHYSSEG_MAX) {
printf("uvm_page_physload: unable to load physical memory "
"segment\n");
printf("\t%d segments allocated, ignoring 0x%llx -> 0x%llx\n",
@@ -767,20 +799,26 @@
return;
}
+ if (segs == vm_physdev) {
+ preload = false;
+ goto uvm_page_physload_common_insert;
+ }
+
/*
* check to see if this is a "preload" (i.e. uvm_mem_init hasn't been
* called yet, so malloc is not available).
*/
- for (lcv = 0 ; lcv < vm_nphysmem ; lcv++) {
- if (vm_physmem[lcv].pgs)
+ for (lcv = 0 ; lcv < nsegs ; lcv++) {
+ if (segs[lcv].pgs)
break;
}
- preload = (lcv == vm_nphysmem);
+ preload = (lcv == nsegs);
/*
* if VM is already running, attempt to malloc() vm_page structures
*/
+ /* XXXUEBS this is super ugly */
if (!preload) {
#if defined(VM_PHYSSEG_NOADD)
@@ -814,39 +852,40 @@
npages = 0;
}
+uvm_page_physload_common_insert:
/*
- * now insert us in the proper place in vm_physmem[]
+ * now insert us in the proper place in segs[]
*/
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_RANDOM)
/* random: put it at the end (easy!) */
- ps = &vm_physmem[vm_nphysmem];
+ ps = &segs[nsegs];
#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
{
int x;
/* sort by address for binary search */
- for (lcv = 0 ; lcv < vm_nphysmem ; lcv++)
- if (start < vm_physmem[lcv].start)
+ for (lcv = 0 ; lcv < nsegs ; lcv++)
+ if (start < segs[lcv].start)
break;
- ps = &vm_physmem[lcv];
+ ps = &segs[lcv];
/* move back other entries, if necessary ... */
- for (x = vm_nphysmem ; x > lcv ; x--)
+ for (x = nsegs ; x > lcv ; x--)
/* structure copy */
- vm_physmem[x] = vm_physmem[x - 1];
+ segs[x] = segs[x - 1];
}
#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
{
int x;
/* sort by largest segment first */
- for (lcv = 0 ; lcv < vm_nphysmem ; lcv++)
+ for (lcv = 0 ; lcv < nsegs ; lcv++)
if ((end - start) >
- (vm_physmem[lcv].end - vm_physmem[lcv].start))
+ (segs[lcv].end - segs[lcv].start))
break;
- ps = &vm_physmem[lcv];
+ ps = &segs[lcv];
/* move back other entries, if necessary ... */
- for (x = vm_nphysmem ; x > lcv ; x--)
+ for (x = nsegs ; x > lcv ; x--)
/* structure copy */
- vm_physmem[x] = vm_physmem[x - 1];
+ segs[x] = segs[x - 1];
}
#else
panic("uvm_page_physload: unknown physseg strategy selected!");
@@ -854,16 +893,16 @@
ps->start = start;
ps->end = end;
+
+ if (segs == vm_physdev)
+ return;
+
+ /* XXXUEBS ugly */
ps->avail_start = avail_start;
ps->avail_end = avail_end;
- if (preload) {
- ps->pgs = NULL;
- } else {
- ps->pgs = pgs;
- ps->endpg = pgs + npages;
- }
+ ps->pgs = pgs;
+ ps->endpg = pgs + npages;
ps->free_list = free_list;
- vm_nphysmem++;
if (!preload) {
uvmpdpol_reinit();
@@ -1052,8 +1091,8 @@
#define VM_PAGE_DEVICE_MAGIC_MASK 0x3
#define VM_PAGE_DEVICE_MAGIC_SHIFT 2
-static inline struct vm_page *
-PHYS_TO_VM_PAGE_DEVICE(paddr_t pa)
+struct vm_page *
+uvm_phys_to_vm_page_device(paddr_t pa)
{
paddr_t pf = pa >> PAGE_SHIFT;
uintptr_t cookie = pf << VM_PAGE_DEVICE_MAGIC_SHIFT;
@@ -1092,7 +1131,7 @@
#ifdef DEVICE_PAGE
psi = vm_physseg_find_device(pf, &off);
if (psi != -1)
- return(PHYS_TO_VM_PAGE_DEVICE(pa));
+ return(uvm_phys_to_vm_page_device(pa));
#endif
psi = vm_physseg_find(pf, &off);
if (psi != -1)
@@ -1155,13 +1194,6 @@
/* Global for now. Consider to make this per-vm_physseg. */
struct vm_page_device_mdpage vm_page_device_mdpage;
-static u_int
-vm_page_device_mdpage_hash(struct vm_page *pg)
-{
-
- return VM_PAGE_DEVICE_TO_PHYS(pg);
-}
-
static struct vm_page_device_mdpage_head *
vm_page_device_mdpage_head(u_int hash)
{
@@ -1177,14 +1209,15 @@
}
void
-vm_page_device_mdpage_insert(struct vm_page *pg)
+vm_page_device_mdpage_insert(paddr_t pf)
{
- paddr_t pf = VM_PAGE_DEVICE_TO_PHYS(pg);
- u_int hash = vm_page_device_mdpage_hash(pg);
+ u_int hash = (u_int)pf;
kmutex_t *lock = vm_page_device_mdpage_lock(hash);
struct vm_page_device_mdpage_head *head = vm_page_device_mdpage_head(hash);
struct vm_page_device_mdpage_entry *mde = kmem_zalloc(sizeof(*mde), KM_SLEEP);
+
+ VM_MDPAGE_INIT(&mde->mde_mdpage, pf << PAGE_SHIFT);
mde->mde_pf = pf;
mutex_spin_enter(lock);
@@ -1192,11 +1225,11 @@
mutex_spin_exit(lock);
}
+#if 0
void
-vm_page_device_mdpage_remove(struct vm_page *pg)
+vm_page_device_mdpage_remove(paddr_t pf)
{
- paddr_t pf = VM_PAGE_DEVICE_TO_PHYS(pg);
- u_int hash = vm_page_device_mdpage_hash(pg);
+ u_int hash = (u_int)pf;
kmutex_t *lock = vm_page_device_mdpage_lock(hash);
struct vm_page_device_mdpage_head *head = vm_page_device_mdpage_head(hash);
@@ -1219,16 +1252,17 @@
KASSERT(mde != NULL);
kmem_free(mde, sizeof(*mde));
}
+#endif
struct vm_page_md *
vm_page_device_mdpage_lookup(struct vm_page *pg)
{
- paddr_t pf = VM_PAGE_DEVICE_TO_PHYS(pg);
- u_int hash = vm_page_device_mdpage_hash(pg);
+ paddr_t pf = VM_PAGE_DEVICE_TO_PHYS(pg) >> PAGE_SHIFT;
+ u_int hash = (u_int)pf;
kmutex_t *lock = vm_page_device_mdpage_lock(hash);
struct vm_page_device_mdpage_head *head = vm_page_device_mdpage_head(hash);
- struct vm_page_device_mdpage_entry *mde;
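For reference, a rough sketch of the cookie scheme implied by the
VM_PAGE_DEVICE_MAGIC_* definitions and uvm_phys_to_vm_page_device()
above (the tag value here is an assumption; the real definition falls
in the truncated part of the diff):

	/*
	 * The page frame number is shifted left by
	 * VM_PAGE_DEVICE_MAGIC_SHIFT (2), leaving the low two bits
	 * (VM_PAGE_DEVICE_MAGIC_MASK) free to tag the pointer as a
	 * fake "device page" rather than a real struct vm_page *.
	 */
	#define VM_PAGE_DEVICE_MAGIC	0x1	/* assumed tag value */

	static inline struct vm_page *
	device_pf_to_cookie(paddr_t pf)
	{
		uintptr_t cookie = (pf << VM_PAGE_DEVICE_MAGIC_SHIFT) |
		    VM_PAGE_DEVICE_MAGIC;

		return (struct vm_page *)cookie;
	}

	static inline paddr_t
	device_cookie_to_pf(const struct vm_page *pg)
	{

		return (uintptr_t)pg >> VM_PAGE_DEVICE_MAGIC_SHIFT;
	}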