Source-Changes-HG archive
[src/trunk]: src/sys/arch/usermode/usermode Rewrite usermode's pmap to use ind...
details: https://anonhg.NetBSD.org/src/rev/054ccc8bdc1e
branches: trunk
changeset: 772394:054ccc8bdc1e
user: reinoud <reinoud%NetBSD.org@localhost>
date: Sun Jan 01 21:40:22 2012 +0000
description:
Rewrite usermode's pmap to use indirect addressing, significantly reducing pmap
memory usage (typically a tenfold reduction). This is done using L1 and L2 mappings.
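
For orientation, a minimal sketch of the indirection scheme follows. It is
a simplification, not the committed code: pmap_slot is a hypothetical
helper, while struct pmap_l2 and PMAP_L2_NENTRY match the definitions in
the diff below. Address ranges that were never mapped cost one NULL L1
slot instead of a page-sized run of pv_entry pointers, which is where the
memory saving comes from.

	#include <sys/param.h>		/* PAGE_SIZE */

	struct pv_entry;		/* opaque here; defined in pmap.c */

	#define PMAP_L2_SIZE	PAGE_SIZE
	#define PMAP_L2_NENTRY	(PMAP_L2_SIZE / sizeof(struct pv_entry *))

	struct pmap_l2 {
		struct pv_entry *pm_l2[PMAP_L2_NENTRY];
	};

	/*
	 * Hypothetical helper: resolve a linear page number to its
	 * pv_entry slot via the two-level table, or NULL if the range
	 * has no L2 table (nothing mapped there yet).
	 */
	static struct pv_entry **
	pmap_slot(struct pmap_l2 **pm_l1, size_t pageno)
	{
		struct pmap_l2 *l2 = pm_l1[pageno / PMAP_L2_NENTRY];

		if (l2 == NULL)
			return NULL;
		return &l2->pm_l2[pageno % PMAP_L2_NENTRY];
	}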
diffstat:
sys/arch/usermode/usermode/pmap.c | 266 +++++++++++++++++++++++++++----------
1 files changed, 192 insertions(+), 74 deletions(-)
diffs (truncated from 522 to 300 lines):
diff -r b20984028245 -r 054ccc8bdc1e sys/arch/usermode/usermode/pmap.c
--- a/sys/arch/usermode/usermode/pmap.c Sun Jan 01 20:04:36 2012 +0000
+++ b/sys/arch/usermode/usermode/pmap.c Sun Jan 01 21:40:22 2012 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.87 2012/01/01 13:52:51 reinoud Exp $ */
+/* $NetBSD: pmap.c,v 1.88 2012/01/01 21:40:22 reinoud Exp $ */
/*-
* Copyright (c) 2011 Reinoud Zandijk <reinoud%NetBSD.org@localhost>
@@ -27,7 +27,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.87 2012/01/01 13:52:51 reinoud Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.88 2012/01/01 21:40:22 reinoud Exp $");
#include "opt_memsize.h"
#include "opt_kmempages.h"
@@ -58,16 +58,25 @@
#define PV_MODIFIED 0x02
};
+#define PMAP_L2_SIZE PAGE_SIZE
+#define PMAP_L2_NENTRY (PMAP_L2_SIZE / sizeof(struct pv_entry *))
+
+struct pmap_l2 {
+ struct pv_entry *pm_l2[PMAP_L2_NENTRY];
+};
+
struct pmap {
int pm_count;
int pm_flags;
#define PM_ACTIVE 0x01
struct pmap_statistics pm_stats;
- struct pv_entry **pm_entries;
+ struct pmap_l2 **pm_l1;
};
-static struct pv_entry *pv_table; /* physical pages */
-static struct pv_entry **tlb; /* current tlb mappings */
+
+static struct pv_entry **kernel_pm_entries;
+static struct pv_entry *pv_table; /* physical pages info (direct mapped) */
+static struct pv_entry **tlb; /* current tlb mappings (direct mapped) */
static struct pmap pmap_kernel_store;
struct pmap * const kernel_pmap_ptr = &pmap_kernel_store;
@@ -79,9 +88,14 @@
static int phys_npages = 0;
static int pm_nentries = 0;
+static int pm_nl1 = 0;
+static int pm_l1_size = 0;
static uint64_t pm_entries_size = 0;
static struct pool pmap_pool;
+static struct pool pmap_l1_pool;
+static struct pool pmap_l2_pool;
+static struct pool pmap_pventry_pool;
/* forwards */
void pmap_bootstrap(void);
@@ -107,6 +121,7 @@
/* amount of physical memory */
int physmem;
int num_pv_entries = 0;
+int num_pmaps = 0;
#define SPARSE_MEMFILE
@@ -118,7 +133,7 @@
struct pmap *pmap;
paddr_t totmem_len;
paddr_t fpos, file_len;
- paddr_t pv_fpos, pm_fpos, tlb_fpos;
+ paddr_t pv_fpos, tlb_fpos, pm_l1_fpos, pm_fpos;
paddr_t wlen;
paddr_t user_len, barrier_len;
paddr_t pv_table_size;
@@ -126,7 +141,7 @@
vaddr_t mpos;
paddr_t pa;
vaddr_t va;
- uintptr_t pg;
+ uintptr_t pg, l1;
void *addr;
int err;
@@ -250,19 +265,30 @@
assert(err == 0);
#endif
- /* set up pv_table; bootstrap problem! */
+ /* initialize counters */
fpos = 0;
free_start = fpos; /* in physical space ! */
free_end = file_len; /* in physical space ! */
+ kmem_ext_cur_start = kmem_ext_start;
+ /* calculate pv table size */
phys_npages = (free_end - free_start) / PAGE_SIZE;
pv_table_size = round_page(phys_npages * sizeof(struct pv_entry));
-
dprintf_debug("claiming %"PRIu64" KB of pv_table for "
"%"PRIdPTR" pages of physical memory\n",
(uint64_t) pv_table_size/1024, (uintptr_t) phys_npages);
- kmem_ext_cur_start = kmem_ext_start;
+ /* calculate number of pmap entries needed for a complete map */
+ pm_nentries = (VM_MAX_KERNEL_ADDRESS - VM_MIN_ADDRESS) / PAGE_SIZE;
+ pm_entries_size = round_page(pm_nentries * sizeof(struct pv_entry *));
+ dprintf_debug("tlb va->pa lookup table is %"PRIu64" KB for "
+ "%d logical pages\n", pm_entries_size/1024, pm_nentries);
+
+ /* calculate how big the l1 tables are going to be */
+ pm_nl1 = pm_nentries / PMAP_L2_NENTRY;
+ pm_l1_size = pm_nl1 * sizeof(struct pmap_l2 *);
+
+ /* claim pv table */
pv_fpos = fpos;
pv_table = (struct pv_entry *) kmem_ext_cur_start;
addr = thunk_mmap(pv_table, pv_table_size,
@@ -280,37 +306,8 @@
kmem_ext_cur_start += pv_table_size;
fpos += pv_table_size;
- /* set up kernel pmap */
- pm_nentries = (VM_MAX_KERNEL_ADDRESS - VM_MIN_ADDRESS) / PAGE_SIZE;
- pm_entries_size = round_page(pm_nentries * sizeof(struct pv_entry *));
- dprintf_debug("pmap va->pa lookup table is %"PRIu64" KB for %d logical pages\n",
- pm_entries_size/1024, pm_nentries);
-
- pmap = pmap_kernel();
- memset(pmap, 0, sizeof(*pmap));
- pmap->pm_count = 1; /* reference */
- pmap->pm_flags = PM_ACTIVE; /* kernel pmap is always active */
- pmap->pm_entries = (struct pv_entry **) kmem_ext_cur_start;
-
- pm_fpos = fpos;
- addr = thunk_mmap(pmap->pm_entries, pm_entries_size,
- THUNK_PROT_READ | THUNK_PROT_WRITE,
- THUNK_MAP_FILE | THUNK_MAP_FIXED | THUNK_MAP_SHARED,
- mem_fh, pm_fpos);
- if (addr != (void *) pmap->pm_entries)
- panic("pmap_bootstrap: can't map in pmap entries\n");
-
- memset(pmap->pm_entries, 0, pm_entries_size); /* test and clear */
-
- dprintf_debug("kernel pmap entries initialised correctly\n");
-
- /* advance */
- kmem_ext_cur_start += pm_entries_size;
- fpos += pm_entries_size;
-
/* set up tlb space */
tlb = (struct pv_entry **) kmem_ext_cur_start;
-
tlb_fpos = fpos;
addr = thunk_mmap(tlb, pm_entries_size,
THUNK_PROT_READ | THUNK_PROT_WRITE,
@@ -327,6 +324,52 @@
kmem_ext_cur_start += pm_entries_size;
fpos += pm_entries_size;
+ /* set up kernel pmap and add a l1 map */
+ pmap = pmap_kernel();
+ memset(pmap, 0, sizeof(*pmap));
+ pmap->pm_count = 1; /* reference */
+ pmap->pm_flags = PM_ACTIVE; /* kernel pmap is always active */
+ pmap->pm_l1 = (struct pmap_l2 **) kmem_ext_cur_start;
+
+ pm_l1_fpos = fpos;
+ addr = thunk_mmap(pmap->pm_l1, pm_l1_size,
+ THUNK_PROT_READ | THUNK_PROT_WRITE,
+ THUNK_MAP_FILE | THUNK_MAP_FIXED | THUNK_MAP_SHARED,
+ mem_fh, pm_l1_fpos);
+ if (addr != (void *) pmap->pm_l1)
+ panic("pmap_bootstrap: can't map in pmap l1 entries\n");
+
+ memset(pmap->pm_l1, 0, pm_l1_size); /* test and clear */
+
+ dprintf_debug("kernel pmap l1 table initialised correctly\n");
+
+ /* advance for l1 tables */
+ kmem_ext_cur_start += round_page(pm_l1_size);
+ fpos += round_page(pm_l1_size);
+
+ /* followed by the pm entries */
+ pm_fpos = fpos;
+ kernel_pm_entries = (struct pv_entry **) kmem_ext_cur_start;
+ addr = thunk_mmap(kernel_pm_entries, pm_entries_size,
+ THUNK_PROT_READ | THUNK_PROT_WRITE,
+ THUNK_MAP_FILE | THUNK_MAP_FIXED | THUNK_MAP_SHARED,
+ mem_fh, pm_fpos);
+ if (addr != (void *) kernel_pm_entries)
+ panic("pmap_bootstrap: can't map in kernel pmap entries\n");
+
+ memset(kernel_pm_entries, 0, pm_entries_size); /* test and clear */
+
+ /* advance for the statically allocated pm_entries */
+ kmem_ext_cur_start += pm_entries_size;
+ fpos += pm_entries_size;
+
+ /* point the l1 slots at the statically allocated kernel pm entries */
+ for (l1 = 0; l1 < pm_nl1; l1++) {
+ pmap = pmap_kernel();
+ pmap->pm_l1[l1] = (struct pmap_l2 *)
+ ((vaddr_t) kernel_pm_entries + l1 * PMAP_L2_SIZE);
+ }
+
/* kmem used [kmem_ext_start - kmem_ext_cur_start] */
kmem_ext_cur_end = kmem_ext_cur_start;
@@ -338,17 +381,23 @@
}
dprintf_debug("pv_table mem added to the kernel pmap\n");
for (pg = 0; pg < pm_entries_size; pg += PAGE_SIZE) {
- pa = pm_fpos + pg;
- va = (vaddr_t) pmap->pm_entries + pg;
- pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE, 0);
- }
- dprintf_debug("kernel pmap entries mem added to the kernel pmap\n");
- for (pg = 0; pg < pm_entries_size; pg += PAGE_SIZE) {
pa = tlb_fpos + pg;
va = (vaddr_t) tlb + pg;
pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE, 0);
}
dprintf_debug("kernel tlb entries mem added to the kernel pmap\n");
+ for (pg = 0; pg < pm_l1_size; pg += PAGE_SIZE) {
+ pa = pm_l1_fpos + pg;
+ va = (vaddr_t) pmap->pm_l1 + pg;
+ pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE, 0);
+ }
+ dprintf_debug("kernel pmap l1 mem added to the kernel pmap\n");
+ for (pg = 0; pg < pm_entries_size; pg += PAGE_SIZE) {
+ pa = pm_fpos + pg;
+ va = (vaddr_t) kernel_pm_entries + pg;
+ pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE, 0);
+ }
+ dprintf_debug("kernel pmap entries mem added to the kernel pmap\n");
/* add file space to uvm's FREELIST */
/* XXX really from 0? or from fpos to have better stats */
@@ -388,6 +437,12 @@
/* create pmap pool */
pool_init(&pmap_pool, sizeof(struct pmap), 0, 0, 0,
"pmappool", NULL, IPL_NONE);
+ pool_init(&pmap_l2_pool, PMAP_L2_SIZE, 0, 0, 0,
+ "pmapl2pool", NULL, IPL_HIGH);
+ pool_init(&pmap_l1_pool, pm_l1_size, 0, 0, 0,
+ "pmapl1pool", NULL, IPL_NONE);
+ pool_init(&pmap_pventry_pool, sizeof(struct pv_entry), 0, 0, 0,
+ "pventry", NULL, IPL_HIGH);
}
pmap_t
@@ -402,14 +457,26 @@
}
dprintf_debug("pmap_create\n");
+ num_pmaps++;
+#if 0
+ printf("%s: pre alloc: num_pmaps %"PRIu64" (%"PRIu64" kb), "
+ "num_pv_entries %"PRIu64" (%"PRIu64" kb)\n",
+ __func__,
+ (uint64_t) num_pmaps,
+ (uint64_t) num_pmaps * (sizeof(*pmap) + pm_l1_size) / 1024,
+ (uint64_t) num_pv_entries,
+ (uint64_t) num_pv_entries * (sizeof(struct pv_entry)) / 1024);
+#endif
+
pmap = pool_get(&pmap_pool, PR_WAITOK);
memset(pmap, 0, sizeof(*pmap));
-
pmap->pm_count = 1;
pmap->pm_flags = 0;
- pmap->pm_entries = (struct pv_entry **) malloc(
- pm_entries_size, M_VMPMAP,
- M_WAITOK | M_ZERO);
+
+ /* claim l1 table */
+ pmap->pm_l1 = pool_get(&pmap_l1_pool, PR_WAITOK);
+ memset(pmap->pm_l1, 0, pm_l1_size);
+
dprintf_debug("\tpmap %p\n", pmap);
return pmap;
@@ -418,23 +485,31 @@
void
pmap_destroy(pmap_t pmap)
{
- int i;
+ struct pmap_l2 *l2tbl;
+ int l1, l2;
/* if multiple references exist just remove a reference */
dprintf_debug("pmap_destroy %p\n", pmap);
if (--pmap->pm_count > 0)
return;
+ num_pmaps--;
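
The diff is truncated before the new lookup and allocation paths, but the
pmap_l2_pool declared above suggests their shape. A hedged sketch of
on-demand L2 allocation follows; pmap_l2_ensure is a hypothetical name,
and the real code past the 300-line cut may differ:

	/*
	 * On first use of an address range, claim a fresh page-sized L2
	 * table from pmap_l2_pool and hang it off the L1 slot; then hand
	 * back the pv_entry slot for this linear page number (lpn).
	 */
	static struct pv_entry **
	pmap_l2_ensure(struct pmap *pmap, int lpn)
	{
		struct pmap_l2 *l2tbl;
		int l1 = lpn / PMAP_L2_NENTRY;

		l2tbl = pmap->pm_l1[l1];
		if (l2tbl == NULL) {
			l2tbl = pool_get(&pmap_l2_pool, PR_WAITOK);
			memset(l2tbl, 0, PMAP_L2_SIZE);
			pmap->pm_l1[l1] = l2tbl;
		}
		return &l2tbl->pm_l2[lpn % PMAP_L2_NENTRY];
	}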