Source-Changes-HG archive
[src/yamt-km]: src/sys/arch/i386/i386 - simplify pvpage allocation.
details: https://anonhg.NetBSD.org/src/rev/6a6a78b496a4
branches: yamt-km
changeset: 573297:6a6a78b496a4
user: yamt <yamt@NetBSD.org>
date: Wed Jan 26 09:20:45 2005 +0000
description:
- simplify pvpage allocation.
- assert that pmap_enter is never called before pmap_init.
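In outline, the patch collapses the old three-step pvpage allocation (reserve a kernel VA in kmem_map, allocate a physical page behind it, then enter the mapping by hand) into a single wired uvm_km_alloc() call, which also lets the pv_cachedva VA cache go away. Below is a condensed before/after sketch assembled from the function calls that appear in the diff; error handling is abbreviated and it is not standalone-compilable kernel code:

    /* before: reserve VA only, allocate a page, map it manually */
    va = uvm_km_alloc(kmem_map, PAGE_SIZE, 0,
        UVM_KMF_TRYLOCK | UVM_KMF_VAONLY);      /* VA, no backing */
    pg = uvm_pagealloc(NULL, va - vm_map_min(kernel_map), NULL,
        UVM_PGA_USERESERVE);                    /* physical page */
    pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
        VM_PROT_READ | VM_PROT_WRITE);          /* enter the mapping */
    pmap_update(pmap_kernel());
    pvpage = (struct pv_page *)va;

    /* after: one non-sleeping, wired allocation does all of the above */
    s = splvm();                /* kmem_map must be protected by splvm() */
    pvpage = (struct pv_page *)uvm_km_alloc(kmem_map, PAGE_SIZE, 0,
        UVM_KMF_TRYLOCK | UVM_KMF_NOWAIT | UVM_KMF_WIRED);
    splx(s);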
diffstat:
sys/arch/i386/i386/pmap.c | 77 +++++++++++-----------------------------------
1 files changed, 19 insertions(+), 58 deletions(-)
diffs (154 lines):
diff -r 4043bfb96d2c -r 6a6a78b496a4 sys/arch/i386/i386/pmap.c
--- a/sys/arch/i386/i386/pmap.c Wed Jan 26 08:33:14 2005 +0000
+++ b/sys/arch/i386/i386/pmap.c Wed Jan 26 09:20:45 2005 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.180.4.1 2005/01/25 13:01:08 yamt Exp $ */
+/* $NetBSD: pmap.c,v 1.180.4.2 2005/01/26 09:20:45 yamt Exp $ */
/*
*
@@ -60,7 +60,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.180.4.1 2005/01/25 13:01:08 yamt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.180.4.2 2005/01/26 09:20:45 yamt Exp $");
#include "opt_cputype.h"
#include "opt_user_ldt.h"
@@ -379,7 +379,6 @@
static struct pv_pagelist pv_freepages; /* list of pv_pages with free entrys */
static struct pv_pagelist pv_unusedpgs; /* list of unused pv_pages */
static int pv_nfpvents; /* # of free pv entries */
-static vaddr_t pv_cachedva; /* cached VA for later use */
#define PVE_LOWAT (PVE_PER_PVPAGE / 2) /* free pv_entry low water mark */
#define PVE_HIWAT (PVE_LOWAT + (PVE_PER_PVPAGE * 2))
@@ -1148,7 +1147,6 @@
{
int i;
- pv_cachedva = 0; /* a VA we have allocated but not used yet */
pv_nfpvents = 0;
pj_page = (void *)uvm_km_alloc(kernel_map, PAGE_SIZE, 0, UVM_KMF_WIRED);
@@ -1254,7 +1252,6 @@
struct pmap *pmap;
int mode;
{
- struct vm_page *pg;
struct pv_page *pvpage;
struct pv_entry *pv;
int s;
@@ -1280,38 +1277,17 @@
}
/*
- * see if we've got a cached unmapped VA that we can map a page in.
- * if not, try to allocate one.
- */
-
- if (pv_cachedva == 0) {
- s = splvm(); /* must protect kmem_map with splvm! */
- pv_cachedva = uvm_km_alloc(kmem_map, PAGE_SIZE, 0,
- UVM_KMF_TRYLOCK|UVM_KMF_VAONLY);
- splx(s);
- if (pv_cachedva == 0) {
- return (NULL);
- }
- }
-
- pg = uvm_pagealloc(NULL, pv_cachedva - vm_map_min(kernel_map), NULL,
- UVM_PGA_USERESERVE);
- if (pg == NULL)
- return (NULL);
- pg->flags &= ~PG_BUSY; /* never busy */
-
- /*
- * add a mapping for our new pv_page and free its entrys (save one!)
- *
* NOTE: If we are allocating a PV page for the kernel pmap, the
* pmap is already locked! (...but entering the mapping is safe...)
*/
- pmap_kenter_pa(pv_cachedva, VM_PAGE_TO_PHYS(pg),
- VM_PROT_READ | VM_PROT_WRITE);
- pmap_update(pmap_kernel());
- pvpage = (struct pv_page *) pv_cachedva;
- pv_cachedva = 0;
+ s = splvm(); /* must protect kmem_map with splvm! */
+ pvpage = (struct pv_page *)uvm_km_alloc(kmem_map, PAGE_SIZE, 0,
+ UVM_KMF_TRYLOCK|UVM_KMF_NOWAIT|UVM_KMF_WIRED);
+ splx(s);
+ if (pvpage == NULL)
+ return NULL;
+
return (pmap_add_pvpage(pvpage, mode != ALLOCPV_NONEED));
}
@@ -1447,40 +1423,23 @@
*
* => assume caller is holding the pvalloc_lock and that
* there is a page on the pv_unusedpgs list
- * => if we can't get a lock on the kmem_map we try again later
*/
static void
pmap_free_pvpage()
{
int s;
- struct vm_map *map;
- struct vm_map_entry *dead_entries;
struct pv_page *pvp;
- s = splvm(); /* protect kmem_map */
-
pvp = TAILQ_FIRST(&pv_unusedpgs);
-
- map = kmem_map;
- if (vm_map_lock_try(map)) {
-
- /* remove pvp from pv_unusedpgs */
- TAILQ_REMOVE(&pv_unusedpgs, pvp, pvinfo.pvpi_list);
-
- /* unmap the page */
- dead_entries = NULL;
- uvm_unmap_remove(map, (vaddr_t)pvp, ((vaddr_t)pvp) + PAGE_SIZE,
- &dead_entries, NULL, 0);
- vm_map_unlock(map);
-
- if (dead_entries != NULL)
- uvm_unmap_detach(dead_entries, 0);
-
- pv_nfpvents -= PVE_PER_PVPAGE; /* update free count */
- }
-
+ /* remove pvp from pv_unusedpgs */
+ TAILQ_REMOVE(&pv_unusedpgs, pvp, pvinfo.pvpi_list);
+
+ s = splvm();
+ uvm_km_free(kmem_map, (vaddr_t)pvp, PAGE_SIZE, UVM_KMF_WIRED);
splx(s);
+
+ pv_nfpvents -= PVE_PER_PVPAGE; /* update free count */
}
/*
@@ -3259,6 +3218,8 @@
int error;
boolean_t wired = (flags & PMAP_WIRED) != 0;
+ KASSERT(pmap_initialized);
+
#ifdef DIAGNOSTIC
/* sanity check: totally out of range? */
if (va >= VM_MAX_KERNEL_ADDRESS)
@@ -3367,7 +3328,7 @@
}
pg = PHYS_TO_VM_PAGE(pa);
- if (pmap_initialized && pg != NULL) {
+ if (pg != NULL) {
/* This is a managed page */
npte |= PG_PVLIST;
mdpg = &pg->mdpage;
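The free path is simplified symmetrically: since the pvpage is now an ordinary wired kmem_map allocation, pmap_free_pvpage() no longer needs the vm_map_lock_try()/uvm_unmap_remove() dance and can no longer fail, which is why the "try again later" clause drops out of its comment. The resulting function, reconstructed here from the + lines of the diff:

    static void
    pmap_free_pvpage(void)
    {
        int s;
        struct pv_page *pvp;

        pvp = TAILQ_FIRST(&pv_unusedpgs);
        /* remove pvp from pv_unusedpgs */
        TAILQ_REMOVE(&pv_unusedpgs, pvp, pvinfo.pvpi_list);

        s = splvm();            /* protect kmem_map */
        uvm_km_free(kmem_map, (vaddr_t)pvp, PAGE_SIZE, UVM_KMF_WIRED);
        splx(s);

        pv_nfpvents -= PVE_PER_PVPAGE;  /* update free count */
    }

The new KASSERT(pmap_initialized) in pmap_enter() makes an existing assumption explicit: once pmap_enter() provably runs only after pmap_init(), the separate pmap_initialized test on the PHYS_TO_VM_PAGE() path becomes redundant, so the last hunk reduces it to a plain pg != NULL check.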