Source-Changes-HG archive
[src/yamt-km]: src/sys/arch/amd64/amd64 - use new APIs.
details: https://anonhg.NetBSD.org/src/rev/eda70a3190b8
branches: yamt-km
changeset: 573340:eda70a3190b8
user: yamt <yamt%NetBSD.org@localhost>
date: Sun Feb 13 10:04:46 2005 +0000
description:
- use new APIs.
- simplify pmap bootstrap and pv_page allocation.
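The pattern throughout this change is the move from the old single-purpose kernel-memory allocators (the flagless uvm_km_alloc, uvm_km_valloc, uvm_km_zalloc, uvm_km_kmemalloc) to a unified uvm_km_alloc/uvm_km_free pair that takes an alignment and a flags argument. A minimal sketch of the correspondence, inferred from the hunks below; "size" here is a stand-in for the caller's allocation size:

    /* sketch only: old-to-new uvm_km call mapping as used in this diff */
    vaddr_t va;

    /* wired memory (old flagless uvm_km_alloc) */
    va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_WIRED);

    /* wired and zeroed (old uvm_km_zalloc) */
    va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_WIRED | UVM_KMF_ZERO);

    /* virtual address space only, no backing pages (old uvm_km_valloc) */
    va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY);

    /* frees now state what kind of allocation is being released */
    uvm_km_free(kernel_map, va, size, UVM_KMF_WIRED);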
diffstat:
sys/arch/amd64/amd64/cpu.c | 6 +-
sys/arch/amd64/amd64/gdt.c | 7 +-
sys/arch/amd64/amd64/machdep.c | 7 +-
sys/arch/amd64/amd64/pmap.c | 118 ++++++++----------------------------
sys/arch/amd64/amd64/sys_machdep.c | 10 +-
sys/arch/amd64/amd64/vm_machdep.c | 8 +-
6 files changed, 47 insertions(+), 109 deletions(-)
diffs (truncated from 381 to 300 lines):
diff -r 0c14a0bd58cb -r eda70a3190b8 sys/arch/amd64/amd64/cpu.c
--- a/sys/arch/amd64/amd64/cpu.c Sat Feb 12 18:17:28 2005 +0000
+++ b/sys/arch/amd64/amd64/cpu.c Sun Feb 13 10:04:46 2005 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: cpu.c,v 1.7 2004/06/28 08:23:21 fvdl Exp $ */
+/* $NetBSD: cpu.c,v 1.7.6.1 2005/02/13 10:04:46 yamt Exp $ */
/*-
* Copyright (c) 2000 The NetBSD Foundation, Inc.
@@ -71,7 +71,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.7 2004/06/28 08:23:21 fvdl Exp $");
+__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.7.6.1 2005/02/13 10:04:46 yamt Exp $");
#include "opt_ddb.h"
#include "opt_multiprocessor.h"
@@ -289,7 +289,7 @@
/*
* Allocate UPAGES contiguous pages for the idle PCB and stack.
*/
- kstack = uvm_km_alloc (kernel_map, USPACE);
+ kstack = uvm_km_alloc(kernel_map, USPACE, 0, UVM_KMF_WIRED);
if (kstack == 0) {
if (caa->cpu_role != CPU_ROLE_AP) {
panic("cpu_attach: unable to allocate idle stack for"
diff -r 0c14a0bd58cb -r eda70a3190b8 sys/arch/amd64/amd64/gdt.c
--- a/sys/arch/amd64/amd64/gdt.c Sat Feb 12 18:17:28 2005 +0000
+++ b/sys/arch/amd64/amd64/gdt.c Sun Feb 13 10:04:46 2005 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: gdt.c,v 1.5.6.1 2005/02/12 18:17:30 yamt Exp $ */
+/* $NetBSD: gdt.c,v 1.5.6.2 2005/02/13 10:04:46 yamt Exp $ */
/*-
* Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
@@ -44,7 +44,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: gdt.c,v 1.5.6.1 2005/02/12 18:17:30 yamt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: gdt.c,v 1.5.6.2 2005/02/13 10:04:46 yamt Exp $");
#include "opt_multiprocessor.h"
@@ -166,7 +166,8 @@
(gdt_size - DYNSEL_START) / sizeof (struct sys_segment_descriptor);
old_gdt = gdtstore;
- gdtstore = (char *)uvm_km_valloc(kernel_map, MAXGDTSIZ);
+ gdtstore = (char *)uvm_km_alloc(kernel_map, MAXGDTSIZ, 0,
+ UVM_KMF_VAONLY);
for (va = (vaddr_t)gdtstore; va < (vaddr_t)gdtstore + MINGDTSIZ;
va += PAGE_SIZE) {
pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
diff -r 0c14a0bd58cb -r eda70a3190b8 sys/arch/amd64/amd64/machdep.c
--- a/sys/arch/amd64/amd64/machdep.c Sat Feb 12 18:17:28 2005 +0000
+++ b/sys/arch/amd64/amd64/machdep.c Sun Feb 13 10:04:46 2005 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: machdep.c,v 1.31 2004/10/20 04:20:05 thorpej Exp $ */
+/* $NetBSD: machdep.c,v 1.31.6.1 2005/02/13 10:04:46 yamt Exp $ */
/*-
* Copyright (c) 1996, 1997, 1998, 2000 The NetBSD Foundation, Inc.
@@ -72,7 +72,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.31 2004/10/20 04:20:05 thorpej Exp $");
+__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.31.6.1 2005/02/13 10:04:46 yamt Exp $");
#include "opt_user_ldt.h"
#include "opt_ddb.h"
@@ -244,7 +244,8 @@
/*
* Initialize error message buffer (at end of core).
*/
- msgbuf_vaddr = uvm_km_valloc(kernel_map, x86_round_page(MSGBUFSIZE));
+ msgbuf_vaddr = uvm_km_alloc(kernel_map, x86_round_page(MSGBUFSIZE), 0,
+ UVM_KMF_VAONLY);
if (msgbuf_vaddr == 0)
panic("failed to valloc msgbuf_vaddr");
diff -r 0c14a0bd58cb -r eda70a3190b8 sys/arch/amd64/amd64/pmap.c
--- a/sys/arch/amd64/amd64/pmap.c Sat Feb 12 18:17:28 2005 +0000
+++ b/sys/arch/amd64/amd64/pmap.c Sun Feb 13 10:04:46 2005 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.15 2005/01/01 21:00:06 yamt Exp $ */
+/* $NetBSD: pmap.c,v 1.15.4.1 2005/02/13 10:04:46 yamt Exp $ */
/*
*
@@ -108,7 +108,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.15 2005/01/01 21:00:06 yamt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.15.4.1 2005/02/13 10:04:46 yamt Exp $");
#ifndef __x86_64__
#include "opt_cputype.h"
@@ -429,8 +429,6 @@
static struct pv_pagelist pv_freepages; /* list of pv_pages with free entries */
static struct pv_pagelist pv_unusedpgs; /* list of unused pv_pages */
static int pv_nfpvents; /* # of free pv entries */
-static struct pv_page *pv_initpage; /* bootstrap page from kernel_map */
-static vaddr_t pv_cachedva; /* cached VA for later use */
#define PVE_LOWAT (PVE_PER_PVPAGE / 2) /* free pv_entry low water mark */
#define PVE_HIWAT (PVE_LOWAT + (PVE_PER_PVPAGE * 2))
@@ -1258,7 +1256,8 @@
s = (vsize_t) (sizeof(struct pv_head) * npages +
sizeof(char) * npages);
s = round_page(s);
- addr = (vaddr_t) uvm_km_zalloc(kernel_map, s);
+ addr = (vaddr_t) uvm_km_alloc(kernel_map, s, 0,
+ UVM_KMF_WIRED | UVM_KMF_ZERO);
if (addr == 0)
panic("pmap_init: unable to allocate pv_heads");
@@ -1304,22 +1303,9 @@
}
#endif
- /*
- * now we need to free enough pv_entry structures to allow us to get
- * the kmem_map allocated and inited (done after this
- * function is finished). to do this we allocate one bootstrap page out
- * of kernel_map and use it to provide an initial pool of pv_entry
- * structures. we never free this page.
- */
-
- pv_initpage = (struct pv_page *) uvm_km_alloc(kernel_map, PAGE_SIZE);
- if (pv_initpage == NULL)
- panic("pmap_init: pv_initpage");
- pv_cachedva = 0; /* a VA we have allocated but not used yet */
pv_nfpvents = 0;
- (void) pmap_add_pvpage(pv_initpage, FALSE);
-
- pj_page = (void *)uvm_km_alloc(kernel_map, PAGE_SIZE);
+
+ pj_page = (void *)uvm_km_alloc(kernel_map, PAGE_SIZE, 0, UVM_KMF_WIRED);
if (pj_page == NULL)
panic("pmap_init: pj_page");
@@ -1423,7 +1409,6 @@
struct pmap *pmap;
int mode;
{
- struct vm_page *pg;
struct pv_page *pvpage;
struct pv_entry *pv;
int s;
@@ -1449,42 +1434,17 @@
}
/*
- * see if we've got a cached unmapped VA that we can map a page in.
- * if not, try to allocate one.
- */
-
- if (pv_cachedva == 0) {
- s = splvm(); /* must protect kmem_map with splvm! */
- pv_cachedva = uvm_km_kmemalloc(kmem_map, NULL, PAGE_SIZE,
- UVM_KMF_TRYLOCK|UVM_KMF_VALLOC);
- splx(s);
- if (pv_cachedva == 0) {
- return (NULL);
- }
- }
-
- /*
- * we have a VA, now let's try and allocate a page.
- */
-
- pg = uvm_pagealloc(NULL, pv_cachedva - vm_map_min(kernel_map), NULL,
- UVM_PGA_USERESERVE);
- if (pg == NULL)
- return NULL;
- pg->flags &= ~PG_BUSY; /* never busy */
-
- /*
- * add a mapping for our new pv_page and free its entrys (save one!)
- *
* NOTE: If we are allocating a PV page for the kernel pmap, the
* pmap is already locked! (...but entering the mapping is safe...)
*/
- pmap_kenter_pa(pv_cachedva, VM_PAGE_TO_PHYS(pg),
- VM_PROT_READ | VM_PROT_WRITE);
- pmap_update(pmap_kernel());
- pvpage = (struct pv_page *) pv_cachedva;
- pv_cachedva = 0;
+ s = splvm(); /* must protect kmem_map with splvm! */
+ pvpage = (struct pv_page *)uvm_km_alloc(kmem_map, PAGE_SIZE, 0,
+ UVM_KMF_TRYLOCK|UVM_KMF_NOWAIT|UVM_KMF_WIRED);
+ splx(s);
+ if (pvpage == NULL)
+ return NULL;
+
return (pmap_add_pvpage(pvpage, mode != ALLOCPV_NONEED));
}
@@ -1620,51 +1580,23 @@
*
* => assume caller is holding the pvalloc_lock and that
* there is a page on the pv_unusedpgs list
- * => if we can't get a lock on the kmem_map we try again later
*/
static void
pmap_free_pvpage()
{
int s;
- struct vm_map *map;
- struct vm_map_entry *dead_entries;
struct pv_page *pvp;
- s = splvm(); /* protect kmem_map */
-
pvp = TAILQ_FIRST(&pv_unusedpgs);
-
- /*
- * note: watch out for pv_initpage which is allocated out of
- * kernel_map rather than kmem_map.
- */
-
- if (pvp == pv_initpage)
- map = kernel_map;
- else
- map = kmem_map;
- if (vm_map_lock_try(map)) {
-
- /* remove pvp from pv_unusedpgs */
- TAILQ_REMOVE(&pv_unusedpgs, pvp, pvinfo.pvpi_list);
-
- /* unmap the page */
- dead_entries = NULL;
- uvm_unmap_remove(map, (vaddr_t)pvp, ((vaddr_t)pvp) + PAGE_SIZE,
- &dead_entries, NULL);
- vm_map_unlock(map);
-
- if (dead_entries != NULL)
- uvm_unmap_detach(dead_entries, 0);
-
- pv_nfpvents -= PVE_PER_PVPAGE; /* update free count */
- }
- if (pvp == pv_initpage)
- /* no more initpage, we've freed it */
- pv_initpage = NULL;
-
+ /* remove pvp from pv_unusedpgs */
+ TAILQ_REMOVE(&pv_unusedpgs, pvp, pvinfo.pvpi_list);
+
+ s = splvm();
+ uvm_km_free(kmem_map, (vaddr_t)pvp, PAGE_SIZE, UVM_KMF_WIRED);
splx(s);
+
+ pv_nfpvents -= PVE_PER_PVPAGE; /* update free count */
}
/*
@@ -2079,7 +2011,7 @@
*/
ldt_free(pmap);
uvm_km_free(kernel_map, (vaddr_t)pmap->pm_ldt,
- pmap->pm_ldt_len);
+ pmap->pm_ldt_len, UVM_KMF_WIRED);
}
#endif
@@ -2119,7 +2051,7 @@
size_t len;
len = pmap1->pm_ldt_len;
- new_ldt = (char *)uvm_km_alloc(kernel_map, len);
+ new_ldt = (char *)uvm_km_alloc(kernel_map, len, 0, UVM_KMF_WIRED);
memcpy(new_ldt, pmap1->pm_ldt, len);
pmap2->pm_ldt = new_ldt;
pmap2->pm_ldt_len = pmap1->pm_ldt_len;
@@ -2166,7 +2098,7 @@
simple_unlock(&pmap->pm_lock);
if (old_ldt != NULL)
- uvm_km_free(kernel_map, (vaddr_t)old_ldt, len);
+ uvm_km_free(kernel_map, (vaddr_t)old_ldt, len, UVM_KMF_WIRED);
}
#endif /* USER_LDT */
@@ -3167,6 +3099,8 @@
int ptpdelta, wireddelta, resdelta;
boolean_t wired = (flags & PMAP_WIRED) != 0;
+ KASSERT(pmap_initialized);
+
#ifdef DIAGNOSTIC
if (va == (vaddr_t) PDP_BASE || va == (vaddr_t) APDP_BASE)
panic("pmap_enter: trying to map over PDP/APDP!");
@@ -3292,7 +3226,7 @@
*/
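
The pv_page simplification visible in the pmap.c hunks reduces the old allocate-VA, uvm_pagealloc, pmap_kenter_pa sequence to a single non-blocking call, with a symmetric free. A condensed sketch of the resulting pair, taken from the hunks above; splvm() still protects kmem_map:

    struct pv_page *pvpage;
    int s;

    /* allocate: fail rather than sleep if kmem_map is busy */
    s = splvm();
    pvpage = (struct pv_page *)uvm_km_alloc(kmem_map, PAGE_SIZE, 0,
        UVM_KMF_TRYLOCK | UVM_KMF_NOWAIT | UVM_KMF_WIRED);
    splx(s);

    /* free: size and flags must match the allocation */
    s = splvm();
    uvm_km_free(kmem_map, (vaddr_t)pvpage, PAGE_SIZE, UVM_KMF_WIRED);
    splx(s);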