Source-Changes-HG archive
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]
[src/trunk]: src/sys/arch allocate and free memory explicitly rather than abu...
details: https://anonhg.NetBSD.org/src/rev/fb5d151d31e0
branches: trunk
changeset: 517752:fb5d151d31e0
user: chs <chs%NetBSD.org@localhost>
date: Sun Nov 18 19:28:34 2001 +0000
description:
allocate and free memory explicitly rather than abusing uvm_map_pageable().
diffstat:
sys/arch/i386/i386/gdt.c | 46 ++++++++++++++++++++++++++++++++++---------
sys/arch/x86_64/x86_64/gdt.c | 44 ++++++++++++++++++++++++++++++++++-------
2 files changed, 72 insertions(+), 18 deletions(-)
diffs (204 lines):
diff -r 0f4bb2354515 -r fb5d151d31e0 sys/arch/i386/i386/gdt.c
--- a/sys/arch/i386/i386/gdt.c Sun Nov 18 18:48:55 2001 +0000
+++ b/sys/arch/i386/i386/gdt.c Sun Nov 18 19:28:34 2001 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: gdt.c,v 1.24 2001/11/15 07:03:29 lukem Exp $ */
+/* $NetBSD: gdt.c,v 1.25 2001/11/18 19:28:34 chs Exp $ */
/*-
* Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
@@ -37,7 +37,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: gdt.c,v 1.24 2001/11/15 07:03:29 lukem Exp $");
+__KERNEL_RCSID(0, "$NetBSD: gdt.c,v 1.25 2001/11/18 19:28:34 chs Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@@ -45,7 +45,7 @@
#include <sys/lock.h>
#include <sys/user.h>
-#include <uvm/uvm_extern.h>
+#include <uvm/uvm.h>
#include <machine/gdt.h>
@@ -161,6 +161,8 @@
size_t max_len, min_len;
struct region_descriptor region;
union descriptor *old_gdt;
+ struct vm_page *pg;
+ vaddr_t va;
lockinit(&gdt_lock_store, PZERO, "gdtlck", 0, 0);
@@ -174,10 +176,15 @@
old_gdt = gdt;
gdt = (union descriptor *)uvm_km_valloc(kernel_map, max_len);
- uvm_map_pageable(kernel_map, (vaddr_t)gdt,
- (vaddr_t)gdt + min_len, FALSE, FALSE);
+ for (va = (vaddr_t)gdt; va < (vaddr_t)gdt + min_len; va += PAGE_SIZE) {
+ pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
+ if (pg == NULL) {
+ panic("gdt_init: no pages");
+ }
+ pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
+ VM_PROT_READ | VM_PROT_WRITE);
+ }
memcpy(gdt, old_gdt, NGDT * sizeof(gdt[0]));
-
setregion(&region, gdt, max_len - 1);
lgdt(&region);
}
@@ -189,26 +196,45 @@
gdt_grow()
{
size_t old_len, new_len;
+ struct vm_page *pg;
+ vaddr_t va;
old_len = gdt_size * sizeof(gdt[0]);
gdt_size <<= 1;
new_len = old_len << 1;
- uvm_map_pageable(kernel_map, (vaddr_t)gdt + old_len,
- (vaddr_t)gdt + new_len, FALSE, FALSE);
+ for (va = (vaddr_t)gdt + old_len; va < (vaddr_t)gdt + new_len;
+ va += PAGE_SIZE) {
+ while ((pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO)) ==
+ NULL) {
+ uvm_wait("gdt_grow");
+ }
+ pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
+ VM_PROT_READ | VM_PROT_WRITE);
+ }
}
void
gdt_shrink()
{
size_t old_len, new_len;
+ struct vm_page *pg;
+ paddr_t pa;
+ vaddr_t va;
old_len = gdt_size * sizeof(gdt[0]);
gdt_size >>= 1;
new_len = old_len >> 1;
- uvm_map_pageable(kernel_map, (vaddr_t)gdt + new_len,
- (vaddr_t)gdt + old_len, TRUE, FALSE);
+ for (va = (vaddr_t)gdt + new_len; va < (vaddr_t)gdt + old_len;
+ va += PAGE_SIZE) {
+ if (!pmap_extract(pmap_kernel(), va, &pa)) {
+ panic("gdt_shrink botch");
+ }
+ pg = PHYS_TO_VM_PAGE(pa);
+ pmap_kremove(va, PAGE_SIZE);
+ uvm_pagefree(pg);
+ }
}
/*
diff -r 0f4bb2354515 -r fb5d151d31e0 sys/arch/x86_64/x86_64/gdt.c
--- a/sys/arch/x86_64/x86_64/gdt.c Sun Nov 18 18:48:55 2001 +0000
+++ b/sys/arch/x86_64/x86_64/gdt.c Sun Nov 18 19:28:34 2001 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: gdt.c,v 1.1 2001/06/19 00:21:16 fvdl Exp $ */
+/* $NetBSD: gdt.c,v 1.2 2001/11/18 19:28:35 chs Exp $ */
/*-
* Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
@@ -49,7 +49,7 @@
#include <sys/lock.h>
#include <sys/user.h>
-#include <uvm/uvm_extern.h>
+#include <uvm/uvm.h>
#include <machine/gdt.h>
@@ -171,6 +171,8 @@
{
struct region_descriptor region;
char *old_gdt;
+ struct vm_page *pg;
+ vaddr_t va;
lockinit(&gdt_lock_store, PZERO, "gdtlck", 0, 0);
@@ -183,8 +185,15 @@
old_gdt = gdtstore;
gdtstore = (char *)uvm_km_valloc(kernel_map, MAXGDTSIZ);
- uvm_map_pageable(kernel_map, (vaddr_t)gdtstore,
- (vaddr_t)gdtstore + MINGDTSIZ, FALSE, FALSE);
+ for (va = (vaddr_t)gdtstore; va < (vaddr_t)gdtstore + MINGDTSIZ;
+ va += PAGE_SIZE) {
+ pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
+ if (pg == NULL) {
+ panic("gdt_init: no pages");
+ }
+ pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
+ VM_PROT_READ | VM_PROT_WRITE);
+ }
memcpy(gdtstore, old_gdt, DYNSEL_START);
setregion(&region, gdtstore, (u_int16_t)(MAXGDTSIZ - 1));
@@ -198,6 +207,8 @@
gdt_grow()
{
size_t old_len, new_len;
+ struct vm_page *pg;
+ vaddr_t va;
old_len = gdt_size;
gdt_size <<= 1;
@@ -205,14 +216,24 @@
gdt_dynavail =
(gdt_size - DYNSEL_START) / sizeof (struct sys_segment_descriptor);
- uvm_map_pageable(kernel_map, (vaddr_t)gdtstore + old_len,
- (vaddr_t)gdtstore + new_len, FALSE, FALSE);
+ for (va = (vaddr_t)gdtstore + old_len; va < (vaddr_t)gdtstore + new_len;
+ va += PAGE_SIZE) {
+ while ((pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO)) ==
+ NULL) {
+ uvm_wait("gdt_grow");
+ }
+ pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
+ VM_PROT_READ | VM_PROT_WRITE);
+ }
}
void
gdt_shrink()
{
size_t old_len, new_len;
+ struct vm_page *pg;
+ paddr_t pa;
+ vaddr_t va;
old_len = gdt_size;
gdt_size >>= 1;
@@ -220,8 +241,15 @@
gdt_dynavail =
(gdt_size - DYNSEL_START) / sizeof (struct sys_segment_descriptor);
- uvm_map_pageable(kernel_map, (vaddr_t)gdtstore + new_len,
- (vaddr_t)gdtstore + old_len, TRUE, FALSE);
+ for (va = (vaddr_t)gdtstore + new_len; va < (vaddr_t)gdtstore + old_len;
+ va += PAGE_SIZE) {
+ if (!pmap_extract(pmap_kernel(), va, &pa)) {
+ panic("gdt_shrink botch");
+ }
+ pg = PHYS_TO_VM_PAGE(pa);
+ pmap_kremove(va, PAGE_SIZE);
+ uvm_pagefree(pg);
+ }
}
/*
Home |
Main Index |
Thread Index |
Old Index