Source-Changes-HG archive
[src/trunk]: src/sys/uvm Remove VM_MAP_INTRSAFE and related code. Not used since the "kmem changes".
details: https://anonhg.NetBSD.org/src/rev/466faecd5dd7
branches: trunk
changeset: 777443:466faecd5dd7
user: rmind <rmind%NetBSD.org@localhost>
date: Sun Feb 19 00:05:55 2012 +0000
description:
Remove VM_MAP_INTRSAFE and related code. Not used since the "kmem changes".
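For context: an "interrupt safe" map took a spin mutex (map->mutex) instead of the sleepable reader/writer lock, so every map-locking primitive had to branch on the flag. The before/after below is a minimal sketch distilled from the uvm_map.c hunks in this diff; the _old/_new suffixes are editorial, and both bodies omit the busy-owner retry loop shown in the full diff.

/* Before: each primitive checked VM_MAP_INTRSAFE first. */
void
vm_map_lock_old(struct vm_map *map)
{
	if ((map->flags & VM_MAP_INTRSAFE) != 0) {
		mutex_spin_enter(&map->mutex);	/* spin lock; may not sleep */
		return;
	}
	rw_enter(&map->lock, RW_WRITER);	/* sleepable rwlock path */
	map->timestamp++;
}

/* After: all maps take the rwlock unconditionally. */
void
vm_map_lock_new(struct vm_map *map)
{
	rw_enter(&map->lock, RW_WRITER);
	map->timestamp++;
}

With the flag gone, vm_map_lock_try(), vm_map_unlock(), vm_map_lock_read() and vm_map_locked_p() each collapse to the single rwlock path, as the hunks below show.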
diffstat:
sys/uvm/uvm_fault.c | 6 +--
sys/uvm/uvm_fault_i.h | 13 +-------
sys/uvm/uvm_km.c | 20 +++++-------
sys/uvm/uvm_loan.c | 5 +-
sys/uvm/uvm_map.c | 77 ++++++++++++++------------------------------------
sys/uvm/uvm_map.h | 6 +---
6 files changed, 38 insertions(+), 89 deletions(-)
diffs (truncated from 371 to 300 lines):
diff -r 6c4f326db2e4 -r 466faecd5dd7 sys/uvm/uvm_fault.c
--- a/sys/uvm/uvm_fault.c Sat Feb 18 23:51:27 2012 +0000
+++ b/sys/uvm/uvm_fault.c Sun Feb 19 00:05:55 2012 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_fault.c,v 1.193 2012/02/02 19:43:08 tls Exp $ */
+/* $NetBSD: uvm_fault.c,v 1.194 2012/02/19 00:05:55 rmind Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -32,7 +32,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_fault.c,v 1.193 2012/02/02 19:43:08 tls Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_fault.c,v 1.194 2012/02/19 00:05:55 rmind Exp $");
#include "opt_uvmhist.h"
@@ -2400,8 +2400,6 @@
paddr_t pa;
struct vm_page *pg;
- KASSERT((map->flags & VM_MAP_INTRSAFE) == 0);
-
/*
* we assume that the area we are unwiring has actually been wired
* in the first place. this means that we should be able to extract
diff -r 6c4f326db2e4 -r 466faecd5dd7 sys/uvm/uvm_fault_i.h
--- a/sys/uvm/uvm_fault_i.h Sat Feb 18 23:51:27 2012 +0000
+++ b/sys/uvm/uvm_fault_i.h Sun Feb 19 00:05:55 2012 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_fault_i.h,v 1.27 2011/06/12 03:36:03 rmind Exp $ */
+/* $NetBSD: uvm_fault_i.h,v 1.28 2012/02/19 00:05:56 rmind Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -107,16 +107,7 @@
* only be two levels so we won't loop very long.
*/
- /*CONSTCOND*/
- while (1) {
- /*
- * Make sure this is not an "interrupt safe" map.
- * Such maps are never supposed to be involved in
- * a fault.
- */
- if (ufi->map->flags & VM_MAP_INTRSAFE)
- return (false);
-
+ for (;;) {
/*
* lock map
*/
diff -r 6c4f326db2e4 -r 466faecd5dd7 sys/uvm/uvm_km.c
--- a/sys/uvm/uvm_km.c Sat Feb 18 23:51:27 2012 +0000
+++ b/sys/uvm/uvm_km.c Sun Feb 19 00:05:55 2012 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_km.c,v 1.120 2012/02/10 17:35:47 para Exp $ */
+/* $NetBSD: uvm_km.c,v 1.121 2012/02/19 00:05:56 rmind Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -120,7 +120,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.120 2012/02/10 17:35:47 para Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.121 2012/02/19 00:05:56 rmind Exp $");
#include "opt_uvmhist.h"
@@ -499,15 +499,13 @@
panic("uvm_km_check_empty: va %p has pa 0x%llx",
(void *)va, (long long)pa);
}
- if ((map->flags & VM_MAP_INTRSAFE) == 0) {
- mutex_enter(uvm_kernel_object->vmobjlock);
- pg = uvm_pagelookup(uvm_kernel_object,
- va - vm_map_min(kernel_map));
- mutex_exit(uvm_kernel_object->vmobjlock);
- if (pg) {
- panic("uvm_km_check_empty: "
- "has page hashed at %p", (const void *)va);
- }
+ mutex_enter(uvm_kernel_object->vmobjlock);
+ pg = uvm_pagelookup(uvm_kernel_object,
+ va - vm_map_min(kernel_map));
+ mutex_exit(uvm_kernel_object->vmobjlock);
+ if (pg) {
+ panic("uvm_km_check_empty: "
+ "has page hashed at %p", (const void *)va);
}
}
}
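The uvm_km_check_empty() hunk above drops the guard rather than the check: with intrsafe maps gone, any kernel_map address may legitimately be translated to an offset in uvm_kernel_object and looked up there. Restated outside diff context (a sketch; the helper name is hypothetical, but the calls match the new code):

/* Assert that a kernel VA has no page hashed in uvm_kernel_object. */
static void
check_kva_empty(vaddr_t va)
{
	struct vm_page *pg;

	mutex_enter(uvm_kernel_object->vmobjlock);
	pg = uvm_pagelookup(uvm_kernel_object, va - vm_map_min(kernel_map));
	mutex_exit(uvm_kernel_object->vmobjlock);
	if (pg != NULL)
		panic("va %p has page hashed", (void *)va);
}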
diff -r 6c4f326db2e4 -r 466faecd5dd7 sys/uvm/uvm_loan.c
--- a/sys/uvm/uvm_loan.c Sat Feb 18 23:51:27 2012 +0000
+++ b/sys/uvm/uvm_loan.c Sun Feb 19 00:05:55 2012 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_loan.c,v 1.81 2011/08/06 17:25:03 rmind Exp $ */
+/* $NetBSD: uvm_loan.c,v 1.82 2012/02/19 00:05:56 rmind Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -32,7 +32,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_loan.c,v 1.81 2011/08/06 17:25:03 rmind Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_loan.c,v 1.82 2012/02/19 00:05:56 rmind Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@@ -245,7 +245,6 @@
KASSERT(((flags & UVM_LOAN_TOANON) == 0) ^
((flags & UVM_LOAN_TOPAGE) == 0));
- KASSERT((map->flags & VM_MAP_INTRSAFE) == 0);
/*
* "output" is a pointer to the current place to put the loaned page.
diff -r 6c4f326db2e4 -r 466faecd5dd7 sys/uvm/uvm_map.c
--- a/sys/uvm/uvm_map.c Sat Feb 18 23:51:27 2012 +0000
+++ b/sys/uvm/uvm_map.c Sun Feb 19 00:05:55 2012 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_map.c,v 1.313 2012/02/12 20:28:14 martin Exp $ */
+/* $NetBSD: uvm_map.c,v 1.314 2012/02/19 00:05:56 rmind Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -66,7 +66,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.313 2012/02/12 20:28:14 martin Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.314 2012/02/19 00:05:56 rmind Exp $");
#include "opt_ddb.h"
#include "opt_uvmhist.h"
@@ -607,8 +607,6 @@
/*
* vm_map_lock: acquire an exclusive (write) lock on a map.
*
- * => Note that "intrsafe" maps use only exclusive, spin locks.
- *
* => The locking protocol provides for guaranteed upgrade from shared ->
* exclusive by whichever thread currently has the map marked busy.
* See "LOCKING PROTOCOL NOTES" in uvm_map.h. This is horrible; among
@@ -620,24 +618,18 @@
vm_map_lock(struct vm_map *map)
{
- if ((map->flags & VM_MAP_INTRSAFE) != 0) {
- mutex_spin_enter(&map->mutex);
- return;
- }
-
for (;;) {
rw_enter(&map->lock, RW_WRITER);
- if (map->busy == NULL)
+ if (map->busy == NULL || map->busy == curlwp) {
break;
- if (map->busy == curlwp)
- break;
+ }
mutex_enter(&map->misc_lock);
rw_exit(&map->lock);
- if (map->busy != NULL)
+ if (map->busy != NULL) {
cv_wait(&map->cv, &map->misc_lock);
+ }
mutex_exit(&map->misc_lock);
}
-
map->timestamp++;
}
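The retry loop above is the consumer side of the busy protocol referenced in the comment (see "LOCKING PROTOCOL NOTES" in uvm_map.h): a writer that finds map->busy set by another LWP drops the rwlock, parks on map->cv under misc_lock, and retries. For the producer side, a rough sketch assuming the vm_map_busy()/vm_map_unbusy() helpers elsewhere in uvm_map.c behave as their names suggest:

	vm_map_lock(map);
	vm_map_busy(map);	/* record curlwp in map->busy */
	vm_map_unlock(map);	/* rwlock dropped, map still reserved */

	/* ... long-running work that may sleep ... */

	vm_map_lock(map);	/* busy owner passes the check above */
	vm_map_unbusy(map);	/* clear map->busy, cv_broadcast(&map->cv) */
	vm_map_unlock(map);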
@@ -649,15 +641,13 @@
vm_map_lock_try(struct vm_map *map)
{
- if ((map->flags & VM_MAP_INTRSAFE) != 0)
- return mutex_tryenter(&map->mutex);
- if (!rw_tryenter(&map->lock, RW_WRITER))
+ if (!rw_tryenter(&map->lock, RW_WRITER)) {
return false;
+ }
if (map->busy != NULL) {
rw_exit(&map->lock);
return false;
}
-
map->timestamp++;
return true;
}
@@ -670,13 +660,9 @@
vm_map_unlock(struct vm_map *map)
{
- if ((map->flags & VM_MAP_INTRSAFE) != 0)
- mutex_spin_exit(&map->mutex);
- else {
- KASSERT(rw_write_held(&map->lock));
- KASSERT(map->busy == NULL || map->busy == curlwp);
- rw_exit(&map->lock);
- }
+ KASSERT(rw_write_held(&map->lock));
+ KASSERT(map->busy == NULL || map->busy == curlwp);
+ rw_exit(&map->lock);
}
/*
@@ -711,21 +697,17 @@
vm_map_lock_read(struct vm_map *map)
{
- KASSERT((map->flags & VM_MAP_INTRSAFE) == 0);
-
rw_enter(&map->lock, RW_READER);
}
/*
* vm_map_unlock_read: release a shared lock on a map.
*/
-
+
void
vm_map_unlock_read(struct vm_map *map)
{
- KASSERT((map->flags & VM_MAP_INTRSAFE) == 0);
-
rw_exit(&map->lock);
}
@@ -756,11 +738,7 @@
vm_map_locked_p(struct vm_map *map)
{
- if ((map->flags & VM_MAP_INTRSAFE) != 0) {
- return mutex_owned(&map->mutex);
- } else {
- return rw_write_held(&map->lock);
- }
+ return rw_write_held(&map->lock);
}
/*
@@ -775,14 +753,14 @@
UVMHIST_FUNC("uvm_mapent_alloc"); UVMHIST_CALLED(maphist);
me = pool_cache_get(&uvm_map_entry_cache, pflags);
- if (__predict_false(me == NULL))
+ if (__predict_false(me == NULL)) {
return NULL;
+ }
me->flags = 0;
-
UVMHIST_LOG(maphist, "<- new entry=0x%x [kentry=%d]", me,
- ((map->flags & VM_MAP_INTRSAFE) != 0 || map == kernel_map), 0, 0);
- return (me);
+ (map == kernel_map), 0, 0);
+ return me;
}
/*
@@ -1131,8 +1109,7 @@
* detect a popular device driver bug.
*/
- KASSERT(doing_shutdown || curlwp != NULL ||
- (map->flags & VM_MAP_INTRSAFE));
+ KASSERT(doing_shutdown || curlwp != NULL);
/*
* zero-sized mapping doesn't make any sense.
@@ -1156,11 +1133,9 @@
/*
* figure out where to put new VM range
*/
-
retry:
if (vm_map_lock_try(map) == false) {
- if ((flags & UVM_FLAG_TRYLOCK) != 0 &&
- (map->flags & VM_MAP_INTRSAFE) == 0) {
+ if ((flags & UVM_FLAG_TRYLOCK) != 0) {
return EAGAIN;
}
vm_map_lock(map); /* could sleep here */
@@ -4180,6 +4155,7 @@
if (vm->vm_shm != NULL)
shmexit(vm);
#endif
+
if (map->nentries) {
uvm_unmap_remove(map, vm_map_min(map), vm_map_max(map),
&dead_entries, 0);
@@ -4188,8 +4164,8 @@
}