Source-Changes-HG archive
[src/trunk]: src/sys Add the guts of mlockall(MCL_FUTURE). This requires tha...
details: https://anonhg.NetBSD.org/src/rev/8f3e7a6352a0
branches: trunk
changeset: 473751:8f3e7a6352a0
user: thorpej <thorpej@NetBSD.org>
date: Fri Jun 18 05:13:45 1999 +0000
description:
Add the guts of mlockall(MCL_FUTURE). This requires that a process's
"memlock" resource limit be passed down to uvm_mmap(). Update all calls
accordingly.
diffstat:
sys/arch/hp300/dev/grf.c | 5 +-
sys/arch/mac68k/dev/grf.c | 5 +-
sys/arch/pmax/dev/px.c | 7 ++-
sys/arch/pmax/dev/qvss_compat.c | 4 +-
sys/arch/x68k/dev/grf.c | 5 +-
sys/uvm/uvm_extern.h | 6 +-
sys/uvm/uvm_map.c | 14 +++++--
sys/uvm/uvm_mmap.c | 73 +++++++++++++++++++++++++++++++++-------
8 files changed, 86 insertions(+), 33 deletions(-)
diffs (truncated from 320 to 300 lines):
diff -r d92c901949c1 -r 8f3e7a6352a0 sys/arch/hp300/dev/grf.c
--- a/sys/arch/hp300/dev/grf.c Fri Jun 18 05:08:57 1999 +0000
+++ b/sys/arch/hp300/dev/grf.c Fri Jun 18 05:13:45 1999 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: grf.c,v 1.33 1999/04/10 14:28:22 drochner Exp $ */
+/* $NetBSD: grf.c,v 1.34 1999/06/18 05:13:45 thorpej Exp $ */
/*
* Copyright (c) 1988 University of Utah.
@@ -637,7 +637,8 @@
vn.v_rdev = dev; /* XXX */
error = uvm_mmap(&p->p_vmspace->vm_map, (vaddr_t *)addrp,
(vsize_t)len, VM_PROT_ALL, VM_PROT_ALL,
- flags, (caddr_t)&vn, 0);
+ flags, (caddr_t)&vn, 0,
+ p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
if (error == 0)
(void) (*gp->g_sw->gd_mode)(gp, GM_MAP, *addrp);
return(error);
diff -r d92c901949c1 -r 8f3e7a6352a0 sys/arch/mac68k/dev/grf.c
--- a/sys/arch/mac68k/dev/grf.c Fri Jun 18 05:08:57 1999 +0000
+++ b/sys/arch/mac68k/dev/grf.c Fri Jun 18 05:13:45 1999 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: grf.c,v 1.61 1999/04/07 06:45:15 scottr Exp $ */
+/* $NetBSD: grf.c,v 1.62 1999/06/18 05:13:46 thorpej Exp $ */
/*
* Copyright (c) 1988 University of Utah.
@@ -376,7 +376,8 @@
vn.v_rdev = dev; /* XXX */
error = uvm_mmap(&p->p_vmspace->vm_map, (vaddr_t *)addrp,
- (vsize_t)len, VM_PROT_ALL, VM_PROT_ALL, flags, (caddr_t)&vn, 0);
+ (vsize_t)len, VM_PROT_ALL, VM_PROT_ALL, flags, (caddr_t)&vn, 0,
+ p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
/* Offset into page: */
*addrp += (unsigned long)gm->fboff;
diff -r d92c901949c1 -r 8f3e7a6352a0 sys/arch/pmax/dev/px.c
--- a/sys/arch/pmax/dev/px.c Fri Jun 18 05:08:57 1999 +0000
+++ b/sys/arch/pmax/dev/px.c Fri Jun 18 05:13:45 1999 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: px.c,v 1.10 1999/05/19 20:14:46 ad Exp $ */
+/* $NetBSD: px.c,v 1.11 1999/06/18 05:13:46 thorpej Exp $ */
/*-
* Copyright (c) 1999 The NetBSD Foundation, Inc.
@@ -43,7 +43,7 @@
#endif
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: px.c,v 1.10 1999/05/19 20:14:46 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: px.c,v 1.11 1999/06/18 05:13:46 thorpej Exp $");
/*
* px.c: driver for the DEC TURBOchannel 2D and 3D accelerated framebuffers
@@ -1949,5 +1949,6 @@
flags = MAP_SHARED | MAP_FILE;
*va = round_page(p->p_vmspace->vm_taddr + MAXTSIZ + MAXDSIZ);
return uvm_mmap(&p->p_vmspace->vm_map, va, size, prot,
- VM_PROT_ALL, flags, (caddr_t)&vn, 0);
+ VM_PROT_ALL, flags, (caddr_t)&vn, 0,
+ p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
}
diff -r d92c901949c1 -r 8f3e7a6352a0 sys/arch/pmax/dev/qvss_compat.c
--- a/sys/arch/pmax/dev/qvss_compat.c Fri Jun 18 05:08:57 1999 +0000
+++ b/sys/arch/pmax/dev/qvss_compat.c Fri Jun 18 05:13:45 1999 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: qvss_compat.c,v 1.16 1999/04/24 08:01:06 simonb Exp $ */
+/* $NetBSD: qvss_compat.c,v 1.17 1999/06/18 05:13:46 thorpej Exp $ */
/*-
* Copyright (c) 1992, 1993
@@ -487,7 +487,7 @@
*/
error = uvm_mmap(&p->p_vmspace->vm_map, &addr, len,
VM_PROT_ALL, VM_PROT_ALL, MAP_SHARED, (caddr_t)&vn,
- (vaddr_t)0);
+ (vaddr_t)0, p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
if (error)
return (error);
fbp = (struct fbuaccess *)(addr + ((vaddr_t)fbu & PGOFSET));
diff -r d92c901949c1 -r 8f3e7a6352a0 sys/arch/x68k/dev/grf.c
--- a/sys/arch/x68k/dev/grf.c Fri Jun 18 05:08:57 1999 +0000
+++ b/sys/arch/x68k/dev/grf.c Fri Jun 18 05:13:45 1999 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: grf.c,v 1.14 1999/05/05 14:31:16 minoura Exp $ */
+/* $NetBSD: grf.c,v 1.15 1999/06/18 05:13:46 thorpej Exp $ */
/*
* Copyright (c) 1988 University of Utah.
@@ -567,7 +567,8 @@
vn.v_rdev = dev; /* XXX */
error = uvm_mmap(&p->p_vmspace->vm_map, (vaddr_t *)addrp,
(vsize_t)len, VM_PROT_ALL, VM_PROT_ALL,
- flags, (caddr_t)&vn, 0);
+ flags, (caddr_t)&vn, 0,
+ p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
if (error == 0)
(void) (*gp->g_sw->gd_mode)(gp, GM_MAP, *addrp);
return(error);
diff -r d92c901949c1 -r 8f3e7a6352a0 sys/uvm/uvm_extern.h
--- a/sys/uvm/uvm_extern.h Fri Jun 18 05:08:57 1999 +0000
+++ b/sys/uvm/uvm_extern.h Fri Jun 18 05:13:45 1999 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_extern.h,v 1.29 1999/06/17 15:47:22 thorpej Exp $ */
+/* $NetBSD: uvm_extern.h,v 1.30 1999/06/18 05:13:46 thorpej Exp $ */
/*
*
@@ -318,7 +318,7 @@
int uvm_map __P((vm_map_t, vaddr_t *, vsize_t,
struct uvm_object *, vaddr_t, uvm_flag_t));
int uvm_map_pageable __P((vm_map_t, vaddr_t,
- vaddr_t, boolean_t));
+ vaddr_t, boolean_t, boolean_t));
int uvm_map_pageable_all __P((vm_map_t, int, vsize_t));
boolean_t uvm_map_checkprot __P((vm_map_t, vaddr_t,
vaddr_t, vm_prot_t));
@@ -343,7 +343,7 @@
/* uvm_mmap.c */
int uvm_mmap __P((vm_map_t, vaddr_t *, vsize_t,
vm_prot_t, vm_prot_t, int,
- caddr_t, vaddr_t));
+ caddr_t, vaddr_t, vsize_t));
/* uvm_page.c */
struct vm_page *uvm_pagealloc_strat __P((struct uvm_object *,
diff -r d92c901949c1 -r 8f3e7a6352a0 sys/uvm/uvm_map.c
--- a/sys/uvm/uvm_map.c Fri Jun 18 05:08:57 1999 +0000
+++ b/sys/uvm/uvm_map.c Fri Jun 18 05:13:45 1999 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_map.c,v 1.58 1999/06/17 00:24:10 thorpej Exp $ */
+/* $NetBSD: uvm_map.c,v 1.59 1999/06/18 05:13:46 thorpej Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -1982,15 +1982,18 @@
* for that, use uvm_fault_wire()/uvm_fault_unwire() (see uvm_vslock()).
* => regions specified as not pageable require lock-down (wired) memory
* and page tables.
- * => map must not be locked.
+ * => map must never be read-locked
+ * => if islocked is TRUE, map is already write-locked
+ * => we always unlock the map, since we must downgrade to a read-lock
+ * to call uvm_fault_wire()
* => XXXCDC: check this and try and clean it up.
*/
int
-uvm_map_pageable(map, start, end, new_pageable)
+uvm_map_pageable(map, start, end, new_pageable, islocked)
vm_map_t map;
vaddr_t start, end;
- boolean_t new_pageable;
+ boolean_t new_pageable, islocked;
{
vm_map_entry_t entry, start_entry, failed_entry;
int rv;
@@ -2003,7 +2006,8 @@
panic("uvm_map_pageable: map %p not pageable", map);
#endif
- vm_map_lock(map);
+ if (islocked == FALSE)
+ vm_map_lock(map);
VM_MAP_RANGE_CHECK(map, start, end);
/*
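The new locking contract is easiest to see from the caller's side. A
hedged sketch (a fragment, not from this diff; variable names are
illustrative) of a caller that already holds the map write-locked, as
uvm_mmap() below does:

    int rv;

    vm_map_lock(map);               /* write-lock the map */
    /* ... examine or edit map entries under the lock ... */

    /*
     * islocked == TRUE: uvm_map_pageable() skips vm_map_lock().
     * Either way it returns with the map unlocked, because it must
     * downgrade to a read-lock to call uvm_fault_wire().
     */
    rv = uvm_map_pageable(map, start, end, FALSE /* i.e. wire */, TRUE);

    /* No vm_map_unlock() here; uvm_map_pageable() already dropped it. */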
diff -r d92c901949c1 -r 8f3e7a6352a0 sys/uvm/uvm_mmap.c
--- a/sys/uvm/uvm_mmap.c Fri Jun 18 05:08:57 1999 +0000
+++ b/sys/uvm/uvm_mmap.c Fri Jun 18 05:13:45 1999 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_mmap.c,v 1.24 1999/06/17 21:05:19 thorpej Exp $ */
+/* $NetBSD: uvm_mmap.c,v 1.25 1999/06/18 05:13:47 thorpej Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -514,7 +514,7 @@
*/
error = uvm_mmap(&p->p_vmspace->vm_map, &addr, size, prot, maxprot,
- flags, handle, pos);
+ flags, handle, pos, p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
if (error == 0)
/* remember to add offset */
@@ -902,7 +902,8 @@
return (error);
#endif
- error = uvm_map_pageable(&p->p_vmspace->vm_map, addr, addr+size, FALSE);
+ error = uvm_map_pageable(&p->p_vmspace->vm_map, addr, addr+size, FALSE,
+ FALSE);
return (error == KERN_SUCCESS ? 0 : ENOMEM);
}
@@ -948,7 +949,8 @@
return (error);
#endif
- error = uvm_map_pageable(&p->p_vmspace->vm_map, addr, addr+size, TRUE);
+ error = uvm_map_pageable(&p->p_vmspace->vm_map, addr, addr+size, TRUE,
+ FALSE);
return (error == KERN_SUCCESS ? 0 : ENOMEM);
}
@@ -965,7 +967,6 @@
struct sys_mlockall_args /* {
syscallarg(int) flags;
} */ *uap = v;
- vsize_t limit;
int error, flags;
flags = SCARG(uap, flags);
@@ -974,16 +975,13 @@
(flags & ~(MCL_CURRENT|MCL_FUTURE)) != 0)
return (EINVAL);
-#ifdef pmap_wired_count
- /* Actually checked in uvm_map_pageable_all() */
- limit = p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur;
-#else
- limit = 0;
+#ifndef pmap_wired_count
if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
return (error);
#endif
- error = uvm_map_pageable_all(&p->p_vmspace->vm_map, flags, limit);
+ error = uvm_map_pageable_all(&p->p_vmspace->vm_map, flags,
+ p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
switch (error) {
case KERN_SUCCESS:
error = 0;
@@ -1029,7 +1027,7 @@
*/
int
-uvm_mmap(map, addr, size, prot, maxprot, flags, handle, foff)
+uvm_mmap(map, addr, size, prot, maxprot, flags, handle, foff, locklimit)
vm_map_t map;
vaddr_t *addr;
vsize_t size;
@@ -1037,6 +1035,7 @@
int flags;
caddr_t handle; /* XXX: VNODE? */
vaddr_t foff;
+ vsize_t locklimit;
{
struct uvm_object *uobj;
struct vnode *vp;
@@ -1151,8 +1150,51 @@
retval = uvm_map(map, addr, size, uobj, foff, uvmflag);
- if (retval == KERN_SUCCESS)
- return(0);
+ if (retval == KERN_SUCCESS) {
+ /*
+ * POSIX 1003.1b -- if our address space was configured
+ * to lock all future mappings, wire the one we just made.
+ */
+ if (prot == VM_PROT_NONE) {
+ /*
+ * No more work to do in this case.
+ */
+ return (0);
+ }
+
+ vm_map_lock(map);
+
+ if (map->flags & VM_MAP_WIREFUTURE) {
+ /*
+ * uvm_map_pageable() always returns the map
+ * unlocked.
+ */
+ if ((atop(size) + uvmexp.wired) > uvmexp.wiredmax
+#ifdef pmap_wired_count
+ || (locklimit != 0 && (size +
+ ptoa(pmap_wired_count(vm_map_pmap(map)))) >
+ locklimit)
+#endif
+ ) {
+ retval = KERN_RESOURCE_SHORTAGE;
+ /* unmap the region! */
+ (void) uvm_unmap(map, *addr, *addr + size);
+ goto bad;
+ }
+ retval = uvm_map_pageable(map, *addr, *addr + size,
+ FALSE, TRUE);
+ if (retval != KERN_SUCCESS) {
+ /* unmap the region! */
+ (void) uvm_unmap(map, *addr, *addr + size);
+ goto bad;
+ }
+ return (0);
+ }
+
+ vm_map_unlock(map);
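For reference, the two limit checks added above, restated as a
self-contained sketch. A 4 KB page size is assumed, and all names other
than locklimit are illustrative stand-ins for uvmexp.wired,
uvmexp.wiredmax, and ptoa(pmap_wired_count(vm_map_pmap(map))):

    #define PAGE_SHIFT      12      /* assumed page size: 4 KB */
    #define atop(x)         ((unsigned long)(x) >> PAGE_SHIFT)

    typedef unsigned long vsize_t;

    static int
    wire_would_exceed_limits(vsize_t size, unsigned long wired_pages,
        unsigned long wiredmax_pages, vsize_t wired_bytes, vsize_t locklimit)
    {
            /* System-wide cap, counted in pages (uvmexp.wired vs. wiredmax). */
            if (atop(size) + wired_pages > wiredmax_pages)
                    return (1);

            /*
             * Per-process RLIMIT_MEMLOCK, counted in bytes; a locklimit
             * of 0 means the caller asked for no per-process check.
             */
            if (locklimit != 0 && size + wired_bytes > locklimit)
                    return (1);

            return (0);
    }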