Source-Changes-HG archive
[src/trunk]: src/sys/kern shmctl(SHM_LOCK) does not need to mess with mapping...
details: https://anonhg.NetBSD.org/src/rev/2402c5455159
branches: trunk
changeset: 451878:2402c5455159
user: chs <chs%NetBSD.org@localhost>
date: Mon Jun 10 00:35:47 2019 +0000
description:
shmctl(SHM_LOCK) does not need to mess with mappings of the shm segment;
wiring the pages of the backing object with uvm_obj_wirepages() is sufficient.
This fixes the problem reported in
https://syzkaller.appspot.com/bug?id=71f9271d761f5b6ed517a18030dc04f0135e6179
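
[Editor's note] For context: SHM_LOCK is the shmctl(2) command that wires a
System V shared memory segment's pages into physical memory, and SHM_UNLOCK
unwires them. The following minimal userland sketch is an illustration, not
part of the commit; SHM_LOCK normally requires privilege, and the segment
size here is arbitrary. It exercises the code path this change rewrites:

#include <sys/ipc.h>
#include <sys/shm.h>
#include <err.h>
#include <stdlib.h>

int
main(void)
{
	/* Create a private 64 KiB segment (size is arbitrary). */
	int shmid = shmget(IPC_PRIVATE, 65536, IPC_CREAT | 0600);
	if (shmid == -1)
		err(1, "shmget");

	/*
	 * Wire the segment.  With this change the kernel wires the
	 * pages of the segment's backing uvm object directly
	 * (uvm_obj_wirepages()) instead of also walking the caller's
	 * mappings and calling uvm_map_pageable() on them.
	 */
	if (shmctl(shmid, SHM_LOCK, NULL) == -1)
		err(1, "shmctl(SHM_LOCK)");

	if (shmctl(shmid, SHM_UNLOCK, NULL) == -1)
		err(1, "shmctl(SHM_UNLOCK)");

	/* Remove the segment. */
	if (shmctl(shmid, IPC_RMID, NULL) == -1)
		err(1, "shmctl(IPC_RMID)");
	return EXIT_SUCCESS;
}

Before this change the kernel both wired the backing uvm object and wired
the caller's own mapping of the segment; afterwards only the object's pages
are wired.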
diffstat:
sys/kern/sysv_shm.c | 73 +++++++++++++---------------------------------------
1 file changed, 19 insertions(+), 54 deletions(-)
diffs (124 lines):
diff -r b1573db063c9 -r 2402c5455159 sys/kern/sysv_shm.c
--- a/sys/kern/sysv_shm.c	Sun Jun 09 18:44:29 2019 +0000
+++ b/sys/kern/sysv_shm.c	Mon Jun 10 00:35:47 2019 +0000
@@ -1,4 +1,4 @@
-/*	$NetBSD: sysv_shm.c,v 1.134 2019/04/10 10:03:50 pgoyette Exp $	*/
+/*	$NetBSD: sysv_shm.c,v 1.135 2019/06/10 00:35:47 chs Exp $	*/
 
 /*-
  * Copyright (c) 1999, 2007 The NetBSD Foundation, Inc.
@@ -61,7 +61,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: sysv_shm.c,v 1.134 2019/04/10 10:03:50 pgoyette Exp $");
+__KERNEL_RCSID(0, "$NetBSD: sysv_shm.c,v 1.135 2019/06/10 00:35:47 chs Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_sysv.h"
@@ -249,55 +249,30 @@
 /*
  * Lock/unlock the memory.
  * => must be called with shm_lock held;
- * => called from one place, thus, inline;
  */
-static inline int
-shm_memlock(struct lwp *l, struct shmid_ds *shmseg, int shmid, int cmd)
+static int
+shm_memlock(struct shmid_ds *shmseg, int shmid, int cmd)
 {
-	struct proc *p = l->l_proc;
-	struct shmmap_entry *shmmap_se;
-	struct shmmap_state *shmmap_s;
 	size_t size;
 	int error;
 
 	KASSERT(mutex_owned(&shm_lock));
-	shmmap_s = shmmap_getprivate(p);
 
-	/* Find our shared memory address by shmid */
-	SLIST_FOREACH(shmmap_se, &shmmap_s->entries, next) {
-		if (shmmap_se->shmid != shmid)
-			continue;
-
-		size = (shmseg->shm_segsz + PGOFSET) & ~PGOFSET;
+	size = round_page(shmseg->shm_segsz);
 
-		if (cmd == SHM_LOCK &&
-		    (shmseg->shm_perm.mode & SHMSEG_WIRED) == 0) {
-			/* Wire the object and map, then tag it */
-			error = uvm_obj_wirepages(shmseg->_shm_internal,
-			    0, size, NULL);
-			if (error)
-				return EIO;
-			error = uvm_map_pageable(&p->p_vmspace->vm_map,
-			    shmmap_se->va, shmmap_se->va + size, false, 0);
-			if (error) {
-				uvm_obj_unwirepages(shmseg->_shm_internal,
-				    0, size);
-				if (error == EFAULT)
-					error = ENOMEM;
-				return error;
-			}
-			shmseg->shm_perm.mode |= SHMSEG_WIRED;
+	if (cmd == SHM_LOCK && (shmseg->shm_perm.mode & SHMSEG_WIRED) == 0) {
+		/* Wire the object and map, then tag it */
+		error = uvm_obj_wirepages(shmseg->_shm_internal,
+		    0, size, NULL);
+		if (error)
+			return EIO;
+		shmseg->shm_perm.mode |= SHMSEG_WIRED;
 
-		} else if (cmd == SHM_UNLOCK &&
-		    (shmseg->shm_perm.mode & SHMSEG_WIRED) != 0) {
-			/* Unwire the object and map, then untag it */
-			uvm_obj_unwirepages(shmseg->_shm_internal, 0, size);
-			error = uvm_map_pageable(&p->p_vmspace->vm_map,
-			    shmmap_se->va, shmmap_se->va + size, true, 0);
-			if (error)
-				return EIO;
-			shmseg->shm_perm.mode &= ~SHMSEG_WIRED;
-		}
+	} else if (cmd == SHM_UNLOCK &&
+	    (shmseg->shm_perm.mode & SHMSEG_WIRED) != 0) {
+		/* Unwire the object, then untag it */
+		uvm_obj_unwirepages(shmseg->_shm_internal, 0, size);
+		shmseg->shm_perm.mode &= ~SHMSEG_WIRED;
 	}
 
 	return 0;
@@ -462,16 +437,6 @@
 	    UVM_MAPFLAG(prot, prot, UVM_INH_SHARE, UVM_ADV_RANDOM, flags));
 	if (error)
 		goto err_detach;
-	if (shm_use_phys || (shmseg->shm_perm.mode & SHMSEG_WIRED)) {
-		error = uvm_map_pageable(&vm->vm_map, attach_va,
-		    attach_va + size, false, 0);
-		if (error) {
-			if (error == EFAULT)
-				error = ENOMEM;
-			uvm_deallocate(&vm->vm_map, attach_va, size);
-			goto err_detach;
-		}
-	}
 
 	/* Set the new address, and update the time */
 	mutex_enter(&shm_lock);
@@ -595,7 +560,7 @@
 		    (cmd == SHM_LOCK) ? KAUTH_REQ_SYSTEM_SYSVIPC_SHM_LOCK :
 		    KAUTH_REQ_SYSTEM_SYSVIPC_SHM_UNLOCK, NULL, NULL, NULL)) != 0)
 			break;
-		error = shm_memlock(l, shmseg, shmid, cmd);
+		error = shm_memlock(shmseg, shmid, cmd);
 		break;
 	default:
 		error = EINVAL;
@@ -717,7 +682,7 @@
 		mutex_exit(&shm_lock);
 		return ENOSPC;
 	}
-	size = (size + PGOFSET) & ~PGOFSET;
+	size = round_page(size);
 	if (shm_committed + btoc(size) > shminfo.shmall) {
 		mutex_exit(&shm_lock);
 		return ENOMEM;
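
[Editor's note] Two remarks on the diff. First, wiring is now purely a
property of the segment's backing uvm object: shm_memlock() no longer takes
the calling lwp or walks its shmmap entries, and shmat() no longer has to
re-wire each new mapping of a SHMSEG_WIRED segment with uvm_map_pageable().
Second, the open-coded page rounding is replaced with the equivalent
round_page() macro. A standalone sketch of that equivalence, using
illustrative stand-ins for the kernel's machine-dependent constants:

#include <assert.h>
#include <stddef.h>

/* Illustrative stand-ins for the kernel's MD page constants. */
#define PAGE_SIZE	4096UL
#define PGOFSET		(PAGE_SIZE - 1)

/* The same rounding the kernel's round_page() macro performs. */
static size_t
round_page(size_t sz)
{
	return (sz + PGOFSET) & ~PGOFSET;
}

int
main(void)
{
	assert(round_page(0) == 0);
	assert(round_page(1) == PAGE_SIZE);
	assert(round_page(PAGE_SIZE) == PAGE_SIZE);
	assert(round_page(PAGE_SIZE + 1) == 2 * PAGE_SIZE);
	return 0;
}

Both forms round a byte count up to the next page boundary; round_page()
just says so by name.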