Source-Changes-HG archive
[src/trunk]: src UVM locking changes, proposed on tech-kern:
details: https://anonhg.NetBSD.org/src/rev/07a3862f57dd
branches: trunk
changeset: 745159:07a3862f57dd
user: ad <ad%NetBSD.org@localhost>
date: Sun Feb 23 15:46:38 2020 +0000
description:
UVM locking changes, proposed on tech-kern:
- Change the lock on uvm_object, vm_amap and vm_anon to be a RW lock.
- Break v_interlock and vmobjlock apart. v_interlock remains a mutex.
- Do partial PV list locking in the x86 pmap. Others to follow later.
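The core of the conversion, reduced to a minimal sketch: uvm_object::vmobjlock becomes a krwlock_t taken with rw_enter()/rw_exit(), and v_interlock is now a distinct mutex that is acquired separately when touching v_iflag. The identifiers below mirror those used in the diff (vmobjlock, v_interlock, VI_ONWORKLST, vn_syncer_add_to_worklist); the enclosing function example_page_op() is hypothetical and only illustrates the pattern under the usual NetBSD kernel headers, it is not code from the patch.

	/*
	 * Before this change, uobj->vmobjlock was a kmutex_t and, for
	 * vnodes, the very same lock as vp->v_interlock:
	 *
	 *	kmutex_t *lock = uobj->vmobjlock;
	 *	mutex_enter(lock);
	 *	... find/busy/unbusy pages ...
	 *	mutex_exit(lock);
	 *
	 * After the change, vmobjlock is a reader/writer lock and
	 * v_interlock remains a separate mutex guarding v_iflag.
	 */
	static void
	example_page_op(struct vnode *vp)
	{
		struct uvm_object *uobj = &vp->v_uobj;
		krwlock_t *rw = uobj->vmobjlock;

		rw_enter(rw, RW_WRITER);	/* RW_READER where pages are not modified */
		/* ... uvn_findpages(), uvm_page_unbusy(), etc. ... */
		rw_exit(rw);

		mutex_enter(vp->v_interlock);	/* taken separately now */
		if ((vp->v_iflag & VI_ONWORKLST) == 0)
			vn_syncer_add_to_worklist(vp, filedelay);
		mutex_exit(vp->v_interlock);
	}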
diffstat:
external/cddl/osnet/dist/uts/common/fs/zfs/zfs_vnops.c | 54 ++--
external/cddl/osnet/dist/uts/common/fs/zfs/zfs_znode.c | 2 +-
lib/libp2k/p2k.c | 4 +-
sys/arch/hppa/hppa/pmap.c | 8 +-
sys/arch/m68k/m68k/pmap_motorola.c | 24 +-
sys/arch/x86/include/pmap.h | 4 +-
sys/arch/x86/include/pmap_pv.h | 9 +-
sys/arch/x86/x86/pmap.c | 197 ++++++++++------
sys/arch/xen/xen/privcmd.c | 14 +-
sys/coda/coda_vnops.c | 7 +-
sys/dev/fss.c | 6 +-
sys/dev/ic/ssdfb.c | 8 +-
sys/dev/vnd.c | 16 +-
sys/external/bsd/drm2/dist/drm/drm_gem.c | 10 +-
sys/external/bsd/drm2/dist/drm/i915/i915_drv.h | 6 +-
sys/external/bsd/drm2/dist/drm/i915/i915_gem.c | 16 +-
sys/external/bsd/drm2/dist/drm/i915/i915_gem_fence.c | 8 +-
sys/external/bsd/drm2/dist/drm/ttm/ttm_bo.c | 10 +-
sys/external/bsd/drm2/dist/drm/ttm/ttm_bo_util.c | 6 +-
sys/external/bsd/drm2/include/linux/mm.h | 6 +-
sys/external/bsd/drm2/ttm/ttm_bo_vm.c | 6 +-
sys/external/bsd/drm2/ttm/ttm_bus_dma.c | 6 +-
sys/fs/msdosfs/msdosfs_vnops.c | 8 +-
sys/fs/puffs/puffs_msgif.c | 6 +-
sys/fs/puffs/puffs_vnops.c | 26 +-
sys/fs/tmpfs/tmpfs_subr.c | 14 +-
sys/fs/tmpfs/tmpfs_vnops.c | 21 +-
sys/fs/udf/udf_vnops.c | 8 +-
sys/fs/union/union_subr.c | 16 +-
sys/fs/union/union_vnops.c | 14 +-
sys/fs/unionfs/unionfs_vnops.c | 10 +-
sys/kern/kern_synch.c | 45 +++-
sys/kern/sys_descrip.c | 8 +-
sys/kern/vfs_subr.c | 10 +-
sys/kern/vfs_vnode.c | 39 ++-
sys/kern/vfs_vnops.c | 31 +--
sys/miscfs/deadfs/dead_vnops.c | 8 +-
sys/miscfs/genfs/genfs_io.c | 96 ++++---
sys/miscfs/genfs/genfs_vnops.c | 6 +-
sys/miscfs/genfs/layer_vfsops.c | 11 +-
sys/miscfs/genfs/layer_vnops.c | 24 +-
sys/nfs/nfs_bio.c | 38 +-
sys/nfs/nfs_clntsubs.c | 8 +-
sys/nfs/nfs_vnops.c | 6 +-
sys/rump/include/rump-sys/vfs_if.h | 4 +-
sys/rump/include/rump/rumpvfs_if_pub.h | 3 +-
sys/rump/librump/rumpkern/ltsleep.c | 45 ++-
sys/rump/librump/rumpkern/vm.c | 47 +--
sys/rump/librump/rumpvfs/rump_vfs.c | 11 +-
sys/rump/librump/rumpvfs/rumpvfs.ifspec | 3 +-
sys/rump/librump/rumpvfs/rumpvfs_if_wrappers.c | 11 +-
sys/rump/librump/rumpvfs/vm_vfs.c | 24 +-
sys/sys/proc.h | 3 +-
sys/sys/vnode.h | 69 +++--
sys/ufs/chfs/chfs_vnops.c | 8 +-
sys/ufs/ext2fs/ext2fs_readwrite.c | 8 +-
sys/ufs/ffs/ffs_alloc.c | 12 +-
sys/ufs/ffs/ffs_inode.c | 8 +-
sys/ufs/ffs/ffs_snapshot.c | 6 +-
sys/ufs/ffs/ffs_vfsops.c | 6 +-
sys/ufs/ffs/ffs_vnops.c | 8 +-
sys/ufs/lfs/lfs_inode.c | 10 +-
sys/ufs/lfs/lfs_pages.c | 106 ++++----
sys/ufs/lfs/lfs_segment.c | 6 +-
sys/ufs/lfs/lfs_vfsops.c | 8 +-
sys/ufs/lfs/lfs_vnops.c | 6 +-
sys/ufs/lfs/ulfs_inode.c | 10 +-
sys/ufs/lfs/ulfs_readwrite.c | 8 +-
sys/ufs/ufs/ufs_inode.c | 10 +-
sys/ufs/ufs/ufs_readwrite.c | 10 +-
sys/uvm/uvm.h | 9 +-
sys/uvm/uvm_amap.c | 64 ++--
sys/uvm/uvm_amap.h | 10 +-
sys/uvm/uvm_anon.c | 37 +-
sys/uvm/uvm_anon.h | 4 +-
sys/uvm/uvm_aobj.c | 56 ++--
sys/uvm/uvm_bio.c | 65 ++--
sys/uvm/uvm_coredump.c | 6 +-
sys/uvm/uvm_device.c | 20 +-
sys/uvm/uvm_extern.h | 4 +-
sys/uvm/uvm_fault.c | 74 +++---
sys/uvm/uvm_fault_i.h | 4 +-
sys/uvm/uvm_km.c | 16 +-
sys/uvm/uvm_loan.c | 107 ++++----
sys/uvm/uvm_map.c | 24 +-
sys/uvm/uvm_map.h | 4 +-
sys/uvm/uvm_mmap.c | 10 +-
sys/uvm/uvm_mremap.c | 8 +-
sys/uvm/uvm_object.c | 34 +-
sys/uvm/uvm_object.h | 5 +-
sys/uvm/uvm_page.c | 60 ++--
sys/uvm/uvm_page.h | 4 +-
sys/uvm/uvm_page_array.c | 6 +-
sys/uvm/uvm_page_status.c | 10 +-
sys/uvm/uvm_pager.c | 14 +-
sys/uvm/uvm_pdaemon.c | 36 +-
sys/uvm/uvm_pdaemon.h | 4 +-
sys/uvm/uvm_pdpolicy.h | 4 +-
sys/uvm/uvm_pdpolicy_clock.c | 22 +-
sys/uvm/uvm_readahead.c | 16 +-
sys/uvm/uvm_vnode.c | 63 ++++-
tests/rump/kernspace/busypage.c | 25 +-
102 files changed, 1176 insertions(+), 979 deletions(-)
diffs (truncated from 7678 to 300 lines):
diff -r b47b7093b128 -r 07a3862f57dd external/cddl/osnet/dist/uts/common/fs/zfs/zfs_vnops.c
--- a/external/cddl/osnet/dist/uts/common/fs/zfs/zfs_vnops.c Sun Feb 23 15:23:08 2020 +0000
+++ b/external/cddl/osnet/dist/uts/common/fs/zfs/zfs_vnops.c Sun Feb 23 15:46:38 2020 +0000
@@ -728,7 +728,7 @@
{
znode_t *zp = VTOZ(vp);
struct uvm_object *uobj = &vp->v_uobj;
- kmutex_t *mtx = uobj->vmobjlock;
+ krwlock_t *rw = uobj->vmobjlock;
int64_t start;
caddr_t va;
size_t len = nbytes;
@@ -745,10 +745,10 @@
pp = NULL;
npages = 1;
- mutex_enter(mtx);
+ rw_enter(rw, RW_WRITER);
found = uvn_findpages(uobj, start, &npages, &pp, NULL,
UFP_NOALLOC);
- mutex_exit(mtx);
+ rw_exit(rw);
/* XXXNETBSD shouldn't access userspace with the page busy */
if (found) {
@@ -760,9 +760,9 @@
uio, bytes);
}
- mutex_enter(mtx);
+ rw_enter(rw, RW_WRITER);
uvm_page_unbusy(&pp, 1);
- mutex_exit(mtx);
+ rw_exit(rw);
len -= bytes;
off = 0;
@@ -777,13 +777,13 @@
int segflg, dmu_tx_t *tx)
{
struct uvm_object *uobj = &vp->v_uobj;
- kmutex_t *mtx = uobj->vmobjlock;
+ krwlock_t *rw = uobj->vmobjlock;
caddr_t va;
int off, status;
ASSERT(vp->v_mount != NULL);
- mutex_enter(mtx);
+ rw_enter(rw, RW_WRITER);
off = start & PAGEOFFSET;
for (start &= PAGEMASK; len > 0; start += PAGESIZE) {
@@ -816,20 +816,20 @@
/* Nothing to do. */
break;
}
- mutex_exit(mtx);
+ rw_exit(rw);
va = zfs_map_page(pp, S_WRITE);
(void) dmu_read(os, oid, start + off, nbytes,
va + off, DMU_READ_PREFETCH);
zfs_unmap_page(pp, va);
- mutex_enter(mtx);
+ rw_enter(rw, RW_WRITER);
uvm_page_unbusy(&pp, 1);
}
len -= nbytes;
off = 0;
}
- mutex_exit(mtx);
+ rw_exit(rw);
}
#endif /* __NetBSD__ */
@@ -5974,7 +5974,7 @@
const bool memwrite = (ap->a_access_type & VM_PROT_WRITE) != 0;
struct uvm_object * const uobj = &vp->v_uobj;
- kmutex_t * const mtx = uobj->vmobjlock;
+ krwlock_t * const rw = uobj->vmobjlock;
znode_t *zp = VTOZ(vp);
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
vfs_t *mp;
@@ -5987,7 +5987,7 @@
ap->a_m[ap->a_centeridx] = NULL;
return EBUSY;
}
- mutex_exit(mtx);
+ rw_exit(rw);
if (async) {
return 0;
@@ -6005,9 +6005,9 @@
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
- mutex_enter(mtx);
+ rw_enter(rw, RW_WRITER);
if (offset >= vp->v_size) {
- mutex_exit(mtx);
+ rw_exit(rw);
ZFS_EXIT(zfsvfs);
fstrans_done(mp);
return EINVAL;
@@ -6017,14 +6017,14 @@
uvn_findpages(uobj, offset, &npages, &pg, NULL, UFP_ALL);
if (pg->flags & PG_FAKE) {
- mutex_exit(mtx);
+ rw_exit(rw);
va = zfs_map_page(pg, S_WRITE);
err = dmu_read(zfsvfs->z_os, zp->z_id, offset, PAGE_SIZE,
va, DMU_READ_PREFETCH);
zfs_unmap_page(pg, va);
- mutex_enter(mtx);
+ rw_enter(rw, RW_WRITER);
pg->flags &= ~(PG_FAKE);
}
@@ -6033,14 +6033,16 @@
/* For write faults, start dirtiness tracking. */
uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_UNKNOWN);
}
+ mutex_enter(vp->v_interlock);
if ((vp->v_iflag & VI_ONWORKLST) == 0) {
vn_syncer_add_to_worklist(vp, filedelay);
}
if ((vp->v_iflag & (VI_WRMAP|VI_WRMAPDIRTY)) == VI_WRMAP) {
vp->v_iflag |= VI_WRMAPDIRTY;
}
- }
- mutex_exit(mtx);
+ mutex_exit(vp->v_interlock);
+ }
+ rw_exit(rw);
ap->a_m[ap->a_centeridx] = pg;
ZFS_EXIT(zfsvfs);
@@ -6062,7 +6064,7 @@
bool async = (flags & PGO_SYNCIO) == 0;
bool *cleanedp;
struct uvm_object *uobj = &vp->v_uobj;
- kmutex_t *mtx = uobj->vmobjlock;
+ krwlock_t *rw = uobj->vmobjlock;
if (zp->z_sa_hdl == NULL) {
err = 0;
@@ -6121,9 +6123,9 @@
dmu_tx_commit(tx);
out_unbusy:
- mutex_enter(mtx);
+ rw_enter(rw, RW_WRITER);
uvm_page_unbusy(pp, count);
- mutex_exit(mtx);
+ rw_exit(rw);
out:
return (err);
@@ -6185,7 +6187,7 @@
len = UINT64_MAX;
else
len = offhi - offlo;
- mutex_exit(vp->v_interlock);
+ rw_exit(vp->v_uobj.vmobjlock);
if (curlwp == uvm.pagedaemon_lwp) {
error = fstrans_start_nowait(vp->v_mount);
if (error)
@@ -6206,7 +6208,7 @@
rrm_enter(&zfsvfs->z_teardown_lock, RW_READER, FTAG);
rl = zfs_range_lock(zp, offlo, len, RL_WRITER);
- mutex_enter(vp->v_interlock);
+ rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
tsd_set(zfs_putpage_key, &cleaned);
}
error = genfs_putpages(v);
@@ -6244,7 +6246,7 @@
zfs_netbsd_setsize(vnode_t *vp, off_t size)
{
struct uvm_object *uobj = &vp->v_uobj;
- kmutex_t *mtx = uobj->vmobjlock;
+ krwlock_t *rw = uobj->vmobjlock;
page_t *pg;
int count, pgoff;
caddr_t va;
@@ -6262,7 +6264,7 @@
* If there's a partial page, we need to zero the tail.
*/
- mutex_enter(mtx);
+ rw_enter(rw, RW_WRITER);
count = 1;
pg = NULL;
if (uvn_findpages(uobj, tsize, &count, &pg, NULL, UFP_NOALLOC)) {
@@ -6273,7 +6275,7 @@
uvm_page_unbusy(&pg, 1);
}
- mutex_exit(mtx);
+ rw_exit(rw);
}
static int
diff -r b47b7093b128 -r 07a3862f57dd external/cddl/osnet/dist/uts/common/fs/zfs/zfs_znode.c
--- a/external/cddl/osnet/dist/uts/common/fs/zfs/zfs_znode.c Sun Feb 23 15:23:08 2020 +0000
+++ b/external/cddl/osnet/dist/uts/common/fs/zfs/zfs_znode.c Sun Feb 23 15:46:38 2020 +0000
@@ -1561,7 +1561,7 @@
zp->z_unlinked = (zp->z_links == 0);
zp->z_blksz = doi.doi_data_block_size;
#ifdef __NetBSD__
- mutex_enter(vp->v_interlock);
+ rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
(void)VOP_PUTPAGES(vp, 0, 0, PGO_ALLPAGES|PGO_FREE|PGO_SYNCIO);
#else
vn_pages_remove(vp, 0, 0);
diff -r b47b7093b128 -r 07a3862f57dd lib/libp2k/p2k.c
--- a/lib/libp2k/p2k.c Sun Feb 23 15:23:08 2020 +0000
+++ b/lib/libp2k/p2k.c Sun Feb 23 15:46:38 2020 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: p2k.c,v 1.72 2020/02/20 15:48:52 riastradh Exp $ */
+/* $NetBSD: p2k.c,v 1.73 2020/02/23 15:46:38 ad Exp $ */
/*
* Copyright (c) 2007, 2008, 2009 Antti Kantee. All Rights Reserved.
@@ -1386,7 +1386,7 @@
* a way to regain the data from "stable storage".
*/
if (!p2m->p2m_imtmpfsman) {
- rump_pub_vp_interlock(vp);
+ rump_pub_vp_vmobjlock(vp, 1);
RUMP_VOP_PUTPAGES(vp, 0, 0,
PGO_ALLPAGES|PGO_CLEANIT|PGO_FREE);
}
diff -r b47b7093b128 -r 07a3862f57dd sys/arch/hppa/hppa/pmap.c
--- a/sys/arch/hppa/hppa/pmap.c Sun Feb 23 15:23:08 2020 +0000
+++ b/sys/arch/hppa/hppa/pmap.c Sun Feb 23 15:46:38 2020 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.102 2019/12/31 12:40:27 ad Exp $ */
+/* $NetBSD: pmap.c,v 1.103 2020/02/23 15:46:39 ad Exp $ */
/*-
* Copyright (c) 2001, 2002 The NetBSD Foundation, Inc.
@@ -65,7 +65,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.102 2019/12/31 12:40:27 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.103 2020/02/23 15:46:39 ad Exp $");
#include "opt_cputype.h"
@@ -579,7 +579,7 @@
DPRINTF(PDB_FOLLOW|PDB_PV, ("%s(%p, %p, %p, 0x%lx, %p, 0x%x)\n",
__func__, pg, pve, pm, va, pdep, flags));
- KASSERT(pm == pmap_kernel() || uvm_page_owner_locked_p(pg));
+ KASSERT(pm == pmap_kernel() || uvm_page_owner_locked_p(pg, true));
pve->pv_pmap = pm;
pve->pv_va = va | flags;
@@ -594,7 +594,7 @@
struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
struct pv_entry **pve, *pv;
- KASSERT(pmap == pmap_kernel() || uvm_page_owner_locked_p(pg));
+ KASSERT(pmap == pmap_kernel() || uvm_page_owner_locked_p(pg, true));
for (pv = *(pve = &md->pvh_list);
pv; pv = *(pve = &(*pve)->pv_next)) {
diff -r b47b7093b128 -r 07a3862f57dd sys/arch/m68k/m68k/pmap_motorola.c
--- a/sys/arch/m68k/m68k/pmap_motorola.c Sun Feb 23 15:23:08 2020 +0000
+++ b/sys/arch/m68k/m68k/pmap_motorola.c Sun Feb 23 15:46:38 2020 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap_motorola.c,v 1.71 2018/09/03 16:29:25 riastradh Exp $ */
+/* $NetBSD: pmap_motorola.c,v 1.72 2020/02/23 15:46:39 ad Exp $ */
/*-
* Copyright (c) 1999 The NetBSD Foundation, Inc.
@@ -119,7 +119,7 @@
#include "opt_m68k_arch.h"
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap_motorola.c,v 1.71 2018/09/03 16:29:25 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap_motorola.c,v 1.72 2020/02/23 15:46:39 ad Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@@ -2105,9 +2105,9 @@
#endif
pmap_remove_mapping(pmap_kernel(), ptpva,
NULL, PRM_TFLUSH|PRM_CFLUSH, NULL);
- mutex_enter(uvm_kernel_object->vmobjlock);
+ rw_enter(uvm_kernel_object->vmobjlock, RW_WRITER);
uvm_pagefree(PHYS_TO_VM_PAGE(ptppa));