Source-Changes-HG archive
[src/trunk]: src/sys/arch/x86 Back the pv_hash stuff out. Now seeing errors ...
details: https://anonhg.NetBSD.org/src/rev/eaba6c7501e2
branches: trunk
changeset: 466822:eaba6c7501e2
user: ad <ad%NetBSD.org@localhost>
date: Thu Jan 02 21:39:42 2020 +0000
description:
Back the pv_hash stuff out. Now seeing errors from ATOMIC_*.
For another day.
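
For context: the lines added below restore the earlier pv-entry hash design, in which each bucket is a plain SLIST and bucket membership is protected by one of a small array of spin mutexes selected by the same hash value. The following is a condensed, user-space sketch of that pattern only; pthread mutexes stand in for kmutex_t, struct pv_entry is trimmed to its lookup key, and PAGE_SHIFT is assumed to be 12. It is illustrative and is not the pmap code itself.

/*
 * Condensed user-space sketch of the hashed-bucket scheme this change
 * restores.  Assumptions: pthread mutexes stand in for kmutex_t, struct
 * pv_entry is trimmed to its lookup key, PAGE_SHIFT is taken as 12.
 * Illustrative only; not the actual pmap code.
 */
#include <sys/types.h>
#include <sys/queue.h>
#include <pthread.h>
#include <stdint.h>

#define PV_HASH_SIZE		32768
#define PV_HASH_LOCK_CNT	32
#define PV_PAGE_SHIFT		12	/* stand-in for PAGE_SHIFT */

struct pv_entry {
	void			*pte_ptp;	/* page table page owning the mapping */
	uintptr_t		 pte_va;	/* mapped virtual address */
	SLIST_ENTRY(pv_entry)	 pve_hash;	/* hash bucket linkage */
};

static struct pv_hash_head {
	SLIST_HEAD(, pv_entry)	 hh_list;
} pv_hash_heads[PV_HASH_SIZE];

static pthread_mutex_t pv_hash_locks[PV_HASH_LOCK_CNT];

static u_int
pvhash_hash(void *ptp, uintptr_t va)
{

	/* Mix the page table page pointer with the page number of the VA. */
	return (uintptr_t)ptp / sizeof(struct pv_entry) + (va >> PV_PAGE_SHIFT);
}

static void
pv_hash_init(void)
{
	int i;

	/* Mirrors the pmap_init() hunk: empty buckets, initialized locks. */
	for (i = 0; i < PV_HASH_SIZE; i++)
		SLIST_INIT(&pv_hash_heads[i].hh_list);
	for (i = 0; i < PV_HASH_LOCK_CNT; i++)
		pthread_mutex_init(&pv_hash_locks[i], NULL);
}

static void
pv_insert(struct pv_entry *pve)
{
	u_int hash = pvhash_hash(pve->pte_ptp, pve->pte_va);
	pthread_mutex_t *lock = &pv_hash_locks[hash % PV_HASH_LOCK_CNT];
	struct pv_hash_head *hh = &pv_hash_heads[hash % PV_HASH_SIZE];

	/* Bucket membership only changes while the hashed lock is held. */
	pthread_mutex_lock(lock);
	SLIST_INSERT_HEAD(&hh->hh_list, pve, pve_hash);
	pthread_mutex_unlock(lock);
}

The scheme trades some contention (32 locks covering 32768 buckets) for the simplicity of ordinary list operations under a lock.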
diffstat:
sys/arch/x86/include/pmap_pv.h | 4 +-
sys/arch/x86/x86/pmap.c | 127 ++++++++++++++++------------------------
2 files changed, 52 insertions(+), 79 deletions(-)
diffs (231 lines):
diff -r 718b976b9bef -r eaba6c7501e2 sys/arch/x86/include/pmap_pv.h
--- a/sys/arch/x86/include/pmap_pv.h Thu Jan 02 21:17:13 2020 +0000
+++ b/sys/arch/x86/include/pmap_pv.h Thu Jan 02 21:39:42 2020 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap_pv.h,v 1.7 2020/01/02 19:20:01 ad Exp $ */
+/* $NetBSD: pmap_pv.h,v 1.8 2020/01/02 21:39:42 ad Exp $ */
/*-
* Copyright (c)2008 YAMAMOTO Takashi,
@@ -56,7 +56,7 @@
struct pv_entry {
struct pv_pte pve_pte; /* should be the first member */
LIST_ENTRY(pv_entry) pve_list; /* on pv_head::pvh_list */
- struct pv_entry *pve_hash; /* hash table link */
+ SLIST_ENTRY(pv_entry) pve_hash;
};
#define pve_next pve_list.le_next
diff -r 718b976b9bef -r eaba6c7501e2 sys/arch/x86/x86/pmap.c
--- a/sys/arch/x86/x86/pmap.c Thu Jan 02 21:17:13 2020 +0000
+++ b/sys/arch/x86/x86/pmap.c Thu Jan 02 21:39:42 2020 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.351 2020/01/02 21:17:13 ad Exp $ */
+/* $NetBSD: pmap.c,v 1.352 2020/01/02 21:39:42 ad Exp $ */
/*
* Copyright (c) 2008, 2010, 2016, 2017, 2019 The NetBSD Foundation, Inc.
@@ -130,7 +130,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.351 2020/01/02 21:17:13 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.352 2020/01/02 21:39:42 ad Exp $");
#include "opt_user_ldt.h"
#include "opt_lockdebug.h"
@@ -302,8 +302,16 @@
#define VM_PAGE_TO_PP(pg) (&(pg)->mdpage.mp_pp)
#define PV_HASH_SIZE 32768
-
-struct pv_entry *pv_hash_heads[PV_HASH_SIZE];
+#define PV_HASH_LOCK_CNT 32
+
+struct pv_hash_lock {
+ kmutex_t lock;
+} __aligned(CACHE_LINE_SIZE) pv_hash_locks[PV_HASH_LOCK_CNT]
+ __aligned(CACHE_LINE_SIZE);
+
+struct pv_hash_head {
+ SLIST_HEAD(, pv_entry) hh_list;
+} pv_hash_heads[PV_HASH_SIZE];
static u_int
pvhash_hash(struct vm_page *ptp, vaddr_t va)
@@ -312,78 +320,39 @@
return (uintptr_t)ptp / sizeof(*ptp) + (va >> PAGE_SHIFT);
}
-static struct pv_entry **
+static struct pv_hash_head *
pvhash_head(u_int hash)
{
- return &pv_hash_heads[hash & (PV_HASH_SIZE - 1)];
+ return &pv_hash_heads[hash % PV_HASH_SIZE];
+}
+
+static kmutex_t *
+pvhash_lock(u_int hash)
+{
+
+ return &pv_hash_locks[hash % PV_HASH_LOCK_CNT].lock;
}
static struct pv_entry *
-pvhash_remove(struct pv_entry **hh, struct vm_page *ptp, vaddr_t va)
+pvhash_remove(struct pv_hash_head *hh, struct vm_page *ptp, vaddr_t va)
{
- struct pv_entry *pve, *first, *unlocked, *locked, *prev;
- int s, count;
-
- count = SPINLOCK_BACKOFF_MIN;
- for (;;) {
- /*
- * Entries can't be added behind us, only in front of us, so
- * if this is the first entry in the bucket we can remove
- * the entry and observe that nobody has the bucket locked
- * in one pass.
- *
- * Otherwise we have to lock the bucket - use the low bit in
- * the hash head to indicate this. Go to splvm() while in
- * possetion of the lock as we don't want to be interrupted
- * while holding it, and we can't context switch.
- */
- unlocked = (struct pv_entry *)((uintptr_t)*hh & ~1L);
- KASSERT(unlocked != NULL);
- if (unlocked->pve_pte.pte_ptp == ptp &&
- unlocked->pve_pte.pte_va == va) {
- first = atomic_cas_ptr(hh, unlocked, unlocked->pve_hash);
- if (__predict_true(first == unlocked)) {
- /* Easy case - entry removed. */
- return first;
- }
- } else {
- s = splvm();
- locked = (struct pv_entry *)((uintptr_t)unlocked | 1L);
- first = atomic_cas_ptr(hh, unlocked, locked);
- if (first == unlocked) {
- /* Got it! */
- break;
- }
- splx(s);
- }
- SPINLOCK_BACKOFF(count);
- }
-
- KASSERT(((uintptr_t)first & 1L) == 0);
- KASSERT(((uintptr_t)*hh & 1L) == 1);
+ struct pv_entry *pve;
+ struct pv_entry *prev;
prev = NULL;
- for (pve = first; pve != NULL; pve = pve->pve_hash) {
+ SLIST_FOREACH(pve, &hh->hh_list, pve_hash) {
if (pve->pve_pte.pte_ptp == ptp &&
pve->pve_pte.pte_va == va) {
if (prev != NULL) {
- /* Remove, then unlock below. */
- prev->pve_hash = pve->pve_hash;
- break;
+ SLIST_REMOVE_AFTER(prev, pve_hash);
} else {
- /* Remove AND unlock below. */
- first = pve->pve_hash;
- break;
+ SLIST_REMOVE_HEAD(&hh->hh_list, pve_hash);
}
+ break;
}
prev = pve;
}
-
- /* Lock release and SPL drop must be last, and in order. */
- __insn_barrier();
- *hh = first;
- splx(s);
return pve;
}
@@ -1740,7 +1709,14 @@
void
pmap_init(void)
{
- int flags;
+ int i, flags;
+
+ for (i = 0; i < PV_HASH_SIZE; i++) {
+ SLIST_INIT(&pv_hash_heads[i].hh_list);
+ }
+ for (i = 0; i < PV_HASH_LOCK_CNT; i++) {
+ mutex_init(&pv_hash_locks[i].lock, MUTEX_NODEBUG, IPL_VM);
+ }
/*
* initialize caches.
@@ -1892,8 +1868,8 @@
* pmap_enter_pv: enter a mapping onto a pv_head list
* pmap_remove_pv: remove a mapping from a pv_head list
*
- * NOTE: Both pmap_enter_pv and pmap_remove_pv expect the caller to have
- * locked object that owns the page before calling
+ * NOTE: Both pmap_enter_pv and pmap_remove_pv expect the caller to lock
+ * the pvh before calling
*/
/*
@@ -1902,23 +1878,16 @@
static void
insert_pv(struct pmap_page *pp, struct pv_entry *pve)
{
- struct pv_entry **hh, *o, *v;
- u_int hash, count;
+ struct pv_hash_head *hh;
+ kmutex_t *lock;
+ u_int hash;
hash = pvhash_hash(pve->pve_pte.pte_ptp, pve->pve_pte.pte_va);
+ lock = pvhash_lock(hash);
hh = pvhash_head(hash);
-
- /* Observe the bucket unlocked and insert in one pass. */
- count = SPINLOCK_BACKOFF_MIN;
- for (v = *hh;;) {
- o = (struct pv_entry *)((uintptr_t)v & ~1L);
- pve->pve_hash = o;
- v = atomic_cas_ptr(hh, o, pve);
- if (o == v) {
- break;
- }
- SPINLOCK_BACKOFF(count);
- }
+ mutex_spin_enter(lock);
+ SLIST_INSERT_HEAD(&hh->hh_list, pve, pve_hash);
+ mutex_spin_exit(lock);
LIST_INSERT_HEAD(&pp->pp_head.pvh_list, pve, pve_list);
}
@@ -1974,8 +1943,9 @@
static struct pv_entry *
pmap_remove_pv(struct pmap_page *pp, struct vm_page *ptp, vaddr_t va)
{
- struct pv_entry **hh;
+ struct pv_hash_head *hh;
struct pv_entry *pve;
+ kmutex_t *lock;
u_int hash;
KASSERT(ptp == NULL || ptp->uobject != NULL);
@@ -1992,8 +1962,11 @@
}
hash = pvhash_hash(ptp, va);
+ lock = pvhash_lock(hash);
hh = pvhash_head(hash);
+ mutex_spin_enter(lock);
pve = pvhash_remove(hh, ptp, va);
+ mutex_spin_exit(lock);
LIST_REMOVE(pve, pve_list);
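
For comparison, the code removed above avoided per-bucket mutexes entirely: the bucket's head pointer doubled as a lock, with its low bit set via atomic_cas_ptr() to mark the bucket busy during removal. Below is a minimal sketch of just that marking trick, assuming C11 atomics in place of atomic_cas_ptr() and omitting the splvm()/SPINLOCK_BACKOFF handling that the real code paired with it; it is illustrative only.

/*
 * Minimal sketch of the low-bit "bucket busy" trick used by the removed
 * code.  Assumptions: C11 atomics stand in for atomic_cas_ptr(), and the
 * splvm()/SPINLOCK_BACKOFF handling is left out.  Illustrative only.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

struct pv_entry;			/* entries chained through pve_hash */

/* Try to lock a bucket by setting the low bit of its head pointer. */
static bool
bucket_trylock(_Atomic uintptr_t *hh)
{
	uintptr_t unlocked, locked;

	unlocked = atomic_load(hh) & ~(uintptr_t)1;
	locked = unlocked | 1;

	/* Fails if another thread holds the bit or has changed the head. */
	return atomic_compare_exchange_strong(hh, &unlocked, locked);
}

/* Release: store the (possibly new) first entry with the low bit clear. */
static void
bucket_unlock(_Atomic uintptr_t *hh, struct pv_entry *first)
{

	atomic_store(hh, (uintptr_t)first);
}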