Source-Changes-HG archive
[src/trunk]: src/sys/arch Significant pmap changes to no longer rely on the "...
details: https://anonhg.NetBSD.org/src/rev/3c9c1ba2524f
branches: trunk
changeset: 535200:3c9c1ba2524f
user: fredette <fredette%NetBSD.org@localhost>
date: Sun Aug 11 22:29:07 2002 +0000
description:
Significant pmap changes to no longer rely on the "U-bit" (TLB_UNCACHEABLE)
to deal with aliasing of regular memory pages, because many processors don't
support it.
Now the pmap marks all mappings of any page that has both non-equivalent
aliasing and a writable mapping, and the fault handlers watch for this
mark, flushing other mappings out of the TLB and cache before
(re)entering a conflicting mapping.
When a page has non-equivalent aliasing, only one writable mapping at
a time may be in the TLB and cache. If no writable mapping is in the
TLB and cache, any number of read-only mappings may be.
The PA7100LC/PA7300LC fault handlers have not been converted yet.
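
The rule above boils down to a small invariant per physical page. The
following minimal sketch, in plain C rather than the trap.S assembly,
models that invariant under stated assumptions; struct mapping,
enter_mapping() and flush_mapping() are hypothetical names used only
for illustration and do not appear in the commit.

#include <stdbool.h>
#include <stddef.h>

/* Hypothetical model of one mapping of an aliased physical page. */
struct mapping {
        struct mapping *next;           /* next mapping of the same page */
        bool writable;                  /* mapping allows writes */
        bool resident;                  /* currently in the TLB and cache */
};

/* Hypothetical stand-in for the flush the fault handlers perform. */
void
flush_mapping(struct mapping *m)
{
        /* The real code purges the TLB entry and flushes the cache. */
        m->resident = false;
}

/*
 * Before (re)entering 'm' for a page with non-equivalent aliasing,
 * flush any conflicting mapping, so that at most one writable mapping
 * is ever resident and a writable mapping is never resident together
 * with read-only ones.
 */
void
enter_mapping(struct mapping *page_mappings, struct mapping *m)
{
        struct mapping *other;

        for (other = page_mappings; other != NULL; other = other->next) {
                if (other == m || !other->resident)
                        continue;
                if (m->writable || other->writable)
                        flush_mapping(other);
        }
        m->resident = true;
}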
diffstat:
sys/arch/hp700/hp700/genassym.cf | 15 +-
sys/arch/hp700/include/cpu.h | 3 +-
sys/arch/hppa/hppa/hpt.h | 39 +-
sys/arch/hppa/hppa/pmap.c | 496 ++++++++++++++-------------
sys/arch/hppa/hppa/trap.S | 680 ++++++++++++++++++++++++++++++++++++--
sys/arch/hppa/include/pte.h | 29 +-
6 files changed, 952 insertions(+), 310 deletions(-)
diffs (truncated from 1707 to 300 lines):
diff -r efb9533bbd19 -r 3c9c1ba2524f sys/arch/hp700/hp700/genassym.cf
--- a/sys/arch/hp700/hp700/genassym.cf Sun Aug 11 22:03:43 2002 +0000
+++ b/sys/arch/hp700/hp700/genassym.cf Sun Aug 11 22:29:07 2002 +0000
@@ -1,4 +1,4 @@
-# $NetBSD: genassym.cf,v 1.1 2002/06/06 19:48:06 fredette Exp $
+# $NetBSD: genassym.cf,v 1.2 2002/08/11 22:29:07 fredette Exp $
# $OpenBSD: genassym.cf,v 1.18 2001/09/20 18:31:14 mickey Exp $
@@ -75,8 +75,9 @@
# pte things
export TLB_REF_POS
-export TLB_GATE_PROT
+export TLB_NO_RW_ALIAS_POS
export TLB_DIRTY_POS
+export TLB_UNMANAGED_POS
# hpt_table fields
struct hpt_entry
@@ -92,6 +93,16 @@
member PV_VA pv_va
member PV_TLBPAGE pv_tlbpage
member PV_TLBPROT pv_tlbprot
+member PV_NEXT pv_next
+member PV_HPT pv_hpt
+
+# pv_head fields
+struct pv_head
+member PV_HEAD_PVS pv_head_pvs
+member PV_HEAD_WRITABLE_DIRTY_REF pv_head_writable_dirty_ref
+export PV_HEAD_DIRTY_POS
+export PV_HEAD_REF_POS
+export PV_HEAD_WRITABLE_POS
# saved state fields
struct trapframe
diff -r efb9533bbd19 -r 3c9c1ba2524f sys/arch/hp700/include/cpu.h
--- a/sys/arch/hp700/include/cpu.h Sun Aug 11 22:03:43 2002 +0000
+++ b/sys/arch/hp700/include/cpu.h Sun Aug 11 22:29:07 2002 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: cpu.h,v 1.2 2002/08/05 20:58:37 fredette Exp $ */
+/* $NetBSD: cpu.h,v 1.3 2002/08/11 22:29:08 fredette Exp $ */
/* $OpenBSD: cpu.h,v 1.20 2001/01/29 00:01:58 mickey Exp $ */
@@ -125,6 +125,7 @@
#define HPPA_PGALIAS 0x00100000
#define HPPA_PGAMASK 0xfff00000
#define HPPA_PGAOFF 0x000fffff
+#define HPPA_SPAMASK 0xf0f0f000
#define HPPA_IOSPACE 0xf0000000
#define HPPA_IOBCAST 0xfffc0000
diff -r efb9533bbd19 -r 3c9c1ba2524f sys/arch/hppa/hppa/hpt.h
--- a/sys/arch/hppa/hppa/hpt.h Sun Aug 11 22:03:43 2002 +0000
+++ b/sys/arch/hppa/hppa/hpt.h Sun Aug 11 22:29:07 2002 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: hpt.h,v 1.1 2002/06/05 01:04:20 fredette Exp $ */
+/* $NetBSD: hpt.h,v 1.2 2002/08/11 22:29:08 fredette Exp $ */
/*-
* Copyright (c) 2002 The NetBSD Foundation, Inc.
@@ -115,6 +115,9 @@
* Pmap header for hppa.
*/
+/* Predeclare struct hpt_entry. */
+struct hpt_entry;
+
/*
* keep it at 32 bytes for the cache overall satisfaction
* also, align commonly used pairs on double-word boundary
@@ -127,12 +130,9 @@
u_int pv_tlbpage; /* physical page (for TLB load) */
u_int pv_tlbprot; /* TLB format protection */
struct pv_entry *pv_hash; /* VTOP hash bucket list */
- u_int pv_flags; /* flags about this entry */
+ struct hpt_entry *pv_hpt; /* pointer to HPT entry */
};
-/* These are kept in the pv_flags field. */
-#define HPPA_PV_UNMANAGED (1 << 0) /* mapping is unmanaged */
-
/*
* If HPT is defined, we cache the last miss for each bucket using a
* structure defined for the 7100 hardware TLB walker. On non-7100s, this
@@ -152,6 +152,35 @@
struct pv_entry *hpt_entry; /* Pointer to associated hash list */
};
+/*
+ * This structure contains information for a single physical page.
+ */
+struct pv_head {
+
+ /* The struct pv_entry chain for this physical page. */
+ struct pv_entry *pv_head_pvs;
+
+ /*
+ * This word has three fields:
+ *
+ * The least significant bit is a page-referenced bit.
+ *
+ * The next least significant bit is a page-dirty bit.
+ *
+ * The remaining bits are the struct pv_entry * of any
+ * mapping currently in the TLB/cache as writable.
+ * This address is shifted to the right by two bits.
+ * (I.e., mask off the referenced and dirty bits to
+ * recover the pointer.)
+ */
+ u_int pv_head_writable_dirty_ref;
+#define PV_HEAD_DIRTY_POS 30
+#define PV_HEAD_DIRTY (1 << (31 - PV_HEAD_DIRTY_POS))
+#define PV_HEAD_REF_POS 31
+#define PV_HEAD_REF (1 << (31 - PV_HEAD_REF_POS))
+#define PV_HEAD_WRITABLE_POS 29
+};
+
#define HPPA_MAX_PID 0xfffa
#define HPPA_PID_KERNEL 2
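
The pmap.c accessors for the encoded word above fall outside the
truncated diff, so they are not reproduced here. The standalone sketch
below shows one consistent reading of the comment, assuming a 32-bit
pointer whose low two bits are zero (struct pv_entry is at least word
aligned): keeping the pointer shifted right by two in the upper 30 bits
is the same as keeping the pointer itself with its low two bits
replaced by the flags, so masking off PV_HEAD_REF and PV_HEAD_DIRTY
recovers it. pv_head_pack() and pv_head_unpack() are illustrative
names, not identifiers from the commit.

#include <assert.h>
#include <stdint.h>

#define PV_HEAD_DIRTY_POS       30
#define PV_HEAD_DIRTY           (1u << (31 - PV_HEAD_DIRTY_POS))  /* 0x2 */
#define PV_HEAD_REF_POS         31
#define PV_HEAD_REF             (1u << (31 - PV_HEAD_REF_POS))    /* 0x1 */

struct pv_entry;                /* layout is irrelevant to the sketch */

/* Pack an aligned pv_entry pointer together with the ref/dirty bits. */
uint32_t
pv_head_pack(struct pv_entry *pv, uint32_t flags)
{
        uint32_t word = (uint32_t)(uintptr_t)pv;

        assert((word & (PV_HEAD_REF | PV_HEAD_DIRTY)) == 0);
        return word | (flags & (PV_HEAD_REF | PV_HEAD_DIRTY));
}

/* Mask off the referenced and dirty bits to recover the pointer. */
struct pv_entry *
pv_head_unpack(uint32_t word)
{
        return (struct pv_entry *)(uintptr_t)
            (word & ~(PV_HEAD_REF | PV_HEAD_DIRTY));
}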
diff -r efb9533bbd19 -r 3c9c1ba2524f sys/arch/hppa/hppa/pmap.c
--- a/sys/arch/hppa/hppa/pmap.c Sun Aug 11 22:03:43 2002 +0000
+++ b/sys/arch/hppa/hppa/pmap.c Sun Aug 11 22:29:07 2002 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.2 2002/08/05 20:58:35 fredette Exp $ */
+/* $NetBSD: pmap.c,v 1.3 2002/08/11 22:29:08 fredette Exp $ */
/*-
* Copyright (c) 2001, 2002 The NetBSD Foundation, Inc.
@@ -244,22 +244,17 @@
/* Free list of PV entries. */
static struct pv_entry *pv_free_list;
-/*
- * These tables have one entry per physical page. While
- * memory may be sparse, these tables are always dense;
- * we navigate them with the help of vm_physseg_find.
- */
+/* This is an array of struct pv_head, one per physical page. */
+static struct pv_head *pv_head_tbl;
-/* This table is heads of struct pv_entry lists. */
-static struct pv_entry **pv_head_tbl;
-
-/*
- * This table is modified/referenced information. Here,
- * TLB_REF and TLB_DIRTY are shifted right by PV_SHIFT
- * so they fit in the u_char.
+/*
+ * This is a bitmap of page-is-aliased bits.
+ * The magic 5 is log2(sizeof(u_int) * 8), and the magic 31 is 2^5 - 1.
*/
-static u_char *pv_flags_tbl;
-#define PV_SHIFT 24
+static u_int *page_aliased_bitmap;
+#define _PAGE_ALIASED_WORD(pa) page_aliased_bitmap[((pa) >> PGSHIFT) >> 5]
+#define _PAGE_ALIASED_BIT(pa) (1 << (((pa) >> PGSHIFT) & 31))
+#define PAGE_IS_ALIASED(pa) (_PAGE_ALIASED_WORD(pa) & _PAGE_ALIASED_BIT(pa))
struct pmap kernel_pmap_store;
pmap_t kernel_pmap;
@@ -288,15 +283,28 @@
(((va & 0xc0000000) != 0xc0000000)? pmap->pmap_space : HPPA_SID_KERNEL)
/*
- * XXX For now, we assume that two VAs mapped to the same PA, for
- * any kind of accesses, are always bad (non-equivalent) aliases.
- * See page 3-6 of the "PA-RISC 1.1 Architecture and Instruction
- * Set Reference Manual" (HP part number 09740-90039) for more on
- * aliasing.
+ * Page 3-6 of the "PA-RISC 1.1 Architecture and Instruction Set
+ * Reference Manual" (HP part number 09740-90039) defines equivalent
+ * and non-equivalent virtual addresses in the cache.
+ *
+ * This macro evaluates to TRUE iff the two space/virtual address
+ * combinations are non-equivalent aliases, and therefore will find
+ * two different locations in the cache.
+ *
+ * NB: currently, the CPU-specific desidhash() functions disable the
+ * use of the space in all cache hashing functions. This means that
+ * this macro definition is stricter than it has to be (because it
+ * takes space into account), but one day cache space hashing should
+ * be re-enabled. Cache space hashing should yield better performance
+ * through better utilization of the cache, assuming that most aliasing
+ * is the read-only kind, which we do allow in the cache.
*/
-#define BADALIAS(sp1, va1, pr1, sp2, va2, pr2) (TRUE)
+#define NON_EQUIVALENT_ALIAS(sp1, va1, sp2, va2) \
+ (((((va1) ^ (va2)) & ~HPPA_PGAMASK) != 0) || \
+ ((((sp1) ^ (sp2)) & ~HPPA_SPAMASK) != 0))
/* Prototypes. */
+void __pmap_pv_update __P((paddr_t, struct pv_entry *, u_int, u_int));
static __inline void pmap_pv_remove __P((struct pv_entry *));
/*
@@ -347,12 +355,11 @@
if (pv == NULL) {
/*
* We need to find a struct pv_entry to forcibly
- * free. It cannot be wired or unmanaged. We
- * prefer to free mappings that aren't marked as
- * referenced. We search the HPT for an entry
- * to free, starting at a semirandom HPT index
- * determined by the current value of the interval
- * timer.
+ * free. It cannot be wired. We prefer to free
+ * mappings that aren't marked as referenced. We
+ * search the HPT for an entry to free, starting
+ * at a semirandom HPT index determined by the
+ * current value of the interval timer.
*/
hpt_size = hpt_mask / sizeof(*hpt);
mfctl(CR_ITMR, hpt_index_first);
@@ -363,8 +370,7 @@
for (pv = hpt->hpt_entry;
pv != NULL;
pv = pv->pv_hash) {
- if (!(pv->pv_tlbprot & TLB_WIRED) &&
- !(pv->pv_flags & HPPA_PV_UNMANAGED)) {
+ if (!(pv->pv_tlbprot & TLB_WIRED)) {
if (!(pv->pv_tlbprot & TLB_REF))
break;
pv_fallback = pv;
@@ -443,55 +449,6 @@
}
/*
- * Given an HPT entry and a VA->PA mapping, this flushes the
- * mapping from the cache and TLB, possibly invalidates the HPT
- * entry, and optionally frobs protection bits in the mapping.
- * This is used when a mapping is changing.
- *
- * Invalidating the HPT entry prevents the hardware or software HPT
- * walker from using stale information for the mapping, if the mapping
- * happens to be the one cached in the HPT entry.
- */
-static __inline void _pmap_pv_update __P((struct hpt_entry *, struct pv_entry *, u_int, u_int));
-static __inline void
-_pmap_pv_update(struct hpt_entry *hpt, struct pv_entry *pv,
- u_int tlbprot_clear, u_int tlbprot_set)
-{
- int s;
-
- /* We're paranoid and turn off all interrupts. */
- s = splhigh();
-
- /*
- * If TLB_REF and/or TLB_DIRTY aren't set on the mapping
- * already, clear them after the mapping is flushed, in
- * case the flushing causes them to be set.
- */
- tlbprot_clear |= (~pv->pv_tlbprot) & (TLB_REF | TLB_DIRTY);
-
- /* We have to flush the icache first, since fic may use the DTLB. */
- ficache(pv->pv_space, pv->pv_va, NBPG);
- pitlb(pv->pv_space, pv->pv_va);
-
- fdcache(pv->pv_space, pv->pv_va, NBPG);
- pdtlb(pv->pv_space, pv->pv_va);
-
- /* Possibly invalidate the HPT entry. */
- if (hptbtop(pv->pv_va) == hpt->hpt_vpn &&
- pv->pv_space == hpt->hpt_space) {
- hpt->hpt_space = -1;
- hpt->hpt_valid = 0;
- }
-
- /* Frob bits in the protection. */
- pv->pv_tlbprot = (pv->pv_tlbprot & ~tlbprot_clear) | tlbprot_set;
-
- splx(s);
-}
-#define pmap_pv_update(pv, bc, bs) \
- _pmap_pv_update(pmap_hpt_hash((pv)->pv_space, (pv)->pv_va), pv, bc, bs)
-
-/*
* Given a PA, returns the table offset for it.
*/
static __inline int pmap_table_find_pa __P((paddr_t));
@@ -515,7 +472,7 @@
table_off = pmap_table_find_pa(pa);
KASSERT(table_off >= 0);
- return pv_head_tbl[table_off];
+ return pv_head_tbl[table_off].pv_head_pvs;
}
/*
@@ -536,6 +493,134 @@
}
/*
+ * Given a page's PA, checks for non-equivalent aliasing,
+ * and stores and returns the result.
+ */
+static int pmap_pv_check_alias __P((paddr_t));
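
The body of pmap_pv_check_alias() is lost to the truncation, so no
attempt is made to reconstruct it. The standalone fragment below only
exercises the two building blocks that do appear in the diff: the
NON_EQUIVALENT_ALIAS() comparison and the page_aliased_bitmap
indexing, where the magic 5 is log2 of the 32 bits in a word and the
magic 31 masks the bit index within that word. PGSHIFT is assumed to
be 12 (4 KB pages), and the bitmap size is an arbitrary choice for the
demonstration.

#include <stdint.h>
#include <stdio.h>

#define PGSHIFT         12                      /* assumed page shift */
#define HPPA_PGAMASK    0xfff00000u
#define HPPA_SPAMASK    0xf0f0f000u

/* True iff the two space/VA pairs land in different cache locations. */
#define NON_EQUIVALENT_ALIAS(sp1, va1, sp2, va2) \
        (((((va1) ^ (va2)) & ~HPPA_PGAMASK) != 0) || \
         ((((sp1) ^ (sp2)) & ~HPPA_SPAMASK) != 0))

/* One bit per physical page, 32 page bits per u_int word. */
static unsigned int page_aliased_bitmap[1024];

#define _PAGE_ALIASED_WORD(pa)  page_aliased_bitmap[((pa) >> PGSHIFT) >> 5]
#define _PAGE_ALIASED_BIT(pa)   (1u << (((pa) >> PGSHIFT) & 31))
#define PAGE_IS_ALIASED(pa)     (_PAGE_ALIASED_WORD(pa) & _PAGE_ALIASED_BIT(pa))

int
main(void)
{
        uint32_t pa = 0x00123000;       /* arbitrary physical address */

        /* Same offset within the 1 MB alias region: equivalent (prints 0). */
        printf("%d\n",
            NON_EQUIVALENT_ALIAS(0x100u, 0x00045000u, 0x100u, 0x00145000u));
        /* Different offsets: non-equivalent aliases (prints 1). */
        printf("%d\n",
            NON_EQUIVALENT_ALIAS(0x100u, 0x00045000u, 0x100u, 0x00046000u));

        /* Mark the page as aliased, then test the bit. */
        _PAGE_ALIASED_WORD(pa) |= _PAGE_ALIASED_BIT(pa);
        printf("%d\n", PAGE_IS_ALIASED(pa) != 0);
        return 0;
}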