[src/trunk]: src/sys/arch/powerpc Add a common file to do pmap_zero_page/pmap...
details: https://anonhg.NetBSD.org/src/rev/bfc794684c4c
branches: trunk
changeset: 534111:bfc794684c4c
user: matt <matt%NetBSD.org@localhost>
date: Wed Jul 17 03:11:07 2002 +0000
description:
Add a common file to do pmap_zero_page/pmap_copy_page/pmap_pageidlezero and
pmap_syncicache. This file uses a ppc feature in a sick and twisted way
to avoid mapping the physical pages used by those routines. It performs
the operations with the MMU disabled, but since PPC exceptions save and restore
the machine state and are invoked with the MMU disabled, this doesn't have
an adverse effect on the system.
Currently only enabled for MPC6xx and !OLDPMAP.
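To illustrate the trick the description refers to, here is a minimal sketch of zeroing a physical page with data relocation turned off. It is not the committed pmap_subr.c; the function name pmap_zero_page_unmapped is hypothetical, and PSL_EE, PSL_DR, NBPG and CACHELINESIZE are assumed to carry their usual NetBSD/powerpc meanings. The dcbz loop mirrors the one visible in the removed pmap_zero_page() below.

#include <sys/param.h>		/* NBPG, CACHELINESIZE (assumed) */
#include <machine/psl.h>	/* PSL_EE, PSL_DR */

/* Hypothetical sketch -- not the committed pmap_subr.c. */
void
pmap_zero_page_unmapped(paddr_t pa)
{
	register_t omsr, msr;
	paddr_t off;

	__asm __volatile ("mfmsr %0" : "=r"(omsr));
	/* Disable external interrupts and data relocation. */
	msr = omsr & ~(PSL_EE | PSL_DR);
	__asm __volatile ("mtmsr %0; isync" :: "r"(msr));

	/*
	 * With MSR[DR] clear, pa is interpreted as a real address, so
	 * the page needs no virtual mapping.  dcbz establishes a zeroed
	 * cache line without reading memory first.
	 */
	for (off = 0; off < NBPG; off += CACHELINESIZE)
		__asm __volatile ("dcbz %0,%1" :: "r"(pa), "r"(off));

	/* Restore the saved MSR; translation is back on. */
	__asm __volatile ("mtmsr %0; isync" :: "r"(omsr));
}

As the description notes, this is safe because PPC exceptions save and restore the machine state and are themselves entered with the MMU disabled, so an exception taken inside this window does no harm.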
diffstat:
sys/arch/powerpc/conf/files.powerpc | 3 +-
sys/arch/powerpc/include/mpc6xx/pmap.h | 4 +-
sys/arch/powerpc/mpc6xx/pmap.c | 239 +--------------------------------
sys/arch/powerpc/powerpc/pmap_subr.c | 224 ++++++++++++++++++++++++++++++
4 files changed, 232 insertions(+), 238 deletions(-)
diffs (truncated from 553 to 300 lines):
diff -r e8644e1d295b -r bfc794684c4c sys/arch/powerpc/conf/files.powerpc
--- a/sys/arch/powerpc/conf/files.powerpc Wed Jul 17 02:57:14 2002 +0000
+++ b/sys/arch/powerpc/conf/files.powerpc Wed Jul 17 03:11:07 2002 +0000
@@ -1,4 +1,4 @@
-# $NetBSD: files.powerpc,v 1.32 2002/07/05 18:45:20 matt Exp $
+# $NetBSD: files.powerpc,v 1.33 2002/07/17 03:11:07 matt Exp $
defflag opt_altivec.h ALTIVEC K_ALTIVEC
defflag opt_openpic.h OPENPIC OPENPIC_SERIAL_MODE
@@ -14,6 +14,7 @@
file arch/powerpc/powerpc/kgdb_machdep.c kgdb
file arch/powerpc/powerpc/mem.c
file arch/powerpc/powerpc/openpic.c openpic
+file arch/powerpc/powerpc/pmap_subr.c ppc_mpc6xx & !oldpmap
file arch/powerpc/powerpc/powerpc_machdep.c
file arch/powerpc/powerpc/process_machdep.c
file arch/powerpc/powerpc/sig_machdep.c
diff -r e8644e1d295b -r bfc794684c4c sys/arch/powerpc/include/mpc6xx/pmap.h
--- a/sys/arch/powerpc/include/mpc6xx/pmap.h Wed Jul 17 02:57:14 2002 +0000
+++ b/sys/arch/powerpc/include/mpc6xx/pmap.h Wed Jul 17 03:11:07 2002 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.h,v 1.4 2002/04/23 12:41:07 kleink Exp $ */
+/* $NetBSD: pmap.h,v 1.5 2002/07/17 03:11:08 matt Exp $ */
/*-
* Copyright (C) 1995, 1996 Wolfgang Solfrank.
@@ -88,6 +88,8 @@
boolean_t pmap_clear_bit (struct vm_page *, int);
void pmap_real_memory (paddr_t *, psize_t *);
void pmap_pinit (struct pmap *);
+boolean_t pmap_pageidlezero (paddr_t);
+void pmap_syncicache (paddr_t, psize_t);
#define PMAP_NEED_PROCWR
void pmap_procwr (struct proc *, vaddr_t, size_t);
diff -r e8644e1d295b -r bfc794684c4c sys/arch/powerpc/mpc6xx/pmap.c
--- a/sys/arch/powerpc/mpc6xx/pmap.c Wed Jul 17 02:57:14 2002 +0000
+++ b/sys/arch/powerpc/mpc6xx/pmap.c Wed Jul 17 03:11:07 2002 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.48 2002/07/07 00:43:11 dbj Exp $ */
+/* $NetBSD: pmap.c,v 1.49 2002/07/17 03:11:08 matt Exp $ */
/*-
* Copyright (c) 2001 The NetBSD Foundation, Inc.
* All rights reserved.
@@ -122,13 +122,6 @@
u_long pmap_pvo_remove_depth;
#endif
u_int64_t pmap_pte_spills = 0;
-struct pvo_entry *pmap_pvo_syncicache;
-struct pvo_entry *pmap_pvo_zeropage;
-struct pvo_entry *pmap_pvo_copypage_src;
-struct pvo_entry *pmap_pvo_copypage_dst;
-
-vaddr_t pmap_rkva_start = VM_MIN_KERNEL_ADDRESS;
-unsigned int pmap_rkva_count = 4;
int physmem;
#ifndef MSGBUFADDR
@@ -242,12 +235,8 @@
STATIC struct pvo_entry *pmap_pvo_find_va(pmap_t, vaddr_t, int *);
STATIC volatile pte_t *pmap_pvo_to_pte(const struct pvo_entry *, int);
-STATIC struct pvo_entry *pmap_rkva_alloc(int);
-STATIC void pmap_pa_map(struct pvo_entry *, paddr_t, pte_t *, int *);
-STATIC void pmap_pa_unmap(struct pvo_entry *, pte_t *, int *);
STATIC void tlbia(void);
-STATIC void pmap_syncicache(paddr_t, psize_t);
STATIC void pmap_release (pmap_t);
STATIC void *pmap_boot_find_memory(psize_t, psize_t, int);
@@ -834,7 +823,7 @@
* For now, reserve one segment (minus some overhead) for kernel
* virtual memory
*/
- *start = VM_MIN_KERNEL_ADDRESS + pmap_rkva_count * NBPG;
+ *start = VM_MIN_KERNEL_ADDRESS;
*end = VM_MAX_KERNEL_ADDRESS;
}
@@ -987,70 +976,6 @@
{
}
-/*
- * Fill the given physical page with zeroes.
- */
-void
-pmap_zero_page(paddr_t pa)
-{
- caddr_t va;
-
- if (pa < SEGMENT_LENGTH) {
- va = (caddr_t) pa;
- } else if (pmap_initialized) {
- if (__predict_false(pmap_pvo_zeropage == NULL))
- pmap_pvo_zeropage = pmap_rkva_alloc(VM_PROT_READ|VM_PROT_WRITE);
- pmap_pa_map(pmap_pvo_zeropage, pa, NULL, NULL);
- va = (caddr_t) PVO_VADDR(pmap_pvo_zeropage);
- } else {
- panic("pmap_zero_page: can't zero pa %#lx", pa);
- }
-#if 1
- memset(va, 0, NBPG);
-#else
- {
- int i;
-
- for (i = NBPG/CACHELINESIZE; i > 0; i--) {
- __asm __volatile ("dcbz 0,%0" :: "r"(va));
- va += CACHELINESIZE;
- }
- }
-#endif
- if (pa >= SEGMENT_LENGTH)
- pmap_pa_unmap(pmap_pvo_zeropage, NULL, NULL);
-}
-
-/*
- * Copy the given physical source page to its destination.
- */
-void
-pmap_copy_page(paddr_t src, paddr_t dst)
-{
- if (src < SEGMENT_LENGTH && dst < SEGMENT_LENGTH) {
- memcpy((void *) dst, (void *) src, NBPG);
- return;
- }
- if (pmap_initialized) {
- if (__predict_false(pmap_pvo_copypage_src == NULL))
- pmap_pvo_copypage_src = pmap_rkva_alloc(VM_PROT_READ);
- if (__predict_false(pmap_pvo_copypage_dst == NULL))
- pmap_pvo_copypage_dst = pmap_rkva_alloc(VM_PROT_READ|VM_PROT_WRITE);
-
- pmap_pa_map(pmap_pvo_copypage_src, src, NULL, NULL);
- pmap_pa_map(pmap_pvo_copypage_dst, dst, NULL, NULL);
-
- memcpy((caddr_t)PVO_VADDR(pmap_pvo_copypage_dst),
- (caddr_t)PVO_VADDR(pmap_pvo_copypage_src),
- NBPG);
-
- pmap_pa_unmap(pmap_pvo_copypage_src, NULL, NULL);
- pmap_pa_unmap(pmap_pvo_copypage_dst, NULL, NULL);
- return;
- }
- panic("pmap_copy_page: failed to copy contents of pa %#lx to pa %#lx", src, dst);
-}
-
static __inline int
pmap_pvo_pte_index(const struct pvo_entry *pvo, int ptegidx)
{
@@ -1158,164 +1083,6 @@
return NULL;
}
-void
-pmap_pa_map(struct pvo_entry *pvo, paddr_t pa, pte_t *saved_pt, int *depth_p)
-{
- u_int32_t msr;
- int s;
-
- s = splvm();
- msr = pmap_interrupts_off();
- /*
- * If this pvo already has a valid PTE, we need to save it
- * so it can be restored later. We then just reload the new
- * PTE over the old slot.
- */
- if (saved_pt != NULL) {
- volatile pte_t *pt;
- pt = pmap_pvo_to_pte(pvo, -1);
- if (pt != NULL) {
-#if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
- if (depth_p != NULL && *depth_p == 0)
- panic("pmap_pa_map: pvo %p: valid pt %p"
- " on 0 depth", pvo, pt);
-#endif
- pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
- PVO_PTEGIDX_CLR(pvo);
- pmap_pte_overflow++;
- }
- *saved_pt = pvo->pvo_pte;
- DPRINTFN(PAMAP,
- ("pmap_pa_map: saved pte %#x/%#x va %#lx\n",
- pvo->pvo_pte.pte_hi, pvo->pvo_pte.pte_lo,
- pvo->pvo_vaddr));
- pvo->pvo_pte.pte_lo &= ~PTE_RPGN;
-#if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
- } else if ((pvo->pvo_pte.pte_hi & PTE_VALID) ||
- (depth_p != NULL && (*depth_p) > 0)) {
- panic("pmap_pa_map: unprotected recursive use of pvo %p", pvo);
-#endif
- }
- pvo->pvo_pte.pte_lo |= pa;
- if (!pmap_pte_spill(pvo->pvo_vaddr))
- panic("pmap_pa_map: could not spill pvo %p", pvo);
-#if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
- if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0)
- panic("pmap_pa_map: pvo %p: pte not valid after spill", pvo);
- if (PVO_PTEGIDX_ISSET(pvo) == 0)
- panic("pmap_pa_map: pvo %p: no pte index spill", pvo);
-#endif
- if (depth_p != NULL)
- (*depth_p)++;
- pmap_interrupts_restore(msr);
- splx(s);
-}
-
-void
-pmap_pa_unmap(struct pvo_entry *pvo, pte_t *saved_pt, int *depth_p)
-{
- volatile pte_t *pt;
- u_int32_t msr;
- int s;
-
- s = splvm();
- msr = pmap_interrupts_off();
- pt = pmap_pvo_to_pte(pvo, -1);
- if (pt != NULL) {
- pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
- PVO_PTEGIDX_CLR(pvo);
- pmap_pte_overflow++;
- }
- pvo->pvo_pte.pte_lo &= ~PTE_RPGN;
-
- /*
- * If there is a saved PTE and it's valid, restore it
- * and return.
- */
- if (saved_pt != NULL && (saved_pt->pte_lo & PTE_RPGN) != 0) {
-#if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
- if (pvo->pvo_pte.pte_hi != saved_pt->pte_hi)
- panic("pmap_pa_unmap: pvo %p pte_hi %#x "
- "!= saved pte_hi %#x", pvo, pvo->pvo_pte.pte_hi,
- saved_pt->pte_hi);
-#endif
- if (depth_p != NULL && --(*depth_p) == 0)
- panic("pmap_pa_unmap: restoring but depth == 0");
- pvo->pvo_pte = *saved_pt;
- DPRINTFN(PAMAP,
- ("pmap_pa_unmap: restored pte %#x/%#x va %#lx\n",
- pvo->pvo_pte.pte_hi, pvo->pvo_pte.pte_lo, pvo->pvo_vaddr));
- if (!pmap_pte_spill(pvo->pvo_vaddr))
- panic("pmap_pa_unmap: could not spill pvo %p", pvo);
-#if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
- if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0)
- panic("pmap_pa_unmap: pvo %p: pte not valid after "
- "spill", pvo);
- } else {
- if (depth_p != NULL && --(*depth_p) != 0)
- panic("pmap_pa_unmap: reseting but depth (%u) > 0",
- *depth_p);
-#endif
- }
-
- pmap_interrupts_restore(msr);
- splx(s);
-}
-
-void
-pmap_syncicache(paddr_t pa, psize_t len)
-{
- static int depth;
- static u_int calls;
- DPRINTFN(SYNCICACHE, ("pmap_syncicache[%d]: pa %#lx\n", depth, pa));
- if (pa + len <= SEGMENT_LENGTH) {
- __syncicache((void *)pa, len);
- return;
- }
- if (pmap_initialized) {
- pte_t saved_pte;
- psize_t offset = pa & ADDR_POFF;
- if (__predict_false(pmap_pvo_syncicache == NULL))
- pmap_pvo_syncicache = pmap_rkva_alloc(VM_PROT_READ|VM_PROT_WRITE);
- calls++;
- pmap_pa_map(pmap_pvo_syncicache, pa, &saved_pte, &depth);
- __syncicache((void *)(PVO_VADDR(pmap_pvo_syncicache)|offset),
- len);
- pmap_pa_unmap(pmap_pvo_syncicache, &saved_pte, &depth);
- return;
- }
- panic("pmap_syncicache: can't sync the icache @ pa %#lx", pa);
-}
-
-/*
- * Return an unmapped pvo for a kernel virtual address.
- * Used by pmap functions that operate on physical pages.
- */
-struct pvo_entry *
-pmap_rkva_alloc(int prot)
-{
- struct pvo_entry *pvo;
- volatile pte_t *pt;
- vaddr_t kva;
- int pteidx;
-
- if (pmap_rkva_count == 0)
- panic("pmap_kva_alloc: no more reserved KVAs!");
-
- kva = pmap_rkva_start + (NBPG * --pmap_rkva_count);
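For the icache half of the change, a comparable sketch under the same assumptions as the one above (hypothetical name, not the committed code): with MSR[DR] clear, the standard PowerPC dcbst/sync/icbi sequence can run directly on the physical range, so no temporary mapping like the removed pmap_pvo_syncicache one is needed.

#include <sys/param.h>		/* NBPG, CACHELINESIZE (assumed) */
#include <machine/psl.h>	/* PSL_EE, PSL_DR */

/* Hypothetical sketch -- not the committed pmap_subr.c. */
void
pmap_syncicache_unmapped(paddr_t pa, psize_t len)
{
	register_t omsr, msr;
	paddr_t p;

	__asm __volatile ("mfmsr %0" : "=r"(omsr));
	msr = omsr & ~(PSL_EE | PSL_DR);
	__asm __volatile ("mtmsr %0; isync" :: "r"(msr));

	/* Push modified data cache lines for the range out to memory. */
	for (p = pa; p < pa + len; p += CACHELINESIZE)
		__asm __volatile ("dcbst 0,%0" :: "r"(p));
	__asm __volatile ("sync");

	/* Invalidate the now-stale instruction cache lines. */
	for (p = pa; p < pa + len; p += CACHELINESIZE)
		__asm __volatile ("icbi 0,%0" :: "r"(p));
	__asm __volatile ("sync; isync");

	__asm __volatile ("mtmsr %0; isync" :: "r"(omsr));
}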