Subject: new MIPS pmap.c
To: None <port-mips@netbsd.org>
From: Toru Nishimura <locore64@alkyltechnology.com>
List: port-mips
Date: 03/06/2005 13:19:21
"You don't get it."
"Why don't you get grown up."
The seasoned researcher played by Louise Fletcher scolds her younger
colleague, played by Christopher Walken, in the movie Brainstorm, filmed
in 1981 by the legendary director Douglas Trumbull. Whether you are a
nerd, a geek, an otaku, or whatever sort of engineer like Dilbert,
I recommend you see it and appreciate the early-'80s technology. It is
all analog, not bargain-basement VFX.
Here is my pmap.c code, which I have never managed to finish over the
years. I am posting it here for VM/pmap engineers as a technology
demonstration:
- it is incomplete and does not compile.
- I changed the strategy repeatedly. The last time I visited this code
I decided to place the linear PTE range at the beginning of KSEG2 (see
the sketch below).
- I am planning to build an LP64 OS. The open issue is the 64-bit TLB
refill handler strategy; I will take a hashed-reverse-PTE approach.
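To illustrate the linear-PTE idea (a rough sketch only; it reuses the
VPT_BASE and mips_btop() names from the code below, and the helper name
is made up for this mail):

/* with a linear PTE range mapped at VPT_BASE, the PTE of any va is one
   array index away */
static inline pt_entry_t *
kvtopte_sketch(vaddr_t va)
{
	return &VPT_BASE[mips_btop(va)];	/* VPT_BASE + (va >> PGSHIFT) */
}

The TLB refill handler can compute the same address with a shift and an
add, which is the whole point of keeping the PTE range linear.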
Toru Nishimura/ALKYL Technology
-=-=-=-
/* $Id: pmap.c,v 1.31 2003/12/06 12:57:10 nisimura Exp $ */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD$");
#include "opt_sysv.h"
#include "opt_cputype.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/buf.h>
#include <sys/user.h>
#ifdef SYSVSHM
#include <sys/shm.h>
#endif
#include <uvm/uvm.h>
#include <mips/cache.h>
#include <mips/locore.h>
#ifndef DEBUG
#define DEBUG
#endif
#ifndef DIAGNOSTIC
#define DIAGNOSTIC
#endif
#ifdef DEBUG
#define PDB_FOLLOW 0x0001
#define PDB_BOOTSTRAP 0x0002
#define PDB_INIT 0x0004
#define PDB_ENTER 0x0008
#define PDB_REMOVE 0x0010
#define PDB_CREATE 0x0020
#define PDB_PTPAGE 0x0040
#define PDB_BITS 0x0080
#define PDB_PROTECT 0x0100
#define PDB_COLLECT 0x0200
#define PDB_WIRING 0x0400
#define PDB_ASID 0x0800
#define PDB_VPTSPACE 0x1000
#define PDB_PARANOIA 0x8000
int pmapdebug = 0;
static const char *XPROT[] = {
"---", "r--", "-w-", "rw-", "--x", "r-x", "-wx", "rwx" };
#endif
/*
* The pools from which pmap structures and sub-structures are allocated.
*/
struct pool pmap_pmap_pool;
struct pool pmap_pv_pool;
struct pmap kernel_pmap_store; /* the kernel's pmap (proc0) */
struct simplelock pmap_all_pmaps_slock;
struct lock pmap_main_lock;
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
#define PMAP_MAP_TO_HEAD_LOCK() \
(void) spinlockmgr(&pmap_main_lock, LK_SHARED, NULL)
#define PMAP_MAP_TO_HEAD_UNLOCK() \
(void) spinlockmgr(&pmap_main_lock, LK_RELEASE, NULL)
#define PMAP_HEAD_TO_MAP_LOCK() \
(void) spinlockmgr(&pmap_main_lock, LK_EXCLUSIVE, NULL)
#define PMAP_HEAD_TO_MAP_UNLOCK() \
(void) spinlockmgr(&pmap_main_lock, LK_RELEASE, NULL)
#else
#define PMAP_MAP_TO_HEAD_LOCK() /* nothing */
#define PMAP_MAP_TO_HEAD_UNLOCK() /* nothing */
#define PMAP_HEAD_TO_MAP_LOCK() /* nothing */
#define PMAP_HEAD_TO_MAP_UNLOCK() /* nothing */
#endif /* MULTIPROCESSOR || LOCKDEBUG */
paddr_t avail_start; /* PA of first available physical page */
paddr_t avail_end; /* PA of last available physical page */
vaddr_t virtual_avail; /* VA of first avail page */
vaddr_t virtual_end; /* VA of last avail page */
boolean_t pmap_initialized = FALSE; /* pmap_init done yet? */
#ifndef PMAP_PV_LOWAT
#define PMAP_PV_LOWAT 16
#endif
int pmap_pv_lowat = PMAP_PV_LOWAT;
/*
* List of all pmaps, used to update them when e.g. additional kernel
* page tables are allocated. This list is kept LRU-ordered by
* pmap_activate().
*/
TAILQ_HEAD(, pmap) pmap_all_pmaps;
int pmap_asid_max, pmap_asid_next, pmap_asid_generation;
#define PMAP_ASID_RESERVED -1
void pmap_do_remove(struct pmap *, vaddr_t, vaddr_t, boolean_t);
int pmap_pv_enter(struct pmap *, struct vm_page *, vaddr_t, void *);
void pmap_pv_remove(struct pmap *, struct vm_page *, vaddr_t);
#define pmap_pv_alloc() pool_get(&pmap_pv_pool, PR_NOWAIT)
#define pmap_pv_free(pv) pool_put(&pmap_pv_pool, (pv))
void *pmap_pv_page_alloc(struct pool *, int);
void pmap_pv_page_free(struct pool *, void *);
struct pool_allocator pmap_pv_page_allocator = {
pmap_pv_page_alloc, pmap_pv_page_free, 0,
};
boolean_t pmap_physpage_alloc(int, paddr_t *);
void pmap_physpage_free(paddr_t);
int pmap_physpage_addref(void *);
int pmap_physpage_delref(void *);
int pmap_ptpage_enter(struct pmap *, vaddr_t);
pt_entry_t *pmap_map_ptes(struct pmap *);
void pmap_unmap_ptes(struct pmap *);
boolean_t pmap_is_current(struct pmap *);
void pmap_asid_alloc(struct pmap *);
void pmap_clearbit(struct vm_page *, unsigned);
void pmap_testout(void);
pt_entry_t **pd0, *desertpt, desertpte;
int nkpte, nkernelpt;
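/*
 * pd0        - the kernel page directory, an array of pointers to PT pages
 * desertpt   - a PT page of invalid PTEs used for the "4MB desert hole
 *              illusion" in VPT/AVPT space (see pmap_bootstrap below)
 * desertpte  - the PTE that maps desertpt
 * nkpte      - number of initial kernel PTEs
 * nkernelpt  - number of PT pages holding those PTEs
 */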
#if defined(MIPS1) && defined(MIPS3)
/* for a combined MIPS1/MIPS3 kernel, PG_N/V/D/G/WIRED/WPROT are runtime variables */
pt_entry_t pg_n, pg_d, pg_v, pg_g, pg_wired, pg_wprot;
#endif
#define MIPS_TBIX() mips1_TBIX()
#define MIPS_TBIZ(va) mips1_TBIZ(va)
extern void mips1_TBIX(void);
extern void mips1_TBIZ(vaddr_t);
void
pmap_bootstrap()
{
pt_entry_t *kptbase, *pte, **pde;
int i;
kmeminit_nkmempages();
/*
* Figure out how many initial PTE's are necessary to map the
* kernel.
*/
nkpte = (VM_PHYS_SIZE + (ubc_nwins << ubc_winshift) +
nbuf * MAXBSIZE + 16 * NCARGS + PAGER_MAP_SIZE) / PAGE_SIZE +
(maxproc * UPAGES) + nkmempages;
#ifdef SYSVSHM
nkpte += shminfo.shmall;
#endif
#ifdef KSEG2IOBUFSIZE
nkpte += (KSEG2IOBUFSIZE >> PGSHIFT);
#endif
nkpte = roundup(nkpte, NPTEPG);
nkernelpt = nkpte / NPTEPG;
/*
* Initialize `FYI' variables. Note we're relying on
* the fact that BSEARCH sorts the vm_physmem[] array
* for us. Must do this before uvm_pageboot_alloc()
* can be called.
*/
avail_start = ptoa(vm_physmem[0].start);
avail_end = ptoa(vm_physmem[vm_nphysseg - 1].end);
virtual_end = VM_MIN_KERNEL_ADDRESS + PAGE_SIZE * nkpte;
/*
* Now actually allocate the kernel PTE array (must be done
* after virtual_end is initialized).
*/
pd0 = (pt_entry_t **)uvm_pageboot_alloc(PAGE_SIZE);
kptbase = (pt_entry_t *)uvm_pageboot_alloc(nkernelpt * PAGE_SIZE);
desertpt = (pt_entry_t *)uvm_pageboot_alloc(PAGE_SIZE);
/*
* Initialize the pools.
*/
pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
&pool_allocator_nointr);
pool_init(&pmap_pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pvpl",
&pmap_pv_page_allocator);
TAILQ_INIT(&pmap_all_pmaps);
spinlockinit(&pmap_main_lock, "pmaplk", 0);
simple_lock_init(&pmap_all_pmaps_slock);
#if defined(MIPS1) && defined(MIPS3)
/* absorb PTE design difference */
if (cpu_arch == CPU_ARCH_MIPS1) {
pg_n = MIPS1_PG_N; pg_d = MIPS1_PG_D;
pg_v = MIPS1_PG_V; pg_g = MIPS1_PG_G;
pg_wired = MIPS1_PG_WIRED;
pg_wprot = MIPS1_PG_WP;
}
else {
pg_n = MIPS3_PG_N; pg_d = MIPS3_PG_D;
pg_v = MIPS3_PG_V; pg_g = MIPS3_PG_G;
pg_wired = MIPS3_PG_WIRED;
pg_wprot = MIPS3_PG_WP;
}
#endif
/* mark kernel PTEs with PG_G */
pte = kptbase;
for (i = 0; i < nkpte; i++)
*pte++ = PG_G;
/* layout kernel PTEs in PD */
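/*
 * With 4KB pages and NPTEPG (1024) PTEs per PT page, each PD slot spans
 * 4MB, so slot 768 corresponds to VA 0xC0000000, the bottom of KSEG2,
 * where kernel virtual memory begins.
 */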
pde = &pd0[768];
pte = kptbase;
for (i = 0; i < nkernelpt; i++) {
*pde = pte;
pde += 1; pte += NPTEPG;
}
/* PD can be found 1018th PT in VPT space */
pd0[1018] = (pt_entry_t *)pd0;
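/*
 * Pointing PD slot 1018 back at the PD itself is the recursive-mapping
 * trick: slot 1018 covers VA 0xFE800000..0xFEBFFFFF, so every PTE of
 * the current address space becomes visible there as
 * VPT_BASE[mips_btop(va)], and the PD itself appears as the 1018th page
 * of that window.
 */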
/* odd PT to make 4MB desert hole illusion in VPT/AVPT space */
desertpte = MIPS_KSEG0_TO_PTE(desertpt) | PG_V | PG_N;
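/*
 * The desert PT backs the VPT/AVPT windows where no real PT page exists
 * (the "4MB desert hole illusion" above); pmap_enter() flushes that
 * stand-in mapping once a real PT page is installed.
 */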
/* effective only for MIPS3; flush D$ for kptes and desertpt */
mips_dcache_wbinv_all();
/*
* Initialize the kernel pmap.
*/
simple_lock_init(&pmap_kernel()->pm_slock);
pmap_kernel()->pm_count = 1;
pmap_kernel()->pm_stats.wired_count = 0;
pmap_kernel()->pm_stats.resident_count = 1;
pmap_kernel()->pm_pdir = pd0;
pmap_kernel()->pm_asid = pmap_kernel()->pm_asidgen = 0;
TAILQ_INSERT_TAIL(&pmap_all_pmaps, pmap_kernel(), pm_list);
pmap_asid_max = (CPUISMIPS3) ? 256 : 64;
pmap_asid_next = 1;
pmap_asid_generation = 0;
MIPS_SETASID(pmap_kernel()->pm_asid);
/* now ready to accept KSEG2 TLB refill */
#ifdef DEBUG
if (pmapdebug & PDB_BOOTSTRAP) {
printf("nkpte = %d kernel pages ", nkpte);
printf("virtual_end = %lx\n", virtual_end);
}
#endif
}
void
pmap_init()
{
#ifdef DEBUG
int bank;
#endif
/*
* Set a low water mark on the pv_entry pool, so that we are
* more likely to have these around even in extreme memory
* starvation.
*/
pool_setlowat(&pmap_pv_pool, pmap_pv_lowat);
curpcb->pcb_pmap = pmap_kernel(); /* proc0's pcb */
/*
* Now it is safe to enable pv entry recording.
*/
pmap_initialized = TRUE;
#ifdef DEBUG
if (pmapdebug & PDB_INIT)
for (bank = 0; bank < vm_nphysseg; bank++) {
printf("bank %d\n", bank);
printf("\tstart = 0x%lx\n", ptoa(vm_physmem[bank].start));
printf("\tend = 0x%lx\n", ptoa(vm_physmem[bank].end));
printf("\tavail_start = 0x%lx\n",
ptoa(vm_physmem[bank].avail_start));
printf("\tavail_end = 0x%lx\n",
ptoa(vm_physmem[bank].avail_end));
}
#endif
#if 0
pmap_testout();
#endif
}
/*
* Define the initial bounds of the kernel virtual address space.
*/
void
pmap_virtual_space(vaddr_t *vstartp, vaddr_t *vendp)
{
*vstartp = VM_MIN_KERNEL_ADDRESS; /* kernel is in KSEG0 */
*vendp = trunc_page(virtual_end); /* XXX need pmap_growkernel() */
}
vaddr_t
pmap_steal_memory(size, vstartp, vendp)
vsize_t size;
vaddr_t *vstartp, *vendp; /* UNUSED */
{
int bank, x;
u_int npgs;
paddr_t pa;
vaddr_t va;
size = round_page(size);
npgs = atop(size);
for (bank = 0; bank < vm_nphysseg; bank++) {
if (uvm.page_init_done == TRUE)
panic("pmap_steal_memory: called _after_ bootstrap");
if (vm_physmem[bank].avail_start != vm_physmem[bank].start ||
vm_physmem[bank].avail_start >= vm_physmem[bank].avail_end)
continue;
if ((vm_physmem[bank].avail_end - vm_physmem[bank].avail_start)
< npgs)
continue;
/*
* There are enough pages here; steal them!
*/
pa = ptoa(vm_physmem[bank].avail_start);
vm_physmem[bank].avail_start += npgs;
vm_physmem[bank].start += npgs;
/*
* Have we used up this segment?
*/
if (vm_physmem[bank].avail_start == vm_physmem[bank].end) {
if (vm_nphysseg == 1)
panic("pmap_steal_memory: out of memory!");
/* Remove this segment from the list. */
vm_nphysseg--;
for (x = bank; x < vm_nphysseg; x++) {
/* structure copy */
vm_physmem[x] = vm_physmem[x + 1];
}
}
va = MIPS_PHYS_TO_KSEG0(pa);
memset((caddr_t)va, 0, size);
return (va);
}
/*
* If we got here, there was no memory left.
*/
panic("pmap_steal_memory: no memory to steal");
}
struct pmap *
pmap_create()
{
struct pmap *pmap;
paddr_t pdpa;
pt_entry_t **pd;
pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
if (pmap_physpage_alloc(PGU_PD, &pdpa) == FALSE)
panic("pmap_create: no page directory available");
pd = (pt_entry_t **)MIPS_PHYS_TO_KSEG0(pdpa);
memcpy(&pd[768], &pd0[768], nkernelpt * sizeof(pt_entry_t *));
pd[1018] = (pt_entry_t *)pd;
pmap->pm_count = 1;
pmap->pm_stats.wired_count = 0;
pmap->pm_stats.resident_count = 1; /* count the PD page */
pmap->pm_pdir = pd;
pmap->pm_asid = PMAP_ASID_RESERVED;
pmap->pm_asidgen = pmap_asid_generation;
simple_lock_init(&pmap->pm_slock);
simple_lock(&pmap_all_pmaps_slock);
TAILQ_INSERT_TAIL(&pmap_all_pmaps, pmap, pm_list);
simple_unlock(&pmap_all_pmaps_slock);
#ifdef DEBUG
if (pmapdebug & PDB_CREATE)
printf("pmap_create: %p\n", pmap);
#endif
return (pmap);
}
void
pmap_destroy(pmap)
struct pmap *pmap;
{
int refs;
PMAP_LOCK(pmap);
refs = --pmap->pm_count;
PMAP_UNLOCK(pmap);
if (refs > 0)
return;
simple_lock(&pmap_all_pmaps_slock);
TAILQ_REMOVE(&pmap_all_pmaps, pmap, pm_list);
simple_unlock(&pmap_all_pmaps_slock);
#ifdef DIAGNOSTIC
if (pmap->pm_stats.resident_count > 1)
panic("pmap_destroy: %p resident_count %ld != 1",
pmap, pmap->pm_stats.resident_count);
#endif
pmap_physpage_free(MIPS_KSEG0_TO_PHYS(pmap->pm_pdir));
pool_put(&pmap_pmap_pool, pmap);
}
void
pmap_reference(pmap)
struct pmap *pmap;
{
#ifdef DEBUG
if (pmapdebug & PDB_FOLLOW)
printf("pmap_reference(%p)\n", pmap);
#endif
PMAP_LOCK(pmap);
pmap->pm_count++;
PMAP_UNLOCK(pmap);
}
#if 0 /* PMAP_GROWKERNEL is yet to be completed */
vaddr_t
pmap_growkernel(maxkvaddr)
vaddr_t maxkvaddr;
{
int morept;
struct pmap *pmap;
vaddr_t kva;
paddr_t ptaddr;
int s, error;
morept = (int)(maxkvaddr - VM_MIN_KERNEL_ADDRESS + (NBPD-1)) / NBPD;
if (morept <= nkernelpt)
goto out; /* we are OK */
kva = VM_MIN_KERNEL_ADDRESS + (nkernelpt * NBPD);
s = splhigh(); /* to be safe */
simple_lock(&pmap_kernel()->pm_slock);
do {
/*
* THIS *MUST* BE CODED SO AS TO WORK IN THE
* pmap_initialized == FALSE CASE! WE MAY BE
* INVOKED WHILE pmap_init() IS RUNNING!
*/
if (uvm.page_init_done == FALSE) {
pt_entry_t *ptep;
int i;
/*
* we're growing the kernel pmap early (from
* uvm_pageboot_alloc()). this case must be
* handled a little differently.
*/
if (uvm_page_physget(&ptaddr) == FALSE)
panic("pmap_growkernel: out of memory");
ptep = (pt_entry_t *)MIPS_PHYS_TO_KSEG0(ptaddr);
for (i = 0; i < 1024; i++)
ptep[i] = PG_G;
pmap_kernel()->pm_pdir[768+nkernelpt] = ptep;
/* count PT page as resident */
pmap_kernel()->pm_stats.resident_count++;
}
else {
error = pmap_ptpage_enter(pmap_kernel(), kva);
if (error)
panic("pmap_growkernel: alloc ptp failed");
/* propagate the new kernel PT to all active pmaps */
simple_lock(&pmap_all_pmaps_slock);
TAILQ_FOREACH(pmap, &pmap_all_pmaps, pm_list) {
pmap->pm_pdir[768+nkernelpt]
= pmap_kernel()->pm_pdir[768+nkernelpt];
pmap->pm_stats.resident_count++;
}
simple_unlock(&pmap_all_pmaps_slock);
}
kva += NBPD;
nkernelpt += 1;
} while (nkernelpt < morept);
simple_unlock(&pmap_kernel()->pm_slock);
splx(s);
out:
return (VM_MIN_KERNEL_ADDRESS + (nkernelpt * NBPD));
}
#endif
int
pmap_enter(pmap, va, pa, prot, flags)
struct pmap *pmap;
vaddr_t va;
paddr_t pa;
vm_prot_t prot;
int flags;
{
struct vm_page *pg; /* if != NULL, managed page */
boolean_t managed, wired;
pt_entry_t *ptes, opte, npte;
void *ptp;
int error;
#ifdef DEBUG
if (pmapdebug & PDB_ENTER)
printf("pmap_enter: %p, VA=%lx, PA=%lx, %s, %x\n",
pmap, va, pa, XPROT[prot], flags);
#endif
#ifdef DIAGNOSTIC
if (pdei(va) == 1018 || pdei(va) == 1019)
panic("pmap_enter: trying to map over VPT/AVPT!");
if (va >= virtual_end)
panic("pmap_enter: beyond virtual end");
#endif
managed = ((pg = PHYS_TO_VM_PAGE(pa)) != 0);
wired = (flags & PMAP_WIRED) != 0;
PMAP_MAP_TO_HEAD_LOCK();
ptes = pmap_map_ptes(pmap);
if (pmap->pm_pdir[pdei(va)] == NULL) {
error = pmap_ptpage_enter(pmap, va);
if (error) {
if (flags & PMAP_CANFAIL)
goto out;
panic("pmap_enter: no ptpage available");
}
/* make sure to remove desert PT page mapping */
MIPS_TBIS((vaddr_t)ptes + pdei(va) * PAGE_SIZE);
#ifdef DEBUG
if (pmapdebug & PDB_PTPAGE)
printf("pmap_enter: new ptpage %p at pdei %d\n",
pmap->pm_pdir[pdei(va)], pdei(va));
#endif
}
opte = ptes[mips_btop(va)];
if (pmap_pte_v(opte)) {
paddr_t opa;
struct vm_page *opg;
if (wired && pmap_pte_w(opte) == 0)
pmap->pm_stats.wired_count++;
else if (!wired && pmap_pte_w(opte) != 0)
pmap->pm_stats.wired_count--;
opa = MIPS_PTE_TO_PHYS(opte);
if (pa == opa)
goto validate;
#ifdef DEBUG
if (pmapdebug & PDB_ENTER)
printf("pmap_enter: removing old mapping 0x%lx\n", va);
#endif
if ((opg = PHYS_TO_VM_PAGE(opa)) != NULL)
pmap_pv_remove(pmap, opg, va);
#if 0
/*
* not necessary to do.
* same va but distinct pa. R4K cache will write back
* when old (stale) cache line is found dirty.
*/
if ((opte & PG_D) && CACHE_IS_WRITEBACK)
mips_dcache_wbinv_range(va, PAGE_SIZE);
#endif
MIPS_TBIS(va);
}
ptp = pmap->pm_pdir[pdei(va)];
if (managed) {
error = pmap_pv_enter(pmap, pg, va, ptp);
if (error) {
if (flags & PMAP_CANFAIL)
goto out;
panic("pmap_enter: unable to enter mapping in PV "
"table");
}
}
if (!pmap_pte_v(opte)) {
pmap->pm_stats.resident_count++;
if (wired)
pmap->pm_stats.wired_count++;
if (pmap != pmap_kernel())
pmap_physpage_addref(ptp);
}
validate:
npte = PG_V;
if (prot & VM_PROT_WRITE)
npte |= PG_D;
if (managed) {
unsigned attrs;
#ifdef DIAGNOSTIC
if ((flags & VM_PROT_ALL) & ~prot)
panic("pmap_enter: access type exceeds prot");
#endif
simple_lock(&pg->mdpage.pvh_slock);
if (flags & VM_PROT_WRITE)
pg->mdpage.pvh_attrs |= (PGA_REFERENCED|PGA_MODIFIED);
else if (flags & VM_PROT_ALL)
pg->mdpage.pvh_attrs |= PGA_REFERENCED;
attrs = pg->mdpage.pvh_attrs;
simple_unlock(&pg->mdpage.pvh_slock);
/*
* Set up referenced/modified emulation for new mapping.
*/
if ((attrs & PGA_REFERENCED) == 0)
/* npte &= ~PG_V XXX */ ;
else if ((attrs & PGA_MODIFIED) == 0)
npte &= ~PG_D;
}
else
npte |= PG_N;
if (wired)
npte |= PG_WIRED;
if (va >= VM_MIN_KERNEL_ADDRESS && va < virtual_end)
npte |= PG_G;
npte |= MIPS_PHYS_TO_PTE(pa);
ptes[mips_btop(va)] = npte;
error = 0;
out:
pmap_unmap_ptes(pmap);
PMAP_MAP_TO_HEAD_UNLOCK();
return (error);
}
void
pmap_remove(pmap, sva, eva)
struct pmap *pmap;
vaddr_t sva, eva;
{
#ifdef DEBUG
if (pmapdebug & PDB_REMOVE)
printf("pmap_remove(%p, %lx, %lx)\n", pmap, sva, eva);
#endif
pmap_do_remove(pmap, sva, eva, TRUE);
MIPS_TBIX();
}
void
pmap_protect(pmap, sva, eva, prot)
struct pmap *pmap;
vaddr_t sva, eva;
vm_prot_t prot;
{
pt_entry_t *ptep, opte;
#ifdef DEBUG
if (pmapdebug & PDB_PROTECT) {
printf("pmap_protect: %p %lx-%lx %s\n",
pmap, sva, eva, XPROT[prot]);
}
#endif
switch (prot) {
case VM_PROT_READ|VM_PROT_WRITE:
case VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE:
break;
/* copy_on_write */
case VM_PROT_READ:
case VM_PROT_READ|VM_PROT_EXECUTE:
ptep = pmap_map_ptes(pmap) + mips_btop(sva);
for (/*null*/; sva < eva; ptep++, sva += PAGE_SIZE) {
opte = *ptep;
if (!pmap_pte_v(opte))
continue;
*ptep = (opte &~ PG_D) | PG_WP;
#if 0
if ((opte & PG_D) && CACHE_IS_WRITEBACK) {
/*
* Make sure to have no dirty cache.
* ???
* If sva always belongs to current
* pmap, then it's ok to hit_writeback.
* If not, it needs to index_writeback.
*/
mips_dcache_wb_range(sva, PAGE_SIZE);
}
#endif
}
pmap_unmap_ptes(pmap);
break;
/* remove */
default:
pmap_do_remove(pmap, sva, eva, TRUE);
break;
}
MIPS_TBIX();
}
void
pmap_unwire(pmap, va)
struct pmap *pmap;
vaddr_t va;
{
pt_entry_t *ptes, opte;
#ifdef DEBUG
if (pmapdebug & PDB_WIRING)
printf("pmap_unwire(%p, %lx)\n", pmap, va);
#endif
if (pmap->pm_pdir[pdei(va)] != NULL) {
ptes = pmap_map_ptes(pmap);
opte = ptes[mips_btop(va)];
#ifdef DIAGNOSTIC
if (!pmap_pte_v(opte))
panic("pmap_unwire: invalid (unmapped) va");
#endif
if (pmap_pte_w(opte)) {
ptes[mips_btop(va)] = opte & ~PG_WIRED;
pmap->pm_stats.wired_count--;
}
#ifdef DIAGNOSTIC
else {
printf("pmap_unwire: wiring for pmap %p va 0x%lx "
"didn't change!\n", pmap, va);
}
#endif
pmap_unmap_ptes(pmap);
}
#ifdef DIAGNOSTIC
else {
panic("pmap_unwire: invalid PDE");
}
#endif
}
boolean_t
pmap_extract(pmap, va, pap)
struct pmap *pmap;
vaddr_t va;
paddr_t *pap;
{
pt_entry_t *ptes, pte;
if (va >= MIPS_KSEG0_START && va < MIPS_KSEG2_START) {
if (pap != NULL)
*pap = MIPS_KSEG0_TO_PHYS(va);
return (TRUE);
}
if (pmap->pm_pdir[pdei(va)] != NULL) {
ptes = pmap_map_ptes(pmap);
pte = ptes[mips_btop(va)];
pmap_unmap_ptes(pmap);
if (pmap_pte_v(pte)) {
if (pap != NULL)
*pap = MIPS_PTE_TO_PHYS(pte) | (va & PGOFSET);
return (TRUE);
}
}
return (FALSE);
}
void
pmap_kenter_pa(va, pa, prot)
vaddr_t va;
paddr_t pa;
vm_prot_t prot;
{
pt_entry_t opte, npte;
struct pmap *pmap = pmap_kernel();
#ifdef DEBUG
if (pmapdebug & PDB_WIRING)
printf("pmap_kenter_pa: VA=%lx, PA=%lx, %s\n",
va, pa, XPROT[prot]);
#endif
#ifdef DIAGNOSTIC
if (va < VM_MIN_KERNEL_ADDRESS)
panic("pmap_kenter_pa: kernel pmap, invalid va 0x%lx", va);
#endif
opte = VPT_BASE[mips_btop(va)];
if (!pmap_pte_v(opte))
pmap->pm_stats.resident_count++;
else {
#if 0
/* necessary ? */
if ((opte & PG_D) && CACHE_IS_WRITEBACK)
mips_dcache_wbinv_range(va, PAGE_SIZE);
#endif
MIPS_TBIS(va);
}
if (!pmap_pte_w(opte))
pmap->pm_stats.wired_count++;
npte = PG_V | PG_G | PG_WIRED;
if (prot & VM_PROT_WRITE)
npte |= PG_D;
npte |= MIPS_PHYS_TO_PTE(pa);
VPT_BASE[mips_btop(va)] = npte;
}
void
pmap_kremove(va, size)
vaddr_t va;
vsize_t size;
{
pt_entry_t opte;
struct pmap *pmap = pmap_kernel();
#ifdef DEBUG
if (pmapdebug & PDB_WIRING)
printf("pmap_kremove: VA=%lx, SZ=%lx\n", va, size);
#endif
#ifdef DIAGNOSTIC
if (va < VM_MIN_KERNEL_ADDRESS)
panic("pmap_kremove: user address");
#endif
for (; size != 0; size -= PAGE_SIZE, va += PAGE_SIZE) {
opte = VPT_BASE[mips_btop(va)];
if (!pmap_pte_v(opte))
continue;
#if 0
/* looks necessary to do */
if ((opte & PG_D) && CACHE_IS_WRITEBACK)
mips_dcache_wbinv_range(va, PAGE_SIZE);
#endif
MIPS_TBIS(va);
VPT_BASE[mips_btop(va)] = PG_G;
pmap->pm_stats.resident_count--;
pmap->pm_stats.wired_count--;
}
}
void
pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
pmap_t dst_pmap;
pmap_t src_pmap;
vaddr_t dst_addr;
vsize_t len;
vaddr_t src_addr;
{
#ifdef DEBUG
if (pmapdebug & PDB_CREATE)
printf("pmap_copy(%p, %p, %lx, %lx, %lx)\n",
dst_pmap, src_pmap, dst_addr, len, src_addr);
#endif
}
void
pmap_collect(pmap)
struct pmap *pmap;
{
#ifdef DEBUG
if (pmapdebug & PDB_COLLECT)
printf("pmap_collect(%p)\n", pmap);
#endif
/*
* If called for the kernel pmap, just return. We
* handle this case in the event that we ever want
* to have swappable kernel threads.
*/
if (pmap == pmap_kernel())
return;
/*
* This process is about to be swapped out; free all of
* the PT pages by removing the physical mappings for its
* entire address space. Note: pmap_do_remove() performs
* all necessary locking.
*/
pmap_do_remove(pmap, VM_MIN_ADDRESS, VM_MAX_ADDRESS, FALSE);
MIPS_TBIX();
}
void
pmap_update(pmap)
struct pmap *pmap;
{
}
void
pmap_activate(p)
struct proc *p;
{
struct pcb *pcb = &p->p_addr->u_pcb;
struct pmap *pmap = p->p_vmspace->vm_map.pmap;
simple_lock(&pmap_all_pmaps_slock);
TAILQ_REMOVE(&pmap_all_pmaps, pmap, pm_list);
TAILQ_INSERT_TAIL(&pmap_all_pmaps, pmap, pm_list);
simple_unlock(&pmap_all_pmaps_slock);
pcb->pcb_pmap = pmap; /* might be the first time */
if (p == curproc) {
if (pmap != pmap_kernel())
pmap_asid_alloc(pmap);
MIPS_SETASID(pmap->pm_asid);
}
}
void
pmap_deactivate(p)
struct proc *p;
{
}
void
pmap_zero_page(pa)
paddr_t pa;
{
memset((void *)MIPS_PHYS_TO_KSEG0(pa), 0, PAGE_SIZE);
/* XXX needs care about virtual indexed cache XXX */
}
boolean_t
pmap_zero_page_uncached(pa)
paddr_t pa;
{
memset((void *)MIPS_PHYS_TO_KSEG1(pa), 0, PAGE_SIZE);
return (TRUE);
}
void
pmap_copy_page(srcpa, dstpa)
paddr_t srcpa, dstpa;
{
caddr_t s, d;
#ifdef DEBUG
if (pmapdebug & PDB_FOLLOW)
printf("pmap_copy_page(%lx, %lx)\n", srcpa, dstpa);
#endif
/* XXX needs care about virtual indexed write back cache XXX */
s = (caddr_t)MIPS_PHYS_TO_KSEG0(srcpa);
d = (caddr_t)MIPS_PHYS_TO_KSEG0(dstpa);
memcpy(d, s, PAGE_SIZE);
}
void
pmap_page_protect(pg, prot)
struct vm_page *pg;
vm_prot_t prot;
{
struct pv_entry *pv;
struct pmap *pmap;
vaddr_t va;
pt_entry_t *ptes, opte;
#ifdef DEBUG
if (pmapdebug & PDB_PROTECT) {
printf("pmap_page_protect: PA=%08lx %s\n",
VM_PAGE_TO_PHYS(pg), XPROT[prot]);
}
#endif
switch (prot) {
case VM_PROT_READ|VM_PROT_WRITE:
case VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE:
break;
/* copy_on_write */
case VM_PROT_READ:
case VM_PROT_READ|VM_PROT_EXECUTE:
PMAP_HEAD_TO_MAP_LOCK();
simple_lock(&pg->mdpage.pvh_slock);
pv = pg->mdpage.pvh_list;
while (pv != NULL) {
pmap = pv->pv_pmap;
va = pv->pv_va;
ptes = pmap_map_ptes(pmap);
opte = ptes[mips_btop(va)];
ptes[mips_btop(va)] = (opte &~ PG_D) | PG_WP;
MIPS_TBIZ(va);
pmap_unmap_ptes(pmap);
pv = pv->pv_next;
}
simple_unlock(&pg->mdpage.pvh_slock);
PMAP_HEAD_TO_MAP_UNLOCK();
break;
/* remove_all */
default:
PMAP_HEAD_TO_MAP_LOCK();
simple_lock(&pg->mdpage.pvh_slock);
pv = pg->mdpage.pvh_list;
while (pv != NULL) {
void *ptp;
struct pv_entry *opv;
pmap = pv->pv_pmap;
va = pv->pv_va;
ptes = pmap_map_ptes(pmap);
opte = ptes[mips_btop(va)];
if (pmap_pte_w(opte)) {
printf("pmap_page_protect: ");
printf("wired mapping %lx removed, ", va);
printf("wire count %d\n", pg->wire_count);
pmap->pm_stats.wired_count--;
}
pmap->pm_stats.resident_count--;
ptes[mips_btop(va)] = opte & PG_G;
MIPS_TBIZ(va);
#if 1 /* necessary to do ? */
ptp = pv->pv_ptp;
if (pmap != pmap_kernel()
&& pmap_physpage_delref(ptp) == 0) {
pmap->pm_pdir[pdei(va)] = NULL;
pmap->pm_stats.resident_count--;
pmap_physpage_free(MIPS_KSEG0_TO_PHYS(ptp));
/* remove PT page mapping */
MIPS_TBIZ((vaddr_t)ptes + pdei(va) * PAGE_SIZE);
}
#endif
pmap_unmap_ptes(pmap);
opv = pv;
pv = pv->pv_next;
pmap_pv_free(opv);
}
pg->mdpage.pvh_list = NULL;
simple_unlock(&pg->mdpage.pvh_slock);
PMAP_HEAD_TO_MAP_UNLOCK();
break;
}
return;
}
boolean_t
pmap_clear_modify(pg)
struct vm_page *pg;
{
boolean_t rv = FALSE;
#ifdef DEBUG
if (pmapdebug & PDB_FOLLOW)
printf("pmap_clear_modify: PA=0x%lx\n", VM_PAGE_TO_PHYS(pg));
#endif
PMAP_HEAD_TO_MAP_LOCK();
simple_lock(&pg->mdpage.pvh_slock);
if (pg->mdpage.pvh_attrs & PGA_MODIFIED) {
rv = TRUE;
pmap_clearbit(pg, PG_D);
pg->mdpage.pvh_attrs &= ~PGA_MODIFIED;
}
simple_unlock(&pg->mdpage.pvh_slock);
PMAP_HEAD_TO_MAP_UNLOCK();
return (rv);
}
boolean_t
pmap_clear_reference(pg)
struct vm_page *pg;
{
boolean_t rv = FALSE;
#ifdef DEBUG
if (pmapdebug & PDB_FOLLOW)
printf("pmap_clear_reference: PA=0x%lx\n", VM_PAGE_TO_PHYS(pg));
#endif
PMAP_HEAD_TO_MAP_LOCK();
simple_lock(&pg->mdpage.pvh_slock);
if (pg->mdpage.pvh_attrs & PGA_REFERENCED) {
rv = TRUE;
pmap_clearbit(pg, PG_V);
pg->mdpage.pvh_attrs &= ~PGA_REFERENCED;
}
simple_unlock(&pg->mdpage.pvh_slock);
PMAP_HEAD_TO_MAP_UNLOCK();
return (rv);
}
boolean_t
pmap_is_modified(pg)
struct vm_page *pg;
{
boolean_t rv;
rv = ((pg->mdpage.pvh_attrs & PGA_MODIFIED) != 0);
#ifdef DEBUG
if (pmapdebug & PDB_FOLLOW) {
printf("pmap_is_modified: PA=0x%lx -> %c\n",
VM_PAGE_TO_PHYS(pg), "FT"[rv]);
}
#endif
return (rv);
}
boolean_t
pmap_is_referenced(pg)
struct vm_page *pg;
{
boolean_t rv;
rv = ((pg->mdpage.pvh_attrs & PGA_REFERENCED) != 0);
#ifdef DEBUG
if (pmapdebug & PDB_FOLLOW) {
printf("pmap_is_referenced: PA=0x%lx -> %c\n",
VM_PAGE_TO_PHYS(pg), "FT"[rv]);
}
#endif
return (rv);
}
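/*
 * Software referenced/modified emulation, presumably invoked from the
 * TLB invalid / TLB modified trap paths: pmap_enter() withholds PG_D
 * from mappings of pages not yet marked modified (and, eventually,
 * PG_V from unreferenced ones, see the XXX in pmap_enter), so the
 * first access traps here, where the page attributes are recorded and
 * the withheld bit is turned back on in every mapping of the page.
 */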
void
pmap_emulate_modify(pa)
paddr_t pa;
{
struct vm_page *pg;
pt_entry_t *ptes;
struct pv_entry *pv;
pg = PHYS_TO_VM_PAGE(pa);
PMAP_HEAD_TO_MAP_LOCK();
simple_lock(&pg->mdpage.pvh_slock);
pg->mdpage.pvh_attrs |= (PGA_REFERENCED|PGA_MODIFIED);
/* propagate PG_D across pmaps */
for (pv = pg->mdpage.pvh_list; pv != NULL; pv = pv->pv_next) {
ptes = pmap_map_ptes(pv->pv_pmap);
ptes[mips_btop(pv->pv_va)] |= PG_D;
pmap_unmap_ptes(pv->pv_pmap);
}
simple_unlock(&pg->mdpage.pvh_slock);
PMAP_HEAD_TO_MAP_UNLOCK();
}
void
pmap_emulate_reference(pa)
paddr_t pa;
{
struct vm_page *pg;
pt_entry_t *ptes;
struct pv_entry *pv;
pg = PHYS_TO_VM_PAGE(pa);
PMAP_HEAD_TO_MAP_LOCK();
simple_lock(&pg->mdpage.pvh_slock);
pg->mdpage.pvh_attrs |= PGA_REFERENCED;
/* propagate PG_V across pmaps */
for (pv = pg->mdpage.pvh_list; pv != NULL; pv = pv->pv_next) {
ptes = pmap_map_ptes(pv->pv_pmap);
ptes[mips_btop(pv->pv_va)] |= PG_V;
pmap_unmap_ptes(pv->pv_pmap);
}
simple_unlock(&pg->mdpage.pvh_slock);
PMAP_HEAD_TO_MAP_UNLOCK();
}
paddr_t
pmap_phys_address(pfn)
int pfn;
{
return (mips_ptob(pfn));
}
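/*
 * pmap_do_remove:
 * common backend for pmap_remove(), pmap_protect(VM_PROT_NONE) and
 * pmap_collect().  Unmaps [sva, eva) one 4MB PT block at a time;
 * wired mappings are skipped when dowired is FALSE (the pmap_collect
 * case).  Kernel mappings are flushed with MIPS_TBIS() here; callers
 * follow up with MIPS_TBIX() for the rest.
 */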
void
pmap_do_remove(pmap, sva, eva, dowired)
struct pmap *pmap;
vaddr_t sva, eva;
boolean_t dowired;
{
pt_entry_t *ptes, *ptep, pte;
void *ptp;
vaddr_t va, blkendva;
paddr_t pa;
struct vm_page *pg;
unsigned remain;
PMAP_MAP_TO_HEAD_LOCK();
ptes = pmap_map_ptes(pmap); /* locks pmap */
/*
* removing a range of pages: we unmap in PT sized blocks (4MB)
*/
for (/*null*/ ; sva < eva; sva = blkendva) {
/* determine range of block */
blkendva = roundup(sva + 1, NBPD);
if (blkendva > eva)
blkendva = eva;
ptp = pmap->pm_pdir[pdei(sva)];
if (ptp == NULL)
continue; /* falls in 4MB hole */
ptep = ptes + mips_btop(sva);
remain = NPTEPG; /* sentinel value for kernel PT */
for (va = sva; va < blkendva; va += PAGE_SIZE, ptep++) {
pte = *ptep;
if (!pmap_pte_v(pte))
continue;
if (pmap_pte_w(pte)) {
if (dowired == FALSE)
continue;
pmap->pm_stats.wired_count--;
}
pmap->pm_stats.resident_count--;
pa = MIPS_PTE_TO_PHYS(pte);
pg = PHYS_TO_VM_PAGE(pa);
if (pg)
pmap_pv_remove(pmap, pg, va);
#if 0
/*
* sva does not always belong to current
* pmap. Then, it needs to index_writeback.
*/
if ((pte & PG_D) && CACHE_IS_WRITEBACK)
mips_dcache_wbinv_range(va, PAGE_SIZE);
#endif
*ptep = pte & PG_G;
if (pmap == pmap_kernel())
MIPS_TBIS(va);
else if ((remain = pmap_physpage_delref(ptp)) == 0)
break;
}
/* PT page is no longer being used */
if (remain == 0) {
pmap->pm_pdir[pdei(sva)] = NULL;
pmap->pm_stats.resident_count--;
pmap_physpage_free(MIPS_KSEG0_TO_PHYS(ptp));
/* MIPS_TBIS((vaddr_t)ptes + pdei(sva) * PAGE_SIZE); */
}
}
pmap_unmap_ptes(pmap);
PMAP_MAP_TO_HEAD_UNLOCK();
}
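/*
 * pmap_map_ptes / pmap_unmap_ptes:
 * return a linear window onto a pmap's PTEs.  The kernel pmap and the
 * current pmap are always visible through VPT_BASE (PD slot 1018 maps
 * the PD onto itself).  For any other pmap its PD is loaded into slot
 * 1019 of the current PD, which makes its PTEs show up at AVPT_BASE;
 * stale entries left by the previous occupant are flushed with
 * MIPS_TBIAP().  pmap_unmap_ptes() drops the locks taken here.
 */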
pt_entry_t *
pmap_map_ptes(pmap)
struct pmap *pmap;
{
struct pmap *self;
pt_entry_t **pdp, *opd;
/* the kernel's pmap is always accessible */
if (pmap == pmap_kernel())
return (VPT_BASE);
/* if curpmap then we are always mapped */
if (pmap_is_current(pmap)) {
PMAP_LOCK(pmap);
return (VPT_BASE);
}
/* need to lock both curpmap and pmap: use ordered locking */
self = curpcb->pcb_pmap;
if ((unsigned)pmap < (unsigned)self) {
PMAP_LOCK(pmap);
PMAP_LOCK(self);
} else {
PMAP_LOCK(self);
PMAP_LOCK(pmap);
}
/* need to load a new alternate VPT space? */
pdp = &self->pm_pdir[1019];
opd = *pdp;
if (opd != (pt_entry_t *)pmap->pm_pdir) {
*pdp = (pt_entry_t *)pmap->pm_pdir;
#ifdef DEBUG
if (pmapdebug & PDB_VPTSPACE)
printf("map_ptes: load pmap %p into %p's AVPT\n",
pmap, self);
#endif
if (opd != NULL)
MIPS_TBIAP();
}
return (AVPT_BASE);
}
void
pmap_unmap_ptes(pmap)
struct pmap *pmap;
{
if (pmap == pmap_kernel())
return;
if (pmap_is_current(pmap))
PMAP_UNLOCK(pmap);
else {
PMAP_UNLOCK(pmap);
PMAP_UNLOCK(curpcb->pcb_pmap);
}
}
boolean_t
pmap_is_current(pmap)
struct pmap *pmap;
{
#if 1
return (curpcb->pcb_pmap == pmap);
#else
return (VPT_BASE[1018 * NPTEPG + 1018] == (pt_entry_t)pmap->pm_pdir);
#endif
}
void
pmap_clearbit(pg, bit)
struct vm_page *pg;
unsigned bit;
{
struct pv_entry *pv;
pt_entry_t *ptep, opte, npte;
struct pmap *pmap;
vaddr_t va;
for (pv = pg->mdpage.pvh_list; pv != NULL; pv = pv->pv_next) {
pmap = pv->pv_pmap;
va = pv->pv_va;
ptep = pmap_map_ptes(pmap) + mips_btop(va);
opte = *ptep;
npte = opte &~ bit;
if (opte != npte) {
*ptep = npte;
MIPS_TBIZ(va);
}
#ifdef DEBUG
if (pmapdebug & PDB_BITS) {
printf("pmap_clearbit: pmap=%p ", pmap);
printf("va=0x%lx opte=0x%lx -%x\n", va, opte, bit);
}
#endif
pmap_unmap_ptes(pmap);
}
}
int
pmap_pv_enter(pmap, pg, va, ptp)
struct pmap *pmap;
struct vm_page *pg;
vaddr_t va;
void *ptp;
{
struct pv_entry *newpv;
/*
* Allocate and fill in the new pv_entry.
*/
newpv = pmap_pv_alloc();
if (newpv == NULL)
return (ENOMEM);
newpv->pv_va = va;
newpv->pv_pmap = pmap;
newpv->pv_ptp = ptp;
simple_lock(&pg->mdpage.pvh_slock);
#ifdef DEBUG
/*
* Make sure the entry doesn't already exist.
*/
if (pg->mdpage.pvh_list) {
struct pv_entry *pv = pg->mdpage.pvh_list;
do {
if (pmap == pv->pv_pmap && va == pv->pv_va) {
printf("pmap_pv_enter: already in pv table\n");
panic("pmap = %p, va = %lx", pmap, va);
}
pv = pv->pv_next;
} while (pv != NULL);
}
#endif
/*
* ...and put it in the list.
*/
newpv->pv_next = pg->mdpage.pvh_list;
pg->mdpage.pvh_list = newpv;
simple_unlock(&pg->mdpage.pvh_slock);
return (0);
}
void
pmap_pv_remove(pmap, pg, va)
struct pmap *pmap;
struct vm_page *pg;
vaddr_t va;
{
struct pv_entry *pv, **pvp;
simple_lock(&pg->mdpage.pvh_slock);
pvp = &pg->mdpage.pvh_list;
while ((pv = *pvp) != NULL) {
if (pmap == pv->pv_pmap && va == pv->pv_va)
break;
pvp = &pv->pv_next;
}
#ifdef DEBUG
if (pv == NULL)
panic("pmap_pv_remove: not in pv table");
#endif
*pvp = pv->pv_next;
simple_unlock(&pg->mdpage.pvh_slock);
pmap_pv_free(pv);
}
void *
pmap_pv_page_alloc(pp, flags)
struct pool *pp;
int flags;
{
paddr_t pg;
if (pmap_physpage_alloc(PGU_PVENT, &pg))
return ((void *)MIPS_PHYS_TO_KSEG0(pg));
return (NULL);
}
void
pmap_pv_page_free(pp, v)
struct pool *pp;
void *v;
{
pmap_physpage_free(MIPS_KSEG0_TO_PHYS((vaddr_t)v));
}
boolean_t
pmap_physpage_alloc(usage, pap)
int usage;
paddr_t *pap;
{
struct vm_page *pg;
paddr_t pa;
pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE|UVM_PGA_ZERO);
if (pg != NULL) {
pa = VM_PAGE_TO_PHYS(pg);
#ifdef DIAGNOSTIC
simple_lock(&pg->mdpage.pvh_slock);
if (pg->wire_count != 0) {
panic("pmap_physpage_alloc: page 0x%lx has "
"%d references\n", pa, pg->wire_count);
}
simple_unlock(&pg->mdpage.pvh_slock);
#endif
*pap = pa;
return (TRUE);
}
return (FALSE);
}
void
pmap_physpage_free(pa)
paddr_t pa;
{
struct vm_page *pg;
if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL)
panic("pmap_physpage_free: bogus physical page address");
#ifdef DIAGNOSTIC
simple_lock(&pg->mdpage.pvh_slock);
if (pg->wire_count != 0)
panic("pmap_physpage_free: page still has references");
simple_unlock(&pg->mdpage.pvh_slock);
#endif
uvm_pagefree(pg);
}
int
pmap_ptpage_enter(pmap, va)
struct pmap *pmap;
vaddr_t va;
{
paddr_t pa;
if (pmap_physpage_alloc(PGU_PT, &pa) == FALSE)
return (ENOMEM);
pmap->pm_pdir[pdei(va)] = (pt_entry_t *)MIPS_PHYS_TO_KSEG0(pa);
pmap->pm_stats.resident_count++;
/* now ready to populate PTE in a fresh PT */
return (0);
}
int
pmap_physpage_addref(kva)
void *kva;
{
struct vm_page *pg;
paddr_t pa;
int rval;
pa = MIPS_KSEG0_TO_PHYS(trunc_page((vaddr_t)kva));
pg = PHYS_TO_VM_PAGE(pa);
simple_lock(&pg->mdpage.pvh_slock);
rval = ++pg->wire_count;
simple_unlock(&pg->mdpage.pvh_slock);
return (rval);
}
int
pmap_physpage_delref(kva)
void *kva;
{
struct vm_page *pg;
paddr_t pa;
int rval;
pa = MIPS_KSEG0_TO_PHYS(trunc_page((vaddr_t)kva));
pg = PHYS_TO_VM_PAGE(pa);
simple_lock(&pg->mdpage.pvh_slock);
#ifdef DIAGNOSTIC
if (pg->wire_count == 0)
panic("pmap_physpage_delref: reference count already zero");
#endif
rval = --pg->wire_count;
simple_unlock(&pg->mdpage.pvh_slock);
return (rval);
}
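/*
 * pmap_asid_alloc:
 * give the pmap a fresh ASID if it has none or if its ASID belongs to
 * an older generation.  When the ASID space (64 on MIPS1, 256 on
 * MIPS3) wraps, the generation counter is bumped, non-global TLB
 * entries are purged with MIPS_TBIAP(), and numbering restarts at 1;
 * ASID 0 stays with the kernel pmap.
 */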
void
pmap_asid_alloc(pmap)
struct pmap *pmap;
{
if (pmap->pm_asid != PMAP_ASID_RESERVED
&& pmap->pm_asidgen == pmap_asid_generation)
return;
if (pmap_asid_next == pmap_asid_max) {
pmap_asid_generation++; /* ok to wrap to 0 */
pmap_asid_next = 1;
MIPS_TBIAP(); /* invalidate non-G TLB */
}
pmap->pm_asid = pmap_asid_next++;
pmap->pm_asidgen = pmap_asid_generation;
#ifdef DEBUG
if (pmapdebug & PDB_ASID)
printf("pmap_asid_alloc(%p): new ASID %d\n", pmap, pmap->pm_asid);
#endif
}
#ifdef MIPS3
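/*
 * pmap_prefer:
 * round the suggested VA up so that it shares a virtual-cache colour
 * with the file offset (foff modulo mips_cache_prefer_mask + 1),
 * avoiding virtual-index aliases in the MIPS3 primary caches.
 */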
void
pmap_prefer(foff, vap)
vaddr_t foff;
vaddr_t *vap;
{
vaddr_t va;
vsize_t d;
if (CPUISMIPS3) {
va = *vap;
d = foff - va;
d &= mips_cache_prefer_mask;
*vap = va + d;
}
}
#endif
#if 0 /* handle with care */
/*
* Test ref/modify handling.
*/
void
pmap_testout()
{
vaddr_t va;
volatile int *loc;
int val = 0;
paddr_t pa;
struct vm_page *pg;
int ref, mod;
/* Allocate a page */
va = (vaddr_t)uvm_km_alloc1(kernel_map, NBPG, 1);
loc = (int*)va;
pmap_extract(pmap_kernel(), va, &pa);
pg = PHYS_TO_VM_PAGE(pa);
pmap_unwire(pmap_kernel(), va);
pmap_remove(pmap_kernel(), va, va+1);
pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0);
pmap_update(pmap_kernel());
/* Now clear reference and modify */
ref = pmap_clear_reference(pg);
mod = pmap_clear_modify(pg);
printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
(void *)(u_long)va, (long)pa,
ref, mod);
/* Check it's properly cleared */
ref = pmap_is_referenced(pg);
mod = pmap_is_modified(pg);
printf("Checking cleared page: ref %d, mod %d\n",
ref, mod);
/* Reference page */
val = *loc;
ref = pmap_is_referenced(pg);
mod = pmap_is_modified(pg);
printf("Referenced page: ref %d, mod %d val %x\n",
ref, mod, val);
/* Now clear reference and modify */
ref = pmap_clear_reference(pg);
mod = pmap_clear_modify(pg);
printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
(void *)(u_long)va, (long)pa,
ref, mod);
/* Modify page */
*loc = 1;
ref = pmap_is_referenced(pg);
mod = pmap_is_modified(pg);
printf("Modified page: ref %d, mod %d\n",
ref, mod);
/* Now clear reference and modify */
ref = pmap_clear_reference(pg);
mod = pmap_clear_modify(pg);
printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
(void *)(u_long)va, (long)pa,
ref, mod);
/* Check it's properly cleared */
ref = pmap_is_referenced(pg);
mod = pmap_is_modified(pg);
printf("Checking cleared page: ref %d, mod %d\n",
ref, mod);
/* Modify page */
*loc = 1;
ref = pmap_is_referenced(pg);
mod = pmap_is_modified(pg);
printf("Modified page: ref %d, mod %d\n",
ref, mod);
/* Check pmap_protect() */
pmap_protect(pmap_kernel(), va, va+1, VM_PROT_READ);
pmap_update(pmap_kernel());
ref = pmap_is_referenced(pg);
mod = pmap_is_modified(pg);
printf("pmap_protect(VM_PROT_READ): ref %d, mod %d\n",
ref, mod);
/* Now clear reference and modify */
ref = pmap_clear_reference(pg);
mod = pmap_clear_modify(pg);
printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
(void *)(u_long)va, (long)pa,
ref, mod);
/* Reference page */
val = *loc;
ref = pmap_is_referenced(pg);
mod = pmap_is_modified(pg);
printf("Referenced page: ref %d, mod %d val %x\n",
ref, mod, val);
/* Now clear reference and modify */
ref = pmap_clear_reference(pg);
mod = pmap_clear_modify(pg);
printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
(void *)(u_long)va, (long)pa,
ref, mod);
/* Modify page */
pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0);
pmap_update(pmap_kernel());
*loc = 1;
ref = pmap_is_referenced(pg);
mod = pmap_is_modified(pg);
printf("Modified page: ref %d, mod %d\n",
ref, mod);
/* Check pmap_protect() */
pmap_protect(pmap_kernel(), va, va+1, VM_PROT_NONE);
pmap_update(pmap_kernel());
ref = pmap_is_referenced(pg);
mod = pmap_is_modified(pg);
printf("pmap_protect(): ref %d, mod %d\n",
ref, mod);
/* Now clear reference and modify */
ref = pmap_clear_reference(pg);
mod = pmap_clear_modify(pg);
printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
(void *)(u_long)va, (long)pa,
ref, mod);
/* Reference page */
val = *loc;
ref = pmap_is_referenced(pg);
mod = pmap_is_modified(pg);
printf("Referenced page: ref %d, mod %d val %x\n",
ref, mod, val);
/* Now clear reference and modify */
ref = pmap_clear_reference(pg);
mod = pmap_clear_modify(pg);
printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
(void *)(u_long)va, (long)pa,
ref, mod);
/* Modify page */
#if 0
pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0);
pmap_update(pmap_kernel());
#endif
*loc = 1;
ref = pmap_is_referenced(pg);
mod = pmap_is_modified(pg);
printf("Modified page: ref %d, mod %d\n",
ref, mod);
/* Check pmap_page_protect() */
pmap_page_protect(pg, VM_PROT_READ);
ref = pmap_is_referenced(pg);
mod = pmap_is_modified(pg);
printf("pmap_page_protect(VM_PROT_READ): ref %d, mod %d\n",
ref, mod);
/* Now clear reference and modify */
ref = pmap_clear_reference(pg);
mod = pmap_clear_modify(pg);
printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
(void *)(u_long)va, (long)pa,
ref, mod);
/* Reference page */
val = *loc;
ref = pmap_is_referenced(pg);
mod = pmap_is_modified(pg);
printf("Referenced page: ref %d, mod %d val %x\n",
ref, mod, val);
/* Now clear reference and modify */
ref = pmap_clear_reference(pg);
mod = pmap_clear_modify(pg);
printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
(void *)(u_long)va, (long)pa,
ref, mod);
/* Modify page */
#if 0
pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0);
pmap_update(pmap_kernel());
#endif
*loc = 1;
ref = pmap_is_referenced(pg);
mod = pmap_is_modified(pg);
printf("Modified page: ref %d, mod %d\n",
ref, mod);
/* Check pmap_page_protect() */
pmap_page_protect(pg, VM_PROT_NONE);
ref = pmap_is_referenced(pg);
mod = pmap_is_modified(pg);
printf("pmap_page_protect(): ref %d, mod %d\n",
ref, mod);
/* Now clear reference and modify */
ref = pmap_clear_reference(pg);
mod = pmap_clear_modify(pg);
printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
(void *)(u_long)va, (long)pa,
ref, mod);
/* Reference page */
val = *loc;
ref = pmap_is_referenced(pg);
mod = pmap_is_modified(pg);
printf("Referenced page: ref %d, mod %d val %x\n",
ref, mod, val);
/* Now clear reference and modify */
ref = pmap_clear_reference(pg);
mod = pmap_clear_modify(pg);
printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
(void *)(u_long)va, (long)pa,
ref, mod);
/* Modify page */
#if 0
pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0);
pmap_update(pmap_kernel());
#endif
*loc = 1;
ref = pmap_is_referenced(pg);
mod = pmap_is_modified(pg);
printf("Modified page: ref %d, mod %d\n",
ref, mod);
/* Unmap page */
pmap_remove(pmap_kernel(), va, va+1);
pmap_update(pmap_kernel());
ref = pmap_is_referenced(pg);
mod = pmap_is_modified(pg);
printf("Unmapped page: ref %d, mod %d\n", ref, mod);
/* Now clear reference and modify */
ref = pmap_clear_reference(pg);
mod = pmap_clear_modify(pg);
printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
(void *)(u_long)va, (long)pa, ref, mod);
/* Check it's properly cleared */
ref = pmap_is_referenced(pg);
mod = pmap_is_modified(pg);
printf("Checking cleared page: ref %d, mod %d\n",
ref, mod);
pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL,
VM_PROT_ALL|PMAP_WIRED);
uvm_km_free(kernel_map, (vaddr_t)va, NBPG);
}
#endif
#if 0
/*
* TBIA --
* invalidate entire TLB.
*/
void
MIPS_TBIA(void)
{
FOR EACH TLB entry
DO
purge this entry
DONE
}
/*
* TBIAP --
 * purge every TLB entry that does not belong to the kernel.
*/
void
MIPS_TBIAP(void)
{
FOR EACH TLB entry x
DO
if (x.EntryLo.G)
continue;
purge this entry from TLB
DONE
}
/*
* TBIS --
 * purge the single TLB entry that maps va in the current ASID context.
*/
void
MIPS_TBIS(vaddr_t va)
{
EntryHi.VPN = va;
tlbp
if (TlbIdx >= 0) {	/* tlbp found a matching entry */
purge TlbIdx-th entry from TLB
}
}
/*
* TBIZ --
 * purge every TLB entry that maps va, regardless of ASID.
*/
void
MIPS_TBIZ(vaddr_t va)
{
FOR EACH TLB entry x
DO
if (x.EntryHi.VPN != va)
continue;
purge this entry from TLB
DONE
}
/*
* TBIX --
 * purge every TLB entry belonging to the current ASID context.
*/
void
MIPS_TBIX(void)
{
asid = EntryHi & 0xfc0;
FOR EACH TLB ENTRY x
DO
if (x.EntryLo.G)
continue;
if ((x.EntryHi & 0xfc0) == asid)
purge this entry from TLB
DONE
}
/*
* SETASID --
* switch to new ASID context.
*/
void
MIPS_SETASID(int asid)
{
EntryHi = (asid << 6);
}
#endif