Source-Changes-HG archive
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]
[src/trunk]: src/sys/arch/arc/arc Moved from /cvsroot/syssrc/sys/arch/pica/pi...
details: https://anonhg.NetBSD.org/src/rev/aea8685b5982
branches: trunk
changeset: 481112:aea8685b5982
user: soda <soda%NetBSD.org@localhost>
date: Sun Jan 23 20:09:16 2000 +0000
description:
Moved from /cvsroot/syssrc/sys/arch/pica/pica/pmap.c,v
diffstat:
sys/arch/arc/arc/pmap.c | 1615 +++++++++++++++++++++++++++++++++++++++++++++++
1 files changed, 1615 insertions(+), 0 deletions(-)
diffs (truncated from 1619 to 300 lines):
diff -r e1ea05cb9b5d -r aea8685b5982 sys/arch/arc/arc/pmap.c
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/sys/arch/arc/arc/pmap.c Sun Jan 23 20:09:16 2000 +0000
@@ -0,0 +1,1615 @@
+/* $NetBSD: pmap.c,v 1.11 2000/01/23 20:09:17 soda Exp $ */
+
+/*
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department and Ralph Campbell.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)pmap.c 8.4 (Berkeley) 1/26/94
+ */
+
+/*
+ * Manages physical address maps.
+ *
+ * In addition to hardware address maps, this
+ * module is called upon to provide software-use-only
+ * maps which may or may not be stored in the same
+ * form as hardware maps. These pseudo-maps are
+ * used to store intermediate results from copy
+ * operations to and from address spaces.
+ *
+ * Since the information managed by this module is
+ * also stored by the logical address mapping module,
+ * this module may throw away valid virtual-to-physical
+ * mappings at almost any time. However, invalidations
+ * of virtual-to-physical mappings must be done as
+ * requested.
+ *
+ * In order to cope with hardware architectures which
+ * make virtual-to-physical map invalidates expensive,
+ * this module may delay invalidate or reduced protection
+ * operations until such time as they are actually
+ * necessary. This module is given full information as
+ * to which processors are currently using which maps,
+ * and to when physical maps must be made correct.
+ */
+
+#include "opt_sysv.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+#include <sys/malloc.h>
+#include <sys/user.h>
+#include <sys/buf.h>
+#ifdef SYSVSHM
+#include <sys/shm.h>
+#endif
+
+#include <vm/vm_kern.h>
+#include <vm/vm_page.h>
+#include <vm/vm_pageout.h>
+
+#include <mips/cpuregs.h>
+#include <mips/locore.h>
+#include <machine/pte.h>
+#include <machine/cpu.h>
+
+extern vm_page_t vm_page_alloc1 __P((void));
+extern void vm_page_free1 __P((vm_page_t));
+
+/*
+ * For each vm_page_t, there is a list of all currently valid virtual
+ * mappings of that page. An entry is a pv_entry_t, the list is pv_table.
+ * XXX really should do this as a part of the higher level code.
+ */
+typedef struct pv_entry {
+	struct pv_entry	*pv_next;	/* next pv_entry */
+	struct pmap	*pv_pmap;	/* pmap where mapping lies */
+	vm_offset_t	pv_va;		/* virtual address for mapping */
+	int		pv_flags;	/* Some flags for the mapping */
+} *pv_entry_t;
+#define	PV_UNCACHED	0x0001	/* Page is mapped uncached */
+
+/*
+ * Local pte bits used only here
+ */
+#define PG_RO 0x40000000
+#define PG_WIRED 0x80000000
+
+pv_entry_t pv_table; /* array of entries, one per page */
+int pmap_remove_pv();
+
+#define pa_index(pa) atop((pa) - first_phys_addr)
+#define pa_to_pvh(pa) (&pv_table[pa_index(pa)])
+
+#ifdef DEBUG
+struct {
+ int kernel; /* entering kernel mapping */
+ int user; /* entering user mapping */
+ int ptpneeded; /* needed to allocate a PT page */
+ int pwchange; /* no mapping change, just wiring or protection */
+ int wchange; /* no mapping change, just wiring */
+ int mchange; /* was mapped but mapping to different page */
+ int managed; /* a managed page */
+ int firstpv; /* first mapping for this PA */
+ int secondpv; /* second mapping for this PA */
+ int ci; /* cache inhibited */
+ int unmanaged; /* not a managed page */
+ int flushes; /* cache flushes */
+ int cachehit; /* new entry forced valid entry out */
+} enter_stats;
+struct {
+ int calls;
+ int removes;
+ int flushes;
+ int pidflushes; /* HW pid stolen */
+ int pvfirst;
+ int pvsearch;
+} remove_stats;
+
+int pmapdebug = 0;
+#define PDB_FOLLOW 0x0001
+#define PDB_INIT 0x0002
+#define PDB_ENTER 0x0004
+#define PDB_REMOVE 0x0008
+#define PDB_CREATE 0x0010
+#define PDB_PTPAGE 0x0020
+#define PDB_PVENTRY 0x0040
+#define PDB_BITS 0x0080
+#define PDB_COLLECT 0x0100
+#define PDB_PROTECT 0x0200
+#define PDB_TLBPID 0x0400
+#define PDB_PARANOIA 0x2000
+#define PDB_WIRING 0x4000
+#define PDB_PVDUMP 0x8000
+
+#endif /* DEBUG */
+
+struct pmap kernel_pmap_store;
+
+vm_offset_t avail_start; /* PA of first available physical page */
+vm_offset_t avail_end; /* PA of last available physical page */
+vm_size_t mem_size; /* memory size in bytes */
+vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss)*/
+vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */
+int picapagesperpage; /* PAGE_SIZE / NBPG */
+#ifdef ATTR
+char *pmap_attributes; /* reference and modify bits */
+#endif
+struct segtab *free_segtab; /* free list kept locally */
+u_int tlbpid_gen = 1; /* TLB PID generation count */
+int tlbpid_cnt = 2; /* next available TLB PID */
+pt_entry_t *Sysmap; /* kernel pte table */
+u_int Sysmapsize; /* number of pte's in Sysmap */
+
+void pmap_pinit __P((pmap_t));
+void pmap_release __P((pmap_t));
+
+/*
+ * Bootstrap the system enough to run with virtual memory.
+ * firstaddr is the first unused kseg0 address (not page aligned).
+ *
+ * Carves the kernel PTE table (Sysmap) and the pv_table out of the
+ * physical memory directly following the kernel image, then publishes
+ * the avail_start/avail_end and virtual_avail/virtual_end ranges that
+ * the VM system will manage.
+ */
+void
+pmap_bootstrap(firstaddr)
+	vm_offset_t firstaddr;
+{
+	register int i;
+	register pt_entry_t *spte;
+	vm_offset_t start = firstaddr;
+	extern int maxmem, physmem;
+
+/*
+ * Carve an array of `num' elements of `type' off the front of kseg0
+ * memory; advances `firstaddr' past the allocation as a side effect.
+ */
+#define valloc(name, type, num) \
+	    (name) = (type *)firstaddr; firstaddr = (vm_offset_t)((name)+(num))
+	/*
+	 * Allocate a PTE table for the kernel.
+	 * The '1024' comes from PAGER_MAP_SIZE in vm_pager_init().
+	 * This should be kept in sync.
+	 * We also reserve space for kmem_alloc_pageable() for vm_fork().
+	 */
+	Sysmapsize = (VM_KMEM_SIZE + VM_MBUF_SIZE + VM_PHYS_SIZE +
+		nbuf * MAXBSIZE + 16 * NCARGS) / NBPG + 1024 + 256;
+#ifdef SYSVSHM
+	/* room to map the whole System V shared memory pool */
+	Sysmapsize += shminfo.shmall;
+#endif
+	valloc(Sysmap, pt_entry_t, Sysmapsize);
+#ifdef ATTR
+	valloc(pmap_attributes, char, physmem);
+#endif
+	/*
+	 * Allocate memory for pv_table.
+	 * This will allocate more entries than we really need.
+	 * We could do this in pmap_init when we know the actual
+	 * phys_start and phys_end but its better to use kseg0 addresses
+	 * rather than kernel virtual addresses mapped through the TLB.
+	 */
+	i = maxmem - pica_btop(MACH_CACHED_TO_PHYS(firstaddr));
+	valloc(pv_table, struct pv_entry, i);
+
+	/*
+	 * Clear allocated memory.
+	 */
+	firstaddr = mips_round_page(firstaddr);
+	bzero((caddr_t)start, firstaddr - start);
+
+	/* Managed physical memory begins after the tables just carved out. */
+	avail_start = MACH_CACHED_TO_PHYS(firstaddr);
+	avail_end = pica_ptob(maxmem);
+	mem_size = avail_end - avail_start;
+
+	/* Kernel VA space is exactly what Sysmap can describe. */
+	virtual_avail = VM_MIN_KERNEL_ADDRESS;
+	virtual_end = VM_MIN_KERNEL_ADDRESS + Sysmapsize * NBPG;
+	/* XXX need to decide how to set cnt.v_page_size */
+	picapagesperpage = 1;
+
+	/* Initialize the kernel pmap itself. */
+	simple_lock_init(&pmap_kernel()->pm_lock);
+	pmap_kernel()->pm_count = 1;
+
+	/*
+	 * The R4?00 stores only one copy of the Global bit in the
+	 * translation lookaside buffer for each 2 page entry.
+	 * Thus invalid entries must have the Global bit set so
+	 * when Entry LO and Entry HI G bits are ANDed together
+	 * they will produce a global bit to store in the tlb.
+	 */
+	for(i = 0, spte = Sysmap; i < Sysmapsize; i++, spte++)
+		spte->pt_entry = PG_G;
+}
+
+/*
+ * Bootstrap memory allocator. This function allows for early dynamic
+ * memory allocation until the virtual memory system has been bootstrapped.
+ * After that point, either kmem_alloc or malloc should be used. This
+ * function works by stealing pages from the (to be) managed page pool,
+ * stealing virtual address space, then mapping the pages and zeroing them.
+ *
+ * It should be used from pmap_bootstrap till vm_page_startup, afterwards
+ * it cannot be used, and will generate a panic if tried. Note that this
+ * memory will never be freed, and in essence it is wired down.
+ */
+void *
+pmap_bootstrap_alloc(size)
+	int size;
+{
+	vm_offset_t val;
+	extern boolean_t vm_page_startup_initialized;
+
+	if (vm_page_startup_initialized)
+		panic("pmap_bootstrap_alloc: called after startup initialized");
+
+	/*
+	 * Steal whole pages from the front of the unmanaged physical
+	 * range and hand them back through their cached kseg0 alias,
+	 * so no TLB mapping is needed.
+	 */
+	val = MACH_PHYS_TO_CACHED(avail_start);
+	size = round_page(size);
+	avail_start += size;
+
+	/* Zero the stolen pages before returning them. */
+	blkclr((caddr_t)val, size);
+	return ((void *)val);
+}
+
+/*
+ * Initialize the pmap module.
+ * Called by vm_init, to initialize any structures that the pmap
+ * system needs to map virtual memory.
+ *
+ * Nothing to do here beyond the debug trace: Sysmap and pv_table
+ * were already carved out of kseg0 memory in pmap_bootstrap().
+ */
+void
+pmap_init(phys_start, phys_end)
+	vm_offset_t phys_start, phys_end;
+{
+
+#ifdef DEBUG
+	if (pmapdebug & (PDB_FOLLOW|PDB_INIT))
+		printf("pmap_init(%x, %x)\n", phys_start, phys_end);
+#endif
+}
+
+/*
+ * Create and return a physical map.
+ *
Home |
Main Index |
Thread Index |
Old Index