Source-Changes-HG archive
[src/trunk]: src/sys/arch/vax Remove pmap's simple_lock; switch to atomic ops
details: https://anonhg.NetBSD.org/src/rev/6a9d60124fca
branches: trunk
changeset: 765390:6a9d60124fca
user: matt <matt%NetBSD.org@localhost>
date: Tue May 24 23:30:30 2011 +0000
description:
Remove pmap's simple_lock; switch to atomic ops
Change pvtable simple_lock to mutex
Switch to kmem (goodbye malloc).
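For context: in the diff below the per-pmap simple_lock guarded only the reference count and the pm_stats counters, so both become lock-free atomic updates via <sys/atomic.h>. A minimal sketch of the reference-count side of that pattern (the *_sketch names are illustrative, not functions from this commit):

#include <sys/atomic.h>

struct pmap_sketch {
	u_int pm_count;		/* reference count, updated atomically */
};

static inline void
pmap_reference_sketch(struct pmap_sketch *pm)
{
	/* was: simple_lock(); pm->pm_count++; simple_unlock(); */
	atomic_inc_uint(&pm->pm_count);
}

static inline void
pmap_destroy_sketch(struct pmap_sketch *pm)
{
	/*
	 * atomic_dec_uint_nv() returns the new value, so only the
	 * thread that drops the last reference sees 0 and frees.
	 */
	if (atomic_dec_uint_nv(&pm->pm_count) == 0) {
		/* release the pmap's resources here */
	}
}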
diffstat:
sys/arch/vax/include/pmap.h | 56 ++++++------
sys/arch/vax/vax/pmap.c | 196 ++++++++++++++++++-------------------------
2 files changed, 110 insertions(+), 142 deletions(-)
diffs (truncated from 627 to 300 lines):
diff -r cc1aa623e6cc -r 6a9d60124fca sys/arch/vax/include/pmap.h
--- a/sys/arch/vax/include/pmap.h Tue May 24 23:16:16 2011 +0000
+++ b/sys/arch/vax/include/pmap.h Tue May 24 23:30:30 2011 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.h,v 1.79 2010/11/14 13:33:23 uebayasi Exp $ */
+/* $NetBSD: pmap.h,v 1.80 2011/05/24 23:30:30 matt Exp $ */
/*
* Copyright (c) 1991 Regents of the University of California.
@@ -81,7 +81,7 @@
#ifndef PMAP_H
#define PMAP_H
-#include <sys/simplelock.h>
+#include <sys/atomic.h>
#include <uvm/uvm_page.h>
@@ -102,13 +102,12 @@
struct pmap {
struct pte *pm_p1ap; /* Base of alloced p1 pte space */
- int pm_count; /* reference count */
+ u_int pm_count; /* reference count */
struct pcb *pm_pcbs; /* PCBs using this pmap */
struct pte *pm_p0br; /* page 0 base register */
long pm_p0lr; /* page 0 length register */
struct pte *pm_p1br; /* page 1 base register */
long pm_p1lr; /* page 1 length register */
- struct simplelock pm_lock; /* Lock entry in MP environment */
struct pmap_statistics pm_stats; /* Some statistics */
};
@@ -149,7 +148,7 @@
/*
* This is the by far most used pmap routine. Make it inline.
*/
-__inline static bool
+static __inline bool
pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap)
{
int *pte, sva;
@@ -190,25 +189,30 @@
return (false);
}
-bool pmap_clear_modify_long(struct pv_entry *);
-bool pmap_clear_reference_long(struct pv_entry *);
-bool pmap_is_modified_long(struct pv_entry *);
+bool pmap_clear_modify_long(const struct pv_entry *);
+bool pmap_clear_reference_long(const struct pv_entry *);
+bool pmap_is_modified_long_p(const struct pv_entry *);
void pmap_page_protect_long(struct pv_entry *, vm_prot_t);
void pmap_protect_long(pmap_t, vaddr_t, vaddr_t, vm_prot_t);
-__inline static bool
+static __inline struct pv_entry *
+pmap_pg_to_pv(const struct vm_page *pg)
+{
+ return pv_table + (VM_PAGE_TO_PHYS(pg) >> PGSHIFT);
+}
+
+static __inline bool
pmap_is_referenced(struct vm_page *pg)
{
- struct pv_entry *pv = pv_table + (VM_PAGE_TO_PHYS(pg) >> PGSHIFT);
- bool rv = (pv->pv_attr & PG_V) != 0;
+ const struct pv_entry * const pv = pmap_pg_to_pv(pg);
- return rv;
+ return (pv->pv_attr & PG_V) != 0;
}
-__inline static bool
+static __inline bool
pmap_clear_reference(struct vm_page *pg)
{
- struct pv_entry *pv = pv_table + (VM_PAGE_TO_PHYS(pg) >> PGSHIFT);
+ struct pv_entry * const pv = pmap_pg_to_pv(pg);
bool rv = (pv->pv_attr & PG_V) != 0;
pv->pv_attr &= ~PG_V;
@@ -217,10 +221,10 @@
return rv;
}
-__inline static bool
+static __inline bool
pmap_clear_modify(struct vm_page *pg)
{
- struct pv_entry *pv = pv_table + (VM_PAGE_TO_PHYS(pg) >> PGSHIFT);
+ struct pv_entry * const pv = pmap_pg_to_pv(pg);
bool rv = (pv->pv_attr & PG_M) != 0;
pv->pv_attr &= ~PG_M;
@@ -229,26 +233,24 @@
return rv;
}
-__inline static bool
+static __inline bool
pmap_is_modified(struct vm_page *pg)
{
- struct pv_entry *pv = pv_table + (VM_PAGE_TO_PHYS(pg) >> PGSHIFT);
- if (pv->pv_attr & PG_M)
- return 1;
- else
- return pmap_is_modified_long(pv);
+ const struct pv_entry * const pv = pmap_pg_to_pv(pg);
+
+ return (pv->pv_attr & PG_M) != 0 || pmap_is_modified_long_p(pv);
}
-__inline static void
+static __inline void
pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
{
- struct pv_entry *pv = pv_table + (VM_PAGE_TO_PHYS(pg) >> PGSHIFT);
+ struct pv_entry * const pv = pmap_pg_to_pv(pg);
if (pv->pv_pmap != NULL || pv->pv_next != NULL)
pmap_page_protect_long(pv, prot);
}
-__inline static void
+static __inline void
pmap_protect(pmap_t pmap, vaddr_t start, vaddr_t end, vm_prot_t prot)
{
if (pmap->pm_p0lr != 0 || pmap->pm_p1lr != 0x200000 ||
@@ -266,10 +268,10 @@
#define pmap_phys_address(phys) ((u_int)(phys) << PGSHIFT)
#define pmap_copy(a,b,c,d,e) /* Dont do anything */
#define pmap_update(pmap) /* nothing (yet) */
-#define pmap_remove(pmap, start, slut) pmap_protect(pmap, start, slut, 0)
+#define pmap_remove(pmap, start, end) pmap_protect(pmap, start, end, 0)
#define pmap_resident_count(pmap) ((pmap)->pm_stats.resident_count)
#define pmap_wired_count(pmap) ((pmap)->pm_stats.wired_count)
-#define pmap_reference(pmap) (pmap)->pm_count++
+#define pmap_reference(pmap) atomic_inc_uint(&(pmap)->pm_count)
/* These can be done as efficient inline macros */
#define pmap_copy_page(src, dst) \
diff -r cc1aa623e6cc -r 6a9d60124fca sys/arch/vax/vax/pmap.c
--- a/sys/arch/vax/vax/pmap.c Tue May 24 23:16:16 2011 +0000
+++ b/sys/arch/vax/vax/pmap.c Tue May 24 23:30:30 2011 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.179 2010/12/14 23:44:49 matt Exp $ */
+/* $NetBSD: pmap.c,v 1.180 2011/05/24 23:30:30 matt Exp $ */
/*
* Copyright (c) 1994, 1998, 1999, 2003 Ludd, University of Lule}, Sweden.
* All rights reserved.
@@ -30,7 +30,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.179 2010/12/14 23:44:49 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.180 2011/05/24 23:30:30 matt Exp $");
#include "opt_ddb.h"
#include "opt_cputype.h"
@@ -45,8 +45,10 @@
#include <sys/cpu.h>
#include <sys/device.h>
#include <sys/extent.h>
-#include <sys/malloc.h>
#include <sys/proc.h>
+#include <sys/atomic.h>
+#include <sys/kmem.h>
+#include <sys/mutex.h>
#include <uvm/uvm.h>
@@ -101,7 +103,7 @@
extern void *msgbufaddr;
-#define IOSPACE(p) (((u_long)(p)) & 0xe0000000)
+#define IOSPACE_P(p) (((u_long)(p) & 0xe0000000) != 0)
#define NPTEPROCSPC 0x1000 /* # of virtual PTEs per process space */
#define NPTEPG 0x80 /* # of PTEs per page (logical or physical) */
#define PPTESZ sizeof(struct pte)
@@ -115,6 +117,34 @@
#define P1SEG 1
#define SYSSEG 2
+static inline void
+pmap_decrement_stats(struct pmap *pm, bool wired)
+{
+#if defined(MULTIPROCESSOR)
+ atomic_dec_ulong(&pm->pm_stats.resident_count);
+ if (wired)
+ atomic_dec_ulong(&pm->pm_stats.wired_count);
+#else
+ pm->pm_stats.resident_count--;
+ if (wired)
+ pm->pm_stats.wired_count--;
+#endif
+}
+
+static inline void
+pmap_increment_stats(struct pmap *pm, bool wired)
+{
+#if defined(MULTIPROCESSOR)
+ atomic_inc_ulong(&pm->pm_stats.resident_count);
+ if (wired)
+ atomic_inc_ulong(&pm->pm_stats.wired_count);
+#else
+ pm->pm_stats.resident_count++;
+ if (wired)
+ pm->pm_stats.wired_count++;
+#endif
+}
+
/*
* Map in a virtual page.
*/
@@ -162,9 +192,9 @@
#endif
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
-static struct simplelock pvtable_lock;
-#define PVTABLE_LOCK simple_lock(&pvtable_lock);
-#define PVTABLE_UNLOCK simple_unlock(&pvtable_lock);
+static kmutex_t pvtable_lock;
+#define PVTABLE_LOCK mutex_spin_enter(&pvtable_lock);
+#define PVTABLE_UNLOCK mutex_spin_exit(&pvtable_lock);
#else
#define PVTABLE_LOCK
#define PVTABLE_UNLOCK
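For reference, a minimal sketch of the mutex(9) spin-mutex pattern pvtable_lock moves to here (the example_ names are illustrative, not part of the commit):

#include <sys/intr.h>
#include <sys/mutex.h>

static kmutex_t example_lock;		/* stands in for pvtable_lock */

static void
example_init(void)
{
	/* MUTEX_DEFAULT with an IPL above IPL_NONE yields a spin mutex. */
	mutex_init(&example_lock, MUTEX_DEFAULT, IPL_VM);
}

static void
example_update(void)
{
	mutex_spin_enter(&example_lock);
	/* ... modify the shared pv table ... */
	mutex_spin_exit(&example_lock);
}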
@@ -180,7 +210,7 @@
struct pv_entry *get_pventry(void);
void free_pventry(struct pv_entry *);
void more_pventries(void);
-vaddr_t get_ptp(int);
+vaddr_t get_ptp(bool);
void free_ptp(paddr_t);
/*
@@ -378,7 +408,6 @@
/* btop(virtual_avail - KERNBASE); */
pmap->pm_count = 1;
- simple_lock_init(&pmap->pm_lock);
/* Activate the kernel pmap. */
pcb->P1BR = pmap->pm_p1br;
@@ -409,7 +438,7 @@
SIMPLEQ_FIRST(&cpus) = ci;
#endif
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
- simple_lock_init(&pvtable_lock);
+ mutex_init(&pvtable_lock, MUTEX_DEFAULT, IPL_VM);
#endif
/*
@@ -519,18 +548,14 @@
else
vaddr = (br - (int *)pm->pm_p1br) * VAX_NBPG + 0x40000000;
- if (IOSPACE((br[0] & PG_FRAME) << VAX_PGSHIFT))
+ if (IOSPACE_P((br[0] & PG_FRAME) << VAX_PGSHIFT))
return; /* Forget mappings of IO space */
pv = pv_table + ((br[0] & PG_FRAME) >> LTOHPS);
if (((br[0] & PG_PROT) == PG_RW) &&
((pv->pv_attr & PG_M) != PG_M))
pv->pv_attr |= br[0]|br[1]|br[2]|br[3]|br[4]|br[5]|br[6]|br[7];
- simple_lock(&pm->pm_lock);
- pm->pm_stats.resident_count--;
- if (br[0] & PG_W)
- pm->pm_stats.wired_count--;
- simple_unlock(&pm->pm_lock);
+ pmap_decrement_stats(pm, (br[0] & PG_W) != 0);
if (pv->pv_pmap == pm && pv->pv_vaddr == vaddr) {
pv->pv_vaddr = NOVADDR;
pv->pv_pmap = 0;
@@ -584,7 +609,7 @@
* Allocate a page through direct-mapped segment.
*/
static vaddr_t
-getpage(int w)
+getpage(bool wait)
{
struct vm_page *pg;
@@ -592,7 +617,7 @@
pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
if (pg != NULL)
break;
- if (w == NOWAIT)
+ if (!wait)
return 0;
uvm_wait("getpage");
}
@@ -886,17 +911,16 @@
/*
* pmap_create() creates a pmap for a new task.
- * If not already allocated, malloc space for one.
+ * If not already allocated, allocate space for one.
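The remaining hunks fall outside the 300-line excerpt. For the "Switch to kmem (goodbye malloc)" part of the description, a minimal sketch of the kmem(9) allocation pattern, assuming a context that is allowed to sleep; the struct and function names are illustrative, not the commit's actual hunks:

#include <sys/kmem.h>

struct pmap_alloc_sketch {
	u_int pm_count;
	/* ... */
};

static struct pmap_alloc_sketch *
pmap_create_sketch(void)
{
	/* KM_SLEEP lets the allocation block until memory is available. */
	return kmem_zalloc(sizeof(struct pmap_alloc_sketch), KM_SLEEP);
}

static void
pmap_release_sketch(struct pmap_alloc_sketch *pm)
{
	/* kmem_free() requires the allocation size, unlike malloc(9)'s free(). */
	kmem_free(pm, sizeof(struct pmap_alloc_sketch));
}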