Source-Changes-HG archive


[src/trunk]: src/sys/arch/arm/arm32 Pass struct vm_page_md * where possible.



details:   https://anonhg.NetBSD.org/src/rev/4ade282f40b4
branches:  trunk
changeset: 758272:4ade282f40b4
user:      uebayasi <uebayasi%NetBSD.org@localhost>
date:      Sat Oct 30 16:14:08 2010 +0000

description:
Pass struct vm_page_md * where possible.

This causes a 1% code size increase, mainly because the additional
argument (paddr_t) affects register usage.  This will be fixed when the
per-page data structure (struct vm_page) is redone and the physical
address can be retrieved from struct vm_page_md *.

Tested on (uncommitted) i.MX35 (ARM1136).

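For illustration only (not part of the commit), the call-site conversion
looks roughly like the sketch below: instead of handing the pv helpers a
struct vm_page * and letting them dereference pg->mdpage, a caller now
fetches the machine-dependent metadata and the physical address once and
passes both.  VM_PAGE_TO_MD() is the macro added by this change,
VM_PAGE_TO_PHYS() is the existing uvm accessor, and the wrapper function
name below is made up.

/*
 * Hypothetical caller, sketched against the new prototypes in this diff.
 */
static u_int
example_clear_ref(struct vm_page *pg, pmap_t pm, vaddr_t va)
{
	struct vm_page_md *md = VM_PAGE_TO_MD(pg);	/* &pg->mdpage, per this change */
	paddr_t pa = VM_PAGE_TO_PHYS(pg);		/* pa now travels as an explicit argument */

	/* Before this change: pmap_modify_pv(pg, pm, va, PVF_REF, 0); */
	return pmap_modify_pv(md, pa, pm, va, PVF_REF, 0);
}

Passing pa alongside md is what the description above refers to: an extra
argument that costs registers until the physical address can be derived
from struct vm_page_md * directly.
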
diffstat:

 sys/arch/arm/arm32/pmap.c |  845 ++++++++++++++++++++++++---------------------
 1 files changed, 446 insertions(+), 399 deletions(-)

diffs (truncated from 1965 to 300 lines):

diff -r a1bed1dcba77 -r 4ade282f40b4 sys/arch/arm/arm32/pmap.c
--- a/sys/arch/arm/arm32/pmap.c Sat Oct 30 08:12:43 2010 +0000
+++ b/sys/arch/arm/arm32/pmap.c Sat Oct 30 16:14:08 2010 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: pmap.c,v 1.214 2010/06/16 22:06:53 jmcneill Exp $      */
+/*     $NetBSD: pmap.c,v 1.215 2010/10/30 16:14:08 uebayasi Exp $      */
 
 /*
  * Copyright 2003 Wasabi Systems, Inc.
@@ -211,7 +211,9 @@
 #include <machine/param.h>
 #include <arm/arm32/katelib.h>
 
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.214 2010/06/16 22:06:53 jmcneill Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.215 2010/10/30 16:14:08 uebayasi Exp $");
+
+#define        VM_PAGE_TO_MD(pg)       (&(pg)->mdpage)
 
 #ifdef PMAP_DEBUG
 
@@ -636,11 +638,11 @@
                            pt_entry_t **);
 static bool            pmap_is_current(pmap_t);
 static bool            pmap_is_cached(pmap_t);
-static void            pmap_enter_pv(struct vm_page *, struct pv_entry *,
+static void            pmap_enter_pv(struct vm_page_md *, paddr_t, struct pv_entry *,
                            pmap_t, vaddr_t, u_int);
-static struct pv_entry *pmap_find_pv(struct vm_page *, pmap_t, vaddr_t);
-static struct pv_entry *pmap_remove_pv(struct vm_page *, pmap_t, vaddr_t);
-static u_int           pmap_modify_pv(struct vm_page *, pmap_t, vaddr_t,
+static struct pv_entry *pmap_find_pv(struct vm_page_md *, pmap_t, vaddr_t);
+static struct pv_entry *pmap_remove_pv(struct vm_page_md *, paddr_t, pmap_t, vaddr_t);
+static u_int           pmap_modify_pv(struct vm_page_md *, paddr_t, pmap_t, vaddr_t,
                            u_int, u_int);
 
 static void            pmap_pinit(pmap_t);
@@ -656,26 +658,26 @@
 static int             pmap_l2ptp_ctor(void *, void *, int);
 static int             pmap_l2dtable_ctor(void *, void *, int);
 
-static void            pmap_vac_me_harder(struct vm_page *, pmap_t, vaddr_t);
+static void            pmap_vac_me_harder(struct vm_page_md *, paddr_t, pmap_t, vaddr_t);
 #ifdef PMAP_CACHE_VIVT
-static void            pmap_vac_me_kpmap(struct vm_page *, pmap_t, vaddr_t);
-static void            pmap_vac_me_user(struct vm_page *, pmap_t, vaddr_t);
+static void            pmap_vac_me_kpmap(struct vm_page_md *, paddr_t, pmap_t, vaddr_t);
+static void            pmap_vac_me_user(struct vm_page_md *, paddr_t, pmap_t, vaddr_t);
 #endif
 
-static void            pmap_clearbit(struct vm_page *, u_int);
+static void            pmap_clearbit(struct vm_page_md *, paddr_t, u_int);
 #ifdef PMAP_CACHE_VIVT
 static int             pmap_clean_page(struct pv_entry *, bool);
 #endif
 #ifdef PMAP_CACHE_VIPT
-static void            pmap_syncicache_page(struct vm_page *);
+static void            pmap_syncicache_page(struct vm_page_md *, paddr_t);
 enum pmap_flush_op {
        PMAP_FLUSH_PRIMARY,
        PMAP_FLUSH_SECONDARY,
        PMAP_CLEAN_PRIMARY
 };
-static void            pmap_flush_page(struct vm_page *, enum pmap_flush_op);
+static void            pmap_flush_page(struct vm_page_md *, paddr_t, enum pmap_flush_op);
 #endif
-static void            pmap_page_remove(struct vm_page *);
+static void            pmap_page_remove(struct vm_page_md *, paddr_t);
 
 static void            pmap_init_l1(struct l1_ttable *, pd_entry_t *);
 static vaddr_t         kernel_pt_lookup(paddr_t);
@@ -859,20 +861,20 @@
  * => caller should not adjust pmap's wire_count
  */
 static void
-pmap_enter_pv(struct vm_page *pg, struct pv_entry *pv, pmap_t pm,
+pmap_enter_pv(struct vm_page_md *md, paddr_t pa, struct pv_entry *pv, pmap_t pm,
     vaddr_t va, u_int flags)
 {
        struct pv_entry **pvp;
 
        NPDEBUG(PDB_PVDUMP,
-           printf("pmap_enter_pv: pm %p, pg %p, flags 0x%x\n", pm, pg, flags));
+           printf("pmap_enter_pv: pm %p, md %p, flags 0x%x\n", pm, md, flags));
 
        pv->pv_pmap = pm;
        pv->pv_va = va;
        pv->pv_flags = flags;
 
-       simple_lock(&pg->mdpage.pvh_slock);     /* lock vm_page */
-       pvp = &SLIST_FIRST(&pg->mdpage.pvh_list);
+       simple_lock(&md->pvh_slock);    /* lock vm_page */
+       pvp = &SLIST_FIRST(&md->pvh_list);
 #ifdef PMAP_CACHE_VIPT
        /*
         * Insert unmanaged entries, writeable first, at the head of
@@ -888,25 +890,25 @@
 #endif
        SLIST_NEXT(pv, pv_link) = *pvp;         /* add to ... */
        *pvp = pv;                              /* ... locked list */
-       pg->mdpage.pvh_attrs |= flags & (PVF_REF | PVF_MOD);
+       md->pvh_attrs |= flags & (PVF_REF | PVF_MOD);
 #ifdef PMAP_CACHE_VIPT
        if ((pv->pv_flags & PVF_KWRITE) == PVF_KWRITE)
-               pg->mdpage.pvh_attrs |= PVF_KMOD;
-       if ((pg->mdpage.pvh_attrs & (PVF_DMOD|PVF_NC)) != PVF_NC)
-               pg->mdpage.pvh_attrs |= PVF_DIRTY;
-       KASSERT((pg->mdpage.pvh_attrs & PVF_DMOD) == 0 || (pg->mdpage.pvh_attrs & (PVF_DIRTY|PVF_NC)));
+               md->pvh_attrs |= PVF_KMOD;
+       if ((md->pvh_attrs & (PVF_DMOD|PVF_NC)) != PVF_NC)
+               md->pvh_attrs |= PVF_DIRTY;
+       KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
 #endif
        if (pm == pmap_kernel()) {
                PMAPCOUNT(kernel_mappings);
                if (flags & PVF_WRITE)
-                       pg->mdpage.krw_mappings++;
+                       md->krw_mappings++;
                else
-                       pg->mdpage.kro_mappings++;
+                       md->kro_mappings++;
        } else {
                if (flags & PVF_WRITE)
-                       pg->mdpage.urw_mappings++;
+                       md->urw_mappings++;
                else
-                       pg->mdpage.uro_mappings++;
+                       md->uro_mappings++;
        }
 
 #ifdef PMAP_CACHE_VIPT
@@ -915,8 +917,8 @@
         * for this page, make sure to sync the I-cache.
         */
        if (PV_IS_EXEC_P(flags)) {
-               if (!PV_IS_EXEC_P(pg->mdpage.pvh_attrs)) {
-                       pmap_syncicache_page(pg);
+               if (!PV_IS_EXEC_P(md->pvh_attrs)) {
+                       pmap_syncicache_page(md, pa);
                        PMAPCOUNT(exec_synced_map);
                }
                PMAPCOUNT(exec_mappings);
@@ -924,7 +926,7 @@
 #endif
 
        PMAPCOUNT(mappings);
-       simple_unlock(&pg->mdpage.pvh_slock);   /* unlock, done! */
+       simple_unlock(&md->pvh_slock);  /* unlock, done! */
 
        if (pv->pv_flags & PVF_WIRED)
                ++pm->pm_stats.wired_count;
@@ -937,11 +939,11 @@
  * => caller should hold lock on vm_page
  */
 static inline struct pv_entry *
-pmap_find_pv(struct vm_page *pg, pmap_t pm, vaddr_t va)
+pmap_find_pv(struct vm_page_md *md, pmap_t pm, vaddr_t va)
 {
        struct pv_entry *pv;
 
-       SLIST_FOREACH(pv, &pg->mdpage.pvh_list, pv_link) {
+       SLIST_FOREACH(pv, &md->pvh_list, pv_link) {
                if (pm == pv->pv_pmap && va == pv->pv_va)
                        break;
        }
@@ -960,20 +962,20 @@
  * => we return the removed pv
  */
 static struct pv_entry *
-pmap_remove_pv(struct vm_page *pg, pmap_t pm, vaddr_t va)
+pmap_remove_pv(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va)
 {
        struct pv_entry *pv, **prevptr;
 
        NPDEBUG(PDB_PVDUMP,
-           printf("pmap_remove_pv: pm %p, pg %p, va 0x%08lx\n", pm, pg, va));
-
-       prevptr = &SLIST_FIRST(&pg->mdpage.pvh_list); /* prev pv_entry ptr */
+           printf("pmap_remove_pv: pm %p, md %p, va 0x%08lx\n", pm, md, va));
+
+       prevptr = &SLIST_FIRST(&md->pvh_list); /* prev pv_entry ptr */
        pv = *prevptr;
 
        while (pv) {
                if (pv->pv_pmap == pm && pv->pv_va == va) {     /* match? */
-                       NPDEBUG(PDB_PVDUMP, printf("pmap_remove_pv: pm %p, pg "
-                           "%p, flags 0x%x\n", pm, pg, pv->pv_flags));
+                       NPDEBUG(PDB_PVDUMP, printf("pmap_remove_pv: pm %p, md "
+                           "%p, flags 0x%x\n", pm, md, pv->pv_flags));
                        if (pv->pv_flags & PVF_WIRED) {
                                --pm->pm_stats.wired_count;
                        }
@@ -981,14 +983,14 @@
                        if (pm == pmap_kernel()) {
                                PMAPCOUNT(kernel_unmappings);
                                if (pv->pv_flags & PVF_WRITE)
-                                       pg->mdpage.krw_mappings--;
+                                       md->krw_mappings--;
                                else
-                                       pg->mdpage.kro_mappings--;
+                                       md->kro_mappings--;
                        } else {
                                if (pv->pv_flags & PVF_WRITE)
-                                       pg->mdpage.urw_mappings--;
+                                       md->urw_mappings--;
                                else
-                                       pg->mdpage.uro_mappings--;
+                                       md->uro_mappings--;
                        }
 
                        PMAPCOUNT(unmappings);
@@ -1000,12 +1002,12 @@
                         * this was the last mapping, discard the contents,
                         * otherwise sync the i-cache for this page.
                         */
-                       if (PV_IS_EXEC_P(pg->mdpage.pvh_attrs)) {
-                               if (SLIST_EMPTY(&pg->mdpage.pvh_list)) {
-                                       pg->mdpage.pvh_attrs &= ~PVF_EXEC;
+                       if (PV_IS_EXEC_P(md->pvh_attrs)) {
+                               if (SLIST_EMPTY(&md->pvh_list)) {
+                                       md->pvh_attrs &= ~PVF_EXEC;
                                        PMAPCOUNT(exec_discarded_unmap);
                                } else {
-                                       pmap_syncicache_page(pg);
+                                       pmap_syncicache_page(md, pa);
                                        PMAPCOUNT(exec_synced_unmap);
                                }
                        }
@@ -1021,18 +1023,18 @@
         * If we no longer have a WRITEABLE KENTRY at the head of list,
         * clear the KMOD attribute from the page.
         */
-       if (SLIST_FIRST(&pg->mdpage.pvh_list) == NULL
-           || (SLIST_FIRST(&pg->mdpage.pvh_list)->pv_flags & PVF_KWRITE) != PVF_KWRITE)
-               pg->mdpage.pvh_attrs &= ~PVF_KMOD;
+       if (SLIST_FIRST(&md->pvh_list) == NULL
+           || (SLIST_FIRST(&md->pvh_list)->pv_flags & PVF_KWRITE) != PVF_KWRITE)
+               md->pvh_attrs &= ~PVF_KMOD;
 
        /*
         * If this was a writeable page and there are no more writeable
         * mappings (ignoring KMPAGE), clear the WRITE flag and writeback
         * the contents to memory.
         */
-       if (pg->mdpage.krw_mappings + pg->mdpage.urw_mappings == 0)
-               pg->mdpage.pvh_attrs &= ~PVF_WRITE;
-       KASSERT((pg->mdpage.pvh_attrs & PVF_DMOD) == 0 || (pg->mdpage.pvh_attrs & (PVF_DIRTY|PVF_NC)));
+       if (md->krw_mappings + md->urw_mappings == 0)
+               md->pvh_attrs &= ~PVF_WRITE;
+       KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
 #endif /* PMAP_CACHE_VIPT */
 
        return(pv);                             /* return removed pv */
@@ -1051,7 +1053,7 @@
  * Modify a physical-virtual mapping in the pv table
  */
 static u_int
-pmap_modify_pv(struct vm_page *pg, pmap_t pm, vaddr_t va,
+pmap_modify_pv(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va,
     u_int clr_mask, u_int set_mask)
 {
        struct pv_entry *npv;
@@ -1060,22 +1062,22 @@
        KASSERT((clr_mask & PVF_KENTRY) == 0);
        KASSERT((set_mask & PVF_KENTRY) == 0);
 
-       if ((npv = pmap_find_pv(pg, pm, va)) == NULL)
+       if ((npv = pmap_find_pv(md, pm, va)) == NULL)
                return (0);
 
        NPDEBUG(PDB_PVDUMP,
-           printf("pmap_modify_pv: pm %p, pg %p, clr 0x%x, set 0x%x, flags 0x%x\n", pm, pg, clr_mask, set_mask, npv->pv_flags));
+           printf("pmap_modify_pv: pm %p, md %p, clr 0x%x, set 0x%x, flags 0x%x\n", pm, md, clr_mask, set_mask, npv->pv_flags));
 
        /*
         * There is at least one VA mapping this page.
         */
 
        if (clr_mask & (PVF_REF | PVF_MOD)) {
-               pg->mdpage.pvh_attrs |= set_mask & (PVF_REF | PVF_MOD);
+               md->pvh_attrs |= set_mask & (PVF_REF | PVF_MOD);
 #ifdef PMAP_CACHE_VIPT
-               if ((pg->mdpage.pvh_attrs & (PVF_DMOD|PVF_NC)) != PVF_NC)
-                       pg->mdpage.pvh_attrs |= PVF_DIRTY;
-               KASSERT((pg->mdpage.pvh_attrs & PVF_DMOD) == 0 || (pg->mdpage.pvh_attrs & (PVF_DIRTY|PVF_NC)));
+               if ((md->pvh_attrs & (PVF_DMOD|PVF_NC)) != PVF_NC)
+                       md->pvh_attrs |= PVF_DIRTY;
+               KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
 #endif
        }
 
@@ -1092,38 +1094,38 @@
        if ((flags ^ oflags) & PVF_WRITE) {
                if (pm == pmap_kernel()) {
                        if (flags & PVF_WRITE) {
-                               pg->mdpage.krw_mappings++;
-                               pg->mdpage.kro_mappings--;
+                               md->krw_mappings++;
+                               md->kro_mappings--;
                        } else {
-                               pg->mdpage.kro_mappings++;


