Source-Changes-HG archive
[src/trunk]: src/sys/uvm - Describe uvm_aobj and the lock order.
details: https://anonhg.NetBSD.org/src/rev/3f7ddbd0990f
branches: trunk
changeset: 781555:3f7ddbd0990f
user: rmind <rmind%NetBSD.org@localhost>
date: Fri Sep 14 18:56:15 2012 +0000
description:
- Describe uvm_aobj and the lock order.
- Remove unnecessary uao_dropswap_range1() wrapper.
- KNF. Sprinkle some __cacheline_aligned.
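The lock order noted above (and documented in the diff's new comment) is
uvm_object::vmobjlock first, then uao_list_lock. A minimal sketch of a
compliant caller, assuming the NetBSD kmutex API and the names appearing in
the diff below; the function example_aobj_list_op() is hypothetical and not
part of this commit:

	static void
	example_aobj_list_op(struct uvm_aobj *aobj)
	{
		struct uvm_object *uobj = &aobj->u_obj;

		mutex_enter(uobj->vmobjlock);	/* first: per-object lock */
		mutex_enter(&uao_list_lock);	/* second: global list lock */
		/* ... may touch both the object and uao_list here ... */
		mutex_exit(&uao_list_lock);
		mutex_exit(uobj->vmobjlock);
	}

Any code path that needs both locks must take them in this order; acquiring
them in the opposite order in another thread could deadlock.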
diffstat:
sys/uvm/uvm_aobj.c | 165 ++++++++++++++++++++--------------------------------
sys/uvm/uvm_aobj.h | 36 ++++------
2 files changed, 78 insertions(+), 123 deletions(-)
diffs (truncated from 406 to 300 lines):
diff -r a1a6e2cb4a42 -r 3f7ddbd0990f sys/uvm/uvm_aobj.c
--- a/sys/uvm/uvm_aobj.c Fri Sep 14 16:29:21 2012 +0000
+++ b/sys/uvm/uvm_aobj.c Fri Sep 14 18:56:15 2012 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_aobj.c,v 1.116 2011/09/06 16:41:55 matt Exp $ */
+/* $NetBSD: uvm_aobj.c,v 1.117 2012/09/14 18:56:15 rmind Exp $ */
/*
* Copyright (c) 1998 Chuck Silvers, Charles D. Cranor and
@@ -38,7 +38,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_aobj.c,v 1.116 2011/09/06 16:41:55 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_aobj.c,v 1.117 2012/09/14 18:56:15 rmind Exp $");
#include "opt_uvmhist.h"
@@ -52,66 +52,55 @@
#include <uvm/uvm.h>
/*
- * an aobj manages anonymous-memory backed uvm_objects. in addition
- * to keeping the list of resident pages, it also keeps a list of
- * allocated swap blocks. depending on the size of the aobj this list
- * of allocated swap blocks is either stored in an array (small objects)
- * or in a hash table (large objects).
- */
-
-/*
- * local structures
+ * An anonymous UVM object (aobj) manages anonymous-memory. In addition to
+ * keeping the list of resident pages, it may also keep a list of allocated
+ * swap blocks. Depending on the size of the object, this list is either
+ * stored in an array (small objects) or in a hash table (large objects).
+ *
+ * Lock order
+ *
+ * uvm_object::vmobjlock ->
+ * uao_list_lock
*/
/*
- * for hash tables, we break the address space of the aobj into blocks
- * of UAO_SWHASH_CLUSTER_SIZE pages. we require the cluster size to
- * be a power of two.
+ * Note: for hash tables, we break the address space of the aobj into blocks
+ * of UAO_SWHASH_CLUSTER_SIZE pages, which shall be a power of two.
*/
-#define UAO_SWHASH_CLUSTER_SHIFT 4
-#define UAO_SWHASH_CLUSTER_SIZE (1 << UAO_SWHASH_CLUSTER_SHIFT)
+#define UAO_SWHASH_CLUSTER_SHIFT 4
+#define UAO_SWHASH_CLUSTER_SIZE (1 << UAO_SWHASH_CLUSTER_SHIFT)
-/* get the "tag" for this page index */
-#define UAO_SWHASH_ELT_TAG(PAGEIDX) \
- ((PAGEIDX) >> UAO_SWHASH_CLUSTER_SHIFT)
+/* Get the "tag" for this page index. */
+#define UAO_SWHASH_ELT_TAG(idx) ((idx) >> UAO_SWHASH_CLUSTER_SHIFT)
+#define UAO_SWHASH_ELT_PAGESLOT_IDX(idx) \
+ ((idx) & (UAO_SWHASH_CLUSTER_SIZE - 1))
-#define UAO_SWHASH_ELT_PAGESLOT_IDX(PAGEIDX) \
- ((PAGEIDX) & (UAO_SWHASH_CLUSTER_SIZE - 1))
+/* Given an ELT and a page index, find the swap slot. */
+#define UAO_SWHASH_ELT_PAGESLOT(elt, idx) \
+ ((elt)->slots[UAO_SWHASH_ELT_PAGESLOT_IDX(idx)])
-/* given an ELT and a page index, find the swap slot */
-#define UAO_SWHASH_ELT_PAGESLOT(ELT, PAGEIDX) \
- ((ELT)->slots[UAO_SWHASH_ELT_PAGESLOT_IDX(PAGEIDX)])
+/* Given an ELT, return its pageidx base. */
+#define UAO_SWHASH_ELT_PAGEIDX_BASE(elt) \
+ ((elt)->tag << UAO_SWHASH_CLUSTER_SHIFT)
-/* given an ELT, return its pageidx base */
-#define UAO_SWHASH_ELT_PAGEIDX_BASE(ELT) \
- ((ELT)->tag << UAO_SWHASH_CLUSTER_SHIFT)
+/* The hash function. */
+#define UAO_SWHASH_HASH(aobj, idx) \
+ (&(aobj)->u_swhash[(((idx) >> UAO_SWHASH_CLUSTER_SHIFT) \
+ & (aobj)->u_swhashmask)])
/*
- * the swhash hash function
- */
-
-#define UAO_SWHASH_HASH(AOBJ, PAGEIDX) \
- (&(AOBJ)->u_swhash[(((PAGEIDX) >> UAO_SWHASH_CLUSTER_SHIFT) \
- & (AOBJ)->u_swhashmask)])
-
-/*
- * the swhash threshhold determines if we will use an array or a
+ * The threshold which determines whether we will use an array or a
* hash table to store the list of allocated swap blocks.
*/
-
-#define UAO_SWHASH_THRESHOLD (UAO_SWHASH_CLUSTER_SIZE * 4)
-#define UAO_USES_SWHASH(AOBJ) \
- ((AOBJ)->u_pages > UAO_SWHASH_THRESHOLD) /* use hash? */
+#define UAO_SWHASH_THRESHOLD (UAO_SWHASH_CLUSTER_SIZE * 4)
+#define UAO_USES_SWHASH(aobj) \
+ ((aobj)->u_pages > UAO_SWHASH_THRESHOLD)
-/*
- * the number of buckets in a swhash, with an upper bound
- */
-
-#define UAO_SWHASH_MAXBUCKETS 256
-#define UAO_SWHASH_BUCKETS(AOBJ) \
- (MIN((AOBJ)->u_pages >> UAO_SWHASH_CLUSTER_SHIFT, \
- UAO_SWHASH_MAXBUCKETS))
+/* The number of buckets in a hash, with an upper bound. */
+#define UAO_SWHASH_MAXBUCKETS 256
+#define UAO_SWHASH_BUCKETS(aobj) \
+ (MIN((aobj)->u_pages >> UAO_SWHASH_CLUSTER_SHIFT, UAO_SWHASH_MAXBUCKETS))
/*
* uao_swhash_elt: when a hash table is being used, this structure defines
@@ -135,7 +124,7 @@
* uao_swhash_elt_pool: pool of uao_swhash_elt structures.
* Note: pages for this pool must not come from a pageable kernel map.
*/
-static struct pool uao_swhash_elt_pool;
+static struct pool uao_swhash_elt_pool __cacheline_aligned;
/*
* uvm_aobj: the actual anon-backed uvm_object
@@ -159,10 +148,6 @@
LIST_ENTRY(uvm_aobj) u_list; /* global list of aobjs */
};
-/*
- * local functions
- */
-
static void uao_free(struct uvm_aobj *);
static int uao_get(struct uvm_object *, voff_t, struct vm_page **,
int *, int, vm_prot_t, int, int);
@@ -177,7 +162,6 @@
static bool uao_pagein(struct uvm_aobj *, int, int);
static bool uao_pagein_page(struct uvm_aobj *, int);
-static void uao_dropswap_range1(struct uvm_aobj *, voff_t, voff_t);
#endif /* defined(VMSWAP) */
/*
@@ -197,12 +181,8 @@
* uao_list: global list of active aobjs, locked by uao_list_lock
*/
-static LIST_HEAD(aobjlist, uvm_aobj) uao_list;
-static kmutex_t uao_list_lock;
-
-/*
- * functions
- */
+static LIST_HEAD(aobjlist, uvm_aobj) uao_list __cacheline_aligned;
+static kmutex_t uao_list_lock __cacheline_aligned;
/*
* hash table/array related functions
@@ -272,7 +252,7 @@
*/
if (aobj->u_flags & UAO_FLAG_NOSWAP)
- return(0);
+ return 0;
/*
* if hashing, look in hash table.
@@ -280,17 +260,14 @@
if (UAO_USES_SWHASH(aobj)) {
elt = uao_find_swhash_elt(aobj, pageidx, false);
- if (elt)
- return(UAO_SWHASH_ELT_PAGESLOT(elt, pageidx));
- else
- return(0);
+ return elt ? UAO_SWHASH_ELT_PAGESLOT(elt, pageidx) : 0;
}
/*
* otherwise, look in the array
*/
- return(aobj->u_swslots[pageidx]);
+ return aobj->u_swslots[pageidx];
}
/*
@@ -319,11 +296,8 @@
*/
if (aobj->u_flags & UAO_FLAG_NOSWAP) {
- if (slot == 0)
- return(0);
-
- printf("uao_set_swslot: uobj = %p\n", uobj);
- panic("uao_set_swslot: NOSWAP object");
+ KASSERTMSG(slot == 0, "uao_set_swslot: no swap object");
+ return 0;
}
/*
@@ -368,7 +342,7 @@
oldslot = aobj->u_swslots[pageidx];
aobj->u_swslots[pageidx] = slot;
}
- return (oldslot);
+ return oldslot;
}
#endif /* defined(VMSWAP) */
@@ -386,12 +360,10 @@
static void
uao_free(struct uvm_aobj *aobj)
{
+ struct uvm_object *uobj = &aobj->u_obj;
-#if defined(VMSWAP)
- uao_dropswap_range1(aobj, 0, 0);
-#endif /* defined(VMSWAP) */
-
- mutex_exit(aobj->u_obj.vmobjlock);
+ uao_dropswap_range(aobj, 0, 0);
+ mutex_exit(uobj->vmobjlock);
#if defined(VMSWAP)
if (UAO_USES_SWHASH(aobj)) {
@@ -415,7 +387,7 @@
* finally free the aobj itself
*/
- uvm_obj_destroy(&aobj->u_obj, true);
+ uvm_obj_destroy(uobj, true);
kmem_free(aobj, sizeof(struct uvm_aobj));
}
@@ -493,7 +465,7 @@
if (flags) {
aobj->u_flags &= ~UAO_FLAG_NOSWAP; /* clear noswap */
- return(&aobj->u_obj);
+ return &aobj->u_obj;
}
}
@@ -519,8 +491,6 @@
return(&aobj->u_obj);
}
-
-
/*
* uao_init: set up aobj pager subsystem
*
@@ -915,9 +885,6 @@
uao_get(struct uvm_object *uobj, voff_t offset, struct vm_page **pps,
int *npagesp, int centeridx, vm_prot_t access_type, int advice, int flags)
{
-#if defined(VMSWAP)
- struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
-#endif /* defined(VMSWAP) */
voff_t current_offset;
struct vm_page *ptmp = NULL; /* Quell compiler warning */
int lcv, gotpages, maxpages, swslot, pageidx;
@@ -959,7 +926,7 @@
* zero-fill'd.
*/
- if (ptmp == NULL && uao_find_swslot(&aobj->u_obj,
+ if (ptmp == NULL && uao_find_swslot(uobj,
current_offset >> PAGE_SHIFT) == 0) {
ptmp = uvm_pagealloc(uobj, current_offset,
NULL, UVM_FLAG_COLORMATCH|UVM_PGA_ZERO);
@@ -1122,7 +1089,7 @@
* do the needed "i/o", either reading from swap or zeroing.
*/
- swslot = uao_find_swslot(&aobj->u_obj, pageidx);
+ swslot = uao_find_swslot(uobj, pageidx);
/*
* just zero the page if there's nothing in swap.
@@ -1391,30 +1358,31 @@
}
/*
- * page in a page from an aobj. used for swap_off.
- * returns true if pagein was aborted due to lack of memory.
+ * uao_pagein_page: page in a single page from an anonymous UVM object.
*
- * => aobj must be locked and is returned locked.
+ * => Returns true if pagein was aborted due to lack of memory.
+ * => Object must be locked and is returned locked.
*/
static bool
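For reference, a small standalone sketch of how the swhash macros above
partition page indices and when an aobj switches from an array to a hash
table. The macro definitions are copied from the diff; the main() driver is
illustrative only and not part of the commit:

	#include <stdio.h>

	#define UAO_SWHASH_CLUSTER_SHIFT	4
	#define UAO_SWHASH_CLUSTER_SIZE		(1 << UAO_SWHASH_CLUSTER_SHIFT)
	#define UAO_SWHASH_ELT_TAG(idx)		((idx) >> UAO_SWHASH_CLUSTER_SHIFT)
	#define UAO_SWHASH_ELT_PAGESLOT_IDX(idx) \
		((idx) & (UAO_SWHASH_CLUSTER_SIZE - 1))
	#define UAO_SWHASH_THRESHOLD		(UAO_SWHASH_CLUSTER_SIZE * 4)

	int
	main(void)
	{
		int idx = 0x25;

		/* Page index 0x25: cluster tag 0x25 >> 4 == 2, slot 0x25 & 0xf == 5. */
		printf("tag=%d slot=%d\n",
		    UAO_SWHASH_ELT_TAG(idx), UAO_SWHASH_ELT_PAGESLOT_IDX(idx));

		/* Objects larger than 64 pages switch from array to hash table. */
		printf("65-page aobj uses swhash: %s\n",
		    65 > UAO_SWHASH_THRESHOLD ? "yes" : "no");
		return 0;
	}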