Source-Changes-HG archive
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]
[src/chs-ubc2]: src/sys/uvm Chuq apparently forgot to place this on the ubc2 ...
details: https://anonhg.NetBSD.org/src/rev/7ffa8c235275
branches: chs-ubc2
changeset: 471339:7ffa8c235275
user: thorpej <thorpej%NetBSD.org@localhost>
date: Mon Jun 21 15:04:23 1999 +0000
description:
Chuq apparently forgot to place this on the ubc2 branch.
diffstat:
sys/uvm/uvm_bio.c | 633 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
1 files changed, 633 insertions(+), 0 deletions(-)
diffs (truncated from 637 to 300 lines):
diff -r a2cbf81faed9 -r 7ffa8c235275 sys/uvm/uvm_bio.c
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/sys/uvm/uvm_bio.c Mon Jun 21 15:04:23 1999 +0000
@@ -0,0 +1,633 @@
+/* $NetBSD: uvm_bio.c,v 1.1.4.1 1999/06/21 15:04:23 thorpej Exp $ */
+
+/*
+ * Copyright (c) 1998 Chuck Silvers.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include "opt_uvmhist.h"
+
+/*
+ * uvm_bio.c: buffered i/o vnode mapping cache
+ */
+
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/malloc.h>
+#include <sys/kernel.h>
+#include <sys/vnode.h>
+
+#include <vm/vm.h>
+#include <vm/vm_page.h>
+#include <vm/vm_kern.h>
+
+#include <uvm/uvm.h>
+
+/*
+ * global data structures
+ */
+
+/*
+ * local functions
+ */
+
+static void ubc_init __P((void));
+static int ubc_fault __P((struct uvm_faultinfo *, vaddr_t,
+ vm_page_t *, int,
+ int, vm_fault_t, vm_prot_t, int));
+
+static struct ubc_map *ubc_find_mapping __P((struct uvm_object *,
+ vaddr_t));
+
+/*
+ * local data structures
+ */
+
+/*
+ * UBC_HASH: map a (uvm_object pointer, byte offset) pair to a bucket
+ * index in ubc_object.hash.  The object pointer is scaled down by the
+ * object size and the offset by the MAXBSIZE window size, so mappings
+ * of the same MAXBSIZE-aligned chunk hash to the same bucket.
+ */
+#define UBC_HASH(uobj, offset) (((long)(uobj) / sizeof(struct uvm_object) + \
+ (offset) / MAXBSIZE) & ubc_object.hashmask)
+
+/* number of pages to request per fault; see the XXX -- a fixed value
+ of 1 effectively disables read-ahead for now */
+/* XXX make this real eventually */
+#define UBC_DEFAULT_READAHEAD_PAGES 1
+
+
+
+/*
+ * ubc_map: one mapping window in the buffered-i/o mapping cache.
+ * Each window covers one MAXBSIZE-sized, MAXBSIZE-aligned chunk of a
+ * uvm_object (in practice a vnode's object -- see the uvm_vnode cast
+ * in ubc_fault).  Windows are looked up through a hash table keyed by
+ * (object, offset) and recycled through an inactive queue once their
+ * refcount drops.
+ */
+struct ubc_map
+{
+ struct uvm_object * uobj; /* mapped object */
+ vaddr_t offset; /* offset into uobj */
+ int refcount; /* refcount on mapping */
+ /* XXX refcount will turn into a rwlock when vnodes start
+ using their locks in shared mode. */
+
+ /* pending-overwrite range: a fault for a write wholly inside
+ [writeoff, writeoff + writelen) can skip reading old data */
+ vaddr_t writeoff; /* overwrite offset */
+ vsize_t writelen; /* overwrite len */
+
+ LIST_ENTRY(ubc_map) hash; /* hash table */
+ TAILQ_ENTRY(ubc_map) inactive; /* inactive queue */
+};
+
+
+/*
+ * ubc_object: the singleton state for the mapping cache.
+ * The embedded uvm_object lets the whole window region be entered
+ * into the kernel map with uvm_map() (see ubc_init), so faults on
+ * any window are routed to ubc_fault via ubc_pager.
+ */
+static struct ubc_object
+{
+ struct uvm_object uobj; /* glue for uvm_map() */
+ void *kva; /* where ubc_object is mapped */
+ struct ubc_map *umap; /* array of ubc_map's */
+
+ LIST_HEAD(, ubc_map) *hash; /* hashtable for cached ubc_map's */
+ u_long hashmask; /* mask for hashtable */
+
+ TAILQ_HEAD(, ubc_map) inactive; /* inactive queue for ubc_map's */
+
+} ubc_object;
+
+
+/*
+ * ubc_pager: pager operations vector for ubc_object.
+ * Only init and fault are supplied; this is a partial positional
+ * initializer, so the remaining members are implicitly zeroed
+ * (the "rest are NULL" note below).
+ */
+struct uvm_pagerops ubc_pager =
+{
+ ubc_init, /* init */
+ NULL, /* attach */
+ NULL, /* reference */
+ NULL, /* detach */
+ ubc_fault, /* fault */
+ /* ... rest are NULL */
+};
+
+
+/* XXX */
+static int ubc_nwins = 16;
+
+
+/*
+ * ubc_init
+ *
+ * init pager private data structures.
+ *
+ * Called once through ubc_pager.init.  Sets up the singleton
+ * ubc_object, allocates ubc_nwins mapping windows and their hash
+ * table, and reserves ubc_nwins * MAXBSIZE of kernel virtual space
+ * for the windows.  All allocation failures are fatal (panic).
+ */
+static void
+ubc_init()
+{
+ struct ubc_map *umap;
+ int i;
+
+ /*
+ * init ubc_object.
+ * alloc and init ubc_map's.
+ * init inactive queue.
+ * alloc and init hashtable.
+ * map in ubc_object.
+ */
+
+ simple_lock_init(&ubc_object.uobj.vmobjlock);
+ ubc_object.uobj.pgops = &ubc_pager;
+ TAILQ_INIT(&ubc_object.uobj.memq);
+ ubc_object.uobj.uo_npages = 0;
+ ubc_object.uobj.uo_refs = UVM_OBJ_KERN;
+
+ /* M_NOWAIT: allocation may fail; treated as fatal below */
+ ubc_object.umap = malloc(ubc_nwins * sizeof(struct ubc_map),
+ M_TEMP, M_NOWAIT);
+ if (ubc_object.umap == NULL) {
+ panic("ubc_init: failed to allocate ubc_maps");
+ }
+ bzero(ubc_object.umap, ubc_nwins * sizeof(struct ubc_map));
+
+ /* all windows start out unreferenced, on the inactive queue */
+ TAILQ_INIT(&ubc_object.inactive);
+ for (umap = ubc_object.umap;
+ umap < &ubc_object.umap[ubc_nwins];
+ umap++) {
+ TAILQ_INSERT_TAIL(&ubc_object.inactive, umap, inactive);
+ }
+
+ ubc_object.hash = hashinit(ubc_nwins / 4, M_TEMP, M_NOWAIT,
+ &ubc_object.hashmask);
+ if (ubc_object.hash == NULL) {
+ /* NOTE(review): panic() supplies its own newline; the
+ trailing \n here (and below) is redundant but harmless */
+ panic("ubc_init: failed to allocate hash\n");
+ }
+
+ for (i = 0; i <= ubc_object.hashmask; i++) {
+ LIST_INIT(&ubc_object.hash[i]);
+ }
+
+ /* reserve KVA for all windows; faults in this range go to
+ ubc_fault via ubc_pager */
+ if (uvm_map(kernel_map, (vaddr_t *)&ubc_object.kva,
+ ubc_nwins * MAXBSIZE, &ubc_object.uobj, 0,
+ UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
+ UVM_ADV_RANDOM, UVM_FLAG_NOMERGE))
+ != KERN_SUCCESS) {
+ panic("ubc_init: failed to map ubc_object\n");
+ }
+
+ /* XXX this shouldn't go here */
+ {
+ static struct uvm_history_ent ubchistbuf[200];
+ /* self-assignment presumably silences an unused-variable
+ warning when UVMHIST is compiled out -- TODO confirm */
+ ubchistbuf[0] = ubchistbuf[0];
+ UVMHIST_INIT_STATIC(ubchist, ubchistbuf);
+ }
+}
+
+
+/*
+ * ubc_fault: fault routine for ubc mapping
+ *
+ * NOTE(review): this archived diff is truncated ("637 to 300 lines"),
+ * so the remainder of this function past the PGO_OVERWRITE check is
+ * not visible here.  Some locals (eva, rv, retry, pages, npages) are
+ * presumably used in the missing portion.
+ */
+static int
+ubc_fault(ufi, ign1, ign2, ign3, ign4, fault_type, access_type, flags)
+ struct uvm_faultinfo *ufi;
+ vaddr_t ign1;
+ vm_page_t *ign2;
+ int ign3, ign4;
+ vm_fault_t fault_type;
+ vm_prot_t access_type;
+ int flags;
+{
+ struct uvm_object *uobj;
+ struct uvm_vnode *uvn;
+ struct ubc_map *umap;
+ vaddr_t va, eva, ubc_offset, umap_offset;
+ int i, rv, npages;
+ struct vm_page *pages[MAXBSIZE >> PAGE_SHIFT];
+ boolean_t retry;
+
+ UVMHIST_FUNC("ubc_fault"); UVMHIST_CALLED(ubchist);
+
+ /* locate the fault within the ubc KVA window region */
+ va = ufi->orig_rvaddr;
+ ubc_offset = va - (vaddr_t)ubc_object.kva;
+
+#ifdef DEBUG
+ if (ufi->entry->object.uvm_obj != &ubc_object.uobj) {
+ panic("ubc_fault: not ubc_object");
+ }
+ if (ubc_offset >= ubc_nwins * MAXBSIZE) {
+ panic("ubc_fault: fault addr 0x%lx outside ubc mapping", va);
+ }
+#endif
+
+ UVMHIST_LOG(ubchist, "va 0x%lx ubc_offset 0x%lx at %d",
+ va, ubc_offset, access_type,0);
+
+ /* each MAXBSIZE-sized window has its own ubc_map entry */
+ umap = &ubc_object.umap[ubc_offset / MAXBSIZE];
+ umap_offset = ubc_offset & (MAXBSIZE - 1);
+
+#ifdef DIAGNOSTIC
+ if (umap->refcount == 0) {
+ panic("ubc_fault: umap %p has no refs", umap);
+ }
+#endif
+
+ /* no umap locking needed since we have a ref on the umap */
+ uobj = umap->uobj;
+ uvn = (struct uvm_vnode *)uobj;
+#ifdef DIAGNOSTIC
+ if (uobj == NULL) {
+ panic("ubc_fault: umap %p has null uobj", umap);
+ }
+#endif
+
+ /* XXX limit npages by size of file? */
+ /* never request pages beyond the current MAXBSIZE window */
+ npages = min(UBC_DEFAULT_READAHEAD_PAGES,
+ (MAXBSIZE - umap_offset) >> PAGE_SHIFT);
+
+ /*
+ * no need to try with PGO_LOCKED...
+ * we don't need to have the map locked since we know that
+ * no one will mess with it until our reference is released.
+ */
+ if (flags & PGO_LOCKED) {
+ uvmfault_unlockall(ufi, NULL, &ubc_object.uobj, NULL);
+ flags &= ~(PGO_LOCKED);
+ }
+
+ /*
+ * XXX
+ * uvn_get is currently pretty dumb about read-ahead,
+ * so right now this will only ever get the 1 page that we need.
+ */
+
+again:
+ /*
+ * XXX workaround for nfs.
+ * if we're writing, make sure that the vm system's notion
+ * of the vnode size is at least big enough to contain this write.
+ * this is because of the problem with nfs mentioned below.
+ * XXX this can happen for reading too, but there it really
+ * requires a second client.
+ */
+ if (access_type == VM_PROT_WRITE &&
+ uvn->u_size < umap->writeoff + umap->writelen) {
+ printf("ubc_fault: bumping size vp %p newsize 0x%x\n",
+ uobj, (int)(umap->writeoff + umap->writelen));
+ uvm_vnp_setsize((struct vnode *)uobj,
+ umap->writeoff + umap->writelen);
+ }
+
+ bzero(pages, sizeof pages);
+ simple_lock(&uobj->vmobjlock);
+
+UVMHIST_LOG(ubchist, "umap_offset 0x%x writeoff 0x%x writelen 0x%x u_size 0x%x",
+ (int)umap_offset, (int)umap->writeoff,
+ (int)umap->writelen, (int)uvn->u_size);
+
+ /* presumably PGO_OVERWRITE lets the pager skip reading pages
+ that will be fully overwritten -- confirm against uvn_get */
+ if (access_type == VM_PROT_WRITE &&
+ umap_offset >= umap->writeoff &&
+ (umap_offset + PAGE_SIZE <= umap->writeoff + umap->writelen ||
+ umap_offset + PAGE_SIZE >= uvn->u_size - umap->offset)) {
+UVMHIST_LOG(ubchist, "setting PGO_OVERWRITE", 0,0,0,0);
+ flags |= PGO_OVERWRITE;
+ }
Home |
Main Index |
Thread Index |
Old Index