Subject: Re: genfs_putpages with PGO_SYNCIO
To: None <enami@sm.sony.co.jp>
From: YAMAMOTO Takashi <yamt@mwd.biglobe.ne.jp>
List: tech-kern
Date: 01/17/2003 23:59:40
--NextPart-20030117235344-0036900
Content-Type: Text/Plain; charset=us-ascii
Hi,
> > > how about attached one?
> > > it introduces third level nested buf so that we can get error via B_ERROR.
> >
> > Is using `struct buf' here (especially in the interface of GOP_WRITE)
> > really the right thing? Actually, only keeping error and
> > synchronizing is necessary.
>
> sure. I agree that struct buf here is overkill.
> maybe we should have stripped version of struct buf.
I made a new patch with a new struct (a stripped-down version of struct buf).
How about this one?
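
To summarize the idea, the synchronous path in genfs_putpages roughly
becomes the following (a condensed sketch of the attached diff; locking
and error handling trimmed):

	struct buf_ioprog *bip;
	int s, error;

	bip = pool_get(&bioprogpl, PR_WAITOK);
	bip->bi_flags = B_BUSY|B_WRITE;
	bip->bi_resid = 1;	/* placeholder; keeps completion from firing early */

	/* each GOP_WRITE adds its master buf's b_bcount to bip->bi_resid */
	error = GOP_WRITE(vp, pgs, npages, flags, bip);

	/* once all writes are issued, drop the placeholder... */
	s = splbio();
	bip->bi_resid--;
	if (bip->bi_resid == 0)
		bioprogdone(bip);
	splx(s);

	/* ...and wait for the whole batch to finish */
	error = bioprogwait(bip);
	pool_put(&bioprogpl, bip);

On the completion side, uvm_aio_biodone2() uses bioprogchain() to
propagate B_ERROR/bi_error and to subtract the finished bytes from
bi_resid, then calls bioprogdone() when bi_resid reaches zero, which
wakes up bioprogwait().
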
YAMAMOTO Takashi
--NextPart-20030117235344-0036900
Content-Type: text/plain; charset=us-ascii
Content-Disposition: attachment; filename="genfs.syncio4.diff"
Index: sys/buf.h
===================================================================
RCS file: /cvs/NetBSD/src/sys/sys/buf.h,v
retrieving revision 1.55
diff -u -p -r1.55 buf.h
--- sys/buf.h 2002/10/06 17:05:56 1.55
+++ sys/buf.h 2003/01/17 14:50:13
@@ -149,19 +149,31 @@ struct bio_ops {
};
/*
+ * The part of the buffer header needed to track I/O progress.
+ */
+struct buf_ioprog {
+ volatile long bi_flags; /* B_* flags. */
+ long bi_resid; /* Remaining I/O. */
+ long bi_bcount; /* Valid bytes in buffer. */
+ int bi_error; /* Errno value. */
+};
+
+/*
* The buffer header describes an I/O operation in the kernel.
*/
struct buf {
+ struct buf_ioprog b_prog; /* See above */
+#define b_flags b_prog.bi_flags
+#define b_resid b_prog.bi_resid
+#define b_bcount b_prog.bi_bcount
+#define b_error b_prog.bi_error
+
LIST_ENTRY(buf) b_hash; /* Hash chain. */
LIST_ENTRY(buf) b_vnbufs; /* Buffer's associated vnode. */
TAILQ_ENTRY(buf) b_freelist; /* Free list position if not active. */
TAILQ_ENTRY(buf) b_actq; /* Device driver queue when active. */
struct proc *b_proc; /* Associated proc if B_PHYS set. */
- volatile long b_flags; /* B_* flags. */
- int b_error; /* Errno value. */
long b_bufsize; /* Allocated buffer size. */
- long b_bcount; /* Valid bytes in buffer. */
- long b_resid; /* Remaining I/O. */
dev_t b_dev; /* Device associated with buffer. */
struct {
caddr_t b_addr; /* Memory, superblocks, indirect etc. */
@@ -180,6 +192,9 @@ struct buf {
struct workhead b_dep; /* List of filesystem dependencies. */
};
+#define BUF_BIOPROG(bp) (&(bp)->b_prog)
+#define BIOPROG_BUF(bi) ((struct buf*)(bi))
+
/*
* For portability with historic industry practice, the cylinder number has
* to be maintained in the `b_resid' field.
@@ -217,6 +232,7 @@ struct buf {
#define B_WRITE 0x00000000 /* Write buffer (pseudo flag). */
#define B_XXX 0x02000000 /* Debugging flag. */
#define B_VFLUSH 0x04000000 /* Buffer is being synced. */
+#define B_STABLE 0x08000000 /* Data should go to stable storage. */
/*
* This structure describes a clustered I/O. It is stored in the b_saveaddr
@@ -255,6 +271,7 @@ extern u_int bufpages; /* Number of mem
extern u_int nswbuf; /* Number of swap I/O buffer headers. */
extern struct pool bufpool; /* I/O buf pool */
+extern struct pool bioprogpl; /* I/O buf ioprog pool */
__BEGIN_DECLS
void allocbuf __P((struct buf *, int));
@@ -263,6 +280,11 @@ void bdirty __P((struct buf *));
void bdwrite __P((struct buf *));
void biodone __P((struct buf *));
int biowait __P((struct buf *));
+
+void bioprogdone __P((struct buf_ioprog *));
+int bioprogwait __P((struct buf_ioprog *));
+void bioprogchain __P((struct buf_ioprog *, struct buf_ioprog *));
+
int bread __P((struct vnode *, daddr_t, int,
struct ucred *, struct buf **));
int breada __P((struct vnode *, daddr_t, int, daddr_t, int,
Index: kern/vfs_bio.c
===================================================================
RCS file: /cvs/NetBSD/src/sys/kern/vfs_bio.c,v
retrieving revision 1.85
diff -u -p -r1.85 vfs_bio.c
--- kern/vfs_bio.c 2002/09/06 13:23:52 1.85
+++ kern/vfs_bio.c 2003/01/17 14:50:13
@@ -106,6 +106,7 @@ int needbuffer;
* Buffer pool for I/O buffers.
*/
struct pool bufpool;
+struct pool bioprogpl;
/*
* Insq/Remq for the buffer free lists.
@@ -159,6 +160,8 @@ bufinit()
* buffers.
*/
pool_init(&bufpool, sizeof(struct buf), 0, 0, 0, "bufpl", NULL);
+ pool_init(&bioprogpl, sizeof(struct buf_ioprog), 0, 0, 0, "bioprogpl",
+ NULL);
for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
TAILQ_INIT(dp);
@@ -490,7 +493,7 @@ brelse(bp)
/* Wake up any proceeses waiting for _this_ buffer to become free. */
if (ISSET(bp->b_flags, B_WANTED)) {
CLR(bp->b_flags, B_WANTED|B_AGE);
- wakeup(bp);
+ wakeup(BUF_BIOPROG(bp));
}
/*
@@ -628,8 +631,8 @@ start:
return NULL;
}
SET(bp->b_flags, B_WANTED);
- err = tsleep(bp, slpflag | (PRIBIO + 1), "getblk",
- slptimeo);
+ err = tsleep(BUF_BIOPROG(bp), slpflag | (PRIBIO + 1),
+ "getblk", slptimeo);
splx(s);
if (err)
return (NULL);
@@ -849,21 +852,8 @@ int
biowait(bp)
struct buf *bp;
{
- int s;
-
- s = splbio();
- while (!ISSET(bp->b_flags, B_DONE | B_DELWRI))
- tsleep(bp, PRIBIO + 1, "biowait", 0);
- splx(s);
- /* check for interruption of I/O (e.g. via NFS), then errors. */
- if (ISSET(bp->b_flags, B_EINTR)) {
- CLR(bp->b_flags, B_EINTR);
- return (EINTR);
- } else if (ISSET(bp->b_flags, B_ERROR))
- return (bp->b_error ? bp->b_error : EIO);
- else
- return (0);
+ return bioprogwait(BUF_BIOPROG(bp));
}
/*
@@ -888,9 +878,7 @@ biodone(bp)
{
int s = splbio();
- if (ISSET(bp->b_flags, B_DONE))
- panic("biodone already");
- SET(bp->b_flags, B_DONE); /* note that it's done */
+ bioprogdone(BUF_BIOPROG(bp));
if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_complete)
(*bioops.io_complete)(bp);
@@ -900,17 +888,67 @@ biodone(bp)
if (ISSET(bp->b_flags, B_CALL)) { /* if necessary, call out */
CLR(bp->b_flags, B_CALL); /* but note callout done */
+ KASSERT(bp->b_iodone);
(*bp->b_iodone)(bp);
} else {
if (ISSET(bp->b_flags, B_ASYNC)) /* if async, release */
brelse(bp);
- else { /* or just wakeup the buffer */
+ else { /* or just clear wanted */
CLR(bp->b_flags, B_WANTED);
- wakeup(bp);
}
}
splx(s);
+}
+
+void
+bioprogdone(bip)
+ struct buf_ioprog *bip;
+{
+ KASSERT(ISSET(bip->bi_flags, B_BUSY));
+ KASSERT(!ISSET(bip->bi_flags, B_DONE));
+
+ SET(bip->bi_flags, B_DONE); /* note that it's done */
+
+ if (!ISSET(bip->bi_flags, B_CALL | B_ASYNC)) {
+ wakeup(bip);
+ }
+}
+
+int
+bioprogwait(bip)
+ struct buf_ioprog *bip;
+{
+ int s;
+
+ s = splbio();
+ while (!ISSET(bip->bi_flags, B_DONE | B_DELWRI))
+ tsleep(bip, PRIBIO + 1, "biowait", 0);
+ splx(s);
+
+ /* check for interruption of I/O (e.g. via NFS), then errors. */
+ if (ISSET(bip->bi_flags, B_EINTR)) {
+ CLR(bip->bi_flags, B_EINTR);
+ return (EINTR);
+ } else if (ISSET(bip->bi_flags, B_ERROR))
+ return (bip->bi_error ? bip->bi_error : EIO);
+ else
+ return (0);
+}
+
+void
+bioprogchain(mbip, bip)
+ struct buf_ioprog *mbip;
+ struct buf_ioprog *bip;
+{
+
+ KASSERT(mbip != bip);
+ if (bip->bi_flags & B_ERROR) {
+ mbip->bi_flags |= B_ERROR;
+ mbip->bi_error = bip->bi_error;
+ }
+ mbip->bi_resid -= bip->bi_bcount;
+ KASSERT(mbip->bi_resid >= 0);
}
/*
Index: miscfs/genfs/genfs_node.h
===================================================================
RCS file: /cvs/NetBSD/src/sys/miscfs/genfs/genfs_node.h,v
retrieving revision 1.3
diff -u -p -r1.3 genfs_node.h
--- miscfs/genfs/genfs_node.h 2001/12/18 07:49:36 1.3
+++ miscfs/genfs/genfs_node.h 2003/01/17 14:50:13
@@ -38,15 +38,16 @@ struct vm_page;
struct genfs_ops {
void (*gop_size)(struct vnode *, off_t, off_t *);
int (*gop_alloc)(struct vnode *, off_t, off_t, int, struct ucred *);
- int (*gop_write)(struct vnode *, struct vm_page **, int, int);
+ int (*gop_write)(struct vnode *, struct vm_page **, int, int,
+ struct buf_ioprog *);
};
#define GOP_SIZE(vp, size, eobp) \
(*VTOG(vp)->g_op->gop_size)((vp), (size), (eobp))
#define GOP_ALLOC(vp, off, len, flags, cred) \
(*VTOG(vp)->g_op->gop_alloc)((vp), (off), (len), (flags), (cred))
-#define GOP_WRITE(vp, pgs, npages, flags) \
- (*VTOG(vp)->g_op->gop_write)((vp), (pgs), (npages), (flags))
+#define GOP_WRITE(vp, pgs, npages, flags, bip) \
+ (*VTOG(vp)->g_op->gop_write)((vp), (pgs), (npages), (flags), (bip))
struct genfs_node {
struct genfs_ops *g_op; /* ops vector */
@@ -57,7 +58,9 @@ struct genfs_node {
void genfs_size(struct vnode *, off_t, off_t *);
void genfs_node_init(struct vnode *, struct genfs_ops *);
-int genfs_gop_write(struct vnode *, struct vm_page **, int, int);
-int genfs_compat_gop_write(struct vnode *, struct vm_page **, int, int);
+int genfs_gop_write(struct vnode *, struct vm_page **, int, int,
+ struct buf_ioprog *);
+int genfs_compat_gop_write(struct vnode *, struct vm_page **, int, int,
+ struct buf_ioprog *);
#endif /* _MISCFS_GENFS_GENFS_NODE_H_ */
Index: miscfs/genfs/genfs_vnops.c
===================================================================
RCS file: /cvs/NetBSD/src/sys/miscfs/genfs/genfs_vnops.c,v
retrieving revision 1.68
diff -u -p -r1.68 genfs_vnops.c
--- miscfs/genfs/genfs_vnops.c 2002/11/15 14:01:57 1.68
+++ miscfs/genfs/genfs_vnops.c 2003/01/17 14:50:13
@@ -1024,6 +1024,7 @@ genfs_putpages(void *v)
struct vnode *vp = ap->a_vp;
struct uvm_object *uobj = &vp->v_uobj;
struct simplelock *slock = &uobj->vmobjlock;
+ struct buf_ioprog *bip = NULL;
off_t startoff = ap->a_offlo;
off_t endoff = ap->a_offhi;
off_t off;
@@ -1035,6 +1036,7 @@ genfs_putpages(void *v)
boolean_t wasclean, by_list, needs_clean, yield;
boolean_t async = (flags & PGO_SYNCIO) == 0;
boolean_t pagedaemon = curproc == uvm.pagedaemon_proc;
+ boolean_t metbusy = FALSE;
UVMHIST_FUNC("genfs_putpages"); UVMHIST_CALLED(ubchist);
KASSERT(flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE));
@@ -1102,18 +1104,23 @@ genfs_putpages(void *v)
KASSERT(pg == NULL ||
(pg->flags & (PG_RELEASED|PG_PAGEOUT)) == 0 ||
(pg->flags & PG_BUSY) != 0);
+
if (by_list) {
if (pg == &endmp) {
break;
}
if (pg->offset < startoff || pg->offset >= endoff ||
pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
+ if (pg->flags & (PG_RELEASED|PG_PAGEOUT))
+ metbusy = TRUE;
pg = TAILQ_NEXT(pg, listq);
continue;
}
off = pg->offset;
} else if (pg == NULL ||
pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
+ if (pg != NULL)
+ metbusy = TRUE;
off += PAGE_SIZE;
if (off < endoff) {
pg = uvm_pagelookup(uobj, off);
@@ -1275,13 +1282,26 @@ genfs_putpages(void *v)
* start the i/o. if we're traversing by list,
* keep our place in the list with a marker page.
*/
+ if (!async && bip == NULL) {
+ bip = pool_get(&bioprogpl, PR_WAITOK);
+ bip->bi_flags = B_BUSY|B_WRITE; /* XXX */
+ /*
+ * no need to set bi_bcount.
+ */
+ /*
+ * set bi_resid to 1 now so that i/o completion
+ * can't occur before we issue all writes.
+ * see below.
+ */
+ bip->bi_resid = 1;
+ }
if (by_list) {
TAILQ_INSERT_AFTER(&uobj->memq, pg, &curmp,
listq);
}
simple_unlock(slock);
- error = GOP_WRITE(vp, pgs, npages, flags);
+ error = GOP_WRITE(vp, pgs, npages, flags, bip);
simple_lock(slock);
if (by_list) {
pg = TAILQ_NEXT(&curmp, listq);
@@ -1332,23 +1352,54 @@ genfs_putpages(void *v)
vp->v_flag &= ~VONWORKLST;
LIST_REMOVE(vp, v_synclist);
}
- splx(s);
- if (!wasclean && !async) {
- s = splbio();
+ if (bip != NULL && metbusy) {
+ KASSERT(!async);
+ KASSERT(!wasclean);
+
+ /*
+ * If we encountered pages being paged out by other threads,
+ * we should wait for all outstanding writes on this vnode.
+ */
while (vp->v_numoutput != 0) {
vp->v_flag |= VBWAIT;
- UVM_UNLOCK_AND_WAIT(&vp->v_numoutput, slock, FALSE,
- "genput2", 0);
+ UVM_UNLOCK_AND_WAIT(&vp->v_numoutput, slock,
+ FALSE, "genput2", 0);
simple_lock(slock);
}
+ /*
+ * Since we drained all writes for this vnode,
+ * ours have completed as well.
+ */
+ KASSERT(bip->bi_resid == 1);
+ }
+ splx(s);
+ simple_unlock(slock);
+ if (bip != NULL) {
+ KASSERT(!async);
+ KASSERT(!wasclean);
+ KASSERT(bip->bi_resid >= 1);
+
+ s = splbio();
+ /*
+ * Decrement bi_resid now so that it represents
+ * the actual number of I/O bytes we issued.
+ *
+ * Note that we initialized it to 1 in the first place.
+ */
+ bip->bi_resid--;
+ if (bip->bi_resid == 0)
+ bioprogdone(bip);
splx(s);
+
+ error = bioprogwait(bip);
+ pool_put(&bioprogpl, bip);
}
- simple_unlock(&uobj->vmobjlock);
return (error);
}
int
-genfs_gop_write(struct vnode *vp, struct vm_page **pgs, int npages, int flags)
+genfs_gop_write(struct vnode *vp, struct vm_page **pgs, int npages, int flags,
+ struct buf_ioprog *bip)
{
int s, error, run;
int fs_bshift, dev_bshift;
@@ -1361,6 +1412,7 @@ genfs_gop_write(struct vnode *vp, struct
struct vnode *devvp;
boolean_t async = (flags & PGO_SYNCIO) == 0;
UVMHIST_FUNC("genfs_gop_write"); UVMHIST_CALLED(ubchist);
+ KASSERT((async && bip == NULL) || (!async && bip != NULL));
UVMHIST_LOG(ubchist, "vp %p pgs %p npages %d flags 0x%x",
vp, pgs, npages, flags);
@@ -1392,10 +1444,19 @@ genfs_gop_write(struct vnode *vp, struct
mbp->b_bufsize = npages << PAGE_SHIFT;
mbp->b_data = (void *)kva;
mbp->b_resid = mbp->b_bcount = bytes;
- mbp->b_flags = B_BUSY|B_WRITE|B_AGE| (async ? (B_CALL|B_ASYNC) : 0);
- mbp->b_iodone = uvm_aio_biodone;
+ mbp->b_flags = B_BUSY|B_WRITE|B_AGE|B_CALL|B_ASYNC;
mbp->b_vp = vp;
LIST_INIT(&mbp->b_dep);
+ if (bip) {
+ mbp->b_iodone = uvm_aio_biodone2;
+ mbp->b_private = bip;
+ s = splbio();
+ bip->bi_resid += mbp->b_bcount;
+ splx(s);
+ }
+ else {
+ mbp->b_iodone = uvm_aio_biodone;
+ }
bp = NULL;
for (offset = startoffset;
@@ -1432,11 +1493,11 @@ genfs_gop_write(struct vnode *vp, struct
bp->b_resid = bp->b_bcount = iobytes;
bp->b_flags = B_BUSY|B_WRITE|B_CALL|B_ASYNC;
bp->b_iodone = uvm_aio_biodone1;
+ bp->b_private = mbp;
bp->b_vp = vp;
LIST_INIT(&bp->b_dep);
}
bp->b_lblkno = 0;
- bp->b_private = mbp;
if (devvp->v_type == VBLK) {
bp->b_dev = devvp->v_rdev;
}
@@ -1444,6 +1505,9 @@ genfs_gop_write(struct vnode *vp, struct
/* adjust physical blkno for partial blocks */
bp->b_blkno = blkno + ((offset - ((off_t)lbn << fs_bshift)) >>
dev_bshift);
+ if (!async) {
+ bp->b_flags |= B_STABLE;
+ }
UVMHIST_LOG(ubchist,
"vp %p offset 0x%x bcount 0x%x blkno 0x%x",
vp, offset, bp->b_bcount, bp->b_blkno);
@@ -1466,9 +1530,10 @@ genfs_gop_write(struct vnode *vp, struct
UVMHIST_LOG(ubchist, "returning 0 (async)", 0,0,0,0);
return (0);
}
- UVMHIST_LOG(ubchist, "waiting for mbp %p", mbp,0,0,0);
- error = biowait(mbp);
- uvm_aio_aiodone(mbp);
+ /*
+ * We don't have to wait for I/O completions even for PGO_SYNCIO
+ * because genfs_putpages will wait for us.
+ */
UVMHIST_LOG(ubchist, "returning, error %d", error,0,0,0);
return (error);
}
@@ -1601,9 +1666,10 @@ genfs_compat_getpages(void *v)
return (error);
}
+/* ARGSUSED */
int
genfs_compat_gop_write(struct vnode *vp, struct vm_page **pgs, int npages,
- int flags)
+ int flags, struct buf_ioprog *bip)
{
off_t offset;
struct iovec iov;
Index: uvm/uvm_extern.h
===================================================================
RCS file: /cvs/NetBSD/src/sys/uvm/uvm_extern.h,v
retrieving revision 1.75
diff -u -p -r1.75 uvm_extern.h
--- uvm/uvm_extern.h 2002/12/11 07:10:20 1.75
+++ uvm/uvm_extern.h 2003/01/17 14:50:13
@@ -660,6 +660,7 @@ void uvm_setpagesize __P((void));
/* uvm_pager.c */
void uvm_aio_biodone1 __P((struct buf *));
+void uvm_aio_biodone2 __P((struct buf *));
void uvm_aio_biodone __P((struct buf *));
void uvm_aio_aiodone __P((struct buf *));
Index: uvm/uvm_pager.c
===================================================================
RCS file: /cvs/NetBSD/src/sys/uvm/uvm_pager.c,v
retrieving revision 1.59
diff -u -p -r1.59 uvm_pager.c
--- uvm/uvm_pager.c 2002/11/09 20:09:52 1.59
+++ uvm/uvm_pager.c 2003/01/17 14:50:13
@@ -248,17 +248,30 @@ uvm_aio_biodone1(bp)
struct buf *bp;
{
struct buf *mbp = bp->b_private;
+ struct buf_ioprog *mbip = BUF_BIOPROG(mbp);
- KASSERT(mbp != bp);
- if (bp->b_flags & B_ERROR) {
- mbp->b_flags |= B_ERROR;
- mbp->b_error = bp->b_error;
- }
- mbp->b_resid -= bp->b_bcount;
- pool_put(&bufpool, bp);
- if (mbp->b_resid == 0) {
+ bioprogchain(mbip, BUF_BIOPROG(bp));
+ if (mbip->bi_resid == 0)
biodone(mbp);
- }
+ pool_put(&bufpool, bp);
+}
+
+/*
+ * interrupt-context iodone handler for middle-level bufs of nested i/o.
+ *
+ * => must be at splbio().
+ */
+
+void
+uvm_aio_biodone2(bp)
+ struct buf *bp;
+{
+ struct buf_ioprog *mbip = bp->b_private;
+
+ bioprogchain(mbip, BUF_BIOPROG(bp));
+ if (mbip->bi_resid == 0)
+ bioprogdone(mbip);
+ uvm_aio_biodone(bp);
}
/*
Index: nfs/nfs_bio.c
===================================================================
RCS file: /cvs/NetBSD/src/sys/nfs/nfs_bio.c,v
retrieving revision 1.85
diff -u -p -r1.85 nfs_bio.c
--- nfs/nfs_bio.c 2002/10/29 10:15:16 1.85
+++ nfs/nfs_bio.c 2003/01/17 14:50:13
@@ -989,7 +989,7 @@ nfs_doio(bp, p)
struct vm_page *pgs[npages];
boolean_t needcommit = TRUE;
- if ((bp->b_flags & B_ASYNC) != 0 && NFS_ISV3(vp)) {
+ if ((bp->b_flags & B_STABLE) == 0 && NFS_ISV3(vp)) {
iomode = NFSV3WRITE_UNSTABLE;
} else {
iomode = NFSV3WRITE_FILESYNC;
Index: nfs/nfs_node.c
===================================================================
RCS file: /cvs/NetBSD/src/sys/nfs/nfs_node.c,v
retrieving revision 1.56
diff -u -p -r1.56 nfs_node.c
--- nfs/nfs_node.c 2002/12/01 23:02:10 1.56
+++ nfs/nfs_node.c 2003/01/17 14:50:13
@@ -79,7 +79,8 @@ extern int prtactive;
void nfs_gop_size(struct vnode *, off_t, off_t *);
int nfs_gop_alloc(struct vnode *, off_t, off_t, int, struct ucred *);
-int nfs_gop_write(struct vnode *, struct vm_page **, int, int);
+int nfs_gop_write(struct vnode *, struct vm_page **, int, int,
+ struct buf_ioprog *);
struct genfs_ops nfs_genfsops = {
nfs_gop_size,
@@ -323,12 +324,13 @@ nfs_gop_alloc(struct vnode *vp, off_t of
}
int
-nfs_gop_write(struct vnode *vp, struct vm_page **pgs, int npages, int flags)
+nfs_gop_write(struct vnode *vp, struct vm_page **pgs, int npages, int flags,
+ struct buf_ioprog *bip)
{
int i;
for (i = 0; i < npages; i++) {
pmap_page_protect(pgs[i], VM_PROT_READ);
}
- return genfs_gop_write(vp, pgs, npages, flags);
+ return genfs_gop_write(vp, pgs, npages, flags, bip);
}
Index: nfs/nfsnode.h
===================================================================
RCS file: /cvs/NetBSD/src/sys/nfs/nfsnode.h,v
retrieving revision 1.37
diff -u -p -r1.37 nfsnode.h
--- nfs/nfsnode.h 2002/12/01 23:02:11 1.37
+++ nfs/nfsnode.h 2003/01/17 14:50:13
@@ -244,7 +244,8 @@ int nfs_truncate __P((void *));
int nfs_update __P((void *));
int nfs_getpages __P((void *));
int nfs_putpages __P((void *));
-int nfs_gop_write(struct vnode *, struct vm_page **, int, int);
+int nfs_gop_write(struct vnode *, struct vm_page **, int, int,
+ struct buf_ioprog *);
int nfs_kqfilter __P((void *));
extern int (**nfsv2_vnodeop_p) __P((void *));
--NextPart-20030117235344-0036900--