author    Chuck Lever <cel@netapp.com>                    2006-03-20 13:44:31 -0500
committer Trond Myklebust <Trond.Myklebust@netapp.com>    2006-03-20 13:44:31 -0500
commit    bc0fb201b34b12e2d16e8cbd5bb078c1db936304
tree      58a9aa8bee2de7e42f81c522115cd10078a6a5ef
parent    487b83723ed4d4eaafd5109f36560da4f15c6578
NFS: create common routine for waiting for direct I/O to complete
We're about to add asynchrony to the NFS direct write path. Begin by
abstracting out the common pieces in the read path.

The first piece is nfs_direct_read_wait, which works the same whether the
process is waiting for a read or a write.

Test plan:
Compile kernel with CONFIG_NFS and CONFIG_NFS_DIRECTIO enabled.

Signed-off-by: Chuck Lever <cel@netapp.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
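The point of the abstraction is easiest to see from the call sites: the synchronous read path (and, once the series lands, the write path) schedules its RPCs and then blocks in the same helper, while asynchronous iocbs skip the wait and get -EIOCBQUEUED back immediately. The sketch below illustrates only that intended shape; nfs_direct_write_alloc() and nfs_direct_write_schedule() are hypothetical names standing in for write-path code that does not exist yet in this patch.

/*
 * Illustrative sketch only -- not part of this patch.  It shows how the
 * shared nfs_direct_wait() helper is meant to be reused once the write
 * path is given the same structure as the read path.  The *_write_*
 * helpers are hypothetical placeholders.
 */
static ssize_t nfs_direct_write_sketch(struct kiocb *iocb, unsigned long user_addr,
				       size_t count, loff_t file_offset)
{
	struct nfs_direct_req *dreq = nfs_direct_write_alloc(count);

	if (!dreq)
		return -ENOMEM;

	if (!is_sync_kiocb(iocb))	/* sync callers leave dreq->iocb NULL so nfs_direct_wait() blocks */
		dreq->iocb = iocb;

	nfs_direct_write_schedule(dreq, user_addr, count, file_offset);

	/* Same call as the read path: sleep interruptibly for synchronous
	 * callers, return -EIOCBQUEUED at once for asynchronous ones. */
	return nfs_direct_wait(dreq);
}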
Diffstat (limited to 'fs/nfs')
-rw-r--r--   fs/nfs/direct.c  |  57
1 file changed, 26 insertions(+), 31 deletions(-)
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 094456c3df90..2593f47eaff0 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -159,6 +159,30 @@ static void nfs_direct_req_release(struct kref *kref)
 }
 
 /*
+ * Collects and returns the final error value/byte-count.
+ */
+static ssize_t nfs_direct_wait(struct nfs_direct_req *dreq)
+{
+	int result = -EIOCBQUEUED;
+
+	/* Async requests don't wait here */
+	if (dreq->iocb)
+		goto out;
+
+	result = wait_event_interruptible(dreq->wait,
+					(atomic_read(&dreq->complete) == 0));
+
+	if (!result)
+		result = atomic_read(&dreq->error);
+	if (!result)
+		result = atomic_read(&dreq->count);
+
+out:
+	kref_put(&dreq->kref, nfs_direct_req_release);
+	return (ssize_t) result;
+}
+
+/*
  * Note we also set the number of requests we have in the dreq when we are
  * done. This prevents races with I/O completion so we will always wait
  * until all requests have been dispatched and completed.
@@ -213,7 +237,7 @@ static struct nfs_direct_req *nfs_direct_read_alloc(size_t nbytes, size_t rsize)
 /*
  * We must hold a reference to all the pages in this direct read request
  * until the RPCs complete. This could be long *after* we are woken up in
- * nfs_direct_read_wait (for instance, if someone hits ^C on a slow server).
+ * nfs_direct_wait (for instance, if someone hits ^C on a slow server).
  *
  * In addition, synchronous I/O uses a stack-allocated iocb. Thus we
  * can't trust the iocb is still valid here if this is a synchronous
@@ -315,35 +339,6 @@ static void nfs_direct_read_schedule(struct nfs_direct_req *dreq, unsigned long
 	} while (count != 0);
 }
 
-/*
- * Collects and returns the final error value/byte-count.
- */
-static ssize_t nfs_direct_read_wait(struct nfs_direct_req *dreq, int intr)
-{
-	int result = -EIOCBQUEUED;
-
-	/* Async requests don't wait here */
-	if (dreq->iocb)
-		goto out;
-
-	result = 0;
-	if (intr) {
-		result = wait_event_interruptible(dreq->wait,
-					(atomic_read(&dreq->complete) == 0));
-	} else {
-		wait_event(dreq->wait, (atomic_read(&dreq->complete) == 0));
-	}
-
-	if (!result)
-		result = atomic_read(&dreq->error);
-	if (!result)
-		result = atomic_read(&dreq->count);
-
-out:
-	kref_put(&dreq->kref, nfs_direct_req_release);
-	return (ssize_t) result;
-}
-
 static ssize_t nfs_direct_read(struct kiocb *iocb, unsigned long user_addr, size_t count, loff_t file_offset, struct page **pages, unsigned int nr_pages)
 {
 	ssize_t result;
@@ -366,7 +361,7 @@ static ssize_t nfs_direct_read(struct kiocb *iocb, unsigned long user_addr, size
 	nfs_add_stats(inode, NFSIOS_DIRECTREADBYTES, count);
 	rpc_clnt_sigmask(clnt, &oldset);
 	nfs_direct_read_schedule(dreq, user_addr, count, file_offset);
-	result = nfs_direct_read_wait(dreq, clnt->cl_intr);
+	result = nfs_direct_wait(dreq);
 	rpc_clnt_sigunmask(clnt, &oldset);
 
 	return result;
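For completeness, here is how the waiting side pairs with completion: dreq->complete is preloaded with the number of outstanding RPCs, each finished request adds its byte count to dreq->count on success or records the error in dreq->error, and the last one to complete wakes the sleeper (or completes the iocb for async callers). The sketch below paraphrases that logic; it is not part of this patch, and the function name and signature are simplified for illustration.

/*
 * Rough sketch of the completion side (paraphrased, not part of this patch).
 * The last RPC to finish either wakes the sleeper in nfs_direct_wait() or,
 * for async iocbs, reports the result via aio_complete().
 */
static void nfs_direct_read_result_sketch(struct nfs_direct_req *dreq, int status, size_t bytes)
{
	if (status >= 0)
		atomic_add(bytes, &dreq->count);	/* bytes transferred so far */
	else
		atomic_set(&dreq->error, status);	/* remember the failure */

	/* dreq->complete was preloaded with the number of outstanding RPCs */
	if (atomic_dec_and_test(&dreq->complete)) {
		if (dreq->iocb) {
			long res = atomic_read(&dreq->error);
			if (!res)
				res = atomic_read(&dreq->count);
			aio_complete(dreq->iocb, res, 0);	/* async: complete the iocb */
		} else
			wake_up(&dreq->wait);			/* sync: wake nfs_direct_wait() */
		kref_put(&dreq->kref, nfs_direct_req_release);
	}
}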