author	Linus Torvalds <torvalds@linux-foundation.org>	2012-01-10 14:57:40 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-01-10 14:57:40 -0800
commit	57eccf1c2acae2fcb748730881ba75643fc31c81 (patch)
tree	be47ac42ef0b2e3e7157ce196ad2ed1224739c6c	/fs/nfs/nfs4proc.c
parent	5c395ae7033099fc657114ea997858aa622f08b2 (diff)
parent	074b1d12fe2500d7d453902f9266e6674b30d84c (diff)
Merge branch 'nfs-for-3.3' of git://git.linux-nfs.org/projects/trondmy/linux-nfs
* 'nfs-for-3.3' of git://git.linux-nfs.org/projects/trondmy/linux-nfs:
  NFSv4: Change the default setting of the nfs4_disable_idmapping parameter
  NFSv4: Save the owner/group name string when doing open
  NFS: Remove pNFS bloat from the generic write path
  pnfs-obj: Must return layout on IO error
  pnfs-obj: pNFS errors are communicated on iodata->pnfs_error
  NFS: Cache state owners after files are closed
  NFS: Clean up nfs4_find_state_owners_locked()
  NFSv4: include bitmap in nfsv4 get acl data
  nfs: fix a minor do_div portability issue
  NFSv4.1: cleanup comment and debug printk
  NFSv4.1: change nfs4_free_slot parameters for dynamic slots
  NFSv4.1: cleanup init and reset of session slot tables
  NFSv4.1: fix backchannel slotid off-by-one bug
  nfs: fix regression in handling of context= option in NFSv4
  NFS - fix recent breakage to NFS error handling.
  NFS: Retry mounting NFSROOT
  SUNRPC: Clean up the RPCSEC_GSS service ticket requests
Diffstat (limited to 'fs/nfs/nfs4proc.c')
-rw-r--r--	fs/nfs/nfs4proc.c	177
1 file changed, 93 insertions(+), 84 deletions(-)
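
One of the merged changes ("NFSv4: include bitmap in nfsv4 get acl data") reworks __nfs4_get_acl_uncached() around the two-call getxattr(2) convention explained in the comment added below: probe with a zero-length buffer to learn the ACL size, then fetch with a buffer of that size. A minimal userspace sketch of that pattern, not part of the patch; the system.nfs4_acl attribute name is the one the NFS client exposes, and the error handling is purely illustrative:

#include <stdio.h>
#include <stdlib.h>
#include <sys/xattr.h>

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : ".";

	/* Probe call: a zero-size buffer returns the required length. */
	ssize_t len = getxattr(path, "system.nfs4_acl", NULL, 0);
	if (len < 0) {
		perror("getxattr (probe)");
		return 1;
	}

	char *buf = malloc(len);
	if (buf == NULL)
		return 1;

	/* Second call with a right-sized buffer; after this patch the
	 * kernel can often answer it from the ACL data cached during
	 * the probe round trip. */
	ssize_t got = getxattr(path, "system.nfs4_acl", buf, (size_t)len);
	if (got < 0) {
		perror("getxattr");
		free(buf);
		return 1;
	}

	printf("nfs4 acl: %zd bytes\n", got);
	free(buf);
	return 0;
}
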
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index dcda0ba7af60..75366dc89686 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -52,6 +52,7 @@
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/module.h>
+#include <linux/nfs_idmap.h>
#include <linux/sunrpc/bc_xprt.h>
#include <linux/xattr.h>
#include <linux/utsname.h>
@@ -364,9 +365,8 @@ static void renew_lease(const struct nfs_server *server, unsigned long timestamp
* Must be called while holding tbl->slot_tbl_lock
*/
static void
-nfs4_free_slot(struct nfs4_slot_table *tbl, struct nfs4_slot *free_slot)
+nfs4_free_slot(struct nfs4_slot_table *tbl, u8 free_slotid)
{
- int free_slotid = free_slot - tbl->slots;
int slotid = free_slotid;
BUG_ON(slotid < 0 || slotid >= NFS4_MAX_SLOT_TABLE);
@@ -431,7 +431,7 @@ static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
}
spin_lock(&tbl->slot_tbl_lock);
- nfs4_free_slot(tbl, res->sr_slot);
+ nfs4_free_slot(tbl, res->sr_slot - tbl->slots);
nfs4_check_drain_fc_complete(res->sr_session);
spin_unlock(&tbl->slot_tbl_lock);
res->sr_slot = NULL;
@@ -554,13 +554,10 @@ int nfs41_setup_sequence(struct nfs4_session *session,
spin_lock(&tbl->slot_tbl_lock);
if (test_bit(NFS4_SESSION_DRAINING, &session->session_state) &&
!rpc_task_has_priority(task, RPC_PRIORITY_PRIVILEGED)) {
- /*
- * The state manager will wait until the slot table is empty.
- * Schedule the reset thread
- */
+ /* The state manager will wait until the slot table is empty */
rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
spin_unlock(&tbl->slot_tbl_lock);
- dprintk("%s Schedule Session Reset\n", __func__);
+ dprintk("%s session is draining\n", __func__);
return -EAGAIN;
}
@@ -765,6 +762,8 @@ struct nfs4_opendata {
struct nfs_openres o_res;
struct nfs_open_confirmargs c_arg;
struct nfs_open_confirmres c_res;
+ struct nfs4_string owner_name;
+ struct nfs4_string group_name;
struct nfs_fattr f_attr;
struct nfs_fattr dir_attr;
struct dentry *dir;
@@ -788,6 +787,7 @@ static void nfs4_init_opendata_res(struct nfs4_opendata *p)
p->o_res.server = p->o_arg.server;
nfs_fattr_init(&p->f_attr);
nfs_fattr_init(&p->dir_attr);
+ nfs_fattr_init_names(&p->f_attr, &p->owner_name, &p->group_name);
}
static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
@@ -819,6 +819,7 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
p->o_arg.name = &dentry->d_name;
p->o_arg.server = server;
p->o_arg.bitmask = server->attr_bitmask;
+ p->o_arg.dir_bitmask = server->cache_consistency_bitmask;
p->o_arg.claim = NFS4_OPEN_CLAIM_NULL;
if (flags & O_CREAT) {
u32 *s;
@@ -855,6 +856,7 @@ static void nfs4_opendata_free(struct kref *kref)
dput(p->dir);
dput(p->dentry);
nfs_sb_deactive(sb);
+ nfs_fattr_free_names(&p->f_attr);
kfree(p);
}
@@ -1579,6 +1581,8 @@ static int _nfs4_recover_proc_open(struct nfs4_opendata *data)
if (status != 0 || !data->rpc_done)
return status;
+ nfs_fattr_map_and_free_names(NFS_SERVER(dir), &data->f_attr);
+
nfs_refresh_inode(dir, o_res->dir_attr);
if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
@@ -1611,6 +1615,8 @@ static int _nfs4_proc_open(struct nfs4_opendata *data)
return status;
}
+ nfs_fattr_map_and_free_names(server, &data->f_attr);
+
if (o_arg->open_flags & O_CREAT) {
update_changeattr(dir, &o_res->cinfo);
nfs_post_op_update_inode(dir, o_res->dir_attr);
@@ -3431,19 +3437,6 @@ static inline int nfs4_server_supports_acls(struct nfs_server *server)
*/
#define NFS4ACL_MAXPAGES (XATTR_SIZE_MAX >> PAGE_CACHE_SHIFT)
-static void buf_to_pages(const void *buf, size_t buflen,
- struct page **pages, unsigned int *pgbase)
-{
- const void *p = buf;
-
- *pgbase = offset_in_page(buf);
- p -= *pgbase;
- while (p < buf + buflen) {
- *(pages++) = virt_to_page(p);
- p += PAGE_CACHE_SIZE;
- }
-}
-
static int buf_to_pages_noslab(const void *buf, size_t buflen,
struct page **pages, unsigned int *pgbase)
{
@@ -3540,9 +3533,19 @@ out:
nfs4_set_cached_acl(inode, acl);
}
+/*
+ * The getxattr API returns the required buffer length when called with a
+ * NULL buf. The NFSv4 acl tool then calls getxattr again after allocating
+ * the required buf. On a NULL buf, we send a page of data to the server
+ * guessing that the ACL request can be serviced by a page. If so, we cache
+ * up to the page of ACL data, and the 2nd call to getxattr is serviced by
+ * the cache. If not so, we throw away the page, and cache the required
+ * length. The next getxattr call will then produce another round trip to
+ * the server, this time with the input buf of the required size.
+ */
static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
{
- struct page *pages[NFS4ACL_MAXPAGES];
+ struct page *pages[NFS4ACL_MAXPAGES] = {NULL, };
struct nfs_getaclargs args = {
.fh = NFS_FH(inode),
.acl_pages = pages,
@@ -3557,41 +3560,60 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu
.rpc_argp = &args,
.rpc_resp = &res,
};
- struct page *localpage = NULL;
- int ret;
+ int ret = -ENOMEM, npages, i, acl_len = 0;
- if (buflen < PAGE_SIZE) {
- /* As long as we're doing a round trip to the server anyway,
- * let's be prepared for a page of acl data. */
- localpage = alloc_page(GFP_KERNEL);
- resp_buf = page_address(localpage);
- if (localpage == NULL)
- return -ENOMEM;
- args.acl_pages[0] = localpage;
- args.acl_pgbase = 0;
- args.acl_len = PAGE_SIZE;
- } else {
- resp_buf = buf;
- buf_to_pages(buf, buflen, args.acl_pages, &args.acl_pgbase);
+ npages = (buflen + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ /* As long as we're doing a round trip to the server anyway,
+ * let's be prepared for a page of acl data. */
+ if (npages == 0)
+ npages = 1;
+
+ for (i = 0; i < npages; i++) {
+ pages[i] = alloc_page(GFP_KERNEL);
+ if (!pages[i])
+ goto out_free;
}
- ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0);
+ if (npages > 1) {
+ /* for decoding across pages */
+ args.acl_scratch = alloc_page(GFP_KERNEL);
+ if (!args.acl_scratch)
+ goto out_free;
+ }
+ args.acl_len = npages * PAGE_SIZE;
+ args.acl_pgbase = 0;
+ /* Let decode_getfacl know not to fail if the ACL data is larger than
+ * the page we send as a guess */
+ if (buf == NULL)
+ res.acl_flags |= NFS4_ACL_LEN_REQUEST;
+ resp_buf = page_address(pages[0]);
+
+ dprintk("%s buf %p buflen %ld npages %d args.acl_len %ld\n",
+ __func__, buf, buflen, npages, args.acl_len);
+ ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode),
+ &msg, &args.seq_args, &res.seq_res, 0);
if (ret)
goto out_free;
- if (res.acl_len > args.acl_len)
- nfs4_write_cached_acl(inode, NULL, res.acl_len);
+
+ acl_len = res.acl_len - res.acl_data_offset;
+ if (acl_len > args.acl_len)
+ nfs4_write_cached_acl(inode, NULL, acl_len);
else
- nfs4_write_cached_acl(inode, resp_buf, res.acl_len);
+ nfs4_write_cached_acl(inode, resp_buf + res.acl_data_offset,
+ acl_len);
if (buf) {
ret = -ERANGE;
- if (res.acl_len > buflen)
+ if (acl_len > buflen)
goto out_free;
- if (localpage)
- memcpy(buf, resp_buf, res.acl_len);
+ _copy_from_pages(buf, pages, res.acl_data_offset,
+ res.acl_len);
}
- ret = res.acl_len;
+ ret = acl_len;
out_free:
- if (localpage)
- __free_page(localpage);
+ for (i = 0; i < npages; i++)
+ if (pages[i])
+ __free_page(pages[i]);
+ if (args.acl_scratch)
+ __free_page(args.acl_scratch);
return ret;
}
@@ -3622,6 +3644,8 @@ static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen)
nfs_zap_acl_cache(inode);
ret = nfs4_read_cached_acl(inode, buf, buflen);
if (ret != -ENOENT)
+ /* -ENOENT is returned if there is no ACL or if there is an ACL
+ * but no cached acl data, just the acl length */
return ret;
return nfs4_get_acl_uncached(inode, buf, buflen);
}
@@ -5022,23 +5046,6 @@ out:
return ret;
}
-/*
- * Reset the forechannel and backchannel slot tables
- */
-static int nfs4_reset_slot_tables(struct nfs4_session *session)
-{
- int status;
-
- status = nfs4_reset_slot_table(&session->fc_slot_table,
- session->fc_attrs.max_reqs, 1);
- if (status)
- return status;
-
- status = nfs4_reset_slot_table(&session->bc_slot_table,
- session->bc_attrs.max_reqs, 0);
- return status;
-}
-
/* Destroy the slot table */
static void nfs4_destroy_slot_tables(struct nfs4_session *session)
{
@@ -5084,29 +5091,35 @@ out:
}
/*
- * Initialize the forechannel and backchannel tables
+ * Initialize or reset the forechannel and backchannel tables
*/
-static int nfs4_init_slot_tables(struct nfs4_session *session)
+static int nfs4_setup_session_slot_tables(struct nfs4_session *ses)
{
struct nfs4_slot_table *tbl;
- int status = 0;
+ int status;
- tbl = &session->fc_slot_table;
+ dprintk("--> %s\n", __func__);
+ /* Fore channel */
+ tbl = &ses->fc_slot_table;
if (tbl->slots == NULL) {
- status = nfs4_init_slot_table(tbl,
- session->fc_attrs.max_reqs, 1);
+ status = nfs4_init_slot_table(tbl, ses->fc_attrs.max_reqs, 1);
+ if (status) /* -ENOMEM */
+ return status;
+ } else {
+ status = nfs4_reset_slot_table(tbl, ses->fc_attrs.max_reqs, 1);
if (status)
return status;
}
-
- tbl = &session->bc_slot_table;
+ /* Back channel */
+ tbl = &ses->bc_slot_table;
if (tbl->slots == NULL) {
- status = nfs4_init_slot_table(tbl,
- session->bc_attrs.max_reqs, 0);
+ status = nfs4_init_slot_table(tbl, ses->bc_attrs.max_reqs, 0);
if (status)
- nfs4_destroy_slot_tables(session);
- }
-
+ /* Fore and back channel share a connection so get
+ * both slot tables or neither */
+ nfs4_destroy_slot_tables(ses);
+ } else
+ status = nfs4_reset_slot_table(tbl, ses->bc_attrs.max_reqs, 0);
return status;
}
@@ -5294,13 +5307,9 @@ int nfs4_proc_create_session(struct nfs_client *clp)
if (status)
goto out;
- /* Init and reset the fore channel */
- status = nfs4_init_slot_tables(session);
- dprintk("slot table initialization returned %d\n", status);
- if (status)
- goto out;
- status = nfs4_reset_slot_tables(session);
- dprintk("slot table reset returned %d\n", status);
+ /* Init or reset the session slot tables */
+ status = nfs4_setup_session_slot_tables(session);
+ dprintk("slot table setup returned %d\n", status);
if (status)
goto out;