author     Linus Torvalds <torvalds@linux-foundation.org>   2017-03-02 15:16:38 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>   2017-03-02 15:16:38 -0800
commit     69fd110eb650ea7baa82158f3b89a7d86da1d056 (patch)
tree       091e4e8e5863654042638d4165eecdc856bc2bff /net
parent     821fd6f6cb6500cd04a6c7e8f701f9b311a5c2b3 (diff)
parent     4038a2a37e3595c299aecdaa20cb01ceb9c78303 (diff)
Merge branch 'work.sendmsg' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
Pull vfs sendmsg updates from Al Viro:
"More sendmsg work.
This is fairly separate, isolated stuff (there's a continuation
around lustre, but that one was too late to soak in -next), thus the
separate pull request"
* 'work.sendmsg' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs:
ncpfs: switch to sock_sendmsg()
ncpfs: don't mess with manually advancing iovec on send
ncpfs: sendmsg does *not* bugger iovec these days
ceph_tcp_sendpage(): use ITER_BVEC sendmsg
afs_send_pages(): use ITER_BVEC
rds: remove dead code
ceph: switch to sock_recvmsg()
usbip_recv(): switch to sock_recvmsg()
iscsi_target: deal with short writes on the tx side
[nbd] pass iov_iter to nbd_xmit()
[nbd] switch sock_xmit() to sock_{send,recv}msg()
[drbd] use sock_sendmsg()
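For orientation, here is a minimal sketch (not part of the series) of the kvec-side pattern these conversions move to: point msg.msg_iter at the kernel buffer with iov_iter_kvec() and call sock_recvmsg(), instead of going through kernel_recvmsg(). It assumes the 4.11-era iov_iter API, where the data direction is OR'ed with the iterator type; example_recv_buf() is a hypothetical name mirroring the ceph_tcp_recvmsg() hunk further down.

#include <linux/net.h>
#include <linux/socket.h>
#include <linux/uio.h>

/* Hypothetical helper mirroring the converted ceph_tcp_recvmsg(). */
static int example_recv_buf(struct socket *sock, void *buf, size_t len)
{
	struct kvec iov = { .iov_base = buf, .iov_len = len };
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
	int r;

	/* Describe the kernel buffer to the socket layer via msg.msg_iter. */
	iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &iov, 1, len);
	r = sock_recvmsg(sock, &msg, msg.msg_flags);

	/* Nonblocking socket: -EAGAIN just means "nothing there yet". */
	return r == -EAGAIN ? 0 : r;
}

The send side has the same shape: build the iterator with WRITE | ITER_KVEC (or ITER_BVEC for page-backed data) and call sock_sendmsg(sock, &msg).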
Diffstat (limited to 'net')
-rw-r--r--  net/ceph/messenger.c | 44
-rw-r--r--  net/rds/page.c       | 29
-rw-r--r--  net/rds/rds.h        |  7
3 files changed, 29 insertions(+), 51 deletions(-)
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index bad3d4ae43f6..38dcf1eb427d 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -520,7 +520,8 @@ static int ceph_tcp_recvmsg(struct socket *sock, void *buf, size_t len)
 	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
 	int r;
 
-	r = kernel_recvmsg(sock, &msg, &iov, 1, len, msg.msg_flags);
+	iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &iov, 1, len);
+	r = sock_recvmsg(sock, &msg, msg.msg_flags);
 	if (r == -EAGAIN)
 		r = 0;
 	return r;
@@ -529,17 +530,20 @@ static int ceph_tcp_recvmsg(struct socket *sock, void *buf, size_t len)
 static int ceph_tcp_recvpage(struct socket *sock, struct page *page,
 		     int page_offset, size_t length)
 {
-	void *kaddr;
-	int ret;
+	struct bio_vec bvec = {
+		.bv_page = page,
+		.bv_offset = page_offset,
+		.bv_len = length
+	};
+	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
+	int r;
 
 	BUG_ON(page_offset + length > PAGE_SIZE);
-
-	kaddr = kmap(page);
-	BUG_ON(!kaddr);
-	ret = ceph_tcp_recvmsg(sock, kaddr + page_offset, length);
-	kunmap(page);
-
-	return ret;
+	iov_iter_bvec(&msg.msg_iter, READ | ITER_BVEC, &bvec, 1, length);
+	r = sock_recvmsg(sock, &msg, msg.msg_flags);
+	if (r == -EAGAIN)
+		r = 0;
+	return r;
 }
 
 /*
@@ -579,18 +583,28 @@ static int __ceph_tcp_sendpage(struct socket *sock, struct page *page,
 static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
 		     int offset, size_t size, bool more)
 {
+	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
+	struct bio_vec bvec;
 	int ret;
-	struct kvec iov;
 
 	/* sendpage cannot properly handle pages with page_count == 0,
 	 * we need to fallback to sendmsg if that's the case */
 	if (page_count(page) >= 1)
 		return __ceph_tcp_sendpage(sock, page, offset, size, more);
 
-	iov.iov_base = kmap(page) + offset;
-	iov.iov_len = size;
-	ret = ceph_tcp_sendmsg(sock, &iov, 1, size, more);
-	kunmap(page);
+	bvec.bv_page = page;
+	bvec.bv_offset = offset;
+	bvec.bv_len = size;
+
+	if (more)
+		msg.msg_flags |= MSG_MORE;
+	else
+		msg.msg_flags |= MSG_EOR;  /* superfluous, but what the hell */
+
+	iov_iter_bvec(&msg.msg_iter, WRITE | ITER_BVEC, &bvec, 1, size);
+	ret = sock_sendmsg(sock, &msg);
+	if (ret == -EAGAIN)
+		ret = 0;
 
 	return ret;
 }
diff --git a/net/rds/page.c b/net/rds/page.c
index e2b5a5832d3d..7cc57e098ddb 100644
--- a/net/rds/page.c
+++ b/net/rds/page.c
@@ -45,35 +45,6 @@ struct rds_page_remainder {
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct rds_page_remainder,
 				     rds_page_remainders);
 
-/*
- * returns 0 on success or -errno on failure.
- *
- * We don't have to worry about flush_dcache_page() as this only works
- * with private pages.  If, say, we were to do directed receive to pinned
- * user pages we'd have to worry more about cache coherence.  (Though
- * the flush_dcache_page() in get_user_pages() would probably be enough).
- */
-int rds_page_copy_user(struct page *page, unsigned long offset,
-		       void __user *ptr, unsigned long bytes,
-		       int to_user)
-{
-	unsigned long ret;
-	void *addr;
-
-	addr = kmap(page);
-	if (to_user) {
-		rds_stats_add(s_copy_to_user, bytes);
-		ret = copy_to_user(ptr, addr + offset, bytes);
-	} else {
-		rds_stats_add(s_copy_from_user, bytes);
-		ret = copy_from_user(addr + offset, ptr, bytes);
-	}
-	kunmap(page);
-
-	return ret ? -EFAULT : 0;
-}
-EXPORT_SYMBOL_GPL(rds_page_copy_user);
-
 /**
  * rds_page_remainder_alloc - build up regions of a message.
  *
diff --git a/net/rds/rds.h b/net/rds/rds.h
index 07fff73dd4f3..966d2ee1f107 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -798,13 +798,6 @@ static inline int rds_message_verify_checksum(const struct rds_header *hdr)
 /* page.c */
 int rds_page_remainder_alloc(struct scatterlist *scat, unsigned long bytes,
 			     gfp_t gfp);
-int rds_page_copy_user(struct page *page, unsigned long offset,
-		       void __user *ptr, unsigned long bytes,
-		       int to_user);
-#define rds_page_copy_to_user(page, offset, ptr, bytes) \
-	rds_page_copy_user(page, offset, ptr, bytes, 1)
-#define rds_page_copy_from_user(page, offset, ptr, bytes) \
-	rds_page_copy_user(page, offset, ptr, bytes, 0)
 void rds_page_exit(void);
 
 /* recv.c */
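The ceph_tcp_sendpage() hunk above is the page-backed variant of the same conversion; the condensed sketch below restates it outside the diff, again assuming the 4.11-era iov_iter API (direction OR'ed with ITER_BVEC) and using a hypothetical helper name.

#include <linux/bvec.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/uio.h>

/* Hypothetical helper condensing the converted ceph_tcp_sendpage(). */
static int example_send_page(struct socket *sock, struct page *page,
			     int offset, size_t size, bool more)
{
	struct bio_vec bvec = {
		.bv_page = page,
		.bv_offset = offset,
		.bv_len = size,
	};
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
	int ret;

	if (more)
		msg.msg_flags |= MSG_MORE;

	/* The bio_vec references the page directly, so no kmap()/kunmap(). */
	iov_iter_bvec(&msg.msg_iter, WRITE | ITER_BVEC, &bvec, 1, size);
	ret = sock_sendmsg(sock, &msg);

	/* Treat -EAGAIN as "sent nothing this time", as the callers above do. */
	return ret == -EAGAIN ? 0 : ret;
}

The rds hunks, by contrast, are plain dead-code removal: rds_page_copy_user() and its wrapper macros no longer had any callers.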