Diffstat (limited to 'net')
 net/ipv4/af_inet.c  |   2 +-
 net/ipv4/tcp.c      | 113 ++++++++++++++++++++++++++++++++++++++++++++++++++
 net/ipv6/af_inet6.c |   2 +-
 3 files changed, 115 insertions(+), 2 deletions(-)
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index f5c562aaef35..3ebf599cebae 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -994,7 +994,7 @@ const struct proto_ops inet_stream_ops = {
.getsockopt = sock_common_getsockopt,
.sendmsg = inet_sendmsg,
.recvmsg = inet_recvmsg,
- .mmap = sock_no_mmap,
+ .mmap = tcp_mmap,
.sendpage = inet_sendpage,
.splice_read = tcp_splice_read,
.read_sock = tcp_read_sock,
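For context on why switching this single callback is enough to enable mmap() on TCP sockets: an mmap() call on a socket file descriptor is handed to the socket layer, which forwards it through the proto_ops table. A simplified sketch of that dispatch path (based on sock_mmap() in net/socket.c, reproduced here for orientation only; it is not part of this diff) looks like this:

/* Sketch of the socket-layer mmap dispatch (net/socket.c). */
static int sock_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct socket *sock = file->private_data;

	/* Before this patch, inet_stream_ops.mmap was sock_no_mmap(),
	 * which unconditionally returns -ENODEV; it now points to
	 * tcp_mmap() below.
	 */
	return sock->ops->mmap(file, sock, vma);
}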
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index c768d306b657..438fbca96cd3 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1726,6 +1726,119 @@ int tcp_set_rcvlowat(struct sock *sk, int val)
}
EXPORT_SYMBOL(tcp_set_rcvlowat);

+/* When the user wants to mmap X pages, we first need to perform the mapping
+ * before freeing any skbs in the receive queue, otherwise the user would be
+ * unable to fall back to standard recvmsg(). This happens if some data in
+ * the requested block does not fit exactly into pages.
+ *
+ * We only support order-0 pages for the moment.
+ * mmap() on TCP is very strict; there is no point
+ * trying to accommodate pathological layouts.
+ */
+int tcp_mmap(struct file *file, struct socket *sock,
+ struct vm_area_struct *vma)
+{
+ unsigned long size = vma->vm_end - vma->vm_start;
+ unsigned int nr_pages = size >> PAGE_SHIFT;
+ struct page **pages_array = NULL;
+ u32 seq, len, offset, nr = 0;
+ struct sock *sk = sock->sk;
+ const skb_frag_t *frags;
+ struct tcp_sock *tp;
+ struct sk_buff *skb;
+ int ret;
+
+ if (vma->vm_pgoff || !nr_pages)
+ return -EINVAL;
+
+ if (vma->vm_flags & VM_WRITE)
+ return -EPERM;
+ /* TODO: Maybe the following is not needed if pages are COW */
+ vma->vm_flags &= ~VM_MAYWRITE;
+
+ lock_sock(sk);
+
+ ret = -ENOTCONN;
+ if (sk->sk_state == TCP_LISTEN)
+ goto out;
+
+ sock_rps_record_flow(sk);
+
+ if (tcp_inq(sk) < size) {
+ ret = sock_flag(sk, SOCK_DONE) ? -EIO : -EAGAIN;
+ goto out;
+ }
+ tp = tcp_sk(sk);
+ seq = tp->copied_seq;
+ /* Abort if urgent data is in the area */
+ if (unlikely(tp->urg_data)) {
+ u32 urg_offset = tp->urg_seq - seq;
+
+ ret = -EINVAL;
+ if (urg_offset < size)
+ goto out;
+ }
+ ret = -ENOMEM;
+ pages_array = kvmalloc_array(nr_pages, sizeof(struct page *),
+ GFP_KERNEL);
+ if (!pages_array)
+ goto out;
+ skb = tcp_recv_skb(sk, seq, &offset);
+ ret = -EINVAL;
+skb_start:
+ /* We do not support anything not in page frags */
+ offset -= skb_headlen(skb);
+ if ((int)offset < 0)
+ goto out;
+ if (skb_has_frag_list(skb))
+ goto out;
+ len = skb->data_len - offset;
+ frags = skb_shinfo(skb)->frags;
+ while (offset) {
+ if (frags->size > offset)
+ goto out;
+ offset -= frags->size;
+ frags++;
+ }
+ while (nr < nr_pages) {
+ if (len) {
+ if (len < PAGE_SIZE)
+ goto out;
+ if (frags->size != PAGE_SIZE || frags->page_offset)
+ goto out;
+ pages_array[nr++] = skb_frag_page(frags);
+ frags++;
+ len -= PAGE_SIZE;
+ seq += PAGE_SIZE;
+ continue;
+ }
+ skb = skb->next;
+ offset = seq - TCP_SKB_CB(skb)->seq;
+ goto skb_start;
+ }
+ /* OK, we have a full set of pages ready to be inserted into vma */
+ for (nr = 0; nr < nr_pages; nr++) {
+ ret = vm_insert_page(vma, vma->vm_start + (nr << PAGE_SHIFT),
+ pages_array[nr]);
+ if (ret)
+ goto out;
+ }
+ /* operation is complete, we can 'consume' all skbs */
+ tp->copied_seq = seq;
+ tcp_rcv_space_adjust(sk);
+
+ /* Clean up data we have read: This will do ACK frames. */
+ tcp_recv_skb(sk, seq, &offset);
+ tcp_cleanup_rbuf(sk, size);
+
+ ret = 0;
+out:
+ release_sock(sk);
+ kvfree(pages_array);
+ return ret;
+}
+EXPORT_SYMBOL(tcp_mmap);
+
static void tcp_update_recv_tstamps(struct sk_buff *skb,
struct scm_timestamping *tss)
{
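Taken together, the checks in tcp_mmap() define a narrow contract for user space: the mapping must be read-only, start at file offset 0, cover a whole number of pages, and at least that many bytes must already be queued, laid out in page-sized, page-aligned frags. The sketch below illustrates one way a caller might honour that contract while keeping a copying recv() fallback; the helper name rx_chunk() and the consume() callback are illustrative and not taken from this patch or its selftests.

/* Illustrative user-space helper (not from this patch): try to receive
 * one page-multiple chunk via mmap() on the TCP socket, falling back to
 * a regular copying recv() when the zero-copy path is not possible.
 */
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <unistd.h>

static ssize_t rx_chunk(int fd, char *copybuf, size_t chunk,
			void (*consume)(const void *, size_t))
{
	int avail = 0;
	ssize_t ret;
	void *addr;

	/* tcp_mmap() returns -EAGAIN unless the whole chunk is already
	 * queued, so only attempt the zero-copy path once enough bytes
	 * are available; chunk must be a multiple of the page size.
	 */
	if (ioctl(fd, FIONREAD, &avail) == 0 && (size_t)avail >= chunk) {
		/* PROT_READ only: writable mappings get -EPERM. */
		addr = mmap(NULL, chunk, PROT_READ, MAP_SHARED, fd, 0);
		if (addr != MAP_FAILED) {
			consume(addr, chunk);
			munmap(addr, chunk);
			return chunk;	/* consumed by mmap() itself */
		}
		/* -EINVAL typically means the payload is not laid out in
		 * page-sized, page-aligned frags; fall back to copying.
		 */
	}

	/* A failed mmap() does not free any skbs (see the comment above
	 * tcp_mmap()), so recv() still sees all of the queued data.
	 */
	ret = recv(fd, copybuf, chunk, 0);
	if (ret > 0)
		consume(copybuf, ret);
	return ret;
}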
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index e70d59fb26e1..2c694912df2e 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -579,7 +579,7 @@ const struct proto_ops inet6_stream_ops = {
.getsockopt = sock_common_getsockopt, /* ok */
.sendmsg = inet_sendmsg, /* ok */
.recvmsg = inet_recvmsg, /* ok */
- .mmap = sock_no_mmap,
+ .mmap = tcp_mmap,
.sendpage = inet_sendpage,
.sendmsg_locked = tcp_sendmsg_locked,
.sendpage_locked = tcp_sendpage_locked,
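For completeness, a hypothetical receive loop built on the rx_chunk() sketch above could look like the following; the page-size arithmetic matters because tcp_mmap() only accepts mappings whose length is an exact multiple of PAGE_SIZE. The names rx_loop() and consume() are again illustrative only.

/* Hypothetical receive loop using the rx_chunk() sketch above. */
#include <stdlib.h>
#include <unistd.h>

static void consume(const void *data, size_t len)
{
	/* Process len bytes at data. While the chunk is mapped these are
	 * still the kernel's skb frag pages and the mapping is read-only,
	 * so do not write to them.
	 */
}

static void rx_loop(int fd)
{
	size_t chunk = 64 * sysconf(_SC_PAGESIZE);	/* page multiple */
	char *copybuf = malloc(chunk);
	ssize_t n;

	if (!copybuf)
		return;
	while ((n = rx_chunk(fd, copybuf, chunk, consume)) > 0)
		;	/* rx_chunk() already consumed or copied the data */
	free(copybuf);
}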