Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/net/xen-netfront.c | 194
1 file changed, 74 insertions(+), 120 deletions(-)
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index a4e50482a230..01a4350eb313 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -421,109 +421,68 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
 	xennet_maybe_wake_tx(queue);
 }
 
-static void xennet_make_frags(struct sk_buff *skb, struct netfront_queue *queue,
-			      struct xen_netif_tx_request *tx)
-{
-	char *data = skb->data;
-	unsigned long mfn;
-	RING_IDX prod = queue->tx.req_prod_pvt;
-	int frags = skb_shinfo(skb)->nr_frags;
-	unsigned int offset = offset_in_page(data);
-	unsigned int len = skb_headlen(skb);
+static struct xen_netif_tx_request *xennet_make_one_txreq(
+	struct netfront_queue *queue, struct sk_buff *skb,
+	struct page *page, unsigned int offset, unsigned int len)
+{
 	unsigned int id;
+	struct xen_netif_tx_request *tx;
 	grant_ref_t ref;
-	int i;
 
-	/* While the header overlaps a page boundary (including being
-	   larger than a page), split it it into page-sized chunks. */
-	while (len > PAGE_SIZE - offset) {
-		tx->size = PAGE_SIZE - offset;
-		tx->flags |= XEN_NETTXF_more_data;
-		len -= tx->size;
-		data += tx->size;
-		offset = 0;
+	len = min_t(unsigned int, PAGE_SIZE - offset, len);
 
-		id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
-		queue->tx_skbs[id].skb = skb_get(skb);
-		tx = RING_GET_REQUEST(&queue->tx, prod++);
-		tx->id = id;
-		ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
-		BUG_ON((signed short)ref < 0);
+	id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
+	tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
+	ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
+	BUG_ON((signed short)ref < 0);
 
-		mfn = virt_to_mfn(data);
-		gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
-						mfn, GNTMAP_readonly);
+	gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
+					page_to_mfn(page), GNTMAP_readonly);
 
-		queue->grant_tx_page[id] = virt_to_page(data);
-		tx->gref = queue->grant_tx_ref[id] = ref;
-		tx->offset = offset;
-		tx->size = len;
-		tx->flags = 0;
-	}
+	queue->tx_skbs[id].skb = skb;
+	queue->grant_tx_page[id] = page;
+	queue->grant_tx_ref[id] = ref;
 
-	/* Grant backend access to each skb fragment page. */
-	for (i = 0; i < frags; i++) {
-		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
-		struct page *page = skb_frag_page(frag);
+	tx->id = id;
+	tx->gref = ref;
+	tx->offset = offset;
+	tx->size = len;
+	tx->flags = 0;
 
-		len = skb_frag_size(frag);
-		offset = frag->page_offset;
+	return tx;
+}
 
-		/* Skip unused frames from start of page */
-		page += offset >> PAGE_SHIFT;
-		offset &= ~PAGE_MASK;
+static struct xen_netif_tx_request *xennet_make_txreqs(
+	struct netfront_queue *queue, struct xen_netif_tx_request *tx,
+	struct sk_buff *skb, struct page *page,
+	unsigned int offset, unsigned int len)
+{
+	/* Skip unused frames from start of page */
+	page += offset >> PAGE_SHIFT;
+	offset &= ~PAGE_MASK;
 
-		while (len > 0) {
-			unsigned long bytes;
-
-			bytes = PAGE_SIZE - offset;
-			if (bytes > len)
-				bytes = len;
-
-			tx->flags |= XEN_NETTXF_more_data;
-
-			id = get_id_from_freelist(&queue->tx_skb_freelist,
-						  queue->tx_skbs);
-			queue->tx_skbs[id].skb = skb_get(skb);
-			tx = RING_GET_REQUEST(&queue->tx, prod++);
-			tx->id = id;
-			ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
-			BUG_ON((signed short)ref < 0);
-
-			mfn = pfn_to_mfn(page_to_pfn(page));
-			gnttab_grant_foreign_access_ref(ref,
-							queue->info->xbdev->otherend_id,
-							mfn, GNTMAP_readonly);
-
-			queue->grant_tx_page[id] = page;
-			tx->gref = queue->grant_tx_ref[id] = ref;
-			tx->offset = offset;
-			tx->size = bytes;
-			tx->flags = 0;
-
-			offset += bytes;
-			len -= bytes;
-
-			/* Next frame */
-			if (offset == PAGE_SIZE && len) {
-				BUG_ON(!PageCompound(page));
-				page++;
-				offset = 0;
-			}
-		}
+	while (len) {
+		tx->flags |= XEN_NETTXF_more_data;
+		tx = xennet_make_one_txreq(queue, skb_get(skb),
+					   page, offset, len);
+		page++;
+		offset = 0;
+		len -= tx->size;
 	}
 
-	queue->tx.req_prod_pvt = prod;
+	return tx;
 }
 
 /*
- * Count how many ring slots are required to send the frags of this
- * skb. Each frag might be a compound page.
+ * Count how many ring slots are required to send this skb. Each frag
+ * might be a compound page.
  */
-static int xennet_count_skb_frag_slots(struct sk_buff *skb)
+static int xennet_count_skb_slots(struct sk_buff *skb)
 {
 	int i, frags = skb_shinfo(skb)->nr_frags;
-	int pages = 0;
+	int pages;
+
+	pages = PFN_UP(offset_in_page(skb->data) + skb_headlen(skb));
 
 	for (i = 0; i < frags; i++) {
 		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
@@ -559,18 +518,15 @@ static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
 
 static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
-	unsigned short id;
 	struct netfront_info *np = netdev_priv(dev);
 	struct netfront_stats *stats = this_cpu_ptr(np->stats);
-	struct xen_netif_tx_request *tx;
-	char *data = skb->data;
-	RING_IDX i;
-	grant_ref_t ref;
-	unsigned long mfn;
+	struct xen_netif_tx_request *tx, *first_tx;
+	unsigned int i;
 	int notify;
 	int slots;
-	unsigned int offset = offset_in_page(data);
-	unsigned int len = skb_headlen(skb);
+	struct page *page;
+	unsigned int offset;
+	unsigned int len;
 	unsigned long flags;
 	struct netfront_queue *queue = NULL;
 	unsigned int num_queues = dev->real_num_tx_queues;
@@ -593,18 +549,18 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		goto drop;
 	}
 
-	slots = DIV_ROUND_UP(offset + len, PAGE_SIZE) +
-		xennet_count_skb_frag_slots(skb);
+	slots = xennet_count_skb_slots(skb);
 	if (unlikely(slots > MAX_SKB_FRAGS + 1)) {
 		net_dbg_ratelimited("xennet: skb rides the rocket: %d slots, %d bytes\n",
 				    slots, skb->len);
 		if (skb_linearize(skb))
 			goto drop;
-		data = skb->data;
-		offset = offset_in_page(data);
-		len = skb_headlen(skb);
 	}
 
+	page = virt_to_page(skb->data);
+	offset = offset_in_page(skb->data);
+	len = skb_headlen(skb);
+
 	spin_lock_irqsave(&queue->tx_lock, flags);
 
 	if (unlikely(!netif_carrier_ok(dev) ||
@@ -614,25 +570,13 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		goto drop;
 	}
 
-	i = queue->tx.req_prod_pvt;
-
-	id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
-	queue->tx_skbs[id].skb = skb;
-
-	tx = RING_GET_REQUEST(&queue->tx, i);
+	/* First request for the linear area. */
+	first_tx = tx = xennet_make_one_txreq(queue, skb,
+					      page, offset, len);
+	page++;
+	offset = 0;
+	len -= tx->size;
 
-	tx->id = id;
-	ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
-	BUG_ON((signed short)ref < 0);
-	mfn = virt_to_mfn(data);
-	gnttab_grant_foreign_access_ref(
-		ref, queue->info->xbdev->otherend_id, mfn, GNTMAP_readonly);
-	queue->grant_tx_page[id] = virt_to_page(data);
-	tx->gref = queue->grant_tx_ref[id] = ref;
-	tx->offset = offset;
-	tx->size = len;
-
-	tx->flags = 0;
 	if (skb->ip_summed == CHECKSUM_PARTIAL)
 		/* local packet? */
 		tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated;
@@ -640,11 +584,12 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		/* remote but checksummed. */
 		tx->flags |= XEN_NETTXF_data_validated;
 
+	/* Optional extra info after the first request. */
 	if (skb_shinfo(skb)->gso_size) {
 		struct xen_netif_extra_info *gso;
 
 		gso = (struct xen_netif_extra_info *)
-			RING_GET_REQUEST(&queue->tx, ++i);
+			RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
 
 		tx->flags |= XEN_NETTXF_extra_info;
 
@@ -659,10 +604,19 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		gso->flags = 0;
 	}
 
-	queue->tx.req_prod_pvt = i + 1;
+	/* Requests for the rest of the linear area. */
+	tx = xennet_make_txreqs(queue, tx, skb, page, offset, len);
+
+	/* Requests for all the frags. */
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+		tx = xennet_make_txreqs(queue, tx, skb,
+					skb_frag_page(frag), frag->page_offset,
+					skb_frag_size(frag));
+	}
 
-	xennet_make_frags(skb, queue, tx);
-	tx->size = skb->len;
+	/* First request has the packet length. */
+	first_tx->size = skb->len;
 
 	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
 	if (notify)
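For context, here is an illustrative user-space sketch (not kernel code) of the per-page chunking that the new xennet_make_one_txreq()/xennet_make_txreqs() helpers perform: each request covers at most the remainder of the current page, the previous request is flagged as having more data when another request follows, and every page after the first starts at offset 0. PAGE_SIZE, struct fake_txreq, make_one() and make_reqs() are simplified stand-ins invented for this example, not the kernel types or functions.

/*
 * Sketch of the chunking behaviour of xennet_make_one_txreq() and
 * xennet_make_txreqs().  All names here are simplified stand-ins.
 */
#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE 4096u
#define MAX_REQS  16

struct fake_txreq {
	unsigned int offset;	/* offset within its page */
	unsigned int size;	/* bytes covered by this request */
	int more_data;		/* analogue of XEN_NETTXF_more_data */
};

/* One request covers at most the rest of the current page. */
static struct fake_txreq make_one(unsigned int offset, unsigned int len)
{
	struct fake_txreq tx = {
		.offset = offset,
		.size = len < PAGE_SIZE - offset ? len : PAGE_SIZE - offset,
		.more_data = 0,
	};
	return tx;
}

/*
 * Walk a region that may cross page boundaries, emitting one request per
 * page, like the while (len) loop in xennet_make_txreqs(): flag the
 * previous request when another follows, and start later pages at 0.
 */
static void make_reqs(unsigned int offset, unsigned int len)
{
	struct fake_txreq reqs[MAX_REQS];
	unsigned int n = 0;

	while (len) {
		assert(n < MAX_REQS);
		if (n)
			reqs[n - 1].more_data = 1;
		reqs[n] = make_one(offset, len);
		offset = 0;
		len -= reqs[n].size;
		n++;
	}

	for (unsigned int i = 0; i < n; i++)
		printf("req %u: offset=%u size=%u more_data=%d\n",
		       i, reqs[i].offset, reqs[i].size, reqs[i].more_data);
}

int main(void)
{
	/* 10000 bytes starting 3000 bytes into the first page. */
	make_reqs(3000, 10000);
	return 0;
}

For this example region (offset 3000, length 10000, 4 KiB pages) the sketch emits four requests of 1096, 4096, 4096 and 712 bytes, which agrees with the slot count the patch introduces for the linear area: PFN_UP(offset_in_page(skb->data) + skb_headlen(skb)) = PFN_UP(13000) = 4.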