Diffstat (limited to 'net/sched')
-rw-r--r--  net/sched/sch_generic.c | 61
1 file changed, 28 insertions(+), 33 deletions(-)
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 797ebef73642..2b349a4de3c8 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -56,40 +56,34 @@ static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
 	return 0;
 }
 
-static struct sk_buff *try_bulk_dequeue_skb(struct Qdisc *q,
-					    struct sk_buff *head_skb,
-					    int bytelimit)
+static void try_bulk_dequeue_skb(struct Qdisc *q,
+				 struct sk_buff *skb,
+				 const struct netdev_queue *txq)
 {
-	struct sk_buff *skb, *tail_skb = head_skb;
+	int bytelimit = qdisc_avail_bulklimit(txq) - skb->len;
 
 	while (bytelimit > 0) {
-		skb = q->dequeue(q);
-		if (!skb)
-			break;
+		struct sk_buff *nskb = q->dequeue(q);
 
-		bytelimit -= skb->len; /* covers GSO len */
-		skb = validate_xmit_skb(skb, qdisc_dev(q));
-		if (!skb)
+		if (!nskb)
 			break;
 
-		while (tail_skb->next) /* GSO list goto tail */
-			tail_skb = tail_skb->next;
-
-		tail_skb->next = skb;
-		tail_skb = skb;
+		bytelimit -= nskb->len; /* covers GSO len */
+		skb->next = nskb;
+		skb = nskb;
 	}
-
-	return head_skb;
+	skb->next = NULL;
 }
 
 /* Note that dequeue_skb can possibly return a SKB list (via skb->next).
  * A requeued skb (via q->gso_skb) can also be a SKB list.
  */
-static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
+static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate)
 {
 	struct sk_buff *skb = q->gso_skb;
 	const struct netdev_queue *txq = q->dev_queue;
 
+	*validate = true;
 	if (unlikely(skb)) {
 		/* check the reason of requeuing without tx lock first */
 		txq = skb_get_tx_queue(txq->dev, skb);
@@ -98,21 +92,16 @@ static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
 			q->q.qlen--;
 		} else
 			skb = NULL;
+		/* skb in gso_skb were already validated */
+		*validate = false;
 	} else {
 		if (!(q->flags & TCQ_F_ONETXQUEUE) ||
 		    !netif_xmit_frozen_or_stopped(txq)) {
-			int bytelimit = qdisc_avail_bulklimit(txq);
-
 			skb = q->dequeue(q);
-			if (skb) {
-				bytelimit -= skb->len;
-				skb = validate_xmit_skb(skb, qdisc_dev(q));
-			}
 			if (skb && qdisc_may_bulk(q))
-				skb = try_bulk_dequeue_skb(q, skb, bytelimit);
+				try_bulk_dequeue_skb(q, skb, txq);
 		}
 	}
-
 	return skb;
 }
 
@@ -156,19 +145,24 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb,
  */
 int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
 		    struct net_device *dev, struct netdev_queue *txq,
-		    spinlock_t *root_lock)
+		    spinlock_t *root_lock, bool validate)
 {
 	int ret = NETDEV_TX_BUSY;
 
 	/* And release qdisc */
 	spin_unlock(root_lock);
 
-	HARD_TX_LOCK(dev, txq, smp_processor_id());
-	if (!netif_xmit_frozen_or_stopped(txq))
-		skb = dev_hard_start_xmit(skb, dev, txq, &ret);
+	/* Note that we validate skb (GSO, checksum, ...) outside of locks */
+	if (validate)
+		skb = validate_xmit_skb_list(skb, dev);
 
-	HARD_TX_UNLOCK(dev, txq);
+	if (skb) {
+		HARD_TX_LOCK(dev, txq, smp_processor_id());
+		if (!netif_xmit_frozen_or_stopped(txq))
+			skb = dev_hard_start_xmit(skb, dev, txq, &ret);
 
+		HARD_TX_UNLOCK(dev, txq);
+	}
 	spin_lock(root_lock);
 
 	if (dev_xmit_complete(ret)) {
@@ -217,9 +211,10 @@ static inline int qdisc_restart(struct Qdisc *q)
 	struct net_device *dev;
 	spinlock_t *root_lock;
 	struct sk_buff *skb;
+	bool validate;
 
 	/* Dequeue packet */
-	skb = dequeue_skb(q);
+	skb = dequeue_skb(q, &validate);
 	if (unlikely(!skb))
 		return 0;
 
@@ -229,7 +224,7 @@ static inline int qdisc_restart(struct Qdisc *q)
 	dev = qdisc_dev(q);
 	txq = skb_get_tx_queue(dev, skb);
 
-	return sch_direct_xmit(skb, q, dev, txq, root_lock);
+	return sch_direct_xmit(skb, q, dev, txq, root_lock, validate);
 }
 
 void __qdisc_run(struct Qdisc *q)
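The substance of the change: GSO segmentation and checksum setup (validate_xmit_skb()) no longer run under the qdisc root lock or the device TX lock. try_bulk_dequeue_skb() now only links raw skbs into a list, and sch_direct_xmit() validates the whole list in one pass right after releasing root_lock. The validate_xmit_skb_list() helper it calls is added on the net/core side of this commit, which this diffstat (limited to 'net/sched') does not show. Below is a minimal sketch of what such a list validator has to do, assuming only the validate_xmit_skb() semantics visible in the removed lines (NULL on drop, possibly a multi-skb GSO segment list on success); the real helper may differ in detail.

/* Hypothetical sketch, not the actual net/core implementation:
 * validate each skb on the list, splice out failures, and flatten
 * any GSO segment lists back into one chain.
 */
static struct sk_buff *validate_skb_list_sketch(struct sk_buff *skb,
						struct net_device *dev)
{
	struct sk_buff *next, *head = NULL, *tail = NULL;

	for (; skb != NULL; skb = next) {
		next = skb->next;
		skb->next = NULL;

		/* may return NULL (drop) or a multi-skb segment list */
		skb = validate_xmit_skb(skb, dev);
		if (!skb)
			continue;

		if (!head)
			head = skb;
		else
			tail->next = skb;

		/* walk to the tail of whatever came back so the next
		 * iteration appends after the last segment
		 */
		tail = skb;
		while (tail->next)
			tail = tail->next;
	}
	return head;
}

Note the payoff in sch_direct_xmit() above: if validation drops every skb, the list collapses to NULL and the TX lock is never taken at all, so the per-packet heavy lifting happens while other CPUs remain free to enqueue into the qdisc.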