Diffstat (limited to 'net/sched')
 net/sched/act_ct.c      | 31
 net/sched/cls_api.c     |  2
 net/sched/sch_cake.c    | 18
 net/sched/sch_dsmark.c  |  3
 net/sched/sch_fq_pie.c  | 19
 net/sched/sch_generic.c | 50
 net/sched/sch_htb.c     |  8
7 files changed, 96 insertions, 35 deletions
diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
index ec7a1c438df9..a656baa321fe 100644
--- a/net/sched/act_ct.c
+++ b/net/sched/act_ct.c
@@ -904,14 +904,19 @@ static int tcf_ct_act_nat(struct sk_buff *skb,
 	}
 
 	err = ct_nat_execute(skb, ct, ctinfo, range, maniptype);
-	if (err == NF_ACCEPT &&
-	    ct->status & IPS_SRC_NAT && ct->status & IPS_DST_NAT) {
-		if (maniptype == NF_NAT_MANIP_SRC)
-			maniptype = NF_NAT_MANIP_DST;
-		else
-			maniptype = NF_NAT_MANIP_SRC;
-
-		err = ct_nat_execute(skb, ct, ctinfo, range, maniptype);
+	if (err == NF_ACCEPT && ct->status & IPS_DST_NAT) {
+		if (ct->status & IPS_SRC_NAT) {
+			if (maniptype == NF_NAT_MANIP_SRC)
+				maniptype = NF_NAT_MANIP_DST;
+			else
+				maniptype = NF_NAT_MANIP_SRC;
+
+			err = ct_nat_execute(skb, ct, ctinfo, range,
+					     maniptype);
+		} else if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) {
+			err = ct_nat_execute(skb, ct, ctinfo, NULL,
+					     NF_NAT_MANIP_SRC);
+		}
 	}
 	return err;
 #else
@@ -984,7 +989,7 @@ static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
 	 */
 	cached = tcf_ct_skb_nfct_cached(net, skb, p->zone, force);
 	if (!cached) {
-		if (!commit && tcf_ct_flow_table_lookup(p, skb, family)) {
+		if (tcf_ct_flow_table_lookup(p, skb, family)) {
 			skip_add = true;
 			goto do_nat;
 		}
@@ -1022,10 +1027,11 @@ do_nat:
 		 * even if the connection is already confirmed.
 		 */
 		nf_conntrack_confirm(skb);
-	} else if (!skip_add) {
-		tcf_ct_flow_table_process_conn(p->ct_ft, ct, ctinfo);
 	}
 
+	if (!skip_add)
+		tcf_ct_flow_table_process_conn(p->ct_ft, ct, ctinfo);
+
 out_push:
 	skb_push_rcsum(skb, nh_ofs);
 
@@ -1202,9 +1208,6 @@ static int tcf_ct_fill_params(struct net *net,
 				   sizeof(p->zone));
 	}
 
-	if (p->zone == NF_CT_DEFAULT_ZONE_ID)
-		return 0;
-
 	nf_ct_zone_init(&zone, p->zone, NF_CT_DEFAULT_ZONE_DIR, 0);
 	tmpl = nf_ct_tmpl_alloc(net, &zone, GFP_KERNEL);
 	if (!tmpl) {
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 40fbea626dfd..279f9e2a2319 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -1624,7 +1624,7 @@ int tcf_classify_ingress(struct sk_buff *skb,
 
 		/* If we missed on some chain */
 		if (ret == TC_ACT_UNSPEC && last_executed_chain) {
-			ext = skb_ext_add(skb, TC_SKB_EXT);
+			ext = tc_skb_ext_alloc(skb);
 			if (WARN_ON_ONCE(!ext))
 				return TC_ACT_SHOT;
 			ext->chain = last_executed_chain;
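The cls_api hunk swaps a bare skb_ext_add() for tc_skb_ext_alloc(). Presumably the point is that skb_ext_add() hands back uninitialized extension memory, so any tc_skb_ext field the caller does not set explicitly would carry stale data. A minimal sketch of such a zeroing wrapper, assuming it simply combines skb_ext_add() with a memset() (consult include/net/pkt_cls.h for the real helper):

	static inline struct tc_skb_ext *tc_skb_ext_alloc(struct sk_buff *skb)
	{
		struct tc_skb_ext *tc_skb_ext = skb_ext_add(skb, TC_SKB_EXT);

		/* skb_ext_add() returns uninitialized storage; zero it so
		 * fields other than ->chain cannot leak garbage to readers.
		 */
		if (tc_skb_ext)
			memset(tc_skb_ext, 0, sizeof(*tc_skb_ext));

		return tc_skb_ext;
	}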
diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 7d37638ee1c7..951542843cab 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -943,7 +943,7 @@ static struct tcphdr *cake_get_tcphdr(const struct sk_buff *skb,
 	}
 
 	tcph = skb_header_pointer(skb, offset, sizeof(_tcph), &_tcph);
-	if (!tcph)
+	if (!tcph || tcph->doff < 5)
 		return NULL;
 
 	return skb_header_pointer(skb, offset,
@@ -967,6 +967,8 @@ static const void *cake_get_tcpopt(const struct tcphdr *tcph,
 			length--;
 			continue;
 		}
+		if (length < 2)
+			break;
 		opsize = *ptr++;
 		if (opsize < 2 || opsize > length)
 			break;
@@ -1104,6 +1106,8 @@ static bool cake_tcph_may_drop(const struct tcphdr *tcph,
 			length--;
 			continue;
 		}
+		if (length < 2)
+			break;
 		opsize = *ptr++;
 		if (opsize < 2 || opsize > length)
 			break;
@@ -2338,7 +2342,7 @@ static int cake_config_precedence(struct Qdisc *sch)
 
 /*	List of known Diffserv codepoints:
  *
- *	Least Effort (CS1)
+ *	Least Effort (CS1, LE)
  *	Best Effort (CS0)
  *	Max Reliability & LLT "Lo" (TOS1)
  *	Max Throughput (TOS2)
@@ -2360,7 +2364,7 @@ static int cake_config_precedence(struct Qdisc *sch)
  *	Total 25 codepoints.
  */
 
-/*	List of traffic classes in RFC 4594:
+/*	List of traffic classes in RFC 4594, updated by RFC 8622:
  *		(roughly descending order of contended priority)
  *		(roughly ascending order of uncontended throughput)
 *
@@ -2375,7 +2379,7 @@ static int cake_config_precedence(struct Qdisc *sch)
 *		Ops, Admin, Management (CS2,TOS1) - eg. ssh
 *		Standard Service (CS0 & unrecognised codepoints)
 *		High Throughput Data (AF1x,TOS2)  - eg. web traffic
- *		Low Priority Data (CS1)           - eg. BitTorrent
+ *		Low Priority Data (CS1,LE)        - eg. BitTorrent
 *
 *	Total 12 traffic classes.
 */
@@ -2391,7 +2395,7 @@ static int cake_config_diffserv8(struct Qdisc *sch)
 *		Video Streaming (AF4x, AF3x, CS3)
 *		Bog Standard (CS0 etc.)
 *		High Throughput (AF1x, TOS2)
- *		Background Traffic (CS1)
+ *		Background Traffic (CS1, LE)
 *
 *		Total 8 traffic classes.
 */
@@ -2435,7 +2439,7 @@ static int cake_config_diffserv4(struct Qdisc *sch)
 *	    Latency Sensitive  (CS7, CS6, EF, VA, CS5, CS4)
 *	    Streaming Media    (AF4x, AF3x, CS3, AF2x, TOS4, CS2, TOS1)
 *	    Best Effort        (CS0, AF1x, TOS2, and those not specified)
- *	    Background Traffic (CS1)
+ *	    Background Traffic (CS1, LE)
 *
 *	Total 4 traffic classes.
 */
@@ -2473,7 +2477,7 @@ static int cake_config_diffserv4(struct Qdisc *sch)
 static int cake_config_diffserv3(struct Qdisc *sch)
 {
 /*  Simplified Diffserv structure with 3 tins.
- *		Low Priority		(CS1)
+ *		Low Priority		(CS1, LE)
 *		Best Effort
 *		Latency Sensitive	(TOS4, VA, EF, CS6, CS7)
 */
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index cd2748e2d4a2..d320bcfb2da2 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -407,7 +407,8 @@ static void dsmark_reset(struct Qdisc *sch)
 	struct dsmark_qdisc_data *p = qdisc_priv(sch);
 
 	pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);
-	qdisc_reset(p->q);
+	if (p->q)
+		qdisc_reset(p->q);
 	sch->qstats.backlog = 0;
 	sch->q.qlen = 0;
 }
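The two cake hunks add the same guard to both TCP option walks: once the option-kind byte has been consumed, fewer than two remaining bytes means there is no room for a size byte, and reading it would run past the end of the options area. A standalone sketch of the walk with that check; the helper name and userspace scaffolding are illustrative assumptions, only the loop shape mirrors cake_get_tcpopt():

	#include <stdint.h>
	#include <stddef.h>

	#define TCPOPT_EOL	0
	#define TCPOPT_NOP	1

	/* Return a pointer to the data of option 'kind', or NULL. */
	static const uint8_t *find_tcp_option(const uint8_t *ptr, int length,
					      uint8_t kind)
	{
		while (length > 0) {
			int opcode = *ptr++;
			int opsize;

			if (opcode == TCPOPT_EOL)
				break;
			if (opcode == TCPOPT_NOP) {
				length--;
				continue;
			}

			/* The fix: 'length' still counts the kind byte just
			 * consumed, so length < 2 means no size byte is left
			 * and *ptr would be an out-of-bounds read.
			 */
			if (length < 2)
				break;

			opsize = *ptr++;
			if (opsize < 2 || opsize > length)
				break;

			if (opcode == kind)
				return ptr;	/* data follows kind+size */

			ptr += opsize - 2;
			length -= opsize;
		}

		return NULL;
	}

The companion tcph->doff < 5 check in cake_get_tcphdr rejects headers whose data offset claims to be shorter than the fixed 20-byte TCP header, so the option walk never starts with a negative length.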
diff --git a/net/sched/sch_fq_pie.c b/net/sched/sch_fq_pie.c
index 949163fe68af..cac684952edc 100644
--- a/net/sched/sch_fq_pie.c
+++ b/net/sched/sch_fq_pie.c
@@ -138,8 +138,15 @@ static int fq_pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 
 	/* Classifies packet into corresponding flow */
 	idx = fq_pie_classify(skb, sch, &ret);
-	sel_flow = &q->flows[idx];
+	if (idx == 0) {
+		if (ret & __NET_XMIT_BYPASS)
+			qdisc_qstats_drop(sch);
+		__qdisc_drop(skb, to_free);
+		return ret;
+	}
+	idx--;
 
+	sel_flow = &q->flows[idx];
 	/* Checks whether adding a new packet would exceed memory limit */
 	get_pie_cb(skb)->mem_usage = skb->truesize;
 	memory_limited = q->memory_usage > q->memory_limit + skb->truesize;
@@ -297,9 +304,9 @@ static int fq_pie_change(struct Qdisc *sch, struct nlattr *opt,
 			goto flow_error;
 		}
 		q->flows_cnt = nla_get_u32(tb[TCA_FQ_PIE_FLOWS]);
-		if (!q->flows_cnt || q->flows_cnt >= 65536) {
+		if (!q->flows_cnt || q->flows_cnt > 65536) {
 			NL_SET_ERR_MSG_MOD(extack,
-					   "Number of flows must range in [1..65535]");
+					   "Number of flows must range in [1..65536]");
 			goto flow_error;
 		}
 	}
@@ -367,7 +374,7 @@ static void fq_pie_timer(struct timer_list *t)
 	struct fq_pie_sched_data *q = from_timer(q, t, adapt_timer);
 	struct Qdisc *sch = q->sch;
 	spinlock_t *root_lock; /* to lock qdisc for probability calculations */
-	u16 idx;
+	u32 idx;
 
 	root_lock = qdisc_lock(qdisc_root_sleeping(sch));
 	spin_lock(root_lock);
@@ -388,7 +395,7 @@ static int fq_pie_init(struct Qdisc *sch, struct nlattr *opt,
 {
 	struct fq_pie_sched_data *q = qdisc_priv(sch);
 	int err;
-	u16 idx;
+	u32 idx;
 
 	pie_params_init(&q->p_params);
 	sch->limit = 10 * 1024;
@@ -500,7 +507,7 @@ static int fq_pie_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
 static void fq_pie_reset(struct Qdisc *sch)
 {
 	struct fq_pie_sched_data *q = qdisc_priv(sch);
-	u16 idx;
+	u32 idx;
 
 	INIT_LIST_HEAD(&q->new_flows);
 	INIT_LIST_HEAD(&q->old_flows);
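Two fq_pie fixes interlock here. fq_pie_classify() returns a 1-based class index, so 0 means "no match" and now drops the packet instead of silently indexing flows[0]; and because the flow count may now legitimately be 65536, every index variable widens from u16 to u32. A 16-bit counter wraps from 65535 back to 0 before ever reaching 65536, so the init/reset/timer loops over the flow array would never terminate. A minimal illustration of the wrap, with a hypothetical function that only mirrors the shape of those loops:

	#include <stdint.h>

	/* flows_cnt may now be 65536; with the old u16 index the loop
	 * condition 'idx < flows_cnt' stays true forever, because idx
	 * wraps 65535 -> 0.  Widening idx to uint32_t fixes it.
	 */
	static void init_flows_buggy(uint32_t flows_cnt)
	{
		uint16_t idx;	/* the old type */

		for (idx = 0; idx < flows_cnt; idx++)
			;	/* set up flows[idx] ... never exits at 65536 */
	}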
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 44991ea726fc..fc8b56bcabf3 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -35,6 +35,25 @@
 const struct Qdisc_ops *default_qdisc_ops = &pfifo_fast_ops;
 EXPORT_SYMBOL(default_qdisc_ops);
 
+static void qdisc_maybe_clear_missed(struct Qdisc *q,
+				     const struct netdev_queue *txq)
+{
+	clear_bit(__QDISC_STATE_MISSED, &q->state);
+
+	/* Make sure the below netif_xmit_frozen_or_stopped()
+	 * checking happens after clearing STATE_MISSED.
+	 */
+	smp_mb__after_atomic();
+
+	/* Checking netif_xmit_frozen_or_stopped() again to
+	 * make sure STATE_MISSED is set if the STATE_MISSED
+	 * set by netif_tx_wake_queue()'s rescheduling of
+	 * net_tx_action() is cleared by the above clear_bit().
+	 */
+	if (!netif_xmit_frozen_or_stopped(txq))
+		set_bit(__QDISC_STATE_MISSED, &q->state);
+}
+
 /* Main transmission queue. */
 
 /* Modifications to data participating in scheduling must be protected with
@@ -74,6 +93,7 @@ static inline struct sk_buff *__skb_dequeue_bad_txq(struct Qdisc *q)
 			}
 		} else {
 			skb = SKB_XOFF_MAGIC;
+			qdisc_maybe_clear_missed(q, txq);
 		}
 	}
 
@@ -242,6 +262,7 @@ static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
 			}
 		} else {
 			skb = NULL;
+			qdisc_maybe_clear_missed(q, txq);
 		}
 		if (lock)
 			spin_unlock(lock);
@@ -251,8 +272,10 @@ validate:
 	*validate = true;
 
 	if ((q->flags & TCQ_F_ONETXQUEUE) &&
-	    netif_xmit_frozen_or_stopped(txq))
+	    netif_xmit_frozen_or_stopped(txq)) {
+		qdisc_maybe_clear_missed(q, txq);
 		return skb;
+	}
 
 	skb = qdisc_dequeue_skb_bad_txq(q);
 	if (unlikely(skb)) {
@@ -311,6 +334,8 @@ bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
 		HARD_TX_LOCK(dev, txq, smp_processor_id());
 		if (!netif_xmit_frozen_or_stopped(txq))
 			skb = dev_hard_start_xmit(skb, dev, txq, &ret);
+		else
+			qdisc_maybe_clear_missed(q, txq);
 
 		HARD_TX_UNLOCK(dev, txq);
 	} else {
@@ -640,8 +665,10 @@ static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
 {
 	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
 	struct sk_buff *skb = NULL;
+	bool need_retry = true;
 	int band;
 
+retry:
 	for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) {
 		struct skb_array *q = band2list(priv, band);
 
@@ -652,6 +679,23 @@ static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
 	}
 	if (likely(skb)) {
 		qdisc_update_stats_at_dequeue(qdisc, skb);
+	} else if (need_retry &&
+		   test_bit(__QDISC_STATE_MISSED, &qdisc->state)) {
+		/* Delay clearing the STATE_MISSED here to reduce
+		 * the overhead of the second spin_trylock() in
+		 * qdisc_run_begin() and __netif_schedule() calling
+		 * in qdisc_run_end().
+		 */
+		clear_bit(__QDISC_STATE_MISSED, &qdisc->state);
+
+		/* Make sure dequeuing happens after clearing
+		 * STATE_MISSED.
+		 */
+		smp_mb__after_atomic();
+
+		need_retry = false;
+
+		goto retry;
 	} else {
 		WRITE_ONCE(qdisc->empty, true);
 	}
@@ -1158,8 +1202,10 @@ static void dev_reset_queue(struct net_device *dev,
 	qdisc_reset(qdisc);
 
 	spin_unlock_bh(qdisc_lock(qdisc));
-	if (nolock)
+	if (nolock) {
+		clear_bit(__QDISC_STATE_MISSED, &qdisc->state);
 		spin_unlock_bh(&qdisc->seqlock);
+	}
 }
 
 static bool some_qdisc_is_busy(struct net_device *dev)
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 081c11d5717c..8827987ba903 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -1488,7 +1488,8 @@ static void htb_parent_to_leaf_offload(struct Qdisc *sch,
 	struct Qdisc *old_q;
 
 	/* One ref for cl->leaf.q, the other for dev_queue->qdisc. */
-	qdisc_refcount_inc(new_q);
+	if (new_q)
+		qdisc_refcount_inc(new_q);
 	old_q = htb_graft_helper(dev_queue, new_q);
 	WARN_ON(!(old_q->flags & TCQ_F_BUILTIN));
 }
@@ -1675,10 +1676,9 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg,
 					  cl->parent->common.classid,
 					  NULL);
 		if (q->offload) {
-			if (new_q) {
+			if (new_q)
 				htb_set_lockdep_class_child(new_q);
-				htb_parent_to_leaf_offload(sch, dev_queue, new_q);
-			}
+			htb_parent_to_leaf_offload(sch, dev_queue, new_q);
 		}
 	}
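Most of the sch_generic churn implements one protocol around __QDISC_STATE_MISSED for lockless (TCQ_F_NOLOCK) qdiscs: a sender that loses the seqlock race records a missed run, and whoever holds the lock either re-dequeues or reschedules net_tx_action() on release. qdisc_maybe_clear_missed() is the dequeue-side half: it clears a stale bit while the driver queue is frozen or stopped, so the softirq is not rescheduled in a busy loop, and re-sets it if the queue woke up in between. A simplified, hedged sketch of the lock-acquisition side this pairs with; the real qdisc_run_begin() lives in include/net/sch_generic.h and handles more cases than shown here:

	static inline bool nolock_qdisc_run_begin(struct Qdisc *qdisc)
	{
		if (spin_trylock(&qdisc->seqlock))
			return true;

		/* Someone else already recorded a missed run; the lock
		 * holder will observe the bit on release and reschedule
		 * net_tx_action(), so avoid redundant set_bit()/trylock
		 * traffic from many CPUs.
		 */
		if (test_bit(__QDISC_STATE_MISSED, &qdisc->state))
			return false;

		/* Record the miss before retrying: either the retry takes
		 * the lock, or the current holder is guaranteed to see
		 * MISSED after unlocking and will __netif_schedule() for us.
		 */
		set_bit(__QDISC_STATE_MISSED, &qdisc->state);

		return spin_trylock(&qdisc->seqlock);
	}

The dev_reset_queue() hunk clears MISSED while deactivating a device for the same reason: a leftover bit must not reschedule net_tx_action() against a qdisc that is being torn down.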