Diffstat (limited to 'net/sched/sch_red.c')
-rw-r--r--  net/sched/sch_red.c  |  10
1 file changed, 5 insertions, 5 deletions
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index de2be4d04ed6..4cc0ad0b1189 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -67,7 +67,7 @@ static int red_use_nodrop(struct red_sched_data *q)
 	return q->flags & TC_RED_NODROP;
 }
 
-static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
+static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 		       struct sk_buff **to_free)
 {
 	struct red_sched_data *q = qdisc_priv(sch);
@@ -94,7 +94,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_
 
 		if (INET_ECN_set_ce(skb)) {
 			q->stats.prob_mark++;
-			skb = tcf_qevent_handle(&q->qe_mark, sch, skb, root_lock, to_free, &ret);
+			skb = tcf_qevent_handle(&q->qe_mark, sch, skb, to_free, &ret);
 			if (!skb)
 				return NET_XMIT_CN | ret;
 		} else if (!red_use_nodrop(q)) {
@@ -114,7 +114,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_
 
 		if (INET_ECN_set_ce(skb)) {
 			q->stats.forced_mark++;
-			skb = tcf_qevent_handle(&q->qe_mark, sch, skb, root_lock, to_free, &ret);
+			skb = tcf_qevent_handle(&q->qe_mark, sch, skb, to_free, &ret);
 			if (!skb)
 				return NET_XMIT_CN | ret;
 		} else if (!red_use_nodrop(q)) {
@@ -126,7 +126,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_
 		break;
 	}
 
-	ret = qdisc_enqueue(skb, child, root_lock, to_free);
+	ret = qdisc_enqueue(skb, child, to_free);
 	if (likely(ret == NET_XMIT_SUCCESS)) {
 		qdisc_qstats_backlog_inc(sch, skb);
 		sch->q.qlen++;
@@ -137,7 +137,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_
 	return ret;
 
 congestion_drop:
-	skb = tcf_qevent_handle(&q->qe_early_drop, sch, skb, root_lock, to_free, &ret);
+	skb = tcf_qevent_handle(&q->qe_early_drop, sch, skb, to_free, &ret);
 	if (!skb)
 		return NET_XMIT_CN | ret;
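
For context only, not part of this diff: once the unused root_lock argument is dropped, the call sites above line up with the three-argument enqueue path. The following is a rough paraphrase of the relevant prototypes as they read in include/net/sch_generic.h and include/net/pkt_cls.h around the time of this change; treat the tree itself as authoritative.

	/* Paraphrased sketch; see include/net/sch_generic.h. The helper
	 * records the packet length and hands the skb to the child
	 * qdisc's ->enqueue() without any root-lock argument. */
	static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
					struct sk_buff **to_free)
	{
		qdisc_calculate_pkt_len(skb, sch);
		return sch->enqueue(skb, sch, to_free);
	}

	/* Paraphrased sketch; see include/net/pkt_cls.h. The qevent
	 * handler likewise takes only the qdisc, the skb, the to_free
	 * list and a result pointer. */
	struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch,
					  struct sk_buff *skb,
					  struct sk_buff **to_free, int *ret);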