diff options
author | Stephen Hemminger <stephen@networkplumber.org> | 2017-11-14 11:27:01 -0800 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2017-11-15 14:14:16 +0900 |
commit | bce552fd6f6e37f9567c85c4f0d6d1987eef379f (patch) | |
tree | f8c52d452828447346c96577b5d52ac9ca75bf3e /net/sched | |
parent | 6670e152447732ba90626f36dfc015a13fbf150e (diff) |
netem: use 64 bit divide by rate
Since times are now expressed in nanoseconds, we need to do a
true 64-bit divide. The old code would truncate the rate at 32 bits.
Rename the function to better express its current usage.
Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/sched')
-rw-r--r-- | net/sched/sch_netem.c | 11 |
1 file changed, 4 insertions, 7 deletions
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index b686e755fda9..644323d6081c 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c @@ -339,10 +339,8 @@ static s64 tabledist(s64 mu, s64 sigma, return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu; } -static u64 packet_len_2_sched_time(unsigned int len, - struct netem_sched_data *q) +static u64 packet_time_ns(u64 len, const struct netem_sched_data *q) { - u64 offset; len += q->packet_overhead; if (q->cell_size) { @@ -352,9 +350,8 @@ static u64 packet_len_2_sched_time(unsigned int len, cells++; len = cells * (q->cell_size + q->cell_overhead); } - offset = (u64)len * NSEC_PER_SEC; - do_div(offset, q->rate); - return offset; + + return div64_u64(len * NSEC_PER_SEC, q->rate); } static void tfifo_reset(struct Qdisc *sch) @@ -556,7 +553,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch, now = last->time_to_send; } - delay += packet_len_2_sched_time(qdisc_pkt_len(skb), q); + delay += packet_time_ns(qdisc_pkt_len(skb), q); } cb->time_to_send = now + delay; |