author    Paolo Abeni <pabeni@redhat.com>    2019-03-20 11:02:04 +0100
committer David S. Miller <davem@davemloft.net>    2019-03-20 11:18:54 -0700
commit    4bd97d51a5e602ea1fbdab8c2d653513dea17115 (patch)
tree      e805f47e700c42818aa07cb32d8fa7d2f0922920 /net/core
parent    0b963ef20c5624c4cc6c4ef408b301a24b26b96b (diff)
net: dev: rename queue selection helpers.
With the following patches, we are going to use __netdev_pick_tx() in many modules. Rename it to netdev_pick_tx(), to make it clear it is a public API. Also rename the existing netdev_pick_tx() to netdev_core_pick_tx(), to avoid name clashes.

Suggested-by: Eric Dumazet <edumazet@google.com>
Suggested-by: David Miller <davem@davemloft.net>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
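To illustrate the intent of the rename, here is a minimal, hypothetical driver sketch (not part of this patch). It assumes the follow-up patches in this series export netdev_pick_tx() with the (dev, skb, sb_dev) signature shown in the diff below so that drivers can call it directly from .ndo_select_queue; the driver name foo_select_queue, the TC_PRIO_CONTROL check, and the reserved last queue are made up for illustration only.

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/pkt_sched.h>

/* Hypothetical .ndo_select_queue implementation: steer control-priority
 * frames to a driver-reserved queue and defer everything else to the
 * stack's default selection, which after this rename is spelled
 * netdev_pick_tx() (socket-cached queue, XPS, flow hash, etc.).
 */
static u16 foo_select_queue(struct net_device *dev, struct sk_buff *skb,
			    struct net_device *sb_dev)
{
	if (skb->priority == TC_PRIO_CONTROL)
		return dev->real_num_tx_queues - 1;

	return netdev_pick_tx(dev, skb, sb_dev);
}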
Diffstat (limited to 'net/core')
-rw-r--r--  net/core/dev.c      18
-rw-r--r--  net/core/netpoll.c   2
2 files changed, 10 insertions(+), 10 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 2b67f2aa59dd..5dd3e3f7dd12 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3704,8 +3704,8 @@ u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
}
EXPORT_SYMBOL(dev_pick_tx_cpu_id);
-static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev)
+static u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev)
{
struct sock *sk = skb->sk;
int queue_index = sk_tx_queue_get(sk);
@@ -3730,9 +3730,9 @@ static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
return queue_index;
}
-struct netdev_queue *netdev_pick_tx(struct net_device *dev,
- struct sk_buff *skb,
- struct net_device *sb_dev)
+struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
+ struct sk_buff *skb,
+ struct net_device *sb_dev)
{
int queue_index = 0;
@@ -3748,9 +3748,9 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev,
if (ops->ndo_select_queue)
queue_index = ops->ndo_select_queue(dev, skb, sb_dev,
- __netdev_pick_tx);
+ netdev_pick_tx);
else
- queue_index = __netdev_pick_tx(dev, skb, sb_dev);
+ queue_index = netdev_pick_tx(dev, skb, sb_dev);
queue_index = netdev_cap_txqueue(dev, queue_index);
}
@@ -3824,7 +3824,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
else
skb_dst_force(skb);
- txq = netdev_pick_tx(dev, skb, sb_dev);
+ txq = netdev_core_pick_tx(dev, skb, sb_dev);
q = rcu_dereference_bh(txq->qdisc);
trace_net_dev_queue(skb);
@@ -4429,7 +4429,7 @@ void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog)
bool free_skb = true;
int cpu, rc;
- txq = netdev_pick_tx(dev, skb, NULL);
+ txq = netdev_core_pick_tx(dev, skb, NULL);
cpu = smp_processor_id();
HARD_TX_LOCK(dev, txq, cpu);
if (!netif_xmit_stopped(txq)) {
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 361aabffb8c0..e365e8fb1c40 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -323,7 +323,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
struct netdev_queue *txq;
- txq = netdev_pick_tx(dev, skb, NULL);
+ txq = netdev_core_pick_tx(dev, skb, NULL);
/* try until next clock tick */
for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;