Diffstat (limited to 'net/core')
-rw-r--r--  net/core/datagram.c         |  10
-rw-r--r--  net/core/datagram.h         |  15
-rw-r--r--  net/core/dev.c              |  82
-rw-r--r--  net/core/dev_ioctl.c        |   3
-rw-r--r--  net/core/devlink.c          | 184
-rw-r--r--  net/core/dst.c              |  17
-rw-r--r--  net/core/ethtool.c          |   2
-rw-r--r--  net/core/filter.c           | 377
-rw-r--r--  net/core/flow_dissector.c   |   2
-rw-r--r--  net/core/lwt_bpf.c          |   1
-rw-r--r--  net/core/net-sysfs.c        |   4
-rw-r--r--  net/core/netpoll.c          |   2
-rw-r--r--  net/core/rtnetlink.c        |   9
-rw-r--r--  net/core/skbuff.c           |   5
-rw-r--r--  net/core/sock_reuseport.c   |   2
-rw-r--r--  net/core/sysctl_net_core.c  |   8
16 files changed, 525 insertions, 198 deletions
diff --git a/net/core/datagram.c b/net/core/datagram.c
index e657289db4ac..91bb5a083fee 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -61,6 +61,8 @@
#include <trace/events/skb.h>
#include <net/busy_poll.h>
+#include "datagram.h"
+
/*
* Is a socket 'connection oriented' ?
*/
@@ -408,10 +410,10 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
}
EXPORT_SYMBOL(skb_kill_datagram);
-int __skb_datagram_iter(const struct sk_buff *skb, int offset,
- struct iov_iter *to, int len, bool fault_short,
- size_t (*cb)(const void *, size_t, void *, struct iov_iter *),
- void *data)
+static int __skb_datagram_iter(const struct sk_buff *skb, int offset,
+ struct iov_iter *to, int len, bool fault_short,
+ size_t (*cb)(const void *, size_t, void *,
+ struct iov_iter *), void *data)
{
int start = skb_headlen(skb);
int i, copy = start - offset, start_off = offset, n;
diff --git a/net/core/datagram.h b/net/core/datagram.h
new file mode 100644
index 000000000000..bcfb75bfa3b2
--- /dev/null
+++ b/net/core/datagram.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _NET_CORE_DATAGRAM_H_
+#define _NET_CORE_DATAGRAM_H_
+
+#include <linux/types.h>
+
+struct sock;
+struct sk_buff;
+struct iov_iter;
+
+int __zerocopy_sg_from_iter(struct sock *sk, struct sk_buff *skb,
+ struct iov_iter *from, size_t length);
+
+#endif /* _NET_CORE_DATAGRAM_H_ */
diff --git a/net/core/dev.c b/net/core/dev.c
index fdcff29df915..b430f851f377 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -131,7 +131,6 @@
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
-#include <linux/pci.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/static_key.h>
@@ -146,6 +145,7 @@
#include <net/udp_tunnel.h>
#include <linux/net_namespace.h>
#include <linux/indirect_call_wrapper.h>
+#include <net/devlink.h>
#include "net-sysfs.h"
@@ -3468,6 +3468,15 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
__qdisc_drop(skb, &to_free);
rc = NET_XMIT_DROP;
+ } else if ((q->flags & TCQ_F_CAN_BYPASS) && q->empty &&
+ qdisc_run_begin(q)) {
+ qdisc_bstats_cpu_update(q, skb);
+
+ if (sch_direct_xmit(skb, q, dev, txq, NULL, true))
+ __qdisc_run(q);
+
+ qdisc_run_end(q);
+ rc = NET_XMIT_SUCCESS;
} else {
rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
qdisc_run(q);
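
Note: the new branch above lets a packet bypass the qdisc entirely when TCQ_F_CAN_BYPASS is set, the queue is empty, and this CPU wins qdisc_run_begin(). A simplified model of the serialization that makes this safe, assuming the trylock form used by lockless (TCQ_F_NOLOCK) qdiscs; the real helpers in include/net/sch_generic.h also handle the locked case:

        /* Sketch: only one CPU may run a given qdisc at a time; losers
         * of the trylock fall back to the normal enqueue path.
         */
        static inline bool qdisc_run_begin(struct Qdisc *qdisc)
        {
                return spin_trylock(&qdisc->seqlock);
        }

        static inline void qdisc_run_end(struct Qdisc *qdisc)
        {
                spin_unlock(&qdisc->seqlock);
        }
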
@@ -3556,9 +3565,6 @@ static void skb_update_prio(struct sk_buff *skb)
#define skb_update_prio(skb)
#endif
-DEFINE_PER_CPU(int, xmit_recursion);
-EXPORT_SYMBOL(xmit_recursion);
-
/**
* dev_loopback_xmit - loop back @skb
* @net: network namespace this loopback is happening in
@@ -3689,23 +3695,21 @@ get_cpus_map:
}
u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback)
+ struct net_device *sb_dev)
{
return 0;
}
EXPORT_SYMBOL(dev_pick_tx_zero);
u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback)
+ struct net_device *sb_dev)
{
return (u16)raw_smp_processor_id() % dev->real_num_tx_queues;
}
EXPORT_SYMBOL(dev_pick_tx_cpu_id);
-static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev)
+u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev)
{
struct sock *sk = skb->sk;
int queue_index = sk_tx_queue_get(sk);
@@ -3729,10 +3733,11 @@ static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
return queue_index;
}
+EXPORT_SYMBOL(netdev_pick_tx);
-struct netdev_queue *netdev_pick_tx(struct net_device *dev,
- struct sk_buff *skb,
- struct net_device *sb_dev)
+struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
+ struct sk_buff *skb,
+ struct net_device *sb_dev)
{
int queue_index = 0;
@@ -3747,10 +3752,9 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev,
const struct net_device_ops *ops = dev->netdev_ops;
if (ops->ndo_select_queue)
- queue_index = ops->ndo_select_queue(dev, skb, sb_dev,
- __netdev_pick_tx);
+ queue_index = ops->ndo_select_queue(dev, skb, sb_dev);
else
- queue_index = __netdev_pick_tx(dev, skb, sb_dev);
+ queue_index = netdev_pick_tx(dev, skb, sb_dev);
queue_index = netdev_cap_txqueue(dev, queue_index);
}
@@ -3824,7 +3828,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
else
skb_dst_force(skb);
- txq = netdev_pick_tx(dev, skb, sb_dev);
+ txq = netdev_core_pick_tx(dev, skb, sb_dev);
q = rcu_dereference_bh(txq->qdisc);
trace_net_dev_queue(skb);
@@ -3849,8 +3853,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
int cpu = smp_processor_id(); /* ok because BHs are off */
if (txq->xmit_lock_owner != cpu) {
- if (unlikely(__this_cpu_read(xmit_recursion) >
- XMIT_RECURSION_LIMIT))
+ if (dev_xmit_recursion())
goto recursion_alert;
skb = validate_xmit_skb(skb, dev, &again);
@@ -3860,9 +3863,9 @@ static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
HARD_TX_LOCK(dev, txq, cpu);
if (!netif_xmit_stopped(txq)) {
- __this_cpu_inc(xmit_recursion);
+ dev_xmit_recursion_inc();
skb = dev_hard_start_xmit(skb, dev, txq, &rc);
- __this_cpu_dec(xmit_recursion);
+ dev_xmit_recursion_dec();
if (dev_xmit_complete(rc)) {
HARD_TX_UNLOCK(dev, txq);
goto out;
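
Note: the open-coded xmit_recursion per-CPU counter is gone; __dev_queue_xmit() now uses dev_xmit_recursion() helpers. A sketch of what those helpers amount to, assuming the counter was folded into per-CPU softnet state (the actual definitions live in the netdevice header, not in this diff):

        #define XMIT_RECURSION_LIMIT    8

        static inline bool dev_xmit_recursion(void)
        {
                return unlikely(__this_cpu_read(softnet_data.xmit.recursion) >
                                XMIT_RECURSION_LIMIT);
        }

        static inline void dev_xmit_recursion_inc(void)
        {
                __this_cpu_inc(softnet_data.xmit.recursion);
        }

        static inline void dev_xmit_recursion_dec(void)
        {
                __this_cpu_dec(softnet_data.xmit.recursion);
        }
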
@@ -3975,9 +3978,9 @@ EXPORT_SYMBOL(rps_sock_flow_table);
u32 rps_cpu_mask __read_mostly;
EXPORT_SYMBOL(rps_cpu_mask);
-struct static_key rps_needed __read_mostly;
+struct static_key_false rps_needed __read_mostly;
EXPORT_SYMBOL(rps_needed);
-struct static_key rfs_needed __read_mostly;
+struct static_key_false rfs_needed __read_mostly;
EXPORT_SYMBOL(rfs_needed);
static struct rps_dev_flow *
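
Note: rps_needed and rfs_needed move from the legacy static_key API to static_key_false, so readers can use static_branch_unlikely(), which compiles to a patched no-op jump while RPS is disabled. The general usage pattern of this API (example_key and do_slow_path() are placeholders, not code from this patch):

        DEFINE_STATIC_KEY_FALSE(example_key);

        /* read side: free when disabled, patched to a jump when enabled */
        if (static_branch_unlikely(&example_key))
                do_slow_path();

        /* write side: enable/disable by reference count */
        static_branch_inc(&example_key);
        static_branch_dec(&example_key);
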
@@ -4429,7 +4432,7 @@ void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog)
bool free_skb = true;
int cpu, rc;
- txq = netdev_pick_tx(dev, skb, NULL);
+ txq = netdev_core_pick_tx(dev, skb, NULL);
cpu = smp_processor_id();
HARD_TX_LOCK(dev, txq, cpu);
if (!netif_xmit_stopped(txq)) {
@@ -4503,7 +4506,7 @@ static int netif_rx_internal(struct sk_buff *skb)
}
#ifdef CONFIG_RPS
- if (static_key_false(&rps_needed)) {
+ if (static_branch_unlikely(&rps_needed)) {
struct rps_dev_flow voidflow, *rflow = &voidflow;
int cpu;
@@ -5174,7 +5177,7 @@ static int netif_receive_skb_internal(struct sk_buff *skb)
rcu_read_lock();
#ifdef CONFIG_RPS
- if (static_key_false(&rps_needed)) {
+ if (static_branch_unlikely(&rps_needed)) {
struct rps_dev_flow voidflow, *rflow = &voidflow;
int cpu = get_rps_cpu(skb->dev, skb, &rflow);
@@ -5222,7 +5225,7 @@ static void netif_receive_skb_list_internal(struct list_head *head)
rcu_read_lock();
#ifdef CONFIG_RPS
- if (static_key_false(&rps_needed)) {
+ if (static_branch_unlikely(&rps_needed)) {
list_for_each_entry_safe(skb, next, head, list) {
struct rps_dev_flow voidflow, *rflow = &voidflow;
int cpu = get_rps_cpu(skb->dev, skb, &rflow);
@@ -7872,10 +7875,14 @@ int dev_get_phys_port_name(struct net_device *dev,
char *name, size_t len)
{
const struct net_device_ops *ops = dev->netdev_ops;
+ int err;
- if (!ops->ndo_get_phys_port_name)
- return -EOPNOTSUPP;
- return ops->ndo_get_phys_port_name(dev, name, len);
+ if (ops->ndo_get_phys_port_name) {
+ err = ops->ndo_get_phys_port_name(dev, name, len);
+ if (err != -EOPNOTSUPP)
+ return err;
+ }
+ return devlink_compat_phys_port_name_get(dev, name, len);
}
EXPORT_SYMBOL(dev_get_phys_port_name);
@@ -7895,14 +7902,21 @@ int dev_get_port_parent_id(struct net_device *dev,
struct netdev_phys_item_id first = { };
struct net_device *lower_dev;
struct list_head *iter;
- int err = -EOPNOTSUPP;
+ int err;
- if (ops->ndo_get_port_parent_id)
- return ops->ndo_get_port_parent_id(dev, ppid);
+ if (ops->ndo_get_port_parent_id) {
+ err = ops->ndo_get_port_parent_id(dev, ppid);
+ if (err != -EOPNOTSUPP)
+ return err;
+ }
- if (!recurse)
+ err = devlink_compat_switch_id_get(dev, ppid);
+ if (!err || err != -EOPNOTSUPP)
return err;
+ if (!recurse)
+ return -EOPNOTSUPP;
+
netdev_for_each_lower_dev(dev, lower_dev, iter) {
err = dev_get_port_parent_id(lower_dev, ppid, recurse);
if (err)
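
Note: both dev_get_phys_port_name() and dev_get_port_parent_id() now fall through to devlink when the driver either lacks the ndo or returns -EOPNOTSUPP from it. Reassembled from the hunk above, the resulting name lookup reads:

        int dev_get_phys_port_name(struct net_device *dev, char *name, size_t len)
        {
                const struct net_device_ops *ops = dev->netdev_ops;
                int err;

                if (ops->ndo_get_phys_port_name) {
                        err = ops->ndo_get_phys_port_name(dev, name, len);
                        if (err != -EOPNOTSUPP)
                                return err;     /* the driver handled it */
                }
                /* fall back to the name derived from devlink port attributes */
                return devlink_compat_phys_port_name_get(dev, name, len);
        }

so drivers that register a devlink_port can drop their private implementations.
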
diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
index 31380fd5a4e2..5163d900bb4f 100644
--- a/net/core/dev_ioctl.c
+++ b/net/core/dev_ioctl.c
@@ -366,7 +366,8 @@ EXPORT_SYMBOL(dev_load);
* dev_ioctl - network device ioctl
* @net: the applicable net namespace
* @cmd: command to issue
- * @arg: pointer to a struct ifreq in user space
+ * @ifr: pointer to a struct ifreq in user space
+ * @need_copyout: whether or not copy_to_user() should be called
*
* Issue ioctl functions to devices. This is normally called by the
* user space syscall interfaces but can sometimes be useful for
diff --git a/net/core/devlink.c b/net/core/devlink.c
index da0a29f30885..b2715a187a11 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -19,6 +19,7 @@
#include <linux/device.h>
#include <linux/list.h>
#include <linux/netdevice.h>
+#include <linux/spinlock.h>
#include <rdma/ib_verbs.h>
#include <net/netlink.h>
#include <net/genetlink.h>
@@ -543,12 +544,14 @@ static int devlink_nl_port_fill(struct sk_buff *msg, struct devlink *devlink,
goto nla_put_failure;
if (nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX, devlink_port->index))
goto nla_put_failure;
+
+ spin_lock(&devlink_port->type_lock);
if (nla_put_u16(msg, DEVLINK_ATTR_PORT_TYPE, devlink_port->type))
- goto nla_put_failure;
+ goto nla_put_failure_type_locked;
if (devlink_port->desired_type != DEVLINK_PORT_TYPE_NOTSET &&
nla_put_u16(msg, DEVLINK_ATTR_PORT_DESIRED_TYPE,
devlink_port->desired_type))
- goto nla_put_failure;
+ goto nla_put_failure_type_locked;
if (devlink_port->type == DEVLINK_PORT_TYPE_ETH) {
struct net_device *netdev = devlink_port->type_dev;
@@ -557,7 +560,7 @@ static int devlink_nl_port_fill(struct sk_buff *msg, struct devlink *devlink,
netdev->ifindex) ||
nla_put_string(msg, DEVLINK_ATTR_PORT_NETDEV_NAME,
netdev->name)))
- goto nla_put_failure;
+ goto nla_put_failure_type_locked;
}
if (devlink_port->type == DEVLINK_PORT_TYPE_IB) {
struct ib_device *ibdev = devlink_port->type_dev;
@@ -565,14 +568,17 @@ static int devlink_nl_port_fill(struct sk_buff *msg, struct devlink *devlink,
if (ibdev &&
nla_put_string(msg, DEVLINK_ATTR_PORT_IBDEV_NAME,
ibdev->name))
- goto nla_put_failure;
+ goto nla_put_failure_type_locked;
}
+ spin_unlock(&devlink_port->type_lock);
if (devlink_nl_port_attrs_put(msg, devlink_port))
goto nla_put_failure;
genlmsg_end(msg, hdr);
return 0;
+nla_put_failure_type_locked:
+ spin_unlock(&devlink_port->type_lock);
nla_put_failure:
genlmsg_cancel(msg, hdr);
return -EMSGSIZE;
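
Note: devlink_port->type and type_dev are now written and read under the new per-port type_lock (initialized in devlink_port_register(), taken in __devlink_port_type_set()), so the fill path above can never serialize a type value that does not match type_dev. The writer side, consolidated from the later hunk:

        spin_lock(&devlink_port->type_lock);
        devlink_port->type = type;
        devlink_port->type_dev = type_dev;
        spin_unlock(&devlink_port->type_lock);
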
@@ -3640,7 +3646,6 @@ static int devlink_nl_cmd_region_read_dumpit(struct sk_buff *skb,
struct netlink_callback *cb)
{
u64 ret_offset, start_offset, end_offset = 0;
- const struct genl_ops *ops = cb->data;
struct devlink_region *region;
struct nlattr *chunks_attr;
const char *region_name;
@@ -3657,7 +3662,8 @@ static int devlink_nl_cmd_region_read_dumpit(struct sk_buff *skb,
return -ENOMEM;
err = nlmsg_parse(cb->nlh, GENL_HDRLEN + devlink_nl_family.hdrsize,
- attrs, DEVLINK_ATTR_MAX, ops->policy, cb->extack);
+ attrs, DEVLINK_ATTR_MAX, devlink_nl_family.policy,
+ cb->extack);
if (err)
goto out_free;
@@ -4491,6 +4497,7 @@ devlink_health_reporter_destroy(struct devlink_health_reporter *reporter)
{
mutex_lock(&reporter->devlink->lock);
list_del(&reporter->list);
+ mutex_destroy(&reporter->dump_lock);
mutex_unlock(&reporter->devlink->lock);
if (reporter->dump_fmsg)
devlink_fmsg_free(reporter->dump_fmsg);
@@ -4928,7 +4935,6 @@ static const struct genl_ops devlink_nl_ops[] = {
.cmd = DEVLINK_CMD_GET,
.doit = devlink_nl_cmd_get_doit,
.dumpit = devlink_nl_cmd_get_dumpit,
- .policy = devlink_nl_policy,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
/* can be retrieved by unprivileged users */
},
@@ -4936,21 +4942,18 @@ static const struct genl_ops devlink_nl_ops[] = {
.cmd = DEVLINK_CMD_PORT_GET,
.doit = devlink_nl_cmd_port_get_doit,
.dumpit = devlink_nl_cmd_port_get_dumpit,
- .policy = devlink_nl_policy,
.internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
/* can be retrieved by unprivileged users */
},
{
.cmd = DEVLINK_CMD_PORT_SET,
.doit = devlink_nl_cmd_port_set_doit,
- .policy = devlink_nl_policy,
.flags = GENL_ADMIN_PERM,
.internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
},
{
.cmd = DEVLINK_CMD_PORT_SPLIT,
.doit = devlink_nl_cmd_port_split_doit,
- .policy = devlink_nl_policy,
.flags = GENL_ADMIN_PERM,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
DEVLINK_NL_FLAG_NO_LOCK,
@@ -4958,7 +4961,6 @@ static const struct genl_ops devlink_nl_ops[] = {
{
.cmd = DEVLINK_CMD_PORT_UNSPLIT,
.doit = devlink_nl_cmd_port_unsplit_doit,
- .policy = devlink_nl_policy,
.flags = GENL_ADMIN_PERM,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
DEVLINK_NL_FLAG_NO_LOCK,
@@ -4967,7 +4969,6 @@ static const struct genl_ops devlink_nl_ops[] = {
.cmd = DEVLINK_CMD_SB_GET,
.doit = devlink_nl_cmd_sb_get_doit,
.dumpit = devlink_nl_cmd_sb_get_dumpit,
- .policy = devlink_nl_policy,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
DEVLINK_NL_FLAG_NEED_SB,
/* can be retrieved by unprivileged users */
@@ -4976,7 +4977,6 @@ static const struct genl_ops devlink_nl_ops[] = {
.cmd = DEVLINK_CMD_SB_POOL_GET,
.doit = devlink_nl_cmd_sb_pool_get_doit,
.dumpit = devlink_nl_cmd_sb_pool_get_dumpit,
- .policy = devlink_nl_policy,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
DEVLINK_NL_FLAG_NEED_SB,
/* can be retrieved by unprivileged users */
@@ -4984,7 +4984,6 @@ static const struct genl_ops devlink_nl_ops[] = {
{
.cmd = DEVLINK_CMD_SB_POOL_SET,
.doit = devlink_nl_cmd_sb_pool_set_doit,
- .policy = devlink_nl_policy,
.flags = GENL_ADMIN_PERM,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
DEVLINK_NL_FLAG_NEED_SB,
@@ -4993,7 +4992,6 @@ static const struct genl_ops devlink_nl_ops[] = {
.cmd = DEVLINK_CMD_SB_PORT_POOL_GET,
.doit = devlink_nl_cmd_sb_port_pool_get_doit,
.dumpit = devlink_nl_cmd_sb_port_pool_get_dumpit,
- .policy = devlink_nl_policy,
.internal_flags = DEVLINK_NL_FLAG_NEED_PORT |
DEVLINK_NL_FLAG_NEED_SB,
/* can be retrieved by unprivileged users */
@@ -5001,7 +4999,6 @@ static const struct genl_ops devlink_nl_ops[] = {
{
.cmd = DEVLINK_CMD_SB_PORT_POOL_SET,
.doit = devlink_nl_cmd_sb_port_pool_set_doit,
- .policy = devlink_nl_policy,
.flags = GENL_ADMIN_PERM,
.internal_flags = DEVLINK_NL_FLAG_NEED_PORT |
DEVLINK_NL_FLAG_NEED_SB,
@@ -5010,7 +5007,6 @@ static const struct genl_ops devlink_nl_ops[] = {
.cmd = DEVLINK_CMD_SB_TC_POOL_BIND_GET,
.doit = devlink_nl_cmd_sb_tc_pool_bind_get_doit,
.dumpit = devlink_nl_cmd_sb_tc_pool_bind_get_dumpit,
- .policy = devlink_nl_policy,
.internal_flags = DEVLINK_NL_FLAG_NEED_PORT |
DEVLINK_NL_FLAG_NEED_SB,
/* can be retrieved by unprivileged users */
@@ -5018,7 +5014,6 @@ static const struct genl_ops devlink_nl_ops[] = {
{
.cmd = DEVLINK_CMD_SB_TC_POOL_BIND_SET,
.doit = devlink_nl_cmd_sb_tc_pool_bind_set_doit,
- .policy = devlink_nl_policy,
.flags = GENL_ADMIN_PERM,
.internal_flags = DEVLINK_NL_FLAG_NEED_PORT |
DEVLINK_NL_FLAG_NEED_SB,
@@ -5026,7 +5021,6 @@ static const struct genl_ops devlink_nl_ops[] = {
{
.cmd = DEVLINK_CMD_SB_OCC_SNAPSHOT,
.doit = devlink_nl_cmd_sb_occ_snapshot_doit,
- .policy = devlink_nl_policy,
.flags = GENL_ADMIN_PERM,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
DEVLINK_NL_FLAG_NEED_SB,
@@ -5034,7 +5028,6 @@ static const struct genl_ops devlink_nl_ops[] = {
{
.cmd = DEVLINK_CMD_SB_OCC_MAX_CLEAR,
.doit = devlink_nl_cmd_sb_occ_max_clear_doit,
- .policy = devlink_nl_policy,
.flags = GENL_ADMIN_PERM,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
DEVLINK_NL_FLAG_NEED_SB,
@@ -5042,14 +5035,12 @@ static const struct genl_ops devlink_nl_ops[] = {
{
.cmd = DEVLINK_CMD_ESWITCH_GET,
.doit = devlink_nl_cmd_eswitch_get_doit,
- .policy = devlink_nl_policy,
.flags = GENL_ADMIN_PERM,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
},
{
.cmd = DEVLINK_CMD_ESWITCH_SET,
.doit = devlink_nl_cmd_eswitch_set_doit,
- .policy = devlink_nl_policy,
.flags = GENL_ADMIN_PERM,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
DEVLINK_NL_FLAG_NO_LOCK,
@@ -5057,49 +5048,42 @@ static const struct genl_ops devlink_nl_ops[] = {
{
.cmd = DEVLINK_CMD_DPIPE_TABLE_GET,
.doit = devlink_nl_cmd_dpipe_table_get,
- .policy = devlink_nl_policy,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
/* can be retrieved by unprivileged users */
},
{
.cmd = DEVLINK_CMD_DPIPE_ENTRIES_GET,
.doit = devlink_nl_cmd_dpipe_entries_get,
- .policy = devlink_nl_policy,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
/* can be retrieved by unprivileged users */
},
{
.cmd = DEVLINK_CMD_DPIPE_HEADERS_GET,
.doit = devlink_nl_cmd_dpipe_headers_get,
- .policy = devlink_nl_policy,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
/* can be retrieved by unprivileged users */
},
{
.cmd = DEVLINK_CMD_DPIPE_TABLE_COUNTERS_SET,
.doit = devlink_nl_cmd_dpipe_table_counters_set,
- .policy = devlink_nl_policy,
.flags = GENL_ADMIN_PERM,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
},
{
.cmd = DEVLINK_CMD_RESOURCE_SET,
.doit = devlink_nl_cmd_resource_set,
- .policy = devlink_nl_policy,
.flags = GENL_ADMIN_PERM,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
},
{
.cmd = DEVLINK_CMD_RESOURCE_DUMP,
.doit = devlink_nl_cmd_resource_dump,
- .policy = devlink_nl_policy,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
/* can be retrieved by unprivileged users */
},
{
.cmd = DEVLINK_CMD_RELOAD,
.doit = devlink_nl_cmd_reload,
- .policy = devlink_nl_policy,
.flags = GENL_ADMIN_PERM,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
DEVLINK_NL_FLAG_NO_LOCK,
@@ -5108,14 +5092,12 @@ static const struct genl_ops devlink_nl_ops[] = {
.cmd = DEVLINK_CMD_PARAM_GET,
.doit = devlink_nl_cmd_param_get_doit,
.dumpit = devlink_nl_cmd_param_get_dumpit,
- .policy = devlink_nl_policy,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
/* can be retrieved by unprivileged users */
},
{
.cmd = DEVLINK_CMD_PARAM_SET,
.doit = devlink_nl_cmd_param_set_doit,
- .policy = devlink_nl_policy,
.flags = GENL_ADMIN_PERM,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
},
@@ -5123,14 +5105,12 @@ static const struct genl_ops devlink_nl_ops[] = {
.cmd = DEVLINK_CMD_PORT_PARAM_GET,
.doit = devlink_nl_cmd_port_param_get_doit,
.dumpit = devlink_nl_cmd_port_param_get_dumpit,
- .policy = devlink_nl_policy,
.internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
/* can be retrieved by unprivileged users */
},
{
.cmd = DEVLINK_CMD_PORT_PARAM_SET,
.doit = devlink_nl_cmd_port_param_set_doit,
- .policy = devlink_nl_policy,
.flags = GENL_ADMIN_PERM,
.internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
},
@@ -5138,21 +5118,18 @@ static const struct genl_ops devlink_nl_ops[] = {
.cmd = DEVLINK_CMD_REGION_GET,
.doit = devlink_nl_cmd_region_get_doit,
.dumpit = devlink_nl_cmd_region_get_dumpit,
- .policy = devlink_nl_policy,
.flags = GENL_ADMIN_PERM,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
},
{
.cmd = DEVLINK_CMD_REGION_DEL,
.doit = devlink_nl_cmd_region_del,
- .policy = devlink_nl_policy,
.flags = GENL_ADMIN_PERM,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
},
{
.cmd = DEVLINK_CMD_REGION_READ,
.dumpit = devlink_nl_cmd_region_read_dumpit,
- .policy = devlink_nl_policy,
.flags = GENL_ADMIN_PERM,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
},
@@ -5160,7 +5137,6 @@ static const struct genl_ops devlink_nl_ops[] = {
.cmd = DEVLINK_CMD_INFO_GET,
.doit = devlink_nl_cmd_info_get_doit,
.dumpit = devlink_nl_cmd_info_get_dumpit,
- .policy = devlink_nl_policy,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
/* can be retrieved by unprivileged users */
},
@@ -5168,35 +5144,30 @@ static const struct genl_ops devlink_nl_ops[] = {
.cmd = DEVLINK_CMD_HEALTH_REPORTER_GET,
.doit = devlink_nl_cmd_health_reporter_get_doit,
.dumpit = devlink_nl_cmd_health_reporter_get_dumpit,
- .policy = devlink_nl_policy,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
/* can be retrieved by unprivileged users */
},
{
.cmd = DEVLINK_CMD_HEALTH_REPORTER_SET,
.doit = devlink_nl_cmd_health_reporter_set_doit,
- .policy = devlink_nl_policy,
.flags = GENL_ADMIN_PERM,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
},
{
.cmd = DEVLINK_CMD_HEALTH_REPORTER_RECOVER,
.doit = devlink_nl_cmd_health_reporter_recover_doit,
- .policy = devlink_nl_policy,
.flags = GENL_ADMIN_PERM,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
},
{
.cmd = DEVLINK_CMD_HEALTH_REPORTER_DIAGNOSE,
.doit = devlink_nl_cmd_health_reporter_diagnose_doit,
- .policy = devlink_nl_policy,
.flags = GENL_ADMIN_PERM,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
},
{
.cmd = DEVLINK_CMD_HEALTH_REPORTER_DUMP_GET,
.doit = devlink_nl_cmd_health_reporter_dump_get_doit,
- .policy = devlink_nl_policy,
.flags = GENL_ADMIN_PERM,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
DEVLINK_NL_FLAG_NO_LOCK,
@@ -5204,7 +5175,6 @@ static const struct genl_ops devlink_nl_ops[] = {
{
.cmd = DEVLINK_CMD_HEALTH_REPORTER_DUMP_CLEAR,
.doit = devlink_nl_cmd_health_reporter_dump_clear_doit,
- .policy = devlink_nl_policy,
.flags = GENL_ADMIN_PERM,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
DEVLINK_NL_FLAG_NO_LOCK,
@@ -5212,7 +5182,6 @@ static const struct genl_ops devlink_nl_ops[] = {
{
.cmd = DEVLINK_CMD_FLASH_UPDATE,
.doit = devlink_nl_cmd_flash_update,
- .policy = devlink_nl_policy,
.flags = GENL_ADMIN_PERM,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
},
@@ -5222,6 +5191,7 @@ static struct genl_family devlink_nl_family __ro_after_init = {
.name = DEVLINK_GENL_NAME,
.version = DEVLINK_GENL_VERSION,
.maxattr = DEVLINK_ATTR_MAX,
+ .policy = devlink_nl_policy,
.netnsok = true,
.pre_doit = devlink_nl_pre_doit,
.post_doit = devlink_nl_post_doit,
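
Note: with the policy attached to the family instead of each genl_ops entry, generic netlink validates attributes for every command centrally. The general shape of a family using a family-wide policy (the example_* names are placeholders):

        static struct genl_family example_nl_family __ro_after_init = {
                .name    = "example",
                .version = 1,
                .maxattr = EXAMPLE_ATTR_MAX,
                .policy  = example_nl_policy,   /* applies to all ops */
                .ops     = example_nl_ops,      /* no per-op .policy fields */
                .n_ops   = ARRAY_SIZE(example_nl_ops),
        };
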
@@ -5303,6 +5273,7 @@ EXPORT_SYMBOL_GPL(devlink_unregister);
*/
void devlink_free(struct devlink *devlink)
{
+ mutex_destroy(&devlink->lock);
WARN_ON(!list_empty(&devlink->reporter_list));
WARN_ON(!list_empty(&devlink->region_list));
WARN_ON(!list_empty(&devlink->param_list));
@@ -5340,6 +5311,7 @@ int devlink_port_register(struct devlink *devlink,
devlink_port->devlink = devlink;
devlink_port->index = port_index;
devlink_port->registered = true;
+ spin_lock_init(&devlink_port->type_lock);
list_add_tail(&devlink_port->list, &devlink->port_list);
INIT_LIST_HEAD(&devlink_port->param_list);
mutex_unlock(&devlink->lock);
@@ -5368,8 +5340,12 @@ static void __devlink_port_type_set(struct devlink_port *devlink_port,
enum devlink_port_type type,
void *type_dev)
{
+ if (WARN_ON(!devlink_port->registered))
+ return;
+ spin_lock(&devlink_port->type_lock);
devlink_port->type = type;
devlink_port->type_dev = type_dev;
+ spin_unlock(&devlink_port->type_lock);
devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_NEW);
}
@@ -5382,8 +5358,39 @@ static void __devlink_port_type_set(struct devlink_port *devlink_port,
void devlink_port_type_eth_set(struct devlink_port *devlink_port,
struct net_device *netdev)
{
- return __devlink_port_type_set(devlink_port,
- DEVLINK_PORT_TYPE_ETH, netdev);
+ const struct net_device_ops *ops = netdev->netdev_ops;
+
+ /* If driver registers devlink port, it should set devlink port
+ * attributes accordingly so the compat functions are called
+ * and the original ops are not used.
+ */
+ if (ops->ndo_get_phys_port_name) {
+ /* Some drivers use the same set of ndos for netdevs
+ * that have devlink_port registered and also for
+ * those who don't. Make sure that ndo_get_phys_port_name
+ * returns -EOPNOTSUPP here in case it is defined.
+ * Warn if not.
+ */
+ char name[IFNAMSIZ];
+ int err;
+
+ err = ops->ndo_get_phys_port_name(netdev, name, sizeof(name));
+ WARN_ON(err != -EOPNOTSUPP);
+ }
+ if (ops->ndo_get_port_parent_id) {
+ /* Some drivers use the same set of ndos for netdevs
+ * that have devlink_port registered and also for
+ * those who don't. Make sure that ndo_get_port_parent_id
+ * returns -EOPNOTSUPP here in case it is defined.
+ * Warn if not.
+ */
+ struct netdev_phys_item_id ppid;
+ int err;
+
+ err = ops->ndo_get_port_parent_id(netdev, &ppid);
+ WARN_ON(err != -EOPNOTSUPP);
+ }
+ __devlink_port_type_set(devlink_port, DEVLINK_PORT_TYPE_ETH, netdev);
}
EXPORT_SYMBOL_GPL(devlink_port_type_eth_set);
@@ -5396,8 +5403,7 @@ EXPORT_SYMBOL_GPL(devlink_port_type_eth_set);
void devlink_port_type_ib_set(struct devlink_port *devlink_port,
struct ib_device *ibdev)
{
- return __devlink_port_type_set(devlink_port,
- DEVLINK_PORT_TYPE_IB, ibdev);
+ __devlink_port_type_set(devlink_port, DEVLINK_PORT_TYPE_IB, ibdev);
}
EXPORT_SYMBOL_GPL(devlink_port_type_ib_set);
@@ -5408,8 +5414,7 @@ EXPORT_SYMBOL_GPL(devlink_port_type_ib_set);
*/
void devlink_port_type_clear(struct devlink_port *devlink_port)
{
- return __devlink_port_type_set(devlink_port,
- DEVLINK_PORT_TYPE_NOTSET, NULL);
+ __devlink_port_type_set(devlink_port, DEVLINK_PORT_TYPE_NOTSET, NULL);
}
EXPORT_SYMBOL_GPL(devlink_port_type_clear);
@@ -5423,25 +5428,40 @@ EXPORT_SYMBOL_GPL(devlink_port_type_clear);
* @split: indicates if this is split port
* @split_subport_number: if the port is split, this is the number
* of the subport.
+ * @switch_id: if the port is part of a switch, this is a buffer with the ID,
+ * otherwise this is NULL
+ * @switch_id_len: length of the switch_id buffer
*/
void devlink_port_attrs_set(struct devlink_port *devlink_port,
enum devlink_port_flavour flavour,
u32 port_number, bool split,
- u32 split_subport_number)
+ u32 split_subport_number,
+ const unsigned char *switch_id,
+ unsigned char switch_id_len)
{
struct devlink_port_attrs *attrs = &devlink_port->attrs;
+ if (WARN_ON(devlink_port->registered))
+ return;
attrs->set = true;
attrs->flavour = flavour;
attrs->port_number = port_number;
attrs->split = split;
attrs->split_subport_number = split_subport_number;
- devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_NEW);
+ if (switch_id) {
+ attrs->switch_port = true;
+ if (WARN_ON(switch_id_len > MAX_PHYS_ITEM_ID_LEN))
+ switch_id_len = MAX_PHYS_ITEM_ID_LEN;
+ memcpy(attrs->switch_id.id, switch_id, switch_id_len);
+ attrs->switch_id.id_len = switch_id_len;
+ } else {
+ attrs->switch_port = false;
+ }
}
EXPORT_SYMBOL_GPL(devlink_port_attrs_set);
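
Note: since attributes must now be set before devlink_port_register() (note the WARN_ON on devlink_port->registered), a driver using the new switch_id arguments would do something like the following sketch; the MAC-address-as-switch-ID choice is illustrative only:

        devlink_port_attrs_set(&port->dl_port, DEVLINK_PORT_FLAVOUR_PHYSICAL,
                               port->number, false /* split */,
                               0 /* split_subport_number */,
                               dev->perm_addr, ETH_ALEN);
        err = devlink_port_register(devlink, &port->dl_port, port->index);
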
-int devlink_port_get_phys_port_name(struct devlink_port *devlink_port,
- char *name, size_t len)
+static int __devlink_port_phys_port_name_get(struct devlink_port *devlink_port,
+ char *name, size_t len)
{
struct devlink_port_attrs *attrs = &devlink_port->attrs;
int n = 0;
@@ -5471,7 +5491,6 @@ int devlink_port_get_phys_port_name(struct devlink_port *devlink_port,
return 0;
}
-EXPORT_SYMBOL_GPL(devlink_port_get_phys_port_name);
int devlink_sb_register(struct devlink *devlink, unsigned int sb_index,
u32 size, u16 ingress_pools_count,
@@ -6447,17 +6466,15 @@ void devlink_compat_running_version(struct net_device *dev,
dev_hold(dev);
rtnl_unlock();
- mutex_lock(&devlink_mutex);
devlink = netdev_to_devlink(dev);
if (!devlink || !devlink->ops->info_get)
- goto unlock_list;
+ goto out;
mutex_lock(&devlink->lock);
__devlink_compat_running_version(devlink, buf, len);
mutex_unlock(&devlink->lock);
-unlock_list:
- mutex_unlock(&devlink_mutex);
+out:
rtnl_lock();
dev_put(dev);
}
@@ -6465,28 +6482,65 @@ unlock_list:
int devlink_compat_flash_update(struct net_device *dev, const char *file_name)
{
struct devlink *devlink;
- int ret = -EOPNOTSUPP;
+ int ret;
dev_hold(dev);
rtnl_unlock();
- mutex_lock(&devlink_mutex);
devlink = netdev_to_devlink(dev);
- if (!devlink || !devlink->ops->flash_update)
- goto unlock_list;
+ if (!devlink || !devlink->ops->flash_update) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
mutex_lock(&devlink->lock);
ret = devlink->ops->flash_update(devlink, file_name, NULL, NULL);
mutex_unlock(&devlink->lock);
-unlock_list:
- mutex_unlock(&devlink_mutex);
+out:
rtnl_lock();
dev_put(dev);
return ret;
}
+int devlink_compat_phys_port_name_get(struct net_device *dev,
+ char *name, size_t len)
+{
+ struct devlink_port *devlink_port;
+
+ /* RTNL mutex is held here which ensures that devlink_port
+ * instance cannot disappear in the middle. No need to take
+ * any devlink lock as only permanent values are accessed.
+ */
+ ASSERT_RTNL();
+
+ devlink_port = netdev_to_devlink_port(dev);
+ if (!devlink_port)
+ return -EOPNOTSUPP;
+
+ return __devlink_port_phys_port_name_get(devlink_port, name, len);
+}
+
+int devlink_compat_switch_id_get(struct net_device *dev,
+ struct netdev_phys_item_id *ppid)
+{
+ struct devlink_port *devlink_port;
+
+ /* RTNL mutex is held here which ensures that devlink_port
+ * instance cannot disappear in the middle. No need to take
+ * any devlink lock as only permanent values are accessed.
+ */
+ ASSERT_RTNL();
+ devlink_port = netdev_to_devlink_port(dev);
+ if (!devlink_port || !devlink_port->attrs.switch_port)
+ return -EOPNOTSUPP;
+
+ memcpy(ppid, &devlink_port->attrs.switch_id, sizeof(*ppid));
+
+ return 0;
+}
+
static int __init devlink_init(void)
{
return genl_register_family(&devlink_nl_family);
diff --git a/net/core/dst.c b/net/core/dst.c
index a263309df115..1f13d90cd0e4 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -26,23 +26,6 @@
#include <net/dst.h>
#include <net/dst_metadata.h>
-/*
- * Theory of operations:
- * 1) We use a list, protected by a spinlock, to add
- * new entries from both BH and non-BH context.
- * 2) In order to keep spinlock held for a small delay,
- * we use a second list where are stored long lived
- * entries, that are handled by the garbage collect thread
- * fired by a workqueue.
- * 3) This list is guarded by a mutex,
- * so that the gc_task and dst_dev_event() can be synchronized.
- */
-
-/*
- * We want to keep lock & list close together
- * to dirty as few cache lines as possible in __dst_free().
- * As this is not a very strong hint, we dont force an alignment on SMP.
- */
int dst_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
kfree_skb(skb);
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 36ed619faf36..4a593853cbf2 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -136,6 +136,7 @@ static const char
phy_tunable_strings[__ETHTOOL_PHY_TUNABLE_COUNT][ETH_GSTRING_LEN] = {
[ETHTOOL_ID_UNSPEC] = "Unspec",
[ETHTOOL_PHY_DOWNSHIFT] = "phy-downshift",
+ [ETHTOOL_PHY_FAST_LINK_DOWN] = "phy-fast-link-down",
};
static int ethtool_get_features(struct net_device *dev, void __user *useraddr)
@@ -2446,6 +2447,7 @@ static int ethtool_phy_tunable_valid(const struct ethtool_tunable *tuna)
{
switch (tuna->id) {
case ETHTOOL_PHY_DOWNSHIFT:
+ case ETHTOOL_PHY_FAST_LINK_DOWN:
if (tuna->len != sizeof(u8) ||
tuna->type_id != ETHTOOL_TUNABLE_U8)
return -EINVAL;
diff --git a/net/core/filter.c b/net/core/filter.c
index fc92ebc4e200..8904e3407163 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -74,6 +74,7 @@
#include <net/seg6.h>
#include <net/seg6_local.h>
#include <net/lwtunnel.h>
+#include <net/ipv6_stubs.h>
/**
* sk_filter_trim_cap - run a packet through a socket filter
@@ -2015,7 +2016,7 @@ static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
{
int ret;
- if (unlikely(__this_cpu_read(xmit_recursion) > XMIT_RECURSION_LIMIT)) {
+ if (dev_xmit_recursion()) {
net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n");
kfree_skb(skb);
return -ENETDOWN;
@@ -2023,9 +2024,9 @@ static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
skb->dev = dev;
- __this_cpu_inc(xmit_recursion);
+ dev_xmit_recursion_inc();
ret = dev_queue_xmit(skb);
- __this_cpu_dec(xmit_recursion);
+ dev_xmit_recursion_dec();
return ret;
}
@@ -2963,42 +2964,113 @@ static u32 bpf_skb_net_base_len(const struct sk_buff *skb)
}
}
-static int bpf_skb_net_grow(struct sk_buff *skb, u32 len_diff)
+#define BPF_F_ADJ_ROOM_ENCAP_L3_MASK (BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 | \
+ BPF_F_ADJ_ROOM_ENCAP_L3_IPV6)
+
+#define BPF_F_ADJ_ROOM_MASK (BPF_F_ADJ_ROOM_FIXED_GSO | \
+ BPF_F_ADJ_ROOM_ENCAP_L3_MASK | \
+ BPF_F_ADJ_ROOM_ENCAP_L4_GRE | \
+ BPF_F_ADJ_ROOM_ENCAP_L4_UDP)
+
+static int bpf_skb_net_grow(struct sk_buff *skb, u32 off, u32 len_diff,
+ u64 flags)
{
- u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb);
+ bool encap = flags & BPF_F_ADJ_ROOM_ENCAP_L3_MASK;
+ u16 mac_len = 0, inner_net = 0, inner_trans = 0;
+ unsigned int gso_type = SKB_GSO_DODGY;
int ret;
- if (skb_is_gso(skb) && !skb_is_gso_tcp(skb))
- return -ENOTSUPP;
+ if (skb_is_gso(skb) && !skb_is_gso_tcp(skb)) {
+ /* udp gso_size delineates datagrams, only allow if fixed */
+ if (!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ||
+ !(flags & BPF_F_ADJ_ROOM_FIXED_GSO))
+ return -ENOTSUPP;
+ }
- ret = skb_cow(skb, len_diff);
+ ret = skb_cow_head(skb, len_diff);
if (unlikely(ret < 0))
return ret;
+ if (encap) {
+ if (skb->protocol != htons(ETH_P_IP) &&
+ skb->protocol != htons(ETH_P_IPV6))
+ return -ENOTSUPP;
+
+ if (flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 &&
+ flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV6)
+ return -EINVAL;
+
+ if (flags & BPF_F_ADJ_ROOM_ENCAP_L4_GRE &&
+ flags & BPF_F_ADJ_ROOM_ENCAP_L4_UDP)
+ return -EINVAL;
+
+ if (skb->encapsulation)
+ return -EALREADY;
+
+ mac_len = skb->network_header - skb->mac_header;
+ inner_net = skb->network_header;
+ inner_trans = skb->transport_header;
+ }
+
ret = bpf_skb_net_hdr_push(skb, off, len_diff);
if (unlikely(ret < 0))
return ret;
+ if (encap) {
+ /* inner mac == inner_net on l3 encap */
+ skb->inner_mac_header = inner_net;
+ skb->inner_network_header = inner_net;
+ skb->inner_transport_header = inner_trans;
+ skb_set_inner_protocol(skb, skb->protocol);
+
+ skb->encapsulation = 1;
+ skb_set_network_header(skb, mac_len);
+
+ if (flags & BPF_F_ADJ_ROOM_ENCAP_L4_UDP)
+ gso_type |= SKB_GSO_UDP_TUNNEL;
+ else if (flags & BPF_F_ADJ_ROOM_ENCAP_L4_GRE)
+ gso_type |= SKB_GSO_GRE;
+ else if (flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV6)
+ gso_type |= SKB_GSO_IPXIP6;
+ else
+ gso_type |= SKB_GSO_IPXIP4;
+
+ if (flags & BPF_F_ADJ_ROOM_ENCAP_L4_GRE ||
+ flags & BPF_F_ADJ_ROOM_ENCAP_L4_UDP) {
+ int nh_len = flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 ?
+ sizeof(struct ipv6hdr) :
+ sizeof(struct iphdr);
+
+ skb_set_transport_header(skb, mac_len + nh_len);
+ }
+ }
+
if (skb_is_gso(skb)) {
struct skb_shared_info *shinfo = skb_shinfo(skb);
/* Due to header grow, MSS needs to be downgraded. */
- skb_decrease_gso_size(shinfo, len_diff);
+ if (!(flags & BPF_F_ADJ_ROOM_FIXED_GSO))
+ skb_decrease_gso_size(shinfo, len_diff);
+
/* Header must be checked, and gso_segs recomputed. */
- shinfo->gso_type |= SKB_GSO_DODGY;
+ shinfo->gso_type |= gso_type;
shinfo->gso_segs = 0;
}
return 0;
}
-static int bpf_skb_net_shrink(struct sk_buff *skb, u32 len_diff)
+static int bpf_skb_net_shrink(struct sk_buff *skb, u32 off, u32 len_diff,
+ u64 flags)
{
- u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb);
int ret;
- if (skb_is_gso(skb) && !skb_is_gso_tcp(skb))
- return -ENOTSUPP;
+ if (skb_is_gso(skb) && !skb_is_gso_tcp(skb)) {
+ /* udp gso_size delineates datagrams, only allow if fixed */
+ if (!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ||
+ !(flags & BPF_F_ADJ_ROOM_FIXED_GSO))
+ return -ENOTSUPP;
+ }
ret = skb_unclone(skb, GFP_ATOMIC);
if (unlikely(ret < 0))
@@ -3012,7 +3084,9 @@ static int bpf_skb_net_shrink(struct sk_buff *skb, u32 len_diff)
struct skb_shared_info *shinfo = skb_shinfo(skb);
/* Due to header shrink, MSS can be upgraded. */
- skb_increase_gso_size(shinfo, len_diff);
+ if (!(flags & BPF_F_ADJ_ROOM_FIXED_GSO))
+ skb_increase_gso_size(shinfo, len_diff);
+
/* Header must be checked, and gso_segs recomputed. */
shinfo->gso_type |= SKB_GSO_DODGY;
shinfo->gso_segs = 0;
@@ -3027,49 +3101,50 @@ static u32 __bpf_skb_max_len(const struct sk_buff *skb)
SKB_MAX_ALLOC;
}
-static int bpf_skb_adjust_net(struct sk_buff *skb, s32 len_diff)
+BPF_CALL_4(bpf_skb_adjust_room, struct sk_buff *, skb, s32, len_diff,
+ u32, mode, u64, flags)
{
- bool trans_same = skb->transport_header == skb->network_header;
u32 len_cur, len_diff_abs = abs(len_diff);
u32 len_min = bpf_skb_net_base_len(skb);
u32 len_max = __bpf_skb_max_len(skb);
__be16 proto = skb->protocol;
bool shrink = len_diff < 0;
+ u32 off;
int ret;
+ if (unlikely(flags & ~BPF_F_ADJ_ROOM_MASK))
+ return -EINVAL;
if (unlikely(len_diff_abs > 0xfffU))
return -EFAULT;
if (unlikely(proto != htons(ETH_P_IP) &&
proto != htons(ETH_P_IPV6)))
return -ENOTSUPP;
+ off = skb_mac_header_len(skb);
+ switch (mode) {
+ case BPF_ADJ_ROOM_NET:
+ off += bpf_skb_net_base_len(skb);
+ break;
+ case BPF_ADJ_ROOM_MAC:
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
len_cur = skb->len - skb_network_offset(skb);
- if (skb_transport_header_was_set(skb) && !trans_same)
- len_cur = skb_network_header_len(skb);
if ((shrink && (len_diff_abs >= len_cur ||
len_cur - len_diff_abs < len_min)) ||
(!shrink && (skb->len + len_diff_abs > len_max &&
!skb_is_gso(skb))))
return -ENOTSUPP;
- ret = shrink ? bpf_skb_net_shrink(skb, len_diff_abs) :
- bpf_skb_net_grow(skb, len_diff_abs);
+ ret = shrink ? bpf_skb_net_shrink(skb, off, len_diff_abs, flags) :
+ bpf_skb_net_grow(skb, off, len_diff_abs, flags);
bpf_compute_data_pointers(skb);
return ret;
}
-BPF_CALL_4(bpf_skb_adjust_room, struct sk_buff *, skb, s32, len_diff,
- u32, mode, u64, flags)
-{
- if (unlikely(flags))
- return -EINVAL;
- if (likely(mode == BPF_ADJ_ROOM_NET))
- return bpf_skb_adjust_net(skb, len_diff);
-
- return -ENOTSUPP;
-}
-
static const struct bpf_func_proto bpf_skb_adjust_room_proto = {
.func = bpf_skb_adjust_room,
.gpl_only = false,
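
Note: bpf_skb_adjust_room() now accepts BPF_ADJ_ROOM_MAC and the encap flags, so a tc program can make headroom for an outer header and have the GSO metadata adjusted by the kernel. A hedged sketch of a caller pushing IPv4+GRE encapsulation (the outer header bytes must still be written separately, e.g. with bpf_skb_store_bytes()):

        /* 4 bytes is the base GRE header, without optional fields */
        int ret = bpf_skb_adjust_room(skb, sizeof(struct iphdr) + 4,
                                      BPF_ADJ_ROOM_MAC,
                                      BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 |
                                      BPF_F_ADJ_ROOM_ENCAP_L4_GRE);
        if (ret < 0)
                return TC_ACT_SHOT;
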
@@ -4480,11 +4555,11 @@ static int bpf_fib_set_fwd_params(struct bpf_fib_lookup *params,
static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
u32 flags, bool check_mtu)
{
+ struct fib_nh_common *nhc;
struct in_device *in_dev;
struct neighbour *neigh;
struct net_device *dev;
struct fib_result res;
- struct fib_nh *nh;
struct flowi4 fl4;
int err;
u32 mtu;
@@ -4557,15 +4632,15 @@ static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
return BPF_FIB_LKUP_RET_FRAG_NEEDED;
}
- nh = &res.fi->fib_nh[res.nh_sel];
+ nhc = res.nhc;
/* do not handle lwt encaps right now */
- if (nh->nh_lwtstate)
+ if (nhc->nhc_lwtstate)
return BPF_FIB_LKUP_RET_UNSUPP_LWT;
- dev = nh->nh_dev;
- if (nh->nh_gw)
- params->ipv4_dst = nh->nh_gw;
+ dev = nhc->nhc_dev;
+ if (nhc->nhc_has_gw)
+ params->ipv4_dst = nhc->nhc_gw.ipv4;
params->rt_metric = res.fi->fib_priority;
@@ -4674,13 +4749,13 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
return BPF_FIB_LKUP_RET_FRAG_NEEDED;
}
- if (f6i->fib6_nh.nh_lwtstate)
+ if (f6i->fib6_nh.fib_nh_lws)
return BPF_FIB_LKUP_RET_UNSUPP_LWT;
- if (f6i->fib6_flags & RTF_GATEWAY)
- *dst = f6i->fib6_nh.nh_gw;
+ if (f6i->fib6_nh.fib_nh_has_gw)
+ *dst = f6i->fib6_nh.fib_nh_gw6;
- dev = f6i->fib6_nh.nh_dev;
+ dev = f6i->fib6_nh.fib_nh_dev;
params->rt_metric = f6i->fib6_metric;
/* xdp and cls_bpf programs are run in RCU-bh so rcu_read_lock_bh is
@@ -5156,15 +5231,15 @@ static struct sock *sk_lookup(struct net *net, struct bpf_sock_tuple *tuple,
return sk;
}
-/* bpf_sk_lookup performs the core lookup for different types of sockets,
+/* bpf_skc_lookup performs the core lookup for different types of sockets,
* taking a reference on the socket if it doesn't have the flag SOCK_RCU_FREE.
* Returns the socket as an 'unsigned long' to simplify the casting in the
* callers to satisfy BPF_CALL declarations.
*/
-static unsigned long
-__bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
- struct net *caller_net, u32 ifindex, u8 proto, u64 netns_id,
- u64 flags)
+static struct sock *
+__bpf_skc_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
+ struct net *caller_net, u32 ifindex, u8 proto, u64 netns_id,
+ u64 flags)
{
struct sock *sk = NULL;
u8 family = AF_UNSPEC;
@@ -5192,15 +5267,27 @@ __bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
put_net(net);
}
+out:
+ return sk;
+}
+
+static struct sock *
+__bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
+ struct net *caller_net, u32 ifindex, u8 proto, u64 netns_id,
+ u64 flags)
+{
+ struct sock *sk = __bpf_skc_lookup(skb, tuple, len, caller_net,
+ ifindex, proto, netns_id, flags);
+
if (sk)
sk = sk_to_full_sk(sk);
-out:
- return (unsigned long) sk;
+
+ return sk;
}
-static unsigned long
-bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
- u8 proto, u64 netns_id, u64 flags)
+static struct sock *
+bpf_skc_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
+ u8 proto, u64 netns_id, u64 flags)
{
struct net *caller_net;
int ifindex;
@@ -5213,14 +5300,47 @@ bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
ifindex = 0;
}
- return __bpf_sk_lookup(skb, tuple, len, caller_net, ifindex,
- proto, netns_id, flags);
+ return __bpf_skc_lookup(skb, tuple, len, caller_net, ifindex, proto,
+ netns_id, flags);
+}
+
+static struct sock *
+bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
+ u8 proto, u64 netns_id, u64 flags)
+{
+ struct sock *sk = bpf_skc_lookup(skb, tuple, len, proto, netns_id,
+ flags);
+
+ if (sk)
+ sk = sk_to_full_sk(sk);
+
+ return sk;
+}
+
+BPF_CALL_5(bpf_skc_lookup_tcp, struct sk_buff *, skb,
+ struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
+{
+ return (unsigned long)bpf_skc_lookup(skb, tuple, len, IPPROTO_TCP,
+ netns_id, flags);
}
+static const struct bpf_func_proto bpf_skc_lookup_tcp_proto = {
+ .func = bpf_skc_lookup_tcp,
+ .gpl_only = false,
+ .pkt_access = true,
+ .ret_type = RET_PTR_TO_SOCK_COMMON_OR_NULL,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_PTR_TO_MEM,
+ .arg3_type = ARG_CONST_SIZE,
+ .arg4_type = ARG_ANYTHING,
+ .arg5_type = ARG_ANYTHING,
+};
+
BPF_CALL_5(bpf_sk_lookup_tcp, struct sk_buff *, skb,
struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
{
- return bpf_sk_lookup(skb, tuple, len, IPPROTO_TCP, netns_id, flags);
+ return (unsigned long)bpf_sk_lookup(skb, tuple, len, IPPROTO_TCP,
+ netns_id, flags);
}
static const struct bpf_func_proto bpf_sk_lookup_tcp_proto = {
@@ -5238,7 +5358,8 @@ static const struct bpf_func_proto bpf_sk_lookup_tcp_proto = {
BPF_CALL_5(bpf_sk_lookup_udp, struct sk_buff *, skb,
struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
{
- return bpf_sk_lookup(skb, tuple, len, IPPROTO_UDP, netns_id, flags);
+ return (unsigned long)bpf_sk_lookup(skb, tuple, len, IPPROTO_UDP,
+ netns_id, flags);
}
static const struct bpf_func_proto bpf_sk_lookup_udp_proto = {
@@ -5273,8 +5394,9 @@ BPF_CALL_5(bpf_xdp_sk_lookup_udp, struct xdp_buff *, ctx,
struct net *caller_net = dev_net(ctx->rxq->dev);
int ifindex = ctx->rxq->dev->ifindex;
- return __bpf_sk_lookup(NULL, tuple, len, caller_net, ifindex,
- IPPROTO_UDP, netns_id, flags);
+ return (unsigned long)__bpf_sk_lookup(NULL, tuple, len, caller_net,
+ ifindex, IPPROTO_UDP, netns_id,
+ flags);
}
static const struct bpf_func_proto bpf_xdp_sk_lookup_udp_proto = {
@@ -5289,14 +5411,38 @@ static const struct bpf_func_proto bpf_xdp_sk_lookup_udp_proto = {
.arg5_type = ARG_ANYTHING,
};
+BPF_CALL_5(bpf_xdp_skc_lookup_tcp, struct xdp_buff *, ctx,
+ struct bpf_sock_tuple *, tuple, u32, len, u32, netns_id, u64, flags)
+{
+ struct net *caller_net = dev_net(ctx->rxq->dev);
+ int ifindex = ctx->rxq->dev->ifindex;
+
+ return (unsigned long)__bpf_skc_lookup(NULL, tuple, len, caller_net,
+ ifindex, IPPROTO_TCP, netns_id,
+ flags);
+}
+
+static const struct bpf_func_proto bpf_xdp_skc_lookup_tcp_proto = {
+ .func = bpf_xdp_skc_lookup_tcp,
+ .gpl_only = false,
+ .pkt_access = true,
+ .ret_type = RET_PTR_TO_SOCK_COMMON_OR_NULL,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_PTR_TO_MEM,
+ .arg3_type = ARG_CONST_SIZE,
+ .arg4_type = ARG_ANYTHING,
+ .arg5_type = ARG_ANYTHING,
+};
+
BPF_CALL_5(bpf_xdp_sk_lookup_tcp, struct xdp_buff *, ctx,
struct bpf_sock_tuple *, tuple, u32, len, u32, netns_id, u64, flags)
{
struct net *caller_net = dev_net(ctx->rxq->dev);
int ifindex = ctx->rxq->dev->ifindex;
- return __bpf_sk_lookup(NULL, tuple, len, caller_net, ifindex,
- IPPROTO_TCP, netns_id, flags);
+ return (unsigned long)__bpf_sk_lookup(NULL, tuple, len, caller_net,
+ ifindex, IPPROTO_TCP, netns_id,
+ flags);
}
static const struct bpf_func_proto bpf_xdp_sk_lookup_tcp_proto = {
@@ -5311,11 +5457,31 @@ static const struct bpf_func_proto bpf_xdp_sk_lookup_tcp_proto = {
.arg5_type = ARG_ANYTHING,
};
+BPF_CALL_5(bpf_sock_addr_skc_lookup_tcp, struct bpf_sock_addr_kern *, ctx,
+ struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
+{
+ return (unsigned long)__bpf_skc_lookup(NULL, tuple, len,
+ sock_net(ctx->sk), 0,
+ IPPROTO_TCP, netns_id, flags);
+}
+
+static const struct bpf_func_proto bpf_sock_addr_skc_lookup_tcp_proto = {
+ .func = bpf_sock_addr_skc_lookup_tcp,
+ .gpl_only = false,
+ .ret_type = RET_PTR_TO_SOCK_COMMON_OR_NULL,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_PTR_TO_MEM,
+ .arg3_type = ARG_CONST_SIZE,
+ .arg4_type = ARG_ANYTHING,
+ .arg5_type = ARG_ANYTHING,
+};
+
BPF_CALL_5(bpf_sock_addr_sk_lookup_tcp, struct bpf_sock_addr_kern *, ctx,
struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
{
- return __bpf_sk_lookup(NULL, tuple, len, sock_net(ctx->sk), 0,
- IPPROTO_TCP, netns_id, flags);
+ return (unsigned long)__bpf_sk_lookup(NULL, tuple, len,
+ sock_net(ctx->sk), 0, IPPROTO_TCP,
+ netns_id, flags);
}
static const struct bpf_func_proto bpf_sock_addr_sk_lookup_tcp_proto = {
@@ -5332,8 +5498,9 @@ static const struct bpf_func_proto bpf_sock_addr_sk_lookup_tcp_proto = {
BPF_CALL_5(bpf_sock_addr_sk_lookup_udp, struct bpf_sock_addr_kern *, ctx,
struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
{
- return __bpf_sk_lookup(NULL, tuple, len, sock_net(ctx->sk), 0,
- IPPROTO_UDP, netns_id, flags);
+ return (unsigned long)__bpf_sk_lookup(NULL, tuple, len,
+ sock_net(ctx->sk), 0, IPPROTO_UDP,
+ netns_id, flags);
}
static const struct bpf_func_proto bpf_sock_addr_sk_lookup_udp_proto = {
@@ -5461,6 +5628,74 @@ static const struct bpf_func_proto bpf_skb_ecn_set_ce_proto = {
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
};
+
+BPF_CALL_5(bpf_tcp_check_syncookie, struct sock *, sk, void *, iph, u32, iph_len,
+ struct tcphdr *, th, u32, th_len)
+{
+#ifdef CONFIG_SYN_COOKIES
+ u32 cookie;
+ int ret;
+
+ if (unlikely(th_len < sizeof(*th)))
+ return -EINVAL;
+
+ /* sk_listener() allows TCP_NEW_SYN_RECV, which makes no sense here. */
+ if (sk->sk_protocol != IPPROTO_TCP || sk->sk_state != TCP_LISTEN)
+ return -EINVAL;
+
+ if (!sock_net(sk)->ipv4.sysctl_tcp_syncookies)
+ return -EINVAL;
+
+ if (!th->ack || th->rst || th->syn)
+ return -ENOENT;
+
+ if (tcp_synq_no_recent_overflow(sk))
+ return -ENOENT;
+
+ cookie = ntohl(th->ack_seq) - 1;
+
+ switch (sk->sk_family) {
+ case AF_INET:
+ if (unlikely(iph_len < sizeof(struct iphdr)))
+ return -EINVAL;
+
+ ret = __cookie_v4_check((struct iphdr *)iph, th, cookie);
+ break;
+
+#if IS_BUILTIN(CONFIG_IPV6)
+ case AF_INET6:
+ if (unlikely(iph_len < sizeof(struct ipv6hdr)))
+ return -EINVAL;
+
+ ret = __cookie_v6_check((struct ipv6hdr *)iph, th, cookie);
+ break;
+#endif /* CONFIG_IPV6 */
+
+ default:
+ return -EPROTONOSUPPORT;
+ }
+
+ if (ret > 0)
+ return 0;
+
+ return -ENOENT;
+#else
+ return -ENOTSUPP;
+#endif
+}
+
+static const struct bpf_func_proto bpf_tcp_check_syncookie_proto = {
+ .func = bpf_tcp_check_syncookie,
+ .gpl_only = true,
+ .pkt_access = true,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_SOCK_COMMON,
+ .arg2_type = ARG_PTR_TO_MEM,
+ .arg3_type = ARG_CONST_SIZE,
+ .arg4_type = ARG_PTR_TO_MEM,
+ .arg5_type = ARG_CONST_SIZE,
+};
+
#endif /* CONFIG_INET */
bool bpf_helper_changes_pkt_data(void *func)
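
Note: together, bpf_skc_lookup_tcp() and bpf_tcp_check_syncookie() let an XDP or tc program verify that the ACK completing a handshake carries a valid SYN cookie for a local listener. A sketch of the expected sequence, assuming tuple, iph and th were already parsed from the packet; record_valid_conn() is a placeholder for e.g. a map update:

        struct bpf_sock *sk;

        sk = bpf_skc_lookup_tcp(skb, &tuple, tuple_len, BPF_F_CURRENT_NETNS, 0);
        if (!sk)
                return TC_ACT_OK;

        /* returns 0 when the cookie is valid for this listener */
        if (!bpf_tcp_check_syncookie(sk, iph, iph_len, th, sizeof(*th)))
                record_valid_conn(&tuple);

        bpf_sk_release(sk);
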
@@ -5586,6 +5821,8 @@ sock_addr_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_sock_addr_sk_lookup_udp_proto;
case BPF_FUNC_sk_release:
return &bpf_sk_release_proto;
+ case BPF_FUNC_skc_lookup_tcp:
+ return &bpf_sock_addr_skc_lookup_tcp_proto;
#endif /* CONFIG_INET */
default:
return bpf_base_func_proto(func_id);
@@ -5719,6 +5956,12 @@ tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_tcp_sock_proto;
case BPF_FUNC_get_listener_sock:
return &bpf_get_listener_sock_proto;
+ case BPF_FUNC_skc_lookup_tcp:
+ return &bpf_skc_lookup_tcp_proto;
+ case BPF_FUNC_tcp_check_syncookie:
+ return &bpf_tcp_check_syncookie_proto;
+ case BPF_FUNC_skb_ecn_set_ce:
+ return &bpf_skb_ecn_set_ce_proto;
#endif
default:
return bpf_base_func_proto(func_id);
@@ -5754,6 +5997,10 @@ xdp_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_xdp_sk_lookup_tcp_proto;
case BPF_FUNC_sk_release:
return &bpf_sk_release_proto;
+ case BPF_FUNC_skc_lookup_tcp:
+ return &bpf_xdp_skc_lookup_tcp_proto;
+ case BPF_FUNC_tcp_check_syncookie:
+ return &bpf_tcp_check_syncookie_proto;
#endif
default:
return bpf_base_func_proto(func_id);
@@ -5846,6 +6093,8 @@ sk_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_sk_lookup_udp_proto;
case BPF_FUNC_sk_release:
return &bpf_sk_release_proto;
+ case BPF_FUNC_skc_lookup_tcp:
+ return &bpf_skc_lookup_tcp_proto;
#endif
default:
return bpf_base_func_proto(func_id);
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 94a450b2191a..795449713ba4 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -734,6 +734,8 @@ bool __skb_flow_bpf_dissect(struct bpf_prog *prog,
* @proto: protocol for which to get the flow, if @data is NULL use skb->protocol
* @nhoff: network header offset, if @data is NULL use skb_network_offset(skb)
* @hlen: packet header length, if @data is NULL use skb_headlen(skb)
+ * @flags: flags that control the dissection process, e.g.
+ * FLOW_DISSECTOR_F_STOP_AT_L3.
*
* The function will try to retrieve individual keys into target specified
* by flow_dissector from either the skbuff or a raw buffer specified by the
diff --git a/net/core/lwt_bpf.c b/net/core/lwt_bpf.c
index 126d31ff5ee3..3c5c24a5d9f5 100644
--- a/net/core/lwt_bpf.c
+++ b/net/core/lwt_bpf.c
@@ -18,6 +18,7 @@
#include <net/lwtunnel.h>
#include <net/gre.h>
#include <net/ip6_route.h>
+#include <net/ipv6_stubs.h>
struct bpf_lwt_prog {
struct bpf_prog *prog;
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index f8f94303a1f5..c14f0dc0157c 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -754,9 +754,9 @@ static ssize_t store_rps_map(struct netdev_rx_queue *queue,
rcu_assign_pointer(queue->rps_map, map);
if (map)
- static_key_slow_inc(&rps_needed);
+ static_branch_inc(&rps_needed);
if (old_map)
- static_key_slow_dec(&rps_needed);
+ static_branch_dec(&rps_needed);
mutex_unlock(&rps_map_mutex);
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 361aabffb8c0..e365e8fb1c40 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -323,7 +323,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
struct netdev_queue *txq;
- txq = netdev_pick_tx(dev, skb, NULL);
+ txq = netdev_core_pick_tx(dev, skb, NULL);
/* try until next clock tick */
for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index a51cab95ba64..f9b964fd4e4d 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -3569,7 +3569,7 @@ errout:
rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
}
-/**
+/*
* ndo_dflt_fdb_add - default netdevice operation to add an FDB entry
*/
int ndo_dflt_fdb_add(struct ndmsg *ndm,
@@ -3708,7 +3708,7 @@ out:
return err;
}
-/**
+/*
* ndo_dflt_fdb_del - default netdevice operation to delete an FDB entry
*/
int ndo_dflt_fdb_del(struct ndmsg *ndm,
@@ -3847,8 +3847,11 @@ skip:
/**
* ndo_dflt_fdb_dump - default netdevice operation to dump an FDB table.
- * @nlh: netlink message header
+ * @skb: socket buffer to store message in
+ * @cb: netlink callback
* @dev: netdevice
+ * @filter_dev: ignored
+ * @idx: the number of FDB table entries dumped is added to *@idx
*
* Default netdevice operation to dump the existing unicast address list.
* Returns number of addresses from list put in skb.
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index ef2cd5712098..9901f5322852 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -77,6 +77,8 @@
#include <linux/capability.h>
#include <linux/user_namespace.h>
+#include "datagram.h"
+
struct kmem_cache *skbuff_head_cache __ro_after_init;
static struct kmem_cache *skbuff_fclone_cache __ro_after_init;
#ifdef CONFIG_SKB_EXTENSIONS
@@ -1105,9 +1107,6 @@ void sock_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref)
}
EXPORT_SYMBOL_GPL(sock_zerocopy_put_abort);
-extern int __zerocopy_sg_from_iter(struct sock *sk, struct sk_buff *skb,
- struct iov_iter *from, size_t length);
-
int skb_zerocopy_iter_dgram(struct sk_buff *skb, struct msghdr *msg, int len)
{
return __zerocopy_sg_from_iter(skb->sk, skb, &msg->msg_iter, len);
diff --git a/net/core/sock_reuseport.c b/net/core/sock_reuseport.c
index d8fe3e549373..dc4aefdf2a08 100644
--- a/net/core/sock_reuseport.c
+++ b/net/core/sock_reuseport.c
@@ -144,6 +144,8 @@ static void reuseport_free_rcu(struct rcu_head *head)
* reuseport_add_sock - Add a socket to the reuseport group of another.
* @sk: New socket to add to the group.
* @sk2: Socket belonging to the existing reuseport group.
+ * @bind_inany: Whether or not the group is bound to a local INANY address.
+ *
* May return ENOMEM and not add socket to group under memory pressure.
*/
int reuseport_add_sock(struct sock *sk, struct sock *sk2, bool bind_inany)
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 84bf2861f45f..1a2685694abd 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -95,12 +95,12 @@ static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
if (sock_table != orig_sock_table) {
rcu_assign_pointer(rps_sock_flow_table, sock_table);
if (sock_table) {
- static_key_slow_inc(&rps_needed);
- static_key_slow_inc(&rfs_needed);
+ static_branch_inc(&rps_needed);
+ static_branch_inc(&rfs_needed);
}
if (orig_sock_table) {
- static_key_slow_dec(&rps_needed);
- static_key_slow_dec(&rfs_needed);
+ static_branch_dec(&rps_needed);
+ static_branch_dec(&rfs_needed);
synchronize_rcu();
vfree(orig_sock_table);
}