Diffstat (limited to 'net/bridge')
-rw-r--r--   net/bridge/br_device.c  |   2
-rw-r--r--   net/bridge/br_fdb.c     |   5
-rw-r--r--   net/bridge/br_if.c      |   3
-rw-r--r--   net/bridge/br_input.c   |   2
-rw-r--r--   net/bridge/br_netlink.c | 334
-rw-r--r--   net/bridge/br_private.h |  47
-rw-r--r--   net/bridge/br_stp.c     |   2
-rw-r--r--   net/bridge/br_vlan.c    | 251
8 files changed, 457 insertions, 189 deletions
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index c915c5b408ea..bdfb9544ca03 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -56,7 +56,7 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
 	skb_reset_mac_header(skb);
 	skb_pull(skb, ETH_HLEN);
 
-	if (!br_allowed_ingress(br, skb, &vid))
+	if (!br_allowed_ingress(br, br_vlan_group(br), skb, &vid))
 		goto out;
 
 	if (is_broadcast_ether_addr(dest))
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 7826782d62ab..7f7d55132dd5 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -133,12 +133,13 @@ static void fdb_del_hw_addr(struct net_bridge *br, const unsigned char *addr)
 
 static void fdb_del_external_learn(struct net_bridge_fdb_entry *f)
 {
-	struct switchdev_obj_fdb fdb = {
+	struct switchdev_obj_port_fdb fdb = {
+		.obj.id = SWITCHDEV_OBJ_ID_PORT_FDB,
 		.addr = f->addr.addr,
 		.vid = f->vlan_id,
 	};
 
-	switchdev_port_obj_del(f->dst->dev, SWITCHDEV_OBJ_PORT_FDB, &fdb);
+	switchdev_port_obj_del(f->dst->dev, &fdb.obj);
 }
 
 static void fdb_delete(struct net_bridge *br, struct net_bridge_fdb_entry *f)
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 45e4757c6fd2..934cae9fa317 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -248,7 +248,6 @@ static void del_nbp(struct net_bridge_port *p)
 	list_del_rcu(&p->list);
 
-	nbp_vlan_flush(p);
 	br_fdb_delete_by_port(br, p, 0, 1);
 
 	nbp_update_port_count(br);
@@ -257,6 +256,8 @@ static void del_nbp(struct net_bridge_port *p)
 	dev->priv_flags &= ~IFF_BRIDGE_PORT;
 
 	netdev_rx_handler_unregister(dev);
 
+	/* use the synchronize_rcu done by netdev_rx_handler_unregister */
+	nbp_vlan_flush(p);
 	br_multicast_del_port(p);
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index e27d0dfd2ee9..f5c5a4500e2f 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -140,7 +140,7 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
 	if (!p || p->state == BR_STATE_DISABLED)
 		goto drop;
 
-	if (!nbp_allowed_ingress(p, skb, &vid))
+	if (!br_allowed_ingress(p->br, nbp_vlan_group(p), skb, &vid))
 		goto out;
 
 	/* insert into forwarding database after filtering to avoid spoofing */
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index bb8bb7b36f04..70efe2edde2b 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -22,19 +22,19 @@
 #include "br_private_stp.h"
 
 static int __get_num_vlan_infos(struct net_bridge_vlan_group *vg,
-				u32 filter_mask,
-				u16 pvid)
+				u32 filter_mask)
 {
 	struct net_bridge_vlan *v;
 	u16 vid_range_start = 0, vid_range_end = 0, vid_range_flags = 0;
-	u16 flags;
+	u16 flags, pvid;
 	int num_vlans = 0;
 
 	if (!(filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED))
 		return 0;
 
+	pvid = br_get_pvid(vg);
 	/* Count number of vlan infos */
-	list_for_each_entry(v, &vg->vlan_list, vlist) {
+	list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
 		flags = 0;
 		/* only a context, bridge vlan not activated */
 		if (!br_vlan_should_use(v))
@@ -74,15 +74,21 @@ initvars:
 }
 
 static int br_get_num_vlan_infos(struct net_bridge_vlan_group *vg,
-				 u32 filter_mask, u16 pvid)
+				 u32 filter_mask)
 {
+	int num_vlans;
+
 	if (!vg)
 		return 0;
 
 	if (filter_mask & RTEXT_FILTER_BRVLAN)
 		return vg->num_vlans;
 
-	return __get_num_vlan_infos(vg, filter_mask, pvid);
+	rcu_read_lock();
+	num_vlans = __get_num_vlan_infos(vg, filter_mask);
+	rcu_read_unlock();
+
+	return num_vlans;
 }
 
 static size_t br_get_link_af_size_filtered(const struct net_device *dev,
@@ -92,19 +98,16 @@ static size_t br_get_link_af_size_filtered(const struct net_device *dev,
 	struct net_bridge_port *p;
 	struct net_bridge *br;
 	int num_vlan_infos;
-	u16 pvid = 0;
 
 	rcu_read_lock();
 	if (br_port_exists(dev)) {
 		p = br_port_get_rcu(dev);
 		vg = nbp_vlan_group(p);
-		pvid = nbp_get_pvid(p);
 	} else if (dev->priv_flags & IFF_EBRIDGE) {
 		br = netdev_priv(dev);
 		vg = br_vlan_group(br);
-		pvid = br_get_pvid(br);
 	}
-	num_vlan_infos = br_get_num_vlan_infos(vg, filter_mask, pvid);
+	num_vlan_infos = br_get_num_vlan_infos(vg, filter_mask);
 	rcu_read_unlock();
 
 	/* Each VLAN is returned in bridge_vlan_info along with flags */
@@ -196,18 +199,18 @@ nla_put_failure:
 }
 
 static int br_fill_ifvlaninfo_compressed(struct sk_buff *skb,
-					 struct net_bridge_vlan_group *vg,
-					 u16 pvid)
+					 struct net_bridge_vlan_group *vg)
 {
 	struct net_bridge_vlan *v;
 	u16 vid_range_start = 0, vid_range_end = 0, vid_range_flags = 0;
-	u16 flags;
+	u16 flags, pvid;
 	int err = 0;
 
 	/* Pack IFLA_BRIDGE_VLAN_INFO's for every vlan
 	 * and mark vlan info with begin and end flags
 	 * if vlaninfo represents a range
 	 */
+	pvid = br_get_pvid(vg);
 	list_for_each_entry(v, &vg->vlan_list, vlist) {
 		flags = 0;
 		if (!br_vlan_should_use(v))
@@ -251,12 +254,13 @@ initvars:
 }
 
 static int br_fill_ifvlaninfo(struct sk_buff *skb,
-			      struct net_bridge_vlan_group *vg,
-			      u16 pvid)
+			      struct net_bridge_vlan_group *vg)
 {
 	struct bridge_vlan_info vinfo;
 	struct net_bridge_vlan *v;
+	u16 pvid;
 
+	pvid = br_get_pvid(vg);
 	list_for_each_entry(v, &vg->vlan_list, vlist) {
 		if (!br_vlan_should_use(v))
 			continue;
@@ -338,16 +342,12 @@ static int br_fill_ifinfo(struct sk_buff *skb,
 	    (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)) {
 		struct net_bridge_vlan_group *vg;
 		struct nlattr *af;
-		u16 pvid;
 		int err;
 
-		if (port) {
+		if (port)
 			vg = nbp_vlan_group(port);
-			pvid = nbp_get_pvid(port);
-		} else {
+		else
 			vg = br_vlan_group(br);
-			pvid = br_get_pvid(br);
-		}
 
 		if (!vg || !vg->num_vlans)
 			goto done;
@@ -357,9 +357,9 @@ static int br_fill_ifinfo(struct sk_buff *skb,
 			goto nla_put_failure;
 
 		if (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)
-			err = br_fill_ifvlaninfo_compressed(skb, vg, pvid);
+			err = br_fill_ifvlaninfo_compressed(skb, vg);
 		else
-			err = br_fill_ifvlaninfo(skb, vg, pvid);
+			err = br_fill_ifvlaninfo(skb, vg);
 		if (err)
 			goto nla_put_failure;
 		nla_nest_end(skb, af);
@@ -764,6 +764,27 @@ static const struct nla_policy br_policy[IFLA_BR_MAX + 1] = {
 	[IFLA_BR_PRIORITY] = { .type = NLA_U16 },
 	[IFLA_BR_VLAN_FILTERING] = { .type = NLA_U8 },
 	[IFLA_BR_VLAN_PROTOCOL] = { .type = NLA_U16 },
+	[IFLA_BR_GROUP_FWD_MASK] = { .type = NLA_U16 },
+	[IFLA_BR_GROUP_ADDR] = { .type = NLA_BINARY,
+				 .len  = ETH_ALEN },
+	[IFLA_BR_MCAST_ROUTER] = { .type = NLA_U8 },
+	[IFLA_BR_MCAST_SNOOPING] = { .type = NLA_U8 },
+	[IFLA_BR_MCAST_QUERY_USE_IFADDR] = { .type = NLA_U8 },
+	[IFLA_BR_MCAST_QUERIER] = { .type = NLA_U8 },
+	[IFLA_BR_MCAST_HASH_ELASTICITY] = { .type = NLA_U32 },
+	[IFLA_BR_MCAST_HASH_MAX] = { .type = NLA_U32 },
+	[IFLA_BR_MCAST_LAST_MEMBER_CNT] = { .type = NLA_U32 },
+	[IFLA_BR_MCAST_STARTUP_QUERY_CNT] = { .type = NLA_U32 },
+	[IFLA_BR_MCAST_LAST_MEMBER_INTVL] = { .type = NLA_U64 },
+	[IFLA_BR_MCAST_MEMBERSHIP_INTVL] = { .type = NLA_U64 },
+	[IFLA_BR_MCAST_QUERIER_INTVL] = { .type = NLA_U64 },
+	[IFLA_BR_MCAST_QUERY_INTVL] = { .type = NLA_U64 },
+	[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL] = { .type = NLA_U64 },
+	[IFLA_BR_MCAST_STARTUP_QUERY_INTVL] = { .type = NLA_U64 },
+	[IFLA_BR_NF_CALL_IPTABLES] = { .type = NLA_U8 },
+	[IFLA_BR_NF_CALL_IP6TABLES] = { .type = NLA_U8 },
+	[IFLA_BR_NF_CALL_ARPTABLES] = { .type = NLA_U8 },
+	[IFLA_BR_VLAN_DEFAULT_PVID] = { .type = NLA_U16 },
 };
 
 static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
@@ -827,6 +848,158 @@ static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
 		if (err)
 			return err;
 	}
+
+	if (data[IFLA_BR_VLAN_DEFAULT_PVID]) {
+		__u16 defpvid = nla_get_u16(data[IFLA_BR_VLAN_DEFAULT_PVID]);
+
+		err = __br_vlan_set_default_pvid(br, defpvid);
+		if (err)
+			return err;
+	}
+#endif
+
+	if (data[IFLA_BR_GROUP_FWD_MASK]) {
+		u16 fwd_mask = nla_get_u16(data[IFLA_BR_GROUP_FWD_MASK]);
+
+		if (fwd_mask & BR_GROUPFWD_RESTRICTED)
+			return -EINVAL;
+		br->group_fwd_mask = fwd_mask;
+	}
+
+	if (data[IFLA_BR_GROUP_ADDR]) {
+		u8 new_addr[ETH_ALEN];
+
+		if (nla_len(data[IFLA_BR_GROUP_ADDR]) != ETH_ALEN)
+			return -EINVAL;
+		memcpy(new_addr, nla_data(data[IFLA_BR_GROUP_ADDR]), ETH_ALEN);
+		if (!is_link_local_ether_addr(new_addr))
+			return -EINVAL;
+		if (new_addr[5] == 1 ||		/* 802.3x Pause address */
+		    new_addr[5] == 2 ||		/* 802.3ad Slow protocols */
+		    new_addr[5] == 3)		/* 802.1X PAE address */
+			return -EINVAL;
+		spin_lock_bh(&br->lock);
+		memcpy(br->group_addr, new_addr, sizeof(br->group_addr));
+		spin_unlock_bh(&br->lock);
+		br->group_addr_set = true;
+		br_recalculate_fwd_mask(br);
+	}
+
+	if (data[IFLA_BR_FDB_FLUSH])
+		br_fdb_flush(br);
+
+#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
+	if (data[IFLA_BR_MCAST_ROUTER]) {
+		u8 multicast_router = nla_get_u8(data[IFLA_BR_MCAST_ROUTER]);
+
+		err = br_multicast_set_router(br, multicast_router);
+		if (err)
+			return err;
+	}
+
+	if (data[IFLA_BR_MCAST_SNOOPING]) {
+		u8 mcast_snooping = nla_get_u8(data[IFLA_BR_MCAST_SNOOPING]);
+
+		err = br_multicast_toggle(br, mcast_snooping);
+		if (err)
+			return err;
+	}
+
+	if (data[IFLA_BR_MCAST_QUERY_USE_IFADDR]) {
+		u8 val;
+
+		val = nla_get_u8(data[IFLA_BR_MCAST_QUERY_USE_IFADDR]);
+		br->multicast_query_use_ifaddr = !!val;
+	}
+
+	if (data[IFLA_BR_MCAST_QUERIER]) {
+		u8 mcast_querier = nla_get_u8(data[IFLA_BR_MCAST_QUERIER]);
+
+		err = br_multicast_set_querier(br, mcast_querier);
+		if (err)
+			return err;
+	}
+
+	if (data[IFLA_BR_MCAST_HASH_ELASTICITY]) {
+		u32 val = nla_get_u32(data[IFLA_BR_MCAST_HASH_ELASTICITY]);
+
+		br->hash_elasticity = val;
+	}
+
+	if (data[IFLA_BR_MCAST_HASH_MAX]) {
+		u32 hash_max = nla_get_u32(data[IFLA_BR_MCAST_HASH_MAX]);
+
+		err = br_multicast_set_hash_max(br, hash_max);
+		if (err)
+			return err;
+	}
+
+	if (data[IFLA_BR_MCAST_LAST_MEMBER_CNT]) {
+		u32 val = nla_get_u32(data[IFLA_BR_MCAST_LAST_MEMBER_CNT]);
+
+		br->multicast_last_member_count = val;
+	}
+
+	if (data[IFLA_BR_MCAST_STARTUP_QUERY_CNT]) {
+		u32 val = nla_get_u32(data[IFLA_BR_MCAST_STARTUP_QUERY_CNT]);
+
+		br->multicast_startup_query_count = val;
+	}
+
+	if (data[IFLA_BR_MCAST_LAST_MEMBER_INTVL]) {
+		u64 val = nla_get_u64(data[IFLA_BR_MCAST_LAST_MEMBER_INTVL]);
+
+		br->multicast_last_member_interval = clock_t_to_jiffies(val);
+	}
+
+	if (data[IFLA_BR_MCAST_MEMBERSHIP_INTVL]) {
+		u64 val = nla_get_u64(data[IFLA_BR_MCAST_MEMBERSHIP_INTVL]);
+
+		br->multicast_membership_interval = clock_t_to_jiffies(val);
+	}
+
+	if (data[IFLA_BR_MCAST_QUERIER_INTVL]) {
+		u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERIER_INTVL]);
+
+		br->multicast_querier_interval = clock_t_to_jiffies(val);
+	}
+
+	if (data[IFLA_BR_MCAST_QUERY_INTVL]) {
+		u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERY_INTVL]);
+
+		br->multicast_query_interval = clock_t_to_jiffies(val);
+	}
+
+	if (data[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL]) {
+		u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL]);
+
+		br->multicast_query_response_interval = clock_t_to_jiffies(val);
+	}
+
+	if (data[IFLA_BR_MCAST_STARTUP_QUERY_INTVL]) {
+		u64 val = nla_get_u64(data[IFLA_BR_MCAST_STARTUP_QUERY_INTVL]);
+
+		br->multicast_startup_query_interval = clock_t_to_jiffies(val);
+	}
+#endif
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
+	if (data[IFLA_BR_NF_CALL_IPTABLES]) {
+		u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_IPTABLES]);
+
+		br->nf_call_iptables = val ? true : false;
+	}
+
+	if (data[IFLA_BR_NF_CALL_IP6TABLES]) {
+		u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_IP6TABLES]);
+
+		br->nf_call_ip6tables = val ? true : false;
+	}
+
+	if (data[IFLA_BR_NF_CALL_ARPTABLES]) {
+		u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_ARPTABLES]);
+
+		br->nf_call_arptables = val ? true : false;
+	}
 #endif
 
 	return 0;
@@ -843,6 +1016,40 @@ static size_t br_get_size(const struct net_device *brdev)
 	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_VLAN_FILTERING */
 #ifdef CONFIG_BRIDGE_VLAN_FILTERING
 	       nla_total_size(sizeof(__be16)) + /* IFLA_BR_VLAN_PROTOCOL */
+	       nla_total_size(sizeof(u16)) +    /* IFLA_BR_VLAN_DEFAULT_PVID */
+#endif
+	       nla_total_size(sizeof(u16)) +    /* IFLA_BR_GROUP_FWD_MASK */
+	       nla_total_size(sizeof(struct ifla_bridge_id)) +   /* IFLA_BR_ROOT_ID */
+	       nla_total_size(sizeof(struct ifla_bridge_id)) +   /* IFLA_BR_BRIDGE_ID */
+	       nla_total_size(sizeof(u16)) +    /* IFLA_BR_ROOT_PORT */
+	       nla_total_size(sizeof(u32)) +    /* IFLA_BR_ROOT_PATH_COST */
+	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_TOPOLOGY_CHANGE */
+	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_TOPOLOGY_CHANGE_DETECTED */
+	       nla_total_size(sizeof(u64)) +    /* IFLA_BR_HELLO_TIMER */
+	       nla_total_size(sizeof(u64)) +    /* IFLA_BR_TCN_TIMER */
+	       nla_total_size(sizeof(u64)) +    /* IFLA_BR_TOPOLOGY_CHANGE_TIMER */
+	       nla_total_size(sizeof(u64)) +    /* IFLA_BR_GC_TIMER */
+	       nla_total_size(ETH_ALEN) +       /* IFLA_BR_GROUP_ADDR */
+#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
+	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_MCAST_ROUTER */
+	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_MCAST_SNOOPING */
+	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_MCAST_QUERY_USE_IFADDR */
+	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_MCAST_QUERIER */
+	       nla_total_size(sizeof(u32)) +    /* IFLA_BR_MCAST_HASH_ELASTICITY */
+	       nla_total_size(sizeof(u32)) +    /* IFLA_BR_MCAST_HASH_MAX */
+	       nla_total_size(sizeof(u32)) +    /* IFLA_BR_MCAST_LAST_MEMBER_CNT */
+	       nla_total_size(sizeof(u32)) +    /* IFLA_BR_MCAST_STARTUP_QUERY_CNT */
+	       nla_total_size(sizeof(u64)) +    /* IFLA_BR_MCAST_LAST_MEMBER_INTVL */
+	       nla_total_size(sizeof(u64)) +    /* IFLA_BR_MCAST_MEMBERSHIP_INTVL */
+	       nla_total_size(sizeof(u64)) +    /* IFLA_BR_MCAST_QUERIER_INTVL */
+	       nla_total_size(sizeof(u64)) +    /* IFLA_BR_MCAST_QUERY_INTVL */
+	       nla_total_size(sizeof(u64)) +    /* IFLA_BR_MCAST_QUERY_RESPONSE_INTVL */
+	       nla_total_size(sizeof(u64)) +    /* IFLA_BR_MCAST_STARTUP_QUERY_INTVL */
+#endif
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
+	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_NF_CALL_IPTABLES */
+	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_NF_CALL_IP6TABLES */
+	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_NF_CALL_ARPTABLES */
 #endif
 	       0;
 }
@@ -850,13 +1057,28 @@ static size_t br_get_size(const struct net_device *brdev)
 static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev)
 {
 	struct net_bridge *br = netdev_priv(brdev);
+	u64 hello_timer, tcn_timer, topology_change_timer, gc_timer, clockval;
 	u32 forward_delay = jiffies_to_clock_t(br->forward_delay);
 	u32 hello_time = jiffies_to_clock_t(br->hello_time);
 	u32 age_time = jiffies_to_clock_t(br->max_age);
 	u32 ageing_time = jiffies_to_clock_t(br->ageing_time);
 	u32 stp_enabled = br->stp_enabled;
 	u16 priority = (br->bridge_id.prio[0] << 8) | br->bridge_id.prio[1];
+	u16 group_fwd_mask = br->group_fwd_mask;
 	u8 vlan_enabled = br_vlan_enabled(br);
+	struct ifla_bridge_id root_id, bridge_id;
+
+	memset(&bridge_id, 0, sizeof(bridge_id));
+	memset(&root_id, 0, sizeof(root_id));
+	memcpy(root_id.prio, br->designated_root.prio, sizeof(root_id.prio));
+	memcpy(root_id.addr, br->designated_root.addr, sizeof(root_id.addr));
+	memcpy(bridge_id.prio, br->bridge_id.prio, sizeof(bridge_id.prio));
+	memcpy(bridge_id.addr, br->bridge_id.addr, sizeof(bridge_id.addr));
+	hello_timer = br_timer_value(&br->hello_timer);
+	tcn_timer = br_timer_value(&br->tcn_timer);
+	topology_change_timer = br_timer_value(&br->topology_change_timer);
+	gc_timer = br_timer_value(&br->gc_timer);
+	clockval = 0;
 
 	if (nla_put_u32(skb, IFLA_BR_FORWARD_DELAY, forward_delay) ||
 	    nla_put_u32(skb, IFLA_BR_HELLO_TIME, hello_time) ||
@@ -864,11 +1086,69 @@ static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev)
 	    nla_put_u32(skb, IFLA_BR_AGEING_TIME, ageing_time) ||
 	    nla_put_u32(skb, IFLA_BR_STP_STATE, stp_enabled) ||
 	    nla_put_u16(skb, IFLA_BR_PRIORITY, priority) ||
-	    nla_put_u8(skb, IFLA_BR_VLAN_FILTERING, vlan_enabled))
+	    nla_put_u8(skb, IFLA_BR_VLAN_FILTERING, vlan_enabled) ||
+	    nla_put_u16(skb, IFLA_BR_GROUP_FWD_MASK, group_fwd_mask) ||
+	    nla_put(skb, IFLA_BR_ROOT_ID, sizeof(root_id), &root_id) ||
+	    nla_put(skb, IFLA_BR_BRIDGE_ID, sizeof(bridge_id), &bridge_id) ||
+	    nla_put_u16(skb, IFLA_BR_ROOT_PORT, br->root_port) ||
+	    nla_put_u32(skb, IFLA_BR_ROOT_PATH_COST, br->root_path_cost) ||
+	    nla_put_u8(skb, IFLA_BR_TOPOLOGY_CHANGE, br->topology_change) ||
+	    nla_put_u8(skb, IFLA_BR_TOPOLOGY_CHANGE_DETECTED,
+		       br->topology_change_detected) ||
+	    nla_put_u64(skb, IFLA_BR_HELLO_TIMER, hello_timer) ||
+	    nla_put_u64(skb, IFLA_BR_TCN_TIMER, tcn_timer) ||
+	    nla_put_u64(skb, IFLA_BR_TOPOLOGY_CHANGE_TIMER,
+			topology_change_timer) ||
+	    nla_put_u64(skb, IFLA_BR_GC_TIMER, gc_timer) ||
+	    nla_put(skb, IFLA_BR_GROUP_ADDR, ETH_ALEN, br->group_addr))
 		return -EMSGSIZE;
 
 #ifdef CONFIG_BRIDGE_VLAN_FILTERING
-	if (nla_put_be16(skb, IFLA_BR_VLAN_PROTOCOL, br->vlan_proto))
+	if (nla_put_be16(skb, IFLA_BR_VLAN_PROTOCOL, br->vlan_proto) ||
+	    nla_put_u16(skb, IFLA_BR_VLAN_DEFAULT_PVID, br->default_pvid))
+		return -EMSGSIZE;
+#endif
+#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
+	if (nla_put_u8(skb, IFLA_BR_MCAST_ROUTER, br->multicast_router) ||
+	    nla_put_u8(skb, IFLA_BR_MCAST_SNOOPING, !br->multicast_disabled) ||
+	    nla_put_u8(skb, IFLA_BR_MCAST_QUERY_USE_IFADDR,
+		       br->multicast_query_use_ifaddr) ||
+	    nla_put_u8(skb, IFLA_BR_MCAST_QUERIER, br->multicast_querier) ||
+	    nla_put_u32(skb, IFLA_BR_MCAST_HASH_ELASTICITY,
+			br->hash_elasticity) ||
+	    nla_put_u32(skb, IFLA_BR_MCAST_HASH_MAX, br->hash_max) ||
+	    nla_put_u32(skb, IFLA_BR_MCAST_LAST_MEMBER_CNT,
+			br->multicast_last_member_count) ||
+	    nla_put_u32(skb, IFLA_BR_MCAST_STARTUP_QUERY_CNT,
+			br->multicast_startup_query_count))
+		return -EMSGSIZE;
+
+	clockval = jiffies_to_clock_t(br->multicast_last_member_interval);
+	if (nla_put_u64(skb, IFLA_BR_MCAST_LAST_MEMBER_INTVL, clockval))
+		return -EMSGSIZE;
+	clockval = jiffies_to_clock_t(br->multicast_membership_interval);
+	if (nla_put_u64(skb, IFLA_BR_MCAST_MEMBERSHIP_INTVL, clockval))
+		return -EMSGSIZE;
+	clockval = jiffies_to_clock_t(br->multicast_querier_interval);
+	if (nla_put_u64(skb, IFLA_BR_MCAST_QUERIER_INTVL, clockval))
+		return -EMSGSIZE;
+	clockval = jiffies_to_clock_t(br->multicast_query_interval);
+	if (nla_put_u64(skb, IFLA_BR_MCAST_QUERY_INTVL, clockval))
+		return -EMSGSIZE;
+	clockval = jiffies_to_clock_t(br->multicast_query_response_interval);
+	if (nla_put_u64(skb, IFLA_BR_MCAST_QUERY_RESPONSE_INTVL, clockval))
+		return -EMSGSIZE;
+	clockval = jiffies_to_clock_t(br->multicast_startup_query_interval);
+	if (nla_put_u64(skb, IFLA_BR_MCAST_STARTUP_QUERY_INTVL, clockval))
+		return -EMSGSIZE;
+#endif
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
+	if (nla_put_u8(skb, IFLA_BR_NF_CALL_IPTABLES,
+		       br->nf_call_iptables ? 1 : 0) ||
+	    nla_put_u8(skb, IFLA_BR_NF_CALL_IP6TABLES,
+		       br->nf_call_ip6tables ? 1 : 0) ||
+	    nla_put_u8(skb, IFLA_BR_NF_CALL_ARPTABLES,
+		       br->nf_call_arptables ? 1 : 0))
 		return -EMSGSIZE;
 #endif
@@ -884,11 +1164,11 @@ static size_t br_get_link_af_size(const struct net_device *dev)
 	if (br_port_exists(dev)) {
 		p = br_port_get_rtnl(dev);
 		num_vlans = br_get_num_vlan_infos(nbp_vlan_group(p),
-						  RTEXT_FILTER_BRVLAN, 0);
+						  RTEXT_FILTER_BRVLAN);
 	} else if (dev->priv_flags & IFF_EBRIDGE) {
 		br = netdev_priv(dev);
 		num_vlans = br_get_num_vlan_infos(br_vlan_group(br),
-						  RTEXT_FILTER_BRVLAN, 0);
+						  RTEXT_FILTER_BRVLAN);
 	}
 
 	/* Each VLAN is returned in bridge_vlan_info along with flags */
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index cfe945f5ab8b..09d3ecbcb4f0 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -119,6 +119,7 @@ struct net_bridge_vlan {
  * @vlan_hash: VLAN entry rhashtable
  * @vlan_list: sorted VLAN entry list
  * @num_vlans: number of total VLAN entries
+ * @pvid: PVID VLAN id
  *
  * IMPORTANT: Be careful when checking if there're VLAN entries using list
  *            primitives because the bridge can have entries in its list which
@@ -130,6 +131,7 @@ struct net_bridge_vlan_group {
 	struct rhashtable vlan_hash;
 	struct list_head vlan_list;
 	u16 num_vlans;
+	u16 pvid;
 };
 
 struct net_bridge_fdb_entry
@@ -228,7 +230,6 @@ struct net_bridge_port
 #endif
 #ifdef CONFIG_BRIDGE_VLAN_FILTERING
 	struct net_bridge_vlan_group *vlgrp;
-	u16 pvid;
 #endif
 };
@@ -340,7 +341,6 @@ struct net_bridge
 	u8 vlan_enabled;
 	__be16 vlan_proto;
 	u16 default_pvid;
-	u16 pvid;
 #endif
 };
@@ -400,7 +400,7 @@ static inline bool br_vlan_is_brentry(const struct net_bridge_vlan *v)
 	return v->flags & BRIDGE_VLAN_INFO_BRENTRY;
 }
 
-/* check if we should use the vlan entry is usable */
+/* check if we should use the vlan entry, returns false if it's only context */
 static inline bool br_vlan_should_use(const struct net_bridge_vlan *v)
 {
 	if (br_vlan_is_master(v)) {
@@ -670,10 +670,10 @@ static inline void br_mdb_uninit(void)
 
 /* br_vlan.c */
 #ifdef CONFIG_BRIDGE_VLAN_FILTERING
-bool br_allowed_ingress(struct net_bridge *br, struct sk_buff *skb, u16 *vid);
-bool nbp_allowed_ingress(struct net_bridge_port *p, struct sk_buff *skb,
-			 u16 *vid);
-bool br_allowed_egress(struct net_bridge_vlan_group *br,
+bool br_allowed_ingress(const struct net_bridge *br,
+			struct net_bridge_vlan_group *vg, struct sk_buff *skb,
+			u16 *vid);
+bool br_allowed_egress(struct net_bridge_vlan_group *vg,
 		       const struct sk_buff *skb);
 bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid);
 struct sk_buff *br_handle_vlan(struct net_bridge *br,
@@ -690,6 +690,7 @@ int __br_vlan_set_proto(struct net_bridge *br, __be16 proto);
 int br_vlan_set_proto(struct net_bridge *br, unsigned long val);
 int br_vlan_init(struct net_bridge *br);
 int br_vlan_set_default_pvid(struct net_bridge *br, unsigned long val);
+int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid);
 int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags);
 int nbp_vlan_delete(struct net_bridge_port *port, u16 vid);
 void nbp_vlan_flush(struct net_bridge_port *port);
@@ -725,22 +726,13 @@ static inline int br_vlan_get_tag(const struct sk_buff *skb, u16 *vid)
 	return err;
 }
 
-static inline u16 br_get_pvid(const struct net_bridge *br)
-{
-	if (!br)
-		return 0;
-
-	smp_rmb();
-	return br->pvid;
-}
-
-static inline u16 nbp_get_pvid(const struct net_bridge_port *p)
+static inline u16 br_get_pvid(const struct net_bridge_vlan_group *vg)
 {
-	if (!p)
+	if (!vg)
 		return 0;
 
 	smp_rmb();
-	return p->pvid;
+	return vg->pvid;
 }
 
 static inline int br_vlan_enabled(struct net_bridge *br)
@@ -748,20 +740,14 @@ static inline int br_vlan_enabled(struct net_bridge *br)
 	return br->vlan_enabled;
 }
 #else
-static inline bool br_allowed_ingress(struct net_bridge *br,
+static inline bool br_allowed_ingress(const struct net_bridge *br,
+				      struct net_bridge_vlan_group *vg,
 				      struct sk_buff *skb, u16 *vid)
 {
 	return true;
 }
 
-static inline bool nbp_allowed_ingress(struct net_bridge_port *p,
-				       struct sk_buff *skb,
-				       u16 *vid)
-{
-	return true;
-}
-
 static inline bool br_allowed_egress(struct net_bridge_vlan_group *vg,
 				     const struct sk_buff *skb)
 {
@@ -834,12 +820,7 @@ static inline u16 br_vlan_get_tag(const struct sk_buff *skb, u16 *tag)
 	return 0;
 }
 
-static inline u16 br_get_pvid(const struct net_bridge *br)
-{
-	return 0;
-}
-
-static inline u16 nbp_get_pvid(const struct net_bridge_port *p)
+static inline u16 br_get_pvid(const struct net_bridge_vlan_group *vg)
 {
 	return 0;
 }
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
index 3a7392e6010e..3a982c02599a 100644
--- a/net/bridge/br_stp.c
+++ b/net/bridge/br_stp.c
@@ -40,7 +40,7 @@ void br_log_state(const struct net_bridge_port *p)
 void br_set_state(struct net_bridge_port *p, unsigned int state)
 {
 	struct switchdev_attr attr = {
-		.id = SWITCHDEV_ATTR_PORT_STP_STATE,
+		.id = SWITCHDEV_ATTR_ID_PORT_STP_STATE,
 		.u.stp_state = state,
 	};
 	int err;
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index e227164bc3e1..eae07ee9bfe0 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -19,6 +19,8 @@ static const struct rhashtable_params br_vlan_rht_params = {
 	.head_offset = offsetof(struct net_bridge_vlan, vnode),
 	.key_offset = offsetof(struct net_bridge_vlan, vid),
 	.key_len = sizeof(u16),
+	.nelem_hint = 3,
+	.locks_mul = 1,
 	.max_size = VLAN_N_VID,
 	.obj_cmpfn = br_vlan_cmp,
 	.automatic_shrinking = true,
@@ -29,37 +31,37 @@ static struct net_bridge_vlan *br_vlan_lookup(struct rhashtable *tbl, u16 vid)
 	return rhashtable_lookup_fast(tbl, &vid, br_vlan_rht_params);
 }
 
-static void __vlan_add_pvid(u16 *pvid, u16 vid)
+static void __vlan_add_pvid(struct net_bridge_vlan_group *vg, u16 vid)
 {
-	if (*pvid == vid)
+	if (vg->pvid == vid)
 		return;
 
 	smp_wmb();
-	*pvid = vid;
+	vg->pvid = vid;
 }
 
-static void __vlan_delete_pvid(u16 *pvid, u16 vid)
+static void __vlan_delete_pvid(struct net_bridge_vlan_group *vg, u16 vid)
 {
-	if (*pvid != vid)
+	if (vg->pvid != vid)
 		return;
 
 	smp_wmb();
-	*pvid = 0;
+	vg->pvid = 0;
 }
 
 static void __vlan_add_flags(struct net_bridge_vlan *v, u16 flags)
 {
-	if (flags & BRIDGE_VLAN_INFO_PVID) {
-		if (br_vlan_is_master(v))
-			__vlan_add_pvid(&v->br->pvid, v->vid);
-		else
-			__vlan_add_pvid(&v->port->pvid, v->vid);
-	} else {
-		if (br_vlan_is_master(v))
-			__vlan_delete_pvid(&v->br->pvid, v->vid);
-		else
-			__vlan_delete_pvid(&v->port->pvid, v->vid);
-	}
+	struct net_bridge_vlan_group *vg;
+
+	if (br_vlan_is_master(v))
+		vg = v->br->vlgrp;
+	else
+		vg = v->port->vlgrp;
+
+	if (flags & BRIDGE_VLAN_INFO_PVID)
+		__vlan_add_pvid(vg, v->vid);
+	else
+		__vlan_delete_pvid(vg, v->vid);
 
 	if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
 		v->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
@@ -80,13 +82,14 @@ static int __vlan_vid_add(struct net_device *dev, struct net_bridge *br,
 	if (ops->ndo_vlan_rx_add_vid) {
 		err = vlan_vid_add(dev, br->vlan_proto, vid);
 	} else {
-		struct switchdev_obj_vlan v = {
+		struct switchdev_obj_port_vlan v = {
+			.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
 			.flags = flags,
 			.vid_begin = vid,
 			.vid_end = vid,
 		};
 
-		err = switchdev_port_obj_add(dev, SWITCHDEV_OBJ_PORT_VLAN, &v);
+		err = switchdev_port_obj_add(dev, &v.obj);
 		if (err == -EOPNOTSUPP)
 			err = 0;
 	}
@@ -108,12 +111,12 @@ static void __vlan_add_list(struct net_bridge_vlan *v)
 		else
 			break;
 	}
-	list_add(&v->vlist, hpos);
+	list_add_rcu(&v->vlist, hpos);
 }
 
 static void __vlan_del_list(struct net_bridge_vlan *v)
 {
-	list_del(&v->vlist);
+	list_del_rcu(&v->vlist);
 }
 
 static int __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
@@ -129,12 +132,13 @@ static int __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
 	if (ops->ndo_vlan_rx_kill_vid) {
 		vlan_vid_del(dev, br->vlan_proto, vid);
 	} else {
-		struct switchdev_obj_vlan v = {
+		struct switchdev_obj_port_vlan v = {
+			.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
 			.vid_begin = vid,
 			.vid_end = vid,
 		};
 
-		err = switchdev_port_obj_del(dev, SWITCHDEV_OBJ_PORT_VLAN, &v);
+		err = switchdev_port_obj_del(dev, &v.obj);
 		if (err == -EOPNOTSUPP)
 			err = 0;
 	}
@@ -142,6 +146,40 @@ static int __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
 	return err;
 }
 
+/* Returns a master vlan, if it didn't exist it gets created. In all cases a
+ * a reference is taken to the master vlan before returning.
+ */
+static struct net_bridge_vlan *br_vlan_get_master(struct net_bridge *br, u16 vid)
+{
+	struct net_bridge_vlan *masterv;
+
+	masterv = br_vlan_find(br->vlgrp, vid);
+	if (!masterv) {
+		/* missing global ctx, create it now */
+		if (br_vlan_add(br, vid, 0))
+			return NULL;
+		masterv = br_vlan_find(br->vlgrp, vid);
+		if (WARN_ON(!masterv))
+			return NULL;
+	}
+	atomic_inc(&masterv->refcnt);
+
+	return masterv;
+}
+
+static void br_vlan_put_master(struct net_bridge_vlan *masterv)
+{
+	if (!br_vlan_is_master(masterv))
+		return;
+
+	if (atomic_dec_and_test(&masterv->refcnt)) {
+		rhashtable_remove_fast(&masterv->br->vlgrp->vlan_hash,
+				       &masterv->vnode, br_vlan_rht_params);
+		__vlan_del_list(masterv);
+		kfree_rcu(masterv, rcu);
+	}
+}
+
 /* This is the shared VLAN add function which works for both ports and bridge
  * devices. There are four possible calls to this function in terms of the
  * vlan entry type:
@@ -157,7 +195,7 @@ static int __vlan_add(struct net_bridge_vlan *v, u16 flags)
 {
 	struct net_bridge_vlan *masterv = NULL;
 	struct net_bridge_port *p = NULL;
-	struct rhashtable *tbl;
+	struct net_bridge_vlan_group *vg;
 	struct net_device *dev;
 	struct net_bridge *br;
 	int err;
@@ -165,17 +203,15 @@ static int __vlan_add(struct net_bridge_vlan *v, u16 flags)
 	if (br_vlan_is_master(v)) {
 		br = v->br;
 		dev = br->dev;
-		tbl = &br->vlgrp->vlan_hash;
+		vg = br->vlgrp;
 	} else {
 		p = v->port;
 		br = p->br;
 		dev = p->dev;
-		tbl = &p->vlgrp->vlan_hash;
+		vg = p->vlgrp;
 	}
 
 	if (p) {
-		u16 master_flags = flags;
-
 		/* Add VLAN to the device filter if it is supported.
 		 * This ensures tagged traffic enters the bridge when
 		 * promiscuous mode is disabled by br_manage_promisc().
@@ -186,57 +222,49 @@ static int __vlan_add(struct net_bridge_vlan *v, u16 flags)
 		/* need to work on the master vlan too */
 		if (flags & BRIDGE_VLAN_INFO_MASTER) {
-			master_flags |= BRIDGE_VLAN_INFO_BRENTRY;
-			err = br_vlan_add(br, v->vid, master_flags);
+			err = br_vlan_add(br, v->vid, flags |
+						      BRIDGE_VLAN_INFO_BRENTRY);
 			if (err)
 				goto out_filt;
 		}
 
-		masterv = br_vlan_find(br->vlgrp, v->vid);
-		if (!masterv) {
-			/* missing global ctx, create it now */
-			err = br_vlan_add(br, v->vid, master_flags);
-			if (err)
-				goto out_filt;
-			masterv = br_vlan_find(br->vlgrp, v->vid);
-			WARN_ON(!masterv);
-		}
-		atomic_inc(&masterv->refcnt);
+		masterv = br_vlan_get_master(br, v->vid);
+		if (!masterv)
+			goto out_filt;
 		v->brvlan = masterv;
 	}
 
-	/* Add the dev mac only if it's a usable vlan */
+	/* Add the dev mac and count the vlan only if it's usable */
 	if (br_vlan_should_use(v)) {
 		err = br_fdb_insert(br, p, dev->dev_addr, v->vid);
 		if (err) {
 			br_err(br, "failed insert local address into bridge forwarding table\n");
 			goto out_filt;
 		}
+		vg->num_vlans++;
 	}
 
-	err = rhashtable_lookup_insert_fast(tbl, &v->vnode, br_vlan_rht_params);
+	err = rhashtable_lookup_insert_fast(&vg->vlan_hash, &v->vnode,
+					    br_vlan_rht_params);
 	if (err)
 		goto out_fdb_insert;
 
 	__vlan_add_list(v);
 	__vlan_add_flags(v, flags);
-	if (br_vlan_is_master(v)) {
-		if (br_vlan_is_brentry(v))
-			br->vlgrp->num_vlans++;
-	} else {
-		p->vlgrp->num_vlans++;
-	}
 out:
 	return err;
 
 out_fdb_insert:
-	br_fdb_find_delete_local(br, p, br->dev->dev_addr, v->vid);
+	if (br_vlan_should_use(v)) {
+		br_fdb_find_delete_local(br, p, dev->dev_addr, v->vid);
+		vg->num_vlans--;
+	}
 out_filt:
 	if (p) {
 		__vlan_vid_del(dev, br, v->vid);
 		if (masterv) {
-			atomic_dec(&masterv->refcnt);
+			br_vlan_put_master(masterv);
 			v->brvlan = NULL;
 		}
 	}
@@ -247,61 +275,47 @@ out_filt:
 static int __vlan_del(struct net_bridge_vlan *v)
 {
 	struct net_bridge_vlan *masterv = v;
+	struct net_bridge_vlan_group *vg;
 	struct net_bridge_port *p = NULL;
-	struct net_bridge *br;
 	int err = 0;
-	struct rhashtable *tbl;
-	u16 *pvid;
 
 	if (br_vlan_is_master(v)) {
-		br = v->br;
-		tbl = &v->br->vlgrp->vlan_hash;
-		pvid = &v->br->pvid;
+		vg = v->br->vlgrp;
 	} else {
 		p = v->port;
-		br = p->br;
-		tbl = &p->vlgrp->vlan_hash;
+		vg = v->port->vlgrp;
 		masterv = v->brvlan;
-		pvid = &p->pvid;
 	}
 
-	__vlan_delete_pvid(pvid, v->vid);
+	__vlan_delete_pvid(vg, v->vid);
 	if (p) {
 		err = __vlan_vid_del(p->dev, p->br, v->vid);
 		if (err)
 			goto out;
 	}
 
-	if (br_vlan_is_master(v)) {
-		if (br_vlan_is_brentry(v)) {
-			v->flags &= ~BRIDGE_VLAN_INFO_BRENTRY;
-			br->vlgrp->num_vlans--;
-		}
-	} else {
-		p->vlgrp->num_vlans--;
+	if (br_vlan_should_use(v)) {
+		v->flags &= ~BRIDGE_VLAN_INFO_BRENTRY;
+		vg->num_vlans--;
 	}
 
 	if (masterv != v) {
-		rhashtable_remove_fast(tbl, &v->vnode, br_vlan_rht_params);
+		rhashtable_remove_fast(&vg->vlan_hash, &v->vnode,
+				       br_vlan_rht_params);
 		__vlan_del_list(v);
 		kfree_rcu(v, rcu);
 	}
 
-	if (atomic_dec_and_test(&masterv->refcnt)) {
-		rhashtable_remove_fast(&masterv->br->vlgrp->vlan_hash,
-				       &masterv->vnode, br_vlan_rht_params);
-		__vlan_del_list(masterv);
-		kfree_rcu(masterv, rcu);
-	}
+	br_vlan_put_master(masterv);
 out:
 	return err;
 }
 
-static void __vlan_flush(struct net_bridge_vlan_group *vlgrp, u16 *pvid)
+static void __vlan_flush(struct net_bridge_vlan_group *vlgrp)
 {
 	struct net_bridge_vlan *vlan, *tmp;
 
-	__vlan_delete_pvid(pvid, *pvid);
+	__vlan_delete_pvid(vlgrp, vlgrp->pvid);
 	list_for_each_entry_safe(vlan, tmp, &vlgrp->vlan_list, vlist)
 		__vlan_del(vlan);
 	rhashtable_destroy(&vlgrp->vlan_hash);
@@ -346,7 +360,7 @@ out:
 }
 
 /* Called under RCU */
-static bool __allowed_ingress(struct rhashtable *tbl, u16 pvid, __be16 proto,
+static bool __allowed_ingress(struct net_bridge_vlan_group *vg, __be16 proto,
 			      struct sk_buff *skb, u16 *vid)
 {
 	const struct net_bridge_vlan *v;
@@ -387,6 +401,8 @@ static bool __allowed_ingress(struct rhashtable *tbl, u16 pvid, __be16 proto,
 	}
 
 	if (!*vid) {
+		u16 pvid = br_get_pvid(vg);
+
 		/* Frame had a tag with VID 0 or did not have a tag.
 		 * See if pvid is set on this port.  That tells us which
 		 * vlan untagged or priority-tagged traffic belongs to.
@@ -413,7 +429,7 @@ static bool __allowed_ingress(struct rhashtable *tbl, u16 pvid, __be16 proto,
 	}
 
 	/* Frame had a valid vlan tag.  See if vlan is allowed */
-	v = br_vlan_lookup(tbl, *vid);
+	v = br_vlan_find(vg, *vid);
 	if (v && br_vlan_should_use(v))
 		return true;
 drop:
@@ -421,25 +437,10 @@ drop:
 	return false;
 }
 
-bool br_allowed_ingress(struct net_bridge *br, struct sk_buff *skb, u16 *vid)
-{
-	/* If VLAN filtering is disabled on the bridge, all packets are
-	 * permitted.
-	 */
-	if (!br->vlan_enabled) {
-		BR_INPUT_SKB_CB(skb)->vlan_filtered = false;
-		return true;
-	}
-
-	return __allowed_ingress(&br->vlgrp->vlan_hash, br->pvid,
-				 br->vlan_proto, skb, vid);
-}
-
-bool nbp_allowed_ingress(struct net_bridge_port *p, struct sk_buff *skb,
-			 u16 *vid)
+bool br_allowed_ingress(const struct net_bridge *br,
+			struct net_bridge_vlan_group *vg, struct sk_buff *skb,
+			u16 *vid)
 {
-	struct net_bridge *br = p->br;
-
 	/* If VLAN filtering is disabled on the bridge, all packets are
 	 * permitted.
 	 */
@@ -448,8 +449,7 @@ bool nbp_allowed_ingress(struct net_bridge_port *p, struct sk_buff *skb,
 		return true;
 	}
 
-	return __allowed_ingress(&p->vlgrp->vlan_hash, p->pvid, br->vlan_proto,
-				 skb, vid);
+	return __allowed_ingress(vg, br->vlan_proto, skb, vid);
 }
 
 /* Called under RCU. */
@@ -474,27 +474,29 @@ bool br_allowed_egress(struct net_bridge_vlan_group *vg,
 /* Called under RCU */
 bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid)
 {
+	struct net_bridge_vlan_group *vg;
 	struct net_bridge *br = p->br;
 
 	/* If filtering was disabled at input, let it pass. */
 	if (!br->vlan_enabled)
 		return true;
 
-	if (!p->vlgrp->num_vlans)
+	vg = p->vlgrp;
+	if (!vg || !vg->num_vlans)
 		return false;
 
 	if (!br_vlan_get_tag(skb, vid) && skb->vlan_proto != br->vlan_proto)
 		*vid = 0;
 
 	if (!*vid) {
-		*vid = nbp_get_pvid(p);
+		*vid = br_get_pvid(vg);
 		if (!*vid)
 			return false;
 
 		return true;
 	}
 
-	if (br_vlan_find(p->vlgrp, *vid))
+	if (br_vlan_find(vg, *vid))
 		return true;
 
 	return false;
@@ -570,7 +572,7 @@ void br_vlan_flush(struct net_bridge *br)
 {
 	ASSERT_RTNL();
 
-	__vlan_flush(br_vlan_group(br), &br->pvid);
+	__vlan_flush(br_vlan_group(br));
 }
 
 struct net_bridge_vlan *br_vlan_find(struct net_bridge_vlan_group *vg, u16 vid)
@@ -691,12 +693,11 @@ int br_vlan_set_proto(struct net_bridge *br, unsigned long val)
 	return err;
 }
 
-static bool vlan_default_pvid(struct net_bridge_vlan_group *vg, u16 pvid,
-			      u16 vid)
+static bool vlan_default_pvid(struct net_bridge_vlan_group *vg, u16 vid)
 {
 	struct net_bridge_vlan *v;
 
-	if (vid != pvid)
+	if (vid != vg->pvid)
 		return false;
 
 	v = br_vlan_lookup(&vg->vlan_hash, vid);
@@ -715,18 +716,18 @@ static void br_vlan_disable_default_pvid(struct net_bridge *br)
 	/* Disable default_pvid on all ports where it is still
 	 * configured.
 	 */
-	if (vlan_default_pvid(br->vlgrp, br->pvid, pvid))
+	if (vlan_default_pvid(br->vlgrp, pvid))
 		br_vlan_delete(br, pvid);
 
 	list_for_each_entry(p, &br->port_list, list) {
-		if (vlan_default_pvid(p->vlgrp, p->pvid, pvid))
+		if (vlan_default_pvid(p->vlgrp, pvid))
 			nbp_vlan_delete(p, pvid);
 	}
 
 	br->default_pvid = 0;
 }
 
-static int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid)
+int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid)
 {
 	const struct net_bridge_vlan *pvent;
 	struct net_bridge_port *p;
@@ -734,6 +735,11 @@ static int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid)
 	int err = 0;
 	unsigned long *changed;
 
+	if (!pvid) {
+		br_vlan_disable_default_pvid(br);
+		return 0;
+	}
+
 	changed = kcalloc(BITS_TO_LONGS(BR_MAX_PORTS), sizeof(unsigned long),
 			  GFP_KERNEL);
 	if (!changed)
@@ -745,7 +751,7 @@ static int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid)
 	 * user configuration.
 	 */
 	pvent = br_vlan_find(br->vlgrp, pvid);
-	if ((!old_pvid || vlan_default_pvid(br->vlgrp, br->pvid, old_pvid)) &&
+	if ((!old_pvid || vlan_default_pvid(br->vlgrp, old_pvid)) &&
 	    (!pvent || !br_vlan_should_use(pvent))) {
 		err = br_vlan_add(br, pvid,
 				  BRIDGE_VLAN_INFO_PVID |
@@ -762,7 +768,7 @@ static int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid)
 		 * user configuration.
 		 */
 		if ((old_pvid &&
-		     !vlan_default_pvid(p->vlgrp, p->pvid, old_pvid)) ||
+		     !vlan_default_pvid(p->vlgrp, old_pvid)) ||
 		    br_vlan_find(p->vlgrp, pvid))
 			continue;
@@ -824,12 +830,7 @@ int br_vlan_set_default_pvid(struct net_bridge *br, unsigned long val)
 		err = -EPERM;
 		goto unlock;
 	}
-
-	if (!pvid)
-		br_vlan_disable_default_pvid(br);
-	else
-		err = __br_vlan_set_default_pvid(br, pvid);
-
+	err = __br_vlan_set_default_pvid(br, pvid);
 unlock:
 	rtnl_unlock();
 	return err;
@@ -867,16 +868,20 @@ err_rhtbl:
 
 int nbp_vlan_init(struct net_bridge_port *p)
 {
+	struct net_bridge_vlan_group *vg;
 	int ret = -ENOMEM;
 
-	p->vlgrp = kzalloc(sizeof(struct net_bridge_vlan_group), GFP_KERNEL);
-	if (!p->vlgrp)
+	vg = kzalloc(sizeof(struct net_bridge_vlan_group), GFP_KERNEL);
+	if (!vg)
 		goto out;
 
-	ret = rhashtable_init(&p->vlgrp->vlan_hash, &br_vlan_rht_params);
+	ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
 	if (ret)
 		goto err_rhtbl;
-	INIT_LIST_HEAD(&p->vlgrp->vlan_list);
+	INIT_LIST_HEAD(&vg->vlan_list);
+	/* Make sure everything's committed before publishing vg */
+	smp_wmb();
+	p->vlgrp = vg;
 	if (p->br->default_pvid) {
 		ret = nbp_vlan_add(p, p->br->default_pvid,
 				   BRIDGE_VLAN_INFO_PVID |
@@ -888,9 +893,9 @@ out:
 	return ret;
 
 err_vlan_add:
-	rhashtable_destroy(&p->vlgrp->vlan_hash);
+	rhashtable_destroy(&vg->vlan_hash);
 err_rhtbl:
-	kfree(p->vlgrp);
+	kfree(vg);
 
 	goto out;
 }
@@ -951,5 +956,5 @@ void nbp_vlan_flush(struct net_bridge_port *port)
 	list_for_each_entry(vlan, &port->vlgrp->vlan_list, vlist)
 		vlan_vid_del(port->dev, port->br->vlan_proto, vlan->vid);
 
-	__vlan_flush(nbp_vlan_group(port), &port->pvid);
+	__vlan_flush(nbp_vlan_group(port));
 }
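The patch moves the pvid into struct net_bridge_vlan_group and keeps the existing lockless read pattern: writers order their stores with smp_wmb() (in __vlan_add_pvid()/__vlan_delete_pvid(), and before publishing p->vlgrp in nbp_vlan_init()), while br_get_pvid() reads with smp_rmb(). The following stand-alone user-space sketch models that publish/read pairing with C11 release/acquire atomics; the struct, function names and pthread scaffolding are illustrative assumptions, not part of the kernel API or of this patch.

/*
 * Minimal user-space analogue of the pvid publish/read pairing,
 * assuming a simplified vlan_group with only the fields we need.
 * Build with: cc -pthread example.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct vlan_group {
	int num_vlans;			/* written before the pvid is published */
	_Atomic unsigned short pvid;	/* 0 means "no pvid set" */
};

static struct vlan_group vg;

static void vlan_add_pvid(struct vlan_group *g, unsigned short vid)
{
	/* make all prior writes visible before publishing the pvid,
	 * analogous to smp_wmb() before the store in __vlan_add_pvid() */
	g->num_vlans = 1;
	atomic_store_explicit(&g->pvid, vid, memory_order_release);
}

static unsigned short get_pvid(const struct vlan_group *g)
{
	/* acquire pairs with the writer's release, analogous to the
	 * smp_rmb() in br_get_pvid() */
	return atomic_load_explicit(&g->pvid, memory_order_acquire);
}

static void *reader(void *arg)
{
	unsigned short vid;

	while ((vid = get_pvid(&vg)) == 0)
		;	/* spin until the writer publishes a pvid */
	/* the acquire load guarantees num_vlans is already visible here */
	printf("saw pvid %u with %d vlan(s)\n", vid, vg.num_vlans);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, reader, NULL);
	vlan_add_pvid(&vg, 1);	/* publish VLAN 1 as the pvid */
	pthread_join(t, NULL);
	return 0;
}

The same release-before-publish idea is what lets del_nbp() defer nbp_vlan_flush() until after netdev_rx_handler_unregister(): readers that traverse vg->vlan_list under RCU either see the fully initialised group or none at all.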