From 8bf862739a7786ae72409220914df960a0aa80d8 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Wed, 27 Jan 2016 12:37:52 +0100 Subject: wext: fix message delay/ordering Beniamino reported that he was getting an RTM_NEWLINK message for a given interface, after the RTM_DELLINK for it. It turns out that the message is a wireless extensions message, which was sent because the interface had been connected and disconnection while it was deleted caused a wext message. For its netlink messages, wext uses RTM_NEWLINK, but the message is without all the regular rtnetlink attributes, so "ip monitor link" prints just rudimentary information: 5: wlan1: mtu 1500 qdisc mq state DOWN group default link/ether 02:00:00:00:01:00 brd ff:ff:ff:ff:ff:ff Deleted 5: wlan1: mtu 1500 qdisc noop state DOWN group default link/ether 02:00:00:00:01:00 brd ff:ff:ff:ff:ff:ff 5: wlan1: link/ether (from my hwsim reproduction) This can cause userspace to get confused since it doesn't expect an RTM_NEWLINK message after RTM_DELLINK. The reason for this is that wext schedules a worker to send out the messages, and the scheduling delay can cause the messages to get out to userspace in different order. To fix this, have wext register a netdevice notifier and flush out any pending messages when netdevice state changes. This fixes any ordering whenever the original message wasn't sent by a notifier itself. Cc: stable@vger.kernel.org Reported-by: Beniamino Galvani Signed-off-by: Johannes Berg --- net/wireless/wext-core.c | 51 +++++++++++++++++++++++++++++++++++++----------- 1 file changed, 40 insertions(+), 11 deletions(-) (limited to 'net') diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c index c8717c1d082e..87dd619fb2e9 100644 --- a/net/wireless/wext-core.c +++ b/net/wireless/wext-core.c @@ -342,6 +342,39 @@ static const int compat_event_type_size[] = { /* IW event code */ +static void wireless_nlevent_flush(void) +{ + struct sk_buff *skb; + struct net *net; + + ASSERT_RTNL(); + + for_each_net(net) { + while ((skb = skb_dequeue(&net->wext_nlevents))) + rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, + GFP_KERNEL); + } +} + +static int wext_netdev_notifier_call(struct notifier_block *nb, + unsigned long state, void *ptr) +{ + /* + * When a netdev changes state in any way, flush all pending messages + * to avoid them going out in a strange order, e.g. RTM_NEWLINK after + * RTM_DELLINK, or with IFF_UP after without IFF_UP during dev_close() + * or similar - all of which could otherwise happen due to delays from + * schedule_work(). + */ + wireless_nlevent_flush(); + + return NOTIFY_OK; +} + +static struct notifier_block wext_netdev_notifier = { + .notifier_call = wext_netdev_notifier_call, +}; + static int __net_init wext_pernet_init(struct net *net) { skb_queue_head_init(&net->wext_nlevents); @@ -360,7 +393,12 @@ static struct pernet_operations wext_pernet_ops = { static int __init wireless_nlevent_init(void) { - return register_pernet_subsys(&wext_pernet_ops); + int err = register_pernet_subsys(&wext_pernet_ops); + + if (err) + return err; + + return register_netdevice_notifier(&wext_netdev_notifier); } subsys_initcall(wireless_nlevent_init); @@ -368,17 +406,8 @@ subsys_initcall(wireless_nlevent_init); /* Process events generated by the wireless layer or the driver. 
*/ static void wireless_nlevent_process(struct work_struct *work) { - struct sk_buff *skb; - struct net *net; - rtnl_lock(); - - for_each_net(net) { - while ((skb = skb_dequeue(&net->wext_nlevents))) - rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, - GFP_KERNEL); - } - + wireless_nlevent_flush(); rtnl_unlock(); } -- cgit v1.2.3 From cb150b9d23be6ee7f3a0fff29784f1c5b5ac514d Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Wed, 27 Jan 2016 13:29:34 +0100 Subject: cfg80211/wext: fix message ordering Since cfg80211 frequently takes actions from its netdev notifier call, wireless extensions messages could still be ordered badly since the wext netdev notifier, since wext is built into the kernel, runs before the cfg80211 netdev notifier. For example, the following can happen: 5: wlan1: mtu 1500 qdisc mq state DOWN group default link/ether 02:00:00:00:01:00 brd ff:ff:ff:ff:ff:ff 5: wlan1: link/ether when setting the interface down causes the wext message. To also fix this, export the wireless_nlevent_flush() function and also call it from the cfg80211 notifier. Cc: stable@vger.kernel.org Signed-off-by: Johannes Berg --- net/wireless/core.c | 2 ++ net/wireless/wext-core.c | 3 ++- 2 files changed, 4 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/net/wireless/core.c b/net/wireless/core.c index b0915515640e..8f0bac7e03c4 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -1147,6 +1147,8 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb, return NOTIFY_DONE; } + wireless_nlevent_flush(); + return NOTIFY_OK; } diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c index 87dd619fb2e9..b50ee5d622e1 100644 --- a/net/wireless/wext-core.c +++ b/net/wireless/wext-core.c @@ -342,7 +342,7 @@ static const int compat_event_type_size[] = { /* IW event code */ -static void wireless_nlevent_flush(void) +void wireless_nlevent_flush(void) { struct sk_buff *skb; struct net *net; @@ -355,6 +355,7 @@ static void wireless_nlevent_flush(void) GFP_KERNEL); } } +EXPORT_SYMBOL_GPL(wireless_nlevent_flush); static int wext_netdev_notifier_call(struct notifier_block *nb, unsigned long state, void *ptr) -- cgit v1.2.3 From f39ea2690bd61efec97622c48323f40ed6e16317 Mon Sep 17 00:00:00 2001 From: Chris Bainbridge Date: Wed, 27 Jan 2016 15:46:18 +0000 Subject: mac80211: fix use of uninitialised values in RX aggregation Use kzalloc instead of kmalloc for struct tid_ampdu_rx to initialize the "removed" field (all others are initialized manually). That fixes: UBSAN: Undefined behaviour in net/mac80211/rx.c:932:29 load of value 2 is not a valid value for type '_Bool' CPU: 3 PID: 1134 Comm: kworker/u16:7 Not tainted 4.5.0-rc1+ #265 Workqueue: phy0 rt2x00usb_work_rxdone 0000000000000004 ffff880254a7ba50 ffffffff8181d866 0000000000000007 ffff880254a7ba78 ffff880254a7ba68 ffffffff8188422d ffffffff8379b500 ffff880254a7bab8 ffffffff81884747 0000000000000202 0000000348620032 Call Trace: [] dump_stack+0x45/0x5f [] ubsan_epilogue+0xd/0x40 [] __ubsan_handle_load_invalid_value+0x67/0x70 [] ieee80211_sta_reorder_release.isra.16+0x5ed/0x730 [] ieee80211_prepare_and_rx_handle+0xd04/0x1c00 [] __ieee80211_rx_handle_packet+0x1f3/0x750 [] ieee80211_rx_napi+0x447/0x990 While at it, convert to use sizeof(*tid_agg_rx) instead. 
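The allocation idiom the patch below switches to is worth spelling out: zero-allocating the object and sizing it from the pointer means any field that is not set explicitly (such as the later-added "removed" flag) starts out as zero/false, and the size expression keeps tracking the type if it ever changes. A minimal sketch of the pattern, using hypothetical names rather than anything from this patch (kernel context, <linux/slab.h> and <linux/types.h> assumed):

	struct demo_rx_state {
		u16 head_seq_num;
		bool removed;	/* later addition; must not contain heap garbage */
	};

	static struct demo_rx_state *demo_rx_state_alloc(void)
	{
		/* kzalloc() zeroes the whole structure up front */
		struct demo_rx_state *st = kzalloc(sizeof(*st), GFP_KERNEL);

		if (!st)
			return NULL;
		st->head_seq_num = 1;	/* only non-zero fields need explicit init */
		return st;
	}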
Fixes: 788211d81bfdf ("mac80211: fix RX A-MPDU session reorder timer deletion") Cc: stable@vger.kernel.org Signed-off-by: Chris Bainbridge [reword commit message, use sizeof(*tid_agg_rx)] Signed-off-by: Johannes Berg --- net/mac80211/agg-rx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c index 10ad4ac1fa0b..367784be5df2 100644 --- a/net/mac80211/agg-rx.c +++ b/net/mac80211/agg-rx.c @@ -291,7 +291,7 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta, } /* prepare A-MPDU MLME for Rx aggregation */ - tid_agg_rx = kmalloc(sizeof(struct tid_ampdu_rx), GFP_KERNEL); + tid_agg_rx = kzalloc(sizeof(*tid_agg_rx), GFP_KERNEL); if (!tid_agg_rx) goto end; -- cgit v1.2.3 From f84a93726e0c0c53b5b6dbee96623e7e53e06dd8 Mon Sep 17 00:00:00 2001 From: Konstantin Khlebnikov Date: Fri, 29 Jan 2016 11:35:12 +0300 Subject: mac80211: minstrel_ht: fix out-of-bound in minstrel_ht_set_best_prob_rate Patch fixes this splat BUG: KASAN: slab-out-of-bounds in minstrel_ht_update_stats.isra.7+0x6e1/0x9e0 [mac80211] at addr ffff8800cee640f4 Read of size 4 by task swapper/3/0 Signed-off-by: Konstantin Khlebnikov Link: http://lkml.kernel.org/r/CALYGNiNyJhSaVnE35qS6UCGaSb2Dx1_i5HcRavuOX14oTz2P+w@mail.gmail.com Acked-by: Felix Fietkau Signed-off-by: Johannes Berg --- net/mac80211/rc80211_minstrel_ht.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'net') diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c index 3928dbd24e25..93bf2b743e20 100644 --- a/net/mac80211/rc80211_minstrel_ht.c +++ b/net/mac80211/rc80211_minstrel_ht.c @@ -414,15 +414,16 @@ minstrel_ht_set_best_prob_rate(struct minstrel_ht_sta *mi, u16 index) (max_tp_group != MINSTREL_CCK_GROUP)) return; + max_gpr_group = mg->max_group_prob_rate / MCS_GROUP_RATES; + max_gpr_idx = mg->max_group_prob_rate % MCS_GROUP_RATES; + max_gpr_prob = mi->groups[max_gpr_group].rates[max_gpr_idx].prob_ewma; + if (mrs->prob_ewma > MINSTREL_FRAC(75, 100)) { cur_tp_avg = minstrel_ht_get_tp_avg(mi, cur_group, cur_idx, mrs->prob_ewma); if (cur_tp_avg > tmp_tp_avg) mi->max_prob_rate = index; - max_gpr_group = mg->max_group_prob_rate / MCS_GROUP_RATES; - max_gpr_idx = mg->max_group_prob_rate % MCS_GROUP_RATES; - max_gpr_prob = mi->groups[max_gpr_group].rates[max_gpr_idx].prob_ewma; max_gpr_tp_avg = minstrel_ht_get_tp_avg(mi, max_gpr_group, max_gpr_idx, max_gpr_prob); @@ -431,7 +432,7 @@ minstrel_ht_set_best_prob_rate(struct minstrel_ht_sta *mi, u16 index) } else { if (mrs->prob_ewma > tmp_prob) mi->max_prob_rate = index; - if (mrs->prob_ewma > mg->rates[mg->max_group_prob_rate].prob_ewma) + if (mrs->prob_ewma > max_gpr_prob) mg->max_group_prob_rate = index; } } -- cgit v1.2.3 From 212c5a5e6ba61678be6b5fee576e38bccb50b613 Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Tue, 2 Feb 2016 08:12:26 +0100 Subject: mac80211: minstrel: Change expected throughput unit back to Kbps The change from cur_tp to the function minstrel_get_tp_avg/minstrel_ht_get_tp_avg changed the unit used for the current throughput. For example in minstrel_ht the correct conversion between them would be: mrs->cur_tp / 10 == minstrel_ht_get_tp_avg(..). This factor 10 must also be included in the calculation of minstrel_get_expected_throughput and minstrel_ht_get_expected_throughput to return values with the unit [Kbps] instead of [10Kbps]. Otherwise routing algorithms like B.A.T.M.A.N. V will make incorrect decision based on these values. 
Its kernel based implementation expects expected_throughput always to have the unit [Kbps] and not sometimes [10Kbps] and sometimes [Kbps]. The same requirement has iw or olsrdv2's nl80211 based statistics module which retrieve the same data via NL80211_STA_INFO_TX_BITRATE. Cc: stable@vger.kernel.org Fixes: 6a27b2c40b48 ("mac80211: restructure per-rate throughput calculation into function") Signed-off-by: Sven Eckelmann Signed-off-by: Johannes Berg --- net/mac80211/rc80211_minstrel.c | 2 +- net/mac80211/rc80211_minstrel_ht.c | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c index 3ece7d1034c8..b54f398cda5d 100644 --- a/net/mac80211/rc80211_minstrel.c +++ b/net/mac80211/rc80211_minstrel.c @@ -711,7 +711,7 @@ static u32 minstrel_get_expected_throughput(void *priv_sta) * computing cur_tp */ tmp_mrs = &mi->r[idx].stats; - tmp_cur_tp = minstrel_get_tp_avg(&mi->r[idx], tmp_mrs->prob_ewma); + tmp_cur_tp = minstrel_get_tp_avg(&mi->r[idx], tmp_mrs->prob_ewma) * 10; tmp_cur_tp = tmp_cur_tp * 1200 * 8 / 1024; return tmp_cur_tp; diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c index 93bf2b743e20..702328bcabdc 100644 --- a/net/mac80211/rc80211_minstrel_ht.c +++ b/net/mac80211/rc80211_minstrel_ht.c @@ -1335,7 +1335,8 @@ static u32 minstrel_ht_get_expected_throughput(void *priv_sta) prob = mi->groups[i].rates[j].prob_ewma; /* convert tp_avg from pkt per second in kbps */ - tp_avg = minstrel_ht_get_tp_avg(mi, i, j, prob) * AVG_PKT_SIZE * 8 / 1024; + tp_avg = minstrel_ht_get_tp_avg(mi, i, j, prob) * 10; + tp_avg = tp_avg * AVG_PKT_SIZE * 8 / 1024; return tp_avg; } -- cgit v1.2.3 From 9f74660bcf1e4cca577be99e54bc77b5df62b508 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Mon, 15 Feb 2016 10:23:59 -0500 Subject: xprtrdma: rpcrdma_bc_receive_call() should init rq_private_buf.len Some NFSv4.1 OPEN requests were hanging waiting for the NFS server to finish recalling delegations. Turns out that each NFSv4.1 CB request on RDMA gets a GARBAGE_ARGS reply from the Linux client. Commit 756b9b37cfb2e3dc added a line in bc_svc_process that overwrites the incoming rq_rcv_buf's length with the value in rq_private_buf.len. But rpcrdma_bc_receive_call() does not invoke xprt_complete_bc_request(), thus rq_private_buf.len is not initialized. svc_process_common() is invoked with a zero-length RPC message, and fails. 
Fixes: 756b9b37cfb2e3dc ('SUNRPC: Fix callback channel') Signed-off-by: Chuck Lever Signed-off-by: Anna Schumaker --- net/sunrpc/xprtrdma/backchannel.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'net') diff --git a/net/sunrpc/xprtrdma/backchannel.c b/net/sunrpc/xprtrdma/backchannel.c index cc1251d07297..2dcd7640eeb5 100644 --- a/net/sunrpc/xprtrdma/backchannel.c +++ b/net/sunrpc/xprtrdma/backchannel.c @@ -341,6 +341,8 @@ void rpcrdma_bc_receive_call(struct rpcrdma_xprt *r_xprt, rqst->rq_reply_bytes_recvd = 0; rqst->rq_bytes_sent = 0; rqst->rq_xid = headerp->rm_xid; + + rqst->rq_private_buf.len = size; set_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state); buf = &rqst->rq_rcv_buf; -- cgit v1.2.3 From 437b300c6b28040ad87e4caf042e05f2c5f13c6d Mon Sep 17 00:00:00 2001 From: Scott Mayhew Date: Tue, 16 Feb 2016 16:20:25 -0500 Subject: auth_gss: fix panic in gss_pipe_downcall() in fips mode On Mon, 15 Feb 2016, Trond Myklebust wrote: > Hi Scott, > > On Mon, Feb 15, 2016 at 2:28 PM, Scott Mayhew wrote: > > md5 is disabled in fips mode, and attempting to import a gss context > > using md5 while in fips mode will result in crypto_alg_mod_lookup() > > returning -ENOENT, which will make its way back up to > > gss_pipe_downcall(), where the BUG() is triggered. Handling the -ENOENT > > allows for a more graceful failure. > > > > Signed-off-by: Scott Mayhew > > --- > > net/sunrpc/auth_gss/auth_gss.c | 3 +++ > > 1 file changed, 3 insertions(+) > > > > diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c > > index 799e65b..c30fc3b 100644 > > --- a/net/sunrpc/auth_gss/auth_gss.c > > +++ b/net/sunrpc/auth_gss/auth_gss.c > > @@ -737,6 +737,9 @@ gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen) > > case -ENOSYS: > > gss_msg->msg.errno = -EAGAIN; > > break; > > + case -ENOENT: > > + gss_msg->msg.errno = -EPROTONOSUPPORT; > > + break; > > default: > > printk(KERN_CRIT "%s: bad return from " > > "gss_fill_context: %zd\n", __func__, err); > > -- > > 2.4.3 > > > > Well debugged, but I unfortunately do have to ask if this patch is > sufficient? In addition to -ENOENT, and -ENOMEM, it looks to me as if > crypto_alg_mod_lookup() can also fail with -EINTR, -ETIMEDOUT, and > -EAGAIN. Don't we also want to handle those? You're right, I was focusing on the panic that I could easily reproduce. I'm still not sure how I could trigger those other conditions. > > In fact, peering into the rats nest that is > gss_import_sec_context_kerberos(), it looks as if that is just a tiny > subset of all the errors that we might run into. Perhaps the right > thing to do here is to get rid of the BUG() (but keep the above > printk) and just return a generic error? That sounds fine to me -- updated patch attached. -Scott >From d54c6b64a107a90a38cab97577de05f9a4625052 Mon Sep 17 00:00:00 2001 From: Scott Mayhew Date: Mon, 15 Feb 2016 15:12:19 -0500 Subject: [PATCH] auth_gss: remove the BUG() from gss_pipe_downcall() Instead return a generic error via gss_msg->msg.errno. None of the errors returned by gss_fill_context() should necessarily trigger a kernel panic. 
Signed-off-by: Scott Mayhew Signed-off-by: Trond Myklebust --- net/sunrpc/auth_gss/auth_gss.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index 799e65b944b9..cabf586f47d7 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c @@ -740,7 +740,7 @@ gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen) default: printk(KERN_CRIT "%s: bad return from " "gss_fill_context: %zd\n", __func__, err); - BUG(); + gss_msg->msg.errno = -EIO; } goto err_release_msg; } -- cgit v1.2.3 From 7a36b930e6ed4702c866dc74a5ad07318a57c688 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Thu, 18 Feb 2016 19:49:18 +0100 Subject: mac80211: minstrel_ht: set default tx aggregation timeout to 0 The value 5000 was put here with the addition of the timeout field to ieee80211_start_tx_ba_session. It was originally added in mac80211 to save resources for drivers like iwlwifi, which only supports a limited number of concurrent aggregation sessions. Since iwlwifi does not use minstrel_ht and other drivers don't need this, 0 is a better default - especially since there have been recent reports of aggregation setup related issues reproduced with ath9k. This should improve stability without causing any adverse effects. Cc: stable@vger.kernel.org Acked-by: Avery Pennarun Signed-off-by: Felix Fietkau Signed-off-by: Johannes Berg --- net/mac80211/rc80211_minstrel_ht.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c index 702328bcabdc..409b3640da6c 100644 --- a/net/mac80211/rc80211_minstrel_ht.c +++ b/net/mac80211/rc80211_minstrel_ht.c @@ -692,7 +692,7 @@ minstrel_aggr_check(struct ieee80211_sta *pubsta, struct sk_buff *skb) if (likely(sta->ampdu_mlme.tid_tx[tid])) return; - ieee80211_start_tx_ba_session(pubsta, tid, 5000); + ieee80211_start_tx_ba_session(pubsta, tid, 0); } static void -- cgit v1.2.3 From 5e950a78bf5b18ded277a27aed0bcdbe7c1b868a Mon Sep 17 00:00:00 2001 From: Ola Olsson Date: Thu, 11 Feb 2016 01:00:22 +0100 Subject: nl80211: Zero out the connection keys memory when freeing them. The connection keys are zeroed out in all other cases except this one. Let's fix the last one as well. Signed-off-by: Ola Olsson Reviewed-by: Julian Calaby Signed-off-by: Johannes Berg --- net/wireless/nl80211.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index d4786f2802aa..711cb7ad6ae0 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -7547,7 +7547,7 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info) if ((ibss.chandef.width != NL80211_CHAN_WIDTH_20_NOHT) && no_ht) { - kfree(connkeys); + kzfree(connkeys); return -EINVAL; } } -- cgit v1.2.3 From b86071528f3261ab592fad5b9b1a02aea3dcabf3 Mon Sep 17 00:00:00 2001 From: Arend van Spriel Date: Mon, 15 Feb 2016 14:35:53 +0100 Subject: cfg80211: stop critical protocol session upon disconnect event When user-space has started a critical protocol session and a disconnect event occurs, the rdev::crit_prot_nlportid remains set. This caused a subsequent NL80211_CMD_CRIT_PROTO_START to fail (-EBUSY). Fix this by clearing the rdev attribute and call .crit_proto_stop() callback upon disconnect event. 
Reviewed-by: Hante Meuleman Reviewed-by: Pieter-Paul Giesberts Signed-off-by: Arend van Spriel Signed-off-by: Johannes Berg --- net/wireless/sme.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'net') diff --git a/net/wireless/sme.c b/net/wireless/sme.c index 8020b5b094d4..d49ed7666d4c 100644 --- a/net/wireless/sme.c +++ b/net/wireless/sme.c @@ -917,6 +917,12 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie, nl80211_send_disconnected(rdev, dev, reason, ie, ie_len, from_ap); + /* stop critical protocol if supported */ + if (rdev->ops->crit_proto_stop && rdev->crit_proto_nlportid) { + rdev->crit_proto_nlportid = 0; + rdev_crit_proto_stop(rdev, wdev); + } + /* * Delete all the keys ... pairwise keys can't really * exist any more anyway, but default keys might. -- cgit v1.2.3 From b7052cd7bcf3c1478796e93e3dff2b44c9e82943 Mon Sep 17 00:00:00 2001 From: Stefan Hajnoczi Date: Thu, 18 Feb 2016 18:55:54 +0000 Subject: sunrpc/cache: fix off-by-one in qword_get() The qword_get() function NUL-terminates its output buffer. If the input string is in hex format \xXXXX... and the same length as the output buffer, there is an off-by-one: int qword_get(char **bpp, char *dest, int bufsize) { ... while (len < bufsize) { ... *dest++ = (h << 4) | l; len++; } ... *dest = '\0'; return len; } This patch ensures the NUL terminator doesn't fall outside the output buffer. Signed-off-by: Stefan Hajnoczi Cc: stable@vger.kernel.org Signed-off-by: J. Bruce Fields --- net/sunrpc/cache.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c index 2b32fd602669..273bc3a35425 100644 --- a/net/sunrpc/cache.c +++ b/net/sunrpc/cache.c @@ -1225,7 +1225,7 @@ int qword_get(char **bpp, char *dest, int bufsize) if (bp[0] == '\\' && bp[1] == 'x') { /* HEX STRING */ bp += 2; - while (len < bufsize) { + while (len < bufsize - 1) { int h, l; h = hex_to_bin(bp[0]); -- cgit v1.2.3 From 9bdfb3b79e61c60e1a3e2dc05ad164528afa6b8a Mon Sep 17 00:00:00 2001 From: Konstantin Khlebnikov Date: Sun, 21 Feb 2016 10:12:39 +0300 Subject: tcp: convert cached rtt from usec to jiffies when feeding initial rto Currently it's converted into msecs, thus HZ=1000 intact. Signed-off-by: Konstantin Khlebnikov Fixes: 740b0f1841f6 ("tcp: switch rtt estimations to usec resolution") Signed-off-by: David S. Miller --- net/ipv4/tcp_metrics.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c index c8cbc2b4b792..a726d7853ce5 100644 --- a/net/ipv4/tcp_metrics.c +++ b/net/ipv4/tcp_metrics.c @@ -550,7 +550,7 @@ reset: */ if (crtt > tp->srtt_us) { /* Set RTO like tcp_rtt_estimator(), but from cached RTT. */ - crtt /= 8 * USEC_PER_MSEC; + crtt /= 8 * USEC_PER_SEC / HZ; inet_csk(sk)->icsk_rto = crtt + max(2 * crtt, tcp_rto_min(sk)); } else if (tp->srtt_us == 0) { /* RFC6298: 5.7 We've failed to get a valid RTT sample from -- cgit v1.2.3 From 5146d1f151122e868e594c7b45115d64825aee5f Mon Sep 17 00:00:00 2001 From: Bernie Harris Date: Mon, 22 Feb 2016 12:58:05 +1300 Subject: tunnel: Clear IPCB(skb)->opt before dst_link_failure called IPCB may contain data from previous layers (in the observed case the qdisc layer). In the observed scenario, the data was misinterpreted as ip header options, which later caused the ihl to be set to an invalid value (<5). This resulted in an infinite loop in the mips implementation of ip_fast_csum. 
This patch clears IPCB(skb)->opt before dst_link_failure can be called for various types of tunnels. This change only applies to encapsulated ipv4 packets. The code introduced in 11c21a30 which clears all of IPCB has been removed to be consistent with these changes, and instead the opt field is cleared unconditionally in ip_tunnel_xmit. The change in ip_tunnel_xmit applies to SIT, GRE, and IPIP tunnels. The relevant vti, l2tp, and pptp functions already contain similar code for clearing the IPCB. Signed-off-by: Bernie Harris Signed-off-by: David S. Miller --- net/ipv4/ip_tunnel.c | 3 ++- net/ipv4/udp_tunnel.c | 2 ++ net/ipv6/ip6_gre.c | 2 ++ net/ipv6/ip6_tunnel.c | 2 ++ 4 files changed, 8 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index 89e8861e05fc..336e6892a93c 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c @@ -661,6 +661,8 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, inner_iph = (const struct iphdr *)skb_inner_network_header(skb); connected = (tunnel->parms.iph.daddr != 0); + memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); + dst = tnl_params->daddr; if (dst == 0) { /* NBMA tunnel */ @@ -758,7 +760,6 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, tunnel->err_time + IPTUNNEL_ERR_TIMEO)) { tunnel->err_count--; - memset(IPCB(skb), 0, sizeof(*IPCB(skb))); dst_link_failure(skb); } else tunnel->err_count = 0; diff --git a/net/ipv4/udp_tunnel.c b/net/ipv4/udp_tunnel.c index 0ec08814f37d..96599d1a1318 100644 --- a/net/ipv4/udp_tunnel.c +++ b/net/ipv4/udp_tunnel.c @@ -89,6 +89,8 @@ void udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb uh->source = src_port; uh->len = htons(skb->len); + memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); + udp_set_csum(nocheck, skb, src, dst, skb->len); iptunnel_xmit(sk, rt, skb, src, dst, IPPROTO_UDP, tos, ttl, df, xnet); diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index a69aad1e29d1..c0d4dc1c5ea4 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -777,6 +777,8 @@ static inline int ip6gre_xmit_ipv4(struct sk_buff *skb, struct net_device *dev) __u32 mtu; int err; + memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); + if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) encap_limit = t->parms.encap_limit; diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 137fca42aaa6..6c5dfec7a377 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -1180,6 +1180,8 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) u8 tproto; int err; + memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); + tproto = ACCESS_ONCE(t->parms.proto); if (tproto != IPPROTO_IPIP && tproto != 0) return -1; -- cgit v1.2.3 From a8c4a2522a0808c5c2143612909717d1115c40cf Mon Sep 17 00:00:00 2001 From: Hannes Frederic Sowa Date: Mon, 22 Feb 2016 18:43:25 +0100 Subject: ipv4: only create late gso-skb if skb is already set up with CHECKSUM_PARTIAL Otherwise we break the contract with GSO to only pass CHECKSUM_PARTIAL skbs down. This can easily happen with UDP+IPv4 sockets with the first MSG_MORE write smaller than the MTU, second write is a sendfile. Returning -EOPNOTSUPP lets the callers fall back into normal sendmsg path, were we calculate the checksum manually during copying. Commit d749c9cbffd6 ("ipv4: no CHECKSUM_PARTIAL on MSG_MORE corked sockets") started to exposes this bug. 
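The triggering sequence described above (a sub-MTU MSG_MORE write followed by a sendfile() on the same corked UDP socket) can be sketched from userspace roughly as follows; the destination address and payload file are placeholders, not taken from the original report:

	/* rough reproducer sketch, not a verbatim test case */
	#include <arpa/inet.h>
	#include <fcntl.h>
	#include <netinet/in.h>
	#include <string.h>
	#include <sys/sendfile.h>
	#include <sys/socket.h>
	#include <unistd.h>

	int main(void)
	{
		int s = socket(AF_INET, SOCK_DGRAM, 0);
		int fd = open("payload.bin", O_RDONLY);	/* placeholder file */
		struct sockaddr_in dst = {
			.sin_family = AF_INET,
			.sin_port = htons(9),		/* placeholder destination */
			.sin_addr = { .s_addr = htonl(INADDR_LOOPBACK) },
		};
		char small[64];

		memset(small, 'x', sizeof(small));
		connect(s, (struct sockaddr *)&dst, sizeof(dst));
		/* the first, corked write is smaller than the MTU, so the skb is
		 * not set up with CHECKSUM_PARTIAL
		 */
		send(s, small, sizeof(small), MSG_MORE);
		/* the second write arrives via sendfile() and hits ip_append_page() */
		sendfile(s, fd, NULL, 64 * 1024);
		send(s, small, 0, 0);			/* uncork and transmit */
		close(fd);
		close(s);
		return 0;
	}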
Fixes: d749c9cbffd6 ("ipv4: no CHECKSUM_PARTIAL on MSG_MORE corked sockets") Reported-by: Jiri Benc Cc: Jiri Benc Reported-by: Wakko Warner Cc: Wakko Warner Signed-off-by: Hannes Frederic Sowa Signed-off-by: David S. Miller --- net/ipv4/ip_output.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 64878efa045c..565bf64b2b7d 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -1236,13 +1236,16 @@ ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page, if (!skb) return -EINVAL; - cork->length += size; if ((size + skb->len > mtu) && (sk->sk_protocol == IPPROTO_UDP) && (rt->dst.dev->features & NETIF_F_UFO)) { + if (skb->ip_summed != CHECKSUM_PARTIAL) + return -EOPNOTSUPP; + skb_shinfo(skb)->gso_size = mtu - fragheaderlen; skb_shinfo(skb)->gso_type = SKB_GSO_UDP; } + cork->length += size; while (size > 0) { if (skb_is_gso(skb)) { -- cgit v1.2.3 From e7a88e82fe380459b864e05b372638aeacb0f52d Mon Sep 17 00:00:00 2001 From: Ilya Dryomov Date: Wed, 17 Feb 2016 20:04:08 +0100 Subject: libceph: don't bail early from try_read() when skipping a message The contract between try_read() and try_write() is that when called each processes as much data as possible. When instructed by osd_client to skip a message, try_read() is violating this contract by returning after receiving and discarding a single message instead of checking for more. try_write() then gets a chance to write out more requests, generating more replies/skips for try_read() to handle, forcing the messenger into a starvation loop. Cc: stable@vger.kernel.org # 3.10+ Reported-by: Varada Kari Signed-off-by: Ilya Dryomov Tested-by: Varada Kari Reviewed-by: Alex Elder --- net/ceph/messenger.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index 9cfedf565f5b..fec20819a5ea 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -2337,7 +2337,7 @@ static int read_partial_message(struct ceph_connection *con) con->in_base_pos = -front_len - middle_len - data_len - sizeof(m->footer); con->in_tag = CEPH_MSGR_TAG_READY; - return 0; + return 1; } else if ((s64)seq - (s64)con->in_seq > 1) { pr_err("read_partial_message bad seq %lld expected %lld\n", seq, con->in_seq + 1); @@ -2363,7 +2363,7 @@ static int read_partial_message(struct ceph_connection *con) sizeof(m->footer); con->in_tag = CEPH_MSGR_TAG_READY; con->in_seq++; - return 0; + return 1; } BUG_ON(!con->in_msg); -- cgit v1.2.3 From dbc0d3caff5b7591e0cf8e34ca686ca6f4479ee1 Mon Sep 17 00:00:00 2001 From: Ilya Dryomov Date: Fri, 19 Feb 2016 11:38:57 +0100 Subject: libceph: use the right footer size when skipping a message ceph_msg_footer is 21 bytes long, while ceph_msg_footer_old is only 13. Don't skip too much when CEPH_FEATURE_MSG_AUTH isn't negotiated. Cc: stable@vger.kernel.org # 3.19+ Signed-off-by: Ilya Dryomov Reviewed-by: Alex Elder --- net/ceph/messenger.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index fec20819a5ea..9382619a405b 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -1197,6 +1197,13 @@ static bool ceph_msg_data_advance(struct ceph_msg_data_cursor *cursor, return new_piece; } +static size_t sizeof_footer(struct ceph_connection *con) +{ + return (con->peer_features & CEPH_FEATURE_MSG_AUTH) ? 
+ sizeof(struct ceph_msg_footer) : + sizeof(struct ceph_msg_footer_old); +} + static void prepare_message_data(struct ceph_msg *msg, u32 data_len) { BUG_ON(!msg); @@ -2335,7 +2342,7 @@ static int read_partial_message(struct ceph_connection *con) ceph_pr_addr(&con->peer_addr.in_addr), seq, con->in_seq + 1); con->in_base_pos = -front_len - middle_len - data_len - - sizeof(m->footer); + sizeof_footer(con); con->in_tag = CEPH_MSGR_TAG_READY; return 1; } else if ((s64)seq - (s64)con->in_seq > 1) { @@ -2360,7 +2367,7 @@ static int read_partial_message(struct ceph_connection *con) /* skip this message */ dout("alloc_msg said skip message\n"); con->in_base_pos = -front_len - middle_len - data_len - - sizeof(m->footer); + sizeof_footer(con); con->in_tag = CEPH_MSGR_TAG_READY; con->in_seq++; return 1; -- cgit v1.2.3 From cd8140c673d9ba9be3591220e1b2226d9e1e40d3 Mon Sep 17 00:00:00 2001 From: Ilya Dryomov Date: Fri, 19 Feb 2016 11:38:57 +0100 Subject: libceph: don't spam dmesg with stray reply warnings Commit d15f9d694b77 ("libceph: check data_len in ->alloc_msg()") mistakenly bumped the log level on the "tid %llu unknown, skipping" message. Turn it back into a dout() - stray replies are perfectly normal when OSDs flap, crash, get killed for testing purposes, etc. Cc: stable@vger.kernel.org # 4.3+ Signed-off-by: Ilya Dryomov Reviewed-by: Alex Elder --- net/ceph/osd_client.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 3534e12683d3..5bc053778fed 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -2853,8 +2853,8 @@ static struct ceph_msg *get_reply(struct ceph_connection *con, mutex_lock(&osdc->request_mutex); req = __lookup_request(osdc, tid); if (!req) { - pr_warn("%s osd%d tid %llu unknown, skipping\n", - __func__, osd->o_osd, tid); + dout("%s osd%d tid %llu unknown, skipping\n", __func__, + osd->o_osd, tid); m = NULL; *skip = 1; goto out; -- cgit v1.2.3 From 2da897e51d7f23f872224218b9a4314503b0d360 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Tue, 23 Feb 2016 02:05:26 +0100 Subject: bpf: fix csum setting for bpf_set_tunnel_key The fix in 35e2d1152b22 ("tunnels: Allow IPv6 UDP checksums to be correctly controlled.") changed behavior for bpf_set_tunnel_key() when in use with IPv6 and thus uncovered a bug that TUNNEL_CSUM needed to be set but wasn't. As a result, the stack dropped ingress vxlan IPv6 packets, that have been sent via eBPF through collect meta data mode due to checksum now being zero. Since after LCO, we enable IPv4 checksum by default, so make that analogous and only provide a flag BPF_F_ZERO_CSUM_TX for the user to turn it off in IPv4 case. Fixes: 35e2d1152b22 ("tunnels: Allow IPv6 UDP checksums to be correctly controlled.") Fixes: c6c33454072f ("bpf: support ipv6 for bpf_skb_{set,get}_tunnel_key") Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Signed-off-by: David S. 
Miller --- net/core/filter.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/core/filter.c b/net/core/filter.c index 94d26201080d..bba502f7cd57 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -1752,7 +1752,7 @@ static u64 bpf_skb_set_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5) u8 compat[sizeof(struct bpf_tunnel_key)]; struct ip_tunnel_info *info; - if (unlikely(flags & ~(BPF_F_TUNINFO_IPV6))) + if (unlikely(flags & ~(BPF_F_TUNINFO_IPV6 | BPF_F_ZERO_CSUM_TX))) return -EINVAL; if (unlikely(size != sizeof(struct bpf_tunnel_key))) { switch (size) { @@ -1776,7 +1776,7 @@ static u64 bpf_skb_set_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5) info = &md->u.tun_info; info->mode = IP_TUNNEL_INFO_TX; - info->key.tun_flags = TUNNEL_KEY; + info->key.tun_flags = TUNNEL_KEY | TUNNEL_CSUM; info->key.tun_id = cpu_to_be64(from->tunnel_id); info->key.tos = from->tunnel_tos; info->key.ttl = from->tunnel_ttl; @@ -1787,6 +1787,8 @@ static u64 bpf_skb_set_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5) sizeof(from->remote_ipv6)); } else { info->key.u.ipv4.dst = cpu_to_be32(from->remote_ipv4); + if (flags & BPF_F_ZERO_CSUM_TX) + info->key.tun_flags &= ~TUNNEL_CSUM; } return 0; -- cgit v1.2.3 From 9b368814b336b0a1a479135eb2815edbc00efd3c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Linus=20L=C3=BCssing?= Date: Wed, 24 Feb 2016 04:21:42 +0100 Subject: net: fix bridge multicast packet checksum validation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We need to update the skb->csum after pulling the skb, otherwise an unnecessary checksum (re)computation can ocure for IGMP/MLD packets in the bridge code. Additionally this fixes the following splats for network devices / bridge ports with support for and enabled RX checksum offloading: [...] [ 43.986968] eth0: hw csum failure [ 43.990344] CPU: 3 PID: 0 Comm: swapper/3 Not tainted 4.4.0 #2 [ 43.996193] Hardware name: BCM2709 [ 43.999647] [<800204e0>] (unwind_backtrace) from [<8001cf14>] (show_stack+0x10/0x14) [ 44.007432] [<8001cf14>] (show_stack) from [<801ab614>] (dump_stack+0x80/0x90) [ 44.014695] [<801ab614>] (dump_stack) from [<802e4548>] (__skb_checksum_complete+0x6c/0xac) [ 44.023090] [<802e4548>] (__skb_checksum_complete) from [<803a055c>] (ipv6_mc_validate_checksum+0x104/0x178) [ 44.032959] [<803a055c>] (ipv6_mc_validate_checksum) from [<802e111c>] (skb_checksum_trimmed+0x130/0x188) [ 44.042565] [<802e111c>] (skb_checksum_trimmed) from [<803a06e8>] (ipv6_mc_check_mld+0x118/0x338) [ 44.051501] [<803a06e8>] (ipv6_mc_check_mld) from [<803b2c98>] (br_multicast_rcv+0x5dc/0xd00) [ 44.060077] [<803b2c98>] (br_multicast_rcv) from [<803aa510>] (br_handle_frame_finish+0xac/0x51c) [...] Fixes: 9afd85c9e455 ("net: Export IGMP/MLD message validation code") Reported-by: Álvaro Fernández Rojas Signed-off-by: Linus Lüssing Signed-off-by: David S. 
Miller --- net/core/skbuff.c | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 5bf88f58bee7..8616d1147c93 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -2947,6 +2947,24 @@ int skb_append_pagefrags(struct sk_buff *skb, struct page *page, } EXPORT_SYMBOL_GPL(skb_append_pagefrags); +/** + * skb_push_rcsum - push skb and update receive checksum + * @skb: buffer to update + * @len: length of data pulled + * + * This function performs an skb_push on the packet and updates + * the CHECKSUM_COMPLETE checksum. It should be used on + * receive path processing instead of skb_push unless you know + * that the checksum difference is zero (e.g., a valid IP header) + * or you are setting ip_summed to CHECKSUM_NONE. + */ +static unsigned char *skb_push_rcsum(struct sk_buff *skb, unsigned len) +{ + skb_push(skb, len); + skb_postpush_rcsum(skb, skb->data, len); + return skb->data; +} + /** * skb_pull_rcsum - pull skb and update receive checksum * @skb: buffer to update @@ -4084,9 +4102,9 @@ struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb, if (!pskb_may_pull(skb_chk, offset)) goto err; - __skb_pull(skb_chk, offset); + skb_pull_rcsum(skb_chk, offset); ret = skb_chkf(skb_chk); - __skb_push(skb_chk, offset); + skb_push_rcsum(skb_chk, offset); if (ret) goto err; -- cgit v1.2.3 From 472681d57a5dde7c6d16b05469be57f1c4ed9d99 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?MINOURA=20Makoto=20/=20=E7=AE=95=E6=B5=A6=20=E7=9C=9F?= Date: Thu, 25 Feb 2016 14:20:48 +0900 Subject: net: ndo_fdb_dump should report -EMSGSIZE to rtnl_fdb_dump. When the send skbuff reaches the end, nlmsg_put and friends returns -EMSGSIZE but it is silently thrown away in ndo_fdb_dump. It is called within a for_each_netdev loop and the first fdb entry of a following netdev could fit in the remaining skbuff. This breaks the mechanism of cb->args[0] and idx to keep track of the entries that are already dumped, which results missing entries in bridge fdb show command. Signed-off-by: Minoura Makoto Signed-off-by: David S. 
Miller --- net/bridge/br_fdb.c | 15 ++++++++++----- net/core/rtnetlink.c | 6 ++++++ net/switchdev/switchdev.c | 5 ++++- 3 files changed, 20 insertions(+), 6 deletions(-) (limited to 'net') diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c index 82e3e9705017..dcea4f4c62b3 100644 --- a/net/bridge/br_fdb.c +++ b/net/bridge/br_fdb.c @@ -723,6 +723,8 @@ int br_fdb_dump(struct sk_buff *skb, struct net_bridge_fdb_entry *f; hlist_for_each_entry_rcu(f, &br->hash[i], hlist) { + int err; + if (idx < cb->args[0]) goto skip; @@ -741,12 +743,15 @@ int br_fdb_dump(struct sk_buff *skb, if (!filter_dev && f->dst) goto skip; - if (fdb_fill_info(skb, br, f, - NETLINK_CB(cb->skb).portid, - cb->nlh->nlmsg_seq, - RTM_NEWNEIGH, - NLM_F_MULTI) < 0) + err = fdb_fill_info(skb, br, f, + NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, + RTM_NEWNEIGH, + NLM_F_MULTI); + if (err < 0) { + cb->args[1] = err; break; + } skip: ++idx; } diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index d735e854f916..8261d95dd846 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -2911,6 +2911,7 @@ int ndo_dflt_fdb_dump(struct sk_buff *skb, nlmsg_populate_fdb(skb, cb, dev, &idx, &dev->mc); out: netif_addr_unlock_bh(dev); + cb->args[1] = err; return idx; } EXPORT_SYMBOL(ndo_dflt_fdb_dump); @@ -2944,6 +2945,7 @@ static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb) ops = br_dev->netdev_ops; } + cb->args[1] = 0; for_each_netdev(net, dev) { if (brport_idx && (dev->ifindex != brport_idx)) continue; @@ -2971,12 +2973,16 @@ static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb) idx = cops->ndo_fdb_dump(skb, cb, br_dev, dev, idx); } + if (cb->args[1] == -EMSGSIZE) + break; if (dev->netdev_ops->ndo_fdb_dump) idx = dev->netdev_ops->ndo_fdb_dump(skb, cb, dev, NULL, idx); else idx = ndo_dflt_fdb_dump(skb, cb, dev, NULL, idx); + if (cb->args[1] == -EMSGSIZE) + break; cops = NULL; } diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c index 47f7da58a7f0..8b5833c1ff2e 100644 --- a/net/switchdev/switchdev.c +++ b/net/switchdev/switchdev.c @@ -1093,8 +1093,11 @@ int switchdev_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, .cb = cb, .idx = idx, }; + int err; - switchdev_port_obj_dump(dev, &dump.fdb.obj, switchdev_port_fdb_dump_cb); + err = switchdev_port_obj_dump(dev, &dump.fdb.obj, + switchdev_port_fdb_dump_cb); + cb->args[1] = err; return dump.idx; } EXPORT_SYMBOL_GPL(switchdev_port_fdb_dump); -- cgit v1.2.3 From 9acc54beb474c81148e2946603d141cf8716b19f Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Fri, 26 Feb 2016 22:13:40 +0100 Subject: mac80211: check PN correctly for GCMP-encrypted fragmented MPDUs Just like for CCMP we need to check that for GCMP the fragments have PNs that increment by one; the spec was updated to fix this security issue and now has the following text: The receiver shall discard MSDUs and MMPDUs whose constituent MPDU PN values are not incrementing in steps of 1. Adapt the code for CCMP to work for GCMP as well, luckily the relevant fields already alias each other so no code duplication is needed (just check the aliasing with BUILD_BUG_ON.) 
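The compile-time aliasing check mentioned here relies on BUILD_BUG_ON() failing the build whenever its condition is true. A stripped-down sketch of the idea, using an illustrative union rather than the real struct ieee80211_key layout (kernel context, <linux/bug.h>, <linux/stddef.h> and <linux/types.h> assumed):

	struct demo_key {
		union {
			struct { u8 rx_pn[6]; } ccmp;
			struct { u8 rx_pn[6]; } gcmp;
		} u;
	};

	static inline void demo_check_pn_aliasing(void)
	{
		/* Break the build if the two members ever stop sharing offset
		 * and size -- that property is what lets a single PN-checking
		 * code path serve both CCMP and GCMP.
		 */
		BUILD_BUG_ON(offsetof(struct demo_key, u.ccmp.rx_pn) !=
			     offsetof(struct demo_key, u.gcmp.rx_pn));
		BUILD_BUG_ON(sizeof(((struct demo_key *)0)->u.ccmp.rx_pn) !=
			     sizeof(((struct demo_key *)0)->u.gcmp.rx_pn));
	}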
Cc: stable@vger.kernel.org Signed-off-by: Johannes Berg --- net/mac80211/ieee80211_i.h | 2 +- net/mac80211/rx.c | 36 +++++++++++++++++++++++++++--------- 2 files changed, 28 insertions(+), 10 deletions(-) (limited to 'net') diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index b84f6aa32c08..f006f4a44c0e 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -92,7 +92,7 @@ struct ieee80211_fragment_entry { u16 extra_len; u16 last_frag; u8 rx_queue; - bool ccmp; /* Whether fragments were encrypted with CCMP */ + bool check_sequential_pn; /* needed for CCMP/GCMP */ u8 last_pn[6]; /* PN of the last fragment if CCMP was used */ }; diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index bc081850ac0e..bf2c3f281dcd 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -1753,7 +1753,7 @@ ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata, entry->seq = seq; entry->rx_queue = rx_queue; entry->last_frag = frag; - entry->ccmp = 0; + entry->check_sequential_pn = false; entry->extra_len = 0; return entry; @@ -1849,15 +1849,27 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) rx->seqno_idx, &(rx->skb)); if (rx->key && (rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP || - rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP_256) && + rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP_256 || + rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP || + rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP_256) && ieee80211_has_protected(fc)) { int queue = rx->security_idx; - /* Store CCMP PN so that we can verify that the next - * fragment has a sequential PN value. */ - entry->ccmp = 1; + + /* Store CCMP/GCMP PN so that we can verify that the + * next fragment has a sequential PN value. + */ + entry->check_sequential_pn = true; memcpy(entry->last_pn, rx->key->u.ccmp.rx_pn[queue], IEEE80211_CCMP_PN_LEN); + BUILD_BUG_ON(offsetof(struct ieee80211_key, + u.ccmp.rx_pn) != + offsetof(struct ieee80211_key, + u.gcmp.rx_pn)); + BUILD_BUG_ON(sizeof(rx->key->u.ccmp.rx_pn[queue]) != + sizeof(rx->key->u.gcmp.rx_pn[queue])); + BUILD_BUG_ON(IEEE80211_CCMP_PN_LEN != + IEEE80211_GCMP_PN_LEN); } return RX_QUEUED; } @@ -1872,15 +1884,21 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) return RX_DROP_MONITOR; } - /* Verify that MPDUs within one MSDU have sequential PN values. - * (IEEE 802.11i, 8.3.3.4.5) */ - if (entry->ccmp) { + /* "The receiver shall discard MSDUs and MMPDUs whose constituent + * MPDU PN values are not incrementing in steps of 1." + * see IEEE P802.11-REVmc/D5.0, 12.5.3.4.4, item d (for CCMP) + * and IEEE P802.11-REVmc/D5.0, 12.5.5.4.4, item d (for GCMP) + */ + if (entry->check_sequential_pn) { int i; u8 pn[IEEE80211_CCMP_PN_LEN], *rpn; int queue; + if (!rx->key || (rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP && - rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP_256)) + rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP_256 && + rx->key->conf.cipher != WLAN_CIPHER_SUITE_GCMP && + rx->key->conf.cipher != WLAN_CIPHER_SUITE_GCMP_256)) return RX_DROP_UNUSABLE; memcpy(pn, entry->last_pn, IEEE80211_CCMP_PN_LEN); for (i = IEEE80211_CCMP_PN_LEN - 1; i >= 0; i--) { -- cgit v1.2.3 From 1ec7bae8bec9b72e347e01330c745ab5cdd66f0e Mon Sep 17 00:00:00 2001 From: Jouni Malinen Date: Tue, 1 Mar 2016 00:29:00 +0200 Subject: mac80211: Fix Public Action frame RX in AP mode Public Action frames use special rules for how the BSSID field (Address 3) is set. A wildcard BSSID is used in cases where the transmitter and recipient are not members of the same BSS. 
As such, we need to accept Public Action frames with wildcard BSSID. Commit db8e17324553 ("mac80211: ignore frames between TDLS peers when operating as AP") added a rule that drops Action frames to TDLS-peers based on an Action frame having different DA (Address 1) and BSSID (Address 3) values. This is not correct since it misses the possibility of BSSID being a wildcard BSSID in which case the Address 1 would not necessarily match. Fix this by allowing mac80211 to accept wildcard BSSID in an Action frame when in AP mode. Fixes: db8e17324553 ("mac80211: ignore frames between TDLS peers when operating as AP") Cc: stable@vger.kernel.org Signed-off-by: Jouni Malinen Signed-off-by: Johannes Berg --- net/mac80211/rx.c | 1 + 1 file changed, 1 insertion(+) (limited to 'net') diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index bf2c3f281dcd..60d093f40f1d 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -3384,6 +3384,7 @@ static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx) return false; /* ignore action frames to TDLS-peers */ if (ieee80211_is_action(hdr->frame_control) && + !is_broadcast_ether_addr(bssid) && !ether_addr_equal(bssid, hdr->addr1)) return false; } -- cgit v1.2.3 From c36dd3eaf1a674a45b58b922258d6eaa8932e292 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Wed, 24 Feb 2016 12:07:17 +0100 Subject: mac80211: minstrel_ht: fix a logic error in RTS/CTS handling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit RTS/CTS needs to be enabled if the rate is a fallback rate *or* if it's a dual-stream rate and the sta is in dynamic SMPS mode. Cc: stable@vger.kernel.org Fixes: a3ebb4e1b763 ("mac80211: minstrel_ht: handle peers in dynamic SMPS") Reported-by: Matías Richart Signed-off-by: Felix Fietkau Signed-off-by: Johannes Berg --- net/mac80211/rc80211_minstrel_ht.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c index 409b3640da6c..370d677b547b 100644 --- a/net/mac80211/rc80211_minstrel_ht.c +++ b/net/mac80211/rc80211_minstrel_ht.c @@ -872,7 +872,7 @@ minstrel_ht_set_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi, * - if station is in dynamic SMPS (and streams > 1) * - for fallback rates, to increase chances of getting through */ - if (offset > 0 && + if (offset > 0 || (mi->sta->smps_mode == IEEE80211_SMPS_DYNAMIC && group->streams > 1)) { ratetbl->rate[offset].count = ratetbl->rate[offset].count_rts; -- cgit v1.2.3 From 40b4f0fd74e46c017814618d67ec9127ff20f157 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Sun, 28 Feb 2016 10:03:51 +0800 Subject: sctp: lack the check for ports in sctp_v6_cmp_addr As the member .cmp_addr of sctp_af_inet6, sctp_v6_cmp_addr should also check the port of addresses, just like sctp_v4_cmp_addr, cause it's invoked by sctp_cmp_addr_exact(). Now sctp_v6_cmp_addr just check the port when two addresses have different family, and lack the port check for two ipv6 addresses. that will make sctp_hash_cmp() cannot work well. so fix it by adding ports comparison in sctp_v6_cmp_addr(). Signed-off-by: Xin Long Signed-off-by: David S. 
Miller --- net/sctp/ipv6.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'net') diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index ec529121f38a..ce46f1c7f133 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c @@ -526,6 +526,8 @@ static int sctp_v6_cmp_addr(const union sctp_addr *addr1, } return 0; } + if (addr1->v6.sin6_port != addr2->v6.sin6_port) + return 0; if (!ipv6_addr_equal(&addr1->v6.sin6_addr, &addr2->v6.sin6_addr)) return 0; /* If this is a linklocal address, compare the scope_id. */ -- cgit v1.2.3 From 3d73e8fac8f84942f15307d6d9cb1dba843d3fb2 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Sun, 28 Feb 2016 10:33:11 +0800 Subject: sctp: sctp_remaddr_seq_show use the wrong variable to dump transport info Now in sctp_remaddr_seq_show(), we use variable *tsp to get the param *v. but *tsp is also used to traversal transport_addr_list, which will cover the previous value, and make sctp_transport_put work on the wrong transport. So fix it by adding a new variable to get the param *v. Fixes: fba4c330c5b9 ("sctp: hold transport before we access t->asoc in sctp proc") Signed-off-by: Xin Long Signed-off-by: David S. Miller --- net/sctp/proc.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'net') diff --git a/net/sctp/proc.c b/net/sctp/proc.c index ded7d931a6a5..963dffcc2618 100644 --- a/net/sctp/proc.c +++ b/net/sctp/proc.c @@ -482,7 +482,7 @@ static void sctp_remaddr_seq_stop(struct seq_file *seq, void *v) static int sctp_remaddr_seq_show(struct seq_file *seq, void *v) { struct sctp_association *assoc; - struct sctp_transport *tsp; + struct sctp_transport *transport, *tsp; if (v == SEQ_START_TOKEN) { seq_printf(seq, "ADDR ASSOC_ID HB_ACT RTO MAX_PATH_RTX " @@ -490,10 +490,10 @@ static int sctp_remaddr_seq_show(struct seq_file *seq, void *v) return 0; } - tsp = (struct sctp_transport *)v; - if (!sctp_transport_hold(tsp)) + transport = (struct sctp_transport *)v; + if (!sctp_transport_hold(transport)) return 0; - assoc = tsp->asoc; + assoc = transport->asoc; list_for_each_entry_rcu(tsp, &assoc->peer.transport_addr_list, transports) { @@ -546,7 +546,7 @@ static int sctp_remaddr_seq_show(struct seq_file *seq, void *v) seq_printf(seq, "\n"); } - sctp_transport_put(tsp); + sctp_transport_put(transport); return 0; } -- cgit v1.2.3 From 1837b2e2bcd23137766555a63867e649c0b637f0 Mon Sep 17 00:00:00 2001 From: Benjamin Poirier Date: Mon, 29 Feb 2016 15:03:33 -0800 Subject: mld, igmp: Fix reserved tailroom calculation The current reserved_tailroom calculation fails to take hlen and tlen into account. skb: [__hlen__|__data____________|__tlen___|__extra__] ^ ^ head skb_end_offset In this representation, hlen + data + tlen is the size passed to alloc_skb. "extra" is the extra space made available in __alloc_skb because of rounding up by kmalloc. We can reorder the representation like so: [__hlen__|__data____________|__extra__|__tlen___] ^ ^ head skb_end_offset The maximum space available for ip headers and payload without fragmentation is min(mtu, data + extra). Therefore, reserved_tailroom = data + extra + tlen - min(mtu, data + extra) = skb_end_offset - hlen - min(mtu, skb_end_offset - hlen - tlen) = skb_tailroom - min(mtu, skb_tailroom - tlen) ; after skb_reserve(hlen) Compare the second line to the current expression: reserved_tailroom = skb_end_offset - min(mtu, skb_end_offset) and we can see that hlen and tlen are not taken into account. 
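The corrected formula is what the skb_tailroom_reserve() helper used in the diff below encodes. That helper is added outside the net/-limited diff shown here (presumably in include/linux/skbuff.h), so the following is only a reconstruction from this derivation and from the min() expansion given in the next paragraph, not the verbatim upstream code:

	/* reconstruction; kernel context, <linux/skbuff.h> assumed */
	static inline void skb_tailroom_reserve(struct sk_buff *skb, unsigned int mtu,
						unsigned int needed_tailroom)
	{
		if (mtu < skb_tailroom(skb) - needed_tailroom)
			/* use at most mtu */
			skb->reserved_tailroom = skb_tailroom(skb) - mtu;
		else
			/* use up to all the space left after reserving the tail */
			skb->reserved_tailroom = needed_tailroom;
	}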
The min() in the third line can be expanded into: if mtu < skb_tailroom - tlen: reserved_tailroom = skb_tailroom - mtu else: reserved_tailroom = tlen Depending on hlen, tlen, mtu and the number of multicast address records, the current code may output skbs that have less tailroom than dev->needed_tailroom or it may output more skbs than needed because not all space available is used. Fixes: 4c672e4b ("ipv6: mld: fix add_grhead skb_over_panic for devs with large MTUs") Signed-off-by: Benjamin Poirier Acked-by: Hannes Frederic Sowa Acked-by: Daniel Borkmann Signed-off-by: David S. Miller --- net/ipv4/igmp.c | 3 +-- net/ipv6/mcast.c | 3 +-- 2 files changed, 2 insertions(+), 4 deletions(-) (limited to 'net') diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index 05e4cba14162..b3086cf27027 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -356,9 +356,8 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, unsigned int mtu) skb_dst_set(skb, &rt->dst); skb->dev = dev; - skb->reserved_tailroom = skb_end_offset(skb) - - min(mtu, skb_end_offset(skb)); skb_reserve(skb, hlen); + skb_tailroom_reserve(skb, mtu, tlen); skb_reset_network_header(skb); pip = ip_hdr(skb); diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index 5ee56d0a8699..d64ee7e83664 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c @@ -1574,9 +1574,8 @@ static struct sk_buff *mld_newpack(struct inet6_dev *idev, unsigned int mtu) return NULL; skb->priority = TC_PRIO_CONTROL; - skb->reserved_tailroom = skb_end_offset(skb) - - min(mtu, skb_end_offset(skb)); skb_reserve(skb, hlen); + skb_tailroom_reserve(skb, mtu, tlen); if (__ipv6_get_lladdr(idev, &addr_buf, IFA_F_TENTATIVE)) { /* : -- cgit v1.2.3 From f214fc402967e1bc94ad7f39faa03db5813d6849 Mon Sep 17 00:00:00 2001 From: Parthasarathy Bhuvaragan Date: Tue, 1 Mar 2016 11:07:09 +0100 Subject: tipc: Revert "tipc: use existing sk_write_queue for outgoing packet chain" reverts commit 94153e36e709e ("tipc: use existing sk_write_queue for outgoing packet chain") In Commit 94153e36e709e, we assume that we fill & empty the socket's sk_write_queue within the same lock_sock() session. This is not true if the link is congested. During congestion, the socket lock is released while we wait for the congestion to cease. This implementation causes a nullptr exception, if the user space program has several threads accessing the same socket descriptor. Consider two threads of the same program performing the following: Thread1 Thread2 -------------------- ---------------------- Enter tipc_sendmsg() Enter tipc_sendmsg() lock_sock() lock_sock() Enter tipc_link_xmit(), ret=ELINKCONG spin on socket lock.. sk_wait_event() : release_sock() grab socket lock : Enter tipc_link_xmit(), ret=0 : release_sock() Wakeup after congestion lock_sock() skb = skb_peek(pktchain); !! TIPC_SKB_CB(skb)->wakeup_pending = tsk->link_cong; In this case, the second thread transmits the buffers belonging to both thread1 and thread2 successfully. When the first thread wakeup after the congestion it assumes that the pktchain is intact and operates on the skb's in it, which leads to the following exception: [2102.439969] BUG: unable to handle kernel NULL pointer dereference at 00000000000000d0 [2102.440074] IP: [] __tipc_link_xmit+0x2b0/0x4d0 [tipc] [2102.440074] PGD 3fa3f067 PUD 3fa6b067 PMD 0 [2102.440074] Oops: 0000 [#1] SMP [2102.440074] CPU: 2 PID: 244 Comm: sender Not tainted 3.12.28 #1 [2102.440074] RIP: 0010:[] [] __tipc_link_xmit+0x2b0/0x4d0 [tipc] [...] [2102.440074] Call Trace: [2102.440074] [] ? 
schedule+0x29/0x70 [2102.440074] [] ? tipc_node_unlock+0x46/0x170 [tipc] [2102.440074] [] tipc_link_xmit+0x51/0xf0 [tipc] [2102.440074] [] tipc_send_stream+0x11e/0x4f0 [tipc] [2102.440074] [] ? __wake_up_sync+0x20/0x20 [2102.440074] [] tipc_send_packet+0x1c/0x20 [tipc] [2102.440074] [] sock_sendmsg+0xa8/0xd0 [2102.440074] [] ? release_sock+0x145/0x170 [2102.440074] [] ___sys_sendmsg+0x3d8/0x3e0 [2102.440074] [] ? _raw_spin_unlock+0xe/0x10 [2102.440074] [] ? handle_mm_fault+0x6ca/0x9d0 [2102.440074] [] ? set_next_entity+0x85/0xa0 [2102.440074] [] ? _raw_spin_unlock_irq+0xe/0x20 [2102.440074] [] ? finish_task_switch+0x5c/0xc0 [2102.440074] [] ? __schedule+0x34c/0x950 [2102.440074] [] __sys_sendmsg+0x42/0x80 [2102.440074] [] SyS_sendmsg+0x12/0x20 [2102.440074] [] system_call_fastpath+0x16/0x1b In this commit, we maintain the skb list always in the stack. Signed-off-by: Parthasarathy Bhuvaragan Acked-by: Ying Xue Acked-by: Jon Maloy Signed-off-by: David S. Miller --- net/tipc/socket.c | 33 +++++++++++++++++++-------------- 1 file changed, 19 insertions(+), 14 deletions(-) (limited to 'net') diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 69c29050f14a..4d420bb27396 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -673,7 +673,7 @@ static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq, struct tipc_sock *tsk = tipc_sk(sk); struct net *net = sock_net(sk); struct tipc_msg *mhdr = &tsk->phdr; - struct sk_buff_head *pktchain = &sk->sk_write_queue; + struct sk_buff_head pktchain; struct iov_iter save = msg->msg_iter; uint mtu; int rc; @@ -687,14 +687,16 @@ static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq, msg_set_nameupper(mhdr, seq->upper); msg_set_hdr_sz(mhdr, MCAST_H_SIZE); + skb_queue_head_init(&pktchain); + new_mtu: mtu = tipc_bcast_get_mtu(net); - rc = tipc_msg_build(mhdr, msg, 0, dsz, mtu, pktchain); + rc = tipc_msg_build(mhdr, msg, 0, dsz, mtu, &pktchain); if (unlikely(rc < 0)) return rc; do { - rc = tipc_bcast_xmit(net, pktchain); + rc = tipc_bcast_xmit(net, &pktchain); if (likely(!rc)) return dsz; @@ -704,7 +706,7 @@ new_mtu: if (!rc) continue; } - __skb_queue_purge(pktchain); + __skb_queue_purge(&pktchain); if (rc == -EMSGSIZE) { msg->msg_iter = save; goto new_mtu; @@ -863,7 +865,7 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz) struct net *net = sock_net(sk); struct tipc_msg *mhdr = &tsk->phdr; u32 dnode, dport; - struct sk_buff_head *pktchain = &sk->sk_write_queue; + struct sk_buff_head pktchain; struct sk_buff *skb; struct tipc_name_seq *seq; struct iov_iter save; @@ -924,17 +926,18 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz) msg_set_hdr_sz(mhdr, BASIC_H_SIZE); } + skb_queue_head_init(&pktchain); save = m->msg_iter; new_mtu: mtu = tipc_node_get_mtu(net, dnode, tsk->portid); - rc = tipc_msg_build(mhdr, m, 0, dsz, mtu, pktchain); + rc = tipc_msg_build(mhdr, m, 0, dsz, mtu, &pktchain); if (rc < 0) return rc; do { - skb = skb_peek(pktchain); + skb = skb_peek(&pktchain); TIPC_SKB_CB(skb)->wakeup_pending = tsk->link_cong; - rc = tipc_node_xmit(net, pktchain, dnode, tsk->portid); + rc = tipc_node_xmit(net, &pktchain, dnode, tsk->portid); if (likely(!rc)) { if (sock->state != SS_READY) sock->state = SS_CONNECTING; @@ -946,7 +949,7 @@ new_mtu: if (!rc) continue; } - __skb_queue_purge(pktchain); + __skb_queue_purge(&pktchain); if (rc == -EMSGSIZE) { m->msg_iter = save; goto new_mtu; @@ -1016,7 +1019,7 @@ static int __tipc_send_stream(struct socket *sock, struct msghdr *m, 
From 5d150a985520bbe3cb2aa1ceef24a7e32f20c15f Mon Sep 17 00:00:00 2001
From: Florian Westphal
Date: Tue, 1 Mar 2016 16:15:16 +0100
Subject: ipv6: re-enable fragment header matching in ipv6_find_hdr

When ipv6_find_hdr is used to find a fragment header (the caller specifies
target NEXTHDR_FRAGMENT), we erroneously return -ENOENT for all fragments
with nonzero offset.

Before commit 9195bb8e381d, when a target was specified, we did not enter
the exthdr walk loop as nexthdr == target, so this used to work.

Now we do (so we can skip empty routing headers).  When we then stumble
upon a fragment with nonzero frag_off, we must return -ENOENT ("header not
found") only if the caller did not specifically request NEXTHDR_FRAGMENT.

This allows the nftables exthdr expression to match ipv6 fragments, e.g. via

nft add rule ip6 filter input frag frag-off gt 0

Fixes: 9195bb8e381d ("ipv6: improve ipv6_find_hdr() to skip empty routing headers")
Signed-off-by: Florian Westphal
Signed-off-by: David S. Miller
---
 net/ipv6/exthdrs_core.c | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

(limited to 'net')

diff --git a/net/ipv6/exthdrs_core.c b/net/ipv6/exthdrs_core.c
index 5c5d23e59da5..9508a20fbf61 100644
--- a/net/ipv6/exthdrs_core.c
+++ b/net/ipv6/exthdrs_core.c
@@ -257,7 +257,11 @@ int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
 					*fragoff = _frag_off;
 				return hp->nexthdr;
 			}
-			return -ENOENT;
+			if (!found)
+				return -ENOENT;
+			if (fragoff)
+				*fragoff = _frag_off;
+			break;
 		}
 		hdrlen = 8;
 	} else if (nexthdr == NEXTHDR_AUTH) {

-- cgit v1.2.3
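
For context, a caller such as a netfilter match asks ipv6_find_hdr() specifically
for the fragment header and reads the reported offset; with this fix the lookup also
succeeds for non-first fragments (nonzero offset). The sketch below is a hedged
illustration of that caller side, not part of the patch; example_is_ipv6_fragment()
is an invented name, while ipv6_find_hdr() and NEXTHDR_FRAGMENT are the real API.

#include <linux/skbuff.h>
#include <net/ipv6.h>

/* Returns true if the packet carries an IPv6 fragment header; on success
 * *frag_off holds the fragment offset field reported by ipv6_find_hdr().
 */
static bool example_is_ipv6_fragment(const struct sk_buff *skb,
				     unsigned short *frag_off)
{
	unsigned int offset = 0;	/* start the walk at the IPv6 header */
	int nexthdr;

	nexthdr = ipv6_find_hdr(skb, &offset, NEXTHDR_FRAGMENT, frag_off, NULL);
	if (nexthdr < 0)
		return false;		/* no fragment header (or malformed) */

	return true;
}
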
From 44ef548f4f8bbc739070583f9844ff0cd8fcc6b4 Mon Sep 17 00:00:00 2001
From: Phil Sutter
Date: Thu, 3 Mar 2016 14:34:14 +0100
Subject: net: sched: fix act_ipt for LOG target

Before calling the destroy() or target() callbacks, the family parameter
field has to be initialized.  Otherwise at least the LOG target refuses to
work, and removing it oopses the kernel.

Cc: Jamal Hadi Salim
Signed-off-by: Phil Sutter
Acked-by: Jamal Hadi Salim
Signed-off-by: David S. Miller
---
 net/sched/act_ipt.c | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'net')

diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index d05869646515..6b70399ab781 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -62,6 +62,7 @@ static void ipt_destroy_target(struct xt_entry_target *t)
 	struct xt_tgdtor_param par = {
 		.target = t->u.kernel.target,
 		.targinfo = t->data,
+		.family = NFPROTO_IPV4,
 	};
 	if (par.target->destroy != NULL)
 		par.target->destroy(&par);
@@ -195,6 +196,7 @@ static int tcf_ipt(struct sk_buff *skb, const struct tc_action *a,
 	par.hooknum  = ipt->tcfi_hook;
 	par.target   = ipt->tcfi_t->u.kernel.target;
 	par.targinfo = ipt->tcfi_t->data;
+	par.family   = NFPROTO_IPV4;
 	ret = par.target->target(skb, &par);

 	switch (ret) {

-- cgit v1.2.3


From 4de13d7ed6ffdcbb34317acaa9236f121176f5f8 Mon Sep 17 00:00:00 2001
From: Parthasarathy Bhuvaragan
Date: Thu, 3 Mar 2016 17:54:54 +0100
Subject: tipc: fix nullptr crash during subscription cancel

commit 4d5cfcba2f6e ('tipc: fix connection abort during subscription
cancel') removed the check for a valid subscription before calling
tipc_nametbl_subscribe().  This leads to a nullptr exception when we
process a subscription cancel request, because for a cancel request a
null subscription is passed to tipc_nametbl_subscribe().

In this commit, we call tipc_nametbl_subscribe() only for a valid
subscription.

Fixes: 4d5cfcba2f6e ('tipc: fix connection abort during subscription cancel')
Reported-by: Anders Widell
Signed-off-by: Parthasarathy Bhuvaragan
Acked-by: Jon Maloy
Signed-off-by: David S. Miller
---
 net/tipc/subscr.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'net')

diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index 69ee2eeef968..f9ff73a8d815 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -296,7 +296,8 @@ static void tipc_subscrb_rcv_cb(struct net *net, int conid,
 	if (tipc_subscrp_create(net, (struct tipc_subscr *)buf, subscrb, &sub))
 		return tipc_conn_terminate(tn->topsrv, subscrb->conid);

-	tipc_nametbl_subscribe(sub);
+	if (sub)
+		tipc_nametbl_subscribe(sub);
 }

 /* Handle one request to establish a new subscriber */

-- cgit v1.2.3


From 59dca1d8a6725a121dae6c452de0b2611d5865dc Mon Sep 17 00:00:00 2001
From: Bill Sommerfeld
Date: Fri, 4 Mar 2016 14:47:21 -0800
Subject: udp6: fix UDP/IPv6 encap resubmit path

IPv4 interprets a negative return value from a protocol handler as a
request to redispatch to a new protocol.  In contrast, IPv6 interprets a
negative value as an error and a positive value as a request for
redispatch.

UDP for IPv6 was unaware of this difference.  Change __udp6_lib_rcv() to
return a positive value for redispatch.  Note that the socket's encap_rcv
hook still needs to return a negative value to request dispatch, and, in
the case of IPv6 packets, adjust IP6CB(skb)->nhoff to identify the byte
containing the next protocol.

Signed-off-by: Bill Sommerfeld
Signed-off-by: David S. Miller
---
 net/ipv6/udp.c | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

(limited to 'net')

diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 22e28a44e3c8..422dd014aa2c 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -962,11 +962,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
 		ret = udpv6_queue_rcv_skb(sk, skb);
 		sock_put(sk);

-		/* a return value > 0 means to resubmit the input, but
-		 * it wants the return to be -protocol, or 0
-		 */
+		/* a return value > 0 means to resubmit the input */
 		if (ret > 0)
-			return -ret;
+			return ret;
 		return 0;
 	}

-- cgit v1.2.3
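
To make the note about encap_rcv concrete, here is a heavily hedged sketch of what
such a hook could look like. Everything below is illustrative rather than taken from
the patch: example_encap_rcv() and the length check are invented, the exact nhoff
value an implementation must use depends on the tunnel format, and only pskb_may_pull(),
IP6CB() and the encap_rcv calling convention described in the commit message are real.

#include <linux/skbuff.h>
#include <linux/ipv6.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/stddef.h>

/* Hook installed on a UDP socket: returning > 0 hands the packet to normal
 * UDP processing, returning -IPPROTO_ESP asks the UDP layer to resubmit it
 * as ESP. For IPv6, nhoff must first point at the byte that names the inner
 * protocol; the offset used here is an assumption for illustration.
 */
static int example_encap_rcv(struct sock *sk, struct sk_buff *skb)
{
	/* illustrative check: is this plausibly tunneled traffic for us? */
	if (!pskb_may_pull(skb, sizeof(struct udphdr) + 4))
		return 1;	/* no: let normal UDP handle it */

	IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr);

	return -IPPROTO_ESP;	/* negative value: request resubmit as ESP */
}
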
From a9d99ce28ed359d68cf6f3c1a69038aefedf6d6a Mon Sep 17 00:00:00 2001
From: Eric Dumazet
Date: Sun, 6 Mar 2016 09:29:21 -0800
Subject: tcp: fix tcpi_segs_in after connection establishment

If the final packet (ACK) of the 3WHS is lost, it appears we do not
properly account the following incoming segment in tcpi_segs_in.

While we are at it, start segs_in at one, to count the SYN packet.  We do
not yet count the number of SYNs received for a request sock; we might add
this someday.

packetdrill script showing proper behavior after the fix:

// Tests tcpi_segs_in when 3rd packet (ACK) of 3WHS is lost
0.000 socket(..., SOCK_STREAM, IPPROTO_TCP) = 3
+0    setsockopt(3, SOL_SOCKET, SO_REUSEADDR, [1], 4) = 0
+0    bind(3, ..., ...) = 0
+0    listen(3, 1) = 0

+0    < S 0:0(0) win 32792
+0    > S. 0:0(0) ack 1
+.020 < P. 1:1001(1000) ack 1 win 32792
+0    accept(3, ..., ...) = 4

+.000 %{ assert tcpi_segs_in == 2, 'tcpi_segs_in=%d' % tcpi_segs_in }%

Fixes: 2efd055c53c06 ("tcp: add tcpi_segs_in and tcpi_segs_out to tcp_info")
Signed-off-by: Eric Dumazet
Signed-off-by: David S. Miller
---
 net/ipv4/tcp_minisocks.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'net')

diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 75632a925824..9b02af2139d3 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -455,7 +455,7 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,

 		newtp->rcv_wup = newtp->copied_seq =
 		newtp->rcv_nxt = treq->rcv_isn + 1;
-		newtp->segs_in = 0;
+		newtp->segs_in = 1;

 		newtp->snd_sml = newtp->snd_una =
 		newtp->snd_nxt = newtp->snd_up = treq->snt_isn + 1;
@@ -815,6 +815,7 @@ int tcp_child_process(struct sock *parent, struct sock *child,
 	int ret = 0;
 	int state = child->sk_state;

+	tcp_sk(child)->segs_in += max_t(u16, 1, skb_shinfo(skb)->gso_segs);
 	if (!sock_owned_by_user(child)) {
 		ret = tcp_rcv_state_process(child, skb);
 		/* Wakeup parent, send SIGIO */

-- cgit v1.2.3
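
The packetdrill assertion reads tcpi_segs_in through TCP_INFO; an application can
observe the same counter on an accepted socket as in the minimal userspace sketch
below. This is a hedged example, not part of the patch: print_segs_in() is an
invented helper, and it assumes a v4.2+ kernel plus libc headers whose struct
tcp_info already carries the tcpi_segs_in field (otherwise include linux/tcp.h).

#include <stdio.h>
#include <string.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

/* Print the number of segments received on a connected/accepted TCP socket. */
static void print_segs_in(int fd)
{
	struct tcp_info info;
	socklen_t len = sizeof(info);

	memset(&info, 0, sizeof(info));
	if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &info, &len) == 0)
		printf("tcpi_segs_in=%u\n", info.tcpi_segs_in);
	else
		perror("getsockopt(TCP_INFO)");
}
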