Diffstat (limited to 'net')
-rw-r--r--  net/caif/caif_socket.c      | 10
-rw-r--r--  net/caif/cfmuxl.c           | 12
-rw-r--r--  net/ceph/ceph_common.c      |  2
-rw-r--r--  net/ceph/mon_client.c       | 13
-rw-r--r--  net/core/dev.c              | 10
-rw-r--r--  net/core/ethtool.c          |  2
-rw-r--r--  net/core/netpoll.c          |  2
-rw-r--r--  net/core/netprio_cgroup.c   | 15
-rw-r--r--  net/core/sock.c             |  7
-rw-r--r--  net/ipv4/Kconfig            |  2
-rw-r--r--  net/ipv4/arp.c              |  3
-rw-r--r--  net/ipv4/ip_options.c       |  2
-rw-r--r--  net/ipv4/sysctl_net_ipv4.c  |  6
-rw-r--r--  net/ipv4/tcp.c              | 23
-rw-r--r--  net/ipv4/tcp_input.c        | 45
-rw-r--r--  net/ipv4/tcp_ipv4.c         |  5
-rw-r--r--  net/ipv4/tcp_timer.c        |  5
-rw-r--r--  net/rxrpc/ar-key.c          |  4
-rw-r--r--  net/sched/sch_choke.c       |  3
-rw-r--r--  net/sched/sch_netem.c       |  9
-rw-r--r--  net/sched/sch_sfb.c         |  3
-rw-r--r--  net/sched/sch_sfq.c         |  5
-rw-r--r--  net/sunrpc/auth_generic.c   | 17
23 files changed, 120 insertions(+), 85 deletions(-)
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index a98628086452..a97d97a3a512 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -539,8 +539,10 @@ static int transmit_skb(struct sk_buff *skb, struct caifsock *cf_sk,
pkt = cfpkt_fromnative(CAIF_DIR_OUT, skb);
memset(skb->cb, 0, sizeof(struct caif_payload_info));
- if (cf_sk->layer.dn == NULL)
+ if (cf_sk->layer.dn == NULL) {
+ kfree_skb(skb);
return -EINVAL;
+ }
return cf_sk->layer.dn->transmit(cf_sk->layer.dn, pkt);
}
@@ -683,10 +685,10 @@ static int caif_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
}
err = transmit_skb(skb, cf_sk,
msg->msg_flags&MSG_DONTWAIT, timeo);
- if (err < 0) {
- kfree_skb(skb);
+ if (err < 0)
+ /* skb is already freed */
goto pipe_err;
- }
+
sent += size;
}
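
The two caif hunks are a matched pair: transmit_skb() now consumes the
skb on its error path, so caif_stream_sendmsg() must no longer free it
itself -- freeing in both places was a double-free. A minimal sketch of
the resulting ownership convention (helper name hypothetical):

	/* the callee owns the skb: it frees on every failure path */
	static int xmit_consuming_skb(struct sk_buff *skb, struct cflayer *dn)
	{
		if (dn == NULL) {
			kfree_skb(skb);		/* error: free here ... */
			return -EINVAL;
		}
		return dn->transmit(dn, cfpkt_fromnative(CAIF_DIR_OUT, skb));
	}

	/* the caller never touches the skb again once this returns < 0 */
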
diff --git a/net/caif/cfmuxl.c b/net/caif/cfmuxl.c
index b36f24a4c8e7..94b08612a4d8 100644
--- a/net/caif/cfmuxl.c
+++ b/net/caif/cfmuxl.c
@@ -248,7 +248,6 @@ static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
{
struct cfmuxl *muxl = container_obj(layr);
struct cflayer *layer;
- int idx;
rcu_read_lock();
list_for_each_entry_rcu(layer, &muxl->srvl_list, node) {
@@ -257,14 +256,9 @@ static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
if ((ctrl == _CAIF_CTRLCMD_PHYIF_DOWN_IND ||
ctrl == CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND) &&
- layer->id != 0) {
-
- idx = layer->id % UP_CACHE_SIZE;
- spin_lock_bh(&muxl->receive_lock);
- RCU_INIT_POINTER(muxl->up_cache[idx], NULL);
- list_del_rcu(&layer->node);
- spin_unlock_bh(&muxl->receive_lock);
- }
+ layer->id != 0)
+ cfmuxl_remove_uplayer(layr, layer->id);
+
/* NOTE: ctrlcmd is not allowed to block */
layer->ctrlcmd(layer, ctrl, phyid);
}
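
The removed block is replaced by a call to the existing
cfmuxl_remove_uplayer() helper. Inferred from the code it replaces (the
real helper may differ in detail), it performs the same cache
invalidation and RCU unlink under receive_lock:

	/* presumed core of cfmuxl_remove_uplayer(), shown for context only */
	spin_lock_bh(&muxl->receive_lock);
	RCU_INIT_POINTER(muxl->up_cache[id % UP_CACHE_SIZE], NULL);
	list_del_rcu(&layer->node);
	spin_unlock_bh(&muxl->receive_lock);
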
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index 97f70e50ad3b..761ad9d6cc3b 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -85,8 +85,6 @@ int ceph_check_fsid(struct ceph_client *client, struct ceph_fsid *fsid)
} else {
pr_info("client%lld fsid %pU\n", ceph_client_id(client), fsid);
memcpy(&client->fsid, fsid, sizeof(*fsid));
- ceph_debugfs_client_init(client);
- client->have_fsid = true;
}
return 0;
}
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
index 0b62deae42bd..1845cde26227 100644
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -8,8 +8,8 @@
#include <linux/ceph/mon_client.h>
#include <linux/ceph/libceph.h>
+#include <linux/ceph/debugfs.h>
#include <linux/ceph/decode.h>
-
#include <linux/ceph/auth.h>
/*
@@ -340,8 +340,19 @@ static void ceph_monc_handle_map(struct ceph_mon_client *monc,
client->monc.monmap = monmap;
kfree(old);
+ if (!client->have_fsid) {
+ client->have_fsid = true;
+ mutex_unlock(&monc->mutex);
+ /*
+ * do debugfs initialization without mutex to avoid
+ * creating a locking dependency
+ */
+ ceph_debugfs_client_init(client);
+ goto out_unlocked;
+ }
out:
mutex_unlock(&monc->mutex);
+out_unlocked:
wake_up_all(&client->auth_wq);
}
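
The ceph pair above moves debugfs initialisation out of
ceph_check_fsid() to the point where the fsid is first learned, and
deliberately drops monc->mutex around the call so no locking dependency
is created against the debugfs locks. The generic shape, as a sketch
(names hypothetical):

	mutex_lock(&m);
	if (need_init) {		/* decide under the lock ...          */
		need_init = false;
		mutex_unlock(&m);
		init_other_subsystem();	/* ... but call into it without it    */
		goto done;
	}
	mutex_unlock(&m);
done:
	wake_up_waiters();
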
diff --git a/net/core/dev.c b/net/core/dev.c
index 115dee1d985d..6ca32f6b3105 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3500,14 +3500,20 @@ static inline gro_result_t
__napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
struct sk_buff *p;
+ unsigned int maclen = skb->dev->hard_header_len;
for (p = napi->gro_list; p; p = p->next) {
unsigned long diffs;
diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
diffs |= p->vlan_tci ^ skb->vlan_tci;
- diffs |= compare_ether_header(skb_mac_header(p),
- skb_gro_mac_header(skb));
+ if (maclen == ETH_HLEN)
+ diffs |= compare_ether_header(skb_mac_header(p),
+ skb_gro_mac_header(skb));
+ else if (!diffs)
+ diffs = memcmp(skb_mac_header(p),
+ skb_gro_mac_header(skb),
+ maclen);
NAPI_GRO_CB(p)->same_flow = !diffs;
NAPI_GRO_CB(p)->flush = 0;
}
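
compare_ether_header() only compares ETH_HLEN (14) bytes, hence the new
maclen == ETH_HLEN gate; devices with a longer hard_header_len fall
back to a full memcmp() of the link-layer header. For reference, the
generic compare_ether_header() of this era looks roughly like this
(architectures may provide an optimised variant):

	static inline unsigned long
	compare_ether_header(const void *a, const void *b)
	{
		u32 *a32 = (u32 *)((u8 *)a + 2);
		u32 *b32 = (u32 *)((u8 *)b + 2);

		return (*(u16 *)a ^ *(u16 *)b) | (a32[0] ^ b32[0]) |
		       (a32[1] ^ b32[1]) | (a32[2] ^ b32[2]);
	}
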
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 369b41894527..3f79db1b612a 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -1190,6 +1190,8 @@ static noinline_for_stack int ethtool_flash_device(struct net_device *dev,
if (!dev->ethtool_ops->flash_device)
return -EOPNOTSUPP;
+ efl.data[ETHTOOL_FLASH_MAX_FILENAME - 1] = 0;
+
return dev->ethtool_ops->flash_device(dev, &efl);
}
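
efl.data is copied in from userspace as a fixed-size buffer with no
guarantee of NUL termination, so it is terminated before the driver
treats it as a filename. The general pattern, as a sketch:

	char name[ETHTOOL_FLASH_MAX_FILENAME];

	if (copy_from_user(name, useraddr, sizeof(name)))
		return -EFAULT;
	name[sizeof(name) - 1] = '\0';	/* now safe to use as a C string */
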
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 556b08298669..ddefc513b44a 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -194,7 +194,7 @@ static void netpoll_poll_dev(struct net_device *dev)
poll_napi(dev);
- if (dev->priv_flags & IFF_SLAVE) {
+ if (dev->flags & IFF_SLAVE) {
if (dev->npinfo) {
struct net_device *bond_dev = dev->master;
struct sk_buff *skb;
diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c
index 3a9fd4826b75..4dacc44637ef 100644
--- a/net/core/netprio_cgroup.c
+++ b/net/core/netprio_cgroup.c
@@ -58,11 +58,12 @@ static int get_prioidx(u32 *prio)
spin_lock_irqsave(&prioidx_map_lock, flags);
prioidx = find_first_zero_bit(prioidx_map, sizeof(unsigned long) * PRIOIDX_SZ);
+ if (prioidx == sizeof(unsigned long) * PRIOIDX_SZ) {
+ spin_unlock_irqrestore(&prioidx_map_lock, flags);
+ return -ENOSPC;
+ }
set_bit(prioidx, prioidx_map);
spin_unlock_irqrestore(&prioidx_map_lock, flags);
- if (prioidx == sizeof(unsigned long) * PRIOIDX_SZ)
- return -ENOSPC;
-
atomic_set(&max_prioidx, prioidx);
*prio = prioidx;
return 0;
@@ -107,7 +108,7 @@ static void extend_netdev_table(struct net_device *dev, u32 new_len)
static void update_netdev_tables(void)
{
struct net_device *dev;
- u32 max_len = atomic_read(&max_prioidx);
+ u32 max_len = atomic_read(&max_prioidx) + 1;
struct netprio_map *map;
rtnl_lock();
@@ -270,7 +271,6 @@ static int netprio_device_event(struct notifier_block *unused,
{
struct net_device *dev = ptr;
struct netprio_map *old;
- u32 max_len = atomic_read(&max_prioidx);
/*
* Note this is called with rtnl_lock held so we have update side
@@ -278,11 +278,6 @@ static int netprio_device_event(struct notifier_block *unused,
*/
switch (event) {
-
- case NETDEV_REGISTER:
- if (max_len)
- extend_netdev_table(dev, max_len);
- break;
case NETDEV_UNREGISTER:
old = rtnl_dereference(dev->priomap);
RCU_INIT_POINTER(dev->priomap, NULL);
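
Two fixes ride in this file. In get_prioidx(), find_first_zero_bit()
reports a full bitmap by returning its size argument, and the old code
called set_bit() on that sentinel before checking it; the check must
come first, inside the lock. And because max_prioidx is a valid index,
a table sized from it needs the +1. Condensed sketch of the corrected
allocation (MAP_BITS stands in for the real size expression):

	spin_lock_irqsave(&prioidx_map_lock, flags);
	prioidx = find_first_zero_bit(prioidx_map, MAP_BITS);
	if (prioidx == MAP_BITS) {	/* bitmap full: sentinel == size */
		spin_unlock_irqrestore(&prioidx_map_lock, flags);
		return -ENOSPC;
	}
	set_bit(prioidx, prioidx_map);
	spin_unlock_irqrestore(&prioidx_map_lock, flags);
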
diff --git a/net/core/sock.c b/net/core/sock.c
index 3e81fd2e3c75..02f8dfe320b7 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1171,13 +1171,10 @@ EXPORT_SYMBOL(sock_update_classid);
void sock_update_netprioidx(struct sock *sk)
{
- struct cgroup_netprio_state *state;
if (in_interrupt())
return;
- rcu_read_lock();
- state = task_netprio_state(current);
- sk->sk_cgrp_prioidx = state ? state->prioidx : 0;
- rcu_read_unlock();
+
+ sk->sk_cgrp_prioidx = task_netprioidx(current);
}
EXPORT_SYMBOL_GPL(sock_update_netprioidx);
#endif
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index aa2a2c79776f..d183262943d9 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -409,7 +409,7 @@ config INET_TCP_DIAG
config INET_UDP_DIAG
tristate "UDP: socket monitoring interface"
- depends on INET_DIAG
+ depends on INET_DIAG && (IPV6 || IPV6=n)
default n
---help---
Support for UDP socket monitoring interface used by the ss tool.
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 59402be133f0..63e49890ad31 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -863,7 +863,8 @@ static int arp_process(struct sk_buff *skb)
if (addr_type == RTN_UNICAST &&
(arp_fwd_proxy(in_dev, dev, rt) ||
arp_fwd_pvlan(in_dev, dev, rt, sip, tip) ||
- pneigh_lookup(&arp_tbl, net, &tip, dev, 0))) {
+ (rt->dst.dev != dev &&
+ pneigh_lookup(&arp_tbl, net, &tip, dev, 0)))) {
n = neigh_event_ns(&arp_tbl, sha, &sip, dev);
if (n)
neigh_release(n);
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index 1e60f7679075..42dd1a90edea 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -573,8 +573,8 @@ void ip_forward_options(struct sk_buff *skb)
}
if (srrptr + 3 <= srrspace) {
opt->is_changed = 1;
- ip_rt_get_source(&optptr[srrptr-1], skb, rt);
ip_hdr(skb)->daddr = opt->nexthop;
+ ip_rt_get_source(&optptr[srrptr-1], skb, rt);
optptr[2] = srrptr+4;
} else if (net_ratelimit())
printk(KERN_CRIT "ip_forward(): Argh! Destination lost!\n");
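
The swap above is deliberate: ip_rt_get_source() derives the source
address to record from the packet's current destination, so daddr must
be rewritten to the next hop before the source is recorded:

	ip_hdr(skb)->daddr = opt->nexthop;		/* 1: retarget first  */
	ip_rt_get_source(&optptr[srrptr-1], skb, rt);	/* 2: then record src */
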
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 4cb9cd2f2c39..7a7724da9bff 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -778,7 +778,6 @@ EXPORT_SYMBOL_GPL(net_ipv4_ctl_path);
static __net_init int ipv4_sysctl_init_net(struct net *net)
{
struct ctl_table *table;
- unsigned long limit;
table = ipv4_net_table;
if (!net_eq(net, &init_net)) {
@@ -815,11 +814,6 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
net->ipv4.sysctl_rt_cache_rebuild_count = 4;
tcp_init_mem(net);
- limit = nr_free_buffer_pages() / 8;
- limit = max(limit, 128UL);
- net->ipv4.sysctl_tcp_mem[0] = limit / 4 * 3;
- net->ipv4.sysctl_tcp_mem[1] = limit;
- net->ipv4.sysctl_tcp_mem[2] = net->ipv4.sysctl_tcp_mem[0] * 2;
net->ipv4.ipv4_hdr = register_net_sysctl_table(net,
net_ipv4_ctl_path, table);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 06373b4a449a..37755ccc0e96 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1876,6 +1876,20 @@ void tcp_shutdown(struct sock *sk, int how)
}
EXPORT_SYMBOL(tcp_shutdown);
+bool tcp_check_oom(struct sock *sk, int shift)
+{
+ bool too_many_orphans, out_of_socket_memory;
+
+ too_many_orphans = tcp_too_many_orphans(sk, shift);
+ out_of_socket_memory = tcp_out_of_memory(sk);
+
+ if (too_many_orphans && net_ratelimit())
+ pr_info("TCP: too many orphaned sockets\n");
+ if (out_of_socket_memory && net_ratelimit())
+ pr_info("TCP: out of memory -- consider tuning tcp_mem\n");
+ return too_many_orphans || out_of_socket_memory;
+}
+
void tcp_close(struct sock *sk, long timeout)
{
struct sk_buff *skb;
@@ -2015,10 +2029,7 @@ adjudge_to_death:
}
if (sk->sk_state != TCP_CLOSE) {
sk_mem_reclaim(sk);
- if (tcp_too_many_orphans(sk, 0)) {
- if (net_ratelimit())
- printk(KERN_INFO "TCP: too many of orphaned "
- "sockets\n");
+ if (tcp_check_oom(sk, 0)) {
tcp_set_state(sk, TCP_CLOSE);
tcp_send_active_reset(sk, GFP_ATOMIC);
NET_INC_STATS_BH(sock_net(sk),
@@ -3218,7 +3229,6 @@ __setup("thash_entries=", set_thash_entries);
void tcp_init_mem(struct net *net)
{
- /* Set per-socket limits to no more than 1/128 the pressure threshold */
unsigned long limit = nr_free_buffer_pages() / 8;
limit = max(limit, 128UL);
net->ipv4.sysctl_tcp_mem[0] = limit / 4 * 3;
@@ -3287,7 +3297,8 @@ void __init tcp_init(void)
sysctl_max_syn_backlog = max(128, cnt / 256);
tcp_init_mem(&init_net);
- limit = nr_free_buffer_pages() / 8;
+ /* Set per-socket limits to no more than 1/128 the pressure threshold */
+ limit = nr_free_buffer_pages() << (PAGE_SHIFT - 10);
limit = max(limit, 128UL);
max_share = min(4UL*1024*1024, limit);
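
tcp_init_mem() now owns the sysctl_tcp_mem sizing per netns (which is
why the duplicate block left sysctl_net_ipv4.c above), while tcp_init()
computes the per-socket cap with an explicit unit conversion. As an
arithmetic sanity check on that shift (illustrative only):

	/* one page is (1 << PAGE_SHIFT) bytes, i.e. 1 << (PAGE_SHIFT - 10)
	 * KiB; with 4 KiB pages (PAGE_SHIFT = 12), pages << 2 == pages * 4
	 */
	unsigned long pages = nr_free_buffer_pages();
	unsigned long limit_kb = pages << (PAGE_SHIFT - 10);	/* pages -> KiB */
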
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 976034f82320..53c8ce4046b2 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1307,25 +1307,26 @@ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
return in_sack;
}
-static u8 tcp_sacktag_one(const struct sk_buff *skb, struct sock *sk,
- struct tcp_sacktag_state *state,
+/* Mark the given newly-SACKed range as such, adjusting counters and hints. */
+static u8 tcp_sacktag_one(struct sock *sk,
+ struct tcp_sacktag_state *state, u8 sacked,
+ u32 start_seq, u32 end_seq,
int dup_sack, int pcount)
{
struct tcp_sock *tp = tcp_sk(sk);
- u8 sacked = TCP_SKB_CB(skb)->sacked;
int fack_count = state->fack_count;
/* Account D-SACK for retransmitted packet. */
if (dup_sack && (sacked & TCPCB_RETRANS)) {
if (tp->undo_marker && tp->undo_retrans &&
- after(TCP_SKB_CB(skb)->end_seq, tp->undo_marker))
+ after(end_seq, tp->undo_marker))
tp->undo_retrans--;
if (sacked & TCPCB_SACKED_ACKED)
state->reord = min(fack_count, state->reord);
}
/* Nothing to do; acked frame is about to be dropped (was ACKed). */
- if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
+ if (!after(end_seq, tp->snd_una))
return sacked;
if (!(sacked & TCPCB_SACKED_ACKED)) {
@@ -1344,13 +1345,13 @@ static u8 tcp_sacktag_one(const struct sk_buff *skb, struct sock *sk,
/* New sack for not retransmitted frame,
* which was in hole. It is reordering.
*/
- if (before(TCP_SKB_CB(skb)->seq,
+ if (before(start_seq,
tcp_highest_sack_seq(tp)))
state->reord = min(fack_count,
state->reord);
/* SACK enhanced F-RTO (RFC4138; Appendix B) */
- if (!after(TCP_SKB_CB(skb)->end_seq, tp->frto_highmark))
+ if (!after(end_seq, tp->frto_highmark))
state->flag |= FLAG_ONLY_ORIG_SACKED;
}
@@ -1368,8 +1369,7 @@ static u8 tcp_sacktag_one(const struct sk_buff *skb, struct sock *sk,
/* Lost marker hint past SACKed? Tweak RFC3517 cnt */
if (!tcp_is_fack(tp) && (tp->lost_skb_hint != NULL) &&
- before(TCP_SKB_CB(skb)->seq,
- TCP_SKB_CB(tp->lost_skb_hint)->seq))
+ before(start_seq, TCP_SKB_CB(tp->lost_skb_hint)->seq))
tp->lost_cnt_hint += pcount;
if (fack_count > tp->fackets_out)
@@ -1388,6 +1388,9 @@ static u8 tcp_sacktag_one(const struct sk_buff *skb, struct sock *sk,
return sacked;
}
+/* Shift newly-SACKed bytes from this skb to the immediately previous
+ * already-SACKed sk_buff. Mark the newly-SACKed bytes as such.
+ */
static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
struct tcp_sacktag_state *state,
unsigned int pcount, int shifted, int mss,
@@ -1395,10 +1398,13 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
{
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *prev = tcp_write_queue_prev(sk, skb);
+ u32 start_seq = TCP_SKB_CB(skb)->seq; /* start of newly-SACKed */
+ u32 end_seq = start_seq + shifted; /* end of newly-SACKed */
BUG_ON(!pcount);
- if (skb == tp->lost_skb_hint)
+ /* Adjust hint for FACK. Non-FACK is handled in tcp_sacktag_one(). */
+ if (tcp_is_fack(tp) && (skb == tp->lost_skb_hint))
tp->lost_cnt_hint += pcount;
TCP_SKB_CB(prev)->end_seq += shifted;
@@ -1424,8 +1430,11 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
skb_shinfo(skb)->gso_type = 0;
}
- /* We discard results */
- tcp_sacktag_one(skb, sk, state, dup_sack, pcount);
+ /* Adjust counters and hints for the newly sacked sequence range but
+ * discard the return value since prev is already marked.
+ */
+ tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked,
+ start_seq, end_seq, dup_sack, pcount);
/* Difference in this won't matter, both ACKed by the same cumul. ACK */
TCP_SKB_CB(prev)->sacked |= (TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS);
@@ -1664,10 +1673,14 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
break;
if (in_sack) {
- TCP_SKB_CB(skb)->sacked = tcp_sacktag_one(skb, sk,
- state,
- dup_sack,
- tcp_skb_pcount(skb));
+ TCP_SKB_CB(skb)->sacked =
+ tcp_sacktag_one(sk,
+ state,
+ TCP_SKB_CB(skb)->sacked,
+ TCP_SKB_CB(skb)->seq,
+ TCP_SKB_CB(skb)->end_seq,
+ dup_sack,
+ tcp_skb_pcount(skb));
if (!before(TCP_SKB_CB(skb)->seq,
tcp_highest_sack_seq(tp)))
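
The signature change exists because tcp_shifted_skb() marks a range
that no longer matches the skb's own sequence numbers: once `shifted`
bytes have moved into prev, only [seq, seq + shifted) is newly SACKed.
With illustrative numbers (not from the patch):

	/* prev: [100, 200) already SACKed;  skb: [200, 300);  shifted = 50.
	 * The newly-SACKed range is [200, 250); using
	 * TCP_SKB_CB(skb)->end_seq (300) would over-account, which is
	 * exactly what the old signature permitted.
	 */
	u32 start_seq = TCP_SKB_CB(skb)->seq;	/* 200 */
	u32 end_seq   = start_seq + shifted;	/* 250 */
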
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 337ba4cca052..94d683a61cba 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -651,6 +651,11 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
arg.iov[0].iov_len, IPPROTO_TCP, 0);
arg.csumoffset = offsetof(struct tcphdr, check) / 2;
arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
+ /* When the socket is gone, all binding information is lost;
* routing might fail in this case. Use iif for oif to
* make sure we can deliver the reply.
*/
+ arg.bound_dev_if = sk ? sk->sk_bound_dev_if : inet_iif(skb);
net = dev_net(skb_dst(skb)->dev);
arg.tos = ip_hdr(skb)->tos;
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index a516d1e399df..cd2e0723266d 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -77,10 +77,7 @@ static int tcp_out_of_resources(struct sock *sk, int do_reset)
if (sk->sk_err_soft)
shift++;
- if (tcp_too_many_orphans(sk, shift)) {
- if (net_ratelimit())
- printk(KERN_INFO "Out of socket memory\n");
-
+ if (tcp_check_oom(sk, shift)) {
/* Catch exceptional cases, when connection requires reset.
* 1. Last segment was sent recently. */
if ((s32)(tcp_time_stamp - tp->lsndtime) <= TCP_TIMEWAIT_LEN ||
diff --git a/net/rxrpc/ar-key.c b/net/rxrpc/ar-key.c
index 4cba13e46ffd..ae3a035f5390 100644
--- a/net/rxrpc/ar-key.c
+++ b/net/rxrpc/ar-key.c
@@ -232,7 +232,7 @@ static int rxrpc_krb5_decode_principal(struct krb5_principal *princ,
if (toklen <= (n_parts + 1) * 4)
return -EINVAL;
- princ->name_parts = kcalloc(sizeof(char *), n_parts, GFP_KERNEL);
+ princ->name_parts = kcalloc(n_parts, sizeof(char *), GFP_KERNEL);
if (!princ->name_parts)
return -ENOMEM;
@@ -355,7 +355,7 @@ static int rxrpc_krb5_decode_tagged_array(struct krb5_tagged_data **_td,
_debug("n_elem %d", n_elem);
- td = kcalloc(sizeof(struct krb5_tagged_data), n_elem,
+ td = kcalloc(n_elem, sizeof(struct krb5_tagged_data),
GFP_KERNEL);
if (!td)
return -ENOMEM;
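
kcalloc() is declared kcalloc(size_t n, size_t size, gfp_t flags): the
element count comes first. With the arguments swapped the byte count is
the same, so this is a consistency fix rather than a functional one,
but the calls read wrongly and trip static checkers. Correct form:

	/* n elements of a given size, zeroed, with n * size overflow-checked */
	td = kcalloc(n_elem, sizeof(struct krb5_tagged_data), GFP_KERNEL);
	if (!td)
		return -ENOMEM;
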
diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
index e465064d39a3..7e267d7b9c75 100644
--- a/net/sched/sch_choke.c
+++ b/net/sched/sch_choke.c
@@ -148,8 +148,7 @@ struct choke_skb_cb {
static inline struct choke_skb_cb *choke_skb_cb(const struct sk_buff *skb)
{
- BUILD_BUG_ON(sizeof(skb->cb) <
- sizeof(struct qdisc_skb_cb) + sizeof(struct choke_skb_cb));
+ qdisc_cb_private_validate(skb, sizeof(struct choke_skb_cb));
return (struct choke_skb_cb *)qdisc_skb_cb(skb)->data;
}
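
The same substitution repeats in sch_netem.c, sch_sfb.c and sch_sfq.c
below. The new helper centralises the cb-sizing check; its presumed
definition (from include/net/sch_generic.h of this era, shown for
context and possibly differing in detail):

	static inline void qdisc_cb_private_validate(const struct sk_buff *skb,
						     int sz)
	{
		struct qdisc_skb_cb *qcb;

		BUILD_BUG_ON(sizeof(skb->cb) <
			     offsetof(struct qdisc_skb_cb, data) + sz);
		BUILD_BUG_ON(sizeof(qcb->data) < sz);
	}
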
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 2776012132ea..5da548fa7ae9 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -130,8 +130,7 @@ struct netem_skb_cb {
static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
{
- BUILD_BUG_ON(sizeof(skb->cb) <
- sizeof(struct qdisc_skb_cb) + sizeof(struct netem_skb_cb));
+ qdisc_cb_private_validate(skb, sizeof(struct netem_skb_cb));
return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
}
@@ -502,9 +501,8 @@ tfifo_dequeue:
/* if more time remaining? */
if (cb->time_to_send <= psched_get_time()) {
- skb = qdisc_dequeue_tail(sch);
- if (unlikely(!skb))
- goto qdisc_dequeue;
+ __skb_unlink(skb, &sch->q);
+ sch->qstats.backlog -= qdisc_pkt_len(skb);
#ifdef CONFIG_NET_CLS_ACT
/*
@@ -540,7 +538,6 @@ deliver:
qdisc_watchdog_schedule(&q->watchdog, cb->time_to_send);
}
-qdisc_dequeue:
if (q->qdisc) {
skb = q->qdisc->ops->dequeue(q->qdisc);
if (skb)
diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
index 96e42cae4c7a..d7eea99333e9 100644
--- a/net/sched/sch_sfb.c
+++ b/net/sched/sch_sfb.c
@@ -94,8 +94,7 @@ struct sfb_skb_cb {
static inline struct sfb_skb_cb *sfb_skb_cb(const struct sk_buff *skb)
{
- BUILD_BUG_ON(sizeof(skb->cb) <
- sizeof(struct qdisc_skb_cb) + sizeof(struct sfb_skb_cb));
+ qdisc_cb_private_validate(skb, sizeof(struct sfb_skb_cb));
return (struct sfb_skb_cb *)qdisc_skb_cb(skb)->data;
}
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 67494aef9acf..60d47180f043 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -166,9 +166,8 @@ struct sfq_skb_cb {
static inline struct sfq_skb_cb *sfq_skb_cb(const struct sk_buff *skb)
{
- BUILD_BUG_ON(sizeof(skb->cb) <
- sizeof(struct qdisc_skb_cb) + sizeof(struct sfq_skb_cb));
- return (struct sfq_skb_cb *)qdisc_skb_cb(skb)->data;
+ qdisc_cb_private_validate(skb, sizeof(struct sfq_skb_cb));
+ return (struct sfq_skb_cb *)qdisc_skb_cb(skb)->data;
}
static unsigned int sfq_hash(const struct sfq_sched_data *q,
diff --git a/net/sunrpc/auth_generic.c b/net/sunrpc/auth_generic.c
index 1426ec3d0a53..75762f346975 100644
--- a/net/sunrpc/auth_generic.c
+++ b/net/sunrpc/auth_generic.c
@@ -92,6 +92,7 @@ generic_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
if (gcred->acred.group_info != NULL)
get_group_info(gcred->acred.group_info);
gcred->acred.machine_cred = acred->machine_cred;
+ gcred->acred.principal = acred->principal;
dprintk("RPC: allocated %s cred %p for uid %d gid %d\n",
gcred->acred.machine_cred ? "machine" : "generic",
@@ -123,6 +124,17 @@ generic_destroy_cred(struct rpc_cred *cred)
call_rcu(&cred->cr_rcu, generic_free_cred_callback);
}
+static int
+machine_cred_match(struct auth_cred *acred, struct generic_cred *gcred, int flags)
+{
+ if (!gcred->acred.machine_cred ||
+ gcred->acred.principal != acred->principal ||
+ gcred->acred.uid != acred->uid ||
+ gcred->acred.gid != acred->gid)
+ return 0;
+ return 1;
+}
+
/*
* Match credentials against current process creds.
*/
@@ -132,9 +144,12 @@ generic_match(struct auth_cred *acred, struct rpc_cred *cred, int flags)
struct generic_cred *gcred = container_of(cred, struct generic_cred, gc_base);
int i;
+ if (acred->machine_cred)
+ return machine_cred_match(acred, gcred, flags);
+
if (gcred->acred.uid != acred->uid ||
gcred->acred.gid != acred->gid ||
- gcred->acred.machine_cred != acred->machine_cred)
+ gcred->acred.machine_cred != 0)
goto out_nomatch;
/* Optimisation in the case where pointers are identical... */