author	David S. Miller <davem@davemloft.net>	2017-12-27 11:15:14 -0500
committer	David S. Miller <davem@davemloft.net>	2017-12-27 11:15:14 -0500
commit	9f30e5c5c2a4a2cbd438eadf083ca16d9a7fdc7a (patch)
tree	24780cdc40bd7542dde866d1411ba18e6ea5ed9c /net/sched/sch_generic.c
parent	04f629f730fcd30c811777d186b15c38737eaa3c (diff)
parent	1a4bb1d14f7c0c4df418d08eb8e24d1c0e54b06a (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec-next
Steffen Klassert says:

====================
pull request (net-next): ipsec-next 2017-12-22

1) Separate ESP handling from segmentation for GRO packets.
   This unifies the IPsec GSO and non-GSO codepaths.

2) Add asynchronous callbacks for xfrm on layer 2. This adds the
   necessary infrastructure to core networking.

3) Allow the layer 2 IPsec GSO codepath to be used for software
   crypto; all the infrastructure is there now.

4) Also allow IPsec GSO with software crypto for local sockets.

5) Don't require a synchronous crypto fallback on IPsec offloading;
   it is no longer needed.

6) Check for xdo_dev_state_free and only call it if implemented.
   From Shannon Nelson.

7) Check for the required add and delete functions when a driver
   registers xdo_dev_ops. From Shannon Nelson.

8) Define xfrmdev_ops only with offload config. From Shannon Nelson.

9) Update the xfrm stats documentation. From Shannon Nelson.

Please pull or let me know if there are problems.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
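For illustration, here is a minimal sketch of the driver-facing checks
described in items 6) and 7) above. This is not the mainline code: it
assumes CONFIG_XFRM_OFFLOAD and the struct xfrmdev_ops callback names
(xdo_dev_state_add, xdo_dev_state_delete, xdo_dev_state_free) that
drivers register for IPsec offload; the two function names below are
hypothetical.

#include <linux/errno.h>
#include <linux/netdevice.h>
#include <net/xfrm.h>

/* Item 6 (sketch): xdo_dev_state_free is optional, so guard the call.
 * Assumes dev->xfrmdev_ops exists, i.e. CONFIG_XFRM_OFFLOAD is set.
 */
static void sketch_dev_state_free(struct xfrm_state *x)
{
	struct net_device *dev = x->xso.dev;

	if (dev && dev->xfrmdev_ops && dev->xfrmdev_ops->xdo_dev_state_free)
		dev->xfrmdev_ops->xdo_dev_state_free(x);
}

/* Item 7 (sketch): add and delete are mandatory, so refuse a
 * registration that omits them.
 */
static int sketch_check_xfrmdev_ops(const struct xfrmdev_ops *ops)
{
	if (!ops->xdo_dev_state_add || !ops->xdo_dev_state_delete)
		return -EINVAL;

	return 0;
}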
Diffstat (limited to 'net/sched/sch_generic.c')
-rw-r--r--	net/sched/sch_generic.c | 16 +++++++++++++++-
1 file changed, 15 insertions(+), 1 deletion(-)
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 10aaa3b615ce..28b2a7964133 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -32,6 +32,7 @@
 #include <net/pkt_sched.h>
 #include <net/dst.h>
 #include <trace/events/qdisc.h>
+#include <net/xfrm.h>
 
 /* Qdisc to use by default */
 const struct Qdisc_ops *default_qdisc_ops = &pfifo_fast_ops;
@@ -230,6 +231,8 @@ static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
 	if (unlikely(skb)) {
 		/* skb in gso_skb were already validated */
 		*validate = false;
+		if (xfrm_offload(skb))
+			*validate = true;
 		/* check the reason of requeuing without tx lock first */
 		txq = skb_get_tx_queue(txq->dev, skb);
 		if (!netif_xmit_frozen_or_stopped(txq)) {
@@ -285,6 +288,7 @@ bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
 		     spinlock_t *root_lock, bool validate)
 {
 	int ret = NETDEV_TX_BUSY;
+	bool again = false;
 
 	/* And release qdisc */
 	if (root_lock)
@@ -292,7 +296,17 @@ bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
 
 	/* Note that we validate skb (GSO, checksum, ...) outside of locks */
 	if (validate)
-		skb = validate_xmit_skb_list(skb, dev);
+		skb = validate_xmit_skb_list(skb, dev, &again);
+
+#ifdef CONFIG_XFRM_OFFLOAD
+	if (unlikely(again)) {
+		if (root_lock)
+			spin_lock(root_lock);
+
+		dev_requeue_skb(skb, q);
+		return false;
+	}
+#endif
 
 	if (likely(skb)) {
 		HARD_TX_LOCK(dev, txq, smp_processor_id());
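Taken together, the hunks above form a simple handshake. The following
condensed sketch (hypothetical function name, not the mainline code)
shows how sch_direct_xmit() now consumes the 'again' flag that
validate_xmit_skb_list() can set when IPsec crypto completes
asynchronously; dev_requeue_skb() and validate_xmit_skb_list() are the
real kernel entry points touched by this merge, everything else here
is illustrative.

#include <linux/netdevice.h>
#include <net/sch_generic.h>

/* Hypothetical condensed control flow of the new sch_direct_xmit()
 * behavior; assumes it lives in net/sched/sch_generic.c, where the
 * static helper dev_requeue_skb() is visible.
 */
static bool sketch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
			       struct net_device *dev,
			       spinlock_t *root_lock)
{
	bool again = false;

	/* The xfrm offload hook may keep the crypto request in
	 * flight, returning the skb unsent with again == true.
	 */
	skb = validate_xmit_skb_list(skb, dev, &again);

	if (unlikely(again)) {
		if (root_lock)
			spin_lock(root_lock);	/* requeue needs the qdisc lock */

		dev_requeue_skb(skb, q);	/* retried after async completion */
		return false;			/* report "not done" to the caller */
	}

	/* ... otherwise transmit under HARD_TX_LOCK as before ... */
	return true;
}

The dequeue_skb() hunk closes the loop: a requeued skb that carries an
xfrm offload state is forced through validation again on the next
dequeue, since its crypto may only now have finished.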