author     David S. Miller <davem@davemloft.net>  2018-07-12 16:42:40 -0700
committer  David S. Miller <davem@davemloft.net>  2018-07-12 16:42:40 -0700
commit     c8c81de96be3a5a22c7d92143fb32abfb975e50c (patch)
tree       4a94b819a0719567f80c61a3e87992dad3d59d5d
parent     0761680d521559813648fb430ddeb479c97ab060 (diff)
parent     fb321f25e582ae45e6b792fb38a3fc261dc5131e (diff)
Merge branch 's390-qeth-updates'
Julian Wiedmann says:
====================
s390/qeth: updates 2018-07-11
Please apply this first batch of qeth patches for net-next. It brings the
usual cleanups and some performance improvements to the transmit paths.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
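One of the transmit-path changes in this series consolidates the per-packet TX queue choice into a single inline helper, qeth_get_tx_queue() (see the qeth_core.h hunk below), which both the L2 and L3 xmit paths now call instead of open-coding the decision. The stand-alone C sketch below only illustrates that decision order; struct fake_card and pick_tx_queue are invented stand-ins for the driver's real qeth_card state and are not part of the patch:

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the driver's card state (illustrative only). */
enum cast_type { CAST_UNICAST, CAST_MULTICAST, CAST_BROADCAST };

struct fake_card {
	bool is_iqd;            /* HiperSockets (IQD) device */
	bool do_prio_queueing;  /* ToS/precedence-based queue selection enabled */
	int  no_out_queues;
	int  default_out_queue;
};

/* Mirrors the decision order of the new qeth_get_tx_queue() helper:
 * 1. IQD device + non-unicast -> last outbound queue
 * 2. prio queueing disabled   -> configured default queue
 * 3. otherwise                -> queue derived from the packet
 *                                (placeholder for qeth_get_priority_queue())
 */
static int pick_tx_queue(const struct fake_card *card, enum cast_type cast)
{
	if (card->is_iqd && cast != CAST_UNICAST)
		return card->no_out_queues - 1;
	if (!card->do_prio_queueing)
		return card->default_out_queue;
	return 0;
}

int main(void)
{
	struct fake_card card = { .is_iqd = true, .do_prio_queueing = false,
				  .no_out_queues = 4, .default_out_queue = 2 };

	printf("multicast on IQD -> queue %d\n", pick_tx_queue(&card, CAST_MULTICAST));
	printf("unicast, no prio -> queue %d\n", pick_tx_queue(&card, CAST_UNICAST));
	return 0;
}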
-rw-r--r--  drivers/s390/net/qeth_core.h      |  22
-rw-r--r--  drivers/s390/net/qeth_core_main.c | 131
-rw-r--r--  drivers/s390/net/qeth_core_mpc.h  |   2
-rw-r--r--  drivers/s390/net/qeth_l2.h        |   5
-rw-r--r--  drivers/s390/net/qeth_l2_main.c   |  20
-rw-r--r--  drivers/s390/net/qeth_l3_main.c   | 360
6 files changed, 281 insertions, 259 deletions
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index a246a618f9a4..a932aac62d0e 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -465,7 +465,6 @@ struct qeth_qdio_out_buffer { struct sk_buff_head skb_list; int is_header[QDIO_MAX_ELEMENTS_PER_BUFFER]; - struct qaob *aob; struct qeth_qdio_out_q *q; struct qeth_qdio_out_buffer *next_pending; }; @@ -662,7 +661,6 @@ struct qeth_card_info { int portno; enum qeth_card_types type; enum qeth_link_types link_type; - int is_multicast_different; int initial_mtu; int max_mtu; int broadcast_capable; @@ -935,6 +933,19 @@ static inline int qeth_send_simple_setassparms_v6(struct qeth_card *card, data, QETH_PROT_IPV6); } +int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb, + int ipv); +static inline struct qeth_qdio_out_q *qeth_get_tx_queue(struct qeth_card *card, + struct sk_buff *skb, + int ipv, int cast_type) +{ + if (IS_IQD(card) && cast_type != RTN_UNICAST) + return card->qdio.out_qs[card->qdio.no_out_queues - 1]; + if (!card->qdio.do_prio_queueing) + return card->qdio.out_qs[card->qdio.default_out_queue]; + return card->qdio.out_qs[qeth_get_priority_queue(card, skb, ipv)]; +} + extern struct qeth_discipline qeth_l2_discipline; extern struct qeth_discipline qeth_l3_discipline; extern const struct attribute_group *qeth_generic_attr_groups[]; @@ -972,7 +983,6 @@ int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *, void *); struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *, enum qeth_ipa_cmds, enum qeth_prot_versions); -int qeth_query_setadapterparms(struct qeth_card *); struct sk_buff *qeth_core_get_next_skb(struct qeth_card *, struct qeth_qdio_buffer *, struct qdio_buffer_element **, int *, struct qeth_hdr **); @@ -998,11 +1008,6 @@ int qeth_query_switch_attributes(struct qeth_card *card, int qeth_send_control_data(struct qeth_card *, int, struct qeth_cmd_buffer *, int (*reply_cb)(struct qeth_card *, struct qeth_reply*, unsigned long), void *reply_param); -int qeth_bridgeport_query_ports(struct qeth_card *card, - enum qeth_sbp_roles *role, enum qeth_sbp_states *state); -int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role); -int qeth_bridgeport_an_set(struct qeth_card *card, int enable); -int qeth_get_priority_queue(struct qeth_card *, struct sk_buff *, int, int); int qeth_get_elements_no(struct qeth_card *card, struct sk_buff *skb, int extra_elems, int data_offset); int qeth_get_elements_for_frags(struct sk_buff *); @@ -1026,7 +1031,6 @@ int qeth_set_access_ctrl_online(struct qeth_card *card, int fallback); int qeth_hdr_chk_and_bounce(struct sk_buff *, struct qeth_hdr **, int); int qeth_configure_cq(struct qeth_card *, enum qeth_cq); int qeth_hw_trap(struct qeth_card *, enum qeth_diags_trap_action); -int qeth_query_ipassists(struct qeth_card *, enum qeth_prot_versions prot); void qeth_trace_features(struct qeth_card *); void qeth_close_dev(struct qeth_card *); int qeth_send_setassparms(struct qeth_card *, struct qeth_cmd_buffer *, __u16, diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index d01ac29fd986..d80972b9bfc7 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -473,7 +473,6 @@ static void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q, int bidx, if (forced_cleanup && (atomic_read(&(q->bufs[bidx]->state)) == QETH_QDIO_BUF_HANDLED_DELAYED)) { /* for recovery situations */ - q->bufs[bidx]->aob = q->bufstates[bidx].aob; 
qeth_init_qdio_out_buf(q, bidx); QETH_CARD_TEXT(q->card, 2, "clprecov"); } @@ -510,7 +509,6 @@ static void qeth_qdio_handle_aob(struct qeth_card *card, } qeth_notify_skbs(buffer->q, buffer, notification); - buffer->aob = NULL; /* Free dangling allocations. The attached skbs are handled by * qeth_cleanup_handled_pending(). */ @@ -1267,8 +1265,7 @@ static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf) } static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, - struct qeth_qdio_out_buffer *buf, - enum qeth_qdio_buffer_states newbufstate) + struct qeth_qdio_out_buffer *buf) { int i; @@ -1276,23 +1273,19 @@ static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ) atomic_dec(&queue->set_pci_flags_count); - if (newbufstate == QETH_QDIO_BUF_EMPTY) { - qeth_release_skbs(buf); - } + qeth_release_skbs(buf); + for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(queue->card); ++i) { if (buf->buffer->element[i].addr && buf->is_header[i]) kmem_cache_free(qeth_core_header_cache, buf->buffer->element[i].addr); buf->is_header[i] = 0; - buf->buffer->element[i].length = 0; - buf->buffer->element[i].addr = NULL; - buf->buffer->element[i].eflags = 0; - buf->buffer->element[i].sflags = 0; } - buf->buffer->element[15].eflags = 0; - buf->buffer->element[15].sflags = 0; + + qeth_scrub_qdio_buffer(buf->buffer, + QETH_MAX_BUFFER_ELEMENTS(queue->card)); buf->next_element_to_fill = 0; - atomic_set(&buf->state, newbufstate); + atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY); } static void qeth_clear_outq_buffers(struct qeth_qdio_out_q *q, int free) @@ -1303,7 +1296,7 @@ static void qeth_clear_outq_buffers(struct qeth_qdio_out_q *q, int free) if (!q->bufs[j]) continue; qeth_cleanup_handled_pending(q, j, 1); - qeth_clear_output_buffer(q, q->bufs[j], QETH_QDIO_BUF_EMPTY); + qeth_clear_output_buffer(q, q->bufs[j]); if (free) { kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[j]); q->bufs[j] = NULL; @@ -1544,8 +1537,6 @@ static void qeth_determine_card_type(struct qeth_card *card) card->qdio.default_out_queue = QETH_DEFAULT_QUEUE; card->info.type = CARD_RDEV(card)->id.driver_info; card->qdio.no_out_queues = QETH_MAX_QUEUES; - if (card->info.type == QETH_CARD_TYPE_IQD) - card->info.is_multicast_different = 0x0103; qeth_update_from_chp_desc(card); } @@ -2473,32 +2464,20 @@ static int qeth_ulp_setup(struct qeth_card *card) static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *q, int bidx) { - int rc; struct qeth_qdio_out_buffer *newbuf; - rc = 0; newbuf = kmem_cache_zalloc(qeth_qdio_outbuf_cache, GFP_ATOMIC); - if (!newbuf) { - rc = -ENOMEM; - goto out; - } + if (!newbuf) + return -ENOMEM; + newbuf->buffer = q->qdio_bufs[bidx]; skb_queue_head_init(&newbuf->skb_list); lockdep_set_class(&newbuf->skb_list.lock, &qdio_out_skb_queue_key); newbuf->q = q; - newbuf->aob = NULL; newbuf->next_pending = q->bufs[bidx]; atomic_set(&newbuf->state, QETH_QDIO_BUF_EMPTY); q->bufs[bidx] = newbuf; - if (q->bufstates) { - q->bufstates[bidx].user = newbuf; - QETH_CARD_TEXT_(q->card, 2, "nbs%d", bidx); - QETH_CARD_TEXT_(q->card, 2, "%lx", (long) newbuf); - QETH_CARD_TEXT_(q->card, 2, "%lx", - (long) newbuf->next_pending); - } -out: - return rc; + return 0; } static void qeth_free_qdio_out_buf(struct qeth_qdio_out_q *q) @@ -2908,8 +2887,7 @@ int qeth_init_qdio_queues(struct qeth_card *card) QDIO_MAX_BUFFERS_PER_Q); for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) { qeth_clear_output_buffer(card->qdio.out_qs[i], - card->qdio.out_qs[i]->bufs[j], - QETH_QDIO_BUF_EMPTY); + 
card->qdio.out_qs[i]->bufs[j]); } card->qdio.out_qs[i]->card = card; card->qdio.out_qs[i]->next_buf_to_fill = 0; @@ -3076,7 +3054,7 @@ static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card, return iob; } -int qeth_query_setadapterparms(struct qeth_card *card) +static int qeth_query_setadapterparms(struct qeth_card *card) { int rc; struct qeth_cmd_buffer *iob; @@ -3089,7 +3067,6 @@ int qeth_query_setadapterparms(struct qeth_card *card) rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL); return rc; } -EXPORT_SYMBOL_GPL(qeth_query_setadapterparms); static int qeth_query_ipassists_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) @@ -3129,7 +3106,8 @@ static int qeth_query_ipassists_cb(struct qeth_card *card, return 0; } -int qeth_query_ipassists(struct qeth_card *card, enum qeth_prot_versions prot) +static int qeth_query_ipassists(struct qeth_card *card, + enum qeth_prot_versions prot) { int rc; struct qeth_cmd_buffer *iob; @@ -3141,7 +3119,6 @@ int qeth_query_ipassists(struct qeth_card *card, enum qeth_prot_versions prot) rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL); return rc; } -EXPORT_SYMBOL_GPL(qeth_query_ipassists); static int qeth_query_switch_attributes_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) @@ -3180,7 +3157,6 @@ int qeth_query_switch_attributes(struct qeth_card *card, return qeth_send_ipa_cmd(card, iob, qeth_query_switch_attributes_cb, sw_info); } -EXPORT_SYMBOL_GPL(qeth_query_switch_attributes); static int qeth_query_setdiagass_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) @@ -3634,10 +3610,10 @@ out: } EXPORT_SYMBOL_GPL(qeth_configure_cq); - -static void qeth_qdio_cq_handler(struct qeth_card *card, - unsigned int qdio_err, - unsigned int queue, int first_element, int count) { +static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err, + unsigned int queue, int first_element, + int count) +{ struct qeth_qdio_q *cq = card->qdio.c_q; int i; int rc; @@ -3663,25 +3639,17 @@ static void qeth_qdio_cq_handler(struct qeth_card *card, for (i = first_element; i < first_element + count; ++i) { int bidx = i % QDIO_MAX_BUFFERS_PER_Q; struct qdio_buffer *buffer = cq->qdio_bufs[bidx]; - int e; + int e = 0; - e = 0; while ((e < QDIO_MAX_ELEMENTS_PER_BUFFER) && buffer->element[e].addr) { unsigned long phys_aob_addr; phys_aob_addr = (unsigned long) buffer->element[e].addr; qeth_qdio_handle_aob(card, phys_aob_addr); - buffer->element[e].addr = NULL; - buffer->element[e].eflags = 0; - buffer->element[e].sflags = 0; - buffer->element[e].length = 0; - ++e; } - - buffer->element[15].eflags = 0; - buffer->element[15].sflags = 0; + qeth_scrub_qdio_buffer(buffer, QDIO_MAX_ELEMENTS_PER_BUFFER); } rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, queue, card->qdio.c_q->next_buf_to_init, @@ -3760,11 +3728,7 @@ static void qeth_qdio_output_handler(struct ccw_device *ccwdev, qeth_notify_skbs(queue, buffer, TX_NOTIFY_PENDING); } - buffer->aob = queue->bufstates[bidx].aob; QETH_CARD_TEXT_(queue->card, 5, "pel%d", bidx); - QETH_CARD_TEXT(queue->card, 5, "aob"); - QETH_CARD_TEXT_(queue->card, 5, "%lx", - virt_to_phys(buffer->aob)); /* prepare the queue slot for re-use: */ qeth_scrub_qdio_buffer(buffer->buffer, @@ -3782,8 +3746,7 @@ static void qeth_qdio_output_handler(struct ccw_device *ccwdev, qeth_notify_skbs(queue, buffer, n); } - qeth_clear_output_buffer(queue, buffer, - QETH_QDIO_BUF_EMPTY); + qeth_clear_output_buffer(queue, buffer); } 
qeth_cleanup_handled_pending(queue, bidx, 0); } @@ -3810,15 +3773,11 @@ static inline int qeth_cut_iqd_prio(struct qeth_card *card, int queue_num) * Note: Function assumes that we have 4 outbound queues. */ int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb, - int ipv, int cast_type) + int ipv) { __be16 *tci; u8 tos; - if (cast_type && card->info.is_multicast_different) - return card->info.is_multicast_different & - (card->qdio.no_out_queues - 1); - switch (card->qdio.do_prio_queueing) { case QETH_PRIO_Q_ING_TOS: case QETH_PRIO_Q_ING_PREC: @@ -5887,31 +5846,13 @@ static int qeth_core_restore(struct ccwgroup_device *gdev) return 0; } -static struct ccwgroup_driver qeth_core_ccwgroup_driver = { - .driver = { - .owner = THIS_MODULE, - .name = "qeth", - }, - .ccw_driver = &qeth_ccw_driver, - .setup = qeth_core_probe_device, - .remove = qeth_core_remove_device, - .set_online = qeth_core_set_online, - .set_offline = qeth_core_set_offline, - .shutdown = qeth_core_shutdown, - .prepare = NULL, - .complete = NULL, - .freeze = qeth_core_freeze, - .thaw = qeth_core_thaw, - .restore = qeth_core_restore, -}; - static ssize_t group_store(struct device_driver *ddrv, const char *buf, size_t count) { int err; - err = ccwgroup_create_dev(qeth_core_root_dev, - &qeth_core_ccwgroup_driver, 3, buf); + err = ccwgroup_create_dev(qeth_core_root_dev, to_ccwgroupdrv(ddrv), 3, + buf); return err ? err : count; } @@ -5929,6 +5870,25 @@ static const struct attribute_group *qeth_drv_attr_groups[] = { NULL, }; +static struct ccwgroup_driver qeth_core_ccwgroup_driver = { + .driver = { + .groups = qeth_drv_attr_groups, + .owner = THIS_MODULE, + .name = "qeth", + }, + .ccw_driver = &qeth_ccw_driver, + .setup = qeth_core_probe_device, + .remove = qeth_core_remove_device, + .set_online = qeth_core_set_online, + .set_offline = qeth_core_set_offline, + .shutdown = qeth_core_shutdown, + .prepare = NULL, + .complete = NULL, + .freeze = qeth_core_freeze, + .thaw = qeth_core_thaw, + .restore = qeth_core_restore, +}; + int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { struct qeth_card *card = dev->ml_priv; @@ -6620,7 +6580,6 @@ static int __init qeth_core_init(void) rc = ccw_driver_register(&qeth_ccw_driver); if (rc) goto ccw_err; - qeth_core_ccwgroup_driver.driver.groups = qeth_drv_attr_groups; rc = ccwgroup_driver_register(&qeth_core_ccwgroup_driver); if (rc) goto ccwgroup_err; diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h index 878e62f35169..54c35224262a 100644 --- a/drivers/s390/net/qeth_core_mpc.h +++ b/drivers/s390/net/qeth_core_mpc.h @@ -64,6 +64,8 @@ enum qeth_card_types { QETH_CARD_TYPE_OSX = 2, }; +#define IS_IQD(card) ((card)->info.type == QETH_CARD_TYPE_IQD) + #define QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE 0x18 /* only the first two bytes are looked at in qeth_get_cardname_short */ enum qeth_link_types { diff --git a/drivers/s390/net/qeth_l2.h b/drivers/s390/net/qeth_l2.h index f2130051ca11..ddc615b431a8 100644 --- a/drivers/s390/net/qeth_l2.h +++ b/drivers/s390/net/qeth_l2.h @@ -14,6 +14,11 @@ extern const struct attribute_group *qeth_l2_attr_groups[]; int qeth_l2_create_device_attributes(struct device *); void qeth_l2_remove_device_attributes(struct device *); void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card); +int qeth_bridgeport_query_ports(struct qeth_card *card, + enum qeth_sbp_roles *role, + enum qeth_sbp_states *state); +int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role); +int 
qeth_bridgeport_an_set(struct qeth_card *card, int enable); int qeth_l2_vnicc_set_state(struct qeth_card *card, u32 vnicc, bool state); int qeth_l2_vnicc_get_state(struct qeth_card *card, u32 vnicc, bool *state); diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 2487f0aeb165..8ac243de7a9e 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -26,7 +26,6 @@ static int qeth_l2_set_offline(struct ccwgroup_device *); static int qeth_l2_stop(struct net_device *); -static void qeth_l2_set_rx_mode(struct net_device *); static void qeth_bridgeport_query_support(struct qeth_card *card); static void qeth_bridge_state_change(struct qeth_card *card, struct qeth_ipa_cmd *cmd); @@ -186,12 +185,12 @@ static void qeth_l2_del_all_macs(struct qeth_card *card) static int qeth_l2_get_cast_type(struct qeth_card *card, struct sk_buff *skb) { if (card->info.type == QETH_CARD_TYPE_OSN) - return RTN_UNSPEC; + return RTN_UNICAST; if (is_broadcast_ether_addr(skb->data)) return RTN_BROADCAST; if (is_multicast_ether_addr(skb->data)) return RTN_MULTICAST; - return RTN_UNSPEC; + return RTN_UNICAST; } static void qeth_l2_fill_header(struct qeth_hdr *hdr, struct sk_buff *skb, @@ -344,7 +343,6 @@ static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev, rc = qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN); kfree(tmpid); } - qeth_l2_set_rx_mode(card->dev); return rc; } @@ -770,18 +768,13 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb, int tx_bytes = skb->len; int rc; - if (card->qdio.do_prio_queueing || (cast_type && - card->info.is_multicast_different)) - queue = card->qdio.out_qs[qeth_get_priority_queue(card, skb, - ipv, cast_type)]; - else - queue = card->qdio.out_qs[card->qdio.default_out_queue]; - if ((card->state != CARD_STATE_UP) || !card->lan_online) { card->stats.tx_carrier_errors++; goto tx_drop; } + queue = qeth_get_tx_queue(card, skb, ipv, cast_type); + if (card->options.performance_stats) { card->perf_stats.outbound_cnt++; card->perf_stats.outbound_start_time = qeth_get_micros(); @@ -1125,13 +1118,12 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode) if (recovery_mode && card->info.type != QETH_CARD_TYPE_OSN) { __qeth_l2_open(card->dev); + qeth_l2_set_rx_mode(card->dev); } else { rtnl_lock(); dev_open(card->dev); rtnl_unlock(); } - /* this also sets saved unicast addresses */ - qeth_l2_set_rx_mode(card->dev); } /* let user_space know that device is online */ kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE); @@ -1877,7 +1869,6 @@ int qeth_bridgeport_query_ports(struct qeth_card *card, return rc; return qeth_bridgeport_makerc(card, &cbctl, IPA_SBP_QUERY_BRIDGE_PORTS); } -EXPORT_SYMBOL_GPL(qeth_bridgeport_query_ports); static int qeth_bridgeport_set_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) @@ -2025,7 +2016,6 @@ int qeth_bridgeport_an_set(struct qeth_card *card, int enable) rc = qdio_pnso_brinfo(schid, 0, &response, NULL, NULL); return qeth_anset_makerc(card, rc, response); } -EXPORT_SYMBOL_GPL(qeth_bridgeport_an_set); static bool qeth_bridgeport_is_in_use(struct qeth_card *card) { diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 5905dc63e256..062f62b49294 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -1978,17 +1978,17 @@ static int qeth_l3_get_cast_type(struct sk_buff *skb) (cast_type == RTN_MULTICAST) || (cast_type == RTN_ANYCAST)) return cast_type; - return RTN_UNSPEC; + return 
RTN_UNICAST; } rcu_read_unlock(); /* no neighbour (eg AF_PACKET), fall back to target's IP address ... */ if (be16_to_cpu(skb->protocol) == ETH_P_IPV6) return ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) ? - RTN_MULTICAST : RTN_UNSPEC; + RTN_MULTICAST : RTN_UNICAST; else if (be16_to_cpu(skb->protocol) == ETH_P_IP) return ipv4_is_multicast(ip_hdr(skb)->daddr) ? - RTN_MULTICAST : RTN_UNSPEC; + RTN_MULTICAST : RTN_UNICAST; /* ... and MAC address */ if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest, skb->dev->broadcast)) @@ -1997,22 +1997,21 @@ static int qeth_l3_get_cast_type(struct sk_buff *skb) return RTN_MULTICAST; /* default to unicast */ - return RTN_UNSPEC; + return RTN_UNICAST; } -static void qeth_l3_fill_af_iucv_hdr(struct qeth_card *card, - struct qeth_hdr *hdr, struct sk_buff *skb) +static void qeth_l3_fill_af_iucv_hdr(struct qeth_hdr *hdr, struct sk_buff *skb, + unsigned int data_len) { char daddr[16]; struct af_iucv_trans_hdr *iucv_hdr; memset(hdr, 0, sizeof(struct qeth_hdr)); hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3; - hdr->hdr.l3.ext_flags = 0; - hdr->hdr.l3.length = skb->len - ETH_HLEN; + hdr->hdr.l3.length = data_len; hdr->hdr.l3.flags = QETH_HDR_IPV6 | QETH_CAST_UNICAST; - iucv_hdr = (struct af_iucv_trans_hdr *) (skb->data + ETH_HLEN); + iucv_hdr = (struct af_iucv_trans_hdr *)(skb_mac_header(skb) + ETH_HLEN); memset(daddr, 0, sizeof(daddr)); daddr[0] = 0xfe; daddr[1] = 0x80; @@ -2051,6 +2050,12 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, hdr->hdr.l3.vlan_id = skb_vlan_tag_get(skb); } + if (!skb_is_gso(skb) && skb->ip_summed == CHECKSUM_PARTIAL) { + qeth_tx_csum(skb, &hdr->hdr.l3.ext_flags, ipv); + if (card->options.performance_stats) + card->perf_stats.tx_csum++; + } + /* OSA only: */ if (!ipv) { hdr->hdr.l3.flags = QETH_HDR_PASSTHRU; @@ -2156,104 +2161,141 @@ static int qeth_l3_get_elements_no_tso(struct qeth_card *card, return elements; } -static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb, - struct net_device *dev) +static int qeth_l3_xmit_offload(struct qeth_card *card, struct sk_buff *skb, + struct qeth_qdio_out_q *queue, int ipv, + int cast_type) { - int rc; - __be16 *tag; + const unsigned int hw_hdr_len = sizeof(struct qeth_hdr); + unsigned int frame_len, nr_frags; + unsigned char eth_hdr[ETH_HLEN]; + unsigned int hdr_elements = 0; struct qeth_hdr *hdr = NULL; - int hdr_elements = 0; - int elements; - struct qeth_card *card = dev->ml_priv; - struct sk_buff *new_skb = NULL; - int ipv = qeth_get_ip_version(skb); - int cast_type = qeth_l3_get_cast_type(skb); - struct qeth_qdio_out_q *queue = - card->qdio.out_qs[card->qdio.do_prio_queueing - || (cast_type && card->info.is_multicast_different) ? 
- qeth_get_priority_queue(card, skb, ipv, cast_type) : - card->qdio.default_out_queue]; - int tx_bytes = skb->len; + int elements, push_len, rc; unsigned int hd_len = 0; - bool use_tso; - int data_offset = -1; - unsigned int nr_frags; - - if (((card->info.type == QETH_CARD_TYPE_IQD) && - (((card->options.cq != QETH_CQ_ENABLED) && !ipv) || - ((card->options.cq == QETH_CQ_ENABLED) && - (be16_to_cpu(skb->protocol) != ETH_P_AF_IUCV)))) || - card->options.sniffer) - goto tx_drop; - if ((card->state != CARD_STATE_UP) || !card->lan_online) { - card->stats.tx_carrier_errors++; - goto tx_drop; + /* compress skb to fit into one IO buffer: */ + if (!qeth_get_elements_no(card, skb, 0, 0)) { + rc = skb_linearize(skb); + + if (card->options.performance_stats) { + if (rc) + card->perf_stats.tx_linfail++; + else + card->perf_stats.tx_lin++; + } + if (rc) + return rc; } - if ((cast_type == RTN_BROADCAST) && - (card->info.broadcast_capable == 0)) - goto tx_drop; + /* re-use the L2 header area for the HW header: */ + rc = skb_cow_head(skb, hw_hdr_len - ETH_HLEN); + if (rc) + return rc; + skb_copy_from_linear_data(skb, eth_hdr, ETH_HLEN); + skb_pull(skb, ETH_HLEN); + frame_len = skb->len; + nr_frags = skb_shinfo(skb)->nr_frags; - if (card->options.performance_stats) { - card->perf_stats.outbound_cnt++; - card->perf_stats.outbound_start_time = qeth_get_micros(); + push_len = qeth_push_hdr(skb, &hdr, hw_hdr_len); + if (push_len < 0) + return push_len; + if (!push_len) { + /* hdr was added discontiguous from skb->data */ + hd_len = hw_hdr_len; + hdr_elements = 1; } - /* Ignore segment size from skb_is_gso(), 1 page is always used. */ - use_tso = skb_is_gso(skb) && - (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4); + elements = qeth_get_elements_no(card, skb, hdr_elements, 0); + if (!elements) { + rc = -E2BIG; + goto out; + } + elements += hdr_elements; - if (card->info.type == QETH_CARD_TYPE_IQD) { - new_skb = skb; - data_offset = ETH_HLEN; - hd_len = sizeof(*hdr); - hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC); - if (!hdr) - goto tx_drop; - hdr_elements++; - } else { - /* create a clone with writeable headroom */ - new_skb = skb_realloc_headroom(skb, sizeof(struct qeth_hdr_tso) - + VLAN_HLEN); - if (!new_skb) - goto tx_drop; + if (skb->protocol == htons(ETH_P_AF_IUCV)) + qeth_l3_fill_af_iucv_hdr(hdr, skb, frame_len); + else + qeth_l3_fill_header(card, hdr, skb, ipv, cast_type, frame_len); - if (ipv == 4) { - skb_pull(new_skb, ETH_HLEN); + if (IS_IQD(card)) { + rc = qeth_do_send_packet_fast(queue, skb, hdr, 0, hd_len); + } else { + /* TODO: drop skb_orphan() once TX completion is fast enough */ + skb_orphan(skb); + rc = qeth_do_send_packet(card, queue, skb, hdr, 0, hd_len, + elements); + } +out: + if (!rc) { + if (card->options.performance_stats && nr_frags) { + card->perf_stats.sg_skbs_sent++; + /* nr_frags + skb->data */ + card->perf_stats.sg_frags_sent += nr_frags + 1; } - - if (ipv != 4 && skb_vlan_tag_present(new_skb)) { - skb_push(new_skb, VLAN_HLEN); - skb_copy_to_linear_data(new_skb, new_skb->data + 4, 4); - skb_copy_to_linear_data_offset(new_skb, 4, - new_skb->data + 8, 4); - skb_copy_to_linear_data_offset(new_skb, 8, - new_skb->data + 12, 4); - tag = (__be16 *)(new_skb->data + 12); - *tag = cpu_to_be16(ETH_P_8021Q); - *(tag + 1) = cpu_to_be16(skb_vlan_tag_get(new_skb)); + } else { + if (!push_len) + kmem_cache_free(qeth_core_header_cache, hdr); + if (rc == -EBUSY) { + /* roll back to ETH header */ + skb_pull(skb, push_len); + skb_push(skb, ETH_HLEN); + skb_copy_to_linear_data(skb, eth_hdr, 
ETH_HLEN); } } + return rc; +} - netif_stop_queue(dev); +static int qeth_l3_xmit(struct qeth_card *card, struct sk_buff *skb, + struct qeth_qdio_out_q *queue, int ipv, int cast_type) +{ + unsigned int hd_len, nr_frags; + int elements, len, rc; + __be16 *tag; + struct qeth_hdr *hdr = NULL; + int hdr_elements = 0; + struct sk_buff *new_skb = NULL; + int tx_bytes = skb->len; + bool use_tso; + + /* Ignore segment size from skb_is_gso(), 1 page is always used. */ + use_tso = skb_is_gso(skb) && + (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4); + + /* create a clone with writeable headroom */ + new_skb = skb_realloc_headroom(skb, sizeof(struct qeth_hdr_tso) + + VLAN_HLEN); + if (!new_skb) + return -ENOMEM; + + if (ipv == 4) { + skb_pull(new_skb, ETH_HLEN); + } else if (skb_vlan_tag_present(new_skb)) { + skb_push(new_skb, VLAN_HLEN); + skb_copy_to_linear_data(new_skb, new_skb->data + 4, 4); + skb_copy_to_linear_data_offset(new_skb, 4, + new_skb->data + 8, 4); + skb_copy_to_linear_data_offset(new_skb, 8, + new_skb->data + 12, 4); + tag = (__be16 *)(new_skb->data + 12); + *tag = cpu_to_be16(ETH_P_8021Q); + *(tag + 1) = cpu_to_be16(skb_vlan_tag_get(new_skb)); + } /* fix hardware limitation: as long as we do not have sbal * chaining we can not send long frag lists */ - if ((card->info.type != QETH_CARD_TYPE_IQD) && - ((use_tso && !qeth_l3_get_elements_no_tso(card, new_skb, 1)) || - (!use_tso && !qeth_get_elements_no(card, new_skb, 0, 0)))) { - int lin_rc = skb_linearize(new_skb); + if ((use_tso && !qeth_l3_get_elements_no_tso(card, new_skb, 1)) || + (!use_tso && !qeth_get_elements_no(card, new_skb, 0, 0))) { + rc = skb_linearize(new_skb); if (card->options.performance_stats) { - if (lin_rc) + if (rc) card->perf_stats.tx_linfail++; else card->perf_stats.tx_lin++; } - if (lin_rc) - goto tx_drop; + if (rc) + goto out; } nr_frags = skb_shinfo(new_skb)->nr_frags; @@ -2265,60 +2307,37 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb, qeth_tso_fill_header(card, hdr, new_skb); hdr_elements++; } else { - if (data_offset < 0) { - hdr = skb_push(new_skb, sizeof(struct qeth_hdr)); - qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type, - new_skb->len - - sizeof(struct qeth_hdr)); - } else { - if (be16_to_cpu(new_skb->protocol) == ETH_P_AF_IUCV) - qeth_l3_fill_af_iucv_hdr(card, hdr, new_skb); - else { - qeth_l3_fill_header(card, hdr, new_skb, ipv, - cast_type, - new_skb->len - data_offset); - } - } - - if (new_skb->ip_summed == CHECKSUM_PARTIAL) { - qeth_tx_csum(new_skb, &hdr->hdr.l3.ext_flags, ipv); - if (card->options.performance_stats) - card->perf_stats.tx_csum++; - } + hdr = skb_push(new_skb, sizeof(struct qeth_hdr)); + qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type, + new_skb->len - sizeof(struct qeth_hdr)); } elements = use_tso ? qeth_l3_get_elements_no_tso(card, new_skb, hdr_elements) : - qeth_get_elements_no(card, new_skb, hdr_elements, - (data_offset > 0) ? 
data_offset : 0); + qeth_get_elements_no(card, new_skb, hdr_elements, 0); if (!elements) { - if (data_offset >= 0) - kmem_cache_free(qeth_core_header_cache, hdr); - goto tx_drop; + rc = -E2BIG; + goto out; } elements += hdr_elements; - if (card->info.type != QETH_CARD_TYPE_IQD) { - int len; - if (use_tso) { - hd_len = sizeof(struct qeth_hdr_tso) + - ip_hdrlen(new_skb) + tcp_hdrlen(new_skb); - len = hd_len; - } else { - len = sizeof(struct qeth_hdr_layer3); - } - - if (qeth_hdr_chk_and_bounce(new_skb, &hdr, len)) - goto tx_drop; - rc = qeth_do_send_packet(card, queue, new_skb, hdr, hd_len, - hd_len, elements); - } else - rc = qeth_do_send_packet_fast(queue, new_skb, hdr, data_offset, - hd_len); + if (use_tso) { + hd_len = sizeof(struct qeth_hdr_tso) + + ip_hdrlen(new_skb) + tcp_hdrlen(new_skb); + len = hd_len; + } else { + hd_len = 0; + len = sizeof(struct qeth_hdr_layer3); + } + if (qeth_hdr_chk_and_bounce(new_skb, &hdr, len)) { + rc = -EINVAL; + goto out; + } + rc = qeth_do_send_packet(card, queue, new_skb, hdr, hd_len, hd_len, + elements); +out: if (!rc) { - card->stats.tx_packets++; - card->stats.tx_bytes += tx_bytes; if (new_skb != skb) dev_kfree_skb_any(skb); if (card->options.performance_stats) { @@ -2332,30 +2351,68 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb, card->perf_stats.sg_frags_sent += nr_frags + 1; } } - rc = NETDEV_TX_OK; } else { - if (data_offset >= 0) - kmem_cache_free(qeth_core_header_cache, hdr); + if (new_skb != skb) + dev_kfree_skb_any(new_skb); + } + return rc; +} - if (rc == -EBUSY) { - if (new_skb != skb) - dev_kfree_skb_any(new_skb); - return NETDEV_TX_BUSY; - } else +static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + int cast_type = qeth_l3_get_cast_type(skb); + struct qeth_card *card = dev->ml_priv; + int ipv = qeth_get_ip_version(skb); + struct qeth_qdio_out_q *queue; + int tx_bytes = skb->len; + int rc; + + if (IS_IQD(card)) { + if (card->options.sniffer) + goto tx_drop; + if ((card->options.cq != QETH_CQ_ENABLED && !ipv) || + (card->options.cq == QETH_CQ_ENABLED && + skb->protocol != htons(ETH_P_AF_IUCV))) goto tx_drop; } - netif_wake_queue(dev); - if (card->options.performance_stats) - card->perf_stats.outbound_time += qeth_get_micros() - - card->perf_stats.outbound_start_time; - return rc; + if (card->state != CARD_STATE_UP || !card->lan_online) { + card->stats.tx_carrier_errors++; + goto tx_drop; + } + + if (cast_type == RTN_BROADCAST && !card->info.broadcast_capable) + goto tx_drop; + + queue = qeth_get_tx_queue(card, skb, ipv, cast_type); + + if (card->options.performance_stats) { + card->perf_stats.outbound_cnt++; + card->perf_stats.outbound_start_time = qeth_get_micros(); + } + netif_stop_queue(dev); + + if (IS_IQD(card) || (!skb_is_gso(skb) && ipv == 4)) + rc = qeth_l3_xmit_offload(card, skb, queue, ipv, cast_type); + else + rc = qeth_l3_xmit(card, skb, queue, ipv, cast_type); + + if (!rc) { + card->stats.tx_packets++; + card->stats.tx_bytes += tx_bytes; + if (card->options.performance_stats) + card->perf_stats.outbound_time += qeth_get_micros() - + card->perf_stats.outbound_start_time; + netif_wake_queue(dev); + return NETDEV_TX_OK; + } else if (rc == -EBUSY) { + return NETDEV_TX_BUSY; + } /* else fall through */ tx_drop: card->stats.tx_dropped++; card->stats.tx_errors++; - if ((new_skb != skb) && new_skb) - dev_kfree_skb_any(new_skb); dev_kfree_skb_any(skb); netif_wake_queue(dev); return NETDEV_TX_OK; @@ -2497,9 +2554,6 @@ static int qeth_l3_setup_netdev(struct qeth_card *card) if 
(!(card->info.unique_id & UNIQUE_ID_NOT_BY_CARD)) card->dev->dev_id = card->info.unique_id & 0xffff; - card->dev->hw_features |= NETIF_F_SG; - card->dev->vlan_features |= NETIF_F_SG; - if (!card->info.guestlan) { card->dev->features |= NETIF_F_SG; card->dev->hw_features |= NETIF_F_TSO | @@ -2519,6 +2573,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card) return -ENODEV; card->dev->flags |= IFF_NOARP; card->dev->netdev_ops = &qeth_l3_netdev_ops; + rc = qeth_l3_iqd_read_initial_mac(card); if (rc) return rc; @@ -2534,12 +2589,18 @@ static int qeth_l3_setup_netdev(struct qeth_card *card) card->dev->max_mtu = ETH_MAX_MTU; card->dev->dev_port = card->info.portno; card->dev->ethtool_ops = &qeth_l3_ethtool_ops; + card->dev->priv_flags &= ~IFF_TX_SKB_SHARING; + card->dev->needed_headroom = sizeof(struct qeth_hdr) - ETH_HLEN; card->dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER; + card->dev->hw_features |= NETIF_F_SG; + card->dev->vlan_features |= NETIF_F_SG; + netif_keep_dst(card->dev); - netif_set_gso_max_size(card->dev, (QETH_MAX_BUFFER_ELEMENTS(card) - 1) * - PAGE_SIZE); + if (card->dev->hw_features & NETIF_F_TSO) + netif_set_gso_max_size(card->dev, + PAGE_SIZE * (QETH_MAX_BUFFER_ELEMENTS(card) - 1)); SET_NETDEV_DEV(card->dev, &card->gdev->dev); netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT); @@ -2666,11 +2727,12 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode) qeth_enable_hw_features(card->dev); if (recover_flag == CARD_STATE_RECOVER) { rtnl_lock(); - if (recovery_mode) + if (recovery_mode) { __qeth_l3_open(card->dev); - else + qeth_l3_set_rx_mode(card->dev); + } else { dev_open(card->dev); - qeth_l3_set_rx_mode(card->dev); + } rtnl_unlock(); } qeth_trace_features(card); |