Diffstat (limited to 'drivers/infiniband/ulp')
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib.h            |  1
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_cm.c         | 72
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_ib.c         | 14
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c       | 22
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_multicast.c  |  6
-rw-r--r--  drivers/infiniband/ulp/iser/iscsi_iser.h        |  1
-rw-r--r--  drivers/infiniband/ulp/iser/iser_initiator.c    | 17
-rw-r--r--  drivers/infiniband/ulp/iser/iser_verbs.c        | 40
8 files changed, 76 insertions(+), 97 deletions(-)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index fd558267d1cb..d8f6bb4f53fc 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -41,7 +41,6 @@
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/workqueue.h>
-#include <linux/pci.h>
#include <linux/kref.h>
#include <linux/if_infiniband.h>
#include <linux/mutex.h>
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 3484e8ba24a4..0c4e59b906cd 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -131,7 +131,7 @@ static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev, int id, int
skb_fill_page_desc(skb, i, page, 0, PAGE_SIZE);
mapping[i + 1] = ib_dma_map_page(priv->ca, skb_shinfo(skb)->frags[i].page,
- 0, PAGE_SIZE, DMA_TO_DEVICE);
+ 0, PAGE_SIZE, DMA_FROM_DEVICE);
if (unlikely(ib_dma_mapping_error(priv->ca, mapping[i + 1])))
goto partial_error;
}
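The one-character change above is the real fix in this hunk: pages posted for the HCA to write into must be mapped DMA_FROM_DEVICE. Mapping them DMA_TO_DEVICE happens to work on cache-coherent hardware but denies device writes, or skips the right cache maintenance, on platforms where the direction matters. A minimal sketch of the corrected pattern, with an illustrative helper name:

        /* Sketch only: map one receive fragment for device writes. */
        static u64 map_rx_page(struct ib_device *ca, struct page *page)
        {
                u64 addr = ib_dma_map_page(ca, page, 0, PAGE_SIZE,
                                           DMA_FROM_DEVICE); /* device -> memory */

                if (unlikely(ib_dma_mapping_error(ca, addr)))
                        return 0; /* caller unwinds the partially built skb */
                return addr;
        }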
@@ -228,7 +228,6 @@ static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even
struct net_device *dev = cm_id->context;
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_cm_rx *p;
- unsigned long flags;
unsigned psn;
int ret;
@@ -257,9 +256,9 @@ static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even
cm_id->context = p;
p->jiffies = jiffies;
- spin_lock_irqsave(&priv->lock, flags);
+ spin_lock_irq(&priv->lock);
list_add(&p->list, &priv->cm.passive_ids);
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock_irq(&priv->lock);
queue_delayed_work(ipoib_workqueue,
&priv->cm.stale_task, IPOIB_CM_RX_DELAY);
return 0;
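This hunk sets the pattern for the many spin_lock_irqsave() to spin_lock_irq() conversions below: CM callbacks run in process context with interrupts enabled, so there is no caller IRQ state to save and the flags local can go. An illustrative contrast, not driver code:

        unsigned long flags;

        /* Caller's IRQ state unknown: must save and restore it. */
        spin_lock_irqsave(&priv->lock, flags);
        /* ... critical section ... */
        spin_unlock_irqrestore(&priv->lock, flags);

        /* IRQs known enabled (process context): cheaper pair, no local. */
        spin_lock_irq(&priv->lock);
        /* ... critical section ... */
        spin_unlock_irq(&priv->lock); /* unconditionally re-enables IRQs */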
@@ -277,7 +276,6 @@ static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id,
{
struct ipoib_cm_rx *p;
struct ipoib_dev_priv *priv;
- unsigned long flags;
int ret;
switch (event->event) {
@@ -290,14 +288,14 @@ static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id,
case IB_CM_REJ_RECEIVED:
p = cm_id->context;
priv = netdev_priv(p->dev);
- spin_lock_irqsave(&priv->lock, flags);
+ spin_lock_irq(&priv->lock);
if (list_empty(&p->list))
ret = 0; /* Connection is going away already. */
else {
list_del_init(&p->list);
ret = -ECONNRESET;
}
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock_irq(&priv->lock);
if (ret) {
ib_destroy_qp(p->qp);
kfree(p);
@@ -351,8 +349,8 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
u64 mapping[IPOIB_CM_RX_SG];
int frags;
- ipoib_dbg_data(priv, "cm recv completion: id %d, op %d, status: %d\n",
- wr_id, wc->opcode, wc->status);
+ ipoib_dbg_data(priv, "cm recv completion: id %d, status: %d\n",
+ wr_id, wc->status);
if (unlikely(wr_id >= ipoib_recvq_size)) {
ipoib_warn(priv, "cm recv completion event with wrid %d (> %d)\n",
@@ -408,7 +406,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
skb_put_frags(skb, IPOIB_CM_HEAD_SIZE, wc->byte_len, newskb);
skb->protocol = ((struct ipoib_header *) skb->data)->proto;
- skb->mac.raw = skb->data;
+ skb_reset_mac_header(skb);
skb_pull(skb, IPOIB_ENCAP_LEN);
dev->last_rx = jiffies;
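The skb->mac.raw assignment becomes skb_reset_mac_header(), the accessor introduced for the sk_buff rework that turns the raw header pointers into offsets; open-coded pointer stores would not survive that change. The receive-path idiom, consolidated:

        /* Sketch: record the MAC header at the current data pointer,
         * then strip the 4-byte IPoIB encapsulation header. */
        skb->protocol = ((struct ipoib_header *) skb->data)->proto;
        skb_reset_mac_header(skb);      /* was: skb->mac.raw = skb->data */
        skb_pull(skb, IPOIB_ENCAP_LEN);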
@@ -452,7 +450,7 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
skb->len, tx->mtu);
++priv->stats.tx_dropped;
++priv->stats.tx_errors;
- ipoib_cm_skb_too_long(dev, skb, tx->mtu - INFINIBAND_ALEN);
+ ipoib_cm_skb_too_long(dev, skb, tx->mtu - IPOIB_ENCAP_LEN);
return;
}
@@ -504,8 +502,8 @@ static void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ipoib_cm_tx *tx
struct ipoib_tx_buf *tx_req;
unsigned long flags;
- ipoib_dbg_data(priv, "cm send completion: id %d, op %d, status: %d\n",
- wr_id, wc->opcode, wc->status);
+ ipoib_dbg_data(priv, "cm send completion: id %d, status: %d\n",
+ wr_id, wc->status);
if (unlikely(wr_id >= ipoib_sendq_size)) {
ipoib_warn(priv, "cm send completion event with wrid %d (> %d)\n",
@@ -612,23 +610,22 @@ void ipoib_cm_dev_stop(struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_cm_rx *p;
- unsigned long flags;
if (!IPOIB_CM_SUPPORTED(dev->dev_addr))
return;
ib_destroy_cm_id(priv->cm.id);
- spin_lock_irqsave(&priv->lock, flags);
+ spin_lock_irq(&priv->lock);
while (!list_empty(&priv->cm.passive_ids)) {
p = list_entry(priv->cm.passive_ids.next, typeof(*p), list);
list_del_init(&p->list);
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock_irq(&priv->lock);
ib_destroy_cm_id(p->id);
ib_destroy_qp(p->qp);
kfree(p);
- spin_lock_irqsave(&priv->lock, flags);
+ spin_lock_irq(&priv->lock);
}
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock_irq(&priv->lock);
cancel_delayed_work(&priv->cm.stale_task);
}
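Note the unlock/relock dance inside the loop: ib_destroy_cm_id() and ib_destroy_qp() may sleep, so each entry is unlinked while the lock is held and destroyed with it dropped. The generic shape of the pattern, with illustrative names:

        /* Sketch: drain a locked list when per-entry teardown can sleep. */
        spin_lock_irq(&lock);
        while (!list_empty(&head)) {
                struct item *it = list_entry(head.next, struct item, list);

                list_del_init(&it->list);  /* unlink while protected */
                spin_unlock_irq(&lock);
                destroy_item(it);          /* may sleep: lock is dropped */
                spin_lock_irq(&lock);      /* re-acquire and rescan */
        }
        spin_unlock_irq(&lock);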
@@ -642,7 +639,6 @@ static int ipoib_cm_rep_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even
struct ib_qp_attr qp_attr;
int qp_attr_mask, ret;
struct sk_buff *skb;
- unsigned long flags;
p->mtu = be32_to_cpu(data->mtu);
@@ -680,12 +676,12 @@ static int ipoib_cm_rep_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even
skb_queue_head_init(&skqueue);
- spin_lock_irqsave(&priv->lock, flags);
+ spin_lock_irq(&priv->lock);
set_bit(IPOIB_FLAG_OPER_UP, &p->flags);
if (p->neigh)
while ((skb = __skb_dequeue(&p->neigh->queue)))
__skb_queue_tail(&skqueue, skb);
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock_irq(&priv->lock);
while ((skb = __skb_dequeue(&skqueue))) {
skb->dev = p->dev;
@@ -895,7 +891,6 @@ static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
struct ipoib_dev_priv *priv = netdev_priv(tx->dev);
struct net_device *dev = priv->dev;
struct ipoib_neigh *neigh;
- unsigned long flags;
int ret;
switch (event->event) {
@@ -914,7 +909,7 @@ static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
case IB_CM_REJ_RECEIVED:
case IB_CM_TIMEWAIT_EXIT:
ipoib_dbg(priv, "CM error %d.\n", event->event);
- spin_lock_irqsave(&priv->tx_lock, flags);
+ spin_lock_irq(&priv->tx_lock);
spin_lock(&priv->lock);
neigh = tx->neigh;
@@ -934,7 +929,7 @@ static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
}
spin_unlock(&priv->lock);
- spin_unlock_irqrestore(&priv->tx_lock, flags);
+ spin_unlock_irq(&priv->tx_lock);
break;
default:
break;
@@ -1023,21 +1018,20 @@ static void ipoib_cm_tx_reap(struct work_struct *work)
struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
cm.reap_task);
struct ipoib_cm_tx *p;
- unsigned long flags;
- spin_lock_irqsave(&priv->tx_lock, flags);
+ spin_lock_irq(&priv->tx_lock);
spin_lock(&priv->lock);
while (!list_empty(&priv->cm.reap_list)) {
p = list_entry(priv->cm.reap_list.next, typeof(*p), list);
list_del(&p->list);
spin_unlock(&priv->lock);
- spin_unlock_irqrestore(&priv->tx_lock, flags);
+ spin_unlock_irq(&priv->tx_lock);
ipoib_cm_tx_destroy(p);
- spin_lock_irqsave(&priv->tx_lock, flags);
+ spin_lock_irq(&priv->tx_lock);
spin_lock(&priv->lock);
}
spin_unlock(&priv->lock);
- spin_unlock_irqrestore(&priv->tx_lock, flags);
+ spin_unlock_irq(&priv->tx_lock);
}
static void ipoib_cm_skb_reap(struct work_struct *work)
@@ -1046,15 +1040,14 @@ static void ipoib_cm_skb_reap(struct work_struct *work)
cm.skb_task);
struct net_device *dev = priv->dev;
struct sk_buff *skb;
- unsigned long flags;
unsigned mtu = priv->mcast_mtu;
- spin_lock_irqsave(&priv->tx_lock, flags);
+ spin_lock_irq(&priv->tx_lock);
spin_lock(&priv->lock);
while ((skb = skb_dequeue(&priv->cm.skb_queue))) {
spin_unlock(&priv->lock);
- spin_unlock_irqrestore(&priv->tx_lock, flags);
+ spin_unlock_irq(&priv->tx_lock);
if (skb->protocol == htons(ETH_P_IP))
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
@@ -1062,11 +1055,11 @@ static void ipoib_cm_skb_reap(struct work_struct *work)
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev);
#endif
dev_kfree_skb_any(skb);
- spin_lock_irqsave(&priv->tx_lock, flags);
+ spin_lock_irq(&priv->tx_lock);
spin_lock(&priv->lock);
}
spin_unlock(&priv->lock);
- spin_unlock_irqrestore(&priv->tx_lock, flags);
+ spin_unlock_irq(&priv->tx_lock);
}
void ipoib_cm_skb_too_long(struct net_device* dev, struct sk_buff *skb,
@@ -1088,23 +1081,22 @@ static void ipoib_cm_stale_task(struct work_struct *work)
struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
cm.stale_task.work);
struct ipoib_cm_rx *p;
- unsigned long flags;
- spin_lock_irqsave(&priv->lock, flags);
+ spin_lock_irq(&priv->lock);
while (!list_empty(&priv->cm.passive_ids)) {
/* List is sorted by LRU, start from tail,
* stop when we see a recently used entry */
p = list_entry(priv->cm.passive_ids.prev, typeof(*p), list);
- if (time_after_eq(jiffies, p->jiffies + IPOIB_CM_RX_TIMEOUT))
+ if (time_before_eq(jiffies, p->jiffies + IPOIB_CM_RX_TIMEOUT))
break;
list_del_init(&p->list);
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock_irq(&priv->lock);
ib_destroy_cm_id(p->id);
ib_destroy_qp(p->qp);
kfree(p);
- spin_lock_irqsave(&priv->lock, flags);
+ spin_lock_irq(&priv->lock);
}
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock_irq(&priv->lock);
}
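The time_after_eq() to time_before_eq() switch fixes an inverted test: the old code stopped scanning as soon as it hit a connection idle past the timeout (exactly the one it should reap) and instead reaped recently used ones. With the list LRU-sorted and scanned from the tail, the corrected, wraparound-safe check is:

        #include <linux/jiffies.h>

        /* Sketch: an entry is fresh if its last-use stamp plus the
         * timeout still lies in the future; the jiffies macros are
         * safe across counter wraparound. */
        static bool rx_entry_is_fresh(unsigned long last_used)
        {
                return time_before_eq(jiffies, last_used + IPOIB_CM_RX_TIMEOUT);
        }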
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index f2aa923ddbea..1bdb9101911a 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -172,8 +172,8 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
struct sk_buff *skb;
u64 addr;
- ipoib_dbg_data(priv, "recv completion: id %d, op %d, status: %d\n",
- wr_id, wc->opcode, wc->status);
+ ipoib_dbg_data(priv, "recv completion: id %d, status: %d\n",
+ wr_id, wc->status);
if (unlikely(wr_id >= ipoib_recvq_size)) {
ipoib_warn(priv, "recv completion event with wrid %d (> %d)\n",
@@ -216,7 +216,7 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
if (wc->slid != priv->local_lid ||
wc->src_qp != priv->qp->qp_num) {
skb->protocol = ((struct ipoib_header *) skb->data)->proto;
- skb->mac.raw = skb->data;
+ skb_reset_mac_header(skb);
skb_pull(skb, IPOIB_ENCAP_LEN);
dev->last_rx = jiffies;
@@ -245,8 +245,8 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
struct ipoib_tx_buf *tx_req;
unsigned long flags;
- ipoib_dbg_data(priv, "send completion: id %d, op %d, status: %d\n",
- wr_id, wc->opcode, wc->status);
+ ipoib_dbg_data(priv, "send completion: id %d, status: %d\n",
+ wr_id, wc->status);
if (unlikely(wr_id >= ipoib_sendq_size)) {
ipoib_warn(priv, "send completion event with wrid %d (> %d)\n",
@@ -328,9 +328,9 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
struct ipoib_tx_buf *tx_req;
u64 addr;
- if (unlikely(skb->len > priv->mcast_mtu + INFINIBAND_ALEN)) {
+ if (unlikely(skb->len > priv->mcast_mtu + IPOIB_ENCAP_LEN)) {
ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
- skb->len, priv->mcast_mtu + INFINIBAND_ALEN);
+ skb->len, priv->mcast_mtu + IPOIB_ENCAP_LEN);
++priv->stats.tx_dropped;
++priv->stats.tx_errors;
ipoib_cm_skb_too_long(dev, skb, priv->mcast_mtu);
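The INFINIBAND_ALEN to IPOIB_ENCAP_LEN substitutions correct the length budget: the 20-byte pseudo hardware address (INFINIBAND_ALEN) is consumed when building the work request and never travels in the payload, while the 4-byte encapsulation header (IPOIB_ENCAP_LEN) does. So the largest sendable skb is mtu + 4 bytes, not mtu + 20, and checking against the larger bound let oversized packets through to fail deeper in the stack:

        /* Sketch: payload plus the 4-byte encap header bounds the check. */
        if (unlikely(skb->len > priv->mcast_mtu + IPOIB_ENCAP_LEN))
                goto drop; /* too long for the multicast group's MTU */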
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index f9dbc6f68145..b4c380c5a3ba 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -380,7 +380,7 @@ static void path_rec_completion(int status,
struct net_device *dev = path->dev;
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_ah *ah = NULL;
- struct ipoib_neigh *neigh;
+ struct ipoib_neigh *neigh, *tn;
struct sk_buff_head skqueue;
struct sk_buff *skb;
unsigned long flags;
@@ -395,14 +395,10 @@ static void path_rec_completion(int status,
skb_queue_head_init(&skqueue);
if (!status) {
- struct ib_ah_attr av = {
- .dlid = be16_to_cpu(pathrec->dlid),
- .sl = pathrec->sl,
- .port_num = priv->port,
- .static_rate = pathrec->rate
- };
-
- ah = ipoib_create_ah(dev, priv->pd, &av);
+ struct ib_ah_attr av;
+
+ if (!ib_init_ah_from_path(priv->ca, priv->port, pathrec, &av))
+ ah = ipoib_create_ah(dev, priv->pd, &av);
}
spin_lock_irqsave(&priv->lock, flags);
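Replacing the four-field ib_ah_attr initializer with ib_init_ah_from_path() lets the SA core fill in everything a path record implies, notably the GRH fields needed when the path leaves the local subnet, which the open-coded version silently omitted. A hedged sketch of the call pattern (the helper name and the use of the generic ib_create_ah() are illustrative):

        /* Sketch: resolve a path record into an address handle. */
        static struct ib_ah *ah_from_path(struct ib_pd *pd, struct ib_device *ca,
                                          u8 port, struct ib_sa_path_rec *rec)
        {
                struct ib_ah_attr av;

                if (ib_init_ah_from_path(ca, port, rec, &av))
                        return NULL;           /* record was unusable */
                return ib_create_ah(pd, &av);  /* core verbs constructor */
        }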
@@ -418,7 +414,7 @@ static void path_rec_completion(int status,
while ((skb = __skb_dequeue(&path->queue)))
__skb_queue_tail(&skqueue, skb);
- list_for_each_entry(neigh, &path->neigh_list, list) {
+ list_for_each_entry_safe(neigh, tn, &path->neigh_list, list) {
kref_get(&path->ah->ref);
neigh->ah = path->ah;
memcpy(&neigh->dgid.raw, &path->pathrec.dgid.raw,
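The walker becomes list_for_each_entry_safe() because the loop body may end up unlinking the neighbour it is visiting; the _safe variant caches the successor before running the body. The idiom, with a hypothetical predicate:

        struct ipoib_neigh *neigh, *tn;

        /* Sketch: tn holds the next element, so deleting neigh in the
         * body cannot derail the iteration. */
        list_for_each_entry_safe(neigh, tn, &path->neigh_list, list) {
                if (should_drop(neigh))         /* hypothetical test */
                        list_del(&neigh->list); /* safe: successor saved */
        }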
@@ -814,7 +810,7 @@ static void ipoib_set_mcast_list(struct net_device *dev)
queue_work(ipoib_workqueue, &priv->restart_task);
}
-static void ipoib_neigh_destructor(struct neighbour *n)
+static void ipoib_neigh_cleanup(struct neighbour *n)
{
struct ipoib_neigh *neigh;
struct ipoib_dev_priv *priv = netdev_priv(n->dev);
@@ -822,7 +818,7 @@ static void ipoib_neigh_destructor(struct neighbour *n)
struct ipoib_ah *ah = NULL;
ipoib_dbg(priv,
- "neigh_destructor for %06x " IPOIB_GID_FMT "\n",
+ "neigh_cleanup for %06x " IPOIB_GID_FMT "\n",
IPOIB_QPN(n->ha),
IPOIB_GID_RAW_ARG(n->ha + 4));
@@ -874,7 +870,7 @@ void ipoib_neigh_free(struct net_device *dev, struct ipoib_neigh *neigh)
static int ipoib_neigh_setup_dev(struct net_device *dev, struct neigh_parms *parms)
{
- parms->neigh_destructor = ipoib_neigh_destructor;
+ parms->neigh_cleanup = ipoib_neigh_cleanup;
return 0;
}
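The rename follows the core networking hook it uses: per-device neighbour teardown moves from parms->neigh_destructor, which fired only at final free (potentially after the device is gone), to parms->neigh_cleanup, which fires when the core marks the neighbour dead. Registration stays a one-liner:

        /* Sketch: install the per-device cleanup hook via neigh_parms. */
        static int example_neigh_setup_dev(struct net_device *dev,
                                           struct neigh_parms *parms)
        {
                parms->neigh_cleanup = ipoib_neigh_cleanup;
                return 0;
        }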
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 56c87a81bb67..54fbead4de01 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -644,6 +644,9 @@ static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
struct ipoib_dev_priv *priv = netdev_priv(dev);
int ret = 0;
+ if (test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
+ ib_sa_free_multicast(mcast->mc);
+
if (test_and_clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
ipoib_dbg_mcast(priv, "leaving MGID " IPOIB_GID_FMT "\n",
IPOIB_GID_ARG(mcast->mcmember.mgid));
@@ -655,9 +658,6 @@ static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
ipoib_warn(priv, "ipoib_mcast_detach failed (result = %d)\n", ret);
}
- if (test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
- ib_sa_free_multicast(mcast->mc);
-
return 0;
}
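Hoisting ib_sa_free_multicast() ahead of the detach makes the SA layer drop its group state before the local QP attachment is torn down, and the test_and_clear_bit() guards keep each teardown step from running twice if leave races with a concurrent flush. The atomic-claim idiom, with a hypothetical detach wrapper:

        /* Sketch: whoever clears the bit first owns that step. */
        if (test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
                ib_sa_free_multicast(mcast->mc);  /* SA leave first */

        if (test_and_clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags))
                detach_mcast_qp(dev, mcast);      /* hypothetical wrapper */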
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index cae8c96a55f8..8960196ffb0f 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -245,7 +245,6 @@ struct iser_conn {
wait_queue_head_t wait; /* waitq for conn/disconn */
atomic_t post_recv_buf_count; /* posted rx count */
atomic_t post_send_buf_count; /* posted tx count */
- struct work_struct comperror_work; /* conn term sleepable ctx*/
char name[ISER_OBJECT_NAME_SIZE];
struct iser_page_vec *page_vec; /* represents SG to fmr maps*
* maps serialized as tx is*/
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 3261bb327281..3651072f6c1f 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -658,6 +658,7 @@ void iser_ctask_rdma_finalize(struct iscsi_iser_cmd_task *iser_ctask)
{
int deferred;
int is_rdma_aligned = 1;
+ struct iser_regd_buf *regd;
/* if we were reading, copy back to unaligned sglist,
* anyway dma_unmap and free the copy
@@ -672,20 +673,20 @@ void iser_ctask_rdma_finalize(struct iscsi_iser_cmd_task *iser_ctask)
}
if (iser_ctask->dir[ISER_DIR_IN]) {
- deferred = iser_regd_buff_release
- (&iser_ctask->rdma_regd[ISER_DIR_IN]);
+ regd = &iser_ctask->rdma_regd[ISER_DIR_IN];
+ deferred = iser_regd_buff_release(regd);
if (deferred) {
- iser_err("References remain for BUF-IN rdma reg\n");
- BUG();
+ iser_err("%d references remain for BUF-IN rdma reg\n",
+ atomic_read(&regd->ref_count));
}
}
if (iser_ctask->dir[ISER_DIR_OUT]) {
- deferred = iser_regd_buff_release
- (&iser_ctask->rdma_regd[ISER_DIR_OUT]);
+ regd = &iser_ctask->rdma_regd[ISER_DIR_OUT];
+ deferred = iser_regd_buff_release(regd);
if (deferred) {
- iser_err("References remain for BUF-OUT rdma reg\n");
- BUG();
+ iser_err("%d references remain for BUF-OUT rdma reg\n",
+ atomic_read(&regd->ref_count));
}
}
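Downgrading BUG() to a log message keeps a recoverable reference leak from halting the machine, and printing the residual count gives something to debug with. The shape of the change, generically:

        /* Sketch: warn and continue on a leak instead of panicking. */
        if (atomic_read(&regd->ref_count))
                iser_err("%d references remain for rdma reg\n",
                         atomic_read(&regd->ref_count));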
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 693b77002897..1fc967464a28 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -48,7 +48,6 @@
static void iser_cq_tasklet_fn(unsigned long data);
static void iser_cq_callback(struct ib_cq *cq, void *cq_context);
-static void iser_comp_error_worker(struct work_struct *work);
static void iser_cq_event_callback(struct ib_event *cause, void *context)
{
@@ -480,7 +479,6 @@ int iser_conn_init(struct iser_conn **ibconn)
init_waitqueue_head(&ib_conn->wait);
atomic_set(&ib_conn->post_recv_buf_count, 0);
atomic_set(&ib_conn->post_send_buf_count, 0);
- INIT_WORK(&ib_conn->comperror_work, iser_comp_error_worker);
INIT_LIST_HEAD(&ib_conn->conn_list);
spin_lock_init(&ib_conn->lock);
@@ -753,26 +751,6 @@ int iser_post_send(struct iser_desc *tx_desc)
return ret_val;
}
-static void iser_comp_error_worker(struct work_struct *work)
-{
- struct iser_conn *ib_conn =
- container_of(work, struct iser_conn, comperror_work);
-
- /* getting here when the state is UP means that the conn is being *
- * terminated asynchronously from the iSCSI layer's perspective. */
- if (iser_conn_state_comp_exch(ib_conn, ISER_CONN_UP,
- ISER_CONN_TERMINATING))
- iscsi_conn_failure(ib_conn->iser_conn->iscsi_conn,
- ISCSI_ERR_CONN_FAILED);
-
- /* complete the termination process if disconnect event was delivered *
- * note there are no more non completed posts to the QP */
- if (ib_conn->disc_evt_flag) {
- ib_conn->state = ISER_CONN_DOWN;
- wake_up_interruptible(&ib_conn->wait);
- }
-}
-
static void iser_handle_comp_error(struct iser_desc *desc)
{
struct iser_dto *dto = &desc->dto;
@@ -791,8 +769,22 @@ static void iser_handle_comp_error(struct iser_desc *desc)
}
if (atomic_read(&ib_conn->post_recv_buf_count) == 0 &&
- atomic_read(&ib_conn->post_send_buf_count) == 0)
- schedule_work(&ib_conn->comperror_work);
+ atomic_read(&ib_conn->post_send_buf_count) == 0) {
+ /* getting here when the state is UP means that the conn is *
+ * being terminated asynchronously from the iSCSI layer's *
+ * perspective. */
+ if (iser_conn_state_comp_exch(ib_conn, ISER_CONN_UP,
+ ISER_CONN_TERMINATING))
+ iscsi_conn_failure(ib_conn->iser_conn->iscsi_conn,
+ ISCSI_ERR_CONN_FAILED);
+
+ /* complete the termination process if disconnect event was delivered *
+ * note there are no more non completed posts to the QP */
+ if (ib_conn->disc_evt_flag) {
+ ib_conn->state = ISER_CONN_DOWN;
+ wake_up_interruptible(&ib_conn->wait);
+ }
+ }
}
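Folding the old comperror_work body into iser_handle_comp_error() drops a work-queue hop that was never needed, since the calls involved are safe from the completion path. The compare-and-exchange on the connection state still guarantees iscsi_conn_failure() fires at most once when several completions report errors; a plausible sketch of such a guard (the driver's actual iser_conn_state_comp_exch may differ):

        /* Sketch: move state cur -> next atomically; returns nonzero
         * only for the single caller that wins the transition. */
        static int conn_state_comp_exch(struct iser_conn *ib_conn,
                                        int cur, int next)
        {
                int won;

                spin_lock_bh(&ib_conn->lock);
                won = (ib_conn->state == cur);
                if (won)
                        ib_conn->state = next;
                spin_unlock_bh(&ib_conn->lock);
                return won;
        }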
static void iser_cq_tasklet_fn(unsigned long data)