Diffstat (limited to 'drivers/target/iscsi')
-rw-r--r--   drivers/target/iscsi/cxgbit/cxgbit.h          1
-rw-r--r--   drivers/target/iscsi/cxgbit/cxgbit_cm.c       2
-rw-r--r--   drivers/target/iscsi/cxgbit/cxgbit_target.c  24
3 files changed, 4 insertions(+), 23 deletions(-)
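This change removes the ack_waitq wait queue and the send-window gate in the tx path: cxgbit_queue_skb() no longer sleeps in wait_event_interruptible() until cxgbit_lock_sock() sees room in the send window (before(csk->write_seq, csk->snd_una + csk->snd_win)); it now takes csk->lock and marks itself lock owner unconditionally, and cxgbit_fw4_ack() no longer needs to wake anyone when snd_una advances. A minimal sketch of the resulting entry to cxgbit_queue_skb(), reduced to the lines visible in this diff (not the complete function):

	static int cxgbit_queue_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
	{
		int ret = 0;

		/* take ownership of the socket; no longer gated on send-window space */
		spin_lock_bh(&csk->lock);
		csk->lock_owner = true;
		spin_unlock_bh(&csk->lock);

		/* bail out if the connection is gone or the task was signalled */
		if (unlikely((csk->com.state != CSK_STATE_ESTABLISHED) ||
			     signal_pending(current))) {
			__kfree_skb(skb);
			__skb_queue_purge(&csk->ppodq);
			ret = -1;
			goto unlock;
		}
		/* ... remainder unchanged, see the hunk below ... */
	}
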
diff --git a/drivers/target/iscsi/cxgbit/cxgbit.h b/drivers/target/iscsi/cxgbit/cxgbit.h
index c04cd0832dec..406903398dfd 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit.h
+++ b/drivers/target/iscsi/cxgbit/cxgbit.h
@@ -207,7 +207,6 @@ struct cxgbit_sock {
/* socket lock */
spinlock_t lock;
wait_queue_head_t waitq;
- wait_queue_head_t ack_waitq;
bool lock_owner;
struct kref kref;
u32 max_iso_npdu;
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_cm.c b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
index 493070cedbc7..518ded214e74 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_cm.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
@@ -1360,7 +1360,6 @@ cxgbit_pass_accept_req(struct cxgbit_device *cdev, struct sk_buff *skb)
cxgbit_sock_reset_wr_list(csk);
spin_lock_init(&csk->lock);
init_waitqueue_head(&csk->waitq);
- init_waitqueue_head(&csk->ack_waitq);
csk->lock_owner = false;
if (cxgbit_alloc_csk_skb(csk)) {
@@ -1887,7 +1886,6 @@ static void cxgbit_fw4_ack(struct cxgbit_sock *csk, struct sk_buff *skb)
if (csk->snd_una != snd_una) {
csk->snd_una = snd_una;
dst_confirm(csk->dst);
- wake_up(&csk->ack_waitq);
}
}
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_target.c b/drivers/target/iscsi/cxgbit/cxgbit_target.c
index fcdc4211e3c2..9b3eb2e8c92a 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_target.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_target.c
@@ -284,18 +284,6 @@ void cxgbit_push_tx_frames(struct cxgbit_sock *csk)
}
}
-static bool cxgbit_lock_sock(struct cxgbit_sock *csk)
-{
- spin_lock_bh(&csk->lock);
-
- if (before(csk->write_seq, csk->snd_una + csk->snd_win))
- csk->lock_owner = true;
-
- spin_unlock_bh(&csk->lock);
-
- return csk->lock_owner;
-}
-
static void cxgbit_unlock_sock(struct cxgbit_sock *csk)
{
struct sk_buff_head backlogq;
@@ -325,20 +313,16 @@ static int cxgbit_queue_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
{
int ret = 0;
- wait_event_interruptible(csk->ack_waitq, cxgbit_lock_sock(csk));
+ spin_lock_bh(&csk->lock);
+ csk->lock_owner = true;
+ spin_unlock_bh(&csk->lock);
if (unlikely((csk->com.state != CSK_STATE_ESTABLISHED) ||
signal_pending(current))) {
__kfree_skb(skb);
__skb_queue_purge(&csk->ppodq);
ret = -1;
- spin_lock_bh(&csk->lock);
- if (csk->lock_owner) {
- spin_unlock_bh(&csk->lock);
- goto unlock;
- }
- spin_unlock_bh(&csk->lock);
- return ret;
+ goto unlock;
}
csk->write_seq += skb->len +