-rw-r--r--  block/bsg.c                           2
-rw-r--r--  drivers/scsi/scsi_lib.c              80
-rw-r--r--  drivers/scsi/scsi_transport_fc.c      2
-rw-r--r--  drivers/scsi/scsi_transport_iscsi.c   3
-rw-r--r--  drivers/scsi/scsi_transport_srp.c     6
-rw-r--r--  drivers/scsi/sg.c                     2
-rw-r--r--  drivers/scsi/snic/snic_disc.c         2
-rw-r--r--  include/scsi/scsi_device.h            2
8 files changed, 50 insertions, 49 deletions
diff --git a/block/bsg.c b/block/bsg.c
index 7eca43f33d7f..c53f24243bf2 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -36,7 +36,7 @@ static inline struct bsg_device *to_bsg_device(struct inode *inode)
}
#define BSG_DEFAULT_CMDS 64
-#define BSG_MAX_DEVS 32768
+#define BSG_MAX_DEVS (1 << MINORBITS)
static DEFINE_IDA(bsg_minor_ida);
static struct class *bsg_class;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 496bdfc19c95..b7f78e53184a 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -2731,24 +2731,16 @@ void scsi_start_queue(struct scsi_device *sdev)
blk_mq_unquiesce_queue(sdev->request_queue);
}
-static void scsi_stop_queue(struct scsi_device *sdev, bool nowait)
+static void scsi_stop_queue(struct scsi_device *sdev)
{
/*
* The atomic variable of ->queue_stopped covers that
* blk_mq_quiesce_queue* is balanced with blk_mq_unquiesce_queue.
*
- * However, we still need to wait until quiesce is done
- * in case that queue has been stopped.
+ * The caller needs to wait until quiesce is done.
*/
- if (!cmpxchg(&sdev->queue_stopped, 0, 1)) {
- if (nowait)
- blk_mq_quiesce_queue_nowait(sdev->request_queue);
- else
- blk_mq_quiesce_queue(sdev->request_queue);
- } else {
- if (!nowait)
- blk_mq_wait_quiesce_done(sdev->request_queue->tag_set);
- }
+ if (!cmpxchg(&sdev->queue_stopped, 0, 1))
+ blk_mq_quiesce_queue_nowait(sdev->request_queue);
}
/**
@@ -2775,19 +2767,19 @@ int scsi_internal_device_block_nowait(struct scsi_device *sdev)
* request queue.
*/
if (!ret)
- scsi_stop_queue(sdev, true);
+ scsi_stop_queue(sdev);
return ret;
}
EXPORT_SYMBOL_GPL(scsi_internal_device_block_nowait);
/**
- * scsi_internal_device_block - try to transition to the SDEV_BLOCK state
+ * scsi_device_block - try to transition to the SDEV_BLOCK state
* @sdev: device to block
+ * @data: dummy argument, ignored
*
- * Pause SCSI command processing on the specified device and wait until all
- * ongoing scsi_request_fn() / scsi_queue_rq() calls have finished. May sleep.
- *
- * Returns zero if successful or a negative error code upon failure.
+ * Pause SCSI command processing on the specified device. Callers must wait
+ * until all ongoing scsi_queue_rq() calls have finished after this function
+ * returns.
*
* Note:
* This routine transitions the device to the SDEV_BLOCK state (which must be
@@ -2795,17 +2787,26 @@ EXPORT_SYMBOL_GPL(scsi_internal_device_block_nowait);
* is paused until the device leaves the SDEV_BLOCK state. See also
* scsi_internal_device_unblock().
*/
-static int scsi_internal_device_block(struct scsi_device *sdev)
+static void scsi_device_block(struct scsi_device *sdev, void *data)
{
int err;
+ enum scsi_device_state state;
mutex_lock(&sdev->state_mutex);
err = __scsi_internal_device_block_nowait(sdev);
+ state = sdev->sdev_state;
if (err == 0)
- scsi_stop_queue(sdev, false);
+ /*
+ * scsi_stop_queue() must be called with the state_mutex
+ * held. Otherwise a simultaneous scsi_start_queue() call
+ * might unquiesce the queue before we quiesce it.
+ */
+ scsi_stop_queue(sdev);
+
mutex_unlock(&sdev->state_mutex);
- return err;
+ WARN_ONCE(err, "%s: failed to block %s in state %d\n",
+ __func__, dev_name(&sdev->sdev_gendev), state);
}
/**
@@ -2888,36 +2889,35 @@ static int scsi_internal_device_unblock(struct scsi_device *sdev,
return ret;
}
-static void
-device_block(struct scsi_device *sdev, void *data)
-{
- int ret;
-
- ret = scsi_internal_device_block(sdev);
-
- WARN_ONCE(ret, "scsi_internal_device_block(%s) failed: ret = %d\n",
- dev_name(&sdev->sdev_gendev), ret);
-}
-
static int
target_block(struct device *dev, void *data)
{
if (scsi_is_target_device(dev))
starget_for_each_device(to_scsi_target(dev), NULL,
- device_block);
+ scsi_device_block);
return 0;
}
+/**
+ * scsi_block_targets - transition all SCSI child devices to SDEV_BLOCK state
+ * @dev: a parent device of one or more scsi_target devices
+ * @shost: the Scsi_Host to which this device belongs
+ *
+ * Iterate over all children of @dev, which should be scsi_target devices,
+ * and switch all subordinate scsi devices to SDEV_BLOCK state. Wait for
+ * ongoing scsi_queue_rq() calls to finish. May sleep.
+ *
+ * Note:
+ * @dev must not itself be a scsi_target device.
+ */
void
-scsi_target_block(struct device *dev)
+scsi_block_targets(struct Scsi_Host *shost, struct device *dev)
{
- if (scsi_is_target_device(dev))
- starget_for_each_device(to_scsi_target(dev), NULL,
- device_block);
- else
- device_for_each_child(dev, NULL, target_block);
+ WARN_ON_ONCE(scsi_is_target_device(dev));
+ device_for_each_child(dev, NULL, target_block);
+ blk_mq_wait_quiesce_done(&shost->tag_set);
}
-EXPORT_SYMBOL_GPL(scsi_target_block);
+EXPORT_SYMBOL_GPL(scsi_block_targets);
static void
device_unblock(struct scsi_device *sdev, void *data)
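For context, the new calling convention is sketched below. This is an illustrative, hypothetical caller (struct example_rport and its dev member are invented for the example, not part of the patch); the real conversions follow in the transport class hunks.

/*
 * Hedged sketch of the new interface, not part of the patch. Assumes
 * "rport" is a hypothetical transport object whose ->dev is the parent
 * of one or more scsi_target devices belonging to "shost".
 */
static void example_block_rport(struct Scsi_Host *shost,
				struct example_rport *rport)
{
	/*
	 * Previously: scsi_target_block(&rport->dev), which waited for
	 * quiesce once per scsi_device. The new helper quiesces each
	 * child request queue without waiting and then waits a single
	 * time on the host-wide tag set.
	 */
	scsi_block_targets(shost, &rport->dev);

	/* All scsi_queue_rq() calls for child devices have now finished. */
}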
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 64ff2629eaf9..b04075f19445 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -3451,7 +3451,7 @@ fc_remote_port_delete(struct fc_rport *rport)
spin_unlock_irqrestore(shost->host_lock, flags);
- scsi_target_block(&rport->dev);
+ scsi_block_targets(shost, &rport->dev);
/* see if we need to kill io faster than waiting for device loss */
if ((rport->fast_io_fail_tmo != -1) &&
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index b9b97300e3b3..e527ece12453 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -1943,13 +1943,14 @@ static void __iscsi_block_session(struct work_struct *work)
struct iscsi_cls_session *session =
container_of(work, struct iscsi_cls_session,
block_work);
+ struct Scsi_Host *shost = iscsi_session_to_shost(session);
unsigned long flags;
ISCSI_DBG_TRANS_SESSION(session, "Blocking session\n");
spin_lock_irqsave(&session->lock, flags);
session->state = ISCSI_SESSION_FAILED;
spin_unlock_irqrestore(&session->lock, flags);
- scsi_target_block(&session->dev);
+ scsi_block_targets(shost, &session->dev);
ISCSI_DBG_TRANS_SESSION(session, "Completed SCSI target blocking\n");
if (session->recovery_tmo >= 0)
queue_delayed_work(session->workq,
diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
index 87d0fb8dc503..64f6b22e8cc0 100644
--- a/drivers/scsi/scsi_transport_srp.c
+++ b/drivers/scsi/scsi_transport_srp.c
@@ -396,7 +396,7 @@ static void srp_reconnect_work(struct work_struct *work)
}
/*
- * scsi_target_block() must have been called before this function is
+ * scsi_block_targets() must have been called before this function is
* called to guarantee that no .queuecommand() calls are in progress.
*/
static void __rport_fail_io_fast(struct srp_rport *rport)
@@ -480,7 +480,7 @@ static void __srp_start_tl_fail_timers(struct srp_rport *rport)
srp_rport_set_state(rport, SRP_RPORT_BLOCKED) == 0) {
pr_debug("%s new state: %d\n", dev_name(&shost->shost_gendev),
rport->state);
- scsi_target_block(&shost->shost_gendev);
+ scsi_block_targets(shost, &shost->shost_gendev);
if (fast_io_fail_tmo >= 0)
queue_delayed_work(system_long_wq,
&rport->fast_io_fail_work,
@@ -548,7 +548,7 @@ int srp_reconnect_rport(struct srp_rport *rport)
* later is ok though, scsi_internal_device_unblock_nowait()
* treats SDEV_TRANSPORT_OFFLINE like SDEV_BLOCK.
*/
- scsi_target_block(&shost->shost_gendev);
+ scsi_block_targets(shost, &shost->shost_gendev);
res = rport->state != SRP_RPORT_LOST ? i->f->reconnect(rport) : -ENODEV;
pr_debug("%s (state %d): transport.reconnect() returned %d\n",
dev_name(&shost->shost_gendev), rport->state, res);
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 037f8c98a6d3..6c04cf941dac 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -71,7 +71,7 @@ static int sg_proc_init(void);
#define SG_ALLOW_DIO_DEF 0
-#define SG_MAX_DEVS 32768
+#define SG_MAX_DEVS (1 << MINORBITS)
/* SG_MAX_CDB_SIZE should be 260 (spc4r37 section 3.1.30) however the type
* of sg_io_hdr::cmd_len can only represent 255. All SCSI commands greater
diff --git a/drivers/scsi/snic/snic_disc.c b/drivers/scsi/snic/snic_disc.c
index 8fbf3c1b1311..3e2e5783924d 100644
--- a/drivers/scsi/snic/snic_disc.c
+++ b/drivers/scsi/snic/snic_disc.c
@@ -214,7 +214,7 @@ snic_tgt_del(struct work_struct *work)
scsi_flush_work(shost);
/* Block IOs on child devices, stops new IOs */
- scsi_target_block(&tgt->dev);
+ scsi_block_targets(shost, &tgt->dev);
/* Cleanup IOs */
snic_tgt_scsi_abort_io(tgt);
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index b2cdb078b7bd..75b2235b99e2 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -456,7 +456,7 @@ extern void scsi_scan_target(struct device *parent, unsigned int channel,
unsigned int id, u64 lun,
enum scsi_scan_mode rescan);
extern void scsi_target_reap(struct scsi_target *);
-extern void scsi_target_block(struct device *);
+void scsi_block_targets(struct Scsi_Host *shost, struct device *dev);
extern void scsi_target_unblock(struct device *, enum scsi_device_state);
extern void scsi_remove_target(struct device *);
extern const char *scsi_device_state_name(enum scsi_device_state);
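A hedged sketch of how the new declaration pairs with the existing scsi_target_unblock() (illustrative only; "dev" and "shost" are placeholders, and error handling is omitted):

/*
 * Illustrative pairing, assuming "dev" is the parent of scsi_target
 * devices owned by "shost".
 */
static void example_quiesce_then_resume(struct Scsi_Host *shost,
					struct device *dev)
{
	scsi_block_targets(shost, dev);		/* block children, wait for quiesce */
	/* ... perform recovery or failover while I/O is blocked ... */
	scsi_target_unblock(dev, SDEV_RUNNING);	/* resume command processing */
}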