diff options
| author | Hannes Reinecke <hare@suse.de> | 2017-11-17 07:56:27 +0100 |
|---|---|---|
| committer | Hannes Reinecke <hare@suse.com> | 2021-03-14 12:56:17 +0100 |
| commit | 65a1d16588d3c3cb7b09f0f162c571be3533afc4 (patch) | |
| tree | d77f046fc4872cb454e763a454a815bb21d83a63 | |
| parent | d5a2b439cfd4818e84e84c43c7e499d258af4500 (diff) | |
| download | scsi-devel-65a1d16588d3c3cb7b09f0f162c571be3533afc4.tar.gz | |
virtio-scsi: Add FC transport class
When a device announces an 'FC' protocol, we should be pulling
in the FC transport class to have the rports etc. set up correctly.
Signed-off-by: Hannes Reinecke <hare@suse.com>
| -rw-r--r-- | drivers/scsi/virtio_scsi.c | 351 |
1 file changed, 333 insertions, 18 deletions
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c index da4529f057476..278c72debeed8 100644 --- a/drivers/scsi/virtio_scsi.c +++ b/drivers/scsi/virtio_scsi.c @@ -22,11 +22,15 @@ #include <linux/virtio_scsi.h> #include <linux/cpu.h> #include <linux/blkdev.h> +#include <linux/delay.h> #include <scsi/scsi_host.h> #include <scsi/scsi_device.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_tcq.h> #include <scsi/scsi_devinfo.h> +#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS) +#include <scsi/scsi_transport_fc.h> +#endif #include <linux/seqlock.h> #include <linux/blk-mq-virtio.h> @@ -69,6 +73,42 @@ struct virtio_scsi_vq { struct virtqueue *vq; }; +/* + * Per-target queue state. + * + * This struct holds the data needed by the queue steering policy. When a + * target is sent multiple requests, we need to drive them to the same queue so + * that FIFO processing order is kept. However, if a target was idle, we can + * choose a queue arbitrarily. In this case the queue is chosen according to + * the current VCPU, so the driver expects the number of request queues to be + * equal to the number of VCPUs. This makes it easy and fast to select the + * queue, and also lets the driver optimize the IRQ affinity for the virtqueues + * (each virtqueue's affinity is set to the CPU that "owns" the queue). + * + * tgt_seq is held to serialize reading and writing req_vq. + * + * Decrements of reqs are never concurrent with writes of req_vq: before the + * decrement reqs will be != 0; after the decrement the virtqueue completion + * routine will not use the req_vq so it can be changed by a new request. + * Thus they can happen outside the tgt_seq, provided of course we make reqs + * an atomic_t. + */ +struct virtio_scsi_target_state { + struct list_head list; + struct fc_rport *rport; + struct virtio_scsi *vscsi; + int target_id; + bool removed; + + seqcount_t tgt_seq; + + /* Count of outstanding requests. 
*/ + atomic_t reqs; + + /* Currently active virtqueue for requests sent to this target. */ + struct virtio_scsi_vq *req_vq; +}; + /* Driver instance state */ struct virtio_scsi { struct virtio_device *vdev; @@ -83,9 +123,13 @@ struct virtio_scsi { /* Protected by event_vq lock */ bool stop_events; + int protocol; int next_target_id; + u64 wwnn; + u64 wwpn; struct work_struct rescan_work; struct virtio_scsi_cmd rescan_cmd; + struct list_head target_list; spinlock_t rescan_lock; struct virtio_scsi_vq ctrl_vq; @@ -95,6 +139,9 @@ struct virtio_scsi { static struct kmem_cache *virtscsi_cmd_cache; static mempool_t *virtscsi_cmd_pool; +#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS) +static struct scsi_transport_template *virtio_transport_template; +#endif static inline struct Scsi_Host *virtio_scsi_host(struct virtio_device *vdev) { @@ -115,6 +162,9 @@ static void virtscsi_compute_resid(struct scsi_cmnd *sc, u32 resid) static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf) { struct virtio_scsi_cmd *cmd = buf; +#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS) + struct fc_rport *rport; +#endif struct scsi_cmnd *sc = cmd->sc; struct virtio_scsi_cmd_resp *resp = &cmd->resp.cmd; @@ -122,6 +172,11 @@ static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf) "cmd %p response %u status %#02x sense_len %u\n", sc, resp->response, resp->status, resp->sense_len); +#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS) + rport = starget_to_rport(scsi_target(sc->device)); + if (rport) + tgt = rport->dd_data; +#endif sc->result = resp->status; virtscsi_compute_resid(sc, virtio32_to_cpu(vscsi->vdev, resp->resid)); switch (resp->response) { @@ -516,10 +571,11 @@ static int virtscsi_add_cmd(struct virtio_scsi_vq *vq, static void virtio_scsi_init_hdr(struct virtio_device *vdev, struct virtio_scsi_cmd_req *cmd, + int target_id, struct scsi_cmnd *sc) { cmd->lun[0] = 1; - cmd->lun[1] = sc->device->id; + cmd->lun[1] = target_id; cmd->lun[2] = (sc->device->lun >> 8) | 0x40; cmd->lun[3] = sc->device->lun 
& 0xff; cmd->tag = cpu_to_virtio64(vdev, (unsigned long)sc); @@ -531,12 +587,14 @@ static void virtio_scsi_init_hdr(struct virtio_device *vdev, #ifdef CONFIG_BLK_DEV_INTEGRITY static void virtio_scsi_init_hdr_pi(struct virtio_device *vdev, struct virtio_scsi_cmd_req_pi *cmd_pi, + int target_id, struct scsi_cmnd *sc) { struct request *rq = sc->request; struct blk_integrity *bi; - virtio_scsi_init_hdr(vdev, (struct virtio_scsi_cmd_req *)cmd_pi, sc); + virtio_scsi_init_hdr(vdev, (struct virtio_scsi_cmd_req *)cmd_pi, + target_id, sc); if (!rq || !scsi_prot_sg_count(sc)) return; @@ -563,14 +621,42 @@ static struct virtio_scsi_vq *virtscsi_pick_vq_mq(struct virtio_scsi *vscsi, return &vscsi->req_vqs[hwq]; } +static struct virtio_scsi_target_state * +virtscsi_get_target(struct virtio_scsi *vscsi, struct scsi_device *sdev, + int *target_id) +{ + struct virtio_scsi_target_state *tgt = NULL; + +#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS) + if (vscsi->protocol == SCSI_PROTOCOL_FCP) { + struct fc_rport *rport = + starget_to_rport(scsi_target(sdev)); + if (rport && rport->dd_data) { + tgt = rport->dd_data; + if (tgt && tgt->removed) + return NULL; + *target_id = tgt->target_id; + } + return tgt; + } +#endif + tgt = scsi_target(sdev)->hostdata; + if (tgt && tgt->removed) + return NULL; + *target_id = sdev->id; + return tgt; +} + static int virtscsi_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc) { struct virtio_scsi *vscsi = shost_priv(shost); struct virtio_scsi_vq *req_vq = virtscsi_pick_vq_mq(vscsi, sc); struct virtio_scsi_cmd *cmd = scsi_cmd_priv(sc); + struct virtio_scsi_target_state *tgt; bool kick; unsigned long flags; + int target_id; int req_size; int ret; @@ -586,15 +672,24 @@ static int virtscsi_queuecommand(struct Scsi_Host *shost, BUG_ON(sc->cmd_len > VIRTIO_SCSI_CDB_SIZE); + tgt = virtscsi_get_target(vscsi, sc->device, &target_id); + if (!tgt) { + sc->result = DID_NO_CONNECT << 16; + sc->scsi_done(sc); + return 0; + } + #ifdef CONFIG_BLK_DEV_INTEGRITY if 
(virtio_has_feature(vscsi->vdev, VIRTIO_SCSI_F_T10_PI)) { - virtio_scsi_init_hdr_pi(vscsi->vdev, &cmd->req.cmd_pi, sc); + virtio_scsi_init_hdr_pi(vscsi->vdev, &cmd->req.cmd_pi, + target_id, sc); memcpy(cmd->req.cmd_pi.cdb, sc->cmnd, sc->cmd_len); req_size = sizeof(cmd->req.cmd_pi); } else #endif { - virtio_scsi_init_hdr(vscsi->vdev, &cmd->req.cmd, sc); + virtio_scsi_init_hdr(vscsi->vdev, &cmd->req.cmd, + target_id, sc); memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len); req_size = sizeof(cmd->req.cmd); } @@ -648,8 +743,12 @@ static int virtscsi_device_reset(struct scsi_cmnd *sc) { struct virtio_scsi *vscsi = shost_priv(sc->device->host); struct virtio_scsi_cmd *cmd; + int target_id; sdev_printk(KERN_INFO, sc->device, "device reset\n"); + if (!virtscsi_get_target(vscsi, sc->device, &target_id)) + return FAST_IO_FAIL; + cmd = mempool_alloc(virtscsi_cmd_pool, GFP_NOIO); if (!cmd) return FAILED; @@ -660,7 +759,7 @@ static int virtscsi_device_reset(struct scsi_cmnd *sc) .subtype = cpu_to_virtio32(vscsi->vdev, VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET), .lun[0] = 1, - .lun[1] = sc->device->id, + .lun[1] = target_id, .lun[2] = (sc->device->lun >> 8) | 0x40, .lun[3] = sc->device->lun & 0xff, }; @@ -706,8 +805,12 @@ static int virtscsi_abort(struct scsi_cmnd *sc) { struct virtio_scsi *vscsi = shost_priv(sc->device->host); struct virtio_scsi_cmd *cmd; + int target_id; scmd_printk(KERN_INFO, sc, "abort\n"); + if (!virtscsi_get_target(vscsi, sc->device, &target_id)) + return FAST_IO_FAIL; + cmd = mempool_alloc(virtscsi_cmd_pool, GFP_NOIO); if (!cmd) return FAILED; @@ -717,7 +820,7 @@ static int virtscsi_abort(struct scsi_cmnd *sc) .type = VIRTIO_SCSI_T_TMF, .subtype = VIRTIO_SCSI_T_TMF_ABORT_TASK, .lun[0] = 1, - .lun[1] = sc->device->id, + .lun[1] = target_id, .lun[2] = (sc->device->lun >> 8) | 0x40, .lun[3] = sc->device->lun & 0xff, .tag = cpu_to_virtio64(vscsi->vdev, (unsigned long)sc), @@ -733,6 +836,73 @@ static int virtscsi_map_queues(struct Scsi_Host *shost) return 
blk_mq_virtio_map_queues(qmap, vscsi->vdev, 2); } +static int virtscsi_target_alloc(struct scsi_target *starget) +{ + struct Scsi_Host *sh; + struct virtio_scsi *vscsi; +#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS) + struct fc_rport *rport; +#endif + struct virtio_scsi_target_state *tgt; + +#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS) + rport = starget_to_rport(starget); + if (rport) { + tgt = rport->dd_data; + sh = rport_to_shost(rport); + vscsi = shost_priv(sh); + } else { +#endif + sh = dev_to_shost(starget->dev.parent); + vscsi = shost_priv(sh); + spin_lock_irq(&vscsi->rescan_lock); + list_for_each_entry(tgt, &vscsi->target_list, list) { + if (tgt->target_id == starget->id) { + starget->hostdata = tgt; + break; + } + } + spin_unlock_irq(&vscsi->rescan_lock); + if (!starget->hostdata) { + dev_printk(KERN_WARNING, &starget->dev, + "target %d lookup failed\n", starget->id); + return -ENODEV; + } + tgt = starget->hostdata; +#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS) + } +#endif + seqcount_init(&tgt->tgt_seq); + atomic_set(&tgt->reqs, 0); + tgt->req_vq = &vscsi->req_vqs[0]; + tgt->vscsi = vscsi; + return 0; +} + +static void virtscsi_target_destroy(struct scsi_target *starget) +{ + struct virtio_scsi_target_state *tgt = starget->hostdata; +#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS) + struct fc_rport *rport; + + rport = starget_to_rport(starget); + if (rport) { + tgt = rport->dd_data; + rport->dd_data = NULL; + } else { +#endif + tgt = starget->hostdata; + starget->hostdata = NULL; +#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS) + } +#endif + if (tgt) { + spin_lock_irq(&tgt->vscsi->rescan_lock); + list_del_init(&tgt->list); + spin_unlock_irq(&tgt->vscsi->rescan_lock); + kfree(tgt); + } +} + static void virtscsi_commit_rqs(struct Scsi_Host *shost, u16 hwq) { struct virtio_scsi *vscsi = shost_priv(shost); @@ -757,6 +927,7 @@ static void virtscsi_rescan_work(struct work_struct *work) struct Scsi_Host *sh = virtio_scsi_host(vscsi->vdev); int target_id, ret, transport; struct virtio_scsi_cmd *cmd = &vscsi->rescan_cmd; + struct 
virtio_scsi_target_state *tgt, *tmp, *old = NULL; DECLARE_COMPLETION_ONSTACK(comp); spin_lock_irq(&vscsi->rescan_lock); @@ -780,13 +951,8 @@ static void virtscsi_rescan_work(struct work_struct *work) ret = virtscsi_kick_cmd(&vscsi->ctrl_vq, cmd, sizeof(cmd->req.rescan), sizeof(cmd->resp.rescan)); if (ret < 0) { - spin_lock_irq(&vscsi->rescan_lock); - vscsi->next_target_id = -1; - spin_unlock_irq(&vscsi->rescan_lock); dev_dbg(&sh->shost_gendev, "rescan: failed to sent command\n"); - sh->sequential_scan = true; - scsi_scan_host(sh); - return; + goto out; } wait_for_completion(&comp); @@ -799,17 +965,69 @@ static void virtscsi_rescan_work(struct work_struct *work) return; } transport = virtio32_to_cpu(vscsi->vdev, cmd->resp.rescan.transport); - spin_lock_irq(&vscsi->rescan_lock); - vscsi->next_target_id = target_id + 1; - spin_unlock_irq(&vscsi->rescan_lock); shost_printk(KERN_INFO, sh, - "found %s target %d (WWN %*phN)\n", + "allocate %s target %d (WWN %*phN)\n", transport == SCSI_PROTOCOL_FCP ? 
"FC" : "SAS", target_id, 8, cmd->resp.rescan.port_wwn); - scsi_scan_target(&sh->shost_gendev, 0, target_id, - SCAN_WILD_CARD, SCSI_SCAN_INITIAL); + + tgt = kmalloc(sizeof(*tgt), GFP_KERNEL); + if (!tgt) { + shost_printk(KERN_WARNING, sh, + "rescan: out of memory for rport\n"); + goto out; + } + tgt->target_id = (target_id & 0xff); + tgt->removed = false; + spin_lock_irq(&vscsi->rescan_lock); + vscsi->next_target_id = tgt->target_id + 1; + list_for_each_entry(tmp, &vscsi->target_list, list) { + if (tgt->target_id == tmp->target_id) { + old = tmp; + break; + } + } + if (old) { + kfree(tgt); + tgt = old; + } else + list_add_tail(&tgt->list, &vscsi->target_list); + spin_unlock_irq(&vscsi->rescan_lock); +#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS) + if (transport == SCSI_PROTOCOL_FCP) { + struct fc_rport_identifiers rport_ids; + struct fc_rport *rport; + + rport_ids.node_name = wwn_to_u64(cmd->resp.rescan.node_wwn); + rport_ids.port_name = wwn_to_u64(cmd->resp.rescan.port_wwn); + rport_ids.port_id = (target_id >> 8); + rport_ids.roles = FC_RPORT_ROLE_FCP_TARGET; + rport = fc_remote_port_add(sh, 0, &rport_ids); + if (rport) { + tgt->rport = rport; + rport->dd_data = tgt; + } else { + spin_lock_irq(&vscsi->rescan_lock); + list_del(&tgt->list); + spin_unlock_irq(&vscsi->rescan_lock); + kfree(tgt); + tgt = NULL; + } + } else { +#endif + scsi_scan_target(&sh->shost_gendev, 0, tgt->target_id, + SCAN_WILD_CARD, SCSI_SCAN_INITIAL); +#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS) + } +#endif queue_work(system_freezable_wq, &vscsi->rescan_work); return; +out: + spin_lock_irq(&vscsi->rescan_lock); + vscsi->next_target_id = -1; + spin_unlock_irq(&vscsi->rescan_lock); + sh->sequential_scan = true; + scsi_scan_host(sh); + return; } static int virtscsi_scan_host(struct virtio_scsi *vscsi) @@ -843,12 +1061,18 @@ static int virtscsi_scan_host(struct virtio_scsi *vscsi) transport == SCSI_PROTOCOL_FCP ? 
"FC" : "SAS", 8, cmd->resp.rescan.node_wwn, 8, cmd->resp.rescan.port_wwn); + vscsi->protocol = transport; +#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS) + vscsi->wwnn = wwn_to_u64(cmd->resp.rescan.node_wwn); + vscsi->wwpn = wwn_to_u64(cmd->resp.rescan.port_wwn); +#endif return 0; } static void virtscsi_scan_start(struct Scsi_Host *sh) { struct virtio_scsi *vscsi = shost_priv(sh); + struct virtio_scsi_target_state *tgt; if (!sh->sequential_scan && virtscsi_scan_host(vscsi) < 0) { shost_printk(KERN_INFO, sh, @@ -865,6 +1089,27 @@ static void virtscsi_scan_start(struct Scsi_Host *sh) spin_unlock_irq(&vscsi->rescan_lock); return; } +#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS) + if (vscsi->protocol == SCSI_PROTOCOL_FCP) { + fc_host_node_name(sh) = vscsi->wwnn; + fc_host_port_name(sh) = vscsi->wwpn; + fc_host_port_id(sh) = 0x00ff00; + fc_host_port_type(sh) = FC_PORTTYPE_NPIV; + fc_host_port_state(sh) = FC_PORTSTATE_BLOCKED; + list_for_each_entry(tgt, &vscsi->target_list, list) { + if (tgt->rport) { + fc_remote_port_delete(tgt->rport); + tgt->rport = NULL; + } + tgt->removed = true; + } + } else { +#endif + list_for_each_entry(tgt, &vscsi->target_list, list) + tgt->removed = true; +#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS) + } +#endif vscsi->next_target_id = 0; dev_dbg(&sh->shost_gendev, "rescan: start\n"); spin_unlock_irq(&vscsi->rescan_lock); @@ -879,6 +1124,10 @@ int virtscsi_scan_finished(struct Scsi_Host *sh, unsigned long time) spin_lock_irq(&vscsi->rescan_lock); if (vscsi->next_target_id != -1) ret = 0; +#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS) + else if (vscsi->protocol == SCSI_PROTOCOL_FCP) + fc_host_port_state(sh) = FC_PORTSTATE_ONLINE; +#endif spin_unlock_irq(&vscsi->rescan_lock); if (!ret) flush_work(&vscsi->rescan_work); @@ -903,6 +1152,36 @@ static struct device_attribute *virtscsi_shost_attrs[] = { NULL, }; +#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS) +static int virtscsi_issue_lip(struct Scsi_Host *shost) +{ + struct virtio_scsi *vscsi = shost_priv(shost); + unsigned long start = 
jiffies; + struct virtio_scsi_target_state *tgt; + + spin_lock_irq(&vscsi->rescan_lock); + if (vscsi->next_target_id != -1) { + spin_unlock_irq(&vscsi->rescan_lock); + return 0; + } + fc_host_port_state(shost) = FC_PORTSTATE_BLOCKED; + list_for_each_entry(tgt, &vscsi->target_list, list) { + if (tgt->rport) { + fc_remote_port_delete(tgt->rport); + tgt->rport = NULL; + } + } + vscsi->next_target_id = 0; + spin_unlock_irq(&vscsi->rescan_lock); + queue_work(system_freezable_wq, &vscsi->rescan_work); + + while (!virtscsi_scan_finished(shost, jiffies - start)) + msleep(10); + + return 0; +} +#endif + static struct scsi_host_template virtscsi_host_template = { .module = THIS_MODULE, .name = "Virtio SCSI HBA", @@ -925,6 +1204,22 @@ static struct scsi_host_template virtscsi_host_template = { .shost_attrs = virtscsi_shost_attrs, }; +#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS) +static struct fc_function_template virtscsi_transport_functions = { + .dd_fcrport_size = sizeof(struct virtio_scsi_target_state *), + .show_host_node_name = 1, + .show_host_port_name = 1, + .show_host_port_id = 1, + .show_host_port_state = 1, + .show_host_port_type = 1, + .show_starget_node_name = 1, + .show_starget_port_name = 1, + .show_starget_port_id = 1, + .show_rport_dev_loss_tmo = 1, + .issue_fc_host_lip = virtscsi_issue_lip, +}; +#endif + #define virtscsi_config_get(vdev, fld) \ ({ \ __virtio_native_type(struct virtio_scsi_config, fld) __val; \ @@ -1041,7 +1336,9 @@ static int virtscsi_probe(struct virtio_device *vdev) vscsi->num_queues = num_queues; vdev->priv = shost; vscsi->next_target_id = -1; + vscsi->protocol = SCSI_PROTOCOL_SAS; spin_lock_init(&vscsi->rescan_lock); + INIT_LIST_HEAD(&vscsi->target_list); INIT_WORK(&vscsi->rescan_work, virtscsi_rescan_work); err = virtscsi_init(vdev, vscsi); @@ -1079,6 +1376,11 @@ static int virtscsi_probe(struct virtio_device *vdev) } #endif + virtscsi_scan_host(vscsi); +#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS) + if (vscsi->protocol == SCSI_PROTOCOL_FCP) + 
shost->transportt = virtio_transport_template; +#endif err = scsi_add_host(shost, &vdev->dev); if (err) goto scsi_add_host_failed; @@ -1183,6 +1485,12 @@ static int __init init(void) pr_err("mempool_create() for virtscsi_cmd_pool failed\n"); goto error; } +#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS) + virtio_transport_template = + fc_attach_transport(&virtscsi_transport_functions); + if (!virtio_transport_template) + goto error; +#endif ret = register_virtio_driver(&virtio_scsi_driver); if (ret < 0) goto error; @@ -1190,6 +1498,10 @@ static int __init init(void) return 0; error: +#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS) + if (virtio_transport_template) + fc_release_transport(virtio_transport_template); +#endif mempool_destroy(virtscsi_cmd_pool); virtscsi_cmd_pool = NULL; kmem_cache_destroy(virtscsi_cmd_cache); @@ -1199,6 +1511,9 @@ error: static void __exit fini(void) { +#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS) + fc_release_transport(virtio_transport_template); +#endif unregister_virtio_driver(&virtio_scsi_driver); mempool_destroy(virtscsi_cmd_pool); kmem_cache_destroy(virtscsi_cmd_cache); |
