[PATCH v4 18/21] ibmvfc: send Cancel MAD down each hw scsi channel

Brian King brking at linux.vnet.ibm.com
Wed Jan 13 08:46:45 AEDT 2021


On 1/11/21 5:12 PM, Tyrel Datwyler wrote:
> diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
> index b0b0212344f3..24e1278acfeb 100644
> --- a/drivers/scsi/ibmvscsi/ibmvfc.c
> +++ b/drivers/scsi/ibmvscsi/ibmvfc.c
> @@ -2418,18 +2418,79 @@ static struct ibmvfc_event *ibmvfc_init_tmf(struct ibmvfc_queue *queue,
>  	return evt;
>  }
>  
> -/**
> - * ibmvfc_cancel_all - Cancel all outstanding commands to the device
> - * @sdev:	scsi device to cancel commands
> - * @type:	type of error recovery being performed
> - *
> - * This sends a cancel to the VIOS for the specified device. This does
> - * NOT send any abort to the actual device. That must be done separately.
> - *
> - * Returns:
> - *	0 on success / other on failure
> - **/
> -static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
> +static int ibmvfc_cancel_all_mq(struct scsi_device *sdev, int type)
> +{
> +	struct ibmvfc_host *vhost = shost_priv(sdev->host);
> +	struct ibmvfc_event *evt, *found_evt, *temp;
> +	struct ibmvfc_queue *queues = vhost->scsi_scrqs.scrqs;
> +	unsigned long flags;
> +	int num_hwq, i;
> +	LIST_HEAD(cancelq);
> +	u16 status;
> +
> +	ENTER;
> +	spin_lock_irqsave(vhost->host->host_lock, flags);
> +	num_hwq = vhost->scsi_scrqs.active_queues;
> +	for (i = 0; i < num_hwq; i++) {
> +		spin_lock(queues[i].q_lock);
> +		spin_lock(&queues[i].l_lock);
> +		found_evt = NULL;
> +		list_for_each_entry(evt, &queues[i].sent, queue_list) {
> +			if (evt->cmnd && evt->cmnd->device == sdev) {
> +				found_evt = evt;
> +				break;
> +			}
> +		}
> +		spin_unlock(&queues[i].l_lock);
> +

I really don't like the way the first for loop grabs all the q_locks and then
a second for loop drops them all. I think this code would be cleaner if it
looked like:

		if (found_evt && vhost->logged_in) {
			evt = ibmvfc_init_tmf(&queues[i], sdev, type);
			evt->sync_iu = &queues[i].cancel_rsp;
			ibmvfc_send_event(evt, vhost, default_timeout);
			list_add_tail(&evt->cancel, &cancelq);
		}

		spin_unlock(queues[i].q_lock);

	}

> +		if (!found_evt)
> +			continue;
> +
> +		if (vhost->logged_in) {
> +			evt = ibmvfc_init_tmf(&queues[i], sdev, type);
> +			evt->sync_iu = &queues[i].cancel_rsp;
> +			ibmvfc_send_event(evt, vhost, default_timeout);
> +			list_add_tail(&evt->cancel, &cancelq);
> +		}
> +	}
> +
> +	for (i = 0; i < num_hwq; i++)
> +		spin_unlock(queues[i].q_lock);
> +	spin_unlock_irqrestore(vhost->host->host_lock, flags);
> +
> +	if (list_empty(&cancelq)) {
> +		if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
> +			sdev_printk(KERN_INFO, sdev, "No events found to cancel\n");
> +		return 0;
> +	}
> +
> +	sdev_printk(KERN_INFO, sdev, "Cancelling outstanding commands.\n");
> +
> +	list_for_each_entry_safe(evt, temp, &cancelq, cancel) {
> +		wait_for_completion(&evt->comp);
> +		status = be16_to_cpu(evt->queue->cancel_rsp.mad_common.status);

You probably want a list_del(&evt->cancel) here.
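Otherwise the freed events are left linked on the on-stack cancelq. Something
along these lines (sketch only, reusing the names from the patch):

		wait_for_completion(&evt->comp);
		status = be16_to_cpu(evt->queue->cancel_rsp.mad_common.status);
		/* unlink from the local cancel list before the event is freed */
		list_del(&evt->cancel);
		ibmvfc_free_event(evt);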

> +		ibmvfc_free_event(evt);
> +
> +		if (status != IBMVFC_MAD_SUCCESS) {
> +			sdev_printk(KERN_WARNING, sdev, "Cancel failed with rc=%x\n", status);
> +			switch (status) {
> +			case IBMVFC_MAD_DRIVER_FAILED:
> +			case IBMVFC_MAD_CRQ_ERROR:
> +			/* Host adapter most likely going through reset, return success to
> +			 * the caller will wait for the command being cancelled to get returned
> +			 */
> +				break;
> +			default:
> +				break;

I think this default break needs to be a return -EIO.
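Otherwise the function still logs "Successfully cancelled outstanding commands"
and returns 0 even though a cancel MAD failed. Something like:

			default:
				/* cancel really failed; report it to the caller */
				return -EIO;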

> +			}
> +		}
> +	}
> +
> +	sdev_printk(KERN_INFO, sdev, "Successfully cancelled outstanding commands\n");
> +	return 0;
> +}
> +
> +static int ibmvfc_cancel_all_sq(struct scsi_device *sdev, int type)
>  {
>  	struct ibmvfc_host *vhost = shost_priv(sdev->host);
>  	struct ibmvfc_event *evt, *found_evt;
> @@ -2498,6 +2559,27 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
>  	return 0;
>  }
>  



-- 
Brian King
Power Linux I/O
IBM Linux Technology Center


