[PATCH 04/13] ibmvfc: add alloc/dealloc routines for SCSI Sub-CRQ Channels
Brian King
brking at linux.vnet.ibm.com
Sat Nov 28 04:46:55 AEDT 2020
On 11/25/20 7:48 PM, Tyrel Datwyler wrote:
> Allocate a set of Sub-CRQs in advance. During channel setup the client
> and VIOS negotiate the number of queues the VIOS supports and the number
> that the client desires to request. It's possible that the final channel
> resources allocated are less than requested, but the client is still
> responsible for sending handles for every queue it is hoping for.
>
> Also, provide deallocation cleanup routines.
>
> Signed-off-by: Tyrel Datwyler <tyreld at linux.ibm.com>
> ---
> drivers/scsi/ibmvscsi/ibmvfc.c | 115 +++++++++++++++++++++++++++++++++
> drivers/scsi/ibmvscsi/ibmvfc.h | 1 +
> 2 files changed, 116 insertions(+)
>
> diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
> index 260b82e3cc01..571abdb48384 100644
> --- a/drivers/scsi/ibmvscsi/ibmvfc.c
> +++ b/drivers/scsi/ibmvscsi/ibmvfc.c
> @@ -4983,6 +4983,114 @@ static int ibmvfc_init_crq(struct ibmvfc_host *vhost)
> return retrc;
> }
>
> +static int ibmvfc_register_scsi_channel(struct ibmvfc_host *vhost,
> + int index)
> +{
> + struct device *dev = vhost->dev;
> + struct vio_dev *vdev = to_vio_dev(dev);
> + struct ibmvfc_sub_queue *scrq = &vhost->scsi_scrqs.scrqs[index];
> + int rc = -ENOMEM;
> +
> + ENTER;
> +
> + scrq->msgs = (struct ibmvfc_sub_crq *)get_zeroed_page(GFP_KERNEL);
> + if (!scrq->msgs)
> + return rc;
> +
> + scrq->size = PAGE_SIZE / sizeof(*scrq->msgs);
> + scrq->msg_token = dma_map_single(dev, scrq->msgs, PAGE_SIZE,
> + DMA_BIDIRECTIONAL);
> +
> + if (dma_mapping_error(dev, scrq->msg_token))
> + goto dma_map_failed;
> +
> + rc = h_reg_sub_crq(vdev->unit_address, scrq->msg_token, PAGE_SIZE,
> + &scrq->cookie, &scrq->hw_irq);
> +
> + if (rc) {
> + dev_warn(dev, "Error registering sub-crq: %d\n", rc);
> + dev_warn(dev, "Firmware may not support MQ\n");
> + goto reg_failed;
> + }
> +
> + scrq->hwq_id = index;
> + scrq->vhost = vhost;
> +
> + LEAVE;
> + return 0;
> +
> +reg_failed:
> + dma_unmap_single(dev, scrq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
> +dma_map_failed:
> + free_page((unsigned long)scrq->msgs);
> + LEAVE;
> + return rc;
> +}
> +
> +static void ibmvfc_deregister_scsi_channel(struct ibmvfc_host *vhost, int index)
> +{
> + struct device *dev = vhost->dev;
> + struct vio_dev *vdev = to_vio_dev(dev);
> + struct ibmvfc_sub_queue *scrq = &vhost->scsi_scrqs.scrqs[index];
> + long rc;
> +
> + ENTER;
> +
> + do {
> + rc = plpar_hcall_norets(H_FREE_SUB_CRQ, vdev->unit_address,
> + scrq->cookie);
> + } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
> +
> + if (rc)
> + dev_err(dev, "Failed to free sub-crq[%d]: rc=%ld\n", index, rc);
> +
> + dma_unmap_single(dev, scrq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
> + free_page((unsigned long)scrq->msgs);
> + LEAVE;
> +}
> +
> +static int ibmvfc_init_sub_crqs(struct ibmvfc_host *vhost)
> +{
> + int i, j;
> +
> + ENTER;
> +
> + vhost->scsi_scrqs.scrqs = kcalloc(vhost->client_scsi_channels,
> + sizeof(*vhost->scsi_scrqs.scrqs),
> + GFP_KERNEL);
> + if (!vhost->scsi_scrqs.scrqs)
> + return -1;
> +
> + for (i = 0; i < vhost->client_scsi_channels; i++) {
> + if (ibmvfc_register_scsi_channel(vhost, i)) {
> + for (j = i; j > 0; j--)
> + ibmvfc_deregister_scsi_channel(vhost, j - 1);
> + kfree(vhost->scsi_scrqs.scrqs);
> + LEAVE;
> + return -1;
> + }
> + }
> +
> + LEAVE;
> + return 0;
> +}
> +
> +static void ibmvfc_release_sub_crqs(struct ibmvfc_host *vhost)
> +{
> + int i;
> +
> + ENTER;
> + if (!vhost->scsi_scrqs.scrqs)
> + return;
> +
> + for (i = 0; i < vhost->client_scsi_channels; i++)
> + ibmvfc_deregister_scsi_channel(vhost, i);
> +
> + vhost->scsi_scrqs.active_queues = 0;
> + kfree(vhost->scsi_scrqs.scrqs);
Do you want to NULL this out after you free it so you don't keep
a reference to a freed page around?
> + LEAVE;
> +}
> +
> /**
> * ibmvfc_free_mem - Free memory for vhost
> * @vhost: ibmvfc host struct
--
Brian King
Power Linux I/O
IBM Linux Technology Center
More information about the Linuxppc-dev
mailing list