[PATCH 08/10] scsi/ibmvfc: Replace tasklet with work
Davidlohr Bueso
dave at stgolabs.net
Tue May 31 09:15:10 AEST 2022
Tasklets have long been deprecated as too heavy on the system: they
run in softirq (interrupt) context, and this is not a performance
critical path. If a higher priority process wants to run, it must
wait for the tasklet to finish before doing so. Use a workqueue
instead and run in task context. This does allow increased
concurrency (tasklets are serialized against themselves, work items
are not), but the handler runs under both the vhost's host_lock and
the crq.q_lock, so it should be safe.
Cc: Tyrel Datwyler <tyreld at linux.ibm.com>
Cc: Michael Ellerman <mpe at ellerman.id.au>
Cc: linuxppc-dev at lists.ozlabs.org
Signed-off-by: Davidlohr Bueso <dave at stgolabs.net>
---
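For reviewers unfamiliar with this kind of conversion, the general
shape of a tasklet-to-workqueue switch is sketched below. This is a
minimal, illustrative example: the foo_* names are made up and are
not ibmvfc code.

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct foo_host {
	spinlock_t lock;
	struct work_struct work;	/* was: struct tasklet_struct tasklet */
};

static void foo_workfn(struct work_struct *work)
{
	/* Work handlers get no opaque data pointer the way tasklets
	 * do; recover the containing structure with container_of().
	 */
	struct foo_host *host = container_of(work, struct foo_host, work);
	unsigned long flags;

	/* Work items are not serialized against themselves, so the
	 * handler must rely on the same locks the completion path
	 * already takes.
	 */
	spin_lock_irqsave(&host->lock, flags);
	/* ... drain the queue / process completions ... */
	spin_unlock_irqrestore(&host->lock, flags);
}

static void foo_init(struct foo_host *host)
{
	spin_lock_init(&host->lock);
	INIT_WORK(&host->work, foo_workfn);	/* was: tasklet_init() */
}

static irqreturn_t foo_interrupt(int irq, void *dev_instance)
{
	struct foo_host *host = dev_instance;

	schedule_work(&host->work);		/* was: tasklet_schedule() */
	return IRQ_HANDLED;
}

static void foo_teardown(struct foo_host *host)
{
	cancel_work_sync(&host->work);		/* was: tasklet_kill() */
}

Note that schedule_work() queues on the system workqueue, and
cancel_work_sync() both cancels a pending item and waits for a
running handler to finish, which is what makes it a drop-in for
tasklet_kill() at teardown.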
drivers/scsi/ibmvscsi/ibmvfc.c | 21 ++++++++++++---------
drivers/scsi/ibmvscsi/ibmvfc.h | 3 ++-
2 files changed, 14 insertions(+), 10 deletions(-)
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index d0eab5700dc5..31b1900489e7 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -891,7 +891,7 @@ static void ibmvfc_release_crq_queue(struct ibmvfc_host *vhost)
ibmvfc_dbg(vhost, "Releasing CRQ\n");
free_irq(vdev->irq, vhost);
- tasklet_kill(&vhost->tasklet);
+ cancel_work_sync(&vhost->work);
do {
if (rc)
msleep(100);
@@ -3689,22 +3689,22 @@ static irqreturn_t ibmvfc_interrupt(int irq, void *dev_instance)
spin_lock_irqsave(vhost->host->host_lock, flags);
vio_disable_interrupts(to_vio_dev(vhost->dev));
- tasklet_schedule(&vhost->tasklet);
+ schedule_work(&vhost->work);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
return IRQ_HANDLED;
}
/**
- * ibmvfc_tasklet - Interrupt handler tasklet
+ * ibmvfc_workfn - work handler
* @data: ibmvfc host struct
*
* Returns:
* Nothing
**/
-static void ibmvfc_tasklet(void *data)
+static void ibmvfc_workfn(struct work_struct *work)
{
- struct ibmvfc_host *vhost = data;
- struct vio_dev *vdev = to_vio_dev(vhost->dev);
+ struct ibmvfc_host *vhost;
+ struct vio_dev *vdev;
struct ibmvfc_crq *crq;
struct ibmvfc_async_crq *async;
struct ibmvfc_event *evt, *temp;
@@ -3712,6 +3712,9 @@ static void ibmvfc_tasklet(void *data)
int done = 0;
LIST_HEAD(evt_doneq);
+ vhost = container_of(work, struct ibmvfc_host, work);
+ vdev = to_vio_dev(vhost->dev);
+
spin_lock_irqsave(vhost->host->host_lock, flags);
spin_lock(vhost->crq.q_lock);
while (!done) {
@@ -5722,7 +5725,7 @@ static int ibmvfc_init_crq(struct ibmvfc_host *vhost)
retrc = 0;
- tasklet_init(&vhost->tasklet, (void *)ibmvfc_tasklet, (unsigned long)vhost);
+ INIT_WORK(&vhost->work, ibmvfc_workfn);
if ((rc = request_irq(vdev->irq, ibmvfc_interrupt, 0, IBMVFC_NAME, vhost))) {
dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n", vdev->irq, rc);
@@ -5738,7 +5741,7 @@ static int ibmvfc_init_crq(struct ibmvfc_host *vhost)
return retrc;
req_irq_failed:
- tasklet_kill(&vhost->tasklet);
+ cancel_work_sync(&vhost->work);
do {
rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
@@ -6213,7 +6216,7 @@ static int ibmvfc_resume(struct device *dev)
spin_lock_irqsave(vhost->host->host_lock, flags);
vio_disable_interrupts(vdev);
- tasklet_schedule(&vhost->tasklet);
+ schedule_work(&vhost->work);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
return 0;
}
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index 3718406e0988..7eca3622a2fa 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -12,6 +12,7 @@
#include <linux/list.h>
#include <linux/types.h>
+#include <linux/workqueue.h>
#include <scsi/viosrp.h>
#define IBMVFC_NAME "ibmvfc"
@@ -892,7 +893,7 @@ struct ibmvfc_host {
char partition_name[97];
void (*job_step) (struct ibmvfc_host *);
struct task_struct *work_thread;
- struct tasklet_struct tasklet;
+ struct work_struct work;
struct work_struct rport_add_work_q;
wait_queue_head_t init_wait_q;
wait_queue_head_t work_wait_q;
--
2.36.1