[PATCH 11/24] netfs: Use bh-disabling spinlocks for rreq->lock
David Howells
dhowells at redhat.com
Tue Jul 30 02:19:40 AEST 2024
Use bh-disabling spinlocks when accessing rreq->lock because, in the
future, it may be twiddled from softirq context when cleanup is driven from
cache backend DIO completion.
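[Illustration, not part of the patch: a minimal sketch of the locking rule
being applied here, using hypothetical example_* names. If a spinlock may be
taken from softirq (BH) context, process-context users must take it with
spin_lock_bh(); otherwise a softirq firing on the same CPU while the lock is
held would spin on it and deadlock. The softirq-side taker can use plain
spin_lock(), since BHs are already disabled there.

#include <linux/spinlock.h>
#include <linux/list.h>

static DEFINE_SPINLOCK(example_lock);
static LIST_HEAD(example_queue);

struct example_item {
	struct list_head link;
};

/* Process context: BHs must be disabled while the lock is held, otherwise
 * a softirq arriving on this CPU could spin on example_lock and deadlock.
 */
static void example_queue_item(struct example_item *item)
{
	spin_lock_bh(&example_lock);
	list_add_tail(&item->link, &example_queue);
	spin_unlock_bh(&example_lock);
}

/* Softirq (BH) context, e.g. a DIO completion handler: plain spin_lock()
 * suffices because BHs are already disabled here.
 */
static void example_complete_one(void)
{
	struct example_item *item;

	spin_lock(&example_lock);
	item = list_first_entry_or_null(&example_queue,
					struct example_item, link);
	if (item)
		list_del_init(&item->link);
	spin_unlock(&example_lock);
}

This is the same asymmetry the patch sets up: the issuer and collector run in
process context and switch to the _bh variants, while a future softirq-driven
completion path could take the lock directly.]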
Signed-off-by: David Howells <dhowells at redhat.com>
cc: Jeff Layton <jlayton at kernel.org>
cc: netfs at lists.linux.dev
cc: linux-fsdevel at vger.kernel.org
---
 fs/netfs/write_collect.c | 4 ++--
 fs/netfs/write_issue.c   | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/fs/netfs/write_collect.c b/fs/netfs/write_collect.c
index e105ac270090..5f504b03a1e7 100644
--- a/fs/netfs/write_collect.c
+++ b/fs/netfs/write_collect.c
@@ -466,7 +466,7 @@ static void netfs_collect_write_results(struct netfs_io_request *wreq)
 
 	cancel:
 		/* Remove if completely consumed. */
-		spin_lock(&wreq->lock);
+		spin_lock_bh(&wreq->lock);
 
 		remove = front;
 		list_del_init(&front->rreq_link);
@@ -482,7 +482,7 @@ static void netfs_collect_write_results(struct netfs_io_request *wreq)
 			}
 		}
 
-		spin_unlock(&wreq->lock);
+		spin_unlock_bh(&wreq->lock);
 		netfs_put_subrequest(remove, false,
 				     notes & SAW_FAILURE ?
 				     netfs_sreq_trace_put_cancel :
diff --git a/fs/netfs/write_issue.c b/fs/netfs/write_issue.c
index 6e835670dc58..0354c992467b 100644
--- a/fs/netfs/write_issue.c
+++ b/fs/netfs/write_issue.c
@@ -189,7 +189,7 @@ static void netfs_prepare_write(struct netfs_io_request *wreq,
 	 * the list. The collector only goes nextwards and uses the lock to
 	 * remove entries off of the front.
 	 */
-	spin_lock(&wreq->lock);
+	spin_lock_bh(&wreq->lock);
 	list_add_tail(&subreq->rreq_link, &stream->subrequests);
 	if (list_is_first(&subreq->rreq_link, &stream->subrequests)) {
 		stream->front = subreq;
@@ -200,7 +200,7 @@ static void netfs_prepare_write(struct netfs_io_request *wreq,
 		}
 	}
 
-	spin_unlock(&wreq->lock);
+	spin_unlock_bh(&wreq->lock);
 
 	stream->construct = subreq;
 }
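[Illustration, not part of the patch: the comment in netfs_prepare_write()
describes a single-lock list discipline -- the issuer appends subrequests at
the tail while the collector only walks forward and detaches completed
entries from the front. A minimal sketch of that pattern under a BH-disabling
lock, with hypothetical sketch_* names standing in for the netfs structures:

#include <linux/spinlock.h>
#include <linux/list.h>

struct sketch_subreq {
	struct list_head link;
	bool done;
};

struct sketch_stream {
	spinlock_t lock;
	struct list_head subrequests;
	struct sketch_subreq *front;	/* oldest uncollected entry */
};

/* Issuer, process context: append at the tail; if the list was empty,
 * this entry becomes the new front.
 */
static void sketch_issue(struct sketch_stream *s, struct sketch_subreq *sr)
{
	spin_lock_bh(&s->lock);
	list_add_tail(&sr->link, &s->subrequests);
	if (list_is_first(&sr->link, &s->subrequests))
		s->front = sr;
	spin_unlock_bh(&s->lock);
}

/* Collector: detach completed entries from the front only, then advance
 * the front pointer to the next entry (or NULL if the list drained).
 */
static struct sketch_subreq *sketch_collect(struct sketch_stream *s)
{
	struct sketch_subreq *sr;

	spin_lock_bh(&s->lock);
	sr = list_first_entry_or_null(&s->subrequests,
				      struct sketch_subreq, link);
	if (sr && sr->done) {
		list_del_init(&sr->link);
		s->front = list_first_entry_or_null(&s->subrequests,
						    struct sketch_subreq, link);
	} else {
		sr = NULL;
	}
	spin_unlock_bh(&s->lock);
	return sr;
}

Because the issuer only touches the tail and the collector only touches the
front, one short-lived lock covers both sides without either having to walk
the whole list.]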