[PATCH 4/5] cachefiles: cyclic allocation of msg_id to avoid reuse
libaokun at huaweicloud.com
Wed Apr 24 13:34:08 AEST 2024
From: Baokun Li <libaokun1 at huawei.com>
Reusing the msg_id after a maliciously completed reopen request may cause
a read request to remain unprocessed and result in a hang, as shown below:
        t1        |         t2          |          t3
-----------------------------------------------------------------------
cachefiles_ondemand_select_req
 cachefiles_ondemand_object_is_close(A)
 cachefiles_ondemand_set_object_reopening(A)
 queue_work(fscache_object_wq, &info->work)
                    ondemand_object_worker
                     cachefiles_ondemand_init_object(A)
                      cachefiles_ondemand_send_req(OPEN)
                        // get msg_id 6
                        wait_for_completion(&req_A->done)
cachefiles_ondemand_daemon_read
 // read msg_id 6 req_A
 cachefiles_ondemand_get_fd
 copy_to_user
                                           // Malicious completion msg_id 6
                                           copen 6,-1
                                           // reopen fails, want daemon to
                                           // close fd, then set object to
                                           // close, retrigger reopen
                     cachefiles_ondemand_init_object(B)
                      cachefiles_ondemand_send_req(OPEN)
                       // new open req_B reuses msg_id 6
// daemon successfully copens msg_id 6, so it won't close the fd.
// object is always reopening, so read requests are not processed,
// resulting in a hang.
Therefore, allocate the msg_id cyclically so that a msg_id freed only a
very short time ago is not immediately reused, avoiding the problem above.
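
The policy is the same idea as the kernel's idr_alloc_cyclic(): remember
where the previous search ended and start the next search just past it,
wrapping around to the bottom of the range only when the top is exhausted.
Below is a minimal userspace sketch of the policy, assuming a fixed-size
table in place of the xarray (the names used[], next_id and
alloc_id_cyclic() are illustrative, not from the patch):

#include <stdbool.h>
#include <stddef.h>

#define NSLOTS 64

static bool used[NSLOTS];	/* stand-in for the xarray's free marks */
static size_t next_id;		/* stand-in for cache->msg_id_next */

/*
 * Cyclic allocation: scan [next_id, NSLOTS) first, then wrap to
 * [0, next_id). A just-freed slot below next_id is not handed out
 * again until the cursor wraps, so a stale ID cannot be reused
 * immediately after a request completes. Returns -1 when the table
 * is full, corresponding to -EBUSY in the patch.
 */
static int alloc_id_cyclic(void)
{
	size_t n;

	for (n = 0; n < NSLOTS; n++) {
		size_t id = (next_id + n) % NSLOTS;

		if (!used[id]) {
			used[id] = true;
			next_id = id + 1;	/* resume past this ID */
			return (int)id;
		}
	}
	return -1;
}

Reuse is still possible once the cursor wraps the whole range, but with a
u32 msg_id space that takes on the order of UINT_MAX allocations rather
than happening immediately after a request completes.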
Fixes: c8383054506c ("cachefiles: notify the user daemon when looking up cookie")
Signed-off-by: Baokun Li <libaokun1 at huawei.com>
---
fs/cachefiles/internal.h | 1 +
fs/cachefiles/ondemand.c | 92 +++++++++++++++++++++++-----------------
2 files changed, 54 insertions(+), 39 deletions(-)
diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
index 8ecd296cc1c4..9200c00f3e98 100644
--- a/fs/cachefiles/internal.h
+++ b/fs/cachefiles/internal.h
@@ -128,6 +128,7 @@ struct cachefiles_cache {
 	unsigned long			req_id_next;
 	struct xarray			ondemand_ids;	/* xarray for ondemand_id allocation */
 	u32				ondemand_id_next;
+	u32				msg_id_next;
 };
 
 static inline bool cachefiles_in_ondemand_mode(struct cachefiles_cache *cache)
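
(Note: the new msg_id_next field is only read and updated while the
request xarray's lock is held, as the ondemand.c hunk below shows, so it
does not need separate serialization of its own.)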
diff --git a/fs/cachefiles/ondemand.c b/fs/cachefiles/ondemand.c
index f6440b3e7368..6171e1a8cfa1 100644
--- a/fs/cachefiles/ondemand.c
+++ b/fs/cachefiles/ondemand.c
@@ -404,51 +404,65 @@ static int cachefiles_ondemand_send_req(struct cachefiles_object *object,
 	if (ret)
 		goto out;
 
-	do {
-		/*
-		 * Stop enqueuing the request when daemon is dying. The
-		 * following two operations need to be atomic as a whole.
-		 *   1) check cache state, and
-		 *   2) enqueue request if cache is alive.
-		 * Otherwise the request may be enqueued after xarray has been
-		 * flushed, leaving the orphan request never being completed.
-		 *
-		 * CPU 1			CPU 2
-		 * =====			=====
-		 * test CACHEFILES_DEAD bit
-		 *				set CACHEFILES_DEAD bit
-		 *				flush requests in the xarray
-		 * enqueue the request
-		 */
-		xas_lock(&xas);
-
-		if (test_bit(CACHEFILES_DEAD, &cache->flags) ||
-		    cachefiles_ondemand_object_is_dropping(object)) {
-			xas_unlock(&xas);
-			ret = -EIO;
-			goto out;
-		}
+retry:
+	/*
+	 * Stop enqueuing the request when daemon is dying. The
+	 * following two operations need to be atomic as a whole.
+	 *   1) check cache state, and
+	 *   2) enqueue request if cache is alive.
+	 * Otherwise the request may be enqueued after xarray has been
+	 * flushed, leaving the orphan request never being completed.
+	 *
+	 * CPU 1			CPU 2
+	 * =====			=====
+	 * test CACHEFILES_DEAD bit
+	 *				set CACHEFILES_DEAD bit
+	 *				flush requests in the xarray
+	 * enqueue the request
+	 */
+	xas_lock(&xas);
 
-		/* coupled with the barrier in cachefiles_flush_reqs() */
-		smp_mb();
+	if (test_bit(CACHEFILES_DEAD, &cache->flags) ||
+	    cachefiles_ondemand_object_is_dropping(object)) {
+		xas_unlock(&xas);
+		ret = -EIO;
+		goto out;
+	}
 
-		if (opcode == CACHEFILES_OP_CLOSE &&
-		    !cachefiles_ondemand_object_is_open(object)) {
-			WARN_ON_ONCE(object->ondemand->ondemand_id == 0);
-			xas_unlock(&xas);
-			ret = -EIO;
-			goto out;
-		}
+	/* coupled with the barrier in cachefiles_flush_reqs() */
+	smp_mb();
+
+	if (opcode == CACHEFILES_OP_CLOSE &&
+	    !cachefiles_ondemand_object_is_open(object)) {
+		WARN_ON_ONCE(object->ondemand->ondemand_id == 0);
+		xas_unlock(&xas);
+		ret = -EIO;
+		goto out;
+	}
 
+	/*
+	 * Cyclically find a free xas to avoid msg_id reuse that would
+	 * cause the daemon to successfully copen a stale msg_id.
+	 */
+	xas.xa_index = cache->msg_id_next;
+	xas_find_marked(&xas, UINT_MAX, XA_FREE_MARK);
+	if (xas.xa_node == XAS_RESTART) {
 		xas.xa_index = 0;
-		xas_find_marked(&xas, UINT_MAX, XA_FREE_MARK);
-		if (xas.xa_node == XAS_RESTART)
-			xas_set_err(&xas, -EBUSY);
-		xas_store(&xas, req);
+		xas_find_marked(&xas, cache->msg_id_next - 1, XA_FREE_MARK);
+	}
+	if (xas.xa_node == XAS_RESTART)
+		xas_set_err(&xas, -EBUSY);
+
+	xas_store(&xas, req);
+	if (xas_valid(&xas)) {
+		cache->msg_id_next = xas.xa_index + 1;
 		xas_clear_mark(&xas, XA_FREE_MARK);
 		xas_set_mark(&xas, CACHEFILES_REQ_NEW);
-		xas_unlock(&xas);
-	} while (xas_nomem(&xas, GFP_KERNEL));
+	}
+
+	xas_unlock(&xas);
+	if (xas_nomem(&xas, GFP_KERNEL))
+		goto retry;
 
 	ret = xas_error(&xas);
 	if (ret)
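
A note on the control-flow change above: the documented xarray insertion
pattern is a do/while loop around xas_nomem(), which is called with the
lock dropped, allocates memory with the given gfp flags if the previous
operation failed for lack of it, and returns true when the operation
should be retried. A minimal sketch of that generic idiom, with
illustrative names (store_entry() is not part of the patch):

#include <linux/xarray.h>

/*
 * Illustrative only: store @entry at @index in @xa using the
 * xas_nomem() retry idiom from Documentation/core-api/xarray.rst.
 */
static int store_entry(struct xarray *xa, unsigned long index, void *entry)
{
	XA_STATE(xas, xa, index);

	do {
		xas_lock(&xas);
		xas_store(&xas, entry);
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	return xas_error(&xas);
}

The patch trades this loop for an explicit retry: label because a retry
must re-derive the starting index from cache->msg_id_next before searching
again, rather than merely repeating a store at a fixed index.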
--
2.39.2