[PATCH 07/17] raid6: improve the public interface
Christoph Hellwig
hch at lst.de
Tue Mar 24 17:40:42 AEDT 2026
Stop directly calling into function pointers from users of the RAID6 PQ
API, and instead provide exported functions with proper documentation and
API guarantee asserts where applicable.
Signed-off-by: Christoph Hellwig <hch at lst.de>
---
Documentation/crypto/async-tx-api.rst | 4 +-
crypto/async_tx/async_pq.c | 6 +-
crypto/async_tx/async_raid6_recov.c | 4 +-
drivers/md/raid5.c | 4 +-
fs/btrfs/raid56.c | 8 +-
include/linux/raid/pq.h | 19 +--
lib/raid/raid6/algos.c | 137 +++++++++++++++++-
lib/raid/raid6/arm/recov_neon.c | 4 +-
.../raid6/loongarch/recov_loongarch_simd.c | 8 +-
lib/raid/raid6/recov.c | 4 +-
lib/raid/raid6/riscv/recov_rvv.c | 4 +-
lib/raid/raid6/s390/recov_s390xc.c | 4 +-
lib/raid/raid6/x86/recov_avx2.c | 4 +-
lib/raid/raid6/x86/recov_avx512.c | 4 +-
lib/raid/raid6/x86/recov_ssse3.c | 4 +-
15 files changed, 170 insertions(+), 48 deletions(-)
diff --git a/Documentation/crypto/async-tx-api.rst b/Documentation/crypto/async-tx-api.rst
index f88a7809385e..49fcfc66314a 100644
--- a/Documentation/crypto/async-tx-api.rst
+++ b/Documentation/crypto/async-tx-api.rst
@@ -82,9 +82,9 @@ xor_val xor a series of source buffers and set a flag if the
pq generate the p+q (raid6 syndrome) from a series of source buffers
pq_val validate that a p and or q buffer are in sync with a given series of
sources
-datap (raid6_datap_recov) recover a raid6 data block and the p block
+datap (raid6_recov_datap) recover a raid6 data block and the p block
from the given sources
-2data (raid6_2data_recov) recover 2 raid6 data blocks from the given
+2data (raid6_recov_2data) recover 2 raid6 data blocks from the given
sources
======== ====================================================================
diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
index 0ce6f07b4e0d..f3574f80d1df 100644
--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -131,11 +131,11 @@ do_sync_gen_syndrome(struct page **blocks, unsigned int *offsets, int disks,
}
}
if (submit->flags & ASYNC_TX_PQ_XOR_DST) {
- BUG_ON(!raid6_call.xor_syndrome);
+ BUG_ON(!raid6_can_xor_syndrome());
if (start >= 0)
- raid6_call.xor_syndrome(disks, start, stop, len, srcs);
+ raid6_xor_syndrome(disks, start, stop, len, srcs);
} else
- raid6_call.gen_syndrome(disks, len, srcs);
+ raid6_gen_syndrome(disks, len, srcs);
async_tx_sync_epilog(submit);
}
diff --git a/crypto/async_tx/async_raid6_recov.c b/crypto/async_tx/async_raid6_recov.c
index f2dc6af6e6a7..305ea1421a3e 100644
--- a/crypto/async_tx/async_raid6_recov.c
+++ b/crypto/async_tx/async_raid6_recov.c
@@ -418,7 +418,7 @@ async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
else
ptrs[i] = page_address(blocks[i]) + offs[i];
- raid6_2data_recov(disks, bytes, faila, failb, ptrs);
+ raid6_recov_2data(disks, bytes, faila, failb, ptrs);
async_tx_sync_epilog(submit);
@@ -501,7 +501,7 @@ async_raid6_datap_recov(int disks, size_t bytes, int faila,
else
ptrs[i] = page_address(blocks[i]) + offs[i];
- raid6_datap_recov(disks, bytes, faila, ptrs);
+ raid6_recov_datap(disks, bytes, faila, ptrs);
async_tx_sync_epilog(submit);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index a8e8d431071b..13943e2b3d51 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -6944,7 +6944,7 @@ raid5_store_rmw_level(struct mddev *mddev, const char *page, size_t len)
if (kstrtoul(page, 10, &new))
return -EINVAL;
- if (new != PARITY_DISABLE_RMW && !raid6_call.xor_syndrome)
+ if (new != PARITY_DISABLE_RMW && !raid6_can_xor_syndrome())
return -EINVAL;
if (new != PARITY_DISABLE_RMW &&
@@ -7635,7 +7635,7 @@ static struct r5conf *setup_conf(struct mddev *mddev)
conf->level = mddev->new_level;
if (conf->level == 6) {
conf->max_degraded = 2;
- if (raid6_call.xor_syndrome)
+ if (raid6_can_xor_syndrome())
conf->rmw_level = PARITY_ENABLE_RMW;
else
conf->rmw_level = PARITY_DISABLE_RMW;
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index ad091c56ce77..4ab3cccb11f1 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -1410,7 +1410,7 @@ static void generate_pq_vertical_step(struct btrfs_raid_bio *rbio, unsigned int
rbio_qstripe_paddr(rbio, sector_nr, step_nr));
assert_rbio(rbio);
- raid6_call.gen_syndrome(rbio->real_stripes, step, pointers);
+ raid6_gen_syndrome(rbio->real_stripes, step, pointers);
} else {
/* raid5 */
memcpy(pointers[rbio->nr_data], pointers[0], step);
@@ -1992,10 +1992,10 @@ static void recover_vertical_step(struct btrfs_raid_bio *rbio,
}
if (failb == rbio->real_stripes - 2) {
- raid6_datap_recov(rbio->real_stripes, step,
+ raid6_recov_datap(rbio->real_stripes, step,
faila, pointers);
} else {
- raid6_2data_recov(rbio->real_stripes, step,
+ raid6_recov_2data(rbio->real_stripes, step,
faila, failb, pointers);
}
} else {
@@ -2649,7 +2649,7 @@ static bool verify_one_parity_step(struct btrfs_raid_bio *rbio,
if (has_qstripe) {
assert_rbio(rbio);
/* RAID6, call the library function to fill in our P/Q. */
- raid6_call.gen_syndrome(rbio->real_stripes, step, pointers);
+ raid6_gen_syndrome(rbio->real_stripes, step, pointers);
} else {
/* RAID5. */
memcpy(pointers[nr_data], pointers[0], step);
diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h
index f27a866c287f..425a227591c0 100644
--- a/include/linux/raid/pq.h
+++ b/include/linux/raid/pq.h
@@ -11,6 +11,16 @@
#include <linux/blkdev.h>
#include <linux/mm.h>
+void raid6_gen_syndrome(int disks, size_t bytes, void **ptrs);
+void raid6_xor_syndrome(int disks, int start, int stop, size_t bytes,
+ void **ptrs);
+bool raid6_can_xor_syndrome(void);
+
+void raid6_recov_2data(int disks, size_t bytes, int faila, int failb,
+ void **ptrs);
+void raid6_recov_datap(int disks, size_t bytes, int faila,
+ void **ptrs);
+
/* Routine choices */
struct raid6_calls {
void (*gen_syndrome)(int, size_t, void **);
@@ -20,9 +30,6 @@ struct raid6_calls {
int priority; /* Relative priority ranking if non-zero */
};
-/* Selected algorithm */
-extern struct raid6_calls raid6_call;
-
/* Various routine sets */
extern const struct raid6_calls raid6_intx1;
extern const struct raid6_calls raid6_intx2;
@@ -92,10 +99,4 @@ extern const u8 raid6_gflog[256] __attribute__((aligned(256)));
extern const u8 raid6_gfinv[256] __attribute__((aligned(256)));
extern const u8 raid6_gfexi[256] __attribute__((aligned(256)));
-/* Recovery routines */
-extern void (*raid6_2data_recov)(int disks, size_t bytes, int faila, int failb,
- void **ptrs);
-extern void (*raid6_datap_recov)(int disks, size_t bytes, int faila,
- void **ptrs);
-
#endif /* LINUX_RAID_RAID6_H */
diff --git a/lib/raid/raid6/algos.c b/lib/raid/raid6/algos.c
index 985c60bb00a4..4958c58e3f69 100644
--- a/lib/raid/raid6/algos.c
+++ b/lib/raid/raid6/algos.c
@@ -16,8 +16,83 @@
#include <linux/gfp.h>
#include <kunit/visibility.h>
-struct raid6_calls raid6_call;
-EXPORT_SYMBOL_GPL(raid6_call);
+static const struct raid6_recov_calls *raid6_recov_algo;
+
+/* Selected algorithm */
+static struct raid6_calls raid6_call;
+
+/**
+ * raid6_gen_syndrome - generate RAID6 P/Q parity
+ * @disks: number of "disks" to operate on including parity
+ * @bytes: length in bytes of each vector
+ * @ptrs: @disks size array of memory pointers
+ *
+ * Generate @bytes worth of RAID6 P and Q parity in @ptrs[@disks - 2] and
+ * @ptrs[@disks - 1] respectively from the memory pointed to by @ptrs[0] to
+ * @ptrs[@disks - 3].
+ *
+ * @disks must be at least 3, and the memory pointed to by each member of @ptrs
+ * must be at least 64-byte aligned. @bytes must be non-zero and a multiple of
+ * 512.
+ *
+ * See https://kernel.org/pub/linux/kernel/people/hpa/raid6.pdf for underlying
+ * algorithm.
+ */
+void raid6_gen_syndrome(int disks, size_t bytes, void **ptrs)
+{
+ lockdep_assert_preemption_enabled();
+ WARN_ON_ONCE(bytes & 511);
+
+ raid6_call.gen_syndrome(disks, bytes, ptrs);
+}
+EXPORT_SYMBOL_GPL(raid6_gen_syndrome);
+
+/**
+ * raid6_xor_syndrome - update RAID6 P/Q parity
+ * @disks: number of "disks" to operate on including parity
+ * @start: first index into @disk to update
+ * @stop: last index into @disk to update
+ * @bytes: length in bytes of each vector
+ * @ptrs: @disks size array of memory pointers
+ *
+ * Update @bytes worth of RAID6 P and Q parity in @ptrs[@disks - 2] and
+ * @ptrs[@disks - 1] respectively for the memory pointed to by
+ * @ptrs[@start..@stop].
+ *
+ * This is used to update parity in place using the following sequence:
+ *
+ * 1) call raid6_xor_syndrome(disks, start, stop, ...) for the existing data.
+ * 2) update the data in @ptrs[@start..@stop].
+ * 3) call raid6_xor_syndrome(disks, start, stop, ...) for the new data.
+ *
+ * Data between @start and @stop that is not changed should be filled
+ * with a pointer to the kernel zero page.
+ *
+ * @disks must be at least 3, and the memory pointed to by each member of @ptrs
+ * must be at least 64-byte aligned. @bytes must be non-zero and a multiple of
+ * 512. @stop must be larger or equal to @start.
+ */
+void raid6_xor_syndrome(int disks, int start, int stop, size_t bytes,
+ void **ptrs)
+{
+ lockdep_assert_preemption_enabled();
+ WARN_ON_ONCE(bytes & 511);
+ WARN_ON_ONCE(stop < start);
+
+ raid6_call.xor_syndrome(disks, start, stop, bytes, ptrs);
+}
+EXPORT_SYMBOL_GPL(raid6_xor_syndrome);
+
+/**
+ * raid6_can_xor_syndrome - check if raid6_xor_syndrome() can be used
+ *
+ * Returns %true if raid6_xor_syndrome() can be used, else %false.
+ */
+bool raid6_can_xor_syndrome(void)
+{
+ return !!raid6_call.xor_syndrome;
+}
+EXPORT_SYMBOL_GPL(raid6_can_xor_syndrome);
const struct raid6_calls * const raid6_algos[] = {
#if defined(__i386__) && !defined(__arch_um__)
@@ -84,11 +159,58 @@ const struct raid6_calls * const raid6_algos[] = {
};
EXPORT_SYMBOL_IF_KUNIT(raid6_algos);
-void (*raid6_2data_recov)(int, size_t, int, int, void **);
-EXPORT_SYMBOL_GPL(raid6_2data_recov);
+/**
+ * raid6_recov_2data - recover two missing data disks
+ * @disks: number of "disks" to operate on including parity
+ * @bytes: length in bytes of each vector
+ * @faila: first failed data disk index
+ * @failb: second failed data disk index
+ * @ptrs: @disks size array of memory pointers
+ *
+ * Rebuild @bytes of missing data in @ptrs[@faila] and @ptrs[@failb] from the
+ * data in the remaining disks and the two parities pointed to by the other
+ * indices between 0 and @disks - 1 in @ptrs. @disks includes the data disks
+ * and the two parities. @faila must be smaller than @failb.
+ *
+ * Memory pointed to by each pointer in @ptrs must be page aligned and is
+ * limited to %PAGE_SIZE.
+ */
+void raid6_recov_2data(int disks, size_t bytes, int faila, int failb,
+ void **ptrs)
+{
+ lockdep_assert_preemption_enabled();
+ WARN_ON_ONCE(bytes & 511);
+ WARN_ON_ONCE(bytes > PAGE_SIZE);
+ WARN_ON_ONCE(failb <= faila);
+
+ raid6_recov_algo->data2(disks, bytes, faila, failb, ptrs);
+}
+EXPORT_SYMBOL_GPL(raid6_recov_2data);
+
+/**
+ * raid6_recov_datap - recover a missing data disk and missing P-parity
+ * @disks: number of "disks" to operate on including parity
+ * @bytes: length in bytes of each vector
+ * @faila: failed data disk index
+ * @ptrs: @disks size array of memory pointers
+ *
+ * Rebuild @bytes of missing data in @ptrs[@faila] and the missing P-parity in
+ * @ptrs[@disks - 2] from the data in the remaining disks and the Q-parity
+ * pointed to by the other indices between 0 and @disks - 1 in @ptrs. @disks
+ * includes the data disks and the two parities.
+ *
+ * Memory pointed to by each pointer in @ptrs must be page aligned and is
+ * limited to %PAGE_SIZE.
+ */
+void raid6_recov_datap(int disks, size_t bytes, int faila, void **ptrs)
+{
+ lockdep_assert_preemption_enabled();
+ WARN_ON_ONCE(bytes & 511);
+ WARN_ON_ONCE(bytes > PAGE_SIZE);
-void (*raid6_datap_recov)(int, size_t, int, void **);
-EXPORT_SYMBOL_GPL(raid6_datap_recov);
+ raid6_recov_algo->datap(disks, bytes, faila, ptrs);
+}
+EXPORT_SYMBOL_GPL(raid6_recov_datap);
const struct raid6_recov_calls *const raid6_recov_algos[] = {
#ifdef CONFIG_X86
@@ -133,8 +255,7 @@ static inline const struct raid6_recov_calls *raid6_choose_recov(void)
best = *algo;
if (best) {
- raid6_2data_recov = best->data2;
- raid6_datap_recov = best->datap;
+ raid6_recov_algo = best;
pr_info("raid6: using %s recovery algorithm\n", best->name);
} else
diff --git a/lib/raid/raid6/arm/recov_neon.c b/lib/raid/raid6/arm/recov_neon.c
index 461bdb98b5cc..21c26da6290a 100644
--- a/lib/raid/raid6/arm/recov_neon.c
+++ b/lib/raid/raid6/arm/recov_neon.c
@@ -35,7 +35,7 @@ static void raid6_2data_recov_neon(int disks, size_t bytes, int faila,
ptrs[failb] = page_address(ZERO_PAGE(0));
ptrs[disks - 1] = dq;
- raid6_call.gen_syndrome(disks, bytes, ptrs);
+ raid6_gen_syndrome(disks, bytes, ptrs);
/* Restore pointer table */
ptrs[faila] = dp;
@@ -69,7 +69,7 @@ static void raid6_datap_recov_neon(int disks, size_t bytes, int faila,
ptrs[faila] = page_address(ZERO_PAGE(0));
ptrs[disks - 1] = dq;
- raid6_call.gen_syndrome(disks, bytes, ptrs);
+ raid6_gen_syndrome(disks, bytes, ptrs);
/* Restore pointer table */
ptrs[faila] = dq;
diff --git a/lib/raid/raid6/loongarch/recov_loongarch_simd.c b/lib/raid/raid6/loongarch/recov_loongarch_simd.c
index 4d4563209647..7d4d349322b3 100644
--- a/lib/raid/raid6/loongarch/recov_loongarch_simd.c
+++ b/lib/raid/raid6/loongarch/recov_loongarch_simd.c
@@ -49,7 +49,7 @@ static void raid6_2data_recov_lsx(int disks, size_t bytes, int faila,
ptrs[failb] = page_address(ZERO_PAGE(0));
ptrs[disks - 1] = dq;
- raid6_call.gen_syndrome(disks, bytes, ptrs);
+ raid6_gen_syndrome(disks, bytes, ptrs);
/* Restore pointer table */
ptrs[faila] = dp;
@@ -201,7 +201,7 @@ static void raid6_datap_recov_lsx(int disks, size_t bytes, int faila,
ptrs[faila] = page_address(ZERO_PAGE(0));
ptrs[disks - 1] = dq;
- raid6_call.gen_syndrome(disks, bytes, ptrs);
+ raid6_gen_syndrome(disks, bytes, ptrs);
/* Restore pointer table */
ptrs[faila] = dq;
@@ -323,7 +323,7 @@ static void raid6_2data_recov_lasx(int disks, size_t bytes, int faila,
ptrs[failb] = page_address(ZERO_PAGE(0));
ptrs[disks - 1] = dq;
- raid6_call.gen_syndrome(disks, bytes, ptrs);
+ raid6_gen_syndrome(disks, bytes, ptrs);
/* Restore pointer table */
ptrs[faila] = dp;
@@ -440,7 +440,7 @@ static void raid6_datap_recov_lasx(int disks, size_t bytes, int faila,
ptrs[faila] = page_address(ZERO_PAGE(0));
ptrs[disks - 1] = dq;
- raid6_call.gen_syndrome(disks, bytes, ptrs);
+ raid6_gen_syndrome(disks, bytes, ptrs);
/* Restore pointer table */
ptrs[faila] = dq;
diff --git a/lib/raid/raid6/recov.c b/lib/raid/raid6/recov.c
index 211e1df28963..cc7e4dc1eaa6 100644
--- a/lib/raid/raid6/recov.c
+++ b/lib/raid/raid6/recov.c
@@ -37,7 +37,7 @@ static void raid6_2data_recov_intx1(int disks, size_t bytes, int faila,
ptrs[failb] = page_address(ZERO_PAGE(0));
ptrs[disks-1] = dq;
- raid6_call.gen_syndrome(disks, bytes, ptrs);
+ raid6_gen_syndrome(disks, bytes, ptrs);
/* Restore pointer table */
ptrs[faila] = dp;
@@ -75,7 +75,7 @@ static void raid6_datap_recov_intx1(int disks, size_t bytes, int faila,
ptrs[faila] = page_address(ZERO_PAGE(0));
ptrs[disks-1] = dq;
- raid6_call.gen_syndrome(disks, bytes, ptrs);
+ raid6_gen_syndrome(disks, bytes, ptrs);
/* Restore pointer table */
ptrs[faila] = dq;
diff --git a/lib/raid/raid6/riscv/recov_rvv.c b/lib/raid/raid6/riscv/recov_rvv.c
index f77d9c430687..3ff39826e33f 100644
--- a/lib/raid/raid6/riscv/recov_rvv.c
+++ b/lib/raid/raid6/riscv/recov_rvv.c
@@ -164,7 +164,7 @@ static void raid6_2data_recov_rvv(int disks, size_t bytes, int faila,
ptrs[failb] = page_address(ZERO_PAGE(0));
ptrs[disks - 1] = dq;
- raid6_call.gen_syndrome(disks, bytes, ptrs);
+ raid6_gen_syndrome(disks, bytes, ptrs);
/* Restore pointer table */
ptrs[faila] = dp;
@@ -199,7 +199,7 @@ static void raid6_datap_recov_rvv(int disks, size_t bytes, int faila,
ptrs[faila] = page_address(ZERO_PAGE(0));
ptrs[disks - 1] = dq;
- raid6_call.gen_syndrome(disks, bytes, ptrs);
+ raid6_gen_syndrome(disks, bytes, ptrs);
/* Restore pointer table */
ptrs[faila] = dq;
diff --git a/lib/raid/raid6/s390/recov_s390xc.c b/lib/raid/raid6/s390/recov_s390xc.c
index 0f32217b7123..2bc4c85174de 100644
--- a/lib/raid/raid6/s390/recov_s390xc.c
+++ b/lib/raid/raid6/s390/recov_s390xc.c
@@ -40,7 +40,7 @@ static void raid6_2data_recov_s390xc(int disks, size_t bytes, int faila,
ptrs[failb] = page_address(ZERO_PAGE(0));
ptrs[disks-1] = dq;
- raid6_call.gen_syndrome(disks, bytes, ptrs);
+ raid6_gen_syndrome(disks, bytes, ptrs);
/* Restore pointer table */
ptrs[faila] = dp;
@@ -84,7 +84,7 @@ static void raid6_datap_recov_s390xc(int disks, size_t bytes, int faila,
ptrs[faila] = page_address(ZERO_PAGE(0));
ptrs[disks-1] = dq;
- raid6_call.gen_syndrome(disks, bytes, ptrs);
+ raid6_gen_syndrome(disks, bytes, ptrs);
/* Restore pointer table */
ptrs[faila] = dq;
diff --git a/lib/raid/raid6/x86/recov_avx2.c b/lib/raid/raid6/x86/recov_avx2.c
index 325310c81e1c..bef82a38d8eb 100644
--- a/lib/raid/raid6/x86/recov_avx2.c
+++ b/lib/raid/raid6/x86/recov_avx2.c
@@ -34,7 +34,7 @@ static void raid6_2data_recov_avx2(int disks, size_t bytes, int faila,
ptrs[failb] = page_address(ZERO_PAGE(0));
ptrs[disks-1] = dq;
- raid6_call.gen_syndrome(disks, bytes, ptrs);
+ raid6_gen_syndrome(disks, bytes, ptrs);
/* Restore pointer table */
ptrs[faila] = dp;
@@ -199,7 +199,7 @@ static void raid6_datap_recov_avx2(int disks, size_t bytes, int faila,
ptrs[faila] = page_address(ZERO_PAGE(0));
ptrs[disks-1] = dq;
- raid6_call.gen_syndrome(disks, bytes, ptrs);
+ raid6_gen_syndrome(disks, bytes, ptrs);
/* Restore pointer table */
ptrs[faila] = dq;
diff --git a/lib/raid/raid6/x86/recov_avx512.c b/lib/raid/raid6/x86/recov_avx512.c
index 08de77fcb8bd..06c70e771eaa 100644
--- a/lib/raid/raid6/x86/recov_avx512.c
+++ b/lib/raid/raid6/x86/recov_avx512.c
@@ -43,7 +43,7 @@ static void raid6_2data_recov_avx512(int disks, size_t bytes, int faila,
ptrs[failb] = page_address(ZERO_PAGE(0));
ptrs[disks-1] = dq;
- raid6_call.gen_syndrome(disks, bytes, ptrs);
+ raid6_gen_syndrome(disks, bytes, ptrs);
/* Restore pointer table */
ptrs[faila] = dp;
@@ -241,7 +241,7 @@ static void raid6_datap_recov_avx512(int disks, size_t bytes, int faila,
ptrs[faila] = page_address(ZERO_PAGE(0));
ptrs[disks-1] = dq;
- raid6_call.gen_syndrome(disks, bytes, ptrs);
+ raid6_gen_syndrome(disks, bytes, ptrs);
/* Restore pointer table */
ptrs[faila] = dq;
diff --git a/lib/raid/raid6/x86/recov_ssse3.c b/lib/raid/raid6/x86/recov_ssse3.c
index 002bef1e0847..5ca7d56f23d8 100644
--- a/lib/raid/raid6/x86/recov_ssse3.c
+++ b/lib/raid/raid6/x86/recov_ssse3.c
@@ -36,7 +36,7 @@ static void raid6_2data_recov_ssse3(int disks, size_t bytes, int faila,
ptrs[failb] = page_address(ZERO_PAGE(0));
ptrs[disks-1] = dq;
- raid6_call.gen_syndrome(disks, bytes, ptrs);
+ raid6_gen_syndrome(disks, bytes, ptrs);
/* Restore pointer table */
ptrs[faila] = dp;
@@ -206,7 +206,7 @@ static void raid6_datap_recov_ssse3(int disks, size_t bytes, int faila,
ptrs[faila] = page_address(ZERO_PAGE(0));
ptrs[disks-1] = dq;
- raid6_call.gen_syndrome(disks, bytes, ptrs);
+ raid6_gen_syndrome(disks, bytes, ptrs);
/* Restore pointer table */
ptrs[faila] = dq;
--
2.47.3
More information about the Linuxppc-dev
mailing list