[PATCH RFC v1 4/7] swiotlb: implement io_tlb_high_mem

Dongli Zhang <dongli.zhang@oracle.com>
Thu Jun 9 10:55:50 AEST 2022


This patch implements the extra 'io_tlb_high_mem' bounce buffer. In the
future, device drivers may choose to use either 'io_tlb_default_mem' or
'io_tlb_high_mem' as dev->dma_io_tlb_mem.

The high-memory buffer is regarded as active when
(high_nslabs && io_tlb_high_mem.nslabs) evaluates to true.
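
As a rough sketch (illustrative only; the per-device selection interface
is not part of this patch), a driver that wants the high pool could end
up doing something like the following, with the function name below being
purely hypothetical:

	/*
	 * Illustrative sketch only, not part of this patch.  Assumes the
	 * high pool has been populated and that per-device selection is
	 * done by pointing dev->dma_io_tlb_mem at the chosen io_tlb_mem.
	 */
	#include <linux/device.h>
	#include <linux/swiotlb.h>

	static void example_select_bounce_pool(struct device *dev)
	{
		/* Fall back to the default pool unless the high pool is live. */
		if (swiotlb_high_active())
			dev->dma_io_tlb_mem = &io_tlb_high_mem;
		else
			dev->dma_io_tlb_mem = &io_tlb_default_mem;
	}

Whether such an assignment happens at probe time or through a dedicated
helper is left to later patches in this series.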

Cc: Konrad Wilk <konrad.wilk@oracle.com>
Cc: Joe Jin <joe.jin@oracle.com>
Signed-off-by: Dongli Zhang <dongli.zhang@oracle.com>
---
 arch/powerpc/kernel/dma-swiotlb.c |   8 ++-
 arch/x86/kernel/pci-dma.c         |   5 +-
 include/linux/swiotlb.h           |   2 +-
 kernel/dma/swiotlb.c              | 103 +++++++++++++++++++++---------
 4 files changed, 84 insertions(+), 34 deletions(-)

diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
index ba256c37bcc0..f18694881264 100644
--- a/arch/powerpc/kernel/dma-swiotlb.c
+++ b/arch/powerpc/kernel/dma-swiotlb.c
@@ -20,9 +20,11 @@ void __init swiotlb_detect_4g(void)
 
 static int __init check_swiotlb_enabled(void)
 {
-	if (ppc_swiotlb_enable)
-		swiotlb_print_info();
-	else
+	if (ppc_swiotlb_enable) {
+		swiotlb_print_info(false);
+		if (swiotlb_high_active())
+			swiotlb_print_info(true);
+	} else
 		swiotlb_exit();
 
 	return 0;
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 30bbe4abb5d6..1504b349b312 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -196,7 +196,10 @@ static int __init pci_iommu_init(void)
 	/* An IOMMU turned us off. */
 	if (x86_swiotlb_enable) {
 		pr_info("PCI-DMA: Using software bounce buffering for IO (SWIOTLB)\n");
-		swiotlb_print_info();
+
+		swiotlb_print_info(false);
+		if (swiotlb_high_active())
+			swiotlb_print_info(true);
 	} else {
 		swiotlb_exit();
 	}
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index e61c074c55eb..8196bf961aab 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -166,7 +166,7 @@ static inline void swiotlb_adjust_size(unsigned long size)
 #endif /* CONFIG_SWIOTLB */
 
 extern bool swiotlb_high_active(void);
-extern void swiotlb_print_info(void);
+extern void swiotlb_print_info(bool high);
 
 #ifdef CONFIG_DMA_RESTRICTED_POOL
 struct page *swiotlb_alloc(struct device *dev, size_t size);
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 7988883ca7f9..ff82b281ce01 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -101,6 +101,21 @@ setup_io_tlb_npages(char *str)
 }
 early_param("swiotlb", setup_io_tlb_npages);
 
+static struct io_tlb_mem *io_tlb_mem_get(bool high)
+{
+	return high ? &io_tlb_high_mem : &io_tlb_default_mem;
+}
+
+static unsigned long nslabs_get(bool high)
+{
+	return high ? high_nslabs : default_nslabs;
+}
+
+static char *swiotlb_name_get(bool high)
+{
+	return high ? "high" : "default";
+}
+
 bool swiotlb_high_active(void)
 {
 	return high_nslabs && io_tlb_high_mem.nslabs;
@@ -133,17 +148,18 @@ void __init swiotlb_adjust_size(unsigned long size)
 	pr_info("SWIOTLB bounce buffer size adjusted to %luMB", size >> 20);
 }
 
-void swiotlb_print_info(void)
+void swiotlb_print_info(bool high)
 {
-	struct io_tlb_mem *mem = &io_tlb_default_mem;
+	struct io_tlb_mem *mem = io_tlb_mem_get(high);
 
 	if (!mem->nslabs) {
 		pr_warn("No low mem\n");
 		return;
 	}
 
-	pr_info("mapped [mem %pa-%pa] (%luMB)\n", &mem->start, &mem->end,
-	       (mem->nslabs << IO_TLB_SHIFT) >> 20);
+	pr_info("%s mapped [mem %pa-%pa] (%luMB)\n",
+		swiotlb_name_get(high), &mem->start, &mem->end,
+		(mem->nslabs << IO_TLB_SHIFT) >> 20);
 }
 
 static inline unsigned long io_tlb_offset(unsigned long val)
@@ -184,15 +200,9 @@ static void *swiotlb_mem_remap(struct io_tlb_mem *mem, unsigned long bytes)
 }
 #endif
 
-/*
- * Early SWIOTLB allocation may be too early to allow an architecture to
- * perform the desired operations.  This function allows the architecture to
- * call SWIOTLB when the operations are possible.  It needs to be called
- * before the SWIOTLB memory is used.
- */
-void __init swiotlb_update_mem_attributes(void)
+static void __init __swiotlb_update_mem_attributes(bool high)
 {
-	struct io_tlb_mem *mem = &io_tlb_default_mem;
+	struct io_tlb_mem *mem = io_tlb_mem_get(high);
 	void *vaddr;
 	unsigned long bytes;
 
@@ -207,6 +217,19 @@ void __init swiotlb_update_mem_attributes(void)
 		mem->vaddr = vaddr;
 }
 
+/*
+ * Early SWIOTLB allocation may be too early to allow an architecture to
+ * perform the desired operations.  This function allows the architecture to
+ * call SWIOTLB when the operations are possible.  It needs to be called
+ * before the SWIOTLB memory is used.
+ */
+void __init swiotlb_update_mem_attributes(void)
+{
+	__swiotlb_update_mem_attributes(false);
+	if (swiotlb_high_active())
+		__swiotlb_update_mem_attributes(true);
+}
+
 static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start,
 		unsigned long nslabs, unsigned int flags, bool late_alloc)
 {
@@ -240,15 +263,13 @@ static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start,
 	return;
 }
 
-/*
- * Statically reserve bounce buffer space and initialize bounce buffer data
- * structures for the software IO TLB used to implement the DMA API.
- */
-void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
-		int (*remap)(void *tlb, unsigned long nslabs, bool high))
+static void __init
+__swiotlb_init_remap(bool addressing_limit, unsigned int flags,
+		     int (*remap)(void *tlb, unsigned long nslabs, bool high),
+		     bool high)
 {
-	struct io_tlb_mem *mem = &io_tlb_default_mem;
-	unsigned long nslabs = default_nslabs;
+	struct io_tlb_mem *mem = io_tlb_mem_get(high);
+	unsigned long nslabs = nslabs_get(high);
 	size_t alloc_size;
 	size_t bytes;
 	void *tlb;
@@ -274,7 +295,7 @@ void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
 		return;
 	}
 
-	if (remap && remap(tlb, nslabs, false) < 0) {
+	if (remap && remap(tlb, nslabs, high) < 0) {
 		memblock_free(tlb, PAGE_ALIGN(bytes));
 
 		nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
@@ -293,7 +314,20 @@ void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
 	swiotlb_init_io_tlb_mem(mem, __pa(tlb), nslabs, flags, false);
 
 	if (flags & SWIOTLB_VERBOSE)
-		swiotlb_print_info();
+		swiotlb_print_info(high);
+}
+
+/*
+ * Statically reserve bounce buffer space and initialize bounce buffer data
+ * structures for the software IO TLB used to implement the DMA API.
+ */
+void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
+		int (*remap)(void *tlb, unsigned long nslabs, bool high))
+{
+	__swiotlb_init_remap(addressing_limit, flags, remap, false);
+	if (high_nslabs)
+		__swiotlb_init_remap(addressing_limit, flags | SWIOTLB_ANY,
+				     remap, true);
 }
 
 void __init swiotlb_init(bool addressing_limit, unsigned int flags)
@@ -364,23 +398,20 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask,
 			     (nslabs << IO_TLB_SHIFT) >> PAGE_SHIFT);
 	swiotlb_init_io_tlb_mem(mem, virt_to_phys(vstart), nslabs, 0, true);
 
-	swiotlb_print_info();
+	swiotlb_print_info(false);
 	return 0;
 }
 
-void __init swiotlb_exit(void)
+static void __init __swiotlb_exit(bool high)
 {
-	struct io_tlb_mem *mem = &io_tlb_default_mem;
+	struct io_tlb_mem *mem = io_tlb_mem_get(high);
 	unsigned long tbl_vaddr;
 	size_t tbl_size, slots_size;
 
-	if (swiotlb_force_bounce)
-		return;
-
 	if (!mem->nslabs)
 		return;
 
-	pr_info("tearing down default memory pool\n");
+	pr_info("tearing down %s memory pool\n", swiotlb_name_get(high));
 	tbl_vaddr = (unsigned long)phys_to_virt(mem->start);
 	tbl_size = PAGE_ALIGN(mem->end - mem->start);
 	slots_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), mem->nslabs));
@@ -397,6 +428,16 @@ void __init swiotlb_exit(void)
 	memset(mem, 0, sizeof(*mem));
 }
 
+void __init swiotlb_exit(void)
+{
+	if (swiotlb_force_bounce)
+		return;
+
+	__swiotlb_exit(false);
+	if (swiotlb_high_active())
+		__swiotlb_exit(true);
+}
+
 /*
  * Return the offset into a iotlb slot required to keep the device happy.
  */
@@ -786,6 +827,10 @@ static void swiotlb_create_debugfs_files(struct io_tlb_mem *mem,
 static int __init __maybe_unused swiotlb_create_default_debugfs(void)
 {
 	swiotlb_create_debugfs_files(&io_tlb_default_mem, "swiotlb");
+
+	if (swiotlb_high_active())
+		swiotlb_create_debugfs_files(&io_tlb_high_mem, "swiotlb-hi");
+
 	return 0;
 }
 
-- 
2.17.1


