[PATCH v3 9/9] powerpc/mm/radix: Implement mark_rodata_ro() for radix
Balbir Singh
bsingharora at gmail.com
Tue Jun 6 14:29:45 AEST 2017
This patch splits the linear mapping so that the portions covering
the kernel text are mapped with 2M pages, while the rest is mapped
with the largest possible size, 1G. The downside is that the 1G
mapping covering the kernel is split into 512 2M mappings; without
that split, however, we cannot mark the text read-only, because the
kernel is much smaller than 1G and protecting at 1G granularity would
waste a lot of space just to optimize the TLB. The text itself should
fit into about 6-8 2M mappings, so the effect should not be all that
bad.
Signed-off-by: Balbir Singh <bsingharora at gmail.com>
---
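A note on the rounding in radix__mark_rodata_ro() below: the start of
the text region is rounded up and the end rounded down to the linear
mapping granularity, so only mappings lying entirely inside
[_stext, __init_begin) lose _PAGE_WRITE. A minimal standalone sketch
of that arithmetic (not part of the patch; the addresses are made up
for illustration):

#include <stdio.h>

int main(void)
{
	/* hypothetical _stext and __init_begin */
	unsigned long start = 0xc000000000000000UL;
	unsigned long end   = 0xc000000000e00000UL;
	unsigned int shift  = 21;		/* 2M linear mapping */
	unsigned long step  = 1UL << shift;

	start = ((start + step - 1) >> shift) << shift;	/* round up */
	end = (end >> shift) << shift;			/* round down */

	printf("ro range %lx-%lx: %lu mappings of 0x%lx\n",
	       start, end, (end - start) >> shift, step);
	return 0;
}

With a 2M step this gives 7 mappings, consistent with the 6-8
estimate above.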
arch/powerpc/mm/pgtable-radix.c | 68 +++++++++++++++++++++++++++++++++++++++--
1 file changed, 66 insertions(+), 2 deletions(-)
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
index 8f42309..7c46dbc 100644
--- a/arch/powerpc/mm/pgtable-radix.c
+++ b/arch/powerpc/mm/pgtable-radix.c
@@ -11,6 +11,7 @@
#include <linux/sched/mm.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>
+#include <linux/mm.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
@@ -19,9 +20,12 @@
#include <asm/mmu.h>
#include <asm/firmware.h>
#include <asm/powernv.h>
+#include <asm/sections.h>
#include <trace/events/thp.h>
+unsigned long mmu_radix_linear_psize = PAGE_SIZE;
+
static int native_register_process_table(unsigned long base, unsigned long pg_sz,
unsigned long table_size)
{
@@ -111,7 +115,52 @@ int radix__map_kernel_page(unsigned long ea, unsigned long pa,
#ifdef CONFIG_STRICT_KERNEL_RWX
void radix__mark_rodata_ro(void)
{
- pr_warn("Not yet implemented for radix\n");
+ unsigned long start = (unsigned long)_stext;
+ unsigned long end = (unsigned long)__init_begin;
+ unsigned long idx;
+ unsigned int step, shift;
+ pgd_t *pgdp;
+ pud_t *pudp;
+ pmd_t *pmdp;
+ pte_t *ptep;
+
+ if (!mmu_has_feature(MMU_FTR_KERNEL_RO)) {
+		pr_info("Read-only rodata not supported\n");
+ return;
+ }
+
+	shift = __ilog2(mmu_radix_linear_psize);
+ step = 1 << shift;
+
+ start = ((start + step - 1) >> shift) << shift;
+ end = (end >> shift) << shift;
+
+ pr_devel("marking ro start %lx, end %lx, step %x\n",
+ start, end, step);
+
+ for (idx = start; idx < end; idx += step) {
+ pgdp = pgd_offset_k(idx);
+ pudp = pud_alloc(&init_mm, pgdp, idx);
+ if (!pudp)
+ continue;
+ if (pud_huge(*pudp)) {
+ ptep = (pte_t *)pudp;
+ goto update_the_pte;
+ }
+ pmdp = pmd_alloc(&init_mm, pudp, idx);
+ if (!pmdp)
+ continue;
+ if (pmd_huge(*pmdp)) {
+ ptep = pmdp_ptep(pmdp);
+ goto update_the_pte;
+ }
+ ptep = pte_alloc_kernel(pmdp, idx);
+ if (!ptep)
+ continue;
+update_the_pte:
+ pte_update(&init_mm, idx, ptep, _PAGE_WRITE, 0, 0);
+ }
+
}
#endif
@@ -129,6 +178,7 @@ static int __meminit create_physical_mapping(unsigned long start,
unsigned long end)
{
unsigned long addr, mapping_size = 0;
+ unsigned long max_mapping_size;
start = _ALIGN_UP(start, PAGE_SIZE);
for (addr = start; addr < end; addr += mapping_size) {
@@ -137,9 +187,12 @@ static int __meminit create_physical_mapping(unsigned long start,
gap = end - addr;
previous_size = mapping_size;
+ max_mapping_size = PUD_SIZE;
+retry:
if (IS_ALIGNED(addr, PUD_SIZE) && gap >= PUD_SIZE &&
- mmu_psize_defs[MMU_PAGE_1G].shift)
+ mmu_psize_defs[MMU_PAGE_1G].shift &&
+ PUD_SIZE <= max_mapping_size)
mapping_size = PUD_SIZE;
else if (IS_ALIGNED(addr, PMD_SIZE) && gap >= PMD_SIZE &&
mmu_psize_defs[MMU_PAGE_2M].shift)
@@ -147,6 +200,17 @@ static int __meminit create_physical_mapping(unsigned long start,
else
mapping_size = PAGE_SIZE;
+ if (mapping_size == PUD_SIZE &&
+ addr <= __pa_symbol(__init_begin) &&
+ (addr + mapping_size) >= __pa_symbol(_stext)) {
+ max_mapping_size = PMD_SIZE;
+ goto retry;
+ }
+
+ if (addr <= __pa_symbol(__init_begin) &&
+ (addr + mapping_size) >= __pa_symbol(_stext))
+ mmu_radix_linear_psize = mapping_size;
+
if (mapping_size != previous_size) {
print_mapping(start, addr, previous_size);
start = addr;
--
2.9.4
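For reference, the size selection in create_physical_mapping() with
the new cap amounts to the following (a standalone sketch that folds
the retry into a max_size argument; pick_mapping_size() and the SZ_*
constants are mine, not the kernel's, and this assumes both the 2M
and 1G page sizes are available):

#include <stdio.h>

#define SZ_4K	0x1000UL
#define SZ_2M	0x200000UL
#define SZ_1G	0x40000000UL

/* Largest mapping that is naturally aligned at addr, fits in the
 * remaining gap, and does not exceed the cap (2M when the range
 * overlaps the kernel text, 1G otherwise).
 */
static unsigned long pick_mapping_size(unsigned long addr,
				       unsigned long gap,
				       unsigned long max_size)
{
	if (!(addr & (SZ_1G - 1)) && gap >= SZ_1G && SZ_1G <= max_size)
		return SZ_1G;
	if (!(addr & (SZ_2M - 1)) && gap >= SZ_2M)
		return SZ_2M;
	return SZ_4K;
}

int main(void)
{
	/* 1G-aligned address over the kernel text: capped to 2M */
	printf("0x%lx\n", pick_mapping_size(0x40000000UL, SZ_1G, SZ_2M));
	/* same address away from the text: the full 1G is used */
	printf("0x%lx\n", pick_mapping_size(0x40000000UL, SZ_1G, SZ_1G));
	return 0;
}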