[PATCH 2/2][RFC] PPC64 no-exec support for kernel space
Jake Moilanen
moilanen at austin.ibm.com
Wed Oct 13 00:58:52 EST 2004
Here is the kernel piece of no-exec. It marks all pages outside of
kernel text as no-execute.
It depends on the user-space no-exec patch.
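The heart of the change is in create_pte_mapping(): when a bolted
HPTE is created for an address outside kernel text, the hardware
no-execute bit is OR'd into its protection bits. Below is a minimal
standalone sketch of just that decision; the bit values and the two
text-boundary addresses are hypothetical stand-ins for illustration,
not the kernel's real _stext/__init_end or HPTE flag values.

/*
 * Standalone sketch (not kernel code) of the mode selection done in
 * create_pte_mapping(): pages outside [_stext, __init_end) get the
 * hardware no-execute bit OR'd into their HPTE protection bits.
 * The bit values and the two "linker symbol" addresses below are
 * made up for illustration.
 */
#include <stdio.h>

#define HW_NO_EXEC	0x4UL	/* stand-in for the HW no-exec bit */
#define MODE_RW		0x180UL	/* stand-in base protection bits   */

static unsigned long stext    = 0xc000000000000000UL;	/* hypothetical */
static unsigned long init_end = 0xc000000000400000UL;	/* hypothetical */

static int is_kernel_text(unsigned long addr)
{
	return addr >= stext && addr < init_end;
}

int main(void)
{
	unsigned long addrs[] = {
		0xc000000000010000UL,	/* inside kernel text    */
		0xc000000000800000UL,	/* data: gets no-execute */
	};

	for (int i = 0; i < 2; i++) {
		unsigned long tmp_mode = MODE_RW;

		if (!is_kernel_text(addrs[i]))
			tmp_mode |= HW_NO_EXEC;

		printf("%#lx -> mode %#lx\n", addrs[i], tmp_mode);
	}
	return 0;
}

Module text stays executable because module_alloc() switches to
vmalloc_exec(), which is expected to map its pages with an
exec-permitted protection such as PAGE_KERNEL_EXEC.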
Thanks,
Jake
Signed-off-by: Jake Moilanen <moilanen at austin.ibm.com>
---
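(A note below the cut line so it stays out of the changelog: the
check added to do_page_fault() boils down to the predicate sketched
here. An instruction-fetch fault on a kernel page that is mapped and
present but not executable is reported as a possible exploit attempt
and the task is killed. The flag values and helper below are
stand-ins for illustration, not the kernel's pte accessors.)

/*
 * Sketch of the NX-fault classification added to do_page_fault():
 * a page that is present but lacks the exec bit must not be run.
 * Flag values are stand-ins.
 */
#include <stdio.h>

#define PTE_PRESENT	0x1UL	/* stand-in for _PAGE_PRESENT */
#define PTE_EXEC	0x4UL	/* stand-in for _PAGE_EXEC    */

static int nx_violation(unsigned long pte)
{
	return (pte & PTE_PRESENT) && !(pte & PTE_EXEC);
}

int main(void)
{
	printf("%d\n", nx_violation(PTE_PRESENT));		/* 1: NX hit */
	printf("%d\n", nx_violation(PTE_PRESENT | PTE_EXEC));	/* 0: fine   */
	return 0;
}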
diff -puN arch/ppc64/kernel/module.c~nx-kernel-ppc64 arch/ppc64/kernel/module.c
--- linux-2.6-bk/arch/ppc64/kernel/module.c~nx-kernel-ppc64 Thu Oct 7 15:23:55 2004
+++ linux-2.6-bk-moilanen/arch/ppc64/kernel/module.c Thu Oct 7 15:23:55 2004
@@ -102,7 +102,8 @@ void *module_alloc(unsigned long size)
{
if (size == 0)
return NULL;
- return vmalloc(size);
+
+ return vmalloc_exec(size);
}
/* Free memory returned from module_alloc */
diff -puN arch/ppc64/mm/fault.c~nx-kernel-ppc64 arch/ppc64/mm/fault.c
--- linux-2.6-bk/arch/ppc64/mm/fault.c~nx-kernel-ppc64 Thu Oct 7 15:23:55 2004
+++ linux-2.6-bk-moilanen/arch/ppc64/mm/fault.c Thu Oct 7 15:23:55 2004
@@ -75,6 +75,21 @@ static int store_updates_sp(struct pt_re
return 0;
}
+pte_t *lookup_address(unsigned long address)
+{
+ pgd_t *pgd = pgd_offset_k(address);
+ pmd_t *pmd;
+
+ if (pgd_none(*pgd))
+ return NULL;
+
+ pmd = pmd_offset(pgd, address);
+ if (pmd_none(*pmd))
+ return NULL;
+
+ return pte_offset_kernel(pmd, address);
+}
+
/*
* The error_code parameter is
* - DSISR for a non-SLB data access fault,
@@ -93,6 +108,7 @@ int do_page_fault(struct pt_regs *regs,
unsigned long is_write = error_code & 0x02000000;
unsigned long trap = TRAP(regs);
unsigned long is_exec = trap == 0x400;
+ pte_t *ptep;
BUG_ON((trap == 0x380) || (trap == 0x480));
@@ -245,6 +261,15 @@ bad_area_nosemaphore:
info.si_addr = (void __user *) address;
force_sig_info(SIGSEGV, &info, current);
return 0;
+ }
+
+ ptep = lookup_address(address);
+
+ if (ptep && pte_present(*ptep) && !pte_exec(*ptep)) {
+ if (printk_ratelimit())
+ printk(KERN_CRIT "kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n", current->uid);
+ show_stack(current, (unsigned long *)__get_SP());
+ do_exit(SIGKILL);
}
return SIGSEGV;
diff -puN arch/ppc64/mm/hash_utils.c~nx-kernel-ppc64 arch/ppc64/mm/hash_utils.c
--- linux-2.6-bk/arch/ppc64/mm/hash_utils.c~nx-kernel-ppc64 Thu Oct 7 15:23:55 2004
+++ linux-2.6-bk-moilanen/arch/ppc64/mm/hash_utils.c Thu Oct 7 15:23:55 2004
@@ -52,6 +52,7 @@
#include <asm/cacheflush.h>
#include <asm/cputable.h>
#include <asm/abs_addr.h>
+#include <asm/sections.h>
#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
@@ -89,12 +90,23 @@ static inline void loop_forever(void)
;
}
+int is_kernel_text(unsigned long addr)
+{
+ if (addr >= (unsigned long)_stext && addr < (unsigned long)__init_end)
+ return 1;
+
+ return 0;
+}
+
+
+
#ifdef CONFIG_PPC_MULTIPLATFORM
static inline void create_pte_mapping(unsigned long start, unsigned long end,
unsigned long mode, int large)
{
unsigned long addr;
unsigned int step;
+ unsigned long tmp_mode;
if (large)
step = 16*MB;
@@ -112,6 +124,13 @@ static inline void create_pte_mapping(un
else
vpn = va >> PAGE_SHIFT;
+
+ tmp_mode = mode;
+
+ /* Make non-kernel text non-executable */
+ if (!is_kernel_text(addr))
+ tmp_mode = mode | HW_NO_EXEC;
+
hash = hpt_hash(vpn, large);
hpteg = ((hash & htab_data.htab_hash_mask)*HPTES_PER_GROUP);
@@ -120,12 +139,12 @@ static inline void create_pte_mapping(un
if (systemcfg->platform & PLATFORM_LPAR)
ret = pSeries_lpar_hpte_insert(hpteg, va,
virt_to_abs(addr) >> PAGE_SHIFT,
- 0, mode, 1, large);
+ 0, tmp_mode, 1, large);
else
#endif /* CONFIG_PPC_PSERIES */
ret = native_hpte_insert(hpteg, va,
virt_to_abs(addr) >> PAGE_SHIFT,
- 0, mode, 1, large);
+ 0, tmp_mode, 1, large);
if (ret == -1) {
ppc64_terminate_msg(0x20, "create_pte_mapping");
@@ -239,8 +258,6 @@ unsigned int hash_page_do_lazy_icache(un
{
struct page *page;
-#define PPC64_HWNOEXEC (1 << 2)
-
if (!pfn_valid(pte_pfn(pte)))
return pp;
@@ -251,8 +268,8 @@ unsigned int hash_page_do_lazy_icache(un
if (trap == 0x400) {
__flush_dcache_icache(page_address(page));
set_bit(PG_arch_1, &page->flags);
- } else
- pp |= PPC64_HWNOEXEC;
+ } else
+ pp |= HW_NO_EXEC;
}
return pp;
}
diff -puN include/asm-ppc64/mmu.h~nx-kernel-ppc64 include/asm-ppc64/mmu.h
diff -puN include/asm-ppc64/pgtable.h~nx-kernel-ppc64 include/asm-ppc64/pgtable.h
--- linux-2.6-bk/include/asm-ppc64/pgtable.h~nx-kernel-ppc64 Thu Oct 7 15:23:55 2004
+++ linux-2.6-bk-moilanen/include/asm-ppc64/pgtable.h Thu Oct 7 15:23:55 2004
@@ -101,6 +101,12 @@
/* Bits 0x7000 identify the index within an HPT Group */
#define _PAGE_HPTEFLAGS (_PAGE_BUSY | _PAGE_HASHPTE | _PAGE_SECONDARY | _PAGE_GROUP_IX)
+#define HW_NO_EXEC _PAGE_EXEC /* Same value as _PAGE_EXEC, but used
+ * where the sense of the bit is
+ * inverted; the separate name should
+ * make it clearer in the code what
+ * is going on. */
+
/* PAGE_MASK gives the right answer below, but only by accident */
/* It should be preserving the high 48 bits and then specifically */
/* preserving _PAGE_SECONDARY | _PAGE_GROUP_IX */
@@ -120,6 +126,7 @@
#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
#define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_WRENABLE)
+#define PAGE_KERNEL_EXEC __pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_EXEC)
/*
* POWER4 and newer have per page execute protection, older chips can only
@@ -266,6 +273,7 @@ int hash_huge_page(struct mm_struct *mm,
* The following only work if pte_present() is true.
* Undefined behaviour if not..
*/
+static inline int pte_user(pte_t pte) { return pte_val(pte) & _PAGE_USER;}
static inline int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_USER;}
static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW;}
static inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXEC;}
diff -puN arch/ppc64/kernel/iSeries_setup.c~nx-kernel-ppc64 arch/ppc64/kernel/iSeries_setup.c
--- linux-2.6-bk/arch/ppc64/kernel/iSeries_setup.c~nx-kernel-ppc64 Thu Oct 7 15:23:55 2004
+++ linux-2.6-bk-moilanen/arch/ppc64/kernel/iSeries_setup.c Thu Oct 7 15:23:55 2004
@@ -622,6 +622,7 @@ static void __init iSeries_bolt_kernel(u
{
unsigned long pa;
unsigned long mode_rw = _PAGE_ACCESSED | _PAGE_COHERENT | PP_RWXX;
+ unsigned long tmp_mode;
HPTE hpte;
for (pa = saddr; pa < eaddr ;pa += PAGE_SIZE) {
@@ -630,6 +631,12 @@ static void __init iSeries_bolt_kernel(u
unsigned long va = (vsid << 28) | (pa & 0xfffffff);
unsigned long vpn = va >> PAGE_SHIFT;
unsigned long slot = HvCallHpt_findValid(&hpte, vpn);
+
+ tmp_mode = mode_rw;
+
+ /* Make non-kernel text non-executable */
+ if (!is_kernel_text(ea))
+ tmp_mode = mode_rw | HW_NO_EXEC;
if (hpte.dw0.dw0.v) {
/* HPTE exists, so just bolt it */
_