[4/5] STAB cleanup - remove check duplication
David Gibson
david@gibson.dropbear.id.au
Tue Aug 3 12:15:24 EST 2004
Rearrange the ste_allocate()/__ste_allocate() path in the PPC64
segment table code more sensibly. The various valid-address checks
move into the lower-level __ste_allocate(), so they no longer need to
be duplicated in preload_stab().
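
To make the new shape easier to review, this is roughly what the
allocation path looks like once the patch is applied (reassembled
from the hunks below, so context lines falling at hunk boundaries
are approximate):

static int __ste_allocate(unsigned long ea, struct mm_struct *mm)
{
	unsigned long vsid;
	unsigned char stab_entry;
	unsigned long offset;

	/* Check for invalid effective addresses. */
	if (!IS_VALID_EA(ea))
		return 1;

	/* Kernel addresses get a kernel VSID; user addresses need an mm. */
	if (ea >= KERNELBASE) {
		vsid = get_kernel_vsid(ea);
	} else {
		if (! mm)
			return 1;
		vsid = get_vsid(mm->context.id, ea);
	}

	stab_entry = make_ste(get_paca()->stab_addr, GET_ESID(ea), vsid);

	/* Only user segments are entered in the cast-out cache. */
	if (ea < KERNELBASE) {
		offset = __get_cpu_var(stab_cache_ptr);
		if (offset < NR_STAB_CACHE_ENTRIES)
			__get_cpu_var(stab_cache[offset++]) = stab_entry;
		else
			offset = NR_STAB_CACHE_ENTRIES+1;
		__get_cpu_var(stab_cache_ptr) = offset;

		/* Order update */
		asm volatile("sync":::"memory");
	}

	return 0;
}

int ste_allocate(unsigned long ea)
{
	return __ste_allocate(ea, current->mm);
}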
Signed-off-by: David Gibson <dwg@au1.ibm.com>
Index: working-2.6/arch/ppc64/mm/stab.c
===================================================================
--- working-2.6.orig/arch/ppc64/mm/stab.c	2004-07-30 13:17:34.828858504 +1000
+++ working-2.6/arch/ppc64/mm/stab.c	2004-07-30 13:39:57.338854248 +1000
@@ -106,30 +106,14 @@
 	return (global_entry | (castout_entry & 0x7));
 }
 
-static inline void __ste_allocate(unsigned long esid, unsigned long vsid)
-{
-	unsigned char stab_entry;
-	unsigned long offset;
-
-	stab_entry = make_ste(get_paca()->stab_addr, esid, vsid);
-
-	if ((esid << SID_SHIFT) >= KERNELBASE)
-		return;
-
-	offset = __get_cpu_var(stab_cache_ptr);
-	if (offset < NR_STAB_CACHE_ENTRIES)
-		__get_cpu_var(stab_cache[offset++]) = stab_entry;
-	else
-		offset = NR_STAB_CACHE_ENTRIES+1;
-	__get_cpu_var(stab_cache_ptr) = offset;
-}
-
 /*
- * Allocate a segment table entry for the given ea.
+ * Allocate a segment table entry for the given ea and mm
  */
-int ste_allocate(unsigned long ea)
+static int __ste_allocate(unsigned long ea, struct mm_struct *mm)
 {
 	unsigned long vsid;
+	unsigned char stab_entry;
+	unsigned long offset;
 
 	/* Check for invalid effective addresses. */
 	if (!IS_VALID_EA(ea))
@@ -139,19 +123,34 @@
 	if (ea >= KERNELBASE) {
 		vsid = get_kernel_vsid(ea);
 	} else {
-		if (!current->mm)
+		if (! mm)
 			return 1;
-		vsid = get_vsid(current->mm->context.id, ea);
+		vsid = get_vsid(mm->context.id, ea);
 	}
 
-	__ste_allocate(GET_ESID(ea), vsid);
-	/* Order update */
-	asm volatile("sync":::"memory");
+	stab_entry = make_ste(get_paca()->stab_addr, GET_ESID(ea), vsid);
+
+	if (ea < KERNELBASE) {
+		offset = __get_cpu_var(stab_cache_ptr);
+		if (offset < NR_STAB_CACHE_ENTRIES)
+			__get_cpu_var(stab_cache[offset++]) = stab_entry;
+		else
+			offset = NR_STAB_CACHE_ENTRIES+1;
+		__get_cpu_var(stab_cache_ptr) = offset;
+
+		/* Order update */
+		asm volatile("sync":::"memory");
+	}
 
 	return 0;
 }
 
+int ste_allocate(unsigned long ea)
+{
+	return __ste_allocate(ea, current->mm);
+}
+
 /*
  * preload some userspace segments into the segment table.
  */
@@ -160,34 +159,24 @@
 	unsigned long pc = KSTK_EIP(tsk);
 	unsigned long stack = KSTK_ESP(tsk);
 	unsigned long unmapped_base;
-	unsigned long vsid;
 
 	if (test_tsk_thread_flag(tsk, TIF_32BIT))
 		unmapped_base = TASK_UNMAPPED_BASE_USER32;
 	else
 		unmapped_base = TASK_UNMAPPED_BASE_USER64;
 
-	if (!IS_VALID_EA(pc) || (pc >= KERNELBASE))
-		return;
-	vsid = get_vsid(mm->context.id, pc);
-	__ste_allocate(GET_ESID(pc), vsid);
+	__ste_allocate(pc, mm);
 
 	if (GET_ESID(pc) == GET_ESID(stack))
 		return;
 
-	if (!IS_VALID_EA(stack) || (stack >= KERNELBASE))
-		return;
-	vsid = get_vsid(mm->context.id, stack);
-	__ste_allocate(GET_ESID(stack), vsid);
+	__ste_allocate(stack, mm);
 
 	if ((GET_ESID(pc) == GET_ESID(unmapped_base))
 	    || (GET_ESID(stack) == GET_ESID(unmapped_base)))
 		return;
 
-	if (!IS_VALID_EA(unmapped_base) || (unmapped_base >= KERNELBASE))
-		return;
-	vsid = get_vsid(mm->context.id, unmapped_base);
-	__ste_allocate(GET_ESID(unmapped_base), vsid);
+	__ste_allocate(unmapped_base, mm);
 
 	/* Order update */
 	asm volatile("sync" : : : "memory");
--
David Gibson			| For every complex problem there is a
david AT gibson.dropbear.id.au	| solution which is simple, neat and
				| wrong.
http://www.ozlabs.org/people/dgibson