[RFC] implicit hugetlb pages (mmu_context_to_struct)
Adam Litke
agl at us.ibm.com
Sat Jan 10 08:29:38 EST 2004
mmu_context_to_struct (2.6.0):
This patch converts the ppc64 mm_context_t type (and with it the mm->context
field of mm_struct) from a bare unsigned long into a structure. It is needed
by the dynamic address space resizing patch.
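In short, the typedef in include/asm-ppc64/mmu.h changes as sketched below, and
every user of mm->context is updated to go through the new .flags member. The
sketch is illustrative of the pattern only; the CONTEXT_LOW_HPAGES test is the
one from the hugetlbpage code the patch touches:

/* Old: the context is a bare integer, so flag bits are or'ed straight in. */
typedef unsigned long mm_context_t;

/* New: the same bits live in a named member, leaving room for further
 * fields (e.g. an address space size) in the follow-on resizing patch. */
typedef struct {
	unsigned long flags;
} mm_context_t;

/* Call sites are converted mechanically, for example: */
if (mm->context.flags & CONTEXT_LOW_HPAGES)	/* was: mm->context & CONTEXT_LOW_HPAGES */
	return 0;	/* the low hugepage window is already open */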
-- snip --
diff -purN linux-2.6.0/arch/ppc64/kernel/htab.c linux-2.6.0-context-struct/arch/ppc64/kernel/htab.c
--- linux-2.6.0/arch/ppc64/kernel/htab.c 2003-12-17 18:58:57.000000000 -0800
+++ linux-2.6.0-context-struct/arch/ppc64/kernel/htab.c 2004-01-08 15:23:05.000000000 -0800
@@ -390,7 +390,7 @@ int hash_page(unsigned long ea, unsigned
if (mm == NULL)
return 1;
- vsid = get_vsid(mm->context, ea);
+ vsid = get_vsid(mm->context.flags, ea);
break;
case IO_REGION_ID:
mm = &ioremap_mm;
diff -purN linux-2.6.0/arch/ppc64/kernel/stab.c linux-2.6.0-context-struct/arch/ppc64/kernel/stab.c
--- linux-2.6.0/arch/ppc64/kernel/stab.c 2003-12-17 18:59:17.000000000 -0800
+++ linux-2.6.0-context-struct/arch/ppc64/kernel/stab.c 2004-01-08 15:23:05.000000000 -0800
@@ -270,14 +270,14 @@ int ste_allocate(unsigned long ea)
if (REGION_ID(ea) >= KERNEL_REGION_ID) {
kernel_segment = 1;
vsid = get_kernel_vsid(ea);
- context = REGION_ID(ea);
+ context.flags = REGION_ID(ea);
} else {
if (! current->mm)
return 1;
context = current->mm->context;
- vsid = get_vsid(context, ea);
+ vsid = get_vsid(context.flags, ea);
}
esid = GET_ESID(ea);
@@ -307,7 +307,7 @@ static void preload_stab(struct task_str
for (esid = 0; esid < 16; esid++) {
unsigned long ea = esid << SID_SHIFT;
- vsid = get_vsid(mm->context, ea);
+ vsid = get_vsid(mm->context.flags, ea);
__ste_allocate(esid, vsid, 0, mm->context);
}
} else {
@@ -321,7 +321,7 @@ static void preload_stab(struct task_str
if (!IS_VALID_EA(pc) ||
(REGION_ID(pc) >= KERNEL_REGION_ID))
return;
- vsid = get_vsid(mm->context, pc);
+ vsid = get_vsid(mm->context.flags, pc);
__ste_allocate(GET_ESID(pc), vsid, 0, mm->context);
}
@@ -329,7 +329,7 @@ static void preload_stab(struct task_str
if (!IS_VALID_EA(stack) ||
(REGION_ID(stack) >= KERNEL_REGION_ID))
return;
- vsid = get_vsid(mm->context, stack);
+ vsid = get_vsid(mm->context.flags, stack);
__ste_allocate(GET_ESID(stack), vsid, 0, mm->context);
}
}
diff -purN linux-2.6.0/arch/ppc64/mm/hugetlbpage.c linux-2.6.0-context-struct/arch/ppc64/mm/hugetlbpage.c
--- linux-2.6.0/arch/ppc64/mm/hugetlbpage.c 2003-12-17 18:58:50.000000000 -0800
+++ linux-2.6.0-context-struct/arch/ppc64/mm/hugetlbpage.c 2004-01-08 15:50:29.000000000 -0800
@@ -245,7 +245,7 @@ static int open_32bit_htlbpage_range(str
struct vm_area_struct *vma;
unsigned long addr;
- if (mm->context & CONTEXT_LOW_HPAGES)
+ if (mm->context.flags & CONTEXT_LOW_HPAGES)
return 0; /* The window is already open */
/* Check no VMAs are in the region */
@@ -282,7 +282,7 @@ static int open_32bit_htlbpage_range(str
/* FIXME: do we need to scan for PTEs too? */
- mm->context |= CONTEXT_LOW_HPAGES;
+ mm->context.flags |= CONTEXT_LOW_HPAGES;
/* the context change must make it to memory before the slbia,
* so that further SLB misses do the right thing. */
@@ -590,7 +590,6 @@ full_search:
}
}
-
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
unsigned long len, unsigned long pgoff,
unsigned long flags)
@@ -780,7 +779,7 @@ static void flush_hash_hugepage(mm_conte
BUG_ON(hugepte_bad(pte));
BUG_ON(!in_hugepage_area(context, ea));
- vsid = get_vsid(context, ea);
+ vsid = get_vsid(context.flags, ea);
va = (vsid << 28) | (ea & 0x0fffffff);
vpn = va >> LARGE_PAGE_SHIFT;
diff -purN linux-2.6.0/arch/ppc64/mm/init.c linux-2.6.0-context-struct/arch/ppc64/mm/init.c
--- linux-2.6.0/arch/ppc64/mm/init.c 2003-12-17 18:58:57.000000000 -0800
+++ linux-2.6.0-context-struct/arch/ppc64/mm/init.c 2004-01-08 15:23:05.000000000 -0800
@@ -275,7 +275,7 @@ flush_tlb_page(struct vm_area_struct *vm
break;
case USER_REGION_ID:
pgd = pgd_offset( vma->vm_mm, vmaddr );
- context = vma->vm_mm->context;
+ context = vma->vm_mm->context.flags;
/* XXX are there races with checking cpu_vm_mask? - Anton */
tmp = cpumask_of_cpu(smp_processor_id());
@@ -327,7 +327,7 @@ __flush_tlb_range(struct mm_struct *mm,
break;
case USER_REGION_ID:
pgd = pgd_offset(mm, start);
- context = mm->context;
+ context = mm->context.flags;
/* XXX are there races with checking cpu_vm_mask? - Anton */
tmp = cpumask_of_cpu(smp_processor_id());
@@ -431,7 +431,7 @@ void __init mm_init_ppc64(void)
mmu_context_queue.tail = NUM_USER_CONTEXT-1;
mmu_context_queue.size = NUM_USER_CONTEXT;
for(index=0; index < NUM_USER_CONTEXT ;index++) {
- mmu_context_queue.elements[index] = index+FIRST_USER_CONTEXT;
+ mmu_context_queue.elements[index].flags = index+FIRST_USER_CONTEXT;
}
/* Setup guard pages for the Paca's */
@@ -717,7 +717,7 @@ void update_mmu_cache(struct vm_area_str
return;
ptep = find_linux_pte(pgdir, ea);
- vsid = get_vsid(vma->vm_mm->context, ea);
+ vsid = get_vsid(vma->vm_mm->context.flags, ea);
tmp = cpumask_of_cpu(smp_processor_id());
if (cpus_equal(vma->vm_mm->cpu_vm_mask, tmp))
diff -purN linux-2.6.0/arch/ppc64/xmon/xmon.c linux-2.6.0-context-struct/arch/ppc64/xmon/xmon.c
--- linux-2.6.0/arch/ppc64/xmon/xmon.c 2003-12-17 18:59:28.000000000 -0800
+++ linux-2.6.0-context-struct/arch/ppc64/xmon/xmon.c 2004-01-08 15:23:05.000000000 -0800
@@ -1936,7 +1936,7 @@ mem_translate()
// if in user range, use the current task's page directory
else if ( ( ea >= USER_START ) && ( ea <= USER_END ) ) {
mm = current->mm;
- vsid = get_vsid(mm->context, ea );
+ vsid = get_vsid(mm->context.flags, ea );
}
pgdir = mm->pgd;
va = ( vsid << 28 ) | ( ea & 0x0fffffff );
diff -purN linux-2.6.0/include/asm-ppc64/mmu.h linux-2.6.0-context-struct/include/asm-ppc64/mmu.h
--- linux-2.6.0/include/asm-ppc64/mmu.h 2003-12-17 18:59:05.000000000 -0800
+++ linux-2.6.0-context-struct/include/asm-ppc64/mmu.h 2004-01-08 15:42:11.000000000 -0800
@@ -15,8 +15,10 @@
#ifndef __ASSEMBLY__
-/* Default "unsigned long" context */
-typedef unsigned long mm_context_t;
+/* Time to allow for more things here */
+typedef struct {
+ unsigned long flags;
+} mm_context_t;
#ifdef CONFIG_HUGETLB_PAGE
#define CONTEXT_LOW_HPAGES (1UL<<63)
diff -purN linux-2.6.0/include/asm-ppc64/mmu_context.h linux-2.6.0-context-struct/include/asm-ppc64/mmu_context.h
--- linux-2.6.0/include/asm-ppc64/mmu_context.h 2003-12-17 18:58:40.000000000 -0800
+++ linux-2.6.0-context-struct/include/asm-ppc64/mmu_context.h 2004-01-08 15:43:07.000000000 -0800
@@ -127,8 +127,8 @@ destroy_context(struct mm_struct *mm)
#endif
mmu_context_queue.size++;
- mmu_context_queue.elements[index] =
- mm->context & ~CONTEXT_LOW_HPAGES;
+ mmu_context_queue.elements[index].flags =
+ mm->context.flags & ~CONTEXT_LOW_HPAGES;
spin_unlock_irqrestore(&mmu_context_queue.lock, flags);
}
diff -purN linux-2.6.0/include/asm-ppc64/page.h linux-2.6.0-context-struct/include/asm-ppc64/page.h
--- linux-2.6.0/include/asm-ppc64/page.h 2003-12-17 18:58:04.000000000 -0800
+++ linux-2.6.0-context-struct/include/asm-ppc64/page.h 2004-01-08 15:51:39.000000000 -0800
@@ -32,6 +32,7 @@
/* For 64-bit processes the hugepage range is 1T-1.5T */
#define TASK_HPAGE_BASE (0x0000010000000000UL)
#define TASK_HPAGE_END (0x0000018000000000UL)
+
/* For 32-bit processes the hugepage range is 2-3G */
#define TASK_HPAGE_BASE_32 (0x80000000UL)
#define TASK_HPAGE_END_32 (0xc0000000UL)
@@ -39,14 +40,14 @@
#define ARCH_HAS_HUGEPAGE_ONLY_RANGE
#define is_hugepage_only_range(addr, len) \
( ((addr > (TASK_HPAGE_BASE-len)) && (addr < TASK_HPAGE_END)) || \
- ((current->mm->context & CONTEXT_LOW_HPAGES) && \
+ ((current->mm->context.flags & CONTEXT_LOW_HPAGES) && \
(addr > (TASK_HPAGE_BASE_32-len)) && (addr < TASK_HPAGE_END_32)) )
+
#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
-
#define in_hugepage_area(context, addr) \
((cur_cpu_spec->cpu_features & CPU_FTR_16M_PAGE) && \
((((addr) >= TASK_HPAGE_BASE) && ((addr) < TASK_HPAGE_END)) || \
- (((context) & CONTEXT_LOW_HPAGES) && \
+ (((context.flags) & CONTEXT_LOW_HPAGES) && \
(((addr) >= TASK_HPAGE_BASE_32) && ((addr) < TASK_HPAGE_END_32)))))
#else /* !CONFIG_HUGETLB_PAGE */
diff -purN linux-2.6.0/include/asm-ppc64/tlb.h linux-2.6.0-context-struct/include/asm-ppc64/tlb.h
--- linux-2.6.0/include/asm-ppc64/tlb.h 2003-12-17 18:58:40.000000000 -0800
+++ linux-2.6.0-context-struct/include/asm-ppc64/tlb.h 2004-01-08 15:23:05.000000000 -0800
@@ -65,7 +65,7 @@ static inline void __tlb_remove_tlb_entr
if (cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask))
local = 1;
- flush_hash_range(tlb->mm->context, i, local);
+ flush_hash_range(tlb->mm->context.flags, i, local);
i = 0;
}
}
@@ -84,7 +84,7 @@ static inline void tlb_flush(struct mmu_
if (cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask))
local = 1;
- flush_hash_range(tlb->mm->context, batch->index, local);
+ flush_hash_range(tlb->mm->context.flags, batch->index, local);
batch->index = 0;
}
--
Adam Litke - (agl at us.ibm.com)
IBM Linux Technology Center
** Sent via the linuxppc64-dev mail list. See http://lists.linuxppc.org/