[Lguest] [PATCH v2 2/3] lguest: Physical Address Extension support

Matias Zabaljauregui zabaljauregui at gmail.com
Mon Sep 29 14:40:07 EST 2008


This patch adds Physical Address Extension (PAE) support to lguest.
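
With PAE enabled the Guest uses three-level page tables: a four-entry,
32-byte page directory pointer table (pdpt), then PMD and PTE pages of
512 eight-byte entries each.  Both sides of lguest need to learn about
the extra level.

Guest side: the CPUID leaf-1 EDX mask now lets the PAE bit through
(0x07808111 becomes 0x07808151), and pv_mmu_ops grows the PAE entry
points (set_pud, set_pte_atomic, set_pte_present, pte_clear,
pmd_clear); the PAE versions of set_pte and set_pte_present split the
64-bit PTE writes, with smp_wmb() between the halves.

Host side: the shadow page tables gain a middle (PMD) level, a new
LHCALL_SET_PUD hypercall lets the Guest invalidate shadow top-level
entries, and setup_pagetables() builds a pdpt plus a pmd page for the
initial identity/linear mapping.  The Switcher moves from -4MB to -2MB
(a single PTE page only covers 2MB under PAE), so the Guest is now told
to reserve the top 2MB instead of 4MB.

For reference, here is a stand-alone sketch of the address split that
the new spmd_addr()/gpmd_addr()/pte_index() lookups rely on.  It is not
part of the patch; the constants are the usual 2/9/9/12 PAE split and
the example address is made up:

#include <stdio.h>

#define PAE_PGDIR_SHIFT		30	/* each of the 4 pdpt entries maps 1GB */
#define PAE_PMD_SHIFT		21	/* each PMD entry maps 2MB */
#define PAE_PTRS_PER_PMD	512
#define PAE_PTRS_PER_PTE	512

int main(void)
{
	unsigned long vaddr = 0xC0123456UL;	/* made-up kernel address */

	printf("pgd_index=%lu pmd_index=%lu pte_index=%lu offset=%#lx\n",
	       vaddr >> PAE_PGDIR_SHIFT,
	       (vaddr >> PAE_PMD_SHIFT) & (PAE_PTRS_PER_PMD - 1),
	       (vaddr >> 12) & (PAE_PTRS_PER_PTE - 1),
	       vaddr & 0xfffUL);
	return 0;
}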

Signed-off-by: Matias Zabaljauregui <zabaljauregui at gmail.com>

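Also not part of the patch: a user-space sketch of the argument
encoding that lguest_set_pud() uses for the new LHCALL_SET_PUD call.
The pdpt holds four 8-byte entries in a single 32-byte block, so the
Guest passes the 32-byte-aligned pdpt address as the first argument and
recovers the entry index (0-3) from the low five bits of the entry's
physical address.  The sample address below is made up:

#include <stdio.h>

int main(void)
{
	/* Made-up physical address of the third (index 2) pdpt entry. */
	unsigned long pud_phys = 0x1234560UL + 2 * 8;

	unsigned long arg1 = pud_phys & 0xFFFFFFE0UL;	/* pdpt base */
	unsigned long arg2 = (pud_phys & 0x1FUL) / 8;	/* entry index */

	printf("LHCALL_SET_PUD args: base=%#lx index=%lu\n", arg1, arg2);
	return 0;
}
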
diff --git a/arch/x86/lguest/Kconfig b/arch/x86/lguest/Kconfig
index c70e12b..a108c09 100644
--- a/arch/x86/lguest/Kconfig
+++ b/arch/x86/lguest/Kconfig
@@ -2,7 +2,6 @@ config LGUEST_GUEST
 	bool "Lguest guest support"
 	select PARAVIRT
 	depends on X86_32
-	depends on !X86_PAE
 	depends on !X86_VOYAGER
 	select VIRTIO
 	select VIRTIO_RING
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index d9249a8..fc2331e 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -334,8 +334,12 @@ static void lguest_cpuid(unsigned int *ax, unsigned int *bx,
 	case 1:	/* Basic feature request. */
 		/* We only allow kernel to see SSE3, CMPXCHG16B and SSSE3 */
 		*cx &= 0x00002201;
-		/* SSE, SSE2, FXSR, MMX, CMOV, CMPXCHG8B, TSC, FPU. */
+		/* SSE, SSE2, FXSR, MMX, CMOV, CMPXCHG8B, TSC, FPU, PAE. */
+#ifdef CONFIG_X86_PAE
+		*dx &= 0x07808151;
+#else
 		*dx &= 0x07808111;
+#endif
 		/* The Host can do a nice optimization if it knows that the
 		 * kernel mappings (addresses above 0xC0000000 or whatever
 		 * PAGE_OFFSET is set to) haven't changed.  But Linux calls
@@ -481,15 +485,34 @@ static void lguest_set_pte_at(struct mm_struct *mm, unsigned long addr,
 	lazy_hcall(LHCALL_SET_PTE, __pa(mm->pgd), addr, pteval.pte_low);
 }

+#ifdef CONFIG_X86_PAE
 /* The Guest calls this to set a top-level entry.  Again, we set the entry then
  * tell the Host which top-level page we changed, and the index of the entry we
  * changed. */
+static void lguest_set_pud(pud_t *pudp, pud_t pudval)
+{
+	*pudp = pudval;
+	/* 32-byte aligned pdpt address. */
+	lazy_hcall(LHCALL_SET_PUD, __pa(pudp) & 0xFFFFFFE0,
+		   (__pa(pudp) & 0x1F) / 8, 0);
+}
+
+/* The Guest calls this to set a PMD entry when PAE is active. */
+static void lguest_set_pmd(pmd_t *pmdp, pmd_t pmdval)
+{
+	*pmdp = pmdval;
+	lazy_hcall(LHCALL_SET_PMD, __pa(pmdp) & PAGE_MASK,
+		   (__pa(pmdp) & (PAGE_SIZE - 1)) / 8, 0);
+}
+
+#else
 static void lguest_set_pmd(pmd_t *pmdp, pmd_t pmdval)
 {
 	*pmdp = pmdval;
 	lazy_hcall(LHCALL_SET_PMD, __pa(pmdp)&PAGE_MASK,
 		   (__pa(pmdp)&(PAGE_SIZE-1))/4, 0);
 }
+#endif

 /* There are a couple of legacy places where the kernel sets a PTE, but we
  * don't know the top level any more.  This is useless for us, since we don't
@@ -501,12 +524,57 @@ static void lguest_set_pmd(pmd_t *pmdp, pmd_t pmdval)
  * anything changed until we've done the first page table switch. */
 static void lguest_set_pte(pte_t *ptep, pte_t pteval)
 {
+#ifdef CONFIG_X86_PAE
+	ptep->pte_high = pteval.pte_high;
+	smp_wmb();
+	ptep->pte_low = pteval.pte_low;
+#else
 	*ptep = pteval;
+#endif
+
+	/* Don't bother with hypercall before initial setup. */
+	if (current_cr3)
+		lazy_hcall(LHCALL_FLUSH_TLB, 1, 0, 0);
+}
+
+#ifdef CONFIG_X86_PAE
+static void lguest_set_pte_atomic(pte_t *ptep, pte_t pte)
+{
+	set_64bit((u64 *)ptep, pte.pte);
+
 	/* Don't bother with hypercall before initial setup. */
 	if (current_cr3)
 		lazy_hcall(LHCALL_FLUSH_TLB, 1, 0, 0);
 }

+static inline void lguest_set_pte_present(struct mm_struct *mm,
+						unsigned long addr,
+						pte_t *ptep, pte_t pte)
+{
+	ptep->pte_low = 0;
+	smp_wmb();
+	ptep->pte_high = pte.pte_high;
+	smp_wmb();
+	ptep->pte_low = pte.pte_low;
+
+	lazy_hcall(LHCALL_SET_PTE, __pa(mm->pgd), addr, pte.pte_low);
+}
+
+void lguest_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+	ptep->pte_low = 0;
+	smp_wmb();
+	ptep->pte_high = 0;
+
+	lazy_hcall(LHCALL_SET_PTE, current_cr3, addr, 0);
+}
+
+void lguest_pmd_clear(pmd_t *pmdp)
+{
+	lguest_set_pmd(pmdp, __pmd(0));
+}
+#endif
+
 /* Unfortunately for Lguest, the pv_mmu_ops for page tables were based on
  * native page table operations.  On native hardware you can set a new page
  * table entry whenever you want, but if you want to remove one you have to do
@@ -983,6 +1051,14 @@ __init void lguest_init(void)
 	pv_mmu_ops.set_pte = lguest_set_pte;
 	pv_mmu_ops.set_pte_at = lguest_set_pte_at;
 	pv_mmu_ops.set_pmd = lguest_set_pmd;
+
+#ifdef CONFIG_X86_PAE
+	pv_mmu_ops.set_pte_atomic = lguest_set_pte_atomic;
+	pv_mmu_ops.set_pte_present = lguest_set_pte_present;
+	pv_mmu_ops.pte_clear = lguest_pte_clear;
+	pv_mmu_ops.pmd_clear = lguest_pmd_clear;
+	pv_mmu_ops.set_pud = lguest_set_pud;
+#endif
 	pv_mmu_ops.read_cr2 = lguest_read_cr2;
 	pv_mmu_ops.read_cr3 = lguest_read_cr3;
 	pv_mmu_ops.lazy_mode.enter = paravirt_enter_lazy_mmu;
diff --git a/drivers/lguest/Kconfig b/drivers/lguest/Kconfig
index 76f2b36..5d491bb 100644
--- a/drivers/lguest/Kconfig
+++ b/drivers/lguest/Kconfig
@@ -1,6 +1,6 @@
 config LGUEST
 	tristate "Linux hypervisor example code"
-	depends on X86_32 && EXPERIMENTAL && !X86_PAE && FUTEX && !X86_VOYAGER
+	depends on X86_32 && EXPERIMENTAL && FUTEX && !X86_VOYAGER
 	select HVC_DRIVER
 	---help---
 	  This is a very simple module which allows you to run
diff --git a/drivers/lguest/hypercalls.c b/drivers/lguest/hypercalls.c
index 54d66f0..c5d6678 100644
--- a/drivers/lguest/hypercalls.c
+++ b/drivers/lguest/hypercalls.c
@@ -78,6 +78,11 @@ static void do_hcall(struct lg_cpu *cpu, struct hcall_args *args)
 	case LHCALL_SET_PMD:
 		guest_set_pmd(cpu->lg, args->arg1, args->arg2);
 		break;
+#ifdef CONFIG_X86_PAE
+	case LHCALL_SET_PUD:
+		guest_set_pud(cpu->lg, args->arg1, args->arg2);
+		break;
+#endif
 	case LHCALL_SET_CLOCKEVENT:
 		guest_set_clockevent(cpu, args->arg1);
 		break;
diff --git a/drivers/lguest/lg.h b/drivers/lguest/lg.h
index f2c641e..1549ba4 100644
--- a/drivers/lguest/lg.h
+++ b/drivers/lguest/lg.h
@@ -18,7 +18,7 @@ int init_pagetables(struct page **switcher_page, unsigned int pages);

 struct pgdir
 {
-	unsigned long gpgdir;
+	pgd_t *gpgdir;
 	pgd_t *pgdir;
 };

@@ -137,6 +137,8 @@ int run_guest(struct lg_cpu *cpu, unsigned long __user *user);
  * in the kernel. */
 #define pgd_flags(x)	(pgd_val(x) & ~PAGE_MASK)
 #define pgd_pfn(x)	(pgd_val(x) >> PAGE_SHIFT)
+#define pmd_flags(x)	(pmd_val(x) & ~PAGE_MASK)
+#define pmd_pfn(x)	(pmd_val(x) >> PAGE_SHIFT)

 /* interrupts_and_traps.c: */
 void maybe_do_interrupt(struct lg_cpu *cpu);
@@ -168,6 +170,9 @@ int init_guest_pagetable(struct lguest *lg);
 void free_guest_pagetable(struct lguest *lg);
 void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable);
 void guest_set_pmd(struct lguest *lg, unsigned long gpgdir, u32 i);
+#ifdef CONFIG_X86_PAE
+void guest_set_pud(struct lguest *lg, unsigned long gpgdir, u32 i);
+#endif
 void guest_pagetable_clear_all(struct lg_cpu *cpu);
 void guest_pagetable_flush_user(struct lg_cpu *cpu);
 void guest_set_pte(struct lg_cpu *cpu, unsigned long gpgdir,
diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
index 99b6f66..f606d68 100644
--- a/drivers/lguest/page_tables.c
+++ b/drivers/lguest/page_tables.c
@@ -47,12 +47,17 @@
  *  (vii) Setting up the page tables initially.
  :*/

-
 /* 1024 entries in a page table page maps 1024 pages: 4MB.  The Switcher is
  * conveniently placed at the top 4MB, so it uses a separate, complete PTE
  * page.  */
 #define SWITCHER_PGD_INDEX (PTRS_PER_PGD - 1)

+/* For PAE we need the PMD index as well: the Switcher uses the last 2MB, so
+ * it needs the last pmd entry of the last pmd page. */
+#ifdef CONFIG_X86_PAE
+#define SWITCHER_PMD_INDEX (PTRS_PER_PMD - 1)
+#endif
+
 /* We actually need a separate PTE page for each CPU.  Remember that after the
  * Switcher code itself comes two pages for each CPU, and we don't want this
  * CPU's guest to see the pages of any other CPU. */
@@ -73,39 +78,90 @@ static pgd_t *spgd_addr(struct lg_cpu *cpu, u32 i, unsigned long vaddr)
 {
 	unsigned int index = pgd_index(vaddr);

+#ifndef CONFIG_X86_PAE
 	/* We kill any Guest trying to touch the Switcher addresses. */
 	if (index >= SWITCHER_PGD_INDEX) {
 		kill_guest(cpu, "attempt to access switcher pages");
 		index = 0;
 	}
+#endif
 	/* Return a pointer index'th pgd entry for the i'th page table. */
 	return &cpu->lg->pgdirs[i].pgdir[index];
 }

+#ifdef CONFIG_X86_PAE
+/* This routine then takes the PGD entry given above, which contains the
+ * address of the PMD page.  It then returns a pointer to the PMD entry for the
+ * given address. */
+static pmd_t *spmd_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr)
+{
+	unsigned int index = pmd_index(vaddr);
+	pmd_t *page;
+
+	/* We kill any Guest trying to touch the Switcher addresses. */
+	if (pgd_index(vaddr) == SWITCHER_PGD_INDEX &&
+					index >= SWITCHER_PMD_INDEX) {
+		kill_guest(cpu, "attempt to access switcher pages");
+		index = 0;
+	}
+
+	/* You should never call this if the PGD entry wasn't valid */
+	BUG_ON(!(pgd_flags(spgd) & _PAGE_PRESENT));
+
+	page = __va(pgd_pfn(spgd) << PAGE_SHIFT);
+	return &page[index];
+}
+#endif
+
 /* This routine then takes the page directory entry returned above, which
  * contains the address of the page table entry (PTE) page.  It then returns a
  * pointer to the PTE entry for the given address. */
-static pte_t *spte_addr(pgd_t spgd, unsigned long vaddr)
+static pte_t *spte_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr)
 {
+#ifdef CONFIG_X86_PAE
+	pmd_t *pmd = spmd_addr(cpu, spgd, vaddr);
+	pte_t *page = __va(pmd_pfn(*pmd) << PAGE_SHIFT);
+
+	/* You should never call this if the PMD entry wasn't valid */
+	BUG_ON(!(pmd_flags(*pmd) & _PAGE_PRESENT));
+#else
 	pte_t *page = __va(pgd_pfn(spgd) << PAGE_SHIFT);
+
 	/* You should never call this if the PGD entry wasn't valid */
 	BUG_ON(!(pgd_flags(spgd) & _PAGE_PRESENT));
-	return &page[(vaddr >> PAGE_SHIFT) % PTRS_PER_PTE];
+#endif
+	return &page[pte_index(vaddr)];
 }

 /* These two functions just like the above two, except they access the Guest
  * page tables.  Hence they return a Guest address. */
-static unsigned long gpgd_addr(struct lg_cpu *cpu, unsigned long vaddr)
+static pgd_t *gpgd_addr(struct lg_cpu *cpu, unsigned long vaddr)
 {
 	unsigned int index = vaddr >> (PGDIR_SHIFT);
-	return cpu->lg->pgdirs[cpu->cpu_pgd].gpgdir + index * sizeof(pgd_t);
+	return cpu->lg->pgdirs[cpu->cpu_pgd].gpgdir + index;
+}
+
+#ifdef CONFIG_X86_PAE
+static unsigned long gpmd_addr(pgd_t gpgd, unsigned long vaddr)
+{
+	unsigned long gpage = pgd_pfn(gpgd) << PAGE_SHIFT;
+	BUG_ON(!(pgd_flags(gpgd) & _PAGE_PRESENT));
+	return gpage + pmd_index(vaddr) * sizeof(pmd_t);
 }
+#endif

-static unsigned long gpte_addr(pgd_t gpgd, unsigned long vaddr)
+static unsigned long gpte_addr(struct lg_cpu *cpu,
+				pgd_t gpgd, unsigned long vaddr)
 {
+#ifdef CONFIG_X86_PAE
+	pmd_t gpmd = lgread(cpu,
+		(unsigned long) gpmd_addr(gpgd, vaddr), pmd_t);
+	unsigned long gpage = pmd_pfn(gpmd) << PAGE_SHIFT;
+#else
 	unsigned long gpage = pgd_pfn(gpgd) << PAGE_SHIFT;
 	BUG_ON(!(pgd_flags(gpgd) & _PAGE_PRESENT));
-	return gpage + ((vaddr>>PAGE_SHIFT) % PTRS_PER_PTE) * sizeof(pte_t);
+#endif
+	return gpage + pte_index(vaddr) * sizeof(pte_t);
 }
 /*:*/

@@ -184,11 +240,24 @@ static void check_gpte(struct lg_cpu *cpu, pte_t gpte)

 static void check_gpgd(struct lg_cpu *cpu, pgd_t gpgd)
 {
+#ifdef CONFIG_X86_PAE
+	if ((pgd_flags(gpgd) & ~_PAGE_PRESENT) ||
+#else
 	if ((pgd_flags(gpgd) & ~_PAGE_TABLE) ||
+#endif
 	   (pgd_pfn(gpgd) >= cpu->lg->pfn_limit))
 		kill_guest(cpu, "bad page directory entry");
 }

+#ifdef CONFIG_X86_PAE
+static void check_gpmd(struct lg_cpu *cpu, pmd_t gpmd)
+{
+	if ((pmd_flags(gpmd) & ~_PAGE_TABLE) ||
+	   (pmd_pfn(gpmd) >= cpu->lg->pfn_limit))
+		kill_guest(cpu, "bad page middle directory entry");
+}
+#endif
+
 /*H:330
  * (i) Looking up a page table entry when the Guest faults.
  *
@@ -207,14 +276,21 @@ int demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
 	pte_t gpte;
 	pte_t *spte;

+#ifdef CONFIG_X86_PAE
+	pmd_t *spmd;
+	pmd_t gpmd;
+#endif
+
 	/* First step: get the top-level Guest page table entry. */
-	gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);
+	gpgd = lgread(cpu, (unsigned long) gpgd_addr(cpu, vaddr), pgd_t);
+
 	/* Toplevel not present?  We can't map it in. */
 	if (!(pgd_flags(gpgd) & _PAGE_PRESENT))
 		return 0;

 	/* Now look at the matching shadow entry. */
 	spgd = spgd_addr(cpu, cpu->cpu_pgd, vaddr);
+
 	if (!(pgd_flags(*spgd) & _PAGE_PRESENT)) {
 		/* No shadow entry: allocate a new shadow PTE page. */
 		unsigned long ptepage = get_zeroed_page(GFP_KERNEL);
@@ -231,9 +307,38 @@ int demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
 		*spgd = __pgd(__pa(ptepage) | pgd_flags(gpgd));
 	}

+#ifdef CONFIG_X86_PAE
+	gpmd = lgread(cpu, (unsigned long) gpmd_addr(gpgd, vaddr), pmd_t);
+	/* Middle level not present?  We can't map it in. */
+	if (!(pmd_flags(gpmd) & _PAGE_PRESENT))
+		return 0;
+
+	/* Now look at the matching shadow entry. */
+	spmd = spmd_addr(cpu, *spgd, vaddr);
+
+	if (!(pmd_flags(*spmd) & _PAGE_PRESENT)) {
+		/* No shadow entry: allocate a new shadow PTE page. */
+		unsigned long ptepage = get_zeroed_page(GFP_KERNEL);
+
+		/* This is not really the Guest's fault, but killing it is
+		 * simple for this corner case. */
+		if (!ptepage) {
+			kill_guest(cpu, "out of memory allocating pte page");
+			return 0;
+		}
+
+		/* We check that the Guest pmd is OK. */
+		check_gpmd(cpu, gpmd);
+
+		/* And we copy the flags to the shadow PMD entry.  The page
+		 * number in the shadow PMD is the page we just allocated. */
+		*spmd = __pmd(__pa(ptepage) | pmd_flags(gpmd));
+	}
+#endif
+
 	/* OK, now we look at the lower level in the Guest page table: keep its
 	 * address, because we might update it later. */
-	gpte_ptr = gpte_addr(gpgd, vaddr);
+	gpte_ptr = gpte_addr(cpu, gpgd, vaddr);
 	gpte = lgread(cpu, gpte_ptr, pte_t);

 	/* If this page isn't in the Guest page tables, we can't page it in. */
@@ -259,7 +364,7 @@ int demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
 		gpte = pte_mkdirty(gpte);

 	/* Get the pointer to the shadow PTE entry we're going to set. */
-	spte = spte_addr(*spgd, vaddr);
+	spte = spte_addr(cpu, *spgd, vaddr);
 	/* If there was a valid shadow PTE entry here before, we release it.
 	 * This can happen with a write to a previously read-only entry. */
 	release_pte(*spte);
@@ -300,15 +405,24 @@ static int page_writable(struct lg_cpu *cpu, unsigned long vaddr)
 {
 	pgd_t *spgd;
 	unsigned long flags;
+#ifdef CONFIG_X86_PAE
+	pmd_t *spmd;
+#endif

 	/* Look at the current top level entry: is it present? */
 	spgd = spgd_addr(cpu, cpu->cpu_pgd, vaddr);
 	if (!(pgd_flags(*spgd) & _PAGE_PRESENT))
 		return 0;

+#ifdef CONFIG_X86_PAE
+	spmd = spmd_addr(cpu, *spgd, vaddr);
+	if (!(pmd_flags(*spmd) & _PAGE_PRESENT))
+		return 0;
+#endif
+
 	/* Check the flags on the pte entry itself: it must be present and
 	 * writable. */
-	flags = pte_flags(*(spte_addr(*spgd, vaddr)));
+	flags = pte_flags(*(spte_addr(cpu, *spgd, vaddr)));

 	return (flags & (_PAGE_PRESENT|_PAGE_RW)) == (_PAGE_PRESENT|_PAGE_RW);
 }
@@ -322,8 +436,44 @@ void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
 		kill_guest(cpu, "bad stack page %#lx", vaddr);
 }

+#ifdef CONFIG_X86_PAE
+static void release_pmd(pmd_t *spmd)
+{
+	/* If the entry's not present, there's nothing to release. */
+	if (pmd_flags(*spmd) & _PAGE_PRESENT) {
+		unsigned int i;
+		pte_t *ptepage = __va(pmd_pfn(*spmd) << PAGE_SHIFT);
+		/* For each entry in the page, we might need to release it. */
+		for (i = 0; i < PTRS_PER_PTE; i++)
+			release_pte(ptepage[i]);
+		/* Now we can free the page of PTEs */
+		free_page((long)ptepage);
+		/* And zero out the PMD entry so we never release it twice. */
+		*spmd = __pmd(0);
+	}
+}
+
 /*H:450 If we chase down the release_pgd() code, it looks like this: */
-static void release_pgd(struct lguest *lg, pgd_t *spgd)
+static void release_pgd(pgd_t *spgd)
+{
+	/* If the entry's not present, there's nothing to release. */
+	if (pgd_flags(*spgd) & _PAGE_PRESENT) {
+		unsigned int i;
+		pmd_t *pmdpage = __va(pgd_pfn(*spgd) << PAGE_SHIFT);
+		for (i = 0; i < PTRS_PER_PMD; i++)
+			release_pmd(&pmdpage[i]);
+
+		/* Now we can free the page of PMDs */
+		free_page((long)pmdpage);
+		/* And zero out the PGD entry so we never release it twice. */
+		*spgd = __pgd(0);
+	}
+}
+
+#else /* !CONFIG_X86_PAE */
+
+/*H:450 If we chase down the release_pgd() code, it looks like this: */
+static void release_pgd(pgd_t *spgd)
 {
 	/* If the entry's not present, there's nothing to release. */
 	if (pgd_flags(*spgd) & _PAGE_PRESENT) {
@@ -342,6 +492,8 @@ static void release_pgd(struct lguest *lg, pgd_t *spgd)
 	}
 }

+#endif
+
 /*H:445 We saw flush_user_mappings() twice: once from the flush_user_mappings()
  * hypercall and once in new_pgdir() when we re-used a top-level pgdir page.
  * It simply releases every PTE page from 0 up to the Guest's kernel address. */
@@ -350,7 +502,7 @@ static void flush_user_mappings(struct lguest *lg, int idx)
 	unsigned int i;
 	/* Release every pgd entry up to the kernel's address. */
 	for (i = 0; i < pgd_index(lg->kernel_address); i++)
-		release_pgd(lg, lg->pgdirs[idx].pgdir + i);
+		release_pgd(lg->pgdirs[idx].pgdir + i);
 }

 /*H:440 (v) Flushing (throwing away) page tables,
@@ -370,23 +522,34 @@ unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr)
 	pgd_t gpgd;
 	pte_t gpte;

+#ifdef CONFIG_X86_PAE
+	pmd_t gpmd;
+#endif
+
 	/* First step: get the top-level Guest page table entry. */
-	gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);
+	gpgd = lgread(cpu, (unsigned long) gpgd_addr(cpu, vaddr), pgd_t);
 	/* Toplevel not present?  We can't map it in. */
 	if (!(pgd_flags(gpgd) & _PAGE_PRESENT))
 		kill_guest(cpu, "Bad address %#lx", vaddr);

-	gpte = lgread(cpu, gpte_addr(gpgd, vaddr), pte_t);
+#ifdef CONFIG_X86_PAE
+	gpmd = lgread(cpu, (unsigned long) gpmd_addr(gpgd, vaddr), pmd_t);
+	if (!(pmd_flags(gpmd) & _PAGE_PRESENT))
+		kill_guest(cpu, "Bad address %#lx", vaddr);
+#endif
+
+	gpte = lgread(cpu, (unsigned long) gpte_addr(cpu, gpgd, vaddr), pte_t);
 	if (!(pte_flags(gpte) & _PAGE_PRESENT))
 		kill_guest(cpu, "Bad address %#lx", vaddr);

 	return pte_pfn(gpte) * PAGE_SIZE | (vaddr & ~PAGE_MASK);
 }

+
 /* We keep several page tables.  This is a simple routine to find the page
  * table (if any) corresponding to this top-level address the Guest has given
  * us. */
-static unsigned int find_pgdir(struct lguest *lg, unsigned long pgtable)
+static unsigned int find_pgdir(struct lguest *lg, pgd_t *pgtable)
 {
 	unsigned int i;
 	for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
@@ -399,11 +562,13 @@ static unsigned int find_pgdir(struct lguest *lg, unsigned long pgtable)
  * allocate a new one (and so the kernel parts are not there), we set
  * blank_pgdir. */
 static unsigned int new_pgdir(struct lg_cpu *cpu,
-			      unsigned long gpgdir,
+			      pgd_t *gpgdir,
 			      int *blank_pgdir)
 {
 	unsigned int next;
-
+#ifdef CONFIG_X86_PAE
+	pmd_t *pmd_table;
+#endif
 	/* We pick one entry at random to throw out.  Choosing the Least
 	 * Recently Used might be better, but this is easy. */
 	next = random32() % ARRAY_SIZE(cpu->lg->pgdirs);
@@ -414,10 +579,27 @@ static unsigned int new_pgdir(struct lg_cpu *cpu,
 		/* If the allocation fails, just keep using the one we have */
 		if (!cpu->lg->pgdirs[next].pgdir)
 			next = cpu->cpu_pgd;
+#ifdef CONFIG_X86_PAE
+		else {
+			/* In PAE mode, allocate a pmd page and populate the
+			 * last pgd entry. */
+			pmd_table = (pmd_t *) get_zeroed_page(GFP_KERNEL);
+			if (!pmd_table)
+				next = cpu->cpu_pgd;
+			else {
+				set_pgd(cpu->lg->pgdirs[next].pgdir +
+						SWITCHER_PGD_INDEX,
+				     __pgd(__pa(pmd_table) | _PAGE_PRESENT));
+
+				/* This is a blank page, so there are no kernel
+				 * mappings: caller must map the stack! */
+				*blank_pgdir = 1;
+			}
+		}
+#else
 		else
-			/* This is a blank page, so there are no kernel
-			 * mappings: caller must map the stack! */
 			*blank_pgdir = 1;
+#endif
 	}
 	/* Record which Guest toplevel this shadows. */
 	cpu->lg->pgdirs[next].gpgdir = gpgdir;
@@ -437,11 +619,11 @@ void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable)
 	int newpgdir, repin = 0;

 	/* Look to see if we have this one already. */
-	newpgdir = find_pgdir(cpu->lg, pgtable);
+	newpgdir = find_pgdir(cpu->lg, (pgd_t *)pgtable);
 	/* If not, we allocate or mug an existing one: if it's a fresh one,
 	 * repin gets set to 1. */
 	if (newpgdir == ARRAY_SIZE(cpu->lg->pgdirs))
-		newpgdir = new_pgdir(cpu, pgtable, &repin);
+		newpgdir = new_pgdir(cpu, (pgd_t *)pgtable, &repin);
 	/* Change the current pgd index to the new one. */
 	cpu->cpu_pgd = newpgdir;
 	/* If it was completely blank, we map in the Guest kernel stack */
@@ -456,12 +638,28 @@ static void release_all_pagetables(struct lguest *lg)
 {
 	unsigned int i, j;

+#ifdef CONFIG_X86_PAE
+	pgd_t *spgd;
+	pmd_t *pmdpage;
+#endif
+
 	/* Every shadow pagetable this Guest has */
 	for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
-		if (lg->pgdirs[i].pgdir)
+		if (lg->pgdirs[i].pgdir) {
 			/* Every PGD entry except the Switcher at the top */
 			for (j = 0; j < SWITCHER_PGD_INDEX; j++)
-				release_pgd(lg, lg->pgdirs[i].pgdir + j);
+				release_pgd(lg->pgdirs[i].pgdir + j);
+#ifdef CONFIG_X86_PAE
+			/* Get the last pmd page. */
+			spgd = lg->pgdirs[i].pgdir + SWITCHER_PGD_INDEX;
+			pmdpage = __va(pgd_pfn(*spgd) << PAGE_SHIFT);
+
+			/* And release the pmd entries of that pmd page,
+			 * except for the switcher pmd. */
+			for (j = 0; j < SWITCHER_PMD_INDEX; j++)
+				release_pmd(&pmdpage[j]);
+#endif
+		}
 }

 /* We also throw away everything when a Guest tells us it's changed a kernel
@@ -503,23 +701,38 @@ static void do_set_pte(struct lg_cpu *cpu, int idx,
 	/* Look up the matching shadow page directory entry. */
 	pgd_t *spgd = spgd_addr(cpu, idx, vaddr);

+#ifdef CONFIG_X86_PAE
+	pmd_t *spmd;
+#endif
+
 	/* If the top level isn't present, there's no entry to update. */
 	if (pgd_flags(*spgd) & _PAGE_PRESENT) {
-		/* Otherwise, we start by releasing the existing entry. */
-		pte_t *spte = spte_addr(*spgd, vaddr);
-		release_pte(*spte);
-
-		/* If they're setting this entry as dirty or accessed, we might
-		 * as well put that entry they've given us in now.  This shaves
-		 * 10% off a copy-on-write micro-benchmark. */
-		if (pte_flags(gpte) & (_PAGE_DIRTY | _PAGE_ACCESSED)) {
-			check_gpte(cpu, gpte);
-			*spte = gpte_to_spte(cpu, gpte,
-					     pte_flags(gpte) & _PAGE_DIRTY);
-		} else
-			/* Otherwise kill it and we can demand_page() it in
-			 * later. */
-			*spte = __pte(0);
+
+#ifdef CONFIG_X86_PAE
+		spmd = spmd_addr(cpu, *spgd, vaddr);
+		if (pmd_flags(*spmd) & _PAGE_PRESENT) {
+#endif
+
+			/* Otherwise, we start by releasing the existing
+			 * entry. */
+			pte_t *spte = spte_addr(cpu, *spgd, vaddr);
+			release_pte(*spte);
+
+			/* If they're setting this entry as dirty or accessed,
+			 * we might as well put that entry they've given us in
+			 * now.  This shaves 10% off a copy-on-write
+			 * micro-benchmark. */
+			if (pte_flags(gpte) & (_PAGE_DIRTY | _PAGE_ACCESSED)) {
+				check_gpte(cpu, gpte);
+				*spte = gpte_to_spte(cpu, gpte,
+						pte_flags(gpte) & _PAGE_DIRTY);
+			} else
+				/* Otherwise kill it and we can demand_page()
+				 * it in later. */
+				*spte = __pte(0);
+#ifdef CONFIG_X86_PAE
+		}
+#endif
 	}
 }

@@ -545,7 +758,7 @@ void guest_set_pte(struct lg_cpu *cpu,
 				do_set_pte(cpu, i, vaddr, gpte);
 	} else {
 		/* Is this page table one we have a shadow for? */
-		int pgdir = find_pgdir(cpu->lg, gpgdir);
+		int pgdir = find_pgdir(cpu->lg, (pgd_t *)gpgdir);
 		if (pgdir != ARRAY_SIZE(cpu->lg->pgdirs))
 			/* If so, do the update. */
 			do_set_pte(cpu, pgdir, vaddr, gpte);
@@ -566,9 +779,31 @@ void guest_set_pte(struct lg_cpu *cpu,
  *
  * So with that in mind here's our code to to update a (top-level) PGD entry:
  */
-void guest_set_pmd(struct lguest *lg, unsigned long gpgdir, u32 idx)
+
+#ifdef CONFIG_X86_PAE
+void guest_set_pud(struct lguest *lg, unsigned long pudp, u32 idx)
+{
+	int pgdir;
+	pgd_t *gpgdir = (pgd_t *) pudp;
+
+	/* If they're talking about a page table we have a shadow for... */
+	pgdir = find_pgdir(lg, gpgdir);
+	if (pgdir < ARRAY_SIZE(lg->pgdirs))
+		/* ... throw it away. */
+		release_pgd(lg->pgdirs[pgdir].pgdir + idx);
+}
+
+void guest_set_pmd(struct lguest *lg, unsigned long pmdp, u32 idx)
+{
+	release_all_pagetables(lg);
+}
+
+#else /*!CONFIG_X86_PAE*/
+
+void guest_set_pmd(struct lguest *lg, unsigned long pmdp, u32 idx)
 {
 	int pgdir;
+	pgd_t *gpgdir = (pgd_t *) pmdp;

 	/* The kernel seems to try to initialize this early on: we ignore its
 	 * attempts to map over the Switcher. */
@@ -579,8 +814,9 @@ void guest_set_pmd(struct lguest *lg, unsigned long gpgdir, u32 idx)
 	pgdir = find_pgdir(lg, gpgdir);
 	if (pgdir < ARRAY_SIZE(lg->pgdirs))
 		/* ... throw it away. */
-		release_pgd(lg, lg->pgdirs[pgdir].pgdir + idx);
+		release_pgd(lg->pgdirs[pgdir].pgdir + idx);
 }
+#endif

 /* Once we know how much memory we have we can construct simple identity
  * (which set virtual == physical) and linear mappings
@@ -594,8 +830,14 @@ static unsigned long setup_pagetables(unsigned long mem,
 {
 	pgd_t *pgdir;
 	pte_t *linear;
-	unsigned int mapped_pages, i, linear_pages, phys_linear;
-
+	unsigned int mapped_pages, i, linear_pages;
+
+#ifdef CONFIG_X86_PAE
+	u64 *pmds;
+	unsigned int j;
+#else
+	unsigned int phys_linear;
+#endif
 	/* We have mapped_pages frames to map, so we need
 	 * linear_pages page tables to map them. */
 	mapped_pages = mem / PAGE_SIZE;
@@ -607,14 +849,26 @@ static unsigned long setup_pagetables(unsigned long mem,
 	/* Now we use the next linear_pages pages as pte pages */
 	linear = (void *)pgdir - linear_pages * PAGE_SIZE;

+#ifdef CONFIG_X86_PAE
+	pmds = (void *)linear - PAGE_SIZE;
+#endif
+
 	/* Linear mapping is easy: put every page's address into the
 	 * mapping in order. */
 	for (i = 0; i < mapped_pages; i++)
 		set_pte(&linear[i], pfn_pte(i,
 			__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER)));

+#ifdef CONFIG_X86_PAE
 	/* The top level points to the linear page table pages above.
 	 * We setup the identity and linear mappings here. */
+	for (i = 0, j = 0; i < mapped_pages; i += PTRS_PER_PTE, j++) {
+		pmds[j] = ((unsigned long)(linear + i) - mem_base) |
+			_PAGE_PRESENT | _PAGE_RW | _PAGE_USER;
+	}
+	pgdir[0] = pgdir[3] = __pgd((((u32)pmds) - mem_base) | _PAGE_PRESENT);
+
+#else
 	phys_linear = (unsigned long)linear - mem_base;
 	for (i = 0; i < mapped_pages; i += PTRS_PER_PTE)
 		pgdir[i / PTRS_PER_PTE] =
@@ -622,6 +876,8 @@ static unsigned long setup_pagetables(unsigned long mem,
 			     __pgd((phys_linear + i * sizeof(pte_t)) |
 				 (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER));

+#endif
+
 	/* We return the top level (guest-physical) address: the kernel needs
 	 * to know where it is. */
 	return (unsigned long)pgdir - mem_base;
@@ -636,6 +892,11 @@ int init_guest_pagetable(struct lguest *lg)
 	unsigned long initrd_size = 0, mem = 0;
 	struct boot_params *boot = (struct boot_params *) lg->mem_base;

+#ifdef CONFIG_X86_PAE
+	pgd_t *pgd;
+	pmd_t *pmd_table;
+#endif
+
 	/* Get the guest memory size and the ramdisk size
 	 * from the boot header located at lg->mem_base*/
 	if (copy_from_user(&mem, &boot->e820_map[0].size, 8))
@@ -645,11 +906,22 @@ int init_guest_pagetable(struct lguest *lg)

 	/* We start on the first shadow page table, and give it a blank PGD
 	 * page. */
-	lg->pgdirs[0].gpgdir = setup_pagetables(mem,
+	lg->pgdirs[0].gpgdir = (pgd_t *) setup_pagetables(mem,
 				initrd_size, (unsigned long) lg->mem_base);
 	lg->pgdirs[0].pgdir = (pgd_t *)get_zeroed_page(GFP_KERNEL);
 	if (!lg->pgdirs[0].pgdir)
 		return -ENOMEM;
+#ifdef CONFIG_X86_PAE
+	pgd = lg->pgdirs[0].pgdir;
+	pmd_table = (pmd_t *) get_zeroed_page(GFP_KERNEL);
+	if (!pmd_table)
+		return -ENOMEM;
+
+	set_pgd(pgd + SWITCHER_PGD_INDEX, __pgd(__pa(pmd_table)
+						| _PAGE_PRESENT));
+#endif
+
+
 	lg->cpus[0].cpu_pgd = 0;
 	return 0;
 }
@@ -657,21 +929,36 @@ int init_guest_pagetable(struct lguest *lg)
 /* When the Guest calls LHCALL_LGUEST_INIT we do more setup. */
 void page_table_guest_data_init(struct lg_cpu *cpu)
 {
+#ifdef CONFIG_X86_PAE
+	const unsigned long reserve_mb = 2;
+#else
+	const unsigned long reserve_mb = 4;
+#endif
+
 	/* We get the kernel address: above this is all kernel memory. */
 	if (get_user(cpu->lg->kernel_address,
-		     &cpu->lg->lguest_data->kernel_address)
-	    /* We tell the Guest that it can't use the top 4MB of virtual
-	     * addresses used by the Switcher. */
-	    || put_user(4U*1024*1024, &cpu->lg->lguest_data->reserve_mem)
-	    || put_user(cpu->lg->pgdirs[0].gpgdir, &cpu->lg->lguest_data->pgdir))
+		&cpu->lg->lguest_data->kernel_address)
+		/* We tell the Guest that it can't use the top 2 or 4 MB
+		 * of virtual addresses used by the Switcher. */
+		|| put_user(reserve_mb * 1024 * 1024,
+			&cpu->lg->lguest_data->reserve_mem)
+		|| put_user((unsigned long) cpu->lg->pgdirs[0].gpgdir,
+					&cpu->lg->lguest_data->pgdir))
 		kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data);

 	/* In flush_user_mappings() we loop from 0 to
 	 * "pgd_index(lg->kernel_address)".  This assumes it won't hit the
 	 * Switcher mappings, so check that now. */
+#ifdef CONFIG_X86_PAE
+	if (pgd_index(cpu->lg->kernel_address) == SWITCHER_PGD_INDEX)
+		if (pmd_index(cpu->lg->kernel_address) == SWITCHER_PMD_INDEX)
+			kill_guest(cpu, "bad kernel address %#lx",
+					cpu->lg->kernel_address);
+#else
 	if (pgd_index(cpu->lg->kernel_address) >= SWITCHER_PGD_INDEX)
 		kill_guest(cpu, "bad kernel address %#lx",
 				 cpu->lg->kernel_address);
+#endif
 }

 /* When a Guest dies, our cleanup is fairly simple. */
@@ -695,15 +982,28 @@ void free_guest_pagetable(struct lguest *lg)
 void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages)
 {
 	pte_t *switcher_pte_page = __get_cpu_var(switcher_pte_pages);
-	pgd_t switcher_pgd;
 	pte_t regs_pte;
 	unsigned long pfn;

+#ifdef CONFIG_X86_PAE
+	pmd_t switcher_pmd;
+	pmd_t *pmd_table;
+
+	switcher_pmd = pfn_pmd(__pa(switcher_pte_page) >> PAGE_SHIFT,
+			       __pgprot(__PAGE_KERNEL));
+	pmd_table = __va(pgd_pfn(cpu->lg->
+			pgdirs[cpu->cpu_pgd].pgdir[SWITCHER_PGD_INDEX])
+								<< PAGE_SHIFT);
+	pmd_table[SWITCHER_PMD_INDEX] = switcher_pmd;
+
+#else
+	pgd_t switcher_pgd;
+
 	/* Make the last PGD entry for this Guest point to the Switcher's PTE
 	 * page for this CPU (with appropriate flags). */
 	switcher_pgd = __pgd(__pa(switcher_pte_page) | __PAGE_KERNEL);
-
 	cpu->lg->pgdirs[cpu->cpu_pgd].pgdir[SWITCHER_PGD_INDEX] = switcher_pgd;
+#endif

 	/* We also change the Switcher PTE page.  When we're running the Guest,
 	 * we want the Guest's "regs" page to appear where the first Switcher
@@ -714,7 +1014,8 @@ void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages)
 	 * again. */
 	pfn = __pa(cpu->regs_page) >> PAGE_SHIFT;
 	regs_pte = pfn_pte(pfn, __pgprot(__PAGE_KERNEL));
-	switcher_pte_page[(unsigned long)pages/PAGE_SIZE%PTRS_PER_PTE] = regs_pte;
+	switcher_pte_page[(unsigned long)pages / PAGE_SIZE % PTRS_PER_PTE]
+								= regs_pte;
 }
 /*:*/

@@ -723,7 +1024,7 @@ static void free_switcher_pte_pages(void)
 	unsigned int i;

 	for_each_possible_cpu(i)
-		free_page((long)switcher_pte_page(i));
+		free_page((long)switcher_pte_page(i));
 }

 /*H:520 Setting up the Switcher PTE page for given CPU is fairly easy, given
diff --git a/include/asm-x86/lguest.h b/include/asm-x86/lguest.h
index be4a724..ad08f70 100644
--- a/include/asm-x86/lguest.h
+++ b/include/asm-x86/lguest.h
@@ -17,8 +17,13 @@
 /* Pages for switcher itself, then two pages per cpu */
 #define TOTAL_SWITCHER_PAGES (SHARED_SWITCHER_PAGES + 2 * NR_CPUS)

+#ifdef CONFIG_X86_PAE
+/* We map at -2M for ease of mapping into the guest (one PTE page). */
+#define SWITCHER_ADDR 0xFFE00000
+#else
 /* We map at -4M for ease of mapping into the guest (one PTE page). */
 #define SWITCHER_ADDR 0xFFC00000
+#endif

 /* Found in switcher.S */
 extern unsigned long default_idt_entries[];
diff --git a/include/asm-x86/lguest_hcall.h b/include/asm-x86/lguest_hcall.h
index a3241f2..c0860dc 100644
--- a/include/asm-x86/lguest_hcall.h
+++ b/include/asm-x86/lguest_hcall.h
@@ -17,6 +17,7 @@
 #define LHCALL_SET_PMD		15
 #define LHCALL_LOAD_TLS		16
 #define LHCALL_NOTIFY		17
+#define LHCALL_SET_PUD		18

 #define LGUEST_TRAP_ENTRY 0x1F

@@ -32,7 +33,7 @@
  * to make requests of the Host Itself.
  *
  * Our hypercall mechanism uses the highest unused trap code (traps 32 and
- * above are used by real hardware interrupts).  Fifteen hypercalls are
+ * above are used by real hardware interrupts).  Sixteen hypercalls are
  * available: the hypercall number is put in the %eax register, and the
  * arguments (when required) are placed in %edx, %ebx and %ecx.  If a return
  * value makes sense, it's returned in %eax.
