[Lguest] lguest: mapping switcher would thwack fixmap

Rusty Russell rusty at rustcorp.com.au
Wed Apr 17 12:08:15 EST 2013


Paul Bolle <pebolle at tiscali.nl> writes:
> On Thu, 2013-04-11 at 18:59 +0200, Paul Bolle wrote:
>> 1) And now running the lguest tools doesn't triple fault anymore. It
>> just panics the kernel. Progress!
>
> That must have been a very lucky run. I've tried half a dozen times to
> run the lguest tool with a serial line attached and every time all I got
> was a reboot. Nothing whatsoever was printed to the (serial) console.

OK, I did some cleanups along the way, but this seems to work for me.

You can either grab my pending-rebases branch from git.kernel.org (which
has all my pending patches in it), or apply the following mega-patch
which should be equivalent.
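
To see what the patch is doing at a glance, here's the address layout it
sets up, as a stand-alone sketch (not part of the patch; FIXADDR_START
and the cpu count are illustrative stand-ins for the real Host values):

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define FIXADDR_START	0xfff15000UL	/* illustrative stand-in */

int main(void)
{
	unsigned long nr_cpu_ids = 4;	/* illustrative stand-in */

	/* One page of Switcher text, then two pages per cpu. */
	unsigned long total_switcher_pages = 1 + 2 * nr_cpu_ids;

	/* The extra page leaves room for __get_vm_area()'s guard page. */
	unsigned long switcher_addr =
		FIXADDR_START - (total_switcher_pages + 1) * PAGE_SIZE;

	/*
	 * What the Guest is told to keep clear of: ~switcher_addr + 1
	 * is the two's-complement way of saying 4GB - switcher_addr.
	 */
	unsigned int top = ~(unsigned int)switcher_addr + 1;

	printf("switcher_addr = %#lx, reserve_mem = %#x\n",
	       switcher_addr, top);
	return 0;
}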

Please report back if this fixes the problem for you...

Thanks!
Rusty.

diff --git a/arch/x86/include/asm/lguest.h b/arch/x86/include/asm/lguest.h
index 0d97deb..e2d4a4a 100644
--- a/arch/x86/include/asm/lguest.h
+++ b/arch/x86/include/asm/lguest.h
@@ -11,18 +11,11 @@
 
 #define GUEST_PL 1
 
-/* Every guest maps the core switcher code. */
-#define SHARED_SWITCHER_PAGES \
-	DIV_ROUND_UP(end_switcher_text - start_switcher_text, PAGE_SIZE)
-/* Pages for switcher itself, then two pages per cpu */
-#define TOTAL_SWITCHER_PAGES (SHARED_SWITCHER_PAGES + 2 * nr_cpu_ids)
-
-/* We map at -4M (-2M for PAE) for ease of mapping (one PTE page). */
-#ifdef CONFIG_X86_PAE
-#define SWITCHER_ADDR 0xFFE00000
-#else
-#define SWITCHER_ADDR 0xFFC00000
-#endif
+/* Page for Switcher text itself, then two pages per cpu */
+#define TOTAL_SWITCHER_PAGES (1 + 2 * nr_cpu_ids)
+
+/* Where we map the Switcher, in both Host and Guest. */
+extern unsigned long switcher_addr;
 
 /* Found in switcher.S */
 extern unsigned long default_idt_entries[];
diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
index a5ebc00..0bf1e4e 100644
--- a/drivers/lguest/core.c
+++ b/drivers/lguest/core.c
@@ -20,9 +20,9 @@
 #include <asm/asm-offsets.h>
 #include "lg.h"
 
-
+unsigned long switcher_addr;
+struct page **lg_switcher_pages;
 static struct vm_struct *switcher_vma;
-static struct page **switcher_page;
 
 /* This One Big lock protects all inter-guest data structures. */
 DEFINE_MUTEX(lguest_lock);
@@ -52,13 +52,21 @@ static __init int map_switcher(void)
 	 * easy.
 	 */
 
+	/* We assume Switcher text fits into a single page. */
+	if (end_switcher_text - start_switcher_text > PAGE_SIZE) {
+		printk(KERN_ERR "lguest: switcher text too large (%zu)\n",
+		       end_switcher_text - start_switcher_text);
+		return -EINVAL;
+	}
+
 	/*
 	 * We allocate an array of struct page pointers.  map_vm_area() wants
 	 * this, rather than just an array of pages.
 	 */
-	switcher_page = kmalloc(sizeof(switcher_page[0])*TOTAL_SWITCHER_PAGES,
-				GFP_KERNEL);
-	if (!switcher_page) {
+	lg_switcher_pages = kmalloc(sizeof(lg_switcher_pages[0])
+				    * TOTAL_SWITCHER_PAGES,
+				    GFP_KERNEL);
+	if (!lg_switcher_pages) {
 		err = -ENOMEM;
 		goto out;
 	}
@@ -68,32 +76,29 @@ static __init int map_switcher(void)
 	 * so we make sure they're zeroed.
 	 */
 	for (i = 0; i < TOTAL_SWITCHER_PAGES; i++) {
-		switcher_page[i] = alloc_page(GFP_KERNEL|__GFP_ZERO);
-		if (!switcher_page[i]) {
+		lg_switcher_pages[i] = alloc_page(GFP_KERNEL|__GFP_ZERO);
+		if (!lg_switcher_pages[i]) {
 			err = -ENOMEM;
 			goto free_some_pages;
 		}
 	}
 
 	/*
-	 * First we check that the Switcher won't overlap the fixmap area at
-	 * the top of memory.  It's currently nowhere near, but it could have
-	 * very strange effects if it ever happened.
+	 * We place the Switcher underneath the fixmap area, so it sits at
+	 * the highest virtual address we can get.  This is important, since
+	 * we tell the Guest it can't access this memory, so we want its
+	 * ceiling as high as possible.
 	 */
-	if (SWITCHER_ADDR + (TOTAL_SWITCHER_PAGES+1)*PAGE_SIZE > FIXADDR_START){
-		err = -ENOMEM;
-		printk("lguest: mapping switcher would thwack fixmap\n");
-		goto free_pages;
-	}
+	switcher_addr = FIXADDR_START - (TOTAL_SWITCHER_PAGES+1)*PAGE_SIZE;
 
 	/*
-	 * Now we reserve the "virtual memory area" we want: 0xFFC00000
-	 * (SWITCHER_ADDR).  We might not get it in theory, but in practice
-	 * it's worked so far.  The end address needs +1 because __get_vm_area
-	 * allocates an extra guard page, so we need space for that.
+	 * Now we reserve the "virtual memory area" we want.  We might
+	 * not get it in theory, but in practice it's worked so far.
+	 * The end address needs +1 because __get_vm_area allocates an
+	 * extra guard page, so we need space for that.
 	 */
 	switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
-				     VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
+				     VM_ALLOC, switcher_addr, switcher_addr
 				     + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
 	if (!switcher_vma) {
 		err = -ENOMEM;
@@ -103,12 +108,12 @@ static __init int map_switcher(void)
 
 	/*
 	 * This code actually sets up the pages we've allocated to appear at
-	 * SWITCHER_ADDR.  map_vm_area() takes the vma we allocated above, the
+	 * switcher_addr.  map_vm_area() takes the vma we allocated above, the
 	 * kind of pages we're mapping (kernel pages), and a pointer to our
 	 * array of struct pages.  It increments that pointer, but we don't
 	 * care.
 	 */
-	pagep = switcher_page;
+	pagep = lg_switcher_pages;
 	err = map_vm_area(switcher_vma, PAGE_KERNEL_EXEC, &pagep);
 	if (err) {
 		printk("lguest: map_vm_area failed: %i\n", err);
@@ -133,8 +138,8 @@ free_pages:
 	i = TOTAL_SWITCHER_PAGES;
 free_some_pages:
 	for (--i; i >= 0; i--)
-		__free_pages(switcher_page[i], 0);
-	kfree(switcher_page);
+		__free_pages(lg_switcher_pages[i], 0);
+	kfree(lg_switcher_pages);
 out:
 	return err;
 }
@@ -149,8 +154,8 @@ static void unmap_switcher(void)
 	vunmap(switcher_vma->addr);
 	/* Now we just need to free the pages we copied the switcher into */
 	for (i = 0; i < TOTAL_SWITCHER_PAGES; i++)
-		__free_pages(switcher_page[i], 0);
-	kfree(switcher_page);
+		__free_pages(lg_switcher_pages[i], 0);
+	kfree(lg_switcher_pages);
 }
 
 /*H:032
@@ -323,15 +328,10 @@ static int __init init(void)
 	if (err)
 		goto out;
 
-	/* Now we set up the pagetable implementation for the Guests. */
-	err = init_pagetables(switcher_page, SHARED_SWITCHER_PAGES);
-	if (err)
-		goto unmap;
-
 	/* We might need to reserve an interrupt vector. */
 	err = init_interrupts();
 	if (err)
-		goto free_pgtables;
+		goto unmap;
 
 	/* /dev/lguest needs to be registered. */
 	err = lguest_device_init();
@@ -346,8 +346,6 @@ static int __init init(void)
 
 free_interrupts:
 	free_interrupts();
-free_pgtables:
-	free_pagetables();
 unmap:
 	unmap_switcher();
 out:
@@ -359,7 +357,6 @@ static void __exit fini(void)
 {
 	lguest_device_remove();
 	free_interrupts();
-	free_pagetables();
 	unmap_switcher();
 
 	lguest_arch_host_fini();
diff --git a/drivers/lguest/lg.h b/drivers/lguest/lg.h
index 295df06..2eef40b 100644
--- a/drivers/lguest/lg.h
+++ b/drivers/lguest/lg.h
@@ -14,11 +14,10 @@
 
 #include <asm/lguest.h>
 
-void free_pagetables(void);
-int init_pagetables(struct page **switcher_page, unsigned int pages);
-
 struct pgdir {
 	unsigned long gpgdir;
+	bool switcher_mapped;
+	int last_host_cpu;
 	pgd_t *pgdir;
 };
 
@@ -124,6 +123,7 @@ bool lguest_address_ok(const struct lguest *lg,
 		       unsigned long addr, unsigned long len);
 void __lgread(struct lg_cpu *, void *, unsigned long, unsigned);
 void __lgwrite(struct lg_cpu *, unsigned long, const void *, unsigned);
+extern struct page **lg_switcher_pages;
 
 /*H:035
 * Using memory-copy operations like that is usually inconvenient, so we
diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
index 3b62be16..b24add5 100644
--- a/drivers/lguest/page_tables.c
+++ b/drivers/lguest/page_tables.c
@@ -7,7 +7,7 @@
  * converted Guest pages when running the Guest.
 :*/
 
-/* Copyright (C) Rusty Russell IBM Corporation 2006.
+/* Copyright (C) Rusty Russell IBM Corporation 2013.
  * GPL v2 and any later version */
 #include <linux/mm.h>
 #include <linux/gfp.h>
@@ -62,22 +62,11 @@
  * will need the last pmd entry of the last pmd page.
  */
 #ifdef CONFIG_X86_PAE
-#define SWITCHER_PMD_INDEX 	(PTRS_PER_PMD - 1)
-#define RESERVE_MEM 		2U
 #define CHECK_GPGD_MASK		_PAGE_PRESENT
 #else
-#define RESERVE_MEM 		4U
 #define CHECK_GPGD_MASK		_PAGE_TABLE
 #endif
 
-/*
- * We actually need a separate PTE page for each CPU.  Remember that after the
- * Switcher code itself comes two pages for each CPU, and we don't want this
- * CPU's guest to see the pages of any other CPU.
- */
-static DEFINE_PER_CPU(pte_t *, switcher_pte_pages);
-#define switcher_pte_page(cpu) per_cpu(switcher_pte_pages, cpu)
-
 /*H:320
  * The page table code is curly enough to need helper functions to keep it
  * clear and clean.  The kernel itself provides many of them; one advantage
@@ -95,13 +84,6 @@ static pgd_t *spgd_addr(struct lg_cpu *cpu, u32 i, unsigned long vaddr)
 {
 	unsigned int index = pgd_index(vaddr);
 
-#ifndef CONFIG_X86_PAE
-	/* We kill any Guest trying to touch the Switcher addresses. */
-	if (index >= SWITCHER_PGD_INDEX) {
-		kill_guest(cpu, "attempt to access switcher pages");
-		index = 0;
-	}
-#endif
 	/* Return a pointer to the index'th pgd entry for the i'th page table. */
 	return &cpu->lg->pgdirs[i].pgdir[index];
 }
@@ -117,13 +99,6 @@ static pmd_t *spmd_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr)
 	unsigned int index = pmd_index(vaddr);
 	pmd_t *page;
 
-	/* We kill any Guest trying to touch the Switcher addresses. */
-	if (pgd_index(vaddr) == SWITCHER_PGD_INDEX &&
-					index >= SWITCHER_PMD_INDEX) {
-		kill_guest(cpu, "attempt to access switcher pages");
-		index = 0;
-	}
-
 	/* You should never call this if the PGD entry wasn't valid */
 	BUG_ON(!(pgd_flags(spgd) & _PAGE_PRESENT));
 	page = __va(pgd_pfn(spgd) << PAGE_SHIFT);
@@ -275,122 +250,181 @@ static void release_pte(pte_t pte)
 }
 /*:*/
 
-static void check_gpte(struct lg_cpu *cpu, pte_t gpte)
+static bool check_gpte(struct lg_cpu *cpu, pte_t gpte)
 {
 	if ((pte_flags(gpte) & _PAGE_PSE) ||
-	    pte_pfn(gpte) >= cpu->lg->pfn_limit)
+	    pte_pfn(gpte) >= cpu->lg->pfn_limit) {
 		kill_guest(cpu, "bad page table entry");
+		return false;
+	}
+	return true;
 }
 
-static void check_gpgd(struct lg_cpu *cpu, pgd_t gpgd)
+static bool check_gpgd(struct lg_cpu *cpu, pgd_t gpgd)
 {
 	if ((pgd_flags(gpgd) & ~CHECK_GPGD_MASK) ||
-	   (pgd_pfn(gpgd) >= cpu->lg->pfn_limit))
+	    (pgd_pfn(gpgd) >= cpu->lg->pfn_limit)) {
 		kill_guest(cpu, "bad page directory entry");
+		return false;
+	}
+	return true;
 }
 
 #ifdef CONFIG_X86_PAE
-static void check_gpmd(struct lg_cpu *cpu, pmd_t gpmd)
+static bool check_gpmd(struct lg_cpu *cpu, pmd_t gpmd)
 {
 	if ((pmd_flags(gpmd) & ~_PAGE_TABLE) ||
-	   (pmd_pfn(gpmd) >= cpu->lg->pfn_limit))
+	    (pmd_pfn(gpmd) >= cpu->lg->pfn_limit)) {
 		kill_guest(cpu, "bad page middle directory entry");
+		return false;
+	}
+	return true;
 }
 #endif
 
-/*H:330
- * (i) Looking up a page table entry when the Guest faults.
- *
- * We saw this call in run_guest(): when we see a page fault in the Guest, we
- * come here.  That's because we only set up the shadow page tables lazily as
- * they're needed, so we get page faults all the time and quietly fix them up
- * and return to the Guest without it knowing.
+/*H:331
+ * This is the core routine to walk the shadow page tables and find the page
+ * table entry for a specific address.
  *
- * If we fixed up the fault (ie. we mapped the address), this routine returns
- * true.  Otherwise, it was a real fault and we need to tell the Guest.
+ * If allocate is set, then we allocate any missing levels, setting the flags
+ * on the new page directory and mid-level directories using the arguments
+ * (which are copied from the Guest's page table entries).
  */
-bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
+static pte_t *find_spte(struct lg_cpu *cpu, unsigned long vaddr, bool allocate,
+			int pgd_flags, int pmd_flags)
 {
-	pgd_t gpgd;
 	pgd_t *spgd;
-	unsigned long gpte_ptr;
-	pte_t gpte;
-	pte_t *spte;
-
 	/* Mid level for PAE. */
 #ifdef CONFIG_X86_PAE
 	pmd_t *spmd;
-	pmd_t gpmd;
 #endif
 
-	/* First step: get the top-level Guest page table entry. */
-	if (unlikely(cpu->linear_pages)) {
-		/* Faking up a linear mapping. */
-		gpgd = __pgd(CHECK_GPGD_MASK);
-	} else {
-		gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);
-		/* Toplevel not present?  We can't map it in. */
-		if (!(pgd_flags(gpgd) & _PAGE_PRESENT))
-			return false;
-	}
-
-	/* Now look at the matching shadow entry. */
+	/* Get top level entry. */
 	spgd = spgd_addr(cpu, cpu->cpu_pgd, vaddr);
 	if (!(pgd_flags(*spgd) & _PAGE_PRESENT)) {
 		/* No shadow entry: allocate a new shadow PTE page. */
-		unsigned long ptepage = get_zeroed_page(GFP_KERNEL);
+		unsigned long ptepage;
+
+		/* If they didn't want us to allocate anything, stop. */
+		if (!allocate)
+			return NULL;
+
+		ptepage = get_zeroed_page(GFP_KERNEL);
 		/*
 		 * This is not really the Guest's fault, but killing it is
 		 * simple for this corner case.
 		 */
 		if (!ptepage) {
 			kill_guest(cpu, "out of memory allocating pte page");
-			return false;
+			return NULL;
 		}
-		/* We check that the Guest pgd is OK. */
-		check_gpgd(cpu, gpgd);
 		/*
 		 * And we copy the flags to the shadow PGD entry.  The page
 		 * number in the shadow PGD is the page we just allocated.
 		 */
-		set_pgd(spgd, __pgd(__pa(ptepage) | pgd_flags(gpgd)));
+		set_pgd(spgd, __pgd(__pa(ptepage) | pgd_flags));
 	}
 
+	/*
+	 * Intel's Physical Address Extension actually uses three levels of
+	 * page tables, so we need to look in the mid-level.
+	 */
 #ifdef CONFIG_X86_PAE
-	if (unlikely(cpu->linear_pages)) {
-		/* Faking up a linear mapping. */
-		gpmd = __pmd(_PAGE_TABLE);
-	} else {
-		gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t);
-		/* Middle level not present?  We can't map it in. */
-		if (!(pmd_flags(gpmd) & _PAGE_PRESENT))
-			return false;
-	}
-
-	/* Now look at the matching shadow entry. */
+	/* Now look at the mid-level shadow entry. */
 	spmd = spmd_addr(cpu, *spgd, vaddr);
 
 	if (!(pmd_flags(*spmd) & _PAGE_PRESENT)) {
 		/* No shadow entry: allocate a new shadow PTE page. */
-		unsigned long ptepage = get_zeroed_page(GFP_KERNEL);
+		unsigned long ptepage;
+
+		/* If they didn't want us to allocate anything, stop. */
+		if (!allocate)
+			return NULL;
+
+		ptepage = get_zeroed_page(GFP_KERNEL);
 
 		/*
 		 * This is not really the Guest's fault, but killing it is
 		 * simple for this corner case.
 		 */
 		if (!ptepage) {
-			kill_guest(cpu, "out of memory allocating pte page");
-			return false;
+			kill_guest(cpu, "out of memory allocating pmd page");
+			return NULL;
 		}
 
-		/* We check that the Guest pmd is OK. */
-		check_gpmd(cpu, gpmd);
-
 		/*
 		 * And we copy the flags to the shadow PMD entry.  The page
 		 * number in the shadow PMD is the page we just allocated.
 		 */
-		set_pmd(spmd, __pmd(__pa(ptepage) | pmd_flags(gpmd)));
+		set_pmd(spmd, __pmd(__pa(ptepage) | pmd_flags));
+	}
+#endif
+
+	/* Get the pointer to the shadow PTE entry we're going to set. */
+	return spte_addr(cpu, *spgd, vaddr);
+}
+
+/*H:330
+ * (i) Looking up a page table entry when the Guest faults.
+ *
+ * We saw this call in run_guest(): when we see a page fault in the Guest, we
+ * come here.  That's because we only set up the shadow page tables lazily as
+ * they're needed, so we get page faults all the time and quietly fix them up
+ * and return to the Guest without it knowing.
+ *
+ * If we fixed up the fault (ie. we mapped the address), this routine
+ * returns true.  Otherwise, it was a real fault and we need to tell
+ * the Guest.
+ *
+ * We never demand-page the Switcher region: any address at or above
+ * switcher_addr is rejected immediately.
+ */
+bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
+{
+	unsigned long gpte_ptr;
+	pte_t gpte;
+	pte_t *spte;
+	pmd_t gpmd;
+	pgd_t gpgd;
+
+	/* We never demand-page the Switcher, so trying is a mistake. */
+	if (vaddr >= switcher_addr)
+		return false;
+
+	/* First step: get the top-level Guest page table entry. */
+	if (unlikely(cpu->linear_pages)) {
+		/* Faking up a linear mapping. */
+		gpgd = __pgd(CHECK_GPGD_MASK);
+	} else {
+		gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);
+		/* Toplevel not present?  We can't map it in. */
+		if (!(pgd_flags(gpgd) & _PAGE_PRESENT))
+			return false;
+
+		/* 
+		 * This kills the Guest if it has weird flags or tries to
+		 * refer to a "physical" address outside the bounds.
+		 */
+		if (!check_gpgd(cpu, gpgd))
+			return false;
+	}
+
+	/* This "mid-level" entry is only used for non-linear, PAE mode. */
+	gpmd = __pmd(_PAGE_TABLE);
+
+#ifdef CONFIG_X86_PAE
+	if (likely(!cpu->linear_pages)) {
+		gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t);
+		/* Middle level not present?  We can't map it in. */
+		if (!(pmd_flags(gpmd) & _PAGE_PRESENT))
+			return false;
+
+		/* 
+		 * This kills the Guest if it has weird flags or tries to
+		 * refer to a "physical" address outside the bounds.
+		 */
+		if (!check_gpmd(cpu, gpmd))
+			return false;
 	}
 
 	/*
@@ -433,7 +467,8 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
 	 * Check that the Guest PTE flags are OK, and the page number is below
 	 * the pfn_limit (ie. not mapping the Launcher binary).
 	 */
-	check_gpte(cpu, gpte);
+	if (!check_gpte(cpu, gpte))
+		return false;
 
 	/* Add the _PAGE_ACCESSED and (for a write) _PAGE_DIRTY flag */
 	gpte = pte_mkyoung(gpte);
@@ -441,7 +476,9 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
 		gpte = pte_mkdirty(gpte);
 
 	/* Get the pointer to the shadow PTE entry we're going to set. */
-	spte = spte_addr(cpu, *spgd, vaddr);
+	spte = find_spte(cpu, vaddr, true, pgd_flags(gpgd), pmd_flags(gpmd));
+	if (!spte)
+		return false;
 
 	/*
 	 * If there was a valid shadow PTE entry here before, we release it.
@@ -493,29 +530,23 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
  */
 static bool page_writable(struct lg_cpu *cpu, unsigned long vaddr)
 {
-	pgd_t *spgd;
+	pte_t *spte;
 	unsigned long flags;
 
-#ifdef CONFIG_X86_PAE
-	pmd_t *spmd;
-#endif
-	/* Look at the current top level entry: is it present? */
-	spgd = spgd_addr(cpu, cpu->cpu_pgd, vaddr);
-	if (!(pgd_flags(*spgd) & _PAGE_PRESENT))
+	/* You can't put your stack in the Switcher! */
+	if (vaddr >= switcher_addr)
 		return false;
 
-#ifdef CONFIG_X86_PAE
-	spmd = spmd_addr(cpu, *spgd, vaddr);
-	if (!(pmd_flags(*spmd) & _PAGE_PRESENT))
+	/* If there's no shadow PTE, it's not writable. */
+	spte = find_spte(cpu, vaddr, false, 0, 0);
+	if (!spte)
 		return false;
-#endif
 
 	/*
 	 * Check the flags on the pte entry itself: it must be present and
 	 * writable.
 	 */
-	flags = pte_flags(*(spte_addr(cpu, *spgd, vaddr)));
-
+	flags = pte_flags(*spte);
 	return (flags & (_PAGE_PRESENT|_PAGE_RW)) == (_PAGE_PRESENT|_PAGE_RW);
 }
 
@@ -678,9 +709,6 @@ static unsigned int new_pgdir(struct lg_cpu *cpu,
 			      int *blank_pgdir)
 {
 	unsigned int next;
-#ifdef CONFIG_X86_PAE
-	pmd_t *pmd_table;
-#endif
 
 	/*
 	 * We pick one entry at random to throw out.  Choosing the Least
@@ -695,29 +723,11 @@ static unsigned int new_pgdir(struct lg_cpu *cpu,
 		if (!cpu->lg->pgdirs[next].pgdir)
 			next = cpu->cpu_pgd;
 		else {
-#ifdef CONFIG_X86_PAE
 			/*
-			 * In PAE mode, allocate a pmd page and populate the
-			 * last pgd entry.
+			 * This is a blank page, so there are no kernel
+			 * mappings: caller must map the stack!
 			 */
-			pmd_table = (pmd_t *)get_zeroed_page(GFP_KERNEL);
-			if (!pmd_table) {
-				free_page((long)cpu->lg->pgdirs[next].pgdir);
-				set_pgd(cpu->lg->pgdirs[next].pgdir, __pgd(0));
-				next = cpu->cpu_pgd;
-			} else {
-				set_pgd(cpu->lg->pgdirs[next].pgdir +
-					SWITCHER_PGD_INDEX,
-					__pgd(__pa(pmd_table) | _PAGE_PRESENT));
-				/*
-				 * This is a blank page, so there are no kernel
-				 * mappings: caller must map the stack!
-				 */
-				*blank_pgdir = 1;
-			}
-#else
 			*blank_pgdir = 1;
-#endif
 		}
 	}
 	/* Record which Guest toplevel this shadows. */
@@ -725,9 +735,50 @@ static unsigned int new_pgdir(struct lg_cpu *cpu,
 	/* Release all the non-kernel mappings. */
 	flush_user_mappings(cpu->lg, next);
 
+	/* This hasn't run on any CPU at all. */
+	cpu->lg->pgdirs[next].last_host_cpu = -1;
+
 	return next;
 }
 
+/*H:501
+ * We do need the Switcher code mapped at all times, so we allocate that
+ * part of the Guest page table here.  We map the Switcher code immediately,
+ * but defer mapping of the Guest register page and the read-only (IDT/GDT
+ * etc.) page until just before we run the Guest in map_switcher_in_guest().
+ *
+ * We *could* do this setup in map_switcher_in_guest(), but at that point
+ * we have interrupts disabled, and allocating pages like that is fraught: we
+ * can't sleep if we need to free up some memory.
+ */
+static bool allocate_switcher_mapping(struct lg_cpu *cpu)
+{
+	int i;
+
+	for (i = 0; i < TOTAL_SWITCHER_PAGES; i++) {
+		pte_t *pte = find_spte(cpu, switcher_addr + i * PAGE_SIZE, true,
+				       CHECK_GPGD_MASK, _PAGE_TABLE);
+		if (!pte)
+			return false;
+
+		/*
+		 * Map the switcher page if not already there.  It might
+		 * already be there because allocate_switcher_mapping() is
+		 * also called from guest_set_pgd(), in case the pgd release
+		 * there discarded our Switcher mapping (it usually doesn't).
+		 */
+		if (i == 0 && !(pte_flags(*pte) & _PAGE_PRESENT)) {
+			/* Get a reference to the Switcher page. */
+			get_page(lg_switcher_pages[0]);
+			/* Create a read-only, executable, kernel-style PTE */
+			set_pte(pte,
+				mk_pte(lg_switcher_pages[0], PAGE_KERNEL_RX));
+		}
+	}
+	cpu->lg->pgdirs[cpu->cpu_pgd].switcher_mapped = true;
+	return true;
+}
+
 /*H:470
  * Finally, a routine which throws away everything: all PGD entries in all
  * the shadow page tables, including the Guest's kernel mappings.  This is used
@@ -738,28 +789,16 @@ static void release_all_pagetables(struct lguest *lg)
 	unsigned int i, j;
 
 	/* Every shadow pagetable this Guest has */
-	for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
-		if (lg->pgdirs[i].pgdir) {
-#ifdef CONFIG_X86_PAE
-			pgd_t *spgd;
-			pmd_t *pmdpage;
-			unsigned int k;
-
-			/* Get the last pmd page. */
-			spgd = lg->pgdirs[i].pgdir + SWITCHER_PGD_INDEX;
-			pmdpage = __va(pgd_pfn(*spgd) << PAGE_SHIFT);
-
-			/*
-			 * And release the pmd entries of that pmd page,
-			 * except for the switcher pmd.
-			 */
-			for (k = 0; k < SWITCHER_PMD_INDEX; k++)
-				release_pmd(&pmdpage[k]);
-#endif
-			/* Every PGD entry except the Switcher at the top */
-			for (j = 0; j < SWITCHER_PGD_INDEX; j++)
-				release_pgd(lg->pgdirs[i].pgdir + j);
-		}
+	for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++) {
+		if (!lg->pgdirs[i].pgdir)
+			continue;
+
+		/* Every PGD entry. */
+		for (j = 0; j < PTRS_PER_PGD; j++)
+			release_pgd(lg->pgdirs[i].pgdir + j);
+		lg->pgdirs[i].switcher_mapped = false;
+		lg->pgdirs[i].last_host_cpu = -1;
+	}
 }
 
 /*
@@ -773,6 +812,9 @@ void guest_pagetable_clear_all(struct lg_cpu *cpu)
 	release_all_pagetables(cpu->lg);
 	/* We need the Guest kernel stack mapped again. */
 	pin_stack_pages(cpu);
+	/* And we need Switcher allocated. */
+	if (!allocate_switcher_mapping(cpu))
+		kill_guest(cpu, "Cannot populate switcher mapping");
 }
 
 /*H:430
@@ -808,9 +850,17 @@ void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable)
 		newpgdir = new_pgdir(cpu, pgtable, &repin);
 	/* Change the current pgd index to the new one. */
 	cpu->cpu_pgd = newpgdir;
-	/* If it was completely blank, we map in the Guest kernel stack */
+	/*
+	 * If it was completely blank, we map in the Guest kernel stack and
+	 * the Switcher.
+	 */
 	if (repin)
 		pin_stack_pages(cpu);
+
+	if (!cpu->lg->pgdirs[cpu->cpu_pgd].switcher_mapped) {
+		if (!allocate_switcher_mapping(cpu))
+			kill_guest(cpu, "Cannot populate switcher mapping");
+	}
 }
 /*:*/
 
@@ -865,7 +915,8 @@ static void do_set_pte(struct lg_cpu *cpu, int idx,
 			 * micro-benchmark.
 			 */
 			if (pte_flags(gpte) & (_PAGE_DIRTY | _PAGE_ACCESSED)) {
-				check_gpte(cpu, gpte);
+				if (!check_gpte(cpu, gpte))
+					return;
 				set_pte(spte,
 					gpte_to_spte(cpu, gpte,
 						pte_flags(gpte) & _PAGE_DIRTY));
@@ -897,6 +948,12 @@ static void do_set_pte(struct lg_cpu *cpu, int idx,
 void guest_set_pte(struct lg_cpu *cpu,
 		   unsigned long gpgdir, unsigned long vaddr, pte_t gpte)
 {
+	/* We don't let you remap the Switcher; we need it to get back! */
+	if (vaddr >= switcher_addr) {
+		kill_guest(cpu, "attempt to set pte into Switcher pages");
+		return;
+	}
+
 	/*
 	 * Kernel mappings must be changed on all top levels.  Slow, but doesn't
 	 * happen often.
@@ -933,14 +990,23 @@ void guest_set_pgd(struct lguest *lg, unsigned long gpgdir, u32 idx)
 {
 	int pgdir;
 
-	if (idx >= SWITCHER_PGD_INDEX)
+	if (idx >= PTRS_PER_PGD) {
+		kill_guest(&lg->cpus[0], "Attempt to set pgd %u/%u",
+			   idx, PTRS_PER_PGD);
 		return;
+	}
 
 	/* If they're talking about a page table we have a shadow for... */
 	pgdir = find_pgdir(lg, gpgdir);
-	if (pgdir < ARRAY_SIZE(lg->pgdirs))
+	if (pgdir < ARRAY_SIZE(lg->pgdirs)) {
 		/* ... throw it away. */
 		release_pgd(lg->pgdirs[pgdir].pgdir + idx);
+		/* That might have been the Switcher mapping; remap it. */
+		if (!allocate_switcher_mapping(&lg->cpus[0])) {
+			kill_guest(&lg->cpus[0],
+				   "Cannot populate switcher mapping");
+		}
+	}
 }
 
 #ifdef CONFIG_X86_PAE
@@ -958,6 +1024,9 @@ void guest_set_pmd(struct lguest *lg, unsigned long pmdp, u32 idx)
  * we will populate on future faults.  The Guest doesn't have any actual
  * pagetables yet, so we set linear_pages to tell demand_page() to fake it
  * for the moment.
+ *
+ * We do need the Switcher to be mapped at all times, so we allocate that
+ * part of the Guest page table here.
  */
 int init_guest_pagetable(struct lguest *lg)
 {
@@ -971,21 +1040,34 @@ int init_guest_pagetable(struct lguest *lg)
 
 	/* We start with a linear mapping until the initialize. */
 	cpu->linear_pages = true;
+
+	/* Allocate the page tables for the Switcher. */
+	if (!allocate_switcher_mapping(cpu)) {
+		release_all_pagetables(lg);
+		return -ENOMEM;
+	}
+
 	return 0;
 }
 
 /*H:508 When the Guest calls LHCALL_LGUEST_INIT we do more setup. */
 void page_table_guest_data_init(struct lg_cpu *cpu)
 {
+	/*
+	 * We tell the Guest that it can't use the virtual addresses used
+	 * by the Switcher.  ~switcher_addr + 1 is the two's-complement
+	 * negation, i.e. 4GB - switcher_addr.
+	 */
+	u32 top = ~switcher_addr + 1;
+
 	/* We get the kernel address: above this is all kernel memory. */
 	if (get_user(cpu->lg->kernel_address,
-		&cpu->lg->lguest_data->kernel_address)
+		     &cpu->lg->lguest_data->kernel_address)
 		/*
-		 * We tell the Guest that it can't use the top 2 or 4 MB
-		 * of virtual addresses used by the Switcher.
+		 * We tell the Guest that it can't use the top virtual
+		 * addresses (used by the Switcher).
 		 */
-		|| put_user(RESERVE_MEM * 1024 * 1024,
-			    &cpu->lg->lguest_data->reserve_mem)) {
+	    || put_user(top, &cpu->lg->lguest_data->reserve_mem)) {
 		kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data);
 		return;
 	}
@@ -995,12 +1077,7 @@ void page_table_guest_data_init(struct lg_cpu *cpu)
 	 * "pgd_index(lg->kernel_address)".  This assumes it won't hit the
 	 * Switcher mappings, so check that now.
 	 */
-#ifdef CONFIG_X86_PAE
-	if (pgd_index(cpu->lg->kernel_address) == SWITCHER_PGD_INDEX &&
-		pmd_index(cpu->lg->kernel_address) == SWITCHER_PMD_INDEX)
-#else
-	if (pgd_index(cpu->lg->kernel_address) >= SWITCHER_PGD_INDEX)
-#endif
+	if (cpu->lg->kernel_address >= switcher_addr)
 		kill_guest(cpu, "bad kernel address %#lx",
 				 cpu->lg->kernel_address);
 }
@@ -1017,102 +1094,96 @@ void free_guest_pagetable(struct lguest *lg)
 		free_page((long)lg->pgdirs[i].pgdir);
 }
 
-/*H:480
- * (vi) Mapping the Switcher when the Guest is about to run.
- *
- * The Switcher and the two pages for this CPU need to be visible in the
- * Guest (and not the pages for other CPUs).  We have the appropriate PTE pages
- * for each CPU already set up, we just need to hook them in now we know which
- * Guest is about to run on this CPU.
+/*H:481
+ * This clears the Switcher mappings for cpu #i.
  */
-void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages)
+static void remove_switcher_percpu_map(struct lg_cpu *cpu, unsigned int i)
 {
-	pte_t *switcher_pte_page = __this_cpu_read(switcher_pte_pages);
-	pte_t regs_pte;
+	unsigned long base = switcher_addr + PAGE_SIZE + i * PAGE_SIZE*2;
+	pte_t *pte;
 
-#ifdef CONFIG_X86_PAE
-	pmd_t switcher_pmd;
-	pmd_t *pmd_table;
-
-	switcher_pmd = pfn_pmd(__pa(switcher_pte_page) >> PAGE_SHIFT,
-			       PAGE_KERNEL_EXEC);
-
-	/* Figure out where the pmd page is, by reading the PGD, and converting
-	 * it to a virtual address. */
-	pmd_table = __va(pgd_pfn(cpu->lg->
-			pgdirs[cpu->cpu_pgd].pgdir[SWITCHER_PGD_INDEX])
-								<< PAGE_SHIFT);
-	/* Now write it into the shadow page table. */
-	set_pmd(&pmd_table[SWITCHER_PMD_INDEX], switcher_pmd);
-#else
-	pgd_t switcher_pgd;
-
-	/*
-	 * Make the last PGD entry for this Guest point to the Switcher's PTE
-	 * page for this CPU (with appropriate flags).
-	 */
-	switcher_pgd = __pgd(__pa(switcher_pte_page) | __PAGE_KERNEL_EXEC);
-
-	cpu->lg->pgdirs[cpu->cpu_pgd].pgdir[SWITCHER_PGD_INDEX] = switcher_pgd;
-
-#endif
-	/*
-	 * We also change the Switcher PTE page.  When we're running the Guest,
-	 * we want the Guest's "regs" page to appear where the first Switcher
-	 * page for this CPU is.  This is an optimization: when the Switcher
-	 * saves the Guest registers, it saves them into the first page of this
-	 * CPU's "struct lguest_pages": if we make sure the Guest's register
-	 * page is already mapped there, we don't have to copy them out
-	 * again.
-	 */
-	regs_pte = pfn_pte(__pa(cpu->regs_page) >> PAGE_SHIFT, PAGE_KERNEL);
-	set_pte(&switcher_pte_page[pte_index((unsigned long)pages)], regs_pte);
-}
-/*:*/
-
-static void free_switcher_pte_pages(void)
-{
-	unsigned int i;
+	/* Clear the mappings for both pages. */
+	pte = find_spte(cpu, base, false, 0, 0);
+	release_pte(*pte);
+	set_pte(pte, __pte(0));
 
-	for_each_possible_cpu(i)
-		free_page((long)switcher_pte_page(i));
+	pte = find_spte(cpu, base + PAGE_SIZE, false, 0, 0);
+	release_pte(*pte);
+	set_pte(pte, __pte(0));
 }
 
-/*H:520
- * Setting up the Switcher PTE page for given CPU is fairly easy, given
- * the CPU number and the "struct page"s for the Switcher code itself.
+/*H:480
+ * (vi) Mapping the Switcher when the Guest is about to run.
+ *
+ * The Switcher and the two pages for this CPU need to be visible in the Guest
+ * (and not the pages for other CPUs).
  *
- * Currently the Switcher is less than a page long, so "pages" is always 1.
+ * The pages for the pagetables have all been allocated before: we just need
+ * to make sure the actual PTEs are up-to-date for the CPU we're about to run
+ * on.
  */
-static __init void populate_switcher_pte_page(unsigned int cpu,
-					      struct page *switcher_page[],
-					      unsigned int pages)
+void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages)
 {
-	unsigned int i;
-	pte_t *pte = switcher_pte_page(cpu);
+	unsigned long base;
+	struct page *percpu_switcher_page, *regs_page;
+	pte_t *pte;
+	struct pgdir *pgdir = &cpu->lg->pgdirs[cpu->cpu_pgd];
+
+	/* Switcher page should always be mapped by now! */
+	BUG_ON(!pgdir->switcher_mapped);
+
+	/* 
+	 * Remember that we have two pages for each Host CPU, so we can run a
+	 * Guest on each CPU without them interfering.  We need to make sure
+	 * those pages are mapped correctly in the Guest, but since we usually
+	 * run on the same Host CPU, we cache the last one we mapped for, and
+	 * only update the mappings when we move.
+	 */
+	if (pgdir->last_host_cpu == raw_smp_processor_id())
+		return;
 
-	/* The first entries are easy: they map the Switcher code. */
-	for (i = 0; i < pages; i++) {
-		set_pte(&pte[i], mk_pte(switcher_page[i],
-				__pgprot(_PAGE_PRESENT|_PAGE_ACCESSED)));
+	/* -1 means unknown, so we remove everything. */
+	if (pgdir->last_host_cpu == -1) {
+		unsigned int i;
+		for_each_possible_cpu(i)
+			remove_switcher_percpu_map(cpu, i);
+	} else {
+		/* We know exactly what CPU mapping to remove. */
+		remove_switcher_percpu_map(cpu, pgdir->last_host_cpu);
 	}
 
-	/* The only other thing we map is this CPU's pair of pages. */
-	i = pages + cpu*2;
-
-	/* First page (Guest registers) is writable from the Guest */
-	set_pte(&pte[i], pfn_pte(page_to_pfn(switcher_page[i]),
-			 __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED|_PAGE_RW)));
+	/*
+	 * When we're running the Guest, we want the Guest's "regs" page to
+	 * appear where the first Switcher page for this CPU is.  This is an
+	 * optimization: when the Switcher saves the Guest registers, it saves
+	 * them into the first page of this CPU's "struct lguest_pages": if we
+	 * make sure the Guest's register page is already mapped there, we
+	 * don't have to copy them out again.
+	 */
+	/* Find the shadow PTE for this regs page. */
+	base = switcher_addr + PAGE_SIZE
+		+ raw_smp_processor_id() * sizeof(struct lguest_pages);
+	pte = find_spte(cpu, base, false, 0, 0);
+	regs_page = pfn_to_page(__pa(cpu->regs_page) >> PAGE_SHIFT);
+	get_page(regs_page);
+	set_pte(pte, mk_pte(regs_page, __pgprot(__PAGE_KERNEL & ~_PAGE_GLOBAL)));
 
 	/*
-	 * The second page contains the "struct lguest_ro_state", and is
-	 * read-only.
+	 * We map the second page of the struct lguest_pages read-only in
+	 * the Guest: the IDT, GDT and other things it's not supposed to
+	 * change.
 	 */
-	set_pte(&pte[i+1], pfn_pte(page_to_pfn(switcher_page[i+1]),
-			   __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED)));
+	pte = find_spte(cpu, base + PAGE_SIZE, false, 0, 0);
+	percpu_switcher_page
+		= lg_switcher_pages[1 + raw_smp_processor_id()*2 + 1];
+	get_page(percpu_switcher_page);
+	set_pte(pte, mk_pte(percpu_switcher_page,
+			    __pgprot(__PAGE_KERNEL_RO & ~_PAGE_GLOBAL)));
+
+	pgdir->last_host_cpu = raw_smp_processor_id();
 }
 
-/*
+/*H:490
  * We've made it through the page table code.  Perhaps our tired brains are
  * still processing the details, or perhaps we're simply glad it's over.
  *
@@ -1124,29 +1195,3 @@ static __init void populate_switcher_pte_page(unsigned int cpu,
  *
  * There is just one file remaining in the Host.
  */
-
-/*H:510
- * At boot or module load time, init_pagetables() allocates and populates
- * the Switcher PTE page for each CPU.
- */
-__init int init_pagetables(struct page **switcher_page, unsigned int pages)
-{
-	unsigned int i;
-
-	for_each_possible_cpu(i) {
-		switcher_pte_page(i) = (pte_t *)get_zeroed_page(GFP_KERNEL);
-		if (!switcher_pte_page(i)) {
-			free_switcher_pte_pages();
-			return -ENOMEM;
-		}
-		populate_switcher_pte_page(i, switcher_page, pages);
-	}
-	return 0;
-}
-/*:*/
-
-/* Cleaning up simply involves freeing the PTE page for each CPU. */
-void free_pagetables(void)
-{
-	free_switcher_pte_pages();
-}
diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
index 4af12e1..f0a3347 100644
--- a/drivers/lguest/x86/core.c
+++ b/drivers/lguest/x86/core.c
@@ -59,14 +59,13 @@ static struct {
 /* Offset from where switcher.S was compiled to where we've copied it */
 static unsigned long switcher_offset(void)
 {
-	return SWITCHER_ADDR - (unsigned long)start_switcher_text;
+	return switcher_addr - (unsigned long)start_switcher_text;
 }
 
-/* This cpu's struct lguest_pages. */
+/* This cpu's struct lguest_pages (after the Switcher text page) */
 static struct lguest_pages *lguest_pages(unsigned int cpu)
 {
-	return &(((struct lguest_pages *)
-		  (SWITCHER_ADDR + SHARED_SWITCHER_PAGES*PAGE_SIZE))[cpu]);
+	return &(((struct lguest_pages *)(switcher_addr + PAGE_SIZE))[cpu]);
 }
 
 static DEFINE_PER_CPU(struct lg_cpu *, lg_last_cpu);
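
For anyone tracing through map_switcher_in_guest() and
remove_switcher_percpu_map() above, this is the layout they rely on: the
single Switcher text page at switcher_addr, then a writable regs page and
a read-only state page for each Host cpu.  A minimal sketch of the
address arithmetic (the switcher_addr value is an illustrative stand-in):

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Illustrative stand-in for the value map_switcher() computes. */
static const unsigned long switcher_addr = 0xfff0b000UL;

/*
 * cpu i's pair of pages (one struct lguest_pages) sits right after the
 * Switcher text page; this mirrors lguest_pages() in x86/core.c.
 */
static unsigned long percpu_pages(unsigned int cpu)
{
	return switcher_addr + PAGE_SIZE + cpu * 2 * PAGE_SIZE;
}

int main(void)
{
	unsigned int cpu;

	for (cpu = 0; cpu < 4; cpu++)
		printf("cpu %u: regs page %#lx, ro state page %#lx\n",
		       cpu, percpu_pages(cpu),
		       percpu_pages(cpu) + PAGE_SIZE);
	return 0;
}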

