[Lguest] [PATCH] x86/cpu: Fix SMAP check in PVOPS environments

Rusty Russell rusty at rustcorp.com.au
Fri Jun 5 12:58:01 AEST 2015


"H. Peter Anvin" <hpa at zytor.com> writes:
> On 06/04/2015 12:55 PM, Rusty Russell wrote:
>> 
>> Yeah, hard cases make bad law.
>> 
>> I'm not too unhappy with this fix; ideally we'd rename save_fl and
>> restore_fl to save_eflags_if and restore_eflags_if too.
>> 
>
> I would be fine with this... but please document what the bloody
> semantics of pvops is actually supposed to be.

*cough*.

Lightly compile-tested...

Subject: x86: rename save_fl/restore_fl paravirt ops to highlight X86_EFLAGS_IF.
From: Rusty Russell <rusty at rustcorp.com.au>

As the comment in arch/x86/include/asm/paravirt_types.h says:

	 * Get/set interrupt state.  save_fl and restore_fl are only
	 * expected to use X86_EFLAGS_IF; all other bits
	 * returned from save_fl are undefined, and may be ignored by
	 * restore_fl.

Rename the ops to save_eflags_if/restore_eflags_if so that this
restricted contract is visible at every implementation and call site.

Signed-off-by: Rusty Russell <rusty at rustcorp.com.au>
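
For illustration (an editorial sketch, not part of the patch, using
hypothetical demo_* names): under that contract a backend only has to
round-trip the IF bit, e.g.

	static DEFINE_PER_CPU(unsigned long, demo_irq_enabled);

	/* Only X86_EFLAGS_IF is meaningful to callers. */
	static unsigned long demo_save_eflags_if(void)
	{
		return this_cpu_read(demo_irq_enabled) ? X86_EFLAGS_IF : 0;
	}

	/* Callees may ignore every bit except X86_EFLAGS_IF. */
	static void demo_restore_eflags_if(unsigned long flags)
	{
		this_cpu_write(demo_irq_enabled, !!(flags & X86_EFLAGS_IF));
	}

Real backends additionally wrap such functions with PV_CALLEE_SAVE(),
as the hunks below do, since callers expect more registers preserved
than the standard C calling convention.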

diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 8957810ad7d1..5ad7b0e62a77 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -801,12 +801,12 @@ static __always_inline void __ticket_unlock_kick(struct arch_spinlock *lock,
 
 static inline notrace unsigned long arch_local_save_flags(void)
 {
-	return PVOP_CALLEE0(unsigned long, pv_irq_ops.save_fl);
+	return PVOP_CALLEE0(unsigned long, pv_irq_ops.save_eflags_if);
 }
 
 static inline notrace void arch_local_irq_restore(unsigned long f)
 {
-	PVOP_VCALLEE1(pv_irq_ops.restore_fl, f);
+	PVOP_VCALLEE1(pv_irq_ops.restore_eflags_if, f);
 }
 
 static inline notrace void arch_local_irq_disable(void)
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index f7b0b5c112f2..05425c8544f1 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -204,8 +204,8 @@ struct pv_irq_ops {
 	 * NOTE: These functions callers expect the callee to preserve
 	 * more registers than the standard C calling convention.
 	 */
-	struct paravirt_callee_save save_fl;
-	struct paravirt_callee_save restore_fl;
+	struct paravirt_callee_save save_eflags_if;
+	struct paravirt_callee_save restore_eflags_if;
 	struct paravirt_callee_save irq_disable;
 	struct paravirt_callee_save irq_enable;
 
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index c614dd492f5f..d42e33b66ee6 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -321,8 +321,8 @@ struct pv_time_ops pv_time_ops = {
 };
 
 __visible struct pv_irq_ops pv_irq_ops = {
-	.save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
-	.restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
+	.save_eflags_if = __PV_IS_CALLEE_SAVE(native_save_fl),
+	.restore_eflags_if = __PV_IS_CALLEE_SAVE(native_restore_fl),
 	.irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
 	.irq_enable = __PV_IS_CALLEE_SAVE(native_irq_enable),
 	.safe_halt = native_safe_halt,
diff --git a/arch/x86/kernel/paravirt_patch_32.c b/arch/x86/kernel/paravirt_patch_32.c
index d9f32e6d6ab6..cf20c1b37f43 100644
--- a/arch/x86/kernel/paravirt_patch_32.c
+++ b/arch/x86/kernel/paravirt_patch_32.c
@@ -2,8 +2,8 @@
 
 DEF_NATIVE(pv_irq_ops, irq_disable, "cli");
 DEF_NATIVE(pv_irq_ops, irq_enable, "sti");
-DEF_NATIVE(pv_irq_ops, restore_fl, "push %eax; popf");
-DEF_NATIVE(pv_irq_ops, save_fl, "pushf; pop %eax");
+DEF_NATIVE(pv_irq_ops, restore_eflags_if, "push %eax; popf");
+DEF_NATIVE(pv_irq_ops, save_eflags_if, "pushf; pop %eax");
 DEF_NATIVE(pv_cpu_ops, iret, "iret");
 DEF_NATIVE(pv_cpu_ops, irq_enable_sysexit, "sti; sysexit");
 DEF_NATIVE(pv_mmu_ops, read_cr2, "mov %cr2, %eax");
@@ -38,8 +38,8 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
 	switch (type) {
 		PATCH_SITE(pv_irq_ops, irq_disable);
 		PATCH_SITE(pv_irq_ops, irq_enable);
-		PATCH_SITE(pv_irq_ops, restore_fl);
-		PATCH_SITE(pv_irq_ops, save_fl);
+		PATCH_SITE(pv_irq_ops, restore_eflags_if);
+		PATCH_SITE(pv_irq_ops, save_eflags_if);
 		PATCH_SITE(pv_cpu_ops, iret);
 		PATCH_SITE(pv_cpu_ops, irq_enable_sysexit);
 		PATCH_SITE(pv_mmu_ops, read_cr2);
diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c
index a1da6737ba5b..24eaaa5f9aa6 100644
--- a/arch/x86/kernel/paravirt_patch_64.c
+++ b/arch/x86/kernel/paravirt_patch_64.c
@@ -4,8 +4,8 @@
 
 DEF_NATIVE(pv_irq_ops, irq_disable, "cli");
 DEF_NATIVE(pv_irq_ops, irq_enable, "sti");
-DEF_NATIVE(pv_irq_ops, restore_fl, "pushq %rdi; popfq");
-DEF_NATIVE(pv_irq_ops, save_fl, "pushfq; popq %rax");
+DEF_NATIVE(pv_irq_ops, restore_eflags_if, "pushq %rdi; popfq");
+DEF_NATIVE(pv_irq_ops, save_eflags_if, "pushfq; popq %rax");
 DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax");
 DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax");
 DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3");
@@ -45,8 +45,8 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
 			end = end_##ops##_##x;			\
 			goto patch_site
 	switch(type) {
-		PATCH_SITE(pv_irq_ops, restore_fl);
-		PATCH_SITE(pv_irq_ops, save_fl);
+		PATCH_SITE(pv_irq_ops, restore_eflags_if);
+		PATCH_SITE(pv_irq_ops, save_eflags_if);
 		PATCH_SITE(pv_irq_ops, irq_enable);
 		PATCH_SITE(pv_irq_ops, irq_disable);
 		PATCH_SITE(pv_cpu_ops, irq_enable_sysexit);
diff --git a/arch/x86/kernel/vsmp_64.c b/arch/x86/kernel/vsmp_64.c
index ee22c1d93ae5..c7f09f3fb31d 100644
--- a/arch/x86/kernel/vsmp_64.c
+++ b/arch/x86/kernel/vsmp_64.c
@@ -78,8 +78,8 @@ static unsigned __init_or_module vsmp_patch(u8 type, u16 clobbers, void *ibuf,
 	switch (type) {
 	case PARAVIRT_PATCH(pv_irq_ops.irq_enable):
 	case PARAVIRT_PATCH(pv_irq_ops.irq_disable):
-	case PARAVIRT_PATCH(pv_irq_ops.save_fl):
-	case PARAVIRT_PATCH(pv_irq_ops.restore_fl):
+	case PARAVIRT_PATCH(pv_irq_ops.save_eflags_if):
+	case PARAVIRT_PATCH(pv_irq_ops.restore_eflags_if):
 		return paravirt_patch_default(type, clobbers, ibuf, addr, len);
 	default:
 		return native_patch(type, clobbers, ibuf, addr, len);
@@ -119,8 +119,8 @@ static void __init set_vsmp_pv_ops(void)
 		/* Setup irq ops and turn on vSMP  IRQ fastpath handling */
 		pv_irq_ops.irq_disable = PV_CALLEE_SAVE(vsmp_irq_disable);
 		pv_irq_ops.irq_enable  = PV_CALLEE_SAVE(vsmp_irq_enable);
-		pv_irq_ops.save_fl  = PV_CALLEE_SAVE(vsmp_save_fl);
-		pv_irq_ops.restore_fl  = PV_CALLEE_SAVE(vsmp_restore_fl);
+		pv_irq_ops.save_eflags_if  = PV_CALLEE_SAVE(vsmp_save_fl);
+		pv_irq_ops.restore_eflags_if  = PV_CALLEE_SAVE(vsmp_restore_fl);
 		pv_init_ops.patch = vsmp_patch;
 		ctl &= ~(1 << 4);
 	}
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index 8f9a133cc099..5f35bf3ff0b0 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -1426,8 +1426,8 @@ __init void lguest_init(void)
 	 */
 
 	/* Interrupt-related operations */
-	pv_irq_ops.save_fl = PV_CALLEE_SAVE(lguest_save_fl);
-	pv_irq_ops.restore_fl = __PV_IS_CALLEE_SAVE(lg_restore_fl);
+	pv_irq_ops.save_eflags_if = PV_CALLEE_SAVE(lguest_save_fl);
+	pv_irq_ops.restore_eflags_if = __PV_IS_CALLEE_SAVE(lg_restore_fl);
 	pv_irq_ops.irq_disable = PV_CALLEE_SAVE(lguest_irq_disable);
 	pv_irq_ops.irq_enable = __PV_IS_CALLEE_SAVE(lg_irq_enable);
 	pv_irq_ops.safe_halt = lguest_safe_halt;
diff --git a/arch/x86/lguest/head_32.S b/arch/x86/lguest/head_32.S
index d5ae63f5ec5d..03b94d38fbd7 100644
--- a/arch/x86/lguest/head_32.S
+++ b/arch/x86/lguest/head_32.S
@@ -64,7 +64,7 @@ LGUEST_PATCH(pushf, movl lguest_data+LGUEST_DATA_irq_enabled, %eax)
 
 /*G:033
  * But using those wrappers is inefficient (we'll see why that doesn't matter
- * for save_fl and irq_disable later).  If we write our routines carefully in
+ * for lguest_save_fl and irq_disable later).  If we write our routines carefully in
  * assembler, we can avoid clobbering any registers and avoid jumping through
  * the wrapper functions.
  *
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 46957ead3060..d9511df0d63c 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1074,8 +1074,8 @@ void xen_setup_vcpu_info_placement(void)
 	 * percpu area for all cpus, so make use of it. Note that for
 	 * PVH we want to use native IRQ mechanism. */
 	if (have_vcpu_info_placement && !xen_pvh_domain()) {
-		pv_irq_ops.save_fl = __PV_IS_CALLEE_SAVE(xen_save_fl_direct);
-		pv_irq_ops.restore_fl = __PV_IS_CALLEE_SAVE(xen_restore_fl_direct);
+		pv_irq_ops.save_eflags_if = __PV_IS_CALLEE_SAVE(xen_save_fl_direct);
+		pv_irq_ops.restore_eflags_if = __PV_IS_CALLEE_SAVE(xen_restore_fl_direct);
 		pv_irq_ops.irq_disable = __PV_IS_CALLEE_SAVE(xen_irq_disable_direct);
 		pv_irq_ops.irq_enable = __PV_IS_CALLEE_SAVE(xen_irq_enable_direct);
 		pv_mmu_ops.read_cr2 = xen_read_cr2_direct;
@@ -1102,8 +1102,8 @@ static unsigned xen_patch(u8 type, u16 clobbers, void *insnbuf,
 	switch (type) {
 		SITE(pv_irq_ops, irq_enable);
 		SITE(pv_irq_ops, irq_disable);
-		SITE(pv_irq_ops, save_fl);
-		SITE(pv_irq_ops, restore_fl);
+		SITE(pv_irq_ops, save_eflags_if);
+		SITE(pv_irq_ops, restore_eflags_if);
 #undef SITE
 
 	patch_site:
diff --git a/arch/x86/xen/irq.c b/arch/x86/xen/irq.c
index a1207cb6472a..a7f58c48c2e1 100644
--- a/arch/x86/xen/irq.c
+++ b/arch/x86/xen/irq.c
@@ -115,8 +115,8 @@ static void xen_halt(void)
 }
 
 static const struct pv_irq_ops xen_irq_ops __initconst = {
-	.save_fl = PV_CALLEE_SAVE(xen_save_fl),
-	.restore_fl = PV_CALLEE_SAVE(xen_restore_fl),
+	.save_eflags_if = PV_CALLEE_SAVE(xen_save_fl),
+	.restore_eflags_if = PV_CALLEE_SAVE(xen_restore_fl),
 	.irq_disable = PV_CALLEE_SAVE(xen_irq_disable),
 	.irq_enable = PV_CALLEE_SAVE(xen_irq_enable),
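
For context (again an editorial note, not part of the patch): callers
reach these ops through arch_local_save_flags()/arch_local_irq_restore(),
typically via the local_irq_save()/local_irq_restore() macros, which is
why only X86_EFLAGS_IF ever needs to survive the round trip:

	unsigned long flags;

	local_irq_save(flags);		/* -> pv_irq_ops.save_eflags_if */
	/* ... critical section, interrupts disabled ... */
	local_irq_restore(flags);	/* -> pv_irq_ops.restore_eflags_if */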
 
