Diffstat (limited to 'system/xen/xsa/xsa263-4.10-0005-x86-spec_ctrl-Rename-bits-of-infrastructure-to-avoid.patch')
-rw-r--r--  system/xen/xsa/xsa263-4.10-0005-x86-spec_ctrl-Rename-bits-of-infrastructure-to-avoid.patch | 273
1 file changed, 273 insertions(+), 0 deletions(-)
diff --git a/system/xen/xsa/xsa263-4.10-0005-x86-spec_ctrl-Rename-bits-of-infrastructure-to-avoid.patch b/system/xen/xsa/xsa263-4.10-0005-x86-spec_ctrl-Rename-bits-of-infrastructure-to-avoid.patch
new file mode 100644
index 0000000000..f4efabeb46
--- /dev/null
+++ b/system/xen/xsa/xsa263-4.10-0005-x86-spec_ctrl-Rename-bits-of-infrastructure-to-avoid.patch
@@ -0,0 +1,273 @@
+From 5cc3611de7d09140e55caa2c2d120ad326fff937 Mon Sep 17 00:00:00 2001
+From: Andrew Cooper <andrew.cooper3@citrix.com>
+Date: Mon, 30 Apr 2018 14:20:23 +0100
+Subject: [PATCH] x86/spec_ctrl: Rename bits of infrastructure to avoid NATIVE
+ and VMEXIT
+
+In hindsight, using NATIVE and VMEXIT as naming terminology was not clever.
+A future change wants to split SPEC_CTRL_EXIT_TO_GUEST into PV- and
+HVM-specific implementations, and using VMEXIT as a term is completely wrong.
+
+Take the opportunity to fix some stale documentation in spec_ctrl_asm.h. The
+IST helpers were missing from the large comment block, and since
+SPEC_CTRL_ENTRY_FROM_INTR_IST was introduced, we've gained a new piece of
+functionality which currently depends on the fine-grained control that exists
+in lieu of livepatching. Note this in the comment.
+
+No functional change.
+
+Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Reviewed-by: Wei Liu <wei.liu2@citrix.com>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+Release-acked-by: Juergen Gross <jgross@suse.com>
+(cherry picked from commit d9822b8a38114e96e4516dc998f4055249364d5d)
+---
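+
+[ Editor's note, kept below the "---" cut line so "git am" ignores it; this
+  is an illustration, not part of Andrew Cooper's change.  The spec_ctrl.c
+  hunk below keeps accepting the old "rsb_native"/"rsb_vmexit" command line
+  tokens while renaming only the variables behind them.  A minimal,
+  self-contained C sketch of that backward-compatible parsing follows;
+  this parse_boolean() is a simplified stand-in for Xen's helper, whose
+  real signature differs.
+
+  #include <stdbool.h>
+  #include <stdio.h>
+  #include <string.h>
+
+  static bool opt_rsb_pv = true;  /* was opt_rsb_native */
+  static bool opt_rsb_hvm = true; /* was opt_rsb_vmexit */
+
+  /* Return 1/0 if tok is "name" or "name=<0|1>", -1 if it doesn't match. */
+  static int parse_boolean(const char *name, const char *tok)
+  {
+      size_t n = strlen(name);
+
+      if ( strncmp(tok, name, n) != 0 )
+          return -1;
+      if ( tok[n] == '\0' )
+          return 1;
+      if ( tok[n] == '=' && (tok[n + 1] == '0' || tok[n + 1] == '1') )
+          return tok[n + 1] - '0';
+
+      return -1;
+  }
+
+  int main(void)
+  {
+      const char *tok = "rsb_vmexit=0"; /* old token name, still accepted */
+      int val;
+
+      if ( (val = parse_boolean("rsb_native", tok)) >= 0 )
+          opt_rsb_pv = val;             /* renamed variable */
+      else if ( (val = parse_boolean("rsb_vmexit", tok)) >= 0 )
+          opt_rsb_hvm = val;            /* renamed variable */
+
+      printf("rsb_pv=%d rsb_hvm=%d\n", opt_rsb_pv, opt_rsb_hvm);
+      return 0;
+  }
+]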
+ xen/arch/x86/hvm/svm/entry.S | 4 ++--
+ xen/arch/x86/hvm/vmx/entry.S | 4 ++--
+ xen/arch/x86/spec_ctrl.c | 20 ++++++++++----------
+ xen/arch/x86/x86_64/compat/entry.S | 2 +-
+ xen/arch/x86/x86_64/entry.S | 2 +-
+ xen/include/asm-x86/cpufeatures.h | 4 ++--
+ xen/include/asm-x86/spec_ctrl_asm.h | 36 +++++++++++++++++++++++++-----------
+ 7 files changed, 43 insertions(+), 29 deletions(-)
+
+diff --git a/xen/arch/x86/hvm/svm/entry.S b/xen/arch/x86/hvm/svm/entry.S
+index bf092fe..5e7c080 100644
+--- a/xen/arch/x86/hvm/svm/entry.S
++++ b/xen/arch/x86/hvm/svm/entry.S
+@@ -83,7 +83,7 @@ UNLIKELY_END(svm_trace)
+ mov VCPUMSR_spec_ctrl_raw(%rax), %eax
+
+ /* WARNING! `ret`, `call *`, `jmp *` not safe beyond this point. */
+- SPEC_CTRL_EXIT_TO_GUEST /* Req: a=spec_ctrl %rsp=regs/cpuinfo, Clob: cd */
++ SPEC_CTRL_EXIT_TO_HVM /* Req: a=spec_ctrl %rsp=regs/cpuinfo, Clob: cd */
+
+ pop %r15
+ pop %r14
+@@ -108,7 +108,7 @@ UNLIKELY_END(svm_trace)
+
+ GET_CURRENT(bx)
+
+- SPEC_CTRL_ENTRY_FROM_VMEXIT /* Req: b=curr %rsp=regs/cpuinfo, Clob: acd */
++ SPEC_CTRL_ENTRY_FROM_HVM /* Req: b=curr %rsp=regs/cpuinfo, Clob: acd */
+ /* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
+
+ mov VCPU_svm_vmcb(%rbx),%rcx
+diff --git a/xen/arch/x86/hvm/vmx/entry.S b/xen/arch/x86/hvm/vmx/entry.S
+index e750544..aa2f103 100644
+--- a/xen/arch/x86/hvm/vmx/entry.S
++++ b/xen/arch/x86/hvm/vmx/entry.S
+@@ -38,7 +38,7 @@ ENTRY(vmx_asm_vmexit_handler)
+ movb $1,VCPU_vmx_launched(%rbx)
+ mov %rax,VCPU_hvm_guest_cr2(%rbx)
+
+- SPEC_CTRL_ENTRY_FROM_VMEXIT /* Req: b=curr %rsp=regs/cpuinfo, Clob: acd */
++ SPEC_CTRL_ENTRY_FROM_HVM /* Req: b=curr %rsp=regs/cpuinfo, Clob: acd */
+ /* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
+
+ mov %rsp,%rdi
+@@ -76,7 +76,7 @@ UNLIKELY_END(realmode)
+ mov VCPUMSR_spec_ctrl_raw(%rax), %eax
+
+ /* WARNING! `ret`, `call *`, `jmp *` not safe beyond this point. */
+- SPEC_CTRL_EXIT_TO_GUEST /* Req: a=spec_ctrl %rsp=regs/cpuinfo, Clob: cd */
++ SPEC_CTRL_EXIT_TO_HVM /* Req: a=spec_ctrl %rsp=regs/cpuinfo, Clob: cd */
+
+ mov VCPU_hvm_guest_cr2(%rbx),%rax
+
+diff --git a/xen/arch/x86/spec_ctrl.c b/xen/arch/x86/spec_ctrl.c
+index b62cfcc..015a9e2 100644
+--- a/xen/arch/x86/spec_ctrl.c
++++ b/xen/arch/x86/spec_ctrl.c
+@@ -35,8 +35,8 @@ static enum ind_thunk {
+ THUNK_JMP,
+ } opt_thunk __initdata = THUNK_DEFAULT;
+ static int8_t __initdata opt_ibrs = -1;
+-static bool __initdata opt_rsb_native = true;
+-static bool __initdata opt_rsb_vmexit = true;
++static bool __initdata opt_rsb_pv = true;
++static bool __initdata opt_rsb_hvm = true;
+ bool __read_mostly opt_ibpb = true;
+ uint8_t __read_mostly default_xen_spec_ctrl;
+ uint8_t __read_mostly default_spec_ctrl_flags;
+@@ -69,9 +69,9 @@ static int __init parse_bti(const char *s)
+ else if ( (val = parse_boolean("ibpb", s, ss)) >= 0 )
+ opt_ibpb = val;
+ else if ( (val = parse_boolean("rsb_native", s, ss)) >= 0 )
+- opt_rsb_native = val;
++ opt_rsb_pv = val;
+ else if ( (val = parse_boolean("rsb_vmexit", s, ss)) >= 0 )
+- opt_rsb_vmexit = val;
++ opt_rsb_hvm = val;
+ else
+ rc = -EINVAL;
+
+@@ -116,8 +116,8 @@ static void __init print_details(enum ind_thunk thunk, uint64_t caps)
+ default_xen_spec_ctrl & SPEC_CTRL_IBRS ? " IBRS+" :
+ " IBRS-" : "",
+ opt_ibpb ? " IBPB" : "",
+- boot_cpu_has(X86_FEATURE_RSB_NATIVE) ? " RSB_NATIVE" : "",
+- boot_cpu_has(X86_FEATURE_RSB_VMEXIT) ? " RSB_VMEXIT" : "");
++ boot_cpu_has(X86_FEATURE_SC_RSB_PV) ? " RSB_NATIVE" : "",
++ boot_cpu_has(X86_FEATURE_SC_RSB_HVM) ? " RSB_VMEXIT" : "");
+
+ printk("XPTI: %s\n",
+ boot_cpu_has(X86_FEATURE_NO_XPTI) ? "disabled" : "enabled");
+@@ -307,9 +307,9 @@ void __init init_speculation_mitigations(void)
+ * If a processor speculates to 32bit PV guest kernel mappings, it is
+ * speculating in 64bit supervisor mode, and can leak data.
+ */
+- if ( opt_rsb_native )
++ if ( opt_rsb_pv )
+ {
+- setup_force_cpu_cap(X86_FEATURE_RSB_NATIVE);
++ setup_force_cpu_cap(X86_FEATURE_SC_RSB_PV);
+ default_spec_ctrl_flags |= SCF_ist_rsb;
+ }
+
+@@ -317,8 +317,8 @@ void __init init_speculation_mitigations(void)
+ * HVM guests can always poison the RSB to point at Xen supervisor
+ * mappings.
+ */
+- if ( opt_rsb_vmexit )
+- setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT);
++ if ( opt_rsb_hvm )
++ setup_force_cpu_cap(X86_FEATURE_SC_RSB_HVM);
+
+ /* Check we have hardware IBPB support before using it... */
+ if ( !boot_cpu_has(X86_FEATURE_IBRSB) && !boot_cpu_has(X86_FEATURE_IBPB) )
+diff --git a/xen/arch/x86/x86_64/compat/entry.S b/xen/arch/x86/x86_64/compat/entry.S
+index a47cb9d..6a27d98 100644
+--- a/xen/arch/x86/x86_64/compat/entry.S
++++ b/xen/arch/x86/x86_64/compat/entry.S
+@@ -166,7 +166,7 @@ ENTRY(compat_restore_all_guest)
+ mov VCPUMSR_spec_ctrl_raw(%rax), %eax
+
+ /* WARNING! `ret`, `call *`, `jmp *` not safe beyond this point. */
+- SPEC_CTRL_EXIT_TO_GUEST /* Req: a=spec_ctrl %rsp=regs/cpuinfo, Clob: cd */
++ SPEC_CTRL_EXIT_TO_PV /* Req: a=spec_ctrl %rsp=regs/cpuinfo, Clob: cd */
+
+ RESTORE_ALL adj=8 compat=1
+ .Lft0: iretq
+diff --git a/xen/arch/x86/x86_64/entry.S b/xen/arch/x86/x86_64/entry.S
+index 41d3ec2..0a0763a 100644
+--- a/xen/arch/x86/x86_64/entry.S
++++ b/xen/arch/x86/x86_64/entry.S
+@@ -196,7 +196,7 @@ restore_all_guest:
+ mov %r15d, %eax
+
+ /* WARNING! `ret`, `call *`, `jmp *` not safe beyond this point. */
+- SPEC_CTRL_EXIT_TO_GUEST /* Req: a=spec_ctrl %rsp=regs/cpuinfo, Clob: cd */
++ SPEC_CTRL_EXIT_TO_PV /* Req: a=spec_ctrl %rsp=regs/cpuinfo, Clob: cd */
+
+ RESTORE_ALL
+ testw $TRAP_syscall,4(%rsp)
+diff --git a/xen/include/asm-x86/cpufeatures.h b/xen/include/asm-x86/cpufeatures.h
+index ca58b0e..f9aa5d7 100644
+--- a/xen/include/asm-x86/cpufeatures.h
++++ b/xen/include/asm-x86/cpufeatures.h
+@@ -27,6 +27,6 @@ XEN_CPUFEATURE(IND_THUNK_LFENCE,(FSCAPINTS+0)*32+13) /* Use IND_THUNK_LFENCE */
+ XEN_CPUFEATURE(IND_THUNK_JMP, (FSCAPINTS+0)*32+14) /* Use IND_THUNK_JMP */
+ XEN_CPUFEATURE(XEN_IBPB, (FSCAPINTS+0)*32+15) /* IBRSB || IBPB */
+ XEN_CPUFEATURE(SC_MSR, (FSCAPINTS+0)*32+16) /* MSR_SPEC_CTRL used by Xen */
+-XEN_CPUFEATURE(RSB_NATIVE, (FSCAPINTS+0)*32+18) /* RSB overwrite needed for native */
+-XEN_CPUFEATURE(RSB_VMEXIT, (FSCAPINTS+0)*32+19) /* RSB overwrite needed for vmexit */
++XEN_CPUFEATURE(SC_RSB_PV, (FSCAPINTS+0)*32+18) /* RSB overwrite needed for PV */
++XEN_CPUFEATURE(SC_RSB_HVM, (FSCAPINTS+0)*32+19) /* RSB overwrite needed for HVM */
+ XEN_CPUFEATURE(NO_XPTI, (FSCAPINTS+0)*32+20) /* XPTI mitigation not in use */
+diff --git a/xen/include/asm-x86/spec_ctrl_asm.h b/xen/include/asm-x86/spec_ctrl_asm.h
+index 17dd2cc..3d156ed 100644
+--- a/xen/include/asm-x86/spec_ctrl_asm.h
++++ b/xen/include/asm-x86/spec_ctrl_asm.h
+@@ -72,11 +72,14 @@
+ *
+ * The following ASM fragments implement this algorithm. See their local
+ * comments for further details.
+- * - SPEC_CTRL_ENTRY_FROM_VMEXIT
++ * - SPEC_CTRL_ENTRY_FROM_HVM
+ * - SPEC_CTRL_ENTRY_FROM_PV
+ * - SPEC_CTRL_ENTRY_FROM_INTR
++ * - SPEC_CTRL_ENTRY_FROM_INTR_IST
++ * - SPEC_CTRL_EXIT_TO_XEN_IST
+ * - SPEC_CTRL_EXIT_TO_XEN
+- * - SPEC_CTRL_EXIT_TO_GUEST
++ * - SPEC_CTRL_EXIT_TO_PV
++ * - SPEC_CTRL_EXIT_TO_HVM
+ */
+
+ .macro DO_OVERWRITE_RSB tmp=rax
+@@ -117,7 +120,7 @@
+ mov %\tmp, %rsp /* Restore old %rsp */
+ .endm
+
+-.macro DO_SPEC_CTRL_ENTRY_FROM_VMEXIT
++.macro DO_SPEC_CTRL_ENTRY_FROM_HVM
+ /*
+ * Requires %rbx=current, %rsp=regs/cpuinfo
+ * Clobbers %rax, %rcx, %rdx
+@@ -217,23 +220,23 @@
+ .endm
+
+ /* Use after a VMEXIT from an HVM guest. */
+-#define SPEC_CTRL_ENTRY_FROM_VMEXIT \
++#define SPEC_CTRL_ENTRY_FROM_HVM \
+ ALTERNATIVE __stringify(ASM_NOP40), \
+- DO_OVERWRITE_RSB, X86_FEATURE_RSB_VMEXIT; \
++ DO_OVERWRITE_RSB, X86_FEATURE_SC_RSB_HVM; \
+ ALTERNATIVE __stringify(ASM_NOP36), \
+- DO_SPEC_CTRL_ENTRY_FROM_VMEXIT, X86_FEATURE_SC_MSR
++ DO_SPEC_CTRL_ENTRY_FROM_HVM, X86_FEATURE_SC_MSR
+
+ /* Use after an entry from PV context (syscall/sysenter/int80/int82/etc). */
+ #define SPEC_CTRL_ENTRY_FROM_PV \
+ ALTERNATIVE __stringify(ASM_NOP40), \
+- DO_OVERWRITE_RSB, X86_FEATURE_RSB_NATIVE; \
++ DO_OVERWRITE_RSB, X86_FEATURE_SC_RSB_PV; \
+ ALTERNATIVE __stringify(ASM_NOP25), \
+ __stringify(DO_SPEC_CTRL_ENTRY maybexen=0), X86_FEATURE_SC_MSR
+
+ /* Use in interrupt/exception context. May interrupt Xen or PV context. */
+ #define SPEC_CTRL_ENTRY_FROM_INTR \
+ ALTERNATIVE __stringify(ASM_NOP40), \
+- DO_OVERWRITE_RSB, X86_FEATURE_RSB_NATIVE; \
++ DO_OVERWRITE_RSB, X86_FEATURE_SC_RSB_PV; \
+ ALTERNATIVE __stringify(ASM_NOP33), \
+ __stringify(DO_SPEC_CTRL_ENTRY maybexen=1), X86_FEATURE_SC_MSR
+
+@@ -242,12 +245,22 @@
+ ALTERNATIVE __stringify(ASM_NOP17), \
+ DO_SPEC_CTRL_EXIT_TO_XEN, X86_FEATURE_SC_MSR
+
+-/* Use when exiting to guest context. */
+-#define SPEC_CTRL_EXIT_TO_GUEST \
++/* Use when exiting to PV guest context. */
++#define SPEC_CTRL_EXIT_TO_PV \
+ ALTERNATIVE __stringify(ASM_NOP24), \
+ DO_SPEC_CTRL_EXIT_TO_GUEST, X86_FEATURE_SC_MSR
+
+-/* TODO: Drop these when the alternatives infrastructure is NMI/#MC safe. */
++/* Use when exiting to HVM guest context. */
++#define SPEC_CTRL_EXIT_TO_HVM \
++ ALTERNATIVE __stringify(ASM_NOP24), \
++ DO_SPEC_CTRL_EXIT_TO_GUEST, X86_FEATURE_SC_MSR
++
++/*
++ * Use in IST interrupt/exception context. May interrupt Xen or PV context.
++ * Fine grain control of SCF_ist_wrmsr is needed for safety in the S3 resume
++ * path to avoid using MSR_SPEC_CTRL before the microcode introducing it has
++ * been reloaded.
++ */
+ .macro SPEC_CTRL_ENTRY_FROM_INTR_IST
+ /*
+ * Requires %rsp=regs, %r14=stack_end
+@@ -294,6 +307,7 @@ UNLIKELY_DISPATCH_LABEL(\@_serialise):
+ UNLIKELY_END(\@_serialise)
+ .endm
+
++/* Use when exiting to Xen in IST context. */
+ .macro SPEC_CTRL_EXIT_TO_XEN_IST
+ /*
+ * Requires %rbx=stack_end
+--
+2.1.4
+
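
Editorial note, outside the patch: the renamed synthetic features
X86_FEATURE_SC_RSB_PV and X86_FEATURE_SC_RSB_HVM gate, through the
ALTERNATIVE infrastructure, whether the DO_OVERWRITE_RSB fragment is
patched into the PV entry and HVM vmexit paths at boot. The following
self-contained C sketch models that gating in ordinary code; the names
mirror the patch, but the logic is an illustrative assumption, not Xen's
implementation (which rewrites NOPs into the real asm fragments).

#include <stdbool.h>
#include <stdio.h>

enum { X86_FEATURE_SC_RSB_PV, X86_FEATURE_SC_RSB_HVM, NR_FEATURES };
static bool cpu_caps[NR_FEATURES];

static void setup_force_cpu_cap(int feat) { cpu_caps[feat] = true; }

static void spec_ctrl_entry_from_pv(void)
{
    if ( cpu_caps[X86_FEATURE_SC_RSB_PV] )  /* ALTERNATIVE: RSB vs NOPs */
        puts("DO_OVERWRITE_RSB on entry from PV context");
}

static void spec_ctrl_entry_from_hvm(void)
{
    if ( cpu_caps[X86_FEATURE_SC_RSB_HVM] )
        puts("DO_OVERWRITE_RSB after a VMEXIT from an HVM guest");
}

int main(void)
{
    bool opt_rsb_pv = true, opt_rsb_hvm = false; /* as parsed from bti= */

    if ( opt_rsb_pv )
        setup_force_cpu_cap(X86_FEATURE_SC_RSB_PV);
    if ( opt_rsb_hvm )
        setup_force_cpu_cap(X86_FEATURE_SC_RSB_HVM);

    spec_ctrl_entry_from_pv();   /* prints: RSB overwrite runs */
    spec_ctrl_entry_from_hvm();  /* prints nothing: gated off */
    return 0;
}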