Diffstat (limited to 'system/xen')
14 files changed, 393 insertions, 40 deletions
diff --git a/system/xen/dom0/config-4.4.14-xen.i486 b/system/xen/dom0/config-4.4.19-xen.i486
index a2e0a41984..b1014d4caf 100644
--- a/system/xen/dom0/config-4.4.14-xen.i486
+++ b/system/xen/dom0/config-4.4.19-xen.i486
@@ -1,6 +1,6 @@
 #
 # Automatically generated file; DO NOT EDIT.
-# Linux/x86 4.4.14 Kernel Configuration
+# Linux/x86 4.4.19 Kernel Configuration
 #
 # CONFIG_64BIT is not set
 CONFIG_X86_32=y
diff --git a/system/xen/dom0/config-4.4.14-xen.x86_64 b/system/xen/dom0/config-4.4.19-xen.x86_64
index 57a21f68fd..c9b1e20335 100644
--- a/system/xen/dom0/config-4.4.14-xen.x86_64
+++ b/system/xen/dom0/config-4.4.19-xen.x86_64
@@ -1,6 +1,6 @@
 #
 # Automatically generated file; DO NOT EDIT.
-# Linux/x86 4.4.14 Kernel Configuration
+# Linux/x86 4.4.19 Kernel Configuration
 #
 CONFIG_64BIT=y
 CONFIG_X86_64=y
@@ -397,7 +397,7 @@ CONFIG_XEN_PVHVM=y
 CONFIG_XEN_512GB=y
 CONFIG_XEN_SAVE_RESTORE=y
 # CONFIG_XEN_DEBUG_FS is not set
-# CONFIG_XEN_PVH is not set
+CONFIG_XEN_PVH=y
 CONFIG_KVM_GUEST=y
 # CONFIG_KVM_DEBUG_FS is not set
 # CONFIG_PARAVIRT_TIME_ACCOUNTING is not set
diff --git a/system/xen/dom0/kernel-xen.sh b/system/xen/dom0/kernel-xen.sh
index 15e7439f33..afc6a67482 100644
--- a/system/xen/dom0/kernel-xen.sh
+++ b/system/xen/dom0/kernel-xen.sh
@@ -5,8 +5,8 @@
 # Written by Chris Abela <chris.abela@maltats.com>, 20100515
 # Modified by Mario Preksavec <mario@slackware.hr>
 
-KERNEL=${KERNEL:-4.4.14}
-XEN=${XEN:-4.6.3}
+KERNEL=${KERNEL:-4.4.19}
+XEN=${XEN:-4.7.0}
 
 BOOTLOADER=${BOOTLOADER:-lilo}
 ROOTMOD=${ROOTMOD:-ext4}
diff --git a/system/xen/domU/domU.sh b/system/xen/domU/domU.sh
index bdcee3ff6a..5d0825cebe 100644
--- a/system/xen/domU/domU.sh
+++ b/system/xen/domU/domU.sh
@@ -7,7 +7,7 @@
 
 set -e
 
-KERNEL=${KERNEL:-4.4.14}
+KERNEL=${KERNEL:-4.4.19}
 
 # Build an image for the root file system and another for the swap
 # Default values : 8GB and 500MB resepectively.
diff --git a/system/xen/patches/xen-4.7-regression-when-saving-a-pv-guest.patch b/system/xen/patches/xen-4.7-regression-when-saving-a-pv-guest.patch
new file mode 100644
index 0000000000..457fa51f23
--- /dev/null
+++ b/system/xen/patches/xen-4.7-regression-when-saving-a-pv-guest.patch
@@ -0,0 +1,12 @@
+diff --git a/tools/libxc/xc_sr_save_x86_pv.c b/tools/libxc/xc_sr_save_x86_pv.c
+index 4a29460..7043409 100644
+--- a/tools/libxc/xc_sr_save_x86_pv.c
++++ b/tools/libxc/xc_sr_save_x86_pv.c
+@@ -430,6 +430,8 @@ static int map_p2m_list(struct xc_sr_context *ctx, uint64_t p2m_cr3)
+ 
+     if ( level == 2 )
+     {
++        if ( saved_idx == idx_end )
++            saved_idx++;
+         max_pfn = ((xen_pfn_t)saved_idx << 9) * fpp - 1;
+         if ( max_pfn < ctx->x86_pv.max_pfn )
diff --git a/system/xen/xen.SlackBuild b/system/xen/xen.SlackBuild
index 21afbcb9a9..e84c2d74e8 100644
--- a/system/xen/xen.SlackBuild
+++ b/system/xen/xen.SlackBuild
@@ -23,12 +23,13 @@
 # ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 PRGNAM=xen
-VERSION=${VERSION:-4.6.3}
-BUILD=${BUILD:-2}
+VERSION=${VERSION:-4.7.0}
+BUILD=${BUILD:-1}
 TAG=${TAG:-_SBo}
 
-SEABIOS=${SEABIOS:-1.8.2}
+SEABIOS=${SEABIOS:-1.9.2}
 OVMF=${OVMF:-52a99493cce88a9d4ec8a02d7f1bd1a1001ce60d}
+IPXE=${IPXE:-9a93db3f0947484e30e753bbd61a10b17336e20e}
 
 if [ -z "$ARCH" ]; then
   case "$( uname -m )" in
@@ -116,15 +117,21 @@ for i in $CWD/xsa/* ; do
   esac
 done
 
-# Tweak some things
+# Upstream fixes
+patch -p1 <$CWD/patches/xen-4.7-regression-when-saving-a-pv-guest.patch
+
+# Don't link with libssh and bluez by default
 sed "s/@@CONF_LIBSSH2@@/$CONF_LIBSSH2/;s/@@CONF_BLUEZ@@/$CONF_BLUEZ/" \
   $CWD/patches/qemu_configure_options.diff | patch -p1
+
+# Remove hardlinks
 patch -p1 <$CWD/patches/symlinks_instead_of_hardlinks.diff
 
 # Let's not download stuff during the build...
 patch -p1 <$CWD/patches/use_already_present_ipxe.diff
-cp $CWD/ipxe-git-9a93db3f0947484e30e753bbd61a10b17336e20e.tar.gz \
-  tools/firmware/etherboot/_ipxe.tar.gz
+
+# Copy already present source tarballs
+cp $CWD/ipxe-git-$IPXE.tar.gz tools/firmware/etherboot/_ipxe.tar.gz
 
 ( # Seabios
   cd tools/firmware
@@ -195,7 +202,7 @@ find $PKG/boot/ -type l -a -name "xen-*" -exec rm -f {} \; 2>/dev/null || true
 rmdir $PKG/etc/rc.d/init.d/
 
 # Append .new to config files
-for i in $PKG/etc/{default/*,{qemu,xen}/*.conf} ; do mv $i $i.new ; done
+for i in $PKG/etc/{default/*,xen/*.conf} ; do mv $i $i.new ; done
 
 find $PKG | xargs file | grep -e "executable" -e "shared object" | grep ELF \
   | cut -f 1 -d : | xargs strip --strip-unneeded 2> /dev/null || true
diff --git a/system/xen/xen.info b/system/xen/xen.info
index 32ea026b57..0ad01f3d59 100644
--- a/system/xen/xen.info
+++ b/system/xen/xen.info
@@ -1,7 +1,7 @@
 PRGNAM="xen"
-VERSION="4.6.3"
+VERSION="4.7.0"
 HOMEPAGE="http://www.xenproject.org/"
-DOWNLOAD="http://mirror.slackware.hr/sources/xen/xen-4.6.3.tar.gz \
+DOWNLOAD="http://mirror.slackware.hr/sources/xen/xen-4.7.0.tar.gz \
           http://mirror.slackware.hr/sources/xen-extfiles/ipxe-git-9a93db3f0947484e30e753bbd61a10b17336e20e.tar.gz \
           http://mirror.slackware.hr/sources/xen-extfiles/lwip-1.3.0.tar.gz \
           http://mirror.slackware.hr/sources/xen-extfiles/zlib-1.2.3.tar.gz \
@@ -11,9 +11,9 @@ DOWNLOAD="http://mirror.slackware.hr/sources/xen/xen-4.6.3.tar.gz \
           http://mirror.slackware.hr/sources/xen-extfiles/polarssl-1.1.4-gpl.tgz \
           http://mirror.slackware.hr/sources/xen-extfiles/gmp-4.3.2.tar.bz2 \
           http://mirror.slackware.hr/sources/xen-extfiles/tpm_emulator-0.7.4.tar.gz \
-          http://mirror.slackware.hr/sources/xen-extfiles/seabios-1.8.2.tar.gz
+          http://mirror.slackware.hr/sources/xen-extfiles/seabios-1.9.2.tar.gz
           http://mirror.slackware.hr/sources/xen-extfiles/ovmf-git-52a99493cce88a9d4ec8a02d7f1bd1a1001ce60d.tar.gz"
-MD5SUM="26419d8477082dbdb32ec75b00f00643 \
+MD5SUM="3aa4e01bf37a3a5bc8572907cb88e649 \
        7496268cebf47d5c9ccb0696e3b26065 \
        36cc57650cffda9a0269493be2a169bb \
        debc62758716a169df9f62e6ab2bc634 \
@@ -23,7 +23,7 @@ MD5SUM="26419d8477082dbdb32ec75b00f00643 \
        7b72caf22b01464ee7d6165f2fd85f44 \
        dd60683d7057917e34630b4a787932e8 \
        e26becb8a6a2b6695f6b3e8097593db8 \
-       d08a501fb918698f24a0de012c687729 \
+       32201f54c5fb478914d0bb2449b18454 \
        bd4b1d36212692fa4874ecad2a42abed"
 REQUIRES="acpica yajl"
 DOWNLOAD_x86_64=""
diff --git a/system/xen/xsa/xsa182-4.6.patch b/system/xen/xsa/xsa182-unstable.patch
index be2047d688..3e40e8a530 100644
--- a/system/xen/xsa/xsa182-4.6.patch
+++ b/system/xen/xsa/xsa182-unstable.patch
@@ -1,4 +1,4 @@
-From f48a75b0c10ac79b287ca2b580ecb9ea2f696607 Mon Sep 17 00:00:00 2001
+From 00593655e231ed5ea20704120037026e33b83fbb Mon Sep 17 00:00:00 2001
 From: Andrew Cooper <andrew.cooper3@citrix.com>
 Date: Mon, 11 Jul 2016 14:32:03 +0100
 Subject: [PATCH] x86/pv: Remove unsafe bits from the mod_l?_entry() fastpath
@@ -19,10 +19,10 @@ Reviewed-by: Tim Deegan <tim@xen.org>
  2 files changed, 17 insertions(+), 12 deletions(-)
 
 diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
-index daf02ab..8dd22b8 100644
+index dbcf6cb..56ca19f 100644
 --- a/xen/arch/x86/mm.c
 +++ b/xen/arch/x86/mm.c
-@@ -1780,6 +1780,14 @@ static inline int update_intpte(intpte_t *p,
+@@ -1852,6 +1852,14 @@ static inline int update_intpte(intpte_t *p,
                    _t ## e_get_intpte(_o), _t ## e_get_intpte(_n), \
                    (_m), (_v), (_ad))
 
@@ -37,8 +37,8 @@ index daf02ab..8dd22b8 100644
  /* Update the L1 entry at pl1e to new value nl1e. */
  static int mod_l1_entry(l1_pgentry_t *pl1e, l1_pgentry_t nl1e,
                          unsigned long gl1mfn, int preserve_ad,
-@@ -1820,9 +1828,8 @@ static int mod_l1_entry(l1_pgentry_t *pl1e, l1_pgentry_t nl1e,
-             return -EINVAL;
+@@ -1891,9 +1899,8 @@ static int mod_l1_entry(l1_pgentry_t *pl1e, l1_pgentry_t nl1e,
+         nl1e = l1e_from_pfn(page_to_mfn(page), l1e_get_flags(nl1e));
          }
 
 -        /* Fast path for identical mapping, r/w, presence, and cachability. */
@@ -48,8 +48,8 @@ index daf02ab..8dd22b8 100644
 +        if ( !l1e_has_changed(ol1e, nl1e, ~FASTPATH_FLAG_WHITELIST) )
          {
              adjust_guest_l1e(nl1e, pt_dom);
-             if ( UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, pt_vcpu,
-@@ -1904,11 +1911,8 @@ static int mod_l2_entry(l2_pgentry_t *pl2e,
+             rc = UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, pt_vcpu,
+@@ -1970,11 +1977,8 @@ static int mod_l2_entry(l2_pgentry_t *pl2e,
          return -EINVAL;
      }
 
@@ -63,7 +63,7 @@ index daf02ab..8dd22b8 100644
      {
          adjust_guest_l2e(nl2e, d);
          if ( UPDATE_ENTRY(l2, pl2e, ol2e, nl2e, pfn, vcpu, preserve_ad) )
-@@ -1973,8 +1977,8 @@ static int mod_l3_entry(l3_pgentry_t *pl3e,
+@@ -2039,8 +2043,8 @@ static int mod_l3_entry(l3_pgentry_t *pl3e,
          return -EINVAL;
      }
 
@@ -74,7 +74,7 @@ index daf02ab..8dd22b8 100644
      {
          adjust_guest_l3e(nl3e, d);
          rc = UPDATE_ENTRY(l3, pl3e, ol3e, nl3e, pfn, vcpu, preserve_ad);
-@@ -2037,8 +2041,8 @@ static int mod_l4_entry(l4_pgentry_t *pl4e,
+@@ -2103,8 +2107,8 @@ static int mod_l4_entry(l4_pgentry_t *pl4e,
          return -EINVAL;
      }
 
@@ -86,10 +86,10 @@ index daf02ab..8dd22b8 100644
          adjust_guest_l4e(nl4e, d);
          rc = UPDATE_ENTRY(l4, pl4e, ol4e, nl4e, pfn, vcpu, preserve_ad);
 diff --git a/xen/include/asm-x86/page.h b/xen/include/asm-x86/page.h
-index 66b611c..1a59ed8 100644
+index 224852a..4ae387f 100644
 --- a/xen/include/asm-x86/page.h
 +++ b/xen/include/asm-x86/page.h
-@@ -311,6 +311,7 @@ void efi_update_l4_pgtable(unsigned int l4idx, l4_pgentry_t);
+@@ -313,6 +313,7 @@ void efi_update_l4_pgtable(unsigned int l4idx, l4_pgentry_t);
  #define _PAGE_AVAIL2  _AC(0x800,U)
  #define _PAGE_AVAIL   _AC(0xE00,U)
  #define _PAGE_PSE_PAT _AC(0x1000,U)
diff --git a/system/xen/xsa/xsa183-4.6.patch b/system/xen/xsa/xsa183-unstable.patch
index 84d70077c8..573c530112 100644
--- a/system/xen/xsa/xsa183-4.6.patch
+++ b/system/xen/xsa/xsa183-unstable.patch
@@ -1,4 +1,4 @@
-From 777ebe30e81ab284f9b78392875fe884a593df35 Mon Sep 17 00:00:00 2001
+From 2fd4f34058fb5f87fbd80978dbd2cb458aff565d Mon Sep 17 00:00:00 2001
 From: Andrew Cooper <andrew.cooper3@citrix.com>
 Date: Wed, 15 Jun 2016 18:32:14 +0100
 Subject: [PATCH] x86/entry: Avoid SMAP violation in
@@ -27,10 +27,10 @@ v2:
  2 files changed, 5 insertions(+)
 
 diff --git a/xen/arch/x86/x86_64/compat/entry.S b/xen/arch/x86/x86_64/compat/entry.S
-index 0e3db7c..1eaf4bb 100644
+index 7f02afd..e80c53c 100644
 --- a/xen/arch/x86/x86_64/compat/entry.S
 +++ b/xen/arch/x86/x86_64/compat/entry.S
-@@ -350,6 +350,7 @@ ENTRY(compat_int80_direct_trap)
+@@ -318,6 +318,7 @@ ENTRY(compat_int80_direct_trap)
  compat_create_bounce_frame:
          ASSERT_INTERRUPTS_ENABLED
          mov   %fs,%edi
 +        ASM_STAC
          testb $2,UREGS_cs+8(%rsp)
          jz    1f
          /* Push new frame at registered guest-OS stack base. */
-@@ -403,6 +404,7 @@ UNLIKELY_START(nz, compat_bounce_failsafe)
-         movl  %ds,%eax
- .Lft12: movl  %eax,%fs:0*4(%rsi)            # DS
-         UNLIKELY_END(compat_bounce_failsafe)
+@@ -364,6 +365,7 @@ compat_create_bounce_frame:
+         movl  TRAPBOUNCE_error_code(%rdx),%eax
+ .Lft8:  movl  %eax,%fs:(%rsi)               # ERROR CODE
+ 1:
 +        ASM_CLAC
          /* Rewrite our stack frame and return to guest-OS mode. */
          /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
          andl  $~(X86_EFLAGS_VM|X86_EFLAGS_RF|\
-@@ -448,6 +450,7 @@ compat_crash_page_fault_4:
+@@ -403,6 +405,7 @@ compat_crash_page_fault_4:
          addl  $4,%esi
  compat_crash_page_fault:
  .Lft14: mov   %edi,%fs
 +        ASM_CLAC
          call  show_page_walk
          jmp   dom_crash_sync_extable
 diff --git a/xen/arch/x86/x86_64/entry.S b/xen/arch/x86/x86_64/entry.S
-index 6e27508..0c2e63a 100644
+index ad8c64c..f7178cd 100644
 --- a/xen/arch/x86/x86_64/entry.S
 +++ b/xen/arch/x86/x86_64/entry.S
-@@ -462,9 +462,11 @@ domain_crash_page_fault_16:
+@@ -420,9 +420,11 @@ domain_crash_page_fault_16:
  domain_crash_page_fault_8:
          addq  $8,%rsi
  domain_crash_page_fault:
 +        ASM_CLAC
          call  show_page_walk
 
  ENTRY(dom_crash_sync_extable)
 +        ASM_CLAC
          # Get out of the guest-save area of the stack.
-         GET_STACK_BASE(%rax)
+         GET_STACK_END(ax)
          leaq  STACK_CPUINFO_FIELD(guest_cpu_user_regs)(%rax),%rsp
 -- 
 2.1.4
diff --git a/system/xen/xsa/xsa185.patch b/system/xen/xsa/xsa185.patch
new file mode 100644
index 0000000000..a4c133ee19
--- /dev/null
+++ b/system/xen/xsa/xsa185.patch
@@ -0,0 +1,38 @@
+From 30aba4992b18245c436f16df7326a16c01a51570 Mon Sep 17 00:00:00 2001
+From: Jan Beulich <jbeulich@suse.com>
+Date: Mon, 8 Aug 2016 10:58:12 +0100
+Subject: x86/32on64: don't allow recursive page tables from L3
+
+L3 entries are special in PAE mode, and hence can't reasonably be used
+for setting up recursive (and hence linear) page table mappings. Since
+abuse is possible when the guest in fact gets run on 4-level page
+tables, this needs to be excluded explicitly.
+
+This is XSA-185.
+
+Reported-by: Jérémie Boutoille <jboutoille@ext.quarkslab.com>
+Reported-by: 栾尚聪(好风) <shangcong.lsc@alibaba-inc.com>
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
+---
+ xen/arch/x86/mm.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
+index 109b8be..69b8b8d 100644
+--- a/xen/arch/x86/mm.c
++++ b/xen/arch/x86/mm.c
+@@ -1122,7 +1122,9 @@ get_page_from_l3e(
+ 
+     rc = get_page_and_type_from_pagenr(
+         l3e_get_pfn(l3e), PGT_l2_page_table, d, partial, 1);
+-    if ( unlikely(rc == -EINVAL) && get_l3_linear_pagetable(l3e, pfn, d) )
++    if ( unlikely(rc == -EINVAL) &&
++         !is_pv_32bit_domain(d) &&
++         get_l3_linear_pagetable(l3e, pfn, d) )
+         rc = 0;
+ 
+     return rc;
+-- 
+2.1.4
+
diff --git a/system/xen/xsa/xsa186-0001-x86-emulate-Correct-boundary-interactions-of-emulate.patch b/system/xen/xsa/xsa186-0001-x86-emulate-Correct-boundary-interactions-of-emulate.patch
new file mode 100644
index 0000000000..b257497085
--- /dev/null
+++ b/system/xen/xsa/xsa186-0001-x86-emulate-Correct-boundary-interactions-of-emulate.patch
@@ -0,0 +1,73 @@
+From e938be013ba73ff08fa4f1d8670501aacefde7fb Mon Sep 17 00:00:00 2001
+From: Andrew Cooper <andrew.cooper3@citrix.com>
+Date: Fri, 22 Jul 2016 16:02:54 +0000
+Subject: [PATCH 1/2] x86/emulate: Correct boundary interactions of emulated
+ instructions
+
+This reverts most of c/s 0640ffb6 "x86emul: fix rIP handling".
+
+Experimentally, in long mode processors will execute an instruction stream
+which crosses the 64bit -1 -> 0 virtual boundary, whether the instruction
+boundary is aligned on the virtual boundary, or is misaligned.
+
+In compatibility mode, Intel processors will execute an instruction stream
+which crosses the 32bit -1 -> 0 virtual boundary, while AMD processors raise a
+segmentation fault. Xen's segmentation behaviour matches AMD.
+
+For 16bit code, hardware does not ever truncated %ip. %eip is always used and
+behaves normally as a 32bit register, including in 16bit protected mode
+segments, as well as in Real and Unreal mode.
+
+This is XSA-186
+
+Reported-by: Brian Marcotte <marcotte@panix.com>
+Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+---
+ xen/arch/x86/x86_emulate/x86_emulate.c | 22 ++++------------------
+ 1 file changed, 4 insertions(+), 18 deletions(-)
+
+diff --git a/xen/arch/x86/x86_emulate/x86_emulate.c b/xen/arch/x86/x86_emulate/x86_emulate.c
+index d5a56cf..bf3529a 100644
+--- a/xen/arch/x86/x86_emulate/x86_emulate.c
++++ b/xen/arch/x86/x86_emulate/x86_emulate.c
+@@ -1570,10 +1570,6 @@ x86_emulate(
+ #endif
+     }
+ 
+-    /* Truncate rIP to def_ad_bytes (2 or 4) if necessary. */
+-    if ( def_ad_bytes < sizeof(_regs.eip) )
+-        _regs.eip &= (1UL << (def_ad_bytes * 8)) - 1;
+-
+     /* Prefix bytes. */
+     for ( ; ; )
+     {
+@@ -3906,21 +3902,11 @@ x86_emulate(
+ 
+     /* Commit shadow register state. */
+     _regs.eflags &= ~EFLG_RF;
+-    switch ( __builtin_expect(def_ad_bytes, sizeof(_regs.eip)) )
+-    {
+-        uint16_t ip;
+ 
+-    case 2:
+-        ip = _regs.eip;
+-        _regs.eip = ctxt->regs->eip;
+-        *(uint16_t *)&_regs.eip = ip;
+-        break;
+-#ifdef __x86_64__
+-    case 4:
+-        _regs.rip = _regs._eip;
+-        break;
+-#endif
+-    }
++    /* Zero the upper 32 bits of %rip if not in long mode. */
++    if ( def_ad_bytes < sizeof(_regs.eip) )
++        _regs.eip = (uint32_t)_regs.eip;
++
+     *ctxt->regs = _regs;
+ 
+  done:
+-- 
+2.1.4
+
diff --git a/system/xen/xsa/xsa186-4.7-0002-hvm-fep-Allow-testing-of-instructions-crossing-the.patch b/system/xen/xsa/xsa186-4.7-0002-hvm-fep-Allow-testing-of-instructions-crossing-the.patch
new file mode 100644
index 0000000000..cb73a81042
--- /dev/null
+++ b/system/xen/xsa/xsa186-4.7-0002-hvm-fep-Allow-testing-of-instructions-crossing-the.patch
@@ -0,0 +1,28 @@
+From: Andrew Cooper <andrew.cooper3@citrix.com>
+Subject: hvm/fep: Allow testing of instructions crossing the -1 -> 0 virtual boundary
+
+The Force Emulation Prefix is named to follow its PV counterpart for cpuid or
+rdtsc, but isn't really an instruction prefix. It behaves as a break-out into
+Xen, with the purpose of emulating the next instruction in the current state.
+
+It is important to be able to test legal situations which occur in real
+hardware, including instruction which cross certain boundaries, and
+instructions starting at 0.
+
+Reported-by: Brian Marcotte <marcotte@panix.com>
+Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+
+--- a/xen/arch/x86/hvm/hvm.c
++++ b/xen/arch/x86/hvm/hvm.c
+@@ -3905,6 +3905,10 @@ void hvm_ud_intercept(struct cpu_user_re
+     {
+         regs->eip += sizeof(sig);
+         regs->eflags &= ~X86_EFLAGS_RF;
++
++        /* Zero the upper 32 bits of %rip if not in long mode. */
++        if ( !(hvm_long_mode_enabled(cur) && cs.attr.fields.l) )
++            regs->eip = regs->_eip;
+     }
+ }
+ 
diff --git a/system/xen/xsa/xsa187-4.7-0001-x86-shadow-Avoid-overflowing-sh_ctxt-seg.patch b/system/xen/xsa/xsa187-4.7-0001-x86-shadow-Avoid-overflowing-sh_ctxt-seg.patch
new file mode 100644
index 0000000000..bc99596083
--- /dev/null
+++ b/system/xen/xsa/xsa187-4.7-0001-x86-shadow-Avoid-overflowing-sh_ctxt-seg.patch
@@ -0,0 +1,42 @@
+From: Andrew Cooper <andrew.cooper3@citrix.com>
+Subject: x86/shadow: Avoid overflowing sh_ctxt->seg_reg[]
+
+hvm_get_seg_reg() does not perform a range check on its input segment, calls
+hvm_get_segment_register() and writes straight into sh_ctxt->seg_reg[].
+
+x86_seg_none is outside the bounds of sh_ctxt->seg_reg[], and will hit a BUG()
+in {vmx,svm}_get_segment_register().
+
+HVM guests running with shadow paging can end up performing a virtual to
+linear translation with x86_seg_none. This is used for addresses which are
+already linear. However, none of this is a legitimate pagetable update, so
+fail the emulation in such a case.
+
+This is XSA-187
+
+Reported-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Reviewed-by: Tim Deegan <tim@xen.org>
+
+--- a/xen/arch/x86/mm/shadow/common.c
++++ b/xen/arch/x86/mm/shadow/common.c
+@@ -140,9 +140,18 @@ static int hvm_translate_linear_addr(
+     struct sh_emulate_ctxt *sh_ctxt,
+     unsigned long *paddr)
+ {
+-    struct segment_register *reg = hvm_get_seg_reg(seg, sh_ctxt);
++    struct segment_register *reg;
+     int okay;
+ 
++    /*
++     * Can arrive here with non-user segments. However, no such cirucmstance
++     * is part of a legitimate pagetable update, so fail the emulation.
++     */
++    if ( !is_x86_user_segment(seg) )
++        return X86EMUL_UNHANDLEABLE;
++
++    reg = hvm_get_seg_reg(seg, sh_ctxt);
++
+     okay = hvm_virtual_to_linear_addr(
+         seg, reg, offset, bytes, access_type, sh_ctxt->ctxt.addr_size, paddr);
+ 
diff --git a/system/xen/xsa/xsa187-4.7-0002-x86-segment-Bounds-check-accesses-to-emulation-ctx.patch b/system/xen/xsa/xsa187-4.7-0002-x86-segment-Bounds-check-accesses-to-emulation-ctx.patch
new file mode 100644
index 0000000000..5529701d36
--- /dev/null
+++ b/system/xen/xsa/xsa187-4.7-0002-x86-segment-Bounds-check-accesses-to-emulation-ctx.patch
@@ -0,0 +1,153 @@
+From: Andrew Cooper <andrew.cooper3@citrix.com>
+Subject: x86/segment: Bounds check accesses to emulation ctxt->seg_reg[]
+
+HVM HAP codepaths have space for all segment registers in the seg_reg[]
+cache (with x86_seg_none still risking an array overrun), while the shadow
+codepaths only have space for the user segments.
+
+Range check the input segment of *_get_seg_reg() against the size of the array
+used to cache the results, to avoid overruns in the case that the callers
+don't filter their input suitably.
+
+Subsume the is_x86_user_segment(seg) checks from the shadow code, which were
+an incomplete attempt at range checking, and are now superceeded. Make
+hvm_get_seg_reg() static, as it is not used outside of shadow/common.c
+
+No functional change, but far easier to reason that no overflow is possible.
+
+Reported-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Acked-by: Tim Deegan <tim@xen.org>
+Acked-by: Jan Beulich <jbeulich@suse.com>
+
+--- a/xen/arch/x86/hvm/emulate.c
++++ b/xen/arch/x86/hvm/emulate.c
+@@ -534,6 +534,8 @@ static int hvmemul_virtual_to_linear(
+         *reps = min_t(unsigned long, *reps, max_reps);
+ 
+     reg = hvmemul_get_seg_reg(seg, hvmemul_ctxt);
++    if ( IS_ERR(reg) )
++        return -PTR_ERR(reg);
+ 
+     if ( (hvmemul_ctxt->ctxt.regs->eflags & X86_EFLAGS_DF) && (*reps > 1) )
+     {
+@@ -1369,6 +1371,10 @@ static int hvmemul_read_segment(
+     struct hvm_emulate_ctxt *hvmemul_ctxt =
+         container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
+     struct segment_register *sreg = hvmemul_get_seg_reg(seg, hvmemul_ctxt);
++
++    if ( IS_ERR(sreg) )
++        return -PTR_ERR(sreg);
++
+     memcpy(reg, sreg, sizeof(struct segment_register));
+     return X86EMUL_OKAY;
+ }
+@@ -1382,6 +1388,9 @@ static int hvmemul_write_segment(
+         container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
+     struct segment_register *sreg = hvmemul_get_seg_reg(seg, hvmemul_ctxt);
+ 
++    if ( IS_ERR(sreg) )
++        return -PTR_ERR(sreg);
++
+     memcpy(sreg, reg, sizeof(struct segment_register));
+     __set_bit(seg, &hvmemul_ctxt->seg_reg_dirty);
+ 
+@@ -1934,10 +1943,17 @@ void hvm_emulate_writeback(
+     }
+ }
+ 
++/*
++ * Callers which pass a known in-range x86_segment can rely on the return
++ * pointer being valid. Other callers must explicitly check for errors.
++ */
+ struct segment_register *hvmemul_get_seg_reg(
+     enum x86_segment seg,
+     struct hvm_emulate_ctxt *hvmemul_ctxt)
+ {
++    if ( seg < 0 || seg >= ARRAY_SIZE(hvmemul_ctxt->seg_reg) )
++        return ERR_PTR(-X86EMUL_UNHANDLEABLE);
++
+     if ( !__test_and_set_bit(seg, &hvmemul_ctxt->seg_reg_accessed) )
+         hvm_get_segment_register(current, seg, &hvmemul_ctxt->seg_reg[seg]);
+     return &hvmemul_ctxt->seg_reg[seg];
+ }
+--- a/xen/arch/x86/mm/shadow/common.c
++++ b/xen/arch/x86/mm/shadow/common.c
+@@ -123,10 +123,19 @@ __initcall(shadow_audit_key_init);
+ /* x86 emulator support for the shadow code
+  */
+ 
+-struct segment_register *hvm_get_seg_reg(
++/*
++ * Callers which pass a known in-range x86_segment can rely on the return
++ * pointer being valid. Other callers must explicitly check for errors.
++ */
++static struct segment_register *hvm_get_seg_reg(
+     enum x86_segment seg, struct sh_emulate_ctxt *sh_ctxt)
+ {
+-    struct segment_register *seg_reg = &sh_ctxt->seg_reg[seg];
++    struct segment_register *seg_reg;
++
++    if ( seg < 0 || seg >= ARRAY_SIZE(sh_ctxt->seg_reg) )
++        return ERR_PTR(-X86EMUL_UNHANDLEABLE);
++
++    seg_reg = &sh_ctxt->seg_reg[seg];
+     if ( !__test_and_set_bit(seg, &sh_ctxt->valid_seg_regs) )
+         hvm_get_segment_register(current, seg, seg_reg);
+     return seg_reg;
+@@ -143,14 +152,9 @@ static int hvm_translate_linear_addr(
+     struct segment_register *reg;
+     int okay;
+ 
+-    /*
+-     * Can arrive here with non-user segments. However, no such cirucmstance
+-     * is part of a legitimate pagetable update, so fail the emulation.
+-     */
+-    if ( !is_x86_user_segment(seg) )
+-        return X86EMUL_UNHANDLEABLE;
+-
+     reg = hvm_get_seg_reg(seg, sh_ctxt);
++    if ( IS_ERR(reg) )
++        return -PTR_ERR(reg);
+ 
+     okay = hvm_virtual_to_linear_addr(
+         seg, reg, offset, bytes, access_type, sh_ctxt->ctxt.addr_size, paddr);
+@@ -253,9 +257,6 @@ hvm_emulate_write(enum x86_segment seg,
+     unsigned long addr;
+     int rc;
+ 
+-    if ( !is_x86_user_segment(seg) )
+-        return X86EMUL_UNHANDLEABLE;
+-
+     /* How many emulations could we save if we unshadowed on stack writes? */
+     if ( seg == x86_seg_ss )
+         perfc_incr(shadow_fault_emulate_stack);
+@@ -283,7 +284,7 @@ hvm_emulate_cmpxchg(enum x86_segment seg
+     unsigned long addr, old, new;
+     int rc;
+ 
+-    if ( !is_x86_user_segment(seg) || bytes > sizeof(long) )
++    if ( bytes > sizeof(long) )
+         return X86EMUL_UNHANDLEABLE;
+ 
+     rc = hvm_translate_linear_addr(
+--- a/xen/arch/x86/mm/shadow/private.h
++++ b/xen/arch/x86/mm/shadow/private.h
+@@ -740,8 +740,6 @@ const struct x86_emulate_ops *shadow_ini
+     struct sh_emulate_ctxt *sh_ctxt, struct cpu_user_regs *regs);
+ void shadow_continue_emulation(
+     struct sh_emulate_ctxt *sh_ctxt, struct cpu_user_regs *regs);
+-struct segment_register *hvm_get_seg_reg(
+-    enum x86_segment seg, struct sh_emulate_ctxt *sh_ctxt);
+ 
+ #if (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB)
+ /**************************************************************************/
+--- a/xen/include/asm-x86/hvm/emulate.h
++++ b/xen/include/asm-x86/hvm/emulate.h
+@@ -13,6 +13,7 @@
+ #define __ASM_X86_HVM_EMULATE_H__
+ 
+ #include <xen/config.h>
++#include <xen/err.h>
+ #include <asm/hvm/hvm.h>
+ #include <asm/x86_emulate.h>
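
Note on the version bumps above: kernel-xen.sh, domU.sh and xen.SlackBuild all read their versions through ${VAR:-default} parameter expansions, so the new defaults (KERNEL=4.4.19, XEN=4.7.0, VERSION=4.7.0, BUILD=1) can still be overridden from the environment rather than by editing the scripts. A minimal usage sketch follows; the working directory, the explicit sh invocation and running the SlackBuild as root are illustrative assumptions, not part of this commit.

    # Hypothetical invocation, assuming a checkout of the SlackBuilds tree.
    # Unset variables fall back to the defaults shown in the hunks above.
    cd system/xen/dom0
    KERNEL=4.4.19 XEN=4.7.0 BOOTLOADER=lilo sh ./kernel-xen.sh   # dom0 kernel build
    cd ../../system/xen
    VERSION=4.7.0 BUILD=1 TAG=_SBo sh ./xen.SlackBuild           # package build (typically as root)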