author     Mario Preksavec <mario@slackware.hr>              2016-09-09 17:32:49 +0200
committer  Willy Sudiarto Raharjo <willysr@slackbuilds.org>  2016-09-11 08:53:21 +0700
commit     2950e4fd85dfa97149c179636ae27b664d9c3157 (patch)
tree       63d67d6c6055f01d052abfb695519734377ea6e8 /system/xen/xsa
parent     6f7d252327e4559f12c7cc6ec6f6d40a110cee00 (diff)
download   slackbuilds-2950e4fd85dfa97149c179636ae27b664d9c3157.tar.gz
system/xen: Updated for version 4.7.0.
Signed-off-by: Mario Preksavec <mario@slackware.hr>
Diffstat (limited to 'system/xen/xsa')
-rw-r--r--  system/xen/xsa/xsa182-unstable.patch (renamed from system/xen/xsa/xsa182-4.6.patch)           |  22
-rw-r--r--  system/xen/xsa/xsa183-unstable.patch (renamed from system/xen/xsa/xsa183-4.6.patch)           |  22
-rw-r--r--  system/xen/xsa/xsa185.patch                                                                   |  38
-rw-r--r--  system/xen/xsa/xsa186-0001-x86-emulate-Correct-boundary-interactions-of-emulate.patch         |  73
-rw-r--r--  system/xen/xsa/xsa186-4.7-0002-hvm-fep-Allow-testing-of-instructions-crossing-the.patch       |  28
-rw-r--r--  system/xen/xsa/xsa187-4.7-0001-x86-shadow-Avoid-overflowing-sh_ctxt-seg.patch                 |  42
-rw-r--r--  system/xen/xsa/xsa187-4.7-0002-x86-segment-Bounds-check-accesses-to-emulation-ctx.patch       | 153
7 files changed, 356 insertions(+), 22 deletions(-)
diff --git a/system/xen/xsa/xsa182-4.6.patch b/system/xen/xsa/xsa182-unstable.patch
index be2047d688..3e40e8a530 100644
--- a/system/xen/xsa/xsa182-4.6.patch
+++ b/system/xen/xsa/xsa182-unstable.patch
@@ -1,4 +1,4 @@
-From f48a75b0c10ac79b287ca2b580ecb9ea2f696607 Mon Sep 17 00:00:00 2001
+From 00593655e231ed5ea20704120037026e33b83fbb Mon Sep 17 00:00:00 2001
From: Andrew Cooper <andrew.cooper3@citrix.com>
Date: Mon, 11 Jul 2016 14:32:03 +0100
Subject: [PATCH] x86/pv: Remove unsafe bits from the mod_l?_entry() fastpath
@@ -19,10 +19,10 @@ Reviewed-by: Tim Deegan <tim@xen.org>
2 files changed, 17 insertions(+), 12 deletions(-)
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
-index daf02ab..8dd22b8 100644
+index dbcf6cb..56ca19f 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
-@@ -1780,6 +1780,14 @@ static inline int update_intpte(intpte_t *p,
+@@ -1852,6 +1852,14 @@ static inline int update_intpte(intpte_t *p,
_t ## e_get_intpte(_o), _t ## e_get_intpte(_n), \
(_m), (_v), (_ad))
@@ -37,8 +37,8 @@ index daf02ab..8dd22b8 100644
/* Update the L1 entry at pl1e to new value nl1e. */
static int mod_l1_entry(l1_pgentry_t *pl1e, l1_pgentry_t nl1e,
unsigned long gl1mfn, int preserve_ad,
-@@ -1820,9 +1828,8 @@ static int mod_l1_entry(l1_pgentry_t *pl1e, l1_pgentry_t nl1e,
- return -EINVAL;
+@@ -1891,9 +1899,8 @@ static int mod_l1_entry(l1_pgentry_t *pl1e, l1_pgentry_t nl1e,
+ nl1e = l1e_from_pfn(page_to_mfn(page), l1e_get_flags(nl1e));
}
- /* Fast path for identical mapping, r/w, presence, and cachability. */
@@ -48,8 +48,8 @@ index daf02ab..8dd22b8 100644
+ if ( !l1e_has_changed(ol1e, nl1e, ~FASTPATH_FLAG_WHITELIST) )
{
adjust_guest_l1e(nl1e, pt_dom);
- if ( UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, pt_vcpu,
-@@ -1904,11 +1911,8 @@ static int mod_l2_entry(l2_pgentry_t *pl2e,
+ rc = UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, pt_vcpu,
+@@ -1970,11 +1977,8 @@ static int mod_l2_entry(l2_pgentry_t *pl2e,
return -EINVAL;
}
@@ -63,7 +63,7 @@ index daf02ab..8dd22b8 100644
{
adjust_guest_l2e(nl2e, d);
if ( UPDATE_ENTRY(l2, pl2e, ol2e, nl2e, pfn, vcpu, preserve_ad) )
-@@ -1973,8 +1977,8 @@ static int mod_l3_entry(l3_pgentry_t *pl3e,
+@@ -2039,8 +2043,8 @@ static int mod_l3_entry(l3_pgentry_t *pl3e,
return -EINVAL;
}
@@ -74,7 +74,7 @@ index daf02ab..8dd22b8 100644
{
adjust_guest_l3e(nl3e, d);
rc = UPDATE_ENTRY(l3, pl3e, ol3e, nl3e, pfn, vcpu, preserve_ad);
-@@ -2037,8 +2041,8 @@ static int mod_l4_entry(l4_pgentry_t *pl4e,
+@@ -2103,8 +2107,8 @@ static int mod_l4_entry(l4_pgentry_t *pl4e,
return -EINVAL;
}
@@ -86,10 +86,10 @@ index daf02ab..8dd22b8 100644
adjust_guest_l4e(nl4e, d);
rc = UPDATE_ENTRY(l4, pl4e, ol4e, nl4e, pfn, vcpu, preserve_ad);
diff --git a/xen/include/asm-x86/page.h b/xen/include/asm-x86/page.h
-index 66b611c..1a59ed8 100644
+index 224852a..4ae387f 100644
--- a/xen/include/asm-x86/page.h
+++ b/xen/include/asm-x86/page.h
-@@ -311,6 +311,7 @@ void efi_update_l4_pgtable(unsigned int l4idx, l4_pgentry_t);
+@@ -313,6 +313,7 @@ void efi_update_l4_pgtable(unsigned int l4idx, l4_pgentry_t);
#define _PAGE_AVAIL2 _AC(0x800,U)
#define _PAGE_AVAIL _AC(0xE00,U)
#define _PAGE_PSE_PAT _AC(0x1000,U)
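
The effect of the change above: the old fast paths only refused changes to a few bits (just _PAGE_PRESENT at L2 and above), so an RW flip on a recursive pagetable entry could skip revalidation. The new FASTPATH_FLAG_WHITELIST inverts the logic: only bits that never need revalidation may differ. A minimal standalone sketch of the whitelist test, with simplified flag values standing in for Xen's _PAGE_* constants (the real whitelist also covers NX, GLOBAL and the AVAIL bits):

#include <stdint.h>
#include <stdio.h>

/* Simplified PTE flag bits; illustrative values only. */
#define PAGE_PRESENT  0x001ULL
#define PAGE_RW       0x002ULL
#define PAGE_ACCESSED 0x020ULL
#define PAGE_DIRTY    0x040ULL

/* Bits that may differ without revalidating the entry. */
#define FASTPATH_FLAG_WHITELIST (PAGE_ACCESSED | PAGE_DIRTY)

static int fastpath_permitted(uint64_t ol1e, uint64_t nl1e)
{
    /* Any change outside the whitelist forces the slow, fully
     * revalidated update path. */
    return ((ol1e ^ nl1e) & ~FASTPATH_FLAG_WHITELIST) == 0;
}

int main(void)
{
    uint64_t pte = PAGE_PRESENT | PAGE_RW | PAGE_ACCESSED;

    printf("toggle DIRTY: %d\n", fastpath_permitted(pte, pte | PAGE_DIRTY)); /* 1 */
    printf("clear RW:     %d\n", fastpath_permitted(pte, pte & ~PAGE_RW));   /* 0 */
    return 0;
}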
diff --git a/system/xen/xsa/xsa183-4.6.patch b/system/xen/xsa/xsa183-unstable.patch
index 84d70077c8..573c530112 100644
--- a/system/xen/xsa/xsa183-4.6.patch
+++ b/system/xen/xsa/xsa183-unstable.patch
@@ -1,4 +1,4 @@
-From 777ebe30e81ab284f9b78392875fe884a593df35 Mon Sep 17 00:00:00 2001
+From 2fd4f34058fb5f87fbd80978dbd2cb458aff565d Mon Sep 17 00:00:00 2001
From: Andrew Cooper <andrew.cooper3@citrix.com>
Date: Wed, 15 Jun 2016 18:32:14 +0100
Subject: [PATCH] x86/entry: Avoid SMAP violation in
@@ -27,10 +27,10 @@ v2:
2 files changed, 5 insertions(+)
diff --git a/xen/arch/x86/x86_64/compat/entry.S b/xen/arch/x86/x86_64/compat/entry.S
-index 0e3db7c..1eaf4bb 100644
+index 7f02afd..e80c53c 100644
--- a/xen/arch/x86/x86_64/compat/entry.S
+++ b/xen/arch/x86/x86_64/compat/entry.S
-@@ -350,6 +350,7 @@ ENTRY(compat_int80_direct_trap)
+@@ -318,6 +318,7 @@ ENTRY(compat_int80_direct_trap)
compat_create_bounce_frame:
ASSERT_INTERRUPTS_ENABLED
mov %fs,%edi
@@ -38,15 +38,15 @@ index 0e3db7c..1eaf4bb 100644
testb $2,UREGS_cs+8(%rsp)
jz 1f
/* Push new frame at registered guest-OS stack base. */
-@@ -403,6 +404,7 @@ UNLIKELY_START(nz, compat_bounce_failsafe)
- movl %ds,%eax
- .Lft12: movl %eax,%fs:0*4(%rsi) # DS
- UNLIKELY_END(compat_bounce_failsafe)
+@@ -364,6 +365,7 @@ compat_create_bounce_frame:
+ movl TRAPBOUNCE_error_code(%rdx),%eax
+ .Lft8: movl %eax,%fs:(%rsi) # ERROR CODE
+ 1:
+ ASM_CLAC
/* Rewrite our stack frame and return to guest-OS mode. */
/* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
andl $~(X86_EFLAGS_VM|X86_EFLAGS_RF|\
-@@ -448,6 +450,7 @@ compat_crash_page_fault_4:
+@@ -403,6 +405,7 @@ compat_crash_page_fault_4:
addl $4,%esi
compat_crash_page_fault:
.Lft14: mov %edi,%fs
@@ -55,10 +55,10 @@ index 0e3db7c..1eaf4bb 100644
call show_page_walk
jmp dom_crash_sync_extable
diff --git a/xen/arch/x86/x86_64/entry.S b/xen/arch/x86/x86_64/entry.S
-index 6e27508..0c2e63a 100644
+index ad8c64c..f7178cd 100644
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
-@@ -462,9 +462,11 @@ domain_crash_page_fault_16:
+@@ -420,9 +420,11 @@ domain_crash_page_fault_16:
domain_crash_page_fault_8:
addq $8,%rsi
domain_crash_page_fault:
@@ -68,7 +68,7 @@ index 6e27508..0c2e63a 100644
ENTRY(dom_crash_sync_extable)
+ ASM_CLAC
# Get out of the guest-save area of the stack.
- GET_STACK_BASE(%rax)
+ GET_STACK_END(ax)
leaq STACK_CPUINFO_FIELD(guest_cpu_user_regs)(%rax),%rsp
--
2.1.4
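
The ASM_CLAC additions above are about SMAP hygiene: %eflags.AC may only be set across the guest-memory access itself, and every exit from such a region, including the failure paths patched here, must clear it again before other hypervisor code runs. A software model of that invariant (the real ASM_STAC/ASM_CLAC macros emit the CPL0-only stac/clac instructions and are patched out on hardware without SMAP):

#include <assert.h>
#include <stddef.h>
#include <string.h>

/* Models %eflags.AC under SMAP: supervisor access to user-accessible
 * memory is only legal while the flag is raised. */
static int ac_flag;

static void asm_stac(void) { ac_flag = 1; }   /* stands in for "stac" */
static void asm_clac(void) { ac_flag = 0; }   /* stands in for "clac" */

static void access_guest(char *dst, const char *src, size_t n)
{
    assert(ac_flag && "SMAP violation: guest access with AC clear");
    memcpy(dst, src, n);
}

int main(void)
{
    char guest[8], data[8] = "payload";

    asm_stac();
    access_guest(guest, data, sizeof(data));
    asm_clac();   /* equally required on error exits, per the patch */
    return 0;
}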
diff --git a/system/xen/xsa/xsa185.patch b/system/xen/xsa/xsa185.patch
new file mode 100644
index 0000000000..a4c133ee19
--- /dev/null
+++ b/system/xen/xsa/xsa185.patch
@@ -0,0 +1,38 @@
+From 30aba4992b18245c436f16df7326a16c01a51570 Mon Sep 17 00:00:00 2001
+From: Jan Beulich <jbeulich@suse.com>
+Date: Mon, 8 Aug 2016 10:58:12 +0100
+Subject: x86/32on64: don't allow recursive page tables from L3
+
+L3 entries are special in PAE mode, and hence can't reasonably be used
+for setting up recursive (and hence linear) page table mappings. Since
+abuse is possible when the guest in fact gets run on 4-level page
+tables, this needs to be excluded explicitly.
+
+This is XSA-185.
+
+Reported-by: Jérémie Boutoille <jboutoille@ext.quarkslab.com>
+Reported-by: 栾尚聪(好风) <shangcong.lsc@alibaba-inc.com>
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
+---
+ xen/arch/x86/mm.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
+index 109b8be..69b8b8d 100644
+--- a/xen/arch/x86/mm.c
++++ b/xen/arch/x86/mm.c
+@@ -1122,7 +1122,9 @@ get_page_from_l3e(
+
+ rc = get_page_and_type_from_pagenr(
+ l3e_get_pfn(l3e), PGT_l2_page_table, d, partial, 1);
+- if ( unlikely(rc == -EINVAL) && get_l3_linear_pagetable(l3e, pfn, d) )
++ if ( unlikely(rc == -EINVAL) &&
++ !is_pv_32bit_domain(d) &&
++ get_l3_linear_pagetable(l3e, pfn, d) )
+ rc = 0;
+
+ return rc;
+--
+2.1.4
+
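
For background, a recursive (linear) pagetable entry points back at a table of the same level, which makes every pagetable in the tree visible at a computable virtual address. A sketch of the 4-level address arithmetic, assuming a hypothetical self-referencing top-level slot and ignoring canonical-address sign extension; PAE L3 entries cannot safely join this scheme because hardware treats them specially, which is exactly what the check above enforces for 32-bit PV guests:

#include <stdint.h>
#include <stdio.h>

/* With top-level slot SELF mapping the top-level table onto itself,
 * the PTE governing virtual address va becomes readable at the
 * address computed here (48-bit VA, 4 KiB pages). */
#define SELF 510ULL

static uint64_t pte_vaddr(uint64_t va)
{
    return (SELF << 39) | (((va >> 12) << 3) & ((1ULL << 39) - 1));
}

int main(void)
{
    printf("PTE of 0x7f0000001000 is visible at %#llx\n",
           (unsigned long long)pte_vaddr(0x7f0000001000ULL));
    return 0;
}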
diff --git a/system/xen/xsa/xsa186-0001-x86-emulate-Correct-boundary-interactions-of-emulate.patch b/system/xen/xsa/xsa186-0001-x86-emulate-Correct-boundary-interactions-of-emulate.patch
new file mode 100644
index 0000000000..b257497085
--- /dev/null
+++ b/system/xen/xsa/xsa186-0001-x86-emulate-Correct-boundary-interactions-of-emulate.patch
@@ -0,0 +1,73 @@
+From e938be013ba73ff08fa4f1d8670501aacefde7fb Mon Sep 17 00:00:00 2001
+From: Andrew Cooper <andrew.cooper3@citrix.com>
+Date: Fri, 22 Jul 2016 16:02:54 +0000
+Subject: [PATCH 1/2] x86/emulate: Correct boundary interactions of emulated
+ instructions
+
+This reverts most of c/s 0640ffb6 "x86emul: fix rIP handling".
+
+Experimentally, in long mode processors will execute an instruction stream
+which crosses the 64bit -1 -> 0 virtual boundary, whether the instruction
+boundary is aligned on the virtual boundary, or is misaligned.
+
+In compatibility mode, Intel processors will execute an instruction stream
+which crosses the 32bit -1 -> 0 virtual boundary, while AMD processors raise a
+segmentation fault. Xen's segmentation behaviour matches AMD.
+
+For 16bit code, hardware does not ever truncate %ip. %eip is always used and
+behaves normally as a 32bit register, including in 16bit protected mode
+segments, as well as in Real and Unreal mode.
+
+This is XSA-186
+
+Reported-by: Brian Marcotte <marcotte@panix.com>
+Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+---
+ xen/arch/x86/x86_emulate/x86_emulate.c | 22 ++++------------------
+ 1 file changed, 4 insertions(+), 18 deletions(-)
+
+diff --git a/xen/arch/x86/x86_emulate/x86_emulate.c b/xen/arch/x86/x86_emulate/x86_emulate.c
+index d5a56cf..bf3529a 100644
+--- a/xen/arch/x86/x86_emulate/x86_emulate.c
++++ b/xen/arch/x86/x86_emulate/x86_emulate.c
+@@ -1570,10 +1570,6 @@ x86_emulate(
+ #endif
+ }
+
+- /* Truncate rIP to def_ad_bytes (2 or 4) if necessary. */
+- if ( def_ad_bytes < sizeof(_regs.eip) )
+- _regs.eip &= (1UL << (def_ad_bytes * 8)) - 1;
+-
+ /* Prefix bytes. */
+ for ( ; ; )
+ {
+@@ -3906,21 +3902,11 @@ x86_emulate(
+
+ /* Commit shadow register state. */
+ _regs.eflags &= ~EFLG_RF;
+- switch ( __builtin_expect(def_ad_bytes, sizeof(_regs.eip)) )
+- {
+- uint16_t ip;
+
+- case 2:
+- ip = _regs.eip;
+- _regs.eip = ctxt->regs->eip;
+- *(uint16_t *)&_regs.eip = ip;
+- break;
+-#ifdef __x86_64__
+- case 4:
+- _regs.rip = _regs._eip;
+- break;
+-#endif
+- }
++ /* Zero the upper 32 bits of %rip if not in long mode. */
++ if ( def_ad_bytes < sizeof(_regs.eip) )
++ _regs.eip = (uint32_t)_regs.eip;
++
+ *ctxt->regs = _regs;
+
+ done:
+--
+2.1.4
+
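
The replacement hunk above reduces rIP handling to one commit-time rule. A standalone sketch of that rule (function and parameter names are illustrative):

#include <stdint.h>
#include <stdio.h>

/* Outside long mode the architectural %rip has no upper 32 bits, so
 * zero-extend at commit time; in long mode keep all 64 bits, which is
 * what lets an instruction stream legally wrap across the 64bit
 * -1 -> 0 virtual boundary. */
static uint64_t commit_eip(uint64_t emulated_eip, unsigned int def_ad_bytes)
{
    if (def_ad_bytes < sizeof(uint64_t))   /* 16- or 32-bit mode */
        return (uint32_t)emulated_eip;
    return emulated_eip;                   /* 64-bit mode */
}

int main(void)
{
    /* A 32-bit instruction ending just past 2^32 must not leave stale
     * upper bits behind. */
    printf("%#llx\n", (unsigned long long)commit_eip(0x100000002ULL, 4));
    printf("%#llx\n", (unsigned long long)commit_eip(0x100000002ULL, 8));
    return 0;
}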
diff --git a/system/xen/xsa/xsa186-4.7-0002-hvm-fep-Allow-testing-of-instructions-crossing-the.patch b/system/xen/xsa/xsa186-4.7-0002-hvm-fep-Allow-testing-of-instructions-crossing-the.patch
new file mode 100644
index 0000000000..cb73a81042
--- /dev/null
+++ b/system/xen/xsa/xsa186-4.7-0002-hvm-fep-Allow-testing-of-instructions-crossing-the.patch
@@ -0,0 +1,28 @@
+From: Andrew Cooper <andrew.cooper3@citrix.com>
+Subject: hvm/fep: Allow testing of instructions crossing the -1 -> 0 virtual boundary
+
+The Force Emulation Prefix is named to follow its PV counterpart for cpuid or
+rdtsc, but isn't really an instruction prefix. It behaves as a break-out into
+Xen, with the purpose of emulating the next instruction in the current state.
+
+It is important to be able to test legal situations which occur in real
+hardware, including instructions which cross certain boundaries, and
+instructions starting at 0.
+
+Reported-by: Brian Marcotte <marcotte@panix.com>
+Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+
+--- a/xen/arch/x86/hvm/hvm.c
++++ b/xen/arch/x86/hvm/hvm.c
+@@ -3905,6 +3905,10 @@ void hvm_ud_intercept(struct cpu_user_re
+ {
+ regs->eip += sizeof(sig);
+ regs->eflags &= ~X86_EFLAGS_RF;
++
++ /* Zero the upper 32 bits of %rip if not in long mode. */
++ if ( !(hvm_long_mode_enabled(cur) && cs.attr.fields.l) )
++ regs->eip = regs->_eip;
+ }
+ }
+
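
For reference, the Force Emulation Prefix is the byte sequence ud2 plus the ASCII string "xen"; when FEP is enabled, hitting it in a guest raises #UD into Xen, which skips the marker and emulates the following instruction. A hedged sketch of that intercept flow -- the signature matches Xen's documented FEP, but the surrounding scaffolding is illustrative:

#include <stdint.h>
#include <string.h>

/* ud2 (0f 0b) followed by the bytes "xen". */
static const uint8_t fep_sig[] = { 0x0f, 0x0b, 'x', 'e', 'n' };

static uint64_t handle_ud_stub(const uint8_t *bytes, uint64_t rip,
                               int long_mode_cs)
{
    if (memcmp(bytes, fep_sig, sizeof(fep_sig)) != 0)
        return rip;                /* genuine #UD: would inject a fault */

    rip += sizeof(fep_sig);        /* step over the signature */
    if (!long_mode_cs)
        rip = (uint32_t)rip;       /* the zero-extension added above */
    /* ...emulate the instruction now at rip... */
    return rip;
}

int main(void)
{
    /* An FEP marker ending exactly at the -1 -> 0 boundary. */
    const uint8_t stream[] = { 0x0f, 0x0b, 'x', 'e', 'n', 0x90 };
    return handle_ud_stub(stream, 0xfffffffffffffffbULL, 1) == 0 ? 0 : 1;
}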
diff --git a/system/xen/xsa/xsa187-4.7-0001-x86-shadow-Avoid-overflowing-sh_ctxt-seg.patch b/system/xen/xsa/xsa187-4.7-0001-x86-shadow-Avoid-overflowing-sh_ctxt-seg.patch
new file mode 100644
index 0000000000..bc99596083
--- /dev/null
+++ b/system/xen/xsa/xsa187-4.7-0001-x86-shadow-Avoid-overflowing-sh_ctxt-seg.patch
@@ -0,0 +1,42 @@
+From: Andrew Cooper <andrew.cooper3@citrix.com>
+Subject: x86/shadow: Avoid overflowing sh_ctxt->seg_reg[]
+
+hvm_get_seg_reg() does not perform a range check on its input segment, calls
+hvm_get_segment_register() and writes straight into sh_ctxt->seg_reg[].
+
+x86_seg_none is outside the bounds of sh_ctxt->seg_reg[], and will hit a BUG()
+in {vmx,svm}_get_segment_register().
+
+HVM guests running with shadow paging can end up performing a virtual to
+linear translation with x86_seg_none. This is used for addresses which are
+already linear. However, none of this is a legitimate pagetable update, so
+fail the emulation in such a case.
+
+This is XSA-187
+
+Reported-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Reviewed-by: Tim Deegan <tim@xen.org>
+
+--- a/xen/arch/x86/mm/shadow/common.c
++++ b/xen/arch/x86/mm/shadow/common.c
+@@ -140,9 +140,18 @@ static int hvm_translate_linear_addr(
+ struct sh_emulate_ctxt *sh_ctxt,
+ unsigned long *paddr)
+ {
+- struct segment_register *reg = hvm_get_seg_reg(seg, sh_ctxt);
++ struct segment_register *reg;
+ int okay;
+
++ /*
++ * Can arrive here with non-user segments. However, no such circumstance
++ * is part of a legitimate pagetable update, so fail the emulation.
++ */
++ if ( !is_x86_user_segment(seg) )
++ return X86EMUL_UNHANDLEABLE;
++
++ reg = hvm_get_seg_reg(seg, sh_ctxt);
++
+ okay = hvm_virtual_to_linear_addr(
+ seg, reg, offset, bytes, access_type, sh_ctxt->ctxt.addr_size, paddr);
+
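
The guard above leans on is_x86_user_segment() rejecting everything past the six user segment registers, so nothing else can reach the seg_reg[] array. A simplified model of that predicate (the enum here is an illustrative subset; the real ordering lives in Xen's x86_emulate.h):

#include <stdio.h>

/* User segments first, then system segments, then the pseudo-segment
 * used for addresses that are already linear. Illustrative subset. */
enum x86_segment {
    x86_seg_es, x86_seg_cs, x86_seg_ss, x86_seg_ds, x86_seg_fs, x86_seg_gs,
    x86_seg_tr, x86_seg_none,
};

static int is_x86_user_segment(enum x86_segment seg)
{
    /* Only the first six values may index a seg_reg[6] cache. */
    return (unsigned int)seg <= x86_seg_gs;
}

int main(void)
{
    printf("ds: %d, none: %d\n",
           is_x86_user_segment(x86_seg_ds),
           is_x86_user_segment(x86_seg_none));
    return 0;
}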
diff --git a/system/xen/xsa/xsa187-4.7-0002-x86-segment-Bounds-check-accesses-to-emulation-ctx.patch b/system/xen/xsa/xsa187-4.7-0002-x86-segment-Bounds-check-accesses-to-emulation-ctx.patch
new file mode 100644
index 0000000000..5529701d36
--- /dev/null
+++ b/system/xen/xsa/xsa187-4.7-0002-x86-segment-Bounds-check-accesses-to-emulation-ctx.patch
@@ -0,0 +1,153 @@
+From: Andrew Cooper <andrew.cooper3@citrix.com>
+Subject: x86/segment: Bounds check accesses to emulation ctxt->seg_reg[]
+
+HVM HAP codepaths have space for all segment registers in the seg_reg[]
+cache (with x86_seg_none still risking an array overrun), while the shadow
+codepaths only have space for the user segments.
+
+Range check the input segment of *_get_seg_reg() against the size of the array
+used to cache the results, to avoid overruns in the case that the callers
+don't filter their input suitably.
+
+Subsume the is_x86_user_segment(seg) checks from the shadow code, which were
+an incomplete attempt at range checking, and are now superseded. Make
+hvm_get_seg_reg() static, as it is not used outside of shadow/common.c
+
+No functional change, but far easier to reason that no overflow is possible.
+
+Reported-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Acked-by: Tim Deegan <tim@xen.org>
+Acked-by: Jan Beulich <jbeulich@suse.com>
+
+--- a/xen/arch/x86/hvm/emulate.c
++++ b/xen/arch/x86/hvm/emulate.c
+@@ -534,6 +534,8 @@ static int hvmemul_virtual_to_linear(
+ *reps = min_t(unsigned long, *reps, max_reps);
+
+ reg = hvmemul_get_seg_reg(seg, hvmemul_ctxt);
++ if ( IS_ERR(reg) )
++ return -PTR_ERR(reg);
+
+ if ( (hvmemul_ctxt->ctxt.regs->eflags & X86_EFLAGS_DF) && (*reps > 1) )
+ {
+@@ -1369,6 +1371,10 @@ static int hvmemul_read_segment(
+ struct hvm_emulate_ctxt *hvmemul_ctxt =
+ container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
+ struct segment_register *sreg = hvmemul_get_seg_reg(seg, hvmemul_ctxt);
++
++ if ( IS_ERR(sreg) )
++ return -PTR_ERR(sreg);
++
+ memcpy(reg, sreg, sizeof(struct segment_register));
+ return X86EMUL_OKAY;
+ }
+@@ -1382,6 +1388,9 @@ static int hvmemul_write_segment(
+ container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
+ struct segment_register *sreg = hvmemul_get_seg_reg(seg, hvmemul_ctxt);
+
++ if ( IS_ERR(sreg) )
++ return -PTR_ERR(sreg);
++
+ memcpy(sreg, reg, sizeof(struct segment_register));
+ __set_bit(seg, &hvmemul_ctxt->seg_reg_dirty);
+
+@@ -1934,10 +1943,17 @@ void hvm_emulate_writeback(
+ }
+ }
+
++/*
++ * Callers which pass a known in-range x86_segment can rely on the return
++ * pointer being valid. Other callers must explicitly check for errors.
++ */
+ struct segment_register *hvmemul_get_seg_reg(
+ enum x86_segment seg,
+ struct hvm_emulate_ctxt *hvmemul_ctxt)
+ {
++ if ( seg < 0 || seg >= ARRAY_SIZE(hvmemul_ctxt->seg_reg) )
++ return ERR_PTR(-X86EMUL_UNHANDLEABLE);
++
+ if ( !__test_and_set_bit(seg, &hvmemul_ctxt->seg_reg_accessed) )
+ hvm_get_segment_register(current, seg, &hvmemul_ctxt->seg_reg[seg]);
+ return &hvmemul_ctxt->seg_reg[seg];
+--- a/xen/arch/x86/mm/shadow/common.c
++++ b/xen/arch/x86/mm/shadow/common.c
+@@ -123,10 +123,19 @@ __initcall(shadow_audit_key_init);
+ /* x86 emulator support for the shadow code
+ */
+
+-struct segment_register *hvm_get_seg_reg(
++/*
++ * Callers which pass a known in-range x86_segment can rely on the return
++ * pointer being valid. Other callers must explicitly check for errors.
++ */
++static struct segment_register *hvm_get_seg_reg(
+ enum x86_segment seg, struct sh_emulate_ctxt *sh_ctxt)
+ {
+- struct segment_register *seg_reg = &sh_ctxt->seg_reg[seg];
++ struct segment_register *seg_reg;
++
++ if ( seg < 0 || seg >= ARRAY_SIZE(sh_ctxt->seg_reg) )
++ return ERR_PTR(-X86EMUL_UNHANDLEABLE);
++
++ seg_reg = &sh_ctxt->seg_reg[seg];
+ if ( !__test_and_set_bit(seg, &sh_ctxt->valid_seg_regs) )
+ hvm_get_segment_register(current, seg, seg_reg);
+ return seg_reg;
+@@ -143,14 +152,9 @@ static int hvm_translate_linear_addr(
+ struct segment_register *reg;
+ int okay;
+
+- /*
+- * Can arrive here with non-user segments. However, no such circumstance
+- * is part of a legitimate pagetable update, so fail the emulation.
+- */
+- if ( !is_x86_user_segment(seg) )
+- return X86EMUL_UNHANDLEABLE;
+-
+ reg = hvm_get_seg_reg(seg, sh_ctxt);
++ if ( IS_ERR(reg) )
++ return -PTR_ERR(reg);
+
+ okay = hvm_virtual_to_linear_addr(
+ seg, reg, offset, bytes, access_type, sh_ctxt->ctxt.addr_size, paddr);
+@@ -253,9 +257,6 @@ hvm_emulate_write(enum x86_segment seg,
+ unsigned long addr;
+ int rc;
+
+- if ( !is_x86_user_segment(seg) )
+- return X86EMUL_UNHANDLEABLE;
+-
+ /* How many emulations could we save if we unshadowed on stack writes? */
+ if ( seg == x86_seg_ss )
+ perfc_incr(shadow_fault_emulate_stack);
+@@ -283,7 +284,7 @@ hvm_emulate_cmpxchg(enum x86_segment seg
+ unsigned long addr, old, new;
+ int rc;
+
+- if ( !is_x86_user_segment(seg) || bytes > sizeof(long) )
++ if ( bytes > sizeof(long) )
+ return X86EMUL_UNHANDLEABLE;
+
+ rc = hvm_translate_linear_addr(
+--- a/xen/arch/x86/mm/shadow/private.h
++++ b/xen/arch/x86/mm/shadow/private.h
+@@ -740,8 +740,6 @@ const struct x86_emulate_ops *shadow_ini
+ struct sh_emulate_ctxt *sh_ctxt, struct cpu_user_regs *regs);
+ void shadow_continue_emulation(
+ struct sh_emulate_ctxt *sh_ctxt, struct cpu_user_regs *regs);
+-struct segment_register *hvm_get_seg_reg(
+- enum x86_segment seg, struct sh_emulate_ctxt *sh_ctxt);
+
+ #if (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB)
+ /**************************************************************************/
+--- a/xen/include/asm-x86/hvm/emulate.h
++++ b/xen/include/asm-x86/hvm/emulate.h
+@@ -13,6 +13,7 @@
+ #define __ASM_X86_HVM_EMULATE_H__
+
+ #include <xen/config.h>
++#include <xen/err.h>
+ #include <asm/hvm/hvm.h>
+ #include <asm/x86_emulate.h>
+
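
The bounds checks above rely on the ERR_PTR/IS_ERR/PTR_ERR convention pulled in via <xen/err.h>: small negative error codes are encoded into the top, never-mapped range of pointer values, so one return slot can carry either a valid pointer or an error. A self-contained sketch of the idiom, with a bounds-checked accessor shaped like the patched *_get_seg_reg() helpers (names and the error constant are stand-ins):

#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error)    { return (void *)error; }
static inline long  PTR_ERR(const void *p) { return (long)p; }
static inline int   IS_ERR(const void *p)
{
    /* Error pointers occupy the top MAX_ERRNO values of the address
     * space, which never hold real objects. */
    return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

#define UNHANDLEABLE 2   /* stand-in for X86EMUL_UNHANDLEABLE */
#define NSEGS        6

static int seg_cache[NSEGS];

/* Range check first, then hand back a cache slot -- the shape of the
 * patched hvmemul_get_seg_reg()/hvm_get_seg_reg(). */
static int *get_seg_slot(int seg)
{
    if (seg < 0 || seg >= NSEGS)
        return ERR_PTR(-UNHANDLEABLE);
    return &seg_cache[seg];
}

int main(void)
{
    int *ok  = get_seg_slot(3);
    int *bad = get_seg_slot(13);   /* e.g. x86_seg_none */

    printf("ok is error: %d, bad error code: %ld\n",
           IS_ERR(ok), IS_ERR(bad) ? PTR_ERR(bad) : 0L);
    return 0;
}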