author      Mario Preksavec <mario@slackware.hr>    2016-06-26 17:17:40 +0200
committer   Willy Sudiarto Raharjo <willysr@slackbuilds.org>    2016-06-27 07:54:32 +0700
commit      370708023c63342ef91cefb9f2aa520a75a76bd0 (patch)
tree        0efe27fef778a711d7792455ac171b65c2f4ed66 /system/xen/patches
parent      02f56b708f8e40ed4a384e22f00eebb074c54a7f (diff)
download    slackbuilds-370708023c63342ef91cefb9f2aa520a75a76bd0.tar.gz
system/xen: Updated for version 4.6.3.
Signed-off-by: Mario Preksavec <mario@slackware.hr>
Diffstat (limited to 'system/xen/patches')
-rw-r--r--  system/xen/patches/remove_malloc_tracing.diff | 62
-rw-r--r--  system/xen/patches/xsa154-4.6.patch | 359
-rw-r--r--  system/xen/patches/xsa155-xen-0001-xen-Add-RING_COPY_REQUEST.patch | 56
-rw-r--r--  system/xen/patches/xsa155-xen-0002-blktap2-Use-RING_COPY_REQUEST.patch | 75
-rw-r--r--  system/xen/patches/xsa155-xen-0003-libvchan-Read-prod-cons-only-once.patch | 41
-rw-r--r--  system/xen/patches/xsa162-qemuu.patch | 42
-rw-r--r--  system/xen/patches/xsa170.patch | 79
-rw-r--r--  system/xen/patches/xsa172.patch | 39
-rw-r--r--  system/xen/patches/xsa173-4.6.patch | 244
-rw-r--r--  system/xen/patches/xsa176.patch | 45
-rw-r--r--  system/xen/patches/xsa179-qemuu-4.6-0001-vga-fix-banked-access-bounds-checking-CVE-2016-3710.patch | 108
-rw-r--r--  system/xen/patches/xsa179-qemuu-4.6-0002-vga-add-vbe_enabled-helper.patch | 68
-rw-r--r--  system/xen/patches/xsa179-qemuu-4.6-0003-vga-factor-out-vga-register-setup.patch | 127
-rw-r--r--  system/xen/patches/xsa179-qemuu-4.6-0004-vga-update-vga-register-setup-on-vbe-changes.patch | 29
-rw-r--r--  system/xen/patches/xsa179-qemuu-4.6-0005-vga-make-sure-vga-register-setup-for-vbe-stays-intac.patch | 75
-rw-r--r--  system/xen/patches/xsa181.patch | 38
16 files changed, 0 insertions, 1487 deletions
diff --git a/system/xen/patches/remove_malloc_tracing.diff b/system/xen/patches/remove_malloc_tracing.diff
deleted file mode 100644
index 293000fdc6..0000000000
--- a/system/xen/patches/remove_malloc_tracing.diff
+++ /dev/null
@@ -1,62 +0,0 @@
---- xen-4.6.1/tools/qemu-xen/trace-events.orig 2016-01-06 17:42:43.000000000 +0100
-+++ xen-4.6.1/tools/qemu-xen/trace-events 2016-02-20 20:36:48.996704075 +0100
-@@ -571,9 +571,6 @@
- vm_state_notify(int running, int reason) "running %d reason %d"
- load_file(const char *name, const char *path) "name %s location %s"
- runstate_set(int new_state) "new state %d"
--g_malloc(size_t size, void *ptr) "size %zu ptr %p"
--g_realloc(void *ptr, size_t size, void *newptr) "ptr %p size %zu newptr %p"
--g_free(void *ptr) "ptr %p"
- system_wakeup_request(int reason) "reason=%d"
- qemu_system_shutdown_request(void) ""
- qemu_system_powerdown_request(void) ""
---- xen-4.6.1/tools/qemu-xen/vl.c.orig 2016-01-06 17:42:43.000000000 +0100
-+++ xen-4.6.1/tools/qemu-xen/vl.c 2016-02-20 20:38:17.715227938 +0100
-@@ -2628,26 +2628,6 @@
- return popt;
- }
-
--static gpointer malloc_and_trace(gsize n_bytes)
--{
-- void *ptr = malloc(n_bytes);
-- trace_g_malloc(n_bytes, ptr);
-- return ptr;
--}
--
--static gpointer realloc_and_trace(gpointer mem, gsize n_bytes)
--{
-- void *ptr = realloc(mem, n_bytes);
-- trace_g_realloc(mem, n_bytes, ptr);
-- return ptr;
--}
--
--static void free_and_trace(gpointer mem)
--{
-- trace_g_free(mem);
-- free(mem);
--}
--
- static int machine_set_property(const char *name, const char *value,
- void *opaque)
- {
-@@ -2763,11 +2743,6 @@
- bool userconfig = true;
- const char *log_mask = NULL;
- const char *log_file = NULL;
-- GMemVTable mem_trace = {
-- .malloc = malloc_and_trace,
-- .realloc = realloc_and_trace,
-- .free = free_and_trace,
-- };
- const char *trace_events = NULL;
- const char *trace_file = NULL;
- const ram_addr_t default_ram_size = (ram_addr_t)DEFAULT_RAM_SIZE *
-@@ -2781,8 +2756,6 @@
- error_set_progname(argv[0]);
- qemu_init_exec_dir(argv[0]);
-
-- g_mem_set_vtable(&mem_trace);
--
- module_call_init(MODULE_INIT_QOM);
-
- qemu_add_opts(&qemu_drive_opts);
diff --git a/system/xen/patches/xsa154-4.6.patch b/system/xen/patches/xsa154-4.6.patch
deleted file mode 100644
index f1e598812b..0000000000
--- a/system/xen/patches/xsa154-4.6.patch
+++ /dev/null
@@ -1,359 +0,0 @@
-x86: enforce consistent cachability of MMIO mappings
-
-We've been told by Intel that inconsistent cachability between
-multiple mappings of the same page can affect system stability only
-when the affected page is an MMIO one. Since the stale data issue is
-of no relevance to the hypervisor (since all guest memory accesses go
-through proper accessors and validation), handling of RAM pages
-remains unchanged here. Any MMIO mapped by domains however needs to be
-done consistently (all cachable mappings or all uncachable ones), in
-order to avoid Machine Check exceptions. Since converting existing
-cachable mappings to uncachable (at the time an uncachable mapping
-gets established) would in the PV case require tracking all mappings,
-allow MMIO to only get mapped uncachable (UC, UC-, or WC).
-
-This also implies that in the PV case we mustn't use the L1 PTE update
-fast path when cachability flags get altered.
-
-Since in the HVM case at least for now we want to continue honoring
-pinned cachability attributes for pages not mapped by the hypervisor,
-special case handling of r/o MMIO pages (forcing UC) gets added there.
-Arguably the counterpart change to p2m-pt.c may not be necessary, since
-UC- (which already gets enforced there) is probably strict enough.
-
-Note that the shadow code changes include fixing the write protection
-of r/o MMIO ranges: shadow_l1e_remove_flags() and its siblings, other
-than l1e_remove_flags() and alike, return the new PTE (and hence
-ignoring their return values makes them no-ops).
-
-This is CVE-2016-2270 / XSA-154.
-
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
-Acked-by: Andrew Cooper <andrew.cooper3@citrix.com>
-
---- a/docs/misc/xen-command-line.markdown
-+++ b/docs/misc/xen-command-line.markdown
-@@ -1080,6 +1080,15 @@ limit is ignored by Xen.
-
- Specify if the MMConfig space should be enabled.
-
-+### mmio-relax
-+> `= <boolean> | all`
-+
-+> Default: `false`
-+
-+By default, domains may not create cached mappings to MMIO regions.
-+This option relaxes the check for Domain 0 (or when using `all`, all PV
-+domains), to permit the use of cacheable MMIO mappings.
-+
- ### msi
- > `= <boolean>`
-
---- a/xen/arch/x86/hvm/mtrr.c
-+++ b/xen/arch/x86/hvm/mtrr.c
-@@ -807,8 +807,17 @@ int epte_get_entry_emt(struct domain *d,
- if ( v->domain != d )
- v = d->vcpu ? d->vcpu[0] : NULL;
-
-- if ( !mfn_valid(mfn_x(mfn)) )
-+ if ( !mfn_valid(mfn_x(mfn)) ||
-+ rangeset_contains_range(mmio_ro_ranges, mfn_x(mfn),
-+ mfn_x(mfn) + (1UL << order) - 1) )
-+ {
-+ *ipat = 1;
- return MTRR_TYPE_UNCACHABLE;
-+ }
-+
-+ if ( rangeset_overlaps_range(mmio_ro_ranges, mfn_x(mfn),
-+ mfn_x(mfn) + (1UL << order) - 1) )
-+ return -1;
-
- switch ( hvm_get_mem_pinned_cacheattr(d, gfn, order, &type) )
- {
---- a/xen/arch/x86/mm/p2m-pt.c
-+++ b/xen/arch/x86/mm/p2m-pt.c
-@@ -107,6 +107,8 @@ static unsigned long p2m_type_to_flags(p
- case p2m_mmio_direct:
- if ( !rangeset_contains_singleton(mmio_ro_ranges, mfn_x(mfn)) )
- flags |= _PAGE_RW;
-+ else
-+ flags |= _PAGE_PWT;
- return flags | P2M_BASE_FLAGS | _PAGE_PCD;
- }
- }
---- a/xen/arch/x86/mm/shadow/multi.c
-+++ b/xen/arch/x86/mm/shadow/multi.c
-@@ -519,6 +519,7 @@ _sh_propagate(struct vcpu *v,
- gfn_t target_gfn = guest_l1e_get_gfn(guest_entry);
- u32 pass_thru_flags;
- u32 gflags, sflags;
-+ bool_t mmio_mfn;
-
- /* We don't shadow PAE l3s */
- ASSERT(GUEST_PAGING_LEVELS > 3 || level != 3);
-@@ -559,7 +560,10 @@ _sh_propagate(struct vcpu *v,
- // mfn means that we can not usefully shadow anything, and so we
- // return early.
- //
-- if ( !mfn_valid(target_mfn)
-+ mmio_mfn = !mfn_valid(target_mfn)
-+ || (level == 1
-+ && page_get_owner(mfn_to_page(target_mfn)) == dom_io);
-+ if ( mmio_mfn
- && !(level == 1 && (!shadow_mode_refcounts(d)
- || p2mt == p2m_mmio_direct)) )
- {
-@@ -577,7 +581,7 @@ _sh_propagate(struct vcpu *v,
- _PAGE_RW | _PAGE_PRESENT);
- if ( guest_supports_nx(v) )
- pass_thru_flags |= _PAGE_NX_BIT;
-- if ( !shadow_mode_refcounts(d) && !mfn_valid(target_mfn) )
-+ if ( level == 1 && !shadow_mode_refcounts(d) && mmio_mfn )
- pass_thru_flags |= _PAGE_PAT | _PAGE_PCD | _PAGE_PWT;
- sflags = gflags & pass_thru_flags;
-
-@@ -676,10 +680,14 @@ _sh_propagate(struct vcpu *v,
- }
-
- /* Read-only memory */
-- if ( p2m_is_readonly(p2mt) ||
-- (p2mt == p2m_mmio_direct &&
-- rangeset_contains_singleton(mmio_ro_ranges, mfn_x(target_mfn))) )
-+ if ( p2m_is_readonly(p2mt) )
- sflags &= ~_PAGE_RW;
-+ else if ( p2mt == p2m_mmio_direct &&
-+ rangeset_contains_singleton(mmio_ro_ranges, mfn_x(target_mfn)) )
-+ {
-+ sflags &= ~(_PAGE_RW | _PAGE_PAT);
-+ sflags |= _PAGE_PCD | _PAGE_PWT;
-+ }
-
- // protect guest page tables
- //
-@@ -1185,22 +1193,28 @@ static int shadow_set_l1e(struct domain
- && !sh_l1e_is_magic(new_sl1e) )
- {
- /* About to install a new reference */
-- if ( shadow_mode_refcounts(d) ) {
-+ if ( shadow_mode_refcounts(d) )
-+ {
-+#define PAGE_FLIPPABLE (_PAGE_RW | _PAGE_PWT | _PAGE_PCD | _PAGE_PAT)
-+ int rc;
-+
- TRACE_SHADOW_PATH_FLAG(TRCE_SFLAG_SHADOW_L1_GET_REF);
-- switch ( shadow_get_page_from_l1e(new_sl1e, d, new_type) )
-+ switch ( rc = shadow_get_page_from_l1e(new_sl1e, d, new_type) )
- {
- default:
- /* Doesn't look like a pagetable. */
- flags |= SHADOW_SET_ERROR;
- new_sl1e = shadow_l1e_empty();
- break;
-- case 1:
-- shadow_l1e_remove_flags(new_sl1e, _PAGE_RW);
-+ case PAGE_FLIPPABLE & -PAGE_FLIPPABLE ... PAGE_FLIPPABLE:
-+ ASSERT(!(rc & ~PAGE_FLIPPABLE));
-+ new_sl1e = shadow_l1e_flip_flags(new_sl1e, rc);
- /* fall through */
- case 0:
- shadow_vram_get_l1e(new_sl1e, sl1e, sl1mfn, d);
- break;
- }
-+#undef PAGE_FLIPPABLE
- }
- }
-
---- a/xen/arch/x86/mm/shadow/types.h
-+++ b/xen/arch/x86/mm/shadow/types.h
-@@ -99,6 +99,9 @@ static inline u32 shadow_l4e_get_flags(s
- static inline shadow_l1e_t
- shadow_l1e_remove_flags(shadow_l1e_t sl1e, u32 flags)
- { l1e_remove_flags(sl1e, flags); return sl1e; }
-+static inline shadow_l1e_t
-+shadow_l1e_flip_flags(shadow_l1e_t sl1e, u32 flags)
-+{ l1e_flip_flags(sl1e, flags); return sl1e; }
-
- static inline shadow_l1e_t shadow_l1e_empty(void)
- { return l1e_empty(); }
---- a/xen/arch/x86/mm.c
-+++ b/xen/arch/x86/mm.c
-@@ -178,6 +178,18 @@ static uint32_t base_disallow_mask;
- is_pv_domain(d)) ? \
- L1_DISALLOW_MASK : (L1_DISALLOW_MASK & ~PAGE_CACHE_ATTRS))
-
-+static s8 __read_mostly opt_mmio_relax;
-+static void __init parse_mmio_relax(const char *s)
-+{
-+ if ( !*s )
-+ opt_mmio_relax = 1;
-+ else
-+ opt_mmio_relax = parse_bool(s);
-+ if ( opt_mmio_relax < 0 && strcmp(s, "all") )
-+ opt_mmio_relax = 0;
-+}
-+custom_param("mmio-relax", parse_mmio_relax);
-+
- static void __init init_frametable_chunk(void *start, void *end)
- {
- unsigned long s = (unsigned long)start;
-@@ -799,10 +811,7 @@ get_page_from_l1e(
- if ( !mfn_valid(mfn) ||
- (real_pg_owner = page_get_owner_and_reference(page)) == dom_io )
- {
--#ifndef NDEBUG
-- const unsigned long *ro_map;
-- unsigned int seg, bdf;
--#endif
-+ int flip = 0;
-
- /* Only needed the reference to confirm dom_io ownership. */
- if ( mfn_valid(mfn) )
-@@ -836,24 +845,55 @@ get_page_from_l1e(
- return -EINVAL;
- }
-
-- if ( !(l1f & _PAGE_RW) ||
-- !rangeset_contains_singleton(mmio_ro_ranges, mfn) )
-- return 0;
-+ if ( !rangeset_contains_singleton(mmio_ro_ranges, mfn) )
-+ {
-+ /* MMIO pages must not be mapped cachable unless requested so. */
-+ switch ( opt_mmio_relax )
-+ {
-+ case 0:
-+ break;
-+ case 1:
-+ if ( is_hardware_domain(l1e_owner) )
-+ case -1:
-+ return 0;
-+ default:
-+ ASSERT_UNREACHABLE();
-+ }
-+ }
-+ else if ( l1f & _PAGE_RW )
-+ {
- #ifndef NDEBUG
-- if ( !pci_mmcfg_decode(mfn, &seg, &bdf) ||
-- ((ro_map = pci_get_ro_map(seg)) != NULL &&
-- test_bit(bdf, ro_map)) )
-- printk(XENLOG_G_WARNING
-- "d%d: Forcing read-only access to MFN %lx\n",
-- l1e_owner->domain_id, mfn);
-- else
-- rangeset_report_ranges(mmio_ro_ranges, 0, ~0UL,
-- print_mmio_emul_range,
-- &(struct mmio_emul_range_ctxt){
-- .d = l1e_owner,
-- .mfn = mfn });
-+ const unsigned long *ro_map;
-+ unsigned int seg, bdf;
-+
-+ if ( !pci_mmcfg_decode(mfn, &seg, &bdf) ||
-+ ((ro_map = pci_get_ro_map(seg)) != NULL &&
-+ test_bit(bdf, ro_map)) )
-+ printk(XENLOG_G_WARNING
-+ "d%d: Forcing read-only access to MFN %lx\n",
-+ l1e_owner->domain_id, mfn);
-+ else
-+ rangeset_report_ranges(mmio_ro_ranges, 0, ~0UL,
-+ print_mmio_emul_range,
-+ &(struct mmio_emul_range_ctxt){
-+ .d = l1e_owner,
-+ .mfn = mfn });
- #endif
-- return 1;
-+ flip = _PAGE_RW;
-+ }
-+
-+ switch ( l1f & PAGE_CACHE_ATTRS )
-+ {
-+ case 0: /* WB */
-+ flip |= _PAGE_PWT | _PAGE_PCD;
-+ break;
-+ case _PAGE_PWT: /* WT */
-+ case _PAGE_PWT | _PAGE_PAT: /* WP */
-+ flip |= _PAGE_PCD | (l1f & _PAGE_PAT);
-+ break;
-+ }
-+
-+ return flip;
- }
-
- if ( unlikely( (real_pg_owner != pg_owner) &&
-@@ -1243,8 +1283,9 @@ static int alloc_l1_table(struct page_in
- goto fail;
- case 0:
- break;
-- case 1:
-- l1e_remove_flags(pl1e[i], _PAGE_RW);
-+ case _PAGE_RW ... _PAGE_RW | PAGE_CACHE_ATTRS:
-+ ASSERT(!(ret & ~(_PAGE_RW | PAGE_CACHE_ATTRS)));
-+ l1e_flip_flags(pl1e[i], ret);
- break;
- }
-
-@@ -1759,8 +1800,9 @@ static int mod_l1_entry(l1_pgentry_t *pl
- return -EINVAL;
- }
-
-- /* Fast path for identical mapping, r/w and presence. */
-- if ( !l1e_has_changed(ol1e, nl1e, _PAGE_RW | _PAGE_PRESENT) )
-+ /* Fast path for identical mapping, r/w, presence, and cachability. */
-+ if ( !l1e_has_changed(ol1e, nl1e,
-+ PAGE_CACHE_ATTRS | _PAGE_RW | _PAGE_PRESENT) )
- {
- adjust_guest_l1e(nl1e, pt_dom);
- if ( UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, pt_vcpu,
-@@ -1783,8 +1825,9 @@ static int mod_l1_entry(l1_pgentry_t *pl
- return rc;
- case 0:
- break;
-- case 1:
-- l1e_remove_flags(nl1e, _PAGE_RW);
-+ case _PAGE_RW ... _PAGE_RW | PAGE_CACHE_ATTRS:
-+ ASSERT(!(rc & ~(_PAGE_RW | PAGE_CACHE_ATTRS)));
-+ l1e_flip_flags(nl1e, rc);
- rc = 0;
- break;
- }
-@@ -5000,6 +5043,7 @@ static int ptwr_emulated_update(
- l1_pgentry_t pte, ol1e, nl1e, *pl1e;
- struct vcpu *v = current;
- struct domain *d = v->domain;
-+ int ret;
-
- /* Only allow naturally-aligned stores within the original %cr2 page. */
- if ( unlikely(((addr^ptwr_ctxt->cr2) & PAGE_MASK) || (addr & (bytes-1))) )
-@@ -5047,7 +5091,7 @@ static int ptwr_emulated_update(
-
- /* Check the new PTE. */
- nl1e = l1e_from_intpte(val);
-- switch ( get_page_from_l1e(nl1e, d, d) )
-+ switch ( ret = get_page_from_l1e(nl1e, d, d) )
- {
- default:
- if ( is_pv_32bit_domain(d) && (bytes == 4) && (unaligned_addr & 4) &&
-@@ -5071,8 +5115,9 @@ static int ptwr_emulated_update(
- break;
- case 0:
- break;
-- case 1:
-- l1e_remove_flags(nl1e, _PAGE_RW);
-+ case _PAGE_RW ... _PAGE_RW | PAGE_CACHE_ATTRS:
-+ ASSERT(!(ret & ~(_PAGE_RW | PAGE_CACHE_ATTRS)));
-+ l1e_flip_flags(nl1e, ret);
- break;
- }
-
---- a/xen/include/asm-x86/page.h
-+++ b/xen/include/asm-x86/page.h
-@@ -157,6 +157,9 @@ static inline l4_pgentry_t l4e_from_padd
- #define l3e_remove_flags(x, flags) ((x).l3 &= ~put_pte_flags(flags))
- #define l4e_remove_flags(x, flags) ((x).l4 &= ~put_pte_flags(flags))
-
-+/* Flip flags in an existing L1 PTE. */
-+#define l1e_flip_flags(x, flags) ((x).l1 ^= put_pte_flags(flags))
-+
- /* Check if a pte's page mapping or significant access flags have changed. */
- #define l1e_has_changed(x,y,flags) \
- ( !!(((x).l1 ^ (y).l1) & ((PADDR_MASK&PAGE_MASK)|put_pte_flags(flags))) )
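
The mechanical core of the XSA-154 change above is that get_page_from_l1e() now returns a mask of flag bits to flip (instead of the old "1 means drop _PAGE_RW"), which callers apply via the new l1e_flip_flags(). The sketch below reproduces only the cache-attribute part of that switch, using the standard x86 PTE flag values; it is an illustration compiled outside Xen, not code lifted from the tree.

    /* Standalone sketch of the cache-attribute "flip" computed in the XSA-154
     * hunk of get_page_from_l1e(); flag values are the usual x86 PTE bits. */
    #include <stdio.h>

    #define _PAGE_PWT 0x08u                    /* bit 3 */
    #define _PAGE_PCD 0x10u                    /* bit 4 */
    #define _PAGE_PAT 0x80u                    /* bit 7 */
    #define PAGE_CACHE_ATTRS (_PAGE_PAT | _PAGE_PCD | _PAGE_PWT)

    /* Bits to XOR into the PTE so an MMIO mapping ends up uncacheable
     * (UC, UC- or WC), as the patch description requires. */
    static unsigned int mmio_cache_flip(unsigned int l1f)
    {
        unsigned int flip = 0;

        switch (l1f & PAGE_CACHE_ATTRS)
        {
        case 0:                                /* WB requested */
            flip = _PAGE_PWT | _PAGE_PCD;
            break;
        case _PAGE_PWT:                        /* WT requested */
        case _PAGE_PWT | _PAGE_PAT:            /* WP requested */
            flip = _PAGE_PCD | (l1f & _PAGE_PAT);
            break;
        /* UC, UC- and WC are already uncacheable: nothing to flip. */
        }
        return flip;
    }

    int main(void)
    {
        unsigned int reqs[] = { 0, _PAGE_PWT, _PAGE_PWT | _PAGE_PAT, _PAGE_PCD };

        for (unsigned int i = 0; i < sizeof(reqs) / sizeof(reqs[0]); i++)
            printf("requested attrs %#04x -> effective %#04x\n", reqs[i],
                   (reqs[i] ^ mmio_cache_flip(reqs[i])) & PAGE_CACHE_ATTRS);
        return 0;
    }

Run on its own, this shows WB, WT and WP requests all collapsing to an uncacheable combination, while an already-uncacheable request passes through unchanged, which is exactly the policy the commit message describes.
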
diff --git a/system/xen/patches/xsa155-xen-0001-xen-Add-RING_COPY_REQUEST.patch b/system/xen/patches/xsa155-xen-0001-xen-Add-RING_COPY_REQUEST.patch
deleted file mode 100644
index 7935e58c40..0000000000
--- a/system/xen/patches/xsa155-xen-0001-xen-Add-RING_COPY_REQUEST.patch
+++ /dev/null
@@ -1,56 +0,0 @@
-From 12b11658a9d6a654a1e7acbf2f2d56ce9a396c86 Mon Sep 17 00:00:00 2001
-From: David Vrabel <david.vrabel@citrix.com>
-Date: Fri, 20 Nov 2015 11:59:05 -0500
-Subject: [PATCH 1/3] xen: Add RING_COPY_REQUEST()
-
-RING_GET_REQUEST() on a shared ring is easy to use incorrectly
-(i.e., by not considering that the other end may alter the data in the
-shared ring while it is being inspected). Safe usage of a request
-generally requires taking a local copy.
-
-Provide a RING_COPY_REQUEST() macro to use instead of
-RING_GET_REQUEST() and an open-coded memcpy(). This takes care of
-ensuring that the copy is done correctly regardless of any possible
-compiler optimizations.
-
-Use a volatile source to prevent the compiler from reordering or
-omitting the copy.
-
-This is part of XSA155.
-
-Signed-off-by: David Vrabel <david.vrabel@citrix.com>
-Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
----
-v2: Add comment about GCC bug.
----
- xen/include/public/io/ring.h | 14 ++++++++++++++
- 1 file changed, 14 insertions(+)
-
-diff --git a/xen/include/public/io/ring.h b/xen/include/public/io/ring.h
-index ba9401b..801c0da 100644
---- a/xen/include/public/io/ring.h
-+++ b/xen/include/public/io/ring.h
-@@ -212,6 +212,20 @@ typedef struct __name##_back_ring __name##_back_ring_t
- #define RING_GET_REQUEST(_r, _idx) \
- (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req))
-
-+/*
-+ * Get a local copy of a request.
-+ *
-+ * Use this in preference to RING_GET_REQUEST() so all processing is
-+ * done on a local copy that cannot be modified by the other end.
-+ *
-+ * Note that https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 may cause this
-+ * to be ineffective where _req is a struct which consists of only bitfields.
-+ */
-+#define RING_COPY_REQUEST(_r, _idx, _req) do { \
-+ /* Use volatile to force the copy into _req. */ \
-+ *(_req) = *(volatile typeof(_req))RING_GET_REQUEST(_r, _idx); \
-+} while (0)
-+
- #define RING_GET_RESPONSE(_r, _idx) \
- (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp))
-
---
-2.1.0
-
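
The value of the macro is easier to see next to the bug it prevents: a double fetch, where a backend validates a field through the live shared-ring pointer and then re-reads it when acting on it, letting the frontend change the value in between. The sketch below is a generic illustration of that pattern with made-up types; only the copy-before-use idea comes from the patch.

    /* Illustrative double-fetch demo: 'shared_slot' stands in for a slot in a
     * ring shared with an untrusted peer; the struct is invented for the example. */
    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    struct req { uint32_t op; uint32_t len; };

    static volatile struct req shared_slot;    /* imagine another party writes this */

    /* UNSAFE shape: the bounds check and the use each read the shared field,
     * so the peer can grow 'len' between the two reads. */
    static int handle_unsafe(uint8_t *buf, size_t bufsz)
    {
        if (shared_slot.len > bufsz)           /* first fetch */
            return -1;
        memset(buf, 0, shared_slot.len);       /* second fetch: may now be larger */
        return 0;
    }

    /* SAFE shape (what RING_COPY_REQUEST() enforces): snapshot the request into
     * private memory via volatile reads, then validate and use only the copy. */
    static int handle_safe(uint8_t *buf, size_t bufsz)
    {
        struct req local;

        local.op  = shared_slot.op;
        local.len = shared_slot.len;
        if (local.len > bufsz)
            return -1;
        memset(buf, 0, local.len);             /* checked value == used value */
        return 0;
    }

    int main(void)
    {
        uint8_t buf[64];

        shared_slot.op = 1;
        shared_slot.len = 16;
        printf("unsafe=%d safe=%d\n",
               handle_unsafe(buf, sizeof(buf)), handle_safe(buf, sizeof(buf)));
        return 0;
    }
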
diff --git a/system/xen/patches/xsa155-xen-0002-blktap2-Use-RING_COPY_REQUEST.patch b/system/xen/patches/xsa155-xen-0002-blktap2-Use-RING_COPY_REQUEST.patch
deleted file mode 100644
index 2d80a7bd43..0000000000
--- a/system/xen/patches/xsa155-xen-0002-blktap2-Use-RING_COPY_REQUEST.patch
+++ /dev/null
@@ -1,75 +0,0 @@
-From 851ffb4eea917e2708c912291dea4d133026c0ac Mon Sep 17 00:00:00 2001
-From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-Date: Fri, 20 Nov 2015 12:16:02 -0500
-Subject: [PATCH 2/3] blktap2: Use RING_COPY_REQUEST
-
-Instead of RING_GET_REQUEST. Using a local copy of the
-ring (and also with proper memory barriers) means we
-do not have to worry about the compiler optimizing
-the code and doing a double-fetch in the shared memory space.
-
-This is part of XSA155.
-
-Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-
----
-v2: Fix compile issues with tapdisk-vbd
----
- tools/blktap2/drivers/block-log.c | 3 ++-
- tools/blktap2/drivers/tapdisk-vbd.c | 8 ++++----
- 2 files changed, 6 insertions(+), 5 deletions(-)
-
-diff --git a/tools/blktap2/drivers/block-log.c b/tools/blktap2/drivers/block-log.c
-index 5330cdc..5f3bd35 100644
---- a/tools/blktap2/drivers/block-log.c
-+++ b/tools/blktap2/drivers/block-log.c
-@@ -494,11 +494,12 @@ static int ctl_kick(struct tdlog_state* s, int fd)
- reqstart = s->bring.req_cons;
- reqend = s->sring->req_prod;
-
-+ xen_mb();
- BDPRINTF("ctl: ring kicked (start = %u, end = %u)", reqstart, reqend);
-
- while (reqstart != reqend) {
- /* XXX actually submit these! */
-- memcpy(&req, RING_GET_REQUEST(&s->bring, reqstart), sizeof(req));
-+ RING_COPY_REQUEST(&s->bring, reqstart, &req);
- BDPRINTF("ctl: read request %"PRIu64":%u", req.sector, req.count);
- s->bring.req_cons = ++reqstart;
-
-diff --git a/tools/blktap2/drivers/tapdisk-vbd.c b/tools/blktap2/drivers/tapdisk-vbd.c
-index 6d1d94a..89ef9ed 100644
---- a/tools/blktap2/drivers/tapdisk-vbd.c
-+++ b/tools/blktap2/drivers/tapdisk-vbd.c
-@@ -1555,7 +1555,7 @@ tapdisk_vbd_pull_ring_requests(td_vbd_t *vbd)
- int idx;
- RING_IDX rp, rc;
- td_ring_t *ring;
-- blkif_request_t *req;
-+ blkif_request_t req;
- td_vbd_request_t *vreq;
-
- ring = &vbd->ring;
-@@ -1566,16 +1566,16 @@ tapdisk_vbd_pull_ring_requests(td_vbd_t *vbd)
- xen_rmb();
-
- for (rc = ring->fe_ring.req_cons; rc != rp; rc++) {
-- req = RING_GET_REQUEST(&ring->fe_ring, rc);
-+ RING_COPY_REQUEST(&ring->fe_ring, rc, &req);
- ++ring->fe_ring.req_cons;
-
-- idx = req->id;
-+ idx = req.id;
- vreq = &vbd->request_list[idx];
-
- ASSERT(list_empty(&vreq->next));
- ASSERT(vreq->secs_pending == 0);
-
-- memcpy(&vreq->req, req, sizeof(blkif_request_t));
-+ memcpy(&vreq->req, &req, sizeof(blkif_request_t));
- vbd->received++;
- vreq->vbd = vbd;
-
---
-2.1.4
-
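
At the call sites the change boils down to one rule: the consumer never keeps a pointer into the shared ring, it copies each request into a stack variable and works only from that. The loop below sketches the resulting shape with invented types; only the structure mirrors the tapdisk hunk above.

    /* Consumer-loop sketch: copy each request out of the shared ring before
     * reading any of its fields. Types are stand-ins, not blkif/ring.h. */
    #include <stdint.h>
    #include <stdio.h>

    #define RING_SIZE 32u

    struct request { uint64_t id; uint64_t sector; };

    struct ring {
        volatile struct request slots[RING_SIZE]; /* shared with the frontend */
        unsigned int req_cons;                    /* private consumer index */
    };

    static void copy_request(struct ring *r, unsigned int idx, struct request *out)
    {
        const volatile struct request *src = &r->slots[idx % RING_SIZE];

        out->id = src->id;                        /* snapshot before any use */
        out->sector = src->sector;
    }

    static void pull_requests(struct ring *r, unsigned int req_prod,
                              void (*submit)(const struct request *))
    {
        for (unsigned int rc = r->req_cons; rc != req_prod; rc++) {
            struct request req;                   /* local copy, as in the patch */

            copy_request(r, rc, &req);
            r->req_cons = rc + 1;
            submit(&req);                         /* only the snapshot is used */
        }
    }

    static void submit_stub(const struct request *req)
    {
        printf("id=%llu sector=%llu\n",
               (unsigned long long)req->id, (unsigned long long)req->sector);
    }

    int main(void)
    {
        struct ring r = { .req_cons = 0 };

        r.slots[0].id = 7;
        r.slots[0].sector = 1024;
        pull_requests(&r, 1, submit_stub);
        return 0;
    }
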
diff --git a/system/xen/patches/xsa155-xen-0003-libvchan-Read-prod-cons-only-once.patch b/system/xen/patches/xsa155-xen-0003-libvchan-Read-prod-cons-only-once.patch
deleted file mode 100644
index 56a6e538f4..0000000000
--- a/system/xen/patches/xsa155-xen-0003-libvchan-Read-prod-cons-only-once.patch
+++ /dev/null
@@ -1,41 +0,0 @@
-From c1fce65e2b720684ea6ba76ae59921542bd154bb Mon Sep 17 00:00:00 2001
-From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-Date: Fri, 20 Nov 2015 12:22:14 -0500
-Subject: [PATCH 3/3] libvchan: Read prod/cons only once.
-
-We must ensure that the prod/cons are only read once and that
-the compiler won't try to optimize the reads. That is, it must
-not split the read of these into multiple instructions that
-influence later branch code. As such, insert barriers when
-fetching the cons and prod index.
-
-This is part of XSA155.
-
-Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
----
- tools/libvchan/io.c | 2 ++
- 1 file changed, 2 insertions(+)
-
-diff --git a/tools/libvchan/io.c b/tools/libvchan/io.c
-index 8a9629b..381cc05 100644
---- a/tools/libvchan/io.c
-+++ b/tools/libvchan/io.c
-@@ -117,6 +117,7 @@ static inline int send_notify(struct libxenvchan *ctrl, uint8_t bit)
- static inline int raw_get_data_ready(struct libxenvchan *ctrl)
- {
- uint32_t ready = rd_prod(ctrl) - rd_cons(ctrl);
-+ xen_mb(); /* Ensure 'ready' is read only once. */
- if (ready > rd_ring_size(ctrl))
- /* We have no way to return errors. Locking up the ring is
- * better than the alternatives. */
-@@ -158,6 +159,7 @@ int libxenvchan_data_ready(struct libxenvchan *ctrl)
- static inline int raw_get_buffer_space(struct libxenvchan *ctrl)
- {
- uint32_t ready = wr_ring_size(ctrl) - (wr_prod(ctrl) - wr_cons(ctrl));
-+ xen_mb(); /* Ensure 'ready' is read only once. */
- if (ready > wr_ring_size(ctrl))
- /* We have no way to return errors. Locking up the ring is
- * better than the alternatives. */
---
-2.1.0
-
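
The idea of the two added barriers is that the producer/consumer indices are fetched exactly once, so the sanity check and the arithmetic that follows see the same values. A generic sketch of that read-once idiom is below; the struct layout is invented, and a plain compiler barrier stands in for xen_mb().

    /* Read-once sketch: snapshot untrusted shared indices, fence, then do every
     * check and calculation on the snapshot. Not libxenvchan's real layout. */
    #include <stdint.h>
    #include <stdio.h>

    #define RING_SIZE 1024u

    struct ring_idx {
        volatile uint32_t prod;                /* written by the untrusted peer */
        volatile uint32_t cons;                /* written by us */
    };

    static uint32_t data_ready(const struct ring_idx *r)
    {
        uint32_t prod = r->prod;               /* single fetch of each index */
        uint32_t cons = r->cons;

        __asm__ __volatile__("" ::: "memory"); /* don't let the compiler re-read */

        uint32_t ready = prod - cons;
        if (ready > RING_SIZE)                 /* inconsistent indices from the peer */
            return RING_SIZE;                  /* "lock up the ring" rather than lie */
        return ready;
    }

    int main(void)
    {
        struct ring_idx r = { .prod = 4096, .cons = 0 };   /* bogus: > ring size */

        printf("ready=%u\n", data_ready(&r));
        return 0;
    }
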
diff --git a/system/xen/patches/xsa162-qemuu.patch b/system/xen/patches/xsa162-qemuu.patch
deleted file mode 100644
index 2e3352d88b..0000000000
--- a/system/xen/patches/xsa162-qemuu.patch
+++ /dev/null
@@ -1,42 +0,0 @@
-net: pcnet: add check to validate receive data size(CVE-2015-7504)
-
-In loopback mode, the pcnet_receive routine appends CRC code to the
-receive buffer. If the data size given is the same as the buffer size,
-the appended CRC code overwrites 4 bytes after s->buffer. Added a
-check to avoid that.
-
-Reported-by: Qinghao Tang <luodalongde@gmail.com>
-Signed-off-by: Prasad J Pandit <pjp@fedoraproject.org>
----
- hw/net/pcnet.c | 10 ++++++----
- 1 file changed, 6 insertions(+), 4 deletions(-)
-
-diff --git a/hw/net/pcnet.c b/hw/net/pcnet.c
-index 3437376..5f55591 100644
---- a/hw/net/pcnet.c
-+++ b/hw/net/pcnet.c
-@@ -1085,7 +1085,7 @@ ssize_t pcnet_receive(NetClientState *nc, const uint8_t *buf, size_t size_)
- uint32_t fcs = ~0;
- uint8_t *p = src;
-
-- while (p != &src[size-4])
-+ while (p != &src[size])
- CRC(fcs, *p++);
- crc_err = (*(uint32_t *)p != htonl(fcs));
- }
-@@ -1234,8 +1234,10 @@ static void pcnet_transmit(PCNetState *s)
- bcnt = 4096 - GET_FIELD(tmd.length, TMDL, BCNT);
-
- /* if multi-tmd packet outsizes s->buffer then skip it silently.
-- Note: this is not what real hw does */
-- if (s->xmit_pos + bcnt > sizeof(s->buffer)) {
-+ * Note: this is not what real hw does.
-+ * Last four bytes of s->buffer are used to store CRC FCS code.
-+ */
-+ if (s->xmit_pos + bcnt > sizeof(s->buffer) - 4) {
- s->xmit_pos = -1;
- goto txdone;
- }
---
-2.4.3
-
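
The transmit-path hunk is a reminder that a bounds check has to account for everything written later, here the 4-byte FCS that loopback mode appends. The toy below shows the corrected bound in isolation; the buffer size and FCS bytes are arbitrary stand-ins for s->buffer and the real CRC.

    /* Toy version of the XSA-162 transmit bound: when a 4-byte FCS will be
     * appended later, the accept test must leave room for it. */
    #include <stdio.h>
    #include <string.h>

    #define BUF_SIZE 4096u
    #define FCS_LEN  4u

    static unsigned char buffer[BUF_SIZE];

    /* The old check was 'pos + len > BUF_SIZE', which admits pos + len ==
     * BUF_SIZE and lets the later FCS write run past the array. */
    static long append_fragment(size_t pos, const void *frag, size_t len)
    {
        if (pos + len > BUF_SIZE - FCS_LEN)
            return -1;                         /* skip the oversized packet */
        memcpy(buffer + pos, frag, len);
        return (long)(pos + len);
    }

    static void append_fcs(size_t pos)
    {
        const unsigned char fcs[FCS_LEN] = { 0xde, 0xad, 0xbe, 0xef };

        memcpy(buffer + pos, fcs, FCS_LEN);    /* always in range with the new bound */
    }

    int main(void)
    {
        static unsigned char frag[BUF_SIZE];
        long pos = append_fragment(0, frag, BUF_SIZE);

        printf("full-size fragment %s\n", pos < 0 ? "rejected" : "accepted");
        if ((pos = append_fragment(0, frag, BUF_SIZE - FCS_LEN)) >= 0)
            append_fcs((size_t)pos);
        return 0;
    }
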
diff --git a/system/xen/patches/xsa170.patch b/system/xen/patches/xsa170.patch
deleted file mode 100644
index f71fa19130..0000000000
--- a/system/xen/patches/xsa170.patch
+++ /dev/null
@@ -1,79 +0,0 @@
-x86/VMX: sanitize rIP before re-entering guest
-
-... to prevent guest user mode arranging for a guest crash (due to
-failed VM entry). (On the AMD system I checked, hardware is doing
-exactly the canonicalization being added here.)
-
-Note that fixing this in an architecturally correct way would be quite
-a bit more involved: Making the x86 instruction emulator check all
-branch targets for validity, plus dealing with invalid rIP resulting
-from update_guest_eip() or incoming directly during a VM exit. The only
-way to get the latter right would be by not having hardware do the
-injection.
-
-Note further that there are two early returns from
-vmx_vmexit_handler(): One (through vmx_failed_vmentry()) leads to
-domain_crash() anyway, and the other covers real mode only and can
-neither occur with a non-canonical rIP nor result in an altered rIP,
-so we don't need to force those paths through the checking logic.
-
-This is XSA-170.
-
-Reported-by: 刘令 <liuling-it@360.cn>
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
-Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
-Tested-by: Andrew Cooper <andrew.cooper3@citrix.com>
-
---- a/xen/arch/x86/hvm/vmx/vmx.c
-+++ b/xen/arch/x86/hvm/vmx/vmx.c
-@@ -2968,7 +2968,7 @@ static int vmx_handle_apic_write(void)
- void vmx_vmexit_handler(struct cpu_user_regs *regs)
- {
- unsigned long exit_qualification, exit_reason, idtv_info, intr_info = 0;
-- unsigned int vector = 0;
-+ unsigned int vector = 0, mode;
- struct vcpu *v = current;
-
- __vmread(GUEST_RIP, &regs->rip);
-@@ -3566,6 +3566,41 @@ void vmx_vmexit_handler(struct cpu_user_
- out:
- if ( nestedhvm_vcpu_in_guestmode(v) )
- nvmx_idtv_handling();
-+
-+ /*
-+ * VM entry will fail (causing the guest to get crashed) if rIP (and
-+ * rFLAGS, but we don't have an issue there) doesn't meet certain
-+ * criteria. As we must not allow less than fully privileged mode to have
-+ * such an effect on the domain, we correct rIP in that case (accepting
-+ * this not being architecturally correct behavior, as the injected #GP
-+ * fault will then not see the correct [invalid] return address).
-+ * And since we know the guest will crash, we crash it right away if it
-+ * already is in most privileged mode.
-+ */
-+ mode = vmx_guest_x86_mode(v);
-+ if ( mode == 8 ? !is_canonical_address(regs->rip)
-+ : regs->rip != regs->_eip )
-+ {
-+ struct segment_register ss;
-+
-+ gprintk(XENLOG_WARNING, "Bad rIP %lx for mode %u\n", regs->rip, mode);
-+
-+ vmx_get_segment_register(v, x86_seg_ss, &ss);
-+ if ( ss.attr.fields.dpl )
-+ {
-+ __vmread(VM_ENTRY_INTR_INFO, &intr_info);
-+ if ( !(intr_info & INTR_INFO_VALID_MASK) )
-+ hvm_inject_hw_exception(TRAP_gp_fault, 0);
-+ /* Need to fix rIP nevertheless. */
-+ if ( mode == 8 )
-+ regs->rip = (long)(regs->rip << (64 - VADDR_BITS)) >>
-+ (64 - VADDR_BITS);
-+ else
-+ regs->rip = regs->_eip;
-+ }
-+ else
-+ domain_crash(v->domain);
-+ }
- }
-
- void vmx_vmenter_helper(const struct cpu_user_regs *regs)
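
For 64-bit guests the correction is the usual canonicalization idiom: shift the address up so the top implemented bit (bit 47 with VADDR_BITS = 48) becomes the sign bit, then arithmetic-shift back down so it is sign-extended into bits 63..48. A standalone version, mirroring the expression in the hunk above:

    /* Standalone sketch of the rIP fix-up from the hunk above: shift left so
     * the top implemented bit becomes the sign bit, arithmetic-shift back so
     * it is sign-extended into bits 63..48 (VADDR_BITS == 48 here). */
    #include <stdint.h>
    #include <stdio.h>

    #define VADDR_BITS 48

    static uint64_t canonicalize(uint64_t rip)
    {
        return (uint64_t)((int64_t)(rip << (64 - VADDR_BITS)) >> (64 - VADDR_BITS));
    }

    static int is_canonical(uint64_t addr)
    {
        return canonicalize(addr) == addr;
    }

    int main(void)
    {
        uint64_t bad = 0x0000900000001234ULL;  /* bit 47 set, bits 63..48 clear */
        uint64_t fixed = canonicalize(bad);

        printf("%#018llx canonical=%d -> %#018llx canonical=%d\n",
               (unsigned long long)bad, is_canonical(bad),
               (unsigned long long)fixed, is_canonical(fixed));
        return 0;
    }
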
diff --git a/system/xen/patches/xsa172.patch b/system/xen/patches/xsa172.patch
deleted file mode 100644
index 8b1d01fa84..0000000000
--- a/system/xen/patches/xsa172.patch
+++ /dev/null
@@ -1,39 +0,0 @@
-x86: fix information leak on AMD CPUs
-
-The fix for XSA-52 was wrong, and so was the change synchronizing that
-new behavior to the FXRSTOR logic: AMD's manuals explicitly state that
-writes to the ES bit are ignored, and it instead gets calculated from
-the exception and mask bits (it gets set whenever there is an unmasked
-exception, and cleared otherwise). Hence we need to follow that model
-in our workaround.
-
-This is XSA-172.
-
-The first hunk (xen/arch/x86/i387.c:fpu_fxrstor) is CVE-2016-3159.
-The second hunk (xen/arch/x86/xstate.c:xrstor) is CVE-2016-3158.
-
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
-Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
-
---- a/xen/arch/x86/i387.c
-+++ b/xen/arch/x86/i387.c
-@@ -49,7 +49,7 @@ static inline void fpu_fxrstor(struct vc
- * sometimes new user value. Both should be ok. Use the FPU saved
- * data block as a safe address because it should be in L1.
- */
-- if ( !(fpu_ctxt->fsw & 0x0080) &&
-+ if ( !(fpu_ctxt->fsw & ~fpu_ctxt->fcw & 0x003f) &&
- boot_cpu_data.x86_vendor == X86_VENDOR_AMD )
- {
- asm volatile ( "fnclex\n\t"
---- a/xen/arch/x86/xstate.c
-+++ b/xen/arch/x86/xstate.c
-@@ -344,7 +344,7 @@ void xrstor(struct vcpu *v, uint64_t mas
- * data block as a safe address because it should be in L1.
- */
- if ( (mask & ptr->xsave_hdr.xstate_bv & XSTATE_FP) &&
-- !(ptr->fpu_sse.fsw & 0x0080) &&
-+ !(ptr->fpu_sse.fsw & ~ptr->fpu_sse.fcw & 0x003f) &&
- boot_cpu_data.x86_vendor == X86_VENDOR_AMD )
- asm volatile ( "fnclex\n\t" /* clear exceptions */
- "ffree %%st(7)\n\t" /* clear stack tag */
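
The new test encodes how AMD hardware derives the ES (error summary) bit: it reflects the six exception status bits in FSW (mask 0x003f) that are unmasked in FCW, rather than being a bit software can rely on independently. A tiny sketch of that predicate, and of a case where the old and new checks disagree:

    /* The predicate the fix switches to: an x87 exception is pending only if
     * its FSW status bit is set while its FCW mask bit is clear; 0x003f covers
     * IE, DE, ZE, OE, UE and PE. */
    #include <stdint.h>
    #include <stdio.h>

    static int x87_exception_pending(uint16_t fsw, uint16_t fcw)
    {
        return (fsw & ~fcw & 0x003f) != 0;
    }

    int main(void)
    {
        /* ES (0x0080) happens to be set in FSW, but every exception is masked:
         * the old test says "pending", the AMD-accurate one says it is not. */
        uint16_t fsw = 0x0080 | 0x0001;        /* ES + IE status */
        uint16_t fcw = 0x003f;                 /* all six exceptions masked */

        printf("old=%d new=%d\n",
               (fsw & 0x0080) != 0, x87_exception_pending(fsw, fcw));
        return 0;
    }

In the guest-switch paths patched above, that difference decides whether the fnclex/ffree workaround (and with it the leak-avoiding state scrub) runs.
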
diff --git a/system/xen/patches/xsa173-4.6.patch b/system/xen/patches/xsa173-4.6.patch
deleted file mode 100644
index aecf120c74..0000000000
--- a/system/xen/patches/xsa173-4.6.patch
+++ /dev/null
@@ -1,244 +0,0 @@
-commit 54a4651cb4e744960fb375ed99909d7dfb943caf
-Author: Tim Deegan <tim@xen.org>
-Date: Wed Mar 16 16:51:27 2016 +0000
-
- x86: limit GFNs to 32 bits for shadowed superpages.
-
- Superpage shadows store the shadowed GFN in the backpointer field,
- which for non-BIGMEM builds is 32 bits wide. Shadowing a superpage
- mapping of a guest-physical address above 2^44 would lead to the GFN
- being truncated there, and a crash when we come to remove the shadow
- from the hash table.
-
- Track the valid width of a GFN for each guest, including reporting it
- through CPUID, and enforce it in the shadow pagetables. Set the
- maximum width to 32 for guests where this truncation could occur.
-
- This is XSA-173.
-
- Signed-off-by: Tim Deegan <tim@xen.org>
- Signed-off-by: Jan Beulich <jbeulich@suse.com>
-
-Reported-by: Ling Liu <liuling-it@360.cn>
-diff --git a/xen/arch/x86/cpu/common.c b/xen/arch/x86/cpu/common.c
-index 35ef21b..528c283 100644
---- a/xen/arch/x86/cpu/common.c
-+++ b/xen/arch/x86/cpu/common.c
-@@ -38,6 +38,7 @@ integer_param("cpuid_mask_ext_edx", opt_cpuid_mask_ext_edx);
- const struct cpu_dev *__read_mostly cpu_devs[X86_VENDOR_NUM] = {};
-
- unsigned int paddr_bits __read_mostly = 36;
-+unsigned int hap_paddr_bits __read_mostly = 36;
-
- /*
- * Default host IA32_CR_PAT value to cover all memory types.
-@@ -211,7 +212,7 @@ static void __init early_cpu_detect(void)
-
- static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
- {
-- u32 tfms, capability, excap, ebx;
-+ u32 tfms, capability, excap, ebx, eax;
-
- /* Get vendor name */
- cpuid(0x00000000, &c->cpuid_level,
-@@ -248,8 +249,11 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
- }
- if ( c->extended_cpuid_level >= 0x80000004 )
- get_model_name(c); /* Default name */
-- if ( c->extended_cpuid_level >= 0x80000008 )
-- paddr_bits = cpuid_eax(0x80000008) & 0xff;
-+ if ( c->extended_cpuid_level >= 0x80000008 ) {
-+ eax = cpuid_eax(0x80000008);
-+ paddr_bits = eax & 0xff;
-+ hap_paddr_bits = ((eax >> 16) & 0xff) ?: paddr_bits;
-+ }
- }
-
- /* Might lift BIOS max_leaf=3 limit. */
-diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
-index e200aab..0b4d9f0 100644
---- a/xen/arch/x86/hvm/hvm.c
-+++ b/xen/arch/x86/hvm/hvm.c
-@@ -4567,8 +4567,7 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
- break;
-
- case 0x80000008:
-- count = cpuid_eax(0x80000008);
-- count = (count >> 16) & 0xff ?: count & 0xff;
-+ count = d->arch.paging.gfn_bits + PAGE_SHIFT;
- if ( (*eax & 0xff) > count )
- *eax = (*eax & ~0xff) | count;
-
-diff --git a/xen/arch/x86/mm/guest_walk.c b/xen/arch/x86/mm/guest_walk.c
-index 773454d..06543d3 100644
---- a/xen/arch/x86/mm/guest_walk.c
-+++ b/xen/arch/x86/mm/guest_walk.c
-@@ -93,6 +93,12 @@ void *map_domain_gfn(struct p2m_domain *p2m, gfn_t gfn, mfn_t *mfn,
- struct page_info *page;
- void *map;
-
-+ if ( gfn_x(gfn) >> p2m->domain->arch.paging.gfn_bits )
-+ {
-+ *rc = _PAGE_INVALID_BIT;
-+ return NULL;
-+ }
-+
- /* Translate the gfn, unsharing if shared */
- page = get_page_from_gfn_p2m(p2m->domain, p2m, gfn_x(gfn), p2mt, NULL,
- q);
-@@ -326,20 +332,8 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m,
- flags &= ~_PAGE_PAT;
-
- if ( gfn_x(start) & GUEST_L2_GFN_MASK & ~0x1 )
-- {
--#if GUEST_PAGING_LEVELS == 2
-- /*
-- * Note that _PAGE_INVALID_BITS is zero in this case, yielding a
-- * no-op here.
-- *
-- * Architecturally, the walk should fail if bit 21 is set (others
-- * aren't being checked at least in PSE36 mode), but we'll ignore
-- * this here in order to avoid specifying a non-natural, non-zero
-- * _PAGE_INVALID_BITS value just for that case.
-- */
--#endif
- rc |= _PAGE_INVALID_BITS;
-- }
-+
- /* Increment the pfn by the right number of 4k pages.
- * Mask out PAT and invalid bits. */
- start = _gfn((gfn_x(start) & ~GUEST_L2_GFN_MASK) +
-@@ -422,5 +416,11 @@ set_ad:
- put_page(mfn_to_page(mfn_x(gw->l1mfn)));
- }
-
-+ /* If this guest has a restricted physical address space then the
-+ * target GFN must fit within it. */
-+ if ( !(rc & _PAGE_PRESENT)
-+ && gfn_x(guest_l1e_get_gfn(gw->l1e)) >> d->arch.paging.gfn_bits )
-+ rc |= _PAGE_INVALID_BITS;
-+
- return rc;
- }
-diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c
-index 6eb2167..f3475c6 100644
---- a/xen/arch/x86/mm/hap/hap.c
-+++ b/xen/arch/x86/mm/hap/hap.c
-@@ -448,6 +448,8 @@ void hap_domain_init(struct domain *d)
- {
- INIT_PAGE_LIST_HEAD(&d->arch.paging.hap.freelist);
-
-+ d->arch.paging.gfn_bits = hap_paddr_bits - PAGE_SHIFT;
-+
- /* Use HAP logdirty mechanism. */
- paging_log_dirty_init(d, hap_enable_log_dirty,
- hap_disable_log_dirty,
-diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
-index bad8360..98d0d2c 100644
---- a/xen/arch/x86/mm/shadow/common.c
-+++ b/xen/arch/x86/mm/shadow/common.c
-@@ -51,6 +51,16 @@ int shadow_domain_init(struct domain *d, unsigned int domcr_flags)
- INIT_PAGE_LIST_HEAD(&d->arch.paging.shadow.freelist);
- INIT_PAGE_LIST_HEAD(&d->arch.paging.shadow.pinned_shadows);
-
-+ d->arch.paging.gfn_bits = paddr_bits - PAGE_SHIFT;
-+#ifndef CONFIG_BIGMEM
-+ /*
-+ * Shadowed superpages store GFNs in 32-bit page_info fields.
-+ * Note that we cannot use guest_supports_superpages() here.
-+ */
-+ if ( !is_pv_domain(d) || opt_allow_superpage )
-+ d->arch.paging.gfn_bits = 32;
-+#endif
-+
- /* Use shadow pagetables for log-dirty support */
- paging_log_dirty_init(d, sh_enable_log_dirty,
- sh_disable_log_dirty, sh_clean_dirty_bitmap);
-diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
-index 43c9488..71477fe 100644
---- a/xen/arch/x86/mm/shadow/multi.c
-+++ b/xen/arch/x86/mm/shadow/multi.c
-@@ -525,7 +525,8 @@ _sh_propagate(struct vcpu *v,
- ASSERT(GUEST_PAGING_LEVELS > 3 || level != 3);
-
- /* Check there's something for the shadows to map to */
-- if ( !p2m_is_valid(p2mt) && !p2m_is_grant(p2mt) )
-+ if ( (!p2m_is_valid(p2mt) && !p2m_is_grant(p2mt))
-+ || gfn_x(target_gfn) >> d->arch.paging.gfn_bits )
- {
- *sp = shadow_l1e_empty();
- goto done;
-diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
-index c6c6e71..74c3a52 100644
---- a/xen/include/asm-x86/domain.h
-+++ b/xen/include/asm-x86/domain.h
-@@ -193,6 +193,9 @@ struct paging_domain {
- /* log dirty support */
- struct log_dirty_domain log_dirty;
-
-+ /* Number of valid bits in a gfn. */
-+ unsigned int gfn_bits;
-+
- /* preemption handling */
- struct {
- const struct domain *dom;
-diff --git a/xen/include/asm-x86/guest_pt.h b/xen/include/asm-x86/guest_pt.h
-index f8a0d76..b5db401 100644
---- a/xen/include/asm-x86/guest_pt.h
-+++ b/xen/include/asm-x86/guest_pt.h
-@@ -210,15 +210,17 @@ guest_supports_nx(struct vcpu *v)
- }
-
-
--/* Some bits are invalid in any pagetable entry. */
--#if GUEST_PAGING_LEVELS == 2
--#define _PAGE_INVALID_BITS (0)
--#elif GUEST_PAGING_LEVELS == 3
--#define _PAGE_INVALID_BITS \
-- get_pte_flags(((1ull<<63) - 1) & ~((1ull<<paddr_bits) - 1))
--#else /* GUEST_PAGING_LEVELS == 4 */
-+/*
-+ * Some bits are invalid in any pagetable entry.
-+ * Normal flags values get represented in 24-bit values (see
-+ * get_pte_flags() and put_pte_flags()), so set bit 24 in
-+ * addition to be able to flag out of range frame numbers.
-+ */
-+#if GUEST_PAGING_LEVELS == 3
- #define _PAGE_INVALID_BITS \
-- get_pte_flags(((1ull<<52) - 1) & ~((1ull<<paddr_bits) - 1))
-+ (_PAGE_INVALID_BIT | get_pte_flags(((1ull << 63) - 1) & ~(PAGE_SIZE - 1)))
-+#else /* 2-level and 4-level */
-+#define _PAGE_INVALID_BITS _PAGE_INVALID_BIT
- #endif
-
-
-diff --git a/xen/include/asm-x86/processor.h b/xen/include/asm-x86/processor.h
-index f507f5e..a200470 100644
---- a/xen/include/asm-x86/processor.h
-+++ b/xen/include/asm-x86/processor.h
-@@ -212,6 +212,8 @@ extern u32 cpuid_ext_features;
-
- /* Maximum width of physical addresses supported by the hardware */
- extern unsigned int paddr_bits;
-+/* Max physical address width supported within HAP guests */
-+extern unsigned int hap_paddr_bits;
-
- extern const struct x86_cpu_id *x86_match_cpu(const struct x86_cpu_id table[]);
-
-diff --git a/xen/include/asm-x86/x86_64/page.h b/xen/include/asm-x86/x86_64/page.h
-index 19ab4d0..eb5e2fd 100644
---- a/xen/include/asm-x86/x86_64/page.h
-+++ b/xen/include/asm-x86/x86_64/page.h
-@@ -141,6 +141,12 @@ typedef l4_pgentry_t root_pgentry_t;
- #define _PAGE_GNTTAB (1U<<22)
-
- /*
-+ * Bit 24 of a 24-bit flag mask! This is not any bit of a real pte,
-+ * and is only used for signalling in variables that contain flags.
-+ */
-+#define _PAGE_INVALID_BIT (1U<<24)
-+
-+/*
- * Bit 12 of a 24-bit flag mask. This corresponds to bit 52 of a pte.
- * This is needed to distinguish between user and kernel PTEs since _PAGE_USER
- * is asserted for both.
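
The guard added in guest_walk_tables(), _sh_propagate() and hvm_cpuid() is the same one-liner each time: a GFN is acceptable only if shifting it right by the domain's gfn_bits yields zero. The sketch below applies that check with the two widths the patch uses; the 46-bit physical address width is just an example value.

    /* The added range check in isolation: a GFN fits only if gfn >> gfn_bits
     * is zero. The 32-bit cap is the one applied when superpage shadows are
     * possible; 46 physical address bits is an arbitrary example. */
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12

    static int gfn_in_range(uint64_t gfn, unsigned int gfn_bits)
    {
        return (gfn >> gfn_bits) == 0;
    }

    int main(void)
    {
        unsigned int hap_bits    = 46 - PAGE_SHIFT;  /* e.g. hap_paddr_bits = 46 */
        unsigned int shadow_bits = 32;               /* capped for superpage shadows */
        uint64_t gfn = (1ULL << 45) >> PAGE_SHIFT;   /* guest-physical 2^45 */

        printf("gfn %#llx: hap ok=%d, shadow ok=%d\n", (unsigned long long)gfn,
               gfn_in_range(gfn, hap_bits), gfn_in_range(gfn, shadow_bits));
        return 0;
    }
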
diff --git a/system/xen/patches/xsa176.patch b/system/xen/patches/xsa176.patch
deleted file mode 100644
index 1c15abd3e3..0000000000
--- a/system/xen/patches/xsa176.patch
+++ /dev/null
@@ -1,45 +0,0 @@
-x86/mm: fully honor PS bits in guest page table walks
-
-In L4 entries it is currently unconditionally reserved (and hence
-should, when set, always result in a reserved bit page fault), and is
-reserved on hardware not supporting 1Gb pages (and hence should, when
-set, similarly cause a reserved bit page fault on such hardware).
-
-This is CVE-2016-4480 / XSA-176.
-
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
-Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
-Tested-by: Andrew Cooper <andrew.cooper3@citrix.com>
-
---- a/xen/arch/x86/mm/guest_walk.c
-+++ b/xen/arch/x86/mm/guest_walk.c
-@@ -226,6 +226,11 @@ guest_walk_tables(struct vcpu *v, struct
- rc |= _PAGE_PRESENT;
- goto out;
- }
-+ if ( gflags & _PAGE_PSE )
-+ {
-+ rc |= _PAGE_PSE | _PAGE_INVALID_BIT;
-+ goto out;
-+ }
- rc |= ((gflags & mflags) ^ mflags);
-
- /* Map the l3 table */
-@@ -247,7 +252,7 @@ guest_walk_tables(struct vcpu *v, struct
- }
- rc |= ((gflags & mflags) ^ mflags);
-
-- pse1G = (gflags & _PAGE_PSE) && guest_supports_1G_superpages(v);
-+ pse1G = !!(gflags & _PAGE_PSE);
-
- if ( pse1G )
- {
-@@ -267,6 +272,8 @@ guest_walk_tables(struct vcpu *v, struct
- /* _PAGE_PSE_PAT not set: remove _PAGE_PAT from flags. */
- flags &= ~_PAGE_PAT;
-
-+ if ( !guest_supports_1G_superpages(v) )
-+ rc |= _PAGE_PSE | _PAGE_INVALID_BIT;
- if ( gfn_x(start) & GUEST_L3_GFN_MASK & ~0x1 )
- rc |= _PAGE_INVALID_BITS;
-
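
Stated on its own, the rule the walker now enforces is: a set PS bit is a reserved-bit violation in any L4 entry, and in an L3 entry whenever the guest lacks 1GB-superpage support. The sketch below spells that out using the patch's own _PAGE_INVALID_BIT marker; the helper names are invented.

    /* The PS-bit rule spelled out: reserved in every L4 entry, and in an L3
     * entry when the guest has no 1GB-superpage support. */
    #include <stdio.h>

    #define _PAGE_PSE          0x080u          /* bit 7 of the PTE flags */
    #define _PAGE_INVALID_BIT  (1u << 24)      /* software-only marker from the patch */

    static unsigned int walk_l4(unsigned int gflags)
    {
        return (gflags & _PAGE_PSE) ? (_PAGE_PSE | _PAGE_INVALID_BIT) : 0;
    }

    static unsigned int walk_l3(unsigned int gflags, int guest_has_1g_pages)
    {
        if ((gflags & _PAGE_PSE) && !guest_has_1g_pages)
            return _PAGE_PSE | _PAGE_INVALID_BIT;   /* reserved-bit #PF */
        return 0;
    }

    int main(void)
    {
        printf("L4 rc=%#x, L3 rc (no 1G)=%#x, L3 rc (1G ok)=%#x\n",
               walk_l4(_PAGE_PSE), walk_l3(_PAGE_PSE, 0), walk_l3(_PAGE_PSE, 1));
        return 0;
    }
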
diff --git a/system/xen/patches/xsa179-qemuu-4.6-0001-vga-fix-banked-access-bounds-checking-CVE-2016-3710.patch b/system/xen/patches/xsa179-qemuu-4.6-0001-vga-fix-banked-access-bounds-checking-CVE-2016-3710.patch
deleted file mode 100644
index 3ad3cf4327..0000000000
--- a/system/xen/patches/xsa179-qemuu-4.6-0001-vga-fix-banked-access-bounds-checking-CVE-2016-3710.patch
+++ /dev/null
@@ -1,108 +0,0 @@
-From b16db5ab2d0c5ff755e08942f4c8e8f9f8618eae Mon Sep 17 00:00:00 2001
-From: Gerd Hoffmann <kraxel@redhat.com>
-Date: Tue, 26 Apr 2016 08:49:10 +0200
-Subject: [PATCH 1/5] vga: fix banked access bounds checking (CVE-2016-3710)
-
-vga allows banked access to video memory using the window at 0xa00000
-and it supports a different access modes with different address
-calculations.
-
-The VBE bochs extentions support banked access too, using the
-VBE_DISPI_INDEX_BANK register. The code tries to take the different
-address calculations into account and applies different limits to
-VBE_DISPI_INDEX_BANK depending on the current access mode.
-
-Which is probably effective in stopping misprogramming by accident.
-But from a security point of view completely useless as an attacker
-can easily change access modes after setting the bank register.
-
-Drop the bogus check, add range checks to vga_mem_{readb,writeb}
-instead.
-
-Fixes: CVE-2016-3710
-Reported-by: Qinghao Tang <luodalongde@gmail.com>
-Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
-Signed-off-by: Stefano Stabellini <sstabellini@kernel.org>
----
- hw/display/vga.c | 24 ++++++++++++++++++------
- 1 file changed, 18 insertions(+), 6 deletions(-)
-
-diff --git a/hw/display/vga.c b/hw/display/vga.c
-index 52eaf05..b577712 100644
---- a/hw/display/vga.c
-+++ b/hw/display/vga.c
-@@ -178,6 +178,7 @@ static void vga_update_memory_access(VGACommonState *s)
- break;
- }
- base += isa_mem_base;
-+ assert(offset + size <= s->vram_size);
- memory_region_init_alias(&s->chain4_alias, memory_region_owner(&s->vram),
- "vga.chain4", &s->vram, offset, size);
- memory_region_add_subregion_overlap(s->legacy_address_space, base,
-@@ -715,11 +716,7 @@ void vbe_ioport_write_data(void *opaque, uint32_t addr, uint32_t val)
- vbe_fixup_regs(s);
- break;
- case VBE_DISPI_INDEX_BANK:
-- if (s->vbe_regs[VBE_DISPI_INDEX_BPP] == 4) {
-- val &= (s->vbe_bank_mask >> 2);
-- } else {
-- val &= s->vbe_bank_mask;
-- }
-+ val &= s->vbe_bank_mask;
- s->vbe_regs[s->vbe_index] = val;
- s->bank_offset = (val << 16);
- vga_update_memory_access(s);
-@@ -818,13 +815,21 @@ uint32_t vga_mem_readb(VGACommonState *s, hwaddr addr)
-
- if (s->sr[VGA_SEQ_MEMORY_MODE] & VGA_SR04_CHN_4M) {
- /* chain 4 mode : simplest access */
-+ assert(addr < s->vram_size);
- ret = s->vram_ptr[addr];
- } else if (s->gr[VGA_GFX_MODE] & 0x10) {
- /* odd/even mode (aka text mode mapping) */
- plane = (s->gr[VGA_GFX_PLANE_READ] & 2) | (addr & 1);
-- ret = s->vram_ptr[((addr & ~1) << 1) | plane];
-+ addr = ((addr & ~1) << 1) | plane;
-+ if (addr >= s->vram_size) {
-+ return 0xff;
-+ }
-+ ret = s->vram_ptr[addr];
- } else {
- /* standard VGA latched access */
-+ if (addr * sizeof(uint32_t) >= s->vram_size) {
-+ return 0xff;
-+ }
- s->latch = ((uint32_t *)s->vram_ptr)[addr];
-
- if (!(s->gr[VGA_GFX_MODE] & 0x08)) {
-@@ -881,6 +886,7 @@ void vga_mem_writeb(VGACommonState *s, hwaddr addr, uint32_t val)
- plane = addr & 3;
- mask = (1 << plane);
- if (s->sr[VGA_SEQ_PLANE_WRITE] & mask) {
-+ assert(addr < s->vram_size);
- s->vram_ptr[addr] = val;
- #ifdef DEBUG_VGA_MEM
- printf("vga: chain4: [0x" TARGET_FMT_plx "]\n", addr);
-@@ -894,6 +900,9 @@ void vga_mem_writeb(VGACommonState *s, hwaddr addr, uint32_t val)
- mask = (1 << plane);
- if (s->sr[VGA_SEQ_PLANE_WRITE] & mask) {
- addr = ((addr & ~1) << 1) | plane;
-+ if (addr >= s->vram_size) {
-+ return;
-+ }
- s->vram_ptr[addr] = val;
- #ifdef DEBUG_VGA_MEM
- printf("vga: odd/even: [0x" TARGET_FMT_plx "]\n", addr);
-@@ -967,6 +976,9 @@ void vga_mem_writeb(VGACommonState *s, hwaddr addr, uint32_t val)
- mask = s->sr[VGA_SEQ_PLANE_WRITE];
- s->plane_updated |= mask; /* only used to detect font change */
- write_mask = mask16[mask];
-+ if (addr * sizeof(uint32_t) >= s->vram_size) {
-+ return;
-+ }
- ((uint32_t *)s->vram_ptr)[addr] =
- (((uint32_t *)s->vram_ptr)[addr] & ~write_mask) |
- (val & write_mask);
---
-1.9.1
-
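
The shape of the fix is the same in each addressing mode: compute the final offset first, compare it against the VRAM size, and return 0xff (or drop the write) when it is out of range. A compact sketch of the read side follows; the struct and mode names are simplified stand-ins for QEMU's, and the latched case returns a single byte where the real code fills a 32-bit latch.

    /* Compact sketch of the XSA-179 read-side checks: validate the derived
     * offset against the VRAM size for each addressing mode before touching
     * memory. */
    #include <stdint.h>
    #include <stdlib.h>

    enum mode { CHAIN4, ODD_EVEN, LATCHED };

    struct vga {
        uint8_t *vram;
        size_t vram_size;
    };

    static uint8_t vga_read(struct vga *s, size_t addr, enum mode m, unsigned plane)
    {
        switch (m) {
        case CHAIN4:
            if (addr >= s->vram_size)
                return 0xff;                   /* out of range: all-ones, no access */
            return s->vram[addr];
        case ODD_EVEN:
            addr = ((addr & ~(size_t)1) << 1) | (plane & 1);
            if (addr >= s->vram_size)
                return 0xff;
            return s->vram[addr];
        case LATCHED:
            if (addr * sizeof(uint32_t) >= s->vram_size)
                return 0xff;                   /* the 32-bit latch read must fit too */
            return s->vram[addr * sizeof(uint32_t)];
        }
        return 0xff;
    }

    int main(void)
    {
        struct vga s = { .vram_size = 256 * 1024 };
        uint8_t v;

        s.vram = calloc(s.vram_size, 1);
        if (!s.vram)
            return 1;
        v = vga_read(&s, s.vram_size + 16, CHAIN4, 0);   /* rejected, returns 0xff */
        free(s.vram);
        return v == 0xff ? 0 : 1;
    }
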
diff --git a/system/xen/patches/xsa179-qemuu-4.6-0002-vga-add-vbe_enabled-helper.patch b/system/xen/patches/xsa179-qemuu-4.6-0002-vga-add-vbe_enabled-helper.patch
deleted file mode 100644
index 0daa3141eb..0000000000
--- a/system/xen/patches/xsa179-qemuu-4.6-0002-vga-add-vbe_enabled-helper.patch
+++ /dev/null
@@ -1,68 +0,0 @@
-From e026859e9aecf8635daf06e9fc2325239f458959 Mon Sep 17 00:00:00 2001
-From: Gerd Hoffmann <kraxel@redhat.com>
-Date: Tue, 26 Apr 2016 14:11:34 +0200
-Subject: [PATCH 2/5] vga: add vbe_enabled() helper
-
-Makes code a bit easier to read.
-
-Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
-Signed-off-by: Stefano Stabellini <sstabellini@kernel.org>
----
- hw/display/vga.c | 13 +++++++++----
- 1 file changed, 9 insertions(+), 4 deletions(-)
-
-diff --git a/hw/display/vga.c b/hw/display/vga.c
-index b577712..ebf63ff 100644
---- a/hw/display/vga.c
-+++ b/hw/display/vga.c
-@@ -140,6 +140,11 @@ static uint32_t expand4[256];
- static uint16_t expand2[256];
- static uint8_t expand4to8[16];
-
-+static inline bool vbe_enabled(VGACommonState *s)
-+{
-+ return s->vbe_regs[VBE_DISPI_INDEX_ENABLE] & VBE_DISPI_ENABLED;
-+}
-+
- static void vga_update_memory_access(VGACommonState *s)
- {
- hwaddr base, offset, size;
-@@ -563,7 +568,7 @@ static void vbe_fixup_regs(VGACommonState *s)
- uint16_t *r = s->vbe_regs;
- uint32_t bits, linelength, maxy, offset;
-
-- if (!(r[VBE_DISPI_INDEX_ENABLE] & VBE_DISPI_ENABLED)) {
-+ if (!vbe_enabled(s)) {
- /* vbe is turned off -- nothing to do */
- return;
- }
-@@ -1057,7 +1062,7 @@ static void vga_get_offsets(VGACommonState *s,
- {
- uint32_t start_addr, line_offset, line_compare;
-
-- if (s->vbe_regs[VBE_DISPI_INDEX_ENABLE] & VBE_DISPI_ENABLED) {
-+ if (vbe_enabled(s)) {
- line_offset = s->vbe_line_offset;
- start_addr = s->vbe_start_addr;
- line_compare = 65535;
-@@ -1382,7 +1387,7 @@ static int vga_get_bpp(VGACommonState *s)
- {
- int ret;
-
-- if (s->vbe_regs[VBE_DISPI_INDEX_ENABLE] & VBE_DISPI_ENABLED) {
-+ if (vbe_enabled(s)) {
- ret = s->vbe_regs[VBE_DISPI_INDEX_BPP];
- } else {
- ret = 0;
-@@ -1394,7 +1399,7 @@ static void vga_get_resolution(VGACommonState *s, int *pwidth, int *pheight)
- {
- int width, height;
-
-- if (s->vbe_regs[VBE_DISPI_INDEX_ENABLE] & VBE_DISPI_ENABLED) {
-+ if (vbe_enabled(s)) {
- width = s->vbe_regs[VBE_DISPI_INDEX_XRES];
- height = s->vbe_regs[VBE_DISPI_INDEX_YRES];
- } else {
---
-1.9.1
-
diff --git a/system/xen/patches/xsa179-qemuu-4.6-0003-vga-factor-out-vga-register-setup.patch b/system/xen/patches/xsa179-qemuu-4.6-0003-vga-factor-out-vga-register-setup.patch
deleted file mode 100644
index 70e4bdbca8..0000000000
--- a/system/xen/patches/xsa179-qemuu-4.6-0003-vga-factor-out-vga-register-setup.patch
+++ /dev/null
@@ -1,127 +0,0 @@
-From b36a4e26caf7a050a6e8593527c26bfa4f47a758 Mon Sep 17 00:00:00 2001
-From: Gerd Hoffmann <kraxel@redhat.com>
-Date: Tue, 26 Apr 2016 15:24:18 +0200
-Subject: [PATCH 3/5] vga: factor out vga register setup
-
-When enabling vbe mode qemu will set up a bunch of vga registers to make
-sure the vga emulation operates in the correct mode for a linear
-framebuffer. Move that code to a separate function so we can call it
-from other places too.
-
-Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
-Signed-off-by: Stefano Stabellini <sstabellini@kernel.org>
----
- hw/display/vga.c | 78 ++++++++++++++++++++++++++++++++------------------------
- 1 file changed, 44 insertions(+), 34 deletions(-)
-
-diff --git a/hw/display/vga.c b/hw/display/vga.c
-index ebf63ff..fb822f4 100644
---- a/hw/display/vga.c
-+++ b/hw/display/vga.c
-@@ -643,6 +643,49 @@ static void vbe_fixup_regs(VGACommonState *s)
- s->vbe_start_addr = offset / 4;
- }
-
-+/* we initialize the VGA graphic mode */
-+static void vbe_update_vgaregs(VGACommonState *s)
-+{
-+ int h, shift_control;
-+
-+ if (!vbe_enabled(s)) {
-+ /* vbe is turned off -- nothing to do */
-+ return;
-+ }
-+
-+ /* graphic mode + memory map 1 */
-+ s->gr[VGA_GFX_MISC] = (s->gr[VGA_GFX_MISC] & ~0x0c) | 0x04 |
-+ VGA_GR06_GRAPHICS_MODE;
-+ s->cr[VGA_CRTC_MODE] |= 3; /* no CGA modes */
-+ s->cr[VGA_CRTC_OFFSET] = s->vbe_line_offset >> 3;
-+ /* width */
-+ s->cr[VGA_CRTC_H_DISP] =
-+ (s->vbe_regs[VBE_DISPI_INDEX_XRES] >> 3) - 1;
-+ /* height (only meaningful if < 1024) */
-+ h = s->vbe_regs[VBE_DISPI_INDEX_YRES] - 1;
-+ s->cr[VGA_CRTC_V_DISP_END] = h;
-+ s->cr[VGA_CRTC_OVERFLOW] = (s->cr[VGA_CRTC_OVERFLOW] & ~0x42) |
-+ ((h >> 7) & 0x02) | ((h >> 3) & 0x40);
-+ /* line compare to 1023 */
-+ s->cr[VGA_CRTC_LINE_COMPARE] = 0xff;
-+ s->cr[VGA_CRTC_OVERFLOW] |= 0x10;
-+ s->cr[VGA_CRTC_MAX_SCAN] |= 0x40;
-+
-+ if (s->vbe_regs[VBE_DISPI_INDEX_BPP] == 4) {
-+ shift_control = 0;
-+ s->sr[VGA_SEQ_CLOCK_MODE] &= ~8; /* no double line */
-+ } else {
-+ shift_control = 2;
-+ /* set chain 4 mode */
-+ s->sr[VGA_SEQ_MEMORY_MODE] |= VGA_SR04_CHN_4M;
-+ /* activate all planes */
-+ s->sr[VGA_SEQ_PLANE_WRITE] |= VGA_SR02_ALL_PLANES;
-+ }
-+ s->gr[VGA_GFX_MODE] = (s->gr[VGA_GFX_MODE] & ~0x60) |
-+ (shift_control << 5);
-+ s->cr[VGA_CRTC_MAX_SCAN] &= ~0x9f; /* no double scan */
-+}
-+
- static uint32_t vbe_ioport_read_index(void *opaque, uint32_t addr)
- {
- VGACommonState *s = opaque;
-@@ -729,52 +772,19 @@ void vbe_ioport_write_data(void *opaque, uint32_t addr, uint32_t val)
- case VBE_DISPI_INDEX_ENABLE:
- if ((val & VBE_DISPI_ENABLED) &&
- !(s->vbe_regs[VBE_DISPI_INDEX_ENABLE] & VBE_DISPI_ENABLED)) {
-- int h, shift_control;
-
- s->vbe_regs[VBE_DISPI_INDEX_VIRT_WIDTH] = 0;
- s->vbe_regs[VBE_DISPI_INDEX_X_OFFSET] = 0;
- s->vbe_regs[VBE_DISPI_INDEX_Y_OFFSET] = 0;
- s->vbe_regs[VBE_DISPI_INDEX_ENABLE] |= VBE_DISPI_ENABLED;
- vbe_fixup_regs(s);
-+ vbe_update_vgaregs(s);
-
- /* clear the screen */
- if (!(val & VBE_DISPI_NOCLEARMEM)) {
- memset(s->vram_ptr, 0,
- s->vbe_regs[VBE_DISPI_INDEX_YRES] * s->vbe_line_offset);
- }
--
-- /* we initialize the VGA graphic mode */
-- /* graphic mode + memory map 1 */
-- s->gr[VGA_GFX_MISC] = (s->gr[VGA_GFX_MISC] & ~0x0c) | 0x04 |
-- VGA_GR06_GRAPHICS_MODE;
-- s->cr[VGA_CRTC_MODE] |= 3; /* no CGA modes */
-- s->cr[VGA_CRTC_OFFSET] = s->vbe_line_offset >> 3;
-- /* width */
-- s->cr[VGA_CRTC_H_DISP] =
-- (s->vbe_regs[VBE_DISPI_INDEX_XRES] >> 3) - 1;
-- /* height (only meaningful if < 1024) */
-- h = s->vbe_regs[VBE_DISPI_INDEX_YRES] - 1;
-- s->cr[VGA_CRTC_V_DISP_END] = h;
-- s->cr[VGA_CRTC_OVERFLOW] = (s->cr[VGA_CRTC_OVERFLOW] & ~0x42) |
-- ((h >> 7) & 0x02) | ((h >> 3) & 0x40);
-- /* line compare to 1023 */
-- s->cr[VGA_CRTC_LINE_COMPARE] = 0xff;
-- s->cr[VGA_CRTC_OVERFLOW] |= 0x10;
-- s->cr[VGA_CRTC_MAX_SCAN] |= 0x40;
--
-- if (s->vbe_regs[VBE_DISPI_INDEX_BPP] == 4) {
-- shift_control = 0;
-- s->sr[VGA_SEQ_CLOCK_MODE] &= ~8; /* no double line */
-- } else {
-- shift_control = 2;
-- /* set chain 4 mode */
-- s->sr[VGA_SEQ_MEMORY_MODE] |= VGA_SR04_CHN_4M;
-- /* activate all planes */
-- s->sr[VGA_SEQ_PLANE_WRITE] |= VGA_SR02_ALL_PLANES;
-- }
-- s->gr[VGA_GFX_MODE] = (s->gr[VGA_GFX_MODE] & ~0x60) |
-- (shift_control << 5);
-- s->cr[VGA_CRTC_MAX_SCAN] &= ~0x9f; /* no double scan */
- } else {
- s->bank_offset = 0;
- }
---
-1.9.1
-
diff --git a/system/xen/patches/xsa179-qemuu-4.6-0004-vga-update-vga-register-setup-on-vbe-changes.patch b/system/xen/patches/xsa179-qemuu-4.6-0004-vga-update-vga-register-setup-on-vbe-changes.patch
deleted file mode 100644
index 0638edb91b..0000000000
--- a/system/xen/patches/xsa179-qemuu-4.6-0004-vga-update-vga-register-setup-on-vbe-changes.patch
+++ /dev/null
@@ -1,29 +0,0 @@
-From ef8bd1b26a597ae7c306227655626640093cb7a2 Mon Sep 17 00:00:00 2001
-From: Gerd Hoffmann <kraxel@redhat.com>
-Date: Tue, 26 Apr 2016 15:39:22 +0200
-Subject: [PATCH 4/5] vga: update vga register setup on vbe changes
-
-Call the new vbe_update_vgaregs() function on vbe configuration
-changes, to make sure vga registers are up-to-date.
-
-Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
-Signed-off-by: Stefano Stabellini <sstabellini@kernel.org>
----
- hw/display/vga.c | 1 +
- 1 file changed, 1 insertion(+)
-
-diff --git a/hw/display/vga.c b/hw/display/vga.c
-index fb822f4..3739758 100644
---- a/hw/display/vga.c
-+++ b/hw/display/vga.c
-@@ -762,6 +762,7 @@ void vbe_ioport_write_data(void *opaque, uint32_t addr, uint32_t val)
- case VBE_DISPI_INDEX_Y_OFFSET:
- s->vbe_regs[s->vbe_index] = val;
- vbe_fixup_regs(s);
-+ vbe_update_vgaregs(s);
- break;
- case VBE_DISPI_INDEX_BANK:
- val &= s->vbe_bank_mask;
---
-1.9.1
-
diff --git a/system/xen/patches/xsa179-qemuu-4.6-0005-vga-make-sure-vga-register-setup-for-vbe-stays-intac.patch b/system/xen/patches/xsa179-qemuu-4.6-0005-vga-make-sure-vga-register-setup-for-vbe-stays-intac.patch
deleted file mode 100644
index c22f2d7195..0000000000
--- a/system/xen/patches/xsa179-qemuu-4.6-0005-vga-make-sure-vga-register-setup-for-vbe-stays-intac.patch
+++ /dev/null
@@ -1,75 +0,0 @@
-From 92456c0c361d5da858d544647c6246ec78ed922b Mon Sep 17 00:00:00 2001
-From: Gerd Hoffmann <kraxel@redhat.com>
-Date: Tue, 26 Apr 2016 14:48:06 +0200
-Subject: [PATCH 5/5] vga: make sure vga register setup for vbe stays intact
- (CVE-2016-3712).
-
-Call vbe_update_vgaregs() when the guest touches GFX, SEQ or CRT
-registers, to make sure the vga registers will always have the
-values needed by vbe mode. This makes sure the sanity checks
-applied by vbe_fixup_regs() are effective.
-
-Without this guests can muck with shift_control, can turn on planar
-vga modes or text mode emulation while VBE is active, making qemu
-take code paths meant for CGA compatibility, but with the very
-large display widths and heights settable using VBE registers.
-
-Which is good for one or another buffer overflow. Not that
-critical, as these are typically read overflows happening somewhere
-in the display code. So guests can DoS by crashing qemu with a
-segfault, but it is probably not possible to break out of the VM.
-
-Fixes: CVE-2016-3712
-Reported-by: Zuozhi Fzz <zuozhi.fzz@alibaba-inc.com>
-Reported-by: P J P <ppandit@redhat.com>
-Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
-Signed-off-by: Stefano Stabellini <sstabellini@kernel.org>
----
- hw/display/vga.c | 6 ++++++
- 1 file changed, 6 insertions(+)
-
-diff --git a/hw/display/vga.c b/hw/display/vga.c
-index 3739758..e7be97e 100644
---- a/hw/display/vga.c
-+++ b/hw/display/vga.c
-@@ -140,6 +140,8 @@ static uint32_t expand4[256];
- static uint16_t expand2[256];
- static uint8_t expand4to8[16];
-
-+static void vbe_update_vgaregs(VGACommonState *s);
-+
- static inline bool vbe_enabled(VGACommonState *s)
- {
- return s->vbe_regs[VBE_DISPI_INDEX_ENABLE] & VBE_DISPI_ENABLED;
-@@ -483,6 +485,7 @@ void vga_ioport_write(void *opaque, uint32_t addr, uint32_t val)
- printf("vga: write SR%x = 0x%02x\n", s->sr_index, val);
- #endif
- s->sr[s->sr_index] = val & sr_mask[s->sr_index];
-+ vbe_update_vgaregs(s);
- if (s->sr_index == VGA_SEQ_CLOCK_MODE) {
- s->update_retrace_info(s);
- }
-@@ -514,6 +517,7 @@ void vga_ioport_write(void *opaque, uint32_t addr, uint32_t val)
- printf("vga: write GR%x = 0x%02x\n", s->gr_index, val);
- #endif
- s->gr[s->gr_index] = val & gr_mask[s->gr_index];
-+ vbe_update_vgaregs(s);
- vga_update_memory_access(s);
- break;
- case VGA_CRT_IM:
-@@ -532,10 +536,12 @@ void vga_ioport_write(void *opaque, uint32_t addr, uint32_t val)
- if (s->cr_index == VGA_CRTC_OVERFLOW) {
- s->cr[VGA_CRTC_OVERFLOW] = (s->cr[VGA_CRTC_OVERFLOW] & ~0x10) |
- (val & 0x10);
-+ vbe_update_vgaregs(s);
- }
- return;
- }
- s->cr[s->cr_index] = val;
-+ vbe_update_vgaregs(s);
-
- switch(s->cr_index) {
- case VGA_CRTC_H_TOTAL:
---
-1.9.1
-
diff --git a/system/xen/patches/xsa181.patch b/system/xen/patches/xsa181.patch
deleted file mode 100644
index c44541ec4d..0000000000
--- a/system/xen/patches/xsa181.patch
+++ /dev/null
@@ -1,38 +0,0 @@
-From ee488e2133e581967d13d5287d7bd654e9b2e2a6 Mon Sep 17 00:00:00 2001
-From: Andrew Cooper <andrew.cooper3@citrix.com>
-Date: Thu, 2 Jun 2016 14:19:00 +0100
-Subject: [PATCH] xen/arm: Don't free p2m->root in p2m_teardown() before it has
- been allocated
-
-If p2m_init() didn't complete successfully (e.g. due to VMID
-exhaustion), p2m_teardown() is called and unconditionally tries to free
-p2m->root before it has been allocated. free_domheap_pages() doesn't
-tolerate NULL pointers.
-
-This is XSA-181
-
-Reported-by: Aaron Cornelius <Aaron.Cornelius@dornerworks.com>
-Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
-Reviewed-by: Jan Beulich <jbeulich@suse.com>
-Reviewed-by: Julien Grall <julien.grall@arm.com>
----
- xen/arch/arm/p2m.c | 3 ++-
- 1 file changed, 2 insertions(+), 1 deletion(-)
-
-diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
-index 838d004..6a19c57 100644
---- a/xen/arch/arm/p2m.c
-+++ b/xen/arch/arm/p2m.c
-@@ -1408,7 +1408,8 @@ void p2m_teardown(struct domain *d)
- while ( (pg = page_list_remove_head(&p2m->pages)) )
- free_domheap_page(pg);
-
-- free_domheap_pages(p2m->root, P2M_ROOT_ORDER);
-+ if ( p2m->root )
-+ free_domheap_pages(p2m->root, P2M_ROOT_ORDER);
-
- p2m->root = NULL;
-
---
-2.1.4
-
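
The patch is a small instance of a general rule: a teardown path must tolerate every field its init path may have left unset, because free_domheap_pages(), unlike standard free(), does not accept NULL. A generic sketch of that shape, with malloc/free standing in for the Xen domheap allocator:

    /* Generic sketch of the XSA-181 shape: destroy must cope with a partially
     * constructed object. Note that plain free() does tolerate NULL while
     * free_domheap_pages() does not, hence the explicit guard from the patch. */
    #include <stdlib.h>
    #include <stdio.h>

    struct p2m {
        void *root;                  /* still NULL if init bailed out early */
    };

    static int p2m_init(struct p2m *p, int fail_early)
    {
        p->root = NULL;
        if (fail_early)
            return -1;               /* e.g. VMID exhaustion: no root allocated */
        p->root = malloc(4096);
        return p->root ? 0 : -1;
    }

    static void p2m_teardown(struct p2m *p)
    {
        if (p->root)                 /* the added guard */
            free(p->root);
        p->root = NULL;
    }

    int main(void)
    {
        struct p2m p;

        if (p2m_init(&p, 1) < 0)     /* init fails early... */
            p2m_teardown(&p);        /* ...teardown must still be safe */
        printf("teardown after failed init: ok\n");
        return 0;
    }
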