Diffstat (limited to 'system/xen/xsa/xsa343-1.patch')
-rw-r--r--  system/xen/xsa/xsa343-1.patch  |  199
1 file changed, 199 insertions(+), 0 deletions(-)
diff --git a/system/xen/xsa/xsa343-1.patch b/system/xen/xsa/xsa343-1.patch
new file mode 100644
index 0000000000..0abbc03e8d
--- /dev/null
+++ b/system/xen/xsa/xsa343-1.patch
@@ -0,0 +1,199 @@
+From: Jan Beulich <jbeulich@suse.com>
+Subject: evtchn: evtchn_reset() shouldn't succeed with still-open ports
+
+While the function closes all ports, it does so without holding any
+lock, and hence racing requests may be issued causing new ports to get
+opened. This would have been problematic in particular if such a newly
+opened port had a port number above the new implementation limit (i.e.
+when switching from FIFO to 2-level) after the reset, as prior to
+"evtchn: relax port_is_valid()" this could have led to e.g.
+evtchn_close()'s "BUG_ON(!port_is_valid(d2, port2))" to trigger.
+
+Introduce a counter of active ports and check that it's (still) no
+larger than the number of Xen internally used ones after obtaining the
+necessary lock in evtchn_reset().
+
+As to the access model of the new {active,xen}_evtchns fields - while
+all writes get done using write_atomic(), reads ought to use
+read_atomic() only when outside of a suitably locked region.
+
+Note that as of now evtchn_bind_virq() and evtchn_bind_ipi() don't have
+a need to call check_free_port().
+
+This is part of XSA-343.
+
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
+Reviewed-by: Julien Grall <jgrall@amazon.com>
+---
+v7: Drop optimization from evtchn_reset().
+v6: Fix loop exit condition in evtchn_reset(). Use {read,write}_atomic()
+ also for xen_evtchns.
+v5: Move increment in alloc_unbound_xen_event_channel() out of the inner
+ locked region.
+v4: Account for Xen internal ports.
+v3: Document intended access next to new struct field.
+v2: Add comment to check_free_port(). Drop commented out calls.
+
+--- a/xen/common/event_channel.c
++++ b/xen/common/event_channel.c
+@@ -188,6 +188,8 @@ int evtchn_allocate_port(struct domain *
+ write_atomic(&d->valid_evtchns, d->valid_evtchns + EVTCHNS_PER_BUCKET);
+ }
+
++ write_atomic(&d->active_evtchns, d->active_evtchns + 1);
++
+ return 0;
+ }
+
+@@ -211,11 +213,26 @@ static int get_free_port(struct domain *
+ return -ENOSPC;
+ }
+
++/*
++ * Check whether a port is still marked free, and if so update the domain
++ * counter accordingly. To be used on function exit paths.
++ */
++static void check_free_port(struct domain *d, evtchn_port_t port)
++{
++ if ( port_is_valid(d, port) &&
++ evtchn_from_port(d, port)->state == ECS_FREE )
++ write_atomic(&d->active_evtchns, d->active_evtchns - 1);
++}
++
+ void evtchn_free(struct domain *d, struct evtchn *chn)
+ {
+ /* Clear pending event to avoid unexpected behavior on re-bind. */
+ evtchn_port_clear_pending(d, chn);
+
++ if ( consumer_is_xen(chn) )
++ write_atomic(&d->xen_evtchns, d->xen_evtchns - 1);
++ write_atomic(&d->active_evtchns, d->active_evtchns - 1);
++
+ /* Reset binding to vcpu0 when the channel is freed. */
+ chn->state = ECS_FREE;
+ chn->notify_vcpu_id = 0;
+@@ -258,6 +275,7 @@ static long evtchn_alloc_unbound(evtchn_
+ alloc->port = port;
+
+ out:
++ check_free_port(d, port);
+ spin_unlock(&d->event_lock);
+ rcu_unlock_domain(d);
+
+@@ -351,6 +369,7 @@ static long evtchn_bind_interdomain(evtc
+ bind->local_port = lport;
+
+ out:
++ check_free_port(ld, lport);
+ spin_unlock(&ld->event_lock);
+ if ( ld != rd )
+ spin_unlock(&rd->event_lock);
+@@ -488,7 +507,7 @@ static long evtchn_bind_pirq(evtchn_bind
+ struct domain *d = current->domain;
+ struct vcpu *v = d->vcpu[0];
+ struct pirq *info;
+- int port, pirq = bind->pirq;
++ int port = 0, pirq = bind->pirq;
+ long rc;
+
+ if ( (pirq < 0) || (pirq >= d->nr_pirqs) )
+@@ -536,6 +555,7 @@ static long evtchn_bind_pirq(evtchn_bind
+ arch_evtchn_bind_pirq(d, pirq);
+
+ out:
++ check_free_port(d, port);
+ spin_unlock(&d->event_lock);
+
+ return rc;
+@@ -1011,10 +1031,10 @@ int evtchn_unmask(unsigned int port)
+ return 0;
+ }
+
+-
+ int evtchn_reset(struct domain *d)
+ {
+ unsigned int i;
++ int rc = 0;
+
+ if ( d != current->domain && !d->controller_pause_count )
+ return -EINVAL;
+@@ -1024,7 +1044,9 @@ int evtchn_reset(struct domain *d)
+
+ spin_lock(&d->event_lock);
+
+- if ( d->evtchn_fifo )
++ if ( d->active_evtchns > d->xen_evtchns )
++ rc = -EAGAIN;
++ else if ( d->evtchn_fifo )
+ {
+ /* Switching back to 2-level ABI. */
+ evtchn_fifo_destroy(d);
+@@ -1033,7 +1055,7 @@ int evtchn_reset(struct domain *d)
+
+ spin_unlock(&d->event_lock);
+
+- return 0;
++ return rc;
+ }
+
+ static long evtchn_set_priority(const struct evtchn_set_priority *set_priority)
+@@ -1219,10 +1241,9 @@ int alloc_unbound_xen_event_channel(
+
+ spin_lock(&ld->event_lock);
+
+- rc = get_free_port(ld);
++ port = rc = get_free_port(ld);
+ if ( rc < 0 )
+ goto out;
+- port = rc;
+ chn = evtchn_from_port(ld, port);
+
+ rc = xsm_evtchn_unbound(XSM_TARGET, ld, chn, remote_domid);
+@@ -1238,7 +1259,10 @@ int alloc_unbound_xen_event_channel(
+
+ spin_unlock(&chn->lock);
+
++ write_atomic(&ld->xen_evtchns, ld->xen_evtchns + 1);
++
+ out:
++ check_free_port(ld, port);
+ spin_unlock(&ld->event_lock);
+
+ return rc < 0 ? rc : port;
+@@ -1314,6 +1338,7 @@ int evtchn_init(struct domain *d, unsign
+ return -EINVAL;
+ }
+ evtchn_from_port(d, 0)->state = ECS_RESERVED;
++ write_atomic(&d->active_evtchns, 0);
+
+ #if MAX_VIRT_CPUS > BITS_PER_LONG
+ d->poll_mask = xzalloc_array(unsigned long, BITS_TO_LONGS(d->max_vcpus));
+@@ -1340,6 +1365,8 @@ void evtchn_destroy(struct domain *d)
+ for ( i = 0; port_is_valid(d, i); i++ )
+ evtchn_close(d, i, 0);
+
++ ASSERT(!d->active_evtchns);
++
+ clear_global_virq_handlers(d);
+
+ evtchn_fifo_destroy(d);
+--- a/xen/include/xen/sched.h
++++ b/xen/include/xen/sched.h
+@@ -361,6 +361,16 @@ struct domain
+ struct evtchn **evtchn_group[NR_EVTCHN_GROUPS]; /* all other buckets */
+ unsigned int max_evtchn_port; /* max permitted port number */
+ unsigned int valid_evtchns; /* number of allocated event channels */
++ /*
++ * Number of in-use event channels. Writers should use write_atomic().
++ * Readers need to use read_atomic() only when not holding event_lock.
++ */
++ unsigned int active_evtchns;
++ /*
++ * Number of event channels used internally by Xen (not subject to
++ * EVTCHNOP_reset). Read/write access like for active_evtchns.
++ */
++ unsigned int xen_evtchns;
+ spinlock_t event_lock;
+ const struct evtchn_port_ops *evtchn_port_ops;
+ struct evtchn_fifo_domain *evtchn_fifo;
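For readers skimming the hunks above, the core of the fix is the pair of
counters (active_evtchns, xen_evtchns) plus the gate added to
evtchn_reset(). Below is a minimal standalone sketch of that discipline,
not Xen code: the struct layout, the pthread mutex standing in for
event_lock, the C11 relaxed atomics standing in for Xen's
write_atomic()/read_atomic(), and the helper names bump_active() and
reset_gate() are all illustrative assumptions.

/*
 * Sketch only: writers update the counters exclusively under event_lock,
 * so a plain read-modify-write is race-free among writers; the store is
 * atomic solely so that lock-free readers observe a consistent value.
 */
#include <errno.h>
#include <pthread.h>
#include <stdatomic.h>

struct domain {
    pthread_mutex_t event_lock;   /* stand-in for Xen's spinlock */
    atomic_uint active_evtchns;   /* all in-use event channels */
    atomic_uint xen_evtchns;      /* channels used internally by "Xen" */
};

/* Writer side, caller holds event_lock (cf. evtchn_allocate_port()). */
void bump_active(struct domain *d)
{
    unsigned int v = atomic_load_explicit(&d->active_evtchns,
                                          memory_order_relaxed);
    atomic_store_explicit(&d->active_evtchns, v + 1, memory_order_relaxed);
}

/* The new evtchn_reset() gate: with event_lock held, refuse the ABI
 * switch while any guest-visible (non-Xen) port is still open. Under
 * the lock a plain read would suffice in Xen; C11 _Atomic objects
 * require an atomic load here regardless. */
int reset_gate(struct domain *d)
{
    int rc = 0;

    pthread_mutex_lock(&d->event_lock);
    if (atomic_load_explicit(&d->active_evtchns, memory_order_relaxed) >
        atomic_load_explicit(&d->xen_evtchns, memory_order_relaxed))
        rc = -EAGAIN;   /* some guest port is still open; caller may retry */
    pthread_mutex_unlock(&d->event_lock);
    return rc;
}

int main(void)
{
    struct domain d = { .event_lock = PTHREAD_MUTEX_INITIALIZER };

    bump_active(&d);   /* simulate a guest opening one port */
    return reset_gate(&d) == -EAGAIN ? 0 : 1;   /* gate must refuse */
}

This also makes the commit message's access model concrete: because every
write happens under event_lock, the writer side needs only single atomic
stores, and read_atomic()-style loads are required only on paths that
inspect the counters without holding the lock.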