From: Jan Beulich <jbeulich@suse.com>
Subject: evtchn: evtchn_reset() shouldn't succeed with still-open ports

While the function closes all ports, it does so without holding any
lock, and hence racing requests may be issued, causing new ports to be
opened. This would have been problematic in particular if such a newly
opened port had a port number above the new implementation limit (i.e.
when switching from FIFO to 2-level) after the reset, as prior to
"evtchn: relax port_is_valid()" this could have caused e.g.
evtchn_close()'s "BUG_ON(!port_is_valid(d2, port2))" to trigger.

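To make the race window concrete, here is a simplified sketch of the
pre-patch flow (error handling and the controller_pause_count check
elided; the hunks below show the real code):

    int evtchn_reset(struct domain *d)
    {
        unsigned int i;

        /* No d->event_lock held: all ports get closed one by one... */
        for ( i = 0; port_is_valid(d, i); i++ )
            evtchn_close(d, i, 1);

        /*
         * ...so a racing EVTCHNOP_alloc_unbound (or similar) may open
         * a fresh port right here, possibly with a number above the
         * 2-level limit imposed just below.
         */
        spin_lock(&d->event_lock);

        if ( d->evtchn_fifo )
        {
            /* Switching back to 2-level ABI. */
            evtchn_fifo_destroy(d);
            evtchn_2l_init(d);
        }

        spin_unlock(&d->event_lock);

        return 0;
    }
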
Introduce a counter of active ports and check that it's (still) no
larger than the number of ports used internally by Xen, after obtaining
the necessary lock in evtchn_reset().

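With the counters in place, the reset path can refuse to proceed while
any port not used internally by Xen is (still or newly) active. A
minimal mirror of the resulting logic (see the evtchn_reset() hunk
below for the actual change):

    spin_lock(&d->event_lock);

    /*
     * Xen-internal ports aren't subject to EVTCHNOP_reset, so they are
     * the only ones allowed to remain open at this point.
     */
    if ( d->active_evtchns > d->xen_evtchns )
        rc = -EAGAIN;   /* a racing open slipped in; caller may retry */
    else if ( d->evtchn_fifo )
    {
        /* Switching back to 2-level ABI. */
        evtchn_fifo_destroy(d);
    }

    spin_unlock(&d->event_lock);
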
As to the access model of the new {active,xen}_evtchns fields: all
writes are done using write_atomic(), while reads need to use
read_atomic() only when outside of a suitably locked region.

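Expressed as a pattern (illustrative only, not an additional change):

    /* Writer, holding d->event_lock: */
    write_atomic(&d->active_evtchns, d->active_evtchns + 1);

    /* Reader inside a suitably locked region - a plain read is fine: */
    bool busy = d->active_evtchns > d->xen_evtchns;

    /* Reader outside of any locked region: */
    unsigned int active = read_atomic(&d->active_evtchns);
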
Note that, as of now, evtchn_bind_virq() and evtchn_bind_ipi() have no
need to call check_free_port().

This is part of XSA-343.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
Reviewed-by: Julien Grall <jgrall@amazon.com>
---
v7: Drop optimization from evtchn_reset().
v6: Fix loop exit condition in evtchn_reset(). Use {read,write}_atomic()
also for xen_evtchns.
v5: Move increment in alloc_unbound_xen_event_channel() out of the inner
locked region.
v4: Account for Xen internal ports.
v3: Document intended access next to new struct field.
v2: Add comment to check_free_port(). Drop commented out calls.

--- a/xen/common/event_channel.c
+++ b/xen/common/event_channel.c
@@ -188,6 +188,8 @@ int evtchn_allocate_port(struct domain *
write_atomic(&d->valid_evtchns, d->valid_evtchns + EVTCHNS_PER_BUCKET);
}
+ write_atomic(&d->active_evtchns, d->active_evtchns + 1);
+
return 0;
}
@@ -211,11 +213,26 @@ static int get_free_port(struct domain *
return -ENOSPC;
}
+/*
+ * Check whether a port is still marked free, and if so update the domain
+ * counter accordingly. To be used on function exit paths.
+ */
+static void check_free_port(struct domain *d, evtchn_port_t port)
+{
+ if ( port_is_valid(d, port) &&
+ evtchn_from_port(d, port)->state == ECS_FREE )
+ write_atomic(&d->active_evtchns, d->active_evtchns - 1);
+}
+
void evtchn_free(struct domain *d, struct evtchn *chn)
{
/* Clear pending event to avoid unexpected behavior on re-bind. */
evtchn_port_clear_pending(d, chn);
+ if ( consumer_is_xen(chn) )
+ write_atomic(&d->xen_evtchns, d->xen_evtchns - 1);
+ write_atomic(&d->active_evtchns, d->active_evtchns - 1);
+
/* Reset binding to vcpu0 when the channel is freed. */
chn->state = ECS_FREE;
chn->notify_vcpu_id = 0;
@@ -258,6 +275,7 @@ static long evtchn_alloc_unbound(evtchn_
alloc->port = port;
out:
+ check_free_port(d, port);
spin_unlock(&d->event_lock);
rcu_unlock_domain(d);
@@ -351,6 +369,7 @@ static long evtchn_bind_interdomain(evtc
bind->local_port = lport;
out:
+ check_free_port(ld, lport);
spin_unlock(&ld->event_lock);
if ( ld != rd )
spin_unlock(&rd->event_lock);
@@ -488,7 +507,7 @@ static long evtchn_bind_pirq(evtchn_bind
struct domain *d = current->domain;
struct vcpu *v = d->vcpu[0];
struct pirq *info;
- int port, pirq = bind->pirq;
+ int port = 0, pirq = bind->pirq;
long rc;
if ( (pirq < 0) || (pirq >= d->nr_pirqs) )
@@ -536,6 +555,7 @@ static long evtchn_bind_pirq(evtchn_bind
arch_evtchn_bind_pirq(d, pirq);
out:
+ check_free_port(d, port);
spin_unlock(&d->event_lock);
return rc;
@@ -1011,10 +1031,10 @@ int evtchn_unmask(unsigned int port)
return 0;
}
-
int evtchn_reset(struct domain *d)
{
unsigned int i;
+ int rc = 0;
if ( d != current->domain && !d->controller_pause_count )
return -EINVAL;
@@ -1024,7 +1044,9 @@ int evtchn_reset(struct domain *d)
spin_lock(&d->event_lock);
- if ( d->evtchn_fifo )
+ if ( d->active_evtchns > d->xen_evtchns )
+ rc = -EAGAIN;
+ else if ( d->evtchn_fifo )
{
/* Switching back to 2-level ABI. */
evtchn_fifo_destroy(d);
@@ -1033,7 +1055,7 @@ int evtchn_reset(struct domain *d)
spin_unlock(&d->event_lock);
- return 0;
+ return rc;
}
static long evtchn_set_priority(const struct evtchn_set_priority *set_priority)
@@ -1219,10 +1241,9 @@ int alloc_unbound_xen_event_channel(
spin_lock(&ld->event_lock);
- rc = get_free_port(ld);
+ port = rc = get_free_port(ld);
if ( rc < 0 )
goto out;
- port = rc;
chn = evtchn_from_port(ld, port);
rc = xsm_evtchn_unbound(XSM_TARGET, ld, chn, remote_domid);
@@ -1238,7 +1259,10 @@ int alloc_unbound_xen_event_channel(
spin_unlock(&chn->lock);
+ write_atomic(&ld->xen_evtchns, ld->xen_evtchns + 1);
+
out:
+ check_free_port(ld, port);
spin_unlock(&ld->event_lock);
return rc < 0 ? rc : port;
@@ -1314,6 +1338,7 @@ int evtchn_init(struct domain *d, unsign
return -EINVAL;
}
evtchn_from_port(d, 0)->state = ECS_RESERVED;
+ write_atomic(&d->active_evtchns, 0);
#if MAX_VIRT_CPUS > BITS_PER_LONG
d->poll_mask = xzalloc_array(unsigned long, BITS_TO_LONGS(d->max_vcpus));
@@ -1340,6 +1365,8 @@ void evtchn_destroy(struct domain *d)
for ( i = 0; port_is_valid(d, i); i++ )
evtchn_close(d, i, 0);
+ ASSERT(!d->active_evtchns);
+
clear_global_virq_handlers(d);
evtchn_fifo_destroy(d);
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -361,6 +361,16 @@ struct domain
struct evtchn **evtchn_group[NR_EVTCHN_GROUPS]; /* all other buckets */
unsigned int max_evtchn_port; /* max permitted port number */
unsigned int valid_evtchns; /* number of allocated event channels */
+ /*
+ * Number of in-use event channels. Writers should use write_atomic().
+ * Readers need to use read_atomic() only when not holding event_lock.
+ */
+ unsigned int active_evtchns;
+ /*
+ * Number of event channels used internally by Xen (not subject to
+ * EVTCHNOP_reset). Read/write access like for active_evtchns.
+ */
+ unsigned int xen_evtchns;
spinlock_t event_lock;
const struct evtchn_port_ops *evtchn_port_ops;
struct evtchn_fifo_domain *evtchn_fifo;