From: Wei Chen <Wei.Chen@arm.com>
Subject: arm64: handle async aborts delivered while at EL2
If EL1 generates an asynchronous abort and then traps into EL2
(via HVC or IRQ) before the abort has been delivered, the hypervisor
cannot catch it, because the PSTATE.A bit is masked the whole time
the hypervisor is running. The asynchronous abort may therefore slip
through to the next running guest, which has the PSTATE.A bit
unmasked.

To avoid this, the abort has to be taken at EL2 by clearing the
PSTATE.A bit. This patch unmasks PSTATE.A on all EL1 -> EL2 switch
paths to open a window in which a guest-generated asynchronous abort
can be caught. If such an abort is caught inside the checking window,
the hyp_error exception is triggered and the guest that generated the
abort is crashed.

This is CVE-2016-9816, part of XSA-201.

Signed-off-by: Wei Chen <Wei.Chen@arm.com>
Reviewed-by: Julien Grall <julien.grall@arm.com>
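For reference, the DAIFSet/DAIFClr immediates used in the hunks below
select which PSTATE mask bits to set or clear: bit 3 = D (debug),
bit 2 = A (SError/asynchronous abort), bit 1 = I (IRQ), bit 0 = F (FIQ).
The patch therefore toggles the A bit with #4 and the I bit with #2.
A minimal sketch of the three operations it relies on:

    msr     daifclr, #4         // clear PSTATE.A: a pending SError can now be taken
    msr     daifset, #4         // set PSTATE.A: SErrors are masked again
    msr     daifclr, #2         // clear PSTATE.I: unmask IRQs (guest sync paths)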
--- a/xen/arch/arm/arm64/entry.S
+++ b/xen/arch/arm/arm64/entry.S
@@ -173,6 +173,43 @@ hyp_error_invalid:
entry hyp=1
invalid BAD_ERROR
+hyp_error:
+ /*
+ * Only two possibilities:
+ * 1) Either we come from the exit path, having just unmasked
+ * PSTATE.A: change the return code to an EL2 fault, and
+ * carry on, as we're already in a sane state to handle it.
+ * 2) Or we come from anywhere else, and that's a bug: we panic.
+ */
+ entry hyp=1
+ msr daifclr, #2
+
+ /*
+ * The ELR_EL2 may be modified by an interrupt, so we have to use the
+ * saved value in cpu_user_regs to check whether we come from 1) or
+ * not.
+ */
+ ldr x0, [sp, #UREGS_PC]
+ adr x1, abort_guest_exit_start
+ cmp x0, x1
+ adr x1, abort_guest_exit_end
+ ccmp x0, x1, #4, ne
+ mov x0, sp
+ mov x1, #BAD_ERROR
+
+ /*
+ * Not equal: the exception comes from 2). It's a bug, we have to
+ * panic the hypervisor.
+ */
+ b.ne do_bad_mode
+
+ /*
+ * Otherwise, the exception comes from 1). It happened because of
+ * the guest. Crash this guest.
+ */
+ bl do_trap_guest_error
+ exit hyp=1
+
/* Traps taken in Current EL with SP_ELx */
hyp_sync:
entry hyp=1
@@ -189,15 +226,29 @@ hyp_irq:
guest_sync:
entry hyp=0, compat=0
+ bl check_pending_vserror
+ /*
+ * If x0 is non-zero, a vSError took place and the initial exception
+ * no longer needs to be handled. Exit ASAP
+ */
+ cbnz x0, 1f
msr daifclr, #2
mov x0, sp
bl do_trap_hypervisor
+1:
exit hyp=0, compat=0
guest_irq:
entry hyp=0, compat=0
+ bl check_pending_vserror
+ /*
+ * If x0 is non-zero, a vSError took place and the initial exception
+ * no longer needs to be handled. Exit ASAP
+ */
+ cbnz x0, 1f
mov x0, sp
bl do_trap_irq
+1:
exit hyp=0, compat=0
guest_fiq_invalid:
@@ -213,15 +264,29 @@ guest_error:
guest_sync_compat:
entry hyp=0, compat=1
+ bl check_pending_vserror
+ /*
+ * If x0 is non-zero, a vSError took place and the initial exception
+ * no longer needs to be handled. Exit ASAP
+ */
+ cbnz x0, 1f
msr daifclr, #2
mov x0, sp
bl do_trap_hypervisor
+1:
exit hyp=0, compat=1
guest_irq_compat:
entry hyp=0, compat=1
+ bl check_pending_vserror
+ /*
+ * If x0 is non-zero, a vSError took place and the initial exception
+ * no longer needs to be handled. Exit ASAP
+ */
+ cbnz x0, 1f
mov x0, sp
bl do_trap_irq
+1:
exit hyp=0, compat=1
guest_fiq_invalid_compat:
@@ -270,6 +335,62 @@ return_from_trap:
eret
/*
+ * This function is used to check for a pending virtual SError in the gap of
+ * the EL1 -> EL2 world switch.
+ * The x0 register indicates the result of the check:
+ * x0 -- non-zero: a pending virtual SError took place.
+ * x0 -- zero: no pending virtual SError took place.
+ */
+check_pending_vserror:
+ /*
+ * Save elr_el2 to check whether the pending SError exception takes
+ * place while we are doing this sync exception.
+ */
+ mrs x0, elr_el2
+
+ /* Synchronize against in-flight ld/st */
+ dsb sy
+
+ /*
+ * Unmask PSTATE asynchronous abort bit. If there is a pending
+ * SError, the EL2 error exception will happen after PSTATE.A
+ * is cleared.
+ */
+ msr daifclr, #4
+
+ /*
+ * This is our single instruction exception window. A pending
+ * SError is guaranteed to occur at the earliest when we unmask
+ * it, and at the latest just after the ISB.
+ *
+ * If a pending SError occurs, the program will jump to EL2 error
+ * exception handler, and the elr_el2 will be set to
+ * abort_guest_exit_start or abort_guest_exit_end.
+ */
+abort_guest_exit_start:
+
+ isb
+
+abort_guest_exit_end:
+ /* Mask PSTATE asynchronous abort bit, close the checking window. */
+ msr daifset, #4
+
+ /*
+ * Compare elr_el2 and the saved value to check whether we are
+ * returning from a valid exception caused by pending SError.
+ */
+ mrs x1, elr_el2
+ cmp x0, x1
+
+ /*
+ * Not equal: the pending SError exception took place, so set
+ * x0 to non-zero.
+ */
+ cset x0, ne
+
+ ret
+
+/*
* Exception vectors.
*/
.macro ventry label
@@ -287,7 +408,7 @@ ENTRY(hyp_traps_vector)
ventry hyp_sync // Synchronous EL2h
ventry hyp_irq // IRQ EL2h
ventry hyp_fiq_invalid // FIQ EL2h
- ventry hyp_error_invalid // Error EL2h
+ ventry hyp_error // Error EL2h
ventry guest_sync // Synchronous 64-bit EL0/EL1
ventry guest_irq // IRQ 64-bit EL0/EL1
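A note on the cmp/ccmp pair in hyp_error above: ccmp performs its
comparison only when the supplied condition holds; otherwise it loads
the flags directly from the immediate NZCV value (#4 is 0b0100, i.e.
Z set, "equal"). Together with the final b.ne (the intervening mov
instructions do not touch the flags), the sequence reads as "panic
unless the saved PC is abort_guest_exit_start or abort_guest_exit_end".
A standalone sketch of the same idiom, using the illustrative labels
label_a, label_b and neither_matched rather than names from the Xen
sources:

    adr     x1, label_a
    cmp     x0, x1              // Z := (x0 == label_a)
    adr     x1, label_b
    ccmp    x0, x1, #4, ne      // if x0 != label_a: compare x0 with label_b
                                // else: force NZCV = 0b0100 (Z = 1, "equal")
    b.ne    neither_matched     // taken only if x0 matched neither label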