summaryrefslogtreecommitdiff
path: root/system/xen/xsa/xsa201-2.patch
diff options
context:
space:
mode:
Diffstat (limited to 'system/xen/xsa/xsa201-2.patch')
-rw-r--r--system/xen/xsa/xsa201-2.patch199
1 files changed, 199 insertions, 0 deletions
diff --git a/system/xen/xsa/xsa201-2.patch b/system/xen/xsa/xsa201-2.patch
new file mode 100644
index 0000000000..9bd1f8f89d
--- /dev/null
+++ b/system/xen/xsa/xsa201-2.patch
@@ -0,0 +1,199 @@
+From: Wei Chen <Wei.Chen@arm.com>
+Subject: arm64: handle async aborts delivered while at EL2
+
+If EL1 generates an asynchronous abort and then traps into EL2
+(by HVC or IRQ) before the abort has been delivered, the hypervisor
+could not catch it, because the PSTATE.A bit is masked all the time
+in the hypervisor. So this asynchronous abort may slip through to the
+next running guest with the PSTATE.A bit unmasked.
+
+In order to avoid this, it is necessary to take the abort at EL2, by
+clearing the PSTATE.A bit. In this patch, we unmask the PSTATE.A bit
+to open a window to catch guest-generated asynchronous abort in all
+EL1 -> EL2 switch paths. If we catch such an asynchronous abort in
+the checking window, the hyp_error exception will be triggered and
+the guest that generated the abort will be crashed.
+
+This is CVE-2016-9816, part of XSA-201.
+
+Signed-off-by: Wei Chen <Wei.Chen@arm.com>
+Reviewed-by: Julien Grall <julien.grall@arm.com>
+
+--- a/xen/arch/arm/arm64/entry.S
++++ b/xen/arch/arm/arm64/entry.S
+@@ -173,6 +173,43 @@ hyp_error_invalid:
+ entry hyp=1
+ invalid BAD_ERROR
+
++hyp_error:
++ /*
++ * Only two possibilities:
++ * 1) Either we come from the exit path, having just unmasked
++ * PSTATE.A: change the return code to an EL2 fault, and
++ * carry on, as we're already in a sane state to handle it.
++ * 2) Or we come from anywhere else, and that's a bug: we panic.
++ */
++ entry hyp=1
++ msr daifclr, #2
++
++ /*
++ * The ELR_EL2 may be modified by an interrupt, so we have to use the
++ * saved value in cpu_user_regs to check whether we come from 1) or
++ * not.
++ */
++ ldr x0, [sp, #UREGS_PC]
++ adr x1, abort_guest_exit_start
++ cmp x0, x1
++ adr x1, abort_guest_exit_end
++ ccmp x0, x1, #4, ne
++ mov x0, sp
++ mov x1, #BAD_ERROR
++
++ /*
+ * Not equal, the exception comes from 2). It's a bug, we have to
++ * panic the hypervisor.
++ */
++ b.ne do_bad_mode
++
++ /*
+ * Otherwise, the exception comes from 1). It happened because of
++ * the guest. Crash this guest.
++ */
++ bl do_trap_guest_error
++ exit hyp=1
++
+ /* Traps taken in Current EL with SP_ELx */
+ hyp_sync:
+ entry hyp=1
+@@ -189,15 +226,29 @@ hyp_irq:
+
+ guest_sync:
+ entry hyp=0, compat=0
++ bl check_pending_vserror
++ /*
+ * If x0 is non-zero, a vSError took place, so the initial exception
+ * no longer needs to be handled. Exit ASAP
++ */
++ cbnz x0, 1f
+ msr daifclr, #2
+ mov x0, sp
+ bl do_trap_hypervisor
++1:
+ exit hyp=0, compat=0
+
+ guest_irq:
+ entry hyp=0, compat=0
++ bl check_pending_vserror
++ /*
+ * If x0 is non-zero, a vSError took place, so the initial exception
+ * no longer needs to be handled. Exit ASAP
++ */
++ cbnz x0, 1f
+ mov x0, sp
+ bl do_trap_irq
++1:
+ exit hyp=0, compat=0
+
+ guest_fiq_invalid:
+@@ -213,15 +264,29 @@ guest_error:
+
+ guest_sync_compat:
+ entry hyp=0, compat=1
++ bl check_pending_vserror
++ /*
+ * If x0 is non-zero, a vSError took place, so the initial exception
+ * no longer needs to be handled. Exit ASAP
++ */
++ cbnz x0, 1f
+ msr daifclr, #2
+ mov x0, sp
+ bl do_trap_hypervisor
++1:
+ exit hyp=0, compat=1
+
+ guest_irq_compat:
+ entry hyp=0, compat=1
++ bl check_pending_vserror
++ /*
+ * If x0 is non-zero, a vSError took place, so the initial exception
+ * no longer needs to be handled. Exit ASAP
++ */
++ cbnz x0, 1f
+ mov x0, sp
+ bl do_trap_irq
++1:
+ exit hyp=0, compat=1
+
+ guest_fiq_invalid_compat:
+@@ -270,6 +335,62 @@ return_from_trap:
+ eret
+
+ /*
++ * This function is used to check pending virtual SError in the gap of
++ * EL1 -> EL2 world switch.
++ * The x0 register will be used to indicate the results of detection.
++ * x0 -- Non-zero indicates a pending virtual SError took place.
++ * x0 -- Zero indicates no pending virtual SError took place.
++ */
++check_pending_vserror:
++ /*
++ * Save elr_el2 to check whether the pending SError exception takes
++ * place while we are doing this sync exception.
++ */
++ mrs x0, elr_el2
++
++ /* Synchronize against in-flight ld/st */
++ dsb sy
++
++ /*
++ * Unmask PSTATE asynchronous abort bit. If there is a pending
++ * SError, the EL2 error exception will happen after PSTATE.A
++ * is cleared.
++ */
++ msr daifclr, #4
++
++ /*
++ * This is our single instruction exception window. A pending
++ * SError is guaranteed to occur at the earliest when we unmask
++ * it, and at the latest just after the ISB.
++ *
++ * If a pending SError occurs, the program will jump to EL2 error
++ * exception handler, and the elr_el2 will be set to
++ * abort_guest_exit_start or abort_guest_exit_end.
++ */
++abort_guest_exit_start:
++
++ isb
++
++abort_guest_exit_end:
++ /* Mask PSTATE asynchronous abort bit, close the checking window. */
++ msr daifset, #4
++
++ /*
++ * Compare elr_el2 and the saved value to check whether we are
++ * returning from a valid exception caused by pending SError.
++ */
++ mrs x1, elr_el2
++ cmp x0, x1
++
++ /*
++ * Not equal, the pending SError exception took place, set
++ * x0 to non-zero.
++ */
++ cset x0, ne
++
++ ret
++
++/*
+ * Exception vectors.
+ */
+ .macro ventry label
+@@ -287,7 +408,7 @@ ENTRY(hyp_traps_vector)
+ ventry hyp_sync // Synchronous EL2h
+ ventry hyp_irq // IRQ EL2h
+ ventry hyp_fiq_invalid // FIQ EL2h
+- ventry hyp_error_invalid // Error EL2h
++ ventry hyp_error // Error EL2h
+
+ ventry guest_sync // Synchronous 64-bit EL0/EL1
+ ventry guest_irq // IRQ 64-bit EL0/EL1