| author | Mario Preksavec <mario@slackware.hr> | 2016-12-17 12:07:20 +0100 |
|---|---|---|
| committer | Willy Sudiarto Raharjo <willysr@slackbuilds.org> | 2016-12-24 07:34:16 +0700 |
| commit | b674aee736e631d8f7d4ec0ed949f71726ba3462 (patch) | |
| tree | 0fce21c2a4dce8fca56280b6ee86dce68f4dfd71 /system/xen | |
| parent | 8e2ace85aac7ba510e31434cd838a1d7d2c5d198 (diff) | |
| download | slackbuilds-b674aee736e631d8f7d4ec0ed949f71726ba3462.tar.gz | |
system/xen: XSA 199-201 update.
Signed-off-by: Mario Preksavec <mario@slackware.hr>
Diffstat (limited to 'system/xen')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | system/xen/dom0/config-4.4.38-xen.i686 (renamed from system/xen/dom0/config-4.4.29-xen.i486) | 2 |
| -rw-r--r-- | system/xen/dom0/config-4.4.38-xen.x86_64 (renamed from system/xen/dom0/config-4.4.29-xen.x86_64) | 2 |
| -rw-r--r-- | system/xen/dom0/kernel-xen.sh | 4 |
| -rw-r--r-- | system/xen/domU/domU.sh | 2 |
| -rw-r--r-- | system/xen/xen.SlackBuild | 2 |
| -rw-r--r-- | system/xen/xsa/xsa199-qemut.patch | 89 |
| -rw-r--r-- | system/xen/xsa/xsa200-4.7.patch | 55 |
| -rw-r--r-- | system/xen/xsa/xsa201-1.patch | 87 |
| -rw-r--r-- | system/xen/xsa/xsa201-2.patch | 199 |
| -rw-r--r-- | system/xen/xsa/xsa201-3-4.7.patch | 47 |
| -rw-r--r-- | system/xen/xsa/xsa201-4.patch | 130 |
11 files changed, 613 insertions(+), 6 deletions(-)
diff --git a/system/xen/dom0/config-4.4.29-xen.i486 b/system/xen/dom0/config-4.4.38-xen.i686 index 3af4f7cd41..cfbfe0d202 100644 --- a/system/xen/dom0/config-4.4.29-xen.i486 +++ b/system/xen/dom0/config-4.4.38-xen.i686 @@ -1,6 +1,6 @@ # # Automatically generated file; DO NOT EDIT. -# Linux/x86 4.4.29 Kernel Configuration +# Linux/x86 4.4.38 Kernel Configuration # # CONFIG_64BIT is not set CONFIG_X86_32=y diff --git a/system/xen/dom0/config-4.4.29-xen.x86_64 b/system/xen/dom0/config-4.4.38-xen.x86_64 index 7b4e532ab2..db5c9e8c51 100644 --- a/system/xen/dom0/config-4.4.29-xen.x86_64 +++ b/system/xen/dom0/config-4.4.38-xen.x86_64 @@ -1,6 +1,6 @@ # # Automatically generated file; DO NOT EDIT. -# Linux/x86 4.4.29 Kernel Configuration +# Linux/x86 4.4.38 Kernel Configuration # CONFIG_64BIT=y CONFIG_X86_64=y diff --git a/system/xen/dom0/kernel-xen.sh b/system/xen/dom0/kernel-xen.sh index b84513786e..07d1a1f8b1 100644 --- a/system/xen/dom0/kernel-xen.sh +++ b/system/xen/dom0/kernel-xen.sh @@ -5,7 +5,7 @@ # Written by Chris Abela <chris.abela@maltats.com>, 20100515 # Modified by Mario Preksavec <mario@slackware.hr> -KERNEL=${KERNEL:-4.4.29} +KERNEL=${KERNEL:-4.4.38} XEN=${XEN:-4.7.1} BOOTLOADER=${BOOTLOADER:-lilo} @@ -15,7 +15,7 @@ ROOTDEV=${ROOTDEV:-/dev/sda2} if [ -z "$ARCH" ]; then case "$( uname -m )" in - i?86) ARCH=i486 ;; + i?86) ARCH=i686 ;; x86_64) ARCH=x86_64 ;; *) echo "Unsupported architecture detected ($ARCH)"; exit ;; esac diff --git a/system/xen/domU/domU.sh b/system/xen/domU/domU.sh index 1cd3e0d92c..6d61485b32 100644 --- a/system/xen/domU/domU.sh +++ b/system/xen/domU/domU.sh @@ -7,7 +7,7 @@ set -e -KERNEL=${KERNEL:-4.4.29} +KERNEL=${KERNEL:-4.4.38} # Build an image for the root file system and another for the swap # Default values : 8GB and 500MB resepectively. diff --git a/system/xen/xen.SlackBuild b/system/xen/xen.SlackBuild index 088d87b259..e4eabedb97 100644 --- a/system/xen/xen.SlackBuild +++ b/system/xen/xen.SlackBuild @@ -24,7 +24,7 @@ PRGNAM=xen VERSION=${VERSION:-4.7.1} -BUILD=${BUILD:-2} +BUILD=${BUILD:-3} TAG=${TAG:-_SBo} SEABIOS=${SEABIOS:-1.9.2} diff --git a/system/xen/xsa/xsa199-qemut.patch b/system/xen/xsa/xsa199-qemut.patch new file mode 100644 index 0000000000..50a7eb6c92 --- /dev/null +++ b/system/xen/xsa/xsa199-qemut.patch @@ -0,0 +1,89 @@ +From b73bd1edc05d1bad5c018228146930d79315a5da Mon Sep 17 00:00:00 2001 +From: Ian Jackson <ian.jackson@eu.citrix.com> +Date: Mon, 14 Nov 2016 17:19:46 +0000 +Subject: [PATCH] qemu: ioport_read, ioport_write: be defensive about 32-bit + addresses + +On x86, ioport addresses are 16-bit. That these functions take 32-bit +arguments is a mistake. Changing the argument type to 16-bit will +discard the top bits of any erroneous values from elsewhere in qemu. + +Also, check just before use that the value is in range. (This turns +an ill-advised change to MAX_IOPORTS into a possible guest crash +rather than a privilege escalation vulnerability.) + +And, in the Xen ioreq processor, clamp incoming ioport addresses to +16-bit values. Xen will never write >16-bit values but the guest may +have access to the ioreq ring. We want to defend the rest of the qemu +code from wrong values. + +This is XSA-199. 
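The defence described above boils down to: narrow the port address to 16 bits at the function boundary, clamp anything read from the guest-writable ioreq ring, and still bounds-check before indexing the dispatch table. A minimal standalone C sketch of that pattern follows; the table layout, `MAX_IOPORTS` value and handler names are illustrative stand-ins, not qemu's actual definitions.

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_IOPORTS 65536              /* illustrative: one slot per 16-bit port */

typedef uint32_t (*io_read_fn)(void *opaque, uint16_t addr);

static io_read_fn ioport_read_table[MAX_IOPORTS];  /* sparse handler table */
static void *ioport_opaque[MAX_IOPORTS];

static uint32_t default_read(void *opaque, uint16_t addr)
{
    (void)opaque; (void)addr;
    return 0xffffffff;                 /* "no device behind this port" */
}

/*
 * The narrowed uint16_t parameter silently discards stray high bits from
 * callers; the explicit check turns an out-of-range index (only possible if
 * MAX_IOPORTS were ever shrunk below 0x10000) into an abort rather than an
 * out-of-bounds table read.
 */
static uint32_t ioport_read(uint16_t address)
{
    if (address >= MAX_IOPORTS)
        abort();
    io_read_fn func = ioport_read_table[address];
    if (!func)
        func = default_read;
    return func(ioport_opaque[address], address);
}

int main(void)
{
    /* A 32-bit port number from an untrusted ring is masked before use,
     * mirroring the req->addr &= 0xffff clamp in the ioreq path. */
    uint32_t untrusted_addr = 0xdead0070;
    printf("port 0x%04x -> 0x%08x\n",
           (unsigned)(untrusted_addr & 0xffff),
           (unsigned)ioport_read((uint16_t)(untrusted_addr & 0xffff)));
    return 0;
}
```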
+ +Reported-by: yanghongke <yanghongke@huawei.com> +Signed-off-by: Ian Jackson <Ian.Jackson@eu.citrix.com> +--- + i386-dm/helper2.c | 2 ++ + vl.c | 9 +++++++-- + 2 files changed, 9 insertions(+), 2 deletions(-) + +diff --git a/i386-dm/helper2.c b/i386-dm/helper2.c +index 2706f2e..5d276bb 100644 +--- a/i386-dm/helper2.c ++++ b/i386-dm/helper2.c +@@ -375,6 +375,8 @@ static void cpu_ioreq_pio(CPUState *env, ioreq_t *req) + { + uint32_t i; + ++ req->addr &= 0x0ffffU; ++ + if (req->dir == IOREQ_READ) { + if (!req->data_is_ptr) { + req->data = do_inp(env, req->addr, req->size); +diff --git a/vl.c b/vl.c +index f9c4d7e..c3c5d63 100644 +--- a/vl.c ++++ b/vl.c +@@ -52,6 +52,7 @@ + + #include <xen/hvm/hvm_info_table.h> + ++#include <assert.h> + #include <unistd.h> + #include <fcntl.h> + #include <signal.h> +@@ -290,26 +291,30 @@ PicState2 *isa_pic; + static IOPortReadFunc default_ioport_readb, default_ioport_readw, default_ioport_readl; + static IOPortWriteFunc default_ioport_writeb, default_ioport_writew, default_ioport_writel; + +-static uint32_t ioport_read(int index, uint32_t address) ++static uint32_t ioport_read(int index, uint16_t address) + { + static IOPortReadFunc *default_func[3] = { + default_ioport_readb, + default_ioport_readw, + default_ioport_readl + }; ++ if (address >= MAX_IOPORTS) ++ abort(); + IOPortReadFunc *func = ioport_read_table[index][address]; + if (!func) + func = default_func[index]; + return func(ioport_opaque[address], address); + } + +-static void ioport_write(int index, uint32_t address, uint32_t data) ++static void ioport_write(int index, uint16_t address, uint32_t data) + { + static IOPortWriteFunc *default_func[3] = { + default_ioport_writeb, + default_ioport_writew, + default_ioport_writel + }; ++ if (address >= MAX_IOPORTS) ++ abort(); + IOPortWriteFunc *func = ioport_write_table[index][address]; + if (!func) + func = default_func[index]; +-- +2.1.4 + diff --git a/system/xen/xsa/xsa200-4.7.patch b/system/xen/xsa/xsa200-4.7.patch new file mode 100644 index 0000000000..69608f6fc3 --- /dev/null +++ b/system/xen/xsa/xsa200-4.7.patch @@ -0,0 +1,55 @@ +From: Jan Beulich <jbeulich@suse.com> +Subject: x86emul: CMPXCHG8B ignores operand size prefix + +Otherwise besides mis-handling the instruction, the comparison failure +case would result in uninitialized stack data being handed back to the +guest in rDX:rAX (32 bits leaked for 32-bit guests, 96 bits for 64-bit +ones). + +This is XSA-200. 
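The core of the fix above is that CMPXCHG8B always operates on 8 bytes (16 for CMPXCHG16B under REX.W), so an operand-size prefix must not be allowed to shrink the emulated access and leave part of the result buffer uninitialized. A hedged sketch of the corrected size selection, with illustrative function and parameter names rather than the emulator's own:

```c
#include <stdbool.h>
#include <stdio.h>

/*
 * Illustrative helper, not Xen's actual emulator code: select how many bytes
 * a CMPXCHG8B/CMPXCHG16B emulation must compare and exchange.  opsize_prefix
 * models a 0x66 prefix, rex_w a REX.W prefix (CMPXCHG16B), has_cx16 the
 * CPUID CX16 feature check.
 */
static unsigned int cmpxchg8b_op_bytes(bool opsize_prefix, bool rex_w,
                                       bool has_cx16)
{
    /* Hardware ignores the operand-size prefix for this instruction, so the
     * emulator must too -- the pre-patch code let it shrink the access. */
    (void)opsize_prefix;

    if (rex_w) {
        if (!has_cx16)
            return 0;              /* the real emulator raises #UD here */
        return 16;                 /* CMPXCHG16B */
    }
    return 8;                      /* CMPXCHG8B is always a 64-bit access */
}

int main(void)
{
    printf("plain:        %u bytes\n", cmpxchg8b_op_bytes(false, false, true));
    printf("0x66 prefix:  %u bytes\n", cmpxchg8b_op_bytes(true,  false, true));
    printf("REX.W (cx16): %u bytes\n", cmpxchg8b_op_bytes(false, true,  true));
    return 0;
}
```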
+ +Signed-off-by: Jan Beulich <jbeulich@suse.com> + +--- a/tools/tests/x86_emulator/test_x86_emulator.c ++++ b/tools/tests/x86_emulator/test_x86_emulator.c +@@ -435,6 +435,24 @@ int main(int argc, char **argv) + goto fail; + printf("okay\n"); + ++ printf("%-40s", "Testing cmpxchg8b (%edi) [opsize]..."); ++ instr[0] = 0x66; instr[1] = 0x0f; instr[2] = 0xc7; instr[3] = 0x0f; ++ res[0] = 0x12345678; ++ res[1] = 0x87654321; ++ regs.eflags = 0x200; ++ regs.eip = (unsigned long)&instr[0]; ++ regs.edi = (unsigned long)res; ++ rc = x86_emulate(&ctxt, &emulops); ++ if ( (rc != X86EMUL_OKAY) || ++ (res[0] != 0x12345678) || ++ (res[1] != 0x87654321) || ++ (regs.eax != 0x12345678) || ++ (regs.edx != 0x87654321) || ++ ((regs.eflags&0x240) != 0x200) || ++ (regs.eip != (unsigned long)&instr[4]) ) ++ goto fail; ++ printf("okay\n"); ++ + printf("%-40s", "Testing movsxbd (%%eax),%%ecx..."); + instr[0] = 0x0f; instr[1] = 0xbe; instr[2] = 0x08; + regs.eflags = 0x200; +--- a/xen/arch/x86/x86_emulate/x86_emulate.c ++++ b/xen/arch/x86/x86_emulate/x86_emulate.c +@@ -4775,8 +4775,12 @@ x86_emulate( + generate_exception_if((modrm_reg & 7) != 1, EXC_UD, -1); + generate_exception_if(ea.type != OP_MEM, EXC_UD, -1); + if ( op_bytes == 8 ) ++ { + host_and_vcpu_must_have(cx16); +- op_bytes *= 2; ++ op_bytes = 16; ++ } ++ else ++ op_bytes = 8; + + /* Get actual old value. */ + if ( (rc = ops->read(ea.mem.seg, ea.mem.off, old, op_bytes, diff --git a/system/xen/xsa/xsa201-1.patch b/system/xen/xsa/xsa201-1.patch new file mode 100644 index 0000000000..50983b852f --- /dev/null +++ b/system/xen/xsa/xsa201-1.patch @@ -0,0 +1,87 @@ +From: Wei Chen <Wei.Chen@arm.com> +Subject: arm64: handle guest-generated EL1 asynchronous abort + +In current code, when the hypervisor receives an asynchronous abort +from a guest, the hypervisor will do panic, the host will be down. +We have to prevent such security issue, so, in this patch we crash +the guest, when the hypervisor receives an asynchronous abort from +the guest. + +This is CVE-2016-9815, part of XSA-201. 
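The behavioural change above is containment: a guest-generated SError now crashes only that guest instead of panicking the host. A toy C illustration of the pattern, using stand-in types and helpers rather than Xen's real domain_crash_synchronous() machinery:

```c
#include <stdio.h>

/* Stand-ins for Xen structures; not the real definitions. */
struct domain { unsigned int domain_id; int is_crashed; };
struct cpu_user_regs { unsigned long pc; };

static struct domain dom1 = { .domain_id = 1, .is_crashed = 0 };
static struct domain *current_domain = &dom1;

/* Models domain_crash_synchronous(): the fault stays inside one guest. */
static void domain_crash_synchronous_model(struct domain *d)
{
    d->is_crashed = 1;
    fprintf(stderr, "Guest(Dom-%u) will be crashed by vSError\n", d->domain_id);
}

/* Shape of the new handler; the pre-patch code instead hit the "invalid"
 * vector and brought the whole host down. */
static void do_trap_guest_error_model(struct cpu_user_regs *regs)
{
    (void)regs;
    domain_crash_synchronous_model(current_domain);
}

int main(void)
{
    struct cpu_user_regs regs = { .pc = 0 };
    do_trap_guest_error_model(&regs);
    return dom1.is_crashed ? 0 : 1;
}
```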
+ +Signed-off-by: Wei Chen <Wei.Chen@arm.com> +Reviewed-by: Stefano Stabellini <sstabellini@kernel.org> +Reviewed-by: Steve Capper <steve.capper@arm.com> +Reviewed-by: Julien Grall <Julien.Grall@arm.com> + +--- a/xen/arch/arm/arm64/entry.S ++++ b/xen/arch/arm/arm64/entry.S +@@ -204,9 +204,12 @@ guest_fiq_invalid: + entry hyp=0, compat=0 + invalid BAD_FIQ + +-guest_error_invalid: ++guest_error: + entry hyp=0, compat=0 +- invalid BAD_ERROR ++ msr daifclr, #2 ++ mov x0, sp ++ bl do_trap_guest_error ++ exit hyp=0, compat=0 + + guest_sync_compat: + entry hyp=0, compat=1 +@@ -225,9 +228,12 @@ guest_fiq_invalid_compat: + entry hyp=0, compat=1 + invalid BAD_FIQ + +-guest_error_invalid_compat: ++guest_error_compat: + entry hyp=0, compat=1 +- invalid BAD_ERROR ++ msr daifclr, #2 ++ mov x0, sp ++ bl do_trap_guest_error ++ exit hyp=0, compat=1 + + ENTRY(return_to_new_vcpu32) + exit hyp=0, compat=1 +@@ -286,12 +292,12 @@ ENTRY(hyp_traps_vector) + ventry guest_sync // Synchronous 64-bit EL0/EL1 + ventry guest_irq // IRQ 64-bit EL0/EL1 + ventry guest_fiq_invalid // FIQ 64-bit EL0/EL1 +- ventry guest_error_invalid // Error 64-bit EL0/EL1 ++ ventry guest_error // Error 64-bit EL0/EL1 + + ventry guest_sync_compat // Synchronous 32-bit EL0/EL1 + ventry guest_irq_compat // IRQ 32-bit EL0/EL1 + ventry guest_fiq_invalid_compat // FIQ 32-bit EL0/EL1 +- ventry guest_error_invalid_compat // Error 32-bit EL0/EL1 ++ ventry guest_error_compat // Error 32-bit EL0/EL1 + + /* + * struct vcpu *__context_switch(struct vcpu *prev, struct vcpu *next) +--- a/xen/arch/arm/traps.c ++++ b/xen/arch/arm/traps.c +@@ -2723,6 +2723,21 @@ asmlinkage void do_trap_hypervisor(struct cpu_user_regs *regs) + } + } + ++asmlinkage void do_trap_guest_error(struct cpu_user_regs *regs) ++{ ++ enter_hypervisor_head(regs); ++ ++ /* ++ * Currently, to ensure hypervisor safety, when we received a ++ * guest-generated vSerror/vAbort, we just crash the guest to protect ++ * the hypervisor. In future we can better handle this by injecting ++ * a vSerror/vAbort to the guest. ++ */ ++ gdprintk(XENLOG_WARNING, "Guest(Dom-%u) will be crashed by vSError\n", ++ current->domain->domain_id); ++ domain_crash_synchronous(); ++} ++ + asmlinkage void do_trap_irq(struct cpu_user_regs *regs) + { + enter_hypervisor_head(regs); diff --git a/system/xen/xsa/xsa201-2.patch b/system/xen/xsa/xsa201-2.patch new file mode 100644 index 0000000000..9bd1f8f89d --- /dev/null +++ b/system/xen/xsa/xsa201-2.patch @@ -0,0 +1,199 @@ +From: Wei Chen <Wei.Chen@arm.com> +Subject: arm64: handle async aborts delivered while at EL2 + +If EL1 generates an asynchronous abort and then traps into EL2 +(by HVC or IRQ) before the abort has been delivered, the hypervisor +could not catch it, because the PSTATE.A bit is masked all the time +in hypervisor. So this asynchronous abort may be slipped to next +running guest with PSTATE.A bit unmasked. + +In order to avoid this, it is necessary to take the abort at EL2, by +clearing the PSTATE.A bit. In this patch, we unmask the PSTATE.A bit +to open a window to catch guest-generated asynchronous abort in all +EL1 -> EL2 swich paths. If we catched such asynchronous abort in +checking window, the hyp_error exception will be triggered and the +abort source guest will be crashed. + +This is CVE-2016-9816, part of XSA-201. 
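The flow added to each EL1 -> EL2 entry path above can be read as: briefly unmask PSTATE.A around a single ISB so that any pending guest SError is taken inside a known window, and if one was taken, abandon handling of the original trap. A rough C model of that control flow (the real logic is the entry.S assembly in this patch; every name below is a stand-in):

```c
#include <stdbool.h>
#include <stdio.h>

static bool guest_serror_pending;      /* models an SError masked by PSTATE.A */

/*
 * Models check_pending_vserror: ELR is saved, PSTATE.A is unmasked across a
 * single ISB so a pending SError is delivered inside a known window, then
 * PSTATE.A is masked again.  Returns true iff an SError fired in the window.
 */
static bool check_pending_vserror(void)
{
    bool fired = guest_serror_pending; /* delivered inside the window... */
    guest_serror_pending = false;      /* ...and consumed there */
    return fired;
}

static void do_trap_hypervisor(void) { puts("handle the guest's trap"); }
static void skip_trap(void) { puts("SError taken: guest crashed, trap ignored"); }

/* Models the patched guest_sync path. */
static void guest_sync_entry(void)
{
    if (check_pending_vserror()) {
        /* The initial exception has no significance any more; exit ASAP. */
        skip_trap();
        return;
    }
    do_trap_hypervisor();
}

int main(void)
{
    guest_serror_pending = false; guest_sync_entry();
    guest_serror_pending = true;  guest_sync_entry();
    return 0;
}
```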
+ +Signed-off-by: Wei Chen <Wei.Chen@arm.com> +Reviewed-by: Julien Grall <julien.grall@arm.com> + +--- a/xen/arch/arm/arm64/entry.S ++++ b/xen/arch/arm/arm64/entry.S +@@ -173,6 +173,43 @@ hyp_error_invalid: + entry hyp=1 + invalid BAD_ERROR + ++hyp_error: ++ /* ++ * Only two possibilities: ++ * 1) Either we come from the exit path, having just unmasked ++ * PSTATE.A: change the return code to an EL2 fault, and ++ * carry on, as we're already in a sane state to handle it. ++ * 2) Or we come from anywhere else, and that's a bug: we panic. ++ */ ++ entry hyp=1 ++ msr daifclr, #2 ++ ++ /* ++ * The ELR_EL2 may be modified by an interrupt, so we have to use the ++ * saved value in cpu_user_regs to check whether we come from 1) or ++ * not. ++ */ ++ ldr x0, [sp, #UREGS_PC] ++ adr x1, abort_guest_exit_start ++ cmp x0, x1 ++ adr x1, abort_guest_exit_end ++ ccmp x0, x1, #4, ne ++ mov x0, sp ++ mov x1, #BAD_ERROR ++ ++ /* ++ * Not equal, the exception come from 2). It's a bug, we have to ++ * panic the hypervisor. ++ */ ++ b.ne do_bad_mode ++ ++ /* ++ * Otherwise, the exception come from 1). It happened because of ++ * the guest. Crash this guest. ++ */ ++ bl do_trap_guest_error ++ exit hyp=1 ++ + /* Traps taken in Current EL with SP_ELx */ + hyp_sync: + entry hyp=1 +@@ -189,15 +226,29 @@ hyp_irq: + + guest_sync: + entry hyp=0, compat=0 ++ bl check_pending_vserror ++ /* ++ * If x0 is Non-zero, a vSError took place, the initial exception ++ * doesn't have any significance to be handled. Exit ASAP ++ */ ++ cbnz x0, 1f + msr daifclr, #2 + mov x0, sp + bl do_trap_hypervisor ++1: + exit hyp=0, compat=0 + + guest_irq: + entry hyp=0, compat=0 ++ bl check_pending_vserror ++ /* ++ * If x0 is Non-zero, a vSError took place, the initial exception ++ * doesn't have any significance to be handled. Exit ASAP ++ */ ++ cbnz x0, 1f + mov x0, sp + bl do_trap_irq ++1: + exit hyp=0, compat=0 + + guest_fiq_invalid: +@@ -213,15 +264,29 @@ guest_error: + + guest_sync_compat: + entry hyp=0, compat=1 ++ bl check_pending_vserror ++ /* ++ * If x0 is Non-zero, a vSError took place, the initial exception ++ * doesn't have any significance to be handled. Exit ASAP ++ */ ++ cbnz x0, 1f + msr daifclr, #2 + mov x0, sp + bl do_trap_hypervisor ++1: + exit hyp=0, compat=1 + + guest_irq_compat: + entry hyp=0, compat=1 ++ bl check_pending_vserror ++ /* ++ * If x0 is Non-zero, a vSError took place, the initial exception ++ * doesn't have any significance to be handled. Exit ASAP ++ */ ++ cbnz x0, 1f + mov x0, sp + bl do_trap_irq ++1: + exit hyp=0, compat=1 + + guest_fiq_invalid_compat: +@@ -270,6 +335,62 @@ return_from_trap: + eret + + /* ++ * This function is used to check pending virtual SError in the gap of ++ * EL1 -> EL2 world switch. ++ * The x0 register will be used to indicate the results of detection. ++ * x0 -- Non-zero indicates a pending virtual SError took place. ++ * x0 -- Zero indicates no pending virtual SError took place. ++ */ ++check_pending_vserror: ++ /* ++ * Save elr_el2 to check whether the pending SError exception takes ++ * place while we are doing this sync exception. ++ */ ++ mrs x0, elr_el2 ++ ++ /* Synchronize against in-flight ld/st */ ++ dsb sy ++ ++ /* ++ * Unmask PSTATE asynchronous abort bit. If there is a pending ++ * SError, the EL2 error exception will happen after PSTATE.A ++ * is cleared. ++ */ ++ msr daifclr, #4 ++ ++ /* ++ * This is our single instruction exception window. A pending ++ * SError is guaranteed to occur at the earliest when we unmask ++ * it, and at the latest just after the ISB. 
++ * ++ * If a pending SError occurs, the program will jump to EL2 error ++ * exception handler, and the elr_el2 will be set to ++ * abort_guest_exit_start or abort_guest_exit_end. ++ */ ++abort_guest_exit_start: ++ ++ isb ++ ++abort_guest_exit_end: ++ /* Mask PSTATE asynchronous abort bit, close the checking window. */ ++ msr daifset, #4 ++ ++ /* ++ * Compare elr_el2 and the saved value to check whether we are ++ * returning from a valid exception caused by pending SError. ++ */ ++ mrs x1, elr_el2 ++ cmp x0, x1 ++ ++ /* ++ * Not equal, the pending SError exception took place, set ++ * x0 to non-zero. ++ */ ++ cset x0, ne ++ ++ ret ++ ++/* + * Exception vectors. + */ + .macro ventry label +@@ -287,7 +408,7 @@ ENTRY(hyp_traps_vector) + ventry hyp_sync // Synchronous EL2h + ventry hyp_irq // IRQ EL2h + ventry hyp_fiq_invalid // FIQ EL2h +- ventry hyp_error_invalid // Error EL2h ++ ventry hyp_error // Error EL2h + + ventry guest_sync // Synchronous 64-bit EL0/EL1 + ventry guest_irq // IRQ 64-bit EL0/EL1 diff --git a/system/xen/xsa/xsa201-3-4.7.patch b/system/xen/xsa/xsa201-3-4.7.patch new file mode 100644 index 0000000000..af7fc3703e --- /dev/null +++ b/system/xen/xsa/xsa201-3-4.7.patch @@ -0,0 +1,47 @@ +From: Wei Chen <Wei.Chen@arm.com> +Subject: arm: crash the guest when it traps on external abort + +If we spot a data or prefetch abort bearing the ESR_EL2.EA bit set, we +know that this is an external abort, and that should crash the guest. + +This is CVE-2016-9817, part of XSA-201. + +Signed-off-by: Wei Chen <Wei.Chen@arm.com> +Reviewed-by: Stefano Stabellini <sstabellini@kernel.org> +Reviewed-by: Steve Capper <steve.capper@arm.com> +Reviewed-by: Julien Grall <Julien.Grall@arm.com> + +--- a/xen/arch/arm/traps.c ++++ b/xen/arch/arm/traps.c +@@ -2383,6 +2383,15 @@ static void do_trap_instr_abort_guest(struct cpu_user_regs *regs, + int rc; + register_t gva = READ_SYSREG(FAR_EL2); + ++ /* ++ * If this bit has been set, it means that this instruction abort is caused ++ * by a guest external abort. Currently we crash the guest to protect the ++ * hypervisor. In future one can better handle this by injecting a virtual ++ * abort to the guest. ++ */ ++ if ( hsr.iabt.eat ) ++ domain_crash_synchronous(); ++ + switch ( hsr.iabt.ifsc & 0x3f ) + { + case FSC_FLT_PERM ... FSC_FLT_PERM + 3: +@@ -2437,6 +2446,15 @@ static void do_trap_data_abort_guest(struct cpu_user_regs *regs, + return; + } + ++ /* ++ * If this bit has been set, it means that this data abort is caused ++ * by a guest external abort. Currently we crash the guest to protect the ++ * hypervisor. In future one can better handle this by injecting a virtual ++ * abort to the guest. ++ */ ++ if ( dabt.eat ) ++ domain_crash_synchronous(); ++ + info.dabt = dabt; + #ifdef CONFIG_ARM_32 + info.gva = READ_CP32(HDFAR); diff --git a/system/xen/xsa/xsa201-4.patch b/system/xen/xsa/xsa201-4.patch new file mode 100644 index 0000000000..8060a5be13 --- /dev/null +++ b/system/xen/xsa/xsa201-4.patch @@ -0,0 +1,130 @@ +From: Wei Chen <Wei.Chen@arm.com> +Subject: arm32: handle async aborts delivered while at HYP + +If guest generates an asynchronous abort and then traps into HYP +(by HVC or IRQ) before the abort has been delivered, the hypervisor +could not catch it, because the PSTATE.A bit is masked all the time +in hypervisor. So this asynchronous abort may be slipped to next +running guest with PSTATE.A bit unmasked. + +In order to avoid this, it is necessary to take the abort at HYP, by +clearing the PSTATE.A bit. 
In this patch, we unmask the PSTATE.A bit +to open a window to catch guest-generated asynchronous abort in all +Guest -> HYP switch paths. If we caught such asynchronous abort in +checking window, the HYP data abort exception will be triggered and +the abort source guest will be crashed. + +This is CVE-2016-9818, part of XSA-201. + +Signed-off-by: Wei Chen <Wei.Chen@arm.com> +Reviewed-by: Julien Grall <julien.grall@arm.com> + +--- a/xen/arch/arm/arm32/entry.S ++++ b/xen/arch/arm/arm32/entry.S +@@ -42,6 +42,61 @@ save_guest_regs: + SAVE_BANKED(fiq) + SAVE_ONE_BANKED(R8_fiq); SAVE_ONE_BANKED(R9_fiq); SAVE_ONE_BANKED(R10_fiq) + SAVE_ONE_BANKED(R11_fiq); SAVE_ONE_BANKED(R12_fiq); ++ /* ++ * Start to check pending virtual abort in the gap of Guest -> HYP ++ * world switch. ++ * ++ * Save ELR_hyp to check whether the pending virtual abort exception ++ * takes place while we are doing this trap exception. ++ */ ++ mrs r1, ELR_hyp ++ ++ /* ++ * Force loads and stores to complete before unmasking asynchronous ++ * aborts and forcing the delivery of the exception. ++ */ ++ dsb sy ++ ++ /* ++ * Unmask asynchronous abort bit. If there is a pending asynchronous ++ * abort, the data_abort exception will happen after A bit is cleared. ++ */ ++ cpsie a ++ ++ /* ++ * This is our single instruction exception window. A pending ++ * asynchronous abort is guaranteed to occur at the earliest when we ++ * unmask it, and at the latest just after the ISB. ++ * ++ * If a pending abort occurs, the program will jump to data_abort ++ * exception handler, and the ELR_hyp will be set to ++ * abort_guest_exit_start or abort_guest_exit_end. ++ */ ++ .global abort_guest_exit_start ++abort_guest_exit_start: ++ ++ isb ++ ++ .global abort_guest_exit_end ++abort_guest_exit_end: ++ /* Mask CPSR asynchronous abort bit, close the checking window. */ ++ cpsid a ++ ++ /* ++ * Compare ELR_hyp and the saved value to check whether we are ++ * returning from a valid exception caused by pending virtual ++ * abort. ++ */ ++ mrs r2, ELR_hyp ++ cmp r1, r2 ++ ++ /* ++ * Not equal, the pending virtual abort exception took place, the ++ * initial exception does not have any significance to be handled. ++ * Exit ASAP. ++ */ ++ bne return_from_trap ++ + mov pc, lr + + #define DEFINE_TRAP_ENTRY(trap) \ +--- a/xen/arch/arm/arm32/traps.c ++++ b/xen/arch/arm/arm32/traps.c +@@ -63,7 +63,10 @@ asmlinkage void do_trap_prefetch_abort(struct cpu_user_regs *regs) + + asmlinkage void do_trap_data_abort(struct cpu_user_regs *regs) + { +- do_unexpected_trap("Data Abort", regs); ++ if ( VABORT_GEN_BY_GUEST(regs) ) ++ do_trap_guest_error(regs); ++ else ++ do_unexpected_trap("Data Abort", regs); + } + + /* +--- a/xen/include/asm-arm/arm32/processor.h ++++ b/xen/include/asm-arm/arm32/processor.h +@@ -55,6 +55,17 @@ struct cpu_user_regs + + uint32_t pad1; /* Doubleword-align the user half of the frame */ + }; ++ ++/* Functions for pending virtual abort checking window. 
*/ ++void abort_guest_exit_start(void); ++void abort_guest_exit_end(void); ++ ++#define VABORT_GEN_BY_GUEST(r) \ ++( \ ++ ( (unsigned long)abort_guest_exit_start == (r)->pc ) || \ ++ ( (unsigned long)abort_guest_exit_end == (r)->pc ) \ ++) ++ + #endif + + /* Layout as used in assembly, with src/dest registers mixed in */ +--- a/xen/include/asm-arm/processor.h ++++ b/xen/include/asm-arm/processor.h +@@ -690,6 +690,8 @@ void vcpu_regs_user_to_hyp(struct vcpu *vcpu, + int call_smc(register_t function_id, register_t arg0, register_t arg1, + register_t arg2); + ++void do_trap_guest_error(struct cpu_user_regs *regs); ++ + #endif /* __ASSEMBLY__ */ + #endif /* __ASM_ARM_PROCESSOR_H */ + /* |
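The C side of the arm32 fix classifies a data abort as guest-generated by comparing the saved PC against the two labels that bracket the single-instruction checking window. A self-contained sketch of that classification, with dummy C functions standing in for the assembly labels:

```c
#include <stdio.h>

struct cpu_user_regs { unsigned long pc; };

/* Dummy stand-ins for the assembly labels that bracket the one-instruction
 * checking window (in Xen they are symbols exported from arm32/entry.S). */
static void abort_guest_exit_start(void) { }
static void abort_guest_exit_end(void)   { }

/* Same shape as the patch's VABORT_GEN_BY_GUEST(): the abort is attributed
 * to the guest iff it was taken at one of the two window boundaries. */
#define VABORT_GEN_BY_GUEST(r)                                    \
    ( (unsigned long)abort_guest_exit_start == (r)->pc ||         \
      (unsigned long)abort_guest_exit_end   == (r)->pc )

static void do_trap_guest_error(struct cpu_user_regs *regs)
{
    (void)regs;
    puts("abort taken in the checking window: crash the guest");
}

static void do_unexpected_trap(const char *what)
{
    printf("unexpected %s in hypervisor context: panic\n", what);
}

/* Mirrors the patched do_trap_data_abort() dispatch. */
static void do_trap_data_abort(struct cpu_user_regs *regs)
{
    if ( VABORT_GEN_BY_GUEST(regs) )
        do_trap_guest_error(regs);
    else
        do_unexpected_trap("Data Abort");
}

int main(void)
{
    struct cpu_user_regs in_window = { .pc = (unsigned long)abort_guest_exit_start };
    struct cpu_user_regs elsewhere = { .pc = 0x1234 };
    do_trap_data_abort(&in_window);
    do_trap_data_abort(&elsewhere);
    return 0;
}
```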