Diffstat (limited to 'system/xen/xsa/xsa192.patch')
-rw-r--r--  system/xen/xsa/xsa192.patch | 64
1 file changed, 64 insertions(+), 0 deletions(-)
diff --git a/system/xen/xsa/xsa192.patch b/system/xen/xsa/xsa192.patch
new file mode 100644
index 0000000000..b573a132c9
--- /dev/null
+++ b/system/xen/xsa/xsa192.patch
@@ -0,0 +1,64 @@
+From: Jan Beulich <jbeulich@suse.com>
+Subject: x86/HVM: don't load LDTR with VM86 mode attrs during task switch
+
+Just like TR, LDTR is purely a protected mode facility and hence needs
+to be loaded accordingly. Also move its loading to where it
+architecturally belongs.
+
+This is XSA-192.
+
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Tested-by: Andrew Cooper <andrew.cooper3@citrix.com>
+
+--- a/xen/arch/x86/hvm/hvm.c
++++ b/xen/arch/x86/hvm/hvm.c
+@@ -2728,17 +2728,16 @@ static void hvm_unmap_entry(void *p)
+ }
+
+ static int hvm_load_segment_selector(
+-    enum x86_segment seg, uint16_t sel)
++    enum x86_segment seg, uint16_t sel, unsigned int eflags)
+ {
+     struct segment_register desctab, cs, segr;
+     struct desc_struct *pdesc, desc;
+     u8 dpl, rpl, cpl;
+     bool_t writable;
+     int fault_type = TRAP_invalid_tss;
+-    struct cpu_user_regs *regs = guest_cpu_user_regs();
+     struct vcpu *v = current;
+
+-    if ( regs->eflags & X86_EFLAGS_VM )
++    if ( eflags & X86_EFLAGS_VM )
+     {
+         segr.sel = sel;
+         segr.base = (uint32_t)sel << 4;
+@@ -2986,6 +2985,8 @@ void hvm_task_switch(
+     if ( rc != HVMCOPY_okay )
+         goto out;
+
++    if ( hvm_load_segment_selector(x86_seg_ldtr, tss.ldt, 0) )
++        goto out;
+
+     if ( hvm_set_cr3(tss.cr3, 1) )
+         goto out;
+@@ -3008,13 +3009,12 @@ void hvm_task_switch(
+     }
+
+     exn_raised = 0;
+-    if ( hvm_load_segment_selector(x86_seg_ldtr, tss.ldt) ||
+-         hvm_load_segment_selector(x86_seg_es, tss.es) ||
+-         hvm_load_segment_selector(x86_seg_cs, tss.cs) ||
+-         hvm_load_segment_selector(x86_seg_ss, tss.ss) ||
+-         hvm_load_segment_selector(x86_seg_ds, tss.ds) ||
+-         hvm_load_segment_selector(x86_seg_fs, tss.fs) ||
+-         hvm_load_segment_selector(x86_seg_gs, tss.gs) )
++    if ( hvm_load_segment_selector(x86_seg_es, tss.es, tss.eflags) ||
++         hvm_load_segment_selector(x86_seg_cs, tss.cs, tss.eflags) ||
++         hvm_load_segment_selector(x86_seg_ss, tss.ss, tss.eflags) ||
++         hvm_load_segment_selector(x86_seg_ds, tss.ds, tss.eflags) ||
++         hvm_load_segment_selector(x86_seg_fs, tss.fs, tss.eflags) ||
++         hvm_load_segment_selector(x86_seg_gs, tss.gs, tss.eflags) )
+         exn_raised = 1;
+
+     rc = hvm_copy_to_guest_virt(
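
For context, a minimal standalone sketch of why the new eflags parameter matters. This is not the Xen code itself: the struct layout, function names, and the main() driver are illustrative, though the base/limit/attr values mirror the VM86 shortcut visible in the first hunk. The point of the fix is that LDTR only exists in protected mode, so the task-switch path now passes a literal 0 for eflags, forcing the protected-mode descriptor walk even when the incoming task is a VM86 task.

/*
 * Illustrative sketch (not Xen itself) of the VM86 shortcut in
 * hvm_load_segment_selector().  Names are hypothetical; the values
 * set on the VM86 path follow the real-mode shortcut above.
 */
#include <stdint.h>
#include <stdio.h>

#define X86_EFLAGS_VM (1u << 17)

struct segment_register {
    uint16_t sel;
    uint32_t base;
    uint32_t limit;
    uint16_t attr;
};

/* With the VM flag set, synthesise a real-mode segment from the
 * selector alone instead of walking the GDT/LDT.  Correct for
 * CS/DS/... while emulating VM86, never valid for LDTR. */
static int load_segment_selector(struct segment_register *segr,
                                 uint16_t sel, unsigned int eflags)
{
    if ( eflags & X86_EFLAGS_VM )
    {
        segr->sel   = sel;
        segr->base  = (uint32_t)sel << 4;
        segr->limit = 0xffff;
        segr->attr  = 0xf3;   /* DPL3 r/w data -- bogus attrs for LDTR */
        return 0;
    }
    /* Protected-mode path: descriptor lookup and validation, elided
     * in this sketch. */
    segr->sel  = sel;
    segr->attr = 0x82;        /* present, type 2 = LDT system segment */
    return 0;
}

int main(void)
{
    struct segment_register ldtr = { 0 };

    /* Pre-XSA-192: the guest-controlled VM flag reached the LDTR load. */
    load_segment_selector(&ldtr, 0x0008, X86_EFLAGS_VM);
    printf("VM86 path:      LDTR attr = %#x (invalid for LDTR)\n", ldtr.attr);

    /* Post-patch: LDTR is always loaded with eflags == 0. */
    load_segment_selector(&ldtr, 0x0008, 0);
    printf("protected path: LDTR attr = %#x\n", ldtr.attr);
    return 0;
}

This also explains the reordering in the second and third hunks: LDTR is now loaded once, early, where the task switch architecturally loads it, while ES/CS/SS/DS/FS/GS still honour tss.eflags so a switch into a VM86 task keeps working.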