author    Mario Preksavec  2016-07-29 00:37:50 +0200
committer Willy Sudiarto Raharjo  2016-07-30 03:28:21 +0200
commit    368a721cfdc418228722722fc6d7f5d9e7c6f908865 (patch)
tree      dbbb84ea9b4f813d5ea614233042589e1dcdb9be /system/xen/xsa
parent    df27e59d4aeec89f8cca4677b3004690d2bd105f (diff)
download  slackbuilds-368a721cfdc418228722fc6d7f5d9e7c6f908865.tar.gz
system/xen: Additional features, patches and script fixes.
Signed-off-by: Mario Preksavec <mario@slackware.hr>
Diffstat (limited to 'system/xen/xsa')
-rw-r--r--  system/xen/xsa/xsa182-4.6.patch           102
-rw-r--r--  system/xen/xsa/xsa183-4.6.patch            75
-rw-r--r--  system/xen/xsa/xsa184-qemut-master.patch    43
-rw-r--r--  system/xen/xsa/xsa184-qemuu-master.patch    43
4 files changed, 263 insertions, 0 deletions
diff --git a/system/xen/xsa/xsa182-4.6.patch b/system/xen/xsa/xsa182-4.6.patch
new file mode 100644
index 0000000000..be2047d688
--- /dev/null
+++ b/system/xen/xsa/xsa182-4.6.patch
@@ -0,0 +1,102 @@
+From f48a75b0c10ac79b287ca2b580ecb9ea2f696607 Mon Sep 17 00:00:00 2001
+From: Andrew Cooper <andrew.cooper3@citrix.com>
+Date: Mon, 11 Jul 2016 14:32:03 +0100
+Subject: [PATCH] x86/pv: Remove unsafe bits from the mod_l?_entry() fastpath
+
+All changes in writeability and cacheability must go through full
+re-validation.
+
+Rework the logic as a whitelist, to make it clearer to follow.
+
+This is XSA-182
+
+Reported-by: Jérémie Boutoille <jboutoille@ext.quarkslab.com>
+Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Reviewed-by: Tim Deegan <tim@xen.org>
+---
+ xen/arch/x86/mm.c | 28 ++++++++++++++++------------
+ xen/include/asm-x86/page.h | 1 +
+ 2 files changed, 17 insertions(+), 12 deletions(-)
+
+diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
+index daf02ab..8dd22b8 100644
+--- a/xen/arch/x86/mm.c
++++ b/xen/arch/x86/mm.c
+@@ -1780,6 +1780,14 @@ static inline int update_intpte(intpte_t *p,
+ _t ## e_get_intpte(_o), _t ## e_get_intpte(_n), \
+ (_m), (_v), (_ad))
+
++/*
++ * PTE flags that a guest may change without re-validating the PTE.
++ * All other bits affect translation, caching, or Xen's safety.
++ */
++#define FASTPATH_FLAG_WHITELIST \
++ (_PAGE_NX_BIT | _PAGE_AVAIL_HIGH | _PAGE_AVAIL | _PAGE_GLOBAL | \
++ _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_USER)
++
+ /* Update the L1 entry at pl1e to new value nl1e. */
+ static int mod_l1_entry(l1_pgentry_t *pl1e, l1_pgentry_t nl1e,
+ unsigned long gl1mfn, int preserve_ad,
+@@ -1820,9 +1828,8 @@ static int mod_l1_entry(l1_pgentry_t *pl1e, l1_pgentry_t nl1e,
+ return -EINVAL;
+ }
+
+- /* Fast path for identical mapping, r/w, presence, and cachability. */
+- if ( !l1e_has_changed(ol1e, nl1e,
+- PAGE_CACHE_ATTRS | _PAGE_RW | _PAGE_PRESENT) )
++ /* Fast path for sufficiently-similar mappings. */
++ if ( !l1e_has_changed(ol1e, nl1e, ~FASTPATH_FLAG_WHITELIST) )
+ {
+ adjust_guest_l1e(nl1e, pt_dom);
+ if ( UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, pt_vcpu,
+@@ -1904,11 +1911,8 @@ static int mod_l2_entry(l2_pgentry_t *pl2e,
+ return -EINVAL;
+ }
+
+- /* Fast path for identical mapping and presence. */
+- if ( !l2e_has_changed(ol2e, nl2e,
+- unlikely(opt_allow_superpage)
+- ? _PAGE_PSE | _PAGE_RW | _PAGE_PRESENT
+- : _PAGE_PRESENT) )
++ /* Fast path for sufficiently-similar mappings. */
++ if ( !l2e_has_changed(ol2e, nl2e, ~FASTPATH_FLAG_WHITELIST) )
+ {
+ adjust_guest_l2e(nl2e, d);
+ if ( UPDATE_ENTRY(l2, pl2e, ol2e, nl2e, pfn, vcpu, preserve_ad) )
+@@ -1973,8 +1977,8 @@ static int mod_l3_entry(l3_pgentry_t *pl3e,
+ return -EINVAL;
+ }
+
+- /* Fast path for identical mapping and presence. */
+- if ( !l3e_has_changed(ol3e, nl3e, _PAGE_PRESENT) )
++ /* Fast path for sufficiently-similar mappings. */
++ if ( !l3e_has_changed(ol3e, nl3e, ~FASTPATH_FLAG_WHITELIST) )
+ {
+ adjust_guest_l3e(nl3e, d);
+ rc = UPDATE_ENTRY(l3, pl3e, ol3e, nl3e, pfn, vcpu, preserve_ad);
+@@ -2037,8 +2041,8 @@ static int mod_l4_entry(l4_pgentry_t *pl4e,
+ return -EINVAL;
+ }
+
+- /* Fast path for identical mapping and presence. */
+- if ( !l4e_has_changed(ol4e, nl4e, _PAGE_PRESENT) )
++ /* Fast path for sufficiently-similar mappings. */
++ if ( !l4e_has_changed(ol4e, nl4e, ~FASTPATH_FLAG_WHITELIST) )
+ {
+ adjust_guest_l4e(nl4e, d);
+ rc = UPDATE_ENTRY(l4, pl4e, ol4e, nl4e, pfn, vcpu, preserve_ad);
+diff --git a/xen/include/asm-x86/page.h b/xen/include/asm-x86/page.h
+index 66b611c..1a59ed8 100644
+--- a/xen/include/asm-x86/page.h
++++ b/xen/include/asm-x86/page.h
+@@ -311,6 +311,7 @@ void efi_update_l4_pgtable(unsigned int l4idx, l4_pgentry_t);
+ #define _PAGE_AVAIL2 _AC(0x800,U)
+ #define _PAGE_AVAIL _AC(0xE00,U)
+ #define _PAGE_PSE_PAT _AC(0x1000,U)
++#define _PAGE_AVAIL_HIGH (_AC(0x7ff, U) << 12)
+ #define _PAGE_NX (cpu_has_nx ? _PAGE_NX_BIT : 0)
+ /* non-architectural flags */
+ #define _PAGE_PAGED 0x2000U
+--
+2.1.4
+
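For context beyond the patch text: l?e_has_changed() masks the XOR of the old and new entries, so passing the complement of FASTPATH_FLAG_WHITELIST reports a change only when some bit outside the whitelist differs. A minimal standalone C sketch of that idea, using hypothetical flag values rather than the real Xen definitions:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical flag values, for illustration only. */
    #define FLAG_RW        0x002u
    #define FLAG_ACCESSED  0x020u
    #define FLAG_DIRTY     0x040u

    /* Bits a guest may flip without forcing full re-validation of the entry. */
    #define WHITELIST (FLAG_ACCESSED | FLAG_DIRTY)

    /* Non-zero if any bit covered by 'mask' differs between the two entries. */
    static uint32_t has_changed(uint32_t old_e, uint32_t new_e, uint32_t mask)
    {
        return (old_e ^ new_e) & mask;
    }

    int main(void)
    {
        uint32_t old_e = FLAG_RW;

        /* Only a whitelisted bit changes: fast path (prints 0). */
        printf("%u\n", has_changed(old_e, old_e | FLAG_DIRTY, ~WHITELIST));

        /* Clearing RW touches a non-whitelisted bit: slow path (prints non-zero). */
        printf("%u\n", has_changed(old_e, old_e & ~FLAG_RW, ~WHITELIST));

        return 0;
    }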
diff --git a/system/xen/xsa/xsa183-4.6.patch b/system/xen/xsa/xsa183-4.6.patch
new file mode 100644
index 0000000000..84d70077c8
--- /dev/null
+++ b/system/xen/xsa/xsa183-4.6.patch
@@ -0,0 +1,75 @@
+From 777ebe30e81ab284f9b78392875fe884a593df35 Mon Sep 17 00:00:00 2001
+From: Andrew Cooper <andrew.cooper3@citrix.com>
+Date: Wed, 15 Jun 2016 18:32:14 +0100
+Subject: [PATCH] x86/entry: Avoid SMAP violation in
+ compat_create_bounce_frame()
+
+A 32bit guest kernel might be running on user mappings.
+compat_create_bounce_frame() must whitelist its guest accesses to avoid
+risking a SMAP violation.
+
+For both variants of create_bounce_frame(), re-blacklist user accesses if
+execution exits via an exception table redirection.
+
+This is XSA-183 / CVE-2016-6259
+
+Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Reviewed-by: George Dunlap <george.dunlap@citrix.com>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+---
+v2:
+ * Include CLAC on the exit paths from compat_create_bounce_frame which occur
+ from faults attempting to load %fs
+ * Reposition ASM_STAC to avoid breaking the macro-op fusion of test/jz
+---
+ xen/arch/x86/x86_64/compat/entry.S | 3 +++
+ xen/arch/x86/x86_64/entry.S | 2 ++
+ 2 files changed, 5 insertions(+)
+
+diff --git a/xen/arch/x86/x86_64/compat/entry.S b/xen/arch/x86/x86_64/compat/entry.S
+index 0e3db7c..1eaf4bb 100644
+--- a/xen/arch/x86/x86_64/compat/entry.S
++++ b/xen/arch/x86/x86_64/compat/entry.S
+@@ -350,6 +350,7 @@ ENTRY(compat_int80_direct_trap)
+ compat_create_bounce_frame:
+ ASSERT_INTERRUPTS_ENABLED
+ mov %fs,%edi
++ ASM_STAC
+ testb $2,UREGS_cs+8(%rsp)
+ jz 1f
+ /* Push new frame at registered guest-OS stack base. */
+@@ -403,6 +404,7 @@ UNLIKELY_START(nz, compat_bounce_failsafe)
+ movl %ds,%eax
+ .Lft12: movl %eax,%fs:0*4(%rsi) # DS
+ UNLIKELY_END(compat_bounce_failsafe)
++ ASM_CLAC
+ /* Rewrite our stack frame and return to guest-OS mode. */
+ /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
+ andl $~(X86_EFLAGS_VM|X86_EFLAGS_RF|\
+@@ -448,6 +450,7 @@ compat_crash_page_fault_4:
+ addl $4,%esi
+ compat_crash_page_fault:
+ .Lft14: mov %edi,%fs
++ ASM_CLAC
+ movl %esi,%edi
+ call show_page_walk
+ jmp dom_crash_sync_extable
+diff --git a/xen/arch/x86/x86_64/entry.S b/xen/arch/x86/x86_64/entry.S
+index 6e27508..0c2e63a 100644
+--- a/xen/arch/x86/x86_64/entry.S
++++ b/xen/arch/x86/x86_64/entry.S
+@@ -462,9 +462,11 @@ domain_crash_page_fault_16:
+ domain_crash_page_fault_8:
+ addq $8,%rsi
+ domain_crash_page_fault:
++ ASM_CLAC
+ movq %rsi,%rdi
+ call show_page_walk
+ ENTRY(dom_crash_sync_extable)
++ ASM_CLAC
+ # Get out of the guest-save area of the stack.
+ GET_STACK_BASE(%rax)
+ leaq STACK_CPUINFO_FIELD(guest_cpu_user_regs)(%rax),%rsp
+--
+2.1.4
+
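For context beyond the patch text: with SMAP active, supervisor code faults on any access to user-accessible mappings unless EFLAGS.AC is set; ASM_STAC sets the flag around a deliberate guest access and ASM_CLAC clears it again, and the exception-fixup exit paths need their own CLAC or the flag stays set after a fault. A stubbed C sketch of that bracketing pattern (the real code is assembly in entry.S; the helper names here are hypothetical):

    #include <stdio.h>

    /* Stubs standing in for the real STAC/CLAC instructions (illustration only). */
    static void asm_stac(void) { puts("AC set: guest accesses permitted"); }
    static void asm_clac(void) { puts("AC clear: guest accesses blocked"); }

    /* Hypothetical helper: writes the bounce frame, may fail like a fault
     * redirected through the exception table. */
    static int write_guest_stack(int fail)
    {
        return fail ? -1 : 0;
    }

    static void create_bounce_frame(int fail)
    {
        asm_stac();                      /* whitelist the deliberate guest write */
        if (write_guest_stack(fail)) {
            asm_clac();                  /* fixup path must also re-blacklist */
            puts("crashing domain");
            return;
        }
        asm_clac();                      /* normal exit re-blacklists */
    }

    int main(void)
    {
        create_bounce_frame(0);
        create_bounce_frame(1);
        return 0;
    }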
diff --git a/system/xen/xsa/xsa184-qemut-master.patch b/system/xen/xsa/xsa184-qemut-master.patch
new file mode 100644
index 0000000000..d15167f4ac
--- /dev/null
+++ b/system/xen/xsa/xsa184-qemut-master.patch
@@ -0,0 +1,43 @@
+From 17d8c4e47dfb41cb6778520ff2eab7a11fe12dfd Mon Sep 17 00:00:00 2001
+From: P J P <ppandit@redhat.com>
+Date: Tue, 26 Jul 2016 15:31:59 +0100
+Subject: [PATCH] virtio: error out if guest exceeds virtqueue size
+
+A broken or malicious guest can submit more requests than the virtqueue
+size permits.
+
+The guest can submit requests without bothering to wait for completion
+and is therefore not bound by virtqueue size. This requires reusing
+vring descriptors in more than one request, which is incorrect but
+possible. Processing a request allocates a VirtQueueElement and
+therefore causes unbounded memory allocation controlled by the guest.
+
+Exit with an error if the guest provides more requests than the
+virtqueue size permits. This bounds memory allocation and makes the
+buggy guest visible to the user.
+
+Reported-by: Zhenhao Hong <zhenhaohong@gmail.com>
+Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
+---
+ hw/virtio.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/hw/virtio.c b/hw/virtio.c
+index c26feff..42897bf 100644
+--- a/hw/virtio.c
++++ b/hw/virtio.c
+@@ -421,6 +421,11 @@ int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem)
+ /* When we start there are none of either input nor output. */
+ elem->out_num = elem->in_num = 0;
+
++ if (vq->inuse >= vq->vring.num) {
++ fprintf(stderr, "Virtqueue size exceeded");
++ exit(1);
++ }
++
+ i = head = virtqueue_get_head(vq, vq->last_avail_idx++);
+ do {
+ struct iovec *sg;
+--
+2.1.4
+
diff --git a/system/xen/xsa/xsa184-qemuu-master.patch b/system/xen/xsa/xsa184-qemuu-master.patch
new file mode 100644
index 0000000000..ef96bff80c
--- /dev/null
+++ b/system/xen/xsa/xsa184-qemuu-master.patch
@@ -0,0 +1,43 @@
+From e469db25d6b2e5c71cd15451889226641c53a5cd Mon Sep 17 00:00:00 2001
+From: P J P <ppandit@redhat.com>
+Date: Mon, 25 Jul 2016 17:37:18 +0530
+Subject: [PATCH] virtio: error out if guest exceeds virtqueue size
+
+A broken or malicious guest can submit more requests than the virtqueue
+size permits.
+
+The guest can submit requests without bothering to wait for completion
+and is therefore not bound by virtqueue size. This requires reusing
+vring descriptors in more than one request, which is incorrect but
+possible. Processing a request allocates a VirtQueueElement and
+therefore causes unbounded memory allocation controlled by the guest.
+
+Exit with an error if the guest provides more requests than the
+virtqueue size permits. This bounds memory allocation and makes the
+buggy guest visible to the user.
+
+Reported-by: Zhenhao Hong <zhenhaohong@gmail.com>
+Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
+---
+ hw/virtio/virtio.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
+index d24f775..f8ac0fb 100644
+--- a/hw/virtio/virtio.c
++++ b/hw/virtio/virtio.c
+@@ -483,6 +483,11 @@ int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem)
+
+ max = vq->vring.num;
+
++ if (vq->inuse >= max) {
++ error_report("Virtqueue size exceeded");
++ exit(1);
++ }
++
+ i = head = virtqueue_get_head(vq, vq->last_avail_idx++);
+ if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
+ vring_set_avail_event(vq, vq->last_avail_idx);
+--
+2.1.4
+
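For context beyond the two patches: vq->inuse counts descriptors popped but not yet completed, so a well-behaved guest can never drive it past the ring size, and exceeding it implies descriptor reuse. A minimal standalone C sketch of the same bounded-pop guard, with hypothetical types and names:

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical, simplified ring state for illustration only. */
    struct ring {
        unsigned int num;    /* ring size negotiated with the guest */
        unsigned int inuse;  /* descriptors handed out but not yet completed */
    };

    /* Pop one request; a correct guest never has more than 'num' outstanding. */
    static void ring_pop(struct ring *r)
    {
        if (r->inuse >= r->num) {
            /* Descriptor reuse by the guest: fail loudly instead of allocating
             * an unbounded number of request structures. */
            fprintf(stderr, "Virtqueue size exceeded\n");
            exit(1);
        }
        r->inuse++;
    }

    int main(void)
    {
        struct ring r = { .num = 2, .inuse = 0 };
        ring_pop(&r);
        ring_pop(&r);
        ring_pop(&r);  /* third pop without any completion trips the guard */
        return 0;
    }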