Diffstat (limited to 'system/xen/xsa/xsa245-0001-xen-page_alloc-Cover-memory-unreserved-after-boot-in.patch')
-rw-r--r--  system/xen/xsa/xsa245-0001-xen-page_alloc-Cover-memory-unreserved-after-boot-in.patch | 48
1 file changed, 48 insertions, 0 deletions
diff --git a/system/xen/xsa/xsa245-0001-xen-page_alloc-Cover-memory-unreserved-after-boot-in.patch b/system/xen/xsa/xsa245-0001-xen-page_alloc-Cover-memory-unreserved-after-boot-in.patch
new file mode 100644
index 0000000000..2047686903
--- /dev/null
+++ b/system/xen/xsa/xsa245-0001-xen-page_alloc-Cover-memory-unreserved-after-boot-in.patch
@@ -0,0 +1,48 @@
+From a48d47febc1340f27d6c716545692641a09b414c Mon Sep 17 00:00:00 2001
+From: Julien Grall <julien.grall@arm.com>
+Date: Thu, 21 Sep 2017 14:13:08 +0100
+Subject: [PATCH 1/2] xen/page_alloc: Cover memory unreserved after boot in
+ first_valid_mfn
+
+On Arm, some regions (e.g. the initramfs, the Dom0 kernel...) are marked
+as reserved until the hardware domain is built and they are copied into
+its memory. Therefore, they will not be added to the boot allocator via
+init_boot_pages.
+
+Instead, init_xenheap_pages will be called once the regions are no
+longer used.
+
+Update first_valid_mfn in both init_heap_pages and init_boot_pages
+(where it is already updated) to cover all the cases.
+
+Signed-off-by: Julien Grall <julien.grall@arm.com>
+[Adjust comment, added locking around first_valid_mfn update]
+Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+---
+ xen/common/page_alloc.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
+index 0b9f6cc6df..fbe5a8af39 100644
+--- a/xen/common/page_alloc.c
++++ b/xen/common/page_alloc.c
+@@ -1700,6 +1700,16 @@ static void init_heap_pages(
+ {
+ unsigned long i;
+
++ /*
++ * Some pages may not go through the boot allocator (e.g. memory
++ * reserved at boot but released just afterwards: kernel, initramfs,
++ * etc.).
++ * Update first_valid_mfn to ensure those regions are covered.
++ */
++ spin_lock(&heap_lock);
++ first_valid_mfn = min_t(unsigned long, page_to_mfn(pg), first_valid_mfn);
++ spin_unlock(&heap_lock);
++
+ for ( i = 0; i < nr_pages; i++ )
+ {
+ unsigned int nid = phys_to_nid(page_to_maddr(pg+i));
+--
+2.11.0
+
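For context, here is a minimal, runnable C sketch (not Xen code) of the
technique the hunk applies: every path that hands memory to the heap
records the lowest starting MFN it has seen, so regions released only
after boot (kernel, initramfs, etc.) are still covered. The helper name
track_first_valid_mfn and the main() driver are illustrative stand-ins.

    #include <stdio.h>

    /* Lowest machine frame number handed to the heap so far; starts at
     * "no valid MFN yet", i.e. the maximum representable value. */
    static unsigned long first_valid_mfn = ~0UL;

    /* Simplified stand-in for Xen's min_t(type, a, b) macro. */
    #define min_t(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

    /* Called for every region given to the heap, whether it went through
     * the boot allocator or was only released after boot. */
    static void track_first_valid_mfn(unsigned long start_mfn)
    {
        first_valid_mfn = min_t(unsigned long, start_mfn, first_valid_mfn);
    }

    int main(void)
    {
        track_first_valid_mfn(0x200UL); /* region seen by the boot allocator */
        track_first_valid_mfn(0x80UL);  /* region released only after boot */
        printf("first_valid_mfn = %#lx\n", first_valid_mfn); /* prints 0x80 */
        return 0;
    }

In the real patch the update runs inside init_heap_pages under heap_lock
(locking added by Boris Ostrovsky, per the sign-off note); the sketch is
single-threaded, so no lock is shown.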