summaryrefslogtreecommitdiff
path: root/system/xen/xsa/xsa245-0001-xen-page_alloc-Cover-memory-unreserved-after-boot-in.patch
diff options
context:
space:
mode:
authorMario Preksavec <mario@slackware.hr>2017-10-20 20:56:55 +0200
committerWilly Sudiarto Raharjo <willysr@slackbuilds.org>2017-10-21 07:08:14 +0700
commit139c45ee8aed136d55ae25517e67cd103978c9c3 (patch)
tree0c1f8b0bb7353039941d468a024722add35b2d17 /system/xen/xsa/xsa245-0001-xen-page_alloc-Cover-memory-unreserved-after-boot-in.patch
parentd7ebd09fcd7f59ba2cb12f45eecff627aec49860 (diff)
downloadslackbuilds-139c45ee8aed136d55ae25517e67cd103978c9c3.tar.gz
system/xen: XSA 237-245 update.
Signed-off-by: Mario Preksavec <mario@slackware.hr>
Diffstat (limited to 'system/xen/xsa/xsa245-0001-xen-page_alloc-Cover-memory-unreserved-after-boot-in.patch')
-rw-r--r--system/xen/xsa/xsa245-0001-xen-page_alloc-Cover-memory-unreserved-after-boot-in.patch48
1 files changed, 48 insertions, 0 deletions
diff --git a/system/xen/xsa/xsa245-0001-xen-page_alloc-Cover-memory-unreserved-after-boot-in.patch b/system/xen/xsa/xsa245-0001-xen-page_alloc-Cover-memory-unreserved-after-boot-in.patch
new file mode 100644
index 0000000000..2047686903
--- /dev/null
+++ b/system/xen/xsa/xsa245-0001-xen-page_alloc-Cover-memory-unreserved-after-boot-in.patch
@@ -0,0 +1,48 @@
+From a48d47febc1340f27d6c716545692641a09b414c Mon Sep 17 00:00:00 2001
+From: Julien Grall <julien.grall@arm.com>
+Date: Thu, 21 Sep 2017 14:13:08 +0100
+Subject: [PATCH 1/2] xen/page_alloc: Cover memory unreserved after boot in
+ first_valid_mfn
+
+On Arm, some regions (e.g. Initramfs, Dom0 Kernel...) are marked as
+reserved until the hardware domain is built and they are copied into its
+memory. Therefore, they will not be added in the boot allocator via
+init_boot_pages.
+
+Instead, init_xenheap_pages will be called once the regions are not used
+anymore.
+
+Update first_valid_mfn in both init_heap_pages and init_boot_pages
+(already exists) to cover all the cases.
+
+Signed-off-by: Julien Grall <julien.grall@arm.com>
+[Adjust comment, added locking around first_valid_mfn update]
+Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+---
+ xen/common/page_alloc.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
+index 0b9f6cc6df..fbe5a8af39 100644
+--- a/xen/common/page_alloc.c
++++ b/xen/common/page_alloc.c
+@@ -1700,6 +1700,16 @@ static void init_heap_pages(
+ {
+ unsigned long i;
+
++ /*
++ * Some pages may not go through the boot allocator (e.g reserved
++ * memory at boot but released just after --- kernel, initramfs,
++ * etc.).
++ * Update first_valid_mfn to ensure those regions are covered.
++ */
++ spin_lock(&heap_lock);
++ first_valid_mfn = min_t(unsigned long, page_to_mfn(pg), first_valid_mfn);
++ spin_unlock(&heap_lock);
++
+ for ( i = 0; i < nr_pages; i++ )
+ {
+ unsigned int nid = phys_to_nid(page_to_maddr(pg+i));
+--
+2.11.0
+