Linux 5.4 and later arm64 kernels changed the kernel VA space
arrangement and introduced 52-bit kernel VAs (merge commit
b333b0ba2346). Support 5.9+ kernels, which export the needed
vmcoreinfo entries, and 5.4+ kernels by best-effort guessing.

However, the following configurations are not supported for now,
because the kernel does not provide the necessary information:
(1) 5.4 <= kernels <= 5.8 and
    - PA_BITS=52 && VA_BITS!=52, or
    - the -x option is used and vabits_actual=52
(2) kernels < 5.4 with CONFIG_ARM64_USER_VA_BITS_52=y

(1) should become supportable with kernel commits bbdbc11804ff and
1d50e5d0c5052, which add the necessary information to vmcoreinfo.
Signed-off-by: Pingfan Liu <piliu@redhat.com>
---
...coreinfo-note-in-proc-kcore-for-mem-.patch | 146 +++++++++++
...Make-use-of-NUMBER-VA_BITS-in-vmcore.patch | 101 ++++++++
...support-flipped-VA-and-52-bit-kernel.patch | 245 ++++++++++++++++++
kexec-tools.spec | 7 +
4 files changed, 499 insertions(+)
create mode 100644 kexec-tools-2.0.21-makedumpfile-PATCH-1-3-Use-vmcoreinfo-note-in-proc-kcore-for-mem-.patch
create mode 100644 kexec-tools-2.0.21-makedumpfile-PATCH-2-3-arm64-Make-use-of-NUMBER-VA_BITS-in-vmcore.patch
create mode 100644 kexec-tools-2.0.21-makedumpfile-PATCH-3-3-arm64-support-flipped-VA-and-52-bit-kernel.patch
diff --git a/kexec-tools-2.0.21-makedumpfile-PATCH-1-3-Use-vmcoreinfo-note-in-proc-kcore-for-mem-.patch b/kexec-tools-2.0.21-makedumpfile-PATCH-1-3-Use-vmcoreinfo-note-in-proc-kcore-for-mem-.patch
new file mode 100644
index 0000000..316d1b4
--- /dev/null
+++ b/kexec-tools-2.0.21-makedumpfile-PATCH-1-3-Use-vmcoreinfo-note-in-proc-kcore-for-mem-.patch
@@ -0,0 +1,146 @@
+From d8b701796f0491f2ac4b06c7a5b795c29399efab Mon Sep 17 00:00:00 2001
+From: Kazuhito Hagio <k-hagio-ab@nec.com>
+Date: Fri, 29 Jan 2021 11:40:23 +0900
+Subject: [PATCH 1/3] [PATCH 1/3] Use vmcoreinfo note in /proc/kcore for
+ --mem-usage option
+
+kernel commit 23c85094fe18 added vmcoreinfo note to /proc/kcore.
+Use the vmcoreinfo note to get necessary information, especially
+page_offset and phys_base on arm64, for the --mem-usage option.
+
+Signed-off-by: Kazuhito Hagio <k-hagio-ab@nec.com>
+---
+ elf_info.c | 49 -------------------------------------------------
+ elf_info.h | 1 -
+ makedumpfile.c | 26 +++++++++++++++++++++-----
+ 3 files changed, 21 insertions(+), 55 deletions(-)
+
+diff --git a/makedumpfile-1.6.8/elf_info.c b/makedumpfile-1.6.8/elf_info.c
+index a6624b5..e8affb7 100644
+--- a/makedumpfile-1.6.8/elf_info.c
++++ b/makedumpfile-1.6.8/elf_info.c
+@@ -698,55 +698,6 @@ get_elf32_ehdr(int fd, char *filename, Elf32_Ehdr *ehdr)
+ return TRUE;
+ }
+
+-int
+-get_elf_loads(int fd, char *filename)
+-{
+- int i, j, phnum, elf_format;
+- Elf64_Phdr phdr;
+-
+- /*
+- * Check ELF64 or ELF32.
+- */
+- elf_format = check_elf_format(fd, filename, &phnum, &num_pt_loads);
+- if (elf_format == ELF64)
+- flags_memory |= MEMORY_ELF64;
+- else if (elf_format != ELF32)
+- return FALSE;
+-
+- if (!num_pt_loads) {
+- ERRMSG("Can't get the number of PT_LOAD.\n");
+- return FALSE;
+- }
+-
+- /*
+- * The below file information will be used as /proc/vmcore.
+- */
+- fd_memory = fd;
+- name_memory = filename;
+-
+- pt_loads = calloc(sizeof(struct pt_load_segment), num_pt_loads);
+- if (pt_loads == NULL) {
+- ERRMSG("Can't allocate memory for the PT_LOAD. %s\n",
+- strerror(errno));
+- return FALSE;
+- }
+- for (i = 0, j = 0; i < phnum; i++) {
+- if (!get_phdr_memory(i, &phdr))
+- return FALSE;
+-
+- if (phdr.p_type != PT_LOAD)
+- continue;
+-
+- if (j >= num_pt_loads)
+- return FALSE;
+- if (!dump_Elf_load(&phdr, j))
+- return FALSE;
+- j++;
+- }
+-
+- return TRUE;
+-}
+-
+ static int exclude_segment(struct pt_load_segment **pt_loads,
+ unsigned int *num_pt_loads, uint64_t start, uint64_t end)
+ {
+diff --git a/makedumpfile-1.6.8/elf_info.h b/makedumpfile-1.6.8/elf_info.h
+index d9b5d05..d5416b3 100644
+--- a/makedumpfile-1.6.8/elf_info.h
++++ b/makedumpfile-1.6.8/elf_info.h
+@@ -44,7 +44,6 @@ int get_elf64_ehdr(int fd, char *filename, Elf64_Ehdr *ehdr);
+ int get_elf32_ehdr(int fd, char *filename, Elf32_Ehdr *ehdr);
+ int get_elf_info(int fd, char *filename);
+ void free_elf_info(void);
+-int get_elf_loads(int fd, char *filename);
+ int set_kcore_vmcoreinfo(uint64_t vmcoreinfo_addr, uint64_t vmcoreinfo_len);
+ int get_kcore_dump_loads(void);
+
+diff --git a/makedumpfile-1.6.8/makedumpfile.c b/makedumpfile-1.6.8/makedumpfile.c
+index ba0003a..768eda4 100644
+--- a/makedumpfile-1.6.8/makedumpfile.c
++++ b/makedumpfile-1.6.8/makedumpfile.c
+@@ -11445,6 +11445,7 @@ int show_mem_usage(void)
+ {
+ uint64_t vmcoreinfo_addr, vmcoreinfo_len;
+ struct cycle cycle = {0};
++ int vmcoreinfo = FALSE;
+
+ if (!is_crashkernel_mem_reserved()) {
+ ERRMSG("No memory is reserved for crashkernel!\n");
+@@ -11456,9 +11457,22 @@ int show_mem_usage(void)
+ if (!open_files_for_creating_dumpfile())
+ return FALSE;
+
+- if (!get_elf_loads(info->fd_memory, info->name_memory))
++ if (!get_elf_info(info->fd_memory, info->name_memory))
+ return FALSE;
+
++ /*
++ * /proc/kcore on Linux 4.19 and later kernels have vmcoreinfo note in
++ * NOTE segment. See commit 23c85094fe18.
++ */
++ if (has_vmcoreinfo()) {
++ off_t offset;
++ unsigned long size;
++
++ get_vmcoreinfo(&offset, &size);
++ vmcoreinfo = read_vmcoreinfo_from_vmcore(offset, size, FALSE);
++ DEBUG_MSG("Read vmcoreinfo from NOTE segment: %d\n", vmcoreinfo);
++ }
++
+ if (!get_page_offset())
+ return FALSE;
+
+@@ -11466,11 +11480,13 @@ int show_mem_usage(void)
+ if (!get_phys_base())
+ return FALSE;
+
+- if (!get_sys_kernel_vmcoreinfo(&vmcoreinfo_addr, &vmcoreinfo_len))
+- return FALSE;
++ if (!vmcoreinfo) {
++ if (!get_sys_kernel_vmcoreinfo(&vmcoreinfo_addr, &vmcoreinfo_len))
++ return FALSE;
+
+- if (!set_kcore_vmcoreinfo(vmcoreinfo_addr, vmcoreinfo_len))
+- return FALSE;
++ if (!set_kcore_vmcoreinfo(vmcoreinfo_addr, vmcoreinfo_len))
++ return FALSE;
++ }
+
+ if (!initial())
+ return FALSE;
+--
+2.29.2
+
diff --git a/kexec-tools-2.0.21-makedumpfile-PATCH-2-3-arm64-Make-use-of-NUMBER-VA_BITS-in-vmcore.patch b/kexec-tools-2.0.21-makedumpfile-PATCH-2-3-arm64-Make-use-of-NUMBER-VA_BITS-in-vmcore.patch
new file mode 100644
index 0000000..e29a0c0
--- /dev/null
+++ b/kexec-tools-2.0.21-makedumpfile-PATCH-2-3-arm64-Make-use-of-NUMBER-VA_BITS-in-vmcore.patch
@@ -0,0 +1,101 @@
+From 67d0e1d68f28c567a704fd6b9b8fd696ad3df183 Mon Sep 17 00:00:00 2001
+From: Kazuhito Hagio <k-hagio-ab@nec.com>
+Date: Fri, 29 Jan 2021 11:40:24 +0900
+Subject: [PATCH 2/3] [PATCH 2/3] arm64: Make use of NUMBER(VA_BITS) in
+ vmcoreinfo
+
+Make use of the NUMBER(VA_BITS) in vmcoreinfo, which was added by
+kernel commit 20a166243328 (Linux 4.12 and later kernels), as the
+current way of guessing VA_BITS does not work on Linux 5.4 and
+later kernels.
+
+Signed-off-by: Bhupesh Sharma <bhsharma@redhat.com>
+Signed-off-by: Kazuhito Hagio <k-hagio-ab@nec.com>
+---
+ arch/arm64.c | 63 ++++++++++++++++++++++++++++++++++------------------
+ 1 file changed, 42 insertions(+), 21 deletions(-)
+
+diff --git a/makedumpfile-1.6.8/arch/arm64.c b/makedumpfile-1.6.8/arch/arm64.c
+index 3d7b416..2916b4f 100644
+--- a/makedumpfile-1.6.8/arch/arm64.c
++++ b/makedumpfile-1.6.8/arch/arm64.c
+@@ -345,6 +345,43 @@ get_stext_symbol(void)
+ return(found ? kallsym : FALSE);
+ }
+
++static int
++get_va_bits_from_stext_arm64(void)
++{
++ ulong _stext;
++
++ _stext = get_stext_symbol();
++ if (!_stext) {
++ ERRMSG("Can't get the symbol of _stext.\n");
++ return FALSE;
++ }
++
++ /*
++ * Derive va_bits as per arch/arm64/Kconfig. Note that this is a
++ * best case approximation at the moment, as there can be
++ * inconsistencies in this calculation (for e.g., for 52-bit
++	 * kernel VA case, the 48th bit is set in the _stext symbol).
++ */
++ if ((_stext & PAGE_OFFSET_48) == PAGE_OFFSET_48) {
++ va_bits = 48;
++ } else if ((_stext & PAGE_OFFSET_47) == PAGE_OFFSET_47) {
++ va_bits = 47;
++ } else if ((_stext & PAGE_OFFSET_42) == PAGE_OFFSET_42) {
++ va_bits = 42;
++ } else if ((_stext & PAGE_OFFSET_39) == PAGE_OFFSET_39) {
++ va_bits = 39;
++ } else if ((_stext & PAGE_OFFSET_36) == PAGE_OFFSET_36) {
++ va_bits = 36;
++ } else {
++ ERRMSG("Cannot find a proper _stext for calculating VA_BITS\n");
++ return FALSE;
++ }
++
++ DEBUG_MSG("va_bits : %d (guess from _stext)\n", va_bits);
++
++ return TRUE;
++}
++
+ int
+ get_machdep_info_arm64(void)
+ {
+@@ -398,27 +435,11 @@ get_xen_info_arm64(void)
+ int
+ get_versiondep_info_arm64(void)
+ {
+- ulong _stext;
+-
+- _stext = get_stext_symbol();
+- if (!_stext) {
+- ERRMSG("Can't get the symbol of _stext.\n");
+- return FALSE;
+- }
+-
+- /* Derive va_bits as per arch/arm64/Kconfig */
+- if ((_stext & PAGE_OFFSET_36) == PAGE_OFFSET_36) {
+- va_bits = 36;
+- } else if ((_stext & PAGE_OFFSET_39) == PAGE_OFFSET_39) {
+- va_bits = 39;
+- } else if ((_stext & PAGE_OFFSET_42) == PAGE_OFFSET_42) {
+- va_bits = 42;
+- } else if ((_stext & PAGE_OFFSET_47) == PAGE_OFFSET_47) {
+- va_bits = 47;
+- } else if ((_stext & PAGE_OFFSET_48) == PAGE_OFFSET_48) {
+- va_bits = 48;
+- } else {
+- ERRMSG("Cannot find a proper _stext for calculating VA_BITS\n");
++ if (NUMBER(VA_BITS) != NOT_FOUND_NUMBER) {
++ va_bits = NUMBER(VA_BITS);
++ DEBUG_MSG("va_bits : %d (vmcoreinfo)\n", va_bits);
++ } else if (get_va_bits_from_stext_arm64() == FALSE) {
++ ERRMSG("Can't determine va_bits.\n");
+ return FALSE;
+ }
+
+--
+2.29.2
+
diff --git a/kexec-tools-2.0.21-makedumpfile-PATCH-3-3-arm64-support-flipped-VA-and-52-bit-kernel.patch b/kexec-tools-2.0.21-makedumpfile-PATCH-3-3-arm64-support-flipped-VA-and-52-bit-kernel.patch
new file mode 100644
index 0000000..752bb07
--- /dev/null
+++ b/kexec-tools-2.0.21-makedumpfile-PATCH-3-3-arm64-support-flipped-VA-and-52-bit-kernel.patch
@@ -0,0 +1,245 @@
+From a0216b678a95f099a16172cc4a67ad5aa6a89583 Mon Sep 17 00:00:00 2001
+From: Kazuhito Hagio <k-hagio-ab@nec.com>
+Date: Fri, 29 Jan 2021 11:40:25 +0900
+Subject: [PATCH 3/3] [PATCH 3/3] arm64: support flipped VA and 52-bit kernel
+ VA
+
+Linux 5.4 and later kernels for arm64 changed the kernel VA space
+arrangement and introduced 52-bit kernel VAs by merging branch
+commit b333b0ba2346. Support 5.9+ kernels with vmcoreinfo entries
+and 5.4+ kernels with best guessing.
+
+However, the following conditions are not supported for now due to
+no necessary information provided from kernel:
+(1) 5.4 <= kernels <= 5.8 and
+ - if PA_BITS=52 && VA_BITS!=52
+ - with -x option if vabits_actual=52
+(2) kernels < 5.4 with CONFIG_ARM64_USER_VA_BITS_52=y
+
+(1) should be supported with kernel commit bbdbc11804ff and
+1d50e5d0c5052 adding necessary information to vmcoreinfo.
+
+Signed-off-by: Bhupesh Sharma <bhsharma@redhat.com>
+Signed-off-by: Kazuhito Hagio <k-hagio-ab@nec.com>
+Reviewed-by: Pingfan Liu <piliu@redhat.com>
+---
+ arch/arm64.c | 100 +++++++++++++++++++++++++++++++++++++++++--------
+ makedumpfile.c | 2 +
+ makedumpfile.h | 1 +
+ 3 files changed, 88 insertions(+), 15 deletions(-)
+
+diff --git a/makedumpfile-1.6.8/arch/arm64.c b/makedumpfile-1.6.8/arch/arm64.c
+index 2916b4f..1072178 100644
+--- a/makedumpfile-1.6.8/arch/arm64.c
++++ b/makedumpfile-1.6.8/arch/arm64.c
+@@ -47,6 +47,8 @@ typedef struct {
+ static int lpa_52_bit_support_available;
+ static int pgtable_level;
+ static int va_bits;
++static int vabits_actual;
++static int flipped_va;
+ static unsigned long kimage_voffset;
+
+ #define SZ_4K 4096
+@@ -58,7 +60,6 @@ static unsigned long kimage_voffset;
+ #define PAGE_OFFSET_42 ((0xffffffffffffffffUL) << 42)
+ #define PAGE_OFFSET_47 ((0xffffffffffffffffUL) << 47)
+ #define PAGE_OFFSET_48 ((0xffffffffffffffffUL) << 48)
+-#define PAGE_OFFSET_52 ((0xffffffffffffffffUL) << 52)
+
+ #define pgd_val(x) ((x).pgd)
+ #define pud_val(x) (pgd_val((x).pgd))
+@@ -218,12 +219,20 @@ pmd_page_paddr(pmd_t pmd)
+ #define pte_index(vaddr) (((vaddr) >> PAGESHIFT()) & (PTRS_PER_PTE - 1))
+ #define pte_offset(dir, vaddr) (pmd_page_paddr((*dir)) + pte_index(vaddr) * sizeof(pte_t))
+
++/*
++ * The linear kernel range starts at the bottom of the virtual address
++ * space. Testing the top bit for the start of the region is a
++ * sufficient check and avoids having to worry about the tag.
++ */
++#define is_linear_addr(addr) (flipped_va ? \
++ (!((unsigned long)(addr) & (1UL << (vabits_actual - 1)))) : \
++ (!!((unsigned long)(addr) & (1UL << (vabits_actual - 1)))))
++
+ static unsigned long long
+ __pa(unsigned long vaddr)
+ {
+- if (kimage_voffset == NOT_FOUND_NUMBER ||
+- (vaddr >= PAGE_OFFSET))
+- return (vaddr - PAGE_OFFSET + info->phys_base);
++ if (kimage_voffset == NOT_FOUND_NUMBER || is_linear_addr(vaddr))
++ return ((vaddr & ~PAGE_OFFSET) + info->phys_base);
+ else
+ return (vaddr - kimage_voffset);
+ }
+@@ -253,6 +262,7 @@ static int calculate_plat_config(void)
+ (PAGESIZE() == SZ_64K && va_bits == 42)) {
+ pgtable_level = 2;
+ } else if ((PAGESIZE() == SZ_64K && va_bits == 48) ||
++ (PAGESIZE() == SZ_64K && va_bits == 52) ||
+ (PAGESIZE() == SZ_4K && va_bits == 39) ||
+ (PAGESIZE() == SZ_16K && va_bits == 47)) {
+ pgtable_level = 3;
+@@ -263,6 +273,7 @@ static int calculate_plat_config(void)
+ PAGESIZE(), va_bits);
+ return FALSE;
+ }
++ DEBUG_MSG("pgtable_level: %d\n", pgtable_level);
+
+ return TRUE;
+ }
+@@ -270,6 +281,9 @@ static int calculate_plat_config(void)
+ unsigned long
+ get_kvbase_arm64(void)
+ {
++ if (flipped_va)
++ return PAGE_OFFSET;
++
+ return (0xffffffffffffffffUL << va_bits);
+ }
+
+@@ -382,22 +396,54 @@ get_va_bits_from_stext_arm64(void)
+ return TRUE;
+ }
+
++static void
++get_page_offset_arm64(void)
++{
++ ulong page_end;
++ int vabits_min;
++
++ /*
++ * See arch/arm64/include/asm/memory.h for more details of
++ * the PAGE_OFFSET calculation.
++ */
++ vabits_min = (va_bits > 48) ? 48 : va_bits;
++ page_end = -(1UL << (vabits_min - 1));
++
++ if (SYMBOL(_stext) > page_end) {
++ flipped_va = TRUE;
++ info->page_offset = -(1UL << vabits_actual);
++ } else {
++ flipped_va = FALSE;
++ info->page_offset = -(1UL << (vabits_actual - 1));
++ }
++
++ DEBUG_MSG("page_offset : %lx (from page_end check)\n",
++ info->page_offset);
++}
++
+ int
+ get_machdep_info_arm64(void)
+ {
++ /* Check if va_bits is still not initialized. If still 0, call
++ * get_versiondep_info() to initialize the same.
++ */
++ if (!va_bits)
++ get_versiondep_info_arm64();
++
+ /* Determine if the PA address range is 52-bits: ARMv8.2-LPA */
+ if (NUMBER(MAX_PHYSMEM_BITS) != NOT_FOUND_NUMBER) {
+ info->max_physmem_bits = NUMBER(MAX_PHYSMEM_BITS);
++ DEBUG_MSG("max_physmem_bits : %ld (vmcoreinfo)\n", info->max_physmem_bits);
+ if (info->max_physmem_bits == 52)
+ lpa_52_bit_support_available = 1;
+- } else
+- info->max_physmem_bits = 48;
++ } else {
++ if (va_bits == 52)
++ info->max_physmem_bits = 52; /* just guess */
++ else
++ info->max_physmem_bits = 48;
+
+- /* Check if va_bits is still not initialized. If still 0, call
+- * get_versiondep_info() to initialize the same.
+- */
+- if (!va_bits)
+- get_versiondep_info_arm64();
++ DEBUG_MSG("max_physmem_bits : %ld (guess)\n", info->max_physmem_bits);
++ }
+
+ if (!calculate_plat_config()) {
+ ERRMSG("Can't determine platform config values\n");
+@@ -408,7 +454,6 @@ get_machdep_info_arm64(void)
+ info->section_size_bits = SECTIONS_SIZE_BITS;
+
+ DEBUG_MSG("kimage_voffset : %lx\n", kimage_voffset);
+- DEBUG_MSG("max_physmem_bits : %ld\n", info->max_physmem_bits);
+ DEBUG_MSG("section_size_bits: %ld\n", info->section_size_bits);
+
+ return TRUE;
+@@ -443,10 +488,35 @@ get_versiondep_info_arm64(void)
+ return FALSE;
+ }
+
+- info->page_offset = (0xffffffffffffffffUL) << (va_bits - 1);
++ /*
++ * See TCR_EL1, Translation Control Register (EL1) register
++ * description in the ARMv8 Architecture Reference Manual.
++ * Basically, we can use the TCR_EL1.T1SZ value to determine
++ * the virtual addressing range supported in the kernel-space
++ * (i.e. vabits_actual) since Linux 5.9.
++ */
++ if (NUMBER(TCR_EL1_T1SZ) != NOT_FOUND_NUMBER) {
++ vabits_actual = 64 - NUMBER(TCR_EL1_T1SZ);
++ DEBUG_MSG("vabits_actual : %d (vmcoreinfo)\n", vabits_actual);
++ } else if ((va_bits == 52) && (SYMBOL(mem_section) != NOT_FOUND_SYMBOL)) {
++ /*
++ * Linux 5.4 through 5.10 have the following linear space:
++ * 48-bit: 0xffff000000000000 - 0xffff7fffffffffff
++ * 52-bit: 0xfff0000000000000 - 0xfff7ffffffffffff
++ * and SYMBOL(mem_section) should be in linear space if
++	 * the kernel is configured with CONFIG_SPARSEMEM_EXTREME=y.
++ */
++ if (SYMBOL(mem_section) & (1UL << (va_bits - 1)))
++ vabits_actual = 48;
++ else
++ vabits_actual = 52;
++ DEBUG_MSG("vabits_actual : %d (guess from mem_section)\n", vabits_actual);
++ } else {
++ vabits_actual = va_bits;
++ DEBUG_MSG("vabits_actual : %d (same as va_bits)\n", vabits_actual);
++ }
+
+- DEBUG_MSG("va_bits : %d\n", va_bits);
+- DEBUG_MSG("page_offset : %lx\n", info->page_offset);
++ get_page_offset_arm64();
+
+ return TRUE;
+ }
+diff --git a/makedumpfile-1.6.8/makedumpfile.c b/makedumpfile-1.6.8/makedumpfile.c
+index 768eda4..fcd766b 100644
+--- a/makedumpfile-1.6.8/makedumpfile.c
++++ b/makedumpfile-1.6.8/makedumpfile.c
+@@ -2397,6 +2397,7 @@ write_vmcoreinfo_data(void)
+ WRITE_NUMBER("HUGETLB_PAGE_DTOR", HUGETLB_PAGE_DTOR);
+ #ifdef __aarch64__
+ WRITE_NUMBER("VA_BITS", VA_BITS);
++	/* WRITE_NUMBER("TCR_EL1_T1SZ", TCR_EL1_T1SZ); should not exist */
+ WRITE_NUMBER_UNSIGNED("PHYS_OFFSET", PHYS_OFFSET);
+ WRITE_NUMBER_UNSIGNED("kimage_voffset", kimage_voffset);
+ #endif
+@@ -2836,6 +2837,7 @@ read_vmcoreinfo(void)
+ READ_NUMBER("KERNEL_IMAGE_SIZE", KERNEL_IMAGE_SIZE);
+ #ifdef __aarch64__
+ READ_NUMBER("VA_BITS", VA_BITS);
++ READ_NUMBER("TCR_EL1_T1SZ", TCR_EL1_T1SZ);
+ READ_NUMBER_UNSIGNED("PHYS_OFFSET", PHYS_OFFSET);
+ READ_NUMBER_UNSIGNED("kimage_voffset", kimage_voffset);
+ #endif
+diff --git a/makedumpfile-1.6.8/makedumpfile.h b/makedumpfile-1.6.8/makedumpfile.h
+index 0ed6417..97a5554 100644
+--- a/makedumpfile-1.6.8/makedumpfile.h
++++ b/makedumpfile-1.6.8/makedumpfile.h
+@@ -2049,6 +2049,7 @@ struct number_table {
+ long KERNEL_IMAGE_SIZE;
+ #ifdef __aarch64__
+ long VA_BITS;
++ long TCR_EL1_T1SZ;
+ unsigned long PHYS_OFFSET;
+ unsigned long kimage_voffset;
+ #endif
+--
+2.29.2
+
diff --git a/kexec-tools.spec b/kexec-tools.spec
index 69a89d5..0da4382 100644
--- a/kexec-tools.spec
+++ b/kexec-tools.spec
@@ -96,6 +96,10 @@ Requires: systemd-udev%{?_isa}
#
# Patches 501 through 600 are meant for ARM kexec-tools enablement
#
+Patch501: ./kexec-tools-2.0.21-makedumpfile-PATCH-1-3-Use-vmcoreinfo-note-in-proc-kcore-for-mem-.patch
+Patch502: ./kexec-tools-2.0.21-makedumpfile-PATCH-2-3-arm64-Make-use-of-NUMBER-VA_BITS-in-vmcore.patch
+Patch503: ./kexec-tools-2.0.21-makedumpfile-PATCH-3-3-arm64-support-flipped-VA-and-52-bit-kernel.patch
+
#
# Patches 601 onward are generic patches
@@ -121,6 +125,9 @@ mkdir -p -m755 kcp
tar -z -x -v -f %{SOURCE9}
tar -z -x -v -f %{SOURCE19}
+%patch501 -p1
+%patch502 -p1
+%patch503 -p1
%patch603 -p1
%patch604 -p1
%patch605 -p1
--
2.29.2