From 1a3af2aaee85bd475bda44993b3e8dc1335497d5 Mon Sep 17 00:00:00 2001
From: Joe Lawrence <joe.lawrence@redhat.com>
Date: Thu, 10 Oct 2024 16:58:58 -0400
Subject: [PATCH 101/118] aarch64: create-diff-object implementation

Add the aarch64-specific pieces of create-diff-object: recognize
EM_AARCH64 objects, skip correlation of mapping symbols ("$x"/"$d"),
handle R_AARCH64_ABS64 references to section symbols, enable the
aarch64 special sections, and detect the -fpatchable-function-entry=2
nops (with an optional leading "bti c") when creating ftrace callsite
sections.

Signed-off-by: Suraj Jitindar Singh <surajjs@amazon.com>
Signed-off-by: Pete Swain <swine@google.com>
Signed-off-by: Mihails Strasuns <mstrasun@amazon.com>
Signed-off-by: Joe Lawrence <joe.lawrence@redhat.com>
---
 kpatch-build/create-diff-object.c | 104 ++++++++++++++++++++++++++----
 kpatch-build/kpatch-elf.c         |   8 +++
 kpatch-build/kpatch-elf.h         |   1 +
 3 files changed, 101 insertions(+), 12 deletions(-)

diff --git a/kpatch-build/create-diff-object.c b/kpatch-build/create-diff-object.c
index 8170b18..e62880b 100644
--- a/kpatch-build/create-diff-object.c
+++ b/kpatch-build/create-diff-object.c
@@ -173,6 +173,8 @@ static bool is_gcc6_localentry_bundled_sym(struct kpatch_elf *kelf,
 					  struct symbol *sym)
 {
 	switch(kelf->arch) {
+	case AARCH64:
+		return false;
 	case PPC64:
 		return ((PPC64_LOCAL_ENTRY_OFFSET(sym->sym.st_other) != 0) &&
 			sym->sym.st_value == 8);
@@ -230,6 +232,25 @@ static struct rela *toc_rela(const struct rela *rela)
 				   (unsigned int)rela->addend);
 }
 
+/*
+ * Mapping symbols mark and label the transitions between code and data in
+ * ELF files. Their names begin with a "$" character. Don't correlate them:
+ * many share the exact same name, "$x" marking the start of code and "$d"
+ * marking the start of data.
+ */
+static bool kpatch_is_mapping_symbol(struct kpatch_elf *kelf, struct symbol *sym)
+{
+	if (kelf->arch != AARCH64)
+		return false;
+
+	if (sym->name && sym->name[0] == '$' &&
+	    sym->type == STT_NOTYPE &&
+	    sym->bind == STB_LOCAL)
+		return true;
+
+	return false;
+}
+
 /*
  * When compiling with -ffunction-sections and -fdata-sections, almost every
  * symbol gets its own dedicated section.  We call such symbols "bundled"
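For reference, the ELF for the Arm 64-bit Architecture spec defines mapping
symbols as zero-size local STT_NOTYPE symbols, and a single object file
usually contains many of them all named "$x" or "$d", so name-based
correlation would pair up unrelated ones. A minimal standalone sketch of the
same predicate, assuming the ELF64_ST_* macros from glibc's <elf.h>:

    #include <elf.h>
    #include <stdbool.h>

    /* Sketch of the mapping-symbol test outside kpatch's struct symbol:
     * local, untyped, and named starting with "$" ("$x" code, "$d" data). */
    static bool is_aarch64_mapping_symbol(const char *name, unsigned char st_info)
    {
            return name && name[0] == '$' &&
                   ELF64_ST_TYPE(st_info) == STT_NOTYPE &&
                   ELF64_ST_BIND(st_info) == STB_LOCAL;
    }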
@@ -667,6 +688,12 @@ static bool insn_is_load_immediate(struct kpatch_elf *kelf, void *addr)
 
 	switch(kelf->arch) {
 
+	case AARCH64:
+		/* arg2: mov w2, <line number> */
+		if ((insn[0] & 0x1f) == 0x2 && insn[3] == 0x52)
+			return true;
+		break;
+
 	case X86_64:
 		/* arg2: mov $imm, %esi */
 		if (insn[0] == 0xbe)
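The aarch64 test is byte-level pattern matching against the MOVZ encoding: in
a little-endian "movz w2, #imm16" word, byte 3 is 0x52 and the low five bits
of byte 0 hold the destination register (w2). A small self-contained check of
that claim; the immediate 1234 is arbitrary, and the memcpy assumes a
little-endian host:

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    int main(void)
    {
            /* Encode "movz w2, #1234": base opcode 0x52800000, imm16 in
             * bits 20-5, destination register in bits 4-0. */
            uint32_t mov_w2 = 0x52800000u | (1234u << 5) | 2u;
            unsigned char insn[4];

            memcpy(insn, &mov_w2, sizeof(insn));
            assert((insn[0] & 0x1f) == 0x2 && insn[3] == 0x52);
            return 0;
    }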
@@ -1076,15 +1103,15 @@ static void kpatch_correlate_sections(struct list_head *seclist_orig,
 	}
 }
 
-static void kpatch_correlate_symbols(struct list_head *symlist_orig,
-		struct list_head *symlist_patched)
+static void kpatch_correlate_symbols(struct kpatch_elf *kelf_orig,
+		struct kpatch_elf *kelf_patched)
 {
 	struct symbol *sym_orig, *sym_patched;
 
-	list_for_each_entry(sym_orig, symlist_orig, list) {
+	list_for_each_entry(sym_orig, &kelf_orig->symbols, list) {
 		if (sym_orig->twin)
 			continue;
-		list_for_each_entry(sym_patched, symlist_patched, list) {
+		list_for_each_entry(sym_patched, &kelf_patched->symbols, list) {
 			if (kpatch_mangled_strcmp(sym_orig->name, sym_patched->name) ||
 			    sym_orig->type != sym_patched->type || sym_patched->twin)
 				continue;
@@ -1104,6 +1131,9 @@ static void kpatch_correlate_symbols(struct list_head *symlist_orig,
 			    !strncmp(sym_orig->name, ".LC", 3))
 				continue;
 
+			if (kpatch_is_mapping_symbol(kelf_orig, sym_orig))
+				continue;
+
 			/* group section symbols must have correlated sections */
 			if (sym_orig->sec &&
 			    sym_orig->sec->sh.sh_type == SHT_GROUP &&
@@ -1509,7 +1539,7 @@ static void kpatch_correlate_elfs(struct kpatch_elf *kelf_orig,
 		struct kpatch_elf *kelf_patched)
 {
 	kpatch_correlate_sections(&kelf_orig->sections, &kelf_patched->sections);
-	kpatch_correlate_symbols(&kelf_orig->symbols, &kelf_patched->symbols);
+	kpatch_correlate_symbols(kelf_orig, kelf_patched);
 }
 
 static void kpatch_compare_correlated_elements(struct kpatch_elf *kelf)
@@ -1625,7 +1655,8 @@ static void kpatch_replace_sections_syms(struct kpatch_elf *kelf)
 
 				if (is_text_section(relasec->base) &&
 				    !is_text_section(sym->sec) &&
-				    is_arch(X86_64) && rela->type == R_X86_64_32S &&
+				    ((is_arch(X86_64) && rela->type == R_X86_64_32S) ||
+				     (is_arch(AARCH64) && rela->type == R_AARCH64_ABS64)) &&
 				    rela->addend == (long)sym->sec->sh.sh_size &&
 				    end == (long)sym->sec->sh.sh_size) {
 
@@ -1662,6 +1693,9 @@ static void kpatch_replace_sections_syms(struct kpatch_elf *kelf)
 					 */
 				} else if (target_off == start && target_off == end) {
 
+					if (kpatch_is_mapping_symbol(kelf, sym))
+						continue;
+
 					/*
 					 * Allow replacement for references to
 					 * empty symbols.
@@ -2422,28 +2456,28 @@ static bool static_call_sites_group_filter(struct lookup_table *lookup,
 static struct special_section special_sections[] = {
 	{
 		.name		= "__bug_table",
-		.arch		= X86_64 | PPC64 | S390,
+		.arch		= AARCH64 | X86_64 | PPC64 | S390,
 		.group_size	= bug_table_group_size,
 	},
 	{
 		.name		= ".fixup",
-		.arch		= X86_64 | PPC64 | S390,
+		.arch		= AARCH64 | X86_64 | PPC64 | S390,
 		.group_size	= fixup_group_size,
 	},
 	{
 		.name		= "__ex_table", /* must come after .fixup */
-		.arch		= X86_64 | PPC64 | S390,
+		.arch		= AARCH64 | X86_64 | PPC64 | S390,
 		.group_size	= ex_table_group_size,
 	},
 	{
 		.name		= "__jump_table",
-		.arch		= X86_64 | PPC64 | S390,
+		.arch		= AARCH64 | X86_64 | PPC64 | S390,
 		.group_size	= jump_table_group_size,
 		.group_filter	= jump_table_group_filter,
 	},
 	{
 		.name		= ".printk_index",
-		.arch		= X86_64 | PPC64 | S390,
+		.arch		= AARCH64 | X86_64 | PPC64 | S390,
 		.group_size	= printk_index_group_size,
 	},
 	{
@@ -2458,7 +2492,7 @@ static struct special_section special_sections[] = {
 	},
 	{
 		.name		= ".altinstructions",
-		.arch		= X86_64 | S390,
+		.arch		= AARCH64 | X86_64 | S390,
 		.group_size	= altinstructions_group_size,
 	},
 	{
@@ -3774,6 +3808,47 @@ static void kpatch_create_ftrace_callsite_sections(struct kpatch_elf *kelf, bool
 		}
 
 		switch(kelf->arch) {
+		case AARCH64: {
+			unsigned char *insn;
+			/*
+			 * Assume aarch64 is built with -fpatchable-function-entry=2,
+			 * which means both nops are placed after the entry point of the function.
+			 *
+			 * Disassembly of section .text.cmdline_proc_show:
+			 *
+			 * 0000000000000000 <cmdline_proc_show>:
+			 *    0:   d503201f        nop    <<
+			 *    4:   d503201f        nop
+			 *
+			 * Relocation section '.rela__patchable_function_entries'
+			 *     Offset             Info             Type               Symbol's Value  Symbol's Name + Addend
+			 * 0000000000000008  0000000f00000101 R_AARCH64_ABS64        0000000000000000 .text.cmdline_proc_show + 0
+			 *                                                                                                      ^
+			 */
+			insn_offset = 0;
+			insn = sym->sec->data->d_buf + insn_offset;
+
+			/*
+			 * If BTI (Branch Target Identification) is enabled then there
+			 * might be an additional 'BTI C' instruction before the two
+			 * patchable function entry 'NOP's.
+			 * i.e. 0xd503245f (little endian)
+			 */
+			if (insn[0] == 0x5f) {
+				if (insn[1] != 0x24 || insn[2] != 0x03 || insn[3] != 0xd5)
+					ERROR("%s: unexpected instruction in patch section of function\n", sym->name);
+				insn_offset += 4;
+				insn += 4;
+			}
+			for (int i = 0; i < 8; i += 4) {
+				/* We expect a NOP, i.e. 0xd503201f (little endian) */
+				if (insn[i] != 0x1f || insn[i + 1] != 0x20 ||
+				    insn[i + 2] != 0x03 || insn[i + 3] != 0xd5)
+					ERROR("%s: unexpected instruction in patch section of function\n", sym->name);
+			}
+
+			break;
+		}
 		case PPC64: {
 			unsigned char *insn;
 
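Putting the aarch64 walk above in one place: a patchable function entry may
begin with an optional "bti c" (0xd503245f) followed by the two nops
(0xd503201f) from -fpatchable-function-entry=2, all little-endian. A hedged
standalone sketch; aarch64_pfe_offset is an illustrative helper, not a
kpatch function:

    #include <string.h>

    /* Return the offset of the two patchable-entry nops within the
     * function (0, or 4 when a "bti c" landing pad precedes them), or
     * -1 if the expected instructions are not found. */
    static int aarch64_pfe_offset(const unsigned char *insn)
    {
            static const unsigned char nop[4]   = { 0x1f, 0x20, 0x03, 0xd5 };
            static const unsigned char bti_c[4] = { 0x5f, 0x24, 0x03, 0xd5 };
            int off = 0;

            if (!memcmp(insn, bti_c, 4)) {
                    off = 4;
                    insn += 4;
            }
            for (int i = 0; i < 8; i += 4)
                    if (memcmp(insn + i, nop, 4))
                            return -1;
            return off;
    }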
@@ -4067,6 +4142,11 @@ static void kpatch_find_func_profiling_calls(struct kpatch_elf *kelf)
 			continue;
 
 		switch(kelf->arch) {
+		case AARCH64:
+			if (kpatch_symbol_has_pfe_entry(kelf, sym)) {
+				sym->has_func_profiling = 1;
+			}
+			break;
 		case PPC64:
 			if (kpatch_symbol_has_pfe_entry(kelf, sym)) {
 				sym->has_func_profiling = 1;
diff --git a/kpatch-build/kpatch-elf.c b/kpatch-build/kpatch-elf.c
index 073b808..17c0491 100755
--- a/kpatch-build/kpatch-elf.c
+++ b/kpatch-build/kpatch-elf.c
@@ -156,6 +156,8 @@ struct rela *find_rela_by_offset(struct section *relasec, unsigned int offset)
 unsigned int absolute_rela_type(struct kpatch_elf *kelf)
 {
 	switch(kelf->arch) {
+	case AARCH64:
+		return R_AARCH64_ABS64;
 	case PPC64:
 		return R_PPC64_ADDR64;
 	case X86_64:
@@ -225,6 +227,7 @@ long rela_target_offset(struct kpatch_elf *kelf, struct section *relasec,
 	struct section *sec = relasec->base;
 
 	switch(kelf->arch) {
+	case AARCH64:
 	case PPC64:
 		add_off = 0;
 		break;
@@ -274,6 +277,8 @@ unsigned int insn_length(struct kpatch_elf *kelf, void *addr)
 	char *insn = addr;
 
 	switch(kelf->arch) {
+	case AARCH64:
+		return 4;
 
 	case X86_64:
 		insn_init(&decoded_insn, addr, 1);
@@ -604,6 +609,9 @@ struct kpatch_elf *kpatch_elf_open(const char *name)
 	if (!gelf_getehdr(kelf->elf, &ehdr))
 		ERROR("gelf_getehdr");
 	switch (ehdr.e_machine) {
+	case EM_AARCH64:
+		kelf->arch = AARCH64;
+		break;
 	case EM_PPC64:
 		kelf->arch = PPC64;
 		break;
diff --git a/kpatch-build/kpatch-elf.h b/kpatch-build/kpatch-elf.h
index 3389dfe..4a3617d 100644
--- a/kpatch-build/kpatch-elf.h
+++ b/kpatch-build/kpatch-elf.h
@@ -116,6 +116,7 @@ enum architecture {
 	PPC64  = 0x1 << 0,
 	X86_64 = 0x1 << 1,
 	S390   = 0x1 << 2,
+	AARCH64 = 0x1 << 3,
 };
 
 struct kpatch_elf {
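The new AARCH64 value follows the existing pattern of one bit per
architecture, which lets the special_sections entries above OR several
arches into a single .arch mask and lets membership be tested with a bitwise
AND. A small sketch of that pattern; arch_matches is a hypothetical helper,
not part of kpatch-elf:

    #include <stdbool.h>

    enum architecture {
            PPC64   = 0x1 << 0,
            X86_64  = 0x1 << 1,
            S390    = 0x1 << 2,
            AARCH64 = 0x1 << 3,
    };

    /* True when a mask like (AARCH64 | X86_64 | PPC64 | S390) includes
     * the single-bit arch value of the ELF being processed. */
    static bool arch_matches(unsigned int mask, enum architecture arch)
    {
            return (mask & arch) != 0;
    }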
-- 
2.48.1