diff -rup binutils-2.30/bfd/archures.c binutils-2.30.new/bfd/archures.c --- binutils-2.30/bfd/archures.c 2021-03-23 16:21:44.001022834 +0000 +++ binutils-2.30.new/bfd/archures.c 2021-03-23 16:20:02.829710624 +0000 @@ -526,6 +526,7 @@ DESCRIPTION .#define bfd_mach_tilegx32 2 . bfd_arch_aarch64, {* AArch64. *} .#define bfd_mach_aarch64 0 +.#define bfd_mach_aarch64_8R 1 .#define bfd_mach_aarch64_ilp32 32 . bfd_arch_nios2, {* Nios II. *} .#define bfd_mach_nios2 0 diff -rup binutils-2.30/bfd/bfd-in2.h binutils-2.30.new/bfd/bfd-in2.h --- binutils-2.30/bfd/bfd-in2.h 2021-03-23 16:21:44.002022828 +0000 +++ binutils-2.30.new/bfd/bfd-in2.h 2021-03-23 16:20:02.815710719 +0000 @@ -985,12 +985,6 @@ extern void bfd_elf64_aarch64_init_maps extern void bfd_elf32_aarch64_init_maps (bfd *); -extern void bfd_elf64_aarch64_set_options - (bfd *, struct bfd_link_info *, int, int, int, int, int, int); - -extern void bfd_elf32_aarch64_set_options - (bfd *, struct bfd_link_info *, int, int, int, int, int, int); - /* ELF AArch64 mapping symbol support. */ #define BFD_AARCH64_SPECIAL_SYM_TYPE_MAP (1 << 0) #define BFD_AARCH64_SPECIAL_SYM_TYPE_TAG (1 << 1) @@ -2388,6 +2382,7 @@ enum bfd_architecture #define bfd_mach_tilegx32 2 bfd_arch_aarch64, /* AArch64. */ #define bfd_mach_aarch64 0 +#define bfd_mach_aarch64_8R 1 #define bfd_mach_aarch64_ilp32 32 bfd_arch_nios2, /* Nios II. */ #define bfd_mach_nios2 0 @@ -5961,6 +5956,36 @@ of a signed value. Changes instruction value's sign. */ BFD_RELOC_AARCH64_MOVW_G2_S, +/* AArch64 MOV[NZ] instruction with most significant bits 0 to 15 +of a signed value. Changes instruction to MOVZ or MOVN depending on the +value's sign. */ + BFD_RELOC_AARCH64_MOVW_PREL_G0, + +/* AArch64 MOV[NZ] instruction with most significant bits 0 to 15 +of a signed value. Changes instruction to MOVZ or MOVN depending on the +value's sign. */ + BFD_RELOC_AARCH64_MOVW_PREL_G0_NC, + +/* AArch64 MOVK instruction with most significant bits 16 to 31 +of a signed value. */ + BFD_RELOC_AARCH64_MOVW_PREL_G1, + +/* AArch64 MOVK instruction with most significant bits 16 to 31 +of a signed value. */ + BFD_RELOC_AARCH64_MOVW_PREL_G1_NC, + +/* AArch64 MOVK instruction with most significant bits 32 to 47 +of a signed value. */ + BFD_RELOC_AARCH64_MOVW_PREL_G2, + +/* AArch64 MOVK instruction with most significant bits 32 to 47 +of a signed value. */ + BFD_RELOC_AARCH64_MOVW_PREL_G2_NC, + +/* AArch64 MOVK instruction with most significant bits 47 to 63 +of a signed value. */ + BFD_RELOC_AARCH64_MOVW_PREL_G3, + /* AArch64 Load Literal instruction, holding a 19 bit pc-relative word offset. The lowest two bits must be zero and are not stored in the instruction, giving a 21 bit signed byte offset. */ @@ -6188,6 +6213,34 @@ instructions. */ /* AArch64 TLS LOCAL EXEC relocation. */ BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC, +/* bit[11:1] of byte offset to module TLS base address, encoded in ldst +instructions. */ + BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12, + +/* Similar as BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12, but no overflow check. */ + BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC, + +/* bit[11:2] of byte offset to module TLS base address, encoded in ldst +instructions. */ + BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12, + +/* Similar as BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12, but no overflow check. */ + BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC, + +/* bit[11:3] of byte offset to module TLS base address, encoded in ldst +instructions. 
*/ + BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12, + +/* Similar as BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12, but no overflow check. */ + BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC, + +/* bit[11:0] of byte offset to module TLS base address, encoded in ldst +instructions. */ + BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12, + +/* Similar as BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12, but no overflow check. */ + BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC, + /* AArch64 TLS DESC relocation. */ BFD_RELOC_AARCH64_TLSDESC_LD_PREL19, @@ -6271,6 +6324,14 @@ any object files. */ /* Similar as BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12, but no overflow check. */ BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC, +/* AArch64 pseudo relocation code for TLS local exec mode. It's to be +used internally by the AArch64 assembler and not (currently) written to +any object files. */ + BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12, + +/* Similar as BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12, but no overflow check. */ + BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC, + /* AArch64 pseudo relocation code to be used internally by the AArch64 assembler and not (currently) written to any object files. */ BFD_RELOC_AARCH64_LD_GOT_LO12_NC, diff -rup binutils-2.30/bfd/cpu-aarch64.c binutils-2.30.new/bfd/cpu-aarch64.c --- binutils-2.30/bfd/cpu-aarch64.c 2018-01-13 13:31:15.000000000 +0000 +++ binutils-2.30.new/bfd/cpu-aarch64.c 2021-03-23 16:20:02.830710617 +0000 @@ -1,5 +1,5 @@ /* BFD support for AArch64. - Copyright (C) 2009-2018 Free Software Foundation, Inc. + Copyright (C) 2009-2021 Free Software Foundation, Inc. Contributed by ARM Ltd. This file is part of BFD, the Binary File Descriptor library. @@ -22,6 +22,7 @@ #include "bfd.h" #include "libbfd.h" #include "libiberty.h" +#include "cpu-aarch64.h" /* This routine is provided two arch_infos and works out which Aarch64 machine which would be compatible with both and returns a pointer @@ -68,10 +69,11 @@ static struct } processors[] = { - /* These two are example CPUs supported in GCC, once we have real - CPUs they will be removed. */ - { bfd_mach_aarch64, "example-1" }, - { bfd_mach_aarch64, "example-2" } + { bfd_mach_aarch64, "cortex-a34" }, + { bfd_mach_aarch64, "cortex-a65" }, + { bfd_mach_aarch64, "cortex-a65ae" }, + { bfd_mach_aarch64, "cortex-a76ae" }, + { bfd_mach_aarch64, "cortex-a77" } }; static bfd_boolean @@ -103,10 +105,14 @@ scan (const struct bfd_arch_info *info, #define N(NUMBER, PRINT, WORDSIZE, DEFAULT, NEXT) \ { WORDSIZE, WORDSIZE, 8, bfd_arch_aarch64, NUMBER, \ "aarch64", PRINT, 4, DEFAULT, compatible, scan, \ - bfd_arch_default_fill, NEXT } + bfd_arch_default_fill, NEXT, 0 } + +static const bfd_arch_info_type bfd_aarch64_arch_v8_r = + N (bfd_mach_aarch64_8R, "aarch64:armv8-r", 64, FALSE, NULL); static const bfd_arch_info_type bfd_aarch64_arch_ilp32 = - N (bfd_mach_aarch64_ilp32, "aarch64:ilp32", 32, FALSE, NULL); + N (bfd_mach_aarch64_ilp32, "aarch64:ilp32", 32, FALSE, + &bfd_aarch64_arch_v8_r); const bfd_arch_info_type bfd_aarch64_arch = N (0, "aarch64", 64, TRUE, &bfd_aarch64_arch_ilp32); Only in binutils-2.30.new/bfd: cpu-aarch64.h diff -rup binutils-2.30/bfd/elfnn-aarch64.c binutils-2.30.new/bfd/elfnn-aarch64.c --- binutils-2.30/bfd/elfnn-aarch64.c 2021-03-23 16:21:45.924009761 +0000 +++ binutils-2.30.new/bfd/elfnn-aarch64.c 2021-03-23 16:20:02.826710644 +0000 @@ -1,5 +1,5 @@ /* AArch64-specific support for NN-bit ELF. - Copyright (C) 2009-2018 Free Software Foundation, Inc. + Copyright (C) 2009-2021 Free Software Foundation, Inc. Contributed by ARM Ltd. 
This file is part of BFD, the Binary File Descriptor library. @@ -139,12 +139,12 @@ #include "bfd.h" #include "libiberty.h" #include "libbfd.h" -#include "bfd_stdint.h" #include "elf-bfd.h" #include "bfdlink.h" #include "objalloc.h" #include "elf/aarch64.h" #include "elfxx-aarch64.h" +#include "cpu-aarch64.h" #define ARCH_SIZE NN @@ -201,6 +201,14 @@ || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12 \ || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12 \ || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC \ + || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12 \ + || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC \ + || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12 \ + || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC \ + || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12 \ + || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC \ + || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12 \ + || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC \ || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0 \ || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC \ || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1 \ @@ -260,6 +268,12 @@ #define PLT_ENTRY_SIZE (32) #define PLT_SMALL_ENTRY_SIZE (16) #define PLT_TLSDESC_ENTRY_SIZE (32) +/* PLT sizes with BTI insn. */ +#define PLT_BTI_SMALL_ENTRY_SIZE (24) +/* PLT sizes with PAC insn. */ +#define PLT_PAC_SMALL_ENTRY_SIZE (24) +/* PLT sizes with BTI and PAC insn. */ +#define PLT_BTI_PAC_SMALL_ENTRY_SIZE (24) /* Encoding of the nop instruction. */ #define INSN_NOP 0xd503201f @@ -290,9 +304,26 @@ static const bfd_byte elfNN_aarch64_smal 0x1f, 0x20, 0x03, 0xd5, /* nop */ }; +static const bfd_byte elfNN_aarch64_small_plt0_bti_entry[PLT_ENTRY_SIZE] = +{ + 0x5f, 0x24, 0x03, 0xd5, /* bti c. */ + 0xf0, 0x7b, 0xbf, 0xa9, /* stp x16, x30, [sp, #-16]! */ + 0x10, 0x00, 0x00, 0x90, /* adrp x16, (GOT+16) */ +#if ARCH_SIZE == 64 + 0x11, 0x0A, 0x40, 0xf9, /* ldr x17, [x16, #PLT_GOT+0x10] */ + 0x10, 0x42, 0x00, 0x91, /* add x16, x16,#PLT_GOT+0x10 */ +#else + 0x11, 0x0A, 0x40, 0xb9, /* ldr w17, [x16, #PLT_GOT+0x8] */ + 0x10, 0x22, 0x00, 0x11, /* add w16, w16,#PLT_GOT+0x8 */ +#endif + 0x20, 0x02, 0x1f, 0xd6, /* br x17 */ + 0x1f, 0x20, 0x03, 0xd5, /* nop */ + 0x1f, 0x20, 0x03, 0xd5, /* nop */ +}; + /* Per function entry in a procedure linkage table looks like this if the distance between the PLTGOT and the PLT is < 4GB use - these PLT entries. */ + these PLT entries. Use BTI versions of the PLTs when enabled. */ static const bfd_byte elfNN_aarch64_small_plt_entry[PLT_SMALL_ENTRY_SIZE] = { 0x10, 0x00, 0x00, 0x90, /* adrp x16, PLTGOT + n * 8 */ @@ -307,6 +338,54 @@ static const bfd_byte elfNN_aarch64_smal }; static const bfd_byte +elfNN_aarch64_small_plt_bti_entry[PLT_BTI_SMALL_ENTRY_SIZE] = +{ + 0x5f, 0x24, 0x03, 0xd5, /* bti c. */ + 0x10, 0x00, 0x00, 0x90, /* adrp x16, PLTGOT + n * 8 */ +#if ARCH_SIZE == 64 + 0x11, 0x02, 0x40, 0xf9, /* ldr x17, [x16, PLTGOT + n * 8] */ + 0x10, 0x02, 0x00, 0x91, /* add x16, x16, :lo12:PLTGOT + n * 8 */ +#else + 0x11, 0x02, 0x40, 0xb9, /* ldr w17, [x16, PLTGOT + n * 4] */ + 0x10, 0x02, 0x00, 0x11, /* add w16, w16, :lo12:PLTGOT + n * 4 */ +#endif + 0x20, 0x02, 0x1f, 0xd6, /* br x17. 
*/ + 0x1f, 0x20, 0x03, 0xd5, /* nop */ +}; + +static const bfd_byte +elfNN_aarch64_small_plt_pac_entry[PLT_PAC_SMALL_ENTRY_SIZE] = +{ + 0x10, 0x00, 0x00, 0x90, /* adrp x16, PLTGOT + n * 8 */ +#if ARCH_SIZE == 64 + 0x11, 0x02, 0x40, 0xf9, /* ldr x17, [x16, PLTGOT + n * 8] */ + 0x10, 0x02, 0x00, 0x91, /* add x16, x16, :lo12:PLTGOT + n * 8 */ +#else + 0x11, 0x02, 0x40, 0xb9, /* ldr w17, [x16, PLTGOT + n * 4] */ + 0x10, 0x02, 0x00, 0x11, /* add w16, w16, :lo12:PLTGOT + n * 4 */ +#endif + 0x9f, 0x21, 0x03, 0xd5, /* autia1716 */ + 0x20, 0x02, 0x1f, 0xd6, /* br x17. */ + 0x1f, 0x20, 0x03, 0xd5, /* nop */ +}; + +static const bfd_byte +elfNN_aarch64_small_plt_bti_pac_entry[PLT_BTI_PAC_SMALL_ENTRY_SIZE] = +{ + 0x5f, 0x24, 0x03, 0xd5, /* bti c. */ + 0x10, 0x00, 0x00, 0x90, /* adrp x16, PLTGOT + n * 8 */ +#if ARCH_SIZE == 64 + 0x11, 0x02, 0x40, 0xf9, /* ldr x17, [x16, PLTGOT + n * 8] */ + 0x10, 0x02, 0x00, 0x91, /* add x16, x16, :lo12:PLTGOT + n * 8 */ +#else + 0x11, 0x02, 0x40, 0xb9, /* ldr w17, [x16, PLTGOT + n * 4] */ + 0x10, 0x02, 0x00, 0x11, /* add w16, w16, :lo12:PLTGOT + n * 4 */ +#endif + 0x9f, 0x21, 0x03, 0xd5, /* autia1716 */ + 0x20, 0x02, 0x1f, 0xd6, /* br x17. */ +}; + +static const bfd_byte elfNN_aarch64_tlsdesc_small_plt_entry[PLT_TLSDESC_ENTRY_SIZE] = { 0xe2, 0x0f, 0xbf, 0xa9, /* stp x2, x3, [sp, #-16]! */ @@ -324,6 +403,24 @@ elfNN_aarch64_tlsdesc_small_plt_entry[PL 0x1f, 0x20, 0x03, 0xd5, /* nop */ }; +static const bfd_byte +elfNN_aarch64_tlsdesc_small_plt_bti_entry[PLT_TLSDESC_ENTRY_SIZE] = +{ + 0x5f, 0x24, 0x03, 0xd5, /* bti c. */ + 0xe2, 0x0f, 0xbf, 0xa9, /* stp x2, x3, [sp, #-16]! */ + 0x02, 0x00, 0x00, 0x90, /* adrp x2, 0 */ + 0x03, 0x00, 0x00, 0x90, /* adrp x3, 0 */ +#if ARCH_SIZE == 64 + 0x42, 0x00, 0x40, 0xf9, /* ldr x2, [x2, #0] */ + 0x63, 0x00, 0x00, 0x91, /* add x3, x3, 0 */ +#else + 0x42, 0x00, 0x40, 0xb9, /* ldr w2, [x2, #0] */ + 0x63, 0x00, 0x00, 0x11, /* add w3, w3, 0 */ +#endif + 0x40, 0x00, 0x1f, 0xd6, /* br x2 */ + 0x1f, 0x20, 0x03, 0xd5, /* nop */ +}; + #define elf_info_to_howto elfNN_aarch64_info_to_howto #define elf_info_to_howto_rel elfNN_aarch64_info_to_howto @@ -617,6 +714,114 @@ static reloc_howto_type elfNN_aarch64_ho 0xffff, /* dst_mask */ FALSE), /* pcrel_offset */ + /* Group relocations to create a 16, 32, 48 or 64 bit + PC relative address inline. 
*/ + + /* MOV[NZ]: ((S+A-P) >> 0) & 0xffff */ + HOWTO (AARCH64_R (MOVW_PREL_G0), /* type */ + 0, /* rightshift */ + 2, /* size (0 = byte, 1 = short, 2 = long) */ + 17, /* bitsize */ + TRUE, /* pc_relative */ + 0, /* bitpos */ + complain_overflow_signed, /* complain_on_overflow */ + bfd_elf_generic_reloc, /* special_function */ + AARCH64_R_STR (MOVW_PREL_G0), /* name */ + FALSE, /* partial_inplace */ + 0xffff, /* src_mask */ + 0xffff, /* dst_mask */ + TRUE), /* pcrel_offset */ + + /* MOVK: ((S+A-P) >> 0) & 0xffff [no overflow check] */ + HOWTO (AARCH64_R (MOVW_PREL_G0_NC), /* type */ + 0, /* rightshift */ + 2, /* size (0 = byte, 1 = short, 2 = long) */ + 16, /* bitsize */ + TRUE, /* pc_relative */ + 0, /* bitpos */ + complain_overflow_dont, /* complain_on_overflow */ + bfd_elf_generic_reloc, /* special_function */ + AARCH64_R_STR (MOVW_PREL_G0_NC), /* name */ + FALSE, /* partial_inplace */ + 0xffff, /* src_mask */ + 0xffff, /* dst_mask */ + TRUE), /* pcrel_offset */ + + /* MOV[NZ]: ((S+A-P) >> 16) & 0xffff */ + HOWTO (AARCH64_R (MOVW_PREL_G1), /* type */ + 16, /* rightshift */ + 2, /* size (0 = byte, 1 = short, 2 = long) */ + 17, /* bitsize */ + TRUE, /* pc_relative */ + 0, /* bitpos */ + complain_overflow_signed, /* complain_on_overflow */ + bfd_elf_generic_reloc, /* special_function */ + AARCH64_R_STR (MOVW_PREL_G1), /* name */ + FALSE, /* partial_inplace */ + 0xffff, /* src_mask */ + 0xffff, /* dst_mask */ + TRUE), /* pcrel_offset */ + + /* MOVK: ((S+A-P) >> 16) & 0xffff [no overflow check] */ + HOWTO64 (AARCH64_R (MOVW_PREL_G1_NC), /* type */ + 16, /* rightshift */ + 2, /* size (0 = byte, 1 = short, 2 = long) */ + 16, /* bitsize */ + TRUE, /* pc_relative */ + 0, /* bitpos */ + complain_overflow_dont, /* complain_on_overflow */ + bfd_elf_generic_reloc, /* special_function */ + AARCH64_R_STR (MOVW_PREL_G1_NC), /* name */ + FALSE, /* partial_inplace */ + 0xffff, /* src_mask */ + 0xffff, /* dst_mask */ + TRUE), /* pcrel_offset */ + + /* MOV[NZ]: ((S+A-P) >> 32) & 0xffff */ + HOWTO64 (AARCH64_R (MOVW_PREL_G2), /* type */ + 32, /* rightshift */ + 2, /* size (0 = byte, 1 = short, 2 = long) */ + 17, /* bitsize */ + TRUE, /* pc_relative */ + 0, /* bitpos */ + complain_overflow_signed, /* complain_on_overflow */ + bfd_elf_generic_reloc, /* special_function */ + AARCH64_R_STR (MOVW_PREL_G2), /* name */ + FALSE, /* partial_inplace */ + 0xffff, /* src_mask */ + 0xffff, /* dst_mask */ + TRUE), /* pcrel_offset */ + + /* MOVK: ((S+A-P) >> 32) & 0xffff [no overflow check] */ + HOWTO64 (AARCH64_R (MOVW_PREL_G2_NC), /* type */ + 32, /* rightshift */ + 2, /* size (0 = byte, 1 = short, 2 = long) */ + 16, /* bitsize */ + TRUE, /* pc_relative */ + 0, /* bitpos */ + complain_overflow_dont, /* complain_on_overflow */ + bfd_elf_generic_reloc, /* special_function */ + AARCH64_R_STR (MOVW_PREL_G2_NC), /* name */ + FALSE, /* partial_inplace */ + 0xffff, /* src_mask */ + 0xffff, /* dst_mask */ + TRUE), /* pcrel_offset */ + + /* MOV[NZ]: ((S+A-P) >> 48) & 0xffff */ + HOWTO64 (AARCH64_R (MOVW_PREL_G3), /* type */ + 48, /* rightshift */ + 2, /* size (0 = byte, 1 = short, 2 = long) */ + 16, /* bitsize */ + TRUE, /* pc_relative */ + 0, /* bitpos */ + complain_overflow_dont, /* complain_on_overflow */ + bfd_elf_generic_reloc, /* special_function */ + AARCH64_R_STR (MOVW_PREL_G3), /* name */ + FALSE, /* partial_inplace */ + 0xffff, /* src_mask */ + 0xffff, /* dst_mask */ + TRUE), /* pcrel_offset */ + /* Relocations to generate 19, 21 and 33 bit PC-relative load/store addresses: PG(x) is (x & ~0xfff). 
*/ @@ -1527,6 +1732,126 @@ static reloc_howto_type elfNN_aarch64_ho 0xfff, /* dst_mask */ FALSE), /* pcrel_offset */ + /* LD/ST16: bit[11:1] of byte offset to module TLS base address. */ + HOWTO (AARCH64_R (TLSLE_LDST16_TPREL_LO12), /* type */ + 1, /* rightshift */ + 2, /* size (0 = byte, 1 = short, 2 = long) */ + 11, /* bitsize */ + FALSE, /* pc_relative */ + 10, /* bitpos */ + complain_overflow_unsigned, /* complain_on_overflow */ + bfd_elf_generic_reloc, /* special_function */ + AARCH64_R_STR (TLSLE_LDST16_TPREL_LO12), /* name */ + FALSE, /* partial_inplace */ + 0x1ffc00, /* src_mask */ + 0x1ffc00, /* dst_mask */ + FALSE), /* pcrel_offset */ + + /* Same as BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12, but no overflow check. */ + HOWTO (AARCH64_R (TLSLE_LDST16_TPREL_LO12_NC), /* type */ + 1, /* rightshift */ + 2, /* size (0 = byte, 1 = short, 2 = long) */ + 11, /* bitsize */ + FALSE, /* pc_relative */ + 10, /* bitpos */ + complain_overflow_dont, /* complain_on_overflow */ + bfd_elf_generic_reloc, /* special_function */ + AARCH64_R_STR (TLSLE_LDST16_TPREL_LO12_NC), /* name */ + FALSE, /* partial_inplace */ + 0x1ffc00, /* src_mask */ + 0x1ffc00, /* dst_mask */ + FALSE), /* pcrel_offset */ + + /* LD/ST32: bit[11:2] of byte offset to module TLS base address. */ + HOWTO (AARCH64_R (TLSLE_LDST32_TPREL_LO12), /* type */ + 2, /* rightshift */ + 2, /* size (0 = byte, 1 = short, 2 = long) */ + 10, /* bitsize */ + FALSE, /* pc_relative */ + 10, /* bitpos */ + complain_overflow_unsigned, /* complain_on_overflow */ + bfd_elf_generic_reloc, /* special_function */ + AARCH64_R_STR (TLSLE_LDST32_TPREL_LO12), /* name */ + FALSE, /* partial_inplace */ + 0xffc00, /* src_mask */ + 0xffc00, /* dst_mask */ + FALSE), /* pcrel_offset */ + + /* Same as BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12, but no overflow check. */ + HOWTO (AARCH64_R (TLSLE_LDST32_TPREL_LO12_NC), /* type */ + 2, /* rightshift */ + 2, /* size (0 = byte, 1 = short, 2 = long) */ + 10, /* bitsize */ + FALSE, /* pc_relative */ + 10, /* bitpos */ + complain_overflow_dont, /* complain_on_overflow */ + bfd_elf_generic_reloc, /* special_function */ + AARCH64_R_STR (TLSLE_LDST32_TPREL_LO12_NC), /* name */ + FALSE, /* partial_inplace */ + 0xffc00, /* src_mask */ + 0xffc00, /* dst_mask */ + FALSE), /* pcrel_offset */ + + /* LD/ST64: bit[11:3] of byte offset to module TLS base address. */ + HOWTO (AARCH64_R (TLSLE_LDST64_TPREL_LO12), /* type */ + 3, /* rightshift */ + 2, /* size (0 = byte, 1 = short, 2 = long) */ + 9, /* bitsize */ + FALSE, /* pc_relative */ + 10, /* bitpos */ + complain_overflow_unsigned, /* complain_on_overflow */ + bfd_elf_generic_reloc, /* special_function */ + AARCH64_R_STR (TLSLE_LDST64_TPREL_LO12), /* name */ + FALSE, /* partial_inplace */ + 0x7fc00, /* src_mask */ + 0x7fc00, /* dst_mask */ + FALSE), /* pcrel_offset */ + + /* Same as BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12, but no overflow check. */ + HOWTO (AARCH64_R (TLSLE_LDST64_TPREL_LO12_NC), /* type */ + 3, /* rightshift */ + 2, /* size (0 = byte, 1 = short, 2 = long) */ + 9, /* bitsize */ + FALSE, /* pc_relative */ + 10, /* bitpos */ + complain_overflow_dont, /* complain_on_overflow */ + bfd_elf_generic_reloc, /* special_function */ + AARCH64_R_STR (TLSLE_LDST64_TPREL_LO12_NC), /* name */ + FALSE, /* partial_inplace */ + 0x7fc00, /* src_mask */ + 0x7fc00, /* dst_mask */ + FALSE), /* pcrel_offset */ + + /* LD/ST8: bit[11:0] of byte offset to module TLS base address. 
*/ + HOWTO (AARCH64_R (TLSLE_LDST8_TPREL_LO12), /* type */ + 0, /* rightshift */ + 2, /* size (0 = byte, 1 = short, 2 = long) */ + 12, /* bitsize */ + FALSE, /* pc_relative */ + 10, /* bitpos */ + complain_overflow_unsigned, /* complain_on_overflow */ + bfd_elf_generic_reloc, /* special_function */ + AARCH64_R_STR (TLSLE_LDST8_TPREL_LO12), /* name */ + FALSE, /* partial_inplace */ + 0x3ffc00, /* src_mask */ + 0x3ffc00, /* dst_mask */ + FALSE), /* pcrel_offset */ + + /* Same as BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12, but no overflow check. */ + HOWTO (AARCH64_R (TLSLE_LDST8_TPREL_LO12_NC), /* type */ + 0, /* rightshift */ + 2, /* size (0 = byte, 1 = short, 2 = long) */ + 12, /* bitsize */ + FALSE, /* pc_relative */ + 10, /* bitpos */ + complain_overflow_dont, /* complain_on_overflow */ + bfd_elf_generic_reloc, /* special_function */ + AARCH64_R_STR (TLSLE_LDST8_TPREL_LO12_NC), /* name */ + FALSE, /* partial_inplace */ + 0x3ffc00, /* src_mask */ + 0x3ffc00, /* dst_mask */ + FALSE), /* pcrel_offset */ + HOWTO (AARCH64_R (TLSDESC_LD_PREL19), /* type */ 2, /* rightshift */ 2, /* size (0 = byte, 1 = short, 2 = long) */ @@ -1864,7 +2189,7 @@ elfNN_aarch64_bfd_reloc_from_howto (relo /* Given R_TYPE, return the bfd internal relocation enumerator. */ static bfd_reloc_code_real_type -elfNN_aarch64_bfd_reloc_from_type (unsigned int r_type) +elfNN_aarch64_bfd_reloc_from_type (bfd *abfd, unsigned int r_type) { static bfd_boolean initialized_p = FALSE; /* Indexed by R_TYPE, values are offsets in the howto_table. */ @@ -1887,7 +2212,8 @@ elfNN_aarch64_bfd_reloc_from_type (unsig /* PR 17512: file: b371e70a. */ if (r_type >= R_AARCH64_end) { - _bfd_error_handler (_("Invalid AArch64 reloc number: %d"), r_type); + _bfd_error_handler (_("%pB: unsupported relocation type %#x"), + abfd, r_type); bfd_set_error (bfd_error_bad_value); return BFD_RELOC_AARCH64_NONE; } @@ -1946,7 +2272,7 @@ elfNN_aarch64_howto_from_bfd_reloc (bfd_ } static reloc_howto_type * -elfNN_aarch64_howto_from_type (unsigned int r_type) +elfNN_aarch64_howto_from_type (bfd *abfd, unsigned int r_type) { bfd_reloc_code_real_type val; reloc_howto_type *howto; @@ -1962,7 +2288,7 @@ elfNN_aarch64_howto_from_type (unsigned if (r_type == R_AARCH64_NONE) return &elfNN_aarch64_howto_none; - val = elfNN_aarch64_bfd_reloc_from_type (r_type); + val = elfNN_aarch64_bfd_reloc_from_type (abfd, r_type); howto = elfNN_aarch64_howto_from_bfd_reloc (val); if (howto != NULL) @@ -1972,14 +2298,22 @@ elfNN_aarch64_howto_from_type (unsigned return NULL; } -static void -elfNN_aarch64_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *bfd_reloc, +static bfd_boolean +elfNN_aarch64_info_to_howto (bfd *abfd, arelent *bfd_reloc, Elf_Internal_Rela *elf_reloc) { unsigned int r_type; r_type = ELFNN_R_TYPE (elf_reloc->r_info); - bfd_reloc->howto = elfNN_aarch64_howto_from_type (r_type); + bfd_reloc->howto = elfNN_aarch64_howto_from_type (abfd, r_type); + + if (bfd_reloc->howto == NULL) + { + /* xgettext:c-format */ + _bfd_error_handler (_("%pB: unsupported relocation type %#x"), abfd, r_type); + return FALSE; + } + return TRUE; } static reloc_howto_type * @@ -2194,6 +2528,13 @@ struct elf_aarch64_obj_tdata /* All GNU_PROPERTY_AARCH64_FEATURE_1_AND properties. */ uint32_t gnu_and_prop; + + /* Zero to warn when linking objects with incompatible + GNU_PROPERTY_AARCH64_FEATURE_1_BTI. */ + int no_bti_warn; + + /* PLT type based on security. 
*/ + aarch64_plt_type plt_type; }; #define elf_aarch64_tdata(bfd) \ @@ -2299,9 +2640,15 @@ struct elf_aarch64_link_hash_table /* The number of bytes in the initial entry in the PLT. */ bfd_size_type plt_header_size; - /* The number of bytes in the subsequent PLT etries. */ + /* The bytes of the initial PLT entry. */ + const bfd_byte *plt0_entry; + + /* The number of bytes in the subsequent PLT entries. */ bfd_size_type plt_entry_size; + /* The bytes of the subsequent PLT entry. */ + const bfd_byte *plt_entry; + /* Small local sym cache. */ struct sym_cache sym_cache; @@ -2347,6 +2694,9 @@ struct elf_aarch64_link_hash_table yet. */ bfd_vma tlsdesc_plt; + /* The number of bytes in the PLT enty for the TLS descriptor. */ + bfd_size_type tlsdesc_plt_entry_size; + /* The GOT offset for the lazy trampoline. Communicated to the loader via DT_TLSDESC_GOT. The magic value (bfd_vma) -1 indicates an offset is not allocated. */ @@ -2560,8 +2910,8 @@ elfNN_aarch64_merge_symbol_attribute (st bfd_boolean definition ATTRIBUTE_UNUSED, bfd_boolean dynamic ATTRIBUTE_UNUSED) { - unsigned int isym_sto = isym->st_other & ~ELF_ST_VISIBILITY (-1); - unsigned int h_sto = h->other & ~ELF_ST_VISIBILITY (-1); + unsigned int isym_sto = (isym->st_other & ~ELF_ST_VISIBILITY (-1)) & 0xFF; + unsigned int h_sto = (h->other & ~ELF_ST_VISIBILITY (-1)) & 0xFF; if (isym_sto == h_sto) return; @@ -2600,7 +2950,7 @@ static struct bfd_link_hash_table * elfNN_aarch64_link_hash_table_create (bfd *abfd) { struct elf_aarch64_link_hash_table *ret; - bfd_size_type amt = sizeof (struct elf_aarch64_link_hash_table); + size_t amt = sizeof (struct elf_aarch64_link_hash_table); ret = bfd_zmalloc (amt); if (ret == NULL) @@ -2615,7 +2965,10 @@ elfNN_aarch64_link_hash_table_create (bf } ret->plt_header_size = PLT_ENTRY_SIZE; + ret->plt0_entry = elfNN_aarch64_small_plt0_entry; ret->plt_entry_size = PLT_SMALL_ENTRY_SIZE; + ret->plt_entry = elfNN_aarch64_small_plt_entry; + ret->tlsdesc_plt_entry_size = PLT_TLSDESC_ENTRY_SIZE; ret->obfd = abfd; ret->dt_tlsdesc_got = (bfd_vma) - 1; @@ -2650,12 +3003,13 @@ aarch64_relocate (unsigned int r_type, b reloc_howto_type *howto; bfd_vma place; - howto = elfNN_aarch64_howto_from_type (r_type); + howto = elfNN_aarch64_howto_from_type (input_bfd, r_type); place = (input_section->output_section->vma + input_section->output_offset + offset); - r_type = elfNN_aarch64_bfd_reloc_from_type (r_type); - value = _bfd_aarch64_elf_resolve_relocation (r_type, place, value, 0, FALSE); + r_type = elfNN_aarch64_bfd_reloc_from_type (input_bfd, r_type); + value = _bfd_aarch64_elf_resolve_relocation (input_bfd, r_type, place, + value, 0, FALSE); return _bfd_aarch64_elf_put_addend (input_bfd, input_section->contents + offset, r_type, howto, value) == bfd_reloc_ok; @@ -2882,7 +3236,7 @@ _bfd_aarch64_add_stub_entry_in_group (co if (stub_entry == NULL) { /* xgettext:c-format */ - _bfd_error_handler (_("%B: cannot create stub entry %s"), + _bfd_error_handler (_("%pB: cannot create stub entry %s"), section->owner, stub_name); return NULL; } @@ -2905,6 +3259,8 @@ _bfd_aarch64_add_stub_entry_after (const asection *stub_sec; struct elf_aarch64_stub_hash_entry *stub_entry; + stub_sec = NULL; + /* Only create the actual stub if we will end up needing it. 
*/ stub_sec = _bfd_aarch64_get_stub_for_link_section (link_section, htab); stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name, TRUE, FALSE); @@ -3097,7 +3453,7 @@ elfNN_aarch64_setup_section_lists (bfd * unsigned int top_id, top_index; asection *section; asection **input_list, **list; - bfd_size_type amt; + size_t amt; struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info); @@ -3175,7 +3531,7 @@ elfNN_aarch64_next_input_section (struct { asection **list = htab->input_list + isec->output_section->index; - if (*list != bfd_abs_section_ptr) + if (*list != bfd_abs_section_ptr && (isec->flags & SEC_CODE) != 0) { /* Steal the link_sec pointer for our list. */ /* This happens to make the list in reverse order, @@ -3196,68 +3552,97 @@ elfNN_aarch64_next_input_section (struct static void group_sections (struct elf_aarch64_link_hash_table *htab, bfd_size_type stub_group_size, - bfd_boolean stubs_always_before_branch) + bfd_boolean stubs_always_after_branch) { - asection **list = htab->input_list + htab->top_index; + asection **list = htab->input_list; do { asection *tail = *list; + asection *head; if (tail == bfd_abs_section_ptr) continue; + /* Reverse the list: we must avoid placing stubs at the + beginning of the section because the beginning of the text + section may be required for an interrupt vector in bare metal + code. */ +#define NEXT_SEC PREV_SEC + head = NULL; while (tail != NULL) { - asection *curr; - asection *prev; - bfd_size_type total; + /* Pop from tail. */ + asection *item = tail; + tail = PREV_SEC (item); - curr = tail; - total = tail->size; - while ((prev = PREV_SEC (curr)) != NULL - && ((total += curr->output_offset - prev->output_offset) - < stub_group_size)) - curr = prev; + /* Push on head. */ + NEXT_SEC (item) = head; + head = item; + } - /* OK, the size from the start of CURR to the end is less + while (head != NULL) + { + asection *curr; + asection *next; + bfd_vma stub_group_start = head->output_offset; + bfd_vma end_of_next; + + curr = head; + while (NEXT_SEC (curr) != NULL) + { + next = NEXT_SEC (curr); + end_of_next = next->output_offset + next->size; + if (end_of_next - stub_group_start >= stub_group_size) + /* End of NEXT is too far from start, so stop. */ + break; + /* Add NEXT to the group. */ + curr = next; + } + + /* OK, the size from the start to the start of CURR is less than stub_group_size and thus can be handled by one stub - section. (Or the tail section is itself larger than + section. (Or the head section is itself larger than stub_group_size, in which case we may be toast.) We should really be keeping track of the total size of stubs added here, as stubs contribute to the final output section size. */ do { - prev = PREV_SEC (tail); + next = NEXT_SEC (head); /* Set up this stub group. */ - htab->stub_group[tail->id].link_sec = curr; + htab->stub_group[head->id].link_sec = curr; } - while (tail != curr && (tail = prev) != NULL); + while (head != curr && (head = next) != NULL); /* But wait, there's more! Input sections up to stub_group_size - bytes before the stub section can be handled by it too. */ - if (!stubs_always_before_branch) + bytes after the stub section can be handled by it too. 
*/ + if (!stubs_always_after_branch) { - total = 0; - while (prev != NULL - && ((total += tail->output_offset - prev->output_offset) - < stub_group_size)) + stub_group_start = curr->output_offset + curr->size; + + while (next != NULL) { - tail = prev; - prev = PREV_SEC (tail); - htab->stub_group[tail->id].link_sec = curr; + end_of_next = next->output_offset + next->size; + if (end_of_next - stub_group_start >= stub_group_size) + /* End of NEXT is too far from stubs, so stop. */ + break; + /* Add NEXT to the stub group. */ + head = next; + next = NEXT_SEC (head); + htab->stub_group[head->id].link_sec = curr; } } - tail = prev; + head = next; } } - while (list-- != htab->input_list); + while (list++ != htab->input_list + htab->top_index); free (htab->input_list); } #undef PREV_SEC +#undef PREV_SEC #define AARCH64_BITS(x, pos, n) (((x) >> (pos)) & ((1 << (n)) - 1)) @@ -3531,7 +3916,8 @@ _bfd_aarch64_erratum_835769_stub_name (u { char *stub_name = (char *) bfd_malloc (strlen ("__erratum_835769_veneer_") + 16); - sprintf (stub_name,"__erratum_835769_veneer_%d", num_fixes); + if (stub_name != NULL) + sprintf (stub_name,"__erratum_835769_veneer_%d", num_fixes); return stub_name; } @@ -3573,8 +3959,9 @@ _bfd_aarch64_erratum_835769_scan (bfd *i sec_data = elf_aarch64_section_data (section); - qsort (sec_data->map, sec_data->mapcount, - sizeof (elf_aarch64_section_map), elf_aarch64_compare_mapping); + if (sec_data->mapcount) + qsort (sec_data->map, sec_data->mapcount, + sizeof (elf_aarch64_section_map), elf_aarch64_compare_mapping); for (span = 0; span < sec_data->mapcount; span++) { @@ -3630,7 +4017,7 @@ _bfd_aarch64_erratum_835769_scan (bfd *i static bfd_boolean _bfd_aarch64_adrp_p (uint32_t insn) { - return ((insn & 0x9f000000) == 0x90000000); + return ((insn & AARCH64_ADRP_OP_MASK) == AARCH64_ADRP_OP); } @@ -3727,22 +4114,24 @@ _bfd_aarch64_resize_stubs (struct elf_aa if (!strstr (section->name, STUB_SUFFIX)) continue; + /* Add space for a branch. Add 8 bytes to keep section 8 byte aligned, + as long branch stubs contain a 64-bit address. */ if (section->size) - section->size += 4; + section->size += 8; /* Ensure all stub sections have a size which is a multiple of 4096. This is important in order to ensure that the insertion of stub sections does not in itself move existing code around - in such a way that new errata sequences are created. */ + in such a way that new errata sequences are created. We only do this + when the ADRP workaround is enabled. If only the ADR workaround is + enabled then the stubs workaround won't ever be used. */ if (htab->fix_erratum_843419) if (section->size) section->size = BFD_ALIGN (section->size, 0x1000); } } - -/* Construct an erratum 843419 workaround stub name. - */ +/* Construct an erratum 843419 workaround stub name. */ static char * _bfd_aarch64_erratum_843419_stub_name (asection *input_section, @@ -3778,6 +4167,8 @@ _bfd_aarch64_erratum_843419_fixup (uint3 struct elf_aarch64_stub_hash_entry *stub_entry; stub_name = _bfd_aarch64_erratum_843419_stub_name (section, ldst_offset); + if (stub_name == NULL) + return FALSE; stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name, FALSE, FALSE); if (stub_entry) @@ -3795,8 +4186,7 @@ _bfd_aarch64_erratum_843419_fixup (uint3 If we placed workaround veneers in any other stub section then we could not assume that all relocations have been processed on the corresponding input section at the point we output the stub - section. - */ + section. 
*/ stub_entry = _bfd_aarch64_add_stub_entry_after (stub_name, section, htab); if (stub_entry == NULL) @@ -3854,8 +4244,9 @@ _bfd_aarch64_erratum_843419_scan (bfd *i sec_data = elf_aarch64_section_data (section); - qsort (sec_data->map, sec_data->mapcount, - sizeof (elf_aarch64_section_map), elf_aarch64_compare_mapping); + if (sec_data->mapcount) + qsort (sec_data->map, sec_data->mapcount, + sizeof (elf_aarch64_section_map), elf_aarch64_compare_mapping); for (span = 0; span < sec_data->mapcount; span++) { @@ -3950,9 +4341,15 @@ elfNN_aarch64_size_stubs (bfd *output_bf for (input_bfd = info->input_bfds; input_bfd != NULL; input_bfd = input_bfd->link.next) - if (!_bfd_aarch64_erratum_835769_scan (input_bfd, info, - &num_erratum_835769_fixes)) - return FALSE; + { + if (!is_aarch64_elf (input_bfd) + || (input_bfd->flags & BFD_LINKER_CREATED) != 0) + continue; + + if (!_bfd_aarch64_erratum_835769_scan (input_bfd, info, + &num_erratum_835769_fixes)) + return FALSE; + } _bfd_aarch64_resize_stubs (htab); (*htab->layout_sections_again) (); @@ -3968,6 +4365,10 @@ elfNN_aarch64_size_stubs (bfd *output_bf { asection *section; + if (!is_aarch64_elf (input_bfd) + || (input_bfd->flags & BFD_LINKER_CREATED) != 0) + continue; + for (section = input_bfd->sections; section != NULL; section = section->next) @@ -3990,6 +4391,10 @@ elfNN_aarch64_size_stubs (bfd *output_bf asection *section; Elf_Internal_Sym *local_syms = NULL; + if (!is_aarch64_elf (input_bfd) + || (input_bfd->flags & BFD_LINKER_CREATED) != 0) + continue; + /* We'll need the symbol table in a second. */ symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr; if (symtab_hdr->sh_info == 0) @@ -4198,6 +4603,9 @@ elfNN_aarch64_size_stubs (bfd *output_bf { /* The proper stub has already been created. */ free (stub_name); + /* Always update this stub's target since it may have + changed after layout. */ + stub_entry->target_value = sym_value + irela->r_addend; continue; } @@ -4249,7 +4657,7 @@ elfNN_aarch64_size_stubs (bfd *output_bf return TRUE; -error_ret_free_local: + error_ret_free_local: return FALSE; } @@ -4284,8 +4692,11 @@ elfNN_aarch64_build_stubs (struct bfd_li return FALSE; stub_sec->size = 0; + /* Add a branch around the stub section, and a nop, to keep it 8 byte + aligned, as long branch stubs contain a 64-bit address. */ bfd_putl32 (0x14000000 | (size >> 2), stub_sec->contents); - stub_sec->size += 4; + bfd_putl32 (INSN_NOP, stub_sec->contents + 4); + stub_sec->size += 8; } /* Build the stubs as directed by the stub hash table. */ @@ -4375,21 +4786,67 @@ bfd_elfNN_aarch64_init_maps (bfd *abfd) } } +static void +setup_plt_values (struct bfd_link_info *link_info, + aarch64_plt_type plt_type) +{ + struct elf_aarch64_link_hash_table *globals; + globals = elf_aarch64_hash_table (link_info); + + if (plt_type == PLT_BTI_PAC) + { + globals->plt0_entry = elfNN_aarch64_small_plt0_bti_entry; + + /* Only in ET_EXEC we need PLTn with BTI. */ + if (bfd_link_pde (link_info)) + { + globals->plt_entry_size = PLT_BTI_PAC_SMALL_ENTRY_SIZE; + globals->plt_entry = elfNN_aarch64_small_plt_bti_pac_entry; + } + else + { + globals->plt_entry_size = PLT_PAC_SMALL_ENTRY_SIZE; + globals->plt_entry = elfNN_aarch64_small_plt_pac_entry; + } + } + else if (plt_type == PLT_BTI) + { + globals->plt0_entry = elfNN_aarch64_small_plt0_bti_entry; + + /* Only in ET_EXEC we need PLTn with BTI. 
*/ + if (bfd_link_pde (link_info)) + { + globals->plt_entry_size = PLT_BTI_SMALL_ENTRY_SIZE; + globals->plt_entry = elfNN_aarch64_small_plt_bti_entry; + } + } + else if (plt_type == PLT_PAC) + { + globals->plt_entry_size = PLT_PAC_SMALL_ENTRY_SIZE; + globals->plt_entry = elfNN_aarch64_small_plt_pac_entry; + } +} + /* Set option values needed during linking. */ void bfd_elfNN_aarch64_set_options (struct bfd *output_bfd, struct bfd_link_info *link_info, int no_enum_warn, - int no_wchar_warn, int pic_veneer, + int no_wchar_warn, + int pic_veneer, int fix_erratum_835769, int fix_erratum_843419, - int no_apply_dynamic_relocs) + int no_apply_dynamic_relocs, + aarch64_bti_pac_info bp_info) { struct elf_aarch64_link_hash_table *globals; globals = elf_aarch64_hash_table (link_info); globals->pic_veneer = pic_veneer; globals->fix_erratum_835769 = fix_erratum_835769; + /* If the default options are used, then ERRAT_ADR will be set by default + which will enable the ADRP->ADR workaround for the erratum 843419 + workaround. */ globals->fix_erratum_843419 = fix_erratum_843419; globals->fix_erratum_843419_adr = TRUE; globals->no_apply_dynamic_relocs = no_apply_dynamic_relocs; @@ -4397,6 +4854,20 @@ bfd_elfNN_aarch64_set_options (struct bf BFD_ASSERT (is_aarch64_elf (output_bfd)); elf_aarch64_tdata (output_bfd)->no_enum_size_warning = no_enum_warn; elf_aarch64_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn; + + switch (bp_info.bti_type) + { + case BTI_WARN: + elf_aarch64_tdata (output_bfd)->no_bti_warn = 0; + elf_aarch64_tdata (output_bfd)->gnu_and_prop + |= GNU_PROPERTY_AARCH64_FEATURE_1_BTI; + break; + + default: + break; + } + elf_aarch64_tdata (output_bfd)->plt_type = bp_info.plt_type; + setup_plt_values (link_info, bp_info.plt_type); } static bfd_vma @@ -4631,7 +5102,7 @@ aarch64_tls_transition (bfd *input_bfd, unsigned long r_symndx) { bfd_reloc_code_real_type bfd_r_type - = elfNN_aarch64_bfd_reloc_from_type (r_type); + = elfNN_aarch64_bfd_reloc_from_type (input_bfd, r_type); if (! aarch64_can_relax_tls (input_bfd, info, bfd_r_type, h, r_symndx)) return bfd_r_type; @@ -4807,7 +5278,7 @@ make_branch_to_erratum_835769_stub (stru abfd = stub_entry->target_section->owner; if (!aarch64_valid_branch_p (veneer_entry_loc, veneered_insn_loc)) _bfd_error_handler - (_("%B: error: Erratum 835769 stub out " + (_("%pB: error: erratum 835769 stub out " "of range (input file too large)"), abfd); target = stub_entry->target_value; @@ -4847,15 +5318,21 @@ _bfd_aarch64_erratum_843419_branch_to_st || stub_entry->stub_type != aarch64_stub_erratum_843419_veneer) return TRUE; - insn = bfd_getl32 (contents + stub_entry->target_value); - bfd_putl32 (insn, - stub_entry->stub_sec->contents + stub_entry->stub_offset); + /* Only update the stub section if we have one. We should always have one if + we're allowed to use the ADRP errata workaround, otherwise it is not + required. 
*/ + if (stub_entry->stub_sec) + { + insn = bfd_getl32 (contents + stub_entry->target_value); + bfd_putl32 (insn, + stub_entry->stub_sec->contents + stub_entry->stub_offset); + } place = (section->output_section->vma + section->output_offset + stub_entry->adrp_offset); insn = bfd_getl32 (contents + stub_entry->adrp_offset); - if ((insn & AARCH64_ADRP_OP_MASK) != AARCH64_ADRP_OP) + if (!_bfd_aarch64_adrp_p (insn)) abort (); bfd_signed_vma imm = @@ -4869,6 +5346,8 @@ _bfd_aarch64_erratum_843419_branch_to_st insn = (_bfd_aarch64_reencode_adr_imm (AARCH64_ADR_OP, imm) | AARCH64_RT (insn)); bfd_putl32 (insn, contents + stub_entry->adrp_offset); + /* Stub is not needed, don't map it out. */ + stub_entry->stub_type = aarch64_stub_none; } else { @@ -4888,7 +5367,7 @@ _bfd_aarch64_erratum_843419_branch_to_st abfd = stub_entry->target_section->owner; if (!aarch64_valid_branch_p (veneer_entry_loc, veneered_insn_loc)) _bfd_error_handler - (_("%B: error: Erratum 843419 stub out " + (_("%pB: error: erratum 843419 stub out " "of range (input file too large)"), abfd); branch_insn = 0x14000000; @@ -4952,6 +5431,17 @@ aarch64_relocation_aginst_gp_p (bfd_relo || reloc == BFD_RELOC_AARCH64_MOVW_GOTOFF_G1); } +/* Extracted from linker.c. */ +/* Return TRUE if the symbol described by a linker hash entry H + is going to be absolute. Linker-script defined symbols can be + converted from absolute to section-relative ones late in the + link. Use this macro to correctly determine whether the symbol + will actually end up absolute in output. */ +#define bfd_is_abs_symbol(H) \ + (((H)->type == bfd_link_hash_defined \ + || (H)->type == bfd_link_hash_defweak) \ + && bfd_is_abs_section ((H)->u.def.section)) + /* Perform a relocation as part of a final link. The input relocation type should be TLS relaxed. */ @@ -4985,6 +5475,7 @@ elfNN_aarch64_final_link_relocate (reloc asection *base_got; bfd_vma orig_value = value; bfd_boolean resolved_to_zero; + bfd_boolean abs_symbol_p; globals = elf_aarch64_hash_table (info); @@ -5004,12 +5495,13 @@ elfNN_aarch64_final_link_relocate (reloc weak_undef_p = (h ? h->root.type == bfd_link_hash_undefweak : bfd_is_und_section (sym_sec)); + abs_symbol_p = h != NULL && bfd_is_abs_symbol (&h->root); + /* Since STT_GNU_IFUNC symbol must go through PLT, we handle it here if it is defined in a non-shared object. 
*/ if (h != NULL && h->type == STT_GNU_IFUNC - && (input_section->flags & SEC_ALLOC) && h->def_regular) { asection *plt; @@ -5037,6 +5529,7 @@ elfNN_aarch64_final_link_relocate (reloc /* xgettext:c-format */ (_("%B(%A+%#Lx): unresolvable %s relocation against symbol `%s'"), input_bfd, input_section, rel->r_offset, howto->name, name); + bfd_set_error (bfd_error_bad_value); return bfd_reloc_notsupported; } @@ -5050,7 +5543,7 @@ elfNN_aarch64_final_link_relocate (reloc switch (bfd_r_type) { default: -bad_ifunc_reloc: + bad_ifunc_reloc: if (h->root.root.string) name = h->root.root.string; else @@ -5058,7 +5551,7 @@ bad_ifunc_reloc: NULL); _bfd_error_handler /* xgettext:c-format */ - (_("%B: relocation %s against STT_GNU_IFUNC " + (_("%pB: relocation %s against STT_GNU_IFUNC " "symbol `%s' isn't handled by %s"), input_bfd, howto->name, name, __FUNCTION__); bfd_set_error (bfd_error_bad_value); @@ -5074,9 +5567,9 @@ bad_ifunc_reloc: sym, NULL); _bfd_error_handler /* xgettext:c-format */ - (_("%B: relocation %s against STT_GNU_IFUNC " - "symbol `%s' has non-zero addend: %Ld"), - input_bfd, howto->name, name, rel->r_addend); + (_("%pB: relocation %s against STT_GNU_IFUNC " + "symbol `%s' has non-zero addend: %ld"), + input_bfd, howto->name, name, (int64_t) rel->r_addend); bfd_set_error (bfd_error_bad_value); return bfd_reloc_notsupported; } @@ -5130,7 +5623,8 @@ bad_ifunc_reloc: /* FALLTHROUGH */ case BFD_RELOC_AARCH64_CALL26: case BFD_RELOC_AARCH64_JUMP26: - value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value, + value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type, + place, value, signed_addend, weak_undef_p); return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type, @@ -5207,7 +5701,8 @@ bad_ifunc_reloc: addend = (globals->root.sgot->output_section->vma + globals->root.sgot->output_offset); - value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value, + value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type, + place, value, addend, weak_undef_p); return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type, howto, value); case BFD_RELOC_AARCH64_ADD_LO12: @@ -5276,6 +5771,12 @@ bad_ifunc_reloc: skip = TRUE; relocate = TRUE; } + else if (abs_symbol_p) + { + /* Local absolute symbol. 
*/ + skip = (h->forced_local || (h->dynindx == -1)); + relocate = skip; + } outrel.r_offset += (input_section->output_section->vma + input_section->output_offset); @@ -5285,8 +5786,7 @@ bad_ifunc_reloc: else if (h != NULL && h->dynindx != -1 && (!bfd_link_pic (info) - || !(bfd_link_pie (info) - || SYMBOLIC_BIND (info, h)) + || !(bfd_link_pie (info) || SYMBOLIC_BIND (info, h)) || !h->def_regular)) outrel.r_info = ELFNN_R_INFO (h->dynindx, r_type); else @@ -5380,7 +5880,8 @@ bad_ifunc_reloc: signed_addend = 0; } } - value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value, + value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type, + place, value, signed_addend, weak_undef_p); *unresolved_reloc_p = FALSE; break; @@ -5392,6 +5893,13 @@ bad_ifunc_reloc: case BFD_RELOC_AARCH64_ADR_HI21_PCREL: case BFD_RELOC_AARCH64_ADR_LO21_PCREL: case BFD_RELOC_AARCH64_LD_LO19_PCREL: + case BFD_RELOC_AARCH64_MOVW_PREL_G0: + case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC: + case BFD_RELOC_AARCH64_MOVW_PREL_G1: + case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC: + case BFD_RELOC_AARCH64_MOVW_PREL_G2: + case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC: + case BFD_RELOC_AARCH64_MOVW_PREL_G3: if (bfd_link_pic (info) && (input_section->flags & SEC_ALLOC) != 0 && (input_section->flags & SEC_READONLY) != 0 @@ -5401,7 +5909,7 @@ bad_ifunc_reloc: _bfd_error_handler /* xgettext:c-format */ - (_("%B: relocation %s against symbol `%s' which may bind " + (_("%pB: relocation %s against symbol `%s' which may bind " "externally can not be used when making a shared object; " "recompile with -fPIC"), input_bfd, elfNN_aarch64_howto_table[howto_index].name, @@ -5409,6 +5917,23 @@ bad_ifunc_reloc: bfd_set_error (bfd_error_bad_value); return bfd_reloc_notsupported; } + value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type, + place, value, + signed_addend, + weak_undef_p); + break; + + case BFD_RELOC_AARCH64_BRANCH19: + case BFD_RELOC_AARCH64_TSTBR14: + if (h && h->root.type == bfd_link_hash_undefined) + { + _bfd_error_handler + /* xgettext:c-format */ + (_("%pB: conditional branch to undefined symbol `%s' " + "not allowed"), input_bfd, h->root.root.string); + bfd_set_error (bfd_error_bad_value); + return bfd_reloc_notsupported; + } /* Fall through. 
*/ case BFD_RELOC_AARCH64_16: @@ -5416,7 +5941,6 @@ bad_ifunc_reloc: case BFD_RELOC_AARCH64_32: #endif case BFD_RELOC_AARCH64_ADD_LO12: - case BFD_RELOC_AARCH64_BRANCH19: case BFD_RELOC_AARCH64_LDST128_LO12: case BFD_RELOC_AARCH64_LDST16_LO12: case BFD_RELOC_AARCH64_LDST32_LO12: @@ -5432,8 +5956,8 @@ bad_ifunc_reloc: case BFD_RELOC_AARCH64_MOVW_G2_NC: case BFD_RELOC_AARCH64_MOVW_G2_S: case BFD_RELOC_AARCH64_MOVW_G3: - case BFD_RELOC_AARCH64_TSTBR14: - value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value, + value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type, + place, value, signed_addend, weak_undef_p); break; @@ -5478,7 +6002,8 @@ bad_ifunc_reloc: if (aarch64_relocation_aginst_gp_p (bfd_r_type)) addend = (globals->root.sgot->output_section->vma + globals->root.sgot->output_offset); - value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value, + value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type, + place, value, addend, weak_undef_p); } else @@ -5492,7 +6017,7 @@ bad_ifunc_reloc: int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START; _bfd_error_handler /* xgettext:c-format */ - (_("%B: Local symbol descriptor table be NULL when applying " + (_("%pB: local symbol descriptor table be NULL when applying " "relocation %s against local symbol"), input_bfd, elfNN_aarch64_howto_table[howto_index].name); abort (); @@ -5525,7 +6050,8 @@ bad_ifunc_reloc: if (aarch64_relocation_aginst_gp_p (bfd_r_type)) addend = base_got->output_section->vma + base_got->output_offset; - value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value, + value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type, + place, value, addend, weak_undef_p); } @@ -5562,7 +6088,8 @@ bad_ifunc_reloc: + globals->root.sgot->output_section->vma + globals->root.sgot->output_offset); - value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value, + value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type, + place, value, 0, weak_undef_p); *unresolved_reloc_p = FALSE; break; @@ -5575,7 +6102,8 @@ bad_ifunc_reloc: return bfd_reloc_notsupported; value = symbol_got_offset (input_bfd, h, r_symndx); - value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value, + value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type, + place, value, 0, weak_undef_p); *unresolved_reloc_p = FALSE; break; @@ -5596,24 +6124,64 @@ bad_ifunc_reloc: case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1: case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC: case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2: - value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value, - signed_addend - dtpoff_base (info), - weak_undef_p); - break; + { + if (!(weak_undef_p || elf_hash_table (info)->tls_sec)) + { + int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START; + _bfd_error_handler + /* xgettext:c-format */ + (_("%pB: TLS relocation %s against undefined symbol `%s'"), + input_bfd, elfNN_aarch64_howto_table[howto_index].name, + h->root.root.string); + bfd_set_error (bfd_error_bad_value); + return bfd_reloc_notsupported; + } + + bfd_vma def_value + = weak_undef_p ? 
0 : signed_addend - dtpoff_base (info); + value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type, + place, value, + def_value, weak_undef_p); + break; + } case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12: case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12: case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC: + case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12: + case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC: + case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12: + case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC: + case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12: + case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC: + case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12: + case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC: case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0: case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC: case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1: case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC: case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2: - value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value, - signed_addend - tpoff_base (info), - weak_undef_p); - *unresolved_reloc_p = FALSE; - break; + { + if (!(weak_undef_p || elf_hash_table (info)->tls_sec)) + { + int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START; + _bfd_error_handler + /* xgettext:c-format */ + (_("%pB: TLS relocation %s against undefined symbol `%s'"), + input_bfd, elfNN_aarch64_howto_table[howto_index].name, + h->root.root.string); + bfd_set_error (bfd_error_bad_value); + return bfd_reloc_notsupported; + } + + bfd_vma def_value + = weak_undef_p ? 0 : signed_addend - tpoff_base (info); + value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type, + place, value, + def_value, weak_undef_p); + *unresolved_reloc_p = FALSE; + break; + } case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12: case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21: @@ -5628,7 +6196,8 @@ bad_ifunc_reloc: + globals->root.sgotplt->output_offset + globals->sgotplt_jump_table_size); - value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value, + value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type, + place, value, 0, weak_undef_p); *unresolved_reloc_p = FALSE; break; @@ -5646,7 +6215,8 @@ bad_ifunc_reloc: value -= (globals->root.sgot->output_section->vma + globals->root.sgot->output_offset); - value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value, + value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type, + place, value, 0, weak_undef_p); *unresolved_reloc_p = FALSE; break; @@ -5695,6 +6265,64 @@ bad_ifunc_reloc: # define movz_hw_R0 (0x52c00000) #endif +/* Structure to hold payload for _bfd_aarch64_erratum_843419_clear_stub, + it is used to identify the stub information to reset. */ + +struct erratum_843419_branch_to_stub_clear_data +{ + bfd_vma adrp_offset; + asection *output_section; +}; + +/* Clear the erratum information for GEN_ENTRY if the ADRP_OFFSET and + section inside IN_ARG matches. The clearing is done by setting the + stub_type to none. 
*/ + +static bfd_boolean +_bfd_aarch64_erratum_843419_clear_stub (struct bfd_hash_entry *gen_entry, + void *in_arg) +{ + struct elf_aarch64_stub_hash_entry *stub_entry + = (struct elf_aarch64_stub_hash_entry *) gen_entry; + struct erratum_843419_branch_to_stub_clear_data *data + = (struct erratum_843419_branch_to_stub_clear_data *) in_arg; + + if (stub_entry->target_section != data->output_section + || stub_entry->stub_type != aarch64_stub_erratum_843419_veneer + || stub_entry->adrp_offset != data->adrp_offset) + return TRUE; + + /* Change the stub type instead of removing the entry, removing from the hash + table would be slower and we have already reserved the memory for the entry + so there wouldn't be much gain. Changing the stub also keeps around a + record of what was there before. */ + stub_entry->stub_type = aarch64_stub_none; + + /* We're done and there could have been only one matching stub at that + particular offset, so abort further traversal. */ + return FALSE; +} + +/* TLS Relaxations may relax an adrp sequence that matches the erratum 843419 + sequence. In this case the erratum no longer applies and we need to remove + the entry from the pending stub generation. This clears matching adrp insn + at ADRP_OFFSET in INPUT_SECTION in the stub table defined in GLOBALS. */ + +static void +clear_erratum_843419_entry (struct elf_aarch64_link_hash_table *globals, + bfd_vma adrp_offset, asection *input_section) +{ + if (globals->fix_erratum_843419) + { + struct erratum_843419_branch_to_stub_clear_data data; + data.adrp_offset = adrp_offset; + data.output_section = input_section; + + bfd_hash_traverse (&globals->stub_hash_table, + _bfd_aarch64_erratum_843419_clear_stub, &data); + } +} + /* Handle TLS relaxations. Relaxing is possible for symbols that use R_AARCH64_TLSDESC_ADR_{PAGE, LD64_LO12_NC, ADD_LO12_NC} during a static link. @@ -5705,8 +6333,9 @@ bad_ifunc_reloc: static bfd_reloc_status_type elfNN_aarch64_tls_relax (struct elf_aarch64_link_hash_table *globals, - bfd *input_bfd, bfd_byte *contents, - Elf_Internal_Rela *rel, struct elf_link_hash_entry *h) + bfd *input_bfd, asection *input_section, + bfd_byte *contents, Elf_Internal_Rela *rel, + struct elf_link_hash_entry *h) { bfd_boolean is_local = h == NULL; unsigned int r_type = ELFNN_R_TYPE (rel->r_info); @@ -5714,7 +6343,7 @@ elfNN_aarch64_tls_relax (struct elf_aarc BFD_ASSERT (globals && input_bfd && contents && rel); - switch (elfNN_aarch64_bfd_reloc_from_type (r_type)) + switch (elfNN_aarch64_bfd_reloc_from_type (input_bfd, r_type)) { case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21: case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21: @@ -5727,6 +6356,9 @@ elfNN_aarch64_tls_relax (struct elf_aarc Where R is x for LP64, and w for ILP32. */ bfd_putl32 (movz_R0, contents + rel->r_offset); + /* We have relaxed the adrp into a mov, we may have to clear any + pending erratum fixes. */ + clear_erratum_843419_entry (globals, rel->r_offset, input_section); return bfd_reloc_continue; } else @@ -6017,6 +6649,9 @@ elfNN_aarch64_tls_relax (struct elf_aarc { insn = bfd_getl32 (contents + rel->r_offset); bfd_putl32 (movz_R0 | (insn & 0x1f), contents + rel->r_offset); + /* We have relaxed the adrp into a mov, we may have to clear any + pending erratum fixes. 
*/ + clear_erratum_843419_entry (globals, rel->r_offset, input_section); } return bfd_reloc_continue; @@ -6136,7 +6771,8 @@ elfNN_aarch64_relocate_section (bfd *out r_symndx = ELFNN_R_SYM (rel->r_info); r_type = ELFNN_R_TYPE (rel->r_info); - howto = bfd_reloc.howto = elfNN_aarch64_howto_from_type (r_type); + bfd_reloc.howto = elfNN_aarch64_howto_from_type (input_bfd, r_type); + howto = bfd_reloc.howto; if (howto == NULL) return _bfd_unrecognized_reloc (input_bfd, input_section, r_type); @@ -6206,7 +6842,7 @@ elfNN_aarch64_relocate_section (bfd *out name = (bfd_elf_string_from_elf_section (input_bfd, symtab_hdr->sh_link, sym->st_name)); if (name == NULL || *name == '\0') - name = bfd_section_name (input_bfd, sec); + name = bfd_section_name (NULL, sec); } if (r_symndx != 0 @@ -6220,11 +6856,11 @@ elfNN_aarch64_relocate_section (bfd *out _bfd_error_handler ((sym_type == STT_TLS /* xgettext:c-format */ - ? _("%B(%A+%#Lx): %s used with TLS symbol %s") + ? _("%pB(%pA+%#lx): %s used with TLS symbol %s") /* xgettext:c-format */ - : _("%B(%A+%#Lx): %s used with non-TLS symbol %s")), + : _("%pB(%pA+%#lx): %s used with non-TLS symbol %s")), input_bfd, - input_section, rel->r_offset, howto->name, name); + input_section, (uint64_t) rel->r_offset, howto->name, name); } /* We relax only if we can see that there can be a valid transition @@ -6240,7 +6876,8 @@ elfNN_aarch64_relocate_section (bfd *out howto = elfNN_aarch64_howto_from_bfd_reloc (bfd_r_type); BFD_ASSERT (howto != NULL); r_type = howto->type; - r = elfNN_aarch64_tls_relax (globals, input_bfd, contents, rel, h); + r = elfNN_aarch64_tls_relax (globals, input_bfd, input_section, + contents, rel, h); unresolved_reloc = 0; } else @@ -6264,7 +6901,7 @@ elfNN_aarch64_relocate_section (bfd *out h, &unresolved_reloc, save_addend, &addend, sym); - switch (elfNN_aarch64_bfd_reloc_from_type (r_type)) + switch (elfNN_aarch64_bfd_reloc_from_type (input_bfd, r_type)) { case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC: case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21: @@ -6307,7 +6944,7 @@ elfNN_aarch64_relocate_section (bfd *out bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc); bfd_reloc_code_real_type real_type = - elfNN_aarch64_bfd_reloc_from_type (r_type); + elfNN_aarch64_bfd_reloc_from_type (input_bfd, r_type); if (real_type == BFD_RELOC_AARCH64_TLSLD_ADR_PREL21 || real_type == BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21 @@ -6492,8 +7129,9 @@ elfNN_aarch64_relocate_section (bfd *out { _bfd_error_handler /* xgettext:c-format */ - (_("%B(%A+%#Lx): unresolvable %s relocation against symbol `%s'"), - input_bfd, input_section, rel->r_offset, howto->name, + (_("%pB(%pA+%#lx): " + "unresolvable %s relocation against symbol `%s'"), + input_bfd, input_section, (uint64_t) rel->r_offset, howto->name, h->root.root.string); return FALSE; } @@ -6501,7 +7139,7 @@ elfNN_aarch64_relocate_section (bfd *out if (r != bfd_reloc_ok && r != bfd_reloc_continue) { bfd_reloc_code_real_type real_r_type - = elfNN_aarch64_bfd_reloc_from_type (r_type); + = elfNN_aarch64_bfd_reloc_from_type (input_bfd, r_type); switch (r) { @@ -6514,7 +7152,7 @@ elfNN_aarch64_relocate_section (bfd *out { (*info->callbacks->warning) (info, - _("Too many GOT entries for -fpic, " + _("too many GOT entries for -fpic, " "please recompile with -fPIC"), name, input_bfd, input_section, rel->r_offset); return FALSE; @@ -6532,7 +7170,7 @@ elfNN_aarch64_relocate_section (bfd *out Try to catch this situation here and provide a more helpful error message to the user. 
*/ - if (addend & ((1 << howto->rightshift) - 1) + if (addend & (((bfd_vma) 1 << howto->rightshift) - 1) /* FIXME: Are we testing all of the appropriate reloc types here ? */ && (real_r_type == BFD_RELOC_AARCH64_LD_LO19_PCREL @@ -6544,7 +7182,7 @@ elfNN_aarch64_relocate_section (bfd *out info->callbacks->warning (info, _("One possible cause of this error is that the \ symbol is being referenced in the indicated code as if it had a larger \ -alignment than was declared where it was defined."), +alignment than was declared where it was defined"), name, input_bfd, input_section, rel->r_offset); } break; @@ -6687,7 +7325,7 @@ elfNN_aarch64_merge_private_bfd_data (bf for (sec = ibfd->sections; sec != NULL; sec = sec->next) { - if ((bfd_get_section_flags (ibfd, sec) + if ((bfd_get_section_flags (NULL, sec) & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS)) == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS)) only_data_sections = FALSE; @@ -6721,33 +7359,16 @@ elfNN_aarch64_print_private_bfd_data (bf containing valid data. */ /* xgettext:c-format */ - fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags); + fprintf (file, _("private flags = 0x%lx:"), elf_elfheader (abfd)->e_flags); if (flags) - fprintf (file, _("")); + fprintf (file, _(" ")); fputc ('\n', file); return TRUE; } -/* Find dynamic relocs for H that apply to read-only sections. */ - -static asection * -readonly_dynrelocs (struct elf_link_hash_entry *h) -{ - struct elf_dyn_relocs *p; - - for (p = elf_aarch64_hash_entry (h)->dyn_relocs; p != NULL; p = p->next) - { - asection *s = p->sec->output_section; - - if (s != NULL && (s->flags & SEC_READONLY) != 0) - return p->sec; - } - return NULL; -} - /* Return true if we need copy relocation against EH. */ static bfd_boolean @@ -6933,7 +7554,7 @@ aarch64_elf_create_got_section (bfd *abf (bed->dynamic_sec_flags | SEC_READONLY)); if (s == NULL - || ! bfd_set_section_alignment (abfd, s, bed->s->log_file_align)) + || !bfd_set_section_alignment (abfd, s, bed->s->log_file_align)) return FALSE; htab->srelgot = s; @@ -6961,8 +7582,7 @@ aarch64_elf_create_got_section (bfd *abf { s = bfd_make_section_anyway_with_flags (abfd, ".got.plt", flags); if (s == NULL - || !bfd_set_section_alignment (abfd, s, - bed->s->log_file_align)) + || !bfd_set_section_alignment (abfd, s, bed->s->log_file_align)) return FALSE; htab->sgotplt = s; } @@ -7013,7 +7633,7 @@ elfNN_aarch64_check_relocs (bfd *abfd, s if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr)) { /* xgettext:c-format */ - _bfd_error_handler (_("%B: bad symbol index: %d"), abfd, r_symndx); + _bfd_error_handler (_("%pB: bad symbol index: %d"), abfd, r_symndx); return FALSE; } @@ -7117,8 +7737,7 @@ elfNN_aarch64_check_relocs (bfd *abfd, s if (h != NULL /* This is an absolute symbol. It represents a value instead of an address. */ - && ((h->root.type == bfd_link_hash_defined - && bfd_is_abs_section (h->root.u.def.section)) + && (bfd_is_abs_symbol (&h->root) /* This is an undefined symbol. */ || h->root.type == bfd_link_hash_undefined)) break; @@ -7128,7 +7747,7 @@ elfNN_aarch64_check_relocs (bfd *abfd, s int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START; _bfd_error_handler /* xgettext:c-format */ - (_("%B: relocation %s against `%s' can not be used when making " + (_("%pB: relocation %s against `%s' can not be used when making " "a shared object"), abfd, elfNN_aarch64_howto_table[howto_index].name, (h) ? 
h->root.root.string : "a local symbol"); @@ -7147,7 +7766,7 @@ elfNN_aarch64_check_relocs (bfd *abfd, s int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START; _bfd_error_handler /* xgettext:c-format */ - (_("%B: relocation %s against `%s' can not be used when making " + (_("%pB: relocation %s against `%s' can not be used when making " "a shared object; recompile with -fPIC"), abfd, elfNN_aarch64_howto_table[howto_index].name, (h) ? h->root.root.string : "a local symbol"); @@ -7268,7 +7887,7 @@ elfNN_aarch64_check_relocs (bfd *abfd, s p = *head; if (p == NULL || p->sec != sec) { - bfd_size_type amt = sizeof *p; + size_t amt = sizeof *p; p = ((struct elf_dyn_relocs *) bfd_zalloc (htab->root.dynobj, amt)); if (p == NULL) @@ -7318,9 +7937,6 @@ elfNN_aarch64_check_relocs (bfd *abfd, s case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC: case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21: case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21: - case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1: - case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC: - case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2: { unsigned got_type; unsigned old_got_type; @@ -7417,116 +8033,43 @@ elfNN_aarch64_is_target_special_symbol ( BFD_AARCH64_SPECIAL_SYM_TYPE_ANY); } -/* This is a copy of elf_find_function () from elf.c except that - AArch64 mapping symbols are ignored when looking for function names. */ - -static bfd_boolean -aarch64_elf_find_function (bfd *abfd ATTRIBUTE_UNUSED, - asymbol **symbols, - asection *section, - bfd_vma offset, - const char **filename_ptr, - const char **functionname_ptr) -{ - const char *filename = NULL; - asymbol *func = NULL; - bfd_vma low_func = 0; - asymbol **p; - - for (p = symbols; *p != NULL; p++) - { - elf_symbol_type *q; - - q = (elf_symbol_type *) * p; +/* If the ELF symbol SYM might be a function in SEC, return the + function size and set *CODE_OFF to the function's entry point, + otherwise return zero. */ + +static bfd_size_type +elfNN_aarch64_maybe_function_sym (const asymbol *sym, asection *sec, + bfd_vma *code_off) +{ + bfd_size_type size; + + if ((sym->flags & (BSF_SECTION_SYM | BSF_FILE | BSF_OBJECT + | BSF_THREAD_LOCAL | BSF_RELC | BSF_SRELC)) != 0 + || sym->section != sec) + return 0; - switch (ELF_ST_TYPE (q->internal_elf_sym.st_info)) - { - default: - break; - case STT_FILE: - filename = bfd_asymbol_name (&q->symbol); - break; + if (!(sym->flags & BSF_SYNTHETIC)) + switch (ELF_ST_TYPE (((elf_symbol_type *) sym)->internal_elf_sym.st_info)) + { case STT_FUNC: case STT_NOTYPE: - /* Skip mapping symbols. */ - if ((q->symbol.flags & BSF_LOCAL) - && (bfd_is_aarch64_special_symbol_name - (q->symbol.name, BFD_AARCH64_SPECIAL_SYM_TYPE_ANY))) - continue; - /* Fall through. */ - if (bfd_get_section (&q->symbol) == section - && q->symbol.value >= low_func && q->symbol.value <= offset) - { - func = (asymbol *) q; - low_func = q->symbol.value; - } break; - } - } - - if (func == NULL) - return FALSE; - - if (filename_ptr) - *filename_ptr = filename; - if (functionname_ptr) - *functionname_ptr = bfd_asymbol_name (func); - - return TRUE; -} - - -/* Find the nearest line to a particular section and offset, for error - reporting. This code is a duplicate of the code in elf.c, except - that it uses aarch64_elf_find_function. 
*/ - -static bfd_boolean -elfNN_aarch64_find_nearest_line (bfd *abfd, - asymbol **symbols, - asection *section, - bfd_vma offset, - const char **filename_ptr, - const char **functionname_ptr, - unsigned int *line_ptr, - unsigned int *discriminator_ptr) -{ - bfd_boolean found = FALSE; - - if (_bfd_dwarf2_find_nearest_line (abfd, symbols, NULL, section, offset, - filename_ptr, functionname_ptr, - line_ptr, discriminator_ptr, - dwarf_debug_sections, 0, - &elf_tdata (abfd)->dwarf2_find_line_info)) - { - if (!*functionname_ptr) - aarch64_elf_find_function (abfd, symbols, section, offset, - *filename_ptr ? NULL : filename_ptr, - functionname_ptr); - - return TRUE; - } - - /* Skip _bfd_dwarf1_find_nearest_line since no known AArch64 - toolchain uses DWARF1. */ - - if (!_bfd_stab_section_find_nearest_line (abfd, symbols, section, offset, - &found, filename_ptr, - functionname_ptr, line_ptr, - &elf_tdata (abfd)->line_info)) - return FALSE; - - if (found && (*functionname_ptr || *line_ptr)) - return TRUE; - - if (symbols == NULL) - return FALSE; + default: + return 0; + } - if (!aarch64_elf_find_function (abfd, symbols, section, offset, - filename_ptr, functionname_ptr)) - return FALSE; + if ((sym->flags & BSF_LOCAL) + && bfd_is_aarch64_special_symbol_name (sym->name, + BFD_AARCH64_SPECIAL_SYM_TYPE_ANY)) + return 0; - *line_ptr = 0; - return TRUE; + *code_off = sym->value; + size = 0; + if (!(sym->flags & BSF_SYNTHETIC)) + size = ((elf_symbol_type *) sym)->internal_elf_sym.st_size; + if (size == 0) + size = 1; + return size; } static bfd_boolean @@ -7542,19 +8085,6 @@ elfNN_aarch64_find_inliner_info (bfd *ab return found; } - -static void -elfNN_aarch64_post_process_headers (bfd *abfd, - struct bfd_link_info *link_info) -{ - Elf_Internal_Ehdr *i_ehdrp; /* ELF file header, internal form. */ - - i_ehdrp = elf_elfheader (abfd); - i_ehdrp->e_ident[EI_ABIVERSION] = AARCH64_ELF_ABI_VERSION; - - _bfd_elf_post_process_headers (abfd, link_info); -} - static enum elf_reloc_type_class elfNN_aarch64_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED, const asection *rel_sec ATTRIBUTE_UNUSED, @@ -7579,7 +8109,7 @@ elfNN_aarch64_reloc_type_class (const st 0, &sym)) { /* xgettext:c-format */ - _bfd_error_handler (_("%B symbol number %lu references" + _bfd_error_handler (_("%pB symbol number %lu references" " nonexistent SHT_SYMTAB_SHNDX section"), abfd, r_symndx); /* Ideally an error class should be returned here. */ @@ -7830,6 +8360,8 @@ aarch64_map_one_stub (struct bfd_hash_en if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr)) return FALSE; break; + case aarch64_stub_none: + break; default: abort (); @@ -7907,7 +8439,7 @@ elfNN_aarch64_new_section_hook (bfd *abf if (!sec->used_by_bfd) { _aarch64_elf_section_data *sdata; - bfd_size_type amt = sizeof (*sdata); + size_t amt = sizeof (*sdata); sdata = bfd_zalloc (abfd, amt); if (sdata == NULL) @@ -8035,7 +8567,7 @@ elfNN_aarch64_allocate_dynrelocs (struct /* Make room for this entry. For now we only create the small model PLT entries. We later need to find a way of relaxing into these from the large model PLT entries. */ - s->size += PLT_SMALL_ENTRY_SIZE; + s->size += htab->plt_entry_size; /* We also need to make an entry in the .got.plt section, which will be placed in the .got section by the linker script. */ @@ -8063,7 +8595,6 @@ elfNN_aarch64_allocate_dynrelocs (struct variant PCS symbols are present. 
*/ if (h->other & STO_AARCH64_VARIANT_PCS) htab->variant_pcs = 1; - } else { @@ -8296,7 +8827,6 @@ elfNN_aarch64_allocate_ifunc_dynrelocs ( info = (struct bfd_link_info *) inf; htab = elf_aarch64_hash_table (info); - eh = (struct elf_aarch64_link_hash_entry *) h; /* Since STT_GNU_IFUNC symbol must go through PLT, we handle it @@ -8304,7 +8834,7 @@ elfNN_aarch64_allocate_ifunc_dynrelocs ( if (h->type == STT_GNU_IFUNC && h->def_regular) return _bfd_elf_allocate_ifunc_dyn_relocs (info, h, - &eh->dyn_relocs, + & eh->dyn_relocs, NULL, htab->plt_entry_size, htab->plt_header_size, @@ -8314,10 +8844,10 @@ elfNN_aarch64_allocate_ifunc_dynrelocs ( } /* Allocate space in .plt, .got and associated reloc sections for - local dynamic relocs. */ + local ifunc dynamic relocs. */ static bfd_boolean -elfNN_aarch64_allocate_local_dynrelocs (void **slot, void *inf) +elfNN_aarch64_allocate_local_ifunc_dynrelocs (void **slot, void *inf) { struct elf_link_hash_entry *h = (struct elf_link_hash_entry *) *slot; @@ -8329,26 +8859,24 @@ elfNN_aarch64_allocate_local_dynrelocs ( || h->root.type != bfd_link_hash_defined) abort (); - return elfNN_aarch64_allocate_dynrelocs (h, inf); + return elfNN_aarch64_allocate_ifunc_dynrelocs (h, inf); } -/* Allocate space in .plt, .got and associated reloc sections for - local ifunc dynamic relocs. */ +/* Find dynamic relocs for H that apply to read-only sections. */ -static bfd_boolean -elfNN_aarch64_allocate_local_ifunc_dynrelocs (void **slot, void *inf) +static asection * +readonly_dynrelocs (struct elf_link_hash_entry *h) { - struct elf_link_hash_entry *h - = (struct elf_link_hash_entry *) *slot; + struct elf_dyn_relocs *p; - if (h->type != STT_GNU_IFUNC - || !h->def_regular - || !h->ref_regular - || !h->forced_local - || h->root.type != bfd_link_hash_defined) - abort (); + for (p = elf_aarch64_hash_entry (h)->dyn_relocs; p != NULL; p = p->next) + { + asection *s = p->sec->output_section; - return elfNN_aarch64_allocate_ifunc_dynrelocs (h, inf); + if (s != NULL && (s->flags & SEC_READONLY) != 0) + return p->sec; + } + return NULL; } /* Set DF_TEXTREL if we find any dynamic relocs that apply to @@ -8378,6 +8906,12 @@ maybe_set_textrel (struct elf_link_hash_ return TRUE; } +static inline bfd_boolean +startswith (const char *str, const char *prefix) +{ + return strncmp (str, prefix, strlen (prefix)) == 0; +} + /* This is the most important function of all . Innocuosly named though ! */ @@ -8519,11 +9053,6 @@ elfNN_aarch64_size_dynamic_sections (bfd elf_link_hash_traverse (&htab->root, elfNN_aarch64_allocate_ifunc_dynrelocs, info); - /* Allocate .plt and .got entries, and space for local symbols. */ - htab_traverse (htab->loc_hash_table, - elfNN_aarch64_allocate_local_dynrelocs, - info); - /* Allocate .plt and .got entries, and space for local ifunc symbols. */ htab_traverse (htab->loc_hash_table, elfNN_aarch64_allocate_local_ifunc_dynrelocs, @@ -8541,15 +9070,17 @@ elfNN_aarch64_size_dynamic_sections (bfd if (htab->tlsdesc_plt) { if (htab->root.splt->size == 0) - htab->root.splt->size += PLT_ENTRY_SIZE; - - htab->tlsdesc_plt = htab->root.splt->size; - htab->root.splt->size += PLT_TLSDESC_ENTRY_SIZE; + htab->root.splt->size += htab->plt_header_size; /* If we're not using lazy TLS relocations, don't generate the - GOT entry required. */ - if (!(info->flags & DF_BIND_NOW)) + GOT and PLT entry required. 
*/ + if ((info->flags & DF_BIND_NOW)) + htab->tlsdesc_plt = 0; + else { + htab->tlsdesc_plt = htab->root.splt->size; + htab->root.splt->size += htab->tlsdesc_plt_entry_size; + htab->dt_tlsdesc_got = htab->root.sgot->size; htab->root.sgot->size += GOT_ENTRY_SIZE; } @@ -8584,7 +9115,7 @@ elfNN_aarch64_size_dynamic_sections (bfd /* Strip this section if we don't need it; see the comment below. */ } - else if (CONST_STRNEQ (bfd_get_section_name (dynobj, s), ".rela")) + else if (startswith (bfd_section_name (NULL, s), ".rela")) { if (s->size != 0 && s != htab->root.srelplt) relocs = TRUE; @@ -8646,19 +9177,21 @@ elfNN_aarch64_size_dynamic_sections (bfd if (htab->root.splt->size != 0) { - if (!add_dynamic_entry (DT_PLTGOT, 0) - || !add_dynamic_entry (DT_PLTRELSZ, 0) - || !add_dynamic_entry (DT_PLTREL, DT_RELA) - || !add_dynamic_entry (DT_JMPREL, 0)) - return FALSE; - if (htab->variant_pcs && !add_dynamic_entry (DT_AARCH64_VARIANT_PCS, 0)) return FALSE; - if (htab->tlsdesc_plt - && (!add_dynamic_entry (DT_TLSDESC_PLT, 0) - || !add_dynamic_entry (DT_TLSDESC_GOT, 0))) + if ((elf_aarch64_tdata (output_bfd)->plt_type == PLT_BTI_PAC) + && (!add_dynamic_entry (DT_AARCH64_BTI_PLT, 0) + || !add_dynamic_entry (DT_AARCH64_PAC_PLT, 0))) + return FALSE; + + else if ((elf_aarch64_tdata (output_bfd)->plt_type == PLT_BTI) + && !add_dynamic_entry (DT_AARCH64_BTI_PLT, 0)) + return FALSE; + + else if ((elf_aarch64_tdata (output_bfd)->plt_type == PLT_PAC) + && !add_dynamic_entry (DT_AARCH64_PAC_PLT, 0)) return FALSE; } @@ -8756,7 +9289,13 @@ elfNN_aarch64_create_small_pltn_entry (s gotplt->output_offset + got_offset; /* Copy in the boiler-plate for the PLTn entry. */ - memcpy (plt_entry, elfNN_aarch64_small_plt_entry, PLT_SMALL_ENTRY_SIZE); + memcpy (plt_entry, htab->plt_entry, htab->plt_entry_size); + + /* First instruction in BTI enabled PLT stub is a BTI + instruction so skip it. */ + if (elf_aarch64_tdata (output_bfd)->plt_type & PLT_BTI + && elf_elfheader (output_bfd)->e_type == ET_EXEC) + plt_entry = plt_entry + 4; /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8. ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */ @@ -8974,7 +9513,7 @@ elfNN_aarch64_finish_dynamic_symbol (bfd } else { -do_glob_dat: + do_glob_dat: BFD_ASSERT ((h->got.offset & 1) == 0); bfd_put_NN (output_bfd, (bfd_vma) 0, htab->root.sgot->contents + h->got.offset); @@ -9061,10 +9600,13 @@ elfNN_aarch64_init_small_plt0_entry (bfd bfd_vma plt_base; - memcpy (htab->root.splt->contents, elfNN_aarch64_small_plt0_entry, - PLT_ENTRY_SIZE); - elf_section_data (htab->root.splt->output_section)->this_hdr.sh_entsize = - PLT_ENTRY_SIZE; + memcpy (htab->root.splt->contents, htab->plt0_entry, + htab->plt_header_size); + + /* PR 26312: Explicitly set the sh_entsize to 0 so that + consumers do not think that the section contains fixed + sized objects. */ + elf_section_data (htab->root.splt->output_section)->this_hdr.sh_entsize = 0; plt_got_2nd_ent = (htab->root.sgotplt->output_section->vma + htab->root.sgotplt->output_offset @@ -9073,18 +9615,24 @@ elfNN_aarch64_init_small_plt0_entry (bfd plt_base = htab->root.splt->output_section->vma + htab->root.splt->output_offset; + /* First instruction in BTI enabled PLT stub is a BTI + instruction so skip it. */ + bfd_byte *plt0_entry = htab->root.splt->contents; + if (elf_aarch64_tdata (output_bfd)->plt_type & PLT_BTI) + plt0_entry = plt0_entry + 4; + /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8. 
ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */ elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADR_HI21_PCREL, - htab->root.splt->contents + 4, + plt0_entry + 4, PG (plt_got_2nd_ent) - PG (plt_base + 4)); elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_LDSTNN_LO12, - htab->root.splt->contents + 8, + plt0_entry + 8, PG_OFFSET (plt_got_2nd_ent)); elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADD_LO12, - htab->root.splt->contents + 12, + plt0_entry + 12, PG_OFFSET (plt_got_2nd_ent)); } @@ -9144,6 +9692,7 @@ elfNN_aarch64_finish_dynamic_sections (b case DT_TLSDESC_GOT: s = htab->root.sgot; + BFD_ASSERT (htab->dt_tlsdesc_got != (bfd_vma)-1); dyn.d_un.d_ptr = s->output_section->vma + s->output_offset + htab->dt_tlsdesc_got; break; @@ -9159,23 +9708,29 @@ elfNN_aarch64_finish_dynamic_sections (b { elfNN_aarch64_init_small_plt0_entry (output_bfd, htab); - elf_section_data (htab->root.splt->output_section)-> - this_hdr.sh_entsize = htab->plt_entry_size; - - - if (htab->tlsdesc_plt) + if (htab->tlsdesc_plt && !(info->flags & DF_BIND_NOW)) { + BFD_ASSERT (htab->dt_tlsdesc_got != (bfd_vma)-1); bfd_put_NN (output_bfd, (bfd_vma) 0, htab->root.sgot->contents + htab->dt_tlsdesc_got); + const bfd_byte *entry = elfNN_aarch64_tlsdesc_small_plt_entry; + htab->tlsdesc_plt_entry_size = PLT_TLSDESC_ENTRY_SIZE; + + aarch64_plt_type type = elf_aarch64_tdata (output_bfd)->plt_type; + if (type == PLT_BTI || type == PLT_BTI_PAC) + { + entry = elfNN_aarch64_tlsdesc_small_plt_bti_entry; + } + memcpy (htab->root.splt->contents + htab->tlsdesc_plt, - elfNN_aarch64_tlsdesc_small_plt_entry, - sizeof (elfNN_aarch64_tlsdesc_small_plt_entry)); + entry, htab->tlsdesc_plt_entry_size); { bfd_vma adrp1_addr = htab->root.splt->output_section->vma - + htab->root.splt->output_offset + htab->tlsdesc_plt + 4; + + htab->root.splt->output_offset + + htab->tlsdesc_plt + 4; bfd_vma adrp2_addr = adrp1_addr + 4; @@ -9192,6 +9747,15 @@ elfNN_aarch64_finish_dynamic_sections (b bfd_byte *plt_entry = htab->root.splt->contents + htab->tlsdesc_plt; + /* First instruction in BTI enabled PLT stub is a BTI + instruction so skip it. */ + if (type & PLT_BTI) + { + plt_entry = plt_entry + 4; + adrp1_addr = adrp1_addr + 4; + adrp2_addr = adrp2_addr + 4; + } + /* adrp x2, DT_TLSDESC_GOT */ elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADR_HI21_PCREL, @@ -9226,7 +9790,7 @@ elfNN_aarch64_finish_dynamic_sections (b if (bfd_is_abs_section (htab->root.sgotplt->output_section)) { _bfd_error_handler - (_("discarded output section: `%A'"), htab->root.sgotplt); + (_("discarded output section: `%pA'"), htab->root.sgotplt); return FALSE; } @@ -9270,6 +9834,57 @@ elfNN_aarch64_finish_dynamic_sections (b return TRUE; } +/* Check if BTI enabled PLTs are needed. Returns the type needed. */ +static aarch64_plt_type +get_plt_type (bfd *abfd) +{ + aarch64_plt_type ret = PLT_NORMAL; + bfd_byte *contents, *extdyn, *extdynend; + asection *sec = bfd_get_section_by_name (abfd, ".dynamic"); + if (!sec || !bfd_malloc_and_get_section (abfd, sec, &contents)) + return ret; + extdyn = contents; + extdynend = contents + sec->size; + for (; extdyn < extdynend; extdyn += sizeof (ElfNN_External_Dyn)) + { + Elf_Internal_Dyn dyn; + bfd_elfNN_swap_dyn_in (abfd, extdyn, &dyn); + + /* Let's check the processor specific dynamic array tags. 
*/ + bfd_vma tag = dyn.d_tag; + if (tag < DT_LOPROC || tag > DT_HIPROC) + continue; + + switch (tag) + { + case DT_AARCH64_BTI_PLT: + ret |= PLT_BTI; + break; + + case DT_AARCH64_PAC_PLT: + ret |= PLT_PAC; + break; + + default: break; + } + } + free (contents); + return ret; +} + +static long +elfNN_aarch64_get_synthetic_symtab (bfd *abfd, + long symcount, + asymbol **syms, + long dynsymcount, + asymbol **dynsyms, + asymbol **ret) +{ + elf_aarch64_tdata (abfd)->plt_type = get_plt_type (abfd); + return _bfd_elf_get_synthetic_symtab (abfd, symcount, syms, + dynsymcount, dynsyms, ret); +} + /* Return address for Ith PLT stub in section PLT, for relocation REL or (bfd_vma) -1 if it should not be included. */ @@ -9277,7 +9892,27 @@ static bfd_vma elfNN_aarch64_plt_sym_val (bfd_vma i, const asection *plt, const arelent *rel ATTRIBUTE_UNUSED) { - return plt->vma + PLT_ENTRY_SIZE + i * PLT_SMALL_ENTRY_SIZE; + size_t plt0_size = PLT_ENTRY_SIZE; + size_t pltn_size = PLT_SMALL_ENTRY_SIZE; + + if (elf_aarch64_tdata (plt->owner)->plt_type == PLT_BTI_PAC) + { + if (elf_elfheader (plt->owner)->e_type == ET_EXEC) + pltn_size = PLT_BTI_PAC_SMALL_ENTRY_SIZE; + else + pltn_size = PLT_PAC_SMALL_ENTRY_SIZE; + } + else if (elf_aarch64_tdata (plt->owner)->plt_type == PLT_BTI) + { + if (elf_elfheader (plt->owner)->e_type == ET_EXEC) + pltn_size = PLT_BTI_SMALL_ENTRY_SIZE; + } + else if (elf_aarch64_tdata (plt->owner)->plt_type == PLT_PAC) + { + pltn_size = PLT_PAC_SMALL_ENTRY_SIZE; + } + + return plt->vma + plt0_size + i * pltn_size; } /* Returns TRUE if NAME is an AArch64 mapping symbol. @@ -9323,6 +9958,9 @@ elfNN_aarch64_link_setup_gnu_properties uint32_t prop = elf_aarch64_tdata (info->output_bfd)->gnu_and_prop; bfd *pbfd = _bfd_aarch64_elf_link_setup_gnu_properties (info, &prop); elf_aarch64_tdata (info->output_bfd)->gnu_and_prop = prop; + elf_aarch64_tdata (info->output_bfd)->plt_type + |= (prop & GNU_PROPERTY_AARCH64_FEATURE_1_BTI) ? PLT_BTI : 0; + setup_plt_values (info, elf_aarch64_tdata (info->output_bfd)->plt_type); return pbfd; } @@ -9331,14 +9969,38 @@ elfNN_aarch64_link_setup_gnu_properties for the effect of GNU properties of the output_bfd. */ static bfd_boolean elfNN_aarch64_merge_gnu_properties (struct bfd_link_info *info, - bfd *abfd, - bfd *bbfd ATTRIBUTE_UNUSED, - elf_property *aprop, - elf_property *bprop) + bfd *abfd, bfd *bbfd, + elf_property *aprop, + elf_property *bprop) { uint32_t prop = elf_aarch64_tdata (info->output_bfd)->gnu_and_prop; + /* If output has been marked with BTI using command line argument, give out + warning if necessary. */ + /* Properties are merged per type, hence only check for warnings when merging + GNU_PROPERTY_AARCH64_FEATURE_1_AND. 
*/ + if (((aprop && aprop->pr_type == GNU_PROPERTY_AARCH64_FEATURE_1_AND) + || (bprop && bprop->pr_type == GNU_PROPERTY_AARCH64_FEATURE_1_AND)) + && (prop & GNU_PROPERTY_AARCH64_FEATURE_1_BTI) + && (!elf_aarch64_tdata (info->output_bfd)->no_bti_warn)) + { + if ((aprop && !(aprop->u.number & GNU_PROPERTY_AARCH64_FEATURE_1_BTI)) + || !aprop) + { + _bfd_error_handler (_("%pB: warning: BTI turned on by -z force-bti when " + "all inputs do not have BTI in NOTE section."), + abfd); + } + if ((bprop && !(bprop->u.number & GNU_PROPERTY_AARCH64_FEATURE_1_BTI)) + || !bprop) + { + _bfd_error_handler (_("%pB: warning: BTI turned on by -z force-bti when " + "all inputs do not have BTI in NOTE section."), + bbfd); + } + } + return _bfd_aarch64_elf_merge_gnu_properties (info, abfd, aprop, bprop, prop); } @@ -9413,8 +10075,8 @@ const struct elf_size_info elfNN_aarch64 #define bfd_elfNN_find_inliner_info \ elfNN_aarch64_find_inliner_info -#define bfd_elfNN_find_nearest_line \ - elfNN_aarch64_find_nearest_line +#define bfd_elfNN_get_synthetic_symtab \ + elfNN_aarch64_get_synthetic_symtab #define bfd_elfNN_mkobject \ elfNN_aarch64_mkobject @@ -9457,12 +10119,12 @@ const struct elf_size_info elfNN_aarch64 #define elf_backend_output_arch_local_syms \ elfNN_aarch64_output_arch_local_syms +#define elf_backend_maybe_function_sym \ + elfNN_aarch64_maybe_function_sym + #define elf_backend_plt_sym_val \ elfNN_aarch64_plt_sym_val -#define elf_backend_post_process_headers \ - elfNN_aarch64_post_process_headers - #define elf_backend_relocate_section \ elfNN_aarch64_relocate_section diff -rup binutils-2.30/bfd/elfxx-aarch64.c binutils-2.30.new/bfd/elfxx-aarch64.c --- binutils-2.30/bfd/elfxx-aarch64.c 2021-03-23 16:21:45.925009754 +0000 +++ binutils-2.30.new/bfd/elfxx-aarch64.c 2021-03-23 16:20:02.829710624 +0000 @@ -1,5 +1,5 @@ /* AArch64-specific support for ELF. - Copyright (C) 2009-2018 Free Software Foundation, Inc. + Copyright (C) 2009-2021 Free Software Foundation, Inc. Contributed by ARM Ltd. This file is part of BFD, the Binary File Descriptor library. @@ -19,10 +19,11 @@ see . */ #include "sysdep.h" +#include "bfd.h" +#include "elf-bfd.h" #include "elfxx-aarch64.h" #include #include -#include "libbfd.h" #define MASK(n) ((1u << (n)) - 1) @@ -286,11 +287,18 @@ _bfd_aarch64_elf_put_addend (bfd *abfd, case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC: case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12: case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC: + case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12: + case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC: + case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12: + case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC: + case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12: + case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC: + case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12: + case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC: if (old_addend & ((1 << howto->rightshift) - 1)) return bfd_reloc_overflow; /* Used for ldr*|str* rt, [rn, #uimm12] to provide the low order - 12 bits of the page offset following BFD_RELOC_AARCH64_ADR_HI21_PCREL - which computes the (pc-relative) page base. */ + 12 bits address offset. 
*/ contents = reencode_ldst_pos_imm (contents, addend); break; @@ -302,6 +310,10 @@ _bfd_aarch64_elf_put_addend (bfd *abfd, case BFD_RELOC_AARCH64_MOVW_G0_S: case BFD_RELOC_AARCH64_MOVW_G1_S: case BFD_RELOC_AARCH64_MOVW_G2_S: + case BFD_RELOC_AARCH64_MOVW_PREL_G0: + case BFD_RELOC_AARCH64_MOVW_PREL_G1: + case BFD_RELOC_AARCH64_MOVW_PREL_G2: + case BFD_RELOC_AARCH64_MOVW_PREL_G3: case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0: case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1: case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2: @@ -334,6 +346,9 @@ _bfd_aarch64_elf_put_addend (bfd *abfd, case BFD_RELOC_AARCH64_MOVW_G3: case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC: case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1: + case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC: + case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC: + case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC: case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC: case BFD_RELOC_AARCH64_TLSDESC_OFF_G1: case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC: @@ -380,10 +395,12 @@ _bfd_aarch64_elf_put_addend (bfd *abfd, } bfd_vma -_bfd_aarch64_elf_resolve_relocation (bfd_reloc_code_real_type r_type, +_bfd_aarch64_elf_resolve_relocation (bfd *input_bfd, + bfd_reloc_code_real_type r_type, bfd_vma place, bfd_vma value, bfd_vma addend, bfd_boolean weak_undef_p) { + bfd_boolean tls_reloc = TRUE; switch (r_type) { case BFD_RELOC_AARCH64_NONE: @@ -396,6 +413,13 @@ _bfd_aarch64_elf_resolve_relocation (bfd case BFD_RELOC_AARCH64_ADR_LO21_PCREL: case BFD_RELOC_AARCH64_BRANCH19: case BFD_RELOC_AARCH64_LD_LO19_PCREL: + case BFD_RELOC_AARCH64_MOVW_PREL_G0: + case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC: + case BFD_RELOC_AARCH64_MOVW_PREL_G1: + case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC: + case BFD_RELOC_AARCH64_MOVW_PREL_G2: + case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC: + case BFD_RELOC_AARCH64_MOVW_PREL_G3: case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21: case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19: case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21: @@ -424,6 +448,8 @@ _bfd_aarch64_elf_resolve_relocation (bfd case BFD_RELOC_AARCH64_MOVW_G2_NC: case BFD_RELOC_AARCH64_MOVW_G2_S: case BFD_RELOC_AARCH64_MOVW_G3: + tls_reloc = FALSE; + /* fall-through. */ case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC: case BFD_RELOC_AARCH64_TLSDESC_OFF_G1: case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC: @@ -432,18 +458,27 @@ _bfd_aarch64_elf_resolve_relocation (bfd case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12: case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC: case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12: - case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC: case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12: - case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC: case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12: - case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC: case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12: - case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC: case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0: case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC: case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1: case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC: case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2: + case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12: + case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12: + case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12: + case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12: + /* Weak Symbols and TLS relocations are implementation defined. For this + case we choose to emit 0. 
*/ + if (weak_undef_p && tls_reloc) + { + _bfd_error_handler (_("%pB: warning: Weak TLS is implementation " + "defined and may not work as expected"), + input_bfd); + value = place; + } value = value + addend; break; @@ -493,7 +528,15 @@ _bfd_aarch64_elf_resolve_relocation (bfd case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC: case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC: case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC: + case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC: + case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC: + case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC: + case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC: case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC: + case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC: + case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC: + case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC: + case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC: value = PG_OFFSET (value + addend); break; @@ -530,25 +573,6 @@ _bfd_aarch64_elf_resolve_relocation (bfd return value; } -/* Hook called by the linker routine which adds symbols from an object - file. */ - -bfd_boolean -_bfd_aarch64_elf_add_symbol_hook (bfd *abfd, struct bfd_link_info *info, - Elf_Internal_Sym *sym, - const char **namep ATTRIBUTE_UNUSED, - flagword *flagsp ATTRIBUTE_UNUSED, - asection **secp ATTRIBUTE_UNUSED, - bfd_vma *valp ATTRIBUTE_UNUSED) -{ - if (ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC - && (abfd->flags & DYNAMIC) == 0 - && bfd_get_flavour (info->output_bfd) == bfd_target_elf_flavour) - elf_tdata (info->output_bfd)->has_gnu_symbols |= elf_gnu_symbol_ifunc; - - return TRUE; -} - /* Support for core dump NOTE sections. */ bfd_boolean @@ -631,7 +655,18 @@ _bfd_aarch64_elf_write_core_note (bfd *a va_start (ap, note_type); memset (data, 0, sizeof (data)); strncpy (data + 40, va_arg (ap, const char *), 16); +#if GCC_VERSION == 8000 || GCC_VERSION == 8001 + DIAGNOSTIC_PUSH; + /* GCC 8.0 and 8.1 warn about 80 equals destination size with + -Wstringop-truncation: + https://gcc.gnu.org/bugzilla/show_bug.cgi?id=85643 + */ + DIAGNOSTIC_IGNORE_STRINGOP_TRUNCATION; +#endif strncpy (data + 56, va_arg (ap, const char *), 80); +#if GCC_VERSION == 8000 || GCC_VERSION == 8001 + DIAGNOSTIC_POP; +#endif va_end (ap); return elfcore_write_note (abfd, buf, bufsiz, "CORE", @@ -673,6 +708,7 @@ _bfd_aarch64_elf_link_setup_gnu_properti bfd *pbfd; bfd *ebfd = NULL; elf_property *prop; + unsigned align; uint32_t gnu_prop = *gprop; @@ -697,6 +733,11 @@ _bfd_aarch64_elf_link_setup_gnu_properti prop = _bfd_elf_get_property (ebfd, GNU_PROPERTY_AARCH64_FEATURE_1_AND, 4); + if (gnu_prop & GNU_PROPERTY_AARCH64_FEATURE_1_BTI + && !(prop->u.number & GNU_PROPERTY_AARCH64_FEATURE_1_BTI)) + _bfd_error_handler (_("%pB: warning: BTI turned on by -z force-bti " + "when all inputs do not have BTI in NOTE " + "section."), ebfd); prop->u.number |= gnu_prop; prop->pr_kind = property_number; @@ -716,6 +757,11 @@ _bfd_aarch64_elf_link_setup_gnu_properti info->callbacks->einfo ( _("%F%P: failed to create GNU property section\n")); + align = (bfd_get_mach (ebfd) & bfd_mach_aarch64_ilp32) ? 2 : 3; + if (!bfd_set_section_alignment (ebfd, sec, align)) + info->callbacks->einfo (_("%F%pA: failed to align section\n"), + sec); + elf_section_type (sec) = SHT_NOTE; } } @@ -836,11 +882,43 @@ _bfd_aarch64_elf_merge_gnu_properties (s break; default: - _bfd_error_handler - ( _("error: %pB: "), - abfd, pr_type); - return FALSE; + abort (); } return updated; } + +/* Fix up AArch64 GNU properties. 
*/ +void +_bfd_aarch64_elf_link_fixup_gnu_properties + (struct bfd_link_info *info ATTRIBUTE_UNUSED, + elf_property_list **listp) +{ + elf_property_list *p, *prev; + + for (p = *listp, prev = *listp; p; p = p->next) + { + unsigned int type = p->property.pr_type; + if (type == GNU_PROPERTY_AARCH64_FEATURE_1_AND) + { + if (p->property.pr_kind == property_remove) + { + /* Remove empty property. */ + if (prev == p) + { + *listp = p->next; + prev = *listp; + } + else + prev->next = p->next; + continue; + } + prev = p; + } + else if (type > GNU_PROPERTY_HIPROC) + { + /* The property list is sorted in order of type. */ + break; + } + } +} diff -rup binutils-2.30/bfd/elfxx-aarch64.h binutils-2.30.new/bfd/elfxx-aarch64.h --- binutils-2.30/bfd/elfxx-aarch64.h 2021-03-23 16:21:45.652011610 +0000 +++ binutils-2.30.new/bfd/elfxx-aarch64.h 2021-03-23 16:20:02.830710617 +0000 @@ -1,5 +1,5 @@ /* AArch64-specific backend routines. - Copyright (C) 2009-2018 Free Software Foundation, Inc. + Copyright (C) 2009-2021 Free Software Foundation, Inc. Contributed by ARM Ltd. This file is part of BFD, the Binary File Descriptor library. @@ -18,9 +18,77 @@ along with this program; see the file COPYING3. If not, see . */ -#include "bfd.h" -#include "elf-bfd.h" -#include "stdint.h" +#define uint32_t unsigned int +#define uint64_t unsigned long + +extern void bfd_elf64_aarch64_init_maps + (bfd *); + +extern void bfd_elf32_aarch64_init_maps + (bfd *); + +/* Types of PLTs based on the level of security. This would be a + bit-mask to denote which of the combinations of security features + are enabled: + - No security feature PLTs + - PLTs with BTI instruction + - PLTs with PAC instruction +*/ +typedef enum +{ + PLT_NORMAL = 0x0, /* Normal plts. */ + PLT_BTI = 0x1, /* plts with bti. */ + PLT_PAC = 0x2, /* plts with pointer authentication. */ + PLT_BTI_PAC = PLT_BTI | PLT_PAC +} aarch64_plt_type; + +/* To indicate if BTI is enabled with/without warning. */ +typedef enum +{ + BTI_NONE = 0, /* BTI is not enabled. */ + BTI_WARN = 1, /* BTI is enabled with -z force-bti. */ +} aarch64_enable_bti_type; + +/* A structure to encompass all information coming from BTI or PAC + related command line options. This involves the "PLT_TYPE" to determine + which version of PLTs to pick and "BTI_TYPE" to determine if + BTI should be turned on with any warnings. */ +typedef struct +{ + aarch64_plt_type plt_type; + aarch64_enable_bti_type bti_type; +} aarch64_bti_pac_info; + +extern void bfd_elf64_aarch64_set_options + (bfd *, struct bfd_link_info *, int, int, int, int, int, int, + aarch64_bti_pac_info); + +extern void bfd_elf32_aarch64_set_options + (bfd *, struct bfd_link_info *, int, int, int, int, int, int, + aarch64_bti_pac_info); + +/* AArch64 stub generation support for ELF64. Called from the linker. */ +extern int elf64_aarch64_setup_section_lists + (bfd *, struct bfd_link_info *); +extern void elf64_aarch64_next_input_section + (struct bfd_link_info *, struct bfd_section *); +extern bfd_boolean elf64_aarch64_size_stubs + (bfd *, bfd *, struct bfd_link_info *, bfd_signed_vma, + struct bfd_section * (*) (const char *, struct bfd_section *), + void (*) (void)); +extern bfd_boolean elf64_aarch64_build_stubs + (struct bfd_link_info *); +/* AArch64 stub generation support for ELF32. Called from the linker. 
*/ +extern int elf32_aarch64_setup_section_lists + (bfd *, struct bfd_link_info *); +extern void elf32_aarch64_next_input_section + (struct bfd_link_info *, struct bfd_section *); +extern bfd_boolean elf32_aarch64_size_stubs + (bfd *, bfd *, struct bfd_link_info *, bfd_signed_vma, + struct bfd_section * (*) (const char *, struct bfd_section *), + void (*) (void)); +extern bfd_boolean elf32_aarch64_build_stubs + (struct bfd_link_info *); /* Take the PAGE component of an address or offset. */ #define PG(x) ((x) & ~ (bfd_vma) 0xfff) @@ -44,13 +112,8 @@ _bfd_aarch64_elf_put_addend (bfd *, bfd_ reloc_howto_type *, bfd_signed_vma); extern bfd_vma -_bfd_aarch64_elf_resolve_relocation (bfd_reloc_code_real_type, bfd_vma, bfd_vma, - bfd_vma, bfd_boolean); - -extern bfd_boolean -_bfd_aarch64_elf_add_symbol_hook (bfd *, struct bfd_link_info *, - Elf_Internal_Sym *, const char **, - flagword *, asection **, bfd_vma *); +_bfd_aarch64_elf_resolve_relocation (bfd *, bfd_reloc_code_real_type, bfd_vma, + bfd_vma, bfd_vma, bfd_boolean); extern bfd_boolean _bfd_aarch64_elf_grok_prstatus (bfd *, Elf_Internal_Note *); @@ -61,7 +124,6 @@ _bfd_aarch64_elf_grok_psinfo (bfd *, Elf extern char * _bfd_aarch64_elf_write_core_note (bfd *, char *, int *, int, ...); -#define elf_backend_add_symbol_hook _bfd_aarch64_elf_add_symbol_hook #define elf_backend_grok_prstatus _bfd_aarch64_elf_grok_prstatus #define elf_backend_grok_psinfo _bfd_aarch64_elf_grok_psinfo #define elf_backend_write_core_note _bfd_aarch64_elf_write_core_note @@ -79,5 +141,12 @@ _bfd_aarch64_elf_merge_gnu_properties (s elf_property *, elf_property *, uint32_t); +extern void +_bfd_aarch64_elf_link_fixup_gnu_properties (struct bfd_link_info *, + elf_property_list **); + #define elf_backend_parse_gnu_properties \ _bfd_aarch64_elf_parse_gnu_properties + +#define elf_backend_fixup_gnu_properties \ + _bfd_aarch64_elf_link_fixup_gnu_properties diff -rup binutils-2.30/bfd/reloc.c binutils-2.30.new/bfd/reloc.c --- binutils-2.30/bfd/reloc.c 2018-01-13 13:31:15.000000000 +0000 +++ binutils-2.30.new/bfd/reloc.c 2021-03-23 16:20:02.829710624 +0000 @@ -7071,6 +7071,43 @@ ENUMDOC of a signed value. Changes instruction to MOVZ or MOVN depending on the value's sign. ENUM + BFD_RELOC_AARCH64_MOVW_PREL_G0 +ENUMDOC + AArch64 MOV[NZ] instruction with most significant bits 0 to 15 + of a signed value. Changes instruction to MOVZ or MOVN depending on the + value's sign. +ENUM + BFD_RELOC_AARCH64_MOVW_PREL_G0_NC +ENUMDOC + AArch64 MOV[NZ] instruction with most significant bits 0 to 15 + of a signed value. Changes instruction to MOVZ or MOVN depending on the + value's sign. +ENUM + BFD_RELOC_AARCH64_MOVW_PREL_G1 +ENUMDOC + AArch64 MOVK instruction with most significant bits 16 to 31 + of a signed value. +ENUM + BFD_RELOC_AARCH64_MOVW_PREL_G1_NC +ENUMDOC + AArch64 MOVK instruction with most significant bits 16 to 31 + of a signed value. +ENUM + BFD_RELOC_AARCH64_MOVW_PREL_G2 +ENUMDOC + AArch64 MOVK instruction with most significant bits 32 to 47 + of a signed value. +ENUM + BFD_RELOC_AARCH64_MOVW_PREL_G2_NC +ENUMDOC + AArch64 MOVK instruction with most significant bits 32 to 47 + of a signed value. +ENUM + BFD_RELOC_AARCH64_MOVW_PREL_G3 +ENUMDOC + AArch64 MOVK instruction with most significant bits 47 to 63 + of a signed value. +ENUM BFD_RELOC_AARCH64_LD_LO19_PCREL ENUMDOC AArch64 Load Literal instruction, holding a 19 bit pc-relative word @@ -7359,6 +7396,42 @@ ENUM ENUMDOC AArch64 TLS LOCAL EXEC relocation. 
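(Editorial sketch, not part of the patch.)  The BFD_RELOC_AARCH64_TLSLE_LDST{8,16,32,64}_TPREL_LO12 entries documented below all place bit[11:scale] of the TP-relative byte offset into the 12-bit unsigned-offset field of a load/store, where scale is log2 of the access size; the _NC variants skip the range check.  A minimal stand-alone illustration of that scaling follows.  The helper name and the simplified overflow/alignment tests here are illustrative only; in BFD the real work is driven by each howto's rightshift together with _bfd_aarch64_elf_put_addend and reencode_ldst_pos_imm.

/* Sketch: derive the imm12 field for a TLSLE LDST<SZ>_TPREL_LO12-style
   relocation from a TP-relative byte offset.  Returns -1 if the offset is
   misaligned for the access size or (for the checking variants) does not
   fit in 12 bits.  */

#include <stdint.h>
#include <stdio.h>

static int
tprel_lo12_field (uint64_t tprel_offset, unsigned access_bytes,
		  int check_overflow)
{
  unsigned shift = 0;

  /* log2 of the access size: 0 for byte, 1 for half, 2 for word, 3 for
     doubleword.  */
  while ((1u << shift) < access_bytes)
    shift++;

  /* The encoded field only sees bit[11:shift]; the low SHIFT bits must be
     zero or the value cannot be represented in the instruction.  */
  if ((tprel_offset & 0xfff) & ((1u << shift) - 1))
    return -1;

  /* Simplified stand-in for the overflow check done by the non-_NC
     relocations: the whole offset must fit in the low 12 bits.  */
  if (check_overflow && tprel_offset > 0xfff)
    return -1;

  return (int) ((tprel_offset & 0xfff) >> shift);
}

int
main (void)
{
  /* bit[11:3] of offset 0x58 for an 8-byte access, as with
     BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12: prints 11.  */
  printf ("%d\n", tprel_lo12_field (0x58, 8, 1));
  return 0;
}

The same scaling is why the patch routes the generic :tprel_lo12: and :tprel_lo12_nc: operators through the new pseudo relocations BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12{,_NC}: the assembler can only pick the size-specific code once it knows the width of the load/store it is attached to.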
ENUM + BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12 +ENUMDOC + bit[11:1] of byte offset to module TLS base address, encoded in ldst + instructions. +ENUM + BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC +ENUMDOC + Similar as BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12, but no overflow check. +ENUM + BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12 +ENUMDOC + bit[11:2] of byte offset to module TLS base address, encoded in ldst + instructions. +ENUM + BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC +ENUMDOC + Similar as BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12, but no overflow check. +ENUM + BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12 +ENUMDOC + bit[11:3] of byte offset to module TLS base address, encoded in ldst + instructions. +ENUM + BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC +ENUMDOC + Similar as BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12, but no overflow check. +ENUM + BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12 +ENUMDOC + bit[11:0] of byte offset to module TLS base address, encoded in ldst + instructions. +ENUM + BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC +ENUMDOC + Similar as BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12, but no overflow check. +ENUM BFD_RELOC_AARCH64_TLSDESC_LD_PREL19 ENUMDOC AArch64 TLS DESC relocation. @@ -7467,6 +7540,16 @@ ENUM ENUMDOC Similar as BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12, but no overflow check. ENUM + BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12 +ENUMDOC + AArch64 pseudo relocation code for TLS local exec mode. It's to be + used internally by the AArch64 assembler and not (currently) written to + any object files. +ENUM + BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC +ENUMDOC + Similar as BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12, but no overflow check. +ENUM BFD_RELOC_AARCH64_LD_GOT_LO12_NC ENUMDOC AArch64 pseudo relocation code to be used internally by the AArch64 diff -rup binutils-2.30/gas/config/tc-aarch64.c binutils-2.30.new/gas/config/tc-aarch64.c --- binutils-2.30/gas/config/tc-aarch64.c 2021-03-23 16:21:44.128021971 +0000 +++ binutils-2.30.new/gas/config/tc-aarch64.c 2021-03-23 16:19:55.031763633 +0000 @@ -1,6 +1,6 @@ /* tc-aarch64.c -- Assemble for the AArch64 ISA - Copyright (C) 2009-2018 Free Software Foundation, Inc. + Copyright (C) 2009-2021 Free Software Foundation, Inc. Contributed by ARM Ltd. This file is part of GAS. @@ -35,6 +35,9 @@ #include "dwarf2dbg.h" +/* Number of littlenums required to hold an extended precision number. */ +#define MAX_LITTLENUMS 6 + /* Types of processor to assemble for. */ #ifndef CPU_DEFAULT #define CPU_DEFAULT AARCH64_ARCH_V8 @@ -55,6 +58,9 @@ static const aarch64_feature_set *march_ /* Constants for known architecture features. */ static const aarch64_feature_set cpu_default = CPU_DEFAULT; +/* Currently active instruction sequence. */ +static aarch64_instr_sequence *insn_sequence = NULL; + #ifdef OBJ_ELF /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */ static symbolS *GOT_symbol; @@ -146,6 +152,13 @@ static aarch64_instruction inst; static bfd_boolean parse_operands (char *, const aarch64_opcode *); static bfd_boolean programmer_friendly_fixup (aarch64_instruction *); +#ifdef OBJ_ELF +# define now_instr_sequence seg_info \ + (now_seg)->tc_segment_info_data.insn_sequence +#else +static struct aarch64_instr_sequence now_instr_sequence; +#endif + /* Diagnostics inline function utilities. 
These are lightweight utilities which should only be called by parse_operands @@ -228,9 +241,6 @@ set_fatal_syntax_error (const char *erro set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error); } -/* Number of littlenums required to hold an extended precision number. */ -#define MAX_LITTLENUMS 6 - /* Return value for certain parsers when the parsing fails; those parsers return the information of the parsed result, e.g. register number, on success. */ @@ -243,12 +253,6 @@ set_fatal_syntax_error (const char *erro typedef struct { const char *template; - unsigned long value; -} asm_barrier_opt; - -typedef struct -{ - const char *template; uint32_t value; } asm_nzcv; @@ -439,24 +443,28 @@ get_reg_expected_msg (aarch64_reg_type r /* Some well known registers that we refer to directly elsewhere. */ #define REG_SP 31 +#define REG_ZR 31 /* Instructions take 4 bytes in the object file. */ #define INSN_SIZE 4 -static struct hash_control *aarch64_ops_hsh; -static struct hash_control *aarch64_cond_hsh; -static struct hash_control *aarch64_shift_hsh; -static struct hash_control *aarch64_sys_regs_hsh; -static struct hash_control *aarch64_pstatefield_hsh; -static struct hash_control *aarch64_sys_regs_ic_hsh; -static struct hash_control *aarch64_sys_regs_dc_hsh; -static struct hash_control *aarch64_sys_regs_at_hsh; -static struct hash_control *aarch64_sys_regs_tlbi_hsh; -static struct hash_control *aarch64_reg_hsh; -static struct hash_control *aarch64_barrier_opt_hsh; -static struct hash_control *aarch64_nzcv_hsh; -static struct hash_control *aarch64_pldop_hsh; -static struct hash_control *aarch64_hint_opt_hsh; +#define htab_t struct hash_control * + +static htab_t aarch64_ops_hsh; +static htab_t aarch64_cond_hsh; +static htab_t aarch64_shift_hsh; +static htab_t aarch64_sys_regs_hsh; +static htab_t aarch64_pstatefield_hsh; +static htab_t aarch64_sys_regs_ic_hsh; +static htab_t aarch64_sys_regs_dc_hsh; +static htab_t aarch64_sys_regs_at_hsh; +static htab_t aarch64_sys_regs_tlbi_hsh; +static htab_t aarch64_sys_regs_sr_hsh; +static htab_t aarch64_reg_hsh; +static htab_t aarch64_barrier_opt_hsh; +static htab_t aarch64_nzcv_hsh; +static htab_t aarch64_pldop_hsh; +static htab_t aarch64_hint_opt_hsh; /* Stuff needed to resolve the label ambiguity As: @@ -520,7 +528,7 @@ const char EXP_CHARS[] = "eE"; /* As in 0f12.456 */ /* or 0d1.2345e12 */ -const char FLT_CHARS[] = "rRsSfFdDxXeEpP"; +const char FLT_CHARS[] = "rRsSfFdDxXeEpPhH"; /* Prefix character that indicates the start of an immediate value. */ #define is_immediate_prefix(C) ((C) == '#') @@ -618,6 +626,96 @@ my_get_expression (expressionS * ep, cha return TRUE; } +#define MAX_PRECISION 5 +#define H_PRECISION 1 +#define F_PRECISION 2 +#define D_PRECISION 4 +#define X_PRECISION 5 +#define P_PRECISION 5 +/* Length in LittleNums of guard bits. */ +#define GUARD 2 + +static void +make_invalid_floating_point_number (LITTLENUM_TYPE *words) +{ + as_bad (_("cannot create floating-point number")); + /* Zero the leftmost bit. */ + words[0] = (LITTLENUM_TYPE) ((unsigned) -1) >> 1; + words[1] = (LITTLENUM_TYPE) -1; + words[2] = (LITTLENUM_TYPE) -1; + words[3] = (LITTLENUM_TYPE) -1; + words[4] = (LITTLENUM_TYPE) -1; + words[5] = (LITTLENUM_TYPE) -1; +} + +char * atof_ieee_detail (char *, int, int, LITTLENUM_TYPE *, FLONUM_TYPE *); + +/* Build a floating point constant at str into a IEEE floating + point number. This function does the same thing as atof_ieee + however it allows more control over the exact format, i.e. 
+ explicitly specifying the precision and number of exponent bits + instead of relying on this infomation being deduced from a given type. + + If generic_float_info is not NULL then it will be set to contain generic + infomation about the parsed floating point number. + + Returns pointer past text consumed. */ +char * +atof_ieee_detail (char * str, + int precision, + int exponent_bits, + LITTLENUM_TYPE * words, + FLONUM_TYPE * generic_float_info) +{ + /* Extra bits for zeroed low-order bits. + The 1st MAX_PRECISION are zeroed, the last contain flonum bits. */ + static LITTLENUM_TYPE bits[MAX_PRECISION + MAX_PRECISION + GUARD]; + char *return_value; + + /* Number of 16-bit words in the format. */ + FLONUM_TYPE save_gen_flonum; + + /* We have to save the generic_floating_point_number because it + contains storage allocation about the array of LITTLENUMs where + the value is actually stored. We will allocate our own array of + littlenums below, but have to restore the global one on exit. */ + save_gen_flonum = generic_floating_point_number; + + return_value = str; + generic_floating_point_number.low = bits + MAX_PRECISION; + generic_floating_point_number.high = NULL; + generic_floating_point_number.leader = NULL; + generic_floating_point_number.exponent = 0; + generic_floating_point_number.sign = '\0'; + + /* Use more LittleNums than seems necessary: the highest flonum may + have 15 leading 0 bits, so could be useless. */ + + memset (bits, '\0', sizeof (LITTLENUM_TYPE) * MAX_PRECISION); + + generic_floating_point_number.high + = generic_floating_point_number.low + precision - 1 + GUARD; + + if (atof_generic (&return_value, ".", EXP_CHARS, + &generic_floating_point_number)) + { + make_invalid_floating_point_number (words); + return NULL; + } + + if (generic_float_info) + *generic_float_info = generic_floating_point_number; + + gen_to_words (words, precision, exponent_bits); + + /* Restore the generic_floating_point_number's storage alloc (and + everything else). */ + generic_floating_point_number = save_gen_flonum; + + return return_value; +} + + /* Turn a string in input_line_pointer into a floating point constant of type TYPE, and store the appropriate bytes in *LITP. The number of LITTLENUMS emitted is stored in *SIZEP. An error message is @@ -626,6 +724,54 @@ my_get_expression (expressionS * ep, cha const char * md_atof (int type, char *litP, int *sizeP) { + /* If this is a bfloat16 type, then parse it slightly differently - + as it does not follow the IEEE standard exactly. */ + if (type == 'b') + { + char * t; + LITTLENUM_TYPE words[MAX_LITTLENUMS]; + FLONUM_TYPE generic_float; + + t = atof_ieee_detail (input_line_pointer, 1, 8, words, &generic_float); + + if (t) + input_line_pointer = t; + else + return _("invalid floating point number"); + + switch (generic_float.sign) + { + /* Is +Inf. */ + case 'P': + words[0] = 0x7f80; + break; + + /* Is -Inf. */ + case 'N': + words[0] = 0xff80; + break; + + /* Is NaN. */ + /* bfloat16 has two types of NaN - quiet and signalling. + Quiet NaN has bit[6] == 1 && faction != 0, whereas + signalling Nan's have bit[0] == 0 && fraction != 0. + Chose this specific encoding as it is the same form + as used by other IEEE 754 encodings in GAS. 
*/ + case 0: + words[0] = 0x7fff; + break; + + default: + break; + } + + *sizeP = 2; + + md_number_to_chars (litP, (valueT) words[0], sizeof (LITTLENUM_TYPE)); + + return NULL; + } + return ieee_md_atof (type, litP, sizeP, target_big_endian); } @@ -826,7 +972,7 @@ parse_vector_type_for_operand (aarch64_r return FALSE; } -elt_size: + elt_size: switch (TOLOWER (*ptr)) { case 'b': @@ -1945,7 +2091,7 @@ s_variant_pcs (int ignored ATTRIBUTE_UNU restore_line_pointer (c); demand_empty_rest_of_line (); bfdsym = symbol_get_bfdsym (sym); - elfsym = elf_symbol_from (bfd_asymbol_bfd (bfdsym), bfdsym); + elfsym = elf_symbol_from (NULL, bfdsym); gas_assert (elfsym); elfsym->internal_elf_sym.st_other |= STO_AARCH64_VARIANT_PCS; } @@ -2088,6 +2234,8 @@ const pseudo_typeS md_pseudo_table[] = { {"dword", s_aarch64_elf_cons, 8}, {"variant_pcs", s_variant_pcs, 0}, #endif + {"float16", float_cons, 'h'}, + {"bfloat16", float_cons, 'b'}, {0, 0, 0} }; @@ -2122,7 +2270,7 @@ reg_name_p (char *str, aarch64_reg_type return FALSE; skip_whitespace (str); - if (*str == ',' || is_end_of_line[(unsigned int) *str]) + if (*str == ',' || is_end_of_line[(unsigned char) *str]) return TRUE; return FALSE; @@ -2302,7 +2450,6 @@ parse_aarch64_imm_float (char **ccp, int char *str = *ccp; char *fpnum; LITTLENUM_TYPE words[MAX_LITTLENUMS]; - int found_fpchar = 0; int64_t val = 0; unsigned fpword = 0; bfd_boolean hex_p = FALSE; @@ -2332,26 +2479,10 @@ parse_aarch64_imm_float (char **ccp, int hex_p = TRUE; } - else - { - if (reg_name_p (str, reg_type)) - { - set_recoverable_error (_("immediate operand required")); - return FALSE; - } - - /* We must not accidentally parse an integer as a floating-point number. - Make sure that the value we parse is not an integer by checking for - special characters '.' or 'e'. */ - for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++) - if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E') - { - found_fpchar = 1; - break; - } - - if (!found_fpchar) - return FALSE; + else if (reg_name_p (str, reg_type)) + { + set_recoverable_error (_("immediate operand required")); + return FALSE; } if (! 
hex_p) @@ -2373,7 +2504,7 @@ parse_aarch64_imm_float (char **ccp, int *ccp = str; return TRUE; -invalid_fp: + invalid_fp: set_fatal_syntax_error (_("invalid floating-point constant")); return FALSE; } @@ -2600,6 +2731,69 @@ static struct reloc_table_entry reloc_ta 0, 0}, + /* Most significant bits 0-15 of signed/unsigned address/value: MOVZ */ + {"prel_g0", 1, + 0, /* adr_type */ + 0, + BFD_RELOC_AARCH64_MOVW_PREL_G0, + 0, + 0, + 0}, + + /* Most significant bits 0-15 of signed/unsigned address/value: MOVK */ + {"prel_g0_nc", 1, + 0, /* adr_type */ + 0, + BFD_RELOC_AARCH64_MOVW_PREL_G0_NC, + 0, + 0, + 0}, + + /* Most significant bits 16-31 of signed/unsigned address/value: MOVZ */ + {"prel_g1", 1, + 0, /* adr_type */ + 0, + BFD_RELOC_AARCH64_MOVW_PREL_G1, + 0, + 0, + 0}, + + /* Most significant bits 16-31 of signed/unsigned address/value: MOVK */ + {"prel_g1_nc", 1, + 0, /* adr_type */ + 0, + BFD_RELOC_AARCH64_MOVW_PREL_G1_NC, + 0, + 0, + 0}, + + /* Most significant bits 32-47 of signed/unsigned address/value: MOVZ */ + {"prel_g2", 1, + 0, /* adr_type */ + 0, + BFD_RELOC_AARCH64_MOVW_PREL_G2, + 0, + 0, + 0}, + + /* Most significant bits 32-47 of signed/unsigned address/value: MOVK */ + {"prel_g2_nc", 1, + 0, /* adr_type */ + 0, + BFD_RELOC_AARCH64_MOVW_PREL_G2_NC, + 0, + 0, + 0}, + + /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */ + {"prel_g3", 1, + 0, /* adr_type */ + 0, + BFD_RELOC_AARCH64_MOVW_PREL_G3, + 0, + 0, + 0}, + /* Get to the page containing GOT entry for a symbol. */ {"got", 1, 0, /* adr_type */ @@ -2862,7 +3056,7 @@ static struct reloc_table_entry reloc_ta 0, 0, BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12, - 0, + BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12, 0}, /* Get tp offset for a symbol. */ @@ -2880,7 +3074,7 @@ static struct reloc_table_entry reloc_ta 0, 0, BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC, - 0, + BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC, 0}, /* Most significant bits 32-47 of address/value: MOVZ. */ @@ -3337,6 +3531,7 @@ parse_shifter_operand_reloc (char **str, [base,Xm,SXTX {#imm}] [base,Wm,(S|U)XTW {#imm}] Pre-indexed + [base]! // in ldraa/ldrab exclusive [base,#imm]! Post-indexed [base],#imm @@ -3350,6 +3545,7 @@ parse_shifter_operand_reloc (char **str, [base,Zm.D,(S|U)XTW {#imm}] // ignores top 32 bits of Zm.D elements [Zn.S,#imm] [Zn.D,#imm] + [Zn.S{, Xm}] [Zn.S,Zm.S{,LSL #imm}] // in ADR [Zn.D,Zm.D{,LSL #imm}] // in ADR [Zn.D,Zm.D,(S|U)XTW {#imm}] // in ADR @@ -3515,6 +3711,7 @@ parse_address_main (char **str, aarch64_ return FALSE; } /* We only accept: + [base,Xm] # For vector plus scalar SVE2 indexing. [base,Xm{,LSL #imm}] [base,Xm,SXTX {#imm}] [base,Wm,(S|U)XTW {#imm}] */ @@ -3528,7 +3725,10 @@ parse_address_main (char **str, aarch64_ return FALSE; } if (aarch64_get_qualifier_esize (*base_qualifier) - != aarch64_get_qualifier_esize (*offset_qualifier)) + != aarch64_get_qualifier_esize (*offset_qualifier) + && (operand->type != AARCH64_OPND_SVE_ADDR_ZX + || *base_qualifier != AARCH64_OPND_QLF_S_S + || *offset_qualifier != AARCH64_OPND_QLF_X)) { set_syntax_error (_("offset has different size from base")); return FALSE; @@ -3646,18 +3846,43 @@ parse_address_main (char **str, aarch64_ } /* If at this point neither .preind nor .postind is set, we have a - bare [Rn]{!}; reject [Rn]! but accept [Rn] as a shorthand for [Rn,#0]. */ + bare [Rn]{!}; only accept [Rn]! as a shorthand for [Rn,#0]! for ldraa and + ldrab, accept [Rn] as a shorthand for [Rn,#0]. + For SVE2 vector plus scalar offsets, allow [Zn.] as shorthand for + [Zn., xzr]. 
*/ if (operand->addr.preind == 0 && operand->addr.postind == 0) { if (operand->addr.writeback) { - /* Reject [Rn]! */ - set_syntax_error (_("missing offset in the pre-indexed address")); - return FALSE; + if (operand->type == AARCH64_OPND_ADDR_SIMM10) + { + /* Accept [Rn]! as a shorthand for [Rn,#0]! */ + operand->addr.offset.is_reg = 0; + operand->addr.offset.imm = 0; + operand->addr.preind = 1; + } + else + { + /* Reject [Rn]! */ + set_syntax_error (_("missing offset in the pre-indexed address")); + return FALSE; + } + } + else + { + operand->addr.preind = 1; + if (operand->type == AARCH64_OPND_SVE_ADDR_ZX) + { + operand->addr.offset.is_reg = 1; + operand->addr.offset.regno = REG_ZR; + *offset_qualifier = AARCH64_OPND_QLF_X; + } + else + { + inst.reloc.exp.X_op = O_constant; + inst.reloc.exp.X_add_number = 0; + } } - operand->addr.preind = 1; - inst.reloc.exp.X_op = O_constant; - inst.reloc.exp.X_add_number = 0; } *str = p; @@ -3849,7 +4074,7 @@ static int parse_barrier (char **str) { char *p, *q; - const asm_barrier_opt *o; + const struct aarch64_name_value_pair *o; p = q = *str; while (ISALPHA (*q)) @@ -3881,7 +4106,7 @@ parse_barrier_psb (char **str, if (!o) { set_fatal_syntax_error - ( _("unknown or missing option to PSB")); + ( _("unknown or missing option to PSB/TSB")); return PARSE_FAIL; } @@ -3889,7 +4114,48 @@ parse_barrier_psb (char **str, { /* PSB only accepts option name 'CSYNC'. */ set_syntax_error - (_("the specified option is not accepted for PSB")); + (_("the specified option is not accepted for PSB/TSB")); + return PARSE_FAIL; + } + + *str = q; + *hint_opt = o; + return 0; +} + +/* Parse an operand for BTI. Set *HINT_OPT to the hint-option record + return 0 if successful. Otherwise return PARSE_FAIL. */ + +static int +parse_bti_operand (char **str, + const struct aarch64_name_value_pair ** hint_opt) +{ + char *p, *q; + const struct aarch64_name_value_pair *o; + + p = q = *str; + while (ISALPHA (*q)) + q++; + + o = hash_find_n (aarch64_hint_opt_hsh, p, q - p); + if (!o) + { + set_fatal_syntax_error + ( _("unknown option to BTI")); + return PARSE_FAIL; + } + + switch (o->value) + { + /* Valid BTI operands. */ + case HINT_OPD_C: + case HINT_OPD_J: + case HINT_OPD_JC: + break; + + default: + set_syntax_error + (_("unknown option to BTI")); return PARSE_FAIL; } @@ -3909,21 +4175,26 @@ parse_barrier_psb (char **str, */ static int -parse_sys_reg (char **str, struct hash_control *sys_regs, - int imple_defined_p, int pstatefield_p) +parse_sys_reg (char **str, htab_t sys_regs, + int imple_defined_p, int pstatefield_p, + uint32_t* flags) { char *p, *q; - char buf[32]; + char buf[AARCH64_MAX_SYSREG_NAME_LEN]; const aarch64_sys_reg *o; int value; p = buf; for (q = *str; ISALNUM (*q) || *q == '_'; q++) - if (p < buf + 31) + if (p < buf + (sizeof (buf) - 1)) *p++ = TOLOWER (*q); *p = '\0'; - /* Assert that BUF be large enough. */ - gas_assert (p - buf == q - *str); + + /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a + valid system register. This is enforced by construction of the hash + table. 
*/ + if (p - buf != q - *str) + return PARSE_FAIL; o = hash_find (sys_regs, buf); if (!o) @@ -3941,6 +4212,8 @@ parse_sys_reg (char **str, struct hash_c if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7) return PARSE_FAIL; value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2; + if (flags) + *flags = 0; } } else @@ -3948,13 +4221,17 @@ parse_sys_reg (char **str, struct hash_c if (pstatefield_p && !aarch64_pstatefield_supported_p (cpu_variant, o)) as_bad (_("selected processor does not support PSTATE field " "name '%s'"), buf); - if (!pstatefield_p && !aarch64_sys_reg_supported_p (cpu_variant, o)) + if (!pstatefield_p + && !aarch64_sys_ins_reg_supported_p (cpu_variant, o->name, + o->value, o->flags, o->features)) as_bad (_("selected processor does not support system register " "name '%s'"), buf); - if (aarch64_sys_reg_deprecated_p (o)) + if (aarch64_sys_reg_deprecated_p (o->flags)) as_warn (_("system register name '%s' is deprecated and may be " "removed in a future release"), buf); value = o->value; + if (flags) + *flags = o->flags; } *str = q; @@ -3965,25 +4242,35 @@ parse_sys_reg (char **str, struct hash_c for the option, or NULL. */ static const aarch64_sys_ins_reg * -parse_sys_ins_reg (char **str, struct hash_control *sys_ins_regs) +parse_sys_ins_reg (char **str, htab_t sys_ins_regs) { char *p, *q; - char buf[32]; + char buf[AARCH64_MAX_SYSREG_NAME_LEN]; const aarch64_sys_ins_reg *o; p = buf; for (q = *str; ISALNUM (*q) || *q == '_'; q++) - if (p < buf + 31) + if (p < buf + (sizeof (buf) - 1)) *p++ = TOLOWER (*q); *p = '\0'; + /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a + valid system register. This is enforced by construction of the hash + table. */ + if (p - buf != q - *str) + return NULL; + o = hash_find (sys_ins_regs, buf); if (!o) return NULL; - if (!aarch64_sys_ins_reg_supported_p (cpu_variant, o)) + if (!aarch64_sys_ins_reg_supported_p (cpu_variant, + o->name, o->value, o->flags, 0)) as_bad (_("selected processor does not support system register " "name '%s'"), buf); + if (aarch64_sys_reg_deprecated_p (o->flags)) + as_warn (_("system register name '%s' is deprecated and may be " + "removed in a future release"), buf); *str = q; return o; @@ -4137,7 +4424,10 @@ reencode_movzn_to_movn (uint32_t opcode) static fixS * fix_new_aarch64 (fragS * frag, int where, - short int size, expressionS * exp, int pc_rel, int reloc) + short int size, + expressionS * exp, + int pc_rel, + int reloc) { fixS *new_fix; @@ -4371,6 +4661,7 @@ record_operand_error (const aarch64_opco info.index = idx; info.kind = kind; info.error = error; + info.non_fatal = FALSE; record_operand_error_info (opcode, &info); } @@ -4386,6 +4677,7 @@ record_operand_error_with_data (const aa info.data[0] = extra_data[0]; info.data[1] = extra_data[1]; info.data[2] = extra_data[2]; + info.non_fatal = FALSE; record_operand_error_info (opcode, &info); } @@ -4504,7 +4796,8 @@ print_operands (char *buf, const aarch64 break; /* Generate the operand string in STR. */ - aarch64_print_operand (str, sizeof (str), 0, opcode, opnds, i, NULL, NULL); + aarch64_print_operand (str, sizeof (str), 0, opcode, opnds, i, NULL, NULL, + NULL, cpu_variant); /* Delimiter. */ if (str[0] != '\0') @@ -4550,12 +4843,14 @@ output_operand_error_record (const opera enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx] : AARCH64_OPND_NIL); + typedef void (*handler_t)(const char *format, ...); + handler_t handler = detail->non_fatal ? 
as_warn : as_bad; + switch (detail->kind) { case AARCH64_OPDE_NIL: gas_assert (0); break; - case AARCH64_OPDE_SYNTAX_ERROR: case AARCH64_OPDE_RECOVERABLE: case AARCH64_OPDE_FATAL_SYNTAX_ERROR: @@ -4565,21 +4860,21 @@ output_operand_error_record (const opera if (detail->error != NULL) { if (idx < 0) - as_bad (_("%s -- `%s'"), detail->error, str); + handler (_("%s -- `%s'"), detail->error, str); else - as_bad (_("%s at operand %d -- `%s'"), - detail->error, idx + 1, str); + handler (_("%s at operand %d -- `%s'"), + detail->error, idx + 1, str); } else { gas_assert (idx >= 0); - as_bad (_("operand %d must be %s -- `%s'"), idx + 1, - aarch64_get_operand_desc (opd_code), str); + handler (_("operand %d must be %s -- `%s'"), idx + 1, + aarch64_get_operand_desc (opd_code), str); } break; case AARCH64_OPDE_INVALID_VARIANT: - as_bad (_("operand mismatch -- `%s'"), str); + handler (_("operand mismatch -- `%s'"), str); if (verbose_error_p) { /* We will try to correct the erroneous instruction and also provide @@ -4627,7 +4922,7 @@ output_operand_error_record (const opera && programmer_friendly_fixup (&inst); gas_assert (result); result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value, - NULL, NULL); + NULL, NULL, insn_sequence); gas_assert (!result); /* Find the most matched qualifier sequence. */ @@ -4676,36 +4971,36 @@ output_operand_error_record (const opera break; case AARCH64_OPDE_UNTIED_OPERAND: - as_bad (_("operand %d must be the same register as operand 1 -- `%s'"), - detail->index + 1, str); + handler (_("operand %d must be the same register as operand 1 -- `%s'"), + detail->index + 1, str); break; case AARCH64_OPDE_OUT_OF_RANGE: if (detail->data[0] != detail->data[1]) - as_bad (_("%s out of range %d to %d at operand %d -- `%s'"), - detail->error ? detail->error : _("immediate value"), - detail->data[0], detail->data[1], idx + 1, str); + handler (_("%s out of range %d to %d at operand %d -- `%s'"), + detail->error ? detail->error : _("immediate value"), + detail->data[0], detail->data[1], idx + 1, str); else - as_bad (_("%s must be %d at operand %d -- `%s'"), - detail->error ? detail->error : _("immediate value"), - detail->data[0], idx + 1, str); + handler (_("%s must be %d at operand %d -- `%s'"), + detail->error ? 
detail->error : _("immediate value"), + detail->data[0], idx + 1, str); break; case AARCH64_OPDE_REG_LIST: if (detail->data[0] == 1) - as_bad (_("invalid number of registers in the list; " - "only 1 register is expected at operand %d -- `%s'"), - idx + 1, str); + handler (_("invalid number of registers in the list; " + "only 1 register is expected at operand %d -- `%s'"), + idx + 1, str); else - as_bad (_("invalid number of registers in the list; " - "%d registers are expected at operand %d -- `%s'"), - detail->data[0], idx + 1, str); + handler (_("invalid number of registers in the list; " + "%d registers are expected at operand %d -- `%s'"), + detail->data[0], idx + 1, str); break; case AARCH64_OPDE_UNALIGNED: - as_bad (_("immediate value must be a multiple of " - "%d at operand %d -- `%s'"), - detail->data[0], idx + 1, str); + handler (_("immediate value must be a multiple of " + "%d at operand %d -- `%s'"), + detail->data[0], idx + 1, str); break; default: @@ -4719,10 +5014,15 @@ output_operand_error_record (const opera When this function is called, the operand error information had been collected for an assembly line and there will be multiple errors in the case of multiple instruction templates; output the - error message that most closely describes the problem. */ + error message that most closely describes the problem. + + The errors to be printed can be filtered on printing all errors + or only non-fatal errors. This distinction has to be made because + the error buffer may already be filled with fatal errors we don't want to + print due to the different instruction templates. */ static void -output_operand_error_report (char *str) +output_operand_error_report (char *str, bfd_boolean non_fatal_only) { int largest_error_pos; const char *msg = NULL; @@ -4740,9 +5040,14 @@ output_operand_error_report (char *str) /* Only one error. */ if (head == operand_error_report.tail) { - DEBUG_TRACE ("single opcode entry with error kind: %s", - operand_mismatch_kind_names[head->detail.kind]); - output_operand_error_record (head, str); + /* If the only error is a non-fatal one and we don't want to print it, + just exit. */ + if (!non_fatal_only || head->detail.non_fatal) + { + DEBUG_TRACE ("single opcode entry with error kind: %s", + operand_mismatch_kind_names[head->detail.kind]); + output_operand_error_record (head, str); + } return; } @@ -4753,16 +5058,21 @@ output_operand_error_report (char *str) { gas_assert (curr->detail.kind != AARCH64_OPDE_NIL); DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]); - if (operand_error_higher_severity_p (curr->detail.kind, kind)) + if (operand_error_higher_severity_p (curr->detail.kind, kind) + && (!non_fatal_only || (non_fatal_only && curr->detail.non_fatal))) kind = curr->detail.kind; } - gas_assert (kind != AARCH64_OPDE_NIL); + + gas_assert (kind != AARCH64_OPDE_NIL || non_fatal_only); /* Pick up one of errors of KIND to report. */ largest_error_pos = -2; /* Index can be -1 which means unknown index. */ for (curr = head; curr != NULL; curr = curr->next) { - if (curr->detail.kind != kind) + /* If we don't want to print non-fatal errors then don't consider them + at all. */ + if (curr->detail.kind != kind + || (non_fatal_only && !curr->detail.non_fatal)) continue; /* If there are multiple errors, pick up the one with the highest mismatching operand index. In the case of multiple errors with @@ -4778,6 +5088,17 @@ output_operand_error_report (char *str) } } + /* The way errors are collected in the back-end is a bit non-intuitive. 
But + essentially, because each operand template is tried recursively you may + always have errors collected from the previous tried OPND. These are + usually skipped if there is one successful match. However now with the + non-fatal errors we have to ignore those previously collected hard errors + when we're only interested in printing the non-fatal ones. This condition + prevents us from printing errors that are not appropriate, since we did + match a condition, but it also has warnings that it wants to print. */ + if (non_fatal_only && !record) + return; + gas_assert (largest_error_pos != -2 && record != NULL); DEBUG_TRACE ("Pick up error kind %s to report", operand_mismatch_kind_names[record->detail.kind]); @@ -4802,7 +5123,8 @@ get_aarch64_insn (char *buf) { unsigned char *where = (unsigned char *) buf; uint32_t result; - result = (where[0] | (where[1] << 8) | (where[2] << 16) | (where[3] << 24)); + result = ((where[0] | (where[1] << 8) | (where[2] << 16) + | ((uint32_t) where[3] << 24))); return result; } @@ -4957,6 +5279,10 @@ vectype_to_qualifier (const struct vecto if (vectype->type == NT_b && vectype->width == 4) return AARCH64_OPND_QLF_S_4B; + /* Special case S_2H. */ + if (vectype->type == NT_h && vectype->width == 2) + return AARCH64_OPND_QLF_S_2H; + /* Vector element register. */ return AARCH64_OPND_QLF_S_B + vectype->type; } @@ -4989,7 +5315,7 @@ vectype_to_qualifier (const struct vecto return offset; } -vectype_conversion_fail: + vectype_conversion_fail: first_error (_("bad vector arrangement type")); return AARCH64_OPND_QLF_NIL; } @@ -5014,6 +5340,8 @@ process_omitted_operand (enum aarch64_op case AARCH64_OPND_Rm: case AARCH64_OPND_Rt: case AARCH64_OPND_Rt2: + case AARCH64_OPND_Rt_LS64: + case AARCH64_OPND_Rt_SP: case AARCH64_OPND_Rs: case AARCH64_OPND_Ra: case AARCH64_OPND_Rt_SYS: @@ -5041,6 +5369,7 @@ process_omitted_operand (enum aarch64_op case AARCH64_OPND_Ed: case AARCH64_OPND_En: case AARCH64_OPND_Em: + case AARCH64_OPND_Em16: case AARCH64_OPND_SM3_IMM2: operand->reglane.regno = default_value; break; @@ -5079,6 +5408,11 @@ process_omitted_operand (enum aarch64_op case AARCH64_OPND_BARRIER_ISB: operand->barrier = aarch64_barrier_options + default_value; + break; + + case AARCH64_OPND_BTI_TARGET: + operand->hint_option = aarch64_hint_options + default_value; + break; default: break; @@ -5102,6 +5436,10 @@ process_movw_reloc_info (void) case BFD_RELOC_AARCH64_MOVW_G0_S: case BFD_RELOC_AARCH64_MOVW_G1_S: case BFD_RELOC_AARCH64_MOVW_G2_S: + case BFD_RELOC_AARCH64_MOVW_PREL_G0: + case BFD_RELOC_AARCH64_MOVW_PREL_G1: + case BFD_RELOC_AARCH64_MOVW_PREL_G2: + case BFD_RELOC_AARCH64_MOVW_PREL_G3: case BFD_RELOC_AARCH64_TLSGD_MOVW_G1: case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0: case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1: @@ -5119,6 +5457,8 @@ process_movw_reloc_info (void) case BFD_RELOC_AARCH64_MOVW_G0_NC: case BFD_RELOC_AARCH64_MOVW_G0_S: case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC: + case BFD_RELOC_AARCH64_MOVW_PREL_G0: + case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC: case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC: case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC: case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC: @@ -5132,6 +5472,8 @@ process_movw_reloc_info (void) case BFD_RELOC_AARCH64_MOVW_G1_NC: case BFD_RELOC_AARCH64_MOVW_G1_S: case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1: + case BFD_RELOC_AARCH64_MOVW_PREL_G1: + case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC: case BFD_RELOC_AARCH64_TLSDESC_OFF_G1: case BFD_RELOC_AARCH64_TLSGD_MOVW_G1: case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1: @@ -5144,6 +5486,8 @@ 
process_movw_reloc_info (void) case BFD_RELOC_AARCH64_MOVW_G2: case BFD_RELOC_AARCH64_MOVW_G2_NC: case BFD_RELOC_AARCH64_MOVW_G2_S: + case BFD_RELOC_AARCH64_MOVW_PREL_G2: + case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC: case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2: case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2: if (is32) @@ -5156,6 +5500,7 @@ process_movw_reloc_info (void) shift = 32; break; case BFD_RELOC_AARCH64_MOVW_G3: + case BFD_RELOC_AARCH64_MOVW_PREL_G3: if (is32) { set_fatal_syntax_error @@ -5202,7 +5547,7 @@ ldst_lo12_determine_real_reloc_type (voi enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier; enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier; - const bfd_reloc_code_real_type reloc_ldst_lo12[3][5] = { + const bfd_reloc_code_real_type reloc_ldst_lo12[5][5] = { { BFD_RELOC_AARCH64_LDST8_LO12, BFD_RELOC_AARCH64_LDST16_LO12, @@ -5223,13 +5568,31 @@ ldst_lo12_determine_real_reloc_type (voi BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC, BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC, BFD_RELOC_AARCH64_NONE + }, + { + BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12, + BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12, + BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12, + BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12, + BFD_RELOC_AARCH64_NONE + }, + { + BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC, + BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC, + BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC, + BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC, + BFD_RELOC_AARCH64_NONE } }; gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12 || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12 || (inst.reloc.type - == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)); + == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC) + || (inst.reloc.type + == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12) + || (inst.reloc.type + == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC)); gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12); if (opd1_qlf == AARCH64_OPND_QLF_NIL) @@ -5240,7 +5603,9 @@ ldst_lo12_determine_real_reloc_type (voi logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf)); if (inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12 - || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC) + || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC + || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12 + || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC) gas_assert (logsz <= 3); else gas_assert (logsz <= 4); @@ -5345,14 +5710,31 @@ parse_operands (char *str, const aarch64 case AARCH64_OPND_Rt2: case AARCH64_OPND_Rs: case AARCH64_OPND_Ra: + case AARCH64_OPND_Rt_LS64: case AARCH64_OPND_Rt_SYS: case AARCH64_OPND_PAIRREG: case AARCH64_OPND_SVE_Rm: po_int_reg_or_fail (REG_TYPE_R_Z); + + /* In LS64 load/store instructions Rt register number must be even + and <=22. */ + if (operands[i] == AARCH64_OPND_Rt_LS64) + { + /* We've already checked if this is valid register. + This will check if register number (Rt) is not undefined for LS64 + instructions: + if Rt<4:3> == '11' || Rt<0> == '1' then UNDEFINED. 
*/ + if ((info->reg.regno & 0x18) == 0x18 || (info->reg.regno & 0x01) == 0x01) + { + set_syntax_error (_("invalid Rt register number in 64-byte load/store")); + goto failure; + } + } break; case AARCH64_OPND_Rd_SP: case AARCH64_OPND_Rn_SP: + case AARCH64_OPND_Rt_SP: case AARCH64_OPND_SVE_Rn_SP: case AARCH64_OPND_Rm_SP: po_int_reg_or_fail (REG_TYPE_R_SP); @@ -5477,6 +5859,8 @@ parse_operands (char *str, const aarch64 case AARCH64_OPND_SVE_Zm3_INDEX: case AARCH64_OPND_SVE_Zm3_22_INDEX: + case AARCH64_OPND_SVE_Zm3_11_INDEX: + case AARCH64_OPND_SVE_Zm4_11_INDEX: case AARCH64_OPND_SVE_Zm4_INDEX: case AARCH64_OPND_SVE_Zn_INDEX: reg_type = REG_TYPE_ZN; @@ -5485,6 +5869,7 @@ parse_operands (char *str, const aarch64 case AARCH64_OPND_Ed: case AARCH64_OPND_En: case AARCH64_OPND_Em: + case AARCH64_OPND_Em16: case AARCH64_OPND_SM3_IMM2: reg_type = REG_TYPE_VN; vector_reg_index: @@ -5533,11 +5918,20 @@ parse_operands (char *str, const aarch64 val = parse_vector_reg_list (&str, reg_type, &vectype); if (val == PARSE_FAIL) goto failure; + if (! reg_list_valid_p (val, /* accept_alternate */ 0)) { set_fatal_syntax_error (_("invalid register list")); goto failure; } + + if (vectype.width != 0 && *str != ',') + { + set_fatal_syntax_error + (_("expected element type rather than vector type")); + goto failure; + } + info->reglist.first_regno = (val >> 2) & 0x1f; info->reglist.num_regs = (val & 0x3) + 1; } @@ -5591,7 +5985,10 @@ parse_operands (char *str, const aarch64 case AARCH64_OPND_CCMP_IMM: case AARCH64_OPND_SIMM5: case AARCH64_OPND_FBITS: + case AARCH64_OPND_TME_UIMM16: case AARCH64_OPND_UIMM4: + case AARCH64_OPND_UIMM4_ADDG: + case AARCH64_OPND_UIMM10: case AARCH64_OPND_UIMM3_OP1: case AARCH64_OPND_UIMM3_OP2: case AARCH64_OPND_IMM_VLSL: @@ -5603,8 +6000,10 @@ parse_operands (char *str, const aarch64 case AARCH64_OPND_SVE_LIMM_MOV: case AARCH64_OPND_SVE_SHLIMM_PRED: case AARCH64_OPND_SVE_SHLIMM_UNPRED: + case AARCH64_OPND_SVE_SHLIMM_UNPRED_22: case AARCH64_OPND_SVE_SHRIMM_PRED: case AARCH64_OPND_SVE_SHRIMM_UNPRED: + case AARCH64_OPND_SVE_SHRIMM_UNPRED_22: case AARCH64_OPND_SVE_SIMM5: case AARCH64_OPND_SVE_SIMM5B: case AARCH64_OPND_SVE_SIMM6: @@ -5618,6 +6017,7 @@ parse_operands (char *str, const aarch64 case AARCH64_OPND_IMM_ROT3: case AARCH64_OPND_SVE_IMM_ROT1: case AARCH64_OPND_SVE_IMM_ROT2: + case AARCH64_OPND_SVE_IMM_ROT3: po_imm_nc_or_fail (); info->imm.value = val; break; @@ -5865,6 +6265,7 @@ parse_operands (char *str, const aarch64 break; case AARCH64_OPND_EXCEPTION: + case AARCH64_OPND_UNDEFINED: po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp, imm_reg_type)); assign_imm_if_const_or_fixup_later (&inst.reloc, info, @@ -6062,6 +6463,8 @@ parse_operands (char *str, const aarch64 case AARCH64_OPND_ADDR_SIMM9: case AARCH64_OPND_ADDR_SIMM9_2: + case AARCH64_OPND_ADDR_SIMM11: + case AARCH64_OPND_ADDR_SIMM13: po_misc_or_fail (parse_address (&str, info)); if (info->addr.pcrel || info->addr.offset.is_reg || (!info->addr.preind && !info->addr.postind) @@ -6116,7 +6519,11 @@ parse_operands (char *str, const aarch64 || (inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12) || (inst.reloc.type - == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)) + == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC) + || (inst.reloc.type + == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12) + || (inst.reloc.type + == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC)) inst.reloc.type = ldst_lo12_determine_real_reloc_type (); /* Leave qualifier to be determined by libopcodes. 
*/ break; @@ -6144,6 +6551,7 @@ parse_operands (char *str, const aarch64 break; case AARCH64_OPND_SVE_ADDR_RI_S4x16: + case AARCH64_OPND_SVE_ADDR_RI_S4x32: case AARCH64_OPND_SVE_ADDR_RI_S4xVL: case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL: case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL: @@ -6182,6 +6590,25 @@ parse_operands (char *str, const aarch64 info->addr.offset.imm = inst.reloc.exp.X_add_number; break; + case AARCH64_OPND_SVE_ADDR_R: + /* [<Xn|SP>{, <R><m>}] + but recognizing SVE registers. */ + po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier, + &offset_qualifier)); + if (offset_qualifier == AARCH64_OPND_QLF_NIL) + { + offset_qualifier = AARCH64_OPND_QLF_X; + info->addr.offset.is_reg = 1; + info->addr.offset.regno = 31; + } + else if (base_qualifier != AARCH64_OPND_QLF_X + || offset_qualifier != AARCH64_OPND_QLF_X) + { + set_syntax_error (_("invalid addressing mode")); + goto failure; + } + goto regoff_addr; + case AARCH64_OPND_SVE_ADDR_RR: case AARCH64_OPND_SVE_ADDR_RR_LSL1: case AARCH64_OPND_SVE_ADDR_RR_LSL2: @@ -6228,6 +6655,33 @@ parse_operands (char *str, const aarch64 info->qualifier = offset_qualifier; goto regoff_addr; + case AARCH64_OPND_SVE_ADDR_ZX: + /* [Zn.<T>{, <Xm>}]. */ + po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier, + &offset_qualifier)); + /* Things to check: + base_qualifier either S_S or S_D + offset_qualifier must be X + */ + if ((base_qualifier != AARCH64_OPND_QLF_S_S + && base_qualifier != AARCH64_OPND_QLF_S_D) + || offset_qualifier != AARCH64_OPND_QLF_X) + { + set_syntax_error (_("invalid addressing mode")); + goto failure; + } + info->qualifier = base_qualifier; + if (!info->addr.offset.is_reg || info->addr.pcrel + || !info->addr.preind || info->addr.writeback + || info->shifter.operator_present != 0) + { + set_syntax_error (_("invalid addressing mode")); + goto failure; + } + info->shifter.kind = AARCH64_MOD_LSL; + break; + + case AARCH64_OPND_SVE_ADDR_ZI_U5: case AARCH64_OPND_SVE_ADDR_ZI_U5x2: case AARCH64_OPND_SVE_ADDR_ZI_U5x4: @@ -6269,17 +6723,21 @@ parse_operands (char *str, const aarch64 goto regoff_addr; case AARCH64_OPND_SYSREG: - if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0)) - == PARSE_FAIL) - { - set_syntax_error (_("unknown or missing system register name")); - goto failure; - } - inst.base.operands[i].sysreg = val; - break; + { + uint32_t sysreg_flags; + if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0, + &sysreg_flags)) == PARSE_FAIL) + { + set_syntax_error (_("unknown or missing system register name")); + goto failure; + } + inst.base.operands[i].sysreg.value = val; + inst.base.operands[i].sysreg.flags = sysreg_flags; + break; + } case AARCH64_OPND_PSTATEFIELD: - if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0, 1)) + if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0, 1, NULL)) == PARSE_FAIL) { set_syntax_error (_("unknown or missing PSTATE field name")); @@ -6292,18 +6750,26 @@ parse_operands (char *str, const aarch64 inst.base.operands[i].sysins_op = parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh); goto sys_reg_ins; + case AARCH64_OPND_SYSREG_DC: inst.base.operands[i].sysins_op = parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh); goto sys_reg_ins; + case AARCH64_OPND_SYSREG_AT: inst.base.operands[i].sysins_op = parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh); goto sys_reg_ins; + + case AARCH64_OPND_SYSREG_SR: + inst.base.operands[i].sysins_op = + parse_sys_ins_reg (&str, aarch64_sys_regs_sr_hsh); + goto sys_reg_ins; + case AARCH64_OPND_SYSREG_TLBI: inst.base.operands[i].sysins_op =
parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh); -sys_reg_ins: + sys_reg_ins: if (inst.base.operands[i].sysins_op == NULL) { set_fatal_syntax_error ( _("unknown or missing operation name")); @@ -6324,12 +6790,53 @@ sys_reg_ins: backtrack_pos = 0; goto failure; } + if (val != PARSE_FAIL + && operands[i] == AARCH64_OPND_BARRIER) + { + /* Regular barriers accept options CRm (C0-C15). + DSB nXS barrier variant accepts values > 15. */ + if (val < 0 || val > 15) + { + set_syntax_error (_("the specified option is not accepted in DSB")); + goto failure; + } + } /* This is an extension to accept a 0..15 immediate. */ if (val == PARSE_FAIL) po_imm_or_fail (0, 15); info->barrier = aarch64_barrier_options + val; break; + case AARCH64_OPND_BARRIER_DSB_NXS: + val = parse_barrier (&str); + if (val != PARSE_FAIL) + { + /* DSB nXS barrier variant accept only