From d7a338195825c14715d3178c5e06537b609da6fc Mon Sep 17 00:00:00 2001 From: CentOS Sources Date: Dec 07 2021 19:22:27 +0000 Subject: import gnutls-3.7.2-8.el9 --- diff --git a/SOURCES/gnutls-3.7.2-enable-intel-cet.patch b/SOURCES/gnutls-3.7.2-enable-intel-cet.patch new file mode 100644 index 0000000..b7e1063 --- /dev/null +++ b/SOURCES/gnutls-3.7.2-enable-intel-cet.patch @@ -0,0 +1,2565 @@ +From 71b1812bf9a785b66e3f17175580d3d20cea9c0c Mon Sep 17 00:00:00 2001 +From: Daiki Ueno +Date: Tue, 12 Oct 2021 13:33:31 +0200 +Subject: [PATCH] x86: port Intel CET support + +Signed-off-by: Daiki Ueno +--- + lib/accelerated/x86/elf/aes-ssse3-x86.s | 30 ++++++++++++++ + lib/accelerated/x86/elf/aes-ssse3-x86_64.s | 26 +++++++++++++ + lib/accelerated/x86/elf/aesni-gcm-x86_64.s | 21 ++++++++++ + lib/accelerated/x86/elf/aesni-x86.s | 39 +++++++++++++++++++ + lib/accelerated/x86/elf/aesni-x86_64.s | 32 +++++++++++++++ + lib/accelerated/x86/elf/ghash-x86_64.s | 27 +++++++++++++ + lib/accelerated/x86/elf/sha1-ssse3-x86.s | 18 +++++++++ + lib/accelerated/x86/elf/sha1-ssse3-x86_64.s | 21 ++++++++++ + lib/accelerated/x86/elf/sha256-ssse3-x86.s | 18 +++++++++ + lib/accelerated/x86/elf/sha256-ssse3-x86_64.s | 21 ++++++++++ + lib/accelerated/x86/elf/sha512-ssse3-x86.s | 18 +++++++++ + lib/accelerated/x86/elf/sha512-ssse3-x86_64.s | 21 ++++++++++ + 12 files changed, 292 insertions(+) + +diff --git a/lib/accelerated/x86/elf/aes-ssse3-x86.s b/lib/accelerated/x86/elf/aes-ssse3-x86.s +index 265e28a7ef..7be53059f7 100644 +--- a/lib/accelerated/x86/elf/aes-ssse3-x86.s ++++ b/lib/accelerated/x86/elf/aes-ssse3-x86.s +@@ -71,6 +71,7 @@ + .type _vpaes_preheat,@function + .align 16 + _vpaes_preheat: ++.byte 243,15,30,251 + addl (%esp),%ebp + movdqa -48(%ebp),%xmm7 + movdqa -16(%ebp),%xmm6 +@@ -79,6 +80,7 @@ _vpaes_preheat: + .type _vpaes_encrypt_core,@function + .align 16 + _vpaes_encrypt_core: ++.byte 243,15,30,251 + movl $16,%ecx + movl 240(%edx),%eax + movdqa %xmm6,%xmm1 +@@ -156,6 +158,7 @@ _vpaes_encrypt_core: + .type _vpaes_decrypt_core,@function + .align 16 + _vpaes_decrypt_core: ++.byte 243,15,30,251 + leal 608(%ebp),%ebx + movl 240(%edx),%eax + movdqa %xmm6,%xmm1 +@@ -244,6 +247,7 @@ _vpaes_decrypt_core: + .type _vpaes_schedule_core,@function + .align 16 + _vpaes_schedule_core: ++.byte 243,15,30,251 + addl (%esp),%ebp + movdqu (%esi),%xmm0 + movdqa 320(%ebp),%xmm2 +@@ -338,6 +342,7 @@ _vpaes_schedule_core: + .type _vpaes_schedule_192_smear,@function + .align 16 + _vpaes_schedule_192_smear: ++.byte 243,15,30,251 + pshufd $128,%xmm6,%xmm1 + pshufd $254,%xmm7,%xmm0 + pxor %xmm1,%xmm6 +@@ -350,6 +355,7 @@ _vpaes_schedule_192_smear: + .type _vpaes_schedule_round,@function + .align 16 + _vpaes_schedule_round: ++.byte 243,15,30,251 + movdqa 8(%esp),%xmm2 + pxor %xmm1,%xmm1 + .byte 102,15,58,15,202,15 +@@ -399,6 +405,7 @@ _vpaes_schedule_round: + .type _vpaes_schedule_transform,@function + .align 16 + _vpaes_schedule_transform: ++.byte 243,15,30,251 + movdqa -16(%ebp),%xmm2 + movdqa %xmm2,%xmm1 + pandn %xmm0,%xmm1 +@@ -414,6 +421,7 @@ _vpaes_schedule_transform: + .type _vpaes_schedule_mangle,@function + .align 16 + _vpaes_schedule_mangle: ++.byte 243,15,30,251 + movdqa %xmm0,%xmm4 + movdqa 128(%ebp),%xmm5 + testl %edi,%edi +@@ -475,6 +483,7 @@ _vpaes_schedule_mangle: + .align 16 + vpaes_set_encrypt_key: + .L_vpaes_set_encrypt_key_begin: ++.byte 243,15,30,251 + pushl %ebp + pushl %ebx + pushl %esi +@@ -508,6 +517,7 @@ vpaes_set_encrypt_key: + .align 16 + vpaes_set_decrypt_key: + .L_vpaes_set_decrypt_key_begin: ++.byte 
243,15,30,251 + pushl %ebp + pushl %ebx + pushl %esi +@@ -546,6 +556,7 @@ vpaes_set_decrypt_key: + .align 16 + vpaes_encrypt: + .L_vpaes_encrypt_begin: ++.byte 243,15,30,251 + pushl %ebp + pushl %ebx + pushl %esi +@@ -575,6 +586,7 @@ vpaes_encrypt: + .align 16 + vpaes_decrypt: + .L_vpaes_decrypt_begin: ++.byte 243,15,30,251 + pushl %ebp + pushl %ebx + pushl %esi +@@ -604,6 +616,7 @@ vpaes_decrypt: + .align 16 + vpaes_cbc_encrypt: + .L_vpaes_cbc_encrypt_begin: ++.byte 243,15,30,251 + pushl %ebp + pushl %ebx + pushl %esi +@@ -671,4 +684,21 @@ vpaes_cbc_encrypt: + ret + .size vpaes_cbc_encrypt,.-.L_vpaes_cbc_encrypt_begin + ++ .section ".note.gnu.property", "a" ++ .p2align 2 ++ .long 1f - 0f ++ .long 4f - 1f ++ .long 5 ++0: ++ .asciz "GNU" ++1: ++ .p2align 2 ++ .long 0xc0000002 ++ .long 3f - 2f ++2: ++ .long 3 ++3: ++ .p2align 2 ++4: ++ + .section .note.GNU-stack,"",%progbits +diff --git a/lib/accelerated/x86/elf/aes-ssse3-x86_64.s b/lib/accelerated/x86/elf/aes-ssse3-x86_64.s +index ea1216baf7..5a3f336f26 100644 +--- a/lib/accelerated/x86/elf/aes-ssse3-x86_64.s ++++ b/lib/accelerated/x86/elf/aes-ssse3-x86_64.s +@@ -635,6 +635,7 @@ _vpaes_schedule_mangle: + .align 16 + vpaes_set_encrypt_key: + .cfi_startproc ++.byte 243,15,30,250 + movl %esi,%eax + shrl $5,%eax + addl $5,%eax +@@ -653,6 +654,7 @@ vpaes_set_encrypt_key: + .align 16 + vpaes_set_decrypt_key: + .cfi_startproc ++.byte 243,15,30,250 + movl %esi,%eax + shrl $5,%eax + addl $5,%eax +@@ -676,6 +678,7 @@ vpaes_set_decrypt_key: + .align 16 + vpaes_encrypt: + .cfi_startproc ++.byte 243,15,30,250 + movdqu (%rdi),%xmm0 + call _vpaes_preheat + call _vpaes_encrypt_core +@@ -689,6 +692,7 @@ vpaes_encrypt: + .align 16 + vpaes_decrypt: + .cfi_startproc ++.byte 243,15,30,250 + movdqu (%rdi),%xmm0 + call _vpaes_preheat + call _vpaes_decrypt_core +@@ -701,6 +705,7 @@ vpaes_decrypt: + .align 16 + vpaes_cbc_encrypt: + .cfi_startproc ++.byte 243,15,30,250 + xchgq %rcx,%rdx + subq $16,%rcx + jc .Lcbc_abort +@@ -863,5 +868,26 @@ _vpaes_consts: + .byte 86,101,99,116,111,114,32,80,101,114,109,117,116,97,116,105,111,110,32,65,69,83,32,102,111,114,32,120,56,54,95,54,52,47,83,83,83,69,51,44,32,77,105,107,101,32,72,97,109,98,117,114,103,32,40,83,116,97,110,102,111,114,100,32,85,110,105,118,101,114,115,105,116,121,41,0 + .align 64 + .size _vpaes_consts,.-_vpaes_consts ++ .section ".note.gnu.property", "a" ++ .p2align 3 ++ .long 1f - 0f ++ .long 4f - 1f ++ .long 5 ++0: ++ # "GNU" encoded with .byte, since .asciz isn't supported ++ # on Solaris. ++ .byte 0x47 ++ .byte 0x4e ++ .byte 0x55 ++ .byte 0 ++1: ++ .p2align 3 ++ .long 0xc0000002 ++ .long 3f - 2f ++2: ++ .long 3 ++3: ++ .p2align 3 ++4: + + .section .note.GNU-stack,"",%progbits +diff --git a/lib/accelerated/x86/elf/aesni-gcm-x86_64.s b/lib/accelerated/x86/elf/aesni-gcm-x86_64.s +index 461dd026b9..ea5398bc2c 100644 +--- a/lib/accelerated/x86/elf/aesni-gcm-x86_64.s ++++ b/lib/accelerated/x86/elf/aesni-gcm-x86_64.s +@@ -826,5 +826,26 @@ aesni_gcm_encrypt: + .byte 1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 + .byte 65,69,83,45,78,73,32,71,67,77,32,109,111,100,117,108,101,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 + .align 64 ++ .section ".note.gnu.property", "a" ++ .p2align 3 ++ .long 1f - 0f ++ .long 4f - 1f ++ .long 5 ++0: ++ # "GNU" encoded with .byte, since .asciz isn't supported ++ # on Solaris. 
++ .byte 0x47 ++ .byte 0x4e ++ .byte 0x55 ++ .byte 0 ++1: ++ .p2align 3 ++ .long 0xc0000002 ++ .long 3f - 2f ++2: ++ .long 3 ++3: ++ .p2align 3 ++4: + + .section .note.GNU-stack,"",%progbits +diff --git a/lib/accelerated/x86/elf/aesni-x86.s b/lib/accelerated/x86/elf/aesni-x86.s +index 6e4860209f..f41d5f9ef3 100644 +--- a/lib/accelerated/x86/elf/aesni-x86.s ++++ b/lib/accelerated/x86/elf/aesni-x86.s +@@ -43,6 +43,7 @@ + .align 16 + aesni_encrypt: + .L_aesni_encrypt_begin: ++.byte 243,15,30,251 + movl 4(%esp),%eax + movl 12(%esp),%edx + movups (%eax),%xmm2 +@@ -70,6 +71,7 @@ aesni_encrypt: + .align 16 + aesni_decrypt: + .L_aesni_decrypt_begin: ++.byte 243,15,30,251 + movl 4(%esp),%eax + movl 12(%esp),%edx + movups (%eax),%xmm2 +@@ -95,6 +97,7 @@ aesni_decrypt: + .type _aesni_encrypt2,@function + .align 16 + _aesni_encrypt2: ++.byte 243,15,30,251 + movups (%edx),%xmm0 + shll $4,%ecx + movups 16(%edx),%xmm1 +@@ -122,6 +125,7 @@ _aesni_encrypt2: + .type _aesni_decrypt2,@function + .align 16 + _aesni_decrypt2: ++.byte 243,15,30,251 + movups (%edx),%xmm0 + shll $4,%ecx + movups 16(%edx),%xmm1 +@@ -149,6 +153,7 @@ _aesni_decrypt2: + .type _aesni_encrypt3,@function + .align 16 + _aesni_encrypt3: ++.byte 243,15,30,251 + movups (%edx),%xmm0 + shll $4,%ecx + movups 16(%edx),%xmm1 +@@ -181,6 +186,7 @@ _aesni_encrypt3: + .type _aesni_decrypt3,@function + .align 16 + _aesni_decrypt3: ++.byte 243,15,30,251 + movups (%edx),%xmm0 + shll $4,%ecx + movups 16(%edx),%xmm1 +@@ -213,6 +219,7 @@ _aesni_decrypt3: + .type _aesni_encrypt4,@function + .align 16 + _aesni_encrypt4: ++.byte 243,15,30,251 + movups (%edx),%xmm0 + movups 16(%edx),%xmm1 + shll $4,%ecx +@@ -251,6 +258,7 @@ _aesni_encrypt4: + .type _aesni_decrypt4,@function + .align 16 + _aesni_decrypt4: ++.byte 243,15,30,251 + movups (%edx),%xmm0 + movups 16(%edx),%xmm1 + shll $4,%ecx +@@ -289,6 +297,7 @@ _aesni_decrypt4: + .type _aesni_encrypt6,@function + .align 16 + _aesni_encrypt6: ++.byte 243,15,30,251 + movups (%edx),%xmm0 + shll $4,%ecx + movups 16(%edx),%xmm1 +@@ -343,6 +352,7 @@ _aesni_encrypt6: + .type _aesni_decrypt6,@function + .align 16 + _aesni_decrypt6: ++.byte 243,15,30,251 + movups (%edx),%xmm0 + shll $4,%ecx + movups 16(%edx),%xmm1 +@@ -399,6 +409,7 @@ _aesni_decrypt6: + .align 16 + aesni_ecb_encrypt: + .L_aesni_ecb_encrypt_begin: ++.byte 243,15,30,251 + pushl %ebp + pushl %ebx + pushl %esi +@@ -634,6 +645,7 @@ aesni_ecb_encrypt: + .align 16 + aesni_ccm64_encrypt_blocks: + .L_aesni_ccm64_encrypt_blocks_begin: ++.byte 243,15,30,251 + pushl %ebp + pushl %ebx + pushl %esi +@@ -722,6 +734,7 @@ aesni_ccm64_encrypt_blocks: + .align 16 + aesni_ccm64_decrypt_blocks: + .L_aesni_ccm64_decrypt_blocks_begin: ++.byte 243,15,30,251 + pushl %ebp + pushl %ebx + pushl %esi +@@ -845,6 +858,7 @@ aesni_ccm64_decrypt_blocks: + .align 16 + aesni_ctr32_encrypt_blocks: + .L_aesni_ctr32_encrypt_blocks_begin: ++.byte 243,15,30,251 + pushl %ebp + pushl %ebx + pushl %esi +@@ -1083,6 +1097,7 @@ aesni_ctr32_encrypt_blocks: + .align 16 + aesni_xts_encrypt: + .L_aesni_xts_encrypt_begin: ++.byte 243,15,30,251 + pushl %ebp + pushl %ebx + pushl %esi +@@ -1443,6 +1458,7 @@ aesni_xts_encrypt: + .align 16 + aesni_xts_decrypt: + .L_aesni_xts_decrypt_begin: ++.byte 243,15,30,251 + pushl %ebp + pushl %ebx + pushl %esi +@@ -1833,6 +1849,7 @@ aesni_xts_decrypt: + .align 16 + aesni_ocb_encrypt: + .L_aesni_ocb_encrypt_begin: ++.byte 243,15,30,251 + pushl %ebp + pushl %ebx + pushl %esi +@@ -2228,6 +2245,7 @@ aesni_ocb_encrypt: + .align 16 + aesni_ocb_decrypt: + 
.L_aesni_ocb_decrypt_begin: ++.byte 243,15,30,251 + pushl %ebp + pushl %ebx + pushl %esi +@@ -2623,6 +2641,7 @@ aesni_ocb_decrypt: + .align 16 + aesni_cbc_encrypt: + .L_aesni_cbc_encrypt_begin: ++.byte 243,15,30,251 + pushl %ebp + pushl %ebx + pushl %esi +@@ -2882,6 +2901,7 @@ aesni_cbc_encrypt: + .type _aesni_set_encrypt_key,@function + .align 16 + _aesni_set_encrypt_key: ++.byte 243,15,30,251 + pushl %ebp + pushl %ebx + testl %eax,%eax +@@ -3217,6 +3237,7 @@ _aesni_set_encrypt_key: + .align 16 + aesni_set_encrypt_key: + .L_aesni_set_encrypt_key_begin: ++.byte 243,15,30,251 + movl 4(%esp),%eax + movl 8(%esp),%ecx + movl 12(%esp),%edx +@@ -3228,6 +3249,7 @@ aesni_set_encrypt_key: + .align 16 + aesni_set_decrypt_key: + .L_aesni_set_decrypt_key_begin: ++.byte 243,15,30,251 + movl 4(%esp),%eax + movl 8(%esp),%ecx + movl 12(%esp),%edx +@@ -3275,4 +3297,21 @@ aesni_set_decrypt_key: + .byte 115,108,46,111,114,103,62,0 + .comm _gnutls_x86_cpuid_s,16,4 + ++ .section ".note.gnu.property", "a" ++ .p2align 2 ++ .long 1f - 0f ++ .long 4f - 1f ++ .long 5 ++0: ++ .asciz "GNU" ++1: ++ .p2align 2 ++ .long 0xc0000002 ++ .long 3f - 2f ++2: ++ .long 3 ++3: ++ .p2align 2 ++4: ++ + .section .note.GNU-stack,"",%progbits +diff --git a/lib/accelerated/x86/elf/aesni-x86_64.s b/lib/accelerated/x86/elf/aesni-x86_64.s +index acc7c2c555..e3f9d5a995 100644 +--- a/lib/accelerated/x86/elf/aesni-x86_64.s ++++ b/lib/accelerated/x86/elf/aesni-x86_64.s +@@ -44,6 +44,7 @@ + .align 16 + aesni_encrypt: + .cfi_startproc ++.byte 243,15,30,250 + movups (%rdi),%xmm2 + movl 240(%rdx),%eax + movups (%rdx),%xmm0 +@@ -70,6 +71,7 @@ aesni_encrypt: + .align 16 + aesni_decrypt: + .cfi_startproc ++.byte 243,15,30,250 + movups (%rdi),%xmm2 + movl 240(%rdx),%eax + movups (%rdx),%xmm0 +@@ -557,6 +559,7 @@ _aesni_decrypt8: + .align 16 + aesni_ecb_encrypt: + .cfi_startproc ++.byte 243,15,30,250 + andq $-16,%rdx + jz .Lecb_ret + +@@ -901,6 +904,7 @@ aesni_ecb_encrypt: + .align 16 + aesni_ccm64_encrypt_blocks: + .cfi_startproc ++.byte 243,15,30,250 + movl 240(%rcx),%eax + movdqu (%r8),%xmm6 + movdqa .Lincrement64(%rip),%xmm9 +@@ -966,6 +970,7 @@ aesni_ccm64_encrypt_blocks: + .align 16 + aesni_ccm64_decrypt_blocks: + .cfi_startproc ++.byte 243,15,30,250 + movl 240(%rcx),%eax + movups (%r8),%xmm6 + movdqu (%r9),%xmm3 +@@ -1065,6 +1070,7 @@ aesni_ccm64_decrypt_blocks: + .align 16 + aesni_ctr32_encrypt_blocks: + .cfi_startproc ++.byte 243,15,30,250 + cmpq $1,%rdx + jne .Lctr32_bulk + +@@ -1643,6 +1649,7 @@ aesni_ctr32_encrypt_blocks: + .align 16 + aesni_xts_encrypt: + .cfi_startproc ++.byte 243,15,30,250 + leaq (%rsp),%r11 + .cfi_def_cfa_register %r11 + pushq %rbp +@@ -2113,6 +2120,7 @@ aesni_xts_encrypt: + .align 16 + aesni_xts_decrypt: + .cfi_startproc ++.byte 243,15,30,250 + leaq (%rsp),%r11 + .cfi_def_cfa_register %r11 + pushq %rbp +@@ -2620,6 +2628,7 @@ aesni_xts_decrypt: + .align 32 + aesni_ocb_encrypt: + .cfi_startproc ++.byte 243,15,30,250 + leaq (%rsp),%rax + pushq %rbx + .cfi_adjust_cfa_offset 8 +@@ -3047,6 +3056,7 @@ __ocb_encrypt1: + .align 32 + aesni_ocb_decrypt: + .cfi_startproc ++.byte 243,15,30,250 + leaq (%rsp),%rax + pushq %rbx + .cfi_adjust_cfa_offset 8 +@@ -3484,6 +3494,7 @@ __ocb_decrypt1: + .align 16 + aesni_cbc_encrypt: + .cfi_startproc ++.byte 243,15,30,250 + testq %rdx,%rdx + jz .Lcbc_ret + +@@ -4511,5 +4522,26 @@ __aesni_set_encrypt_key: + + .byte 
65,69,83,32,102,111,114,32,73,110,116,101,108,32,65,69,83,45,78,73,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 + .align 64 ++ .section ".note.gnu.property", "a" ++ .p2align 3 ++ .long 1f - 0f ++ .long 4f - 1f ++ .long 5 ++0: ++ # "GNU" encoded with .byte, since .asciz isn't supported ++ # on Solaris. ++ .byte 0x47 ++ .byte 0x4e ++ .byte 0x55 ++ .byte 0 ++1: ++ .p2align 3 ++ .long 0xc0000002 ++ .long 3f - 2f ++2: ++ .long 3 ++3: ++ .p2align 3 ++4: + + .section .note.GNU-stack,"",%progbits +diff --git a/lib/accelerated/x86/elf/ghash-x86_64.s b/lib/accelerated/x86/elf/ghash-x86_64.s +index 1e4d18b341..8da3f294c7 100644 +--- a/lib/accelerated/x86/elf/ghash-x86_64.s ++++ b/lib/accelerated/x86/elf/ghash-x86_64.s +@@ -45,6 +45,7 @@ + .align 16 + gcm_gmult_4bit: + .cfi_startproc ++.byte 243,15,30,250 + pushq %rbx + .cfi_adjust_cfa_offset 8 + .cfi_offset %rbx,-16 +@@ -156,6 +157,7 @@ gcm_gmult_4bit: + .align 16 + gcm_ghash_4bit: + .cfi_startproc ++.byte 243,15,30,250 + pushq %rbx + .cfi_adjust_cfa_offset 8 + .cfi_offset %rbx,-16 +@@ -903,6 +905,7 @@ gcm_init_clmul: + .align 16 + gcm_gmult_clmul: + .cfi_startproc ++.byte 243,15,30,250 + .L_gmult_clmul: + movdqu (%rdi),%xmm0 + movdqa .Lbswap_mask(%rip),%xmm5 +@@ -956,6 +959,7 @@ gcm_gmult_clmul: + .align 32 + gcm_ghash_clmul: + .cfi_startproc ++.byte 243,15,30,250 + .L_ghash_clmul: + movdqa .Lbswap_mask(%rip),%xmm10 + +@@ -1450,6 +1454,7 @@ gcm_init_avx: + .align 32 + gcm_gmult_avx: + .cfi_startproc ++.byte 243,15,30,250 + jmp .L_gmult_clmul + .cfi_endproc + .size gcm_gmult_avx,.-gcm_gmult_avx +@@ -1458,6 +1463,7 @@ gcm_gmult_avx: + .align 32 + gcm_ghash_avx: + .cfi_startproc ++.byte 243,15,30,250 + vzeroupper + + vmovdqu (%rdi),%xmm10 +@@ -1884,5 +1890,26 @@ gcm_ghash_avx: + + .byte 71,72,65,83,72,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 + .align 64 ++ .section ".note.gnu.property", "a" ++ .p2align 3 ++ .long 1f - 0f ++ .long 4f - 1f ++ .long 5 ++0: ++ # "GNU" encoded with .byte, since .asciz isn't supported ++ # on Solaris. 
++ .byte 0x47 ++ .byte 0x4e ++ .byte 0x55 ++ .byte 0 ++1: ++ .p2align 3 ++ .long 0xc0000002 ++ .long 3f - 2f ++2: ++ .long 3 ++3: ++ .p2align 3 ++4: + + .section .note.GNU-stack,"",%progbits +diff --git a/lib/accelerated/x86/elf/sha1-ssse3-x86.s b/lib/accelerated/x86/elf/sha1-ssse3-x86.s +index 8bfbcb6b39..57b6ba58f6 100644 +--- a/lib/accelerated/x86/elf/sha1-ssse3-x86.s ++++ b/lib/accelerated/x86/elf/sha1-ssse3-x86.s +@@ -43,6 +43,7 @@ + .align 16 + sha1_block_data_order: + .L_sha1_block_data_order_begin: ++.byte 243,15,30,251 + pushl %ebp + pushl %ebx + pushl %esi +@@ -1417,4 +1418,21 @@ sha1_block_data_order: + .byte 89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112 + .byte 114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 + ++ .section ".note.gnu.property", "a" ++ .p2align 2 ++ .long 1f - 0f ++ .long 4f - 1f ++ .long 5 ++0: ++ .asciz "GNU" ++1: ++ .p2align 2 ++ .long 0xc0000002 ++ .long 3f - 2f ++2: ++ .long 3 ++3: ++ .p2align 2 ++4: ++ + .section .note.GNU-stack,"",%progbits +diff --git a/lib/accelerated/x86/elf/sha1-ssse3-x86_64.s b/lib/accelerated/x86/elf/sha1-ssse3-x86_64.s +index d34f34497c..54095050c8 100644 +--- a/lib/accelerated/x86/elf/sha1-ssse3-x86_64.s ++++ b/lib/accelerated/x86/elf/sha1-ssse3-x86_64.s +@@ -5487,5 +5487,26 @@ K_XX_XX: + .byte 0xf,0xe,0xd,0xc,0xb,0xa,0x9,0x8,0x7,0x6,0x5,0x4,0x3,0x2,0x1,0x0 + .byte 83,72,65,49,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 + .align 64 ++ .section ".note.gnu.property", "a" ++ .p2align 3 ++ .long 1f - 0f ++ .long 4f - 1f ++ .long 5 ++0: ++ # "GNU" encoded with .byte, since .asciz isn't supported ++ # on Solaris. ++ .byte 0x47 ++ .byte 0x4e ++ .byte 0x55 ++ .byte 0 ++1: ++ .p2align 3 ++ .long 0xc0000002 ++ .long 3f - 2f ++2: ++ .long 3 ++3: ++ .p2align 3 ++4: + + .section .note.GNU-stack,"",%progbits +diff --git a/lib/accelerated/x86/elf/sha256-ssse3-x86.s b/lib/accelerated/x86/elf/sha256-ssse3-x86.s +index 8d9aaa4a81..6d16b9140e 100644 +--- a/lib/accelerated/x86/elf/sha256-ssse3-x86.s ++++ b/lib/accelerated/x86/elf/sha256-ssse3-x86.s +@@ -43,6 +43,7 @@ + .align 16 + sha256_block_data_order: + .L_sha256_block_data_order_begin: ++.byte 243,15,30,251 + pushl %ebp + pushl %ebx + pushl %esi +@@ -3384,4 +3385,21 @@ sha256_block_data_order: + ret + .size sha256_block_data_order,.-.L_sha256_block_data_order_begin + ++ .section ".note.gnu.property", "a" ++ .p2align 2 ++ .long 1f - 0f ++ .long 4f - 1f ++ .long 5 ++0: ++ .asciz "GNU" ++1: ++ .p2align 2 ++ .long 0xc0000002 ++ .long 3f - 2f ++2: ++ .long 3 ++3: ++ .p2align 2 ++4: ++ + .section .note.GNU-stack,"",%progbits +diff --git a/lib/accelerated/x86/elf/sha256-ssse3-x86_64.s b/lib/accelerated/x86/elf/sha256-ssse3-x86_64.s +index d196c6a793..1514ee45c0 100644 +--- a/lib/accelerated/x86/elf/sha256-ssse3-x86_64.s ++++ b/lib/accelerated/x86/elf/sha256-ssse3-x86_64.s +@@ -5493,5 +5493,26 @@ sha256_block_data_order_avx2: + .byte 0xf3,0xc3 + .cfi_endproc + .size sha256_block_data_order_avx2,.-sha256_block_data_order_avx2 ++ .section ".note.gnu.property", "a" ++ .p2align 3 ++ .long 1f - 0f ++ .long 4f - 1f ++ .long 5 ++0: ++ # "GNU" encoded with .byte, since .asciz isn't supported ++ # on Solaris. 
++ .byte 0x47 ++ .byte 0x4e ++ .byte 0x55 ++ .byte 0 ++1: ++ .p2align 3 ++ .long 0xc0000002 ++ .long 3f - 2f ++2: ++ .long 3 ++3: ++ .p2align 3 ++4: + + .section .note.GNU-stack,"",%progbits +diff --git a/lib/accelerated/x86/elf/sha512-ssse3-x86.s b/lib/accelerated/x86/elf/sha512-ssse3-x86.s +index 481c777154..afca4eae7b 100644 +--- a/lib/accelerated/x86/elf/sha512-ssse3-x86.s ++++ b/lib/accelerated/x86/elf/sha512-ssse3-x86.s +@@ -43,6 +43,7 @@ + .align 16 + sha512_block_data_order: + .L_sha512_block_data_order_begin: ++.byte 243,15,30,251 + pushl %ebp + pushl %ebx + pushl %esi +@@ -602,4 +603,21 @@ sha512_block_data_order: + .byte 112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103 + .byte 62,0 + ++ .section ".note.gnu.property", "a" ++ .p2align 2 ++ .long 1f - 0f ++ .long 4f - 1f ++ .long 5 ++0: ++ .asciz "GNU" ++1: ++ .p2align 2 ++ .long 0xc0000002 ++ .long 3f - 2f ++2: ++ .long 3 ++3: ++ .p2align 2 ++4: ++ + .section .note.GNU-stack,"",%progbits +diff --git a/lib/accelerated/x86/elf/sha512-ssse3-x86_64.s b/lib/accelerated/x86/elf/sha512-ssse3-x86_64.s +index 446c06a3e6..a7be2cd444 100644 +--- a/lib/accelerated/x86/elf/sha512-ssse3-x86_64.s ++++ b/lib/accelerated/x86/elf/sha512-ssse3-x86_64.s +@@ -5498,5 +5498,26 @@ sha512_block_data_order_avx2: + .byte 0xf3,0xc3 + .cfi_endproc + .size sha512_block_data_order_avx2,.-sha512_block_data_order_avx2 ++ .section ".note.gnu.property", "a" ++ .p2align 3 ++ .long 1f - 0f ++ .long 4f - 1f ++ .long 5 ++0: ++ # "GNU" encoded with .byte, since .asciz isn't supported ++ # on Solaris. ++ .byte 0x47 ++ .byte 0x4e ++ .byte 0x55 ++ .byte 0 ++1: ++ .p2align 3 ++ .long 0xc0000002 ++ .long 3f - 2f ++2: ++ .long 3 ++3: ++ .p2align 3 ++4: + + .section .note.GNU-stack,"",%progbits +-- +2.31.1 + +diff --git a/lib/accelerated/x86/elf/e_padlock-x86.s b/lib/accelerated/x86/elf/e_padlock-x86.s +index ed8681ee4..dd56518f6 100644 +--- a/lib/accelerated/x86/elf/e_padlock-x86.s ++++ b/lib/accelerated/x86/elf/e_padlock-x86.s +@@ -1,4 +1,4 @@ +-# Copyright (c) 2011-2013, Andy Polyakov ++# Copyright (c) 2011-2016, Andy Polyakov + # All rights reserved. 
+ # + # Redistribution and use in source and binary forms, with or without +@@ -37,13 +37,13 @@ + # + # *** This file is auto-generated *** + # +-.file "devel/perlasm/e_padlock-x86.s" + .text + .globl padlock_capability + .type padlock_capability,@function + .align 16 + padlock_capability: + .L_padlock_capability_begin: ++.byte 243,15,30,251 + pushl %ebx + pushfl + popl %eax +@@ -60,11 +60,20 @@ padlock_capability: + .byte 0x0f,0xa2 + xorl %eax,%eax + cmpl $0x746e6543,%ebx +- jne .L000noluck ++ jne .L001zhaoxin + cmpl $0x48727561,%edx + jne .L000noluck + cmpl $0x736c7561,%ecx + jne .L000noluck ++ jmp .L002zhaoxinEnd ++.L001zhaoxin: ++ cmpl $0x68532020,%ebx ++ jne .L000noluck ++ cmpl $0x68676e61,%edx ++ jne .L000noluck ++ cmpl $0x20206961,%ecx ++ jne .L000noluck ++.L002zhaoxinEnd: + movl $3221225472,%eax + .byte 0x0f,0xa2 + movl %eax,%edx +@@ -95,15 +104,16 @@ padlock_capability: + .align 16 + padlock_key_bswap: + .L_padlock_key_bswap_begin: ++.byte 243,15,30,251 + movl 4(%esp),%edx + movl 240(%edx),%ecx +-.L001bswap_loop: ++.L003bswap_loop: + movl (%edx),%eax + bswap %eax + movl %eax,(%edx) + leal 4(%edx),%edx + subl $1,%ecx +- jnz .L001bswap_loop ++ jnz .L003bswap_loop + ret + .size padlock_key_bswap,.-.L_padlock_key_bswap_begin + .globl padlock_verify_context +@@ -111,25 +121,27 @@ padlock_key_bswap: + .align 16 + padlock_verify_context: + .L_padlock_verify_context_begin: ++.byte 243,15,30,251 + movl 4(%esp),%edx +- leal .Lpadlock_saved_context-.L002verify_pic_point,%eax ++ leal .Lpadlock_saved_context-.L004verify_pic_point,%eax + pushfl + call _padlock_verify_ctx +-.L002verify_pic_point: ++.L004verify_pic_point: + leal 4(%esp),%esp + ret + .size padlock_verify_context,.-.L_padlock_verify_context_begin + .type _padlock_verify_ctx,@function + .align 16 + _padlock_verify_ctx: ++.byte 243,15,30,251 + addl (%esp),%eax + btl $30,4(%esp) +- jnc .L003verified ++ jnc .L005verified + cmpl (%eax),%edx +- je .L003verified ++ je .L005verified + pushfl + popfl +-.L003verified: ++.L005verified: + movl %edx,(%eax) + ret + .size _padlock_verify_ctx,.-_padlock_verify_ctx +@@ -138,6 +150,7 @@ _padlock_verify_ctx: + .align 16 + padlock_reload_key: + .L_padlock_reload_key_begin: ++.byte 243,15,30,251 + pushfl + popfl + ret +@@ -147,6 +160,7 @@ padlock_reload_key: + .align 16 + padlock_aes_block: + .L_padlock_aes_block_begin: ++.byte 243,15,30,251 + pushl %edi + pushl %esi + pushl %ebx +@@ -167,6 +181,7 @@ padlock_aes_block: + .align 16 + padlock_ecb_encrypt: + .L_padlock_ecb_encrypt_begin: ++.byte 243,15,30,251 + pushl %ebp + pushl %ebx + pushl %esi +@@ -176,25 +191,25 @@ padlock_ecb_encrypt: + movl 28(%esp),%edx + movl 32(%esp),%ecx + testl $15,%edx +- jnz .L004ecb_abort ++ jnz .L006ecb_abort + testl $15,%ecx +- jnz .L004ecb_abort +- leal .Lpadlock_saved_context-.L005ecb_pic_point,%eax ++ jnz .L006ecb_abort ++ leal .Lpadlock_saved_context-.L007ecb_pic_point,%eax + pushfl + cld + call _padlock_verify_ctx +-.L005ecb_pic_point: ++.L007ecb_pic_point: + leal 16(%edx),%edx + xorl %eax,%eax + xorl %ebx,%ebx + testl $32,(%edx) +- jnz .L006ecb_aligned ++ jnz .L008ecb_aligned + testl $15,%edi + setz %al + testl $15,%esi + setz %bl + testl %ebx,%eax +- jnz .L006ecb_aligned ++ jnz .L008ecb_aligned + negl %eax + movl $512,%ebx + notl %eax +@@ -213,7 +228,7 @@ padlock_ecb_encrypt: + andl $-16,%esp + movl %eax,16(%ebp) + cmpl %ebx,%ecx +- ja .L007ecb_loop ++ ja .L009ecb_loop + movl %esi,%eax + cmpl %esp,%ebp + cmovel %edi,%eax +@@ -224,10 +239,10 @@ padlock_ecb_encrypt: + movl $-128,%eax + cmovael %ebx,%eax + andl 
%eax,%ebx +- jz .L008ecb_unaligned_tail +- jmp .L007ecb_loop ++ jz .L010ecb_unaligned_tail ++ jmp .L009ecb_loop + .align 16 +-.L007ecb_loop: ++.L009ecb_loop: + movl %edi,(%ebp) + movl %esi,4(%ebp) + movl %ecx,8(%ebp) +@@ -236,13 +251,13 @@ padlock_ecb_encrypt: + testl $15,%edi + cmovnzl %esp,%edi + testl $15,%esi +- jz .L009ecb_inp_aligned ++ jz .L011ecb_inp_aligned + shrl $2,%ecx + .byte 243,165 + subl %ebx,%edi + movl %ebx,%ecx + movl %edi,%esi +-.L009ecb_inp_aligned: ++.L011ecb_inp_aligned: + leal -16(%edx),%eax + leal 16(%edx),%ebx + shrl $4,%ecx +@@ -250,23 +265,23 @@ padlock_ecb_encrypt: + movl (%ebp),%edi + movl 12(%ebp),%ebx + testl $15,%edi +- jz .L010ecb_out_aligned ++ jz .L012ecb_out_aligned + movl %ebx,%ecx + leal (%esp),%esi + shrl $2,%ecx + .byte 243,165 + subl %ebx,%edi +-.L010ecb_out_aligned: ++.L012ecb_out_aligned: + movl 4(%ebp),%esi + movl 8(%ebp),%ecx + addl %ebx,%edi + addl %ebx,%esi + subl %ebx,%ecx + movl $512,%ebx +- jz .L011ecb_break ++ jz .L013ecb_break + cmpl %ebx,%ecx +- jae .L007ecb_loop +-.L008ecb_unaligned_tail: ++ jae .L009ecb_loop ++.L010ecb_unaligned_tail: + xorl %eax,%eax + cmpl %ebp,%esp + cmovel %ecx,%eax +@@ -279,24 +294,24 @@ padlock_ecb_encrypt: + movl %esp,%esi + movl %eax,%edi + movl %ebx,%ecx +- jmp .L007ecb_loop ++ jmp .L009ecb_loop + .align 16 +-.L011ecb_break: ++.L013ecb_break: + cmpl %ebp,%esp +- je .L012ecb_done ++ je .L014ecb_done + pxor %xmm0,%xmm0 + leal (%esp),%eax +-.L013ecb_bzero: ++.L015ecb_bzero: + movaps %xmm0,(%eax) + leal 16(%eax),%eax + cmpl %eax,%ebp +- ja .L013ecb_bzero +-.L012ecb_done: ++ ja .L015ecb_bzero ++.L014ecb_done: + movl 16(%ebp),%ebp + leal 24(%ebp),%esp +- jmp .L014ecb_exit ++ jmp .L016ecb_exit + .align 16 +-.L006ecb_aligned: ++.L008ecb_aligned: + leal (%esi,%ecx,1),%ebp + negl %ebp + andl $4095,%ebp +@@ -306,14 +321,14 @@ padlock_ecb_encrypt: + cmovael %eax,%ebp + andl %ecx,%ebp + subl %ebp,%ecx +- jz .L015ecb_aligned_tail ++ jz .L017ecb_aligned_tail + leal -16(%edx),%eax + leal 16(%edx),%ebx + shrl $4,%ecx + .byte 243,15,167,200 + testl %ebp,%ebp +- jz .L014ecb_exit +-.L015ecb_aligned_tail: ++ jz .L016ecb_exit ++.L017ecb_aligned_tail: + movl %ebp,%ecx + leal -24(%esp),%ebp + movl %ebp,%esp +@@ -330,11 +345,11 @@ padlock_ecb_encrypt: + movl %esp,%esi + movl %eax,%edi + movl %ebx,%ecx +- jmp .L007ecb_loop +-.L014ecb_exit: ++ jmp .L009ecb_loop ++.L016ecb_exit: + movl $1,%eax + leal 4(%esp),%esp +-.L004ecb_abort: ++.L006ecb_abort: + popl %edi + popl %esi + popl %ebx +@@ -346,6 +361,7 @@ padlock_ecb_encrypt: + .align 16 + padlock_cbc_encrypt: + .L_padlock_cbc_encrypt_begin: ++.byte 243,15,30,251 + pushl %ebp + pushl %ebx + pushl %esi +@@ -355,25 +371,25 @@ padlock_cbc_encrypt: + movl 28(%esp),%edx + movl 32(%esp),%ecx + testl $15,%edx +- jnz .L016cbc_abort ++ jnz .L018cbc_abort + testl $15,%ecx +- jnz .L016cbc_abort +- leal .Lpadlock_saved_context-.L017cbc_pic_point,%eax ++ jnz .L018cbc_abort ++ leal .Lpadlock_saved_context-.L019cbc_pic_point,%eax + pushfl + cld + call _padlock_verify_ctx +-.L017cbc_pic_point: ++.L019cbc_pic_point: + leal 16(%edx),%edx + xorl %eax,%eax + xorl %ebx,%ebx + testl $32,(%edx) +- jnz .L018cbc_aligned ++ jnz .L020cbc_aligned + testl $15,%edi + setz %al + testl $15,%esi + setz %bl + testl %ebx,%eax +- jnz .L018cbc_aligned ++ jnz .L020cbc_aligned + negl %eax + movl $512,%ebx + notl %eax +@@ -392,7 +408,7 @@ padlock_cbc_encrypt: + andl $-16,%esp + movl %eax,16(%ebp) + cmpl %ebx,%ecx +- ja .L019cbc_loop ++ ja .L021cbc_loop + movl %esi,%eax + cmpl %esp,%ebp + cmovel %edi,%eax +@@ -403,10 +419,10 @@ 
padlock_cbc_encrypt: + movl $-64,%eax + cmovael %ebx,%eax + andl %eax,%ebx +- jz .L020cbc_unaligned_tail +- jmp .L019cbc_loop ++ jz .L022cbc_unaligned_tail ++ jmp .L021cbc_loop + .align 16 +-.L019cbc_loop: ++.L021cbc_loop: + movl %edi,(%ebp) + movl %esi,4(%ebp) + movl %ecx,8(%ebp) +@@ -415,13 +431,13 @@ padlock_cbc_encrypt: + testl $15,%edi + cmovnzl %esp,%edi + testl $15,%esi +- jz .L021cbc_inp_aligned ++ jz .L023cbc_inp_aligned + shrl $2,%ecx + .byte 243,165 + subl %ebx,%edi + movl %ebx,%ecx + movl %edi,%esi +-.L021cbc_inp_aligned: ++.L023cbc_inp_aligned: + leal -16(%edx),%eax + leal 16(%edx),%ebx + shrl $4,%ecx +@@ -431,23 +447,23 @@ padlock_cbc_encrypt: + movl (%ebp),%edi + movl 12(%ebp),%ebx + testl $15,%edi +- jz .L022cbc_out_aligned ++ jz .L024cbc_out_aligned + movl %ebx,%ecx + leal (%esp),%esi + shrl $2,%ecx + .byte 243,165 + subl %ebx,%edi +-.L022cbc_out_aligned: ++.L024cbc_out_aligned: + movl 4(%ebp),%esi + movl 8(%ebp),%ecx + addl %ebx,%edi + addl %ebx,%esi + subl %ebx,%ecx + movl $512,%ebx +- jz .L023cbc_break ++ jz .L025cbc_break + cmpl %ebx,%ecx +- jae .L019cbc_loop +-.L020cbc_unaligned_tail: ++ jae .L021cbc_loop ++.L022cbc_unaligned_tail: + xorl %eax,%eax + cmpl %ebp,%esp + cmovel %ecx,%eax +@@ -460,24 +476,24 @@ padlock_cbc_encrypt: + movl %esp,%esi + movl %eax,%edi + movl %ebx,%ecx +- jmp .L019cbc_loop ++ jmp .L021cbc_loop + .align 16 +-.L023cbc_break: ++.L025cbc_break: + cmpl %ebp,%esp +- je .L024cbc_done ++ je .L026cbc_done + pxor %xmm0,%xmm0 + leal (%esp),%eax +-.L025cbc_bzero: ++.L027cbc_bzero: + movaps %xmm0,(%eax) + leal 16(%eax),%eax + cmpl %eax,%ebp +- ja .L025cbc_bzero +-.L024cbc_done: ++ ja .L027cbc_bzero ++.L026cbc_done: + movl 16(%ebp),%ebp + leal 24(%ebp),%esp +- jmp .L026cbc_exit ++ jmp .L028cbc_exit + .align 16 +-.L018cbc_aligned: ++.L020cbc_aligned: + leal (%esi,%ecx,1),%ebp + negl %ebp + andl $4095,%ebp +@@ -487,7 +503,7 @@ padlock_cbc_encrypt: + cmovael %eax,%ebp + andl %ecx,%ebp + subl %ebp,%ecx +- jz .L027cbc_aligned_tail ++ jz .L029cbc_aligned_tail + leal -16(%edx),%eax + leal 16(%edx),%ebx + shrl $4,%ecx +@@ -495,8 +511,8 @@ padlock_cbc_encrypt: + movaps (%eax),%xmm0 + movaps %xmm0,-16(%edx) + testl %ebp,%ebp +- jz .L026cbc_exit +-.L027cbc_aligned_tail: ++ jz .L028cbc_exit ++.L029cbc_aligned_tail: + movl %ebp,%ecx + leal -24(%esp),%ebp + movl %ebp,%esp +@@ -513,11 +529,11 @@ padlock_cbc_encrypt: + movl %esp,%esi + movl %eax,%edi + movl %ebx,%ecx +- jmp .L019cbc_loop +-.L026cbc_exit: ++ jmp .L021cbc_loop ++.L028cbc_exit: + movl $1,%eax + leal 4(%esp),%esp +-.L016cbc_abort: ++.L018cbc_abort: + popl %edi + popl %esi + popl %ebx +@@ -529,6 +545,7 @@ padlock_cbc_encrypt: + .align 16 + padlock_cfb_encrypt: + .L_padlock_cfb_encrypt_begin: ++.byte 243,15,30,251 + pushl %ebp + pushl %ebx + pushl %esi +@@ -538,25 +555,25 @@ padlock_cfb_encrypt: + movl 28(%esp),%edx + movl 32(%esp),%ecx + testl $15,%edx +- jnz .L028cfb_abort ++ jnz .L030cfb_abort + testl $15,%ecx +- jnz .L028cfb_abort +- leal .Lpadlock_saved_context-.L029cfb_pic_point,%eax ++ jnz .L030cfb_abort ++ leal .Lpadlock_saved_context-.L031cfb_pic_point,%eax + pushfl + cld + call _padlock_verify_ctx +-.L029cfb_pic_point: ++.L031cfb_pic_point: + leal 16(%edx),%edx + xorl %eax,%eax + xorl %ebx,%ebx + testl $32,(%edx) +- jnz .L030cfb_aligned ++ jnz .L032cfb_aligned + testl $15,%edi + setz %al + testl $15,%esi + setz %bl + testl %ebx,%eax +- jnz .L030cfb_aligned ++ jnz .L032cfb_aligned + negl %eax + movl $512,%ebx + notl %eax +@@ -574,9 +591,9 @@ padlock_cfb_encrypt: + andl $-16,%ebp + andl $-16,%esp + movl 
%eax,16(%ebp) +- jmp .L031cfb_loop ++ jmp .L033cfb_loop + .align 16 +-.L031cfb_loop: ++.L033cfb_loop: + movl %edi,(%ebp) + movl %esi,4(%ebp) + movl %ecx,8(%ebp) +@@ -585,13 +602,13 @@ padlock_cfb_encrypt: + testl $15,%edi + cmovnzl %esp,%edi + testl $15,%esi +- jz .L032cfb_inp_aligned ++ jz .L034cfb_inp_aligned + shrl $2,%ecx + .byte 243,165 + subl %ebx,%edi + movl %ebx,%ecx + movl %edi,%esi +-.L032cfb_inp_aligned: ++.L034cfb_inp_aligned: + leal -16(%edx),%eax + leal 16(%edx),%ebx + shrl $4,%ecx +@@ -601,45 +618,45 @@ padlock_cfb_encrypt: + movl (%ebp),%edi + movl 12(%ebp),%ebx + testl $15,%edi +- jz .L033cfb_out_aligned ++ jz .L035cfb_out_aligned + movl %ebx,%ecx + leal (%esp),%esi + shrl $2,%ecx + .byte 243,165 + subl %ebx,%edi +-.L033cfb_out_aligned: ++.L035cfb_out_aligned: + movl 4(%ebp),%esi + movl 8(%ebp),%ecx + addl %ebx,%edi + addl %ebx,%esi + subl %ebx,%ecx + movl $512,%ebx +- jnz .L031cfb_loop ++ jnz .L033cfb_loop + cmpl %ebp,%esp +- je .L034cfb_done ++ je .L036cfb_done + pxor %xmm0,%xmm0 + leal (%esp),%eax +-.L035cfb_bzero: ++.L037cfb_bzero: + movaps %xmm0,(%eax) + leal 16(%eax),%eax + cmpl %eax,%ebp +- ja .L035cfb_bzero +-.L034cfb_done: ++ ja .L037cfb_bzero ++.L036cfb_done: + movl 16(%ebp),%ebp + leal 24(%ebp),%esp +- jmp .L036cfb_exit ++ jmp .L038cfb_exit + .align 16 +-.L030cfb_aligned: ++.L032cfb_aligned: + leal -16(%edx),%eax + leal 16(%edx),%ebx + shrl $4,%ecx + .byte 243,15,167,224 + movaps (%eax),%xmm0 + movaps %xmm0,-16(%edx) +-.L036cfb_exit: ++.L038cfb_exit: + movl $1,%eax + leal 4(%esp),%esp +-.L028cfb_abort: ++.L030cfb_abort: + popl %edi + popl %esi + popl %ebx +@@ -651,6 +668,7 @@ padlock_cfb_encrypt: + .align 16 + padlock_ofb_encrypt: + .L_padlock_ofb_encrypt_begin: ++.byte 243,15,30,251 + pushl %ebp + pushl %ebx + pushl %esi +@@ -660,25 +678,25 @@ padlock_ofb_encrypt: + movl 28(%esp),%edx + movl 32(%esp),%ecx + testl $15,%edx +- jnz .L037ofb_abort ++ jnz .L039ofb_abort + testl $15,%ecx +- jnz .L037ofb_abort +- leal .Lpadlock_saved_context-.L038ofb_pic_point,%eax ++ jnz .L039ofb_abort ++ leal .Lpadlock_saved_context-.L040ofb_pic_point,%eax + pushfl + cld + call _padlock_verify_ctx +-.L038ofb_pic_point: ++.L040ofb_pic_point: + leal 16(%edx),%edx + xorl %eax,%eax + xorl %ebx,%ebx + testl $32,(%edx) +- jnz .L039ofb_aligned ++ jnz .L041ofb_aligned + testl $15,%edi + setz %al + testl $15,%esi + setz %bl + testl %ebx,%eax +- jnz .L039ofb_aligned ++ jnz .L041ofb_aligned + negl %eax + movl $512,%ebx + notl %eax +@@ -696,9 +714,9 @@ padlock_ofb_encrypt: + andl $-16,%ebp + andl $-16,%esp + movl %eax,16(%ebp) +- jmp .L040ofb_loop ++ jmp .L042ofb_loop + .align 16 +-.L040ofb_loop: ++.L042ofb_loop: + movl %edi,(%ebp) + movl %esi,4(%ebp) + movl %ecx,8(%ebp) +@@ -707,13 +725,13 @@ padlock_ofb_encrypt: + testl $15,%edi + cmovnzl %esp,%edi + testl $15,%esi +- jz .L041ofb_inp_aligned ++ jz .L043ofb_inp_aligned + shrl $2,%ecx + .byte 243,165 + subl %ebx,%edi + movl %ebx,%ecx + movl %edi,%esi +-.L041ofb_inp_aligned: ++.L043ofb_inp_aligned: + leal -16(%edx),%eax + leal 16(%edx),%ebx + shrl $4,%ecx +@@ -723,45 +741,45 @@ padlock_ofb_encrypt: + movl (%ebp),%edi + movl 12(%ebp),%ebx + testl $15,%edi +- jz .L042ofb_out_aligned ++ jz .L044ofb_out_aligned + movl %ebx,%ecx + leal (%esp),%esi + shrl $2,%ecx + .byte 243,165 + subl %ebx,%edi +-.L042ofb_out_aligned: ++.L044ofb_out_aligned: + movl 4(%ebp),%esi + movl 8(%ebp),%ecx + addl %ebx,%edi + addl %ebx,%esi + subl %ebx,%ecx + movl $512,%ebx +- jnz .L040ofb_loop ++ jnz .L042ofb_loop + cmpl %ebp,%esp +- je .L043ofb_done ++ je .L045ofb_done + 
pxor %xmm0,%xmm0 + leal (%esp),%eax +-.L044ofb_bzero: ++.L046ofb_bzero: + movaps %xmm0,(%eax) + leal 16(%eax),%eax + cmpl %eax,%ebp +- ja .L044ofb_bzero +-.L043ofb_done: ++ ja .L046ofb_bzero ++.L045ofb_done: + movl 16(%ebp),%ebp + leal 24(%ebp),%esp +- jmp .L045ofb_exit ++ jmp .L047ofb_exit + .align 16 +-.L039ofb_aligned: ++.L041ofb_aligned: + leal -16(%edx),%eax + leal 16(%edx),%ebx + shrl $4,%ecx + .byte 243,15,167,232 + movaps (%eax),%xmm0 + movaps %xmm0,-16(%edx) +-.L045ofb_exit: ++.L047ofb_exit: + movl $1,%eax + leal 4(%esp),%esp +-.L037ofb_abort: ++.L039ofb_abort: + popl %edi + popl %esi + popl %ebx +@@ -773,6 +791,7 @@ padlock_ofb_encrypt: + .align 16 + padlock_ctr32_encrypt: + .L_padlock_ctr32_encrypt_begin: ++.byte 243,15,30,251 + pushl %ebp + pushl %ebx + pushl %esi +@@ -782,14 +801,14 @@ padlock_ctr32_encrypt: + movl 28(%esp),%edx + movl 32(%esp),%ecx + testl $15,%edx +- jnz .L046ctr32_abort ++ jnz .L048ctr32_abort + testl $15,%ecx +- jnz .L046ctr32_abort +- leal .Lpadlock_saved_context-.L047ctr32_pic_point,%eax ++ jnz .L048ctr32_abort ++ leal .Lpadlock_saved_context-.L049ctr32_pic_point,%eax + pushfl + cld + call _padlock_verify_ctx +-.L047ctr32_pic_point: ++.L049ctr32_pic_point: + leal 16(%edx),%edx + xorl %eax,%eax + movq -16(%edx),%mm0 +@@ -809,9 +828,9 @@ padlock_ctr32_encrypt: + andl $-16,%ebp + andl $-16,%esp + movl %eax,16(%ebp) +- jmp .L048ctr32_loop ++ jmp .L050ctr32_loop + .align 16 +-.L048ctr32_loop: ++.L050ctr32_loop: + movl %edi,(%ebp) + movl %esi,4(%ebp) + movl %ecx,8(%ebp) +@@ -820,7 +839,7 @@ padlock_ctr32_encrypt: + movl -4(%edx),%ecx + xorl %edi,%edi + movl -8(%edx),%eax +-.L049ctr32_prepare: ++.L051ctr32_prepare: + movl %ecx,12(%esp,%edi,1) + bswap %ecx + movq %mm0,(%esp,%edi,1) +@@ -829,7 +848,7 @@ padlock_ctr32_encrypt: + bswap %ecx + leal 16(%edi),%edi + cmpl %ebx,%edi +- jb .L049ctr32_prepare ++ jb .L051ctr32_prepare + movl %ecx,-4(%edx) + leal (%esp),%esi + leal (%esp),%edi +@@ -842,33 +861,33 @@ padlock_ctr32_encrypt: + movl 12(%ebp),%ebx + movl 4(%ebp),%esi + xorl %ecx,%ecx +-.L050ctr32_xor: ++.L052ctr32_xor: + movups (%esi,%ecx,1),%xmm1 + leal 16(%ecx),%ecx + pxor -16(%esp,%ecx,1),%xmm1 + movups %xmm1,-16(%edi,%ecx,1) + cmpl %ebx,%ecx +- jb .L050ctr32_xor ++ jb .L052ctr32_xor + movl 8(%ebp),%ecx + addl %ebx,%edi + addl %ebx,%esi + subl %ebx,%ecx + movl $512,%ebx +- jnz .L048ctr32_loop ++ jnz .L050ctr32_loop + pxor %xmm0,%xmm0 + leal (%esp),%eax +-.L051ctr32_bzero: ++.L053ctr32_bzero: + movaps %xmm0,(%eax) + leal 16(%eax),%eax + cmpl %eax,%ebp +- ja .L051ctr32_bzero +-.L052ctr32_done: ++ ja .L053ctr32_bzero ++.L054ctr32_done: + movl 16(%ebp),%ebp + leal 24(%ebp),%esp + movl $1,%eax + leal 4(%esp),%esp + emms +-.L046ctr32_abort: ++.L048ctr32_abort: + popl %edi + popl %esi + popl %ebx +@@ -880,6 +899,7 @@ padlock_ctr32_encrypt: + .align 16 + padlock_xstore: + .L_padlock_xstore_begin: ++.byte 243,15,30,251 + pushl %edi + movl 8(%esp),%edi + movl 12(%esp),%edx +@@ -890,14 +910,15 @@ padlock_xstore: + .type _win32_segv_handler,@function + .align 16 + _win32_segv_handler: ++.byte 243,15,30,251 + movl $1,%eax + movl 4(%esp),%edx + movl 12(%esp),%ecx + cmpl $3221225477,(%edx) +- jne .L053ret ++ jne .L055ret + addl $4,184(%ecx) + movl $0,%eax +-.L053ret: ++.L055ret: + ret + .size _win32_segv_handler,.-_win32_segv_handler + .globl padlock_sha1_oneshot +@@ -905,6 +926,7 @@ _win32_segv_handler: + .align 16 + padlock_sha1_oneshot: + .L_padlock_sha1_oneshot_begin: ++.byte 243,15,30,251 + pushl %edi + pushl %esi + xorl %eax,%eax +@@ -936,6 +958,7 @@ 
padlock_sha1_oneshot: + .align 16 + padlock_sha1_blocks: + .L_padlock_sha1_blocks_begin: ++.byte 243,15,30,251 + pushl %edi + pushl %esi + movl 12(%esp),%edi +@@ -966,6 +989,7 @@ padlock_sha1_blocks: + .align 16 + padlock_sha256_oneshot: + .L_padlock_sha256_oneshot_begin: ++.byte 243,15,30,251 + pushl %edi + pushl %esi + xorl %eax,%eax +@@ -997,6 +1021,7 @@ padlock_sha256_oneshot: + .align 16 + padlock_sha256_blocks: + .L_padlock_sha256_blocks_begin: ++.byte 243,15,30,251 + pushl %edi + pushl %esi + movl 12(%esp),%edi +@@ -1027,6 +1052,7 @@ padlock_sha256_blocks: + .align 16 + padlock_sha512_blocks: + .L_padlock_sha512_blocks_begin: ++.byte 243,15,30,251 + pushl %edi + pushl %esi + movl 12(%esp),%edi +@@ -1069,7 +1095,21 @@ padlock_sha512_blocks: + .Lpadlock_saved_context: + .long 0 + ++ .section ".note.gnu.property", "a" ++ .p2align 2 ++ .long 1f - 0f ++ .long 4f - 1f ++ .long 5 ++0: ++ .asciz "GNU" ++1: ++ .p2align 2 ++ .long 0xc0000002 ++ .long 3f - 2f ++2: ++ .long 3 ++3: ++ .p2align 2 ++4: + + .section .note.GNU-stack,"",%progbits +- +- +diff --git a/lib/accelerated/x86/elf/e_padlock-x86_64.s b/lib/accelerated/x86/elf/e_padlock-x86_64.s +index c161f0a73..f92da756c 100644 +--- a/lib/accelerated/x86/elf/e_padlock-x86_64.s ++++ b/lib/accelerated/x86/elf/e_padlock-x86_64.s +@@ -1,4 +1,4 @@ +-# Copyright (c) 2011-2013, Andy Polyakov ++# Copyright (c) 2011-2016, Andy Polyakov + # All rights reserved. + # + # Redistribution and use in source and binary forms, with or without +@@ -42,36 +42,50 @@ + .type padlock_capability,@function + .align 16 + padlock_capability: ++.cfi_startproc ++.byte 243,15,30,250 + movq %rbx,%r8 + xorl %eax,%eax + cpuid + xorl %eax,%eax +- cmpl $1953391939,%ebx ++ cmpl $0x746e6543,%ebx ++ jne .Lzhaoxin ++ cmpl $0x48727561,%edx + jne .Lnoluck +- cmpl $1215460705,%edx ++ cmpl $0x736c7561,%ecx + jne .Lnoluck +- cmpl $1936487777,%ecx ++ jmp .LzhaoxinEnd ++.Lzhaoxin: ++ cmpl $0x68532020,%ebx + jne .Lnoluck +- movl $3221225472,%eax ++ cmpl $0x68676e61,%edx ++ jne .Lnoluck ++ cmpl $0x20206961,%ecx ++ jne .Lnoluck ++.LzhaoxinEnd: ++ movl $0xC0000000,%eax + cpuid + movl %eax,%edx + xorl %eax,%eax +- cmpl $3221225473,%edx ++ cmpl $0xC0000001,%edx + jb .Lnoluck +- movl $3221225473,%eax ++ movl $0xC0000001,%eax + cpuid + movl %edx,%eax +- andl $4294967279,%eax +- orl $16,%eax ++ andl $0xffffffef,%eax ++ orl $0x10,%eax + .Lnoluck: + movq %r8,%rbx + .byte 0xf3,0xc3 ++.cfi_endproc + .size padlock_capability,.-padlock_capability + + .globl padlock_key_bswap + .type padlock_key_bswap,@function + .align 16 + padlock_key_bswap: ++.cfi_startproc ++.byte 243,15,30,250 + movl 240(%rdi),%edx + .Lbswap_loop: + movl (%rdi),%eax +@@ -81,23 +95,29 @@ padlock_key_bswap: + subl $1,%edx + jnz .Lbswap_loop + .byte 0xf3,0xc3 ++.cfi_endproc + .size padlock_key_bswap,.-padlock_key_bswap + + .globl padlock_verify_context + .type padlock_verify_context,@function + .align 16 + padlock_verify_context: ++.cfi_startproc ++.byte 243,15,30,250 + movq %rdi,%rdx + pushf + leaq .Lpadlock_saved_context(%rip),%rax + call _padlock_verify_ctx + leaq 8(%rsp),%rsp + .byte 0xf3,0xc3 ++.cfi_endproc + .size padlock_verify_context,.-padlock_verify_context + + .type _padlock_verify_ctx,@function + .align 16 + _padlock_verify_ctx: ++.cfi_startproc ++.byte 243,15,30,250 + movq 8(%rsp),%r8 + btq $30,%r8 + jnc .Lverified +@@ -108,43 +128,55 @@ _padlock_verify_ctx: + .Lverified: + movq %rdx,(%rax) + .byte 0xf3,0xc3 ++.cfi_endproc + .size _padlock_verify_ctx,.-_padlock_verify_ctx + + .globl padlock_reload_key + .type 
padlock_reload_key,@function + .align 16 + padlock_reload_key: ++.cfi_startproc ++.byte 243,15,30,250 + pushf + popf + .byte 0xf3,0xc3 ++.cfi_endproc + .size padlock_reload_key,.-padlock_reload_key + + .globl padlock_aes_block + .type padlock_aes_block,@function + .align 16 + padlock_aes_block: ++.cfi_startproc ++.byte 243,15,30,250 + movq %rbx,%r8 + movq $1,%rcx + leaq 32(%rdx),%rbx + leaq 16(%rdx),%rdx +-.byte 0xf3,0x0f,0xa7,0xc8 ++.byte 0xf3,0x0f,0xa7,0xc8 + movq %r8,%rbx + .byte 0xf3,0xc3 ++.cfi_endproc + .size padlock_aes_block,.-padlock_aes_block + + .globl padlock_xstore + .type padlock_xstore,@function + .align 16 + padlock_xstore: ++.cfi_startproc ++.byte 243,15,30,250 + movl %esi,%edx +-.byte 0x0f,0xa7,0xc0 ++.byte 0x0f,0xa7,0xc0 + .byte 0xf3,0xc3 ++.cfi_endproc + .size padlock_xstore,.-padlock_xstore + + .globl padlock_sha1_oneshot + .type padlock_sha1_oneshot,@function + .align 16 + padlock_sha1_oneshot: ++.cfi_startproc ++.byte 243,15,30,250 + movq %rdx,%rcx + movq %rdi,%rdx + movups (%rdi),%xmm0 +@@ -154,19 +186,22 @@ padlock_sha1_oneshot: + movq %rsp,%rdi + movl %eax,16(%rsp) + xorq %rax,%rax +-.byte 0xf3,0x0f,0xa6,0xc8 ++.byte 0xf3,0x0f,0xa6,0xc8 + movaps (%rsp),%xmm0 + movl 16(%rsp),%eax + addq $128+8,%rsp + movups %xmm0,(%rdx) + movl %eax,16(%rdx) + .byte 0xf3,0xc3 ++.cfi_endproc + .size padlock_sha1_oneshot,.-padlock_sha1_oneshot + + .globl padlock_sha1_blocks + .type padlock_sha1_blocks,@function + .align 16 + padlock_sha1_blocks: ++.cfi_startproc ++.byte 243,15,30,250 + movq %rdx,%rcx + movq %rdi,%rdx + movups (%rdi),%xmm0 +@@ -176,19 +211,22 @@ padlock_sha1_blocks: + movq %rsp,%rdi + movl %eax,16(%rsp) + movq $-1,%rax +-.byte 0xf3,0x0f,0xa6,0xc8 ++.byte 0xf3,0x0f,0xa6,0xc8 + movaps (%rsp),%xmm0 + movl 16(%rsp),%eax + addq $128+8,%rsp + movups %xmm0,(%rdx) + movl %eax,16(%rdx) + .byte 0xf3,0xc3 ++.cfi_endproc + .size padlock_sha1_blocks,.-padlock_sha1_blocks + + .globl padlock_sha256_oneshot + .type padlock_sha256_oneshot,@function + .align 16 + padlock_sha256_oneshot: ++.cfi_startproc ++.byte 243,15,30,250 + movq %rdx,%rcx + movq %rdi,%rdx + movups (%rdi),%xmm0 +@@ -198,19 +236,22 @@ padlock_sha256_oneshot: + movq %rsp,%rdi + movaps %xmm1,16(%rsp) + xorq %rax,%rax +-.byte 0xf3,0x0f,0xa6,0xd0 ++.byte 0xf3,0x0f,0xa6,0xd0 + movaps (%rsp),%xmm0 + movaps 16(%rsp),%xmm1 + addq $128+8,%rsp + movups %xmm0,(%rdx) + movups %xmm1,16(%rdx) + .byte 0xf3,0xc3 ++.cfi_endproc + .size padlock_sha256_oneshot,.-padlock_sha256_oneshot + + .globl padlock_sha256_blocks + .type padlock_sha256_blocks,@function + .align 16 + padlock_sha256_blocks: ++.cfi_startproc ++.byte 243,15,30,250 + movq %rdx,%rcx + movq %rdi,%rdx + movups (%rdi),%xmm0 +@@ -220,19 +261,22 @@ padlock_sha256_blocks: + movq %rsp,%rdi + movaps %xmm1,16(%rsp) + movq $-1,%rax +-.byte 0xf3,0x0f,0xa6,0xd0 ++.byte 0xf3,0x0f,0xa6,0xd0 + movaps (%rsp),%xmm0 + movaps 16(%rsp),%xmm1 + addq $128+8,%rsp + movups %xmm0,(%rdx) + movups %xmm1,16(%rdx) + .byte 0xf3,0xc3 ++.cfi_endproc + .size padlock_sha256_blocks,.-padlock_sha256_blocks + + .globl padlock_sha512_blocks + .type padlock_sha512_blocks,@function + .align 16 + padlock_sha512_blocks: ++.cfi_startproc ++.byte 243,15,30,250 + movq %rdx,%rcx + movq %rdi,%rdx + movups (%rdi),%xmm0 +@@ -245,7 +289,7 @@ padlock_sha512_blocks: + movaps %xmm1,16(%rsp) + movaps %xmm2,32(%rsp) + movaps %xmm3,48(%rsp) +-.byte 0xf3,0x0f,0xa6,0xe0 ++.byte 0xf3,0x0f,0xa6,0xe0 + movaps (%rsp),%xmm0 + movaps 16(%rsp),%xmm1 + movaps 32(%rsp),%xmm2 +@@ -256,11 +300,14 @@ padlock_sha512_blocks: + movups 
%xmm2,32(%rdx) + movups %xmm3,48(%rdx) + .byte 0xf3,0xc3 ++.cfi_endproc + .size padlock_sha512_blocks,.-padlock_sha512_blocks + .globl padlock_ecb_encrypt + .type padlock_ecb_encrypt,@function + .align 16 + padlock_ecb_encrypt: ++.cfi_startproc ++.byte 243,15,30,250 + pushq %rbp + pushq %rbx + +@@ -278,9 +325,9 @@ padlock_ecb_encrypt: + xorl %ebx,%ebx + testl $32,(%rdx) + jnz .Lecb_aligned +- testq $15,%rdi ++ testq $0x0f,%rdi + setz %al +- testq $15,%rsi ++ testq $0x0f,%rsi + setz %bl + testl %ebx,%eax + jnz .Lecb_aligned +@@ -304,7 +351,7 @@ padlock_ecb_encrypt: + cmoveq %rdi,%rax + addq %rcx,%rax + negq %rax +- andq $4095,%rax ++ andq $0xfff,%rax + cmpq $128,%rax + movq $-128,%rax + cmovaeq %rbx,%rax +@@ -320,12 +367,12 @@ padlock_ecb_encrypt: + movq %rcx,%r10 + movq %rbx,%rcx + movq %rbx,%r11 +- testq $15,%rdi ++ testq $0x0f,%rdi + cmovnzq %rsp,%rdi +- testq $15,%rsi ++ testq $0x0f,%rsi + jz .Lecb_inp_aligned + shrq $3,%rcx +-.byte 0xf3,0x48,0xa5 ++.byte 0xf3,0x48,0xa5 + subq %rbx,%rdi + movq %rbx,%rcx + movq %rdi,%rsi +@@ -333,15 +380,15 @@ padlock_ecb_encrypt: + leaq -16(%rdx),%rax + leaq 16(%rdx),%rbx + shrq $4,%rcx +-.byte 0xf3,0x0f,0xa7,200 ++.byte 0xf3,0x0f,0xa7,200 + movq %r8,%rdi + movq %r11,%rbx +- testq $15,%rdi ++ testq $0x0f,%rdi + jz .Lecb_out_aligned + movq %rbx,%rcx + leaq (%rsp),%rsi + shrq $3,%rcx +-.byte 0xf3,0x48,0xa5 ++.byte 0xf3,0x48,0xa5 + subq %rbx,%rdi + .Lecb_out_aligned: + movq %r9,%rsi +@@ -362,7 +409,7 @@ padlock_ecb_encrypt: + subq %rax,%rsp + shrq $3,%rcx + leaq (%rsp),%rdi +-.byte 0xf3,0x48,0xa5 ++.byte 0xf3,0x48,0xa5 + movq %rsp,%rsi + movq %r8,%rdi + movq %rbx,%rcx +@@ -388,7 +435,7 @@ padlock_ecb_encrypt: + .Lecb_aligned: + leaq (%rsi,%rcx,1),%rbp + negq %rbp +- andq $4095,%rbp ++ andq $0xfff,%rbp + xorl %eax,%eax + cmpq $128,%rbp + movq $128-1,%rbp +@@ -399,7 +446,7 @@ padlock_ecb_encrypt: + leaq -16(%rdx),%rax + leaq 16(%rdx),%rbx + shrq $4,%rcx +-.byte 0xf3,0x0f,0xa7,200 ++.byte 0xf3,0x0f,0xa7,200 + testq %rbp,%rbp + jz .Lecb_exit + +@@ -411,7 +458,7 @@ padlock_ecb_encrypt: + subq %rcx,%rsp + shrq $3,%rcx + leaq (%rsp),%rdi +-.byte 0xf3,0x48,0xa5 ++.byte 0xf3,0x48,0xa5 + leaq (%r8),%rdi + leaq (%rsp),%rsi + movq %rbx,%rcx +@@ -423,11 +470,14 @@ padlock_ecb_encrypt: + popq %rbx + popq %rbp + .byte 0xf3,0xc3 ++.cfi_endproc + .size padlock_ecb_encrypt,.-padlock_ecb_encrypt + .globl padlock_cbc_encrypt + .type padlock_cbc_encrypt,@function + .align 16 + padlock_cbc_encrypt: ++.cfi_startproc ++.byte 243,15,30,250 + pushq %rbp + pushq %rbx + +@@ -445,9 +495,9 @@ padlock_cbc_encrypt: + xorl %ebx,%ebx + testl $32,(%rdx) + jnz .Lcbc_aligned +- testq $15,%rdi ++ testq $0x0f,%rdi + setz %al +- testq $15,%rsi ++ testq $0x0f,%rsi + setz %bl + testl %ebx,%eax + jnz .Lcbc_aligned +@@ -471,7 +521,7 @@ padlock_cbc_encrypt: + cmoveq %rdi,%rax + addq %rcx,%rax + negq %rax +- andq $4095,%rax ++ andq $0xfff,%rax + cmpq $64,%rax + movq $-64,%rax + cmovaeq %rbx,%rax +@@ -487,12 +537,12 @@ padlock_cbc_encrypt: + movq %rcx,%r10 + movq %rbx,%rcx + movq %rbx,%r11 +- testq $15,%rdi ++ testq $0x0f,%rdi + cmovnzq %rsp,%rdi +- testq $15,%rsi ++ testq $0x0f,%rsi + jz .Lcbc_inp_aligned + shrq $3,%rcx +-.byte 0xf3,0x48,0xa5 ++.byte 0xf3,0x48,0xa5 + subq %rbx,%rdi + movq %rbx,%rcx + movq %rdi,%rsi +@@ -500,17 +550,17 @@ padlock_cbc_encrypt: + leaq -16(%rdx),%rax + leaq 16(%rdx),%rbx + shrq $4,%rcx +-.byte 0xf3,0x0f,0xa7,208 ++.byte 0xf3,0x0f,0xa7,208 + movdqa (%rax),%xmm0 + movdqa %xmm0,-16(%rdx) + movq %r8,%rdi + movq %r11,%rbx +- testq $15,%rdi ++ testq $0x0f,%rdi + jz 
.Lcbc_out_aligned + movq %rbx,%rcx + leaq (%rsp),%rsi + shrq $3,%rcx +-.byte 0xf3,0x48,0xa5 ++.byte 0xf3,0x48,0xa5 + subq %rbx,%rdi + .Lcbc_out_aligned: + movq %r9,%rsi +@@ -531,7 +581,7 @@ padlock_cbc_encrypt: + subq %rax,%rsp + shrq $3,%rcx + leaq (%rsp),%rdi +-.byte 0xf3,0x48,0xa5 ++.byte 0xf3,0x48,0xa5 + movq %rsp,%rsi + movq %r8,%rdi + movq %rbx,%rcx +@@ -557,7 +607,7 @@ padlock_cbc_encrypt: + .Lcbc_aligned: + leaq (%rsi,%rcx,1),%rbp + negq %rbp +- andq $4095,%rbp ++ andq $0xfff,%rbp + xorl %eax,%eax + cmpq $64,%rbp + movq $64-1,%rbp +@@ -568,7 +618,7 @@ padlock_cbc_encrypt: + leaq -16(%rdx),%rax + leaq 16(%rdx),%rbx + shrq $4,%rcx +-.byte 0xf3,0x0f,0xa7,208 ++.byte 0xf3,0x0f,0xa7,208 + movdqa (%rax),%xmm0 + movdqa %xmm0,-16(%rdx) + testq %rbp,%rbp +@@ -582,7 +632,7 @@ padlock_cbc_encrypt: + subq %rcx,%rsp + shrq $3,%rcx + leaq (%rsp),%rdi +-.byte 0xf3,0x48,0xa5 ++.byte 0xf3,0x48,0xa5 + leaq (%r8),%rdi + leaq (%rsp),%rsi + movq %rbx,%rcx +@@ -594,11 +644,14 @@ padlock_cbc_encrypt: + popq %rbx + popq %rbp + .byte 0xf3,0xc3 ++.cfi_endproc + .size padlock_cbc_encrypt,.-padlock_cbc_encrypt + .globl padlock_cfb_encrypt + .type padlock_cfb_encrypt,@function + .align 16 + padlock_cfb_encrypt: ++.cfi_startproc ++.byte 243,15,30,250 + pushq %rbp + pushq %rbx + +@@ -616,9 +669,9 @@ padlock_cfb_encrypt: + xorl %ebx,%ebx + testl $32,(%rdx) + jnz .Lcfb_aligned +- testq $15,%rdi ++ testq $0x0f,%rdi + setz %al +- testq $15,%rsi ++ testq $0x0f,%rsi + setz %bl + testl %ebx,%eax + jnz .Lcfb_aligned +@@ -645,12 +698,12 @@ padlock_cfb_encrypt: + movq %rcx,%r10 + movq %rbx,%rcx + movq %rbx,%r11 +- testq $15,%rdi ++ testq $0x0f,%rdi + cmovnzq %rsp,%rdi +- testq $15,%rsi ++ testq $0x0f,%rsi + jz .Lcfb_inp_aligned + shrq $3,%rcx +-.byte 0xf3,0x48,0xa5 ++.byte 0xf3,0x48,0xa5 + subq %rbx,%rdi + movq %rbx,%rcx + movq %rdi,%rsi +@@ -658,17 +711,17 @@ padlock_cfb_encrypt: + leaq -16(%rdx),%rax + leaq 16(%rdx),%rbx + shrq $4,%rcx +-.byte 0xf3,0x0f,0xa7,224 ++.byte 0xf3,0x0f,0xa7,224 + movdqa (%rax),%xmm0 + movdqa %xmm0,-16(%rdx) + movq %r8,%rdi + movq %r11,%rbx +- testq $15,%rdi ++ testq $0x0f,%rdi + jz .Lcfb_out_aligned + movq %rbx,%rcx + leaq (%rsp),%rsi + shrq $3,%rcx +-.byte 0xf3,0x48,0xa5 ++.byte 0xf3,0x48,0xa5 + subq %rbx,%rdi + .Lcfb_out_aligned: + movq %r9,%rsi +@@ -698,7 +751,7 @@ padlock_cfb_encrypt: + leaq -16(%rdx),%rax + leaq 16(%rdx),%rbx + shrq $4,%rcx +-.byte 0xf3,0x0f,0xa7,224 ++.byte 0xf3,0x0f,0xa7,224 + movdqa (%rax),%xmm0 + movdqa %xmm0,-16(%rdx) + .Lcfb_exit: +@@ -708,11 +761,14 @@ padlock_cfb_encrypt: + popq %rbx + popq %rbp + .byte 0xf3,0xc3 ++.cfi_endproc + .size padlock_cfb_encrypt,.-padlock_cfb_encrypt + .globl padlock_ofb_encrypt + .type padlock_ofb_encrypt,@function + .align 16 + padlock_ofb_encrypt: ++.cfi_startproc ++.byte 243,15,30,250 + pushq %rbp + pushq %rbx + +@@ -730,9 +786,9 @@ padlock_ofb_encrypt: + xorl %ebx,%ebx + testl $32,(%rdx) + jnz .Lofb_aligned +- testq $15,%rdi ++ testq $0x0f,%rdi + setz %al +- testq $15,%rsi ++ testq $0x0f,%rsi + setz %bl + testl %ebx,%eax + jnz .Lofb_aligned +@@ -759,12 +815,12 @@ padlock_ofb_encrypt: + movq %rcx,%r10 + movq %rbx,%rcx + movq %rbx,%r11 +- testq $15,%rdi ++ testq $0x0f,%rdi + cmovnzq %rsp,%rdi +- testq $15,%rsi ++ testq $0x0f,%rsi + jz .Lofb_inp_aligned + shrq $3,%rcx +-.byte 0xf3,0x48,0xa5 ++.byte 0xf3,0x48,0xa5 + subq %rbx,%rdi + movq %rbx,%rcx + movq %rdi,%rsi +@@ -772,17 +828,17 @@ padlock_ofb_encrypt: + leaq -16(%rdx),%rax + leaq 16(%rdx),%rbx + shrq $4,%rcx +-.byte 0xf3,0x0f,0xa7,232 ++.byte 0xf3,0x0f,0xa7,232 + movdqa 
(%rax),%xmm0 + movdqa %xmm0,-16(%rdx) + movq %r8,%rdi + movq %r11,%rbx +- testq $15,%rdi ++ testq $0x0f,%rdi + jz .Lofb_out_aligned + movq %rbx,%rcx + leaq (%rsp),%rsi + shrq $3,%rcx +-.byte 0xf3,0x48,0xa5 ++.byte 0xf3,0x48,0xa5 + subq %rbx,%rdi + .Lofb_out_aligned: + movq %r9,%rsi +@@ -812,7 +868,7 @@ padlock_ofb_encrypt: + leaq -16(%rdx),%rax + leaq 16(%rdx),%rbx + shrq $4,%rcx +-.byte 0xf3,0x0f,0xa7,232 ++.byte 0xf3,0x0f,0xa7,232 + movdqa (%rax),%xmm0 + movdqa %xmm0,-16(%rdx) + .Lofb_exit: +@@ -822,11 +878,14 @@ padlock_ofb_encrypt: + popq %rbx + popq %rbp + .byte 0xf3,0xc3 ++.cfi_endproc + .size padlock_ofb_encrypt,.-padlock_ofb_encrypt + .globl padlock_ctr32_encrypt + .type padlock_ctr32_encrypt,@function + .align 16 + padlock_ctr32_encrypt: ++.cfi_startproc ++.byte 243,15,30,250 + pushq %rbp + pushq %rbx + +@@ -844,9 +903,9 @@ padlock_ctr32_encrypt: + xorl %ebx,%ebx + testl $32,(%rdx) + jnz .Lctr32_aligned +- testq $15,%rdi ++ testq $0x0f,%rdi + setz %al +- testq $15,%rsi ++ testq $0x0f,%rsi + setz %bl + testl %ebx,%eax + jnz .Lctr32_aligned +@@ -881,7 +940,7 @@ padlock_ctr32_encrypt: + cmoveq %rdi,%rax + addq %rcx,%rax + negq %rax +- andq $4095,%rax ++ andq $0xfff,%rax + cmpq $32,%rax + movq $-32,%rax + cmovaeq %rbx,%rax +@@ -897,12 +956,12 @@ padlock_ctr32_encrypt: + movq %rcx,%r10 + movq %rbx,%rcx + movq %rbx,%r11 +- testq $15,%rdi ++ testq $0x0f,%rdi + cmovnzq %rsp,%rdi +- testq $15,%rsi ++ testq $0x0f,%rsi + jz .Lctr32_inp_aligned + shrq $3,%rcx +-.byte 0xf3,0x48,0xa5 ++.byte 0xf3,0x48,0xa5 + subq %rbx,%rdi + movq %rbx,%rcx + movq %rdi,%rsi +@@ -910,23 +969,23 @@ padlock_ctr32_encrypt: + leaq -16(%rdx),%rax + leaq 16(%rdx),%rbx + shrq $4,%rcx +-.byte 0xf3,0x0f,0xa7,216 ++.byte 0xf3,0x0f,0xa7,216 + movl -4(%rdx),%eax +- testl $4294901760,%eax ++ testl $0xffff0000,%eax + jnz .Lctr32_no_carry + bswapl %eax +- addl $65536,%eax ++ addl $0x10000,%eax + bswapl %eax + movl %eax,-4(%rdx) + .Lctr32_no_carry: + movq %r8,%rdi + movq %r11,%rbx +- testq $15,%rdi ++ testq $0x0f,%rdi + jz .Lctr32_out_aligned + movq %rbx,%rcx + leaq (%rsp),%rsi + shrq $3,%rcx +-.byte 0xf3,0x48,0xa5 ++.byte 0xf3,0x48,0xa5 + subq %rbx,%rdi + .Lctr32_out_aligned: + movq %r9,%rsi +@@ -944,7 +1003,7 @@ padlock_ctr32_encrypt: + cmoveq %rdi,%rax + addq %rcx,%rax + negq %rax +- andq $4095,%rax ++ andq $0xfff,%rax + cmpq $32,%rax + movq $-32,%rax + cmovaeq %rbx,%rax +@@ -959,7 +1018,7 @@ padlock_ctr32_encrypt: + subq %rax,%rsp + shrq $3,%rcx + leaq (%rsp),%rdi +-.byte 0xf3,0x48,0xa5 ++.byte 0xf3,0x48,0xa5 + movq %rsp,%rsi + movq %r8,%rdi + movq %rbx,%rcx +@@ -986,7 +1045,7 @@ padlock_ctr32_encrypt: + movl -4(%rdx),%eax + bswapl %eax + negl %eax +- andl $65535,%eax ++ andl $0xffff,%eax + movq $1048576,%rbx + shll $4,%eax + cmovzq %rbx,%rax +@@ -1003,11 +1062,11 @@ padlock_ctr32_encrypt: + leaq -16(%rdx),%rax + leaq 16(%rdx),%rbx + shrq $4,%rcx +-.byte 0xf3,0x0f,0xa7,216 ++.byte 0xf3,0x0f,0xa7,216 + + movl -4(%rdx),%eax + bswapl %eax +- addl $65536,%eax ++ addl $0x10000,%eax + bswapl %eax + movl %eax,-4(%rdx) + +@@ -1021,7 +1080,7 @@ padlock_ctr32_encrypt: + .Lctr32_aligned_skip: + leaq (%rsi,%rcx,1),%rbp + negq %rbp +- andq $4095,%rbp ++ andq $0xfff,%rbp + xorl %eax,%eax + cmpq $32,%rbp + movq $32-1,%rbp +@@ -1032,7 +1091,7 @@ padlock_ctr32_encrypt: + leaq -16(%rdx),%rax + leaq 16(%rdx),%rbx + shrq $4,%rcx +-.byte 0xf3,0x0f,0xa7,216 ++.byte 0xf3,0x0f,0xa7,216 + testq %rbp,%rbp + jz .Lctr32_exit + +@@ -1044,7 +1103,7 @@ padlock_ctr32_encrypt: + subq %rcx,%rsp + shrq $3,%rcx + leaq (%rsp),%rdi +-.byte 0xf3,0x48,0xa5 ++.byte 
0xf3,0x48,0xa5 + leaq (%r8),%rdi + leaq (%rsp),%rsi + movq %rbx,%rcx +@@ -1056,6 +1115,7 @@ padlock_ctr32_encrypt: + popq %rbx + popq %rbp + .byte 0xf3,0xc3 ++.cfi_endproc + .size padlock_ctr32_encrypt,.-padlock_ctr32_encrypt + .byte 86,73,65,32,80,97,100,108,111,99,107,32,120,56,54,95,54,52,32,109,111,100,117,108,101,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 + .align 16 +@@ -1063,8 +1123,26 @@ padlock_ctr32_encrypt: + .align 8 + .Lpadlock_saved_context: + .quad 0 +- ++ .section ".note.gnu.property", "a" ++ .p2align 3 ++ .long 1f - 0f ++ .long 4f - 1f ++ .long 5 ++0: ++ # "GNU" encoded with .byte, since .asciz isn't supported ++ # on Solaris. ++ .byte 0x47 ++ .byte 0x4e ++ .byte 0x55 ++ .byte 0 ++1: ++ .p2align 3 ++ .long 0xc0000002 ++ .long 3f - 2f ++2: ++ .long 3 ++3: ++ .p2align 3 ++4: + + .section .note.GNU-stack,"",%progbits +- +- diff --git a/SOURCES/gnutls-3.7.2-libopts-covscan.patch b/SOURCES/gnutls-3.7.2-libopts-covscan.patch new file mode 100644 index 0000000..a85738f --- /dev/null +++ b/SOURCES/gnutls-3.7.2-libopts-covscan.patch @@ -0,0 +1,72 @@ +From de11338de900f5c8840268264bceccbf76cca34f Mon Sep 17 00:00:00 2001 +From: Daiki Ueno +Date: Thu, 21 Oct 2021 12:19:30 +0200 +Subject: [PATCH 1/2] autoopts: makeshell: use ferror before fclose + +Signed-off-by: Daiki Ueno +--- + src/libopts/makeshell.c | 3 +-- + 1 file changed, 1 insertion(+), 2 deletions(-) + +diff --git a/src/libopts/makeshell.c b/src/libopts/makeshell.c +index b6cb441a..7eb17a1f 100644 +--- a/src/libopts/makeshell.c ++++ b/src/libopts/makeshell.c +@@ -164,9 +164,8 @@ optionParseShell(tOptions * opts) + #ifdef HAVE_FCHMOD + fchmod(STDOUT_FILENO, 0755); + #endif +- fclose(stdout); + +- if (ferror(stdout)) ++ if (ferror(stdout) || fclose(stdout)) + fserr_exit(opts->pzProgName, zwriting, zstdout_name); + + AGFREE(script_text); +-- +2.31.1 + + +From 161097d36b608b615482e42e56a465c9fd740c26 Mon Sep 17 00:00:00 2001 +From: Daiki Ueno +Date: Thu, 21 Oct 2021 12:43:07 +0200 +Subject: [PATCH 2/2] autoopts: load: fix resource leak in error path + +Signed-off-by: Daiki Ueno +--- + src/libopts/load.c | 10 ++++++++-- + 1 file changed, 8 insertions(+), 2 deletions(-) + +diff --git a/src/libopts/load.c b/src/libopts/load.c +index 3f1ce2e6..ad1c4584 100644 +--- a/src/libopts/load.c ++++ b/src/libopts/load.c +@@ -219,8 +219,11 @@ add_prog_path(char * buf, int b_sz, char const * fname, char const * prg_path) + * IF we cannot find a directory name separator, + * THEN we do not have a path name to our executable file. + */ +- if (pz == NULL) ++ if (pz == NULL) { ++ if (path != prg_path) ++ AGFREE(path); + return false; ++ } + + fname += skip; + fname_len = strlen(fname) + 1; // + NUL byte +@@ -230,8 +233,11 @@ add_prog_path(char * buf, int b_sz, char const * fname, char const * prg_path) + * Concatenate the file name to the end of the executable path. + * The result may be either a file or a directory. 
+ */ +- if (dir_len + fname_len > (unsigned)b_sz) ++ if (dir_len + fname_len > (unsigned)b_sz) { ++ if (path != prg_path) ++ AGFREE(path); + return false; ++ } + + memcpy(buf, path, dir_len); + memcpy(buf + dir_len, fname, fname_len); +-- +2.31.1 + diff --git a/SPECS/gnutls.spec b/SPECS/gnutls.spec index b872a80..b5e6c20 100644 --- a/SPECS/gnutls.spec +++ b/SPECS/gnutls.spec @@ -1,10 +1,12 @@ # This spec file has been automatically updated Version: 3.7.2 -Release: 4%{?dist} +Release: 8%{?dist} Patch1: gnutls-3.6.7-no-now-guile.patch Patch2: gnutls-3.2.7-rpath.patch Patch3: gnutls-3.7.2-config-allowlisting.patch Patch4: gnutls-3.7.2-key-share-ecdhx.patch +Patch5: gnutls-3.7.2-enable-intel-cet.patch +Patch6: gnutls-3.7.2-libopts-covscan.patch %bcond_with bootstrap %bcond_without dane %if 0%{?rhel} @@ -168,7 +170,17 @@ echo "SYSTEM=NORMAL" >> tests/system.prio %if !%{with bootstrap} # These are ordered by dependency: touch doc/functions/* doc/enums/* -touch doc/enums.texi doc/invoke*.texi doc/gnutls-api.texi +touch doc/enums.texi doc/gnutls-api.texi +touch doc/invoke-gnutls-cli.texi +touch doc/invoke-gnutls-cli-debug.texi +touch doc/invoke-gnutls-serv.texi +touch doc/invoke-certtool.texi +touch doc/invoke-ocsptool.texi +touch doc/invoke-danetool.texi +touch doc/invoke-srptool.texi +touch doc/invoke-psktool.texi +touch doc/invoke-p11tool.texi +touch doc/invoke-tpmtool.texi touch doc/stamp_functions doc/stamp_enums touch doc/gnutls.info doc/gnutls.html doc/manpages/stamp_mans %endif @@ -177,8 +189,9 @@ touch doc/gnutls.info doc/gnutls.html doc/manpages/stamp_mans # via the crypto policies %build -CCASFLAGS="$CCASFLAGS -Wa,--generate-missing-build-notes=yes" -export CCASFLAGS +%ifarch aarch64 ppc64le +%define _lto_cflags %{nil} +%endif %if %{with guile} # These should be checked by m4/guile.m4 instead of configure.ac @@ -309,6 +322,21 @@ make check %{?_smp_mflags} GNUTLS_SYSTEM_PRIORITY_FILE=/dev/null %endif %changelog +* Thu Oct 21 2021 Daiki Ueno - 3.7.2-8 +- Fix issues in bundled libopts, spotted by covscan (#1938730) + +* Tue Oct 12 2021 Daiki Ueno - 3.7.2-7 +- Enable Intel CET +- Remove unnecessary CCASFLAGS setting for annocheck + +* Thu Aug 19 2021 Daiki Ueno - 3.7.2-6 +- Reorder doc/invoke-*.texi generation (#1975482) +- Temporarily disable LTO for aarch64 and ppc64le + +* Mon Aug 09 2021 Mohan Boddu - 3.7.2-5 +- Rebuilt for IMA sigs, glibc 2.34, aarch64 flags + Related: rhbz#1991688 + * Mon Aug 2 2021 Daiki Ueno - 3.7.2-4 - Disable GOST cryptography by default (#1945292) - Tighten timestamp adjustment when not bootstrapping (#1975482)
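
Background notes on the changes above, added for readers of this import; they are not part of the imported sources.

In the Intel CET patch, the ".byte 243,15,30,251" and ".byte 243,15,30,250" sequences inserted at each function entry are the raw encodings of the CET landing-pad instructions endbr32 (f3 0f 1e fb) and endbr64 (f3 0f 1e fa). They are emitted as byte literals so that pre-CET assemblers still accept the files, and they execute as no-ops on CPUs without CET. The appended ".note.gnu.property" sections carry property type 0xc0000002 (GNU_PROPERTY_X86_FEATURE_1_AND) with value 3, i.e. the IBT and SHSTK feature bits, which lets the linker mark the resulting objects as CET-compatible.

The makeshell.c fix folds the stream-error check and the close into a single condition, so that a write error detected either on earlier buffered output (via ferror) or during the final flush inside fclose is reported before exit. Below is a minimal standalone sketch of the same pattern; the script contents and error handling are illustrative only and are not taken from libopts:

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        /* Emit a generated script on stdout. */
        fputs("#! /bin/sh\necho generated\n", stdout);

        /* ferror() reports errors from earlier buffered writes;
         * fclose() returns nonzero if the final flush fails.  The
         * short-circuit order means fclose() is skipped only when an
         * error was already detected and the process is exiting. */
        if (ferror(stdout) || fclose(stdout)) {
            perror("stdout");
            return EXIT_FAILURE;
        }
        return EXIT_SUCCESS;
    }

The load.c fix applies the usual cleanup-before-early-return discipline: the duplicated path buffer is released on both newly guarded error paths, and the "path != prg_path" check covers the case where no copy was made and path still aliases the caller's string, which must not be freed.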