Blame SOURCES/openssl-1.1.1-cve-2022-4304-RSA-oracle.patch

dc0b1f
From 43d8f88511991533f53680a751e9326999a6a31f Mon Sep 17 00:00:00 2001
dc0b1f
From: Matt Caswell <matt@openssl.org>
dc0b1f
Date: Fri, 20 Jan 2023 15:26:54 +0000
dc0b1f
Subject: [PATCH 1/6] Fix Timing Oracle in RSA decryption
dc0b1f
dc0b1f
A timing based side channel exists in the OpenSSL RSA Decryption
dc0b1f
implementation which could be sufficient to recover a plaintext across
dc0b1f
a network in a Bleichenbacher style attack. To achieve a successful
dc0b1f
decryption an attacker would have to be able to send a very large number
dc0b1f
of trial messages for decryption. The vulnerability affects all RSA
dc0b1f
padding modes: PKCS#1 v1.5, RSA-OEAP and RSASVE.
dc0b1f
dc0b1f
Patch written by Dmitry Belyavsky and Hubert Kario
dc0b1f
dc0b1f
CVE-2022-4304
dc0b1f
dc0b1f
Reviewed-by: Dmitry Belyavskiy <beldmit@gmail.com>
dc0b1f
Reviewed-by: Tomas Mraz <tomas@openssl.org>
dc0b1f
---
dc0b1f
 crypto/bn/bn_blind.c    |  14 -
dc0b1f
 crypto/bn/bn_err.c      |   2 +
dc0b1f
 crypto/bn/bn_local.h    |  14 +
dc0b1f
 crypto/bn/build.info    |   3 +-
dc0b1f
 crypto/bn/rsa_sup_mul.c | 614 ++++++++++++++++++++++++++++++++++++++++
dc0b1f
 crypto/err/openssl.txt  |   3 +-
dc0b1f
 crypto/rsa/rsa_ossl.c   |  17 +-
dc0b1f
 include/crypto/bn.h     |   5 +
dc0b1f
 include/openssl/bnerr.h |   1 +
dc0b1f
 9 files changed, 653 insertions(+), 20 deletions(-)
dc0b1f
 create mode 100644 crypto/bn/rsa_sup_mul.c
dc0b1f
dc0b1f
diff --git a/crypto/bn/bn_blind.c b/crypto/bn/bn_blind.c
dc0b1f
index 76fc7ebcff..6e9d239321 100644
dc0b1f
--- a/crypto/bn/bn_blind.c
dc0b1f
+++ b/crypto/bn/bn_blind.c
dc0b1f
@@ -13,20 +13,6 @@
dc0b1f
 
dc0b1f
 #define BN_BLINDING_COUNTER     32
dc0b1f
 
dc0b1f
-struct bn_blinding_st {
dc0b1f
-    BIGNUM *A;
dc0b1f
-    BIGNUM *Ai;
dc0b1f
-    BIGNUM *e;
dc0b1f
-    BIGNUM *mod;                /* just a reference */
dc0b1f
-    CRYPTO_THREAD_ID tid;
dc0b1f
-    int counter;
dc0b1f
-    unsigned long flags;
dc0b1f
-    BN_MONT_CTX *m_ctx;
dc0b1f
-    int (*bn_mod_exp) (BIGNUM *r, const BIGNUM *a, const BIGNUM *p,
dc0b1f
-                       const BIGNUM *m, BN_CTX *ctx, BN_MONT_CTX *m_ctx);
dc0b1f
-    CRYPTO_RWLOCK *lock;
dc0b1f
-};
dc0b1f
-
dc0b1f
 BN_BLINDING *BN_BLINDING_new(const BIGNUM *A, const BIGNUM *Ai, BIGNUM *mod)
dc0b1f
 {
dc0b1f
     BN_BLINDING *ret = NULL;
dc0b1f
diff --git a/crypto/bn/bn_err.c b/crypto/bn/bn_err.c
dc0b1f
index dd87c152cf..3dd8d9a568 100644
dc0b1f
--- a/crypto/bn/bn_err.c
dc0b1f
+++ b/crypto/bn/bn_err.c
dc0b1f
@@ -73,6 +73,8 @@ static const ERR_STRING_DATA BN_str_functs[] = {
dc0b1f
     {ERR_PACK(ERR_LIB_BN, BN_F_BN_SET_WORDS, 0), "bn_set_words"},
dc0b1f
     {ERR_PACK(ERR_LIB_BN, BN_F_BN_STACK_PUSH, 0), "BN_STACK_push"},
dc0b1f
     {ERR_PACK(ERR_LIB_BN, BN_F_BN_USUB, 0), "BN_usub"},
dc0b1f
+    {ERR_PACK(ERR_LIB_BN, BN_F_OSSL_BN_RSA_DO_UNBLIND, 0),
dc0b1f
+    "ossl_bn_rsa_do_unblind"},
dc0b1f
     {0, NULL}
dc0b1f
 };
dc0b1f
 
dc0b1f
diff --git a/crypto/bn/bn_local.h b/crypto/bn/bn_local.h
dc0b1f
index 62a969b134..4d8cb64675 100644
dc0b1f
--- a/crypto/bn/bn_local.h
dc0b1f
+++ b/crypto/bn/bn_local.h
dc0b1f
@@ -283,6 +283,20 @@ struct bn_gencb_st {
dc0b1f
     } cb;
dc0b1f
 };
dc0b1f
 
dc0b1f
+struct bn_blinding_st {
dc0b1f
+    BIGNUM *A;
dc0b1f
+    BIGNUM *Ai;
dc0b1f
+    BIGNUM *e;
dc0b1f
+    BIGNUM *mod;                /* just a reference */
dc0b1f
+    CRYPTO_THREAD_ID tid;
dc0b1f
+    int counter;
dc0b1f
+    unsigned long flags;
dc0b1f
+    BN_MONT_CTX *m_ctx;
dc0b1f
+    int (*bn_mod_exp) (BIGNUM *r, const BIGNUM *a, const BIGNUM *p,
dc0b1f
+                       const BIGNUM *m, BN_CTX *ctx, BN_MONT_CTX *m_ctx);
dc0b1f
+    CRYPTO_RWLOCK *lock;
dc0b1f
+};
dc0b1f
+
dc0b1f
 /*-
dc0b1f
  * BN_window_bits_for_exponent_size -- macro for sliding window mod_exp functions
dc0b1f
  *
dc0b1f
diff --git a/crypto/bn/build.info b/crypto/bn/build.info
dc0b1f
index b9ed5322fa..c9fe2fdada 100644
dc0b1f
--- a/crypto/bn/build.info
dc0b1f
+++ b/crypto/bn/build.info
dc0b1f
@@ -5,7 +5,8 @@ SOURCE[../../libcrypto]=\
dc0b1f
         bn_kron.c bn_sqrt.c bn_gcd.c bn_prime.c bn_err.c bn_sqr.c \
dc0b1f
         {- $target{bn_asm_src} -} \
dc0b1f
         bn_recp.c bn_mont.c bn_mpi.c bn_exp2.c bn_gf2m.c bn_nist.c \
dc0b1f
-        bn_depr.c bn_const.c bn_x931p.c bn_intern.c bn_dh.c bn_srp.c
dc0b1f
+        bn_depr.c bn_const.c bn_x931p.c bn_intern.c bn_dh.c bn_srp.c \
dc0b1f
+        rsa_sup_mul.c
dc0b1f
 
dc0b1f
 INCLUDE[bn_exp.o]=..
dc0b1f
 
dc0b1f
diff --git a/crypto/bn/rsa_sup_mul.c b/crypto/bn/rsa_sup_mul.c
dc0b1f
new file mode 100644
dc0b1f
index 0000000000..acafefd5fe
dc0b1f
--- /dev/null
dc0b1f
+++ b/crypto/bn/rsa_sup_mul.c
dc0b1f
@@ -0,0 +1,614 @@
dc0b1f
+#include <openssl/e_os2.h>
dc0b1f
+#include <stddef.h>
dc0b1f
+#include <sys/types.h>
dc0b1f
+#include <string.h>
dc0b1f
+#include <openssl/bn.h>
dc0b1f
+#include <openssl/err.h>
dc0b1f
+#include <openssl/rsaerr.h>
dc0b1f
+#include "internal/numbers.h"
dc0b1f
+#include "internal/constant_time.h"
dc0b1f
+#include "bn_local.h"
dc0b1f
+
dc0b1f
+# if BN_BYTES == 8
dc0b1f
+typedef uint64_t limb_t;
dc0b1f
+#  if defined(__SIZEOF_INT128__) && __SIZEOF_INT128__ == 16
dc0b1f
+/* nonstandard; implemented by gcc on 64-bit platforms */
dc0b1f
+typedef __uint128_t limb2_t;
dc0b1f
+#   define HAVE_LIMB2_T
dc0b1f
+#  endif
dc0b1f
+#  define LIMB_BIT_SIZE 64
dc0b1f
+#  define LIMB_BYTE_SIZE 8
dc0b1f
+# elif BN_BYTES == 4
dc0b1f
+typedef uint32_t limb_t;
dc0b1f
+typedef uint64_t limb2_t;
dc0b1f
+#  define LIMB_BIT_SIZE 32
dc0b1f
+#  define LIMB_BYTE_SIZE 4
dc0b1f
+#  define HAVE_LIMB2_T
dc0b1f
+# else
dc0b1f
+#  error "Not supported"
dc0b1f
+# endif
dc0b1f
+
dc0b1f
+/*
dc0b1f
+ * For multiplication we're using schoolbook multiplication,
dc0b1f
+ * so if we have two numbers, each with 6 "digits" (words)
dc0b1f
+ * the multiplication is calculated as follows:
dc0b1f
+ *                        A B C D E F
dc0b1f
+ *                     x  I J K L M N
dc0b1f
+ *                     --------------
dc0b1f
+ *                                N*F
dc0b1f
+ *                              N*E
dc0b1f
+ *                            N*D
dc0b1f
+ *                          N*C
dc0b1f
+ *                        N*B
dc0b1f
+ *                      N*A
dc0b1f
+ *                              M*F
dc0b1f
+ *                            M*E
dc0b1f
+ *                          M*D
dc0b1f
+ *                        M*C
dc0b1f
+ *                      M*B
dc0b1f
+ *                    M*A
dc0b1f
+ *                            L*F
dc0b1f
+ *                          L*E
dc0b1f
+ *                        L*D
dc0b1f
+ *                      L*C
dc0b1f
+ *                    L*B
dc0b1f
+ *                  L*A
dc0b1f
+ *                          K*F
dc0b1f
+ *                        K*E
dc0b1f
+ *                      K*D
dc0b1f
+ *                    K*C
dc0b1f
+ *                  K*B
dc0b1f
+ *                K*A
dc0b1f
+ *                        J*F
dc0b1f
+ *                      J*E
dc0b1f
+ *                    J*D
dc0b1f
+ *                  J*C
dc0b1f
+ *                J*B
dc0b1f
+ *              J*A
dc0b1f
+ *                      I*F
dc0b1f
+ *                    I*E
dc0b1f
+ *                  I*D
dc0b1f
+ *                I*C
dc0b1f
+ *              I*B
dc0b1f
+ *         +  I*A
dc0b1f
+ *         ==========================
dc0b1f
+ *                        N*B N*D N*F
dc0b1f
+ *                    + N*A N*C N*E
dc0b1f
+ *                    + M*B M*D M*F
dc0b1f
+ *                  + M*A M*C M*E
dc0b1f
+ *                  + L*B L*D L*F
dc0b1f
+ *                + L*A L*C L*E
dc0b1f
+ *                + K*B K*D K*F
dc0b1f
+ *              + K*A K*C K*E
dc0b1f
+ *              + J*B J*D J*F
dc0b1f
+ *            + J*A J*C J*E
dc0b1f
+ *            + I*B I*D I*F
dc0b1f
+ *          + I*A I*C I*E
dc0b1f
+ *
dc0b1f
+ *                1+1 1+3 1+5
dc0b1f
+ *              1+0 1+2 1+4
dc0b1f
+ *              0+1 0+3 0+5
dc0b1f
+ *            0+0 0+2 0+4
dc0b1f
+ *
dc0b1f
+ *            0 1 2 3 4 5 6
dc0b1f
+ * which requires n^2 multiplications and 2n full length additions
dc0b1f
+ * as we can keep every other result of limb multiplication in two separate
dc0b1f
+ * limbs
dc0b1f
+ */
dc0b1f
+
dc0b1f
+#if defined HAVE_LIMB2_T
dc0b1f
+static ossl_inline void _mul_limb(limb_t *hi, limb_t *lo, limb_t a, limb_t b)
dc0b1f
+{
dc0b1f
+    limb2_t t;
dc0b1f
+    /*
dc0b1f
+     * this is idiomatic code to tell compiler to use the native mul
dc0b1f
+     * those three lines will actually compile to single instruction
dc0b1f
+     */
dc0b1f
+
dc0b1f
+    t = (limb2_t)a * b;
dc0b1f
+    *hi = t >> LIMB_BIT_SIZE;
dc0b1f
+    *lo = (limb_t)t;
dc0b1f
+}
dc0b1f
+#elif (BN_BYTES == 8) && (defined _MSC_VER)
dc0b1f
+/* https://learn.microsoft.com/en-us/cpp/intrinsics/umul128?view=msvc-170 */
dc0b1f
+#pragma intrinsic(_umul128)
dc0b1f
+static ossl_inline void _mul_limb(limb_t *hi, limb_t *lo, limb_t a, limb_t b)
dc0b1f
+{
dc0b1f
+    *lo = _umul128(a, b, hi);
dc0b1f
+}
dc0b1f
+#else
dc0b1f
+/*
dc0b1f
+ * if the compiler doesn't have either a 128bit data type nor a "return
dc0b1f
+ * high 64 bits of multiplication"
dc0b1f
+ */
dc0b1f
+static ossl_inline void _mul_limb(limb_t *hi, limb_t *lo, limb_t a, limb_t b)
dc0b1f
+{
dc0b1f
+    limb_t a_low = (limb_t)(uint32_t)a;
dc0b1f
+    limb_t a_hi = a >> 32;
dc0b1f
+    limb_t b_low = (limb_t)(uint32_t)b;
dc0b1f
+    limb_t b_hi = b >> 32;
dc0b1f
+
dc0b1f
+    limb_t p0 = a_low * b_low;
dc0b1f
+    limb_t p1 = a_low * b_hi;
dc0b1f
+    limb_t p2 = a_hi * b_low;
dc0b1f
+    limb_t p3 = a_hi * b_hi;
dc0b1f
+
dc0b1f
+    uint32_t cy = (uint32_t)(((p0 >> 32) + (uint32_t)p1 + (uint32_t)p2) >> 32);
dc0b1f
+
dc0b1f
+    *lo = p0 + (p1 << 32) + (p2 << 32);
dc0b1f
+    *hi = p3 + (p1 >> 32) + (p2 >> 32) + cy;
dc0b1f
+}
dc0b1f
+#endif
dc0b1f
+
dc0b1f
+/* add two limbs with carry in, return carry out */
dc0b1f
+static ossl_inline limb_t _add_limb(limb_t *ret, limb_t a, limb_t b, limb_t carry)
dc0b1f
+{
dc0b1f
+    limb_t carry1, carry2, t;
dc0b1f
+    /*
dc0b1f
+     * `c = a + b; if (c < a)` is idiomatic code that makes compilers
dc0b1f
+     * use add with carry on assembly level
dc0b1f
+     */
dc0b1f
+
dc0b1f
+    *ret = a + carry;
dc0b1f
+    if (*ret < a)
dc0b1f
+        carry1 = 1;
dc0b1f
+    else
dc0b1f
+        carry1 = 0;
dc0b1f
+
dc0b1f
+    t = *ret;
dc0b1f
+    *ret = t + b;
dc0b1f
+    if (*ret < t)
dc0b1f
+        carry2 = 1;
dc0b1f
+    else
dc0b1f
+        carry2 = 0;
dc0b1f
+
dc0b1f
+    return carry1 + carry2;
dc0b1f
+}
dc0b1f
+
dc0b1f
+/*
dc0b1f
+ * add two numbers of the same size, return overflow
dc0b1f
+ *
dc0b1f
+ * add a to b, place result in ret; all arrays need to be n limbs long
dc0b1f
+ * return overflow from addition (0 or 1)
dc0b1f
+ */
dc0b1f
+static ossl_inline limb_t add(limb_t *ret, limb_t *a, limb_t *b, size_t n)
dc0b1f
+{
dc0b1f
+    limb_t c = 0;
dc0b1f
+    ossl_ssize_t i;
dc0b1f
+
dc0b1f
+    for(i = n - 1; i > -1; i--)
dc0b1f
+        c = _add_limb(&ret[i], a[i], b[i], c);
dc0b1f
+
dc0b1f
+    return c;
dc0b1f
+}
dc0b1f
+
dc0b1f
+/*
dc0b1f
+ * return number of limbs necessary for temporary values
dc0b1f
+ * when multiplying numbers n limbs large
dc0b1f
+ */
dc0b1f
+static ossl_inline size_t mul_limb_numb(size_t n)
dc0b1f
+{
dc0b1f
+    return  2 * n * 2;
dc0b1f
+}
dc0b1f
+
dc0b1f
+/*
dc0b1f
+ * multiply two numbers of the same size
dc0b1f
+ *
dc0b1f
+ * multiply a by b, place result in ret; a and b need to be n limbs long
dc0b1f
+ * ret needs to be 2*n limbs long, tmp needs to be mul_limb_numb(n) limbs
dc0b1f
+ * long
dc0b1f
+ */
dc0b1f
+static void limb_mul(limb_t *ret, limb_t *a, limb_t *b, size_t n, limb_t *tmp)
dc0b1f
+{
dc0b1f
+    limb_t *r_odd, *r_even;
dc0b1f
+    size_t i, j, k;
dc0b1f
+
dc0b1f
+    r_odd = tmp;
dc0b1f
+    r_even = &tmp[2 * n];
dc0b1f
+
dc0b1f
+    memset(ret, 0, 2 * n * sizeof(limb_t));
dc0b1f
+
dc0b1f
+    for (i = 0; i < n; i++) {
dc0b1f
+        for (k = 0; k < i + n + 1; k++) {
dc0b1f
+            r_even[k] = 0;
dc0b1f
+            r_odd[k] = 0;
dc0b1f
+        }
dc0b1f
+        for (j = 0; j < n; j++) {
dc0b1f
+            /*
dc0b1f
+             * place results from even and odd limbs in separate arrays so that
dc0b1f
+             * we don't have to calculate overflow every time we get individual
dc0b1f
+             * limb multiplication result
dc0b1f
+             */
dc0b1f
+            if (j % 2 == 0)
dc0b1f
+                _mul_limb(&r_even[i + j], &r_even[i + j + 1], a[i], b[j]);
dc0b1f
+            else
dc0b1f
+                _mul_limb(&r_odd[i + j], &r_odd[i + j + 1], a[i], b[j]);
dc0b1f
+        }
dc0b1f
+        /*
dc0b1f
+         * skip the least significant limbs when adding multiples of
dc0b1f
+         * more significant limbs (they're zero anyway)
dc0b1f
+         */
dc0b1f
+        add(ret, ret, r_even, n + i + 1);
dc0b1f
+        add(ret, ret, r_odd, n + i + 1);
dc0b1f
+    }
dc0b1f
+}
dc0b1f
+
dc0b1f
+/* modifies the value in place by performing a right shift by one bit */
dc0b1f
+static ossl_inline void rshift1(limb_t *val, size_t n)
dc0b1f
+{
dc0b1f
+    limb_t shift_in = 0, shift_out = 0;
dc0b1f
+    size_t i;
dc0b1f
+
dc0b1f
+    for (i = 0; i < n; i++) {
dc0b1f
+        shift_out = val[i] & 1;
dc0b1f
+        val[i] = shift_in << (LIMB_BIT_SIZE - 1) | (val[i] >> 1);
dc0b1f
+        shift_in = shift_out;
dc0b1f
+    }
dc0b1f
+}
dc0b1f
+
dc0b1f
+/* extend the LSB of flag to all bits of limb */
dc0b1f
+static ossl_inline limb_t mk_mask(limb_t flag)
dc0b1f
+{
dc0b1f
+    flag |= flag << 1;
dc0b1f
+    flag |= flag << 2;
dc0b1f
+    flag |= flag << 4;
dc0b1f
+    flag |= flag << 8;
dc0b1f
+    flag |= flag << 16;
dc0b1f
+#if (LIMB_BYTE_SIZE == 8)
dc0b1f
+    flag |= flag << 32;
dc0b1f
+#endif
dc0b1f
+    return flag;
dc0b1f
+}
dc0b1f
+
dc0b1f
+/*
dc0b1f
+ * copy from either a or b to ret based on flag
dc0b1f
+ * when flag == 0, then copies from b
dc0b1f
+ * when flag == 1, then copies from a
dc0b1f
+ */
dc0b1f
+static ossl_inline void cselect(limb_t flag, limb_t *ret, limb_t *a, limb_t *b, size_t n)
dc0b1f
+{
dc0b1f
+    /*
dc0b1f
+     * would be more efficient with non volatile mask, but then gcc
dc0b1f
+     * generates code with jumps
dc0b1f
+     */
dc0b1f
+    volatile limb_t mask;
dc0b1f
+    size_t i;
dc0b1f
+
dc0b1f
+    mask = mk_mask(flag);
dc0b1f
+    for (i = 0; i < n; i++) {
dc0b1f
+#if (LIMB_BYTE_SIZE == 8)
dc0b1f
+        ret[i] = constant_time_select_64(mask, a[i], b[i]);
dc0b1f
+#else
dc0b1f
+        ret[i] = constant_time_select_32(mask, a[i], b[i]);
dc0b1f
+#endif
dc0b1f
+    }
dc0b1f
+}
dc0b1f
+
dc0b1f
+static limb_t _sub_limb(limb_t *ret, limb_t a, limb_t b, limb_t borrow)
dc0b1f
+{
dc0b1f
+    limb_t borrow1, borrow2, t;
dc0b1f
+    /*
dc0b1f
+     * while it doesn't look constant-time, this is idiomatic code
dc0b1f
+     * to tell compilers to use the carry bit from subtraction
dc0b1f
+     */
dc0b1f
+
dc0b1f
+    *ret = a - borrow;
dc0b1f
+    if (*ret > a)
dc0b1f
+        borrow1 = 1;
dc0b1f
+    else
dc0b1f
+        borrow1 = 0;
dc0b1f
+
dc0b1f
+    t = *ret;
dc0b1f
+    *ret = t - b;
dc0b1f
+    if (*ret > t)
dc0b1f
+        borrow2 = 1;
dc0b1f
+    else
dc0b1f
+        borrow2 = 0;
dc0b1f
+
dc0b1f
+    return borrow1 + borrow2;
dc0b1f
+}
dc0b1f
+
dc0b1f
+/*
dc0b1f
+ * place the result of a - b into ret, return the borrow bit.
dc0b1f
+ * All arrays need to be n limbs long
dc0b1f
+ */
dc0b1f
+static limb_t sub(limb_t *ret, limb_t *a, limb_t *b, size_t n)
dc0b1f
+{
dc0b1f
+    limb_t borrow = 0;
dc0b1f
+    ossl_ssize_t i;
dc0b1f
+
dc0b1f
+    for (i = n - 1; i > -1; i--)
dc0b1f
+        borrow = _sub_limb(&ret[i], a[i], b[i], borrow);
dc0b1f
+
dc0b1f
+    return borrow;
dc0b1f
+}
dc0b1f
+
dc0b1f
+/* return the number of limbs necessary to allocate for the mod() tmp operand */
dc0b1f
+static ossl_inline size_t mod_limb_numb(size_t anum, size_t modnum)
dc0b1f
+{
dc0b1f
+    return (anum + modnum) * 3;
dc0b1f
+}
dc0b1f
+
dc0b1f
+/*
dc0b1f
+ * calculate a % mod, place the result in ret
dc0b1f
+ * size of a is defined by anum, size of ret and mod is modnum,
dc0b1f
+ * size of tmp is returned by mod_limb_numb()
dc0b1f
+ */
dc0b1f
+static void mod(limb_t *ret, limb_t *a, size_t anum, limb_t *mod,
dc0b1f
+               size_t modnum, limb_t *tmp)
dc0b1f
+{
dc0b1f
+    limb_t *atmp, *modtmp, *rettmp;
dc0b1f
+    limb_t res;
dc0b1f
+    size_t i;
dc0b1f
+
dc0b1f
+    memset(tmp, 0, mod_limb_numb(anum, modnum) * LIMB_BYTE_SIZE);
dc0b1f
+
dc0b1f
+    atmp = tmp;
dc0b1f
+    modtmp = &tmp[anum + modnum];
dc0b1f
+    rettmp = &tmp[(anum + modnum) * 2];
dc0b1f
+
dc0b1f
+    for (i = modnum; i < anum + modnum; i++)
dc0b1f
+        atmp[i] = a[i-modnum];
dc0b1f
+
dc0b1f
+    for (i = 0; i < modnum; i++)
dc0b1f
+        modtmp[i] = mod[i];
dc0b1f
+
dc0b1f
+    for (i = 0; i < anum * LIMB_BIT_SIZE; i++) {
dc0b1f
+        rshift1(modtmp, anum + modnum);
dc0b1f
+        res = sub(rettmp, atmp, modtmp, anum+modnum);
dc0b1f
+        cselect(res, atmp, atmp, rettmp, anum+modnum);
dc0b1f
+    }
dc0b1f
+
dc0b1f
+    memcpy(ret, &atmp[anum], sizeof(limb_t) * modnum);
dc0b1f
+}
dc0b1f
+
dc0b1f
+/* necessary size of tmp for a _mul_add_limb() call with provided anum */
dc0b1f
+static ossl_inline size_t _mul_add_limb_numb(size_t anum)
dc0b1f
+{
dc0b1f
+    return 2 * (anum + 1);
dc0b1f
+}
dc0b1f
+
dc0b1f
+/* multiply a by m, add to ret, return carry */
dc0b1f
+static limb_t _mul_add_limb(limb_t *ret, limb_t *a, size_t anum,
dc0b1f
+                           limb_t m, limb_t *tmp)
dc0b1f
+{
dc0b1f
+    limb_t carry = 0;
dc0b1f
+    limb_t *r_odd, *r_even;
dc0b1f
+    size_t i;
dc0b1f
+
dc0b1f
+    memset(tmp, 0, sizeof(limb_t) * (anum + 1) * 2);
dc0b1f
+
dc0b1f
+    r_odd = tmp;
dc0b1f
+    r_even = &tmp[anum + 1];
dc0b1f
+
dc0b1f
+    for (i = 0; i < anum; i++) {
dc0b1f
+        /*
dc0b1f
+         * place the results from even and odd limbs in separate arrays
dc0b1f
+         * so that we have to worry about carry just once
dc0b1f
+         */
dc0b1f
+        if (i % 2 == 0)
dc0b1f
+            _mul_limb(&r_even[i], &r_even[i + 1], a[i], m);
dc0b1f
+        else
dc0b1f
+            _mul_limb(&r_odd[i], &r_odd[i + 1], a[i], m);
dc0b1f
+    }
dc0b1f
+    /* assert: add() carry here will be equal zero */
dc0b1f
+    add(r_even, r_even, r_odd, anum + 1);
dc0b1f
+    /*
dc0b1f
+     * while here it will not overflow as the max value from multiplication
dc0b1f
+     * is -2 while max overflow from addition is 1, so the max value of
dc0b1f
+     * carry is -1 (i.e. max int)
dc0b1f
+     */
dc0b1f
+    carry = add(ret, ret, &r_even[1], anum) + r_even[0];
dc0b1f
+
dc0b1f
+    return carry;
dc0b1f
+}
dc0b1f
+
dc0b1f
+static ossl_inline size_t mod_montgomery_limb_numb(size_t modnum)
dc0b1f
+{
dc0b1f
+    return modnum * 2 + _mul_add_limb_numb(modnum);
dc0b1f
+}
dc0b1f
+
dc0b1f
+/*
dc0b1f
+ * calculate a % mod, place result in ret
dc0b1f
+ * assumes that a is in Montgomery form with the R (Montgomery modulus) being
dc0b1f
+ * smallest power of two big enough to fit mod and that's also a power
dc0b1f
+ * of the count of number of bits in limb_t (B).
dc0b1f
+ * For calculation, we also need n', such that mod * n' == -1 mod B.
dc0b1f
+ * anum must be <= 2 * modnum
dc0b1f
+ * ret needs to be modnum words long
dc0b1f
+ * tmp needs to be mod_montgomery_limb_numb(modnum) limbs long
dc0b1f
+ */
dc0b1f
+static void mod_montgomery(limb_t *ret, limb_t *a, size_t anum, limb_t *mod,
dc0b1f
+                          size_t modnum, limb_t ni0, limb_t *tmp)
dc0b1f
+{
dc0b1f
+    limb_t carry, v;
dc0b1f
+    limb_t *res, *rp, *tmp2;
dc0b1f
+    ossl_ssize_t i;
dc0b1f
+
dc0b1f
+    res = tmp;
dc0b1f
+    /*
dc0b1f
+     * for intermediate result we need an integer twice as long as modulus
dc0b1f
+     * but keep the input in the least significant limbs
dc0b1f
+     */
dc0b1f
+    memset(res, 0, sizeof(limb_t) * (modnum * 2));
dc0b1f
+    memcpy(&res[modnum * 2 - anum], a, sizeof(limb_t) * anum);
dc0b1f
+    rp = &res[modnum];
dc0b1f
+    tmp2 = &res[modnum * 2];
dc0b1f
+
dc0b1f
+    carry = 0;
dc0b1f
+
dc0b1f
+    /* add multiples of the modulus to the value until R divides it cleanly */
dc0b1f
+    for (i = modnum; i > 0; i--, rp--) {
dc0b1f
+        v = _mul_add_limb(rp, mod, modnum, rp[modnum - 1] * ni0, tmp2);
dc0b1f
+        v = v + carry + rp[-1];
dc0b1f
+        carry |= (v != rp[-1]);
dc0b1f
+        carry &= (v <= rp[-1]);
dc0b1f
+        rp[-1] = v;
dc0b1f
+    }
dc0b1f
+
dc0b1f
+    /* perform the final reduction by mod... */
dc0b1f
+    carry -= sub(ret, rp, mod, modnum);
dc0b1f
+
dc0b1f
+    /* ...conditionally */
dc0b1f
+    cselect(carry, ret, rp, ret, modnum);
dc0b1f
+}
dc0b1f
+
dc0b1f
+/* allocated buffer should be freed afterwards */
dc0b1f
+static void BN_to_limb(const BIGNUM *bn, limb_t *buf, size_t limbs)
dc0b1f
+{
dc0b1f
+    int i;
dc0b1f
+    int real_limbs = (BN_num_bytes(bn) + LIMB_BYTE_SIZE - 1) / LIMB_BYTE_SIZE;
dc0b1f
+    limb_t *ptr = buf + (limbs - real_limbs);
dc0b1f
+
dc0b1f
+    for (i = 0; i < real_limbs; i++)
dc0b1f
+         ptr[i] = bn->d[real_limbs - i - 1];
dc0b1f
+}
dc0b1f
+
dc0b1f
+#if LIMB_BYTE_SIZE == 8
dc0b1f
+static ossl_inline uint64_t be64(uint64_t host)
dc0b1f
+{
dc0b1f
+    const union {
dc0b1f
+        long one;
dc0b1f
+        char little;
dc0b1f
+    } is_endian = { 1 };
dc0b1f
+
dc0b1f
+    if (is_endian.little) {
dc0b1f
+        uint64_t big = 0;
dc0b1f
+
dc0b1f
+        big |= (host & 0xff00000000000000) >> 56;
dc0b1f
+        big |= (host & 0x00ff000000000000) >> 40;
dc0b1f
+        big |= (host & 0x0000ff0000000000) >> 24;
dc0b1f
+        big |= (host & 0x000000ff00000000) >>  8;
dc0b1f
+        big |= (host & 0x00000000ff000000) <<  8;
dc0b1f
+        big |= (host & 0x0000000000ff0000) << 24;
dc0b1f
+        big |= (host & 0x000000000000ff00) << 40;
dc0b1f
+        big |= (host & 0x00000000000000ff) << 56;
dc0b1f
+        return big;
dc0b1f
+    } else {
dc0b1f
+        return host;
dc0b1f
+    }
dc0b1f
+}
dc0b1f
+
dc0b1f
+#else
dc0b1f
+/* Not all platforms have htobe32(). */
dc0b1f
+static ossl_inline uint32_t be32(uint32_t host)
dc0b1f
+{
dc0b1f
+    const union {
dc0b1f
+        long one;
dc0b1f
+        char little;
dc0b1f
+    } is_endian = { 1 };
dc0b1f
+
dc0b1f
+    if (is_endian.little) {
dc0b1f
+        uint32_t big = 0;
dc0b1f
+
dc0b1f
+        big |= (host & 0xff000000) >> 24;
dc0b1f
+        big |= (host & 0x00ff0000) >> 8;
dc0b1f
+        big |= (host & 0x0000ff00) << 8;
dc0b1f
+        big |= (host & 0x000000ff) << 24;
dc0b1f
+        return big;
dc0b1f
+    } else {
dc0b1f
+        return host;
dc0b1f
+    }
dc0b1f
+}
dc0b1f
+#endif
dc0b1f
+
dc0b1f
+/*
dc0b1f
+ * We assume that intermediate, possible_arg2, blinding, and ctx are used
dc0b1f
+ * similar to BN_BLINDING_invert_ex() arguments.
dc0b1f
+ * to_mod is RSA modulus.
dc0b1f
+ * buf and num is the serialization buffer and its length.
dc0b1f
+ *
dc0b1f
+ * Here we use classic/Montgomery multiplication and modulo. After the calculation finished
dc0b1f
+ * we serialize the new structure instead of BIGNUMs taking endianness into account.
dc0b1f
+ */
dc0b1f
+int ossl_bn_rsa_do_unblind(const BIGNUM *intermediate,
dc0b1f
+                           const BN_BLINDING *blinding,
dc0b1f
+                           const BIGNUM *possible_arg2,
dc0b1f
+                           const BIGNUM *to_mod, BN_CTX *ctx,
dc0b1f
+                           unsigned char *buf, int num)
dc0b1f
+{
dc0b1f
+    limb_t *l_im = NULL, *l_mul = NULL, *l_mod = NULL;
dc0b1f
+    limb_t *l_ret = NULL, *l_tmp = NULL, l_buf;
dc0b1f
+    size_t l_im_count = 0, l_mul_count = 0, l_size = 0, l_mod_count = 0;
dc0b1f
+    size_t l_tmp_count = 0;
dc0b1f
+    int ret = 0;
dc0b1f
+    size_t i;
dc0b1f
+    unsigned char *tmp;
dc0b1f
+    const BIGNUM *arg1 = intermediate;
dc0b1f
+    const BIGNUM *arg2 = (possible_arg2 == NULL) ? blinding->Ai : possible_arg2;
dc0b1f
+
dc0b1f
+    l_im_count  = (BN_num_bytes(arg1)   + LIMB_BYTE_SIZE - 1) / LIMB_BYTE_SIZE;
dc0b1f
+    l_mul_count = (BN_num_bytes(arg2)   + LIMB_BYTE_SIZE - 1) / LIMB_BYTE_SIZE;
dc0b1f
+    l_mod_count = (BN_num_bytes(to_mod) + LIMB_BYTE_SIZE - 1) / LIMB_BYTE_SIZE;
dc0b1f
+
dc0b1f
+    l_size = l_im_count > l_mul_count ? l_im_count : l_mul_count;
dc0b1f
+    l_im  = OPENSSL_zalloc(l_size * LIMB_BYTE_SIZE);
dc0b1f
+    l_mul = OPENSSL_zalloc(l_size * LIMB_BYTE_SIZE);
dc0b1f
+    l_mod = OPENSSL_zalloc(l_mod_count * LIMB_BYTE_SIZE);
dc0b1f
+
dc0b1f
+    if ((l_im == NULL) || (l_mul == NULL) || (l_mod == NULL))
dc0b1f
+        goto err;
dc0b1f
+
dc0b1f
+    BN_to_limb(arg1,   l_im,  l_size);
dc0b1f
+    BN_to_limb(arg2,   l_mul, l_size);
dc0b1f
+    BN_to_limb(to_mod, l_mod, l_mod_count);
dc0b1f
+
dc0b1f
+    l_ret = OPENSSL_malloc(2 * l_size * LIMB_BYTE_SIZE);
dc0b1f
+
dc0b1f
+    if (blinding->m_ctx != NULL) {
dc0b1f
+        l_tmp_count = mul_limb_numb(l_size) > mod_montgomery_limb_numb(l_mod_count) ?
dc0b1f
+                      mul_limb_numb(l_size) : mod_montgomery_limb_numb(l_mod_count);
dc0b1f
+        l_tmp = OPENSSL_malloc(l_tmp_count * LIMB_BYTE_SIZE);
dc0b1f
+    } else {
dc0b1f
+        l_tmp_count = mul_limb_numb(l_size) > mod_limb_numb(2 * l_size, l_mod_count) ?
dc0b1f
+                      mul_limb_numb(l_size) : mod_limb_numb(2 * l_size, l_mod_count);
dc0b1f
+        l_tmp = OPENSSL_malloc(l_tmp_count * LIMB_BYTE_SIZE);
dc0b1f
+    }
dc0b1f
+
dc0b1f
+    if ((l_ret == NULL) || (l_tmp == NULL))
dc0b1f
+        goto err;
dc0b1f
+
dc0b1f
+    if (blinding->m_ctx != NULL) {
dc0b1f
+        limb_mul(l_ret, l_im, l_mul, l_size, l_tmp);
dc0b1f
+        mod_montgomery(l_ret, l_ret, 2 * l_size, l_mod, l_mod_count,
dc0b1f
+                       blinding->m_ctx->n0[0], l_tmp);
dc0b1f
+    } else {
dc0b1f
+        limb_mul(l_ret, l_im, l_mul, l_size, l_tmp);
dc0b1f
+        mod(l_ret, l_ret, 2 * l_size, l_mod, l_mod_count, l_tmp);
dc0b1f
+    }
dc0b1f
+
dc0b1f
+    /* modulus size in bytes can be equal to num but after limbs conversion it becomes bigger */
dc0b1f
+    if (num < BN_num_bytes(to_mod)) {
dc0b1f
+        BNerr(BN_F_OSSL_BN_RSA_DO_UNBLIND, ERR_R_PASSED_INVALID_ARGUMENT);
dc0b1f
+        goto err;
dc0b1f
+    }
dc0b1f
+
dc0b1f
+    memset(buf, 0, num);
dc0b1f
+    tmp = buf + num - BN_num_bytes(to_mod);
dc0b1f
+    for (i = 0; i < l_mod_count; i++) {
dc0b1f
+#if LIMB_BYTE_SIZE == 8
dc0b1f
+        l_buf = be64(l_ret[i]);
dc0b1f
+#else
dc0b1f
+        l_buf = be32(l_ret[i]);
dc0b1f
+#endif
dc0b1f
+        if (i == 0) {
dc0b1f
+            int delta = LIMB_BYTE_SIZE - ((l_mod_count * LIMB_BYTE_SIZE) - num);
dc0b1f
+
dc0b1f
+            memcpy(tmp, ((char *)&l_buf) + LIMB_BYTE_SIZE - delta, delta);
dc0b1f
+            tmp += delta;
dc0b1f
+        } else {
dc0b1f
+            memcpy(tmp, &l_buf, LIMB_BYTE_SIZE);
dc0b1f
+            tmp += LIMB_BYTE_SIZE;
dc0b1f
+        }
dc0b1f
+    }
dc0b1f
+    ret = num;
dc0b1f
+
dc0b1f
+ err:
dc0b1f
+    OPENSSL_free(l_im);
dc0b1f
+    OPENSSL_free(l_mul);
dc0b1f
+    OPENSSL_free(l_mod);
dc0b1f
+    OPENSSL_free(l_tmp);
dc0b1f
+    OPENSSL_free(l_ret);
dc0b1f
+
dc0b1f
+    return ret;
dc0b1f
+}
dc0b1f
diff --git a/crypto/err/openssl.txt b/crypto/err/openssl.txt
dc0b1f
index 9f91a4a811..ba3a46d5b9 100644
dc0b1f
--- a/crypto/err/openssl.txt
dc0b1f
+++ b/crypto/err/openssl.txt
dc0b1f
@@ -1,4 +1,4 @@
dc0b1f
-# Copyright 1999-2021 The OpenSSL Project Authors. All Rights Reserved.
dc0b1f
+# Copyright 1999-2023 The OpenSSL Project Authors. All Rights Reserved.
dc0b1f
 #
dc0b1f
 # Licensed under the OpenSSL license (the "License").  You may not use
dc0b1f
 # this file except in compliance with the License.  You can obtain a copy
dc0b1f
@@ -232,6 +232,7 @@ BN_F_BN_RSHIFT:146:BN_rshift
dc0b1f
 BN_F_BN_SET_WORDS:144:bn_set_words
dc0b1f
 BN_F_BN_STACK_PUSH:148:BN_STACK_push
dc0b1f
 BN_F_BN_USUB:115:BN_usub
dc0b1f
+BN_F_OSSL_BN_RSA_DO_UNBLIND:151:ossl_bn_rsa_do_unblind
dc0b1f
 BUF_F_BUF_MEM_GROW:100:BUF_MEM_grow
dc0b1f
 BUF_F_BUF_MEM_GROW_CLEAN:105:BUF_MEM_grow_clean
dc0b1f
 BUF_F_BUF_MEM_NEW:101:BUF_MEM_new
dc0b1f
diff --git a/crypto/rsa/rsa_ossl.c b/crypto/rsa/rsa_ossl.c
dc0b1f
index b52a66f6a6..6c3c0cf78d 100644
dc0b1f
--- a/crypto/rsa/rsa_ossl.c
dc0b1f
+++ b/crypto/rsa/rsa_ossl.c
dc0b1f
@@ -465,11 +465,20 @@ static int rsa_ossl_private_decrypt(int flen, const unsigned char *from,
dc0b1f
         BN_free(d);
dc0b1f
     }
dc0b1f
 
dc0b1f
-    if (blinding)
dc0b1f
-        if (!rsa_blinding_invert(blinding, ret, unblind, ctx))
dc0b1f
+    if (blinding) {
dc0b1f
+        /*
dc0b1f
+         * ossl_bn_rsa_do_unblind() combines blinding inversion and
dc0b1f
+         * 0-padded BN BE serialization
dc0b1f
+         */
dc0b1f
+        j = ossl_bn_rsa_do_unblind(ret, blinding, unblind, rsa->n, ctx,
dc0b1f
+                                   buf, num);
dc0b1f
+        if (j == 0)
dc0b1f
             goto err;
dc0b1f
-
dc0b1f
-    j = BN_bn2binpad(ret, buf, num);
dc0b1f
+    } else {
dc0b1f
+        j = BN_bn2binpad(ret, buf, num);
dc0b1f
+        if (j < 0)
dc0b1f
+            goto err;
dc0b1f
+    }
dc0b1f
 
dc0b1f
     switch (padding) {
dc0b1f
     case RSA_PKCS1_PADDING:
dc0b1f
diff --git a/include/crypto/bn.h b/include/crypto/bn.h
dc0b1f
index 60afda1dad..b5f36fb25a 100644
dc0b1f
--- a/include/crypto/bn.h
dc0b1f
+++ b/include/crypto/bn.h
dc0b1f
@@ -86,5 +86,10 @@ int bn_lshift_fixed_top(BIGNUM *r, const BIGNUM *a, int n);
dc0b1f
 int bn_rshift_fixed_top(BIGNUM *r, const BIGNUM *a, int n);
dc0b1f
 int bn_div_fixed_top(BIGNUM *dv, BIGNUM *rem, const BIGNUM *m,
dc0b1f
                      const BIGNUM *d, BN_CTX *ctx);
dc0b1f
+int ossl_bn_rsa_do_unblind(const BIGNUM *intermediate,
dc0b1f
+                           const BN_BLINDING *blinding,
dc0b1f
+                           const BIGNUM *possible_arg2,
dc0b1f
+                           const BIGNUM *to_mod, BN_CTX *ctx,
dc0b1f
+                           unsigned char *buf, int num);
dc0b1f
 
dc0b1f
 #endif
dc0b1f
diff --git a/include/openssl/bnerr.h b/include/openssl/bnerr.h
dc0b1f
index 9f3c7cfaab..a0752cea52 100644
dc0b1f
--- a/include/openssl/bnerr.h
dc0b1f
+++ b/include/openssl/bnerr.h
dc0b1f
@@ -72,6 +72,7 @@ int ERR_load_BN_strings(void);
dc0b1f
 # define BN_F_BN_SET_WORDS                                144
dc0b1f
 # define BN_F_BN_STACK_PUSH                               148
dc0b1f
 # define BN_F_BN_USUB                                     115
dc0b1f
+# define BN_F_OSSL_BN_RSA_DO_UNBLIND                      151
dc0b1f
 
dc0b1f
 /*
dc0b1f
  * BN reason codes.
dc0b1f
-- 
dc0b1f
2.39.1
dc0b1f