Blame SOURCES/valgrind-3.14.0-memcheck-new-IROps.patch

560544
commit e221eca26be6b2396e3fcbf4117e630fc22e79f6
560544
Author: Julian Seward <jseward@acm.org>
560544
Date:   Tue Nov 20 11:28:42 2018 +0100
560544
560544
    Add Memcheck support for IROps added in 42719898.
560544
    
560544
    memcheck/mc_translate.c:
560544
    
560544
    Add mkRight{32,64} as right-travelling analogues to mkLeft{32,64}.
560544
    
560544
    doCmpORD: for the cases of a signed comparison against zero, compute
560544
    definedness of the 3 result bits (lt,gt,eq) separately, and, for the lt and eq
560544
    bits, do it exactly accurately.
560544
    
560544
    expensiveCountTrailingZeroes: no functional change.  Re-analyse/verify and add
560544
    comments.
560544
    
560544
    expensiveCountLeadingZeroes: add.  Very similar to
560544
    expensiveCountTrailingZeroes.
560544
    
560544
    Add some comments to mark unary ops which are self-shadowing.
560544
    
560544
    Route Iop_Ctz{,Nat}{32,64} through expensiveCountTrailingZeroes.
560544
    Route Iop_Clz{,Nat}{32,64} through expensiveCountLeadingZeroes.
560544
    
560544
    Add instrumentation for Iop_PopCount{32,64} and Iop_Reverse8sIn32_x1.
560544
    
560544
    memcheck/tests/vbit-test/irops.c
560544
    
560544
    Add dummy new entries for all new IROps, just enough to make it compile and
560544
    run.
560544
560544
diff --git a/memcheck/mc_translate.c b/memcheck/mc_translate.c
560544
index 68a2ab3..c24db91 100644
560544
--- a/memcheck/mc_translate.c
560544
+++ b/memcheck/mc_translate.c
560544
@@ -737,6 +737,34 @@ static IRAtom* mkLeft64 ( MCEnv* mce, IRAtom* a1 ) {
560544
    return assignNew('V', mce, Ity_I64, unop(Iop_Left64, a1));
560544
 }
560544
 
560544
+/* --------- The Right-family of operations. --------- */
560544
+
560544
+/* Unfortunately these are a lot more expensive then their Left
560544
+   counterparts.  Fortunately they are only very rarely used -- only for
560544
+   count-leading-zeroes instrumentation. */
560544
+
560544
+static IRAtom* mkRight32 ( MCEnv* mce, IRAtom* a1 )
560544
+{
560544
+   for (Int i = 1; i <= 16; i *= 2) {
560544
+      // a1 |= (a1 >>u i)
560544
+      IRAtom* tmp
560544
+         = assignNew('V', mce, Ity_I32, binop(Iop_Shr32, a1, mkU8(i)));
560544
+      a1 = assignNew('V', mce, Ity_I32, binop(Iop_Or32, a1, tmp));
560544
+   }
560544
+   return a1;
560544
+}
560544
+
560544
+static IRAtom* mkRight64 ( MCEnv* mce, IRAtom* a1 )
560544
+{
560544
+   for (Int i = 1; i <= 32; i *= 2) {
560544
+      // a1 |= (a1 >>u i)
560544
+      IRAtom* tmp
560544
+         = assignNew('V', mce, Ity_I64, binop(Iop_Shr64, a1, mkU8(i)));
560544
+      a1 = assignNew('V', mce, Ity_I64, binop(Iop_Or64, a1, tmp));
560544
+   }
560544
+   return a1;
560544
+}
560544
+
560544
 /* --------- 'Improvement' functions for AND/OR. --------- */
560544
 
560544
 /* ImproveAND(data, vbits) = data OR vbits.  Defined (0) data 0s give
560544
@@ -1280,20 +1308,18 @@ static IRAtom* doCmpORD ( MCEnv*  mce,
560544
                           IRAtom* xxhash, IRAtom* yyhash, 
560544
                           IRAtom* xx,     IRAtom* yy )
560544
 {
560544
-   Bool   m64    = cmp_op == Iop_CmpORD64S || cmp_op == Iop_CmpORD64U;
560544
-   Bool   syned  = cmp_op == Iop_CmpORD64S || cmp_op == Iop_CmpORD32S;
560544
-   IROp   opOR   = m64 ? Iop_Or64  : Iop_Or32;
560544
-   IROp   opAND  = m64 ? Iop_And64 : Iop_And32;
560544
-   IROp   opSHL  = m64 ? Iop_Shl64 : Iop_Shl32;
560544
-   IROp   opSHR  = m64 ? Iop_Shr64 : Iop_Shr32;
560544
-   IRType ty     = m64 ? Ity_I64   : Ity_I32;
560544
-   Int    width  = m64 ? 64        : 32;
560544
+   Bool   m64      = cmp_op == Iop_CmpORD64S || cmp_op == Iop_CmpORD64U;
560544
+   Bool   syned    = cmp_op == Iop_CmpORD64S || cmp_op == Iop_CmpORD32S;
560544
+   IROp   opOR     = m64 ? Iop_Or64   : Iop_Or32;
560544
+   IROp   opAND    = m64 ? Iop_And64  : Iop_And32;
560544
+   IROp   opSHL    = m64 ? Iop_Shl64  : Iop_Shl32;
560544
+   IROp   opSHR    = m64 ? Iop_Shr64  : Iop_Shr32;
560544
+   IROp   op1UtoWS = m64 ? Iop_1Uto64 : Iop_1Uto32;
560544
+   IRType ty       = m64 ? Ity_I64    : Ity_I32;
560544
+   Int    width    = m64 ? 64         : 32;
560544
 
560544
    Bool (*isZero)(IRAtom*) = m64 ? isZeroU64 : isZeroU32;
560544
 
560544
-   IRAtom* threeLeft1 = NULL;
560544
-   IRAtom* sevenLeft1 = NULL;
560544
-
560544
    tl_assert(isShadowAtom(mce,xxhash));
560544
    tl_assert(isShadowAtom(mce,yyhash));
560544
    tl_assert(isOriginalAtom(mce,xx));
560544
@@ -1312,30 +1338,55 @@ static IRAtom* doCmpORD ( MCEnv*  mce,
560544
       /* fancy interpretation */
560544
       /* if yy is zero, then it must be fully defined (zero#). */
560544
       tl_assert(isZero(yyhash));
560544
-      threeLeft1 = m64 ? mkU64(3<<1) : mkU32(3<<1);
560544
+      // This is still inaccurate, but I don't think it matters, since
560544
+      // nobody writes code of the form
560544
+      // "is <partially-undefined-value> signedly greater than zero?".
560544
+      // We therefore simply declare "x >s 0" to be undefined if any bit in
560544
+      // x is undefined.  That's clearly suboptimal in some cases.  Eg, if
560544
+      // the highest order bit is a defined 1 then x is negative so it
560544
+      // doesn't matter whether the remaining bits are defined or not.
560544
+      IRAtom* t_0_gt_0_0
560544
+         = assignNew(
560544
+              'V', mce,ty,
560544
+              binop(
560544
+                 opAND,
560544
+                 mkPCastTo(mce,ty, xxhash),
560544
+                 m64 ? mkU64(1<<2) : mkU32(1<<2)
560544
+              ));
560544
+      // For "x <s 0", we can just copy the definedness of the top bit of x
560544
+      // and we have a precise result.
560544
+      IRAtom* t_lt_0_0_0
560544
+         = assignNew(
560544
+              'V', mce,ty,
560544
+              binop(
560544
+                 opSHL,
560544
+                 assignNew(
560544
+                    'V', mce,ty,
560544
+                    binop(opSHR, xxhash, mkU8(width-1))),
560544
+                 mkU8(3)
560544
+              ));
560544
+      // For "x == 0" we can hand the problem off to expensiveCmpEQorNE.
560544
+      IRAtom* t_0_0_eq_0
560544
+         = assignNew(
560544
+              'V', mce,ty,
560544
+              binop(
560544
+                 opSHL,
560544
+                 assignNew('V', mce,ty,
560544
+                    unop(
560544
+                    op1UtoWS,
560544
+                    expensiveCmpEQorNE(mce, ty, xxhash, yyhash, xx, yy))
560544
+                 ),
560544
+                 mkU8(1)
560544
+              ));
560544
       return
560544
          binop(
560544
             opOR,
560544
-            assignNew(
560544
-               'V', mce,ty,
560544
-               binop(
560544
-                  opAND,
560544
-                  mkPCastTo(mce,ty, xxhash), 
560544
-                  threeLeft1
560544
-               )),
560544
-            assignNew(
560544
-               'V', mce,ty,
560544
-               binop(
560544
-                  opSHL,
560544
-                  assignNew(
560544
-                     'V', mce,ty,
560544
-                     binop(opSHR, xxhash, mkU8(width-1))),
560544
-                  mkU8(3)
560544
-               ))
560544
-	 );
560544
+            assignNew('V', mce,ty, binop(opOR, t_lt_0_0_0, t_0_gt_0_0)),
560544
+            t_0_0_eq_0
560544
+         );
560544
    } else {
560544
       /* standard interpretation */
560544
-      sevenLeft1 = m64 ? mkU64(7<<1) : mkU32(7<<1);
560544
+      IRAtom* sevenLeft1 = m64 ? mkU64(7<<1) : mkU32(7<<1);
560544
       return 
560544
          binop( 
560544
             opAND, 
560544
@@ -2211,14 +2262,14 @@ IRAtom* expensiveCountTrailingZeroes ( MCEnv* mce, IROp czop,
560544
    tl_assert(sameKindedAtoms(atom,vatom));
560544
 
560544
    switch (czop) {
560544
-      case Iop_Ctz32:
560544
+      case Iop_Ctz32: case Iop_CtzNat32:
560544
          ty = Ity_I32;
560544
          xorOp = Iop_Xor32;
560544
          subOp = Iop_Sub32;
560544
          andOp = Iop_And32;
560544
          one = mkU32(1);
560544
          break;
560544
-      case Iop_Ctz64:
560544
+      case Iop_Ctz64: case Iop_CtzNat64:
560544
          ty = Ity_I64;
560544
          xorOp = Iop_Xor64;
560544
          subOp = Iop_Sub64;
560544
@@ -2232,8 +2283,30 @@ IRAtom* expensiveCountTrailingZeroes ( MCEnv* mce, IROp czop,
560544
 
560544
    // improver = atom ^ (atom - 1)
560544
    //
560544
-   // That is, improver has its low ctz(atom) bits equal to one;
560544
-   // higher bits (if any) equal to zero.
560544
+   // That is, improver has its low ctz(atom)+1 bits equal to one;
560544
+   // higher bits (if any) equal to zero.  So it's exactly the right
560544
+   // mask to use to remove the irrelevant undefined input bits.
560544
+   /* Here are some examples:
560544
+         atom   = U...U 1 0...0
560544
+         atom-1 = U...U 0 1...1
560544
+         ^ed    = 0...0 1 11111, which correctly describes which bits of |atom|
560544
+                                 actually influence the result
560544
+      A boundary case
560544
+         atom   = 0...0
560544
+         atom-1 = 1...1
560544
+         ^ed    = 11111, also a correct mask for the input: all input bits
560544
+                         are relevant
560544
+      Another boundary case
560544
+         atom   = 1..1 1
560544
+         atom-1 = 1..1 0
560544
+         ^ed    = 0..0 1, also a correct mask: only the rightmost input bit
560544
+                          is relevant
560544
+      Now with misc U bits interspersed:
560544
+         atom   = U...U 1 0 U...U 0 1 0...0
560544
+         atom-1 = U...U 1 0 U...U 0 0 1...1
560544
+         ^ed    = 0...0 0 0 0...0 0 1 1...1, also correct
560544
+      (Per re-check/analysis of 14 Nov 2018)
560544
+   */
560544
    improver = assignNew('V', mce,ty,
560544
                         binop(xorOp,
560544
                               atom,
560544
@@ -2242,8 +2315,96 @@ IRAtom* expensiveCountTrailingZeroes ( MCEnv* mce, IROp czop,
560544
 
560544
    // improved = vatom & improver
560544
    //
560544
-   // That is, treat any V bits above the first ctz(atom) bits as
560544
-   // "defined".
560544
+   // That is, treat any V bits to the left of the rightmost ctz(atom)+1
560544
+   // bits as "defined".
560544
+   improved = assignNew('V', mce, ty,
560544
+                        binop(andOp, vatom, improver));
560544
+
560544
+   // Return pessimizing cast of improved.
560544
+   return mkPCastTo(mce, ty, improved);
560544
+}
560544
+
560544
+static
560544
+IRAtom* expensiveCountLeadingZeroes ( MCEnv* mce, IROp czop,
560544
+                                      IRAtom* atom, IRAtom* vatom )
560544
+{
560544
+   IRType ty;
560544
+   IROp shrOp, notOp, andOp;
560544
+   IRAtom* (*mkRight)(MCEnv*, IRAtom*);
560544
+   IRAtom *improver, *improved;
560544
+   tl_assert(isShadowAtom(mce,vatom));
560544
+   tl_assert(isOriginalAtom(mce,atom));
560544
+   tl_assert(sameKindedAtoms(atom,vatom));
560544
+
560544
+   switch (czop) {
560544
+      case Iop_Clz32: case Iop_ClzNat32:
560544
+         ty = Ity_I32;
560544
+         shrOp = Iop_Shr32;
560544
+         notOp = Iop_Not32;
560544
+         andOp = Iop_And32;
560544
+         mkRight = mkRight32;
560544
+         break;
560544
+      case Iop_Clz64: case Iop_ClzNat64:
560544
+         ty = Ity_I64;
560544
+         shrOp = Iop_Shr64;
560544
+         notOp = Iop_Not64;
560544
+         andOp = Iop_And64;
560544
+         mkRight = mkRight64;
560544
+         break;
560544
+      default:
560544
+         ppIROp(czop);
560544
+         VG_(tool_panic)("memcheck:expensiveCountLeadingZeroes");
560544
+   }
560544
+
560544
+   // This is in principle very similar to how expensiveCountTrailingZeroes
560544
+   // works.  That function computed an "improver", which it used to mask
560544
+   // off all but the rightmost 1-bit and the zeroes to the right of it,
560544
+   // hence removing irrelevant bits from the input.  Here, we play the
560544
+   // exact same game but with the left-vs-right roles interchanged.
560544
+   // Unfortunately calculation of the improver in this case is
560544
+   // significantly more expensive.
560544
+   //
560544
+   // improver = ~(RIGHT(atom) >>u 1)
560544
+   //
560544
+   // That is, improver has its upper clz(atom)+1 bits equal to one;
560544
+   // lower bits (if any) equal to zero.  So it's exactly the right
560544
+   // mask to use to remove the irrelevant undefined input bits.
560544
+   /* Here are some examples:
560544
+         atom             = 0...0 1 U...U
560544
+         R(atom)          = 0...0 1 1...1
560544
+         R(atom) >>u 1    = 0...0 0 1...1
560544
+         ~(R(atom) >>u 1) = 1...1 1 0...0
560544
+                            which correctly describes which bits of |atom|
560544
+                            actually influence the result
560544
+      A boundary case
560544
+         atom             = 0...0
560544
+         R(atom)          = 0...0
560544
+         R(atom) >>u 1    = 0...0
560544
+         ~(R(atom) >>u 1) = 1...1
560544
+                            also a correct mask for the input: all input bits
560544
+                            are relevant
560544
+      Another boundary case
560544
+         atom             = 1 1..1
560544
+         R(atom)          = 1 1..1
560544
+         R(atom) >>u 1    = 0 1..1
560544
+         ~(R(atom) >>u 1) = 1 0..0
560544
+                            also a correct mask: only the leftmost input bit
560544
+                            is relevant
560544
+      Now with misc U bits interspersed:
560544
+         atom             = 0...0 1 U...U 0 1 U...U
560544
+         R(atom)          = 0...0 1 1...1 1 1 1...1
560544
+         R(atom) >>u 1    = 0...0 0 1...1 1 1 1...1
560544
+         ~(R(atom) >>u 1) = 1...1 1 0...0 0 0 0...0, also correct
560544
+      (Per initial implementation of 15 Nov 2018)
560544
+   */
560544
+   improver = mkRight(mce, atom);
560544
+   improver = assignNew('V', mce, ty, binop(shrOp, improver, mkU8(1)));
560544
+   improver = assignNew('V', mce, ty, unop(notOp, improver));
560544
+
560544
+   // improved = vatom & improver
560544
+   //
560544
+   // That is, treat any V bits to the right of the leftmost clz(atom)+1
560544
+   // bits as "defined".
560544
    improved = assignNew('V', mce, ty,
560544
                         binop(andOp, vatom, improver));
560544
 
560544
@@ -4705,6 +4866,7 @@ IRExpr* expr2vbits_Unop ( MCEnv* mce, IROp op, IRAtom* atom )
560544
       case Iop_RecipEst32F0x4:
560544
          return unary32F0x4(mce, vatom);
560544
 
560544
+      // These are self-shadowing.
560544
       case Iop_32UtoV128:
560544
       case Iop_64UtoV128:
560544
       case Iop_Dup8x16:
560544
@@ -4745,6 +4907,7 @@ IRExpr* expr2vbits_Unop ( MCEnv* mce, IROp op, IRAtom* atom )
560544
       case Iop_MulI128by10Carry:
560544
       case Iop_F16toF64x2:
560544
       case Iop_F64toF16x2:
560544
+         // FIXME JRS 2018-Nov-15.  This is surely not correct!
560544
          return vatom;
560544
 
560544
       case Iop_I32StoF128: /* signed I32 -> F128 */
560544
@@ -4770,7 +4933,6 @@ IRExpr* expr2vbits_Unop ( MCEnv* mce, IROp op, IRAtom* atom )
560544
       case Iop_RoundF64toF64_NegINF:
560544
       case Iop_RoundF64toF64_PosINF:
560544
       case Iop_RoundF64toF64_ZERO:
560544
-      case Iop_Clz64:
560544
       case Iop_D32toD64:
560544
       case Iop_I32StoD64:
560544
       case Iop_I32UtoD64:
560544
@@ -4785,17 +4947,32 @@ IRExpr* expr2vbits_Unop ( MCEnv* mce, IROp op, IRAtom* atom )
560544
       case Iop_D64toD128:
560544
          return mkPCastTo(mce, Ity_I128, vatom);
560544
 
560544
-      case Iop_Clz32:
560544
       case Iop_TruncF64asF32:
560544
       case Iop_NegF32:
560544
       case Iop_AbsF32:
560544
       case Iop_F16toF32: 
560544
          return mkPCastTo(mce, Ity_I32, vatom);
560544
 
560544
-      case Iop_Ctz32:
560544
-      case Iop_Ctz64:
560544
+      case Iop_Ctz32: case Iop_CtzNat32:
560544
+      case Iop_Ctz64: case Iop_CtzNat64:
560544
          return expensiveCountTrailingZeroes(mce, op, atom, vatom);
560544
 
560544
+      case Iop_Clz32: case Iop_ClzNat32:
560544
+      case Iop_Clz64: case Iop_ClzNat64:
560544
+         return expensiveCountLeadingZeroes(mce, op, atom, vatom);
560544
+
560544
+      // PopCount32: this is slightly pessimistic.  It is true that the
560544
+      // result depends on all input bits, so that aspect of the PCast is
560544
+      // correct.  However, regardless of the input, only the lowest 5 bits
560544
+      // out of the output can ever be undefined.  So we could actually
560544
+      // "improve" the results here by marking the top 27 bits of output as
560544
+      // defined.  A similar comment applies for PopCount64.
560544
+      case Iop_PopCount32:
560544
+         return mkPCastTo(mce, Ity_I32, vatom);
560544
+      case Iop_PopCount64:
560544
+         return mkPCastTo(mce, Ity_I64, vatom);
560544
+
560544
+      // These are self-shadowing.
560544
       case Iop_1Uto64:
560544
       case Iop_1Sto64:
560544
       case Iop_8Uto64:
560544
@@ -4821,6 +4998,7 @@ IRExpr* expr2vbits_Unop ( MCEnv* mce, IROp op, IRAtom* atom )
560544
       case Iop_V256to64_2: case Iop_V256to64_3:
560544
          return assignNew('V', mce, Ity_I64, unop(op, vatom));
560544
 
560544
+      // These are self-shadowing.
560544
       case Iop_64to32:
560544
       case Iop_64HIto32:
560544
       case Iop_1Uto32:
560544
@@ -4830,8 +5008,10 @@ IRExpr* expr2vbits_Unop ( MCEnv* mce, IROp op, IRAtom* atom )
560544
       case Iop_16Sto32:
560544
       case Iop_8Sto32:
560544
       case Iop_V128to32:
560544
+      case Iop_Reverse8sIn32_x1:
560544
          return assignNew('V', mce, Ity_I32, unop(op, vatom));
560544
 
560544
+      // These are self-shadowing.
560544
       case Iop_8Sto16:
560544
       case Iop_8Uto16:
560544
       case Iop_32to16:
560544
@@ -4840,6 +5020,7 @@ IRExpr* expr2vbits_Unop ( MCEnv* mce, IROp op, IRAtom* atom )
560544
       case Iop_GetMSBs8x16:
560544
          return assignNew('V', mce, Ity_I16, unop(op, vatom));
560544
 
560544
+      // These are self-shadowing.
560544
       case Iop_1Uto8:
560544
       case Iop_1Sto8:
560544
       case Iop_16to8:
560544
@@ -4868,6 +5049,7 @@ IRExpr* expr2vbits_Unop ( MCEnv* mce, IROp op, IRAtom* atom )
560544
       case Iop_Not16:
560544
       case Iop_Not8:
560544
       case Iop_Not1:
560544
+         // FIXME JRS 2018-Nov-15.  This is surely not correct!
560544
          return vatom;
560544
 
560544
       case Iop_CmpNEZ8x8:
560544
@@ -4929,6 +5111,7 @@ IRExpr* expr2vbits_Unop ( MCEnv* mce, IROp op, IRAtom* atom )
560544
       case Iop_Ctz64x2:
560544
          return mkPCast64x2(mce, vatom);
560544
 
560544
+      // This is self-shadowing.
560544
       case Iop_PwBitMtxXpose64x2:
560544
          return assignNew('V', mce, Ity_V128, unop(op, vatom));
560544
 
560544
diff --git a/memcheck/tests/vbit-test/irops.c b/memcheck/tests/vbit-test/irops.c
560544
index bfd82fc..e8bf67d 100644
560544
--- a/memcheck/tests/vbit-test/irops.c
560544
+++ b/memcheck/tests/vbit-test/irops.c
560544
@@ -111,6 +111,12 @@ static irop_t irops[] = {
560544
   { DEFOP(Iop_Clz32,      UNDEF_ALL),  .s390x = 0, .amd64 = 0, .x86 = 1, .arm = 1, .ppc64 = 1, .ppc32 = 1, .mips32 =1, .mips64 = 1 },
560544
   { DEFOP(Iop_Ctz64,      UNDEF_ALL),  .s390x = 0, .amd64 = 1, .x86 = 0, .arm = 0, .ppc64 = 0, .ppc32 = 0, .mips32 =0, .mips64 = 0 },
560544
   { DEFOP(Iop_Ctz32,      UNDEF_ALL),  .s390x = 0, .amd64 = 0, .x86 = 1, .arm = 0, .ppc64 = 0, .ppc32 = 0, .mips32 =0, .mips64 = 0 },
560544
+  { DEFOP(Iop_ClzNat64,   UNDEF_ALL),  .s390x = 0, .amd64 = 0, .x86 = 0, .arm = 0, .ppc64 = 1, .ppc32 = 0, .mips32 =0, .mips64 = 0 }, // ppc32 asserts
560544
+  { DEFOP(Iop_ClzNat32,   UNDEF_ALL),  .s390x = 0, .amd64 = 0, .x86 = 0, .arm = 0, .ppc64 = 1, .ppc32 = 1, .mips32 =0, .mips64 = 0 },
560544
+  { DEFOP(Iop_CtzNat64,   UNDEF_ALL),  .s390x = 0, .amd64 = 0, .x86 = 0, .arm = 0, .ppc64 = 1, .ppc32 = 0, .mips32 =0, .mips64 = 0 },
560544
+  { DEFOP(Iop_CtzNat32,   UNDEF_ALL),  .s390x = 0, .amd64 = 0, .x86 = 0, .arm = 0, .ppc64 = 0, .ppc32 = 1, .mips32 =0, .mips64 = 0 },
560544
+  { DEFOP(Iop_PopCount64, UNDEF_ALL),  .s390x = 0, .amd64 = 0, .x86 = 0, .arm = 0, .ppc64 = 1, .ppc32 = 0, .mips32 =0, .mips64 = 0 },
560544
+  { DEFOP(Iop_PopCount32, UNDEF_ALL),  .s390x = 0, .amd64 = 0, .x86 = 0, .arm = 0, .ppc64 = 1, .ppc32 = 1, .mips32 =0, .mips64 = 0 },
560544
   { DEFOP(Iop_CmpLT32S,   UNDEF_ALL),  .s390x = 1, .amd64 = 1, .x86 = 1, .arm = 1, .ppc64 = 1, .ppc32 = 1, .mips32 =1, .mips64 = 1 },
560544
   { DEFOP(Iop_CmpLT64S,   UNDEF_ALL),  .s390x = 1, .amd64 = 1, .x86 = 0, .arm = 0, .ppc64 = 0, .ppc32 = 0, .mips32 =0, .mips64 = 1 }, // ppc, mips assert
560544
   { DEFOP(Iop_CmpLE32S,   UNDEF_ALL),  .s390x = 1, .amd64 = 1, .x86 = 1, .arm = 1, .ppc64 = 1, .ppc32 = 1, .mips32 =1, .mips64 = 1 },
560544
@@ -336,6 +342,7 @@ static irop_t irops[] = {
560544
   { DEFOP(Iop_Sad8Ux4, UNDEF_UNKNOWN), },
560544
   { DEFOP(Iop_CmpNEZ16x2, UNDEF_UNKNOWN), },
560544
   { DEFOP(Iop_CmpNEZ8x4, UNDEF_UNKNOWN), },
560544
+  { DEFOP(Iop_Reverse8sIn32_x1, UNDEF_UNKNOWN) },
560544
   /* ------------------ 64-bit SIMD FP ------------------------ */
560544
   { DEFOP(Iop_I32UtoFx2, UNDEF_UNKNOWN), },
560544
   { DEFOP(Iop_I32StoFx2, UNDEF_UNKNOWN), },