commit 3cc0232c46a5905b4a6c2fbd302b58bf5f90b3d5
Author: Carl Love <cel@us.ibm.com>
Date:   Mon Jan 11 16:00:57 2021 -0600

    PPC64: ISA 3.1 VSX PCV Generate Operations

    xxgenpcvbm  VSX Vector Generate PCV from Byte Mask
    xxgenpcvdm  VSX Vector Generate PCV from Doubleword Mask
    xxgenpcvhm  VSX Vector Generate PCV from Halfword Mask
    xxgenpcvwm  VSX Vector Generate PCV from Word Mask
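
The xxgenpcv* instructions read an element mask from VSR[VRB+32] and build a
permute control vector (PCV) that a following vperm/xxperm (or vpermr/xxpermr)
can use to compress or expand the selected elements.  A minimal sketch of the
byte-mask, big-endian compression case (IMM=0b00001) in plain C; the names
gen_pcv_byte_compress_be and the small test driver are illustrative only and
are not part of this patch or of Valgrind:

   #include <stdio.h>

   /* Illustrative sketch only: pack the indices of the mask-selected source
      bytes into the leftmost bytes of the permute control vector.  Bytes are
      numbered IBM-style, byte 0 on the left; the selector is the most
      significant bit of each mask byte, as in the dirty helper below. */
   static void gen_pcv_byte_compress_be( const unsigned char mask[16],
                                         unsigned char pcv[16] )
   {
      int i, j = 0;

      for (i = 0; i < 16; i++)
         pcv[i] = 0;              /* unselected slots are architecturally
                                     undefined; the helper leaves them 0 */

      for (i = 0; i < 16; i++)
         if (mask[i] & 0x80)
            pcv[j++] = (unsigned char)i;  /* next leftmost slot gets index i */
   }

   int main( void )
   {
      unsigned char mask[16] = { 0 }, pcv[16];
      int k;

      mask[1] = mask[4] = mask[9] = 0x80;  /* select source bytes 1, 4 and 9 */
      gen_pcv_byte_compress_be( mask, pcv );

      for (k = 0; k < 16; k++)
         printf( "%02x ", pcv[k] );        /* prints 01 04 09 00 00 ... */
      printf( "\n" );
      return 0;
   }

A left-indexed xxperm using such a pcv then gathers the selected source bytes
into the leftmost bytes of the result, which is exactly what the helpers added
below compute for each element width.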
|
diff --git a/VEX/priv/guest_ppc_defs.h b/VEX/priv/guest_ppc_defs.h
index deda4dfce..54ce923a9 100644
--- a/VEX/priv/guest_ppc_defs.h
+++ b/VEX/priv/guest_ppc_defs.h
@@ -169,6 +169,23 @@ void write_ACC_entry (VexGuestPPC64State* gst, UInt offset, UInt acc,
 void get_ACC_entry (VexGuestPPC64State* gst, UInt offset, UInt acc,
                     UInt reg, UInt *result);
 
+extern void vector_gen_pvc_byte_mask_dirty_helper( VexGuestPPC64State* gst,
+                                                   ULong src_hi,
+                                                   ULong src_lo,
+                                                   UInt rtn_val, UInt IMM );
+extern void vector_gen_pvc_hword_mask_dirty_helper( VexGuestPPC64State* gst,
+                                                    ULong src_hi,
+                                                    ULong src_lo,
+                                                    UInt rtn_val, UInt IMM );
+extern void vector_gen_pvc_word_mask_dirty_helper( VexGuestPPC64State* gst,
+                                                   ULong src_hi,
+                                                   ULong src_lo,
+                                                   UInt rtn_val, UInt IMM );
+extern void vector_gen_pvc_dword_mask_dirty_helper( VexGuestPPC64State* gst,
+                                                    ULong src_hi,
+                                                    ULong src_lo,
+                                                    UInt rtn_val, UInt IMM );
+
 /* 8-bit XO value from instruction description */
 #define XVI4GER8       0b00100011
 #define XVI4GER8PP     0b00100010
diff --git a/VEX/priv/guest_ppc_helpers.c b/VEX/priv/guest_ppc_helpers.c
|
|
|
8febcd |
index c24191ef3..75497abb9 100644
|
|
|
8febcd |
--- a/VEX/priv/guest_ppc_helpers.c
|
|
|
8febcd |
+++ b/VEX/priv/guest_ppc_helpers.c
|
|
|
8febcd |
@@ -701,6 +701,738 @@ ULong vector_evaluate64_helper( ULong srcA, ULong srcB, ULong srcC,
|
|
|
8febcd |
#undef MAX_IMM_BITS
|
|
|
8febcd |
}
|
|
|
8febcd |
|
|
|
8febcd |
+/*--------------------------------------------------*/
|
|
|
8febcd |
+/*---- VSX Vector Generate PCV from Mask helpers ---*/
|
|
|
8febcd |
+/*--------------------------------------------------*/
|
|
|
8febcd |
+static void write_VSX_entry (VexGuestPPC64State* gst, UInt reg_offset,
|
|
|
8febcd |
+ ULong *vsx_entry)
|
|
|
8febcd |
+{
|
|
|
8febcd |
+ U128* pU128_dst;
|
|
|
8febcd |
+ pU128_dst = (U128*) (((UChar*) gst) + reg_offset);
|
|
|
8febcd |
+
|
|
|
8febcd |
+ /* The U128 type is defined as an array of unsigned integers. */
|
|
|
8febcd |
+ /* Writing in LE order */
|
|
|
8febcd |
+ (*pU128_dst)[0] = (UInt)(vsx_entry[1] & 0xFFFFFFFF);
|
|
|
8febcd |
+ (*pU128_dst)[1] = (UInt)(vsx_entry[1] >> 32);
|
|
|
8febcd |
+ (*pU128_dst)[2] = (UInt)(vsx_entry[0] & 0xFFFFFFFF);
|
|
|
8febcd |
+ (*pU128_dst)[3] = (UInt)(vsx_entry[0] >> 32);
|
|
|
8febcd |
+ return;
|
|
|
8febcd |
+}
|
|
|
8febcd |
+
|
|
|
8febcd |
+/* CALLED FROM GENERATED CODE */
|
|
|
8febcd |
+void vector_gen_pvc_byte_mask_dirty_helper( VexGuestPPC64State* gst,
|
|
|
8febcd |
+ ULong src_hi, ULong src_lo,
|
|
|
8febcd |
+ UInt reg_offset, UInt imm ) {
|
|
|
8febcd |
+ /* The function computes the 128-bit result then writes it directly
|
|
|
8febcd |
+ into the guest state VSX register. */
|
|
|
8febcd |
+
|
|
|
8febcd |
+ UInt i, shift_by, sel_shift_by, half_sel;
|
|
|
8febcd |
+ ULong index, src, result[2];
|
|
|
8febcd |
+ ULong j;
|
|
|
8febcd |
+
|
|
|
8febcd |
+ result[0] = 0;
|
|
|
8febcd |
+ result[1] = 0;
|
|
|
8febcd |
+ j = 0;
|
|
|
8febcd |
+
|
|
|
8febcd |
+ /* The algorithm in the ISA is written with IBM numbering zero on left and
|
|
|
8febcd |
+ N-1 on right. The loop index is converted to "i" to match the algorithm
|
|
|
8febcd |
+ for clarity of matching the C code to the algorithm in the ISA. */
|
|
|
8febcd |
+
|
|
|
8febcd |
+ if (imm == 0b00) { // big endian expansion
|
|
|
8febcd |
+ for( index = 0; index < 16; index++) {
|
|
|
8febcd |
+ i = 15 - index;
|
|
|
8febcd |
+
|
|
|
8febcd |
+ shift_by = i*8;
|
|
|
8febcd |
+
|
|
|
8febcd |
+ if ( i >= 8) {
|
|
|
8febcd |
+ src = src_hi;
|
|
|
8febcd |
+ shift_by = shift_by - 64;
|
|
|
8febcd |
+ half_sel = 0;
|
|
|
8febcd |
+ } else {
|
|
|
8febcd |
+ src = src_lo;
|
|
|
8febcd |
+ half_sel = 1;
|
|
|
8febcd |
+ }
|
|
|
8febcd |
+
|
|
|
8febcd |
+ sel_shift_by = shift_by + 7;
|
|
|
8febcd |
+
|
|
|
8febcd |
+ if ( ((src >> sel_shift_by) & 0x1) == 1) {
|
|
|
8febcd |
+ result[half_sel] |= j << shift_by;
|
|
|
8febcd |
+ j++;
|
|
|
8febcd |
+ } else {
|
|
|
8febcd |
+ result[half_sel] |= (index + (unsigned long long)0x10) << shift_by;
|
|
|
8febcd |
+ }
|
|
|
8febcd |
+ }
|
|
|
8febcd |
+
|
|
|
8febcd |
+
|
|
|
8febcd |
+ } else if (imm == 0b01) { // big endian compression
|
|
|
8febcd |
+ /* If IMM=0b00001, let pcv be the permute control vector required to
|
|
|
8febcd |
+ enable a left-indexed permute (vperm or xxperm) to implement a
|
|
|
8febcd |
+ compression of the sparse byte elements in a source vector specified
|
|
|
8febcd |
+ by the byte-element mask in VSR[VRB+32] into the leftmost byte
|
|
|
8febcd |
+ elements of a result vector.
|
|
|
8febcd |
+ */
|
|
|
8febcd |
+ for( index = 0; index < 16; index++) {
|
|
|
8febcd |
+ i = 15 - index;
|
|
|
8febcd |
+ shift_by = i*8;
|
|
|
8febcd |
+
|
|
|
8febcd |
+ if ( i >= 8) {
|
|
|
8febcd |
+ src = src_hi;
|
|
|
8febcd |
+ shift_by = shift_by - 64;
|
|
|
8febcd |
+ half_sel = 0;
|
|
|
8febcd |
+ } else {
|
|
|
8febcd |
+ src = src_lo;
|
|
|
8febcd |
+ half_sel = 1;
|
|
|
8febcd |
+ }
|
|
|
8febcd |
+
|
|
|
8febcd |
+ sel_shift_by = shift_by + 7;
|
|
|
8febcd |
+
|
|
|
8febcd |
+ if ( ((src >> sel_shift_by) & 0x1) == 1) {
|
|
|
8febcd |
+ if (j >= 8)
|
|
|
8febcd |
+ result[1] |= (index) << (15 - j)*8;
|
|
|
8febcd |
+ else
|
|
|
8febcd |
+ result[0] |= (index) << (7 - j)*8;
|
|
|
8febcd |
+ j++;
|
|
|
8febcd |
+ }
|
|
|
8febcd |
+ }
|
|
|
8febcd |
+ /* The algorithm says set to undefined, leave as 0
|
|
|
8febcd |
+ for( index = 3 - j; index < 4; index++) {
|
|
|
8febcd |
+ result |= (0 << (index*8));
|
|
|
8febcd |
+ }
|
|
|
8febcd |
+ */
|
|
|
8febcd |
+
|
|
|
8febcd |
+ } else if (imm == 0b10) { //little-endian expansion
|
|
|
8febcd |
+ /* If IMM=0b00010, let pcv be the permute control vector required to
|
|
|
8febcd |
+ enable a right-indexed permute (vpermr or xxpermr) to implement an
|
|
|
8febcd |
+ expansion of the rightmost byte elements of a source vector into the
|
|
|
8febcd |
+ byte elements of a result vector specified by the byte-element mask
|
|
|
8febcd |
+ in VSR[VRB+32]. */
|
|
|
8febcd |
+ for( index = 0; index < 16; index++) {
|
|
|
8febcd |
+ i = index;
|
|
|
8febcd |
+
|
|
|
8febcd |
+ shift_by = i*8;
|
|
|
8febcd |
+
|
|
|
8febcd |
+ if ( i >= 8) {
|
|
|
8febcd |
+ src = src_hi;
|
|
|
8febcd |
+ shift_by = shift_by - 64;
|
|
|
8febcd |
+ half_sel = 0;
|
|
|
8febcd |
+ } else {
|
|
|
8febcd |
+ src = src_lo;
|
|
|
8febcd |
+ half_sel = 1;
|
|
|
8febcd |
+ }
|
|
|
8febcd |
+
|
|
|
8febcd |
+ sel_shift_by = shift_by + 7;
|
|
|
8febcd |
+
|
|
|
8febcd |
+ /* mod shift amount by 8 since src is either the upper or lower
|
|
|
8febcd |
+ 64-bits. */
|
|
|
8febcd |
+ if ( ((src >> sel_shift_by) & 0x1) == 1) {
|
|
|
8febcd |
+ result[half_sel] |= j << shift_by;
|
|
|
8febcd |
+ j++;
|
|
|
8febcd |
+ } else {
|
|
|
8febcd |
+ result[half_sel] |= (index + (unsigned long long)0x10) << shift_by;
|
|
|
8febcd |
+ }
|
|
|
8febcd |
+ }
|
|
|
8febcd |
+
|
|
|
8febcd |
+ } else if (imm == 0b11) { //little-endian compression
|
|
|
8febcd |
+ /* If IMM=0b00011, let pcv be the permute control vector required to
|
|
|
8febcd |
+ enable a right-indexed permute (vpermr or xxpermr) to implement a
|
|
|
8febcd |
+ compression of the sparse byte elements in a source vector specified
|
|
|
8febcd |
+ by the byte-element mask in VSR[VRB+32] into the rightmost byte
|
|
|
8febcd |
+ elements of a result vector. */
|
|
|
8febcd |
+
|
|
|
8febcd |
+ for( index = 0; index < 16; index++) {
|
|
|
8febcd |
+ i = index;
|
|
|
8febcd |
+
|
|
|
8febcd |
+ shift_by = i*8;
|
|
|
8febcd |
+
|
|
|
8febcd |
+ if ( i >= 8) {
|
|
|
8febcd |
+ src = src_hi;
|
|
|
8febcd |
+ shift_by = shift_by - 64;
|
|
|
8febcd |
+ half_sel = 0;
|
|
|
8febcd |
+ } else {
|
|
|
8febcd |
+ src = src_lo;
|
|
|
8febcd |
+ half_sel = 1;
|
|
|
8febcd |
+ }
|
|
|
8febcd |
+
|
|
|
8febcd |
+ sel_shift_by = shift_by + 7;
|
|
|
8febcd |
+
|
|
|
8febcd |
+ if ( ((src >> sel_shift_by) & 0x1) == 1) {
|
|
|
8febcd |
+ if (j >= 8)
|
|
|
8febcd |
+ result[0] |= (index) << (j-8)*8;
|
|
|
8febcd |
+ else
|
|
|
8febcd |
+ result[1] |= (index) << j*8;
|
|
|
8febcd |
+ j++;
|
|
|
8febcd |
+ }
|
|
|
8febcd |
+ }
|
|
|
8febcd |
+
|
|
|
8febcd |
+ /* The algorithm says set to undefined, leave as 0
|
|
|
8febcd |
+ for( index = 3 - j; index < 4; index++) {
|
|
|
8febcd |
+ result |= (0 << (index*8));
|
|
|
8febcd |
+ }
|
|
|
8febcd |
+ */
|
|
|
8febcd |
+
|
|
|
8febcd |
+ } else {
|
|
|
8febcd |
+ vex_printf("ERROR, vector_gen_pvc_byte_mask_dirty_helper, imm value %u not supported.\n",
|
|
|
8febcd |
+ imm);
|
|
|
8febcd |
+ vassert(0);
|
|
|
8febcd |
+ }
|
|
|
8febcd |
+ write_VSX_entry( gst, reg_offset, result);
|
|
|
8febcd |
+}
|
|
|
8febcd |
+
|
|
|
8febcd |
+/* CALLED FROM GENERATED CODE */
|
|
|
8febcd |
+void vector_gen_pvc_hword_mask_dirty_helper( VexGuestPPC64State* gst,
|
|
|
8febcd |
+ ULong src_hi, ULong src_lo,
|
|
|
8febcd |
+ UInt reg_offset,
|
|
|
8febcd |
+ UInt imm ) {
|
|
|
8febcd |
+ /* The function computes the 128-bit result then writes it directly
|
|
|
8febcd |
+ into the guest state VSX register. */
|
|
|
8febcd |
+ UInt i, shift_by, sel_shift_by, half_sel;
|
|
|
8febcd |
+ ULong index, src, result[2];
|
|
|
8febcd |
+ ULong j;
|
|
|
8febcd |
+
|
|
|
8febcd |
+ result[0] = 0;
|
|
|
8febcd |
+ result[1] = 0;
|
|
|
8febcd |
+ j = 0;
|
|
|
8febcd |
+
|
|
|
8febcd |
+ /* The algorithm in the ISA is written with IBM numbering zero on left and
|
|
|
8febcd |
+ N-1 on right. The loop index is converted to "i" to match the algorithm
|
|
|
8febcd |
+ for clarity of matching the C code to the algorithm in the ISA. */
|
|
|
8febcd |
+
|
|
|
8febcd |
+ if (imm == 0b00) { // big endian expansion
|
|
|
8febcd |
+ /* If IMM=0b00000, let pcv be the permute control vector required to
|
|
|
8febcd |
+ enable a left-indexed permute (vperm or xxperm) to implement an
|
|
|
8febcd |
+ expansion of the leftmost halfword elements of a source vector into
|
|
|
8febcd |
+ the halfword elements of a result vector specified by the halfword-
|
|
|
8febcd |
+ element mask in VSR[VRB+32].
|
|
|
8febcd |
+ */
|
|
|
8febcd |
+ for( index = 0; index < 8; index++) {
|
|
|
8febcd |
+ i = 7 - index;
|
|
|
8febcd |
+
|
|
|
8febcd |
+ shift_by = i*16;
|
|
|
8febcd |
+
|
|
|
8febcd |
+ if ( i >= 4) {
|
|
|
8febcd |
+ src = src_hi;
|
|
|
8febcd |
+ shift_by = shift_by - 64;
|
|
|
8febcd |
+ half_sel = 0;
|
|
|
8febcd |
+ } else {
|
|
|
8febcd |
+ src = src_lo;
|
|
|
8febcd |
+ half_sel = 1;
|
|
|
8febcd |
+ }
|
|
|
8febcd |
+
|
|
|
8febcd |
+ sel_shift_by = shift_by + 15;
|
|
|
8febcd |
+
|
|
|
8febcd |
+ if ( ((src >> sel_shift_by) & 0x1) == 1) {
|
|
|
8febcd |
+ // half-word i, byte 0
|
|
|
8febcd |
+ result[half_sel] |= (2*j + 0x0) << (shift_by+8);
|
|
|
8febcd |
+ // half-word i, byte 1
|
|
|
8febcd |
+ result[half_sel] |= (2*j + 0x1) << shift_by;
|
|
|
8febcd |
+ j++;
|
|
|
8febcd |
+ } else {
|
|
|
8febcd |
+ result[half_sel] |= (2*index + 0x10) << (shift_by+8);
|
|
|
8febcd |
+ result[half_sel] |= (2*index + 0x11) << shift_by;
|
|
|
8febcd |
+ }
|
|
|
8febcd |
+ }
|
|
|
8febcd |
+
|
|
|
8febcd |
+ } else if (imm == 0b01) { // big endian expansion
|
|
|
8febcd |
+ /* If IMM=0b00001,let pcv be the permute control vector required to
|
|
|
8febcd |
+ enable a left-indexed permute (vperm or xxperm) to implement a
|
|
|
8febcd |
+ compression of the sparse halfword elements in a source vector
|
|
|
8febcd |
+ specified by the halfword-element mask in VSR[VRB+32] into the
|
|
|
8febcd |
+ leftmost halfword elements of a result vector.
|
|
|
8febcd |
+ */
|
|
|
8febcd |
+ for( index = 0; index < 8; index++) {
|
|
|
8febcd |
+ i = 7 - index;
|
|
|
8febcd |
+
|
|
|
8febcd |
+ shift_by = i*16;
|
|
|
8febcd |
+
|
|
|
8febcd |
+ if ( i >= 4) {
|
|
|
8febcd |
+ src = src_hi;
|
|
|
8febcd |
+ shift_by = shift_by - 64;
|
|
|
8febcd |
+ half_sel = 0;
|
|
|
8febcd |
+ } else {
|
|
|
8febcd |
+ src = src_lo;
|
|
|
8febcd |
+ half_sel = 1;
|
|
|
8febcd |
+ }
|
|
|
8febcd |
+
|
|
|
8febcd |
+ sel_shift_by = shift_by + 15;
|
|
|
8febcd |
+
|
|
|
8febcd |
+ if ( ((src >> sel_shift_by) & 0x1) == 1) {
|
|
|
8febcd |
+ if (j >= 4) {
|
|
|
8febcd |
+ // half-word i, byte 0
|
|
|
8febcd |
+ result[1] |= (2*index + 0x0) << ((7 - j)*16 + 8);
|
|
|
8febcd |
+ // half-word i, byte 1
|
|
|
8febcd |
+ result[1] |= (2*index + 0x1) << ((7 - j)*16);
|
|
|
8febcd |
+ } else {
|
|
|
8febcd |
+ // half-word i, byte 0
|
|
|
8febcd |
+ result[0] |= (2*index + 0x0) << ((3 - j)*16 + 8);
|
|
|
8febcd |
+ // half-word i, byte 1
|
|
|
8febcd |
+ result[0] |= (2*index + 0x1) << ((3 - j)*16);
|
|
|
8febcd |
+ }
|
|
|
8febcd |
+ j++;
|
|
|
8febcd |
+ }
|
|
|
8febcd |
+ }
|
|
|
8febcd |
+
|
|
|
8febcd |
+ } else if (imm == 0b10) { //little-endian expansion
|
|
|
8febcd |
+ /* If IMM=0b00010, let pcv be the permute control vector required to
|
|
|
8febcd |
+ enable a right-indexed permute (vpermr or xxpermr) to implement an
|
|
|
8febcd |
+ expansion of the rightmost halfword elements of a source vector into
|
|
|
8febcd |
+ the halfword elements of a result vector specified by the halfword-
|
|
|
8febcd |
+ element mask in VSR[VRB+32].
|
|
|
8febcd |
+ */
|
|
|
8febcd |
+ for( index = 0; index < 8; index++) {
|
|
|
8febcd |
+ i = index;
|
|
|
8febcd |
+ shift_by = i*16;
|
|
|
8febcd |
+
|
|
|
8febcd |
+ if ( i >= 4) {
|
|
|
8febcd |
+ src = src_hi;
|
|
|
8febcd |
+ shift_by = shift_by - 64;
|
|
|
8febcd |
+ half_sel = 0;
|
|
|
8febcd |
+ } else {
|
|
|
8febcd |
+ src = src_lo;
|
|
|
8febcd |
+ half_sel = 1;
|
|
|
8febcd |
+ }
|
|
|
8febcd |
+
|
|
|
8febcd |
+ sel_shift_by = shift_by + 15;
|
|
|
8febcd |
+
|
|
|
8febcd |
+ if ( ((src >> sel_shift_by) & 0x1) == 1) {
|
|
|
8febcd |
+ // half-word i, byte 0
|
|
|
8febcd |
+ result[half_sel] |= (2*j + 0x00) << shift_by;
|
|
|
8febcd |
+ // half-word i, byte 1
|
|
|
8febcd |
+ result[half_sel] |= (2*j + 0x01) << (shift_by+8);
|
|
|
8febcd |
+ j++;
|
|
|
8febcd |
+
|
|
|
8febcd |
+ } else {
|
|
|
8febcd |
+ // half-word i, byte 0
|
|
|
8febcd |
+ result[half_sel] |= (2*index + 0x10) << shift_by;
|
|
|
8febcd |
+ // half-word i, byte 1
|
|
|
8febcd |
+ result[half_sel] |= (2*index + 0x11) << (shift_by+8);
|
|
|
8febcd |
+ }
|
|
|
8febcd |
+ }
|
|
|
8febcd |
+
|
|
|
8febcd |
+ } else if (imm == 0b11) { //little-endian compression
|
|
|
8febcd |
+ /* If IMM=0b00011, let pcv be the permute control vector required to
|
|
|
8febcd |
+ enable a right-indexed permute (vpermr or xxpermr) to implement a
|
|
|
8febcd |
+ compression of the sparse halfword elements in a source vector
|
|
|
8febcd |
+ specified by the halfword-element mask in VSR[VRB+32] into the
|
|
|
8febcd |
+ rightmost halfword elements of a result vector. */
|
|
|
8febcd |
+ for( index = 0; index < 8; index++) {
|
|
|
8febcd |
+ i = index;
|
|
|
8febcd |
+ shift_by = i*16;
|
|
|
8febcd |
+
|
|
|
8febcd |
+ if ( i >= 4) {
|
|
|
8febcd |
+ src = src_hi;
|
|
|
8febcd |
+ shift_by = shift_by - 64;
|
|
|
8febcd |
+ half_sel = 0;
|
|
|
8febcd |
+ } else {
|
|
|
8febcd |
+ src = src_lo;
|
|
|
8febcd |
+ half_sel = 1;
|
|
|
8febcd |
+ }
|
|
|
8febcd |
+
|
|
|
8febcd |
+ sel_shift_by = shift_by + 15;
|
|
|
8febcd |
+
|
|
|
8febcd |
+ if ( ((src >> sel_shift_by) & 0x1) == 1) {
|
|
|
8febcd |
+ if (j >= 4) {
|
|
|
8febcd |
+ // half-word j, byte 0
|
|
|
8febcd |
+ result[0] |= (2*index + 0x0) << ((j-4)*16);
|
|
|
8febcd |
+ // half-word j, byte 1
|
|
|
8febcd |
+ result[0] |= (2*index + 0x1) << ((j-4)*16+8);
|
|
|
8febcd |
+ } else {
|
|
|
8febcd |
+ // half-word j, byte 0
|
|
|
8febcd |
+ result[1] |= (2*index + 0x0) << (j*16);
|
|
|
8febcd |
+ // half-word j, byte 1
|
|
|
8febcd |
+ result[1] |= (2*index + 0x1) << ((j*16)+8);
|
|
|
8febcd |
+ }
|
|
|
8febcd |
+ j++;
|
|
|
8febcd |
+ }
|
|
|
8febcd |
+ }
|
|
|
8febcd |
+
|
|
|
8febcd |
+ } else {
|
|
|
8febcd |
+ vex_printf("ERROR, vector_gen_pvc_hword_dirty_mask_helper, imm value %u not supported.\n",
|
|
|
8febcd |
+ imm);
|
|
|
8febcd |
+ vassert(0);
|
|
|
8febcd |
+ }
|
|
|
8febcd |
+ write_VSX_entry( gst, reg_offset, result);
|
|
|
8febcd |
+}
|
|
|
8febcd |
+
|
|
|
8febcd |
+/* CALLED FROM GENERATED CODE */
|
|
|
8febcd |
+void vector_gen_pvc_word_mask_dirty_helper( VexGuestPPC64State* gst,
|
|
|
8febcd |
+ ULong src_hi, ULong src_lo,
|
|
|
8febcd |
+ UInt reg_offset, UInt imm ) {
|
|
|
8febcd |
+ /* The function computes the 128-bit result then writes it directly
|
|
|
8febcd |
+ into the guest state VSX register. */
|
|
|
8febcd |
+ UInt i, shift_by, sel_shift_by, half_sel;
|
|
|
8febcd |
+ ULong index, src, result[2];
|
|
|
8febcd |
+ ULong j;
|
|
|
8febcd |
+
|
|
|
8febcd |
+ result[0] = 0;
|
|
|
8febcd |
+ result[1] = 0;
|
|
|
8febcd |
+ j = 0;
|
|
|
8febcd |
+
|
|
|
8febcd |
+ /* The algorithm in the ISA is written with IBM numbering zero on left and
|
|
|
8febcd |
+ N-1 on right. The loop index is converted to "i" to match the algorithm
|
|
|
8febcd |
+ for clarity of matching the C code to the algorithm in the ISA. */
|
|
|
8febcd |
+
|
|
|
8febcd |
+ if (imm == 0b00) { // big endian expansion
|
|
|
8febcd |
+ /* If IMM=0b00000, let pcv be the permute control vector required to
|
|
|
8febcd |
+ enable a left-indexed permute (vperm or xxperm) to implement an
|
|
|
8febcd |
+ expansion of the leftmost word elements of a source vector into the
|
|
|
8febcd |
+ word elements of a result vector specified by the word-element mask
|
|
|
8febcd |
+ in VSR[VRB+32].
|
|
|
8febcd |
+ */
|
|
|
8febcd |
+ for( index = 0; index < 4; index++) {
|
|
|
8febcd |
+ i = 3 - index;
|
|
|
8febcd |
+
|
|
|
8febcd |
+ shift_by = i*32;
|
|
|
8febcd |
+
|
|
|
8febcd |
+ if ( i >= 2) {
|
|
|
8febcd |
+ src = src_hi;
|
|
|
8febcd |
+ shift_by = shift_by - 64;
|
|
|
8febcd |
+ half_sel = 0;
|
|
|
8febcd |
+ } else {
|
|
|
8febcd |
+ src = src_lo;
|
|
|
8febcd |
+ half_sel = 1;
|
|
|
8febcd |
+ }
|
|
|
8febcd |
+
|
|
|
8febcd |
+ sel_shift_by = shift_by + 31;
|
|
|
8febcd |
+
|
|
|
8febcd |
+ if ( ((src >> sel_shift_by) & 0x1) == 1) {
|
|
|
8febcd |
+ result[half_sel] |= (4*j+0) << (shift_by+24); // word i, byte 0
|
|
|
8febcd |
+ result[half_sel] |= (4*j+1) << (shift_by+16); // word i, byte 1
|
|
|
8febcd |
+ result[half_sel] |= (4*j+2) << (shift_by+8); // word i, byte 2
|
|
|
8febcd |
+ result[half_sel] |= (4*j+3) << shift_by; // word i, byte 3
|
|
|
8febcd |
+ j++;
|
|
|
8febcd |
+ } else {
|
|
|
8febcd |
+ result[half_sel] |= (4*index + 0x10) << (shift_by+24);
|
|
|
8febcd |
+ result[half_sel] |= (4*index + 0x11) << (shift_by+16);
|
|
|
8febcd |
+ result[half_sel] |= (4*index + 0x12) << (shift_by+8);
|
|
|
8febcd |
+ result[half_sel] |= (4*index + 0x13) << shift_by;
|
|
|
8febcd |
+ }
|
|
|
8febcd |
+ }
|
|
|
8febcd |
+
|
|
|
8febcd |
+ } else if (imm == 0b01) { // big endian compression
|
|
|
8febcd |
+ /* If IMM=0b00001, let pcv be the permute control vector required to
|
|
|
8febcd |
+ enable a left-indexed permute (vperm or xxperm) to implement a
|
|
|
8febcd |
+ compression of the sparse word elements in a source vector specified
|
|
|
8febcd |
+ by the word-element mask in VSR[VRB+32] into the leftmost word
|
|
|
8febcd |
+ elements of a result vector.
|
|
|
8febcd |
+ */
|
|
|
8febcd |
+ for( index = 0; index < 4; index++) {
|
|
|
8febcd |
+ i = 3 - index;
|
|
|
8febcd |
+
|
|
|
8febcd |
+ shift_by = i*32;
|
|
|
8febcd |
+
|
|
|
8febcd |
+ if ( i >= 2) {
|
|
|
8febcd |
+ src = src_hi;
|
|
|
8febcd |
+ shift_by = shift_by - 64;
|
|
|
8febcd |
+ half_sel = 0;
|
|
|
8febcd |
+ } else {
|
|
|
8febcd |
+ src = src_lo;
|
|
|
8febcd |
+ half_sel = 1;
|
|
|
8febcd |
+ }
|
|
|
8febcd |
+
|
|
|
8febcd |
+ sel_shift_by = shift_by + 31;
|
|
|
8febcd |
+
|
|
|
8febcd |
+ if (((src >> sel_shift_by) & 0x1) == 1) {
|
|
|
8febcd |
+ if (j >= 2) {
|
|
|
8febcd |
+ // word j, byte 0
|
|
|
8febcd |
+ result[1] |= (4*index+0) << ((3 - j)*32 + 24);
|
|
|
8febcd |
+ // word j, byte 1
|
|
|
8febcd |
+ result[1] |= (4*index+1) << ((3 - j)*32 + 16);
|
|
|
8febcd |
+ // word j, byte 2
|
|
|
8febcd |
+ result[1] |= (4*index+2) << ((3 - j)*32 + 8);
|
|
|
8febcd |
+ // word j, byte 3
|
|
|
8febcd |
+ result[1] |= (4*index+3) << ((3 - j)*32 + 0);
|
|
|
8febcd |
+ } else {
|
|
|
8febcd |
+ result[0] |= (4*index+0) << ((1 - j)*32 + 24);
|
|
|
8febcd |
+ result[0] |= (4*index+1) << ((1 - j)*32 + 16);
|
|
|
8febcd |
+ result[0] |= (4*index+2) << ((1 - j)*32 + 8);
|
|
|
8febcd |
+ result[0] |= (4*index+3) << ((1 - j)*32 + 0);
|
|
|
8febcd |
+ }
|
|
|
8febcd |
+ j++;
|
|
|
8febcd |
+ }
|
|
|
8febcd |
+ }
|
|
|
8febcd |
+
|
|
|
8febcd |
+ } else if (imm == 0b10) { //little-endian expansion
|
|
|
8febcd |
+ /* If IMM=0b00010, let pcv be the permute control vector required to
|
|
|
8febcd |
+ enable a right-indexed permute (vpermr or xxpermr) to implement an
|
|
|
8febcd |
+ expansion of the rightmost word elements of a source vector into the
|
|
|
8febcd |
+ word elements of a result vector specified by the word-element mask
|
|
|
8febcd |
+ in VSR[VRB+32].
|
|
|
8febcd |
+ */
|
|
|
8febcd |
+ for( index = 0; index < 4; index++) {
|
|
|
8febcd |
+ i = index;
|
|
|
8febcd |
+
|
|
|
8febcd |
+ shift_by = i*32;
|
|
|
8febcd |
+
|
|
|
8febcd |
+ if ( i >= 2) {
|
|
|
8febcd |
+ src = src_hi;
|
|
|
8febcd |
+ shift_by = shift_by - 64;
|
|
|
8febcd |
+ half_sel = 0;
|
|
|
8febcd |
+ } else {
|
|
|
8febcd |
+ src = src_lo;
|
|
|
8febcd |
+ half_sel = 1;
|
|
|
8febcd |
+ }
|
|
|
8febcd |
+
|
|
|
8febcd |
+ sel_shift_by = shift_by + 31;
|
|
|
8febcd |
+
|
|
|
8febcd |
+ if (((src >> sel_shift_by) & 0x1) == 1) {
|
|
|
8febcd |
+ result[half_sel] |= (4*j+0) << (shift_by + 0); // word j, byte 0
|
|
|
8febcd |
+ result[half_sel] |= (4*j+1) << (shift_by + 8); // word j, byte 1
|
|
|
8febcd |
+ result[half_sel] |= (4*j+2) << (shift_by + 16); // word j, byte 2
|
|
|
8febcd |
+ result[half_sel] |= (4*j+3) << (shift_by + 24); // word j, byte 3
|
|
|
8febcd |
+ j++;
|
|
|
8febcd |
+ } else {
|
|
|
8febcd |
+ result[half_sel] |= (4*index + 0x10) << (shift_by + 0);
|
|
|
8febcd |
+ result[half_sel] |= (4*index + 0x11) << (shift_by + 8);
|
|
|
8febcd |
+ result[half_sel] |= (4*index + 0x12) << (shift_by + 16);
|
|
|
8febcd |
+ result[half_sel] |= (4*index + 0x13) << (shift_by + 24);
|
|
|
8febcd |
+ }
|
|
|
8febcd |
+ }
|
|
|
8febcd |
+
|
|
|
8febcd |
+ } else if (imm == 0b11) { //little-endian compression
|
|
|
8febcd |
+ /* If IMM=0b00011, let pcv be the permute control vector required to
|
|
|
8febcd |
+ enable a right-indexed permute (vpermr or xxpermr) to implement a
|
|
|
8febcd |
+ compression of the sparse word elements in a source vector specified
|
|
|
8febcd |
+ by the word-element mask in VSR[VRB+32] into the rightmost word
|
|
|
8febcd |
+ elements of a result vector. */
|
|
|
8febcd |
+ for( index = 0; index < 4; index++) {
|
|
|
8febcd |
+ i = index;
|
|
|
8febcd |
+
|
|
|
8febcd |
+ shift_by = i*32;
|
|
|
8febcd |
+
|
|
|
8febcd |
+ if ( i >= 2) {
|
|
|
8febcd |
+ src = src_hi;
|
|
|
8febcd |
+ shift_by = shift_by - 64;
|
|
|
8febcd |
+ half_sel = 0;
|
|
|
8febcd |
+ } else {
|
|
|
8febcd |
+ src = src_lo;
|
|
|
8febcd |
+ half_sel = 1;
|
|
|
8febcd |
+ }
|
|
|
8febcd |
+
|
|
|
8febcd |
+ sel_shift_by = shift_by + 31;
|
|
|
8febcd |
+
|
|
|
8febcd |
+ if (((src >> sel_shift_by) & 0x1) == 1) {
|
|
|
8febcd |
+ if (j >= 2){
|
|
|
8febcd |
+ // word j, byte 0
|
|
|
8febcd |
+ result[0] |= (4*index + 0x0) << ((j-2)*32+0);
|
|
|
8febcd |
+ // word j, byte 1
|
|
|
8febcd |
+ result[0] |= (4*index + 0x1) << ((j-2)*32+8);
|
|
|
8febcd |
+ // word j, byte 2
|
|
|
8febcd |
+ result[0] |= (4*index + 0x2) << ((j-2)*32+16);
|
|
|
8febcd |
+ // word j, byte 3
|
|
|
8febcd |
+ result[0] |= (4*index + 0x3) << ((j-2)*32+24);
|
|
|
8febcd |
+ } else {
|
|
|
8febcd |
+ result[1] |= (4*index + 0x0) << (j*32+0);
|
|
|
8febcd |
+ result[1] |= (4*index + 0x1) << (j*32+8);
|
|
|
8febcd |
+ result[1] |= (4*index + 0x2) << (j*32+16);
|
|
|
8febcd |
+ result[1] |= (4*index + 0x3) << (j*32+24);
|
|
|
8febcd |
+ }
|
|
|
8febcd |
+ j++;
|
|
|
8febcd |
+ }
|
|
|
8febcd |
+ }
|
|
|
8febcd |
+ } else {
|
|
|
8febcd |
+ vex_printf("ERROR, vector_gen_pvc_word_mask_dirty_helper, imm value %u not supported.\n",
|
|
|
8febcd |
+ imm);
|
|
|
8febcd |
+ vassert(0);
|
|
|
8febcd |
+ }
|
|
|
8febcd |
+
|
|
|
8febcd |
+ write_VSX_entry( gst, reg_offset, result);
|
|
|
8febcd |
+}
|
|
|
8febcd |
+
|
|
|
8febcd |
+/* CALLED FROM GENERATED CODE */
|
|
|
8febcd |
+void vector_gen_pvc_dword_mask_dirty_helper( VexGuestPPC64State* gst,
|
|
|
8febcd |
+ ULong src_hi, ULong src_lo,
|
|
|
8febcd |
+ UInt reg_offset, UInt imm ) {
|
|
|
8febcd |
+ /* The function computes the 128-bit result then writes it directly
|
|
|
8febcd |
+ into the guest state VSX register. */
|
|
|
8febcd |
+ UInt sel_shift_by, half_sel;
|
|
|
8febcd |
+ ULong index, src, result[2];
|
|
|
8febcd |
+ ULong j, i;
|
|
|
8febcd |
+
|
|
|
8febcd |
+ result[0] = 0;
|
|
|
8febcd |
+ result[1] = 0;
|
|
|
8febcd |
+ j = 0;
|
|
|
8febcd |
+
|
|
|
8febcd |
+ /* The algorithm in the ISA is written with IBM numbering zero on left and
|
|
|
8febcd |
+ N-1 on right. The loop index is converted to "i" to match the algorithm
|
|
|
8febcd |
+ for clarity of matching the C code to the algorithm in the ISA. */
|
|
|
8febcd |
+
|
|
|
8febcd |
+ if (imm == 0b00) { // big endian expansion
|
|
|
8febcd |
+ /* If IMM=0b00000, let pcv be the permute control vector required to
|
|
|
8febcd |
+ enable a left-indexed permute (vperm or xxperm) to implement an
|
|
|
8febcd |
+ expansion of the leftmost doubleword elements of a source vector into
|
|
|
8febcd |
+ the doubleword elements of a result vector specified by the
|
|
|
8febcd |
+ doubleword-element mask in VSR[VRB+32].
|
|
|
8febcd |
+ */
|
|
|
8febcd |
+ for( index = 0; index < 2; index++) {
|
|
|
8febcd |
+ i = 1 - index;
|
|
|
8febcd |
+
|
|
|
8febcd |
+ if ( i == 1) {
|
|
|
8febcd |
+ src = src_hi;
|
|
|
8febcd |
+ half_sel = 0;
|
|
|
8febcd |
+ } else {
|
|
|
8febcd |
+ src = src_lo;
|
|
|
8febcd |
+ half_sel = 1;
|
|
|
8febcd |
+ }
|
|
|
8febcd |
+
|
|
|
8febcd |
+ sel_shift_by = 63;
|
|
|
8febcd |
+
|
|
|
8febcd |
+ if ( ((src >> sel_shift_by) & 0x1) == 1) {
|
|
|
8febcd |
+ result[half_sel] |= (8*j + 0x0) << 56; // dword i, byte 0
|
|
|
8febcd |
+ result[half_sel] |= (8*j + 0x1) << 48; // dword i, byte 1
|
|
|
8febcd |
+ result[half_sel] |= (8*j + 0x2) << 40; // dword i, byte 2
|
|
|
8febcd |
+ result[half_sel] |= (8*j + 0x3) << 32; // dword i, byte 3
|
|
|
8febcd |
+ result[half_sel] |= (8*j + 0x4) << 24; // dword i, byte 4
|
|
|
8febcd |
+ result[half_sel] |= (8*j + 0x5) << 16; // dword i, byte 5
|
|
|
8febcd |
+ result[half_sel] |= (8*j + 0x6) << 8; // dword i, byte 6
|
|
|
8febcd |
+ result[half_sel] |= (8*j + 0x7) << 0; // dword i, byte 7
|
|
|
8febcd |
+ j++;
|
|
|
8febcd |
+ } else {
|
|
|
8febcd |
+ result[half_sel] |= (8*index + 0x10) << 56;
|
|
|
8febcd |
+ result[half_sel] |= (8*index + 0x11) << 48;
|
|
|
8febcd |
+ result[half_sel] |= (8*index + 0x12) << 40;
|
|
|
8febcd |
+ result[half_sel] |= (8*index + 0x13) << 32;
|
|
|
8febcd |
+ result[half_sel] |= (8*index + 0x14) << 24;
|
|
|
8febcd |
+ result[half_sel] |= (8*index + 0x15) << 16;
|
|
|
8febcd |
+ result[half_sel] |= (8*index + 0x16) << 8;
|
|
|
8febcd |
+ result[half_sel] |= (8*index + 0x17) << 0;
|
|
|
8febcd |
+ }
|
|
|
8febcd |
+ }
|
|
|
8febcd |
+ } else if (imm == 0b01) { // big endian compression
|
|
|
8febcd |
+ /* If IMM=0b00001, let pcv be the permute control vector required to
|
|
|
8febcd |
+ enable a left-indexed permute (vperm or xxperm) to implement a
|
|
|
8febcd |
+ compression of the sparse doubleword elements in a source vector
|
|
|
8febcd |
+ specified by the doubleword-element mask in VSR[VRB+32] into the
|
|
|
8febcd |
+ leftmost doubleword elements of a result vector.
|
|
|
8febcd |
+ */
|
|
|
8febcd |
+ for( index = 0; index < 2; index++) {
|
|
|
8febcd |
+ i = 1 - index;
|
|
|
8febcd |
+
|
|
|
8febcd |
+ if ( i == 1) {
|
|
|
8febcd |
+ src = src_hi;
|
|
|
8febcd |
+ half_sel = 0;
|
|
|
8febcd |
+ } else {
|
|
|
8febcd |
+ src = src_lo;
|
|
|
8febcd |
+ half_sel = 1;
|
|
|
8febcd |
+ }
|
|
|
8febcd |
+
|
|
|
8febcd |
+ sel_shift_by = 63;
|
|
|
8febcd |
+
|
|
|
8febcd |
+ if ( ((src >> sel_shift_by) & 0x1) == 1) {
|
|
|
8febcd |
+ if (j == 1) {
|
|
|
8febcd |
+ result[1] |= (8*index + 0x0) << 56; // double-word j, byte 0
|
|
|
8febcd |
+ result[1] |= (8*index + 0x1) << 48; // double-word j, byte 1
|
|
|
8febcd |
+ result[1] |= (8*index + 0x2) << 40; // double-word j, byte 2
|
|
|
8febcd |
+ result[1] |= (8*index + 0x3) << 32; // double-word j, byte 3
|
|
|
8febcd |
+ result[1] |= (8*index + 0x4) << 24; // double-word j, byte 4
|
|
|
8febcd |
+ result[1] |= (8*index + 0x5) << 16; // double-word j, byte 5
|
|
|
8febcd |
+ result[1] |= (8*index + 0x6) << 8; // double-word j, byte 6
|
|
|
8febcd |
+ result[1] |= (8*index + 0x7) << 0; // double-word j, byte 7
|
|
|
8febcd |
+ } else {
|
|
|
8febcd |
+ result[0] |= (8*index + 0x0) << 56; // double-word j, byte 0
|
|
|
8febcd |
+ result[0] |= (8*index + 0x1) << 48; // double-word j, byte 1
|
|
|
8febcd |
+ result[0] |= (8*index + 0x2) << 40; // double-word j, byte 2
|
|
|
8febcd |
+ result[0] |= (8*index + 0x3) << 32; // double-word j, byte 3
|
|
|
8febcd |
+ result[0] |= (8*index + 0x4) << 24; // double-word j, byte 4
|
|
|
8febcd |
+ result[0] |= (8*index + 0x5) << 16; // double-word j, byte 5
|
|
|
8febcd |
+ result[0] |= (8*index + 0x6) << 8; // double-word j, byte 6
|
|
|
8febcd |
+ result[0] |= (8*index + 0x7) << 0; // double-word j, byte 7
|
|
|
8febcd |
+ }
|
|
|
8febcd |
+ j++;
|
|
|
8febcd |
+ }
|
|
|
8febcd |
+ }
|
|
|
8febcd |
+ } else if (imm == 0b10) { //little-endian expansion
|
|
|
8febcd |
+ /* If IMM=0b00010, let pcv be the permute control vector required to
|
|
|
8febcd |
+ enable a right-indexed permute (vpermr or xxpermr) to implement an
|
|
|
8febcd |
+ expansion of the rightmost doubleword elements of a source vector
|
|
|
8febcd |
+ into the doubleword elements of a result vector specified by the
|
|
|
8febcd |
+ doubleword-element mask in VSR[VRB+32].
|
|
|
8febcd |
+ */
|
|
|
8febcd |
+
|
|
|
8febcd |
+ for( index = 0; index < 2; index++) {
|
|
|
8febcd |
+ i = index;
|
|
|
8febcd |
+
|
|
|
8febcd |
+ if ( i == 1) {
|
|
|
8febcd |
+ src = src_hi;
|
|
|
8febcd |
+ half_sel = 0;
|
|
|
8febcd |
+ } else {
|
|
|
8febcd |
+ src = src_lo;
|
|
|
8febcd |
+ half_sel = 1;
|
|
|
8febcd |
+ }
|
|
|
8febcd |
+
|
|
|
8febcd |
+ sel_shift_by = 63;
|
|
|
8febcd |
+
|
|
|
8febcd |
+ if ( ((src >> sel_shift_by) & 0x1) == 1) {
|
|
|
8febcd |
+ result[half_sel] |= (8*j+0) << 0; // double-word i, byte 0
|
|
|
8febcd |
+ result[half_sel] |= (8*j+1) << 8; // double-word i, byte 1
|
|
|
8febcd |
+ result[half_sel] |= (8*j+2) << 16; // double-word i, byte 2
|
|
|
8febcd |
+ result[half_sel] |= (8*j+3) << 24; // double-word i, byte 3
|
|
|
8febcd |
+ result[half_sel] |= (8*j+4) << 32; // double-word i, byte 4
|
|
|
8febcd |
+ result[half_sel] |= (8*j+5) << 40; // double-word i, byte 5
|
|
|
8febcd |
+ result[half_sel] |= (8*j+6) << 48; // double-word i, byte 6
|
|
|
8febcd |
+ result[half_sel] |= (8*j+7) << 56; // double-word i, byte 7
|
|
|
8febcd |
+ j++;
|
|
|
8febcd |
+ } else {
|
|
|
8febcd |
+ result[half_sel] |= (8*index + 0x10) << 0;
|
|
|
8febcd |
+ result[half_sel] |= (8*index + 0x11) << 8;
|
|
|
8febcd |
+ result[half_sel] |= (8*index + 0x12) << 16;
|
|
|
8febcd |
+ result[half_sel] |= (8*index + 0x13) << 24;
|
|
|
8febcd |
+ result[half_sel] |= (8*index + 0x14) << 32;
|
|
|
8febcd |
+ result[half_sel] |= (8*index + 0x15) << 40;
|
|
|
8febcd |
+ result[half_sel] |= (8*index + 0x16) << 48;
|
|
|
8febcd |
+ result[half_sel] |= (8*index + 0x17) << 56;
|
|
|
8febcd |
+ }
|
|
|
8febcd |
+ }
|
|
|
8febcd |
+
|
|
|
8febcd |
+ } else if (imm == 0b11) { //little-endian compression
|
|
|
8febcd |
+ /* If IMM=0b00011, let pcv be the permute control vector required to
|
|
|
8febcd |
+ enable a right-indexed permute (vpermr or xxpermr) to implement a
|
|
|
8febcd |
+ compression of the sparse doubleword elements in a source vector
|
|
|
8febcd |
+ specified by the doubleword-element mask in VSR[VRB+32] into the
|
|
|
8febcd |
+ rightmost doubleword elements of a result vector. */
|
|
|
8febcd |
+ for( index = 0; index < 2; index++) {
|
|
|
8febcd |
+ i = index;
|
|
|
8febcd |
+
|
|
|
8febcd |
+ if ( i == 1) {
|
|
|
8febcd |
+ src = src_hi;
|
|
|
8febcd |
+ half_sel = 0;
|
|
|
8febcd |
+ } else {
|
|
|
8febcd |
+ src = src_lo;
|
|
|
8febcd |
+ half_sel = 1;
|
|
|
8febcd |
+ }
|
|
|
8febcd |
+
|
|
|
8febcd |
+ sel_shift_by = 63;
|
|
|
8febcd |
+
|
|
|
8febcd |
+ if (((src >> sel_shift_by) & 0x1) == 1) {
|
|
|
8febcd |
+ if (j == 1) {
|
|
|
8febcd |
+ result[0] |= (8*index + 0x0) << 0; // double-word j, byte 0
|
|
|
8febcd |
+ result[0] |= (8*index + 0x1) << 8; // double-word j, byte 1
|
|
|
8febcd |
+ result[0] |= (8*index + 0x2) << 16; // double-word j, byte 2
|
|
|
8febcd |
+ result[0] |= (8*index + 0x3) << 24; // double-word j, byte 3
|
|
|
8febcd |
+ result[0] |= (8*index + 0x4) << 32; // double-word j, byte 4
|
|
|
8febcd |
+ result[0] |= (8*index + 0x5) << 40; // double-word j, byte 5
|
|
|
8febcd |
+ result[0] |= (8*index + 0x6) << 48; // double-word j, byte 6
|
|
|
8febcd |
+ result[0] |= (8*index + 0x7) << 56; // double-word j, byte 7
|
|
|
8febcd |
+ } else {
|
|
|
8febcd |
+ result[1] |= (8*index + 0x0) << 0;
|
|
|
8febcd |
+ result[1] |= (8*index + 0x1) << 8;
|
|
|
8febcd |
+ result[1] |= (8*index + 0x2) << 16;
|
|
|
8febcd |
+ result[1] |= (8*index + 0x3) << 24;
|
|
|
8febcd |
+ result[1] |= (8*index + 0x4) << 32;
|
|
|
8febcd |
+ result[1] |= (8*index + 0x5) << 40;
|
|
|
8febcd |
+ result[1] |= (8*index + 0x6) << 48;
|
|
|
8febcd |
+ result[1] |= (8*index + 0x7) << 56;
|
|
|
8febcd |
+ }
|
|
|
8febcd |
+ j++;
|
|
|
8febcd |
+ }
|
|
|
8febcd |
+ }
|
|
|
8febcd |
+ } else {
|
|
|
8febcd |
+ vex_printf("ERROR, vector_gen_pvc_dword_mask_helper, imm value %u not supported.\n",
|
|
|
8febcd |
+ imm);
|
|
|
8febcd |
+ vassert(0);
|
|
|
8febcd |
+ }
|
|
|
8febcd |
+
|
|
|
8febcd |
+ write_VSX_entry( gst, reg_offset, result);
|
|
|
8febcd |
+}
|
|
|
8febcd |
|
|
|
8febcd |
/*------------------------------------------------*/
|
|
|
8febcd |
/*---- VSX Matrix signed integer GER functions ---*/
|
|
|
8febcd |
diff --git a/VEX/priv/guest_ppc_toIR.c b/VEX/priv/guest_ppc_toIR.c
|
|
|
8febcd |
index bcabf69dd..354be6b53 100644
|
|
|
8febcd |
--- a/VEX/priv/guest_ppc_toIR.c
|
|
|
8febcd |
+++ b/VEX/priv/guest_ppc_toIR.c
|
|
|
8febcd |
@@ -3322,6 +3322,7 @@ static IRExpr * locate_vector_ele_eq ( IRTemp src, IRExpr *value,
|
|
|
8febcd |
#define DFORM_IMMASK 0xffffffff
|
|
|
8febcd |
#define DSFORM_IMMASK 0xfffffffc
|
|
|
8febcd |
#define DQFORM_IMMASK 0xfffffff0
|
|
|
8febcd |
+#define DA8LSFORM_IMMASK 0x3fffffff // Algebraic 8LS Dform
|
|
|
8febcd |
|
|
|
8febcd |
#define ISA_3_1_PREFIX_CHECK if (prefix) {if (!allow_isa_3_1) goto decode_noIsa3_1;}
|
|
|
8febcd |
|
|
|
8febcd |
@@ -6109,6 +6110,87 @@ static void vsx_matrix_64bit_float_ger ( const VexAbiInfo* vbi,
|
|
|
8febcd |
stmt( IRStmt_Dirty(d) );
|
|
|
8febcd |
}
|
|
|
8febcd |
|
|
|
8febcd |
+static void vector_gen_pvc_mask ( const VexAbiInfo* vbi,
|
|
|
8febcd |
+ IRExpr *src, UInt IMM,
|
|
|
8febcd |
+ UInt opc2, UInt VSX_addr ) {
|
|
|
8febcd |
+ /* The function takes a 128-bit source and an immediate value. The function
|
|
|
8febcd |
+ calls a helper to execute the xxgenpcvbm, xxgenpcvhm, xxgenpcvwm,
|
|
|
8febcd |
+ xxgenpcvdm instruction. The instructions are not practical to do with
|
|
|
8febcd |
+ Iops. The instruction is implemented with a dirty helper that
|
|
|
8febcd |
+ calculates the 128-bit result and writes it directly into the guest
|
|
|
8febcd |
+ state VSX register.
|
|
|
8febcd |
+ */
|
|
|
8febcd |
+ IRTemp src_hi = newTemp( Ity_I64);
|
|
|
8febcd |
+ IRTemp src_lo = newTemp( Ity_I64);
|
|
|
8febcd |
+
|
|
|
8febcd |
+ IRDirty* d;
|
|
|
8febcd |
+
|
|
|
8febcd |
+ vassert( (VSX_addr >= 0) && (VSX_addr < 64) );
|
|
|
8febcd |
+ UInt reg_offset = offsetofPPCGuestState( guest_VSR0 )
|
|
|
8febcd |
+ + sizeof(U128) * VSX_addr;
|
|
|
8febcd |
+
|
|
|
8febcd |
+ assign( src_hi, unop( Iop_V128HIto64, src ) );
|
|
|
8febcd |
+ assign( src_lo, unop( Iop_V128to64, src ) );
|
|
|
8febcd |
+
|
|
|
8febcd |
+ IRExpr** args = mkIRExprVec_5(
|
|
|
8febcd |
+ IRExpr_GSPTR(),
|
|
|
8febcd |
+ mkexpr( src_hi ),
|
|
|
8febcd |
+ mkexpr( src_lo ),
|
|
|
8febcd |
+ mkU32( reg_offset ),
|
|
|
8febcd |
+ mkU64( IMM ) );
|
|
|
8febcd |
+
|
|
|
8febcd |
+ switch( opc2 ) {
|
|
|
8febcd |
+ case 0x394: // xxgenpcvbm
|
|
|
8febcd |
+ d = unsafeIRDirty_0_N (
|
|
|
8febcd |
+ 0 /*regparms*/,
|
|
|
8febcd |
+ "vector_gen_pvc_byte_mask_dirty_helper",
|
|
|
8febcd |
+ fnptr_to_fnentry( vbi,
|
|
|
8febcd |
+ &vector_gen_pvc_byte_mask_dirty_helper ),
|
|
|
8febcd |
+ args);
|
|
|
8febcd |
+ break;
|
|
|
8febcd |
+
|
|
|
8febcd |
+ case 0x395: // xxgenpcvhm
|
|
|
8febcd |
+ d = unsafeIRDirty_0_N (
|
|
|
8febcd |
+ 0 /*regparms*/,
|
|
|
8febcd |
+ "vector_gen_pvc_hword_mask_dirty_helper",
|
|
|
8febcd |
+ fnptr_to_fnentry( vbi,
|
|
|
8febcd |
+ &vector_gen_pvc_hword_mask_dirty_helper ),
|
|
|
8febcd |
+ args);
|
|
|
8febcd |
+ break;
|
|
|
8febcd |
+
|
|
|
8febcd |
+ case 0x3B4: // xxgenpcvwm
|
|
|
8febcd |
+ d = unsafeIRDirty_0_N (
|
|
|
8febcd |
+ 0 /*regparms*/,
|
|
|
8febcd |
+ "vector_gen_pvc_word_mask_dirty_helper",
|
|
|
8febcd |
+ fnptr_to_fnentry( vbi,
|
|
|
8febcd |
+ &vector_gen_pvc_word_mask_dirty_helper ),
|
|
|
8febcd |
+ args);
|
|
|
8febcd |
+ break;
|
|
|
8febcd |
+
|
|
|
8febcd |
+ case 0x3B5: // xxgenpcvdm
|
|
|
8febcd |
+ d = unsafeIRDirty_0_N (
|
|
|
8febcd |
+ 0 /*regparms*/,
|
|
|
8febcd |
+ "vector_gen_pvc_dword_mask_dirty_helper",
|
|
|
8febcd |
+ fnptr_to_fnentry( vbi,
|
|
|
8febcd |
+ &vector_gen_pvc_dword_mask_dirty_helper ),
|
|
|
8febcd |
+ args);
|
|
|
8febcd |
+ break;
|
|
|
8febcd |
+ default:
|
|
|
8febcd |
+ vex_printf("ERROR: Unkown instruction = %u in vector_gen_pvc_mask()\n",
|
|
|
8febcd |
+ opc2);
|
|
|
8febcd |
+ return;
|
|
|
8febcd |
+ }
|
|
|
8febcd |
+
|
|
|
8febcd |
+ d->nFxState = 1;
|
|
|
8febcd |
+ vex_bzero(&d->fxState, sizeof(d->fxState));
|
|
|
8febcd |
+ d->fxState[0].fx = Ifx_Modify;
|
|
|
8febcd |
+ d->fxState[0].size = sizeof(U128);
|
|
|
8febcd |
+ d->fxState[0].offset = reg_offset;
|
|
|
8febcd |
+
|
|
|
8febcd |
+ /* execute the dirty call, side-effecting guest state */
|
|
|
8febcd |
+ stmt( IRStmt_Dirty(d) );
|
|
|
8febcd |
+}
|
|
|
8febcd |
+
|
|
|
8febcd |
static IRExpr * UNSIGNED_CMP_GT_V128 ( IRExpr *vA, IRExpr *vB ) {
|
|
|
8febcd |
/* This function does an unsigned compare of two V128 values. The
|
|
|
8febcd |
* function is for use in 32-bit mode only as it is expensive. The
|
|
|
8febcd |
@@ -35227,6 +35309,54 @@ static Bool dis_vsx_accumulator_prefix ( UInt prefix, UInt theInstr,
|
|
|
8febcd |
return True;
|
|
|
8febcd |
}
|
|
|
8febcd |
|
|
|
8febcd |
+static Bool dis_vector_generate_pvc_from_mask ( UInt prefix,
|
|
|
8febcd |
+ UInt theInstr,
|
|
|
8febcd |
+ const VexAbiInfo* vbi )
|
|
|
8febcd |
+{
|
|
|
8febcd |
+ UChar XT_addr = ifieldRegXT(theInstr);
|
|
|
8febcd |
+ UChar vB_addr = ifieldRegB(theInstr);
|
|
|
8febcd |
+ IRTemp vB = newTemp( Ity_V128 );
|
|
|
8febcd |
+ UInt opc2 = ifieldOPClo10(theInstr);
|
|
|
8febcd |
+ UInt IMM = IFIELD(theInstr, (31-15), 5); // bits[11:15]
|
|
|
8febcd |
+
|
|
|
8febcd |
+ assign( vB, getVReg( vB_addr ) );
|
|
|
8febcd |
+
|
|
|
8febcd |
+ switch( opc2 ) {
|
|
|
8febcd |
+ case 0x394:
|
|
|
8febcd |
+ DIP("xxgenpcvbm v%u,v%u,%u\n", XT_addr, vB_addr, IMM);
|
|
|
8febcd |
+ /* vector_gen_pvc_mask uses a dirty helper to calculate the result and
|
|
|
8febcd |
+ write it to the VSX result register. */
|
|
|
8febcd |
+ vector_gen_pvc_mask( vbi, mkexpr( vB ), IMM, opc2, XT_addr );
|
|
|
8febcd |
+ break;
|
|
|
8febcd |
+
|
|
|
8febcd |
+ case 0x395:
|
|
|
8febcd |
+ DIP("xxgenpcvhm v%u,v%u,%u\n", XT_addr, vB_addr, IMM);
|
|
|
8febcd |
+ /* vector_gen_pvc_mask uses a dirty helper to calculate the result and
|
|
|
8febcd |
+ write it to the VSX result register. */
|
|
|
8febcd |
+ vector_gen_pvc_mask( vbi, mkexpr( vB ), IMM, opc2, XT_addr );
|
|
|
8febcd |
+ break;
|
|
|
8febcd |
+
|
|
|
8febcd |
+ case 0x3B4:
|
|
|
8febcd |
+ DIP("xxgenpcvwm v%u,v%u,%u\n", XT_addr, vB_addr, IMM);
|
|
|
8febcd |
+ /* vector_gen_pvc_mask uses a dirty helper to calculate the result and
|
|
|
8febcd |
+ write it to the VSX result register. */
|
|
|
8febcd |
+ vector_gen_pvc_mask( vbi, mkexpr( vB ), IMM, opc2, XT_addr );
|
|
|
8febcd |
+ break;
|
|
|
8febcd |
+
|
|
|
8febcd |
+ case 0x3B5:
|
|
|
8febcd |
+ DIP("xxgenpcvdm v%u,v%u,%u\n", XT_addr, vB_addr, IMM);
|
|
|
8febcd |
+ /* vector_gen_pvc_mask uses a dirty helper to calculate the result and
|
|
|
8febcd |
+ write it to the VSX result register. */
|
|
|
8febcd |
+ vector_gen_pvc_mask( vbi, mkexpr( vB ), IMM, opc2, XT_addr );
|
|
|
8febcd |
+ break;
|
|
|
8febcd |
+
|
|
|
8febcd |
+ default:
|
|
|
8febcd |
+ return False;
|
|
|
8febcd |
+ }
|
|
|
8febcd |
+
|
|
|
8febcd |
+ return True;
|
|
|
8febcd |
+}
|
|
|
8febcd |
+
|
|
|
8febcd |
static Int dis_nop_prefix ( UInt prefix, UInt theInstr )
|
|
|
8febcd |
{
|
|
|
8febcd |
Bool is_prefix = prefix_instruction( prefix );
|
|
|
8febcd |
@@ -35748,14 +35878,9 @@ DisResult disInstr_PPC_WRK (
|
|
|
8febcd |
}
|
|
|
8febcd |
goto decode_failure;
|
|
|
8febcd |
|
|
|
8febcd |
- case 0x31: // lfsu, stxv
|
|
|
8febcd |
+ case 0x31: // lfsu
|
|
|
8febcd |
if (!allow_F) goto decode_noF;
|
|
|
8febcd |
- if (prefix_instruction( prefix )) { // stxv
|
|
|
8febcd |
- if ( !(allow_isa_3_1) ) goto decode_noIsa3_1;
|
|
|
8febcd |
- if (dis_fp_pair_prefix( prefix, theInstr )) goto decode_success;
|
|
|
8febcd |
- } else { // lfsu
|
|
|
8febcd |
- if (dis_fp_load( prefix, theInstr )) goto decode_success;
|
|
|
8febcd |
- }
|
|
|
8febcd |
+ if (dis_fp_load( prefix, theInstr )) goto decode_success;
|
|
|
8febcd |
goto decode_failure;
|
|
|
8febcd |
|
|
|
8febcd |
case 0x32:
|
|
|
8febcd |
@@ -35842,7 +35967,6 @@ DisResult disInstr_PPC_WRK (
|
|
|
8febcd |
case 0x39: // pld, lxsd, lxssp, lfdp
|
|
|
8febcd |
{
|
|
|
8febcd |
UInt opc2tmp = ifieldOPC0o2(theInstr);
|
|
|
8febcd |
-
|
|
|
8febcd |
if (!allow_F) goto decode_noF;
|
|
|
8febcd |
if (prefix_instruction( prefix )) { // pld
|
|
|
8febcd |
if ( !(allow_isa_3_1) ) goto decode_noIsa3_1;
|
|
|
8febcd |
@@ -36125,12 +36249,6 @@ DisResult disInstr_PPC_WRK (
|
|
|
8febcd |
goto decode_failure;
|
|
|
8febcd |
}
|
|
|
8febcd |
|
|
|
8febcd |
- /* The vsxOpc2 returned is the "normalized" value, representing the
|
|
|
8febcd |
- * instructions secondary opcode as taken from the standard secondary
|
|
|
8febcd |
- * opcode field [21:30] (IBM notatition), even if the actual field
|
|
|
8febcd |
- * is non-standard. These normalized values are given in the opcode
|
|
|
8febcd |
- * appendices of the ISA 2.06 document.
|
|
|
8febcd |
- */
|
|
|
8febcd |
if ( ( opc2 == 0x168 ) && ( IFIELD( theInstr, 19, 2 ) == 0 ) )// xxspltib
|
|
|
8febcd |
{
|
|
|
8febcd |
/* This is a special case of the XX1 form where the RA, RB
|
|
|
8febcd |
@@ -36153,6 +36271,23 @@ DisResult disInstr_PPC_WRK (
|
|
|
8febcd |
goto decode_failure;
|
|
|
8febcd |
}
|
|
|
8febcd |
|
|
|
8febcd |
+ if ( ( opc2 == 0x394 ) || // xxgenpcvbm
|
|
|
8febcd |
+ ( opc2 == 0x395 ) || // xxgenpcvhm
|
|
|
8febcd |
+ ( opc2 == 0x3B4 ) || // xxgenpcvwm
|
|
|
8febcd |
+ ( opc2 == 0x3B5 ) ) { // xxgenpcvdm
|
|
|
8febcd |
+ if ( !(allow_isa_3_1) ) goto decode_noIsa3_1;
|
|
|
8febcd |
+ if (dis_vector_generate_pvc_from_mask( prefix, theInstr,
|
|
|
8febcd |
+ abiinfo ))
|
|
|
8febcd |
+ goto decode_success;
|
|
|
8febcd |
+ goto decode_failure;
|
|
|
8febcd |
+ }
|
|
|
8febcd |
+
|
|
|
8febcd |
+ /* The vsxOpc2 returned is the "normalized" value, representing the
|
|
|
8febcd |
+ * instructions secondary opcode as taken from the standard secondary
|
|
|
8febcd |
+ * opcode field [21:30] (IBM notation), even if the actual field
|
|
|
8febcd |
+ * is non-standard. These normalized values are given in the opcode
|
|
|
8febcd |
+ * appendices of the ISA 2.06 document.
|
|
|
8febcd |
+ */
|
|
|
8febcd |
vsxOpc2 = get_VSX60_opc2(opc2, theInstr);
|
|
|
8febcd |
|
|
|
8febcd |
switch (vsxOpc2) {

commit 078f89e99b6f62e043f6138c6a7ae238befc1f2a
Author: Carl Love <cel@us.ibm.com>
Date:   Fri Feb 26 15:46:55 2021 -0600

    PPC64: Reduced-Precision - bfloat16 Outer Product & Format Conversion Operations

    Add support for:

    pmxvbf16ger2     Prefixed Masked VSX Vector bfloat16 GER (Rank-2 Update)
    pmxvbf16ger2pp   Prefixed Masked VSX Vector bfloat16 GER (Rank-2 Update) Positive
                     multiply, Positive accumulate
    pmxvbf16ger2pn   Prefixed Masked VSX Vector bfloat16 GER (Rank-2 Update) Positive
                     multiply, Negative accumulate
    pmxvbf16ger2np   Prefixed Masked VSX Vector bfloat16 GER (Rank-2 Update) Negative
                     multiply, Positive accumulate
    pmxvbf16ger2nn   Prefixed Masked VSX Vector bfloat16 GER (Rank-2 Update) Negative
                     multiply, Negative accumulate
    xvbf16ger2       VSX Vector bfloat16 GER (Rank-2 Update)
    xvbf16ger2pp     VSX Vector bfloat16 GER (Rank-2 Update) Positive multiply, Positive
                     accumulate
    xvbf16ger2pn     VSX Vector bfloat16 GER (Rank-2 Update) Positive multiply, Negative
                     accumulate
    xvbf16ger2np     VSX Vector bfloat16 GER (Rank-2 Update) Negative multiply, Positive
                     accumulate
    xvbf16ger2nn     VSX Vector bfloat16 GER (Rank-2 Update) Negative multiply, Negative
                     accumulate
    xvcvbf16sp       VSX Vector Convert bfloat16 to Single-Precision format
    xvcvspbf16       VSX Vector Convert with round Single-Precision to bfloat16 format
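
bfloat16 is simply the top half of an IEEE-754 binary32 value: the same sign
bit, the same 8-bit exponent, and the 7 most-significant fraction bits.  A
minimal sketch of the two conversions in plain C; the names
float_to_bf16_sketch and bf16_to_float_sketch are illustrative and not part of
this patch, and the real helpers below handle NaN, infinity, zero and denormal
inputs explicitly:

   #include <stdio.h>
   #include <string.h>

   /* Illustrative sketch only: widening bf16 -> binary32 is exact (append 16
      zero fraction bits); narrowing truncates the fraction to 7 bits and,
      like conv_float_to_bf16 in this patch, rounds up when the most
      significant dropped fraction bit (bit 15) is set.  Here the carry from
      rounding simply propagates into the exponent. */
   static unsigned short float_to_bf16_sketch( float f )
   {
      unsigned int bits;
      memcpy( &bits, &f, sizeof bits );
      if (bits & 0x00008000)            /* round up on the dropped bit */
         bits += 0x00010000;
      return (unsigned short)(bits >> 16);
   }

   static float bf16_to_float_sketch( unsigned short h )
   {
      unsigned int bits = (unsigned int)h << 16;
      float f;
      memcpy( &f, &bits, sizeof f );
      return f;
   }

   int main( void )
   {
      float x = 1.7f;
      unsigned short h = float_to_bf16_sketch( x );

      printf( "%f -> 0x%04x -> %f\n", x, h, bf16_to_float_sketch( h ) );
      return 0;
   }

The bfloat16 GER paths added below use the same layout, widening each bf16
input through binary32 to binary64 before multiplying and accumulating.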
|
diff --git a/VEX/priv/guest_ppc_defs.h b/VEX/priv/guest_ppc_defs.h
|
|
|
8febcd |
index 54ce923a9..d36d6c07d 100644
|
|
|
8febcd |
--- a/VEX/priv/guest_ppc_defs.h
|
|
|
8febcd |
+++ b/VEX/priv/guest_ppc_defs.h
|
|
|
8febcd |
@@ -150,6 +150,8 @@ extern ULong convert_to_zoned_helper( ULong src_hi, ULong src_low,
|
|
|
8febcd |
ULong return_upper );
|
|
|
8febcd |
extern ULong convert_to_national_helper( ULong src, ULong return_upper );
|
|
|
8febcd |
extern ULong convert_from_zoned_helper( ULong src_hi, ULong src_low );
|
|
|
8febcd |
+extern ULong convert_from_floattobf16_helper( ULong src );
|
|
|
8febcd |
+extern ULong convert_from_bf16tofloat_helper( ULong src );
|
|
|
8febcd |
extern ULong convert_from_national_helper( ULong src_hi, ULong src_low );
|
|
|
8febcd |
extern ULong generate_C_FPCC_helper( ULong size, ULong src_hi, ULong src );
|
|
|
8febcd |
extern ULong extract_bits_under_mask_helper( ULong src, ULong mask,
|
|
|
8febcd |
@@ -201,6 +203,11 @@ extern void vector_gen_pvc_dword_mask_dirty_helper( VexGuestPPC64State* gst,
|
|
|
8febcd |
#define XVF16GER2PN 0b10010010
|
|
|
8febcd |
#define XVF16GER2NP 0b01010010
|
|
|
8febcd |
#define XVF16GER2NN 0b11010010
|
|
|
8febcd |
+#define XVBF16GER2 0b00110011
|
|
|
8febcd |
+#define XVBF16GER2PP 0b00110010
|
|
|
8febcd |
+#define XVBF16GER2PN 0b10110010
|
|
|
8febcd |
+#define XVBF16GER2NP 0b01110010
|
|
|
8febcd |
+#define XVBF16GER2NN 0b11110010
|
|
|
8febcd |
#define XVF32GER 0b00011011
|
|
|
8febcd |
#define XVF32GERPP 0b00011010
|
|
|
8febcd |
#define XVF32GERPN 0b10011010
|
|
|
8febcd |
diff --git a/VEX/priv/guest_ppc_helpers.c b/VEX/priv/guest_ppc_helpers.c
|
|
|
8febcd |
index 75497abb9..6bcee966d 100644
|
|
|
8febcd |
--- a/VEX/priv/guest_ppc_helpers.c
|
|
|
8febcd |
+++ b/VEX/priv/guest_ppc_helpers.c
|
|
|
8febcd |
@@ -1905,6 +1905,125 @@ static Double conv_f16_to_double( ULong input )
|
|
|
8febcd |
# endif
|
|
|
8febcd |
}
|
|
|
8febcd |
|
|
|
8febcd |
+#define BF16_SIGN_MASK 0x8000
|
|
|
8febcd |
+#define BF16_EXP_MASK 0x7F80
|
|
|
8febcd |
+#define BF16_FRAC_MASK 0x007F
|
|
|
8febcd |
+#define BF16_BIAS 127
|
|
|
8febcd |
+#define BF16_MAX_UNBIASED_EXP 127
|
|
|
8febcd |
+#define BF16_MIN_UNBIASED_EXP -126
|
|
|
8febcd |
+#define FLOAT_SIGN_MASK 0x80000000
|
|
|
8febcd |
+#define FLOAT_EXP_MASK 0x7F800000
|
|
|
8febcd |
+#define FLOAT_FRAC_MASK 0x007FFFFF
|
|
|
8febcd |
+#define FLOAT_FRAC_BIT8 0x00008000
|
|
|
8febcd |
+#define FLOAT_BIAS 127
|
|
|
8febcd |
+
|
|
|
8febcd |
+static Float conv_bf16_to_float( UInt input )
|
|
|
8febcd |
+{
|
|
|
8febcd |
+ /* input is 16-bit bfloat.
|
|
|
8febcd |
+ bias +127, exponent 8-bits, fraction 7-bits
|
|
|
8febcd |
+
|
|
|
8febcd |
+ output is 32-bit float.
|
|
|
8febcd |
+ bias +127, exponent 8-bits, fraction 22-bits
|
|
|
8febcd |
+ */
|
|
|
8febcd |
+
|
|
|
8febcd |
+ UInt input_exp, input_fraction, unbiased_exp;
|
|
|
8febcd |
+ UInt output_exp, output_fraction;
|
|
|
8febcd |
+ UInt sign;
|
|
|
8febcd |
+ union convert_t conv;
|
|
|
8febcd |
+
|
|
|
8febcd |
+ sign = (UInt)(input & BF16_SIGN_MASK);
|
|
|
8febcd |
+ input_exp = input & BF16_EXP_MASK;
|
|
|
8febcd |
+ unbiased_exp = (input_exp >> 7) - (UInt)BF16_BIAS;
|
|
|
8febcd |
+ input_fraction = input & BF16_FRAC_MASK;
|
|
|
8febcd |
+
|
|
|
8febcd |
+ if (((input_exp & BF16_EXP_MASK) == BF16_EXP_MASK) &&
|
|
|
8febcd |
+ (input_fraction != 0)) {
|
|
|
8febcd |
+ /* input is NaN or SNaN, exp all 1's, fraction != 0 */
|
|
|
8febcd |
+ output_exp = FLOAT_EXP_MASK;
|
|
|
8febcd |
+ output_fraction = input_fraction;
|
|
|
8febcd |
+
|
|
|
8febcd |
+ } else if(((input_exp & BF16_EXP_MASK) == BF16_EXP_MASK) &&
|
|
|
8febcd |
+ ( input_fraction == 0)) {
|
|
|
8febcd |
+ /* input is infinity, exp all 1's, fraction = 0 */
|
|
|
8febcd |
+ output_exp = FLOAT_EXP_MASK;
|
|
|
8febcd |
+ output_fraction = 0;
|
|
|
8febcd |
+
|
|
|
8febcd |
+ } else if((input_exp == 0) && (input_fraction == 0)) {
|
|
|
8febcd |
+ /* input is zero */
|
|
|
8febcd |
+ output_exp = 0;
|
|
|
8febcd |
+ output_fraction = 0;
|
|
|
8febcd |
+
|
|
|
8febcd |
+ } else if((input_exp == 0) && (input_fraction != 0)) {
|
|
|
8febcd |
+ /* input is denormal */
|
|
|
8febcd |
+ output_fraction = input_fraction;
|
|
|
8febcd |
+ output_exp = (-(Int)BF16_BIAS + (Int)FLOAT_BIAS ) << 23;
|
|
|
8febcd |
+
|
|
|
8febcd |
+ } else {
|
|
|
8febcd |
+ /* result is normal */
|
|
|
8febcd |
+ output_exp = (unbiased_exp + FLOAT_BIAS) << 23;
|
|
|
8febcd |
+ output_fraction = input_fraction;
|
|
|
8febcd |
+ }
|
|
|
8febcd |
+
|
|
|
8febcd |
+ conv.u32 = sign << (31 - 15) | output_exp | (output_fraction << (23-7));
|
|
|
8febcd |
+ return conv.f;
|
|
|
8febcd |
+}
|
|
|
8febcd |
+
|
|
|
8febcd |
+static UInt conv_float_to_bf16( UInt input )
|
|
|
8febcd |
+{
|
|
|
8febcd |
+ /* input is 32-bit float stored as unsigned 32-bit.
|
|
|
8febcd |
+ bias +127, exponent 8-bits, fraction 23-bits
|
|
|
8febcd |
+
|
|
|
8febcd |
+ output is 16-bit bfloat.
|
|
|
8febcd |
+ bias +127, exponent 8-bits, fraction 7-bits
|
|
|
8febcd |
+
|
|
|
8febcd |
+ If the unbiased exponent of the input is greater than the max floating
|
|
|
8febcd |
+ point unbiased exponent value, the result of the floating point 16-bit
|
|
|
8febcd |
+ value is infinity.
|
|
|
8febcd |
+ */
|
|
|
8febcd |
+
|
|
|
8febcd |
+ UInt input_exp, input_fraction;
|
|
|
8febcd |
+ UInt output_exp, output_fraction;
|
|
|
8febcd |
+ UInt result, sign;
|
|
|
8febcd |
+
|
|
|
8febcd |
+ sign = input & FLOAT_SIGN_MASK;
|
|
|
8febcd |
+ input_exp = input & FLOAT_EXP_MASK;
|
|
|
8febcd |
+ input_fraction = input & FLOAT_FRAC_MASK;
|
|
|
8febcd |
+
|
|
|
8febcd |
+ if (((input_exp & FLOAT_EXP_MASK) == FLOAT_EXP_MASK) &&
|
|
|
8febcd |
+ (input_fraction != 0)) {
|
|
|
8febcd |
+ /* input is NaN or SNaN, exp all 1's, fraction != 0 */
|
|
|
8febcd |
+ output_exp = BF16_EXP_MASK;
|
|
|
8febcd |
+ output_fraction = (ULong)input_fraction >> (23 - 7);
|
|
|
8febcd |
+ } else if (((input_exp & FLOAT_EXP_MASK) == FLOAT_EXP_MASK) &&
|
|
|
8febcd |
+ ( input_fraction == 0)) {
|
|
|
8febcd |
+ /* input is infinity, exp all 1's, fraction = 0 */
|
|
|
8febcd |
+ output_exp = BF16_EXP_MASK;
|
|
|
8febcd |
+ output_fraction = 0;
|
|
|
8febcd |
+ } else if ((input_exp == 0) && (input_fraction == 0)) {
|
|
|
8febcd |
+ /* input is zero */
|
|
|
8febcd |
+ output_exp = 0;
|
|
|
8febcd |
+ output_fraction = 0;
|
|
|
8febcd |
+ } else if ((input_exp == 0) && (input_fraction != 0)) {
|
|
|
8febcd |
+ /* input is denormal */
|
|
|
8febcd |
+ output_exp = 0;
|
|
|
8febcd |
+ output_fraction = (ULong)input_fraction >> (23 - 7);
|
|
|
8febcd |
+ } else {
|
|
|
8febcd |
+ /* result is normal */
|
|
|
8febcd |
+ output_exp = (input_exp - BF16_BIAS + FLOAT_BIAS) >> (23 - 7);
|
|
|
8febcd |
+ output_fraction = (ULong)input_fraction >> (23 - 7);
|
|
|
8febcd |
+
|
|
|
8febcd |
+ /* Round result. Look at the 8th bit position of the 32-bit floating
|
|
|
8febcd |
+ point fraction. The F16 fraction is only 7 bits wide so if the 8th
|
|
|
8febcd |
+ bit of the F32 is a 1 we need to round up by adding 1 to the output
|
|
|
8febcd |
+ fraction. */
|
|
|
8febcd |
+ if ((input_fraction & FLOAT_FRAC_BIT8) == FLOAT_FRAC_BIT8)
|
|
|
8febcd |
+ /* Round the F16 fraction up by 1 */
|
|
|
8febcd |
+ output_fraction = output_fraction + 1;
|
|
|
8febcd |
+ }
|
|
|
8febcd |
+
|
|
|
8febcd |
+ result = sign >> (31 - 15) | output_exp | output_fraction;
|
|
|
8febcd |
+ return result;
|
|
|
8febcd |
+}
|
|
|
8febcd |
|
|
|
8febcd |
static Float conv_double_to_float( Double src )
|
|
|
8febcd |
{
|
|
|
8febcd |
@@ -1942,6 +2061,36 @@ static Float negate_float( Float input )
|
|
|
8febcd |
return -input;
|
|
|
8febcd |
}
|
|
|
8febcd |
|
|
|
8febcd |
+/* This C-helper takes a vector of two 32-bit floating point values
|
|
|
8febcd |
+ * and returns a vector containing two 16-bit bfloats.
|
|
|
8febcd |
+ input: word0 word1
|
|
|
8febcd |
+ output 0x0 hword1 0x0 hword3
|
|
|
8febcd |
+ Called from generated code.
|
|
|
8febcd |
+ */
|
|
|
8febcd |
+ULong convert_from_floattobf16_helper( ULong src ) {
|
|
|
8febcd |
+ ULong resultHi, resultLo;
|
|
|
8febcd |
+
|
|
|
8febcd |
+ resultHi = (ULong)conv_float_to_bf16( (UInt)(src >> 32));
|
|
|
8febcd |
+ resultLo = (ULong)conv_float_to_bf16( (UInt)(src & 0xFFFFFFFF));
|
|
|
8febcd |
+ return (resultHi << 32) | resultLo;
|
|
|
8febcd |
+
|
|
|
8febcd |
+}
|
|
|
8febcd |
+
|
|
|
8febcd |
+/* This C-helper takes a vector of two 16-bit bfloating point values
|
|
|
8febcd |
+ * and returns a vector containing one 32-bit float.
|
|
|
8febcd |
+ input: 0x0 hword1 0x0 hword3
|
|
|
8febcd |
+ output: word0 word1
|
|
|
8febcd |
+ */
|
|
|
8febcd |
+ULong convert_from_bf16tofloat_helper( ULong src ) {
|
|
|
8febcd |
+ ULong result;
|
|
|
8febcd |
+ union convert_t conv;
|
|
|
8febcd |
+ conv.f = conv_bf16_to_float( (UInt)(src >> 32) );
|
|
|
8febcd |
+ result = (ULong) conv.u32;
|
|
|
8febcd |
+ conv.f = conv_bf16_to_float( (UInt)(src & 0xFFFFFFFF));
|
|
|
8febcd |
+ result = (result << 32) | (ULong) conv.u32;
|
|
|
8febcd |
+ return result;
|
|
|
8febcd |
+ }
|
|
|
8febcd |
+
|
|
|
8febcd |
void vsx_matrix_16bit_float_ger_dirty_helper( VexGuestPPC64State* gst,
|
|
|
8febcd |
UInt offset_ACC,
|
|
|
8febcd |
ULong srcA_hi, ULong srcA_lo,
|
|
|
8febcd |
@@ -2002,24 +2151,44 @@ void vsx_matrix_16bit_float_ger_dirty_helper( VexGuestPPC64State* gst,
|
|
|
8febcd |
srcB_word[0][j] = (UInt)((srcB_lo >> (16-16*j)) & mask);
|
|
|
8febcd |
}
|
|
|
8febcd |
|
|
|
8febcd |
+ /* Note the ISA is not consistent in the src naming. Will use the
|
|
|
8febcd |
+ naming src10, src11, src20, src21 used with xvf16ger2 instructions.
|
|
|
8febcd |
+ */
|
|
|
8febcd |
for( j = 0; j < 4; j++) {
|
|
|
8febcd |
if (((pmsk >> 1) & 0x1) == 0) {
|
|
|
8febcd |
src10 = 0;
|
|
|
8febcd |
src20 = 0;
|
|
|
8febcd |
} else {
|
|
|
8febcd |
- src10 = conv_f16_to_double((ULong)srcA_word[i][0]);
|
|
|
8febcd |
- src20 = conv_f16_to_double((ULong)srcB_word[j][0]);
|
|
|
8febcd |
+ if (( inst == XVF16GER2 ) || ( inst == XVF16GER2PP )
|
|
|
8febcd |
+ || ( inst == XVF16GER2PN ) || ( inst == XVF16GER2NP )
|
|
|
8febcd |
+ || ( inst == XVF16GER2NN )) {
|
|
|
8febcd |
+ src10 = conv_f16_to_double((ULong)srcA_word[i][0]);
|
|
|
8febcd |
+ src20 = conv_f16_to_double((ULong)srcB_word[j][0]);
|
|
|
8febcd |
+ } else {
|
|
|
8febcd |
+ /* Input is in bfloat format, result is stored in the
|
|
|
8febcd |
+ "traditional" 64-bit float format. */
|
|
|
8febcd |
+ src10 = (double)conv_bf16_to_float((ULong)srcA_word[i][0]);
|
|
|
8febcd |
+ src20 = (double)conv_bf16_to_float((ULong)srcB_word[j][0]);
|
|
|
8febcd |
+ }
|
|
|
8febcd |
}
|
|
|
8febcd |
|
|
|
8febcd |
if ((pmsk & 0x1) == 0) {
|
|
|
8febcd |
src11 = 0;
|
|
|
8febcd |
src21 = 0;
|
|
|
8febcd |
} else {
|
|
|
8febcd |
- src11 = conv_f16_to_double((ULong)srcA_word[i][1]);
|
|
|
8febcd |
- src21 = conv_f16_to_double((ULong)srcB_word[j][1]);
|
|
|
8febcd |
+ if (( inst == XVF16GER2 ) || ( inst == XVF16GER2PP )
|
|
|
8febcd |
+ || ( inst == XVF16GER2PN ) || ( inst == XVF16GER2NP )
|
|
|
8febcd |
+ || ( inst == XVF16GER2NN )) {
|
|
|
8febcd |
+ src11 = conv_f16_to_double((ULong)srcA_word[i][1]);
|
|
|
8febcd |
+ src21 = conv_f16_to_double((ULong)srcB_word[j][1]);
|
|
|
8febcd |
+ } else {
|
|
|
8febcd |
+ /* Input is in bfloat format, result is stored in the
|
|
|
8febcd |
+ "traditional" 64-bit float format. */
|
|
|
8febcd |
+ src11 = (double)conv_bf16_to_float((ULong)srcA_word[i][1]);
|
|
|
8febcd |
+ src21 = (double)conv_bf16_to_float((ULong)srcB_word[j][1]);
|
|
|
8febcd |
+ }
|
|
|
8febcd |
}
|
|
|
8febcd |
|
|
|
8febcd |
-
|
|
|
8febcd |
prod = src10 * src20;
|
|
|
8febcd |
msum = prod + src11 * src21;
|
|
|
8febcd |
|
|
|
8febcd |
@@ -2027,26 +2196,26 @@ void vsx_matrix_16bit_float_ger_dirty_helper( VexGuestPPC64State* gst,
|
|
|
8febcd |
/* Note, we do not track the exception handling bits
|
|
|
8febcd |
ox, ux, xx, si, mz, vxsnan and vximz in the FPSCR. */
|
|
|
8febcd |
|
|
|
8febcd |
- if ( inst == XVF16GER2 )
|
|
|
8febcd |
+ if (( inst == XVF16GER2 ) || ( inst == XVBF16GER2 ) )
|
|
|
8febcd |
result[j] = reinterpret_float_as_int(
|
|
|
8febcd |
conv_double_to_float(msum) );
|
|
|
8febcd |
|
|
|
8febcd |
- else if ( inst == XVF16GER2PP )
|
|
|
8febcd |
+ else if (( inst == XVF16GER2PP ) || (inst == XVBF16GER2PP ))
|
|
|
8febcd |
result[j] = reinterpret_float_as_int(
|
|
|
8febcd |
conv_double_to_float(msum)
|
|
|
8febcd |
+ acc_word[j] );
|
|
|
8febcd |
|
|
|
8febcd |
- else if ( inst == XVF16GER2PN )
|
|
|
8febcd |
+ else if (( inst == XVF16GER2PN ) || ( inst == XVBF16GER2PN ))
|
|
|
8febcd |
result[j] = reinterpret_float_as_int(
|
|
|
8febcd |
conv_double_to_float(msum)
|
|
|
8febcd |
+ negate_float( acc_word[j] ) );
|
|
|
8febcd |
|
|
|
8febcd |
- else if ( inst == XVF16GER2NP )
|
|
|
8febcd |
+ else if (( inst == XVF16GER2NP ) || ( inst == XVBF16GER2NP ))
|
|
|
8febcd |
result[j] = reinterpret_float_as_int(
|
|
|
8febcd |
conv_double_to_float( negate_double( msum ) )
|
|
|
8febcd |
+ acc_word[j] );
|
|
|
8febcd |
|
|
|
8febcd |
- else if ( inst == XVF16GER2NN )
|
|
|
8febcd |
+ else if (( inst == XVF16GER2NN ) || ( inst == XVBF16GER2NN ))
|
|
|
8febcd |
result[j] = reinterpret_float_as_int(
|
|
|
8febcd |
conv_double_to_float( negate_double( msum ) )
|
|
|
8febcd |
+ negate_float( acc_word[j] ) );
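In the dispatch above the four accumulate forms differ only in the signs applied to the product sum and to the previous accumulator lane: the pp form computes msum + acc, pn computes msum - acc, np computes -msum + acc, and nn computes -msum - acc (the bf16 variants share the same paths). A compact illustration of that sign pattern, as a standalone sketch rather than the patch's code:

/* Sign pattern of the GER accumulate variants.  'msum' stands for the rank-2
   product sum and 'acc' for the old accumulator lane; the real helper does
   this in double precision and then narrows to float. */
#include <stdio.h>

static float ger_accumulate( float msum, float acc,
                             int negate_msum, int negate_acc )
{
   if ( negate_msum ) msum = -msum;
   if ( negate_acc )  acc  = -acc;
   return msum + acc;
}

int main( void )
{
   float msum = 3.0f, acc = 1.0f;
   printf( "pp: %g\n", ger_accumulate( msum, acc, 0, 0 ) ); /*  4 */
   printf( "pn: %g\n", ger_accumulate( msum, acc, 0, 1 ) ); /*  2 */
   printf( "np: %g\n", ger_accumulate( msum, acc, 1, 0 ) ); /* -2 */
   printf( "nn: %g\n", ger_accumulate( msum, acc, 1, 1 ) ); /* -4 */
   return 0;
}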
diff --git a/VEX/priv/guest_ppc_toIR.c b/VEX/priv/guest_ppc_toIR.c
index 354be6b53..20553a539 100644
--- a/VEX/priv/guest_ppc_toIR.c
+++ b/VEX/priv/guest_ppc_toIR.c
@@ -5688,6 +5688,57 @@ static IRExpr * convert_from_national ( const VexAbiInfo* vbi, IRExpr *src ) {
return mkexpr( result );
}

+static IRExpr * vector_convert_floattobf16 ( const VexAbiInfo* vbi,
+ IRExpr *src ) {
+ /* The function takes 128-bit value containing four 32-bit floats and
+ returns a 128-bit value containing four 16-bit bfloats in the lower
+ halfwords. */
+
+ IRTemp resultHi = newTemp( Ity_I64);
+ IRTemp resultLo = newTemp( Ity_I64);
+
+ assign( resultHi,
+ mkIRExprCCall( Ity_I64, 0 /*regparms*/,
+ "vector_convert_floattobf16_helper",
+ fnptr_to_fnentry( vbi,
+ &convert_from_floattobf16_helper ),
+ mkIRExprVec_1( unop( Iop_V128HIto64, src ) ) ) );
+
+ assign( resultLo,
+ mkIRExprCCall( Ity_I64, 0 /*regparms*/,
+ "vector_convert_floattobf16_helper",
+ fnptr_to_fnentry( vbi,
+ &convert_from_floattobf16_helper ),
+ mkIRExprVec_1( unop( Iop_V128to64, src ) ) ) );
+
+ return binop( Iop_64HLtoV128, mkexpr( resultHi ), mkexpr( resultLo ) );
+}
+
+static IRExpr * vector_convert_bf16tofloat ( const VexAbiInfo* vbi,
+ IRExpr *src ) {
+ /* The function takes 128-bit value containing four 16-bit bfloats in
+ the lower halfwords and returns a 128-bit value containing four
+ 32-bit floats. */
+ IRTemp resultHi = newTemp( Ity_I64);
+ IRTemp resultLo = newTemp( Ity_I64);
+
+ assign( resultHi,
+ mkIRExprCCall( Ity_I64, 0 /*regparms*/,
+ "vector_convert_bf16tofloat_helper",
+ fnptr_to_fnentry( vbi,
+ &convert_from_bf16tofloat_helper ),
+ mkIRExprVec_1( unop( Iop_V128HIto64, src ) ) ) );
+
+ assign( resultLo,
+ mkIRExprCCall( Ity_I64, 0 /*regparms*/,
+ "vector_convert_bf16tofloat_helper",
+ fnptr_to_fnentry( vbi,
+ &convert_from_bf16tofloat_helper ),
+ mkIRExprVec_1( unop( Iop_V128to64, src ) ) ) );
+
+ return binop( Iop_64HLtoV128, mkexpr( resultHi ), mkexpr( resultLo ) );
+}
+
static IRExpr * popcnt64 ( const VexAbiInfo* vbi,
IRExpr *src ){
/* The function takes a 64-bit source and counts the number of bits in the
@@ -5936,6 +5987,7 @@ static void vsx_matrix_ger ( const VexAbiInfo* vbi,
case XVI16GER2:
case XVI16GER2S:
case XVF16GER2:
+ case XVBF16GER2:
case XVF32GER:
AT_fx = Ifx_Write;
break;
@@ -5943,6 +5995,10 @@ static void vsx_matrix_ger ( const VexAbiInfo* vbi,
case XVI8GER4PP:
case XVI16GER2PP:
case XVI16GER2SPP:
+ case XVBF16GER2PP:
+ case XVBF16GER2PN:
+ case XVBF16GER2NP:
+ case XVBF16GER2NN:
case XVF16GER2PP:
case XVF16GER2PN:
case XVF16GER2NP:
@@ -23899,6 +23955,24 @@ dis_vxs_misc( UInt prefix, UInt theInstr, const VexAbiInfo* vbi, UInt opc2,
mkexpr( sub_element1 ),
mkexpr( sub_element0 ) ) ) );

+ } else if ((inst_select == 16) && !prefix) {
+ IRTemp result = newTemp(Ity_V128);
+ UChar xT_addr = ifieldRegXT ( theInstr );
+ UChar xB_addr = ifieldRegXB ( theInstr );
+ /* Convert 16-bit bfloat to 32-bit float, not a prefix inst */
+ DIP("xvcvbf16sp v%u,v%u\n", xT_addr, xB_addr);
+ assign( result, vector_convert_bf16tofloat( vbi, mkexpr( vB ) ) );
+ putVSReg( XT, mkexpr( result) );
+
+ } else if ((inst_select == 17) && !prefix) {
+ IRTemp result = newTemp(Ity_V128);
+ UChar xT_addr = ifieldRegXT ( theInstr );
+ UChar xB_addr = ifieldRegXB ( theInstr );
+ /* Convert 32-bit float to 16-bit bfloat, not a prefix inst */
+ DIP("xvcvspbf16 v%u,v%u\n", xT_addr, xB_addr);
+ assign( result, vector_convert_floattobf16( vbi, mkexpr( vB ) ) );
+ putVSReg( XT, mkexpr( result) );
+
} else if (inst_select == 23) {
DIP("xxbrd v%u, v%u\n", (UInt)XT, (UInt)XB);

@@ -34956,6 +35030,41 @@ static Bool dis_vsx_accumulator_prefix ( UInt prefix, UInt theInstr,
getVSReg( rB_addr ), AT,
( ( inst_prefix << 8 ) | XO ) );
break;
+ case XVBF16GER2:
+ DIP("xvbf16ger2 %u,r%u, r%u\n", AT, rA_addr, rB_addr);
+ vsx_matrix_ger( vbi, MATRIX_16BIT_FLOAT_GER,
+ getVSReg( rA_addr ),
+ getVSReg( rB_addr ), AT,
+ ( ( inst_prefix << 8 ) | XO ) );
+ break;
+ case XVBF16GER2PP:
+ DIP("xvbf16ger2pp %u,r%u, r%u\n", AT, rA_addr, rB_addr);
+ vsx_matrix_ger( vbi, MATRIX_16BIT_FLOAT_GER,
+ getVSReg( rA_addr ),
+ getVSReg( rB_addr ), AT,
+ ( ( inst_prefix << 8 ) | XO ) );
+ break;
+ case XVBF16GER2PN:
+ DIP("xvbf16ger2pn %u,r%u, r%u\n", AT, rA_addr, rB_addr);
+ vsx_matrix_ger( vbi, MATRIX_16BIT_FLOAT_GER,
+ getVSReg( rA_addr ),
+ getVSReg( rB_addr ), AT,
+ ( ( inst_prefix << 8 ) | XO ) );
+ break;
+ case XVBF16GER2NP:
+ DIP("xvbf16ger2np %u,r%u, r%u\n", AT, rA_addr, rB_addr);
+ vsx_matrix_ger( vbi, MATRIX_16BIT_FLOAT_GER,
+ getVSReg( rA_addr ),
+ getVSReg( rB_addr ), AT,
+ ( ( inst_prefix << 8 ) | XO ) );
+ break;
+ case XVBF16GER2NN:
+ DIP("xvbf16ger2nn %u,r%u, r%u\n", AT, rA_addr, rB_addr);
+ vsx_matrix_ger( vbi, MATRIX_16BIT_FLOAT_GER,
+ getVSReg( rA_addr ),
+ getVSReg( rB_addr ), AT,
+ ( ( inst_prefix << 8 ) | XO ) );
+ break;
case XVF32GER:
DIP("xvf32ger %u,r%u, r%u\n", AT, rA_addr, rB_addr);
vsx_matrix_ger( vbi, MATRIX_32BIT_FLOAT_GER,
@@ -35106,6 +35215,61 @@ static Bool dis_vsx_accumulator_prefix ( UInt prefix, UInt theInstr,
AT,
( (MASKS << 9 ) | ( inst_prefix << 8 ) | XO ) );
break;
+ case XVBF16GER2:
+ PMSK = IFIELD( prefix, 14, 2);
+ XMSK = IFIELD( prefix, 4, 4);
+ YMSK = IFIELD( prefix, 0, 4);
+ DIP("pmxvbf16ger2 %u,r%u, r%u\n", AT, rA_addr, rB_addr);
+ vsx_matrix_ger( vbi, MATRIX_16BIT_FLOAT_GER,
+ getVSReg( rA_addr ),
+ getVSReg( rB_addr ),
+ AT, ( (MASKS << 9 )
+ | ( inst_prefix << 8 ) | XO ) );
+ break;
+ case XVBF16GER2PP:
+ PMSK = IFIELD( prefix, 14, 2);
+ XMSK = IFIELD( prefix, 4, 4);
+ YMSK = IFIELD( prefix, 0, 4);
+ DIP("pmxvbf16ger2pp %u,r%u, r%u\n", AT, rA_addr, rB_addr);
+ vsx_matrix_ger( vbi, MATRIX_16BIT_FLOAT_GER,
+ getVSReg( rA_addr ),
+ getVSReg( rB_addr ),
+ AT, ( (MASKS << 9 )
+ | ( inst_prefix << 8 ) | XO ) );
+ break;
+ case XVBF16GER2PN:
+ PMSK = IFIELD( prefix, 14, 2);
+ XMSK = IFIELD( prefix, 4, 4);
+ YMSK = IFIELD( prefix, 0, 4);
+ DIP("pmxvbf16ger2pn %u,r%u, r%u\n", AT, rA_addr, rB_addr);
+ vsx_matrix_ger( vbi, MATRIX_16BIT_FLOAT_GER,
+ getVSReg( rA_addr ),
+ getVSReg( rB_addr ),
+ AT, ( (MASKS << 9 )
+ | ( inst_prefix << 8 ) | XO ) );
+ break;
+ case XVBF16GER2NP:
+ PMSK = IFIELD( prefix, 14, 2);
+ XMSK = IFIELD( prefix, 4, 4);
+ YMSK = IFIELD( prefix, 0, 4);
+ DIP("pmxvbf16ger2np %u,r%u, r%u\n", AT, rA_addr, rB_addr);
+ vsx_matrix_ger( vbi, MATRIX_16BIT_FLOAT_GER,
+ getVSReg( rA_addr ),
+ getVSReg( rB_addr ),
+ AT, ( (MASKS << 9 )
+ | ( inst_prefix << 8 ) | XO ) );
+ break;
+ case XVBF16GER2NN:
+ PMSK = IFIELD( prefix, 14, 2);
+ XMSK = IFIELD( prefix, 4, 4);
+ YMSK = IFIELD( prefix, 0, 4);
+ DIP("pmxvbf16ger2nn %u,r%u, r%u\n", AT, rA_addr, rB_addr);
+ vsx_matrix_ger( vbi, MATRIX_16BIT_FLOAT_GER,
+ getVSReg( rA_addr ),
+ getVSReg( rB_addr ),
+ AT, ( (MASKS << 9 )
+ | ( inst_prefix << 8 ) | XO ) );
+ break;
case XVF16GER2:
PMSK = IFIELD( prefix, 14, 2);
XMSK = IFIELD( prefix, 4, 4);
@@ -36181,6 +36345,11 @@ DisResult disInstr_PPC_WRK (
(opc2 == XVI4GER8PP) || // xvi4ger8pp
(opc2 == XVI8GER4) || // xvi8ger4
(opc2 == XVI8GER4PP) || // xvi8ger4pp
+ (opc2 == XVBF16GER2) || // xvbf16ger2
+ (opc2 == XVBF16GER2PP) || // xvbf16ger2pp
+ (opc2 == XVBF16GER2PN) || // xvbf16ger2pn
+ (opc2 == XVBF16GER2NP) || // xvbf16ger2np
+ (opc2 == XVBF16GER2NN) || // xvbf16ger2nn
(opc2 == XVF16GER2) || // xvf16ger2
(opc2 == XVF16GER2PP) || // xvf16ger2pp
(opc2 == XVF16GER2PN) || // xvf16ger2pn
commit e09fdaf569b975717465ed8043820d0198d4d47d
Author: Carl Love <cel@us.ibm.com>
Date: Fri Feb 26 16:05:12 2021 -0600

PPC64: Reduced-Precision: Missing Integer-based Outer Product Operations

Add support for:

pmxvi16ger2 VSX Vector 16-bit Signed Integer GER (rank-2 update), Prefixed
Masked
pmxvi16ger2pp VSX Vector 16-bit Signed Integer GER (rank-2 update) (Positive
multiply, Positive accumulate), Prefixed Masked
pmxvi8ger4spp VSX Vector 8-bit Signed/Unsigned Integer GER (rank-4 update) with
Saturation (Positive multiply, Positive accumulate), Prefixed Masked
xvi16ger2 VSX Vector 16-bit Signed Integer GER (rank-2 update)
xvi16ger2pp VSX Vector 16-bit Signed Integer GER (rank-2 update) (Positive
multiply, Positive accumulate)
xvi8ger4spp VSX Vector 8-bit Signed/Unsigned Integer GER (rank-4 update) with
Saturation (Positive multiply, Positive accumulate)
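The xvi16ger2 family added by this commit performs a rank-2 update of each 32-bit accumulator lane: the two signed 16-bit halves of a srcA word are multiplied by the matching halves of a srcB word, summed in 64 bits, optionally added to the old accumulator value, and then either chopped or saturated to 32 bits. A standalone sketch of one lane, assuming clampS64toS32() (not shown in this patch) saturates to the signed 32-bit range:

/* One accumulator lane of a 16-bit integer rank-2 GER update (illustration
   only, not the patch's helper).  Products are formed in 64 bits so the
   saturating form clamps the true sum rather than a wrapped 32-bit one.
   'acc' is the previous accumulator lane (0 for the non-accumulating form). */
#include <stdint.h>
#include <stdio.h>

static int32_t clamp_s64_to_s32( int64_t v )   /* assumed behaviour of */
{                                              /* clampS64toS32()      */
   if ( v > INT32_MAX ) return INT32_MAX;
   if ( v < INT32_MIN ) return INT32_MIN;
   return (int32_t)v;
}

static int32_t i16ger2_lane( int16_t a0, int16_t a1,
                             int16_t b0, int16_t b1,
                             int32_t acc, int saturate )
{
   int64_t sum = (int64_t)a0 * b0 + (int64_t)a1 * b1 + acc;
   return saturate ? clamp_s64_to_s32( sum ) : (int32_t)sum;
}

int main( void )
{
   printf( "%d\n", i16ger2_lane( 3, -2, 4, 5, 10, 0 ) );   /* 12 */
   printf( "%d\n", i16ger2_lane( 32767, 32767, 32767, 32767,
                                 INT32_MAX, 1 ) );         /* clamped to 2147483647 */
   return 0;
}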

diff --git a/VEX/priv/guest_ppc_helpers.c b/VEX/priv/guest_ppc_helpers.c
index 6bcee966d..d8131eb60 100644
--- a/VEX/priv/guest_ppc_helpers.c
+++ b/VEX/priv/guest_ppc_helpers.c
@@ -1446,16 +1446,16 @@ static UInt exts4( UInt src)
return src & 0xF; /* make sure high order bits are zero */
}

-static UInt exts8( UInt src)
+static ULong exts8( UInt src)
{
- /* Input is an 8-bit value. Extend bit 7 to bits [31:8] */
+ /* Input is an 8-bit value. Extend bit 7 to bits [63:8] */
if (( src >> 7 ) & 0x1)
- return src | 0xFFFFFF00; /* sign bit is a 1, extend */
+ return src | 0xFFFFFFFFFFFFFF00ULL; /* sign bit is a 1, extend */
else
return src & 0xFF; /* make sure high order bits are zero */
}

-static UInt extz8( UInt src)
+static ULong extz8( UInt src)
{
/* Input is an 8-bit value. Extend src on the left with zeros. */
return src & 0xFF; /* make sure high order bits are zero */
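exts8() and extz8() now return ULong so the byte products and their sums in the GER helpers below can be carried in 64 bits; with the old 32-bit return type a large intermediate sum could wrap before the new saturating path gets a chance to clamp it. A small standalone model of the two extensions (an illustration, not the patch's code):

/* Sign- and zero-extension of an 8-bit value to 64 bits, mirroring what the
   widened exts8()/extz8() return. */
#include <stdint.h>
#include <stdio.h>

static uint64_t exts8_model( uint32_t src )
{
   if ( ( src >> 7 ) & 0x1 )
      return src | 0xFFFFFFFFFFFFFF00ULL; /* sign bit set: extend with ones */
   return src & 0xFF;                     /* otherwise clear the high bits  */
}

static uint64_t extz8_model( uint32_t src )
{
   return src & 0xFF;                     /* unsigned byte, zero extended   */
}

int main( void )
{
   printf( "exts8(0x80) = %lld\n", (long long)exts8_model( 0x80 ) );          /* -128 */
   printf( "extz8(0x80) = %llu\n", (unsigned long long)extz8_model( 0x80 ) ); /* 128  */
   return 0;
}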
@@ -1662,12 +1662,12 @@ void vsx_matrix_8bit_ger_dirty_helper( VexGuestPPC64State* gst,
ULong srcB_hi, ULong srcB_lo,
UInt masks_inst )
{
- UInt i, j, mask, sum, inst, acc_entry, prefix_inst;
+ UInt i, j, mask, inst, acc_entry, prefix_inst;

UInt srcA_bytes[4][4]; /* word, byte */
UInt srcB_bytes[4][4]; /* word, byte */
UInt acc_word[4];
- UInt prod0, prod1, prod2, prod3;
+ ULong prod0, prod1, prod2, prod3, sum;
UInt result[4];
UInt pmsk = 0;
UInt xmsk = 0;
@@ -1742,10 +1742,13 @@ void vsx_matrix_8bit_ger_dirty_helper( VexGuestPPC64State* gst,
sum = prod0 + prod1 + prod2 + prod3;

if ( inst == XVI8GER4 )
- result[j] = sum;
+ result[j] = chop64to32( sum );

else if ( inst == XVI8GER4PP )
- result[j] = sum + acc_word[j];
+ result[j] = chop64to32( sum + acc_word[j] );
+
+ else if ( inst == XVI8GER4SPP )
+ result[j] = clampS64toS32(sum + acc_word[j]);

} else {
result[j] = 0;
@@ -1821,7 +1824,7 @@ void vsx_matrix_16bit_ger_dirty_helper( VexGuestPPC64State* gst,
else
prod1 = exts16to64( srcA_word[i][1] )
* exts16to64( srcB_word[j][1] );
- /* sum is UInt so the result is choped to 32-bits */
+
sum = prod0 + prod1;

if ( inst == XVI16GER2 )
@@ -1830,13 +1833,11 @@ void vsx_matrix_16bit_ger_dirty_helper( VexGuestPPC64State* gst,
else if ( inst == XVI16GER2S )
result[j] = clampS64toS32( sum );

- else if ( inst == XVI16GER2PP ) {
+ else if ( inst == XVI16GER2PP )
result[j] = chop64to32( sum + acc_word[j] );
- }

- else if ( inst == XVI16GER2SPP ) {
+ else if ( inst == XVI16GER2SPP )
result[j] = clampS64toS32( sum + acc_word[j] );
- }

} else {
result[j] = 0;
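In the 8-bit GER path above the non-saturating forms simply truncate the 64-bit sum to 32 bits (chop64to32()), while the new xvi8ger4spp clamps it (clampS64toS32()); the definitions of those two helpers are not shown in this hunk, so the sketch below assumes the obvious truncate-versus-saturate behaviour. It also assumes the srcA bytes are the signed operand and srcB the unsigned one, which is how the "Signed/Unsigned" pairing in the commit message is read here.

/* One lane of an 8-bit rank-4 GER update, contrasting truncation with
   saturation (illustration only, not the patch's helper). */
#include <stdint.h>
#include <stdio.h>

static int32_t lane_i8ger4( const int8_t a[4], const uint8_t b[4],
                            int32_t acc, int saturate )
{
   int64_t sum = acc;
   for ( int k = 0; k < 4; k++ )
      sum += (int64_t)a[k] * b[k];   /* signed x unsigned byte product */
   if ( saturate ) {
      if ( sum > INT32_MAX ) return INT32_MAX;
      if ( sum < INT32_MIN ) return INT32_MIN;
   }
   return (int32_t)sum;              /* chop: keep the low 32 bits */
}

int main( void )
{
   int8_t  a[4] = { 127, 127, 127, 127 };
   uint8_t b[4] = { 255, 255, 255, 255 };
   printf( "pp  (chop) : %d\n", lane_i8ger4( a, b, INT32_MAX, 0 ) ); /* wraps      */
   printf( "spp (clamp): %d\n", lane_i8ger4( a, b, INT32_MAX, 1 ) ); /* 2147483647 */
   return 0;
}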
diff --git a/VEX/priv/guest_ppc_toIR.c b/VEX/priv/guest_ppc_toIR.c
index 20553a539..e54f0f389 100644
--- a/VEX/priv/guest_ppc_toIR.c
+++ b/VEX/priv/guest_ppc_toIR.c
@@ -5993,6 +5993,7 @@ static void vsx_matrix_ger ( const VexAbiInfo* vbi,
break;
case XVI4GER8PP:
case XVI8GER4PP:
+ case XVI8GER4SPP:
case XVI16GER2PP:
case XVI16GER2SPP:
case XVBF16GER2PP:
@@ -34983,6 +34984,12 @@ static Bool dis_vsx_accumulator_prefix ( UInt prefix, UInt theInstr,
getVSReg( rA_addr ), getVSReg( rB_addr ),
AT, ( ( inst_prefix << 8 ) | XO ) );
break;
+ case XVI8GER4SPP:
+ DIP("xvi8ger4spp %u,r%u, r%u\n", AT, rA_addr, rB_addr);
+ vsx_matrix_ger( vbi, MATRIX_8BIT_INT_GER,
+ getVSReg( rA_addr ), getVSReg( rB_addr ),
+ AT, ( ( inst_prefix << 8 ) | XO ) );
+ break;
case XVI16GER2S:
DIP("xvi16ger2s %u,r%u, r%u\n", AT, rA_addr, rB_addr);
vsx_matrix_ger( vbi, MATRIX_16BIT_INT_GER,
@@ -34995,6 +35002,19 @@ static Bool dis_vsx_accumulator_prefix ( UInt prefix, UInt theInstr,
getVSReg( rA_addr ), getVSReg( rB_addr ),
AT, ( ( inst_prefix << 8 ) | XO ) );
break;
+ case XVI16GER2:
+ DIP("xvi16ger2 %u,r%u, r%u\n", AT, rA_addr, rB_addr);
+ vsx_matrix_ger( vbi, MATRIX_16BIT_INT_GER,
+ getVSReg( rA_addr ), getVSReg( rB_addr ),
+ AT, ( ( inst_prefix << 8 ) | XO ) );
+ break;
+ case XVI16GER2PP:
+ DIP("xvi16ger2pp %u,r%u, r%u\n", AT, rA_addr, rB_addr);
+ vsx_matrix_ger( vbi, MATRIX_16BIT_INT_GER,
+ getVSReg( rA_addr ), getVSReg( rB_addr ),
+ AT, ( ( inst_prefix << 8 ) | XO ) );
+ break;
+
case XVF16GER2:
DIP("xvf16ger2 %u,r%u, r%u\n", AT, rA_addr, rB_addr);
vsx_matrix_ger( vbi, MATRIX_16BIT_FLOAT_GER,
@@ -35193,6 +35213,39 @@ static Bool dis_vsx_accumulator_prefix ( UInt prefix, UInt theInstr,
AT,
( (MASKS << 9 ) | ( inst_prefix << 8 ) | XO ) );
break;
+ case XVI8GER4SPP:
+ PMSK = IFIELD( prefix, 12, 4);
+ XMSK = IFIELD( prefix, 4, 4);
+ YMSK = IFIELD( prefix, 0, 4);
+ DIP("pmxvi8ger4spp %u,r%u, r%u,%u,%u,%u\n",
+ AT, rA_addr, rB_addr, XMSK, YMSK, PMSK);
+ vsx_matrix_ger( vbi, MATRIX_8BIT_INT_GER,
+ getVSReg( rA_addr ), getVSReg( rB_addr ),
+ AT,
+ ( (MASKS << 9 ) | ( inst_prefix << 8 ) | XO ) );
+ break;
+ case XVI16GER2:
+ PMSK = IFIELD( prefix, 12, 4);
+ XMSK = IFIELD( prefix, 4, 4);
+ YMSK = IFIELD( prefix, 0, 4);
+ DIP("pmxvi16ger2 %u,r%u, r%u,%u,%u,%u\n",
+ AT, rA_addr, rB_addr, XMSK, YMSK, PMSK);
+ vsx_matrix_ger( vbi, MATRIX_16BIT_INT_GER,
+ getVSReg( rA_addr ), getVSReg( rB_addr ),
+ AT,
+ ( (MASKS << 9 ) | ( inst_prefix << 8 ) | XO ) );
+ break;
+ case XVI16GER2PP:
+ PMSK = IFIELD( prefix, 12, 4);
+ XMSK = IFIELD( prefix, 4, 4);
+ YMSK = IFIELD( prefix, 0, 4);
+ DIP("pmxvi16ger2pp %u,r%u, r%u,%u,%u,%u\n",
+ AT, rA_addr, rB_addr, XMSK, YMSK, PMSK);
+ vsx_matrix_ger( vbi, MATRIX_16BIT_INT_GER,
+ getVSReg( rA_addr ), getVSReg( rB_addr ),
+ AT,
+ ( (MASKS << 9 ) | ( inst_prefix << 8 ) | XO ) );
+ break;
case XVI16GER2S:
PMSK = IFIELD( prefix, 14, 2);
XMSK = IFIELD( prefix, 4, 4);
@@ -36345,6 +36398,9 @@ DisResult disInstr_PPC_WRK (
(opc2 == XVI4GER8PP) || // xvi4ger8pp
(opc2 == XVI8GER4) || // xvi8ger4
(opc2 == XVI8GER4PP) || // xvi8ger4pp
+ (opc2 == XVI8GER4SPP) || // xvi8ger4spp
+ (opc2 == XVI16GER2) || // xvi16ger2
+ (opc2 == XVI16GER2PP) || // xvi16ger2pp
(opc2 == XVBF16GER2) || // xvbf16ger2
(opc2 == XVBF16GER2PP) || // xvbf16ger2pp
(opc2 == XVBF16GER2PN) || // xvbf16ger2pn