aboutsummaryrefslogtreecommitdiff
path: root/lib/Target/X86/X86InstrAVX512.td
diff options
context:
space:
mode:
Diffstat (limited to 'lib/Target/X86/X86InstrAVX512.td')
-rw-r--r--lib/Target/X86/X86InstrAVX512.td81
1 files changed, 69 insertions, 12 deletions
diff --git a/lib/Target/X86/X86InstrAVX512.td b/lib/Target/X86/X86InstrAVX512.td
index 14faaac08cc..e05669f9ef9 100644
--- a/lib/Target/X86/X86InstrAVX512.td
+++ b/lib/Target/X86/X86InstrAVX512.td
@@ -7969,26 +7969,53 @@ multiclass avx512_vcvt_fp<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
X86VectorVTInfo _Src, SDNode OpNode,
X86FoldableSchedWrite sched,
string Broadcast = _.BroadcastStr,
- string Alias = "", X86MemOperand MemOp = _Src.MemOp> {
+ string Alias = "", X86MemOperand MemOp = _Src.MemOp,
+ RegisterClass MaskRC = _.KRCWM> {
- defm rr : AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
- (ins _Src.RC:$src), OpcodeStr, "$src", "$src",
- (_.VT (OpNode (_Src.VT _Src.RC:$src)))>,
+ defm rr : AVX512_maskable_common<opc, MRMSrcReg, _, (outs _.RC:$dst),
+ (ins _Src.RC:$src),
+ (ins _.RC:$src0, MaskRC:$mask, _Src.RC:$src),
+ (ins MaskRC:$mask, _Src.RC:$src),
+ OpcodeStr, "$src", "$src",
+ (_.VT (OpNode (_Src.VT _Src.RC:$src))),
+ (vselect MaskRC:$mask,
+ (_.VT (OpNode (_Src.VT _Src.RC:$src))),
+ _.RC:$src0),
+ vselect, "$src0 = $dst">,
EVEX, Sched<[sched]>;
- defm rm : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
- (ins MemOp:$src), OpcodeStr#Alias, "$src", "$src",
+ defm rm : AVX512_maskable_common<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins MemOp:$src),
+ (ins _.RC:$src0, MaskRC:$mask, MemOp:$src),
+ (ins MaskRC:$mask, MemOp:$src),
+ OpcodeStr#Alias, "$src", "$src",
(_.VT (OpNode (_Src.VT
- (_Src.LdFrag addr:$src))))>,
+ (_Src.LdFrag addr:$src)))),
+ (vselect MaskRC:$mask,
+ (_.VT (OpNode (_Src.VT
+ (_Src.LdFrag addr:$src)))),
+ _.RC:$src0),
+ vselect, "$src0 = $dst">,
EVEX, Sched<[sched.Folded]>;
- defm rmb : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
- (ins _Src.ScalarMemOp:$src), OpcodeStr,
+ defm rmb : AVX512_maskable_common<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _Src.ScalarMemOp:$src),
+ (ins _.RC:$src0, MaskRC:$mask, _Src.ScalarMemOp:$src),
+ (ins MaskRC:$mask, _Src.ScalarMemOp:$src),
+ OpcodeStr,
"${src}"##Broadcast, "${src}"##Broadcast,
(_.VT (OpNode (_Src.VT
(X86VBroadcast (_Src.ScalarLdFrag addr:$src)))
- ))>, EVEX, EVEX_B,
- Sched<[sched.Folded]>;
+ )),
+ (vselect MaskRC:$mask,
+ (_.VT
+ (OpNode
+ (_Src.VT
+ (X86VBroadcast
+ (_Src.ScalarLdFrag addr:$src))))),
+ _.RC:$src0),
+ vselect, "$src0 = $dst">,
+ EVEX, EVEX_B, Sched<[sched.Folded]>;
}
// Conversion with SAE - suppress all exceptions
multiclass avx512_vcvt_fp_sae<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
@@ -8039,7 +8066,8 @@ multiclass avx512_cvtpd2ps<bits<8> opc, string OpcodeStr, X86SchedWriteWidths sc
}
let Predicates = [HasVLX] in {
defm Z128 : avx512_vcvt_fp<opc, OpcodeStr, v4f32x_info, v2f64x_info,
- X86vfpround, sched.XMM, "{1to2}", "{x}">, EVEX_V128;
+ null_frag, sched.XMM, "{1to2}", "{x}", f128mem, VK2WM>,
+ EVEX_V128;
defm Z256 : avx512_vcvt_fp<opc, OpcodeStr, v4f32x_info, v4f64x_info, fpround,
sched.YMM, "{1to4}", "{y}">, EVEX_V256;
@@ -8073,6 +8101,35 @@ let Predicates = [HasVLX] in {
(VCVTPS2PDZ128rm addr:$src)>;
def : Pat<(v4f64 (extloadv4f32 addr:$src)),
(VCVTPS2PDZ256rm addr:$src)>;
+
+ // Special patterns to allow use of X86vmfpround for masking. Instruction
+ // patterns have been disabled with null_frag.
+ def : Pat<(X86vfpround (v2f64 VR128X:$src)),
+ (VCVTPD2PSZ128rr VR128X:$src)>;
+ def : Pat<(X86vmfpround (v2f64 VR128X:$src), (v4f32 VR128X:$src0),
+ VK2WM:$mask),
+ (VCVTPD2PSZ128rrk VR128X:$src0, VK2WM:$mask, VR128X:$src)>;
+ def : Pat<(X86vmfpround (v2f64 VR128X:$src), v4f32x_info.ImmAllZerosV,
+ VK2WM:$mask),
+ (VCVTPD2PSZ128rrkz VK2WM:$mask, VR128X:$src)>;
+
+ def : Pat<(X86vfpround (loadv2f64 addr:$src)),
+ (VCVTPD2PSZ128rm addr:$src)>;
+ def : Pat<(X86vmfpround (loadv2f64 addr:$src), (v4f32 VR128X:$src0),
+ VK2WM:$mask),
+ (VCVTPD2PSZ128rmk VR128X:$src0, VK2WM:$mask, addr:$src)>;
+ def : Pat<(X86vmfpround (loadv2f64 addr:$src), v4f32x_info.ImmAllZerosV,
+ VK2WM:$mask),
+ (VCVTPD2PSZ128rmkz VK2WM:$mask, addr:$src)>;
+
+ def : Pat<(X86vfpround (v2f64 (X86VBroadcast (loadf64 addr:$src)))),
+ (VCVTPD2PSZ128rmb addr:$src)>;
+ def : Pat<(X86vmfpround (v2f64 (X86VBroadcast (loadf64 addr:$src))),
+ (v4f32 VR128X:$src0), VK2WM:$mask),
+ (VCVTPD2PSZ128rmbk VR128X:$src0, VK2WM:$mask, addr:$src)>;
+ def : Pat<(X86vmfpround (v2f64 (X86VBroadcast (loadf64 addr:$src))),
+ v4f32x_info.ImmAllZerosV, VK2WM:$mask),
+ (VCVTPD2PSZ128rmbkz VK2WM:$mask, addr:$src)>;
}
// Convert Signed/Unsigned Doubleword to Double