author     Craig Topper <craig.topper@intel.com>  2018-10-03 21:10:29 +0000
committer  Craig Topper <craig.topper@intel.com>  2018-10-03 21:10:29 +0000
commit     95384e803c4cd32a9d6e50f3b96a632d054d0583
tree       77ce75cce282add9cd1f95d37f79f8b1617f7521
parent     21945177ca7db844c98071c3ae66207ea2593cb4
[X86] Stop promoting vector ISD::SELECT to vXi64.
The additional patterns needed for this aren't overwhelming, and introducing extra bitcasts during lowering limits our ability to do computeNumSignBits. Not that I have a good example of that for select. I'm just becoming increasingly grumpy about promotion of AND/OR/XOR. SELECT was just a lot easier to fix.
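To make the bitcast concern concrete, here is a minimal sketch (not code from this patch; the helper name and operand names are hypothetical) of what the old promotion path effectively built for a v4i32 select, in SelectionDAG terms:

    // Hypothetical illustration of the pre-patch promotion of a v4i32
    // ISD::SELECT to v2i64; not actual code from the LLVM tree.
    #include "llvm/CodeGen/SelectionDAG.h"
    using namespace llvm;

    static SDValue promoteSelectToV2i64(SelectionDAG &DAG, const SDLoc &DL,
                                        SDValue Cond, SDValue LHS, SDValue RHS) {
      // Promotion wraps both vector operands in bitcasts to the wider type...
      SDValue WideLHS = DAG.getBitcast(MVT::v2i64, LHS);
      SDValue WideRHS = DAG.getBitcast(MVT::v2i64, RHS);
      // ...performs the select in v2i64 (Cond stays the scalar i1 condition
      // that ISD::SELECT takes)...
      SDValue Sel =
          DAG.getNode(ISD::SELECT, DL, MVT::v2i64, Cond, WideLHS, WideRHS);
      // ...and bitcasts the result back. These extra bitcasts are what hide
      // the original v4i32 operands from computeNumSignBits in later combines.
      return DAG.getBitcast(MVT::v4i32, Sel);
    }

With this patch, v4i32/v8i16/v16i8 (and the wider AVX/AVX-512 equivalents) are marked Custom instead, so the select is lowered in its original type and the CMOV pseudo patterns added below match it directly, with no intervening bitcasts.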
-rw-r--r--  llvm/lib/Target/X86/X86ISelLowering.cpp  | 12
-rw-r--r--  llvm/lib/Target/X86/X86InstrCompiler.td  | 32
2 files changed, 41 insertions(+), 3 deletions(-)
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 88c7c0725e0..38031b5057c 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -876,12 +876,14 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationPromotedToType(ISD::OR, VT, MVT::v2i64);
setOperationPromotedToType(ISD::XOR, VT, MVT::v2i64);
setOperationPromotedToType(ISD::LOAD, VT, MVT::v2i64);
- setOperationPromotedToType(ISD::SELECT, VT, MVT::v2i64);
}
// Custom lower v2i64 and v2f64 selects.
setOperationAction(ISD::SELECT, MVT::v2f64, Custom);
setOperationAction(ISD::SELECT, MVT::v2i64, Custom);
+ setOperationAction(ISD::SELECT, MVT::v4i32, Custom);
+ setOperationAction(ISD::SELECT, MVT::v8i16, Custom);
+ setOperationAction(ISD::SELECT, MVT::v16i8, Custom);
setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
setOperationAction(ISD::FP_TO_SINT, MVT::v2i32, Custom);
@@ -1058,6 +1060,9 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::SELECT, MVT::v4f64, Custom);
setOperationAction(ISD::SELECT, MVT::v4i64, Custom);
+ setOperationAction(ISD::SELECT, MVT::v8i32, Custom);
+ setOperationAction(ISD::SELECT, MVT::v16i16, Custom);
+ setOperationAction(ISD::SELECT, MVT::v32i8, Custom);
setOperationAction(ISD::SELECT, MVT::v8f32, Custom);
for (auto VT : { MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
@@ -1174,7 +1179,6 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationPromotedToType(ISD::OR, VT, MVT::v4i64);
setOperationPromotedToType(ISD::XOR, VT, MVT::v4i64);
setOperationPromotedToType(ISD::LOAD, VT, MVT::v4i64);
- setOperationPromotedToType(ISD::SELECT, VT, MVT::v4i64);
}
if (HasInt256) {
@@ -1347,6 +1351,9 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::SELECT, MVT::v8f64, Custom);
setOperationAction(ISD::SELECT, MVT::v8i64, Custom);
+ setOperationAction(ISD::SELECT, MVT::v16i32, Custom);
+ setOperationAction(ISD::SELECT, MVT::v32i16, Custom);
+ setOperationAction(ISD::SELECT, MVT::v64i8, Custom);
setOperationAction(ISD::SELECT, MVT::v16f32, Custom);
for (auto VT : { MVT::v16i32, MVT::v8i64 }) {
@@ -1421,7 +1428,6 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
}
for (auto VT : { MVT::v64i8, MVT::v32i16, MVT::v16i32 }) {
setOperationPromotedToType(ISD::LOAD, VT, MVT::v8i64);
- setOperationPromotedToType(ISD::SELECT, VT, MVT::v8i64);
}
// Need to custom split v32i16/v64i8 bitcasts.
diff --git a/llvm/lib/Target/X86/X86InstrCompiler.td b/llvm/lib/Target/X86/X86InstrCompiler.td
index 379ed2822bf..de45b4697ac 100644
--- a/llvm/lib/Target/X86/X86InstrCompiler.td
+++ b/llvm/lib/Target/X86/X86InstrCompiler.td
@@ -611,26 +611,58 @@ def : Pat<(f128 (X86cmov VR128:$t, VR128:$f, imm:$cond, EFLAGS)),
(CMOV_VR128 VR128:$t, VR128:$f, imm:$cond)>;
let Predicates = [NoVLX] in {
+ def : Pat<(v16i8 (X86cmov VR128:$t, VR128:$f, imm:$cond, EFLAGS)),
+ (CMOV_VR128 VR128:$t, VR128:$f, imm:$cond)>;
+ def : Pat<(v8i16 (X86cmov VR128:$t, VR128:$f, imm:$cond, EFLAGS)),
+ (CMOV_VR128 VR128:$t, VR128:$f, imm:$cond)>;
+ def : Pat<(v4i32 (X86cmov VR128:$t, VR128:$f, imm:$cond, EFLAGS)),
+ (CMOV_VR128 VR128:$t, VR128:$f, imm:$cond)>;
def : Pat<(v4f32 (X86cmov VR128:$t, VR128:$f, imm:$cond, EFLAGS)),
(CMOV_VR128 VR128:$t, VR128:$f, imm:$cond)>;
def : Pat<(v2f64 (X86cmov VR128:$t, VR128:$f, imm:$cond, EFLAGS)),
(CMOV_VR128 VR128:$t, VR128:$f, imm:$cond)>;
+
+ def : Pat<(v32i8 (X86cmov VR256:$t, VR256:$f, imm:$cond, EFLAGS)),
+ (CMOV_VR256 VR256:$t, VR256:$f, imm:$cond)>;
+ def : Pat<(v16i16 (X86cmov VR256:$t, VR256:$f, imm:$cond, EFLAGS)),
+ (CMOV_VR256 VR256:$t, VR256:$f, imm:$cond)>;
+ def : Pat<(v8i32 (X86cmov VR256:$t, VR256:$f, imm:$cond, EFLAGS)),
+ (CMOV_VR256 VR256:$t, VR256:$f, imm:$cond)>;
def : Pat<(v8f32 (X86cmov VR256:$t, VR256:$f, imm:$cond, EFLAGS)),
(CMOV_VR256 VR256:$t, VR256:$f, imm:$cond)>;
def : Pat<(v4f64 (X86cmov VR256:$t, VR256:$f, imm:$cond, EFLAGS)),
(CMOV_VR256 VR256:$t, VR256:$f, imm:$cond)>;
}
let Predicates = [HasVLX] in {
+ def : Pat<(v16i8 (X86cmov VR128X:$t, VR128X:$f, imm:$cond, EFLAGS)),
+ (CMOV_VR128X VR128X:$t, VR128X:$f, imm:$cond)>;
+ def : Pat<(v8i16 (X86cmov VR128X:$t, VR128X:$f, imm:$cond, EFLAGS)),
+ (CMOV_VR128X VR128X:$t, VR128X:$f, imm:$cond)>;
+ def : Pat<(v4i32 (X86cmov VR128X:$t, VR128X:$f, imm:$cond, EFLAGS)),
+ (CMOV_VR128X VR128X:$t, VR128X:$f, imm:$cond)>;
def : Pat<(v4f32 (X86cmov VR128X:$t, VR128X:$f, imm:$cond, EFLAGS)),
(CMOV_VR128X VR128X:$t, VR128X:$f, imm:$cond)>;
def : Pat<(v2f64 (X86cmov VR128X:$t, VR128X:$f, imm:$cond, EFLAGS)),
(CMOV_VR128X VR128X:$t, VR128X:$f, imm:$cond)>;
+
+ def : Pat<(v32i8 (X86cmov VR256X:$t, VR256X:$f, imm:$cond, EFLAGS)),
+ (CMOV_VR256X VR256X:$t, VR256X:$f, imm:$cond)>;
+ def : Pat<(v16i16 (X86cmov VR256X:$t, VR256X:$f, imm:$cond, EFLAGS)),
+ (CMOV_VR256X VR256X:$t, VR256X:$f, imm:$cond)>;
+ def : Pat<(v8i32 (X86cmov VR256X:$t, VR256X:$f, imm:$cond, EFLAGS)),
+ (CMOV_VR256X VR256X:$t, VR256X:$f, imm:$cond)>;
def : Pat<(v8f32 (X86cmov VR256X:$t, VR256X:$f, imm:$cond, EFLAGS)),
(CMOV_VR256X VR256X:$t, VR256X:$f, imm:$cond)>;
def : Pat<(v4f64 (X86cmov VR256X:$t, VR256X:$f, imm:$cond, EFLAGS)),
(CMOV_VR256X VR256X:$t, VR256X:$f, imm:$cond)>;
}
+def : Pat<(v64i8 (X86cmov VR512:$t, VR512:$f, imm:$cond, EFLAGS)),
+ (CMOV_VR512 VR512:$t, VR512:$f, imm:$cond)>;
+def : Pat<(v32i16 (X86cmov VR512:$t, VR512:$f, imm:$cond, EFLAGS)),
+ (CMOV_VR512 VR512:$t, VR512:$f, imm:$cond)>;
+def : Pat<(v16i32 (X86cmov VR512:$t, VR512:$f, imm:$cond, EFLAGS)),
+ (CMOV_VR512 VR512:$t, VR512:$f, imm:$cond)>;
def : Pat<(v16f32 (X86cmov VR512:$t, VR512:$f, imm:$cond, EFLAGS)),
(CMOV_VR512 VR512:$t, VR512:$f, imm:$cond)>;
def : Pat<(v8f64 (X86cmov VR512:$t, VR512:$f, imm:$cond, EFLAGS)),
(CMOV_VR512 VR512:$t, VR512:$f, imm:$cond)>;