author     Craig Topper <craig.topper@sifive.com>    2022-08-03 15:19:03 -0700
committer  Tom Stellard <tstellar@redhat.com>        2022-08-05 01:12:26 -0700
commit     b5b435517a77d9e7c1b30b827b543d47e195adba
tree       6234e3c87a2f0f3b872821b57b834b74d0e1e288
parent     98411113094fe93e3124f2db9d7a99cf03b8bdb3
[RISCV] Prevent infinite loop after D129980.
D129980 converts (seteq (i64 (and X, 0xffffffff)), C1) into
(seteq (i64 (sext_inreg X, i32)), C1). If bit 31 of X is 0, it will be
turned back into an 'and' by SimplifyDemandedBits which can cause an
infinite loop. To prevent this, check if bit 31 is 0 with
computeKnownBits before doing the transformation.

Fixes PR56905.

Reviewed By: reames

Differential Revision: https://reviews.llvm.org/D131113

(cherry picked from commit 53d560b22f5b5d91ae5296f030e0ca75a5d2c625)
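The fix hinges on a simple value identity: if bit 31 of X is zero, (sext_inreg X, i32) and (and X, 0xffffffff) yield the same i64, so SimplifyDemandedBits turns the sext_inreg straight back into an 'and' and the combine fires again. The following is a minimal standalone C++ sketch of that identity, not LLVM code; the helper names zextLow32/sextLow32 and the sample constants are invented for illustration.

// Minimal standalone sketch (not LLVM code) of the identity behind the guard.
// The helper names zextLow32/sextLow32 are invented for illustration.
#include <cstdint>
#include <cstdio>

// Models (i64 (and X, 0xffffffff)).
static uint64_t zextLow32(uint64_t X) { return X & 0xffffffffu; }

// Models (i64 (sext_inreg X, i32)).
static uint64_t sextLow32(uint64_t X) {
  return static_cast<uint64_t>(static_cast<int64_t>(static_cast<int32_t>(X)));
}

int main() {
  const uint64_t Bit31Clear = 0x0000123400001234ULL; // bit 31 of X is 0
  const uint64_t Bit31Set   = 0x00001234F0001234ULL; // bit 31 of X is 1

  // Bit 31 clear: both forms agree, so the sext_inreg would immediately be
  // simplified back to an AND -- the looping case this patch guards against.
  std::printf("bit31=0: and=%#llx sext_inreg=%#llx\n",
              (unsigned long long)zextLow32(Bit31Clear),
              (unsigned long long)sextLow32(Bit31Clear));

  // Bit 31 set: the two forms differ, so the sext_inreg is not folded back.
  std::printf("bit31=1: and=%#llx sext_inreg=%#llx\n",
              (unsigned long long)zextLow32(Bit31Set),
              (unsigned long long)sextLow32(Bit31Set));
  return 0;
}

With the guard in place, the combine simply bails out via MaskedValueIsZero whenever the equivalent-value case would arise.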
-rw-r--r--  llvm/lib/Target/RISCV/RISCVISelLowering.cpp |  6
-rw-r--r--  llvm/test/CodeGen/RISCV/i64-icmp.ll         | 22
2 files changed, 28 insertions, 0 deletions
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 8fe49a90994f..d0ca325e9c14 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -8199,6 +8199,12 @@ static SDValue performSETCCCombine(SDNode *N, SelectionDAG &DAG,
   if (!isIntEqualitySetCC(Cond))
     return SDValue();
 
+  // Don't do this if the sign bit is provably zero, it will be turned back into
+  // an AND.
+  APInt SignMask = APInt::getOneBitSet(64, 31);
+  if (DAG.MaskedValueIsZero(N0.getOperand(0), SignMask))
+    return SDValue();
+
   const APInt &C1 = N1C->getAPIntValue();
 
   SDLoc dl(N);
diff --git a/llvm/test/CodeGen/RISCV/i64-icmp.ll b/llvm/test/CodeGen/RISCV/i64-icmp.ll
index 503a4f1aff87..68ba57b1735b 100644
--- a/llvm/test/CodeGen/RISCV/i64-icmp.ll
+++ b/llvm/test/CodeGen/RISCV/i64-icmp.ll
@@ -738,3 +738,25 @@ define i64 @icmp_ne_zext_inreg_large_constant(i64 %a) nounwind {
   %3 = zext i1 %2 to i64
   ret i64 %3
 }
+
+; This used to trigger an infinite loop where we toggled between 'and' and
+; 'sext_inreg'.
+define i64 @icmp_ne_zext_inreg_umin(i64 %a) nounwind {
+; RV64I-LABEL: icmp_ne_zext_inreg_umin:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    lui a1, 30141
+; RV64I-NEXT:    addiw a1, a1, -747
+; RV64I-NEXT:    bltu a0, a1, .LBB66_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    mv a0, a1
+; RV64I-NEXT:  .LBB66_2:
+; RV64I-NEXT:    addi a0, a0, -123
+; RV64I-NEXT:    snez a0, a0
+; RV64I-NEXT:    ret
+  %1 = call i64 @llvm.umin.i64(i64 %a, i64 123456789)
+  %2 = and i64 %1, 4294967295
+  %3 = icmp ne i64 %2, 123
+  %4 = zext i1 %3 to i64
+  ret i64 %4
+}
+declare i64 @llvm.umin.i64(i64, i64)