author     Hans Wennborg <hans@hanshq.net>    2018-08-21 23:07:11 +0000
committer  Hans Wennborg <hans@hanshq.net>    2018-08-21 23:07:11 +0000
commit     dc42a373349a713033077469b3c312806676bc6a (patch)
tree       26d8256afcb111d6075cbc790291eed1a0b50a1c
parent     fe978dfcd605ce3cae0d78b7fefb50f35b189c93 (diff)
Merging r340303:
------------------------------------------------------------------------
r340303 | ctopper | 2018-08-21 19:15:33 +0200 (Tue, 21 Aug 2018) | 9 lines

[BypassSlowDivision] Teach bypass slow division not to interfere with div by
constant where constants have been constant hoisted, but not moved from their
basic block

DAGCombiner doesn't pay attention to whether constants are opaque before doing
the div by constant optimization, so BypassSlowDivision shouldn't introduce
control flow that would make DAGCombiner unable to see an opaque constant.
This can occur when a div and a rem of the same constant are used in the same
basic block: the constant gets hoisted, but does not leave the block.

Longer term we probably need to look into the X86 immediate cost model used by
constant hoisting and maybe not mark div/rem immediates for hoisting at all.

This fixes the case from PR38649.

Differential Revision: https://reviews.llvm.org/D51000
------------------------------------------------------------------------
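
As an illustration of the pattern described above (a hypothetical sketch, not
part of the patch; function and value names are made up): after the
ConstantHoisting pass, a large divisor shared by a udiv and a urem in one
basic block can be left behind as a same-type bitcast instruction rather than
a plain immediate operand, which is exactly what the new check in
BypassSlowDivision looks for.

define { i64, i64 } @hoisted_divrem_sketch(i64 %x) {
entry:
  ; ConstantHoisting may represent the shared constant as a no-op bitcast
  ; so that it looks like an instruction; it stays in this block.
  %big = bitcast i64 4000000000 to i64
  %q = udiv i64 %x, %big
  %r = urem i64 %x, %big
  %a = insertvalue { i64, i64 } undef, i64 %q, 0
  %b = insertvalue { i64, i64 } %a, i64 %r, 1
  ret { i64, i64 } %b
}

On subtargets where 64-bit division is flagged as slow, BypassSlowDivision
would otherwise wrap %q and %r in a runtime "do the operands fit in 32 bits?"
branch, and the divide-by-constant lowering in DAGCombiner would no longer see
the constant.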
-rw-r--r--  llvm/lib/Transforms/Utils/BypassSlowDivision.cpp    9
-rw-r--r--  llvm/test/CodeGen/X86/divide-by-constant.ll        110
 2 files changed, 119 insertions, 0 deletions
diff --git a/llvm/lib/Transforms/Utils/BypassSlowDivision.cpp b/llvm/lib/Transforms/Utils/BypassSlowDivision.cpp
index 05512a6dff3..e7828af648a 100644
--- a/llvm/lib/Transforms/Utils/BypassSlowDivision.cpp
+++ b/llvm/lib/Transforms/Utils/BypassSlowDivision.cpp
@@ -388,6 +388,15 @@ Optional<QuotRemPair> FastDivInsertionTask::insertFastDivAndRem() {
     return None;
   }
 
+  // After Constant Hoisting pass, long constants may be represented as
+  // bitcast instructions. As a result, some constants may look like an
+  // instruction at first, and an additional check is necessary to find out if
+  // an operand is actually a constant.
+  if (auto *BCI = dyn_cast<BitCastInst>(Divisor))
+    if (BCI->getParent() == SlowDivOrRem->getParent() &&
+        isa<ConstantInt>(BCI->getOperand(0)))
+      return None;
+
   if (DividendShort && !isSignedOp()) {
     // If the division is unsigned and Dividend is known to be short, then
     // either
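
For context on why the guard simply returns None (an illustrative sketch, not
code from the patch; block and value names are hypothetical, and the exact IR
BypassSlowDivision emits differs in detail): the transform normally replaces a
slow full-width division with a runtime width check, a 32-bit fast path, and a
full-width slow path, roughly of this shape:

entry:
  %big = bitcast i64 4000000000 to i64      ; hoisted constant stays here
  %or = or i64 %x, %big
  %hi = lshr i64 %or, 32
  %fits = icmp eq i64 %hi, 0
  br i1 %fits, label %fast, label %slow

fast:                                       ; divide the truncated operands
  %x32 = trunc i64 %x to i32
  %d32 = trunc i64 %big to i32
  %q32 = udiv i32 %x32, %d32
  %qf = zext i32 %q32 to i64
  br label %join

slow:                                       ; original full-width divide
  %qs = udiv i64 %x, %big
  br label %join

join:
  %q = phi i64 [ %qf, %fast ], [ %qs, %slow ]

Once the udiv is split away from the block that holds the hoisted bitcast
constant, SelectionDAG (which works one basic block at a time) sees the
divisor as an ordinary cross-block value rather than a constant, so the
divide-by-constant combine cannot fire. Bailing out keeps the divide next to
its constant and lets the usual magic-number lowering apply, which is what the
new tests below check.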
diff --git a/llvm/test/CodeGen/X86/divide-by-constant.ll b/llvm/test/CodeGen/X86/divide-by-constant.ll
index cc2dc1b1d09..0dbb215a1bf 100644
--- a/llvm/test/CodeGen/X86/divide-by-constant.ll
+++ b/llvm/test/CodeGen/X86/divide-by-constant.ll
@@ -330,3 +330,113 @@ entry:
   %div = udiv i64 %rem, 7
   ret i64 %div
 }
+
+define { i64, i32 } @PR38622(i64) nounwind {
+; X32-LABEL: PR38622:
+; X32: # %bb.0:
+; X32-NEXT: pushl %ebp
+; X32-NEXT: pushl %ebx
+; X32-NEXT: pushl %edi
+; X32-NEXT: pushl %esi
+; X32-NEXT: subl $12, %esp
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; X32-NEXT: pushl $0
+; X32-NEXT: pushl $-294967296 # imm = 0xEE6B2800
+; X32-NEXT: pushl %ebp
+; X32-NEXT: pushl %ebx
+; X32-NEXT: calll __udivdi3
+; X32-NEXT: addl $16, %esp
+; X32-NEXT: movl %eax, %esi
+; X32-NEXT: movl %edx, %edi
+; X32-NEXT: pushl $0
+; X32-NEXT: pushl $-294967296 # imm = 0xEE6B2800
+; X32-NEXT: pushl %ebp
+; X32-NEXT: pushl %ebx
+; X32-NEXT: calll __umoddi3
+; X32-NEXT: addl $16, %esp
+; X32-NEXT: movl %eax, %ecx
+; X32-NEXT: movl %esi, %eax
+; X32-NEXT: movl %edi, %edx
+; X32-NEXT: addl $12, %esp
+; X32-NEXT: popl %esi
+; X32-NEXT: popl %edi
+; X32-NEXT: popl %ebx
+; X32-NEXT: popl %ebp
+; X32-NEXT: retl
+;
+; X64-LABEL: PR38622:
+; X64: # %bb.0:
+; X64-NEXT: movq %rdi, %rax
+; X64-NEXT: shrq $11, %rax
+; X64-NEXT: movabsq $4835703278458517, %rcx # imm = 0x112E0BE826D695
+; X64-NEXT: mulq %rcx
+; X64-NEXT: shrq $9, %rdx
+; X64-NEXT: imull $-294967296, %edx, %eax # imm = 0xEE6B2800
+; X64-NEXT: subl %eax, %edi
+; X64-NEXT: movq %rdx, %rax
+; X64-NEXT: movl %edi, %edx
+; X64-NEXT: retq
+ %2 = udiv i64 %0, 4000000000
+ %3 = urem i64 %0, 4000000000
+ %4 = trunc i64 %3 to i32
+ %5 = insertvalue { i64, i32 } undef, i64 %2, 0
+ %6 = insertvalue { i64, i32 } %5, i32 %4, 1
+ ret { i64, i32 } %6
+}
+
+define { i64, i32 } @PR38622_signed(i64) nounwind {
+; X32-LABEL: PR38622_signed:
+; X32: # %bb.0:
+; X32-NEXT: pushl %ebp
+; X32-NEXT: pushl %ebx
+; X32-NEXT: pushl %edi
+; X32-NEXT: pushl %esi
+; X32-NEXT: subl $12, %esp
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; X32-NEXT: pushl $0
+; X32-NEXT: pushl $-294967296 # imm = 0xEE6B2800
+; X32-NEXT: pushl %ebp
+; X32-NEXT: pushl %ebx
+; X32-NEXT: calll __divdi3
+; X32-NEXT: addl $16, %esp
+; X32-NEXT: movl %eax, %esi
+; X32-NEXT: movl %edx, %edi
+; X32-NEXT: pushl $0
+; X32-NEXT: pushl $-294967296 # imm = 0xEE6B2800
+; X32-NEXT: pushl %ebp
+; X32-NEXT: pushl %ebx
+; X32-NEXT: calll __moddi3
+; X32-NEXT: addl $16, %esp
+; X32-NEXT: movl %eax, %ecx
+; X32-NEXT: movl %esi, %eax
+; X32-NEXT: movl %edi, %edx
+; X32-NEXT: addl $12, %esp
+; X32-NEXT: popl %esi
+; X32-NEXT: popl %edi
+; X32-NEXT: popl %ebx
+; X32-NEXT: popl %ebp
+; X32-NEXT: retl
+;
+; X64-LABEL: PR38622_signed:
+; X64: # %bb.0:
+; X64-NEXT: movabsq $1237940039285380275, %rcx # imm = 0x112E0BE826D694B3
+; X64-NEXT: movq %rdi, %rax
+; X64-NEXT: imulq %rcx
+; X64-NEXT: movq %rdx, %rcx
+; X64-NEXT: shrq $63, %rcx
+; X64-NEXT: sarq $28, %rdx
+; X64-NEXT: leaq (%rdx,%rcx), %rax
+; X64-NEXT: addl %ecx, %edx
+; X64-NEXT: imull $-294967296, %edx, %ecx # imm = 0xEE6B2800
+; X64-NEXT: subl %ecx, %edi
+; X64-NEXT: movl %edi, %edx
+; X64-NEXT: retq
+ %2 = sdiv i64 %0, 4000000000
+ %3 = srem i64 %0, 4000000000
+ %4 = trunc i64 %3 to i32
+ %5 = insertvalue { i64, i32 } undef, i64 %2, 0
+ %6 = insertvalue { i64, i32 } %5, i32 %4, 1
+ ret { i64, i32 } %6
+}