diff options
author | George Burgess IV <george.burgess.iv@gmail.com> | 2018-10-09 03:18:56 +0000 |
---|---|---|
committer | George Burgess IV <george.burgess.iv@gmail.com> | 2018-10-09 03:18:56 +0000 |
commit | e1ec4deff8be6ed234ca6223e837bc0a8da7de2e (patch) | |
tree | 9ec3b0ddbeccaa48c80ac5fa067efeca636a1074 | |
parent | ebca18e58de934f3beda4b51e0c2f60a9924c925 (diff) |
Make LocationSize a proper Optional type; NFC
linaro-local/ci/tcwg-llvm-kernel-baseline-aarch64-master-next-defconfig_nolse
This is the second in a series of changes intended to make
https://reviews.llvm.org/D44748 more easily reviewable. Please see that
patch for more context. The first change being r344012.
Since I was requested to do all of this with post-commit review, this is
about as small as I can make this patch.
This patch makes LocationSize into an actual type that wraps a uint64_t;
users are required to call getValue() in order to get the size now. If
the LocationSize has an Unknown size (e.g. if LocSize ==
MemoryLocation::UnknownSize), getValue() will assert.
This also adds DenseMap specializations for LocationSize, which required
taking two more values from the set of values LocationSize can
represent. Hence, heavy users of multi-exabyte arrays or structs may
observe slightly lower-quality code as a result of this change.
The intent is for getValue()s to be very close to a corresponding
hasValue() (which is often spelled `!= MemoryLocation::UnknownSize`).
Sadly, small diff context appears to crop that out sometimes, and the
last change in DSE does require a bit of nonlocal reasoning about
control-flow. :/
This also removes an assert, since it's now redundant with the assert in
getValue().
-rw-r--r-- | llvm/include/llvm/Analysis/MemoryLocation.h | 99 | ||||
-rw-r--r-- | llvm/lib/Analysis/BasicAliasAnalysis.cpp | 31 | ||||
-rw-r--r-- | llvm/lib/Analysis/CFLAndersAliasAnalysis.cpp | 4 | ||||
-rw-r--r-- | llvm/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp | 8 | ||||
-rw-r--r-- | llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp | 12 |
5 files changed, 125 insertions, 29 deletions
diff --git a/llvm/include/llvm/Analysis/MemoryLocation.h b/llvm/include/llvm/Analysis/MemoryLocation.h index 3e9014d3488..fcfc94e5e9b 100644 --- a/llvm/include/llvm/Analysis/MemoryLocation.h +++ b/llvm/include/llvm/Analysis/MemoryLocation.h @@ -34,8 +34,80 @@ class AnyMemIntrinsic; class TargetLibraryInfo; // Represents the size of a MemoryLocation. Logically, it's an -// Optional<uint64_t>, with a special UnknownSize value from `MemoryLocation`. -using LocationSize = uint64_t; +// Optional<uint64_t>. +// +// We plan to use it for more soon, but we're trying to transition to this brave +// new world in small, easily-reviewable pieces; please see D44748. +class LocationSize { + enum : uint64_t { + Unknown = ~uint64_t(0), + MapEmpty = Unknown - 1, + MapTombstone = Unknown - 2, + + // The maximum value we can represent without falling back to 'unknown'. + MaxValue = MapTombstone - 1, + }; + + uint64_t Value; + + // Hack to support implicit construction. This should disappear when the + // public LocationSize ctor goes away. + enum DirectConstruction { Direct }; + + constexpr LocationSize(uint64_t Raw, DirectConstruction): Value(Raw) {} + +public: + constexpr LocationSize(uint64_t Raw) + : Value(Raw > MaxValue ? Unknown : Raw) {} + + static LocationSize precise(uint64_t Value) { return LocationSize(Value); } + constexpr static LocationSize unknown() { + return LocationSize(Unknown, Direct); + } + + // Sentinel values, generally used for maps. 
+ constexpr static LocationSize mapTombstone() { + return LocationSize(MapTombstone, Direct); + } + constexpr static LocationSize mapEmpty() { + return LocationSize(MapEmpty, Direct); + } + + bool hasValue() const { return Value != Unknown; } + uint64_t getValue() const { + assert(hasValue() && "Getting value from an unknown LocationSize!"); + return Value; + } + + bool operator==(const LocationSize &Other) const { + return Value == Other.Value; + } + + bool operator!=(const LocationSize &Other) const { + return !(*this == Other); + } + + void print(raw_ostream &OS) const { OS << Value; } + + // Returns an opaque value that represents this LocationSize. Cannot be + // reliably converted back into a LocationSize. + uint64_t toRaw() const { return Value; } + + // NOTE: These comparison operators will go away with D44748. Please don't + // rely on them. + bool operator<(const LocationSize &Other) const { + return Value < Other.Value; + } + + bool operator>(const LocationSize &Other) const { + return Other < *this; + } +}; + +inline raw_ostream &operator<<(raw_ostream &OS, LocationSize Size) { + Size.print(OS); + return OS; +} /// Representation for a specific memory location. /// @@ -143,13 +215,30 @@ public: } }; -// Specialize DenseMapInfo for MemoryLocation. +// Specialize DenseMapInfo. 
+template <> struct DenseMapInfo<LocationSize> { + static inline LocationSize getEmptyKey() { + return LocationSize::mapEmpty(); + } + static inline LocationSize getTombstoneKey() { + return LocationSize::mapTombstone(); + } + static unsigned getHashValue(const LocationSize &Val) { + return DenseMapInfo<uint64_t>::getHashValue(Val.toRaw()); + } + static bool isEqual(const LocationSize &LHS, const LocationSize &RHS) { + return LHS == RHS; + } +}; + template <> struct DenseMapInfo<MemoryLocation> { static inline MemoryLocation getEmptyKey() { - return MemoryLocation(DenseMapInfo<const Value *>::getEmptyKey(), 0); + return MemoryLocation(DenseMapInfo<const Value *>::getEmptyKey(), + DenseMapInfo<LocationSize>::getEmptyKey()); } static inline MemoryLocation getTombstoneKey() { - return MemoryLocation(DenseMapInfo<const Value *>::getTombstoneKey(), 0); + return MemoryLocation(DenseMapInfo<const Value *>::getTombstoneKey(), + DenseMapInfo<LocationSize>::getTombstoneKey()); } static unsigned getHashValue(const MemoryLocation &Val) { return DenseMapInfo<const Value *>::getHashValue(Val.Ptr) ^ diff --git a/llvm/lib/Analysis/BasicAliasAnalysis.cpp b/llvm/lib/Analysis/BasicAliasAnalysis.cpp index 6d4ac96ad37..072a50a9fd6 100644 --- a/llvm/lib/Analysis/BasicAliasAnalysis.cpp +++ b/llvm/lib/Analysis/BasicAliasAnalysis.cpp @@ -1023,8 +1023,8 @@ static AliasResult aliasSameBasePointerGEPs(const GEPOperator *GEP1, MaybeV2Size == MemoryLocation::UnknownSize) return MayAlias; - const uint64_t V1Size = MaybeV1Size; - const uint64_t V2Size = MaybeV2Size; + const uint64_t V1Size = MaybeV1Size.getValue(); + const uint64_t V2Size = MaybeV2Size.getValue(); ConstantInt *C1 = dyn_cast<ConstantInt>(GEP1->getOperand(GEP1->getNumOperands() - 1)); @@ -1188,7 +1188,7 @@ bool BasicAAResult::isGEPBaseAtNegativeOffset(const GEPOperator *GEPOp, !GEPOp->isInBounds()) return false; - const uint64_t ObjectAccessSize = MaybeObjectAccessSize; + const uint64_t ObjectAccessSize = 
MaybeObjectAccessSize.getValue(); // We need the object to be an alloca or a globalvariable, and want to know // the offset of the pointer from the object precisely, so no variable @@ -1352,7 +1352,7 @@ BasicAAResult::aliasGEP(const GEPOperator *GEP1, LocationSize V1Size, if (GEP1BaseOffset != 0 && DecompGEP1.VarIndices.empty()) { if (GEP1BaseOffset >= 0) { if (V2Size != MemoryLocation::UnknownSize) { - if ((uint64_t)GEP1BaseOffset < V2Size) + if ((uint64_t)GEP1BaseOffset < V2Size.getValue()) return PartialAlias; return NoAlias; } @@ -1367,7 +1367,7 @@ BasicAAResult::aliasGEP(const GEPOperator *GEP1, LocationSize V1Size, // stripped a gep with negative index ('gep <ptr>, -1, ...). if (V1Size != MemoryLocation::UnknownSize && V2Size != MemoryLocation::UnknownSize) { - if (-(uint64_t)GEP1BaseOffset < V1Size) + if (-(uint64_t)GEP1BaseOffset < V1Size.getValue()) return PartialAlias; return NoAlias; } @@ -1417,8 +1417,9 @@ BasicAAResult::aliasGEP(const GEPOperator *GEP1, LocationSize V1Size, // two locations do not alias. uint64_t ModOffset = (uint64_t)GEP1BaseOffset & (Modulo - 1); if (V1Size != MemoryLocation::UnknownSize && - V2Size != MemoryLocation::UnknownSize && ModOffset >= V2Size && - V1Size <= Modulo - ModOffset) + V2Size != MemoryLocation::UnknownSize && + ModOffset >= V2Size.getValue() && + V1Size.getValue() <= Modulo - ModOffset) return NoAlias; // If we know all the variables are positive, then GEP1 >= GEP1BasePtr. @@ -1426,7 +1427,7 @@ BasicAAResult::aliasGEP(const GEPOperator *GEP1, LocationSize V1Size, // don't alias if V2Size can fit in the gap between V2 and GEP1BasePtr. 
if (AllPositive && GEP1BaseOffset > 0 && V2Size != MemoryLocation::UnknownSize && - V2Size <= (uint64_t)GEP1BaseOffset) + V2Size.getValue() <= (uint64_t)GEP1BaseOffset) return NoAlias; if (constantOffsetHeuristic(DecompGEP1.VarIndices, V1Size, V2Size, @@ -1715,9 +1716,11 @@ AliasResult BasicAAResult::aliasCheck(const Value *V1, LocationSize V1Size, // side, then we know such behavior is undefined and can assume no alias. bool NullIsValidLocation = NullPointerIsDefined(&F); if ((V1Size != MemoryLocation::UnknownSize && - isObjectSmallerThan(O2, V1Size, DL, TLI, NullIsValidLocation)) || + isObjectSmallerThan(O2, V1Size.getValue(), DL, TLI, + NullIsValidLocation)) || (V2Size != MemoryLocation::UnknownSize && - isObjectSmallerThan(O1, V2Size, DL, TLI, NullIsValidLocation))) + isObjectSmallerThan(O1, V2Size.getValue(), DL, TLI, + NullIsValidLocation))) return NoAlias; // Check the cache before climbing up use-def chains. This also terminates @@ -1777,8 +1780,8 @@ AliasResult BasicAAResult::aliasCheck(const Value *V1, LocationSize V1Size, if (O1 == O2) if (V1Size != MemoryLocation::UnknownSize && V2Size != MemoryLocation::UnknownSize && - (isObjectSize(O1, V1Size, DL, TLI, NullIsValidLocation) || - isObjectSize(O2, V2Size, DL, TLI, NullIsValidLocation))) + (isObjectSize(O1, V1Size.getValue(), DL, TLI, NullIsValidLocation) || + isObjectSize(O2, V2Size.getValue(), DL, TLI, NullIsValidLocation))) return AliasCache[Locs] = PartialAlias; // Recurse back into the best AA results we have, potentially with refined @@ -1868,8 +1871,8 @@ bool BasicAAResult::constantOffsetHeuristic( MaybeV2Size == MemoryLocation::UnknownSize) return false; - const uint64_t V1Size = MaybeV1Size; - const uint64_t V2Size = MaybeV2Size; + const uint64_t V1Size = MaybeV1Size.getValue(); + const uint64_t V2Size = MaybeV2Size.getValue(); const VariableGEPIndex &Var0 = VarIndices[0], &Var1 = VarIndices[1]; diff --git a/llvm/lib/Analysis/CFLAndersAliasAnalysis.cpp 
b/llvm/lib/Analysis/CFLAndersAliasAnalysis.cpp index db350e2c22f..b43b48eeef7 100644 --- a/llvm/lib/Analysis/CFLAndersAliasAnalysis.cpp +++ b/llvm/lib/Analysis/CFLAndersAliasAnalysis.cpp @@ -561,8 +561,8 @@ bool CFLAndersAAResult::FunctionInfo::mayAlias( MaybeRHSSize == MemoryLocation::UnknownSize) return true; - const uint64_t LHSSize = MaybeLHSSize; - const uint64_t RHSSize = MaybeRHSSize; + const uint64_t LHSSize = MaybeLHSSize.getValue(); + const uint64_t RHSSize = MaybeRHSSize.getValue(); for (const auto &OVal : make_range(RangePair)) { // Be conservative about UnknownOffset diff --git a/llvm/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp b/llvm/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp index 7bea994121c..61f625477ca 100644 --- a/llvm/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp +++ b/llvm/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp @@ -43,8 +43,12 @@ AliasResult SCEVAAResult::alias(const MemoryLocation &LocA, if (SE.getEffectiveSCEVType(AS->getType()) == SE.getEffectiveSCEVType(BS->getType())) { unsigned BitWidth = SE.getTypeSizeInBits(AS->getType()); - APInt ASizeInt(BitWidth, LocA.Size); - APInt BSizeInt(BitWidth, LocB.Size); + APInt ASizeInt(BitWidth, LocA.Size.hasValue() + ? LocA.Size.getValue() + : MemoryLocation::UnknownSize); + APInt BSizeInt(BitWidth, LocB.Size.hasValue() + ? LocB.Size.getValue() + : MemoryLocation::UnknownSize); // Compute the difference between the two pointers. 
const SCEV *BA = SE.getMinusSCEV(BS, AS); diff --git a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp index 0a3a00b8380..810c2fee0e0 100644 --- a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp +++ b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp @@ -354,8 +354,8 @@ static OverwriteResult isOverwrite(const MemoryLocation &Later, Earlier.Size == MemoryLocation::UnknownSize) return OW_Unknown; - const uint64_t LaterSize = Later.Size; - const uint64_t EarlierSize = Earlier.Size; + const uint64_t LaterSize = Later.Size.getValue(); + const uint64_t EarlierSize = Earlier.Size.getValue(); const Value *P1 = Earlier.Ptr->stripPointerCasts(); const Value *P2 = Later.Ptr->stripPointerCasts(); @@ -1004,11 +1004,10 @@ static bool removePartiallyOverlappedStores(AliasAnalysis *AA, Instruction *EarlierWrite = OI.first; MemoryLocation Loc = getLocForWrite(EarlierWrite); assert(isRemovable(EarlierWrite) && "Expect only removable instruction"); - assert(Loc.Size != MemoryLocation::UnknownSize && "Unexpected mem loc"); const Value *Ptr = Loc.Ptr->stripPointerCasts(); int64_t EarlierStart = 0; - int64_t EarlierSize = int64_t(Loc.Size); + int64_t EarlierSize = int64_t(Loc.Size.getValue()); GetPointerBaseWithConstantOffset(Ptr, EarlierStart, DL); OverlapIntervalsTy &IntervalMap = OI.second; Changed |= @@ -1205,8 +1204,9 @@ static bool eliminateDeadStores(BasicBlock &BB, AliasAnalysis *AA, assert(!EnablePartialOverwriteTracking && "Do not expect to perform " "when partial-overwrite " "tracking is enabled"); - int64_t EarlierSize = DepLoc.Size; - int64_t LaterSize = Loc.Size; + // The overwrite result is known, so these must be known, too. + int64_t EarlierSize = DepLoc.Size.getValue(); + int64_t LaterSize = Loc.Size.getValue(); bool IsOverwriteEnd = (OR == OW_End); MadeChange |= tryToShorten(DepWrite, DepWriteOffset, EarlierSize, InstWriteOffset, LaterSize, IsOverwriteEnd); |