[Asan][RISCV] Support asan check for segment load/store RVV intrinsics. #100931
Conversation
…rget intrinsics. Previously, asan treated target intrinsics as black boxes, so it could not instrument accurate checks. This patch provides TTI hooks so that targets can describe their intrinsics' memory behavior to asan. Note: 1. this patch renames InterestingMemoryOperand to MemoryRefInfo. 2. this patch does not support RVV indexed/segment loads/stores.
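For context, the consumer side in AddressSanitizer.cpp (the renamed getMemoryRefInfos, visible in the truncated diff below) boils down to asking the target first. A minimal sketch follows; the early-return body is a plausible reconstruction, since the full function is cut off in the diff:

```cpp
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/IntrinsicInst.h"

using namespace llvm;

// Sketch: asan lets the target describe an intrinsic's memory references
// via the new TTI hook before falling back to its generic handling. If the
// hook returns true, the target has filled in `Interesting` itself.
static void getMemoryRefInfos(Instruction *I,
                              SmallVectorImpl<MemoryRefInfo> &Interesting,
                              const TargetTransformInfo *TTI) {
  if (auto *II = dyn_cast<IntrinsicInst>(I))
    if (TTI && TTI->getMemoryRefInfo(Interesting, II))
      return;
  // ... generic load/store/masked-intrinsic handling as before ...
}
```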
…structions. This is based on llvm#97070. This patch helps AddressSanitizer support indexed/segment instructions. It adds a new member, MaybeOffset, to MemoryRefInfo to describe the byte offset between the pointer and the address of the memory reference.
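To illustrate the byte-offset semantics, here is a hedged sketch (the helper below is invented for illustration and is not part of the patch): each lane's effective address is simply the base pointer plus that lane's byte offset, with no element-size scaling.

```cpp
#include "llvm/IR/IRBuilder.h"

using namespace llvm;

// Illustrative only: MaybeOffset holds per-lane *byte* offsets, so a lane's
// effective address is base + offset[lane], with no extra scaling by the
// element size (as an array-index operand would require).
static Value *emitLaneAddress(IRBuilder<> &IRB, Value *BasePtr,
                              Value *OffsetVec, Value *LaneIdx) {
  Value *ByteOff = IRB.CreateExtractElement(OffsetVec, LaneIdx, "byte.off");
  return IRB.CreatePtrAdd(BasePtr, ByteOff, "lane.addr");
}
```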
This is based on llvm#100930. An RVV segment is an array of NF contiguous elements. This patch emulates an RVV segment as a large integer whose bitwidth equals NF * SEW. The reason not to emulate an RVV segment as an aggregate type is that vector types must use primitive types as element types. Another approach would be to create NF MemoryRefInfo objects; that would avoid creating pseudo types, but it would also generate larger code for the asan checks.
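The type construction behind that emulation is small; here is a self-contained restatement of the logic that appears in each case of the diff below:

```cpp
#include "llvm/IR/DerivedTypes.h"

using namespace llvm;

// From the patch: a segment of NF fields, each SEW bits wide, is modeled as
// a vector whose element is a single integer of NF * SEW bits. For example,
// a vlseg3 result of <vscale x 4 x i32> is checked as <vscale x 4 x i96>.
static Type *getSegmentAccessType(Type *FieldTy, unsigned NF) {
  if (NF <= 1)
    return FieldTy;
  unsigned SEW = FieldTy->getScalarSizeInBits();
  auto *SegTy = IntegerType::get(FieldTy->getContext(), SEW * NF);
  return VectorType::get(SegTy, cast<VectorType>(FieldTy));
}
```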
@llvm/pr-subscribers-compiler-rt-sanitizer @llvm/pr-subscribers-llvm-transforms @llvm/pr-subscribers-llvm-analysis

Author: Yeting Kuo (yetingk)

Changes

This is based on #100930.

Patch is 384.36 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/100931.diff

10 Files Affected:
diff --git a/llvm/include/llvm/Analysis/MemoryRefInfo.h b/llvm/include/llvm/Analysis/MemoryRefInfo.h new file mode 100644 index 0000000000000..e26348d1e95f3 --- /dev/null +++ b/llvm/include/llvm/Analysis/MemoryRefInfo.h @@ -0,0 +1,57 @@ +//===--------- Definition of the MemoryRefInfo class -*- C++ -*------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file defines MemoryRefInfo class that is used when getting +// the information of a memory reference instruction. +// +//===----------------------------------------------------------------------===// +#ifndef LLVM_ANALYSIS_MEMORYREFINFO_H +#define LLVM_ANALYSIS_MEMORYREFINFO_H + +#include "llvm/IR/DataLayout.h" +#include "llvm/IR/Instruction.h" +#include "llvm/Support/TypeSize.h" + +namespace llvm { +class MemoryRefInfo { +public: + Use *PtrUse = nullptr; + bool IsWrite; + Type *OpType; + TypeSize TypeStoreSize = TypeSize::getFixed(0); + MaybeAlign Alignment; + // The mask Value, if we're looking at a masked load/store. + Value *MaybeMask; + // The EVL Value, if we're looking at a vp intrinsic. + Value *MaybeEVL; + // The Stride Value, if we're looking at a strided load/store. + Value *MaybeStride; + // The Offset Value, if we're looking at a indexed load/store. The + // offset actually means byte-offset instead of array index. + Value *MaybeOffset; + + MemoryRefInfo() = default; + MemoryRefInfo(Instruction *I, unsigned OperandNo, bool IsWrite, + class Type *OpType, MaybeAlign Alignment, + Value *MaybeMask = nullptr, Value *MaybeEVL = nullptr, + Value *MaybeStride = nullptr, Value *MaybeOffset = nullptr) + : IsWrite(IsWrite), OpType(OpType), Alignment(Alignment), + MaybeMask(MaybeMask), MaybeEVL(MaybeEVL), MaybeStride(MaybeStride), + MaybeOffset(MaybeOffset) { + const DataLayout &DL = I->getDataLayout(); + TypeStoreSize = DL.getTypeStoreSizeInBits(OpType); + PtrUse = &I->getOperandUse(OperandNo); + } + + Instruction *getInsn() { return cast<Instruction>(PtrUse->getUser()); } + Value *getPtr() { return PtrUse->get(); } + operator bool() { return PtrUse != nullptr; } +}; + +} // namespace llvm +#endif diff --git a/llvm/include/llvm/Analysis/TargetTransformInfo.h b/llvm/include/llvm/Analysis/TargetTransformInfo.h index cf378008e4c7c..59a6a0390e98c 100644 --- a/llvm/include/llvm/Analysis/TargetTransformInfo.h +++ b/llvm/include/llvm/Analysis/TargetTransformInfo.h @@ -23,6 +23,7 @@ #include "llvm/ADT/APInt.h" #include "llvm/ADT/SmallBitVector.h" +#include "llvm/Analysis/MemoryRefInfo.h" #include "llvm/IR/FMF.h" #include "llvm/IR/InstrTypes.h" #include "llvm/IR/PassManager.h" @@ -955,6 +956,10 @@ class TargetTransformInfo { MemCmpExpansionOptions enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const; + // Add MemoryRefInfo of Intrinsic \p II into array \p Interesting. + bool getMemoryRefInfo(SmallVectorImpl<MemoryRefInfo> &Interesting, + IntrinsicInst *II) const; + /// Should the Select Optimization pass be enabled and ran. 
bool enableSelectOptimize() const; @@ -1944,6 +1949,8 @@ class TargetTransformInfo::Concept { virtual bool enableAggressiveInterleaving(bool LoopHasReductions) = 0; virtual MemCmpExpansionOptions enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const = 0; + virtual bool getMemoryRefInfo(SmallVectorImpl<MemoryRefInfo> &Interesting, + IntrinsicInst *II) const = 0; virtual bool enableSelectOptimize() = 0; virtual bool shouldTreatInstructionLikeSelect(const Instruction *I) = 0; virtual bool enableInterleavedAccessVectorization() = 0; @@ -2500,6 +2507,12 @@ class TargetTransformInfo::Model final : public TargetTransformInfo::Concept { bool IsZeroCmp) const override { return Impl.enableMemCmpExpansion(OptSize, IsZeroCmp); } + + bool getMemoryRefInfo(SmallVectorImpl<MemoryRefInfo> &Interesting, + IntrinsicInst *II) const override { + return Impl.getMemoryRefInfo(Interesting, II); + } + bool enableSelectOptimize() override { return Impl.enableSelectOptimize(); } diff --git a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h index 47fde08735c0c..eccb8d4157765 100644 --- a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h +++ b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h @@ -394,6 +394,11 @@ class TargetTransformInfoImplBase { return {}; } + bool getMemoryRefInfo(SmallVectorImpl<MemoryRefInfo> &Interesting, + IntrinsicInst *II) const { + return false; + } + bool enableSelectOptimize() const { return true; } bool shouldTreatInstructionLikeSelect(const Instruction *I) { diff --git a/llvm/include/llvm/Transforms/Instrumentation/AddressSanitizerCommon.h b/llvm/include/llvm/Transforms/Instrumentation/AddressSanitizerCommon.h index 9fe2716220e83..f7bd36c2def03 100644 --- a/llvm/include/llvm/Transforms/Instrumentation/AddressSanitizerCommon.h +++ b/llvm/include/llvm/Transforms/Instrumentation/AddressSanitizerCommon.h @@ -14,6 +14,7 @@ #define LLVM_TRANSFORMS_INSTRUMENTATION_ADDRESSSANITIZERCOMMON_H #include "llvm/Analysis/CFG.h" +#include "llvm/Analysis/MemoryRefInfo.h" #include "llvm/Analysis/PostDominators.h" #include "llvm/IR/Dominators.h" #include "llvm/IR/Instruction.h" @@ -22,37 +23,6 @@ namespace llvm { -class InterestingMemoryOperand { -public: - Use *PtrUse; - bool IsWrite; - Type *OpType; - TypeSize TypeStoreSize = TypeSize::getFixed(0); - MaybeAlign Alignment; - // The mask Value, if we're looking at a masked load/store. - Value *MaybeMask; - // The EVL Value, if we're looking at a vp intrinsic. - Value *MaybeEVL; - // The Stride Value, if we're looking at a strided load/store. - Value *MaybeStride; - - InterestingMemoryOperand(Instruction *I, unsigned OperandNo, bool IsWrite, - class Type *OpType, MaybeAlign Alignment, - Value *MaybeMask = nullptr, - Value *MaybeEVL = nullptr, - Value *MaybeStride = nullptr) - : IsWrite(IsWrite), OpType(OpType), Alignment(Alignment), - MaybeMask(MaybeMask), MaybeEVL(MaybeEVL), MaybeStride(MaybeStride) { - const DataLayout &DL = I->getDataLayout(); - TypeStoreSize = DL.getTypeStoreSizeInBits(OpType); - PtrUse = &I->getOperandUse(OperandNo); - } - - Instruction *getInsn() { return cast<Instruction>(PtrUse->getUser()); } - - Value *getPtr() { return PtrUse->get(); } -}; - // Get AddressSanitizer parameters. 
void getAddressSanitizerParams(const Triple &TargetTriple, int LongSize, bool IsKasan, uint64_t *ShadowBase, diff --git a/llvm/lib/Analysis/TargetTransformInfo.cpp b/llvm/lib/Analysis/TargetTransformInfo.cpp index 693f7a5bb7af5..f5adff1787b11 100644 --- a/llvm/lib/Analysis/TargetTransformInfo.cpp +++ b/llvm/lib/Analysis/TargetTransformInfo.cpp @@ -622,6 +622,11 @@ TargetTransformInfo::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const { return TTIImpl->enableMemCmpExpansion(OptSize, IsZeroCmp); } +bool TargetTransformInfo::getMemoryRefInfo( + SmallVectorImpl<MemoryRefInfo> &Interesting, IntrinsicInst *II) const { + return TTIImpl->getMemoryRefInfo(Interesting, II); +} + bool TargetTransformInfo::enableSelectOptimize() const { return TTIImpl->enableSelectOptimize(); } diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp index f9eef60f77b7a..1a99449f451f4 100644 --- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp +++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp @@ -13,7 +13,9 @@ #include "llvm/CodeGen/BasicTTIImpl.h" #include "llvm/CodeGen/CostTable.h" #include "llvm/CodeGen/TargetLowering.h" +#include "llvm/IR/IRBuilder.h" #include "llvm/IR/Instructions.h" +#include "llvm/IR/IntrinsicsRISCV.h" #include "llvm/IR/PatternMatch.h" #include <cmath> #include <optional> @@ -36,6 +38,260 @@ static cl::opt<unsigned> SLPMaxVF( "exclusively by SLP vectorizer."), cl::Hidden); +bool RISCVTTIImpl::getMemoryRefInfo(SmallVectorImpl<MemoryRefInfo> &Interesting, + IntrinsicInst *II) const { + const DataLayout &DL = getDataLayout(); + Intrinsic::ID IntNo = II->getIntrinsicID(); + LLVMContext &C = II->getContext(); + Type *XLenIntTy = IntegerType::get(C, ST->getXLen()); + IRBuilder<> IB(II); + bool HasMask = false; + + auto getNFields = [](const IntrinsicInst *II, unsigned PtrOperandNo, + bool IsWrite) -> unsigned { + if (IsWrite) + return PtrOperandNo; + if (auto *STy = dyn_cast<StructType>(II->getType())) + return STy->getNumElements(); + return 1; + }; + + switch (IntNo) { + case Intrinsic::riscv_vle_mask: + case Intrinsic::riscv_vse_mask: + case Intrinsic::riscv_vlseg2_mask: + case Intrinsic::riscv_vlseg3_mask: + case Intrinsic::riscv_vlseg4_mask: + case Intrinsic::riscv_vlseg5_mask: + case Intrinsic::riscv_vlseg6_mask: + case Intrinsic::riscv_vlseg7_mask: + case Intrinsic::riscv_vlseg8_mask: + case Intrinsic::riscv_vsseg2_mask: + case Intrinsic::riscv_vsseg3_mask: + case Intrinsic::riscv_vsseg4_mask: + case Intrinsic::riscv_vsseg5_mask: + case Intrinsic::riscv_vsseg6_mask: + case Intrinsic::riscv_vsseg7_mask: + case Intrinsic::riscv_vsseg8_mask: + HasMask = true; + [[fallthrough]]; + case Intrinsic::riscv_vle: + case Intrinsic::riscv_vse: + case Intrinsic::riscv_vlseg2: + case Intrinsic::riscv_vlseg3: + case Intrinsic::riscv_vlseg4: + case Intrinsic::riscv_vlseg5: + case Intrinsic::riscv_vlseg6: + case Intrinsic::riscv_vlseg7: + case Intrinsic::riscv_vlseg8: + case Intrinsic::riscv_vsseg2: + case Intrinsic::riscv_vsseg3: + case Intrinsic::riscv_vsseg4: + case Intrinsic::riscv_vsseg5: + case Intrinsic::riscv_vsseg6: + case Intrinsic::riscv_vsseg7: + case Intrinsic::riscv_vsseg8: { + bool IsWrite = II->getType()->isVoidTy(); + Type *Ty = IsWrite ? II->getArgOperand(0)->getType() : II->getType(); + // The results of segment loads are struct type. 
+ if (auto *STy = dyn_cast<StructType>(Ty)) + Ty = STy->getTypeAtIndex(0U); + const auto *RVVIInfo = RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo); + unsigned VLIndex = RVVIInfo->VLOperand; + unsigned PtrOperandNo = VLIndex - 1 - HasMask; + MaybeAlign Alignment = + II->getArgOperand(PtrOperandNo)->getPointerAlignment(DL); + Type *MaskType = Ty->getWithNewType(Type::getInt1Ty(C)); + Value *Mask = ConstantInt::get(MaskType, 1); + if (HasMask) + Mask = II->getArgOperand(VLIndex - 1); + Value *EVL = II->getArgOperand(VLIndex); + unsigned NF = getNFields(II, PtrOperandNo, IsWrite); + + // RVV uses contiguous nf elements as a segment. + if (NF > 1) { + unsigned ElemSize = Ty->getScalarSizeInBits(); + auto *SegTy = IntegerType::get(C, ElemSize * NF); + Ty = VectorType::get(SegTy, cast<VectorType>(Ty)); + } + + Interesting.emplace_back(II, PtrOperandNo, IsWrite, Ty, Alignment, Mask, + EVL); + return true; + } + case Intrinsic::riscv_vlse_mask: + case Intrinsic::riscv_vsse_mask: + case Intrinsic::riscv_vlsseg2_mask: + case Intrinsic::riscv_vlsseg3_mask: + case Intrinsic::riscv_vlsseg4_mask: + case Intrinsic::riscv_vlsseg5_mask: + case Intrinsic::riscv_vlsseg6_mask: + case Intrinsic::riscv_vlsseg7_mask: + case Intrinsic::riscv_vlsseg8_mask: + case Intrinsic::riscv_vssseg2_mask: + case Intrinsic::riscv_vssseg3_mask: + case Intrinsic::riscv_vssseg4_mask: + case Intrinsic::riscv_vssseg5_mask: + case Intrinsic::riscv_vssseg6_mask: + case Intrinsic::riscv_vssseg7_mask: + case Intrinsic::riscv_vssseg8_mask: + HasMask = true; + [[fallthrough]]; + case Intrinsic::riscv_vlse: + case Intrinsic::riscv_vsse: + case Intrinsic::riscv_vlsseg2: + case Intrinsic::riscv_vlsseg3: + case Intrinsic::riscv_vlsseg4: + case Intrinsic::riscv_vlsseg5: + case Intrinsic::riscv_vlsseg6: + case Intrinsic::riscv_vlsseg7: + case Intrinsic::riscv_vlsseg8: + case Intrinsic::riscv_vssseg2: + case Intrinsic::riscv_vssseg3: + case Intrinsic::riscv_vssseg4: + case Intrinsic::riscv_vssseg5: + case Intrinsic::riscv_vssseg6: + case Intrinsic::riscv_vssseg7: + case Intrinsic::riscv_vssseg8: { + bool IsWrite = II->getType()->isVoidTy(); + Type *Ty = IsWrite ? II->getArgOperand(0)->getType() : II->getType(); + // The results of segment loads are struct type. + if (auto *STy = dyn_cast<StructType>(Ty)) + Ty = STy->getTypeAtIndex(0U); + const auto *RVVIInfo = RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo); + unsigned VLIndex = RVVIInfo->VLOperand; + unsigned PtrOperandNo = VLIndex - 2 - HasMask; + MaybeAlign Alignment = + II->getArgOperand(PtrOperandNo)->getPointerAlignment(DL); + + Value *Stride = II->getArgOperand(PtrOperandNo + 1); + // Use the pointer alignment as the element alignment if the stride is a + // multiple of the pointer alignment. Otherwise, the element alignment + // should be Align(1). + unsigned PointerAlign = Alignment.valueOrOne().value(); + if (!isa<ConstantInt>(Stride) || + cast<ConstantInt>(Stride)->getZExtValue() % PointerAlign != 0) + Alignment = Align(1); + + Type *MaskType = Ty->getWithNewType(Type::getInt1Ty(C)); + Value *Mask = ConstantInt::get(MaskType, 1); + if (HasMask) + Mask = II->getArgOperand(VLIndex - 1); + Value *EVL = II->getArgOperand(VLIndex); + unsigned NF = getNFields(II, PtrOperandNo, IsWrite); + + // RVV uses contiguous nf elements as a segment. 
+ if (NF > 1) { + unsigned ElemSize = Ty->getScalarSizeInBits(); + auto *SegTy = IntegerType::get(C, ElemSize * NF); + Ty = VectorType::get(SegTy, cast<VectorType>(Ty)); + } + Interesting.emplace_back(II, PtrOperandNo, IsWrite, Ty, Alignment, Mask, + EVL, Stride); + return true; + } + case Intrinsic::riscv_vloxei_mask: + case Intrinsic::riscv_vluxei_mask: + case Intrinsic::riscv_vsoxei_mask: + case Intrinsic::riscv_vsuxei_mask: + case Intrinsic::riscv_vloxseg2_mask: + case Intrinsic::riscv_vloxseg3_mask: + case Intrinsic::riscv_vloxseg4_mask: + case Intrinsic::riscv_vloxseg5_mask: + case Intrinsic::riscv_vloxseg6_mask: + case Intrinsic::riscv_vloxseg7_mask: + case Intrinsic::riscv_vloxseg8_mask: + case Intrinsic::riscv_vluxseg2_mask: + case Intrinsic::riscv_vluxseg3_mask: + case Intrinsic::riscv_vluxseg4_mask: + case Intrinsic::riscv_vluxseg5_mask: + case Intrinsic::riscv_vluxseg6_mask: + case Intrinsic::riscv_vluxseg7_mask: + case Intrinsic::riscv_vluxseg8_mask: + case Intrinsic::riscv_vsoxseg2_mask: + case Intrinsic::riscv_vsoxseg3_mask: + case Intrinsic::riscv_vsoxseg4_mask: + case Intrinsic::riscv_vsoxseg5_mask: + case Intrinsic::riscv_vsoxseg6_mask: + case Intrinsic::riscv_vsoxseg7_mask: + case Intrinsic::riscv_vsoxseg8_mask: + case Intrinsic::riscv_vsuxseg2_mask: + case Intrinsic::riscv_vsuxseg3_mask: + case Intrinsic::riscv_vsuxseg4_mask: + case Intrinsic::riscv_vsuxseg5_mask: + case Intrinsic::riscv_vsuxseg6_mask: + case Intrinsic::riscv_vsuxseg7_mask: + case Intrinsic::riscv_vsuxseg8_mask: + HasMask = true; + [[fallthrough]]; + case Intrinsic::riscv_vloxei: + case Intrinsic::riscv_vluxei: + case Intrinsic::riscv_vsoxei: + case Intrinsic::riscv_vsuxei: + case Intrinsic::riscv_vloxseg2: + case Intrinsic::riscv_vloxseg3: + case Intrinsic::riscv_vloxseg4: + case Intrinsic::riscv_vloxseg5: + case Intrinsic::riscv_vloxseg6: + case Intrinsic::riscv_vloxseg7: + case Intrinsic::riscv_vloxseg8: + case Intrinsic::riscv_vluxseg2: + case Intrinsic::riscv_vluxseg3: + case Intrinsic::riscv_vluxseg4: + case Intrinsic::riscv_vluxseg5: + case Intrinsic::riscv_vluxseg6: + case Intrinsic::riscv_vluxseg7: + case Intrinsic::riscv_vluxseg8: + case Intrinsic::riscv_vsoxseg2: + case Intrinsic::riscv_vsoxseg3: + case Intrinsic::riscv_vsoxseg4: + case Intrinsic::riscv_vsoxseg5: + case Intrinsic::riscv_vsoxseg6: + case Intrinsic::riscv_vsoxseg7: + case Intrinsic::riscv_vsoxseg8: + case Intrinsic::riscv_vsuxseg2: + case Intrinsic::riscv_vsuxseg3: + case Intrinsic::riscv_vsuxseg4: + case Intrinsic::riscv_vsuxseg5: + case Intrinsic::riscv_vsuxseg6: + case Intrinsic::riscv_vsuxseg7: + case Intrinsic::riscv_vsuxseg8: { + bool IsWrite = II->getType()->isVoidTy(); + Type *Ty = IsWrite ? II->getArgOperand(0)->getType() : II->getType(); + // The results of segment loads are struct type. + if (auto *STy = dyn_cast<StructType>(Ty)) + Ty = STy->getTypeAtIndex(0U); + const auto *RVVIInfo = RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo); + unsigned VLIndex = RVVIInfo->VLOperand; + unsigned PtrOperandNo = VLIndex - 2 - HasMask; + // RVV indexed loads/stores zero-extend offset operands which are narrower + // than XLEN to XLEN. 
+ Value *OffsetOp = II->getArgOperand(PtrOperandNo + 1); + Type *OffsetTy = OffsetOp->getType(); + if (OffsetTy->getScalarType()->getIntegerBitWidth() < ST->getXLen()) { + VectorType *OrigType = cast<VectorType>(OffsetTy); + Type *ExtendTy = VectorType::get(XLenIntTy, OrigType); + OffsetOp = IB.CreateZExt(OffsetOp, ExtendTy); + } + Value *Mask = ConstantInt::get(OffsetTy->getWithNewBitWidth(1), 1); + if (HasMask) + Mask = II->getArgOperand(VLIndex - 1); + Value *EVL = II->getArgOperand(VLIndex); + unsigned NF = getNFields(II, PtrOperandNo, IsWrite); + + // RVV uses contiguous nf elements as a segment. + if (NF > 1) { + unsigned ElemSize = Ty->getScalarSizeInBits(); + auto *SegTy = IntegerType::get(C, ElemSize * NF); + Ty = VectorType::get(SegTy, cast<VectorType>(Ty)); + } + Interesting.emplace_back(II, PtrOperandNo, IsWrite, Ty, Align(1), Mask, EVL, + /* Stride */ nullptr, OffsetOp); + } + } + return false; +} + InstructionCost RISCVTTIImpl::getRISCVInstructionCost(ArrayRef<unsigned> OpCodes, MVT VT, TTI::TargetCostKind CostKind) { diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h index 9c37a4f6ec2d0..e9a4721c37890 100644 --- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h +++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h @@ -60,6 +60,9 @@ class RISCVTTIImpl : public BasicTTIImplBase<RISCVTTIImpl> { : BaseT(TM, F.getDataLayout()), ST(TM->getSubtargetImpl(F)), TLI(ST->getTargetLowering()) {} + bool getMemoryRefInfo(SmallVectorImpl<MemoryRefInfo> &Interesting, + IntrinsicInst *II) const; + bool areInlineCompatible(const Function *Caller, const Function *Callee) const; diff --git a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp index 149866a8e4200..32639ebb73c59 100644 --- a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp +++ b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp @@ -29,6 +29,7 @@ #include "llvm/Analysis/MemoryBuiltins.h" #include "llvm/Analysis/StackSafetyAnalysis.h" #include "llvm/Analysis/TargetLibraryInfo.h" +#include "llvm/Analysis/TargetTransformInfo.h" #include "llvm/Analysis/ValueTracking.h" #include "llvm/BinaryFormat/MachO.h" #include "llvm/Demangle/Demangle.h" @@ -754,12 +755,13 @@ struct AddressSanitizer { bool isInterestingAlloca(const AllocaInst &AI); bool ignoreAccess(Instruction *Inst, Value *Ptr); - void getInterestingMemoryOperands( - Instruction *I, SmallVectorImpl<InterestingMemoryOperand> &Interesting); + void getMemoryRefInfos(Instruction *I, + SmallVectorImpl<MemoryRefInfo> &Interesting, + const TargetTransformInfo *TTI); - void instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis, - InterestingMemoryOperand &O, bool... [truncated] |
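One detail from the strided (vlse/vlsseg) cases above deserves a callout: per-element alignment is only preserved when the stride is a compile-time constant that is a multiple of the pointer alignment. A standalone restatement of that heuristic:

```cpp
#include "llvm/IR/Constants.h"
#include "llvm/Support/Alignment.h"

using namespace llvm;

// Restatement of the stride/alignment heuristic in the diff: keep the
// pointer's alignment for each strided element only when the stride is a
// constant multiple of it; otherwise conservatively assume Align(1).
static MaybeAlign elementAlignment(MaybeAlign PtrAlign, Value *Stride) {
  uint64_t A = PtrAlign.valueOrOne().value();
  if (auto *CS = dyn_cast<ConstantInt>(Stride))
    if (CS->getZExtValue() % A == 0)
      return PtrAlign;
  return Align(1);
}
```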
//===----------------------------------------------------------------------===//
//
// This file defines MemoryRefInfo class that is used when getting
// the information of a memory reference instruction.
Most of the patch is NFC, renaming this class.
Can you please split the move-and-rename into one patch, and the functional change into another?
I am sorry; I merged the old PR containing the renaming part before seeing your comment.
do you still need this patch?
Yes, I still need this patch, but it depends on #100930.
This is based on #100930.
An RVV segment is an array of NF contiguous elements. This patch emulates
an RVV segment as a large integer whose bitwidth equals NF * SEW. The
reason not to emulate an RVV segment as an aggregate type is that vector
types must use primitive types as element types.
Another approach would be to create NF MemoryRefInfo objects. That would
avoid creating pseudo types, but it also generates larger code for the
asan checks.
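For comparison, here is a hypothetical sketch of that rejected per-field alternative (the helper and its wiring are invented here, not taken from the patch). Since fields of a unit-stride segment are interleaved in memory, each field would become its own strided reference, i.e. NF checks per access instead of one:

```cpp
#include "llvm/Analysis/MemoryRefInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/IntrinsicInst.h"

using namespace llvm;

// Hypothetical: field F of segment i lives at base + (i * NF + F) * SEW/8,
// so the per-field view is a strided access with stride NF * SEW/8 bytes,
// starting F * SEW/8 bytes past the base. Emitting these NF records is what
// makes the generated asan checks larger than the single widened check.
static void addPerFieldRefs(IntrinsicInst *II, unsigned PtrOperandNo,
                            bool IsWrite, VectorType *FieldTy,
                            MaybeAlign Alignment, Value *Mask, Value *EVL,
                            unsigned NF, Type *XLenIntTy,
                            SmallVectorImpl<MemoryRefInfo> &Interesting) {
  unsigned FieldBytes = FieldTy->getScalarSizeInBits() / 8;
  Value *Stride = ConstantInt::get(XLenIntTy, NF * FieldBytes);
  for (unsigned F = 0; F < NF; ++F)
    Interesting.emplace_back(II, PtrOperandNo, IsWrite, FieldTy, Alignment,
                             Mask, EVL, Stride,
                             ConstantInt::get(XLenIntTy, F * FieldBytes));
}
```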