#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/raw_ostream.h"

using namespace clang;
using namespace CodeGen;
                                   llvm::LLVMContext &LLVMContext) {
  llvm::Type *IntType = llvm::Type::getIntNTy(LLVMContext, Alignment);
  const uint64_t NumElements = (Size + Alignment - 1) / Alignment;
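  // Size and Alignment here are measured in bits: the aggregate is coerced
  // to an array of Alignment-sized integers, [NumElements x iAlignment],
  // where NumElements rounds Size up to a whole number of elements.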
  for (unsigned I = FirstIndex; I <= LastIndex; ++I) {
        Builder.CreateConstInBoundsGEP1_32(Builder.getInt8Ty(), Array, I);

                                 ByRef, Realign, Padding);
                             unsigned maxAllRegisters) {
  unsigned intCount = 0, fpCount = 0;

    if (type->isPointerTy()) {
    } else if (auto intTy = dyn_cast<llvm::IntegerType>(type)) {
      intCount += (intTy->getBitWidth() + ptrWidth - 1) / ptrWidth;
    } else {
      assert(type->isVectorTy() || type->isFloatingPointTy());
    }

  return (intCount + fpCount > maxAllRegisters);
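// Counts how many registers a list of scalar types would occupy: a pointer
// takes one integer register, an integer takes its bit width divided by the
// pointer width (rounded up), and anything else (floats, vectors) takes one
// FP register. The Swift ABI queries use this to decide when a value should
// be passed indirectly rather than expanded into registers.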
                                             unsigned numElts) const {

  if (!isa<CXXRecordDecl>(RT->getDecl()) &&
      !RT->getDecl()->canPassInRegisters()) {

    if (UD->hasAttr<TransparentUnionAttr>()) {
      assert(!UD->field_empty() &&
             "sema created an empty transparent union");

                                                   uint64_t Members) const {
  raw_ostream &OS = llvm::errs();
  OS << "(ABIArgInfo Kind=";

    OS << "Direct Type=";

    OS << "InAlloca Offset=" << getInAllocaFieldIndex();

    OS << "Indirect Align=" << getIndirectAlign().getQuantity()
       << " ByVal=" << getIndirectByVal()
       << " Realign=" << getIndirectRealign();

  case CoerceAndExpand:
    OS << "CoerceAndExpand Type=";
    getCoerceAndExpandType()->print(OS);
  PtrAsInt = CGF.Builder.CreateAdd(PtrAsInt,
  PtrAsInt = CGF.Builder.CreateAnd(PtrAsInt,
  PtrAsInt = CGF.Builder.CreateIntToPtr(PtrAsInt,
                                        Ptr->getName() + ".aligned");
                                      bool AllowHigherAlign) {
  if (AllowHigherAlign && DirectAlign > SlotSize) {

      !DirectTy->isStructTy()) {

                                  std::pair<CharUnits, CharUnits> ValueInfo,
                                  bool AllowHigherAlign) {
    DirectSize = ValueInfo.first;
    DirectAlign = ValueInfo.second;

    DirectTy = DirectTy->getPointerTo(0);

                                        DirectSize, DirectAlign,
                            Address Addr1, llvm::BasicBlock *Block1,
                            Address Addr2, llvm::BasicBlock *Block2,
                            const llvm::Twine &Name = "") {
  llvm::PHINode *PHI = CGF.Builder.CreatePHI(Addr1.getType(), 2, Name);
  return llvm::CallingConv::SPIR_KERNEL;

                                       llvm::PointerType *T, QualType QT) const {
  return llvm::ConstantPointerNull::get(T);

         "Address space agnostic languages only");

  if (auto *C = dyn_cast<llvm::Constant>(Src))
    return performAddrSpaceCast(CGF.CGM, C, SrcAddr, DestAddr, DestTy);

      Src, DestTy, Src->hasName() ? Src->getName() + ".ascast" : "");

  return llvm::ConstantExpr::getPointerCast(Src, DestTy);

                                         llvm::AtomicOrdering Ordering,
                                         llvm::LLVMContext &Ctx) const {
  return Ctx.getOrInsertSyncScopeID("");
    if (AT->getSize() == 0)
    FT = AT->getElementType();

    if (isa<CXXRecordDecl>(RT->getDecl()))

  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (const auto &I : CXXRD->bases())

  for (const auto *I : RD->fields())

  const Type *Found = nullptr;

  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
    for (const auto &I : CXXRD->bases()) {

  for (const auto *FD : RD->fields()) {

    if (AT->getSize().getZExtValue() != 1)
    FT = AT->getElementType();
624 "Unexpected PaddingType seen in arginfo in generic VAArg emitter!");
627 "Unexpected IndirectRealign seen in arginfo in generic VAArg emitter!");
636 return Address(Addr, TyAlignForABI);
639 "Unexpected ArgInfo Kind in generic VAArg emitter!");
642 "Unexpected InReg seen in arginfo in generic VAArg emitter!");
644 "Unexpected PaddingType seen in arginfo in generic VAArg emitter!");
646 "Unexpected DirectOffset seen in arginfo in generic VAArg emitter!");
648 "Unexpected CoerceToType seen in arginfo in generic VAArg emitter!");
class DefaultABIInfo : public ABIInfo {

    I.info = classifyArgumentType(I.type);

    Ty = EnumTy->getDecl()->getIntegerType();

    RetTy = EnumTy->getDecl()->getIntegerType();

  DefaultABIInfo defaultInfo;

      Arg.info = classifyArgumentType(Arg.type);

                                    bool asReturnValue) const override {

  bool isSwiftErrorInRegister() const override {
  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
    if (const auto *FD = dyn_cast_or_null<FunctionDecl>(D)) {
      if (const auto *Attr = FD->getAttr<WebAssemblyImportModuleAttr>()) {
        llvm::Function *Fn = cast<llvm::Function>(GV);
        B.addAttribute("wasm-import-module", Attr->getImportModule());
        Fn->addAttributes(llvm::AttributeList::FunctionIndex, B);
      if (const auto *Attr = FD->getAttr<WebAssemblyImportNameAttr>()) {
        llvm::Function *Fn = cast<llvm::Function>(GV);
        B.addAttribute("wasm-import-name", Attr->getImportName());
        Fn->addAttributes(llvm::AttributeList::FunctionIndex, B);
      if (const auto *Attr = FD->getAttr<WebAssemblyExportNameAttr>()) {
        llvm::Function *Fn = cast<llvm::Function>(GV);
        B.addAttribute("wasm-export-name", Attr->getExportName());
        Fn->addAttributes(llvm::AttributeList::FunctionIndex, B);

    if (auto *FD = dyn_cast_or_null<FunctionDecl>(D)) {
      llvm::Function *Fn = cast<llvm::Function>(GV);
      if (!FD->doesThisDeclarationHaveABody() && !FD->hasPrototype())
        Fn->addFnAttr("no-prototype");
  return defaultInfo.classifyArgumentType(Ty);

  return defaultInfo.classifyReturnType(RetTy);

class PNaClABIInfo : public ABIInfo {

    Ty = EnumTy->getDecl()->getIntegerType();

    RetTy = EnumTy->getDecl()->getIntegerType();
  return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
         cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
         IRType->getScalarSizeInBits() != 64;

                                          StringRef Constraint,
  bool IsMMXCons = llvm::StringSwitch<bool>(Constraint)
                       .Cases("y", "&y", "^Ym", true)
  if (IsMMXCons && Ty->isVectorTy()) {
    if (cast<llvm::VectorType>(Ty)->getBitWidth() != 64) {

    if (BT->isFloatingPoint() && BT->getKind() != BuiltinType::Half) {
      if (BT->getKind() == BuiltinType::LongDouble) {
            &llvm::APFloat::x87DoubleExtended())

    if (VecSize == 128 || VecSize == 256 || VecSize == 512)

static bool isX86VectorCallAggregateSmallEnough(uint64_t NumMembers) {
  return NumMembers <= 4;
  llvm::SmallBitVector IsPreassigned;
  unsigned FreeRegs = 0;
  unsigned FreeSSERegs = 0;

  VectorcallMaxParamNumAsReg = 6
  static const unsigned MinABIStackAlignInBytes = 4;

  bool IsDarwinVectorABI;
  bool IsRetSmallStructInRegABI;
  bool IsWin32StructABI;
  bool IsSoftFloatABI;
  unsigned DefaultNumRegisterParameters;

  static bool isRegisterSize(unsigned Size) {
    return (Size == 8 || Size == 16 || Size == 32 || Size == 64);

    return isX86VectorTypeForVectorCall(getContext(), Ty);

                                         uint64_t NumMembers) const override {
    return isX86VectorCallAggregateSmallEnough(NumMembers);

  unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;
  bool updateFreeRegs(QualType Ty, CCState &State) const;

  bool shouldAggregateUseDirect(QualType Ty, CCState &State, bool &InReg,
                                bool &NeedsPadding) const;
  bool shouldPrimitiveUseInReg(QualType Ty, CCState &State) const;

  bool canExpandIndirectArgument(QualType Ty) const;

  void runVectorCallFirstPass(CGFunctionInfo &FI, CCState &State) const;
                bool RetSmallStructInRegABI, bool Win32StructABI,
                unsigned NumRegisterParameters, bool SoftFloatABI)
      : SwiftABIInfo(CGT), IsDarwinVectorABI(DarwinVectorABI),
        IsRetSmallStructInRegABI(RetSmallStructInRegABI),
        IsWin32StructABI(Win32StructABI),
        IsSoftFloatABI(SoftFloatABI),
        DefaultNumRegisterParameters(NumRegisterParameters) {}

                                    bool asReturnValue) const override {

  bool isSwiftErrorInRegister() const override {
                           bool RetSmallStructInRegABI, bool Win32StructABI,
                           unsigned NumRegisterParameters, bool SoftFloatABI)
            CGT, DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
            NumRegisterParameters, SoftFloatABI)) {}

  static bool isStructReturnInRegABI(

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,

                                  StringRef Constraint,
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);

                                std::string &Constraints,
                                std::vector<llvm::Type *> &ResultRegTypes,
                                std::vector<llvm::Type *> &ResultTruncRegTypes,
                                std::vector<LValue> &ResultRegDests,
                                std::string &AsmString,
                                unsigned NumOutputs) const override;
    unsigned Sig = (0xeb << 0) |
    return llvm::ConstantInt::get(CGM.Int32Ty, Sig);

  StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
    return "movl\t%ebp, %ebp"
           "\t\t// marker for objc_retainAutoreleaseReturnValue";
                                             unsigned NumNewOuts,
                                             std::string &AsmString) {
  std::string Buf;
  llvm::raw_string_ostream OS(Buf);
  while (Pos < AsmString.size()) {
    size_t DollarStart = AsmString.find('$', Pos);
    if (DollarStart == std::string::npos)
      DollarStart = AsmString.size();
    size_t DollarEnd = AsmString.find_first_not_of('$', DollarStart);
    if (DollarEnd == std::string::npos)
      DollarEnd = AsmString.size();
    OS << StringRef(&AsmString[Pos], DollarEnd - Pos);

    size_t NumDollars = DollarEnd - DollarStart;
    if (NumDollars % 2 != 0 && Pos < AsmString.size()) {
      size_t DigitStart = Pos;
      if (AsmString[DigitStart] == '{') {

      size_t DigitEnd = AsmString.find_first_not_of("0123456789", DigitStart);
      if (DigitEnd == std::string::npos)
        DigitEnd = AsmString.size();
      StringRef OperandStr(&AsmString[DigitStart], DigitEnd - DigitStart);
      unsigned OperandIndex;
      if (!OperandStr.getAsInteger(10, OperandIndex)) {
        if (OperandIndex >= FirstIn)
          OperandIndex += NumNewOuts;

  AsmString = std::move(OS.str());
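// An odd run of '$' characters means the final '$' introduces an operand
// reference ("$$" is a literal dollar sign in LLVM inline-asm syntax).
// Because addReturnRegisterOutputs() prepends NumNewOuts new output
// operands, every reference to an input operand (index >= FirstIn) in the
// asm template has to be shifted up by NumNewOuts.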
void X86_32TargetCodeGenInfo::addReturnRegisterOutputs(
    std::vector<llvm::Type *> &ResultRegTypes,
    std::vector<llvm::Type *> &ResultTruncRegTypes,
    std::vector<LValue> &ResultRegDests, std::string &AsmString,
    unsigned NumOutputs) const {
  if (!Constraints.empty())

  if (RetWidth <= 32) {
    Constraints += "={eax}";
    ResultRegTypes.push_back(CGF.Int32Ty);
    Constraints += "=A";
    ResultRegTypes.push_back(CGF.Int64Ty);

  ResultTruncRegTypes.push_back(CoerceTy);

      CoerceTy->getPointerTo()));
  ResultRegDests.push_back(ReturnSlot);
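  // "={eax}" pins a 32-bit-or-smaller return value to EAX; "=A" is the
  // i386 constraint for the EDX:EAX register pair, used when the returned
  // value is 64 bits wide.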
bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
  if ((IsMCUABI && Size > 64) || (!IsMCUABI && !isRegisterSize(Size)))

  if (Size == 64 || Size == 128)

    return shouldReturnTypeInRegister(AT->getElementType(), Context);

  if (!RT)
    return false;

    if (!shouldReturnTypeInRegister(FD->getType(), Context))

    Ty = CTy->getElementType();

  return Size == 32 || Size == 64;

  for (const auto *FD : RD->fields()) {

    if (FD->isBitField())
bool X86_32ABIInfo::canExpandIndirectArgument(QualType Ty) const {
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
    if (!IsWin32StructABI) {
      if (!CXXRD->isCLike())

    if (CXXRD->isDynamicClass())
  if (State.FreeRegs) {

                                             CCState &State) const {
    uint64_t NumElts = 0;
    if ((State.CC == llvm::CallingConv::X86_VectorCall ||
         State.CC == llvm::CallingConv::X86_RegCall) &&

    if (IsDarwinVectorABI) {
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))

      return getIndirectReturnResult(RetTy, State);

      if (RT->getDecl()->hasFlexibleArrayMember())
        return getIndirectReturnResult(RetTy, State);

        return getIndirectReturnResult(RetTy, State);

    if (shouldReturnTypeInRegister(RetTy, getContext())) {
      if ((!IsWin32StructABI && SeltTy->isRealFloatingType())
          || SeltTy->hasPointerRepresentation())

    return getIndirectReturnResult(RetTy, State);

    RetTy = EnumTy->getDecl()->getIntegerType();

  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (const auto &I : CXXRD->bases())

  for (const auto *i : RD->fields()) {
unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
                                                 unsigned Align) const {
  if (Align <= MinABIStackAlignInBytes)

  if (!IsDarwinVectorABI) {
    return MinABIStackAlignInBytes;

  return MinABIStackAlignInBytes;

                                            CCState &State) const {
  if (State.FreeRegs) {

  unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
  if (StackAlign == 0)

  bool Realign = TypeAlign > StackAlign;
X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const {
    if (K == BuiltinType::Float || K == BuiltinType::Double)

bool X86_32ABIInfo::updateFreeRegs(QualType Ty, CCState &State) const {
  if (!IsSoftFloatABI) {
    Class C = classify(Ty);

  unsigned SizeInRegs = (Size + 31) / 32;

  if (SizeInRegs == 0)

    if (SizeInRegs > State.FreeRegs) {

    if (SizeInRegs > State.FreeRegs || SizeInRegs > 2)

  State.FreeRegs -= SizeInRegs;
bool X86_32ABIInfo::shouldAggregateUseDirect(QualType Ty, CCState &State,
                                             bool &NeedsPadding) const {
  NeedsPadding = false;

  if (!updateFreeRegs(Ty, State))

  if (State.CC == llvm::CallingConv::X86_FastCall ||
      State.CC == llvm::CallingConv::X86_VectorCall ||
      State.CC == llvm::CallingConv::X86_RegCall) {
    if (getContext().getTypeSize(Ty) <= 32 && State.FreeRegs)
      NeedsPadding = true;

bool X86_32ABIInfo::shouldPrimitiveUseInReg(QualType Ty, CCState &State) const {
  if (!updateFreeRegs(Ty, State))

  if (State.CC == llvm::CallingConv::X86_FastCall ||
      State.CC == llvm::CallingConv::X86_VectorCall ||
      State.CC == llvm::CallingConv::X86_RegCall) {
void X86_32ABIInfo::runVectorCallFirstPass(CGFunctionInfo &FI,
                                           CCState &State) const {
  for (int I = 0, E = Args.size(); I < E; ++I) {
    uint64_t NumElts = 0;
      if (State.FreeSSERegs >= NumElts) {
        State.FreeSSERegs -= NumElts;
        State.IsPreassigned.set(I);
                                               CCState &State) const {
  bool IsFastCall = State.CC == llvm::CallingConv::X86_FastCall;
  bool IsRegCall = State.CC == llvm::CallingConv::X86_RegCall;
  bool IsVectorCall = State.CC == llvm::CallingConv::X86_VectorCall;

    return getIndirectResult(Ty, false, State);

    uint64_t NumElts = 0;
    if ((IsRegCall || IsVectorCall) &&
      if (State.FreeSSERegs >= NumElts) {
        State.FreeSSERegs -= NumElts;
          return getDirectX86Hva();
        return getIndirectResult(Ty, false, State);
      return getIndirectResult(Ty, true, State);

    llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
    bool NeedsPadding = false;
    if (shouldAggregateUseDirect(Ty, State, InReg, NeedsPadding)) {
      llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
    llvm::IntegerType *PaddingType = NeedsPadding ? Int32 : nullptr;

    if (getContext().getTypeSize(Ty) <= 4 * 32 &&
        (!IsMCUABI || State.FreeRegs == 0) && canExpandIndirectArgument(Ty))
          IsFastCall || IsVectorCall || IsRegCall, PaddingType);

    return getIndirectResult(Ty, true, State);
  if (IsDarwinVectorABI) {
    if ((Size == 8 || Size == 16 || Size == 32) ||
        (Size == 64 && VT->getNumElements() == 1))

    Ty = EnumTy->getDecl()->getIntegerType();

  bool InReg = shouldPrimitiveUseInReg(Ty, State);

  else if (State.CC == llvm::CallingConv::X86_FastCall)
  else if (State.CC == llvm::CallingConv::X86_VectorCall) {
    State.FreeSSERegs = 6;
  } else if (State.CC == llvm::CallingConv::X86_RegCall) {
    State.FreeSSERegs = 8;
    State.FreeRegs = DefaultNumRegisterParameters;

  if (State.FreeRegs) {

  if (State.CC == llvm::CallingConv::X86_VectorCall)
    runVectorCallFirstPass(FI, State);

  bool UsedInAlloca = false;
  for (int I = 0, E = Args.size(); I < E; ++I) {
    if (State.IsPreassigned.test(I))

    rewriteWithInAlloca(FI);
  assert(StackOffset.isMultipleOf(FieldAlign) && "unaligned inalloca struct");

  StackOffset = FieldEnd.alignTo(FieldAlign);
  if (StackOffset != FieldEnd) {
    CharUnits NumBytes = StackOffset - FieldEnd;
    Ty = llvm::ArrayType::get(Ty, NumBytes.getQuantity());
    FrameFields.push_back(Ty);

  llvm_unreachable("invalid enum");
void X86_32ABIInfo::rewriteWithInAlloca(CGFunctionInfo &FI) const {
  assert(IsWin32StructABI && "inalloca only supported on win32");

    addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);

    addFieldToArgStruct(FrameFields, StackOffset, Ret, PtrTy);

  for (; I != E; ++I) {
    addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);

      getTypeStackAlignInBytes(Ty, TypeInfo.second.getQuantity()));
bool X86_32TargetCodeGenInfo::isStructReturnInRegABI(
  assert(Triple.getArch() == llvm::Triple::x86);

  switch (Opts.getStructReturnConvention()) {

  if (Triple.isOSDarwin() || Triple.isOSIAMCU())

  switch (Triple.getOS()) {
  case llvm::Triple::DragonFly:
  case llvm::Triple::FreeBSD:
  case llvm::Triple::OpenBSD:
  case llvm::Triple::Win32:
void X86_32TargetCodeGenInfo::setTargetAttributes(
  if (GV->isDeclaration())
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
    if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
      llvm::Function *Fn = cast<llvm::Function>(GV);
      Fn->addFnAttr("stackrealign");
    if (FD->hasAttr<AnyX86InterruptAttr>()) {
      llvm::Function *Fn = cast<llvm::Function>(GV);
      Fn->setCallingConv(llvm::CallingConv::X86_INTR);

bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
      Four8, Builder.CreateConstInBoundsGEP1_32(CGF.Int8Ty, Address, 9),
static unsigned getNativeVectorSizeForAVXABI(X86AVXABILevel AVXLevel) {
  switch (AVXLevel) {
  case X86AVXABILevel::AVX512:
    return 512;
  case X86AVXABILevel::AVX:
    return 256;
  case X86AVXABILevel::None:
    return 128;
  }
  llvm_unreachable("Unknown AVXLevel");
}
  static Class merge(Class Accum, Class Field);

  void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const;

  void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi,
                bool isNamedArg) const;

                                  unsigned IROffset, QualType SourceTy,
                                  unsigned SourceOffset) const;
                                      unsigned IROffset, QualType SourceTy,
                                      unsigned SourceOffset) const;

                               unsigned &neededInt, unsigned &neededSSE,
                               bool isNamedArg) const;

                                       unsigned &NeededSSE) const;

                                   unsigned &NeededSSE) const;

  bool IsIllegalVectorType(QualType Ty) const;
  bool honorsRevision0_98() const {

  bool classifyIntegerMMXAsSSE() const {
    if (getContext().getLangOpts().getClangABICompat() <=

    if (Triple.isOSDarwin() || Triple.getOS() == llvm::Triple::PS4)
    if (Triple.isOSFreeBSD() && Triple.getOSMajorVersion() >= 10)

  bool passInt128VectorsInMem() const {
    if (getContext().getLangOpts().getClangABICompat() <=

    return T.isOSLinux() || T.isOSNetBSD();
  bool Has64BitPointers;

        Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) {

  unsigned neededInt, neededSSE;

    if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
      return (vectorTy->getBitWidth() > 128);

  bool has64BitPointers() const {
    return Has64BitPointers;

                                    bool asReturnValue) const override {

  bool isSwiftErrorInRegister() const override {

        IsMingw64(getTarget().getTriple().isWindowsGNUEnvironment()) {}

    return isX86VectorTypeForVectorCall(getContext(), Ty);

                                         uint64_t NumMembers) const override {
    return isX86VectorCallAggregateSmallEnough(NumMembers);

                                    bool asReturnValue) const override {

  bool isSwiftErrorInRegister() const override {

                      bool IsVectorCall, bool IsRegCall) const;
  void computeVectorCallArgs(CGFunctionInfo &FI, unsigned FreeSSERegs,
                             bool IsVectorCall, bool IsRegCall) const;
  const X86_64ABIInfo &getABIInfo() const {

  bool shouldSuppressTailCallsOfRetainAutoreleasedReturnValue() const override {

                                  StringRef Constraint,
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);

  bool isNoProtoCallVariadic(const CallArgList &args,
    bool HasAVXType = false;
    for (CallArgList::const_iterator
           it = args.begin(), ie = args.end(); it != ie; ++it) {
      if (getABIInfo().isPassedUsingAVXType(it->Ty)) {

    unsigned Sig = (0xeb << 0) |
    return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
    if (GV->isDeclaration())
    if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
      if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
        llvm::Function *Fn = cast<llvm::Function>(GV);
        Fn->addFnAttr("stackrealign");
      if (FD->hasAttr<AnyX86InterruptAttr>()) {
        llvm::Function *Fn = cast<llvm::Function>(GV);
        Fn->setCallingConv(llvm::CallingConv::X86_INTR);
static std::string qualifyWindowsLibrary(llvm::StringRef Lib) {
  bool Quote = (Lib.find(" ") != StringRef::npos);
  std::string ArgStr = Quote ? "\"" : "";
  if (!Lib.endswith_lower(".lib") && !Lib.endswith_lower(".a"))
  ArgStr += Quote ? "\"" : "";
class WinX86_32TargetCodeGenInfo : public X86_32TargetCodeGenInfo {
                              bool DarwinVectorABI, bool RetSmallStructInRegABI,
                              bool Win32StructABI,
                              unsigned NumRegisterParameters)
      : X86_32TargetCodeGenInfo(CGT, DarwinVectorABI, RetSmallStructInRegABI,
                                Win32StructABI, NumRegisterParameters, false) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,

  void getDependentLibraryOption(llvm::StringRef Lib,
    Opt = "/DEFAULTLIB:";
    Opt += qualifyWindowsLibrary(Lib);

  void getDetectMismatchOption(llvm::StringRef Name,
                               llvm::StringRef Value,
    Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
static void addStackProbeTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
  if (llvm::Function *Fn = dyn_cast_or_null<llvm::Function>(GV)) {
      Fn->addFnAttr("stack-probe-size",
      Fn->addFnAttr("no-stack-arg-probe");

void WinX86_32TargetCodeGenInfo::setTargetAttributes(
  X86_32TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
  if (GV->isDeclaration())
  addStackProbeTargetAttributes(D, GV, CGM);
  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,

  void getDependentLibraryOption(llvm::StringRef Lib,
    Opt = "/DEFAULTLIB:";
    Opt += qualifyWindowsLibrary(Lib);

  void getDetectMismatchOption(llvm::StringRef Name,
                               llvm::StringRef Value,
    Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
void WinX86_64TargetCodeGenInfo::setTargetAttributes(
  if (GV->isDeclaration())
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
    if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
      llvm::Function *Fn = cast<llvm::Function>(GV);
      Fn->addFnAttr("stackrealign");
    if (FD->hasAttr<AnyX86InterruptAttr>()) {
      llvm::Function *Fn = cast<llvm::Function>(GV);
      Fn->setCallingConv(llvm::CallingConv::X86_INTR);

  addStackProbeTargetAttributes(D, GV, CGM);
void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo,
  if (Hi == X87Up && Lo != X87 && honorsRevision0_98())
  if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp))
  if (Hi == SSEUp && Lo != SSE)
X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
  assert((Accum != Memory && Accum != ComplexX87) &&
         "Invalid accumulated classification during merge.");
  if (Accum == Field || Field == NoClass)
  if (Field == Memory)
  if (Accum == NoClass)
  if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
      Accum == X87 || Accum == X87Up)
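// merge() implements the field-merge rules from the SysV x86-64 psABI
// (section 3.2.3): equal classes merge to themselves, NoClass is the
// identity, Memory always wins, Integer beats SSE, and any X87-related
// class forces the whole eightbyte to Memory.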
void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
                             Class &Lo, Class &Hi, bool isNamedArg) const {
  Class &Current = OffsetBase < 64 ? Lo : Hi;

    if (k == BuiltinType::Void) {
    } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
    } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
    } else if (k == BuiltinType::Float || k == BuiltinType::Double) {
    } else if (k == BuiltinType::LongDouble) {
      if (LDF == &llvm::APFloat::IEEEquad()) {
      } else if (LDF == &llvm::APFloat::x87DoubleExtended()) {
      } else if (LDF == &llvm::APFloat::IEEEdouble()) {
      } else
        llvm_unreachable("unexpected long double representation!");

    classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi, isNamedArg);

    if (Has64BitPointers) {
      uint64_t EB_FuncPtr = (OffsetBase) / 64;
      uint64_t EB_ThisAdj = (OffsetBase + 64 - 1) / 64;
      if (EB_FuncPtr != EB_ThisAdj) {
    if (Size == 1 || Size == 8 || Size == 16 || Size == 32) {

      uint64_t EB_Lo = (OffsetBase) / 64;
      uint64_t EB_Hi = (OffsetBase + Size - 1) / 64;
    } else if (Size == 64) {
      QualType ElementType = VT->getElementType();

      if (!classifyIntegerMMXAsSSE() &&
      if (OffsetBase && OffsetBase != 64)
    } else if (Size == 128 ||
               (isNamedArg && Size <= getNativeVectorSizeForAVXABI(AVXLevel))) {
      QualType ElementType = VT->getElementType();

      if (passInt128VectorsInMem() && Size != 128 &&

    else if (Size <= 128)

    if (LDF == &llvm::APFloat::IEEEquad())
    else if (LDF == &llvm::APFloat::x87DoubleExtended())
      Current = ComplexX87;
    else if (LDF == &llvm::APFloat::IEEEdouble())
    else
      llvm_unreachable("unexpected long double representation!");
    uint64_t EB_Real = (OffsetBase) / 64;
    if (Hi == NoClass && EB_Real != EB_Imag)

    if (OffsetBase % getContext().getTypeAlign(AT->getElementType()))

    uint64_t ArraySize = AT->getSize().getZExtValue();

        (Size != EltSize || Size > getNativeVectorSizeForAVXABI(AVXLevel)))

    for (uint64_t i = 0, Offset = OffsetBase; i < ArraySize;
         ++i, Offset += EltSize) {
      Class FieldLo, FieldHi;
      classify(AT->getElementType(), Offset, FieldLo, FieldHi, isNamedArg);
      Lo = merge(Lo, FieldLo);
      Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)

    postMerge(Size, Lo, Hi);
    assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (const auto &I : CXXRD->bases()) {
        assert(!I.isVirtual() && !I.getType()->isDependentType() &&
               "Unexpected base class!");
            cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());

        Class FieldLo, FieldHi;
        classify(I.getType(), Offset, FieldLo, FieldHi, isNamedArg);
        Lo = merge(Lo, FieldLo);
        Hi = merge(Hi, FieldHi);
        if (Lo == Memory || Hi == Memory) {
          postMerge(Size, Lo, Hi);

         i != e; ++i, ++idx) {
      bool BitField = i->isBitField();

      if (BitField && i->isUnnamedBitfield())

      if (Size > 128 && (Size != getContext().getTypeSize(i->getType()) ||
                         Size > getNativeVectorSizeForAVXABI(AVXLevel))) {
        postMerge(Size, Lo, Hi);

      if (!BitField && Offset % getContext().getTypeAlign(i->getType())) {
        postMerge(Size, Lo, Hi);

      Class FieldLo, FieldHi;

        assert(!i->isUnnamedBitfield());
        uint64_t Size = i->getBitWidthValue(getContext());

        uint64_t EB_Lo = Offset / 64;
        uint64_t EB_Hi = (Offset + Size - 1) / 64;

        assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
        FieldHi = EB_Hi ? Integer : NoClass;

        classify(i->getType(), Offset, FieldLo, FieldHi, isNamedArg);

      Lo = merge(Lo, FieldLo);
      Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)

    postMerge(Size, Lo, Hi);
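// Worked example of the classification above: for
//   struct S { double d; long l; };
// the first eightbyte (d) classifies as SSE and the second (l) as INTEGER,
// so S is passed in one XMM register plus one GPR rather than in memory.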
    Ty = EnumTy->getDecl()->getIntegerType();

bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const {
    unsigned LargestVector = getNativeVectorSizeForAVXABI(AVXLevel);
    if (Size <= 64 || Size > LargestVector)

    QualType EltTy = VecTy->getElementType();
    if (passInt128VectorsInMem() &&

                                            unsigned freeIntRegs) const {
    Ty = EnumTy->getDecl()->getIntegerType();

  if (freeIntRegs == 0) {
    if (Align == 8 && Size <= 64)
  if (isa<llvm::VectorType>(IRType)) {
    if (passInt128VectorsInMem() &&
        IRType->getVectorElementType()->isIntegerTy(128)) {
      return llvm::VectorType::get(llvm::Type::getInt64Ty(getVMContext()),

  if (IRType->getTypeID() == llvm::Type::FP128TyID)

  assert((Size == 128 || Size == 256 || Size == 512) && "Invalid type found!");

  return llvm::VectorType::get(llvm::Type::getDoubleTy(getVMContext()),
  unsigned TySize = (unsigned)Context.getTypeSize(Ty);
  if (TySize <= StartBit)

    unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType());
    unsigned NumElts = (unsigned)AT->getSize().getZExtValue();

    for (unsigned i = 0; i != NumElts; ++i) {
      unsigned EltOffset = i * EltSize;
      if (EltOffset >= EndBit)
        break;

      unsigned EltStart = EltOffset < StartBit ? StartBit - EltOffset : 0;
                                 EndBit - EltOffset, Context))

    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (const auto &I : CXXRD->bases()) {
        assert(!I.isVirtual() && !I.getType()->isDependentType() &&
               "Unexpected base class!");
            cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());

        if (BaseOffset >= EndBit)
          continue;

        unsigned BaseStart = BaseOffset < StartBit ? StartBit - BaseOffset : 0;
                                   EndBit - BaseOffset, Context))

         i != e; ++i, ++idx) {
      if (FieldOffset >= EndBit)
        break;

      unsigned FieldStart = FieldOffset < StartBit ? StartBit - FieldOffset : 0;
                                  const llvm::DataLayout &TD) {
  if (IROffset == 0 && IRType->isFloatTy())

  if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
    const llvm::StructLayout *SL = TD.getStructLayout(STy);
    unsigned Elt = SL->getElementContainingOffset(IROffset);
    IROffset -= SL->getElementOffset(Elt);

  if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
    unsigned EltSize = TD.getTypeAllocSize(EltTy);
    IROffset -= IROffset / EltSize * EltSize;

llvm::Type *X86_64ABIInfo::
GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset,
                   QualType SourceTy, unsigned SourceOffset) const {
    return llvm::VectorType::get(llvm::Type::getFloatTy(getVMContext()), 2);
llvm::Type *X86_64ABIInfo::
GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
                       QualType SourceTy, unsigned SourceOffset) const {
  if (IROffset == 0) {
    if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) ||
        IRType->isIntegerTy(64))

    if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
        IRType->isIntegerTy(32) ||
        (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) {
      unsigned BitWidth = isa<llvm::PointerType>(IRType)
                              ? 32
                              : cast<llvm::IntegerType>(IRType)->getBitWidth();

  if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
    const llvm::StructLayout *SL = getDataLayout().getStructLayout(STy);
    if (IROffset < SL->getSizeInBytes()) {
      unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
      IROffset -= SL->getElementOffset(FieldIdx);
      return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,
                                    SourceTy, SourceOffset);

  if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
    unsigned EltOffset = IROffset / EltSize * EltSize;
    return GetINTEGERTypeAtOffset(EltTy, IROffset - EltOffset, SourceTy,

  unsigned TySizeInBytes =
  assert(TySizeInBytes != SourceOffset && "Empty field?");

      std::min(TySizeInBytes - SourceOffset, 8U) * 8);
                                           const llvm::DataLayout &TD) {
  unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
  unsigned HiAlign = TD.getABITypeAlignment(Hi);
  unsigned HiStart = llvm::alignTo(LoSize, HiAlign);
  assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");

    if (Lo->isFloatTy())
      Lo = llvm::Type::getDoubleTy(Lo->getContext());
    else {
      assert((Lo->isIntegerTy() || Lo->isPointerTy()) &&
             "Invalid/unknown lo type");
      Lo = llvm::Type::getInt64Ty(Lo->getContext());
    }

  llvm::StructType *Result = llvm::StructType::get(Lo, Hi);

  assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
         "Invalid x86-64 argument pair!");
  X86_64ABIInfo::Class Lo, Hi;
  classify(RetTy, 0, Lo, Hi, true);

  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");

    assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
           "Unknown missing lo part");

    llvm_unreachable("Invalid classification for lo word.");

    return getIndirectReturnResult(RetTy);

    ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);

    if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
        RetTy = EnumTy->getDecl()->getIntegerType();

    ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);

    assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
    ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()),

    llvm_unreachable("Invalid classification for hi word.");

    HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);

    HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);

    assert(Lo == SSE && "Unexpected SSEUp classification.");
    ResType = GetByteVectorType(RetTy);

    HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
    QualType Ty, unsigned freeIntRegs, unsigned &neededInt, unsigned &neededSSE,

  X86_64ABIInfo::Class Lo, Hi;
  classify(Ty, 0, Lo, Hi, isNamedArg);

  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");

    assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
           "Unknown missing lo part");

    return getIndirectResult(Ty, freeIntRegs);

    llvm_unreachable("Invalid classification for lo word.");

    if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
        Ty = EnumTy->getDecl()->getIntegerType();

    ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0);

    llvm_unreachable("Invalid classification for hi word.");

  case NoClass:
    break;

    HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);

    assert(Lo == SSE && "Unexpected SSEUp classification");
    ResType = GetByteVectorType(Ty);
X86_64ABIInfo::classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt,
                                             unsigned &NeededSSE) const {
  assert(RT && "classifyRegCallStructType only valid with struct types");

  if (RT->getDecl()->hasFlexibleArrayMember())
    return getIndirectReturnResult(Ty);

  if (auto CXXRD = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
    if (CXXRD->isDynamicClass()) {
      NeededInt = NeededSSE = 0;
      return getIndirectReturnResult(Ty);
    }

    for (const auto &I : CXXRD->bases())
      if (classifyRegCallStructTypeImpl(I.getType(), NeededInt, NeededSSE)
        NeededInt = NeededSSE = 0;
        return getIndirectReturnResult(Ty);

  for (const auto *FD : RT->getDecl()->fields()) {
    if (FD->getType()->isRecordType() && !FD->getType()->isUnionType()) {
      if (classifyRegCallStructTypeImpl(FD->getType(), NeededInt, NeededSSE)
        NeededInt = NeededSSE = 0;
        return getIndirectReturnResult(Ty);

      unsigned LocalNeededInt, LocalNeededSSE;
                               LocalNeededSSE, true)
        NeededInt = NeededSSE = 0;
        return getIndirectReturnResult(Ty);

      NeededInt += LocalNeededInt;
      NeededSSE += LocalNeededSSE;
                                                    unsigned &NeededInt,
                                                    unsigned &NeededSSE) const {
  return classifyRegCallStructTypeImpl(Ty, NeededInt, NeededSSE);
  if (CallingConv == llvm::CallingConv::Win64) {
    WinX86_64ABIInfo Win64ABIInfo(CGT, AVXLevel);
    Win64ABIInfo.computeInfo(FI);

  bool IsRegCall = CallingConv == llvm::CallingConv::X86_RegCall;

  unsigned FreeIntRegs = IsRegCall ? 11 : 6;
  unsigned FreeSSERegs = IsRegCall ? 16 : 8;
  unsigned NeededInt, NeededSSE;

        classifyRegCallStructType(FI.getReturnType(), NeededInt, NeededSSE);
    if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
      FreeIntRegs -= NeededInt;
      FreeSSERegs -= NeededSSE;
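  // Register budget: the SysV x86-64 convention provides six integer
  // argument registers (RDI, RSI, RDX, RCX, R8, R9) and eight SSE registers
  // (XMM0-XMM7); __regcall raises the limits to 11 and 16 respectively,
  // which is what the "IsRegCall ? 11 : 6" and "16 : 8" selections encode.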
       it != ie; ++it, ++ArgNo) {
    bool IsNamedArg = ArgNo < NumRequiredArgs;

    if (IsRegCall && it->type->isStructureOrClassType())
      it->info = classifyRegCallStructType(it->type, NeededInt, NeededSSE);
                                     NeededSSE, IsNamedArg);

    if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
      FreeIntRegs -= NeededInt;
      FreeSSERegs -= NeededSSE;

      it->info = getIndirectResult(it->type, FreeIntRegs);
      llvm::PointerType::getUnqual(LTy));

      llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7);
  overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset,
                                            "overflow_arg_area.next");
  unsigned neededInt, neededSSE;

  if (!neededInt && !neededSSE)

  llvm::Value *gp_offset = nullptr, *fp_offset = nullptr;

    InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8);
    InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp");

        llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16);
    FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp");
    InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;

  CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
  if (neededInt && neededSSE) {
    assert(AI.isDirect() && "Unexpected ABI info for mixed regs");
    assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
    assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) &&
           "Unexpected ABI info for mixed regs");
    llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
    llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
    llvm::Value *RegLoAddr = TyLo->isFPOrFPVectorTy() ? FPAddr : GPAddr;
    llvm::Value *RegHiAddr = TyLo->isFPOrFPVectorTy() ? GPAddr : FPAddr;
  } else if (neededInt) {
    std::pair<CharUnits, CharUnits> SizeAlign =
    uint64_t TySize = SizeAlign.first.getQuantity();
  } else if (neededSSE == 1) {
  } else {
    assert(neededSSE == 2 && "Invalid number of needed registers!");
        RegAddrLo, ST->getStructElementType(0)));
        RegAddrHi, ST->getStructElementType(1)));
WinX86_64ABIInfo::reclassifyHvaArgType(QualType Ty, unsigned &FreeSSERegs,

  const Type *Base = nullptr;
  uint64_t NumElts = 0;

    FreeSSERegs -= NumElts;
    return getDirectX86Hva();
                                      bool IsReturnType, bool IsVectorCall,
                                      bool IsRegCall) const {

    Ty = EnumTy->getDecl()->getIntegerType();

  uint64_t Width = Info.Width;

  if (!IsReturnType) {

    const Type *Base = nullptr;
    uint64_t NumElts = 0;

    if ((IsVectorCall || IsRegCall) &&
      if (FreeSSERegs >= NumElts) {
        FreeSSERegs -= NumElts;
    } else if (IsVectorCall) {
      if (FreeSSERegs >= NumElts &&
        FreeSSERegs -= NumElts;
    } else if (IsReturnType) {

    if (LLTy->isPointerTy() || LLTy->isIntegerTy())

  if (Width > 64 || !llvm::isPowerOf2_64(Width))

    switch (BT->getKind()) {
    case BuiltinType::Bool:
    case BuiltinType::LongDouble:
      if (LDF == &llvm::APFloat::x87DoubleExtended())
    case BuiltinType::Int128:
    case BuiltinType::UInt128:
          llvm::VectorType::get(llvm::Type::getInt64Ty(getVMContext()), 2));
                                             unsigned FreeSSERegs,
                                             bool IsRegCall) const {
    if (Count < VectorcallMaxParamNumAsReg)
      I.info = classify(I.type, FreeSSERegs, false, IsVectorCall, IsRegCall);

      unsigned ZeroSSERegsAvail = 0;
      I.info = classify(I.type, ZeroSSERegsAvail, false,
                        IsVectorCall, IsRegCall);

      I.info = reclassifyHvaArgType(I.type, FreeSSERegs, I.info);
  bool IsVectorCall = CC == llvm::CallingConv::X86_VectorCall;
  bool IsRegCall = CC == llvm::CallingConv::X86_RegCall;

  if (CC == llvm::CallingConv::X86_64_SysV) {
    X86_64ABIInfo SysVABIInfo(CGT, AVXLevel);
    SysVABIInfo.computeInfo(FI);

  unsigned FreeSSERegs = 0;
  } else if (IsRegCall) {

                   IsVectorCall, IsRegCall);

  } else if (IsRegCall) {

    computeVectorCallArgs(FI, FreeSSERegs, IsVectorCall, IsRegCall);

      I.info = classify(I.type, FreeSSERegs, false, IsVectorCall, IsRegCall);

  bool IsIndirect = false;

  IsIndirect = Width > 64 || !llvm::isPowerOf2_64(Width);
class PPC32_SVR4_ABIInfo : public DefaultABIInfo {
  bool IsSoftFloatABI;

      : DefaultABIInfo(CGT), IsSoftFloatABI(SoftFloatABI) {}

    Ty = CTy->getElementType();

  const Type *AlignTy = nullptr;

  if (getTarget().getTriple().isOSDarwin()) {
    TI.second = getParamTypeAlignment(Ty);

  const unsigned OverflowLimit = 8;
  if (isInt || IsSoftFloatABI) {

  if (isI64 || (isF64 && IsSoftFloatABI)) {
    NumRegs = Builder.CreateAdd(NumRegs, Builder.getInt8(1));
    NumRegs = Builder.CreateAnd(NumRegs, Builder.getInt8((uint8_t) ~1U));

      Builder.CreateICmpULT(NumRegs, Builder.getInt8(OverflowLimit), "cond");

  Builder.CreateCondBr(CC, UsingRegs, UsingOverflow);

  if (isIndirect) DirectTy = DirectTy->getPointerTo(0);

    if (!(isInt || IsSoftFloatABI)) {

        Builder.CreateMul(NumRegs, Builder.getInt8(RegSize.getQuantity()));

        Builder.CreateAdd(NumRegs,
            Builder.getInt8((isI64 || (isF64 && IsSoftFloatABI)) ? 2 : 1));

    Builder.CreateStore(Builder.getInt8(OverflowLimit), NumRegsAddr);
  Size = TypeInfo.first.alignTo(OverflowAreaAlign);

  if (Align > OverflowAreaAlign) {

  Builder.CreateStore(OverflowArea.getPointer(), OverflowAreaAddr);
  llvm::IntegerType *i8 = CGF.Int8Ty;
  llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
  llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
  llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
  static const unsigned GPRBits = 64;
  bool IsSoftFloatABI;

  bool IsQPXVectorTy(const Type *Ty) const {
      unsigned NumElements = VT->getNumElements();
      if (NumElements == 1)

      if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double)) {
      } else if (VT->getElementType()->
                     isSpecificBuiltinType(BuiltinType::Float)) {

  bool IsQPXVectorTy(QualType Ty) const {

        IsSoftFloatABI(SoftFloatABI) {}

  bool isPromotableTypeForABI(QualType Ty) const;

                                         uint64_t Members) const override;

    if (IsQPXVectorTy(T) ||

                                    bool asReturnValue) const override {

  bool isSwiftErrorInRegister() const override {
                          PPC64_SVR4_ABIInfo::ABIKind Kind, bool HasQPX,

class PPC64TargetCodeGenInfo : public DefaultTargetCodeGenInfo {
  PPC64TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {}
PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const {
    Ty = EnumTy->getDecl()->getIntegerType();

    switch (BT->getKind()) {
    case BuiltinType::Int:
    case BuiltinType::UInt:

    Ty = CTy->getElementType();

  if (IsQPXVectorTy(Ty)) {

  const Type *AlignAsType = nullptr;
    if (IsQPXVectorTy(EltType) || (EltType->isVectorType() &&
      AlignAsType = EltType;

  const Type *Base = nullptr;
  uint64_t Members = 0;
  if (!AlignAsType && Kind == ELFv2 &&

  if (AlignAsType && IsQPXVectorTy(AlignAsType)) {
  } else if (AlignAsType) {
                                     uint64_t &Members) const {
    uint64_t NElements = AT->getSize().getZExtValue();
    Members *= NElements;

    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (const auto &I : CXXRD->bases()) {
        uint64_t FldMembers;
        Members += FldMembers;

    for (const auto *FD : RD->fields()) {
        if (AT->getSize().getZExtValue() == 0)
        FT = AT->getElementType();

      uint64_t FldMembers;
          std::max(Members, FldMembers) : Members + FldMembers);

    Ty = CT->getElementType();

    QualType EltTy = VT->getElementType();
    unsigned NumElements =
bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
    if (BT->getKind() == BuiltinType::Float ||
        BT->getKind() == BuiltinType::Double ||
        BT->getKind() == BuiltinType::LongDouble ||
        (BT->getKind() == BuiltinType::Float128))) {

bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateSmallEnough(
    const Type *Base, uint64_t Members) const {
  return Members * NumRegs <= 8;
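// ELFv2 passes and returns homogeneous aggregates in registers as long as
// they fit in the eight available FPRs/VRs, hence Members * NumRegs <= 8,
// where NumRegs is the number of registers one base element occupies.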
  else if (Size < 128) {

  uint64_t ABIAlign = getParamTypeAlignment(Ty).getQuantity();

    const Type *Base = nullptr;
    uint64_t Members = 0;
    if (Kind == ELFv2 &&
      llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);

    if (Bits > 0 && Bits <= 8 * GPRBits) {
      if (Bits <= GPRBits)
          llvm::IntegerType::get(getVMContext(), llvm::alignTo(Bits, 8));

      uint64_t RegBits = ABIAlign * 8;
      uint64_t NumRegs = llvm::alignTo(Bits, RegBits) / RegBits;
      CoerceTy = llvm::ArrayType::get(RegTy, NumRegs);

                                   TyAlign > ABIAlign);
  else if (Size < 128) {

    const Type *Base = nullptr;
    uint64_t Members = 0;
    if (Kind == ELFv2 &&
      llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);

    if (Kind == ELFv2 && Bits <= 2 * GPRBits) {
      if (Bits > GPRBits) {
        CoerceTy = llvm::IntegerType::get(getVMContext(), GPRBits);
        CoerceTy = llvm::StructType::get(CoerceTy, CoerceTy);
          llvm::IntegerType::get(getVMContext(), llvm::alignTo(Bits, 8));

  TypeInfo.second = getParamTypeAlignment(Ty);
  if (EltSize < SlotSize) {
                          SlotSize * 2, SlotSize,
          SlotSize - EltSize);
          2 * SlotSize - EltSize);

  llvm::IntegerType *i8 = CGF.Int8Ty;
  llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
  llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
  llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable(

  ABIKind getABIKind() const { return Kind; }
  bool isDarwinPCS() const { return Kind == DarwinPCS; }

                                         uint64_t Members) const override;
  bool isIllegalVectorType(QualType Ty) const;

      it.info = classifyArgumentType(it.type);

    return Kind == Win64      ? EmitMSVAArg(CGF, VAListAddr, Ty)
           : isDarwinPCS()    ? EmitDarwinVAArg(VAListAddr, Ty, CGF)
                              : EmitAAPCSVAArg(VAListAddr, Ty, CGF);

                                    bool asReturnValue) const override {

  bool isSwiftErrorInRegister() const override {

                                 unsigned elts) const override;

  StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
    return "mov\tfp, fp\t\t// marker for objc_retainAutoreleaseReturnValue";

  bool doesReturnSlotInterfereWithArgs() const override { return false; }
  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
    const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);

    bool BranchTargetEnforcement = CGM.getCodeGenOpts().BranchTargetEnforcement;
    if (const auto *TA = FD->getAttr<TargetAttr>()) {
      assert(Error.empty());

    auto *Fn = cast<llvm::Function>(GV);
      Fn->addFnAttr("sign-return-address",
      Fn->addFnAttr("sign-return-address-key",

    if (BranchTargetEnforcement)
      Fn->addFnAttr("branch-target-enforcement");
class WindowsAArch64TargetCodeGenInfo : public AArch64TargetCodeGenInfo {
  WindowsAArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIInfo::ABIKind K)
      : AArch64TargetCodeGenInfo(CGT, K) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,

  void getDependentLibraryOption(llvm::StringRef Lib,
    Opt = "/DEFAULTLIB:" + qualifyWindowsLibrary(Lib);

  void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value,
    Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";

void WindowsAArch64TargetCodeGenInfo::setTargetAttributes(
  AArch64TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
  if (GV->isDeclaration())
  addStackProbeTargetAttributes(D, GV, CGM);
  if (isIllegalVectorType(Ty)) {
        llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 2);
        llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 4);

    Ty = EnumTy->getDecl()->getIntegerType();

  if (IsEmpty || Size == 0) {
    if (IsEmpty && Size == 0)

  const Type *Base = nullptr;
  uint64_t Members = 0;

    if (getTarget().isRenderScriptTarget()) {

    if (Kind == AArch64ABIInfo::AAPCS) {
      Alignment = Alignment < 128 ? 64 : 128;
          (unsigned)getTarget().getPointerWidth(0));

    Size = llvm::alignTo(Size, Alignment);

        Size == Alignment ? BaseTy
                          : llvm::ArrayType::get(BaseTy, Size / Alignment));
                                              bool IsVariadic) const {
    RetTy = EnumTy->getDecl()->getIntegerType();

  const Type *Base = nullptr;
  uint64_t Members = 0;
      !(getTarget().getTriple().getArch() == llvm::Triple::aarch64_32 &&

    if (getTarget().isRenderScriptTarget()) {

    Size = llvm::alignTo(Size, 64);
    if (Alignment < 128 && Size == 128) {
bool AArch64ABIInfo::isIllegalVectorType(QualType Ty) const {
    unsigned NumElements = VT->getNumElements();

    if (!llvm::isPowerOf2_32(NumElements))

    if (Triple.getArch() == llvm::Triple::aarch64_32 &&
        Triple.isOSBinFormatMachO())

    return Size != 64 && (Size != 128 || NumElements == 1);

bool AArch64ABIInfo::isLegalVectorTypeForSwift(CharUnits totalSize,
                                               unsigned elts) const {
  if (!llvm::isPowerOf2_32(elts))
bool AArch64ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  if (BT->isFloatingPoint())
  if (VecSize == 64 || VecSize == 128)

bool AArch64ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
                                                       uint64_t Members) const {
  return Members <= 4;
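// Per AAPCS64, a homogeneous aggregate has at most four elements of one
// floating-point type or one 64/128-bit vector type; such HFAs/HVAs are
// passed in consecutive SIMD/FP registers instead of on the stack.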
    BaseTy = llvm::PointerType::getUnqual(BaseTy);

  unsigned NumRegs = 1;
  if (llvm::ArrayType *ArrTy = dyn_cast<llvm::ArrayType>(BaseTy)) {
    BaseTy = ArrTy->getElementType();
    NumRegs = ArrTy->getNumElements();
  bool IsFPR = BaseTy->isFloatingPointTy() || BaseTy->isVectorTy();

  int RegSize = IsIndirect ? 8 : TySize.getQuantity();
    RegSize = llvm::alignTo(RegSize, 8);
    RegSize = 16 * NumRegs;

  UsingStack = CGF.Builder.CreateICmpSGE(
      reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, 0));

  CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock);
  if (!IsFPR && !IsIndirect && TyAlign.getQuantity() > 8) {
    reg_offs = CGF.Builder.CreateAdd(
        reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, Align - 1),
    reg_offs = CGF.Builder.CreateAnd(
        reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, -Align),

  NewOffset = CGF.Builder.CreateAdd(
      reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, RegSize), "new_reg_offs");

  InRegs = CGF.Builder.CreateICmpSLE(
      NewOffset, llvm::ConstantInt::get(CGF.Int32Ty, 0), "inreg");

  CGF.Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock);
  Address BaseAddr(CGF.Builder.CreateInBoundsGEP(reg_top, reg_offs),

    MemTy = llvm::PointerType::getUnqual(MemTy);

  const Type *Base = nullptr;
  uint64_t NumMembers = 0;
  if (IsHFA && NumMembers > 1) {
    assert(!IsIndirect && "Homogeneous aggregates should be passed directly");

    llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers);
        std::max(TyAlign, BaseTyInfo.second));

        BaseTyInfo.first.getQuantity() < 16)
      Offset = 16 - BaseTyInfo.first.getQuantity();

    for (unsigned i = 0; i < NumMembers; ++i) {
  CharUnits SlotSize = BaseAddr.getAlignment();
      TySize < SlotSize) {

  OnStackPtr = CGF.Builder.CreatePtrToInt(OnStackPtr, CGF.Int64Ty);
  OnStackPtr = CGF.Builder.CreateAdd(
      OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, Align - 1),
  OnStackPtr = CGF.Builder.CreateAnd(
      OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, -Align),

  Address OnStackAddr(OnStackPtr,

    StackSize = StackSlotSize;
    StackSize = TySize.alignTo(StackSlotSize);

  CGF.Builder.CreateInBoundsGEP(OnStackPtr, StackSizeC, "new_stack");

      TySize < StackSlotSize) {

      OnStackAddr, OnStackBlock, "vaargs.addr");
  bool IsIndirect = false;
  if (TyInfo.first.getQuantity() > 16) {
    const Type *Base = nullptr;
    uint64_t Members = 0;

                          TyInfo, SlotSize, true);
  bool isEABI() const {
    switch (getTarget().getTriple().getEnvironment()) {
    case llvm::Triple::Android:
    case llvm::Triple::EABI:
    case llvm::Triple::EABIHF:
    case llvm::Triple::GNUEABI:
    case llvm::Triple::GNUEABIHF:
    case llvm::Triple::MuslEABI:
    case llvm::Triple::MuslEABIHF:

  bool isEABIHF() const {
    switch (getTarget().getTriple().getEnvironment()) {
    case llvm::Triple::EABIHF:
    case llvm::Triple::GNUEABIHF:
    case llvm::Triple::MuslEABIHF:

  ABIKind getABIKind() const { return Kind; }
                                unsigned functionCallConv) const;
                                  unsigned functionCallConv) const;
                                          uint64_t Members) const;

  bool isIllegalVectorType(QualType Ty) const;
  bool containsAnyFP16Vectors(QualType Ty) const;

                                         uint64_t Members) const override;
  bool isEffectivelyAAPCS_VFP(unsigned callConvention, bool acceptHalf) const;

                                    bool asReturnValue) const override {

  bool isSwiftErrorInRegister() const override {

                                 unsigned elts) const override;
  const ARMABIInfo &getABIInfo() const {

  StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
    return "mov\tr7, r7\t\t// marker for objc_retainAutoreleaseReturnValue";

  unsigned getSizeOfUnwindException() const override {
    if (getABIInfo().isEABI())
      return 88;
  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
    if (GV->isDeclaration())
    const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);

    const ARMInterruptAttr *Attr = FD->getAttr<ARMInterruptAttr>();

    switch (Attr->getInterrupt()) {
    case ARMInterruptAttr::Generic: Kind = "";      break;
    case ARMInterruptAttr::IRQ:     Kind = "IRQ";   break;
    case ARMInterruptAttr::FIQ:     Kind = "FIQ";   break;
    case ARMInterruptAttr::SWI:     Kind = "SWI";   break;
    case ARMInterruptAttr::ABORT:   Kind = "ABORT"; break;
    case ARMInterruptAttr::UNDEF:   Kind = "UNDEF"; break;

    llvm::Function *Fn = cast<llvm::Function>(GV);

    Fn->addFnAttr("interrupt", Kind);

    ARMABIInfo::ABIKind ABI = cast<ARMABIInfo>(getABIInfo()).getABIKind();
    if (ABI == ARMABIInfo::APCS)

    llvm::AttrBuilder B;
    B.addStackAlignmentAttr(8);
    Fn->addAttributes(llvm::AttributeList::FunctionIndex, B);
class WindowsARMTargetCodeGenInfo : public ARMTargetCodeGenInfo {
      : ARMTargetCodeGenInfo(CGT, K) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,

  void getDependentLibraryOption(llvm::StringRef Lib,
    Opt = "/DEFAULTLIB:" + qualifyWindowsLibrary(Lib);

  void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value,
    Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";

void WindowsARMTargetCodeGenInfo::setTargetAttributes(
  ARMTargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
  if (GV->isDeclaration())
  addStackProbeTargetAttributes(D, GV, CGM);
  if (isEABIHF() || getTarget().getTriple().isWatchABI())
    return llvm::CallingConv::ARM_AAPCS_VFP;
    return llvm::CallingConv::ARM_AAPCS;
    return llvm::CallingConv::ARM_APCS;

  switch (getABIKind()) {
  case APCS:        return llvm::CallingConv::ARM_APCS;
  case AAPCS:       return llvm::CallingConv::ARM_AAPCS;
  case AAPCS_VFP:   return llvm::CallingConv::ARM_AAPCS_VFP;
  case AAPCS16_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  }
  llvm_unreachable("bad ABI kind");
void ARMABIInfo::setCCs() {
  if (abiCC != getLLVMDefaultCC())

  if (Size == 64 || Size == 128) {

                                                    uint64_t Members) const {
  assert(Base && "Base class should be set for homogeneous aggregate");

  if (!getTarget().hasLegalHalfType() && containsAnyFP16Vectors(Ty)) {
    llvm::Type *NewVecTy = llvm::VectorType::get(
    llvm::Type *Ty = llvm::ArrayType::get(NewVecTy, Members);

                                            unsigned functionCallConv) const {
      !isVariadic && isEffectivelyAAPCS_VFP(functionCallConv, false);
  if (isIllegalVectorType(Ty))
    return coerceIllegalVector(Ty);

    Ty = EnumTy->getDecl()->getIntegerType();

    const Type *Base = nullptr;
    uint64_t Members = 0;
      return classifyHomogeneousAggregate(Ty, Base, Members);
  } else if (getABIKind() == ARMABIInfo::AAPCS16_VFP) {
    const Type *Base = nullptr;
    uint64_t Members = 0;
      assert(Base && Members <= 4 && "unexpected homogeneous aggregate");

  if (getABIKind() == ARMABIInfo::AAPCS16_VFP &&

  uint64_t ABIAlign = 4;
  if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
      getABIKind() == ARMABIInfo::AAPCS) {

    assert(getABIKind() != ARMABIInfo::AAPCS16_VFP && "unexpected byval");
                                   TyAlign > ABIAlign);

    if (getTarget().isRenderScriptTarget()) {

                                          llvm::LLVMContext &VMContext) {

  if (!RT)
    return false;

  bool HadField = false;
       i != e; ++i, ++idx) {
                                          unsigned functionCallConv) const {
      !isVariadic && isEffectivelyAAPCS_VFP(functionCallConv, true);

      (VT->getElementType()->isFloat16Type() ||
       VT->getElementType()->isHalfType()))
    return coerceIllegalVector(RetTy);

    RetTy = EnumTy->getDecl()->getIntegerType();

  if (getABIKind() == APCS) {

  const Type *Base = nullptr;
  uint64_t Members = 0;
    return classifyHomogeneousAggregate(RetTy, Base, Members);

    if (getTarget().isRenderScriptTarget()) {
  } else if (Size <= 128 && getABIKind() == AAPCS16_VFP) {
      llvm::ArrayType::get(Int32Ty, llvm::alignTo(Size, 32) / 32);
bool ARMABIInfo::isIllegalVectorType(QualType Ty) const {
  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // FP16 vectors are handled in an FP16-agnostic way when the target has
    // no legal half type.
    if (!getTarget().hasLegalHalfType() &&
        (VT->getElementType()->isFloat16Type() ||
         VT->getElementType()->isHalfType()))
      return true;
    if (isAndroid()) {
      // Android's legacy (Clang 3.1) vector ABI additionally allowed
      // 3-element vectors.
      unsigned NumElements = VT->getNumElements();
      if (!llvm::isPowerOf2_32(NumElements) && NumElements != 3)
        return true;
    } else {
      // Check whether VT is legal.
      unsigned NumElements = VT->getNumElements();
      // NumElements should be a power of 2.
      if (!llvm::isPowerOf2_32(NumElements))
        return true;
      // ...
    }
  }
  return false;
}
/// Return true if a type contains any 16-bit floating point vectors.
bool ARMABIInfo::containsAnyFP16Vectors(QualType Ty) const {
  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    uint64_t NElements = AT->getSize().getZExtValue();
    if (NElements == 0)
      return false;
    return containsAnyFP16Vectors(AT->getElementType());
  } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();

    // If this is a C++ record, check the bases first.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
      if (llvm::any_of(CXXRD->bases(), [this](const CXXBaseSpecifier &B) {
            return containsAnyFP16Vectors(B.getType());
          }))
        return true;

    if (llvm::any_of(RD->fields(), [this](FieldDecl *FD) {
          return FD && containsAnyFP16Vectors(FD->getType());
        }))
      return true;

    return false;
  } else {
    if (const VectorType *VT = Ty->getAs<VectorType>())
      return (VT->getElementType()->isFloat16Type() ||
              VT->getElementType()->isHalfType());
    return false;
  }
}
bool ARMABIInfo::isLegalVectorTypeForSwift(CharUnits vectorSize,
                                           llvm::Type *eltTy,
                                           unsigned numElts) const {
  if (!llvm::isPowerOf2_32(numElts))
    return false;
  unsigned size = getDataLayout().getTypeStoreSizeInBits(eltTy);
  // ... only 8- and 16-byte vectors with element size <= 64 bits are legal ...
}
bool ARMABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  // Homogeneous aggregates for AAPCS-VFP must have base types of float,
  // double, or 64-bit or 128-bit vectors.
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    if (BT->getKind() == BuiltinType::Float ||
        BT->getKind() == BuiltinType::Double ||
        BT->getKind() == BuiltinType::LongDouble)
      return true;
  } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
    unsigned VecSize = getContext().getTypeSize(VT);
    if (VecSize == 64 || VecSize == 128)
      return true;
  }
  return false;
}

bool ARMABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
                                                   uint64_t Members) const {
  return Members <= 4;
}
bool ARMABIInfo::isEffectivelyAAPCS_VFP(unsigned callConvention,
                                        bool acceptHalf) const {
  // Give precedence to user-specified calling conventions.
  if (callConvention != llvm::CallingConv::C)
    return (callConvention == llvm::CallingConv::ARM_AAPCS_VFP);
  else
    return (getABIKind() == AAPCS_VFP) ||
           (acceptHalf && (getABIKind() == AAPCS16_VFP));
}
Address ARMABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                              QualType Ty) const {
  CharUnits SlotSize = CharUnits::fromQuantity(4);
  // ...
  CharUnits TySize = getContext().getTypeSizeInChars(Ty);
  CharUnits TyAlignForABI = getContext().getTypeUnadjustedAlignInChars(Ty);

  // Use indirect if the size of an illegal vector is bigger than 16 bytes.
  bool IsIndirect = false;
  const Type *Base = nullptr;
  uint64_t Members = 0;
  if (TySize > CharUnits::fromQuantity(16) && isIllegalVectorType(Ty)) {
    IsIndirect = true;
  // ARMv7k passes structs bigger than 16 bytes indirectly, in space
  // allocated by the caller.
  } else if (TySize > CharUnits::fromQuantity(16) &&
             getABIKind() == ARMABIInfo::AAPCS16_VFP &&
             !isHomogeneousAggregate(Ty, Base, Members)) {
    IsIndirect = true;
  // Otherwise, bound the type's ABI alignment.
  } else if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
             getABIKind() == ARMABIInfo::AAPCS) {
    TyAlignForABI = std::max(TyAlignForABI, CharUnits::fromQuantity(4));
    TyAlignForABI = std::min(TyAlignForABI, CharUnits::fromQuantity(8));
  } else if (getABIKind() == ARMABIInfo::AAPCS16_VFP) {
    TyAlignForABI = std::max(TyAlignForABI, CharUnits::fromQuantity(4));
    TyAlignForABI = std::min(TyAlignForABI, CharUnits::fromQuantity(16));
  } else {
    TyAlignForABI = CharUnits::fromQuantity(4);
  }

  std::pair<CharUnits, CharUnits> TyInfo = { TySize, TyAlignForABI };
  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, TyInfo,
                          SlotSize, /*AllowHigherAlign=*/true);
}
class NVPTXABIInfo : public ABIInfo {
public:
  NVPTXABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  void computeInfo(CGFunctionInfo &FI) const override;
  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;
};

class NVPTXTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  NVPTXTargetCodeGenInfo(CodeGenTypes &CGT)
      : TargetCodeGenInfo(new NVPTXABIInfo(CGT)) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const override;
  bool shouldEmitStaticExternCAliases() const override;

private:
  // Adds a NamedMDNode with F, Name, and Operand as operands, and adds the
  // resulting MDNode to the nvvm.annotations MDNode.
  static void addNVVMMetadata(llvm::Function *F, StringRef Name, int Operand);
};
/// Checks if the type is unsupported directly by the current target.
static bool isUnsupportedType(ASTContext &Context, QualType T) {
  // ... wide floats and >64-bit integers the target lacks ...
  if (const auto *AT = T->getAsArrayTypeUnsafe())
    return isUnsupportedType(Context, AT->getElementType());
  const auto *RT = T->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (const CXXBaseSpecifier &I : CXXRD->bases())
      if (isUnsupportedType(Context, I.getType()))
        return true;

  for (const FieldDecl *I : RD->fields())
    if (isUnsupportedType(Context, I->getType()))
      return true;
  return false;
}

/// Coerce the given type into an array with a maximum allowed element size.
static ABIArgInfo coerceToIntArrayWithLimit(QualType Ty, ASTContext &Context,
                                            llvm::LLVMContext &LLVMContext,
                                            unsigned MaxSize) {
  // Alignment and Size are measured in bits.
  const uint64_t Size = Context.getTypeSize(Ty);
  const uint64_t Alignment = Context.getTypeAlign(Ty);
  const unsigned Div = std::min<unsigned>(MaxSize, Alignment);
  llvm::Type *IntType = llvm::Type::getIntNTy(LLVMContext, Div);
  const uint64_t NumElements = (Size + Div - 1) / Div;
  return ABIArgInfo::getDirect(llvm::ArrayType::get(IntType, NumElements));
}
ABIArgInfo NVPTXABIInfo::classifyReturnType(QualType RetTy) const {
  // ...
  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();
  // ...
}

ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const {
  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();
  // ...
}

Address NVPTXABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                QualType Ty) const {
  llvm_unreachable("NVPTX does not support varargs");
}
void NVPTXTargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
  if (GV->isDeclaration())
    return;
  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
  if (!FD)
    return;

  llvm::Function *F = cast<llvm::Function>(GV);

  // Perform special handling in OpenCL mode.
  if (M.getLangOpts().OpenCL) {
    // Use OpenCL function attributes to check for kernel functions. By
    // default, all functions are device functions.
    if (FD->hasAttr<OpenCLKernelAttr>()) {
      // OpenCL __kernel functions get kernel metadata:
      // create a !{<func-ref>, metadata !"kernel", i32 1} node.
      addNVVMMetadata(F, "kernel", 1);
      // And kernel functions are not subject to inlining.
      F->addFnAttr(llvm::Attribute::NoInline);
    }
  }

  // Perform special handling in CUDA mode.
  if (M.getLangOpts().CUDA) {
    // CUDA __global__ functions get a kernel metadata entry. Since
    // __global__ functions cannot be called from the device, we do not
    // need to set the noinline attribute.
    if (FD->hasAttr<CUDAGlobalAttr>()) {
      addNVVMMetadata(F, "kernel", 1);
    }
    if (CUDALaunchBoundsAttr *Attr = FD->getAttr<CUDALaunchBoundsAttr>()) {
      // Create !{<func-ref>, metadata !"maxntidx", i32 <val>} node.
      llvm::APSInt MaxThreads(32);
      MaxThreads =
          Attr->getMaxThreads()->EvaluateKnownConstInt(M.getContext());
      if (MaxThreads > 0)
        addNVVMMetadata(F, "maxntidx", MaxThreads.getExtValue());

      // min blocks is an optional argument for CUDALaunchBoundsAttr; a
      // missing or zero value means no PTX directive is needed.
      if (Attr->getMinBlocks()) {
        llvm::APSInt MinBlocks(32);
        MinBlocks =
            Attr->getMinBlocks()->EvaluateKnownConstInt(M.getContext());
        if (MinBlocks > 0)
          // Create !{<func-ref>, metadata !"minctasm", i32 <val>} node.
          addNVVMMetadata(F, "minctasm", MinBlocks.getExtValue());
      }
    }
  }
}
void NVPTXTargetCodeGenInfo::addNVVMMetadata(llvm::Function *F, StringRef Name,
                                             int Operand) {
  llvm::Module *M = F->getParent();
  llvm::LLVMContext &Ctx = M->getContext();

  // Get the "nvvm.annotations" metadata node.
  llvm::NamedMDNode *MD = M->getOrInsertNamedMetadata("nvvm.annotations");

  llvm::Metadata *MDVals[] = {
      llvm::ConstantAsMetadata::get(F), llvm::MDString::get(Ctx, Name),
      llvm::ConstantAsMetadata::get(
          llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), Operand))};
  // Append metadata to nvvm.annotations.
  MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
}
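// Sketch of the resulting module-level metadata for a CUDA kernel (kernel
// name illustrative):
//
//   __global__ void k(int *p) { ... }
//
//   !nvvm.annotations = !{!0}
//   !0 = !{void (i32*)* @k, !"kernel", i32 1}
//
// A __launch_bounds__(256, 2) attribute additionally routes through the same
// helper to emit "maxntidx" = 256 and "minctasm" = 2 annotations.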
bool NVPTXTargetCodeGenInfo::shouldEmitStaticExternCAliases() const {
  return false;
}
class SystemZABIInfo : public SwiftABIInfo {
  bool HasVector;

public:
  SystemZABIInfo(CodeGenTypes &CGT, bool HV)
      : SwiftABIInfo(CGT), HasVector(HV) {}

  bool isPromotableIntegerType(QualType Ty) const;
  bool isCompoundType(QualType Ty) const;
  bool isVectorArgumentType(QualType Ty) const;
  bool isFPArgumentType(QualType Ty) const;
  QualType GetSingleElementType(QualType Ty) const;

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType ArgTy) const;

  void computeInfo(CGFunctionInfo &FI) const override {
    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (auto &I : FI.arguments())
      I.info = classifyArgumentType(I.type);
  }

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;

  bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type *> scalars,
                                    bool asReturnValue) const override {
    return occupiesMoreThan(CGT, scalars, /*total=*/4);
  }
  bool isSwiftErrorInRegister() const override {
    return false;
  }
};
bool SystemZABIInfo::isPromotableIntegerType(QualType Ty) const {
  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  // Promotable integer types are required to be promoted by the ABI.
  if (Ty->isPromotableIntegerType())
    return true;

  // 32-bit values must also be promoted.
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
    switch (BT->getKind()) {
    case BuiltinType::Int:
    case BuiltinType::UInt:
      return true;
    default:
      return false;
    }
  return false;
}

bool SystemZABIInfo::isCompoundType(QualType Ty) const {
  return (Ty->isAnyComplexType() ||
          Ty->isVectorType() ||
          isAggregateTypeForABI(Ty));
}

bool SystemZABIInfo::isVectorArgumentType(QualType Ty) const {
  return (HasVector &&
          Ty->isVectorType() &&
          getContext().getTypeSize(Ty) <= 128);
}

bool SystemZABIInfo::isFPArgumentType(QualType Ty) const {
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
    switch (BT->getKind()) {
    case BuiltinType::Float:
    case BuiltinType::Double:
      return true;
    default:
      return false;
    }
  return false;
}
QualType SystemZABIInfo::GetSingleElementType(QualType Ty) const {
  const RecordType *RT = Ty->getAs<RecordType>();

  if (RT && RT->isStructureOrClassType()) {
    const RecordDecl *RD = RT->getDecl();
    QualType Found;

    // If this is a C++ record, check the bases first.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
      for (const auto &I : CXXRD->bases()) {
        QualType Base = I.getType();
        // ... empty bases don't affect things either way ...
        if (!Found.isNull())
          return Ty;
        Found = GetSingleElementType(Base);
      }

    // Check the fields.
    for (const auto *FD : RD->fields()) {
      // ... unnamed bitfields and empty records are skipped ...
      if (!Found.isNull())
        return Ty;
      Found = GetSingleElementType(FD->getType());
    }
    if (!Found.isNull())
      return Found;
  }

  return Ty;
}
Address SystemZABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                  QualType Ty) const {
  // Every non-vector argument occupies 8 bytes and is passed by preference
  // in either GPRs or FPRs. Vector arguments occupy 8 or 16 bytes and are
  // always passed on the stack.
  Ty = getContext().getCanonicalType(Ty);
  auto TyInfo = getContext().getTypeInfoInChars(Ty);
  llvm::Type *ArgTy = CGF.ConvertTypeForMem(Ty);
  llvm::Type *DirectTy = ArgTy;
  ABIArgInfo AI = classifyArgumentType(Ty);
  bool IsIndirect = AI.isIndirect();
  bool InFPRs = false;
  bool IsVector = false;
  CharUnits UnpaddedSize;
  CharUnits DirectAlign;
  if (IsIndirect) {
    DirectTy = llvm::PointerType::getUnqual(DirectTy);
    UnpaddedSize = DirectAlign = CharUnits::fromQuantity(8);
  } else {
    if (AI.getCoerceToType())
      ArgTy = AI.getCoerceToType();
    InFPRs = ArgTy->isFloatTy() || ArgTy->isDoubleTy();
    IsVector = ArgTy->isVectorTy();
    UnpaddedSize = TyInfo.first;
    DirectAlign = TyInfo.second;
  }
  CharUnits PaddedSize = CharUnits::fromQuantity(8);
  if (IsVector && UnpaddedSize > PaddedSize)
    PaddedSize = CharUnits::fromQuantity(16);
  assert((UnpaddedSize <= PaddedSize) && "Invalid argument size.");

  CharUnits Padding = (PaddedSize - UnpaddedSize);

  llvm::Type *IndexTy = CGF.Int64Ty;
  llvm::Value *PaddedSizeV =
      llvm::ConstantInt::get(IndexTy, PaddedSize.getQuantity());

  if (IsVector) {
    // Vector arguments are always passed in the high bits of a single
    // (8-byte) or double (16-byte) stack slot; load and advance
    // __overflow_arg_area ("overflow_arg_area") and return that address.
    // ...
  }

  // Work out whether the argument will be passed in registers.
  unsigned MaxRegs, RegCountField, RegSaveIndex;
  CharUnits RegPadding;
  if (InFPRs) {
    MaxRegs = 4;              // Maximum of 4 FPR arguments.
    RegCountField = 1;        // __fpr
    RegSaveIndex = 16;        // save offset of f0
    RegPadding = CharUnits(); // floats use the high bits of an FPR
  } else {
    MaxRegs = 5;              // Maximum of 5 GPR arguments.
    RegCountField = 0;        // __gpr
    RegSaveIndex = 2;         // save offset of r2
    RegPadding = Padding;     // values are passed in the low bits of a GPR
  }

  // ... load RegCount from RegCountField and emit InRegs = RegCount < MaxRegs:
  llvm::Value *MaxRegsV = llvm::ConstantInt::get(IndexTy, MaxRegs);
  // ...
  CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);

  // In-register path: RegOffset = RegCount * PaddedSize + base offset.
  llvm::Value *ScaledRegCount =
      CGF.Builder.CreateMul(RegCount, PaddedSizeV, "scaled_reg_count");
  llvm::Value *RegBase =
      llvm::ConstantInt::get(IndexTy, RegSaveIndex * PaddedSize.getQuantity()
                                          + RegPadding.getQuantity());
  llvm::Value *RegOffset =
      CGF.Builder.CreateAdd(ScaledRegCount, RegBase, "reg_offset");
  // ... index into __reg_save_area, then bump the register count:
  llvm::Value *One = llvm::ConstantInt::get(IndexTy, 1);
  llvm::Value *NewRegCount =
      CGF.Builder.CreateAdd(RegCount, One, "reg_count");
  // ...

  // In-memory path: load and advance __overflow_arg_area
  // ("overflow_arg_area").
  // ...

  // Return the appropriate result.
  Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock,
                                 MemAddr, InMemBlock, "va_arg.addr");

  if (IsIndirect)
    ResAddr = Address(CGF.Builder.CreateLoad(ResAddr, "indirect_arg"),
                      TyInfo.second);
  return ResAddr;
}
ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy) const {
  // ...
  if (isVectorArgumentType(RetTy))
    return ABIArgInfo::getDirect();
  // ...
}

ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const {
  // ...
  // Integers and enums are extended to full register width.
  if (isPromotableIntegerType(Ty))
    return ABIArgInfo::getExtend(Ty);

  // Handle vector types and vector-like structure types. Note that, as
  // opposed to float-like structures, we do not allow any padding for
  // vector-like structures, so verify the sizes match.
  uint64_t Size = getContext().getTypeSize(Ty);
  QualType SingleElementTy = GetSingleElementType(Ty);
  if (isVectorArgumentType(SingleElementTy) &&
      getContext().getTypeSize(SingleElementTy) == Size)
    return ABIArgInfo::getDirect(CGT.ConvertType(SingleElementTy));

  // Values that are not 1, 2, 4 or 8 bytes in size are passed indirectly.
  if (Size != 8 && Size != 16 && Size != 32 && Size != 64)
    return getNaturalAlignIndirect(Ty, /*ByVal=*/false);

  // Handle small structures: passed as an unextended integer, a float, or a
  // double.
  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    // ...
    llvm::Type *PassTy;
    if (isFPArgumentType(SingleElementTy)) {
      assert(Size == 32 || Size == 64);
      PassTy = Size == 32 ? llvm::Type::getFloatTy(getVMContext())
                          : llvm::Type::getDoubleTy(getVMContext());
    } else
      PassTy = llvm::IntegerType::get(getVMContext(), Size);
    return ABIArgInfo::getDirect(PassTy);
  }

  // Non-structure compounds are passed indirectly.
  if (isCompoundType(Ty))
    return getNaturalAlignIndirect(Ty, /*ByVal=*/false);

  return ABIArgInfo::getDirect();
}
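// Worked example (sketch): a small struct whose flattened single element is
// floating-point travels in an FPR, while other 8-byte structs use a GPR:
//
//   struct W { double d; };  // GetSingleElementType -> double, Size == 64
//                            // -> ABIArgInfo::getDirect(double)
//   struct I { int a, b; };  // Size == 64, not FP-like
//                            // -> ABIArgInfo::getDirect(i64)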
class MSP430TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  MSP430TargetCodeGenInfo(CodeGenTypes &CGT)
      : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const override;
};

void MSP430TargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
  if (GV->isDeclaration())
    return;
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
    const auto *InterruptAttr = FD->getAttr<MSP430InterruptAttr>();
    if (!InterruptAttr)
      return;

    // Handle the 'interrupt' attribute.
    llvm::Function *F = cast<llvm::Function>(GV);

    // Step 1: Set ISR calling convention.
    F->setCallingConv(llvm::CallingConv::MSP430_INTR);

    // Step 2: Add the required attributes.
    F->addFnAttr(llvm::Attribute::NoInline);
    F->addFnAttr("interrupt", llvm::utostr(InterruptAttr->getNumber()));
  }
}
class MipsABIInfo : public ABIInfo {
  bool IsO32;
  unsigned MinABIStackAlignInBytes, StackAlignInBytes;
  void CoerceToIntArgs(uint64_t TySize,
                       SmallVectorImpl<llvm::Type *> &ArgList) const;
  // ...
public:
  MipsABIInfo(CodeGenTypes &CGT, bool _IsO32) :
    ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8),
    StackAlignInBytes(IsO32 ? 8 : 16) {}
  // ...
};

class MIPSTargetCodeGenInfo : public TargetCodeGenInfo {
  unsigned SizeOfUnwindException;
public:
  MIPSTargetCodeGenInfo(CodeGenTypes &CGT, bool IsO32)
      : TargetCodeGenInfo(new MipsABIInfo(CGT, IsO32)),
        SizeOfUnwindException(IsO32 ? 24 : 32) {}

  // ...

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
    if (!FD) return;
    llvm::Function *Fn = cast<llvm::Function>(GV);

    if (FD->hasAttr<MipsLongCallAttr>())
      Fn->addFnAttr("long-call");
    else if (FD->hasAttr<MipsShortCallAttr>())
      Fn->addFnAttr("short-call");

    // Other attributes do not have a meaning for declarations.
    if (GV->isDeclaration())
      return;

    if (FD->hasAttr<Mips16Attr>()) {
      Fn->addFnAttr("mips16");
    } else if (FD->hasAttr<NoMips16Attr>()) {
      Fn->addFnAttr("nomips16");
    }

    if (FD->hasAttr<MicroMipsAttr>())
      Fn->addFnAttr("micromips");
    else if (FD->hasAttr<NoMicroMipsAttr>())
      Fn->addFnAttr("nomicromips");

    const MipsInterruptAttr *Attr = FD->getAttr<MipsInterruptAttr>();
    if (!Attr)
      return;

    const char *Kind;
    switch (Attr->getInterrupt()) {
    case MipsInterruptAttr::eic: Kind = "eic"; break;
    case MipsInterruptAttr::sw0: Kind = "sw0"; break;
    case MipsInterruptAttr::sw1: Kind = "sw1"; break;
    case MipsInterruptAttr::hw0: Kind = "hw0"; break;
    case MipsInterruptAttr::hw1: Kind = "hw1"; break;
    case MipsInterruptAttr::hw2: Kind = "hw2"; break;
    case MipsInterruptAttr::hw3: Kind = "hw3"; break;
    case MipsInterruptAttr::hw4: Kind = "hw4"; break;
    case MipsInterruptAttr::hw5: Kind = "hw5"; break;
    }

    Fn->addFnAttr("interrupt", Kind);
  }

  unsigned getSizeOfUnwindException() const override {
    return SizeOfUnwindException;
  }
};
void MipsABIInfo::CoerceToIntArgs(
    uint64_t TySize, SmallVectorImpl<llvm::Type *> &ArgList) const {
  llvm::IntegerType *IntTy =
      llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8);

  // Add (TySize / MinABIStackAlignInBytes) args of IntTy.
  for (unsigned N = TySize / (MinABIStackAlignInBytes * 8); N; --N)
    ArgList.push_back(IntTy);

  // If necessary, add one more integer type to ArgList.
  unsigned R = TySize % (MinABIStackAlignInBytes * 8);
  if (R)
    ArgList.push_back(llvm::IntegerType::get(getVMContext(), R));
}

// In N32/N64, an aligned double-precision floating-point field is passed in
// a register.
llvm::Type *MipsABIInfo::HandleAggregates(QualType Ty, uint64_t TySize) const {
  SmallVector<llvm::Type *, 8> ArgList, IntArgList;

  if (IsO32) {
    CoerceToIntArgs(TySize, ArgList);
    return llvm::StructType::get(getVMContext(), ArgList);
  }

  // ... complex types, unions, and vectors use integer registers ...
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT || !RT->isStructureOrClassType()) {
    CoerceToIntArgs(TySize, ArgList);
    return llvm::StructType::get(getVMContext(), ArgList);
  }

  const RecordDecl *RD = RT->getDecl();
  const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
  assert(!(TySize % 8) && "Size of structure must be multiple of 8.");

  uint64_t LastOffset = 0;
  unsigned idx = 0;
  llvm::IntegerType *I64 = llvm::IntegerType::get(getVMContext(), 64);

  // Iterate over fields in the struct/class and check if there are any
  // aligned double fields.
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i, ++idx) {
    const QualType Ty = i->getType();
    const BuiltinType *BT = Ty->getAs<BuiltinType>();

    if (!BT || BT->getKind() != BuiltinType::Double)
      continue;

    uint64_t Offset = Layout.getFieldOffset(idx);
    if (Offset % 64) // Ignore doubles that are not aligned.
      continue;

    // Add ((Offset - LastOffset) / 64) args of type i64.
    for (unsigned j = (Offset - LastOffset) / 64; j > 0; --j)
      ArgList.push_back(I64);

    // Add the double type itself.
    ArgList.push_back(llvm::Type::getDoubleTy(getVMContext()));
    LastOffset = Offset + 64;
  }

  CoerceToIntArgs(TySize - LastOffset, IntArgList);
  ArgList.append(IntArgList.begin(), IntArgList.end());

  return llvm::StructType::get(getVMContext(), ArgList);
}
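// Worked example (sketch): on O32 (MinABIStackAlignInBytes == 4) a 12-byte
// struct with no aligned double fields is coerced by CoerceToIntArgs to
// { i32, i32, i32 }:
//
//   struct S { int a; short b; char c[6]; };  // TySize == 96 bits
//
// On N32/N64 the unit is i64, and an aligned double field is kept as a
// double element so it can travel in an FPR.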
llvm::Type *MipsABIInfo::getPaddingType(uint64_t OrigOffset,
                                        uint64_t Offset) const {
  if (OrigOffset + MinABIStackAlignInBytes > Offset)
    return nullptr;

  return llvm::IntegerType::get(getVMContext(), (Offset - OrigOffset) * 8);
}

ABIArgInfo
MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  uint64_t OrigOffset = Offset;
  uint64_t TySize = getContext().getTypeSize(Ty);
  uint64_t Align = getContext().getTypeAlign(Ty) / 8;

  Align = std::min(std::max(Align, (uint64_t)MinABIStackAlignInBytes),
                   (uint64_t)StackAlignInBytes);
  unsigned CurrOffset = llvm::alignTo(Offset, Align);
  Offset = CurrOffset + llvm::alignTo(TySize, Align * 8) / 8;

  if (isAggregateTypeForABI(Ty) || Ty->isVectorType()) {
    // ... empty aggregates are ignored ...
    if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
      Offset = OrigOffset + MinABIStackAlignInBytes;
      return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
    }

    // Aggregates are passed directly by coercing to another structure type;
    // padding is inserted if the aggregate's offset is unaligned.
    return ABIArgInfo::getDirect(HandleAggregates(Ty, TySize), 0,
                                 getPaddingType(OrigOffset, CurrOffset));
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  // All integral types are promoted to the GPR width.
  if (Ty->isIntegralOrEnumerationType())
    return extendType(Ty);

  return ABIArgInfo::getDirect(
      nullptr, 0, IsO32 ? nullptr : getPaddingType(OrigOffset, CurrOffset));
}
llvm::Type *
MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size) const {
  if (IsO32 && Size <= 128) {
    // ... an O32 aggregate is returned in registers if every field is a
    // float or double:
    if (const RecordType *RT = RetTy->getAsStructureType()) {
      RecordDecl::field_iterator b = RT->getDecl()->field_begin(),
                                 e = RT->getDecl()->field_end();
      for (; b != e; ++b) {
        // ...
      }
      // ...
    }
  }

  SmallVector<llvm::Type *, 8> RTList;
  CoerceToIntArgs(Size, RTList);
  return llvm::StructType::get(getVMContext(), RTList);
}

ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const {
  uint64_t Size = getContext().getTypeSize(RetTy);

  // O32 doesn't treat zero-sized structs as ignored.
  if (!IsO32 && Size == 0)
    return ABIArgInfo::getIgnore();
  // ...

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();
  // ...
}

Address MipsABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                               QualType OrigTy) const {
  QualType Ty = OrigTy;

  // Integer arguments are promoted to 32 bits on O32 and 64 bits on N32/N64.
  unsigned SlotSizeInBits = IsO32 ? 32 : 64;
  // ...
  bool DidPromote = false;
  // ... promote Ty if needed, then use the common void* va_arg emission:
  Address Addr = emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*IsIndirect=*/false,
                                  TyInfo, ArgSlotSize,
                                  /*AllowHigherAlign=*/true);
  // ...
}
class AVRTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  AVRTargetCodeGenInfo(CodeGenTypes &CGT)
      : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    if (GV->isDeclaration())
      return;
    const auto *FD = dyn_cast_or_null<FunctionDecl>(D);
    if (!FD) return;
    auto *Fn = cast<llvm::Function>(GV);

    if (FD->getAttr<AVRInterruptAttr>())
      Fn->addFnAttr("interrupt");

    if (FD->getAttr<AVRSignalAttr>())
      Fn->addFnAttr("signal");
  }
};
class TCETargetCodeGenInfo : public DefaultTargetCodeGenInfo {
public:
  TCETargetCodeGenInfo(CodeGenTypes &CGT)
      : DefaultTargetCodeGenInfo(CGT) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const override;
};

void TCETargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
  if (GV->isDeclaration())
    return;
  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
  if (!FD) return;

  llvm::Function *F = cast<llvm::Function>(GV);

  if (M.getLangOpts().OpenCL) {
    if (FD->hasAttr<OpenCLKernelAttr>()) {
      // OpenCL C kernel functions are not subject to inlining.
      F->addFnAttr(llvm::Attribute::NoInline);
      const ReqdWorkGroupSizeAttr *Attr = FD->getAttr<ReqdWorkGroupSizeAttr>();
      if (Attr) {
        // Convert the reqd_work_group_size() attribute to metadata.
        llvm::LLVMContext &Context = F->getContext();
        llvm::NamedMDNode *OpenCLMetadata =
            M.getModule().getOrInsertNamedMetadata(
                "opencl.kernel_wg_size_info");

        SmallVector<llvm::Metadata *, 5> Operands;
        Operands.push_back(llvm::ConstantAsMetadata::get(F));

        Operands.push_back(
            llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
                M.Int32Ty, llvm::APInt(32, Attr->getXDim()))));
        Operands.push_back(
            llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
                M.Int32Ty, llvm::APInt(32, Attr->getYDim()))));
        Operands.push_back(
            llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
                M.Int32Ty, llvm::APInt(32, Attr->getZDim()))));

        // Add a boolean constant operand for "required" (true) or "hint"
        // (false); currently always true since the hint is not yet
        // implemented.
        Operands.push_back(
            llvm::ConstantAsMetadata::get(llvm::ConstantInt::getTrue(Context)));
        OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Operands));
      }
    }
  }
}
class HexagonABIInfo : public ABIInfo {
  // ...
};

ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty) const {
  // ...
  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();
  // ...
}

ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const {
  // ...
  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();
  // ...
}
class LanaiABIInfo : public DefaultABIInfo {
public:
  LanaiABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}

  bool shouldUseInReg(QualType Ty, CCState &State) const;
  // ...
};

bool LanaiABIInfo::shouldUseInReg(QualType Ty, CCState &State) const {
  unsigned Size = getContext().getTypeSize(Ty);
  unsigned SizeInRegs = llvm::alignTo(Size, 32U) / 32U;

  if (SizeInRegs == 0)
    return false;

  if (SizeInRegs > State.FreeRegs) {
    State.FreeRegs = 0;
    return false;
  }

  State.FreeRegs -= SizeInRegs;

  return true;
}

ABIArgInfo LanaiABIInfo::getIndirectResult(QualType Ty, bool ByVal,
                                           CCState &State) const {
  if (!ByVal) {
    if (State.FreeRegs) {
      --State.FreeRegs; // Non-byval indirects just use one pointer.
      return getNaturalAlignIndirectInReg(Ty);
    }
    return getNaturalAlignIndirect(Ty, false);
  }

  // Compute the byval alignment.
  const unsigned MinABIStackAlignInBytes = 4;
  unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
  return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true,
                                 /*Realign=*/TypeAlign >
                                     MinABIStackAlignInBytes);
}

ABIArgInfo LanaiABIInfo::classifyArgumentType(QualType Ty,
                                              CCState &State) const {
  // Check with the C++ ABI first.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (RT) {
    CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
    if (RAA == CGCXXABI::RAA_Indirect)
      return getIndirectResult(Ty, /*ByVal=*/false, State);
    if (RAA == CGCXXABI::RAA_DirectInMemory)
      return getIndirectResult(Ty, /*ByVal=*/true, State);
  }

  if (isAggregateTypeForABI(Ty)) {
    // ... flexible arrays and empty records are handled first ...
    llvm::LLVMContext &LLVMContext = getVMContext();
    unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
    if (SizeInRegs <= State.FreeRegs) {
      llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
      SmallVector<llvm::Type *, 3> Elements(SizeInRegs, Int32);
      llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
      State.FreeRegs -= SizeInRegs;
      return ABIArgInfo::getDirectInReg(Result);
    }
    State.FreeRegs = 0;
    return getIndirectResult(Ty, /*ByVal=*/true, State);
  }

  // Treat an enum type as its underlying type.
  if (const auto *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  bool InReg = shouldUseInReg(Ty, State);
  // ... promotable integers are extended; others passed direct (InReg if
  // registers remain) ...
}
class AMDGPUABIInfo final : public DefaultABIInfo {
private:
  static const unsigned MaxNumRegsForArgsRet = 16;

  unsigned numRegsForType(QualType Ty) const;

  bool isHomogeneousAggregateBaseType(QualType Ty) const override;
  bool isHomogeneousAggregateSmallEnough(const Type *Base,
                                         uint64_t Members) const override;

  // Coerce HIP kernel pointer arguments from generic pointers into the
  // given address space.
  llvm::Type *coerceKernelArgumentType(llvm::Type *Ty, unsigned FromAS,
                                       unsigned ToAS) const {
    // Structure types.
    if (auto STy = dyn_cast<llvm::StructType>(Ty)) {
      SmallVector<llvm::Type *, 8> EltTys;
      bool Changed = false;
      for (auto T : STy->elements()) {
        auto NT = coerceKernelArgumentType(T, FromAS, ToAS);
        EltTys.push_back(NT);
        Changed |= (NT != T);
      }
      // Skip if the struct has no pointer in the source address space.
      if (!Changed)
        return Ty;
      if (STy->hasName())
        return llvm::StructType::create(
            getVMContext(), EltTys, (STy->getName() + ".coerce").str(),
            STy->isPacked());
      return llvm::StructType::get(getVMContext(), EltTys, STy->isPacked());
    }
    // Array types.
    if (auto ATy = dyn_cast<llvm::ArrayType>(Ty)) {
      auto T = ATy->getElementType();
      auto NT = coerceKernelArgumentType(T, FromAS, ToAS);
      // Skip if the array has no pointer in the source address space.
      if (NT == T)
        return Ty;
      return llvm::ArrayType::get(NT, ATy->getNumElements());
    }
    // Single value types.
    if (Ty->isPointerTy() && Ty->getPointerAddressSpace() == FromAS)
      return llvm::PointerType::get(
          cast<llvm::PointerType>(Ty)->getElementType(), ToAS);
    return Ty;
  }

public:
  explicit AMDGPUABIInfo(CodeGen::CodeGenTypes &CGT) :
    DefaultABIInfo(CGT) {}
  // ...
};

bool AMDGPUABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  return true;
}

bool AMDGPUABIInfo::isHomogeneousAggregateSmallEnough(
    const Type *Base, uint64_t Members) const {
  uint32_t NumRegs = (getContext().getTypeSize(Base) + 31) / 32;

  // Homogeneous aggregates may occupy at most 16 registers.
  return Members * NumRegs <= MaxNumRegsForArgsRet;
}

/// Estimate number of registers the type will use when passed in registers.
unsigned AMDGPUABIInfo::numRegsForType(QualType Ty) const {
  unsigned NumRegs = 0;

  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // Compute from the number of elements; the reported size includes the
    // padding fourth element of 3-vectors.
    QualType EltTy = VT->getElementType();
    unsigned EltSize = getContext().getTypeSize(EltTy);

    // 16-bit element vectors should be passed packed.
    if (EltSize == 16)
      return (VT->getNumElements() + 1) / 2;

    unsigned EltNumRegs = (EltSize + 31) / 32;
    return EltNumRegs * VT->getNumElements();
  }

  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();
    for (const FieldDecl *Field : RD->fields()) {
      QualType FieldTy = Field->getType();
      NumRegs += numRegsForType(FieldTy);
    }
    return NumRegs;
  }

  return (getContext().getTypeSize(Ty) + 31) / 32;
}

void AMDGPUABIInfo::computeInfo(CGFunctionInfo &FI) const {
  llvm::CallingConv::ID CC = FI.getCallingConvention();

  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());

  unsigned NumRegsLeft = MaxNumRegsForArgsRet;
  for (auto &Arg : FI.arguments()) {
    if (CC == llvm::CallingConv::AMDGPU_KERNEL) {
      Arg.info = classifyKernelArgumentType(Arg.type);
    } else {
      Arg.info = classifyArgumentType(Arg.type, NumRegsLeft);
    }
  }
}

Address AMDGPUABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                 QualType Ty) const {
  llvm_unreachable("AMDGPU does not support varargs");
}

ABIArgInfo AMDGPUABIInfo::classifyReturnType(QualType RetTy) const {
  // ...
  if (numRegsForType(RetTy) <= MaxNumRegsForArgsRet)
    return ABIArgInfo::getDirect();
  // ...
}

ABIArgInfo AMDGPUABIInfo::classifyKernelArgumentType(QualType Ty) const {
  // ... for HIP, generic pointers in kernel arguments are coerced into the
  // device's global address space:
  //   LTy = coerceKernelArgumentType(OrigLTy, FromAS, ToAS);
  // ...
}

ABIArgInfo AMDGPUABIInfo::classifyArgumentType(QualType Ty,
                                               unsigned &NumRegsLeft) const {
  assert(NumRegsLeft <= MaxNumRegsForArgsRet && "register estimate underflow");

  if (isAggregateTypeForABI(Ty)) {
    // ... records and arrays small enough are passed direct:
    unsigned Size = getContext().getTypeSize(Ty);
    unsigned NumRegs = (Size + 31) / 32;
    NumRegsLeft -= std::min(NumRegsLeft, NumRegs);
    // ...
    if (NumRegsLeft > 0) {
      unsigned NumRegs = numRegsForType(Ty);
      if (NumRegsLeft >= NumRegs) {
        NumRegsLeft -= NumRegs;
        return ABIArgInfo::getDirect();
      }
    }
    // ... otherwise fall back to the indirect path ...
  }

  // Otherwise just do the default thing, while tracking register usage.
  ABIArgInfo ArgInfo = DefaultABIInfo::classifyArgumentType(Ty);
  if (!ArgInfo.isIndirect()) {
    unsigned NumRegs = numRegsForType(Ty);
    NumRegsLeft -= std::min(NumRegs, NumRegsLeft);
  }
  return ArgInfo;
}
class AMDGPUTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  AMDGPUTargetCodeGenInfo(CodeGenTypes &CGT)
      : TargetCodeGenInfo(new AMDGPUABIInfo(CGT)) {}
  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const override;
  unsigned getOpenCLKernelCallingConv() const override;

  llvm::Constant *getNullPointer(const CodeGen::CodeGenModule &CGM,
                                 llvm::PointerType *T,
                                 QualType QT) const override;

  LangAS getASTAllocaAddressSpace() const override {
    return getLangASFromTargetAS(
        getABIInfo().getDataLayout().getAllocaAddrSpace());
  }
  LangAS getGlobalVarAddressSpace(CodeGenModule &CGM,
                                  const VarDecl *D) const override;
  llvm::SyncScope::ID getLLVMSyncScopeID(const LangOptions &LangOpts,
                                         SyncScope Scope,
                                         llvm::AtomicOrdering Ordering,
                                         llvm::LLVMContext &Ctx) const override;
  llvm::Function *
  createEnqueuedBlockKernel(CodeGenFunction &CGF,
                            llvm::Function *BlockInvokeFunc,
                            llvm::Value *BlockLiteral) const override;
  bool shouldEmitStaticExternCAliases() const override;
  void setCUDAKernelCallingConvention(const FunctionType *&FT) const override;
};

static bool requiresAMDGPUProtectedVisibility(const Decl *D,
                                              llvm::GlobalValue *GV) {
  if (GV->getVisibility() != llvm::GlobalValue::HiddenVisibility)
    return false;

  return D->hasAttr<OpenCLKernelAttr>() ||
         (isa<FunctionDecl>(D) && D->hasAttr<CUDAGlobalAttr>()) ||
         (isa<VarDecl>(D) &&
          (D->hasAttr<CUDADeviceAttr>() || D->hasAttr<CUDAConstantAttr>() ||
           D->hasAttr<HIPPinnedShadowAttr>()));
}

static bool requiresAMDGPUDefaultVisibility(const Decl *D,
                                            llvm::GlobalValue *GV) {
  if (GV->getVisibility() != llvm::GlobalValue::HiddenVisibility)
    return false;

  return isa<VarDecl>(D) && D->hasAttr<HIPPinnedShadowAttr>();
}
void AMDGPUTargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
  if (requiresAMDGPUDefaultVisibility(D, GV)) {
    GV->setVisibility(llvm::GlobalValue::DefaultVisibility);
    GV->setDSOLocal(false);
  } else if (requiresAMDGPUProtectedVisibility(D, GV)) {
    GV->setVisibility(llvm::GlobalValue::ProtectedVisibility);
    GV->setDSOLocal(true);
  }

  if (GV->isDeclaration())
    return;
  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
  if (!FD)
    return;

  llvm::Function *F = cast<llvm::Function>(GV);

  const auto *ReqdWGS = M.getLangOpts().OpenCL ?
      FD->getAttr<ReqdWorkGroupSizeAttr>() : nullptr;

  const bool IsOpenCLKernel = M.getLangOpts().OpenCL &&
                              FD->hasAttr<OpenCLKernelAttr>();
  const bool IsHIPKernel = M.getLangOpts().HIP &&
                           FD->hasAttr<CUDAGlobalAttr>();
  if ((IsOpenCLKernel || IsHIPKernel) &&
      (M.getTriple().getOS() == llvm::Triple::AMDHSA))
    F->addFnAttr("amdgpu-implicitarg-num-bytes", "56");

  const auto *FlatWGS = FD->getAttr<AMDGPUFlatWorkGroupSizeAttr>();
  if (ReqdWGS || FlatWGS) {
    unsigned Min = 0;
    unsigned Max = 0;
    if (FlatWGS) {
      Min = FlatWGS->getMin()
                ->EvaluateKnownConstInt(M.getContext())
                .getExtValue();
      Max = FlatWGS->getMax()
                ->EvaluateKnownConstInt(M.getContext())
                .getExtValue();
    }
    if (ReqdWGS && Min == 0 && Max == 0)
      Min = Max = ReqdWGS->getXDim() * ReqdWGS->getYDim() * ReqdWGS->getZDim();

    if (Min != 0) {
      assert(Min <= Max && "Min must be less than or equal Max");

      std::string AttrVal = llvm::utostr(Min) + "," + llvm::utostr(Max);
      F->addFnAttr("amdgpu-flat-work-group-size", AttrVal);
    } else
      assert(Max == 0 && "Max must be zero");
  } else if (IsOpenCLKernel || IsHIPKernel) {
    // By default, restrict the maximum size to the value specified by
    // --gpu-max-threads-per-block (or its default).
    std::string AttrVal =
        std::string("1,") + llvm::utostr(M.getLangOpts().GPUMaxThreadsPerBlock);
    F->addFnAttr("amdgpu-flat-work-group-size", AttrVal);
  }

  if (const auto *Attr = FD->getAttr<AMDGPUWavesPerEUAttr>()) {
    unsigned Min =
        Attr->getMin()->EvaluateKnownConstInt(M.getContext()).getExtValue();
    unsigned Max = Attr->getMax() ? Attr->getMax()
                                        ->EvaluateKnownConstInt(M.getContext())
                                        .getExtValue()
                                  : 0;

    if (Min != 0) {
      assert((Max == 0 || Min <= Max) && "Min must be less than or equal Max");

      std::string AttrVal = llvm::utostr(Min);
      if (Max != 0)
        AttrVal = AttrVal + "," + llvm::utostr(Max);
      F->addFnAttr("amdgpu-waves-per-eu", AttrVal);
    } else
      assert(Max == 0 && "Max must be zero");
  }

  if (const auto *Attr = FD->getAttr<AMDGPUNumSGPRAttr>()) {
    unsigned NumSGPR = Attr->getNumSGPR();

    if (NumSGPR != 0)
      F->addFnAttr("amdgpu-num-sgpr", llvm::utostr(NumSGPR));
  }

  if (const auto *Attr = FD->getAttr<AMDGPUNumVGPRAttr>()) {
    uint32_t NumVGPR = Attr->getNumVGPR();

    if (NumVGPR != 0)
      F->addFnAttr("amdgpu-num-vgpr", llvm::utostr(NumVGPR));
  }
}
unsigned AMDGPUTargetCodeGenInfo::getOpenCLKernelCallingConv() const {
  return llvm::CallingConv::AMDGPU_KERNEL;
}

// Currently LLVM assumes null pointers always have value 0, which results in
// incorrectly transformed IR. Therefore, instead of using 0 as the null
// pointer in address spaces where that is wrong, translate the null literal
// through an addrspacecast.
llvm::Constant *AMDGPUTargetCodeGenInfo::getNullPointer(
    const CodeGen::CodeGenModule &CGM, llvm::PointerType *PT,
    QualType QT) const {
  auto &Ctx = CGM.getContext();
  // ... address spaces whose null value is all-zeros take the fast path:
  //   return llvm::ConstantPointerNull::get(PT);
  auto NPT = llvm::PointerType::get(
      PT->getElementType(),
      Ctx.getTargetAddressSpace(LangAS::opencl_generic));
  return llvm::ConstantExpr::getAddrSpaceCast(
      llvm::ConstantPointerNull::get(NPT), PT);
}

LangAS
AMDGPUTargetCodeGenInfo::getGlobalVarAddressSpace(CodeGenModule &CGM,
                                                  const VarDecl *D) const {
  assert(!CGM.getLangOpts().OpenCL &&
         !(CGM.getLangOpts().CUDA && CGM.getLangOpts().CUDAIsDevice) &&
         "Address space agnostic languages only");
  LangAS DefaultGlobalAS = getLangASFromTargetAS(
      CGM.getContext().getTargetAddressSpace(LangAS::opencl_global));
  if (!D)
    return DefaultGlobalAS;
  // ... an explicit address space on the declaration wins ...

  if (CGM.isTypeConstant(D->getType(), false)) {
    if (auto ConstAS = CGM.getTarget().getConstantAddressSpace())
      return ConstAS.getValue();
  }
  return DefaultGlobalAS;
}

llvm::SyncScope::ID
AMDGPUTargetCodeGenInfo::getLLVMSyncScopeID(const LangOptions &LangOpts,
                                            SyncScope Scope,
                                            llvm::AtomicOrdering Ordering,
                                            llvm::LLVMContext &Ctx) const {
  std::string Name;
  // ... map the OpenCL scope onto "workgroup", "agent", "wavefront", or "" ...

  if (Ordering != llvm::AtomicOrdering::SequentiallyConsistent) {
    if (!Name.empty())
      Name = Twine(Twine(Name) + Twine("-")).str();
    Name = Twine(Twine(Name) + Twine("one-as")).str();
  }

  return Ctx.getOrInsertSyncScopeID(Name);
}

bool AMDGPUTargetCodeGenInfo::shouldEmitStaticExternCAliases() const {
  return false;
}

void AMDGPUTargetCodeGenInfo::setCUDAKernelCallingConvention(
    const FunctionType *&FT) const {
  FT = getABIInfo().getContext().adjustFunctionType(
      FT, FT->getExtInfo().withCallingConv(CC_OpenCLKernel));
}
class SparcV8ABIInfo : public DefaultABIInfo {
  // ...
};
class SparcV9ABIInfo : public ABIInfo {
  // ...
};

// Build a coercion type for a structure being passed in registers on
// SPARC64. Floating-point members are kept as distinct elements so they can
// travel in FP registers; everything else becomes integer padding.
struct CoerceBuilder {
  llvm::LLVMContext &Context;
  const llvm::DataLayout &DL;
  SmallVector<llvm::Type *, 8> Elems;
  uint64_t Size;
  bool InReg;

  CoerceBuilder(llvm::LLVMContext &c, const llvm::DataLayout &dl)
      : Context(c), DL(dl), Size(0), InReg(false) {}

  // Pad Elems with integers until Size is ToSize.
  void pad(uint64_t ToSize) {
    assert(ToSize >= Size && "Cannot remove elements");
    if (ToSize == Size)
      return;

    // Finish the current 64-bit word.
    uint64_t Aligned = llvm::alignTo(Size, 64);
    if (Aligned > Size && Aligned <= ToSize) {
      Elems.push_back(llvm::IntegerType::get(Context, Aligned - Size));
      Size = Aligned;
    }

    // Add whole 64-bit words.
    while (Size + 64 <= ToSize) {
      Elems.push_back(llvm::Type::getInt64Ty(Context));
      Size += 64;
    }

    // Final in-word padding.
    if (Size < ToSize) {
      Elems.push_back(llvm::IntegerType::get(Context, ToSize - Size));
      Size = ToSize;
    }
  }

  // Add a floating point element at Offset.
  void addFloat(uint64_t Offset, llvm::Type *Ty, unsigned Bits) {
    // Unaligned floats are treated as integers.
    if (Offset % Bits)
      return;
    // The InReg flag is only required if there are any floats < 64 bits.
    if (Bits < 64)
      InReg = true;
    pad(Offset);
    Elems.push_back(Ty);
    Size = Offset + Bits;
  }

  // Add a struct type to the coercion type, starting at Offset (in bits).
  void addStruct(uint64_t Offset, llvm::StructType *StrTy) {
    const llvm::StructLayout *Layout = DL.getStructLayout(StrTy);
    for (unsigned i = 0, e = StrTy->getNumElements(); i != e; ++i) {
      llvm::Type *ElemTy = StrTy->getElementType(i);
      uint64_t ElemOffset = Offset + Layout->getElementOffsetInBits(i);
      switch (ElemTy->getTypeID()) {
      case llvm::Type::StructTyID:
        addStruct(ElemOffset, cast<llvm::StructType>(ElemTy));
        break;
      case llvm::Type::FloatTyID:
        addFloat(ElemOffset, ElemTy, 32);
        break;
      case llvm::Type::DoubleTyID:
        addFloat(ElemOffset, ElemTy, 64);
        break;
      case llvm::Type::FP128TyID:
        addFloat(ElemOffset, ElemTy, 128);
        break;
      case llvm::Type::PointerTyID:
        if (ElemOffset % 64 == 0) {
          pad(ElemOffset);
          Elems.push_back(ElemTy);
          Size += 64;
        }
        break;
      default:
        break;
      }
    }
  }

  // Check if Ty is a usable substitute for the coercion type.
  bool isUsableType(llvm::StructType *Ty) const {
    return llvm::makeArrayRef(Elems) == Ty->elements();
  }

  // Get the coercion type as a literal struct type.
  llvm::Type *getType() const {
    if (Elems.size() == 1)
      return Elems.front();
    return llvm::StructType::get(Context, Elems);
  }
};

ABIArgInfo
SparcV9ABIInfo::classifyType(QualType Ty, unsigned SizeLimit) const {
  // ...
  uint64_t Size = getContext().getTypeSize(Ty);

  // Anything too big to fit in registers is passed with an explicit
  // indirect / sret pointer.
  if (Size > SizeLimit)
    return getNaturalAlignIndirect(Ty, /*ByVal=*/false);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  // Integer types smaller than a register are extended.
  if (Size < 64 && Ty->isIntegerType())
    return ABIArgInfo::getExtend(Ty);
  // ...

  // This is a small aggregate type that should be passed in registers.
  // Build a coercion type from the LLVM struct type.
  llvm::StructType *StrTy = dyn_cast<llvm::StructType>(CGT.ConvertType(Ty));
  if (!StrTy)
    return ABIArgInfo::getDirect();

  CoerceBuilder CB(getVMContext(), getDataLayout());
  CB.addStruct(0, StrTy);
  CB.pad(llvm::alignTo(CB.DL.getTypeSizeInBits(StrTy), 64));

  // Try to use the original type for coercion.
  llvm::Type *CoerceTy = CB.isUsableType(StrTy) ? StrTy : CB.getType();

  // ... CB.InReg selects between getDirectInReg and getDirect ...
}

Address SparcV9ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                  QualType Ty) const {
  // ...
  llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
  // ...
  switch (AI.getKind()) {
  case ABIArgInfo::Expand:
  case ABIArgInfo::CoerceAndExpand:
  case ABIArgInfo::InAlloca:
    llvm_unreachable("Unsupported ABI kind for va_arg");
  // ... Extend / Direct / Indirect / Ignore cases compute ArgAddr and the
  // slot stride, then the va_list pointer is advanced ...
  }
  // ...
  return Builder.CreateBitCast(ArgAddr, ArgPtrTy, "arg.addr");
}

// SPARC register numbers used for the DWARF EH register-size table.
// (Four8 and Eight8 cover the 4- and 8-byte register groups.)
llvm::IntegerType *i8 = CGF.Int8Ty;
llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
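// Worked example (sketch) for the CoerceBuilder above on SPARC64:
//
//   struct S { int a; float b; double c; };
//
// addStruct() visits the float at bit offset 32 (pad() first emits an i32
// covering the leading int) and the double at offset 64, yielding the
// coercion type { i32, float, double }; InReg is set because a float
// narrower than 64 bits was seen.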
class ARCABIInfo : public DefaultABIInfo {
public:
  using DefaultABIInfo::DefaultABIInfo;

private:
  // ...
  void updateState(const ABIArgInfo &Info, QualType Ty, CCState &State) const {
    if (!State.FreeRegs)
      return;
    // ... direct in-register values consume registers proportional to size:
    unsigned sz = (getContext().getTypeSize(Ty) + 31) / 32;
    if (sz < State.FreeRegs)
      State.FreeRegs -= sz;
    else
      State.FreeRegs = 0;
  }

  void computeInfo(CGFunctionInfo &FI) const override {
    // ...
    for (auto &I : FI.arguments()) {
      I.info = classifyArgumentType(I.type, State.FreeRegs);
      updateState(I.info, I.type, State);
    }
  }
  // ...
};

ABIArgInfo ARCABIInfo::getIndirectByValue(QualType Ty) const {
  // Compute the byval alignment.
  const unsigned MinABIStackAlignInBytes = 4;
  unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
  return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true,
                                 /*Realign=*/TypeAlign >
                                     MinABIStackAlignInBytes);
}

ABIArgInfo ARCABIInfo::classifyArgumentType(QualType Ty,
                                            uint8_t FreeRegs) const {
  // Handle the generic C++ ABI.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (RT) {
    CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
    if (RAA == CGCXXABI::RAA_Indirect)
      return getIndirectByRef(Ty, FreeRegs > 0);

    if (RAA == CGCXXABI::RAA_DirectInMemory)
      return getIndirectByValue(Ty);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  auto SizeInRegs = llvm::alignTo(getContext().getTypeSize(Ty), 32) / 32;

  if (isAggregateTypeForABI(Ty)) {
    // Structures with flexible arrays are always indirect.
    if (RT && RT->getDecl()->hasFlexibleArrayMember())
      return getIndirectByValue(Ty);
    // ...

    llvm::LLVMContext &LLVMContext = getVMContext();

    llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
    SmallVector<llvm::Type *, 3> Elements(SizeInRegs, Int32);
    llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);

    return FreeRegs >= SizeInRegs ?
        ABIArgInfo::getDirectInReg(Result) :
        ABIArgInfo::getDirect(Result, 0, nullptr, false);
  }
  // ...
}

ABIArgInfo ARCABIInfo::classifyReturnType(QualType RetTy) const {
  // ...
  // Return values of more than 4 registers are returned by reference.
  auto RetSize = llvm::alignTo(getContext().getTypeSize(RetTy), 32) / 32;
  if (RetSize > 4)
    return getIndirectByRef(RetTy, /*HasFreeRegs=*/true);

  return DefaultABIInfo::classifyReturnType(RetTy);
}
/// A cache of TypeString encodings used while building XCore type strings.
class TypeStringCache {
  enum Status {NonRecursive, Recursive, Incomplete, IncompleteUsed};
  struct Entry {
    std::string Str;     // The encoded TypeString for the type.
    enum Status State;   // Information about the encoding in 'Str'.
    std::string Swapped; // A temporary place holder for a Recursive encoding
                         // during the expansion of a RecordType's members.
  };
  std::map<const IdentifierInfo *, struct Entry> Map;
  unsigned IncompleteCount;     // Number of Incomplete entries in the Map.
  unsigned IncompleteUsedCount; // Number of IncompleteUsed entries in the Map.

public:
  TypeStringCache() : IncompleteCount(0), IncompleteUsedCount(0) {}
  void addIncomplete(const IdentifierInfo *ID, std::string StubEnc);
  bool removeIncomplete(const IdentifierInfo *ID);
  void addIfComplete(const IdentifierInfo *ID, StringRef Str,
                     bool IsRecursive);
  StringRef lookupStr(const IdentifierInfo *ID);
};

/// TypeString encodings for enum and union fields must be ordered.
/// FieldEncoding is a helper for this ordering process.
class FieldEncoding {
  bool HasName;
  std::string Enc;

public:
  FieldEncoding(bool b, SmallStringEnc &e) : HasName(b), Enc(e.c_str()) {}
  StringRef str() { return Enc; }
  bool operator<(const FieldEncoding &rhs) const {
    if (HasName != rhs.HasName) return HasName;
    return Enc < rhs.Enc;
  }
};
class XCoreABIInfo : public DefaultABIInfo {
public:
  XCoreABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;
};

class XCoreTargetCodeGenInfo : public TargetCodeGenInfo {
  mutable TypeStringCache TSC;

public:
  XCoreTargetCodeGenInfo(CodeGenTypes &CGT)
      : TargetCodeGenInfo(new XCoreABIInfo(CGT)) {}
  void emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
                    CodeGen::CodeGenModule &M) const override;
};

Address XCoreABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                QualType Ty) const {
  CGBuilderTy &Builder = CGF.Builder;

  // Get the va_list pointer.
  CharUnits SlotSize = CharUnits::fromQuantity(4);
  Address AP(Builder.CreateLoad(VAListAddr), SlotSize);

  // Handle the argument.
  ABIArgInfo AI = classifyArgumentType(Ty);
  CharUnits TypeAlign = getContext().getTypeAlignInChars(Ty);
  llvm::Type *ArgTy = CGT.ConvertType(Ty);
  if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
    AI.setCoerceToType(ArgTy);
  llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);

  Address Val = Address::invalid();
  CharUnits ArgSize = CharUnits::Zero();
  switch (AI.getKind()) {
  case ABIArgInfo::Expand:
  case ABIArgInfo::CoerceAndExpand:
  case ABIArgInfo::InAlloca:
    llvm_unreachable("Unsupported ABI kind for va_arg");
  case ABIArgInfo::Ignore:
    Val = Address(llvm::UndefValue::get(ArgPtrTy), TypeAlign);
    ArgSize = CharUnits::Zero();
    break;
  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    Val = Builder.CreateBitCast(AP, ArgPtrTy);
    ArgSize = CharUnits::fromQuantity(
        getDataLayout().getTypeAllocSize(AI.getCoerceToType()));
    ArgSize = ArgSize.alignTo(SlotSize);
    break;
  case ABIArgInfo::Indirect:
    Val = Builder.CreateElementBitCast(AP, ArgPtrTy);
    Val = Address(Builder.CreateLoad(Val), TypeAlign);
    ArgSize = SlotSize;
    break;
  }

  // Increment the va_list pointer.
  if (!ArgSize.isZero()) {
    Address APN = Builder.CreateConstInBoundsByteGEP(AP, ArgSize);
    Builder.CreateStore(APN.getPointer(), VAListAddr);
  }

  return Val;
}
/// During the expansion of a RecordType, an incomplete TypeString is placed
/// into the cache as a means to identify and break recursion. If there is a
/// Recursive encoding in the cache, it is swapped out while the stub is in
/// place.
void TypeStringCache::addIncomplete(const IdentifierInfo *ID,
                                    std::string StubEnc) {
  if (!ID)
    return;
  Entry &E = Map[ID];
  assert( (E.Str.empty() || E.State == Recursive) &&
         "Incorrect use of addIncomplete");
  assert(!StubEnc.empty() && "Passing an empty string to addIncomplete()");
  E.Swapped.swap(E.Str); // Swap out any Recursive encoding.
  E.Str.swap(StubEnc);
  E.State = Incomplete;
  ++IncompleteCount;
}

/// Once the RecordType has been expanded, the temporary incomplete
/// TypeString is removed from the cache. If a Recursive encoding was swapped
/// out by addIncomplete(), it is swapped back in.
bool TypeStringCache::removeIncomplete(const IdentifierInfo *ID) {
  if (!ID)
    return false;
  auto I = Map.find(ID);
  assert(I != Map.end() && "Entry not present");
  Entry &E = I->second;
  assert( (E.State == Incomplete ||
           E.State == IncompleteUsed) &&
         "Entry must be an incomplete type");
  bool IsRecursive = false;
  if (E.State == IncompleteUsed) {
    // We made use of our Incomplete encoding, thus we are recursive.
    IsRecursive = true;
    --IncompleteUsedCount;
  }
  if (E.Swapped.empty())
    Map.erase(I);
  else {
    // Swap the Recursive encoding back in.
    E.Swapped.swap(E.Str);
    E.Swapped.clear();
    E.State = Recursive;
  }
  --IncompleteCount;
  return IsRecursive;
}

/// Add the encoded TypeString to the cache only if it is NonRecursive or
/// Recursive (viz: all sub-members were expanded as fully as possible).
void TypeStringCache::addIfComplete(const IdentifierInfo *ID, StringRef Str,
                                    bool IsRecursive) {
  if (!ID || IncompleteUsedCount)
    return; // No key, or it is an incomplete sub-type, so don't add.
  Entry &E = Map[ID];
  if (IsRecursive && !E.Str.empty()) {
    assert(E.State==Recursive && E.Str.size() == Str.size() &&
           "This is not the same Recursive entry");
    return;
  }
  assert(E.Str.empty() && "Entry already present");
  E.Str = Str.str();
  E.State = IsRecursive? Recursive : NonRecursive;
}

/// Return a cached TypeString encoding for the ID. If there isn't one, or we
/// are in the middle of expanding a type (IncompleteCount != 0) and the
/// cached encoding is Recursive, return an empty StringRef.
StringRef TypeStringCache::lookupStr(const IdentifierInfo *ID) {
  if (!ID)
    return StringRef(); // We have no key.
  auto I = Map.find(ID);
  if (I == Map.end())
    return StringRef(); // We have no encoding.
  Entry &E = I->second;
  if (E.State == Recursive && IncompleteCount)
    return StringRef(); // Don't use Recursive encodings for member types.

  if (E.State == Incomplete) {
    // The incomplete type is being used to break out of recursion.
    E.State = IncompleteUsed;
    ++IncompleteUsedCount;
  }
  return E.Str;
}
/// The XCore ABI includes a type information section that communicates
/// symbol type information to the linker.
void XCoreTargetCodeGenInfo::emitTargetMD(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
  SmallStringEnc Enc;
  if (getTypeString(Enc, D, CGM, TSC)) {
    llvm::LLVMContext &Ctx = CGM.getModule().getContext();
    llvm::Metadata *MDVals[] = {llvm::ConstantAsMetadata::get(GV),
                                llvm::MDString::get(Ctx, Enc.str())};
    llvm::NamedMDNode *MD =
        CGM.getModule().getOrInsertNamedMetadata("xcore.typestrings");
    MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
  }
}
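// Sketch of the named metadata emitted by emitTargetMD (encoding
// abbreviated; see the append*Type helpers below for the grammar):
//
//   void f(int x);
//
//   !xcore.typestrings = !{!0}
//   !0 = !{void (i32)* @f, !"f{0}(si)"}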
class SPIRTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  SPIRTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
      : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
  unsigned getOpenCLKernelCallingConv() const override;
};

void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI) {
  DefaultABIInfo SPIRABI(CGM.getTypes());
  SPIRABI.computeInfo(FI);
}

unsigned SPIRTargetCodeGenInfo::getOpenCLKernelCallingConv() const {
  return llvm::CallingConv::SPIR_KERNEL;
}
static bool appendType(SmallStringEnc &Enc, QualType QType,
                       const CodeGen::CodeGenModule &CGM,
                       TypeStringCache &TSC);

/// Helper function for appendRecordType(): builds a SmallVector containing
/// the encoded field types in declaration order.
static bool extractFieldType(SmallVectorImpl<FieldEncoding> &FE,
                             const RecordDecl *RD,
                             const CodeGen::CodeGenModule &CGM,
                             TypeStringCache &TSC) {
  for (const auto *Field : RD->fields()) {
    SmallStringEnc Enc;
    Enc += "m(";
    Enc += Field->getName();
    Enc += "){";
    if (Field->isBitField()) {
      Enc += "b(";
      llvm::raw_svector_ostream OS(Enc);
      OS << Field->getBitWidthValue(CGM.getContext());
      Enc += ':';
    }
    if (!appendType(Enc, Field->getType(), CGM, TSC))
      return false;
    if (Field->isBitField())
      Enc += ')';
    Enc += '}';
    FE.emplace_back(!Field->getName().empty(), Enc);
  }
  return true;
}

/// Appends structure and union types to Enc and adds the encoding to the
/// cache. Recursively calls appendType (via extractFieldType) for members.
static bool appendRecordType(SmallStringEnc &Enc, const RecordType *RT,
                             const CodeGen::CodeGenModule &CGM,
                             TypeStringCache &TSC, const IdentifierInfo *ID) {
  // Append the cached TypeString if we have one.
  StringRef TypeString = TSC.lookupStr(ID);
  if (!TypeString.empty()) {
    Enc += TypeString;
    return true;
  }

  // Start to emit an incomplete TypeString.
  size_t Start = Enc.size();
  Enc += (RT->isUnionType() ? 'u' : 's');
  Enc += '(';
  if (ID)
    Enc += ID->getName();
  Enc += "){";

  // We collect all encoded fields and order them as necessary.
  bool IsRecursive = false;
  const RecordDecl *RD = RT->getDecl()->getDefinition();
  if (RD && !RD->field_empty()) {
    // Place an incomplete TypeString stub in the cache so that recursive
    // references to this RecordType use it while the full encoding is built.
    SmallVector<FieldEncoding, 16> FE;
    std::string StubEnc(Enc.substr(Start).str());
    StubEnc += '}'; // StubEnc now holds a valid incomplete TypeString.
    TSC.addIncomplete(ID, std::move(StubEnc));
    if (!extractFieldType(FE, RD, CGM, TSC)) {
      (void) TSC.removeIncomplete(ID);
      return false;
    }
    IsRecursive = TSC.removeIncomplete(ID);
    // The ABI requires unions to be sorted but not structures; see
    // FieldEncoding::operator< for the sort algorithm.
    if (RT->isUnionType())
      llvm::sort(FE);
    // We can now complete the TypeString.
    unsigned E = FE.size();
    for (unsigned I = 0; I != E; ++I) {
      if (I)
        Enc += ',';
      Enc += FE[I].str();
    }
  }
  Enc += '}';
  TSC.addIfComplete(ID, Enc.substr(Start), IsRecursive);
  return true;
}
/// Appends enum types to Enc and adds the encoding to the cache.
static bool appendEnumType(SmallStringEnc &Enc, const EnumType *ET,
                           TypeStringCache &TSC,
                           const IdentifierInfo *ID) {
  // Append the cached TypeString if we have one.
  StringRef TypeString = TSC.lookupStr(ID);
  if (!TypeString.empty()) {
    Enc += TypeString;
    return true;
  }

  size_t Start = Enc.size();
  Enc += "e(";
  if (ID)
    Enc += ID->getName();
  Enc += "){";

  // We collect all encoded enumerations and order them alphanumerically.
  if (const EnumDecl *ED = ET->getDecl()->getDefinition()) {
    SmallVector<FieldEncoding, 16> FE;
    for (auto I = ED->enumerator_begin(), E = ED->enumerator_end(); I != E;
         ++I) {
      SmallStringEnc EnumEnc;
      EnumEnc += "m(";
      EnumEnc += I->getName();
      EnumEnc += "){";
      I->getInitVal().toString(EnumEnc);
      EnumEnc += '}';
      FE.push_back(FieldEncoding(!I->getName().empty(), EnumEnc));
    }
    llvm::sort(FE);
    unsigned E = FE.size();
    for (unsigned I = 0; I != E; ++I) {
      if (I)
        Enc += ',';
      Enc += FE[I].str();
    }
  }
  Enc += '}';
  TSC.addIfComplete(ID, Enc.substr(Start), false);
  return true;
}
/// Appends the type's qualifier to Enc, prior to the type's encoding.
static void appendQualifier(SmallStringEnc &Enc, QualType QT) {
  // Qualifiers are emitted in alphabetical order.
  static const char *const Table[] =
      {"", "c:", "r:", "cr:", "v:", "cv:", "rv:", "crv:"};
  int Lookup = 0;
  if (QT.isConstQualified())
    Lookup += 1 << 0;
  if (QT.isRestrictQualified())
    Lookup += 1 << 1;
  if (QT.isVolatileQualified())
    Lookup += 1 << 2;
  Enc += Table[Lookup];
}

/// Appends built-in types to Enc.
static bool appendBuiltinType(SmallStringEnc &Enc, const BuiltinType *BT) {
  const char *EncType;
  switch (BT->getKind()) {
  case BuiltinType::Void:       EncType = "0";   break;
  case BuiltinType::Bool:       EncType = "b";   break;
  case BuiltinType::Char_U:     EncType = "uc";  break;
  case BuiltinType::UChar:      EncType = "uc";  break;
  case BuiltinType::SChar:      EncType = "sc";  break;
  case BuiltinType::UShort:     EncType = "us";  break;
  case BuiltinType::Short:      EncType = "ss";  break;
  case BuiltinType::UInt:       EncType = "ui";  break;
  case BuiltinType::Int:        EncType = "si";  break;
  case BuiltinType::ULong:      EncType = "ul";  break;
  case BuiltinType::Long:       EncType = "sl";  break;
  case BuiltinType::ULongLong:  EncType = "ull"; break;
  case BuiltinType::LongLong:   EncType = "sll"; break;
  case BuiltinType::Float:      EncType = "ft";  break;
  case BuiltinType::Double:     EncType = "d";   break;
  case BuiltinType::LongDouble: EncType = "ld";  break;
  default:
    return false;
  }
  Enc += EncType;
  return true;
}
/// Appends a pointer encoding to Enc before calling appendType for the
/// pointee.
static bool appendPointerType(SmallStringEnc &Enc, const PointerType *PT,
                              const CodeGen::CodeGenModule &CGM,
                              TypeStringCache &TSC) {
  Enc += "p(";
  if (!appendType(Enc, PT->getPointeeType(), CGM, TSC))
    return false;
  Enc += ')';
  return true;
}

/// Appends an array encoding to Enc before calling appendType for the
/// element.
static bool appendArrayType(SmallStringEnc &Enc, QualType QT,
                            const ArrayType *AT,
                            const CodeGen::CodeGenModule &CGM,
                            TypeStringCache &TSC, StringRef NoSizeEnc) {
  if (AT->getSizeModifier() != ArrayType::Normal)
    return false;
  Enc += "a(";
  if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT))
    CAT->getSize().toStringUnsigned(Enc);
  else
    Enc += NoSizeEnc; // Global arrays use "*", otherwise it is "".
  Enc += ':';
  // The qualifiers should be attached to the type rather than the array.
  appendQualifier(Enc, QT);
  if (!appendType(Enc, AT->getElementType(), CGM, TSC))
    return false;
  Enc += ')';
  return true;
}

/// Appends a function encoding to Enc, calling appendType for the return
/// type and the arguments.
static bool appendFunctionType(SmallStringEnc &Enc, const FunctionType *FT,
                               const CodeGen::CodeGenModule &CGM,
                               TypeStringCache &TSC) {
  Enc += "f{";
  if (!appendType(Enc, FT->getReturnType(), CGM, TSC))
    return false;
  Enc += "}(";
  if (const FunctionProtoType *FPT = FT->getAs<FunctionProtoType>()) {
    // N.B. we are only interested in the adjusted param types.
    auto I = FPT->param_type_begin();
    auto E = FPT->param_type_end();
    if (I != E) {
      do {
        if (!appendType(Enc, *I, CGM, TSC))
          return false;
        ++I;
        if (I != E)
          Enc += ',';
      } while (I != E);
      if (FPT->isVariadic())
        Enc += ",va";
    } else {
      if (FPT->isVariadic())
        Enc += "va";
      else
        Enc += '0';
    }
  }
  Enc += ')';
  return true;
}

/// Handles the type's qualifier before dispatching a call to handle specific
/// type encodings.
static bool appendType(SmallStringEnc &Enc, QualType QType,
                       const CodeGen::CodeGenModule &CGM,
                       TypeStringCache &TSC) {
  // ... canonicalize, then dispatch on array / builtin / pointer / record /
  // enum / function type ...
}

static bool getTypeString(SmallStringEnc &Enc, const Decl *D,
                          CodeGen::CodeGenModule &CGM,
                          TypeStringCache &TSC) {
  if (!D)
    return false;

  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    // ...
    return appendType(Enc, FD->getType(), CGM, TSC);
  }

  if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
    // ...
    QualType QT = VD->getType().getCanonicalType();
    // ... global arrays of unknown size are encoded with "*" ...
  }
  return false;
}
class RISCVABIInfo : public DefaultABIInfo {
private:
  // Size of the integer ('x') registers in bits.
  unsigned XLen;
  // Size of the floating point ('f') registers in bits.
  unsigned FLen;
  static const int NumArgGPRs = 8;
  static const int NumArgFPRs = 8;

public:
  RISCVABIInfo(CodeGen::CodeGenTypes &CGT, unsigned XLen, unsigned FLen)
      : DefaultABIInfo(CGT), XLen(XLen), FLen(FLen) {}

  void computeInfo(CGFunctionInfo &FI) const override;

  ABIArgInfo classifyArgumentType(QualType Ty, bool IsFixed, int &ArgGPRsLeft,
                                  int &ArgFPRsLeft) const;
  // ...
  bool detectFPCCEligibleStruct(QualType Ty, llvm::Type *&Field1Ty,
                                CharUnits &Field1Off, llvm::Type *&Field2Ty,
                                CharUnits &Field2Off, int &NeededArgGPRs,
                                int &NeededArgFPRs) const;
  // ...
};

void RISCVABIInfo::computeInfo(CGFunctionInfo &FI) const {
  QualType RetTy = FI.getReturnType();
  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(RetTy);

  bool IsRetIndirect = FI.getReturnInfo().getKind() == ABIArgInfo::Indirect;
  // ... a scalar return wider than 2*XLen (e.g. fp128 on RV32) also counts
  // as indirect:
  //   IsRetIndirect = true;

  // We must track the number of GPRs used in order to conform to the RISC-V
  // ABI: integer scalars passed in registers are signext/zeroext when
  // promoted, but anyext if passed on the stack.
  int ArgGPRsLeft = IsRetIndirect ? NumArgGPRs - 1 : NumArgGPRs;
  int ArgFPRsLeft = FLen ? NumArgFPRs : 0;
  int NumFixedArgs = FI.getNumRequiredArgs();

  int ArgNum = 0;
  for (auto &ArgInfo : FI.arguments()) {
    bool IsFixed = ArgNum < NumFixedArgs;
    ArgInfo.info =
        classifyArgumentType(ArgInfo.type, IsFixed, ArgGPRsLeft, ArgFPRsLeft);
    ArgNum++;
  }
}
// Returns true if the struct is a potential candidate for the floating-point
// calling convention.
bool RISCVABIInfo::detectFPCCEligibleStructHelper(QualType Ty, CharUnits CurOff,
                                                  llvm::Type *&Field1Ty,
                                                  CharUnits &Field1Off,
                                                  llvm::Type *&Field2Ty,
                                                  CharUnits &Field2Off) const {
  bool IsInt = Ty->isIntegralOrEnumerationType();
  bool IsFloat = Ty->isRealFloatingType();

  if (IsInt || IsFloat) {
    uint64_t Size = getContext().getTypeSize(Ty);
    if (IsInt && Size > XLen)
      return false;
    // Can't be eligible if larger than the FP registers.
    if (IsFloat && (Size > FLen || Size < 32))
      return false;
    // Can't be eligible if an integer type was already found (int+int pairs
    // are not eligible).
    if (IsInt && Field1Ty && Field1Ty->isIntegerTy())
      return false;
    // ... record Ty in Field1Ty/Field2Ty along with CurOff ...
    return true;
  }

  if (auto CTy = Ty->getAs<ComplexType>()) {
    if (Field1Ty)
      return false;
    QualType EltTy = CTy->getElementType();
    if (getContext().getTypeSize(EltTy) > FLen)
      return false;
    Field1Ty = CGT.ConvertType(EltTy);
    Field1Off = CurOff;
    assert(CurOff.isZero() && "Unexpected offset for first field");
    Field2Ty = Field1Ty;
    Field2Off = Field1Off + getContext().getTypeSizeInChars(EltTy);
    return true;
  }

  if (const ConstantArrayType *ATy = getContext().getAsConstantArrayType(Ty)) {
    uint64_t ArraySize = ATy->getSize().getZExtValue();
    QualType EltTy = ATy->getElementType();
    CharUnits EltSize = getContext().getTypeSizeInChars(EltTy);
    for (uint64_t i = 0; i < ArraySize; ++i) {
      bool Ret = detectFPCCEligibleStructHelper(EltTy, CurOff, Field1Ty,
                                                Field1Off, Field2Ty, Field2Off);
      if (!Ret)
        return false;
      CurOff += EltSize;
    }
    return true;
  }

  if (const auto *RTy = Ty->getAs<RecordType>()) {
    // ... non-trivially-copyable records and non-empty unions are not
    // eligible ...
    const RecordDecl *RD = RTy->getDecl();
    int ZeroWidthBitFieldCount = 0;
    const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
    for (const FieldDecl *FD : RD->fields()) {
      uint64_t FieldOffInBits = Layout.getFieldOffset(FD->getFieldIndex());
      QualType QTy = FD->getType();
      if (FD->isBitField()) {
        unsigned BitWidth = FD->getBitWidthValue(getContext());
        // Allow a bitfield with a type greater than XLen as long as the
        // bitwidth is XLen or less.
        if (getContext().getTypeSize(QTy) > XLen && BitWidth <= XLen)
          QTy = getContext().getIntTypeForBitwidth(XLen, false);
        if (BitWidth == 0) {
          ZeroWidthBitFieldCount++;
          continue;
        }
      }

      bool Ret = detectFPCCEligibleStructHelper(
          QTy, CurOff + getContext().toCharUnitsFromBits(FieldOffInBits),
          Field1Ty, Field1Off, Field2Ty, Field2Off);
      if (!Ret)
        return false;

      // As a quirk of the ABI, zero-width bitfields aren't ignored for fp+fp
      // or int+fp structs.
      if (Field2Ty && ZeroWidthBitFieldCount > 0)
        return false;
    }
    return Field1Ty != nullptr;
  }

  return false;
}

// Determine if a struct is eligible for passing according to the floating
// point calling convention (i.e., when flattened it contains a single fp
// value, fp+fp, or int+fp of appropriate size). If so, NeededArgFPRs and
// NeededArgGPRs are incremented appropriately.
bool RISCVABIInfo::detectFPCCEligibleStruct(QualType Ty, llvm::Type *&Field1Ty,
                                            CharUnits &Field1Off,
                                            llvm::Type *&Field2Ty,
                                            CharUnits &Field2Off,
                                            int &NeededArgGPRs,
                                            int &NeededArgFPRs) const {
  Field1Ty = nullptr;
  Field2Ty = nullptr;
  NeededArgGPRs = 0;
  NeededArgFPRs = 0;
  bool IsCandidate = detectFPCCEligibleStructHelper(
      Ty, CharUnits::Zero(), Field1Ty, Field1Off, Field2Ty, Field2Off);
  // Not really a candidate if we have a single int but no float.
  if (Field1Ty && !Field2Ty && !Field1Ty->isFloatingPointTy())
    return false;
  if (!IsCandidate)
    return false;
  if (Field1Ty && Field1Ty->isFloatingPointTy())
    NeededArgFPRs++;
  else if (Field1Ty)
    NeededArgGPRs++;
  if (Field2Ty && Field2Ty->isFloatingPointTy())
    NeededArgFPRs++;
  else if (Field2Ty)
    NeededArgGPRs++;
  return IsCandidate;
}
// Call getCoerceAndExpand for the two-element flattened struct described by
// Field1Ty, Field1Off, Field2Ty, Field2Off, creating an appropriate
// coerceToType and unpaddedCoerceToType.
ABIArgInfo RISCVABIInfo::coerceAndExpandFPCCEligibleStruct(
    llvm::Type *Field1Ty, CharUnits Field1Off, llvm::Type *Field2Ty,
    CharUnits Field2Off) const {
  SmallVector<llvm::Type *, 3> CoerceElts;
  SmallVector<llvm::Type *, 2> UnpaddedCoerceElts;
  if (!Field1Off.isZero())
    CoerceElts.push_back(llvm::ArrayType::get(
        llvm::Type::getInt8Ty(getVMContext()), Field1Off.getQuantity()));

  CoerceElts.push_back(Field1Ty);
  UnpaddedCoerceElts.push_back(Field1Ty);

  if (!Field2Ty) {
    return ABIArgInfo::getCoerceAndExpand(
        llvm::StructType::get(getVMContext(), CoerceElts, !Field1Off.isZero()),
        UnpaddedCoerceElts[0]);
  }

  CharUnits Field2Align =
      CharUnits::fromQuantity(getDataLayout().getABITypeAlignment(Field2Ty));
  CharUnits Field1Size =
      CharUnits::fromQuantity(getDataLayout().getTypeStoreSize(Field1Ty));
  CharUnits Field2OffNoPadNoPack = Field1Size.alignTo(Field2Align);

  CharUnits Padding = CharUnits::Zero();
  if (Field2Off > Field2OffNoPadNoPack)
    Padding = Field2Off - Field2OffNoPadNoPack;
  else if (Field2Off != Field2Align && Field2Off > Field1Size)
    Padding = Field2Off - Field1Size;

  bool IsPacked = !Field2Off.isMultipleOf(Field2Align);

  if (!Padding.isZero())
    CoerceElts.push_back(llvm::ArrayType::get(
        llvm::Type::getInt8Ty(getVMContext()), Padding.getQuantity()));

  CoerceElts.push_back(Field2Ty);
  UnpaddedCoerceElts.push_back(Field2Ty);

  auto CoerceToType =
      llvm::StructType::get(getVMContext(), CoerceElts, IsPacked);
  auto UnpaddedCoerceToType =
      llvm::StructType::get(getVMContext(), UnpaddedCoerceElts, IsPacked);

  return ABIArgInfo::getCoerceAndExpand(CoerceToType, UnpaddedCoerceToType);
}
ABIArgInfo RISCVABIInfo::classifyArgumentType(QualType Ty, bool IsFixed,
                                              int &ArgGPRsLeft,
                                              int &ArgFPRsLeft) const {
  assert(ArgGPRsLeft <= NumArgGPRs && "Arg GPR tracking underflow");
  Ty = useFirstFieldIfTransparentUnion(Ty);

  // ... C++ records requiring indirection and empty records are handled
  // first ...

  uint64_t Size = getContext().getTypeSize(Ty);

  // Pass floating point values via FPRs if possible.
  if (IsFixed && Ty->isFloatingType() && FLen >= Size && ArgFPRsLeft) {
    ArgFPRsLeft--;
    return ABIArgInfo::getDirect();
  }

  // Complex types for the hard float ABI must be passed direct rather than
  // using CoerceAndExpand.
  if (IsFixed && Ty->isComplexType() && FLen && ArgFPRsLeft >= 2) {
    QualType EltTy = Ty->castAs<ComplexType>()->getElementType();
    if (getContext().getTypeSize(EltTy) <= FLen) {
      ArgFPRsLeft -= 2;
      return ABIArgInfo::getDirect();
    }
  }

  if (IsFixed && FLen && Ty->isStructureOrClassType()) {
    llvm::Type *Field1Ty = nullptr;
    llvm::Type *Field2Ty = nullptr;
    CharUnits Field1Off = CharUnits::Zero();
    CharUnits Field2Off = CharUnits::Zero();
    int NeededArgGPRs;
    int NeededArgFPRs;
    bool IsCandidate =
        detectFPCCEligibleStruct(Ty, Field1Ty, Field1Off, Field2Ty, Field2Off,
                                 NeededArgGPRs, NeededArgFPRs);
    if (IsCandidate && NeededArgGPRs <= ArgGPRsLeft &&
        NeededArgFPRs <= ArgFPRsLeft) {
      ArgGPRsLeft -= NeededArgGPRs;
      ArgFPRsLeft -= NeededArgFPRs;
      return coerceAndExpandFPCCEligibleStruct(Field1Ty, Field1Off, Field2Ty,
                                               Field2Off);
    }
  }

  uint64_t NeededAlign = getContext().getTypeAlign(Ty);
  bool MustUseStack = false;
  // Determine the number of GPRs needed to pass the current argument
  // according to the ABI. 2*XLen-aligned varargs are passed in "aligned"
  // register pairs, so may consume 3 registers.
  int NeededArgGPRs = 1;
  if (!IsFixed && NeededAlign == 2 * XLen)
    NeededArgGPRs = 2 + (ArgGPRsLeft % 2);
  else if (Size > XLen && Size <= 2 * XLen)
    NeededArgGPRs = 2;

  if (NeededArgGPRs > ArgGPRsLeft) {
    MustUseStack = true;
    NeededArgGPRs = ArgGPRsLeft;
  }

  ArgGPRsLeft -= NeededArgGPRs;

  if (!isAggregateTypeForABI(Ty) && !Ty->isVectorType()) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    // All integral types are promoted to XLen width, unless passed on the
    // stack.
    if (Size < XLen && Ty->isIntegralOrEnumerationType() && !MustUseStack) {
      return extendType(Ty);
    }

    return ABIArgInfo::getDirect();
  }

  // Aggregates which are <= 2*XLen will be passed in registers if possible,
  // so coerce to integers.
  if (Size <= 2 * XLen) {
    unsigned Alignment = getContext().getTypeAlign(Ty);

    // Use a single XLen int if possible, 2*XLen if 2*XLen alignment is
    // required, and a 2-element XLen array if only XLen alignment is
    // required.
    if (Size <= XLen) {
      return ABIArgInfo::getDirect(
          llvm::IntegerType::get(getVMContext(), XLen));
    } else if (Alignment == 2 * XLen) {
      return ABIArgInfo::getDirect(
          llvm::IntegerType::get(getVMContext(), 2 * XLen));
    } else {
      return ABIArgInfo::getDirect(llvm::ArrayType::get(
          llvm::IntegerType::get(getVMContext(), XLen), 2));
    }
  }
  return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
}
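// Worked example (sketch): with a hard-float ABI (FLen >= 32), a fixed
// argument of type
//
//   struct P { float f; int i; };  // Field1 = float, Field2 = i32
//
// is FPCC-eligible with NeededArgFPRs == 1 and NeededArgGPRs == 1, so it is
// split across one FPR and one GPR via coerceAndExpandFPCCEligibleStruct;
// if either register class is exhausted it falls back to the integer
// convention above.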
ABIArgInfo RISCVABIInfo::classifyReturnType(QualType RetTy) const {
  // ...
  // Two registers are available for the return value; the rules for return
  // and argument types are otherwise the same.
  int ArgGPRsLeft = 2;
  int ArgFPRsLeft = FLen ? 2 : 0;
  return classifyArgumentType(RetTy, /*IsFixed=*/true, ArgGPRsLeft,
                              ArgFPRsLeft);
}

Address RISCVABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                QualType Ty) const {
  CharUnits SlotSize = CharUnits::fromQuantity(XLen / 8);
  // ...
  std::pair<CharUnits, CharUnits> SizeAndAlign =
      getContext().getTypeInfoInChars(Ty);

  // Arguments bigger than 2*XLen bytes are passed indirectly.
  bool IsIndirect = SizeAndAlign.first > 2 * SlotSize;

  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, SizeAndAlign,
                          SlotSize, /*AllowHigherAlign=*/true);
}
class RISCVTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  RISCVTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, unsigned XLen,
                         unsigned FLen)
      : TargetCodeGenInfo(new RISCVABIInfo(CGT, XLen, FLen)) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    const auto *FD = dyn_cast_or_null<FunctionDecl>(D);
    if (!FD) return;

    const auto *Attr = FD->getAttr<RISCVInterruptAttr>();
    if (!Attr)
      return;

    const char *Kind;
    switch (Attr->getInterrupt()) {
    case RISCVInterruptAttr::user:       Kind = "user";       break;
    case RISCVInterruptAttr::supervisor: Kind = "supervisor"; break;
    case RISCVInterruptAttr::machine:    Kind = "machine";    break;
    }

    auto *Fn = cast<llvm::Function>(GV);

    Fn->addFnAttr("interrupt", Kind);
  }
};
bool CodeGenModule::supportsCOMDAT() const {
  return getTriple().supportsCOMDAT();
}

const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
  if (TheTargetCodeGenInfo)
    return *TheTargetCodeGenInfo;

  // Helper to set the unique_ptr while still keeping the return value.
  auto SetCGInfo = [&](TargetCodeGenInfo *P) -> const TargetCodeGenInfo & {
    this->TheTargetCodeGenInfo.reset(P);
    return *P;
  };

  const llvm::Triple &Triple = getTarget().getTriple();
  switch (Triple.getArch()) {
  default:
    return SetCGInfo(new DefaultTargetCodeGenInfo(Types));

  case llvm::Triple::le32:
    return SetCGInfo(new PNaClTargetCodeGenInfo(Types));
  case llvm::Triple::mips:
  case llvm::Triple::mipsel:
    if (Triple.getOS() == llvm::Triple::NaCl)
      return SetCGInfo(new PNaClTargetCodeGenInfo(Types));
    return SetCGInfo(new MIPSTargetCodeGenInfo(Types, /*IsO32=*/true));

  case llvm::Triple::mips64:
  case llvm::Triple::mips64el:
    return SetCGInfo(new MIPSTargetCodeGenInfo(Types, /*IsO32=*/false));

  case llvm::Triple::avr:
    return SetCGInfo(new AVRTargetCodeGenInfo(Types));

  case llvm::Triple::aarch64:
  case llvm::Triple::aarch64_32:
  case llvm::Triple::aarch64_be: {
    AArch64ABIInfo::ABIKind Kind = AArch64ABIInfo::AAPCS;
    if (getTarget().getABI() == "darwinpcs")
      Kind = AArch64ABIInfo::DarwinPCS;
    else if (Triple.isOSWindows())
      return SetCGInfo(
          new WindowsAArch64TargetCodeGenInfo(Types, AArch64ABIInfo::Win64));

    return SetCGInfo(new AArch64TargetCodeGenInfo(Types, Kind));
  }

  case llvm::Triple::wasm32:
  case llvm::Triple::wasm64:
    return SetCGInfo(new WebAssemblyTargetCodeGenInfo(Types));

  case llvm::Triple::arm:
  case llvm::Triple::armeb:
  case llvm::Triple::thumb:
  case llvm::Triple::thumbeb: {
    if (Triple.getOS() == llvm::Triple::Win32) {
      return SetCGInfo(
          new WindowsARMTargetCodeGenInfo(Types, ARMABIInfo::AAPCS_VFP));
    }

    ARMABIInfo::ABIKind Kind = ARMABIInfo::AAPCS;
    StringRef ABIStr = getTarget().getABI();
    if (ABIStr == "apcs-gnu")
      Kind = ARMABIInfo::APCS;
    else if (ABIStr == "aapcs16")
      Kind = ARMABIInfo::AAPCS16_VFP;
    else if (CodeGenOpts.FloatABI == "hard" ||
             (CodeGenOpts.FloatABI != "soft" &&
              (Triple.getEnvironment() == llvm::Triple::GNUEABIHF ||
               Triple.getEnvironment() == llvm::Triple::MuslEABIHF ||
               Triple.getEnvironment() == llvm::Triple::EABIHF)))
      Kind = ARMABIInfo::AAPCS_VFP;

    return SetCGInfo(new ARMTargetCodeGenInfo(Types, Kind));
  }

  case llvm::Triple::ppc:
    return SetCGInfo(
        new PPC32TargetCodeGenInfo(Types, CodeGenOpts.FloatABI == "soft" ||
                                              getTarget().hasFeature("spe")));
  case llvm::Triple::ppc64:
    if (Triple.isOSBinFormatELF()) {
      PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv1;
      if (getTarget().getABI() == "elfv2")
        Kind = PPC64_SVR4_ABIInfo::ELFv2;
      bool HasQPX = getTarget().getABI() == "elfv1-qpx";
      bool IsSoftFloat = CodeGenOpts.FloatABI == "soft";

      return SetCGInfo(new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX,
                                                        IsSoftFloat));
    }
    return SetCGInfo(new PPC64TargetCodeGenInfo(Types));
  case llvm::Triple::ppc64le: {
    assert(Triple.isOSBinFormatELF() && "PPC64 LE non-ELF not supported!");
    PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv2;
    if (getTarget().getABI() == "elfv1" || getTarget().getABI() == "elfv1-qpx")
      Kind = PPC64_SVR4_ABIInfo::ELFv1;
    bool HasQPX = getTarget().getABI() == "elfv1-qpx";
    bool IsSoftFloat = CodeGenOpts.FloatABI == "soft";

    return SetCGInfo(new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX,
                                                      IsSoftFloat));
  }

  case llvm::Triple::nvptx:
  case llvm::Triple::nvptx64:
    return SetCGInfo(new NVPTXTargetCodeGenInfo(Types));

  case llvm::Triple::msp430:
    return SetCGInfo(new MSP430TargetCodeGenInfo(Types));

  case llvm::Triple::riscv32:
  case llvm::Triple::riscv64: {
    StringRef ABIStr = getTarget().getABI();
    unsigned XLen = getTarget().getPointerWidth(0);
    unsigned ABIFLen = 0;
    if (ABIStr.endswith("f"))
      ABIFLen = 32;
    else if (ABIStr.endswith("d"))
      ABIFLen = 64;
    return SetCGInfo(new RISCVTargetCodeGenInfo(Types, XLen, ABIFLen));
  }

  case llvm::Triple::systemz: {
    bool HasVector = getTarget().getABI() == "vector";
    return SetCGInfo(new SystemZTargetCodeGenInfo(Types, HasVector));
  }

  case llvm::Triple::tce:
  case llvm::Triple::tcele:
    return SetCGInfo(new TCETargetCodeGenInfo(Types));

  case llvm::Triple::x86: {
    bool IsDarwinVectorABI = Triple.isOSDarwin();
    bool RetSmallStructInRegABI =
        X86_32TargetCodeGenInfo::isStructReturnInRegABI(Triple, CodeGenOpts);
    bool IsWin32FloatStructABI = Triple.isOSWindows() && !Triple.isOSCygMing();

    if (Triple.getOS() == llvm::Triple::Win32) {
      return SetCGInfo(new WinX86_32TargetCodeGenInfo(
          Types, IsDarwinVectorABI, RetSmallStructInRegABI,
          IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters));
    } else {
      return SetCGInfo(new X86_32TargetCodeGenInfo(
          Types, IsDarwinVectorABI, RetSmallStructInRegABI,
          IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters,
          CodeGenOpts.FloatABI == "soft"));
    }
  }

  case llvm::Triple::x86_64: {
    StringRef ABI = getTarget().getABI();
    X86AVXABILevel AVXLevel =
        (ABI == "avx512"
             ? X86AVXABILevel::AVX512
             : ABI == "avx" ? X86AVXABILevel::AVX : X86AVXABILevel::None);

    switch (Triple.getOS()) {
    case llvm::Triple::Win32:
      return SetCGInfo(new WinX86_64TargetCodeGenInfo(Types, AVXLevel));
    default:
      return SetCGInfo(new X86_64TargetCodeGenInfo(Types, AVXLevel));
    }
  }
  case llvm::Triple::hexagon:
    return SetCGInfo(new HexagonTargetCodeGenInfo(Types));
  case llvm::Triple::lanai:
    return SetCGInfo(new LanaiTargetCodeGenInfo(Types));
  case llvm::Triple::r600:
    return SetCGInfo(new AMDGPUTargetCodeGenInfo(Types));
  case llvm::Triple::amdgcn:
    return SetCGInfo(new AMDGPUTargetCodeGenInfo(Types));
  case llvm::Triple::sparc:
    return SetCGInfo(new SparcV8TargetCodeGenInfo(Types));
  case llvm::Triple::sparcv9:
    return SetCGInfo(new SparcV9TargetCodeGenInfo(Types));
  case llvm::Triple::xcore:
    return SetCGInfo(new XCoreTargetCodeGenInfo(Types));
  case llvm::Triple::arc:
    return SetCGInfo(new ARCTargetCodeGenInfo(Types));
  case llvm::Triple::spir:
  case llvm::Triple::spir64:
    return SetCGInfo(new SPIRTargetCodeGenInfo(Types));
  }
}
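// Sketch: this switch is reached lazily via
// CodeGenModule::getTargetCodeGenInfo(). For example, for --target=riscv64
// with -mabi=lp64d the ABI string ends in "d", so ABIFLen is 64 and the
// RISC-V lowering above may use FPRs for doubles; with -mabi=lp64 ABIFLen
// stays 0 and everything goes through the integer convention.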
/// Create an OpenCL kernel for an enqueued block.
///
/// The kernel has the same function type as the block invoke function. Its
/// name is the name of the block invoke function postfixed with "_kernel".
/// It simply calls the block invoke function then returns.
llvm::Function *
TargetCodeGenInfo::createEnqueuedBlockKernel(CodeGenFunction &CGF,
                                             llvm::Function *Invoke,
                                             llvm::Value *BlockLiteral) const {
  auto *InvokeFT = Invoke->getFunctionType();
  llvm::SmallVector<llvm::Type *, 2> ArgTys;
  for (auto &P : InvokeFT->params())
    ArgTys.push_back(P);
  auto &C = CGF.getLLVMContext();
  std::string Name = Invoke->getName().str() + "_kernel";
  auto *FT = llvm::FunctionType::get(llvm::Type::getVoidTy(C), ArgTys, false);
  auto *F = llvm::Function::Create(FT, llvm::GlobalValue::InternalLinkage,
                                   Name, &CGF.CGM.getModule());
  auto IP = CGF.Builder.saveIP();
  auto *BB = llvm::BasicBlock::Create(C, "entry", F);
  auto &Builder = CGF.Builder;
  Builder.SetInsertPoint(BB);
  llvm::SmallVector<llvm::Value *, 2> Args;
  for (auto &A : F->args())
    Args.push_back(&A);
  Builder.CreateCall(Invoke, Args);
  Builder.CreateRetVoid();
  Builder.restoreIP(IP);
  return F;
}
/// Create an OpenCL kernel for an enqueued block.
///
/// The type of the first argument (the block literal) is the struct type of
/// the block literal instead of a pointer type. The first argument (block
/// literal) is passed directly by value to the kernel. The kernel allocates
/// the same type of struct on the stack, stores the block literal to it, and
/// passes its pointer to the block invoke function. The kernel has the
/// "enqueued-block" function attribute and kernel argument metadata.
llvm::Function *AMDGPUTargetCodeGenInfo::createEnqueuedBlockKernel(
    CodeGenFunction &CGF, llvm::Function *Invoke,
    llvm::Value *BlockLiteral) const {
  auto &Builder = CGF.Builder;
  auto &C = CGF.getLLVMContext();

  auto *BlockTy = BlockLiteral->getType()->getPointerElementType();
  auto *InvokeFT = Invoke->getFunctionType();
  llvm::SmallVector<llvm::Type *, 2> ArgTys;
  llvm::SmallVector<llvm::Metadata *, 8> AddressQuals;
  llvm::SmallVector<llvm::Metadata *, 8> AccessQuals;
  llvm::SmallVector<llvm::Metadata *, 8> ArgTypeNames;
  llvm::SmallVector<llvm::Metadata *, 8> ArgBaseTypeNames;
  llvm::SmallVector<llvm::Metadata *, 8> ArgTypeQuals;
  llvm::SmallVector<llvm::Metadata *, 8> ArgNames;

  ArgTys.push_back(BlockTy);
  ArgTypeNames.push_back(llvm::MDString::get(C, "__block_literal"));
  AddressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(0)));
  ArgBaseTypeNames.push_back(llvm::MDString::get(C, "__block_literal"));
  ArgTypeQuals.push_back(llvm::MDString::get(C, ""));
  AccessQuals.push_back(llvm::MDString::get(C, "none"));
  ArgNames.push_back(llvm::MDString::get(C, "block_literal"));
  for (unsigned I = 1, E = InvokeFT->getNumParams(); I < E; ++I) {
    ArgTys.push_back(InvokeFT->getParamType(I));
    ArgTypeNames.push_back(llvm::MDString::get(C, "void*"));
    AddressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(3)));
    AccessQuals.push_back(llvm::MDString::get(C, "none"));
    ArgBaseTypeNames.push_back(llvm::MDString::get(C, "void*"));
    ArgTypeQuals.push_back(llvm::MDString::get(C, ""));
    ArgNames.push_back(
        llvm::MDString::get(C, (Twine("local_arg") + Twine(I)).str()));
  }
  std::string Name = Invoke->getName().str() + "_kernel";
  auto *FT = llvm::FunctionType::get(llvm::Type::getVoidTy(C), ArgTys, false);
  auto *F = llvm::Function::Create(FT, llvm::GlobalValue::InternalLinkage,
                                   Name, &CGF.CGM.getModule());
  F->addFnAttr("enqueued-block");
  auto IP = CGF.Builder.saveIP();
  auto *BB = llvm::BasicBlock::Create(C, "entry", F);
  Builder.SetInsertPoint(BB);
  unsigned BlockAlign = CGF.CGM.getDataLayout().getPrefTypeAlignment(BlockTy);
  auto *BlockPtr = Builder.CreateAlloca(BlockTy, nullptr);
  BlockPtr->setAlignment(llvm::MaybeAlign(BlockAlign));
  Builder.CreateAlignedStore(F->arg_begin(), BlockPtr, BlockAlign);
  auto *Cast = Builder.CreatePointerCast(BlockPtr, InvokeFT->getParamType(0));
  llvm::SmallVector<llvm::Value *, 2> Args;
  Args.push_back(Cast);
  for (auto I = F->arg_begin() + 1, E = F->arg_end(); I != E; ++I)
    Args.push_back(I);
  Builder.CreateCall(Invoke, Args);
  Builder.CreateRetVoid();
  Builder.restoreIP(IP);

  F->setMetadata("kernel_arg_addr_space", llvm::MDNode::get(C, AddressQuals));
  F->setMetadata("kernel_arg_access_qual", llvm::MDNode::get(C, AccessQuals));
  F->setMetadata("kernel_arg_type", llvm::MDNode::get(C, ArgTypeNames));
  F->setMetadata("kernel_arg_base_type",
                 llvm::MDNode::get(C, ArgBaseTypeNames));
  F->setMetadata("kernel_arg_type_qual", llvm::MDNode::get(C, ArgTypeQuals));
  if (CGF.CGM.getCodeGenOpts().EmitOpenCLArgMetadata)
    F->setMetadata("kernel_arg_name", llvm::MDNode::get(C, ArgNames));

  return F;
}
const llvm::DataLayout & getDataLayout() const
CGCXXABI & getCXXABI() const
Ignore - Ignore the argument (treat as void).
Address CreateStructGEP(Address Addr, unsigned Index, const llvm::Twine &Name="")
bool isFloatingPoint() const
CharUnits alignTo(const CharUnits &Align) const
alignTo - Returns the next integer (mod 2**64) that is greater than or equal to this quantity and is ...
Represents a function declaration or definition.
void setEffectiveCallingConvention(unsigned Value)
bool Cast(InterpState &S, CodePtr OpPC)
static bool addFieldSizes(ASTContext &Context, const RecordDecl *RD, uint64_t &Size)
if(T->getSizeExpr()) TRY_TO(TraverseStmt(T -> getSizeExpr()))
PointerType - C99 6.7.5.1 - Pointer Declarators.
QualType getPointeeType() const
A (possibly-)qualified type.
bool isBlockPointerType() const
CodeGenTypes & getTypes()
bool isMemberPointerType() const
llvm::Type * ConvertTypeForMem(QualType T)
const CodeGenOptions & getCodeGenOpts() const
bool isUnsignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is unsigned or an enumeration types whose underlying ...
virtual llvm::SyncScope::ID getLLVMSyncScopeID(const LangOptions &LangOpts, SyncScope Scope, llvm::AtomicOrdering Ordering, llvm::LLVMContext &Ctx) const
Get the syncscope used in LLVM IR.
bool isHomogeneousAggregate(QualType Ty, const Type *&Base, uint64_t &Members) const
isHomogeneousAggregate - Return true if a type is an ELFv2 homogeneous aggregate. ...
static void setCUDAKernelCallingConvention(CanQualType &FTy, CodeGenModule &CGM, const FunctionDecl *FD)
Set calling convention for CUDA/HIP kernel.
Address CreateMemTemp(QualType T, const Twine &Name="tmp", Address *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignmen and cas...
static ABIArgInfo classifyType(CodeGenModule &CGM, CanQualType type, bool forReturn)
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D...
FunctionType - C99 6.7.5.3 - Function Declarators.
llvm::ConstantInt * getSize(CharUnits N)
const llvm::Triple & getTriple() const
Returns the target triple of the primary target.
CharUnits getBaseClassOffset(const CXXRecordDecl *Base) const
getBaseClassOffset - Get the offset, in chars, for the given base class.
bool isRealFloatingType() const
Floating point categories.
Extend - Valid only for integer argument types.
bool isRecordType() const
Decl - This represents one declaration (or definition), e.g.
static bool appendEnumType(SmallStringEnc &Enc, const EnumType *ET, TypeStringCache &TSC, const IdentifierInfo *ID)
Appends enum types to Enc and adds the encoding to the cache.
CharUnits getPointerSize() const
const RecordType * getAsStructureType() const
Direct - Pass the argument directly using the normal converted LLVM type, or by coercing to another s...
const llvm::DataLayout & getDataLayout() const
static const Type * isSingleElementStruct(QualType T, ASTContext &Context)
isSingleElementStruct - Determine if a structure is a "single element struct", i.e.
The base class of the type hierarchy.
const ABIInfo & getABIInfo() const
getABIInfo() - Returns ABI info helper for the target.
Represents an array type, per C99 6.7.5.2 - Array Declarators.
bool isRestrictQualified() const
Determine whether this type is restrict-qualified.
bool isZero() const
isZero - Test whether the quantity equals zero.
const TargetInfo & getTargetInfo() const
MutableArrayRef< ArgInfo > arguments()
static bool appendType(SmallStringEnc &Enc, QualType QType, const CodeGen::CodeGenModule &CGM, TypeStringCache &TSC)
Handles the type's qualifier before dispatching a call to handle specific type encodings.
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
virtual ~TargetCodeGenInfo()
void setCanBeFlattened(bool Flatten)
QualType getElementType() const
const RecordType * getAsUnionType() const
NOTE: getAs*ArrayType are methods on ASTContext.
unsigned getTypeAlign(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in bits.
ASTContext & getContext() const
bool ReturnValue(const T &V, APValue &R)
Convers a value to an APValue.
Represents a variable declaration or definition.
Objects with "hidden" visibility are not seen by the dynamic linker.
LangAS getLangASFromTargetAS(unsigned TargetAS)
bool isEnumeralType() const
const T * getAs() const
Member-template getAs<specific type>'.
virtual bool validateBranchProtection(StringRef Spec, BranchProtectionInfo &BPI, StringRef &Err) const
Determine if this TargetInfo supports the given branch protection specification.
bool hasPointerRepresentation() const
Whether this type is represented natively as a pointer.
bool supportsCOMDAT() const
LangAS
Defines the address space values used by the address space qualifier of QualType. ...
llvm::LLVMContext & getVMContext() const
void setCoerceToType(llvm::Type *T)
CodeGenOptions::SignReturnAddressKeyValue SignKey
Address CreateConstInBoundsByteGEP(Address Addr, CharUnits Offset, const llvm::Twine &Name="")
Given a pointer to i8, adjust it by a given constant offset.
llvm::Value * getPointer() const
llvm::Type * ConvertTypeForMem(QualType T)
ConvertTypeForMem - Convert type T into a llvm::Type.
static ABIArgInfo getIgnore()
static bool isAggregateTypeForABI(QualType T)
bool hasFloatingRepresentation() const
Determine whether this type has a floating-point representation of some sort, e.g., it is a floating-point type or a vector thereof.
unsigned arg_size() const
virtual unsigned getOpenCLKernelCallingConv() const
Get LLVM calling convention for OpenCL kernel.
Represents a struct/union/class.
uint64_t getPointerWidth(unsigned AddrSpace) const
Return the width of pointers on this target, for the specified address space.
static ABIArgInfo coerceToIntArray(QualType Ty, ASTContext &Context, llvm::LLVMContext &LLVMContext)
CodeGen::CodeGenTypes & CGT
One of these records is kept for each identifier that is lexed.
Indirect - Pass the argument indirectly via a hidden pointer with the specified alignment (0 indicate...
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
ABIArgInfo classifyArgumentType(CodeGenModule &CGM, CanQualType type)
Classify the rules for how to pass a particular type.
llvm::IntegerType * Int64Ty
RecordDecl * getDefinition() const
Returns the RecordDecl that actually defines this struct/union/class.
static llvm::Type * GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi, const llvm::DataLayout &TD)
GetX86_64ByValArgumentPair - Given a high and low type that can ideally be used as elements of a two ...
static CharUnits getTypeAllocSize(CodeGenModule &CGM, llvm::Type *type)
field_range fields() const
static Address EmitX86_64VAArgFromMemory(CodeGenFunction &CGF, Address VAListAddr, QualType Ty)
Represents a member of a struct/union/class.
bool isReferenceType() const
CharUnits getTypeUnadjustedAlignInChars(QualType T) const
getTypeUnadjustedAlignInChars - Return the ABI-specified alignment of a type, in characters, before alignment adjustments.
bool isSpecificBuiltinType(unsigned K) const
Test for a particular builtin type.
static CharUnits Zero()
Zero - Construct a CharUnits quantity of zero.
SignReturnAddressKeyValue
static bool requiresAMDGPUDefaultVisibility(const Decl *D, llvm::GlobalValue *GV)
static bool occupiesMoreThan(CodeGenTypes &cgt, ArrayRef< llvm::Type *> scalarTypes, unsigned maxAllRegisters)
Does the given lowering require more than the given number of registers when expanded?
__DEVICE__ int max(int __a, int __b)
Keeps track of the various options that can be enabled, which controls the dialect of C or C++ that i...
ABIInfo(CodeGen::CodeGenTypes &cgt)
bool isIntegralOrEnumerationType() const
Determine whether this type is an integral or enumeration type.
static ABIArgInfo getIndirectInReg(CharUnits Alignment, bool ByVal=true, bool Realign=false)
Objects with "default" visibility are seen by the dynamic linker and act like normal objects...
virtual bool hasLegalHalfType() const
Determine whether _Float16 is supported on this target.
virtual StringRef getABI() const
Get the ABI currently in use.
static ABIArgInfo getDirect(llvm::Type *T=nullptr, unsigned Offset=0, llvm::Type *Padding=nullptr, bool CanBeFlattened=true)
static bool hasScalarEvaluationKind(QualType T)
bool getHasRegParm() const
bool isBitField() const
Determines whether this field is a bitfield.
static ABIArgInfo getExpandWithPadding(bool PaddingInReg, llvm::Type *Padding)
static bool appendRecordType(SmallStringEnc &Enc, const RecordType *RT, const CodeGen::CodeGenModule &CGM, TypeStringCache &TSC, const IdentifierInfo *ID)
Appends structure and union types to Enc and adds encoding to cache.
Address CreateElementBitCast(Address Addr, llvm::Type *Ty, const llvm::Twine &Name="")
Cast the element type of the given address to a different type, preserving information like the align...
CharUnits - This is an opaque type for sizes expressed in character units.
const ArrayType * getAsArrayTypeUnsafe() const
A variant of getAs<> for array types which silently discards qualifiers from the outermost type...
CharUnits getAlignment() const
Return the alignment of this pointer.
static void rewriteInputConstraintReferences(unsigned FirstIn, unsigned NumNewOuts, std::string &AsmString)
Rewrite input constraint references after adding some output constraints.
static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty)
bool isVolatileQualified() const
Determine whether this type is volatile-qualified.
static bool requiresAMDGPUProtectedVisibility(const Decl *D, llvm::GlobalValue *GV)
const_arg_iterator arg_begin() const
static ABIArgInfo getExtendInReg(QualType Ty, llvm::Type *T=nullptr)
llvm::CallInst * CreateMemCpy(Address Dest, Address Src, llvm::Value *Size, bool IsVolatile=false)
ABIArgInfo - Helper class to encapsulate information about how a specific C type should be passed to ...
Scope - A scope is a transient data structure that is used while parsing the program.
const Type * getTypePtr() const
Retrieves a pointer to the underlying (unqualified) type.
field_iterator field_begin() const
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
static bool BitsContainNoUserData(QualType Ty, unsigned StartBit, unsigned EndBit, ASTContext &Context)
BitsContainNoUserData - Return true if the specified [start,end) bit range is known to either be off ...
static ABIArgInfo getExpand()
CharUnits getPointerAlign() const
bool isFloat128Type() const
bool isScalarType() const
llvm::AllocaInst * CreateTempAlloca(llvm::Type *Ty, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr)
CreateTempAlloca - This creates an alloca and inserts it into the entry block if ArraySize is nullptr...
unsigned getTypeUnadjustedAlign(QualType T) const
Return the ABI-specified natural alignment of a (complete) type T, before alignment adjustments...
const T * getTypePtr() const
Retrieve the underlying type pointer, which refers to a canonical type.
static QualType useFirstFieldIfTransparentUnion(QualType Ty)
Pass transparent unions as if they were the type of the first element.
virtual llvm::Value * performAddrSpaceCast(CodeGen::CodeGenFunction &CGF, llvm::Value *V, LangAS SrcAddr, LangAS DestAddr, llvm::Type *DestTy, bool IsNonNull=false) const
Perform address space cast of an expression of pointer type.
bool isTypeConstant(QualType QTy, bool ExcludeCtorDtor)
isTypeConstant - Determine whether an object of this type can be emitted as a constant.
ExtInfo withCallingConv(CallingConv cc) const
CodeGenOptions::SignReturnAddressScope SignReturnAddr
Represents a K&R-style 'int foo()' function, which has no information available about its arguments...
static bool ContainsFloatAtOffset(llvm::Type *IRType, unsigned IROffset, const llvm::DataLayout &TD)
ContainsFloatAtOffset - Return true if the specified LLVM IR type has a float member at the specified...
static ABIArgInfo getSignExtend(QualType Ty, llvm::Type *T=nullptr)
CanQualType getReturnType() const
bool isPromotableIntegerType() const
More type predicates useful for type checking/promotion.
static CharUnits One()
One - Construct a CharUnits quantity of one.
Contains information gathered from parsing the contents of TargetAttr.
ASTContext & getContext() const
Represents a prototype with parameter type info, e.g.
virtual CodeGen::Address EmitMSVAArg(CodeGen::CodeGenFunction &CGF, CodeGen::Address VAListAddr, QualType Ty) const
Emit the target dependent code to load a value of.
const TargetCodeGenInfo & getTargetCodeGenInfo()
bool isComplexType() const
isComplexType() does not include complex integers (a GCC extension).
static bool extractFieldType(SmallVectorImpl< FieldEncoding > &FE, const RecordDecl *RD, const CodeGen::CodeGenModule &CGM, TypeStringCache &TSC)
Helper function for appendRecordType().
virtual void getDependentLibraryOption(llvm::StringRef Lib, llvm::SmallString< 24 > &Opt) const
Gets the linker options necessary to link a dependent library on this platform.
static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder, llvm::Value *Array, llvm::Value *Value, unsigned FirstIndex, unsigned LastIndex)
static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context)
void setAddress(Address address)
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
const llvm::fltSemantics & getLongDoubleFormat() const
Exposes information about the current target.
CodeGen::ABIArgInfo getNaturalAlignIndirect(QualType Ty, bool ByRef=true, bool Realign=false, llvm::Type *Padding=nullptr) const
A convenience method to return an indirect ABIArgInfo with an expected alignment equal to the ABI ali...
QualType getElementType() const
QualType getVectorType(QualType VectorType, unsigned NumElts, VectorType::VectorKind VecKind) const
Return the unique reference to a vector type of the specified element type and size.
static ABIArgInfo getExtend(QualType Ty, llvm::Type *T=nullptr)
Address getAddress(CodeGenFunction &CGF) const
const IdentifierInfo * getBaseTypeIdentifier() const
Retrieves a pointer to the name of the base type.
const T * castAs() const
Member-template castAs<specific type>.
static bool appendBuiltinType(SmallStringEnc &Enc, const BuiltinType *BT)
Appends built-in types to Enc.
field_iterator field_end() const
virtual bool classifyReturnType(CGFunctionInfo &FI) const =0
If the C++ ABI requires the given type be returned in a particular way, this method sets RetAI and returns true.
llvm::PointerType * getType() const
Return the type of the pointer value.
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
bool isAnyComplexType() const
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
static bool getTypeString(SmallStringEnc &Enc, const Decl *D, CodeGen::CodeGenModule &CGM, TypeStringCache &TSC)
The XCore ABI includes a type information section that communicates symbol type information to the linker.
unsigned getFieldCount() const
getFieldCount - Get the number of fields in the layout.
EnumDecl * getDefinition() const
llvm::CallingConv::ID RuntimeCC
static bool classifyReturnType(const CGCXXABI &CXXABI, CGFunctionInfo &FI, const ABIInfo &Info)
llvm::LLVMContext & getLLVMContext()
bool isSignedIntegerType() const
Return true if this is an integer type that is signed, according to C99 6.2.5p4 [char, signed char, short, int, long..], or an enum decl which has a signed representation.
llvm::IntegerType * Int32Ty
Objects with "protected" visibility are seen by the dynamic linker but always dynamically resolve to an object within this shared object.
CodeGen::ABIArgInfo getNaturalAlignIndirectInReg(QualType Ty, bool Realign=false) const
const CodeGenOptions & getCodeGenOpts() const
bool canHaveCoerceToType() const
CharUnits alignmentOfArrayElement(CharUnits elementSize) const
Given that this is the alignment of the first element of an array, return the minimum alignment of any element in the array.
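A self-contained model of that rule (an assumption about the arithmetic, not a quote of the implementation): element i sits at offset i * elementSize, so the guaranteed alignment of an arbitrary element is the largest power of two dividing both the first element's alignment and the element size.

#include <cstdint>

static uint64_t minArrayElementAlign(uint64_t FirstElemAlign,
                                     uint64_t ElementSize) {
  uint64_t Combined = FirstElemAlign | ElementSize;
  // Lowest set bit = largest power of two dividing both quantities.
  return Combined & (~Combined + 1);
}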
bool getIndirectByVal() const
static Address emitVoidPtrDirectVAArg(CodeGenFunction &CGF, Address VAListAddr, llvm::Type *DirectTy, CharUnits DirectSize, CharUnits DirectAlign, CharUnits SlotSize, bool AllowHigherAlign)
Emit va_arg for a platform using the common void* representation, where arguments are simply emitted in a buffer at increasing offsets.
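A minimal, self-contained model of that scheme (illustrative only; the real emitter produces LLVM IR rather than manipulating host pointers):

#include <cstddef>
#include <cstdint>

static char *vaArgSlot(char *&List, size_t Size, size_t Align,
                       size_t SlotSize, bool AllowHigherAlign) {
  uintptr_t P = reinterpret_cast<uintptr_t>(List);
  if (AllowHigherAlign && Align > SlotSize)
    P = (P + Align - 1) & ~(uintptr_t)(Align - 1); // overalign the current slot
  char *Result = reinterpret_cast<char *>(P);
  size_t Advance = (Size + SlotSize - 1) / SlotSize * SlotSize; // whole slots
  List = Result + Advance; // bump the va_list past the consumed argument
  return Result;
}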
Represents a GCC generic vector type.
ArraySizeModifier getSizeModifier() const
virtual unsigned getSizeOfUnwindException() const
Determines the size of struct _Unwind_Exception on this platform, in 8-bit units.
Implements C++ ABI-specific semantic analysis functions.
const TargetInfo & getTarget() const
const LangOptions & getLangOpts() const
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Attempt to be ABI-compatible with code generated by Clang 3.8.x (SVN r257626).
virtual llvm::Constant * getNullPointer(const CodeGen::CodeGenModule &CGM, llvm::PointerType *T, QualType QT) const
Get target specific null pointer.
bool BranchTargetEnforcement
CallingConv
CallingConv - Specifies the calling convention that a function uses.
bool isConstQualified() const
Determine whether this type is const-qualified.
The l-value was considered opaque, so the alignment was determined from a type.
RecordDecl * getDecl() const
Pass it as a pointer to temporary memory.
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
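Since the offset comes back in bits, ABI code typically converts it before comparing against CharUnits quantities; a hedged sketch (RD and FieldNo assumed in scope):

const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
CharUnits Offset =
    getContext().toCharUnitsFromBits(Layout.getFieldOffset(FieldNo));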
bool isStructureOrClassType() const
static void appendQualifier(SmallStringEnc &Enc, QualType QT)
Appends type's qualifier to Enc.
static Address emitMergePHI(CodeGenFunction &CGF, Address Addr1, llvm::BasicBlock *Block1, Address Addr2, llvm::BasicBlock *Block2, const llvm::Twine &Name="")
static bool isEmptyField(ASTContext &Context, const FieldDecl *FD, bool AllowArrays)
isEmptyField - Return true iff the field is "empty", that is, it is an unnamed bit-field or an (array of) empty record(s).
Address CreateBitCast(Address Addr, llvm::Type *Ty, const llvm::Twine &Name="")
QualType getCanonicalType() const
bool isBuiltinType() const
Helper methods to distinguish type categories.
QualType getReturnType() const
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of enums.
bool isSRetAfterThis() const
LangAS getAddressSpace() const
Return the address space of this type.
unsigned getRegParm() const
bool isUnnamedBitfield() const
Determines whether this is an unnamed bitfield.
static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays)
isEmptyRecord - Return true iff a structure contains only empty fields.
static bool appendFunctionType(SmallStringEnc &Enc, const FunctionType *FT, const CodeGen::CodeGenModule &CGM, TypeStringCache &TSC)
Appends a function encoding to Enc, calling appendType for the return type and the arguments...
static CharUnits getTypeStoreSize(CodeGenModule &CGM, llvm::Type *type)
SyncScope
Defines sync scope values used internally by clang.
Address CreateConstArrayGEP(Address Addr, uint64_t Index, const llvm::Twine &Name="")
Given addr = [n x T]*, produce name = getelementptr inbounds addr, i64 0, i64 index, where i64 is actually the target word size.
const llvm::DataLayout & getDataLayout() const
void setArgStruct(llvm::StructType *Ty, CharUnits Align)
virtual void computeInfo(CodeGen::CGFunctionInfo &FI) const =0
const ConstantArrayType * getAsConstantArrayType(QualType T) const
const_arg_iterator arg_end() const
CoerceAndExpand - Only valid for aggregate argument types.
bool isSignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is signed or an enumeration type whose underlying type is a signed integer type.
bool isMemberFunctionPointerType() const
bool canPassInRegisters() const
Determine whether this class can be passed in registers.
bool isTargetAddressSpace(LangAS AS)
EnumDecl * getDecl() const
bool isVectorType() const
TargetCodeGenInfo - This class organizes various target-specific code-generation issues, like target-specific attributes, builtins and so on.
InAlloca - Pass the argument directly using the LLVM inalloca attribute.
X86AVXABILevel
The AVX ABI level for X86 targets.
llvm::CallingConv::ID getRuntimeCC() const
Return the calling convention to use for system runtime functions.
bool hasFlexibleArrayMember() const
static llvm::Value * emitRoundPointerUpToAlignment(CodeGenFunction &CGF, llvm::Value *Ptr, CharUnits Align)
CanProxy< U > getAs() const
Retrieve a canonical type pointer with a different static type, upcasting or downcasting as needed.
std::pair< CharUnits, CharUnits > getTypeInfoInChars(const Type *T) const
llvm::Type * getPaddingType() const
StringRef getName() const
Return the actual identifier string.
StringRef BranchProtection
virtual CodeGen::Address EmitVAArg(CodeGen::CodeGenFunction &CGF, CodeGen::Address VAListAddr, QualType Ty) const =0
EmitVAArg - Emit the target-dependent code to load a value of type Ty from the __builtin_va_list pointed to by VAListAddr.
CGFunctionInfo - Class to encapsulate the information about a function definition.
This class organizes the cross-function state that is used while generating LLVM code.
bool isFloat16Type() const
virtual LangAS getGlobalVarAddressSpace(CodeGenModule &CGM, const VarDecl *D) const
Get target favored AST address space of a global variable for languages other than OpenCL and CUDA.
ExtInfo getExtInfo() const
A refining implementation of ABIInfo for targets that support swiftcall.
static bool addBaseAndFieldSizes(ASTContext &Context, const CXXRecordDecl *RD, uint64_t &Size)
virtual llvm::Function * createEnqueuedBlockKernel(CodeGenFunction &CGF, llvm::Function *BlockInvokeFunc, llvm::Value *BlockLiteral) const
Create an OpenCL kernel for an enqueued block.
static ABIArgInfo getDirectInReg(llvm::Type *T=nullptr)
virtual bool isHomogeneousAggregateSmallEnough(const Type *Base, uint64_t Members) const
llvm::LoadInst * CreateAlignedLoad(llvm::Value *Addr, CharUnits Align, const llvm::Twine &Name="")
static bool appendArrayType(SmallStringEnc &Enc, QualType QT, const ArrayType *AT, const CodeGen::CodeGenModule &CGM, TypeStringCache &TSC, StringRef NoSizeEnc)
Appends array encoding to Enc before calling appendType for the element.
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
llvm::IntegerType * IntPtrTy
virtual bool isNoProtoCallVariadic(const CodeGen::CallArgList &args, const FunctionNoProtoType *fnType) const
Determine whether a call to an unprototyped function under the given calling convention should use the variadic calling convention or the non-variadic convention.
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
bool isAggregateType() const
Determines whether the type is a C++ aggregate type or C aggregate or union type.
virtual bool hasFloat16Type() const
Determine whether the _Float16 type is supported on this target.
llvm::Module & getModule() const
virtual bool isLegalVectorTypeForSwift(CharUnits totalSize, llvm::Type *eltTy, unsigned elts) const
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
specific_decl_iterator - Iterates over a subrange of declarations stored in a DeclContext, providing only those that are of type SpecificDecl (or a class derived from it).
unsigned getIntWidth(QualType T) const
virtual void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const
setTargetAttributes - Provides a convenient hook to handle extra target-specific attributes for the given global.
virtual llvm::Optional< LangAS > getConstantAddressSpace() const
Return an AST address space which can be used opportunistically for constant global memory.
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of structs/unions/classes.
Complex values, per C99 6.2.5p11.
Pass it using the normal C aggregate rules for the ABI, potentially introducing extra copies and passing some or all of it in registers.
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
Implements C++ ABI-specific code generation functions.
llvm::Type * getElementType() const
Return the type of the values stored in this address.
This class organizes the cross-module state that is used while lowering AST types to LLVM types.
llvm::PointerType * Int8PtrTy
CodeGen::CGCXXABI & getCXXABI() const
CodeGenOptions - Track various options which control how the code is optimized and passed to the backend.
Expand - Only valid for aggregate argument types.
Internal linkage, which indicates that the entity can be referred to from within the translation unit only.
virtual bool hasFloat128Type() const
Determine whether the __float128 type is supported on this target.
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
virtual bool hasInt128Type() const
Determine whether the __int128 type is supported on this target.
static bool isArgInAlloca(const ABIArgInfo &Info)
static ABIArgInfo getInAlloca(unsigned FieldIndex)
ABIArgInfo & getReturnInfo()
Represents a base class of a C++ class.
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
Pass it on the stack using its defined layout.
static CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT, CGCXXABI &CXXABI)
CanQualType getCanonicalType(QualType T) const
Return the canonical (structural) type corresponding to the specified potentially non-canonical type.
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
bool isMultipleOf(CharUnits N) const
Test whether this is a multiple of the other value.
int64_t toBits(CharUnits CharSize) const
Convert a size in characters to a size in bits.
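An illustrative round-trip between the two unit systems (Ty assumed to be a complete type in scope):

CharUnits SizeChars = getContext().getTypeSizeInChars(Ty);
int64_t SizeBits = getContext().toBits(SizeChars); // equals getTypeSize(Ty) here
assert(getContext().toCharUnitsFromBits(SizeBits) == SizeChars);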
static ABIArgInfo getCoerceAndExpand(llvm::StructType *coerceToType, llvm::Type *unpaddedCoerceToType)
CallingConv getCallConv() const
unsigned getCallingConvention() const
getCallingConvention - Return the user-specified calling convention, which has been translated into an LLVM calling convention.
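A hedged sketch of the kind of clang-to-LLVM translation implied here (toLLVMCallConv is an illustrative name, not the actual clang function):

static llvm::CallingConv::ID toLLVMCallConv(CallingConv CC) {
  switch (CC) {
  case CC_X86StdCall:  return llvm::CallingConv::X86_StdCall;
  case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
  case CC_AAPCS:       return llvm::CallingConv::ARM_AAPCS;
  default:             return llvm::CallingConv::C; // fall back to the C convention
  }
}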
Address CreateConstByteGEP(Address Addr, CharUnits Offset, const llvm::Twine &Name="")
Represents a C++ struct/union/class.
void EmitBranch(llvm::BasicBlock *Block)
EmitBranch - Emit a branch to the specified basic block from the current insert block, taking care to avoid creation of branches from dummy blocks.
TypeInfo getTypeInfo(const Type *T) const
Get the size and alignment of the specified complete type in bits.
llvm::Type * ConvertType(QualType T)
virtual RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const =0
Returns how an argument of the given record type should be passed.
QualType getPointerType(QualType T) const
Return the uniqued reference to the type for a pointer to the specified type.
This class is used for builtin types like 'int'.
Attempt to be ABI-compatible with code generated by Clang 9.0.x (SVN r351319).
static Address emitVoidPtrVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType ValueTy, bool IsIndirect, std::pair< CharUnits, CharUnits > ValueInfo, CharUnits SlotSizeAndAlign, bool AllowHigherAlign)
Emit va_arg for a platform using the common void* representation, where arguments are simply emitted in a buffer at increasing offsets.
ABIInfo - Target-specific hooks for defining how a type should be passed or returned from functions.
static bool appendPointerType(SmallStringEnc &Enc, const PointerType *PT, const CodeGen::CodeGenModule &CGM, TypeStringCache &TSC)
Appends a pointer encoding to Enc before calling appendType for the pointee.
uint64_t getTargetNullPointerValue(QualType QT) const
Get target-dependent integer value for null pointer which is used for constant folding.
virtual bool isHomogeneousAggregateBaseType(QualType Ty) const
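As a concrete illustration of the homogeneous-aggregate notion these two hooks support (AAPCS-style floating-point HFAs assumed):

// A struct whose members are all the same floating-point type, e.g.
struct Vec3 { float x, y, z; };
// classifies as a homogeneous aggregate with Base = float and Members = 3,
// provided isHomogeneousAggregateBaseType(float) returns true and
// isHomogeneousAggregateSmallEnough(float, 3) accepts the member count.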
bool isPointerType() const
unsigned getNumRequiredArgs() const
unsigned getDirectOffset() const
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
llvm::StoreInst * CreateAlignedStore(llvm::Value *Val, llvm::Value *Addr, CharUnits Align, bool IsVolatile=false)
void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI)
bool isFloatingType() const
LValue - This represents an lvalue reference.
llvm::Type * getCoerceToType() const
void setInAllocaSRet(bool SRet)
unsigned getTargetAddressSpace(QualType T) const
RecordArgABI
Specify how one should pass an argument of a record type.
Address CreatePointerBitCastOrAddrSpaceCast(Address Addr, llvm::Type *Ty, const llvm::Twine &Name="")
static bool isIntegerLikeType(QualType Ty, ASTContext &Context, llvm::LLVMContext &VMContext)
static bool isSSEVectorType(ASTContext &Context, QualType Ty)
CallArgList - Type for representing both the value and type of arguments in a call.
static bool PPC64_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, llvm::Value *Address)
Represents the canonical version of C arrays with a specified constant size.
bool getIndirectRealign() const
static ABIArgInfo getIndirect(CharUnits Alignment, bool ByVal=true, bool Realign=false, llvm::Type *Padding=nullptr)
Attr - This represents one attribute.
QualType getIntTypeForBitwidth(unsigned DestWidth, unsigned Signed) const
getIntTypeForBitwidth - Return the integer QualType with the specified bitwidth and signedness.
const llvm::Triple & getTriple() const