25 #include "llvm/ADT/StringExtras.h" 26 #include "llvm/ADT/StringSwitch.h" 27 #include "llvm/ADT/Triple.h" 28 #include "llvm/ADT/Twine.h" 29 #include "llvm/IR/DataLayout.h" 30 #include "llvm/IR/Type.h" 31 #include "llvm/Support/raw_ostream.h" 34 using namespace clang;
35 using namespace CodeGen;
53 llvm::LLVMContext &LLVMContext) {
57 llvm::Type *IntType = llvm::Type::getIntNTy(LLVMContext, Alignment);
58 const uint64_t NumElements = (Size + Alignment - 1) / Alignment;
68 for (
unsigned I = FirstIndex; I <= LastIndex; ++I) {
70 Builder.CreateConstInBoundsGEP1_32(Builder.getInt8Ty(), Array, I);
84 ByRef, Realign, Padding);
115 unsigned maxAllRegisters) {
116 unsigned intCount = 0, fpCount = 0;
118 if (
type->isPointerTy()) {
120 }
else if (
auto intTy = dyn_cast<llvm::IntegerType>(
type)) {
122 intCount += (intTy->getBitWidth() + ptrWidth - 1) / ptrWidth;
124 assert(
type->isVectorTy() ||
type->isFloatingPointTy());
129 return (intCount + fpCount > maxAllRegisters);
134 unsigned numElts)
const {
164 if (!isa<CXXRecordDecl>(RT->getDecl()) &&
165 !RT->getDecl()->canPassInRegisters()) {
178 if (UD->
hasAttr<TransparentUnionAttr>()) {
179 assert(!UD->
field_empty() &&
"sema created an empty transparent union");
217 uint64_t Members)
const {
222 raw_ostream &OS = llvm::errs();
223 OS <<
"(ABIArgInfo Kind=";
226 OS <<
"Direct Type=";
239 OS <<
"InAlloca Offset=" << getInAllocaFieldIndex();
242 OS <<
"Indirect Align=" << getIndirectAlign().getQuantity()
243 <<
" ByVal=" << getIndirectByVal()
244 <<
" Realign=" << getIndirectRealign();
249 case CoerceAndExpand:
250 OS <<
"CoerceAndExpand Type=";
251 getCoerceAndExpandType()->print(OS);
264 PtrAsInt = CGF.
Builder.CreateAdd(PtrAsInt,
266 PtrAsInt = CGF.
Builder.CreateAnd(PtrAsInt,
268 PtrAsInt = CGF.
Builder.CreateIntToPtr(PtrAsInt,
270 Ptr->getName() +
".aligned");
294 bool AllowHigherAlign) {
304 if (AllowHigherAlign && DirectAlign > SlotSize) {
321 !DirectTy->isStructTy()) {
344 std::pair<CharUnits, CharUnits> ValueInfo,
346 bool AllowHigherAlign) {
353 DirectSize = ValueInfo.first;
354 DirectAlign = ValueInfo.second;
360 DirectTy = DirectTy->getPointerTo(0);
363 DirectSize, DirectAlign,
376 Address Addr1, llvm::BasicBlock *Block1,
377 Address Addr2, llvm::BasicBlock *Block2,
378 const llvm::Twine &Name =
"") {
380 llvm::PHINode *PHI = CGF.
Builder.CreatePHI(Addr1.
getType(), 2, Name);
431 return llvm::CallingConv::SPIR_KERNEL;
435 llvm::PointerType *T,
QualType QT)
const {
436 return llvm::ConstantPointerNull::get(T);
443 "Address space agnostic languages only");
452 if (
auto *C = dyn_cast<llvm::Constant>(Src))
453 return performAddrSpaceCast(CGF.
CGM, C, SrcAddr, DestAddr, DestTy);
463 return llvm::ConstantExpr::getPointerCast(Src, DestTy);
468 return C.getOrInsertSyncScopeID(
"");
486 if (AT->getSize() == 0)
488 FT = AT->getElementType();
499 if (isa<CXXRecordDecl>(RT->
getDecl()))
517 if (
const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
518 for (
const auto &I : CXXRD->bases())
522 for (
const auto *I : RD->
fields())
545 const Type *Found =
nullptr;
548 if (
const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
549 for (
const auto &I : CXXRD->bases()) {
567 for (
const auto *FD : RD->
fields()) {
581 if (AT->getSize().getZExtValue() != 1)
583 FT = AT->getElementType();
619 "Unexpected PaddingType seen in arginfo in generic VAArg emitter!");
622 "Unexpected IndirectRealign seen in arginfo in generic VAArg emitter!");
631 return Address(Addr, TyAlignForABI);
634 "Unexpected ArgInfo Kind in generic VAArg emitter!");
637 "Unexpected InReg seen in arginfo in generic VAArg emitter!");
639 "Unexpected PaddingType seen in arginfo in generic VAArg emitter!");
641 "Unexpected DirectOffset seen in arginfo in generic VAArg emitter!");
643 "Unexpected CoerceToType seen in arginfo in generic VAArg emitter!");
656 class DefaultABIInfo :
public ABIInfo {
667 I.info = classifyArgumentType(I.type);
696 Ty = EnumTy->getDecl()->getIntegerType();
711 RetTy = EnumTy->getDecl()->getIntegerType();
724 DefaultABIInfo defaultInfo;
741 Arg.info = classifyArgumentType(Arg.type);
748 bool asReturnValue)
const override {
752 bool isSwiftErrorInRegister()
const override {
762 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
764 if (
auto *FD = dyn_cast_or_null<FunctionDecl>(D)) {
765 llvm::Function *Fn = cast<llvm::Function>(GV);
766 if (!FD->doesThisDeclarationHaveABody() && !FD->hasPrototype())
767 Fn->addFnAttr(
"no-prototype");
792 return defaultInfo.classifyArgumentType(Ty);
812 return defaultInfo.classifyReturnType(RetTy);
830 class PNaClABIInfo :
public ABIInfo {
875 Ty = EnumTy->getDecl()->getIntegerType();
895 RetTy = EnumTy->getDecl()->getIntegerType();
904 return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
905 cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
906 IRType->getScalarSizeInBits() != 64;
910 StringRef Constraint,
912 bool IsMMXCons = llvm::StringSwitch<bool>(Constraint)
913 .Cases(
"y",
"&y",
"^Ym",
true)
915 if (IsMMXCons && Ty->isVectorTy()) {
916 if (cast<llvm::VectorType>(Ty)->getBitWidth() != 64) {
932 if (BT->isFloatingPoint() && BT->getKind() != BuiltinType::Half) {
933 if (BT->getKind() == BuiltinType::LongDouble) {
935 &llvm::APFloat::x87DoubleExtended())
944 if (VecSize == 128 || VecSize == 256 || VecSize == 512)
952 static bool isX86VectorCallAggregateSmallEnough(uint64_t NumMembers) {
953 return NumMembers <= 4;
// Initialize per-call classification state for calling convention `CC`:
// no free integer (FreeRegs) or SSE (FreeSSERegs) registers are assigned
// yet; the caller seeds them based on the convention afterwards.
970 CCState(
unsigned CC) : CC(CC), FreeRegs(0), FreeSSERegs(0) {}
974 unsigned FreeSSERegs;
979 VectorcallMaxParamNumAsReg = 6
989 static const unsigned MinABIStackAlignInBytes = 4;
991 bool IsDarwinVectorABI;
992 bool IsRetSmallStructInRegABI;
993 bool IsWin32StructABI;
996 unsigned DefaultNumRegisterParameters;
/// Return true if \p Size (in bits) is exactly the width of a native
/// x86 scalar register class usable for returning a value (8, 16, 32,
/// or 64 bits).
static bool isRegisterSize(unsigned Size) {
  switch (Size) {
  case 8:
  case 16:
  case 32:
  case 64:
    return true;
  default:
    return false;
  }
}
1004 return isX86VectorTypeForVectorCall(
getContext(), Ty);
1008 uint64_t NumMembers)
const override {
1010 return isX86VectorCallAggregateSmallEnough(NumMembers);
1022 unsigned getTypeStackAlignInBytes(
QualType Ty,
unsigned Align)
const;
1030 bool updateFreeRegs(
QualType Ty, CCState &State)
const;
1032 bool shouldAggregateUseDirect(
QualType Ty, CCState &State,
bool &InReg,
1033 bool &NeedsPadding)
const;
1034 bool shouldPrimitiveUseInReg(
QualType Ty, CCState &State)
const;
1036 bool canExpandIndirectArgument(
QualType Ty)
const;
1046 bool &UsedInAlloca)
const;
1055 bool RetSmallStructInRegABI,
bool Win32StructABI,
1056 unsigned NumRegisterParameters,
bool SoftFloatABI)
1057 :
SwiftABIInfo(CGT), IsDarwinVectorABI(DarwinVectorABI),
1058 IsRetSmallStructInRegABI(RetSmallStructInRegABI),
1059 IsWin32StructABI(Win32StructABI),
1060 IsSoftFloatABI(SoftFloatABI),
1062 DefaultNumRegisterParameters(NumRegisterParameters) {}
1065 bool asReturnValue)
const override {
1073 bool isSwiftErrorInRegister()
const override {
1082 bool RetSmallStructInRegABI,
bool Win32StructABI,
1083 unsigned NumRegisterParameters,
bool SoftFloatABI)
1085 CGT, DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
1086 NumRegisterParameters, SoftFloatABI)) {}
1088 static bool isStructReturnInRegABI(
1091 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
1104 StringRef Constraint,
1106 return X86AdjustInlineAsmType(CGF, Constraint, Ty);
1110 std::string &Constraints,
1111 std::vector<llvm::Type *> &ResultRegTypes,
1112 std::vector<llvm::Type *> &ResultTruncRegTypes,
1113 std::vector<LValue> &ResultRegDests,
1114 std::string &AsmString,
1115 unsigned NumOutputs)
const override;
1119 unsigned Sig = (0xeb << 0) |
1123 return llvm::ConstantInt::get(CGM.
Int32Ty, Sig);
1126 StringRef getARCRetainAutoreleasedReturnValueMarker()
const override {
1127 return "movl\t%ebp, %ebp" 1128 "\t\t// marker for objc_retainAutoreleaseReturnValue";
1143 unsigned NumNewOuts,
1144 std::string &AsmString) {
1146 llvm::raw_string_ostream OS(Buf);
1148 while (Pos < AsmString.size()) {
1149 size_t DollarStart = AsmString.find(
'$', Pos);
1150 if (DollarStart == std::string::npos)
1151 DollarStart = AsmString.size();
1152 size_t DollarEnd = AsmString.find_first_not_of(
'$', DollarStart);
1153 if (DollarEnd == std::string::npos)
1154 DollarEnd = AsmString.size();
1155 OS << StringRef(&AsmString[Pos], DollarEnd - Pos);
1157 size_t NumDollars = DollarEnd - DollarStart;
1158 if (NumDollars % 2 != 0 && Pos < AsmString.size()) {
1160 size_t DigitStart = Pos;
1161 size_t DigitEnd = AsmString.find_first_not_of(
"0123456789", DigitStart);
1162 if (DigitEnd == std::string::npos)
1163 DigitEnd = AsmString.size();
1164 StringRef OperandStr(&AsmString[DigitStart], DigitEnd - DigitStart);
1165 unsigned OperandIndex;
1166 if (!OperandStr.getAsInteger(10, OperandIndex)) {
1167 if (OperandIndex >= FirstIn)
1168 OperandIndex += NumNewOuts;
1176 AsmString = std::move(OS.str());
1180 void X86_32TargetCodeGenInfo::addReturnRegisterOutputs(
1182 std::vector<llvm::Type *> &ResultRegTypes,
1183 std::vector<llvm::Type *> &ResultTruncRegTypes,
1184 std::vector<LValue> &ResultRegDests, std::string &AsmString,
1185 unsigned NumOutputs)
const {
1190 if (!Constraints.empty())
1192 if (RetWidth <= 32) {
1193 Constraints +=
"={eax}";
1194 ResultRegTypes.push_back(CGF.
Int32Ty);
1197 Constraints +=
"=A";
1198 ResultRegTypes.push_back(CGF.
Int64Ty);
1203 ResultTruncRegTypes.push_back(CoerceTy);
1207 CoerceTy->getPointerTo()));
1208 ResultRegDests.push_back(ReturnSlot);
1215 bool X86_32ABIInfo::shouldReturnTypeInRegister(
QualType Ty,
1221 if ((IsMCUABI && Size > 64) || (!IsMCUABI && !isRegisterSize(Size)))
1227 if (Size == 64 || Size == 128)
1242 return shouldReturnTypeInRegister(AT->getElementType(), Context);
1246 if (!RT)
return false;
1258 if (!shouldReturnTypeInRegister(FD->getType(), Context))
1267 Ty = CTy->getElementType();
1277 return Size == 32 || Size == 64;
1282 for (
const auto *FD : RD->
fields()) {
1292 if (FD->isBitField())
1317 bool X86_32ABIInfo::canExpandIndirectArgument(
QualType Ty)
const {
1324 if (
const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
1325 if (!IsWin32StructABI) {
1328 if (!CXXRD->isCLike())
1332 if (CXXRD->isDynamicClass())
1349 if (State.FreeRegs) {
1358 CCState &State)
const {
1363 uint64_t NumElts = 0;
1364 if ((State.CC == llvm::CallingConv::X86_VectorCall ||
1365 State.CC == llvm::CallingConv::X86_RegCall) &&
1373 if (IsDarwinVectorABI) {
1385 if ((Size == 8 || Size == 16 || Size == 32) ||
1386 (Size == 64 && VT->getNumElements() == 1))
1390 return getIndirectReturnResult(RetTy, State);
1399 if (RT->getDecl()->hasFlexibleArrayMember())
1400 return getIndirectReturnResult(RetTy, State);
1405 return getIndirectReturnResult(RetTy, State);
1413 if (shouldReturnTypeInRegister(RetTy,
getContext())) {
1422 if ((!IsWin32StructABI && SeltTy->isRealFloatingType())
1423 || SeltTy->hasPointerRepresentation())
1431 return getIndirectReturnResult(RetTy, State);
1436 RetTy = EnumTy->getDecl()->getIntegerType();
1453 if (
const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
1454 for (
const auto &I : CXXRD->bases())
1458 for (
const auto *i : RD->
fields()) {
1471 unsigned X86_32ABIInfo::getTypeStackAlignInBytes(
QualType Ty,
1472 unsigned Align)
const {
1475 if (Align <= MinABIStackAlignInBytes)
1479 if (!IsDarwinVectorABI) {
1481 return MinABIStackAlignInBytes;
1489 return MinABIStackAlignInBytes;
1493 CCState &State)
const {
1495 if (State.FreeRegs) {
1505 unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
1506 if (StackAlign == 0)
1511 bool Realign = TypeAlign > StackAlign;
1516 X86_32ABIInfo::Class X86_32ABIInfo::classify(
QualType Ty)
const {
1523 if (K == BuiltinType::Float || K == BuiltinType::Double)
1529 bool X86_32ABIInfo::updateFreeRegs(
QualType Ty, CCState &State)
const {
1530 if (!IsSoftFloatABI) {
1531 Class C = classify(Ty);
1537 unsigned SizeInRegs = (Size + 31) / 32;
1539 if (SizeInRegs == 0)
1543 if (SizeInRegs > State.FreeRegs) {
1552 if (SizeInRegs > State.FreeRegs || SizeInRegs > 2)
1556 State.FreeRegs -= SizeInRegs;
1560 bool X86_32ABIInfo::shouldAggregateUseDirect(
QualType Ty, CCState &State,
1562 bool &NeedsPadding)
const {
1569 NeedsPadding =
false;
1572 if (!updateFreeRegs(Ty, State))
1578 if (State.CC == llvm::CallingConv::X86_FastCall ||
1579 State.CC == llvm::CallingConv::X86_VectorCall ||
1580 State.CC == llvm::CallingConv::X86_RegCall) {
1581 if (
getContext().getTypeSize(Ty) <= 32 && State.FreeRegs)
1582 NeedsPadding =
true;
1590 bool X86_32ABIInfo::shouldPrimitiveUseInReg(
QualType Ty, CCState &State)
const {
1591 if (!updateFreeRegs(Ty, State))
1597 if (State.CC == llvm::CallingConv::X86_FastCall ||
1598 State.CC == llvm::CallingConv::X86_VectorCall ||
1599 State.CC == llvm::CallingConv::X86_RegCall) {
1611 CCState &State)
const {
1621 return getIndirectResult(Ty,
false, State);
1631 uint64_t NumElts = 0;
1632 if (State.CC == llvm::CallingConv::X86_RegCall &&
1635 if (State.FreeSSERegs >= NumElts) {
1636 State.FreeSSERegs -= NumElts;
1641 return getIndirectResult(Ty,
false, State);
1648 return getIndirectResult(Ty,
true, State);
1655 llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
1656 bool NeedsPadding =
false;
1658 if (shouldAggregateUseDirect(Ty, State, InReg, NeedsPadding)) {
1661 llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
1667 llvm::IntegerType *PaddingType = NeedsPadding ? Int32 :
nullptr;
1675 if (
getContext().getTypeSize(Ty) <= 4 * 32 &&
1676 (!IsMCUABI || State.FreeRegs == 0) && canExpandIndirectArgument(Ty))
1678 State.CC == llvm::CallingConv::X86_FastCall ||
1679 State.CC == llvm::CallingConv::X86_VectorCall ||
1680 State.CC == llvm::CallingConv::X86_RegCall,
1683 return getIndirectResult(Ty,
true, State);
1689 if (IsDarwinVectorABI) {
1691 if ((Size == 8 || Size == 16 || Size == 32) ||
1692 (Size == 64 && VT->getNumElements() == 1))
1705 Ty = EnumTy->getDecl()->getIntegerType();
1707 bool InReg = shouldPrimitiveUseInReg(Ty, State);
1720 void X86_32ABIInfo::computeVectorCallArgs(
CGFunctionInfo &FI, CCState &State,
1721 bool &UsedInAlloca)
const {
1735 uint64_t NumElts = 0;
1739 if (State.FreeSSERegs >= NumElts) {
1740 State.FreeSSERegs -= NumElts;
1752 uint64_t NumElts = 0;
1758 if (State.FreeSSERegs >= NumElts) {
1759 State.FreeSSERegs -= NumElts;
1760 I.info = getDirectX86Hva();
1762 I.info = getIndirectResult(Ty,
false, State);
1764 }
else if (!IsHva) {
1776 else if (State.CC == llvm::CallingConv::X86_FastCall)
1778 else if (State.CC == llvm::CallingConv::X86_VectorCall) {
1780 State.FreeSSERegs = 6;
1783 else if (State.CC == llvm::CallingConv::X86_RegCall) {
1785 State.FreeSSERegs = 8;
1787 State.FreeRegs = DefaultNumRegisterParameters;
1794 if (State.FreeRegs) {
1805 bool UsedInAlloca =
false;
1806 if (State.CC == llvm::CallingConv::X86_VectorCall) {
1807 computeVectorCallArgs(FI, State, UsedInAlloca);
1819 rewriteWithInAlloca(FI);
1829 assert(StackOffset.
isMultipleOf(FieldAlign) &&
"unaligned inalloca struct");
1836 StackOffset = FieldEnd.
alignTo(FieldAlign);
1837 if (StackOffset != FieldEnd) {
1838 CharUnits NumBytes = StackOffset - FieldEnd;
1840 Ty = llvm::ArrayType::get(Ty, NumBytes.
getQuantity());
1841 FrameFields.push_back(Ty);
1866 llvm_unreachable(
"invalid enum");
1869 void X86_32ABIInfo::rewriteWithInAlloca(
CGFunctionInfo &FI)
const {
1870 assert(IsWin32StructABI &&
"inalloca only supported on win32");
1887 addFieldToArgStruct(FrameFields, StackOffset, I->
info, I->
type);
1894 addFieldToArgStruct(FrameFields, StackOffset, Ret, PtrTy);
1904 for (; I != E; ++I) {
1906 addFieldToArgStruct(FrameFields, StackOffset, I->
info, I->
type);
1924 getTypeStackAlignInBytes(Ty,
TypeInfo.second.getQuantity()));
1931 bool X86_32TargetCodeGenInfo::isStructReturnInRegABI(
1933 assert(Triple.getArch() == llvm::Triple::x86);
1935 switch (Opts.getStructReturnConvention()) {
1944 if (Triple.isOSDarwin() || Triple.isOSIAMCU())
1947 switch (Triple.getOS()) {
1948 case llvm::Triple::DragonFly:
1949 case llvm::Triple::FreeBSD:
1950 case llvm::Triple::OpenBSD:
1951 case llvm::Triple::Win32:
1958 void X86_32TargetCodeGenInfo::setTargetAttributes(
1960 if (GV->isDeclaration())
1962 if (
const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
1963 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
1964 llvm::Function *Fn = cast<llvm::Function>(GV);
1965 Fn->addFnAttr(
"stackrealign");
1967 if (FD->hasAttr<AnyX86InterruptAttr>()) {
1968 llvm::Function *Fn = cast<llvm::Function>(GV);
1969 Fn->setCallingConv(llvm::CallingConv::X86_INTR);
1974 bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
1997 Four8, Builder.CreateConstInBoundsGEP1_32(CGF.
Int8Ty, Address, 9),
2024 static unsigned getNativeVectorSizeForAVXABI(
X86AVXABILevel AVXLevel) {
2026 case X86AVXABILevel::AVX512:
2028 case X86AVXABILevel::AVX:
2033 llvm_unreachable(
"Unknown AVXLevel");
2058 static Class merge(Class Accum, Class Field);
2074 void postMerge(
unsigned AggregateSize, Class &Lo, Class &Hi)
const;
2100 void classify(
QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi,
2101 bool isNamedArg)
const;
2105 unsigned IROffset,
QualType SourceTy,
2106 unsigned SourceOffset)
const;
2108 unsigned IROffset,
QualType SourceTy,
2109 unsigned SourceOffset)
const;
2125 unsigned &neededInt,
unsigned &neededSSE,
2126 bool isNamedArg)
const;
2129 unsigned &NeededSSE)
const;
2132 unsigned &NeededSSE)
const;
2134 bool IsIllegalVectorType(
QualType Ty)
const;
2141 bool honorsRevision0_98()
const {
2147 bool classifyIntegerMMXAsSSE()
const {
2149 if (
getContext().getLangOpts().getClangABICompat() <=
2154 if (Triple.isOSDarwin() || Triple.getOS() == llvm::Triple::PS4)
2156 if (Triple.isOSFreeBSD() && Triple.getOSMajorVersion() >= 10)
2164 bool Has64BitPointers;
2169 Has64BitPointers(CGT.
getDataLayout().getPointerSize(0) == 8) {
2173 unsigned neededInt, neededSSE;
2179 if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
2180 return (vectorTy->getBitWidth() > 128);
// True when pointers in address space 0 are 8 bytes wide for this
// target (Has64BitPointers is computed from the data layout at
// construction); false for ILP32-style x86-64 variants.
2192 bool has64BitPointers()
const {
2193 return Has64BitPointers;
2197 bool asReturnValue)
const override {
2200 bool isSwiftErrorInRegister()
const override {
2210 IsMingw64(
getTarget().getTriple().isWindowsGNUEnvironment()) {}
2219 return isX86VectorTypeForVectorCall(
getContext(), Ty);
2223 uint64_t NumMembers)
const override {
2225 return isX86VectorCallAggregateSmallEnough(NumMembers);
2229 bool asReturnValue)
const override {
2233 bool isSwiftErrorInRegister()
const override {
2239 bool IsVectorCall,
bool IsRegCall)
const;
2242 void computeVectorCallArgs(
CGFunctionInfo &FI,
unsigned FreeSSERegs,
2243 bool IsVectorCall,
bool IsRegCall)
const;
2253 const X86_64ABIInfo &getABIInfo()
const {
2272 StringRef Constraint,
2274 return X86AdjustInlineAsmType(CGF, Constraint, Ty);
2277 bool isNoProtoCallVariadic(
const CallArgList &args,
2286 bool HasAVXType =
false;
2287 for (CallArgList::const_iterator
2288 it = args.begin(), ie = args.end(); it != ie; ++it) {
2289 if (getABIInfo().isPassedUsingAVXType(it->Ty)) {
2304 unsigned Sig = (0xeb << 0) |
2308 return llvm::ConstantInt::get(CGM.
Int32Ty, Sig);
2311 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
2313 if (GV->isDeclaration())
2315 if (
const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
2316 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
2317 llvm::Function *Fn = cast<llvm::Function>(GV);
2318 Fn->addFnAttr(
"stackrealign");
2320 if (FD->hasAttr<AnyX86InterruptAttr>()) {
2321 llvm::Function *Fn = cast<llvm::Function>(GV);
2322 Fn->setCallingConv(llvm::CallingConv::X86_INTR);
2328 class PS4TargetCodeGenInfo :
public X86_64TargetCodeGenInfo {
2331 : X86_64TargetCodeGenInfo(CGT, AVXLevel) {}
2333 void getDependentLibraryOption(llvm::StringRef Lib,
2337 if (Lib.find(
" ") != StringRef::npos)
2338 Opt +=
"\"" + Lib.str() +
"\"";
2344 static std::string qualifyWindowsLibrary(llvm::StringRef Lib) {
2348 bool Quote = (Lib.find(
" ") != StringRef::npos);
2349 std::string ArgStr = Quote ?
"\"" :
"";
2351 if (!Lib.endswith_lower(
".lib") && !Lib.endswith_lower(
".a"))
2353 ArgStr += Quote ?
"\"" :
"";
2357 class WinX86_32TargetCodeGenInfo :
public X86_32TargetCodeGenInfo {
2360 bool DarwinVectorABI,
bool RetSmallStructInRegABI,
bool Win32StructABI,
2361 unsigned NumRegisterParameters)
2362 : X86_32TargetCodeGenInfo(CGT, DarwinVectorABI, RetSmallStructInRegABI,
2363 Win32StructABI, NumRegisterParameters,
false) {}
2365 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
2368 void getDependentLibraryOption(llvm::StringRef Lib,
2370 Opt =
"/DEFAULTLIB:";
2371 Opt += qualifyWindowsLibrary(Lib);
2374 void getDetectMismatchOption(llvm::StringRef Name,
2375 llvm::StringRef
Value,
2377 Opt =
"/FAILIFMISMATCH:\"" + Name.str() +
"=" + Value.str() +
"\"";
2381 static void addStackProbeTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
2383 if (llvm::Function *Fn = dyn_cast_or_null<llvm::Function>(GV)) {
2386 Fn->addFnAttr(
"stack-probe-size",
2389 Fn->addFnAttr(
"no-stack-arg-probe");
2393 void WinX86_32TargetCodeGenInfo::setTargetAttributes(
2395 X86_32TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
2396 if (GV->isDeclaration())
2398 addStackProbeTargetAttributes(D, GV, CGM);
2407 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
2424 void getDependentLibraryOption(llvm::StringRef Lib,
2426 Opt =
"/DEFAULTLIB:";
2427 Opt += qualifyWindowsLibrary(Lib);
2430 void getDetectMismatchOption(llvm::StringRef Name,
2431 llvm::StringRef
Value,
2433 Opt =
"/FAILIFMISMATCH:\"" + Name.str() +
"=" + Value.str() +
"\"";
2437 void WinX86_64TargetCodeGenInfo::setTargetAttributes(
2440 if (GV->isDeclaration())
2442 if (
const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
2443 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
2444 llvm::Function *Fn = cast<llvm::Function>(GV);
2445 Fn->addFnAttr(
"stackrealign");
2447 if (FD->hasAttr<AnyX86InterruptAttr>()) {
2448 llvm::Function *Fn = cast<llvm::Function>(GV);
2449 Fn->setCallingConv(llvm::CallingConv::X86_INTR);
2453 addStackProbeTargetAttributes(D, GV, CGM);
2457 void X86_64ABIInfo::postMerge(
unsigned AggregateSize, Class &Lo,
2482 if (Hi == X87Up && Lo != X87 && honorsRevision0_98())
2484 if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp))
2486 if (Hi == SSEUp && Lo != SSE)
2490 X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
2514 assert((Accum != Memory && Accum != ComplexX87) &&
2515 "Invalid accumulated classification during merge.");
2516 if (Accum == Field || Field == NoClass)
2518 if (Field == Memory)
2520 if (Accum == NoClass)
2524 if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
2525 Accum == X87 || Accum == X87Up)
2530 void X86_64ABIInfo::classify(
QualType Ty, uint64_t OffsetBase,
2531 Class &Lo, Class &Hi,
bool isNamedArg)
const {
2542 Class &Current = OffsetBase < 64 ? Lo : Hi;
2548 if (k == BuiltinType::Void) {
2550 }
else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
2553 }
else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
2555 }
else if (k == BuiltinType::Float || k == BuiltinType::Double) {
2557 }
else if (k == BuiltinType::LongDouble) {
2559 if (LDF == &llvm::APFloat::IEEEquad()) {
2562 }
else if (LDF == &llvm::APFloat::x87DoubleExtended()) {
2565 }
else if (LDF == &llvm::APFloat::IEEEdouble()) {
2568 llvm_unreachable(
"unexpected long double representation!");
2577 classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi, isNamedArg);
2588 if (Has64BitPointers) {
2595 uint64_t EB_FuncPtr = (OffsetBase) / 64;
2596 uint64_t EB_ThisAdj = (OffsetBase + 64 - 1) / 64;
2597 if (EB_FuncPtr != EB_ThisAdj) {
2611 if (Size == 1 || Size == 8 || Size == 16 || Size == 32) {
2620 uint64_t EB_Lo = (OffsetBase) / 64;
2621 uint64_t EB_Hi = (OffsetBase + Size - 1) / 64;
2624 }
else if (Size == 64) {
2625 QualType ElementType = VT->getElementType();
2634 if (!classifyIntegerMMXAsSSE() &&
2645 if (OffsetBase && OffsetBase != 64)
2647 }
else if (Size == 128 ||
2648 (isNamedArg && Size <= getNativeVectorSizeForAVXABI(AVXLevel))) {
2676 else if (Size <= 128)
2684 if (LDF == &llvm::APFloat::IEEEquad())
2686 else if (LDF == &llvm::APFloat::x87DoubleExtended())
2687 Current = ComplexX87;
2688 else if (LDF == &llvm::APFloat::IEEEdouble())
2691 llvm_unreachable(
"unexpected long double representation!");
2696 uint64_t EB_Real = (OffsetBase) / 64;
2698 if (Hi == NoClass && EB_Real != EB_Imag)
2718 if (OffsetBase %
getContext().getTypeAlign(AT->getElementType()))
2725 uint64_t ArraySize = AT->getSize().getZExtValue();
2732 (Size != EltSize || Size > getNativeVectorSizeForAVXABI(AVXLevel)))
2735 for (uint64_t i=0,
Offset=OffsetBase; i<ArraySize; ++i,
Offset += EltSize) {
2736 Class FieldLo, FieldHi;
2737 classify(AT->getElementType(),
Offset, FieldLo, FieldHi, isNamedArg);
2738 Lo = merge(Lo, FieldLo);
2739 Hi = merge(Hi, FieldHi);
2740 if (Lo == Memory || Hi == Memory)
2744 postMerge(Size, Lo, Hi);
2745 assert((Hi != SSEUp || Lo == SSE) &&
"Invalid SSEUp array classification.");
2775 if (
const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
2776 for (
const auto &I : CXXRD->bases()) {
2777 assert(!I.isVirtual() && !I.getType()->isDependentType() &&
2778 "Unexpected base class!");
2780 cast<CXXRecordDecl>(I.getType()->getAs<
RecordType>()->getDecl());
2787 Class FieldLo, FieldHi;
2790 classify(I.getType(),
Offset, FieldLo, FieldHi, isNamedArg);
2791 Lo = merge(Lo, FieldLo);
2792 Hi = merge(Hi, FieldHi);
2793 if (Lo == Memory || Hi == Memory) {
2794 postMerge(Size, Lo, Hi);
2803 i != e; ++i, ++idx) {
2805 bool BitField = i->isBitField();
2808 if (BitField && i->isUnnamedBitfield())
2818 if (Size > 128 && (Size !=
getContext().getTypeSize(i->getType()) ||
2819 Size > getNativeVectorSizeForAVXABI(AVXLevel))) {
2821 postMerge(Size, Lo, Hi);
2825 if (!BitField && Offset %
getContext().getTypeAlign(i->getType())) {
2827 postMerge(Size, Lo, Hi);
2837 Class FieldLo, FieldHi;
2843 assert(!i->isUnnamedBitfield());
2845 uint64_t Size = i->getBitWidthValue(
getContext());
2847 uint64_t EB_Lo = Offset / 64;
2848 uint64_t EB_Hi = (Offset + Size - 1) / 64;
2851 assert(EB_Hi == EB_Lo &&
"Invalid classification, type > 16 bytes.");
2856 FieldHi = EB_Hi ?
Integer : NoClass;
2859 classify(i->getType(),
Offset, FieldLo, FieldHi, isNamedArg);
2860 Lo = merge(Lo, FieldLo);
2861 Hi = merge(Hi, FieldHi);
2862 if (Lo == Memory || Hi == Memory)
2866 postMerge(Size, Lo, Hi);
2876 Ty = EnumTy->getDecl()->getIntegerType();
2885 bool X86_64ABIInfo::IsIllegalVectorType(
QualType Ty)
const {
2888 unsigned LargestVector = getNativeVectorSizeForAVXABI(AVXLevel);
2889 if (Size <= 64 || Size > LargestVector)
2897 unsigned freeIntRegs)
const {
2909 Ty = EnumTy->getDecl()->getIntegerType();
2943 if (freeIntRegs == 0) {
2948 if (Align == 8 && Size <= 64)
2965 if (isa<llvm::VectorType>(IRType) ||
2966 IRType->getTypeID() == llvm::Type::FP128TyID)
2971 assert((Size == 128 || Size == 256 || Size == 512) &&
"Invalid type found!");
2974 return llvm::VectorType::get(llvm::Type::getDoubleTy(
getVMContext()),
2990 unsigned TySize = (unsigned)Context.
getTypeSize(Ty);
2991 if (TySize <= StartBit)
2995 unsigned EltSize = (unsigned)Context.
getTypeSize(AT->getElementType());
2996 unsigned NumElts = (unsigned)AT->getSize().getZExtValue();
2999 for (
unsigned i = 0; i != NumElts; ++i) {
3001 unsigned EltOffset = i*EltSize;
3002 if (EltOffset >= EndBit)
break;
3004 unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset :0;
3006 EndBit-EltOffset, Context))
3018 if (
const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
3019 for (
const auto &I : CXXRD->bases()) {
3020 assert(!I.isVirtual() && !I.getType()->isDependentType() &&
3021 "Unexpected base class!");
3023 cast<CXXRecordDecl>(I.getType()->getAs<
RecordType>()->getDecl());
3027 if (BaseOffset >= EndBit)
continue;
3029 unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset :0;
3031 EndBit-BaseOffset, Context))
3042 i != e; ++i, ++idx) {
3046 if (FieldOffset >= EndBit)
break;
3048 unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset :0;
3067 const llvm::DataLayout &TD) {
3069 if (IROffset == 0 && IRType->isFloatTy())
3073 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
3074 const llvm::StructLayout *SL = TD.getStructLayout(STy);
3075 unsigned Elt = SL->getElementContainingOffset(IROffset);
3076 IROffset -= SL->getElementOffset(Elt);
3081 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
3083 unsigned EltSize = TD.getTypeAllocSize(EltTy);
3084 IROffset -= IROffset/EltSize*EltSize;
3095 GetSSETypeAtOffset(
llvm::Type *IRType,
unsigned IROffset,
3096 QualType SourceTy,
unsigned SourceOffset)
const {
3109 return llvm::VectorType::get(llvm::Type::getFloatTy(
getVMContext()), 2);
3130 GetINTEGERTypeAtOffset(
llvm::Type *IRType,
unsigned IROffset,
3131 QualType SourceTy,
unsigned SourceOffset)
const {
3134 if (IROffset == 0) {
3136 if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) ||
3137 IRType->isIntegerTy(64))
3146 if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
3147 IRType->isIntegerTy(32) ||
3148 (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) {
3149 unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 :
3150 cast<llvm::IntegerType>(IRType)->getBitWidth();
3158 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
3160 const llvm::StructLayout *SL =
getDataLayout().getStructLayout(STy);
3161 if (IROffset < SL->getSizeInBytes()) {
3162 unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
3163 IROffset -= SL->getElementOffset(FieldIdx);
3165 return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,
3166 SourceTy, SourceOffset);
3170 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
3173 unsigned EltOffset = IROffset/EltSize*EltSize;
3174 return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy,
3180 unsigned TySizeInBytes =
3183 assert(TySizeInBytes != SourceOffset &&
"Empty field?");
3188 std::min(TySizeInBytes-SourceOffset, 8U)*8);
3199 const llvm::DataLayout &TD) {
3204 unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
3205 unsigned HiAlign = TD.getABITypeAlignment(Hi);
3206 unsigned HiStart = llvm::alignTo(LoSize, HiAlign);
3207 assert(HiStart != 0 && HiStart <= 8 &&
"Invalid x86-64 argument pair!");
3219 if (Lo->isFloatTy())
3220 Lo = llvm::Type::getDoubleTy(Lo->getContext());
3222 assert((Lo->isIntegerTy() || Lo->isPointerTy())
3223 &&
"Invalid/unknown lo type");
3224 Lo = llvm::Type::getInt64Ty(Lo->getContext());
3228 llvm::StructType *Result = llvm::StructType::get(Lo, Hi);
3231 assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
3232 "Invalid x86-64 argument pair!");
3240 X86_64ABIInfo::Class Lo, Hi;
3241 classify(RetTy, 0, Lo, Hi,
true);
3244 assert((Hi != Memory || Lo == Memory) &&
"Invalid memory classification.");
3245 assert((Hi != SSEUp || Lo == SSE) &&
"Invalid SSEUp classification.");
3254 assert((Hi == SSE || Hi ==
Integer || Hi == X87Up) &&
3255 "Unknown missing lo part");
3260 llvm_unreachable(
"Invalid classification for lo word.");
3265 return getIndirectReturnResult(RetTy);
3270 ResType = GetINTEGERTypeAtOffset(
CGT.
ConvertType(RetTy), 0, RetTy, 0);
3274 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
3277 RetTy = EnumTy->getDecl()->getIntegerType();
3288 ResType = GetSSETypeAtOffset(
CGT.
ConvertType(RetTy), 0, RetTy, 0);
3301 assert(Hi == ComplexX87 &&
"Unexpected ComplexX87 classification.");
3302 ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(
getVMContext()),
3313 llvm_unreachable(
"Invalid classification for hi word.");
3320 HighPart = GetINTEGERTypeAtOffset(
CGT.
ConvertType(RetTy), 8, RetTy, 8);
3325 HighPart = GetSSETypeAtOffset(
CGT.
ConvertType(RetTy), 8, RetTy, 8);
3336 assert(Lo == SSE &&
"Unexpected SSEUp classification.");
3337 ResType = GetByteVectorType(RetTy);
3348 HighPart = GetSSETypeAtOffset(
CGT.
ConvertType(RetTy), 8, RetTy, 8);
3365 QualType Ty,
unsigned freeIntRegs,
unsigned &neededInt,
unsigned &neededSSE,
3371 X86_64ABIInfo::Class Lo, Hi;
3372 classify(Ty, 0, Lo, Hi, isNamedArg);
3376 assert((Hi != Memory || Lo == Memory) &&
"Invalid memory classification.");
3377 assert((Hi != SSEUp || Lo == SSE) &&
"Invalid SSEUp classification.");
3388 assert((Hi == SSE || Hi ==
Integer || Hi == X87Up) &&
3389 "Unknown missing lo part");
3402 return getIndirectResult(Ty, freeIntRegs);
3406 llvm_unreachable(
"Invalid classification for lo word.");
3419 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
3422 Ty = EnumTy->getDecl()->getIntegerType();
3436 ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0);
3450 llvm_unreachable(
"Invalid classification for hi word.");
3452 case NoClass:
break;
3457 HighPart = GetINTEGERTypeAtOffset(
CGT.
ConvertType(Ty), 8, Ty, 8);
3479 assert(Lo == SSE &&
"Unexpected SSEUp classification");
3480 ResType = GetByteVectorType(Ty);
3494 X86_64ABIInfo::classifyRegCallStructTypeImpl(
QualType Ty,
unsigned &NeededInt,
3495 unsigned &NeededSSE)
const {
3497 assert(RT &&
"classifyRegCallStructType only valid with struct types");
3499 if (RT->getDecl()->hasFlexibleArrayMember())
3500 return getIndirectReturnResult(Ty);
3503 if (
auto CXXRD = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
3504 if (CXXRD->isDynamicClass()) {
3505 NeededInt = NeededSSE = 0;
3506 return getIndirectReturnResult(Ty);
3509 for (
const auto &I : CXXRD->bases())
3510 if (classifyRegCallStructTypeImpl(I.getType(), NeededInt, NeededSSE)
3512 NeededInt = NeededSSE = 0;
3513 return getIndirectReturnResult(Ty);
3518 for (
const auto *FD : RT->getDecl()->fields()) {
3519 if (FD->getType()->isRecordType() && !FD->getType()->isUnionType()) {
3520 if (classifyRegCallStructTypeImpl(FD->getType(), NeededInt, NeededSSE)
3522 NeededInt = NeededSSE = 0;
3523 return getIndirectReturnResult(Ty);
3526 unsigned LocalNeededInt, LocalNeededSSE;
3528 LocalNeededSSE,
true)
3530 NeededInt = NeededSSE = 0;
3531 return getIndirectReturnResult(Ty);
3533 NeededInt += LocalNeededInt;
3534 NeededSSE += LocalNeededSSE;
3542 unsigned &NeededInt,
3543 unsigned &NeededSSE)
const {
3548 return classifyRegCallStructTypeImpl(Ty, NeededInt, NeededSSE);
3557 if (CallingConv == llvm::CallingConv::Win64) {
3558 WinX86_64ABIInfo Win64ABIInfo(
CGT);
3559 Win64ABIInfo.computeInfo(FI);
3563 bool IsRegCall = CallingConv == llvm::CallingConv::X86_RegCall;
3566 unsigned FreeIntRegs = IsRegCall ? 11 : 6;
3567 unsigned FreeSSERegs = IsRegCall ? 16 : 8;
3568 unsigned NeededInt, NeededSSE;
3574 classifyRegCallStructType(FI.
getReturnType(), NeededInt, NeededSSE);
3575 if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
3576 FreeIntRegs -= NeededInt;
3577 FreeSSERegs -= NeededSSE;
3606 it != ie; ++it, ++ArgNo) {
3607 bool IsNamedArg = ArgNo < NumRequiredArgs;
3609 if (IsRegCall && it->type->isStructureOrClassType())
3610 it->info = classifyRegCallStructType(it->type, NeededInt, NeededSSE);
3613 NeededSSE, IsNamedArg);
3619 if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
3620 FreeIntRegs -= NeededInt;
3621 FreeSSERegs -= NeededSSE;
3623 it->info = getIndirectResult(it->type, FreeIntRegs);
3649 llvm::PointerType::getUnqual(LTy));
3658 llvm::ConstantInt::get(CGF.
Int32Ty, (SizeInBytes + 7) & ~7);
3659 overflow_arg_area = CGF.
Builder.CreateGEP(overflow_arg_area, Offset,
3660 "overflow_arg_area.next");
3676 unsigned neededInt, neededSSE;
3684 if (!neededInt && !neededSSE)
3700 llvm::Value *gp_offset =
nullptr, *fp_offset =
nullptr;
3706 InRegs = llvm::ConstantInt::get(CGF.
Int32Ty, 48 - neededInt * 8);
3707 InRegs = CGF.
Builder.CreateICmpULE(gp_offset, InRegs,
"fits_in_gp");
3716 llvm::ConstantInt::get(CGF.
Int32Ty, 176 - neededSSE * 16);
3717 FitsInFP = CGF.
Builder.CreateICmpULE(fp_offset, FitsInFP,
"fits_in_fp");
3718 InRegs = InRegs ? CGF.
Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
3724 CGF.
Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
3746 if (neededInt && neededSSE) {
3748 assert(AI.
isDirect() &&
"Unexpected ABI info for mixed regs");
3752 assert(ST->getNumElements() == 2 &&
"Unexpected ABI info for mixed regs");
3755 assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) &&
3756 "Unexpected ABI info for mixed regs");
3757 llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
3758 llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
3761 llvm::Value *RegLoAddr = TyLo->isFPOrFPVectorTy() ? FPAddr : GPAddr;
3762 llvm::Value *RegHiAddr = TyLo->isFPOrFPVectorTy() ? GPAddr : FPAddr;
3781 }
else if (neededInt) {
3787 std::pair<CharUnits, CharUnits> SizeAlign =
3789 uint64_t TySize = SizeAlign.first.getQuantity();
3800 }
else if (neededSSE == 1) {
3805 assert(neededSSE == 2 &&
"Invalid number of needed registers!");
3824 RegAddrLo, ST->getStructElementType(0)));
3828 RegAddrHi, ST->getStructElementType(1)));
3872 WinX86_64ABIInfo::reclassifyHvaArgType(
QualType Ty,
unsigned &FreeSSERegs,
3875 const Type *
Base =
nullptr;
3876 uint64_t NumElts = 0;
3880 FreeSSERegs -= NumElts;
3881 return getDirectX86Hva();
3887 bool IsReturnType,
bool IsVectorCall,
3888 bool IsRegCall)
const {
3894 Ty = EnumTy->getDecl()->getIntegerType();
3897 uint64_t Width = Info.
Width;
3902 if (!IsReturnType) {
3912 const Type *
Base =
nullptr;
3913 uint64_t NumElts = 0;
3916 if ((IsVectorCall || IsRegCall) &&
3919 if (FreeSSERegs >= NumElts) {
3920 FreeSSERegs -= NumElts;
3926 }
else if (IsVectorCall) {
3927 if (FreeSSERegs >= NumElts &&
3929 FreeSSERegs -= NumElts;
3931 }
else if (IsReturnType) {
3944 if (LLTy->isPointerTy() || LLTy->isIntegerTy())
3951 if (Width > 64 || !llvm::isPowerOf2_64(Width))
3959 switch (BT->getKind()) {
3960 case BuiltinType::Bool:
3965 case BuiltinType::LongDouble:
3970 if (LDF == &llvm::APFloat::x87DoubleExtended())
3975 case BuiltinType::Int128:
3976 case BuiltinType::UInt128:
3986 llvm::VectorType::get(llvm::Type::getInt64Ty(
getVMContext()), 2));
3997 unsigned FreeSSERegs,
3999 bool IsRegCall)
const {
4004 if (Count < VectorcallMaxParamNumAsReg)
4005 I.info = classify(I.type, FreeSSERegs,
false, IsVectorCall, IsRegCall);
4009 unsigned ZeroSSERegsAvail = 0;
4010 I.info = classify(I.type, ZeroSSERegsAvail,
false,
4011 IsVectorCall, IsRegCall);
4017 I.info = reclassifyHvaArgType(I.type, FreeSSERegs, I.info);
4026 unsigned FreeSSERegs = 0;
4030 }
else if (IsRegCall) {
4037 IsVectorCall, IsRegCall);
4042 }
else if (IsRegCall) {
4048 computeVectorCallArgs(FI, FreeSSERegs, IsVectorCall, IsRegCall);
4051 I.info = classify(I.type, FreeSSERegs,
false, IsVectorCall, IsRegCall);
4059 bool IsIndirect =
false;
4065 IsIndirect = Width > 64 || !llvm::isPowerOf2_64(Width);
4077 class PPC32_SVR4_ABIInfo :
public DefaultABIInfo {
4078 bool IsSoftFloatABI;
4084 : DefaultABIInfo(CGT), IsSoftFloatABI(SoftFloatABI) {}
4108 Ty = CTy->getElementType();
4116 const Type *AlignTy =
nullptr;
4133 if (
getTarget().getTriple().isOSDarwin()) {
4135 TI.second = getParamTypeAlignment(Ty);
4143 const unsigned OverflowLimit = 8;
4171 if (isInt || IsSoftFloatABI) {
4180 if (isI64 || (isF64 && IsSoftFloatABI)) {
4181 NumRegs = Builder.CreateAdd(NumRegs, Builder.getInt8(1));
4182 NumRegs = Builder.CreateAnd(NumRegs, Builder.getInt8((uint8_t) ~1U));
4186 Builder.CreateICmpULT(NumRegs, Builder.getInt8(OverflowLimit),
"cond");
4192 Builder.CreateCondBr(CC, UsingRegs, UsingOverflow);
4195 if (isIndirect) DirectTy = DirectTy->getPointerTo(0);
4209 if (!(isInt || IsSoftFloatABI)) {
4218 Builder.CreateMul(NumRegs, Builder.getInt8(RegSize.
getQuantity()));
4226 Builder.CreateAdd(NumRegs,
4227 Builder.getInt8((isI64 || (isF64 && IsSoftFloatABI)) ? 2 : 1));
4238 Builder.
CreateStore(Builder.getInt8(OverflowLimit), NumRegsAddr);
4246 Size =
TypeInfo.first.alignTo(OverflowAreaAlign);
4257 if (Align > OverflowAreaAlign) {
4267 Builder.
CreateStore(OverflowArea.getPointer(), OverflowAreaAddr);
4294 llvm::IntegerType *i8 = CGF.
Int8Ty;
4295 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
4296 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
4297 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
4339 static const unsigned GPRBits = 64;
4342 bool IsSoftFloatABI;
4346 bool IsQPXVectorTy(
const Type *Ty)
const {
4351 unsigned NumElements = VT->getNumElements();
4352 if (NumElements == 1)
4355 if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double)) {
4358 }
else if (VT->getElementType()->
4359 isSpecificBuiltinType(BuiltinType::Float)) {
4368 bool IsQPXVectorTy(
QualType Ty)
const {
4376 IsSoftFloatABI(SoftFloatABI) {}
4378 bool isPromotableTypeForABI(
QualType Ty)
const;
4386 uint64_t Members)
const override;
4404 if (IsQPXVectorTy(T) ||
4420 bool asReturnValue)
const override {
4424 bool isSwiftErrorInRegister()
const override {
4433 PPC64_SVR4_ABIInfo::ABIKind
Kind,
bool HasQPX,
4447 class PPC64TargetCodeGenInfo :
public DefaultTargetCodeGenInfo {
4449 PPC64TargetCodeGenInfo(
CodeGenTypes &
CGT) : DefaultTargetCodeGenInfo(CGT) {}
4465 PPC64_SVR4_ABIInfo::isPromotableTypeForABI(
QualType Ty)
const {
4468 Ty = EnumTy->getDecl()->getIntegerType();
4477 switch (BT->getKind()) {
4478 case BuiltinType::Int:
4479 case BuiltinType::UInt:
4493 Ty = CTy->getElementType();
4497 if (IsQPXVectorTy(Ty)) {
4508 const Type *AlignAsType =
nullptr;
4512 if (IsQPXVectorTy(EltType) || (EltType->
isVectorType() &&
4515 AlignAsType = EltType;
4519 const Type *
Base =
nullptr;
4520 uint64_t Members = 0;
4521 if (!AlignAsType &&
Kind == ELFv2 &&
4526 if (AlignAsType && IsQPXVectorTy(AlignAsType)) {
4531 }
else if (AlignAsType) {
4550 uint64_t &Members)
const {
4552 uint64_t NElements = AT->getSize().getZExtValue();
4557 Members *= NElements;
4566 if (
const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
4567 for (
const auto &I : CXXRD->bases()) {
4572 uint64_t FldMembers;
4576 Members += FldMembers;
4580 for (
const auto *FD : RD->
fields()) {
4585 if (AT->getSize().getZExtValue() == 0)
4587 FT = AT->getElementType();
4597 uint64_t FldMembers;
4602 std::max(Members, FldMembers) : Members + FldMembers);
4616 Ty = CT->getElementType();
4632 QualType EltTy = VT->getElementType();
4633 unsigned NumElements =
4648 bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateBaseType(
QualType Ty)
const {
4652 if (BT->getKind() == BuiltinType::Float ||
4653 BT->getKind() == BuiltinType::Double ||
4654 BT->getKind() == BuiltinType::LongDouble ||
4656 (BT->getKind() == BuiltinType::Float128))) {
4669 bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateSmallEnough(
4670 const Type *
Base, uint64_t Members)
const {
4680 return Members * NumRegs <= 8;
4696 else if (Size < 128) {
4706 uint64_t ABIAlign = getParamTypeAlignment(Ty).getQuantity();
4710 const Type *Base =
nullptr;
4711 uint64_t Members = 0;
4712 if (
Kind == ELFv2 &&
4715 llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
4724 if (Bits > 0 && Bits <= 8 * GPRBits) {
4729 if (Bits <= GPRBits)
4731 llvm::IntegerType::get(
getVMContext(), llvm::alignTo(Bits, 8));
4735 uint64_t RegBits = ABIAlign * 8;
4736 uint64_t NumRegs = llvm::alignTo(Bits, RegBits) / RegBits;
4738 CoerceTy = llvm::ArrayType::get(RegTy, NumRegs);
4747 TyAlign > ABIAlign);
4768 else if (Size < 128) {
4776 const Type *Base =
nullptr;
4777 uint64_t Members = 0;
4778 if (
Kind == ELFv2 &&
4781 llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
4787 if (
Kind == ELFv2 && Bits <= 2 * GPRBits) {
4792 if (Bits > GPRBits) {
4793 CoerceTy = llvm::IntegerType::get(
getVMContext(), GPRBits);
4794 CoerceTy = llvm::StructType::get(CoerceTy, CoerceTy);
4797 llvm::IntegerType::get(
getVMContext(), llvm::alignTo(Bits, 8));
4813 TypeInfo.second = getParamTypeAlignment(Ty);
4825 if (EltSize < SlotSize) {
4827 SlotSize * 2, SlotSize,
4834 SlotSize - EltSize);
4836 2 * SlotSize - EltSize);
4867 llvm::IntegerType *i8 = CGF.
Int8Ty;
4868 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
4869 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
4870 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
4907 PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable(
4943 ABIKind getABIKind()
const {
return Kind; }
4944 bool isDarwinPCS()
const {
return Kind == DarwinPCS; }
4950 uint64_t Members)
const override;
4952 bool isIllegalVectorType(
QualType Ty)
const;
4959 it.info = classifyArgumentType(it.type);
4970 return Kind == Win64 ?
EmitMSVAArg(CGF, VAListAddr, Ty)
4971 : isDarwinPCS() ? EmitDarwinVAArg(VAListAddr, Ty, CGF)
4972 : EmitAAPCSVAArg(VAListAddr, Ty, CGF);
4979 bool asReturnValue)
const override {
4982 bool isSwiftErrorInRegister()
const override {
4987 unsigned elts)
const override;
4995 StringRef getARCRetainAutoreleasedReturnValueMarker()
const override {
4996 return "mov\tfp, fp\t\t// marker for objc_retainAutoreleaseReturnValue";
5003 bool doesReturnSlotInterfereWithArgs()
const override {
return false; }
5005 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
5007 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
5010 llvm::Function *Fn = cast<llvm::Function>(GV);
5014 Fn->addFnAttr(
"sign-return-address",
5020 Fn->addFnAttr(
"sign-return-address-key",
5021 Key == CodeGenOptions::SignReturnAddressKeyValue::AKey
5027 Fn->addFnAttr(
"branch-target-enforcement");
5031 class WindowsAArch64TargetCodeGenInfo :
public AArch64TargetCodeGenInfo {
5033 WindowsAArch64TargetCodeGenInfo(
CodeGenTypes &
CGT, AArch64ABIInfo::ABIKind K)
5034 : AArch64TargetCodeGenInfo(CGT, K) {}
5036 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
5039 void getDependentLibraryOption(llvm::StringRef Lib,
5041 Opt =
"/DEFAULTLIB:" + qualifyWindowsLibrary(Lib);
5044 void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef
Value,
5046 Opt =
"/FAILIFMISMATCH:\"" + Name.str() +
"=" + Value.str() +
"\"";
5050 void WindowsAArch64TargetCodeGenInfo::setTargetAttributes(
5052 AArch64TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
5053 if (GV->isDeclaration())
5055 addStackProbeTargetAttributes(D, GV, CGM);
5063 if (isIllegalVectorType(Ty)) {
5076 llvm::VectorType::get(llvm::Type::getInt32Ty(
getVMContext()), 2);
5081 llvm::VectorType::get(llvm::Type::getInt32Ty(
getVMContext()), 4);
5090 Ty = EnumTy->getDecl()->getIntegerType();
5108 if (IsEmpty || Size == 0) {
5114 if (IsEmpty && Size == 0)
5120 const Type *Base =
nullptr;
5121 uint64_t Members = 0;
5131 if (
getTarget().isRenderScriptTarget()) {
5135 if (
Kind == AArch64ABIInfo::AAPCS) {
5137 Alignment = Alignment < 128 ? 64 : 128;
5141 Size = llvm::alignTo(Size, 64);
5145 if (Alignment < 128 && Size == 128) {
5166 RetTy = EnumTy->getDecl()->getIntegerType();
5177 const Type *Base =
nullptr;
5178 uint64_t Members = 0;
5187 if (
getTarget().isRenderScriptTarget()) {
5191 Size = llvm::alignTo(Size, 64);
5195 if (Alignment < 128 && Size == 128) {
5206 bool AArch64ABIInfo::isIllegalVectorType(
QualType Ty)
const {
5209 unsigned NumElements = VT->getNumElements();
5212 if (!llvm::isPowerOf2_32(NumElements))
5214 return Size != 64 && (Size != 128 || NumElements == 1);
5219 bool AArch64ABIInfo::isLegalVectorTypeForSwift(
CharUnits totalSize,
5221 unsigned elts)
const {
5222 if (!llvm::isPowerOf2_32(elts))
5230 bool AArch64ABIInfo::isHomogeneousAggregateBaseType(
QualType Ty)
const {
5236 if (BT->isFloatingPoint())
5240 if (VecSize == 64 || VecSize == 128)
5246 bool AArch64ABIInfo::isHomogeneousAggregateSmallEnough(
const Type *Base,
5247 uint64_t Members)
const {
5248 return Members <= 4;
5259 BaseTy = llvm::PointerType::getUnqual(BaseTy);
5263 unsigned NumRegs = 1;
5264 if (llvm::ArrayType *ArrTy = dyn_cast<llvm::ArrayType>(BaseTy)) {
5265 BaseTy = ArrTy->getElementType();
5266 NumRegs = ArrTy->getNumElements();
5268 bool IsFPR = BaseTy->isFloatingPointTy() || BaseTy->isVectorTy();
5293 int RegSize = IsIndirect ? 8 : TyInfo.first.
getQuantity();
5302 RegSize = llvm::alignTo(RegSize, 8);
5311 RegSize = 16 * NumRegs;
5323 UsingStack = CGF.
Builder.CreateICmpSGE(
5324 reg_offs, llvm::ConstantInt::get(CGF.
Int32Ty, 0));
5326 CGF.
Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock);
5335 if (!IsFPR && !IsIndirect && TyAlign.
getQuantity() > 8) {
5338 reg_offs = CGF.
Builder.CreateAdd(
5339 reg_offs, llvm::ConstantInt::get(CGF.
Int32Ty, Align - 1),
5341 reg_offs = CGF.
Builder.CreateAnd(
5342 reg_offs, llvm::ConstantInt::get(CGF.
Int32Ty, -Align),
5351 NewOffset = CGF.
Builder.CreateAdd(
5352 reg_offs, llvm::ConstantInt::get(CGF.
Int32Ty, RegSize),
"new_reg_offs");
5358 InRegs = CGF.
Builder.CreateICmpSLE(
5359 NewOffset, llvm::ConstantInt::get(CGF.
Int32Ty, 0),
"inreg");
5361 CGF.
Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock);
5373 reg_top_offset,
"reg_top_p");
5375 Address BaseAddr(CGF.
Builder.CreateInBoundsGEP(reg_top, reg_offs),
5383 MemTy = llvm::PointerType::getUnqual(MemTy);
5386 const Type *Base =
nullptr;
5387 uint64_t NumMembers = 0;
5389 if (IsHFA && NumMembers > 1) {
5394 assert(!IsIndirect &&
"Homogeneous aggregates should be passed directly");
5397 llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers);
5399 std::max(TyAlign, BaseTyInfo.second));
5404 BaseTyInfo.first.getQuantity() < 16)
5405 Offset = 16 - BaseTyInfo.first.getQuantity();
5407 for (
unsigned i = 0; i < NumMembers; ++i) {
5425 CharUnits SlotSize = BaseAddr.getAlignment();
5428 TyInfo.first < SlotSize) {
5452 OnStackPtr = CGF.
Builder.CreatePtrToInt(OnStackPtr, CGF.
Int64Ty);
5454 OnStackPtr = CGF.
Builder.CreateAdd(
5455 OnStackPtr, llvm::ConstantInt::get(CGF.
Int64Ty, Align - 1),
5457 OnStackPtr = CGF.
Builder.CreateAnd(
5458 OnStackPtr, llvm::ConstantInt::get(CGF.
Int64Ty, -Align),
5463 Address OnStackAddr(OnStackPtr,
5470 StackSize = StackSlotSize;
5472 StackSize = TyInfo.first.
alignTo(StackSlotSize);
5476 CGF.
Builder.CreateInBoundsGEP(OnStackPtr, StackSizeC,
"new_stack");
5482 TyInfo.first < StackSlotSize) {
5497 OnStackAddr, OnStackBlock,
"vaargs.addr");
5529 bool IsIndirect =
false;
5530 if (TyInfo.first.getQuantity() > 16) {
5531 const Type *Base =
nullptr;
5532 uint64_t Members = 0;
5537 TyInfo, SlotSize,
true);
5572 bool isEABI()
const {
5573 switch (
getTarget().getTriple().getEnvironment()) {
5574 case llvm::Triple::Android:
5575 case llvm::Triple::EABI:
5576 case llvm::Triple::EABIHF:
5577 case llvm::Triple::GNUEABI:
5578 case llvm::Triple::GNUEABIHF:
5579 case llvm::Triple::MuslEABI:
5580 case llvm::Triple::MuslEABIHF:
5587 bool isEABIHF()
const {
5588 switch (
getTarget().getTriple().getEnvironment()) {
5589 case llvm::Triple::EABIHF:
5590 case llvm::Triple::GNUEABIHF:
5591 case llvm::Triple::MuslEABIHF:
5598 ABIKind getABIKind()
const {
return Kind; }
5604 uint64_t Members)
const;
5606 bool isIllegalVectorType(
QualType Ty)
const;
5610 uint64_t Members)
const override;
5622 bool asReturnValue)
const override {
5625 bool isSwiftErrorInRegister()
const override {
5629 unsigned elts)
const override;
5637 const ARMABIInfo &getABIInfo()
const {
5645 StringRef getARCRetainAutoreleasedReturnValueMarker()
const override {
5646 return "mov\tr7, r7\t\t// marker for objc_retainAutoreleaseReturnValue";
5658 unsigned getSizeOfUnwindException()
const override {
5659 if (getABIInfo().isEABI())
return 88;
5663 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
5665 if (GV->isDeclaration())
5667 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
5671 const ARMInterruptAttr *
Attr = FD->
getAttr<ARMInterruptAttr>();
5676 switch (Attr->getInterrupt()) {
5677 case ARMInterruptAttr::Generic: Kind =
"";
break;
5678 case ARMInterruptAttr::IRQ: Kind =
"IRQ";
break;
5679 case ARMInterruptAttr::FIQ: Kind =
"FIQ";
break;
5680 case ARMInterruptAttr::SWI: Kind =
"SWI";
break;
5681 case ARMInterruptAttr::ABORT: Kind =
"ABORT";
break;
5682 case ARMInterruptAttr::UNDEF: Kind =
"UNDEF";
break;
5685 llvm::Function *Fn = cast<llvm::Function>(GV);
5687 Fn->addFnAttr(
"interrupt", Kind);
5689 ARMABIInfo::ABIKind ABI = cast<ARMABIInfo>(getABIInfo()).getABIKind();
5690 if (ABI == ARMABIInfo::APCS)
5696 llvm::AttrBuilder B;
5697 B.addStackAlignmentAttr(8);
5698 Fn->addAttributes(llvm::AttributeList::FunctionIndex, B);
5702 class WindowsARMTargetCodeGenInfo :
public ARMTargetCodeGenInfo {
5705 : ARMTargetCodeGenInfo(CGT, K) {}
5707 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
5710 void getDependentLibraryOption(llvm::StringRef Lib,
5712 Opt =
"/DEFAULTLIB:" + qualifyWindowsLibrary(Lib);
5715 void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef
Value,
5717 Opt =
"/FAILIFMISMATCH:\"" + Name.str() +
"=" + Value.str() +
"\"";
5721 void WindowsARMTargetCodeGenInfo::setTargetAttributes(
5723 ARMTargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
5724 if (GV->isDeclaration())
5726 addStackProbeTargetAttributes(D, GV, CGM);
5750 if (isEABIHF() ||
getTarget().getTriple().isWatchABI())
5751 return llvm::CallingConv::ARM_AAPCS_VFP;
5753 return llvm::CallingConv::ARM_AAPCS;
5755 return llvm::CallingConv::ARM_APCS;
5761 switch (getABIKind()) {
5762 case APCS:
return llvm::CallingConv::ARM_APCS;
5763 case AAPCS:
return llvm::CallingConv::ARM_AAPCS;
5764 case AAPCS_VFP:
return llvm::CallingConv::ARM_AAPCS_VFP;
5765 case AAPCS16_VFP:
return llvm::CallingConv::ARM_AAPCS_VFP;
5767 llvm_unreachable(
"bad ABI kind");
5770 void ARMABIInfo::setCCs() {
5776 if (abiCC != getLLVMDefaultCC())
5787 if (Size == 64 || Size == 128) {
5797 uint64_t Members)
const {
5798 assert(Base &&
"Base class should be set for homogeneous aggregate");
5803 (VT->getElementType()->isFloat16Type() ||
5804 VT->getElementType()->isHalfType())) {
5806 llvm::Type *NewVecTy = llvm::VectorType::get(
5808 llvm::Type *Ty = llvm::ArrayType::get(NewVecTy, Members);
5816 bool isVariadic)
const {
5824 bool IsEffectivelyAAPCS_VFP = getABIKind() == AAPCS_VFP && !isVariadic;
5829 if (isIllegalVectorType(Ty))
5830 return coerceIllegalVector(Ty);
5837 llvm::Type *ResType = IsEffectivelyAAPCS_VFP ?
5846 Ty = EnumTy->getDecl()->getIntegerType();
5861 if (IsEffectivelyAAPCS_VFP) {
5864 const Type *Base =
nullptr;
5865 uint64_t Members = 0;
5867 return classifyHomogeneousAggregate(Ty, Base, Members);
5868 }
else if (getABIKind() == ARMABIInfo::AAPCS16_VFP) {
5872 const Type *Base =
nullptr;
5873 uint64_t Members = 0;
5875 assert(Base && Members <= 4 &&
"unexpected homogeneous aggregate");
5882 if (getABIKind() == ARMABIInfo::AAPCS16_VFP &&
5895 uint64_t ABIAlign = 4;
5897 if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
5898 getABIKind() == ARMABIInfo::AAPCS) {
5905 assert(getABIKind() != ARMABIInfo::AAPCS16_VFP &&
"unexpected byval");
5908 TyAlign > ABIAlign);
5913 if (
getTarget().isRenderScriptTarget()) {
5934 llvm::LLVMContext &VMContext) {
5966 if (!RT)
return false;
5977 bool HadField =
false;
5980 i != e; ++i, ++idx) {
6019 bool isVariadic)
const {
6020 bool IsEffectivelyAAPCS_VFP =
6021 (getABIKind() == AAPCS_VFP || getABIKind() == AAPCS16_VFP) && !isVariadic;
6032 (VT->getElementType()->isFloat16Type() ||
6033 VT->getElementType()->isHalfType()))
6034 return coerceIllegalVector(RetTy);
6042 llvm::Type *ResType = IsEffectivelyAAPCS_VFP ?
6051 RetTy = EnumTy->getDecl()->getIntegerType();
6058 if (getABIKind() == APCS) {
6091 if (IsEffectivelyAAPCS_VFP) {
6092 const Type *Base =
nullptr;
6093 uint64_t Members = 0;
6095 return classifyHomogeneousAggregate(RetTy, Base, Members);
6104 if (
getTarget().isRenderScriptTarget()) {
6117 }
else if (Size <= 128 && getABIKind() == AAPCS16_VFP) {
6120 llvm::ArrayType::get(Int32Ty, llvm::alignTo(Size, 32) / 32);
6128 bool ARMABIInfo::isIllegalVectorType(
QualType Ty)
const {
6134 (VT->getElementType()->isFloat16Type() ||
6135 VT->getElementType()->isHalfType()))
6143 unsigned NumElements = VT->getNumElements();
6145 if (!llvm::isPowerOf2_32(NumElements) && NumElements != 3)
6149 unsigned NumElements = VT->getNumElements();
6152 if (!llvm::isPowerOf2_32(NumElements))
6161 bool ARMABIInfo::isLegalVectorTypeForSwift(
CharUnits vectorSize,
6163 unsigned numElts)
const {
6164 if (!llvm::isPowerOf2_32(numElts))
6166 unsigned size =
getDataLayout().getTypeStoreSizeInBits(eltTy);
6175 bool ARMABIInfo::isHomogeneousAggregateBaseType(
QualType Ty)
const {
6179 if (BT->getKind() == BuiltinType::Float ||
6180 BT->getKind() == BuiltinType::Double ||
6181 BT->getKind() == BuiltinType::LongDouble)
6185 if (VecSize == 64 || VecSize == 128)
6191 bool ARMABIInfo::isHomogeneousAggregateSmallEnough(
const Type *Base,
6192 uint64_t Members)
const {
6193 return Members <= 4;
6208 CharUnits TyAlignForABI = TyInfo.second;
6211 bool IsIndirect =
false;
6212 const Type *Base =
nullptr;
6213 uint64_t Members = 0;
6220 getABIKind() == ARMABIInfo::AAPCS16_VFP &&
6228 }
else if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
6229 getABIKind() == ARMABIInfo::AAPCS) {
6232 }
else if (getABIKind() == ARMABIInfo::AAPCS16_VFP) {
6239 TyInfo.second = TyAlignForABI;
6251 class NVPTXABIInfo :
public ABIInfo {
6268 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
6270 bool shouldEmitStaticExternCAliases()
const override;
6275 static void addNVVMMetadata(llvm::Function *F, StringRef Name,
int Operand);
6288 RetTy = EnumTy->getDecl()->getIntegerType();
6297 Ty = EnumTy->getDecl()->getIntegerType();
6322 llvm_unreachable(
"NVPTX does not support varargs");
6325 void NVPTXTargetCodeGenInfo::setTargetAttributes(
6327 if (GV->isDeclaration())
6329 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
6332 llvm::Function *F = cast<llvm::Function>(GV);
6338 if (FD->
hasAttr<OpenCLKernelAttr>()) {
6341 addNVVMMetadata(F,
"kernel", 1);
6343 F->addFnAttr(llvm::Attribute::NoInline);
6352 if (FD->
hasAttr<CUDAGlobalAttr>()) {
6354 addNVVMMetadata(F,
"kernel", 1);
6356 if (CUDALaunchBoundsAttr *
Attr = FD->
getAttr<CUDALaunchBoundsAttr>()) {
6358 llvm::APSInt MaxThreads(32);
6359 MaxThreads =
Attr->getMaxThreads()->EvaluateKnownConstInt(M.
getContext());
6361 addNVVMMetadata(F,
"maxntidx", MaxThreads.getExtValue());
6366 if (
Attr->getMinBlocks()) {
6367 llvm::APSInt MinBlocks(32);
6368 MinBlocks =
Attr->getMinBlocks()->EvaluateKnownConstInt(M.
getContext());
6371 addNVVMMetadata(F,
"minctasm", MinBlocks.getExtValue());
6377 void NVPTXTargetCodeGenInfo::addNVVMMetadata(llvm::Function *F, StringRef Name,
6379 llvm::Module *M = F->getParent();
6383 llvm::NamedMDNode *MD = M->getOrInsertNamedMetadata(
"nvvm.annotations");
6385 llvm::Metadata *MDVals[] = {
6386 llvm::ConstantAsMetadata::get(F), llvm::MDString::get(Ctx, Name),
6387 llvm::ConstantAsMetadata::get(
6388 llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), Operand))};
6390 MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
6393 bool NVPTXTargetCodeGenInfo::shouldEmitStaticExternCAliases()
const {
6411 bool isPromotableIntegerType(
QualType Ty)
const;
6412 bool isCompoundType(
QualType Ty)
const;
6413 bool isVectorArgumentType(
QualType Ty)
const;
6414 bool isFPArgumentType(
QualType Ty)
const;
6424 I.info = classifyArgumentType(I.type);
6431 bool asReturnValue)
const override {
6434 bool isSwiftErrorInRegister()
const override {
6447 bool SystemZABIInfo::isPromotableIntegerType(
QualType Ty)
const {
6450 Ty = EnumTy->getDecl()->getIntegerType();
6458 switch (BT->getKind()) {
6459 case BuiltinType::Int:
6460 case BuiltinType::UInt:
6468 bool SystemZABIInfo::isCompoundType(
QualType Ty)
const {
6474 bool SystemZABIInfo::isVectorArgumentType(
QualType Ty)
const {
6475 return (HasVector &&
6480 bool SystemZABIInfo::isFPArgumentType(
QualType Ty)
const {
6482 switch (BT->getKind()) {
6483 case BuiltinType::Float:
6484 case BuiltinType::Double:
6499 if (
const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
6500 for (
const auto &I : CXXRD->bases()) {
6509 Found = GetSingleElementType(Base);
6513 for (
const auto *FD : RD->
fields()) {
6525 Found = GetSingleElementType(FD->getType());
6556 bool InFPRs =
false;
6557 bool IsVector =
false;
6561 DirectTy = llvm::PointerType::getUnqual(DirectTy);
6566 InFPRs = ArgTy->isFloatTy() || ArgTy->isDoubleTy();
6567 IsVector = ArgTy->isVectorTy();
6568 UnpaddedSize = TyInfo.first;
6569 DirectAlign = TyInfo.second;
6572 if (IsVector && UnpaddedSize > PaddedSize)
6574 assert((UnpaddedSize <= PaddedSize) &&
"Invalid argument size.");
6576 CharUnits Padding = (PaddedSize - UnpaddedSize);
6580 llvm::ConstantInt::get(IndexTy, PaddedSize.
getQuantity());
6588 "overflow_arg_area_ptr");
6598 "overflow_arg_area");
6606 unsigned MaxRegs, RegCountField, RegSaveIndex;
6617 RegPadding = Padding;
6624 llvm::Value *MaxRegsV = llvm::ConstantInt::get(IndexTy, MaxRegs);
6631 CGF.
Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
6638 CGF.
Builder.CreateMul(RegCount, PaddedSizeV,
"scaled_reg_count");
6640 llvm::ConstantInt::get(IndexTy, RegSaveIndex * PaddedSize.
getQuantity()
6643 CGF.
Builder.CreateAdd(ScaledRegCount, RegBase,
"reg_offset");
6646 "reg_save_area_ptr");
6656 llvm::Value *One = llvm::ConstantInt::get(IndexTy, 1);
6658 CGF.
Builder.CreateAdd(RegCount, One,
"reg_count");
6679 "overflow_arg_area");
6686 MemAddr, InMemBlock,
"va_arg.addr");
6698 if (isVectorArgumentType(RetTy))
6712 if (isPromotableIntegerType(Ty))
6719 QualType SingleElementTy = GetSingleElementType(Ty);
6720 if (isVectorArgumentType(SingleElementTy) &&
6721 getContext().getTypeSize(SingleElementTy) == Size)
6725 if (Size != 8 && Size != 16 && Size != 32 && Size != 64)
6738 if (isFPArgumentType(SingleElementTy)) {
6739 assert(Size == 32 || Size == 64);
6750 if (isCompoundType(Ty))
6766 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
6772 void MSP430TargetCodeGenInfo::setTargetAttributes(
6774 if (GV->isDeclaration())
6776 if (
const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
6777 const auto *InterruptAttr = FD->getAttr<MSP430InterruptAttr>();
6782 llvm::Function *F = cast<llvm::Function>(GV);
6785 F->setCallingConv(llvm::CallingConv::MSP430_INTR);
6788 F->addFnAttr(llvm::Attribute::NoInline);
6789 F->addFnAttr(
"interrupt", llvm::utostr(InterruptAttr->getNumber()));
6799 class MipsABIInfo :
public ABIInfo {
6801 unsigned MinABIStackAlignInBytes, StackAlignInBytes;
6802 void CoerceToIntArgs(uint64_t TySize,
6809 ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8),
6810 StackAlignInBytes(IsO32 ? 8 : 16) {}
6821 unsigned SizeOfUnwindException;
6825 SizeOfUnwindException(IsO32 ? 24 : 32) {}
6831 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
6833 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
6835 llvm::Function *Fn = cast<llvm::Function>(GV);
6837 if (FD->
hasAttr<MipsLongCallAttr>())
6838 Fn->addFnAttr(
"long-call");
6839 else if (FD->
hasAttr<MipsShortCallAttr>())
6840 Fn->addFnAttr(
"short-call");
6843 if (GV->isDeclaration())
6846 if (FD->
hasAttr<Mips16Attr>()) {
6847 Fn->addFnAttr(
"mips16");
6849 else if (FD->
hasAttr<NoMips16Attr>()) {
6850 Fn->addFnAttr(
"nomips16");
6853 if (FD->
hasAttr<MicroMipsAttr>())
6854 Fn->addFnAttr(
"micromips");
6855 else if (FD->
hasAttr<NoMicroMipsAttr>())
6856 Fn->addFnAttr(
"nomicromips");
6858 const MipsInterruptAttr *
Attr = FD->
getAttr<MipsInterruptAttr>();
6863 switch (Attr->getInterrupt()) {
6864 case MipsInterruptAttr::eic: Kind =
"eic";
break;
6865 case MipsInterruptAttr::sw0: Kind =
"sw0";
break;
6866 case MipsInterruptAttr::sw1: Kind =
"sw1";
break;
6867 case MipsInterruptAttr::hw0: Kind =
"hw0";
break;
6868 case MipsInterruptAttr::hw1: Kind =
"hw1";
break;
6869 case MipsInterruptAttr::hw2: Kind =
"hw2";
break;
6870 case MipsInterruptAttr::hw3: Kind =
"hw3";
break;
6871 case MipsInterruptAttr::hw4: Kind =
"hw4";
break;
6872 case MipsInterruptAttr::hw5: Kind =
"hw5";
break;
6875 Fn->addFnAttr(
"interrupt", Kind);
6882 unsigned getSizeOfUnwindException()
const override {
6883 return SizeOfUnwindException;
6888 void MipsABIInfo::CoerceToIntArgs(
6890 llvm::IntegerType *IntTy =
6891 llvm::IntegerType::get(
getVMContext(), MinABIStackAlignInBytes * 8);
6894 for (
unsigned N = TySize / (MinABIStackAlignInBytes * 8); N; --N)
6895 ArgList.push_back(IntTy);
6898 unsigned R = TySize % (MinABIStackAlignInBytes * 8);
6901 ArgList.push_back(llvm::IntegerType::get(
getVMContext(), R));
6910 CoerceToIntArgs(TySize, ArgList);
6921 CoerceToIntArgs(TySize, ArgList);
6927 assert(!(TySize % 8) &&
"Size of structure must be multiple of 8.");
6929 uint64_t LastOffset = 0;
6931 llvm::IntegerType *I64 = llvm::IntegerType::get(
getVMContext(), 64);
6936 i != e; ++i, ++idx) {
6940 if (!BT || BT->
getKind() != BuiltinType::Double)
6948 for (
unsigned j = (Offset - LastOffset) / 64; j > 0; --j)
6949 ArgList.push_back(I64);
6952 ArgList.push_back(llvm::Type::getDoubleTy(
getVMContext()));
6953 LastOffset = Offset + 64;
6956 CoerceToIntArgs(TySize - LastOffset, IntArgList);
6957 ArgList.append(IntArgList.begin(), IntArgList.end());
6962 llvm::Type *MipsABIInfo::getPaddingType(uint64_t OrigOffset,
6964 if (OrigOffset + MinABIStackAlignInBytes > Offset)
6967 return llvm::IntegerType::get(
getVMContext(), (Offset - OrigOffset) * 8);
6974 uint64_t OrigOffset =
Offset;
6979 (uint64_t)StackAlignInBytes);
6980 unsigned CurrOffset = llvm::alignTo(Offset, Align);
6981 Offset = CurrOffset + llvm::alignTo(TySize, Align * 8) / 8;
6989 Offset = OrigOffset + MinABIStackAlignInBytes;
6998 getPaddingType(OrigOffset, CurrOffset));
7005 Ty = EnumTy->getDecl()->getIntegerType();
7009 return extendType(Ty);
7012 nullptr, 0, IsO32 ?
nullptr : getPaddingType(OrigOffset, CurrOffset));
7016 MipsABIInfo::returnAggregateInRegs(
QualType RetTy, uint64_t Size)
const {
7036 for (; b != e; ++b) {
7053 CoerceToIntArgs(Size, RTList);
7065 if (!IsO32 && Size == 0)
7089 RetTy = EnumTy->getDecl()->getIntegerType();
7119 unsigned SlotSizeInBits = IsO32 ? 32 : 64;
7121 bool DidPromote =
false;
7141 TyInfo, ArgSlotSize,
true);
7212 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
7214 if (GV->isDeclaration())
7216 const auto *FD = dyn_cast_or_null<FunctionDecl>(D);
7218 auto *Fn = cast<llvm::Function>(GV);
7220 if (FD->getAttr<AVRInterruptAttr>())
7221 Fn->addFnAttr(
"interrupt");
7223 if (FD->getAttr<AVRSignalAttr>())
7224 Fn->addFnAttr(
"signal");
7237 class TCETargetCodeGenInfo :
public DefaultTargetCodeGenInfo {
7240 : DefaultTargetCodeGenInfo(CGT) {}
7242 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
7246 void TCETargetCodeGenInfo::setTargetAttributes(
7248 if (GV->isDeclaration())
7250 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
7253 llvm::Function *F = cast<llvm::Function>(GV);
7256 if (FD->
hasAttr<OpenCLKernelAttr>()) {
7258 F->addFnAttr(llvm::Attribute::NoInline);
7259 const ReqdWorkGroupSizeAttr *
Attr = FD->
getAttr<ReqdWorkGroupSizeAttr>();
7262 llvm::LLVMContext &Context = F->getContext();
7263 llvm::NamedMDNode *OpenCLMetadata =
7265 "opencl.kernel_wg_size_info");
7268 Operands.push_back(llvm::ConstantAsMetadata::get(F));
7271 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
7272 M.
Int32Ty, llvm::APInt(32, Attr->getXDim()))));
7274 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
7275 M.
Int32Ty, llvm::APInt(32, Attr->getYDim()))));
7277 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
7278 M.
Int32Ty, llvm::APInt(32, Attr->getZDim()))));
7284 llvm::ConstantAsMetadata::get(llvm::ConstantInt::getTrue(Context)));
7285 OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Operands));
7299 class HexagonABIInfo :
public ABIInfo {
7339 Ty = EnumTy->getDecl()->getIntegerType();
7377 RetTy = EnumTy->getDecl()->getIntegerType();
7417 class LanaiABIInfo :
public DefaultABIInfo {
7421 bool shouldUseInReg(
QualType Ty, CCState &State)
const;
7444 bool LanaiABIInfo::shouldUseInReg(
QualType Ty, CCState &State)
const {
7446 unsigned SizeInRegs = llvm::alignTo(Size, 32U) / 32U;
7448 if (SizeInRegs == 0)
7451 if (SizeInRegs > State.FreeRegs) {
7456 State.FreeRegs -= SizeInRegs;
7462 CCState &State)
const {
7464 if (State.FreeRegs) {
7472 const unsigned MinABIStackAlignInBytes = 4;
7476 MinABIStackAlignInBytes);
7480 CCState &State)
const {
7486 return getIndirectResult(Ty,
false, State);
7495 return getIndirectResult(Ty,
true, State);
7503 if (SizeInRegs <= State.FreeRegs) {
7504 llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
7506 llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
7507 State.FreeRegs -= SizeInRegs;
7512 return getIndirectResult(Ty,
true, State);
7517 Ty = EnumTy->getDecl()->getIntegerType();
7519 bool InReg = shouldUseInReg(Ty, State);
7544 class AMDGPUABIInfo final :
public DefaultABIInfo {
7546 static const unsigned MaxNumRegsForArgsRet = 16;
7548 unsigned numRegsForType(
QualType Ty)
const;
7552 uint64_t Members)
const override;
7556 DefaultABIInfo(CGT) {}
7565 bool AMDGPUABIInfo::isHomogeneousAggregateBaseType(
QualType Ty)
const {
7569 bool AMDGPUABIInfo::isHomogeneousAggregateSmallEnough(
7570 const Type *Base, uint64_t Members)
const {
7574 return Members * NumRegs <= MaxNumRegsForArgsRet;
7578 unsigned AMDGPUABIInfo::numRegsForType(
QualType Ty)
const {
7579 unsigned NumRegs = 0;
7584 QualType EltTy = VT->getElementType();
7589 return (VT->getNumElements() + 1) / 2;
7591 unsigned EltNumRegs = (EltSize + 31) / 32;
7592 return EltNumRegs * VT->getNumElements();
7600 QualType FieldTy = Field->getType();
7601 NumRegs += numRegsForType(FieldTy);
7607 return (
getContext().getTypeSize(Ty) + 31) / 32;
7616 unsigned NumRegsLeft = MaxNumRegsForArgsRet;
7618 if (CC == llvm::CallingConv::AMDGPU_KERNEL) {
7619 Arg.info = classifyKernelArgumentType(Arg.type);
7658 if (numRegsForType(RetTy) <= MaxNumRegsForArgsRet)
7685 unsigned &NumRegsLeft)
const {
7686 assert(NumRegsLeft <= MaxNumRegsForArgsRet &&
"register estimate underflow");
7715 unsigned NumRegs = (Size + 31) / 32;
7716 NumRegsLeft -=
std::min(NumRegsLeft, NumRegs);
7729 if (NumRegsLeft > 0) {
7730 unsigned NumRegs = numRegsForType(Ty);
7731 if (NumRegsLeft >= NumRegs) {
7732 NumRegsLeft -= NumRegs;
7741 unsigned NumRegs = numRegsForType(Ty);
7742 NumRegsLeft -=
std::min(NumRegs, NumRegsLeft);
7752 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
7754 unsigned getOpenCLKernelCallingConv()
const override;
7757 llvm::PointerType *T,
QualType QT)
const override;
7759 LangAS getASTAllocaAddressSpace()
const override {
7764 const VarDecl *D)
const override;
7766 llvm::LLVMContext &C)
const override;
7769 llvm::Function *BlockInvokeFunc,
7771 bool shouldEmitStaticExternCAliases()
const override;
7776 void AMDGPUTargetCodeGenInfo::setTargetAttributes(
7778 if (GV->isDeclaration())
7780 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
7784 llvm::Function *F = cast<llvm::Function>(GV);
7787 FD->
getAttr<ReqdWorkGroupSizeAttr>() :
nullptr;
7790 (M.
getTriple().getOS() == llvm::Triple::AMDHSA))
7791 F->addFnAttr(
"amdgpu-implicitarg-num-bytes",
"48");
7793 const auto *FlatWGS = FD->
getAttr<AMDGPUFlatWorkGroupSizeAttr>();
7794 if (ReqdWGS || FlatWGS) {
7795 unsigned Min = FlatWGS ? FlatWGS->getMin() : 0;
7796 unsigned Max = FlatWGS ? FlatWGS->getMax() : 0;
7797 if (ReqdWGS && Min == 0 && Max == 0)
7798 Min = Max = ReqdWGS->getXDim() * ReqdWGS->getYDim() * ReqdWGS->getZDim();
7801 assert(Min <= Max &&
"Min must be less than or equal Max");
7803 std::string AttrVal = llvm::utostr(Min) +
"," + llvm::utostr(Max);
7804 F->addFnAttr(
"amdgpu-flat-work-group-size", AttrVal);
7806 assert(Max == 0 &&
"Max must be zero");
7809 if (
const auto *
Attr = FD->
getAttr<AMDGPUWavesPerEUAttr>()) {
7810 unsigned Min =
Attr->getMin();
7811 unsigned Max =
Attr->getMax();
7814 assert((Max == 0 || Min <= Max) &&
"Min must be less than or equal Max");
7816 std::string AttrVal = llvm::utostr(Min);
7818 AttrVal = AttrVal +
"," + llvm::utostr(Max);
7819 F->addFnAttr(
"amdgpu-waves-per-eu", AttrVal);
7821 assert(Max == 0 &&
"Max must be zero");
7824 if (
const auto *
Attr = FD->
getAttr<AMDGPUNumSGPRAttr>()) {
7825 unsigned NumSGPR =
Attr->getNumSGPR();
7828 F->addFnAttr(
"amdgpu-num-sgpr", llvm::utostr(NumSGPR));
7831 if (
const auto *
Attr = FD->
getAttr<AMDGPUNumVGPRAttr>()) {
7832 uint32_t NumVGPR =
Attr->getNumVGPR();
7835 F->addFnAttr(
"amdgpu-num-vgpr", llvm::utostr(NumVGPR));
7839 unsigned AMDGPUTargetCodeGenInfo::getOpenCLKernelCallingConv()
const {
7840 return llvm::CallingConv::AMDGPU_KERNEL;
7848 llvm::Constant *AMDGPUTargetCodeGenInfo::getNullPointer(
7852 return llvm::ConstantPointerNull::get(PT);
7855 auto NPT = llvm::PointerType::get(PT->getElementType(),
7857 return llvm::ConstantExpr::getAddrSpaceCast(
7858 llvm::ConstantPointerNull::get(NPT), PT);
7862 AMDGPUTargetCodeGenInfo::getGlobalVarAddressSpace(
CodeGenModule &CGM,
7866 "Address space agnostic languages only");
7870 return DefaultGlobalAS;
7879 return ConstAS.getValue();
7881 return DefaultGlobalAS;
7885 AMDGPUTargetCodeGenInfo::getLLVMSyncScopeID(
SyncScope S,
7886 llvm::LLVMContext &C)
const {
7901 return C.getOrInsertSyncScopeID(Name);
7904 bool AMDGPUTargetCodeGenInfo::shouldEmitStaticExternCAliases()
const {
7910 FT = getABIInfo().getContext().adjustFunctionType(
7921 class SparcV8ABIInfo :
public DefaultABIInfo {
7984 class SparcV9ABIInfo :
public ABIInfo {
8005 struct CoerceBuilder {
8006 llvm::LLVMContext &Context;
8007 const llvm::DataLayout &DL;
8012 CoerceBuilder(llvm::LLVMContext &c,
const llvm::DataLayout &dl)
8013 : Context(c), DL(dl), Size(0), InReg(
false) {}
8016 void pad(uint64_t ToSize) {
8017 assert(ToSize >= Size &&
"Cannot remove elements");
8022 uint64_t Aligned = llvm::alignTo(Size, 64);
8023 if (Aligned > Size && Aligned <= ToSize) {
8024 Elems.push_back(llvm::IntegerType::get(Context, Aligned - Size));
8029 while (Size + 64 <= ToSize) {
8030 Elems.push_back(llvm::Type::getInt64Ty(Context));
8035 if (Size < ToSize) {
8036 Elems.push_back(llvm::IntegerType::get(Context, ToSize - Size));
8050 Elems.push_back(Ty);
8051 Size = Offset + Bits;
8055 void addStruct(uint64_t Offset, llvm::StructType *StrTy) {
8056 const llvm::StructLayout *Layout = DL.getStructLayout(StrTy);
8057 for (
unsigned i = 0, e = StrTy->getNumElements(); i != e; ++i) {
8058 llvm::Type *ElemTy = StrTy->getElementType(i);
8059 uint64_t ElemOffset = Offset + Layout->getElementOffsetInBits(i);
8060 switch (ElemTy->getTypeID()) {
8061 case llvm::Type::StructTyID:
8062 addStruct(ElemOffset, cast<llvm::StructType>(ElemTy));
8064 case llvm::Type::FloatTyID:
8065 addFloat(ElemOffset, ElemTy, 32);
8067 case llvm::Type::DoubleTyID:
8068 addFloat(ElemOffset, ElemTy, 64);
8070 case llvm::Type::FP128TyID:
8071 addFloat(ElemOffset, ElemTy, 128);
8073 case llvm::Type::PointerTyID:
8074 if (ElemOffset % 64 == 0) {
8076 Elems.push_back(ElemTy);
8087 bool isUsableType(llvm::StructType *Ty)
const {
8088 return llvm::makeArrayRef(Elems) == Ty->elements();
8093 if (Elems.size() == 1)
8094 return Elems.front();
8096 return llvm::StructType::get(Context, Elems);
8111 if (Size > SizeLimit)
8116 Ty = EnumTy->getDecl()->getIntegerType();
8119 if (Size < 64 && Ty->isIntegerType())
8133 llvm::StructType *StrTy = dyn_cast<llvm::StructType>(
CGT.
ConvertType(Ty));
8138 CB.addStruct(0, StrTy);
8139 CB.pad(llvm::alignTo(CB.DL.getTypeSizeInBits(StrTy), 64));
8142 llvm::Type *CoerceTy = CB.isUsableType(StrTy) ? StrTy : CB.getType();
8161 llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
8171 llvm_unreachable(
"Unsupported ABI kind for va_arg");
8203 return Builder.
CreateBitCast(ArgAddr, ArgPtrTy,
"arg.addr");
8235 llvm::IntegerType *i8 = CGF.
Int8Ty;
8236 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
8237 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
8264 class ARCABIInfo :
public DefaultABIInfo {
8266 using DefaultABIInfo::DefaultABIInfo;
8273 if (!State.FreeRegs)
8279 if (sz < State.FreeRegs)
8280 State.FreeRegs -= sz;
8296 updateState(I.info, I.type, State);
8320 const unsigned MinABIStackAlignInBytes = 4;
8323 TypeAlign > MinABIStackAlignInBytes);
8334 uint8_t FreeRegs)
const {
8340 return getIndirectByRef(Ty, FreeRegs > 0);
8343 return getIndirectByValue(Ty);
8348 Ty = EnumTy->getDecl()->getIntegerType();
8350 auto SizeInRegs = llvm::alignTo(
getContext().getTypeSize(Ty), 32) / 32;
8355 return getIndirectByValue(Ty);
8363 llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
8365 llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
8367 return FreeRegs >= SizeInRegs ?
8384 auto RetSize = llvm::alignTo(
getContext().getTypeSize(RetTy), 32) / 32;
8386 return getIndirectByRef(RetTy,
true);
8457 class TypeStringCache {
8458 enum Status {NonRecursive, Recursive,
Incomplete, IncompleteUsed};
8462 std::string Swapped;
8465 std::map<const IdentifierInfo *, struct Entry> Map;
8466 unsigned IncompleteCount;
8467 unsigned IncompleteUsedCount;
8469 TypeStringCache() : IncompleteCount(0), IncompleteUsedCount(0) {}
8479 class FieldEncoding {
8483 FieldEncoding(
bool b, SmallStringEnc &e) : HasName(b), Enc(e.c_str()) {}
8484 StringRef
str() {
return Enc; }
8485 bool operator<(
const FieldEncoding &rhs)
const {
8486 if (HasName != rhs.HasName)
return HasName;
8487 return Enc < rhs.Enc;
8491 class XCoreABIInfo :
public DefaultABIInfo {
8499 mutable TypeStringCache TSC;
8503 void emitTargetMD(
const Decl *D, llvm::GlobalValue *GV,
8523 if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
8524 AI.setCoerceToType(ArgTy);
8525 llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
8529 switch (AI.getKind()) {
8533 llvm_unreachable(
"Unsupported ABI kind for va_arg");
8535 Val =
Address(llvm::UndefValue::get(ArgPtrTy), TypeAlign);
8543 ArgSize = ArgSize.
alignTo(SlotSize);
8568 std::string StubEnc) {
8572 assert( (E.Str.empty() || E.State == Recursive) &&
8573 "Incorrectly use of addIncomplete");
8574 assert(!StubEnc.empty() &&
"Passing an empty string to addIncomplete()");
8575 E.Swapped.swap(E.Str);
8576 E.Str.swap(StubEnc);
8585 bool TypeStringCache::removeIncomplete(
const IdentifierInfo *ID) {
8588 auto I = Map.find(ID);
8589 assert(I != Map.end() &&
"Entry not present");
8590 Entry &E = I->second;
8592 E.State == IncompleteUsed) &&
8593 "Entry must be an incomplete type");
8594 bool IsRecursive =
false;
8595 if (E.State == IncompleteUsed) {
8598 --IncompleteUsedCount;
8600 if (E.Swapped.empty())
8604 E.Swapped.swap(E.Str);
8606 E.State = Recursive;
8614 void TypeStringCache::addIfComplete(
const IdentifierInfo *ID, StringRef Str,
8616 if (!ID || IncompleteUsedCount)
8619 if (IsRecursive && !E.Str.empty()) {
8620 assert(E.State==Recursive && E.Str.size() == Str.size() &&
8621 "This is not the same Recursive entry");
8627 assert(E.Str.empty() &&
"Entry already present");
8629 E.State = IsRecursive? Recursive : NonRecursive;
8638 auto I = Map.find(ID);
8641 Entry &E = I->second;
8642 if (E.State == Recursive && IncompleteCount)
8647 E.State = IncompleteUsed;
8648 ++IncompleteUsedCount;
8669 void XCoreTargetCodeGenInfo::emitTargetMD(
const Decl *D, llvm::GlobalValue *GV,
8673 llvm::LLVMContext &Ctx = CGM.
getModule().getContext();
8674 llvm::Metadata *MDVals[] = {llvm::ConstantAsMetadata::get(GV),
8675 llvm::MDString::get(Ctx, Enc.str())};
8676 llvm::NamedMDNode *MD =
8677 CGM.
getModule().getOrInsertNamedMetadata(
"xcore.typestrings");
8678 MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
8691 unsigned getOpenCLKernelCallingConv()
const override;
8699 DefaultABIInfo SPIRABI(CGM.
getTypes());
8700 SPIRABI.computeInfo(FI);
8705 unsigned SPIRTargetCodeGenInfo::getOpenCLKernelCallingConv()
const {
8706 return llvm::CallingConv::SPIR_KERNEL;
8711 TypeStringCache &TSC);
8719 TypeStringCache &TSC) {
8720 for (
const auto *Field : RD->
fields()) {
8723 Enc += Field->getName();
8725 if (Field->isBitField()) {
8727 llvm::raw_svector_ostream OS(Enc);
8728 OS << Field->getBitWidthValue(CGM.
getContext());
8731 if (!
appendType(Enc, Field->getType(), CGM, TSC))
8733 if (Field->isBitField())
8736 FE.emplace_back(!Field->getName().empty(), Enc);
8748 StringRef TypeString = TSC.lookupStr(ID);
8749 if (!TypeString.empty()) {
8755 size_t Start = Enc.size();
8763 bool IsRecursive =
false;
8770 std::string StubEnc(Enc.substr(Start).str());
8772 TSC.addIncomplete(ID, std::move(StubEnc));
8774 (void) TSC.removeIncomplete(ID);
8777 IsRecursive = TSC.removeIncomplete(ID);
8783 unsigned E = FE.size();
8784 for (
unsigned I = 0; I != E; ++I) {
8791 TSC.addIfComplete(ID, Enc.substr(Start), IsRecursive);
8797 TypeStringCache &TSC,
8800 StringRef TypeString = TSC.lookupStr(ID);
8801 if (!TypeString.empty()) {
8806 size_t Start = Enc.size();
8815 for (
auto I = ED->enumerator_begin(), E = ED->enumerator_end(); I != E;
8817 SmallStringEnc EnumEnc;
8819 EnumEnc += I->getName();
8821 I->getInitVal().toString(EnumEnc);
8823 FE.push_back(FieldEncoding(!I->getName().empty(), EnumEnc));
8826 unsigned E = FE.size();
8827 for (
unsigned I = 0; I != E; ++I) {
8834 TSC.addIfComplete(ID, Enc.substr(Start),
false);
8842 static const char *
const Table[]={
"",
"c:",
"r:",
"cr:",
"v:",
"cv:",
"rv:",
"crv:"};
8850 Enc += Table[Lookup];
8855 const char *EncType;
8857 case BuiltinType::Void:
8860 case BuiltinType::Bool:
8863 case BuiltinType::Char_U:
8866 case BuiltinType::UChar:
8869 case BuiltinType::SChar:
8872 case BuiltinType::UShort:
8875 case BuiltinType::Short:
8878 case BuiltinType::UInt:
8881 case BuiltinType::Int:
8884 case BuiltinType::ULong:
8887 case BuiltinType::Long:
8890 case BuiltinType::ULongLong:
8893 case BuiltinType::LongLong:
8896 case BuiltinType::Float:
8899 case BuiltinType::Double:
8902 case BuiltinType::LongDouble:
8915 TypeStringCache &TSC) {
8927 TypeStringCache &TSC, StringRef NoSizeEnc) {
8932 CAT->getSize().toStringUnsigned(Enc);
8948 TypeStringCache &TSC) {
8955 auto I = FPT->param_type_begin();
8956 auto E = FPT->param_type_end();
8965 if (FPT->isVariadic())
8968 if (FPT->isVariadic())
8982 TypeStringCache &TSC) {
9019 if (
const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
9022 return appendType(Enc, FD->getType(), CGM, TSC);
9025 if (
const VarDecl *VD = dyn_cast<VarDecl>(D)) {
9028 QualType QT = VD->getType().getCanonicalType();
9045 class RISCVABIInfo :
public DefaultABIInfo {
9048 static const int NumArgGPRs = 8;
9052 : DefaultABIInfo(CGT), XLen(XLen) {}
9059 int &ArgGPRsLeft)
const;
9086 int ArgGPRsLeft = IsRetIndirect ? NumArgGPRs - 1 : NumArgGPRs;
9091 bool IsFixed = ArgNum < NumFixedArgs;
9098 int &ArgGPRsLeft)
const {
9099 assert(ArgGPRsLeft <= NumArgGPRs &&
"Arg GPR tracking underflow");
9117 bool MustUseStack =
false;
9121 int NeededArgGPRs = 1;
9122 if (!IsFixed && NeededAlign == 2 * XLen)
9123 NeededArgGPRs = 2 + (ArgGPRsLeft % 2);
9124 else if (Size > XLen && Size <= 2 * XLen)
9127 if (NeededArgGPRs > ArgGPRsLeft) {
9128 MustUseStack =
true;
9129 NeededArgGPRs = ArgGPRsLeft;
9132 ArgGPRsLeft -= NeededArgGPRs;
9137 Ty = EnumTy->getDecl()->getIntegerType();
9141 if (Size < XLen && Ty->isIntegralOrEnumerationType() && !MustUseStack) {
9142 return extendType(Ty);
9150 if (Size <= 2 * XLen) {
9158 }
else if (Alignment == 2 * XLen) {
9173 int ArgGPRsLeft = 2;
9191 std::pair<CharUnits, CharUnits> SizeAndAlign =
9195 bool IsIndirect = SizeAndAlign.first > 2 * SlotSize;
9215 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
9217 const auto *FD = dyn_cast_or_null<FunctionDecl>(D);
9220 const auto *
Attr = FD->getAttr<RISCVInterruptAttr>();
9225 switch (
Attr->getInterrupt()) {
9226 case RISCVInterruptAttr::user: Kind =
"user";
break;
9227 case RISCVInterruptAttr::supervisor: Kind =
"supervisor";
break;
9228 case RISCVInterruptAttr::machine: Kind =
"machine";
break;
9231 auto *Fn = cast<llvm::Function>(GV);
9233 Fn->addFnAttr(
"interrupt", Kind);
9243 return getTriple().supportsCOMDAT();
9247 if (TheTargetCodeGenInfo)
9248 return *TheTargetCodeGenInfo;
9252 this->TheTargetCodeGenInfo.reset(
P);
9257 switch (Triple.getArch()) {
9259 return SetCGInfo(
new DefaultTargetCodeGenInfo(Types));
9261 case llvm::Triple::le32:
9262 return SetCGInfo(
new PNaClTargetCodeGenInfo(Types));
9263 case llvm::Triple::mips:
9264 case llvm::Triple::mipsel:
9265 if (Triple.getOS() == llvm::Triple::NaCl)
9266 return SetCGInfo(
new PNaClTargetCodeGenInfo(Types));
9267 return SetCGInfo(
new MIPSTargetCodeGenInfo(Types,
true));
9269 case llvm::Triple::mips64:
9270 case llvm::Triple::mips64el:
9271 return SetCGInfo(
new MIPSTargetCodeGenInfo(Types,
false));
9273 case llvm::Triple::avr:
9274 return SetCGInfo(
new AVRTargetCodeGenInfo(Types));
9276 case llvm::Triple::aarch64:
9277 case llvm::Triple::aarch64_be: {
9278 AArch64ABIInfo::ABIKind
Kind = AArch64ABIInfo::AAPCS;
9279 if (
getTarget().getABI() ==
"darwinpcs")
9280 Kind = AArch64ABIInfo::DarwinPCS;
9281 else if (Triple.isOSWindows())
9283 new WindowsAArch64TargetCodeGenInfo(Types, AArch64ABIInfo::Win64));
9285 return SetCGInfo(
new AArch64TargetCodeGenInfo(Types, Kind));
9288 case llvm::Triple::wasm32:
9289 case llvm::Triple::wasm64:
9290 return SetCGInfo(
new WebAssemblyTargetCodeGenInfo(Types));
9292 case llvm::Triple::arm:
9293 case llvm::Triple::armeb:
9294 case llvm::Triple::thumb:
9295 case llvm::Triple::thumbeb: {
9296 if (Triple.getOS() == llvm::Triple::Win32) {
9298 new WindowsARMTargetCodeGenInfo(Types, ARMABIInfo::AAPCS_VFP));
9301 ARMABIInfo::ABIKind
Kind = ARMABIInfo::AAPCS;
9303 if (ABIStr ==
"apcs-gnu")
9304 Kind = ARMABIInfo::APCS;
9305 else if (ABIStr ==
"aapcs16")
9306 Kind = ARMABIInfo::AAPCS16_VFP;
9307 else if (CodeGenOpts.FloatABI ==
"hard" ||
9308 (CodeGenOpts.FloatABI !=
"soft" &&
9309 (Triple.getEnvironment() == llvm::Triple::GNUEABIHF ||
9310 Triple.getEnvironment() == llvm::Triple::MuslEABIHF ||
9311 Triple.getEnvironment() == llvm::Triple::EABIHF)))
9312 Kind = ARMABIInfo::AAPCS_VFP;
9314 return SetCGInfo(
new ARMTargetCodeGenInfo(Types, Kind));
9317 case llvm::Triple::ppc:
9319 new PPC32TargetCodeGenInfo(Types, CodeGenOpts.FloatABI ==
"soft"));
9320 case llvm::Triple::ppc64:
9321 if (Triple.isOSBinFormatELF()) {
9322 PPC64_SVR4_ABIInfo::ABIKind
Kind = PPC64_SVR4_ABIInfo::ELFv1;
9324 Kind = PPC64_SVR4_ABIInfo::ELFv2;
9326 bool IsSoftFloat = CodeGenOpts.FloatABI ==
"soft";
9328 return SetCGInfo(
new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX,
9331 return SetCGInfo(
new PPC64TargetCodeGenInfo(Types));
9332 case llvm::Triple::ppc64le: {
9333 assert(Triple.isOSBinFormatELF() &&
"PPC64 LE non-ELF not supported!");
9334 PPC64_SVR4_ABIInfo::ABIKind
Kind = PPC64_SVR4_ABIInfo::ELFv2;
9336 Kind = PPC64_SVR4_ABIInfo::ELFv1;
9338 bool IsSoftFloat = CodeGenOpts.FloatABI ==
"soft";
9340 return SetCGInfo(
new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX,
9344 case llvm::Triple::nvptx:
9345 case llvm::Triple::nvptx64:
9346 return SetCGInfo(
new NVPTXTargetCodeGenInfo(Types));
9348 case llvm::Triple::msp430:
9349 return SetCGInfo(
new MSP430TargetCodeGenInfo(Types));
9351 case llvm::Triple::riscv32:
9352 return SetCGInfo(
new RISCVTargetCodeGenInfo(Types, 32));
9353 case llvm::Triple::riscv64:
9354 return SetCGInfo(
new RISCVTargetCodeGenInfo(Types, 64));
9356 case llvm::Triple::systemz: {
9358 return SetCGInfo(
new SystemZTargetCodeGenInfo(Types, HasVector));
9361 case llvm::Triple::tce:
9362 case llvm::Triple::tcele:
9363 return SetCGInfo(
new TCETargetCodeGenInfo(Types));
9365 case llvm::Triple::x86: {
9366 bool IsDarwinVectorABI = Triple.isOSDarwin();
9367 bool RetSmallStructInRegABI =
9368 X86_32TargetCodeGenInfo::isStructReturnInRegABI(Triple, CodeGenOpts);
9369 bool IsWin32FloatStructABI = Triple.isOSWindows() && !Triple.isOSCygMing();
9371 if (Triple.getOS() == llvm::Triple::Win32) {
9372 return SetCGInfo(
new WinX86_32TargetCodeGenInfo(
9373 Types, IsDarwinVectorABI, RetSmallStructInRegABI,
9374 IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters));
9376 return SetCGInfo(
new X86_32TargetCodeGenInfo(
9377 Types, IsDarwinVectorABI, RetSmallStructInRegABI,
9378 IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters,
9379 CodeGenOpts.FloatABI ==
"soft"));
9383 case llvm::Triple::x86_64: {
9387 ? X86AVXABILevel::AVX512
9390 switch (Triple.getOS()) {
9391 case llvm::Triple::Win32:
9392 return SetCGInfo(
new WinX86_64TargetCodeGenInfo(Types, AVXLevel));
9393 case llvm::Triple::PS4:
9394 return SetCGInfo(
new PS4TargetCodeGenInfo(Types, AVXLevel));
9396 return SetCGInfo(
new X86_64TargetCodeGenInfo(Types, AVXLevel));
9399 case llvm::Triple::hexagon:
9400 return SetCGInfo(
new HexagonTargetCodeGenInfo(Types));
9401 case llvm::Triple::lanai:
9402 return SetCGInfo(
new LanaiTargetCodeGenInfo(Types));
9403 case llvm::Triple::r600:
9404 return SetCGInfo(
new AMDGPUTargetCodeGenInfo(Types));
9405 case llvm::Triple::amdgcn:
9406 return SetCGInfo(
new AMDGPUTargetCodeGenInfo(Types));
9407 case llvm::Triple::sparc:
9408 return SetCGInfo(
new SparcV8TargetCodeGenInfo(Types));
9409 case llvm::Triple::sparcv9:
9410 return SetCGInfo(
new SparcV9TargetCodeGenInfo(Types));
9411 case llvm::Triple::xcore:
9412 return SetCGInfo(
new XCoreTargetCodeGenInfo(Types));
9413 case llvm::Triple::arc:
9414 return SetCGInfo(
new ARCTargetCodeGenInfo(Types));
9415 case llvm::Triple::spir:
9416 case llvm::Triple::spir64:
9417 return SetCGInfo(
new SPIRTargetCodeGenInfo(Types));
9428 llvm::Function *Invoke,
9430 auto *InvokeFT = Invoke->getFunctionType();
9432 for (
auto &
P : InvokeFT->params())
9433 ArgTys.push_back(
P);
9435 std::string Name = Invoke->getName().str() +
"_kernel";
9436 auto *FT = llvm::FunctionType::get(llvm::Type::getVoidTy(C), ArgTys,
false);
9439 auto IP = CGF.
Builder.saveIP();
9442 Builder.SetInsertPoint(BB);
9444 for (
auto &A : F->args())
9446 Builder.CreateCall(Invoke, Args);
9447 Builder.CreateRetVoid();
9448 Builder.restoreIP(IP);
9460 llvm::Function *AMDGPUTargetCodeGenInfo::createEnqueuedBlockKernel(
9466 auto *BlockTy = BlockLiteral->getType()->getPointerElementType();
9467 auto *InvokeFT = Invoke->getFunctionType();
9476 ArgTys.push_back(BlockTy);
9477 ArgTypeNames.push_back(llvm::MDString::get(C,
"__block_literal"));
9478 AddressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(0)));
9479 ArgBaseTypeNames.push_back(llvm::MDString::get(C,
"__block_literal"));
9480 ArgTypeQuals.push_back(llvm::MDString::get(C,
""));
9481 AccessQuals.push_back(llvm::MDString::get(C,
"none"));
9482 ArgNames.push_back(llvm::MDString::get(C,
"block_literal"));
9483 for (
unsigned I = 1, E = InvokeFT->getNumParams(); I < E; ++I) {
9484 ArgTys.push_back(InvokeFT->getParamType(I));
9485 ArgTypeNames.push_back(llvm::MDString::get(C,
"void*"));
9486 AddressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(3)));
9487 AccessQuals.push_back(llvm::MDString::get(C,
"none"));
9488 ArgBaseTypeNames.push_back(llvm::MDString::get(C,
"void*"));
9489 ArgTypeQuals.push_back(llvm::MDString::get(C,
""));
9491 llvm::MDString::get(C, (Twine(
"local_arg") + Twine(I)).
str()));
9493 std::string Name = Invoke->getName().str() +
"_kernel";
9494 auto *FT = llvm::FunctionType::get(llvm::Type::getVoidTy(C), ArgTys,
false);
9497 F->addFnAttr(
"enqueued-block");
9498 auto IP = CGF.
Builder.saveIP();
9500 Builder.SetInsertPoint(BB);
9501 unsigned BlockAlign = CGF.
CGM.
getDataLayout().getPrefTypeAlignment(BlockTy);
9502 auto *BlockPtr = Builder.CreateAlloca(BlockTy,
nullptr);
9503 BlockPtr->setAlignment(BlockAlign);
9504 Builder.CreateAlignedStore(F->arg_begin(), BlockPtr, BlockAlign);
9505 auto *Cast = Builder.CreatePointerCast(BlockPtr, InvokeFT->getParamType(0));
9507 Args.push_back(Cast);
9508 for (
auto I = F->arg_begin() + 1, E = F->arg_end(); I != E; ++I)
9510 Builder.CreateCall(Invoke, Args);
9511 Builder.CreateRetVoid();
9512 Builder.restoreIP(IP);
9514 F->setMetadata(
"kernel_arg_addr_space", llvm::MDNode::get(C, AddressQuals));
9515 F->setMetadata(
"kernel_arg_access_qual", llvm::MDNode::get(C, AccessQuals));
9516 F->setMetadata(
"kernel_arg_type", llvm::MDNode::get(C, ArgTypeNames));
9517 F->setMetadata(
"kernel_arg_base_type",
9518 llvm::MDNode::get(C, ArgBaseTypeNames));
9519 F->setMetadata(
"kernel_arg_type_qual", llvm::MDNode::get(C, ArgTypeQuals));
9521 F->setMetadata(
"kernel_arg_name", llvm::MDNode::get(C, ArgNames));
const llvm::DataLayout & getDataLayout() const
CGCXXABI & getCXXABI() const
Ignore - Ignore the argument (treat as void).
bool isFloatingPoint() const
CharUnits alignTo(const CharUnits &Align) const
alignTo - Returns the next integer (mod 2**64) that is greater than or equal to this quantity and is ...
Represents a function declaration or definition.
void setEffectiveCallingConvention(unsigned Value)
static bool addFieldSizes(ASTContext &Context, const RecordDecl *RD, uint64_t &Size)
if(T->getSizeExpr()) TRY_TO(TraverseStmt(T -> getSizeExpr()))
PointerType - C99 6.7.5.1 - Pointer Declarators.
QualType getPointeeType() const
A (possibly-)qualified type.
bool isBlockPointerType() const
CodeGenTypes & getTypes()
bool isMemberPointerType() const
llvm::Type * ConvertTypeForMem(QualType T)
const CodeGenOptions & getCodeGenOpts() const
bool isUnsignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is unsigned or an enumeration types whose underlying ...
bool isHomogeneousAggregate(QualType Ty, const Type *&Base, uint64_t &Members) const
isHomogeneousAggregate - Return true if a type is an ELFv2 homogeneous aggregate. ...
static void setCUDAKernelCallingConvention(CanQualType &FTy, CodeGenModule &CGM, const FunctionDecl *FD)
Set calling convention for CUDA/HIP kernel.
Address CreateMemTemp(QualType T, const Twine &Name="tmp", Address *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignmen and cas...
static ABIArgInfo classifyType(CodeGenModule &CGM, CanQualType type, bool forReturn)
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D...
FunctionType - C99 6.7.5.3 - Function Declarators.
llvm::ConstantInt * getSize(CharUnits N)
const llvm::Triple & getTriple() const
Returns the target triple of the primary target.
CharUnits getBaseClassOffset(const CXXRecordDecl *Base) const
getBaseClassOffset - Get the offset, in chars, for the given base class.
bool isRealFloatingType() const
Floating point categories.
Extend - Valid only for integer argument types.
bool isRecordType() const
Decl - This represents one declaration (or definition), e.g.
static bool appendEnumType(SmallStringEnc &Enc, const EnumType *ET, TypeStringCache &TSC, const IdentifierInfo *ID)
Appends enum types to Enc and adds the encoding to the cache.
CharUnits getPointerSize() const
const RecordType * getAsStructureType() const
Direct - Pass the argument directly using the normal converted LLVM type, or by coercing to another s...
const llvm::DataLayout & getDataLayout() const
static const Type * isSingleElementStruct(QualType T, ASTContext &Context)
isSingleElementStruct - Determine if a structure is a "single element struct", i.e.
The base class of the type hierarchy.
const ABIInfo & getABIInfo() const
getABIInfo() - Returns ABI info helper for the target.
Represents an array type, per C99 6.7.5.2 - Array Declarators.
bool isRestrictQualified() const
Determine whether this type is restrict-qualified.
bool isZero() const
isZero - Test whether the quantity equals zero.
const TargetInfo & getTargetInfo() const
static bool appendType(SmallStringEnc &Enc, QualType QType, const CodeGen::CodeGenModule &CGM, TypeStringCache &TSC)
Handles the type's qualifier before dispatching a call to handle specific type encodings.
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
virtual ~TargetCodeGenInfo()
void setCanBeFlattened(bool Flatten)
QualType getElementType() const
const RecordType * getAsUnionType() const
NOTE: getAs*ArrayType are methods on ASTContext.
unsigned getTypeAlign(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in bits.
ASTContext & getContext() const
Represents a variable declaration or definition.
LangAS getLangASFromTargetAS(unsigned TargetAS)
bool isEnumeralType() const
const T * getAs() const
Member-template getAs<specific type>'.
bool hasPointerRepresentation() const
Whether this type is represented natively as a pointer.
bool supportsCOMDAT() const
LangAS
Defines the address space values used by the address space qualifier of QualType. ...
llvm::LLVMContext & getVMContext() const
void setCoerceToType(llvm::Type *T)
Address CreateConstInBoundsByteGEP(Address Addr, CharUnits Offset, const llvm::Twine &Name="")
Given a pointer to i8, adjust it by a given constant offset.
llvm::Value * getPointer() const
llvm::Type * ConvertTypeForMem(QualType T)
ConvertTypeForMem - Convert type T into a llvm::Type.
static ABIArgInfo getIgnore()
static bool isAggregateTypeForABI(QualType T)
bool hasFloatingRepresentation() const
Determine whether this type has a floating-point representation of some sort, e.g., it is a floating-point type or a vector thereof.
virtual unsigned getOpenCLKernelCallingConv() const
Get LLVM calling convention for OpenCL kernel.
Represents a struct/union/class.
uint64_t getPointerWidth(unsigned AddrSpace) const
Return the width of pointers on this target, for the specified address space.
static ABIArgInfo coerceToIntArray(QualType Ty, ASTContext &Context, llvm::LLVMContext &LLVMContext)
CodeGen::CodeGenTypes & CGT
One of these records is kept for each identifier that is lexed.
Address getAddress() const
Indirect - Pass the argument indirectly via a hidden pointer with the specified alignment (0 indicate...
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
ABIArgInfo classifyArgumentType(CodeGenModule &CGM, CanQualType type)
Classify the rules for how to pass a particular type.
llvm::IntegerType * Int64Ty
RecordDecl * getDefinition() const
Returns the RecordDecl that actually defines this struct/union/class.
static llvm::Type * GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi, const llvm::DataLayout &TD)
GetX86_64ByValArgumentPair - Given a high and low type that can ideally be used as elements of a two ...
static CharUnits getTypeAllocSize(CodeGenModule &CGM, llvm::Type *type)
field_range fields() const
static Address EmitX86_64VAArgFromMemory(CodeGenFunction &CGF, Address VAListAddr, QualType Ty)
Represents a member of a struct/union/class.
bool isReferenceType() const
CharUnits getTypeUnadjustedAlignInChars(QualType T) const
getTypeUnadjustedAlignInChars - Return the ABI-specified alignment of a type, in characters, before alignment adjustments.
bool isSpecificBuiltinType(unsigned K) const
Test for a particular builtin type.
static CharUnits Zero()
Zero - Construct a CharUnits quantity of zero.
static bool occupiesMoreThan(CodeGenTypes &cgt, ArrayRef< llvm::Type *> scalarTypes, unsigned maxAllRegisters)
Does the given lowering require more than the given number of registers when expanded?
ABIInfo(CodeGen::CodeGenTypes &cgt)
bool isIntegralOrEnumerationType() const
Determine whether this type is an integral or enumeration type.
static ABIArgInfo getIndirectInReg(CharUnits Alignment, bool ByVal=true, bool Realign=false)
virtual bool hasLegalHalfType() const
Determine whether _Float16 is supported on this target.
virtual StringRef getABI() const
Get the ABI currently in use.
static ABIArgInfo getDirect(llvm::Type *T=nullptr, unsigned Offset=0, llvm::Type *Padding=nullptr, bool CanBeFlattened=true)
static bool hasScalarEvaluationKind(QualType T)
bool getHasRegParm() const
bool isBitField() const
Determines whether this field is a bitfield.
static ABIArgInfo getExpandWithPadding(bool PaddingInReg, llvm::Type *Padding)
static bool appendRecordType(SmallStringEnc &Enc, const RecordType *RT, const CodeGen::CodeGenModule &CGM, TypeStringCache &TSC, const IdentifierInfo *ID)
Appends structure and union types to Enc and adds encoding to cache.
Address CreateElementBitCast(Address Addr, llvm::Type *Ty, const llvm::Twine &Name="")
Cast the element type of the given address to a different type, preserving information like the align...
CharUnits - This is an opaque type for sizes expressed in character units.
const ArrayType * getAsArrayTypeUnsafe() const
A variant of getAs<> for array types which silently discards qualifiers from the outermost type...
CharUnits getAlignment() const
Return the alignment of this pointer.
static void rewriteInputConstraintReferences(unsigned FirstIn, unsigned NumNewOuts, std::string &AsmString)
Rewrite input constraint references after adding some output constraints.
static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty)
bool isVolatileQualified() const
Determine whether this type is volatile-qualified.
const_arg_iterator arg_begin() const
static ABIArgInfo getExtendInReg(QualType Ty, llvm::Type *T=nullptr)
llvm::CallInst * CreateMemCpy(Address Dest, Address Src, llvm::Value *Size, bool IsVolatile=false)
ABIArgInfo - Helper class to encapsulate information about how a specific C type should be passed to ...
const Type * getTypePtr() const
Retrieves a pointer to the underlying (unqualified) type.
field_iterator field_begin() const
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
static bool BitsContainNoUserData(QualType Ty, unsigned StartBit, unsigned EndBit, ASTContext &Context)
BitsContainNoUserData - Return true if the specified [start,end) bit range is known to either be off ...
static ABIArgInfo getExpand()
CharUnits getPointerAlign() const
bool isFloat128Type() const
bool isScalarType() const
llvm::AllocaInst * CreateTempAlloca(llvm::Type *Ty, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr)
CreateTempAlloca - This creates an alloca and inserts it into the entry block if ArraySize is nullptr...
unsigned getTypeUnadjustedAlign(QualType T) const
Return the ABI-specified natural alignment of a (complete) type T, before alignment adjustments...
constexpr XRayInstrMask All
const T * getTypePtr() const
Retrieve the underlying type pointer, which refers to a canonical type.
static QualType useFirstFieldIfTransparentUnion(QualType Ty)
Pass transparent unions as if they were the type of the first element.
virtual llvm::Value * performAddrSpaceCast(CodeGen::CodeGenFunction &CGF, llvm::Value *V, LangAS SrcAddr, LangAS DestAddr, llvm::Type *DestTy, bool IsNonNull=false) const
Perform address space cast of an expression of pointer type.
bool isTypeConstant(QualType QTy, bool ExcludeCtorDtor)
isTypeConstant - Determine whether an object of this type can be emitted as a constant.
ExtInfo withCallingConv(CallingConv cc) const
Represents a K&R-style 'int foo()' function, which has no information available about its arguments...
static bool ContainsFloatAtOffset(llvm::Type *IRType, unsigned IROffset, const llvm::DataLayout &TD)
ContainsFloatAtOffset - Return true if the specified LLVM IR type has a float member at the specified...
static ABIArgInfo getSignExtend(QualType Ty, llvm::Type *T=nullptr)
CanQualType getReturnType() const
bool isPromotableIntegerType() const
More type predicates useful for type checking/promotion.
static CharUnits One()
One - Construct a CharUnits quantity of one.
ASTContext & getContext() const
Represents a prototype with parameter type info, e.g.
virtual CodeGen::Address EmitMSVAArg(CodeGen::CodeGenFunction &CGF, CodeGen::Address VAListAddr, QualType Ty) const
Emit the target dependent code to load a value of.
const TargetCodeGenInfo & getTargetCodeGenInfo()
bool isComplexType() const
isComplexType() does not include complex integers (a GCC extension).
static bool extractFieldType(SmallVectorImpl< FieldEncoding > &FE, const RecordDecl *RD, const CodeGen::CodeGenModule &CGM, TypeStringCache &TSC)
Helper function for appendRecordType().
virtual void getDependentLibraryOption(llvm::StringRef Lib, llvm::SmallString< 24 > &Opt) const
Gets the linker options necessary to link a dependent library on this platform.
static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder, llvm::Value *Array, llvm::Value *Value, unsigned FirstIndex, unsigned LastIndex)
static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context)
void setAddress(Address address)
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
const llvm::fltSemantics & getLongDoubleFormat() const
Exposes information about the current target.
CodeGen::ABIArgInfo getNaturalAlignIndirect(QualType Ty, bool ByRef=true, bool Realign=false, llvm::Type *Padding=nullptr) const
A convenience method to return an indirect ABIArgInfo with an expected alignment equal to the ABI ali...
QualType getElementType() const
QualType getVectorType(QualType VectorType, unsigned NumElts, VectorType::VectorKind VecKind) const
Return the unique reference to a vector type of the specified element type and size.
static ABIArgInfo getExtend(QualType Ty, llvm::Type *T=nullptr)
const IdentifierInfo * getBaseTypeIdentifier() const
Retrieves a pointer to the name of the base type.
static bool appendBuiltinType(SmallStringEnc &Enc, const BuiltinType *BT)
Appends built-in types to Enc.
field_iterator field_end() const
virtual bool classifyReturnType(CGFunctionInfo &FI) const =0
If the C++ ABI requires the given type be returned in a particular way, this method sets RetAI and re...
llvm::PointerType * getType() const
Return the type of the pointer value.
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
bool isAnyComplexType() const
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
static bool getTypeString(SmallStringEnc &Enc, const Decl *D, CodeGen::CodeGenModule &CGM, TypeStringCache &TSC)
The XCore ABI includes a type information section that communicates symbol type information to the li...
unsigned getFieldCount() const
getFieldCount - Get the number of fields in the layout.
EnumDecl * getDefinition() const
llvm::CallingConv::ID RuntimeCC
static bool classifyReturnType(const CGCXXABI &CXXABI, CGFunctionInfo &FI, const ABIInfo &Info)
llvm::LLVMContext & getLLVMContext()
bool isSignedIntegerType() const
Return true if this is an integer type that is signed, according to C99 6.2.5p4 [char, signed char, short, int, long..], or an enum decl which has a signed representation.
llvm::IntegerType * Int32Ty
CodeGen::ABIArgInfo getNaturalAlignIndirectInReg(QualType Ty, bool Realign=false) const
const CodeGenOptions & getCodeGenOpts() const
bool canHaveCoerceToType() const
CharUnits alignmentOfArrayElement(CharUnits elementSize) const
Given that this is the alignment of the first element of an array, return the minimum alignment of an...
bool getIndirectByVal() const
static Address emitVoidPtrDirectVAArg(CodeGenFunction &CGF, Address VAListAddr, llvm::Type *DirectTy, CharUnits DirectSize, CharUnits DirectAlign, CharUnits SlotSize, bool AllowHigherAlign)
Emit va_arg for a platform using the common void* representation, where arguments are simply emitted ...
Represents a GCC generic vector type.
ArraySizeModifier getSizeModifier() const
virtual unsigned getSizeOfUnwindException() const
Determines the size of struct _Unwind_Exception on this platform, in 8-bit units. ...
Implements C++ ABI-specific semantic analysis functions.
const TargetInfo & getTarget() const
const LangOptions & getLangOpts() const
ASTContext & getContext() const
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Attempt to be ABI-compatible with code generated by Clang 3.8.x (SVN r257626).
virtual llvm::Constant * getNullPointer(const CodeGen::CodeGenModule &CGM, llvm::PointerType *T, QualType QT) const
Get target specific null pointer.
CallingConv
CallingConv - Specifies the calling convention that a function uses.
bool isConstQualified() const
Determine whether this type is const-qualified.
The l-value was considered opaque, so the alignment was determined from a type.
RecordDecl * getDecl() const
Pass it as a pointer to temporary memory.
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
bool isStructureOrClassType() const
static void appendQualifier(SmallStringEnc &Enc, QualType QT)
Appends type's qualifier to Enc.
static Address emitMergePHI(CodeGenFunction &CGF, Address Addr1, llvm::BasicBlock *Block1, Address Addr2, llvm::BasicBlock *Block2, const llvm::Twine &Name="")
static bool isEmptyField(ASTContext &Context, const FieldDecl *FD, bool AllowArrays)
isEmptyField - Return true iff a the field is "empty", that is it is an unnamed bit-field or an (arra...
Address CreateBitCast(Address Addr, llvm::Type *Ty, const llvm::Twine &Name="")
QualType getCanonicalType() const
bool isBuiltinType() const
Helper methods to distinguish type categories.
QualType getReturnType() const
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of enums...
bool isSRetAfterThis() const
LangAS getAddressSpace() const
Return the address space of this type.
unsigned getRegParm() const
const TargetInfo & getTarget() const
bool isUnnamedBitfield() const
Determines whether this is an unnamed bitfield.
static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays)
isEmptyRecord - Return true iff a structure contains only empty fields.
static bool appendFunctionType(SmallStringEnc &Enc, const FunctionType *FT, const CodeGen::CodeGenModule &CGM, TypeStringCache &TSC)
Appends a function encoding to Enc, calling appendType for the return type and the arguments...
SyncScope
Defines synch scope values used internally by clang.
const llvm::DataLayout & getDataLayout() const
void setArgStruct(llvm::StructType *Ty, CharUnits Align)
virtual void computeInfo(CodeGen::CGFunctionInfo &FI) const =0
const ConstantArrayType * getAsConstantArrayType(QualType T) const
const_arg_iterator arg_end() const
CoerceAndExpand - Only valid for aggregate argument types.
bool isSignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is signed or an enumeration types whose underlying ty...
bool isMemberFunctionPointerType() const
llvm::LLVMContext & getLLVMContext()
bool canPassInRegisters() const
Determine whether this class can be passed in registers.
constexpr XRayInstrMask None
bool operator<(DeclarationName LHS, DeclarationName RHS)
Ordering on two declaration names.
bool isTargetAddressSpace(LangAS AS)
EnumDecl * getDecl() const
bool isVectorType() const
TargetCodeGenInfo - This class organizes various target-specific codegeneration issues, like target-specific attributes, builtins and so on.
InAlloca - Pass the argument directly using the LLVM inalloca attribute.
X86AVXABILevel
The AVX ABI level for X86 targets.
llvm::CallingConv::ID getRuntimeCC() const
Return the calling convention to use for system runtime functions.
bool hasFlexibleArrayMember() const
static llvm::Value * emitRoundPointerUpToAlignment(CodeGenFunction &CGF, llvm::Value *Ptr, CharUnits Align)
CanProxy< U > getAs() const
Retrieve a canonical type pointer with a different static type, upcasting or downcasting as needed...
std::pair< CharUnits, CharUnits > getTypeInfoInChars(const Type *T) const
llvm::Type * getPaddingType() const
StringRef getName() const
Return the actual identifier string.
const TargetInfo & getTarget() const
virtual CodeGen::Address EmitVAArg(CodeGen::CodeGenFunction &CGF, CodeGen::Address VAListAddr, QualType Ty) const =0
EmitVAArg - Emit the target dependent code to load a value of.
CGFunctionInfo - Class to encapsulate the information about a function definition.
This class organizes the cross-function state that is used while generating LLVM code.
Dataflow Directional Tag Classes.
bool isFloat16Type() const
virtual LangAS getGlobalVarAddressSpace(CodeGenModule &CGM, const VarDecl *D) const
Get target favored AST address space of a global variable for languages other than OpenCL and CUDA...
ExtInfo getExtInfo() const
A refining implementation of ABIInfo for targets that support swiftcall.
static bool addBaseAndFieldSizes(ASTContext &Context, const CXXRecordDecl *RD, uint64_t &Size)
virtual llvm::Function * createEnqueuedBlockKernel(CodeGenFunction &CGF, llvm::Function *BlockInvokeFunc, llvm::Value *BlockLiteral) const
Create an OpenCL kernel for an enqueued block.
static ABIArgInfo getDirectInReg(llvm::Type *T=nullptr)
Address CreateStructGEP(Address Addr, unsigned Index, CharUnits Offset, const llvm::Twine &Name="")
virtual bool isHomogeneousAggregateSmallEnough(const Type *Base, uint64_t Members) const
llvm::LoadInst * CreateAlignedLoad(llvm::Value *Addr, CharUnits Align, const llvm::Twine &Name="")
static bool appendArrayType(SmallStringEnc &Enc, QualType QT, const ArrayType *AT, const CodeGen::CodeGenModule &CGM, TypeStringCache &TSC, StringRef NoSizeEnc)
Appends array encoding to Enc before calling appendType for the element.
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
llvm::IntegerType * IntPtrTy
virtual bool isNoProtoCallVariadic(const CodeGen::CallArgList &args, const FunctionNoProtoType *fnType) const
Determine whether a call to an unprototyped functions under the given calling convention should use t...
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
bool isAggregateType() const
Determines whether the type is a C++ aggregate type or C aggregate or union type. ...
llvm::Module & getModule() const
virtual bool isLegalVectorTypeForSwift(CharUnits totalSize, llvm::Type *eltTy, unsigned elts) const
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
specific_decl_iterator - Iterates over a subrange of declarations stored in a DeclContext, providing only those that are of type SpecificDecl (or a class derived from it).
unsigned getIntWidth(QualType T) const
virtual void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const
setTargetAttributes - Provides a convenient hook to handle extra target-specific attributes for the g...
virtual llvm::Optional< LangAS > getConstantAddressSpace() const
Return an AST address space which can be used opportunistically for constant global memory...
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of structs/unions/cl...
Complex values, per C99 6.2.5p11.
Pass it using the normal C aggregate rules for the ABI, potentially introducing extra copies and pass...
Address CreateConstArrayGEP(Address Addr, uint64_t Index, CharUnits EltSize, const llvm::Twine &Name="")
Given addr = [n x T]* ...
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
Implements C++ ABI-specific code generation functions.
llvm::Type * getElementType() const
Return the type of the values stored in this address.
This class organizes the cross-module state that is used while lowering AST types to LLVM types...
llvm::PointerType * Int8PtrTy
CodeGen::CGCXXABI & getCXXABI() const
CodeGenOptions - Track various options which control how the code is optimized and passed to the back...
Expand - Only valid for aggregate argument types.
Internal linkage, which indicates that the entity can be referred to from within the translation unit...
virtual bool hasFloat128Type() const
Determine whether the __float128 type is supported on this target.
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
static bool isArgInAlloca(const ABIArgInfo &Info)
static ABIArgInfo getInAlloca(unsigned FieldIndex)
ABIArgInfo & getReturnInfo()
Represents a base class of a C++ class.
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
ASTContext & getContext() const
Pass it on the stack using its defined layout.
static CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT, CGCXXABI &CXXABI)
CanQualType getCanonicalType(QualType T) const
Return the canonical (structural) type corresponding to the specified potentially non-canonical type ...
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
bool isMultipleOf(CharUnits N) const
Test whether this is a multiple of the other value.
int64_t toBits(CharUnits CharSize) const
Convert a size in characters to a size in bits.
virtual llvm::SyncScope::ID getLLVMSyncScopeID(SyncScope S, llvm::LLVMContext &C) const
Get the syncscope used in LLVM IR.
CallingConv getCallConv() const
unsigned getCallingConvention() const
getCallingConvention - Return the user specified calling convention, which has been translated into a...
Address CreateConstByteGEP(Address Addr, CharUnits Offset, const llvm::Twine &Name="")
Represents a C++ struct/union/class.
void EmitBranch(llvm::BasicBlock *Block)
EmitBranch - Emit a branch to the specified basic block from the current insert block, taking care to avoid creation of branches from dummy blocks.
TypeInfo getTypeInfo(const Type *T) const
Get the size and alignment of the specified complete type in bits.
llvm::Type * ConvertType(QualType T)
virtual RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const =0
Returns how an argument of the given record type should be passed.
QualType getPointerType(QualType T) const
Return the uniqued reference to the type for a pointer to the specified type.
This class is used for builtin types like 'int'.
static Address emitVoidPtrVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType ValueTy, bool IsIndirect, std::pair< CharUnits, CharUnits > ValueInfo, CharUnits SlotSizeAndAlign, bool AllowHigherAlign)
Emit va_arg for a platform using the common void* representation, where arguments are simply emitted ...
ABIInfo - Target specific hooks for defining how a type should be passed or returned from functions...
__DEVICE__ int max(int __a, int __b)
static bool appendPointerType(SmallStringEnc &Enc, const PointerType *PT, const CodeGen::CodeGenModule &CGM, TypeStringCache &TSC)
Appends a pointer encoding to Enc before calling appendType for the pointee.
uint64_t getTargetNullPointerValue(QualType QT) const
Get target-dependent integer value for null pointer which is used for constant folding.
virtual bool isHomogeneousAggregateBaseType(QualType Ty) const
bool isPointerType() const
unsigned getNumRequiredArgs() const
__DEVICE__ int min(int __a, int __b)
unsigned getDirectOffset() const
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
llvm::StoreInst * CreateAlignedStore(llvm::Value *Val, llvm::Value *Addr, CharUnits Align, bool IsVolatile=false)
void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI)
bool isFloatingType() const
LValue - This represents an lvalue references.
llvm::Type * getCoerceToType() const
void setInAllocaSRet(bool SRet)
unsigned getTargetAddressSpace(QualType T) const
RecordArgABI
Specify how one should pass an argument of a record type.
Address CreatePointerBitCastOrAddrSpaceCast(Address Addr, llvm::Type *Ty, const llvm::Twine &Name="")
static bool isIntegerLikeType(QualType Ty, ASTContext &Context, llvm::LLVMContext &VMContext)
static bool isSSEVectorType(ASTContext &Context, QualType Ty)
CallArgList - Type for representing both the value and type of arguments in a call.
const LangOptions & getLangOpts() const
static bool PPC64_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, llvm::Value *Address)
Represents the canonical version of C arrays with a specified constant size.
bool getIndirectRealign() const
static ABIArgInfo getIndirect(CharUnits Alignment, bool ByVal=true, bool Realign=false, llvm::Type *Padding=nullptr)
Attr - This represents one attribute.
QualType getIntTypeForBitwidth(unsigned DestWidth, unsigned Signed) const
getIntTypeForBitwidth - sets integer QualTy according to specified details: bitwidth, signed/unsigned.
static OMPLinearClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, OpenMPLinearClauseKind Modifier, SourceLocation ModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef< Expr *> VL, ArrayRef< Expr *> PL, ArrayRef< Expr *> IL, Expr *Step, Expr *CalcStep, Stmt *PreInit, Expr *PostUpdate)
Creates clause with a list of variables VL and a linear step Step.
const CodeGenOptions & getCodeGenOpts() const
const llvm::Triple & getTriple() const