25 #include "llvm/ADT/StringExtras.h" 26 #include "llvm/ADT/StringSwitch.h" 27 #include "llvm/ADT/Triple.h" 28 #include "llvm/ADT/Twine.h" 29 #include "llvm/IR/DataLayout.h" 30 #include "llvm/IR/Type.h" 31 #include "llvm/Support/raw_ostream.h" 34 using namespace clang;
35 using namespace CodeGen;
53 llvm::LLVMContext &LLVMContext) {
57 llvm::Type *IntType = llvm::Type::getIntNTy(LLVMContext, Alignment);
58 const uint64_t NumElements = (Size + Alignment - 1) / Alignment;
68 for (
unsigned I = FirstIndex; I <= LastIndex; ++I) {
70 Builder.CreateConstInBoundsGEP1_32(Builder.getInt8Ty(), Array, I);
84 ByRef, Realign, Padding);
115 unsigned maxAllRegisters) {
116 unsigned intCount = 0, fpCount = 0;
118 if (
type->isPointerTy()) {
120 }
else if (
auto intTy = dyn_cast<llvm::IntegerType>(
type)) {
122 intCount += (intTy->getBitWidth() + ptrWidth - 1) / ptrWidth;
124 assert(
type->isVectorTy() ||
type->isFloatingPointTy());
129 return (intCount + fpCount > maxAllRegisters);
134 unsigned numElts)
const {
161 if (UD->
hasAttr<TransparentUnionAttr>()) {
162 assert(!UD->
field_empty() &&
"sema created an empty transparent union");
200 uint64_t Members)
const {
209 raw_ostream &OS = llvm::errs();
210 OS <<
"(ABIArgInfo Kind=";
213 OS <<
"Direct Type=";
226 OS <<
"InAlloca Offset=" << getInAllocaFieldIndex();
229 OS <<
"Indirect Align=" << getIndirectAlign().getQuantity()
230 <<
" ByVal=" << getIndirectByVal()
231 <<
" Realign=" << getIndirectRealign();
236 case CoerceAndExpand:
237 OS <<
"CoerceAndExpand Type=";
238 getCoerceAndExpandType()->print(OS);
251 PtrAsInt = CGF.
Builder.CreateAdd(PtrAsInt,
253 PtrAsInt = CGF.
Builder.CreateAnd(PtrAsInt,
255 PtrAsInt = CGF.
Builder.CreateIntToPtr(PtrAsInt,
257 Ptr->getName() +
".aligned");
281 bool AllowHigherAlign) {
291 if (AllowHigherAlign && DirectAlign > SlotSize) {
308 !DirectTy->isStructTy()) {
331 std::pair<CharUnits, CharUnits> ValueInfo,
333 bool AllowHigherAlign) {
340 DirectSize = ValueInfo.first;
341 DirectAlign = ValueInfo.second;
347 DirectTy = DirectTy->getPointerTo(0);
350 DirectSize, DirectAlign,
363 Address Addr1, llvm::BasicBlock *Block1,
364 Address Addr2, llvm::BasicBlock *Block2,
365 const llvm::Twine &Name =
"") {
367 llvm::PHINode *PHI = CGF.
Builder.CreatePHI(Addr1.
getType(), 2, Name);
418 return llvm::CallingConv::SPIR_KERNEL;
422 llvm::PointerType *
T,
QualType QT)
const {
423 return llvm::ConstantPointerNull::get(T);
430 "Address space agnostic languages only");
439 if (
auto *C = dyn_cast<llvm::Constant>(Src))
440 return performAddrSpaceCast(CGF.
CGM, C, SrcAddr, DestAddr, DestTy);
450 return llvm::ConstantExpr::getPointerCast(Src, DestTy);
455 return C.getOrInsertSyncScopeID(
"");
473 if (AT->getSize() == 0)
475 FT = AT->getElementType();
486 if (isa<CXXRecordDecl>(RT->
getDecl()))
504 if (
const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
505 for (
const auto &I : CXXRD->bases())
509 for (
const auto *I : RD->
fields())
532 const Type *Found =
nullptr;
535 if (
const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
536 for (
const auto &I : CXXRD->bases()) {
554 for (
const auto *FD : RD->
fields()) {
568 if (AT->getSize().getZExtValue() != 1)
570 FT = AT->getElementType();
606 "Unexpected PaddingType seen in arginfo in generic VAArg emitter!");
609 "Unexpected IndirectRealign seen in arginfo in generic VAArg emitter!");
618 return Address(Addr, TyAlignForABI);
621 "Unexpected ArgInfo Kind in generic VAArg emitter!");
624 "Unexpected InReg seen in arginfo in generic VAArg emitter!");
626 "Unexpected PaddingType seen in arginfo in generic VAArg emitter!");
628 "Unexpected DirectOffset seen in arginfo in generic VAArg emitter!");
630 "Unexpected CoerceToType seen in arginfo in generic VAArg emitter!");
643 class DefaultABIInfo :
public ABIInfo {
654 I.info = classifyArgumentType(I.type);
683 Ty = EnumTy->getDecl()->getIntegerType();
698 RetTy = EnumTy->getDecl()->getIntegerType();
710 class WebAssemblyABIInfo final :
public DefaultABIInfo {
713 : DefaultABIInfo(CGT) {}
726 Arg.info = classifyArgumentType(Arg.type);
797 class PNaClABIInfo :
public ABIInfo {
842 Ty = EnumTy->getDecl()->getIntegerType();
862 RetTy = EnumTy->getDecl()->getIntegerType();
871 return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
872 cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
873 IRType->getScalarSizeInBits() != 64;
877 StringRef Constraint,
879 bool IsMMXCons = llvm::StringSwitch<bool>(Constraint)
880 .Cases(
"y",
"&y",
"^Ym",
true)
882 if (IsMMXCons && Ty->isVectorTy()) {
883 if (cast<llvm::VectorType>(Ty)->getBitWidth() != 64) {
899 if (BT->isFloatingPoint() && BT->getKind() != BuiltinType::Half) {
900 if (BT->getKind() == BuiltinType::LongDouble) {
902 &llvm::APFloat::x87DoubleExtended())
911 if (VecSize == 128 || VecSize == 256 || VecSize == 512)
919 static bool isX86VectorCallAggregateSmallEnough(uint64_t NumMembers) {
920 return NumMembers <= 4;
937 CCState(
unsigned CC) : CC(CC), FreeRegs(0), FreeSSERegs(0) {}
941 unsigned FreeSSERegs;
946 VectorcallMaxParamNumAsReg = 6
956 static const unsigned MinABIStackAlignInBytes = 4;
958 bool IsDarwinVectorABI;
959 bool IsRetSmallStructInRegABI;
960 bool IsWin32StructABI;
963 unsigned DefaultNumRegisterParameters;
965 static bool isRegisterSize(
unsigned Size) {
966 return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
971 return isX86VectorTypeForVectorCall(
getContext(), Ty);
975 uint64_t NumMembers)
const override {
977 return isX86VectorCallAggregateSmallEnough(NumMembers);
989 unsigned getTypeStackAlignInBytes(
QualType Ty,
unsigned Align)
const;
997 bool updateFreeRegs(
QualType Ty, CCState &State)
const;
999 bool shouldAggregateUseDirect(
QualType Ty, CCState &State,
bool &InReg,
1000 bool &NeedsPadding)
const;
1001 bool shouldPrimitiveUseInReg(
QualType Ty, CCState &State)
const;
1003 bool canExpandIndirectArgument(
QualType Ty)
const;
1013 bool &UsedInAlloca)
const;
1022 bool RetSmallStructInRegABI,
bool Win32StructABI,
1023 unsigned NumRegisterParameters,
bool SoftFloatABI)
1024 :
SwiftABIInfo(CGT), IsDarwinVectorABI(DarwinVectorABI),
1025 IsRetSmallStructInRegABI(RetSmallStructInRegABI),
1026 IsWin32StructABI(Win32StructABI),
1027 IsSoftFloatABI(SoftFloatABI),
1029 DefaultNumRegisterParameters(NumRegisterParameters) {}
1031 bool shouldPassIndirectlyForSwift(
CharUnits totalSize,
1033 bool asReturnValue)
const override {
1041 bool isSwiftErrorInRegister()
const override {
1050 bool RetSmallStructInRegABI,
bool Win32StructABI,
1051 unsigned NumRegisterParameters,
bool SoftFloatABI)
1053 CGT, DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
1054 NumRegisterParameters, SoftFloatABI)) {}
1056 static bool isStructReturnInRegABI(
1059 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
1073 StringRef Constraint,
1075 return X86AdjustInlineAsmType(CGF, Constraint, Ty);
1079 std::string &Constraints,
1080 std::vector<llvm::Type *> &ResultRegTypes,
1081 std::vector<llvm::Type *> &ResultTruncRegTypes,
1082 std::vector<LValue> &ResultRegDests,
1083 std::string &AsmString,
1084 unsigned NumOutputs)
const override;
1088 unsigned Sig = (0xeb << 0) |
1092 return llvm::ConstantInt::get(CGM.
Int32Ty, Sig);
1095 StringRef getARCRetainAutoreleasedReturnValueMarker()
const override {
1096 return "movl\t%ebp, %ebp" 1097 "\t\t// marker for objc_retainAutoreleaseReturnValue";
1112 unsigned NumNewOuts,
1113 std::string &AsmString) {
1115 llvm::raw_string_ostream OS(Buf);
1117 while (Pos < AsmString.size()) {
1118 size_t DollarStart = AsmString.find(
'$', Pos);
1119 if (DollarStart == std::string::npos)
1120 DollarStart = AsmString.size();
1121 size_t DollarEnd = AsmString.find_first_not_of(
'$', DollarStart);
1122 if (DollarEnd == std::string::npos)
1123 DollarEnd = AsmString.size();
1124 OS << StringRef(&AsmString[Pos], DollarEnd - Pos);
1126 size_t NumDollars = DollarEnd - DollarStart;
1127 if (NumDollars % 2 != 0 && Pos < AsmString.size()) {
1129 size_t DigitStart = Pos;
1130 size_t DigitEnd = AsmString.find_first_not_of(
"0123456789", DigitStart);
1131 if (DigitEnd == std::string::npos)
1132 DigitEnd = AsmString.size();
1133 StringRef OperandStr(&AsmString[DigitStart], DigitEnd - DigitStart);
1134 unsigned OperandIndex;
1135 if (!OperandStr.getAsInteger(10, OperandIndex)) {
1136 if (OperandIndex >= FirstIn)
1137 OperandIndex += NumNewOuts;
1145 AsmString = std::move(OS.str());
1149 void X86_32TargetCodeGenInfo::addReturnRegisterOutputs(
1151 std::vector<llvm::Type *> &ResultRegTypes,
1152 std::vector<llvm::Type *> &ResultTruncRegTypes,
1153 std::vector<LValue> &ResultRegDests, std::string &AsmString,
1154 unsigned NumOutputs)
const {
1159 if (!Constraints.empty())
1161 if (RetWidth <= 32) {
1162 Constraints +=
"={eax}";
1163 ResultRegTypes.push_back(CGF.
Int32Ty);
1166 Constraints +=
"=A";
1167 ResultRegTypes.push_back(CGF.
Int64Ty);
1172 ResultTruncRegTypes.push_back(CoerceTy);
1176 CoerceTy->getPointerTo()));
1177 ResultRegDests.push_back(ReturnSlot);
1184 bool X86_32ABIInfo::shouldReturnTypeInRegister(
QualType Ty,
1190 if ((IsMCUABI && Size > 64) || (!IsMCUABI && !isRegisterSize(Size)))
1196 if (Size == 64 || Size == 128)
1211 return shouldReturnTypeInRegister(AT->getElementType(), Context);
1215 if (!RT)
return false;
1227 if (!shouldReturnTypeInRegister(FD->getType(), Context))
1236 Ty = CTy->getElementType();
1246 return Size == 32 || Size == 64;
1251 for (
const auto *FD : RD->
fields()) {
1261 if (FD->isBitField())
1286 bool X86_32ABIInfo::canExpandIndirectArgument(
QualType Ty)
const {
1293 if (
const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
1294 if (!IsWin32StructABI) {
1297 if (!CXXRD->isCLike())
1301 if (CXXRD->isDynamicClass())
1318 if (State.FreeRegs) {
1327 CCState &State)
const {
1332 uint64_t NumElts = 0;
1333 if ((State.CC == llvm::CallingConv::X86_VectorCall ||
1334 State.CC == llvm::CallingConv::X86_RegCall) &&
1342 if (IsDarwinVectorABI) {
1354 if ((Size == 8 || Size == 16 || Size == 32) ||
1355 (Size == 64 && VT->getNumElements() == 1))
1359 return getIndirectReturnResult(RetTy, State);
1368 if (RT->getDecl()->hasFlexibleArrayMember())
1369 return getIndirectReturnResult(RetTy, State);
1374 return getIndirectReturnResult(RetTy, State);
1382 if (shouldReturnTypeInRegister(RetTy,
getContext())) {
1391 if ((!IsWin32StructABI && SeltTy->isRealFloatingType())
1392 || SeltTy->hasPointerRepresentation())
1400 return getIndirectReturnResult(RetTy, State);
1405 RetTy = EnumTy->getDecl()->getIntegerType();
1422 if (
const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
1423 for (
const auto &I : CXXRD->bases())
1427 for (
const auto *i : RD->
fields()) {
1440 unsigned X86_32ABIInfo::getTypeStackAlignInBytes(
QualType Ty,
1441 unsigned Align)
const {
1444 if (Align <= MinABIStackAlignInBytes)
1448 if (!IsDarwinVectorABI) {
1450 return MinABIStackAlignInBytes;
1458 return MinABIStackAlignInBytes;
1462 CCState &State)
const {
1464 if (State.FreeRegs) {
1474 unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
1475 if (StackAlign == 0)
1480 bool Realign = TypeAlign > StackAlign;
1485 X86_32ABIInfo::Class X86_32ABIInfo::classify(
QualType Ty)
const {
1492 if (K == BuiltinType::Float || K == BuiltinType::Double)
1498 bool X86_32ABIInfo::updateFreeRegs(
QualType Ty, CCState &State)
const {
1499 if (!IsSoftFloatABI) {
1500 Class C = classify(Ty);
1506 unsigned SizeInRegs = (Size + 31) / 32;
1508 if (SizeInRegs == 0)
1512 if (SizeInRegs > State.FreeRegs) {
1521 if (SizeInRegs > State.FreeRegs || SizeInRegs > 2)
1525 State.FreeRegs -= SizeInRegs;
1529 bool X86_32ABIInfo::shouldAggregateUseDirect(
QualType Ty, CCState &State,
1531 bool &NeedsPadding)
const {
1538 NeedsPadding =
false;
1541 if (!updateFreeRegs(Ty, State))
1547 if (State.CC == llvm::CallingConv::X86_FastCall ||
1548 State.CC == llvm::CallingConv::X86_VectorCall ||
1549 State.CC == llvm::CallingConv::X86_RegCall) {
1550 if (
getContext().getTypeSize(Ty) <= 32 && State.FreeRegs)
1551 NeedsPadding =
true;
1559 bool X86_32ABIInfo::shouldPrimitiveUseInReg(
QualType Ty, CCState &State)
const {
1560 if (!updateFreeRegs(Ty, State))
1566 if (State.CC == llvm::CallingConv::X86_FastCall ||
1567 State.CC == llvm::CallingConv::X86_VectorCall ||
1568 State.CC == llvm::CallingConv::X86_RegCall) {
1580 CCState &State)
const {
1590 return getIndirectResult(Ty,
false, State);
1600 uint64_t NumElts = 0;
1601 if (State.CC == llvm::CallingConv::X86_RegCall &&
1604 if (State.FreeSSERegs >= NumElts) {
1605 State.FreeSSERegs -= NumElts;
1610 return getIndirectResult(Ty,
false, State);
1617 return getIndirectResult(Ty,
true, State);
1624 llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
1625 bool NeedsPadding =
false;
1627 if (shouldAggregateUseDirect(Ty, State, InReg, NeedsPadding)) {
1630 llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
1636 llvm::IntegerType *PaddingType = NeedsPadding ? Int32 :
nullptr;
1644 if (
getContext().getTypeSize(Ty) <= 4 * 32 &&
1645 (!IsMCUABI || State.FreeRegs == 0) && canExpandIndirectArgument(Ty))
1647 State.CC == llvm::CallingConv::X86_FastCall ||
1648 State.CC == llvm::CallingConv::X86_VectorCall ||
1649 State.CC == llvm::CallingConv::X86_RegCall,
1652 return getIndirectResult(Ty,
true, State);
1658 if (IsDarwinVectorABI) {
1660 if ((Size == 8 || Size == 16 || Size == 32) ||
1661 (Size == 64 && VT->getNumElements() == 1))
1674 Ty = EnumTy->getDecl()->getIntegerType();
1676 bool InReg = shouldPrimitiveUseInReg(Ty, State);
1689 void X86_32ABIInfo::computeVectorCallArgs(
CGFunctionInfo &FI, CCState &State,
1690 bool &UsedInAlloca)
const {
1704 uint64_t NumElts = 0;
1708 if (State.FreeSSERegs >= NumElts) {
1709 State.FreeSSERegs -= NumElts;
1721 uint64_t NumElts = 0;
1727 if (State.FreeSSERegs >= NumElts) {
1728 State.FreeSSERegs -= NumElts;
1729 I.info = getDirectX86Hva();
1731 I.info = getIndirectResult(Ty,
false, State);
1733 }
else if (!IsHva) {
1745 else if (State.CC == llvm::CallingConv::X86_FastCall)
1747 else if (State.CC == llvm::CallingConv::X86_VectorCall) {
1749 State.FreeSSERegs = 6;
1752 else if (State.CC == llvm::CallingConv::X86_RegCall) {
1754 State.FreeSSERegs = 8;
1756 State.FreeRegs = DefaultNumRegisterParameters;
1763 if (State.FreeRegs) {
1774 bool UsedInAlloca =
false;
1775 if (State.CC == llvm::CallingConv::X86_VectorCall) {
1776 computeVectorCallArgs(FI, State, UsedInAlloca);
1788 rewriteWithInAlloca(FI);
1798 assert(StackOffset.
isMultipleOf(FieldAlign) &&
"unaligned inalloca struct");
1805 StackOffset = FieldEnd.
alignTo(FieldAlign);
1806 if (StackOffset != FieldEnd) {
1807 CharUnits NumBytes = StackOffset - FieldEnd;
1809 Ty = llvm::ArrayType::get(Ty, NumBytes.
getQuantity());
1810 FrameFields.push_back(Ty);
1835 llvm_unreachable(
"invalid enum");
1838 void X86_32ABIInfo::rewriteWithInAlloca(
CGFunctionInfo &FI)
const {
1839 assert(IsWin32StructABI &&
"inalloca only supported on win32");
1856 addFieldToArgStruct(FrameFields, StackOffset, I->
info, I->
type);
1863 addFieldToArgStruct(FrameFields, StackOffset, Ret, PtrTy);
1873 for (; I != E; ++I) {
1875 addFieldToArgStruct(FrameFields, StackOffset, I->
info, I->
type);
1893 getTypeStackAlignInBytes(Ty,
TypeInfo.second.getQuantity()));
1900 bool X86_32TargetCodeGenInfo::isStructReturnInRegABI(
1902 assert(Triple.getArch() == llvm::Triple::x86);
1904 switch (Opts.getStructReturnConvention()) {
1913 if (Triple.isOSDarwin() || Triple.isOSIAMCU())
1916 switch (Triple.getOS()) {
1917 case llvm::Triple::DragonFly:
1918 case llvm::Triple::FreeBSD:
1919 case llvm::Triple::OpenBSD:
1920 case llvm::Triple::Win32:
1927 void X86_32TargetCodeGenInfo::setTargetAttributes(
1930 if (!IsForDefinition)
1932 if (
const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
1933 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
1935 llvm::Function *Fn = cast<llvm::Function>(GV);
1938 llvm::AttrBuilder B;
1939 B.addStackAlignmentAttr(16);
1940 Fn->addAttributes(llvm::AttributeList::FunctionIndex, B);
1942 if (FD->hasAttr<AnyX86InterruptAttr>()) {
1943 llvm::Function *Fn = cast<llvm::Function>(GV);
1944 Fn->setCallingConv(llvm::CallingConv::X86_INTR);
1949 bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
1972 Four8, Builder.CreateConstInBoundsGEP1_32(CGF.
Int8Ty, Address, 9),
1999 static unsigned getNativeVectorSizeForAVXABI(
X86AVXABILevel AVXLevel) {
2001 case X86AVXABILevel::AVX512:
2003 case X86AVXABILevel::AVX:
2008 llvm_unreachable(
"Unknown AVXLevel");
2033 static Class merge(Class Accum, Class Field);
2049 void postMerge(
unsigned AggregateSize, Class &Lo, Class &Hi)
const;
2075 void classify(
QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi,
2076 bool isNamedArg)
const;
2080 unsigned IROffset,
QualType SourceTy,
2081 unsigned SourceOffset)
const;
2083 unsigned IROffset,
QualType SourceTy,
2084 unsigned SourceOffset)
const;
2100 unsigned &neededInt,
unsigned &neededSSE,
2101 bool isNamedArg)
const;
2104 unsigned &NeededSSE)
const;
2107 unsigned &NeededSSE)
const;
2109 bool IsIllegalVectorType(
QualType Ty)
const;
2116 bool honorsRevision0_98()
const {
2122 bool classifyIntegerMMXAsSSE()
const {
2129 if (Triple.isOSDarwin() || Triple.getOS() == llvm::Triple::PS4)
2131 if (Triple.isOSFreeBSD() && Triple.getOSMajorVersion() >= 10)
2139 bool Has64BitPointers;
2144 Has64BitPointers(CGT.
getDataLayout().getPointerSize(0) == 8) {
2148 unsigned neededInt, neededSSE;
2154 if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
2155 return (vectorTy->getBitWidth() > 128);
2167 bool has64BitPointers()
const {
2168 return Has64BitPointers;
2171 bool shouldPassIndirectlyForSwift(
CharUnits totalSize,
2173 bool asReturnValue)
const override {
2176 bool isSwiftErrorInRegister()
const override {
2186 IsMingw64(
getTarget().getTriple().isWindowsGNUEnvironment()) {}
2195 return isX86VectorTypeForVectorCall(
getContext(), Ty);
2199 uint64_t NumMembers)
const override {
2201 return isX86VectorCallAggregateSmallEnough(NumMembers);
2204 bool shouldPassIndirectlyForSwift(
CharUnits totalSize,
2206 bool asReturnValue)
const override {
2210 bool isSwiftErrorInRegister()
const override {
2216 bool IsVectorCall,
bool IsRegCall)
const;
2219 void computeVectorCallArgs(
CGFunctionInfo &FI,
unsigned FreeSSERegs,
2220 bool IsVectorCall,
bool IsRegCall)
const;
2230 const X86_64ABIInfo &getABIInfo()
const {
2249 StringRef Constraint,
2251 return X86AdjustInlineAsmType(CGF, Constraint, Ty);
2254 bool isNoProtoCallVariadic(
const CallArgList &args,
2263 bool HasAVXType =
false;
2264 for (CallArgList::const_iterator
2265 it = args.begin(), ie = args.end(); it != ie; ++it) {
2266 if (getABIInfo().isPassedUsingAVXType(it->Ty)) {
2281 unsigned Sig = (0xeb << 0) |
2285 return llvm::ConstantInt::get(CGM.
Int32Ty, Sig);
2288 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
2291 if (!IsForDefinition)
2293 if (
const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
2294 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
2296 auto *Fn = cast<llvm::Function>(GV);
2299 llvm::AttrBuilder B;
2300 B.addStackAlignmentAttr(16);
2301 Fn->addAttributes(llvm::AttributeList::FunctionIndex, B);
2303 if (FD->hasAttr<AnyX86InterruptAttr>()) {
2304 llvm::Function *Fn = cast<llvm::Function>(GV);
2305 Fn->setCallingConv(llvm::CallingConv::X86_INTR);
2311 class PS4TargetCodeGenInfo :
public X86_64TargetCodeGenInfo {
2314 : X86_64TargetCodeGenInfo(CGT, AVXLevel) {}
2316 void getDependentLibraryOption(llvm::StringRef Lib,
2320 if (Lib.find(
" ") != StringRef::npos)
2321 Opt +=
"\"" + Lib.str() +
"\"";
2327 static std::string qualifyWindowsLibrary(llvm::StringRef Lib) {
2331 bool Quote = (Lib.find(
" ") != StringRef::npos);
2332 std::string ArgStr = Quote ?
"\"" :
"";
2334 if (!Lib.endswith_lower(
".lib"))
2336 ArgStr += Quote ?
"\"" :
"";
2340 class WinX86_32TargetCodeGenInfo :
public X86_32TargetCodeGenInfo {
2343 bool DarwinVectorABI,
bool RetSmallStructInRegABI,
bool Win32StructABI,
2344 unsigned NumRegisterParameters)
2345 : X86_32TargetCodeGenInfo(CGT, DarwinVectorABI, RetSmallStructInRegABI,
2346 Win32StructABI, NumRegisterParameters,
false) {}
2348 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
2352 void getDependentLibraryOption(llvm::StringRef Lib,
2354 Opt =
"/DEFAULTLIB:";
2355 Opt += qualifyWindowsLibrary(Lib);
2358 void getDetectMismatchOption(llvm::StringRef Name,
2359 llvm::StringRef
Value,
2361 Opt =
"/FAILIFMISMATCH:\"" + Name.str() +
"=" + Value.str() +
"\"";
2365 static void addStackProbeSizeTargetAttribute(
const Decl *D,
2366 llvm::GlobalValue *GV,
2368 if (D && isa<FunctionDecl>(D)) {
2370 llvm::Function *Fn = cast<llvm::Function>(GV);
2372 Fn->addFnAttr(
"stack-probe-size",
2378 void WinX86_32TargetCodeGenInfo::setTargetAttributes(
2381 X86_32TargetCodeGenInfo::setTargetAttributes(D, GV, CGM, IsForDefinition);
2382 if (!IsForDefinition)
2384 addStackProbeSizeTargetAttribute(D, GV, CGM);
2393 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
2411 void getDependentLibraryOption(llvm::StringRef Lib,
2413 Opt =
"/DEFAULTLIB:";
2414 Opt += qualifyWindowsLibrary(Lib);
2417 void getDetectMismatchOption(llvm::StringRef Name,
2418 llvm::StringRef
Value,
2420 Opt =
"/FAILIFMISMATCH:\"" + Name.str() +
"=" + Value.str() +
"\"";
2424 void WinX86_64TargetCodeGenInfo::setTargetAttributes(
2428 if (!IsForDefinition)
2430 if (
const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
2431 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
2433 auto *Fn = cast<llvm::Function>(GV);
2436 llvm::AttrBuilder B;
2437 B.addStackAlignmentAttr(16);
2438 Fn->addAttributes(llvm::AttributeList::FunctionIndex, B);
2440 if (FD->hasAttr<AnyX86InterruptAttr>()) {
2441 llvm::Function *Fn = cast<llvm::Function>(GV);
2442 Fn->setCallingConv(llvm::CallingConv::X86_INTR);
2446 addStackProbeSizeTargetAttribute(D, GV, CGM);
2450 void X86_64ABIInfo::postMerge(
unsigned AggregateSize, Class &Lo,
2475 if (Hi == X87Up && Lo != X87 && honorsRevision0_98())
2477 if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp))
2479 if (Hi == SSEUp && Lo != SSE)
2483 X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
2507 assert((Accum != Memory && Accum != ComplexX87) &&
2508 "Invalid accumulated classification during merge.");
2509 if (Accum == Field || Field == NoClass)
2511 if (Field == Memory)
2513 if (Accum == NoClass)
2515 if (Accum == Integer || Field == Integer)
2517 if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
2518 Accum == X87 || Accum == X87Up)
2523 void X86_64ABIInfo::classify(
QualType Ty, uint64_t OffsetBase,
2524 Class &Lo, Class &Hi,
bool isNamedArg)
const {
2535 Class &Current = OffsetBase < 64 ? Lo : Hi;
2541 if (k == BuiltinType::Void) {
2543 }
else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
2546 }
else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
2548 }
else if (k == BuiltinType::Float || k == BuiltinType::Double) {
2550 }
else if (k == BuiltinType::LongDouble) {
2552 if (LDF == &llvm::APFloat::IEEEquad()) {
2555 }
else if (LDF == &llvm::APFloat::x87DoubleExtended()) {
2558 }
else if (LDF == &llvm::APFloat::IEEEdouble()) {
2561 llvm_unreachable(
"unexpected long double representation!");
2570 classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi, isNamedArg);
2581 if (Has64BitPointers) {
2588 uint64_t EB_FuncPtr = (OffsetBase) / 64;
2589 uint64_t EB_ThisAdj = (OffsetBase + 64 - 1) / 64;
2590 if (EB_FuncPtr != EB_ThisAdj) {
2604 if (Size == 1 || Size == 8 || Size == 16 || Size == 32) {
2613 uint64_t EB_Lo = (OffsetBase) / 64;
2614 uint64_t EB_Hi = (OffsetBase + Size - 1) / 64;
2617 }
else if (Size == 64) {
2618 QualType ElementType = VT->getElementType();
2627 if (!classifyIntegerMMXAsSSE() &&
2638 if (OffsetBase && OffsetBase != 64)
2640 }
else if (Size == 128 ||
2641 (isNamedArg && Size <= getNativeVectorSizeForAVXABI(AVXLevel))) {
2669 else if (Size <= 128)
2677 if (LDF == &llvm::APFloat::IEEEquad())
2679 else if (LDF == &llvm::APFloat::x87DoubleExtended())
2680 Current = ComplexX87;
2681 else if (LDF == &llvm::APFloat::IEEEdouble())
2684 llvm_unreachable(
"unexpected long double representation!");
2689 uint64_t EB_Real = (OffsetBase) / 64;
2691 if (Hi == NoClass && EB_Real != EB_Imag)
2711 if (OffsetBase %
getContext().getTypeAlign(AT->getElementType()))
2718 uint64_t ArraySize = AT->getSize().getZExtValue();
2725 (Size != EltSize || Size > getNativeVectorSizeForAVXABI(AVXLevel)))
2728 for (uint64_t i=0,
Offset=OffsetBase; i<ArraySize; ++i,
Offset += EltSize) {
2729 Class FieldLo, FieldHi;
2730 classify(AT->getElementType(),
Offset, FieldLo, FieldHi, isNamedArg);
2731 Lo = merge(Lo, FieldLo);
2732 Hi = merge(Hi, FieldHi);
2733 if (Lo == Memory || Hi == Memory)
2737 postMerge(Size, Lo, Hi);
2738 assert((Hi != SSEUp || Lo == SSE) &&
"Invalid SSEUp array classification.");
2768 if (
const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
2769 for (
const auto &I : CXXRD->bases()) {
2770 assert(!I.isVirtual() && !I.getType()->isDependentType() &&
2771 "Unexpected base class!");
2773 cast<CXXRecordDecl>(I.getType()->getAs<
RecordType>()->getDecl());
2780 Class FieldLo, FieldHi;
2783 classify(I.getType(),
Offset, FieldLo, FieldHi, isNamedArg);
2784 Lo = merge(Lo, FieldLo);
2785 Hi = merge(Hi, FieldHi);
2786 if (Lo == Memory || Hi == Memory) {
2787 postMerge(Size, Lo, Hi);
2796 i != e; ++i, ++idx) {
2798 bool BitField = i->isBitField();
2801 if (BitField && i->isUnnamedBitfield())
2811 if (Size > 128 && (Size !=
getContext().getTypeSize(i->getType()) ||
2812 Size > getNativeVectorSizeForAVXABI(AVXLevel))) {
2814 postMerge(Size, Lo, Hi);
2818 if (!BitField && Offset %
getContext().getTypeAlign(i->getType())) {
2820 postMerge(Size, Lo, Hi);
2830 Class FieldLo, FieldHi;
2836 assert(!i->isUnnamedBitfield());
2838 uint64_t Size = i->getBitWidthValue(
getContext());
2840 uint64_t EB_Lo = Offset / 64;
2841 uint64_t EB_Hi = (Offset + Size - 1) / 64;
2844 assert(EB_Hi == EB_Lo &&
"Invalid classification, type > 16 bytes.");
2849 FieldHi = EB_Hi ? Integer : NoClass;
2852 classify(i->getType(),
Offset, FieldLo, FieldHi, isNamedArg);
2853 Lo = merge(Lo, FieldLo);
2854 Hi = merge(Hi, FieldHi);
2855 if (Lo == Memory || Hi == Memory)
2859 postMerge(Size, Lo, Hi);
2869 Ty = EnumTy->getDecl()->getIntegerType();
2878 bool X86_64ABIInfo::IsIllegalVectorType(
QualType Ty)
const {
2881 unsigned LargestVector = getNativeVectorSizeForAVXABI(AVXLevel);
2882 if (Size <= 64 || Size > LargestVector)
2890 unsigned freeIntRegs)
const {
2902 Ty = EnumTy->getDecl()->getIntegerType();
2936 if (freeIntRegs == 0) {
2941 if (Align == 8 && Size <= 64)
2958 if (isa<llvm::VectorType>(IRType) ||
2959 IRType->getTypeID() == llvm::Type::FP128TyID)
2964 assert((Size == 128 || Size == 256 || Size == 512) &&
"Invalid type found!");
2967 return llvm::VectorType::get(llvm::Type::getDoubleTy(
getVMContext()),
2983 unsigned TySize = (unsigned)Context.
getTypeSize(Ty);
2984 if (TySize <= StartBit)
2988 unsigned EltSize = (unsigned)Context.
getTypeSize(AT->getElementType());
2989 unsigned NumElts = (unsigned)AT->getSize().getZExtValue();
2992 for (
unsigned i = 0; i != NumElts; ++i) {
2994 unsigned EltOffset = i*EltSize;
2995 if (EltOffset >= EndBit)
break;
2997 unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset :0;
2999 EndBit-EltOffset, Context))
3011 if (
const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
3012 for (
const auto &I : CXXRD->bases()) {
3013 assert(!I.isVirtual() && !I.getType()->isDependentType() &&
3014 "Unexpected base class!");
3016 cast<CXXRecordDecl>(I.getType()->getAs<
RecordType>()->getDecl());
3020 if (BaseOffset >= EndBit)
continue;
3022 unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset :0;
3024 EndBit-BaseOffset, Context))
3035 i != e; ++i, ++idx) {
3039 if (FieldOffset >= EndBit)
break;
3041 unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset :0;
3060 const llvm::DataLayout &TD) {
3062 if (IROffset == 0 && IRType->isFloatTy())
3066 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
3067 const llvm::StructLayout *SL = TD.getStructLayout(STy);
3068 unsigned Elt = SL->getElementContainingOffset(IROffset);
3069 IROffset -= SL->getElementOffset(Elt);
3074 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
3076 unsigned EltSize = TD.getTypeAllocSize(EltTy);
3077 IROffset -= IROffset/EltSize*EltSize;
3088 GetSSETypeAtOffset(
llvm::Type *IRType,
unsigned IROffset,
3089 QualType SourceTy,
unsigned SourceOffset)
const {
3102 return llvm::VectorType::get(llvm::Type::getFloatTy(
getVMContext()), 2);
3123 GetINTEGERTypeAtOffset(
llvm::Type *IRType,
unsigned IROffset,
3124 QualType SourceTy,
unsigned SourceOffset)
const {
3127 if (IROffset == 0) {
3129 if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) ||
3130 IRType->isIntegerTy(64))
3139 if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
3140 IRType->isIntegerTy(32) ||
3141 (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) {
3142 unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 :
3143 cast<llvm::IntegerType>(IRType)->getBitWidth();
3151 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
3153 const llvm::StructLayout *SL =
getDataLayout().getStructLayout(STy);
3154 if (IROffset < SL->getSizeInBytes()) {
3155 unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
3156 IROffset -= SL->getElementOffset(FieldIdx);
3158 return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,
3159 SourceTy, SourceOffset);
3163 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
3166 unsigned EltOffset = IROffset/EltSize*EltSize;
3167 return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy,
3173 unsigned TySizeInBytes =
3176 assert(TySizeInBytes != SourceOffset &&
"Empty field?");
3181 std::min(TySizeInBytes-SourceOffset, 8U)*8);
3192 const llvm::DataLayout &TD) {
3197 unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
3198 unsigned HiAlign = TD.getABITypeAlignment(Hi);
3199 unsigned HiStart = llvm::alignTo(LoSize, HiAlign);
3200 assert(HiStart != 0 && HiStart <= 8 &&
"Invalid x86-64 argument pair!");
3212 if (Lo->isFloatTy())
3213 Lo = llvm::Type::getDoubleTy(Lo->getContext());
3215 assert((Lo->isIntegerTy() || Lo->isPointerTy())
3216 &&
"Invalid/unknown lo type");
3217 Lo = llvm::Type::getInt64Ty(Lo->getContext());
3221 llvm::StructType *Result = llvm::StructType::get(Lo, Hi);
3224 assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
3225 "Invalid x86-64 argument pair!");
3233 X86_64ABIInfo::Class Lo, Hi;
3234 classify(RetTy, 0, Lo, Hi,
true);
3237 assert((Hi != Memory || Lo == Memory) &&
"Invalid memory classification.");
3238 assert((Hi != SSEUp || Lo == SSE) &&
"Invalid SSEUp classification.");
3247 assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
3248 "Unknown missing lo part");
3253 llvm_unreachable(
"Invalid classification for lo word.");
3258 return getIndirectReturnResult(RetTy);
3263 ResType = GetINTEGERTypeAtOffset(
CGT.
ConvertType(RetTy), 0, RetTy, 0);
3267 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
3270 RetTy = EnumTy->getDecl()->getIntegerType();
3281 ResType = GetSSETypeAtOffset(
CGT.
ConvertType(RetTy), 0, RetTy, 0);
3294 assert(Hi == ComplexX87 &&
"Unexpected ComplexX87 classification.");
3295 ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(
getVMContext()),
3306 llvm_unreachable(
"Invalid classification for hi word.");
3313 HighPart = GetINTEGERTypeAtOffset(
CGT.
ConvertType(RetTy), 8, RetTy, 8);
3318 HighPart = GetSSETypeAtOffset(
CGT.
ConvertType(RetTy), 8, RetTy, 8);
3329 assert(Lo == SSE &&
"Unexpected SSEUp classification.");
3330 ResType = GetByteVectorType(RetTy);
3341 HighPart = GetSSETypeAtOffset(
CGT.
ConvertType(RetTy), 8, RetTy, 8);
3358 QualType Ty,
unsigned freeIntRegs,
unsigned &neededInt,
unsigned &neededSSE,
3364 X86_64ABIInfo::Class Lo, Hi;
3365 classify(Ty, 0, Lo, Hi, isNamedArg);
3369 assert((Hi != Memory || Lo == Memory) &&
"Invalid memory classification.");
3370 assert((Hi != SSEUp || Lo == SSE) &&
"Invalid SSEUp classification.");
3381 assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
3382 "Unknown missing lo part");
3395 return getIndirectResult(Ty, freeIntRegs);
3399 llvm_unreachable(
"Invalid classification for lo word.");
3412 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
3415 Ty = EnumTy->getDecl()->getIntegerType();
3429 ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0);
3443 llvm_unreachable(
"Invalid classification for hi word.");
3445 case NoClass:
break;
3450 HighPart = GetINTEGERTypeAtOffset(
CGT.
ConvertType(Ty), 8, Ty, 8);
3472 assert(Lo == SSE &&
"Unexpected SSEUp classification");
3473 ResType = GetByteVectorType(Ty);
3487 X86_64ABIInfo::classifyRegCallStructTypeImpl(
QualType Ty,
unsigned &NeededInt,
3488 unsigned &NeededSSE)
const {
3490 assert(RT &&
"classifyRegCallStructType only valid with struct types");
3492 if (RT->getDecl()->hasFlexibleArrayMember())
3493 return getIndirectReturnResult(Ty);
3496 if (
auto CXXRD = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
3497 if (CXXRD->isDynamicClass()) {
3498 NeededInt = NeededSSE = 0;
3499 return getIndirectReturnResult(Ty);
3502 for (
const auto &I : CXXRD->bases())
3503 if (classifyRegCallStructTypeImpl(I.getType(), NeededInt, NeededSSE)
3505 NeededInt = NeededSSE = 0;
3506 return getIndirectReturnResult(Ty);
3511 for (
const auto *FD : RT->getDecl()->fields()) {
3512 if (FD->getType()->isRecordType() && !FD->getType()->isUnionType()) {
3513 if (classifyRegCallStructTypeImpl(FD->getType(), NeededInt, NeededSSE)
3515 NeededInt = NeededSSE = 0;
3516 return getIndirectReturnResult(Ty);
3519 unsigned LocalNeededInt, LocalNeededSSE;
3521 LocalNeededSSE,
true)
3523 NeededInt = NeededSSE = 0;
3524 return getIndirectReturnResult(Ty);
3526 NeededInt += LocalNeededInt;
3527 NeededSSE += LocalNeededSSE;
3535 unsigned &NeededInt,
3536 unsigned &NeededSSE)
const {
3541 return classifyRegCallStructTypeImpl(Ty, NeededInt, NeededSSE);
3550 if (CallingConv == llvm::CallingConv::Win64) {
3551 WinX86_64ABIInfo Win64ABIInfo(
CGT);
3552 Win64ABIInfo.computeInfo(FI);
3556 bool IsRegCall = CallingConv == llvm::CallingConv::X86_RegCall;
3559 unsigned FreeIntRegs = IsRegCall ? 11 : 6;
3560 unsigned FreeSSERegs = IsRegCall ? 16 : 8;
3561 unsigned NeededInt, NeededSSE;
3567 classifyRegCallStructType(FI.
getReturnType(), NeededInt, NeededSSE);
3568 if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
3569 FreeIntRegs -= NeededInt;
3570 FreeSSERegs -= NeededSSE;
3599 it != ie; ++it, ++ArgNo) {
3600 bool IsNamedArg = ArgNo < NumRequiredArgs;
3602 if (IsRegCall && it->type->isStructureOrClassType())
3603 it->info = classifyRegCallStructType(it->type, NeededInt, NeededSSE);
3606 NeededSSE, IsNamedArg);
3612 if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
3613 FreeIntRegs -= NeededInt;
3614 FreeSSERegs -= NeededSSE;
3616 it->info = getIndirectResult(it->type, FreeIntRegs);
3642 llvm::PointerType::getUnqual(LTy));
3651 llvm::ConstantInt::get(CGF.
Int32Ty, (SizeInBytes + 7) & ~7);
3652 overflow_arg_area = CGF.
Builder.CreateGEP(overflow_arg_area, Offset,
3653 "overflow_arg_area.next");
3669 unsigned neededInt, neededSSE;
3677 if (!neededInt && !neededSSE)
3693 llvm::Value *gp_offset =
nullptr, *fp_offset =
nullptr;
3699 InRegs = llvm::ConstantInt::get(CGF.
Int32Ty, 48 - neededInt * 8);
3700 InRegs = CGF.
Builder.CreateICmpULE(gp_offset, InRegs,
"fits_in_gp");
3709 llvm::ConstantInt::get(CGF.
Int32Ty, 176 - neededSSE * 16);
3710 FitsInFP = CGF.
Builder.CreateICmpULE(fp_offset, FitsInFP,
"fits_in_fp");
3711 InRegs = InRegs ? CGF.
Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
3717 CGF.
Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
3739 if (neededInt && neededSSE) {
3741 assert(AI.
isDirect() &&
"Unexpected ABI info for mixed regs");
3745 assert(ST->getNumElements() == 2 &&
"Unexpected ABI info for mixed regs");
3748 assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) &&
3749 "Unexpected ABI info for mixed regs");
3750 llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
3751 llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
3754 llvm::Value *RegLoAddr = TyLo->isFPOrFPVectorTy() ? FPAddr : GPAddr;
3755 llvm::Value *RegHiAddr = TyLo->isFPOrFPVectorTy() ? GPAddr : FPAddr;
3774 }
else if (neededInt) {
3780 std::pair<CharUnits, CharUnits> SizeAlign =
3782 uint64_t TySize = SizeAlign.first.getQuantity();
3793 }
else if (neededSSE == 1) {
3798 assert(neededSSE == 2 &&
"Invalid number of needed registers!");
3811 llvm::StructType *ST = llvm::StructType::get(DoubleTy, DoubleTy);
3864 WinX86_64ABIInfo::reclassifyHvaArgType(
QualType Ty,
unsigned &FreeSSERegs,
3867 const Type *
Base =
nullptr;
3868 uint64_t NumElts = 0;
3872 FreeSSERegs -= NumElts;
3873 return getDirectX86Hva();
3879 bool IsReturnType,
bool IsVectorCall,
3880 bool IsRegCall)
const {
3886 Ty = EnumTy->getDecl()->getIntegerType();
3889 uint64_t Width = Info.
Width;
3894 if (!IsReturnType) {
3904 const Type *
Base =
nullptr;
3905 uint64_t NumElts = 0;
3908 if ((IsVectorCall || IsRegCall) &&
3911 if (FreeSSERegs >= NumElts) {
3912 FreeSSERegs -= NumElts;
3918 }
else if (IsVectorCall) {
3919 if (FreeSSERegs >= NumElts &&
3921 FreeSSERegs -= NumElts;
3923 }
else if (IsReturnType) {
3936 if (LLTy->isPointerTy() || LLTy->isIntegerTy())
3943 if (Width > 64 || !llvm::isPowerOf2_64(Width))
3953 if (BT && BT->
getKind() == BuiltinType::Bool)
3958 if (IsMingw64 && BT && BT->
getKind() == BuiltinType::LongDouble) {
3960 if (LDF == &llvm::APFloat::x87DoubleExtended())
3968 unsigned FreeSSERegs,
3970 bool IsRegCall)
const {
3975 if (Count < VectorcallMaxParamNumAsReg)
3976 I.info = classify(I.type, FreeSSERegs,
false, IsVectorCall, IsRegCall);
3980 unsigned ZeroSSERegsAvail = 0;
3981 I.info = classify(I.type, ZeroSSERegsAvail,
false,
3982 IsVectorCall, IsRegCall);
3988 I.info = reclassifyHvaArgType(I.type, FreeSSERegs, I.info);
3997 unsigned FreeSSERegs = 0;
4001 }
else if (IsRegCall) {
4008 IsVectorCall, IsRegCall);
4013 }
else if (IsRegCall) {
4019 computeVectorCallArgs(FI, FreeSSERegs, IsVectorCall, IsRegCall);
4022 I.info = classify(I.type, FreeSSERegs,
false, IsVectorCall, IsRegCall);
4030 bool IsIndirect =
false;
4036 IsIndirect = Width > 64 || !llvm::isPowerOf2_64(Width);
4048 class PPC32_SVR4_ABIInfo :
public DefaultABIInfo {
4049 bool IsSoftFloatABI;
4055 : DefaultABIInfo(CGT), IsSoftFloatABI(SoftFloatABI) {}
4079 Ty = CTy->getElementType();
4087 const Type *AlignTy =
nullptr;
4104 if (
getTarget().getTriple().isOSDarwin()) {
4106 TI.second = getParamTypeAlignment(Ty);
4114 const unsigned OverflowLimit = 8;
4142 if (isInt || IsSoftFloatABI) {
4151 if (isI64 || (isF64 && IsSoftFloatABI)) {
4152 NumRegs = Builder.CreateAdd(NumRegs, Builder.getInt8(1));
4153 NumRegs = Builder.CreateAnd(NumRegs, Builder.getInt8((uint8_t) ~1U));
4157 Builder.CreateICmpULT(NumRegs, Builder.getInt8(OverflowLimit),
"cond");
4163 Builder.CreateCondBr(CC, UsingRegs, UsingOverflow);
4166 if (isIndirect) DirectTy = DirectTy->getPointerTo(0);
4180 if (!(isInt || IsSoftFloatABI)) {
4189 Builder.CreateMul(NumRegs, Builder.getInt8(RegSize.
getQuantity()));
4197 Builder.CreateAdd(NumRegs,
4198 Builder.getInt8((isI64 || (isF64 && IsSoftFloatABI)) ? 2 : 1));
4209 Builder.
CreateStore(Builder.getInt8(OverflowLimit), NumRegsAddr);
4217 Size =
TypeInfo.first.alignTo(OverflowAreaAlign);
4228 if (Align > OverflowAreaAlign) {
4238 Builder.
CreateStore(OverflowArea.getPointer(), OverflowAreaAddr);
4265 llvm::IntegerType *i8 = CGF.
Int8Ty;
4266 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
4267 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
4268 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
4302 class PPC64_SVR4_ABIInfo :
public ABIInfo {
4310 static const unsigned GPRBits = 64;
4313 bool IsSoftFloatABI;
4317 bool IsQPXVectorTy(
const Type *Ty)
const {
4322 unsigned NumElements = VT->getNumElements();
4323 if (NumElements == 1)
4326 if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double)) {
4329 }
else if (VT->getElementType()->
4330 isSpecificBuiltinType(BuiltinType::Float)) {
4339 bool IsQPXVectorTy(
QualType Ty)
const {
4347 IsSoftFloatABI(SoftFloatABI) {}
4349 bool isPromotableTypeForABI(
QualType Ty)
const;
4357 uint64_t Members)
const override;
4375 if (IsQPXVectorTy(T) ||
4395 PPC64_SVR4_ABIInfo::ABIKind
Kind,
bool HasQPX,
4409 class PPC64TargetCodeGenInfo :
public DefaultTargetCodeGenInfo {
4411 PPC64TargetCodeGenInfo(
CodeGenTypes &
CGT) : DefaultTargetCodeGenInfo(CGT) {}
4427 PPC64_SVR4_ABIInfo::isPromotableTypeForABI(
QualType Ty)
const {
4430 Ty = EnumTy->getDecl()->getIntegerType();
4439 switch (BT->getKind()) {
4440 case BuiltinType::Int:
4441 case BuiltinType::UInt:
4455 Ty = CTy->getElementType();
4459 if (IsQPXVectorTy(Ty)) {
4470 const Type *AlignAsType =
nullptr;
4474 if (IsQPXVectorTy(EltType) || (EltType->
isVectorType() &&
4477 AlignAsType = EltType;
4481 const Type *
Base =
nullptr;
4482 uint64_t Members = 0;
4483 if (!AlignAsType &&
Kind == ELFv2 &&
4488 if (AlignAsType && IsQPXVectorTy(AlignAsType)) {
4493 }
else if (AlignAsType) {
4512 uint64_t &Members)
const {
4514 uint64_t NElements = AT->getSize().getZExtValue();
4519 Members *= NElements;
4528 if (
const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
4529 for (
const auto &I : CXXRD->bases()) {
4534 uint64_t FldMembers;
4538 Members += FldMembers;
4542 for (
const auto *FD : RD->
fields()) {
4547 if (AT->getSize().getZExtValue() == 0)
4549 FT = AT->getElementType();
4556 FD->isBitField() && FD->getBitWidthValue(
getContext()) == 0)
4559 uint64_t FldMembers;
4564 std::max(Members, FldMembers) : Members + FldMembers);
4578 Ty = CT->getElementType();
4594 QualType EltTy = VT->getElementType();
4595 unsigned NumElements =
4610 bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateBaseType(
QualType Ty)
const {
4614 if (BT->getKind() == BuiltinType::Float ||
4615 BT->getKind() == BuiltinType::Double ||
4616 BT->getKind() == BuiltinType::LongDouble) {
4629 bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateSmallEnough(
4630 const Type *
Base, uint64_t Members)
const {
4637 return Members * NumRegs <= 8;
4653 else if (Size < 128) {
4663 uint64_t ABIAlign = getParamTypeAlignment(Ty).getQuantity();
4667 const Type *Base =
nullptr;
4668 uint64_t Members = 0;
4669 if (
Kind == ELFv2 &&
4672 llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
4681 if (Bits > 0 && Bits <= 8 * GPRBits) {
4686 if (Bits <= GPRBits)
4688 llvm::IntegerType::get(
getVMContext(), llvm::alignTo(Bits, 8));
4692 uint64_t RegBits = ABIAlign * 8;
4693 uint64_t NumRegs = llvm::alignTo(Bits, RegBits) / RegBits;
4695 CoerceTy = llvm::ArrayType::get(RegTy, NumRegs);
4704 TyAlign > ABIAlign);
4707 return (isPromotableTypeForABI(Ty) ?
4725 else if (Size < 128) {
4733 const Type *Base =
nullptr;
4734 uint64_t Members = 0;
4735 if (
Kind == ELFv2 &&
4738 llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
4744 if (
Kind == ELFv2 && Bits <= 2 * GPRBits) {
4749 if (Bits > GPRBits) {
4750 CoerceTy = llvm::IntegerType::get(
getVMContext(), GPRBits);
4751 CoerceTy = llvm::StructType::get(CoerceTy, CoerceTy);
4754 llvm::IntegerType::get(
getVMContext(), llvm::alignTo(Bits, 8));
4762 return (isPromotableTypeForABI(RetTy) ?
4770 TypeInfo.second = getParamTypeAlignment(Ty);
4782 if (EltSize < SlotSize) {
4784 SlotSize * 2, SlotSize,
4791 SlotSize - EltSize);
4793 2 * SlotSize - EltSize);
4824 llvm::IntegerType *i8 = CGF.
Int8Ty;
4825 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
4826 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
4827 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
4864 PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable(
4900 ABIKind getABIKind()
const {
return Kind; }
4901 bool isDarwinPCS()
const {
return Kind == DarwinPCS; }
4907 uint64_t Members)
const override;
4909 bool isIllegalVectorType(
QualType Ty)
const;
4916 it.info = classifyArgumentType(it.type);
4927 return Kind == Win64 ?
EmitMSVAArg(CGF, VAListAddr, Ty)
4928 : isDarwinPCS() ? EmitDarwinVAArg(VAListAddr, Ty, CGF)
4929 : EmitAAPCSVAArg(VAListAddr, Ty, CGF);
4935 bool shouldPassIndirectlyForSwift(
CharUnits totalSize,
4937 bool asReturnValue)
const override {
4940 bool isSwiftErrorInRegister()
const override {
4945 unsigned elts)
const override;
4953 StringRef getARCRetainAutoreleasedReturnValueMarker()
const override {
4954 return "mov\tfp, fp\t\t// marker for objc_retainAutoreleaseReturnValue";
4961 bool doesReturnSlotInterfereWithArgs()
const override {
return false; }
4964 class WindowsAArch64TargetCodeGenInfo :
public AArch64TargetCodeGenInfo {
4966 WindowsAArch64TargetCodeGenInfo(
CodeGenTypes &
CGT, AArch64ABIInfo::ABIKind K)
4967 : AArch64TargetCodeGenInfo(CGT, K) {}
4969 void getDependentLibraryOption(llvm::StringRef Lib,
4971 Opt =
"/DEFAULTLIB:" + qualifyWindowsLibrary(Lib);
4974 void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef
Value,
4976 Opt =
"/FAILIFMISMATCH:\"" + Name.str() +
"=" + Value.str() +
"\"";
4985 if (isIllegalVectorType(Ty)) {
4998 llvm::VectorType::get(llvm::Type::getInt32Ty(
getVMContext()), 2);
5003 llvm::VectorType::get(llvm::Type::getInt32Ty(
getVMContext()), 4);
5012 Ty = EnumTy->getDecl()->getIntegerType();
5030 if (IsEmpty || Size == 0) {
5036 if (IsEmpty && Size == 0)
5042 const Type *Base =
nullptr;
5043 uint64_t Members = 0;
5053 if (
getTarget().isRenderScriptTarget()) {
5057 Size = llvm::alignTo(Size, 64);
5061 if (Alignment < 128 && Size == 128) {
5082 RetTy = EnumTy->getDecl()->getIntegerType();
5093 const Type *Base =
nullptr;
5094 uint64_t Members = 0;
5103 if (
getTarget().isRenderScriptTarget()) {
5107 Size = llvm::alignTo(Size, 64);
5111 if (Alignment < 128 && Size == 128) {
5122 bool AArch64ABIInfo::isIllegalVectorType(
QualType Ty)
const {
5125 unsigned NumElements = VT->getNumElements();
5128 if (!llvm::isPowerOf2_32(NumElements))
5130 return Size != 64 && (Size != 128 || NumElements == 1);
5135 bool AArch64ABIInfo::isLegalVectorTypeForSwift(
CharUnits totalSize,
5137 unsigned elts)
const {
5138 if (!llvm::isPowerOf2_32(elts))
5146 bool AArch64ABIInfo::isHomogeneousAggregateBaseType(
QualType Ty)
const {
5152 if (BT->isFloatingPoint())
5156 if (VecSize == 64 || VecSize == 128)
5162 bool AArch64ABIInfo::isHomogeneousAggregateSmallEnough(
const Type *Base,
5163 uint64_t Members)
const {
5164 return Members <= 4;
5175 BaseTy = llvm::PointerType::getUnqual(BaseTy);
5179 unsigned NumRegs = 1;
5180 if (llvm::ArrayType *ArrTy = dyn_cast<llvm::ArrayType>(BaseTy)) {
5181 BaseTy = ArrTy->getElementType();
5182 NumRegs = ArrTy->getNumElements();
5184 bool IsFPR = BaseTy->isFloatingPointTy() || BaseTy->isVectorTy();
5209 int RegSize = IsIndirect ? 8 : TyInfo.first.
getQuantity();
5218 RegSize = llvm::alignTo(RegSize, 8);
5227 RegSize = 16 * NumRegs;
5239 UsingStack = CGF.
Builder.CreateICmpSGE(
5240 reg_offs, llvm::ConstantInt::get(CGF.
Int32Ty, 0));
5242 CGF.
Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock);
5251 if (!IsFPR && !IsIndirect && TyAlign.
getQuantity() > 8) {
5254 reg_offs = CGF.
Builder.CreateAdd(
5255 reg_offs, llvm::ConstantInt::get(CGF.
Int32Ty, Align - 1),
5257 reg_offs = CGF.
Builder.CreateAnd(
5258 reg_offs, llvm::ConstantInt::get(CGF.
Int32Ty, -Align),
5267 NewOffset = CGF.
Builder.CreateAdd(
5268 reg_offs, llvm::ConstantInt::get(CGF.
Int32Ty, RegSize),
"new_reg_offs");
5274 InRegs = CGF.
Builder.CreateICmpSLE(
5275 NewOffset, llvm::ConstantInt::get(CGF.
Int32Ty, 0),
"inreg");
5277 CGF.
Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock);
5289 reg_top_offset,
"reg_top_p");
5291 Address BaseAddr(CGF.
Builder.CreateInBoundsGEP(reg_top, reg_offs),
5299 MemTy = llvm::PointerType::getUnqual(MemTy);
5302 const Type *Base =
nullptr;
5303 uint64_t NumMembers = 0;
5305 if (IsHFA && NumMembers > 1) {
5310 assert(!IsIndirect &&
"Homogeneous aggregates should be passed directly");
5313 llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers);
5315 std::max(TyAlign, BaseTyInfo.second));
5320 BaseTyInfo.first.getQuantity() < 16)
5321 Offset = 16 - BaseTyInfo.first.getQuantity();
5323 for (
unsigned i = 0; i < NumMembers; ++i) {
5341 CharUnits SlotSize = BaseAddr.getAlignment();
5344 TyInfo.first < SlotSize) {
5368 OnStackPtr = CGF.
Builder.CreatePtrToInt(OnStackPtr, CGF.
Int64Ty);
5370 OnStackPtr = CGF.
Builder.CreateAdd(
5371 OnStackPtr, llvm::ConstantInt::get(CGF.
Int64Ty, Align - 1),
5373 OnStackPtr = CGF.
Builder.CreateAnd(
5374 OnStackPtr, llvm::ConstantInt::get(CGF.
Int64Ty, -Align),
5379 Address OnStackAddr(OnStackPtr,
5386 StackSize = StackSlotSize;
5388 StackSize = TyInfo.first.
alignTo(StackSlotSize);
5392 CGF.
Builder.CreateInBoundsGEP(OnStackPtr, StackSizeC,
"new_stack");
5398 TyInfo.first < StackSlotSize) {
5413 OnStackAddr, OnStackBlock,
"vaargs.addr");
5445 bool IsIndirect =
false;
5446 if (TyInfo.first.getQuantity() > 16) {
5447 const Type *Base =
nullptr;
5448 uint64_t Members = 0;
5453 TyInfo, SlotSize,
true);
5488 bool isEABI()
const {
5489 switch (
getTarget().getTriple().getEnvironment()) {
5490 case llvm::Triple::Android:
5491 case llvm::Triple::EABI:
5492 case llvm::Triple::EABIHF:
5493 case llvm::Triple::GNUEABI:
5494 case llvm::Triple::GNUEABIHF:
5495 case llvm::Triple::MuslEABI:
5496 case llvm::Triple::MuslEABIHF:
5503 bool isEABIHF()
const {
5504 switch (
getTarget().getTriple().getEnvironment()) {
5505 case llvm::Triple::EABIHF:
5506 case llvm::Triple::GNUEABIHF:
5507 case llvm::Triple::MuslEABIHF:
5514 ABIKind getABIKind()
const {
return Kind; }
5519 bool isIllegalVectorType(
QualType Ty)
const;
5523 uint64_t Members)
const override;
5534 bool shouldPassIndirectlyForSwift(
CharUnits totalSize,
5536 bool asReturnValue)
const override {
5539 bool isSwiftErrorInRegister()
const override {
5543 unsigned elts)
const override;
5551 const ARMABIInfo &getABIInfo()
const {
5559 StringRef getARCRetainAutoreleasedReturnValueMarker()
const override {
5560 return "mov\tr7, r7\t\t// marker for objc_retainAutoreleaseReturnValue";
5572 unsigned getSizeOfUnwindException()
const override {
5573 if (getABIInfo().isEABI())
return 88;
5577 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
5580 if (!IsForDefinition)
5582 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
5586 const ARMInterruptAttr *
Attr = FD->
getAttr<ARMInterruptAttr>();
5591 switch (Attr->getInterrupt()) {
5592 case ARMInterruptAttr::Generic: Kind =
"";
break;
5593 case ARMInterruptAttr::IRQ: Kind =
"IRQ";
break;
5594 case ARMInterruptAttr::FIQ: Kind =
"FIQ";
break;
5595 case ARMInterruptAttr::SWI: Kind =
"SWI";
break;
5596 case ARMInterruptAttr::ABORT: Kind =
"ABORT";
break;
5597 case ARMInterruptAttr::UNDEF: Kind =
"UNDEF";
break;
5600 llvm::Function *Fn = cast<llvm::Function>(GV);
5602 Fn->addFnAttr(
"interrupt", Kind);
5604 ARMABIInfo::ABIKind ABI = cast<ARMABIInfo>(getABIInfo()).getABIKind();
5605 if (ABI == ARMABIInfo::APCS)
5611 llvm::AttrBuilder B;
5612 B.addStackAlignmentAttr(8);
5613 Fn->addAttributes(llvm::AttributeList::FunctionIndex, B);
5617 class WindowsARMTargetCodeGenInfo :
public ARMTargetCodeGenInfo {
5620 : ARMTargetCodeGenInfo(CGT, K) {}
5622 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
5626 void getDependentLibraryOption(llvm::StringRef Lib,
5628 Opt =
"/DEFAULTLIB:" + qualifyWindowsLibrary(Lib);
5631 void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef
Value,
5633 Opt =
"/FAILIFMISMATCH:\"" + Name.str() +
"=" + Value.str() +
"\"";
5637 void WindowsARMTargetCodeGenInfo::setTargetAttributes(
5640 ARMTargetCodeGenInfo::setTargetAttributes(D, GV, CGM, IsForDefinition);
5641 if (!IsForDefinition)
5643 addStackProbeSizeTargetAttribute(D, GV, CGM);
5667 if (isEABIHF() ||
getTarget().getTriple().isWatchABI())
5668 return llvm::CallingConv::ARM_AAPCS_VFP;
5670 return llvm::CallingConv::ARM_AAPCS;
5672 return llvm::CallingConv::ARM_APCS;
5678 switch (getABIKind()) {
5679 case APCS:
return llvm::CallingConv::ARM_APCS;
5680 case AAPCS:
return llvm::CallingConv::ARM_AAPCS;
5681 case AAPCS_VFP:
return llvm::CallingConv::ARM_AAPCS_VFP;
5682 case AAPCS16_VFP:
return llvm::CallingConv::ARM_AAPCS_VFP;
5684 llvm_unreachable(
"bad ABI kind");
5687 void ARMABIInfo::setCCs() {
5693 if (abiCC != getLLVMDefaultCC())
5705 if (abiCC != getLLVMDefaultCC())
5710 bool isVariadic)
const {
5718 bool IsEffectivelyAAPCS_VFP = getABIKind() == AAPCS_VFP && !isVariadic;
5723 if (isIllegalVectorType(Ty)) {
5747 llvm::Type *ResType = IsEffectivelyAAPCS_VFP ?
5756 Ty = EnumTy->getDecl()->getIntegerType();
5771 if (IsEffectivelyAAPCS_VFP) {
5774 const Type *Base =
nullptr;
5775 uint64_t Members = 0;
5777 assert(Base &&
"Base class should be set for homogeneous aggregate");
5781 }
else if (getABIKind() == ARMABIInfo::AAPCS16_VFP) {
5785 const Type *Base =
nullptr;
5786 uint64_t Members = 0;
5788 assert(Base && Members <= 4 &&
"unexpected homogeneous aggregate");
5795 if (getABIKind() == ARMABIInfo::AAPCS16_VFP &&
5808 uint64_t ABIAlign = 4;
5810 if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
5811 getABIKind() == ARMABIInfo::AAPCS)
5815 assert(getABIKind() != ARMABIInfo::AAPCS16_VFP &&
"unexpected byval");
5818 TyAlign > ABIAlign);
5823 if (
getTarget().isRenderScriptTarget()) {
5844 llvm::LLVMContext &VMContext) {
5876 if (!RT)
return false;
5887 bool HadField =
false;
5890 i != e; ++i, ++idx) {
5929 bool isVariadic)
const {
5930 bool IsEffectivelyAAPCS_VFP =
5931 (getABIKind() == AAPCS_VFP || getABIKind() == AAPCS16_VFP) && !isVariadic;
5945 llvm::Type *ResType = IsEffectivelyAAPCS_VFP ?
5954 RetTy = EnumTy->getDecl()->getIntegerType();
5961 if (getABIKind() == APCS) {
5994 if (IsEffectivelyAAPCS_VFP) {
5995 const Type *Base =
nullptr;
5996 uint64_t Members = 0;
5998 assert(Base &&
"Base class should be set for homogeneous aggregate");
6010 if (
getTarget().isRenderScriptTarget()) {
6023 }
else if (Size <= 128 && getABIKind() == AAPCS16_VFP) {
6026 llvm::ArrayType::get(Int32Ty, llvm::alignTo(Size, 32) / 32);
6034 bool ARMABIInfo::isIllegalVectorType(
QualType Ty)
const {
6042 unsigned NumElements = VT->getNumElements();
6044 if (!llvm::isPowerOf2_32(NumElements) && NumElements != 3)
6048 unsigned NumElements = VT->getNumElements();
6051 if (!llvm::isPowerOf2_32(NumElements))
6060 bool ARMABIInfo::isLegalVectorTypeForSwift(
CharUnits vectorSize,
6062 unsigned numElts)
const {
6063 if (!llvm::isPowerOf2_32(numElts))
6065 unsigned size =
getDataLayout().getTypeStoreSizeInBits(eltTy);
6074 bool ARMABIInfo::isHomogeneousAggregateBaseType(
QualType Ty)
const {
6078 if (BT->getKind() == BuiltinType::Float ||
6079 BT->getKind() == BuiltinType::Double ||
6080 BT->getKind() == BuiltinType::LongDouble)
6084 if (VecSize == 64 || VecSize == 128)
6090 bool ARMABIInfo::isHomogeneousAggregateSmallEnough(
const Type *Base,
6091 uint64_t Members)
const {
6092 return Members <= 4;
6107 CharUnits TyAlignForABI = TyInfo.second;
6110 bool IsIndirect =
false;
6111 const Type *Base =
nullptr;
6112 uint64_t Members = 0;
6119 getABIKind() == ARMABIInfo::AAPCS16_VFP &&
6127 }
else if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
6128 getABIKind() == ARMABIInfo::AAPCS) {
6131 }
else if (getABIKind() == ARMABIInfo::AAPCS16_VFP) {
6138 TyInfo.second = TyAlignForABI;
6150 class NVPTXABIInfo :
public ABIInfo {
6167 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
6174 static void addNVVMMetadata(llvm::Function *F, StringRef Name,
int Operand);
6187 RetTy = EnumTy->getDecl()->getIntegerType();
6196 Ty = EnumTy->getDecl()->getIntegerType();
6221 llvm_unreachable(
"NVPTX does not support varargs");
6224 void NVPTXTargetCodeGenInfo::setTargetAttributes(
6227 if (!IsForDefinition)
6229 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
6232 llvm::Function *F = cast<llvm::Function>(GV);
6238 if (FD->
hasAttr<OpenCLKernelAttr>()) {
6241 addNVVMMetadata(F,
"kernel", 1);
6243 F->addFnAttr(llvm::Attribute::NoInline);
6252 if (FD->
hasAttr<CUDAGlobalAttr>()) {
6254 addNVVMMetadata(F,
"kernel", 1);
6256 if (CUDALaunchBoundsAttr *
Attr = FD->
getAttr<CUDALaunchBoundsAttr>()) {
6258 llvm::APSInt MaxThreads(32);
6259 MaxThreads =
Attr->getMaxThreads()->EvaluateKnownConstInt(M.
getContext());
6261 addNVVMMetadata(F,
"maxntidx", MaxThreads.getExtValue());
6266 if (
Attr->getMinBlocks()) {
6267 llvm::APSInt MinBlocks(32);
6268 MinBlocks =
Attr->getMinBlocks()->EvaluateKnownConstInt(M.
getContext());
6271 addNVVMMetadata(F,
"minctasm", MinBlocks.getExtValue());
6277 void NVPTXTargetCodeGenInfo::addNVVMMetadata(llvm::Function *F, StringRef Name,
6279 llvm::Module *M = F->getParent();
6283 llvm::NamedMDNode *MD = M->getOrInsertNamedMetadata(
"nvvm.annotations");
6285 llvm::Metadata *MDVals[] = {
6286 llvm::ConstantAsMetadata::get(F), llvm::MDString::get(Ctx, Name),
6287 llvm::ConstantAsMetadata::get(
6288 llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), Operand))};
6290 MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
6307 bool isPromotableIntegerType(
QualType Ty)
const;
6308 bool isCompoundType(
QualType Ty)
const;
6309 bool isVectorArgumentType(
QualType Ty)
const;
6310 bool isFPArgumentType(
QualType Ty)
const;
6320 I.info = classifyArgumentType(I.type);
6326 bool shouldPassIndirectlyForSwift(
CharUnits totalSize,
6328 bool asReturnValue)
const override {
6331 bool isSwiftErrorInRegister()
const override {
6344 bool SystemZABIInfo::isPromotableIntegerType(
QualType Ty)
const {
6347 Ty = EnumTy->getDecl()->getIntegerType();
6355 switch (BT->getKind()) {
6356 case BuiltinType::Int:
6357 case BuiltinType::UInt:
6365 bool SystemZABIInfo::isCompoundType(
QualType Ty)
const {
6371 bool SystemZABIInfo::isVectorArgumentType(
QualType Ty)
const {
6372 return (HasVector &&
6377 bool SystemZABIInfo::isFPArgumentType(
QualType Ty)
const {
6379 switch (BT->getKind()) {
6380 case BuiltinType::Float:
6381 case BuiltinType::Double:
6396 if (
const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
6397 for (
const auto &I : CXXRD->bases()) {
6406 Found = GetSingleElementType(Base);
6410 for (
const auto *FD : RD->
fields()) {
6415 FD->isBitField() && FD->getBitWidthValue(
getContext()) == 0)
6422 Found = GetSingleElementType(FD->getType());
6453 bool InFPRs =
false;
6454 bool IsVector =
false;
6458 DirectTy = llvm::PointerType::getUnqual(DirectTy);
6463 InFPRs = ArgTy->isFloatTy() || ArgTy->isDoubleTy();
6464 IsVector = ArgTy->isVectorTy();
6465 UnpaddedSize = TyInfo.first;
6466 DirectAlign = TyInfo.second;
6469 if (IsVector && UnpaddedSize > PaddedSize)
6471 assert((UnpaddedSize <= PaddedSize) &&
"Invalid argument size.");
6473 CharUnits Padding = (PaddedSize - UnpaddedSize);
6477 llvm::ConstantInt::get(IndexTy, PaddedSize.
getQuantity());
6485 "overflow_arg_area_ptr");
6495 "overflow_arg_area");
6503 unsigned MaxRegs, RegCountField, RegSaveIndex;
6514 RegPadding = Padding;
6521 llvm::Value *MaxRegsV = llvm::ConstantInt::get(IndexTy, MaxRegs);
6528 CGF.
Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
6535 CGF.
Builder.CreateMul(RegCount, PaddedSizeV,
"scaled_reg_count");
6537 llvm::ConstantInt::get(IndexTy, RegSaveIndex * PaddedSize.
getQuantity()
6540 CGF.
Builder.CreateAdd(ScaledRegCount, RegBase,
"reg_offset");
6543 "reg_save_area_ptr");
6553 llvm::Value *One = llvm::ConstantInt::get(IndexTy, 1);
6555 CGF.
Builder.CreateAdd(RegCount, One,
"reg_count");
6576 "overflow_arg_area");
6583 MemAddr, InMemBlock,
"va_arg.addr");
6595 if (isVectorArgumentType(RetTy))
6599 return (isPromotableIntegerType(RetTy) ?
6609 if (isPromotableIntegerType(Ty))
6616 QualType SingleElementTy = GetSingleElementType(Ty);
6617 if (isVectorArgumentType(SingleElementTy) &&
6618 getContext().getTypeSize(SingleElementTy) == Size)
6622 if (Size != 8 && Size != 16 && Size != 32 && Size != 64)
6635 if (isFPArgumentType(SingleElementTy)) {
6636 assert(Size == 32 || Size == 64);
6647 if (isCompoundType(Ty))
6663 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
6670 void MSP430TargetCodeGenInfo::setTargetAttributes(
6673 if (!IsForDefinition)
6675 if (
const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
6676 if (
const MSP430InterruptAttr *attr = FD->getAttr<MSP430InterruptAttr>()) {
6678 llvm::Function *F = cast<llvm::Function>(GV);
6681 F->setCallingConv(llvm::CallingConv::MSP430_INTR);
6684 F->addFnAttr(llvm::Attribute::NoInline);
6687 unsigned Num = attr->getNumber() / 2;
6689 "__isr_" + Twine(Num), F);
6700 class MipsABIInfo :
public ABIInfo {
6702 unsigned MinABIStackAlignInBytes, StackAlignInBytes;
6703 void CoerceToIntArgs(uint64_t TySize,
6710 ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8),
6711 StackAlignInBytes(IsO32 ? 8 : 16) {}
6722 unsigned SizeOfUnwindException;
6726 SizeOfUnwindException(IsO32 ? 24 : 32) {}
6732 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
6735 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
6737 llvm::Function *Fn = cast<llvm::Function>(GV);
6739 if (FD->
hasAttr<MipsLongCallAttr>())
6740 Fn->addFnAttr(
"long-call");
6741 else if (FD->
hasAttr<MipsShortCallAttr>())
6742 Fn->addFnAttr(
"short-call");
6745 if (!IsForDefinition)
6748 if (FD->
hasAttr<Mips16Attr>()) {
6749 Fn->addFnAttr(
"mips16");
6751 else if (FD->
hasAttr<NoMips16Attr>()) {
6752 Fn->addFnAttr(
"nomips16");
6755 if (FD->
hasAttr<MicroMipsAttr>())
6756 Fn->addFnAttr(
"micromips");
6757 else if (FD->
hasAttr<NoMicroMipsAttr>())
6758 Fn->addFnAttr(
"nomicromips");
6760 const MipsInterruptAttr *
Attr = FD->
getAttr<MipsInterruptAttr>();
6765 switch (Attr->getInterrupt()) {
6766 case MipsInterruptAttr::eic: Kind =
"eic";
break;
6767 case MipsInterruptAttr::sw0: Kind =
"sw0";
break;
6768 case MipsInterruptAttr::sw1: Kind =
"sw1";
break;
6769 case MipsInterruptAttr::hw0: Kind =
"hw0";
break;
6770 case MipsInterruptAttr::hw1: Kind =
"hw1";
break;
6771 case MipsInterruptAttr::hw2: Kind =
"hw2";
break;
6772 case MipsInterruptAttr::hw3: Kind =
"hw3";
break;
6773 case MipsInterruptAttr::hw4: Kind =
"hw4";
break;
6774 case MipsInterruptAttr::hw5: Kind =
"hw5";
break;
6777 Fn->addFnAttr(
"interrupt", Kind);
6784 unsigned getSizeOfUnwindException()
const override {
6785 return SizeOfUnwindException;
6790 void MipsABIInfo::CoerceToIntArgs(
6792 llvm::IntegerType *IntTy =
6793 llvm::IntegerType::get(
getVMContext(), MinABIStackAlignInBytes * 8);
6796 for (
unsigned N = TySize / (MinABIStackAlignInBytes * 8); N; --N)
6797 ArgList.push_back(IntTy);
6800 unsigned R = TySize % (MinABIStackAlignInBytes * 8);
6803 ArgList.push_back(llvm::IntegerType::get(
getVMContext(), R));
6812 CoerceToIntArgs(TySize, ArgList);
6823 CoerceToIntArgs(TySize, ArgList);
6829 assert(!(TySize % 8) &&
"Size of structure must be multiple of 8.");
6831 uint64_t LastOffset = 0;
6833 llvm::IntegerType *I64 = llvm::IntegerType::get(
getVMContext(), 64);
6838 i != e; ++i, ++idx) {
6842 if (!BT || BT->
getKind() != BuiltinType::Double)
6850 for (
unsigned j = (Offset - LastOffset) / 64; j > 0; --j)
6851 ArgList.push_back(I64);
6854 ArgList.push_back(llvm::Type::getDoubleTy(
getVMContext()));
6855 LastOffset = Offset + 64;
6858 CoerceToIntArgs(TySize - LastOffset, IntArgList);
6859 ArgList.append(IntArgList.begin(), IntArgList.end());
6864 llvm::Type *MipsABIInfo::getPaddingType(uint64_t OrigOffset,
6866 if (OrigOffset + MinABIStackAlignInBytes > Offset)
6869 return llvm::IntegerType::get(
getVMContext(), (Offset - OrigOffset) * 8);
6876 uint64_t OrigOffset =
Offset;
6881 (uint64_t)StackAlignInBytes);
6882 unsigned CurrOffset = llvm::alignTo(Offset, Align);
6883 Offset = CurrOffset + llvm::alignTo(TySize, Align * 8) / 8;
6891 Offset = OrigOffset + MinABIStackAlignInBytes;
6900 getPaddingType(OrigOffset, CurrOffset));
6907 Ty = EnumTy->getDecl()->getIntegerType();
6914 nullptr, 0, IsO32 ?
nullptr : getPaddingType(OrigOffset, CurrOffset));
6918 MipsABIInfo::returnAggregateInRegs(
QualType RetTy, uint64_t Size)
const {
6938 for (; b != e; ++b) {
6955 CoerceToIntArgs(Size, RTList);
6967 if (!IsO32 && Size == 0)
6991 RetTy = EnumTy->getDecl()->getIntegerType();
7015 unsigned SlotSizeInBits = IsO32 ? 32 : 64;
7017 bool DidPromote =
false;
7037 TyInfo, ArgSlotSize,
true);
7060 bool MipsABIInfo::shouldSignExtUnsignedType(
QualType Ty)
const {
7108 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
7111 if (!IsForDefinition)
7113 const auto *FD = dyn_cast_or_null<FunctionDecl>(D);
7115 auto *Fn = cast<llvm::Function>(GV);
7117 if (FD->getAttr<AVRInterruptAttr>())
7118 Fn->addFnAttr(
"interrupt");
7120 if (FD->getAttr<AVRSignalAttr>())
7121 Fn->addFnAttr(
"signal");
7134 class TCETargetCodeGenInfo :
public DefaultTargetCodeGenInfo {
7137 : DefaultTargetCodeGenInfo(CGT) {}
7139 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
7144 void TCETargetCodeGenInfo::setTargetAttributes(
7147 if (!IsForDefinition)
7149 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
7152 llvm::Function *F = cast<llvm::Function>(GV);
7155 if (FD->
hasAttr<OpenCLKernelAttr>()) {
7157 F->addFnAttr(llvm::Attribute::NoInline);
7158 const ReqdWorkGroupSizeAttr *
Attr = FD->
getAttr<ReqdWorkGroupSizeAttr>();
7161 llvm::LLVMContext &Context = F->getContext();
7162 llvm::NamedMDNode *OpenCLMetadata =
7164 "opencl.kernel_wg_size_info");
7167 Operands.push_back(llvm::ConstantAsMetadata::get(F));
7170 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
7171 M.
Int32Ty, llvm::APInt(32, Attr->getXDim()))));
7173 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
7174 M.
Int32Ty, llvm::APInt(32, Attr->getYDim()))));
7176 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
7177 M.
Int32Ty, llvm::APInt(32, Attr->getZDim()))));
7183 llvm::ConstantAsMetadata::get(llvm::ConstantInt::getTrue(Context)));
7184 OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Operands));
7198 class HexagonABIInfo :
public ABIInfo {
7238 Ty = EnumTy->getDecl()->getIntegerType();
7276 RetTy = EnumTy->getDecl()->getIntegerType();
7316 class LanaiABIInfo :
public DefaultABIInfo {
7320 bool shouldUseInReg(
QualType Ty, CCState &State)
const;
7343 bool LanaiABIInfo::shouldUseInReg(
QualType Ty, CCState &State)
const {
7345 unsigned SizeInRegs = llvm::alignTo(Size, 32U) / 32U;
7347 if (SizeInRegs == 0)
7350 if (SizeInRegs > State.FreeRegs) {
7355 State.FreeRegs -= SizeInRegs;
7361 CCState &State)
const {
7363 if (State.FreeRegs) {
7371 const unsigned MinABIStackAlignInBytes = 4;
7375 MinABIStackAlignInBytes);
7379 CCState &State)
const {
7385 return getIndirectResult(Ty,
false, State);
7394 return getIndirectResult(Ty,
true, State);
7402 if (SizeInRegs <= State.FreeRegs) {
7403 llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
7405 llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
7406 State.FreeRegs -= SizeInRegs;
7411 return getIndirectResult(Ty,
true, State);
7416 Ty = EnumTy->getDecl()->getIntegerType();
7418 bool InReg = shouldUseInReg(Ty, State);
7443 class AMDGPUABIInfo final :
public DefaultABIInfo {
7445 static const unsigned MaxNumRegsForArgsRet = 16;
7447 unsigned numRegsForType(
QualType Ty)
const;
7451 uint64_t Members)
const override;
7455 DefaultABIInfo(CGT) {}
7464 bool AMDGPUABIInfo::isHomogeneousAggregateBaseType(
QualType Ty)
const {
7468 bool AMDGPUABIInfo::isHomogeneousAggregateSmallEnough(
7469 const Type *Base, uint64_t Members)
const {
7473 return Members * NumRegs <= MaxNumRegsForArgsRet;
7477 unsigned AMDGPUABIInfo::numRegsForType(
QualType Ty)
const {
7478 unsigned NumRegs = 0;
7483 QualType EltTy = VT->getElementType();
7488 return (VT->getNumElements() + 1) / 2;
7490 unsigned EltNumRegs = (EltSize + 31) / 32;
7491 return EltNumRegs * VT->getNumElements();
7499 QualType FieldTy = Field->getType();
7500 NumRegs += numRegsForType(FieldTy);
7506 return (
getContext().getTypeSize(Ty) + 31) / 32;
7515 unsigned NumRegsLeft = MaxNumRegsForArgsRet;
7517 if (CC == llvm::CallingConv::AMDGPU_KERNEL) {
7518 Arg.info = classifyKernelArgumentType(Arg.type);
7557 if (numRegsForType(RetTy) <= MaxNumRegsForArgsRet)
7584 unsigned &NumRegsLeft)
const {
7585 assert(NumRegsLeft <= MaxNumRegsForArgsRet &&
"register estimate underflow");
7614 unsigned NumRegs = (Size + 31) / 32;
7615 NumRegsLeft -=
std::min(NumRegsLeft, NumRegs);
7628 if (NumRegsLeft > 0) {
7629 unsigned NumRegs = numRegsForType(Ty);
7630 if (NumRegsLeft >= NumRegs) {
7631 NumRegsLeft -= NumRegs;
7640 unsigned NumRegs = numRegsForType(Ty);
7641 NumRegsLeft -=
std::min(NumRegs, NumRegsLeft);
7651 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
7654 unsigned getOpenCLKernelCallingConv()
const override;
7657 llvm::PointerType *T,
QualType QT)
const override;
7659 LangAS getASTAllocaAddressSpace()
const override {
7664 const VarDecl *D)
const override;
7666 llvm::LLVMContext &C)
const override;
7669 llvm::Function *BlockInvokeFunc,
7674 void AMDGPUTargetCodeGenInfo::setTargetAttributes(
7677 if (!IsForDefinition)
7679 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
7683 llvm::Function *F = cast<llvm::Function>(GV);
7686 FD->
getAttr<ReqdWorkGroupSizeAttr>() :
nullptr;
7687 const auto *FlatWGS = FD->
getAttr<AMDGPUFlatWorkGroupSizeAttr>();
7688 if (ReqdWGS || FlatWGS) {
7689 unsigned Min = FlatWGS ? FlatWGS->getMin() : 0;
7690 unsigned Max = FlatWGS ? FlatWGS->getMax() : 0;
7691 if (ReqdWGS && Min == 0 && Max == 0)
7692 Min = Max = ReqdWGS->getXDim() * ReqdWGS->getYDim() * ReqdWGS->getZDim();
7695 assert(Min <= Max &&
"Min must be less than or equal Max");
7697 std::string AttrVal = llvm::utostr(Min) +
"," + llvm::utostr(Max);
7698 F->addFnAttr(
"amdgpu-flat-work-group-size", AttrVal);
7700 assert(Max == 0 &&
"Max must be zero");
7703 if (
const auto *
Attr = FD->
getAttr<AMDGPUWavesPerEUAttr>()) {
7704 unsigned Min =
Attr->getMin();
7705 unsigned Max =
Attr->getMax();
7708 assert((Max == 0 || Min <= Max) &&
"Min must be less than or equal Max");
7710 std::string AttrVal = llvm::utostr(Min);
7712 AttrVal = AttrVal +
"," + llvm::utostr(Max);
7713 F->addFnAttr(
"amdgpu-waves-per-eu", AttrVal);
7715 assert(Max == 0 &&
"Max must be zero");
7718 if (
const auto *
Attr = FD->
getAttr<AMDGPUNumSGPRAttr>()) {
7719 unsigned NumSGPR =
Attr->getNumSGPR();
7722 F->addFnAttr(
"amdgpu-num-sgpr", llvm::utostr(NumSGPR));
7725 if (
const auto *
Attr = FD->
getAttr<AMDGPUNumVGPRAttr>()) {
7726 uint32_t NumVGPR =
Attr->getNumVGPR();
7729 F->addFnAttr(
"amdgpu-num-vgpr", llvm::utostr(NumVGPR));
7733 unsigned AMDGPUTargetCodeGenInfo::getOpenCLKernelCallingConv()
const {
7734 return llvm::CallingConv::AMDGPU_KERNEL;
7742 llvm::Constant *AMDGPUTargetCodeGenInfo::getNullPointer(
7746 return llvm::ConstantPointerNull::get(PT);
7749 auto NPT = llvm::PointerType::get(PT->getElementType(),
7751 return llvm::ConstantExpr::getAddrSpaceCast(
7752 llvm::ConstantPointerNull::get(NPT), PT);
7756 AMDGPUTargetCodeGenInfo::getGlobalVarAddressSpace(
CodeGenModule &CGM,
7760 "Address space agnostic languages only");
7764 return DefaultGlobalAS;
7773 return ConstAS.getValue();
7775 return DefaultGlobalAS;
7779 AMDGPUTargetCodeGenInfo::getLLVMSyncScopeID(
SyncScope S,
7780 llvm::LLVMContext &C)
const {
7795 return C.getOrInsertSyncScopeID(Name);
7805 class SparcV8ABIInfo :
public DefaultABIInfo {
7868 class SparcV9ABIInfo :
public ABIInfo {
7889 struct CoerceBuilder {
7890 llvm::LLVMContext &Context;
7891 const llvm::DataLayout &DL;
7896 CoerceBuilder(llvm::LLVMContext &c,
const llvm::DataLayout &dl)
7897 : Context(c), DL(dl), Size(0), InReg(
false) {}
7900 void pad(uint64_t ToSize) {
7901 assert(ToSize >= Size &&
"Cannot remove elements");
7906 uint64_t Aligned = llvm::alignTo(Size, 64);
7907 if (Aligned > Size && Aligned <= ToSize) {
7908 Elems.push_back(llvm::IntegerType::get(Context, Aligned - Size));
7913 while (Size + 64 <= ToSize) {
7914 Elems.push_back(llvm::Type::getInt64Ty(Context));
7919 if (Size < ToSize) {
7920 Elems.push_back(llvm::IntegerType::get(Context, ToSize - Size));
7934 Elems.push_back(Ty);
7935 Size = Offset + Bits;
7939 void addStruct(uint64_t Offset, llvm::StructType *StrTy) {
7940 const llvm::StructLayout *Layout = DL.getStructLayout(StrTy);
7941 for (
unsigned i = 0, e = StrTy->getNumElements(); i != e; ++i) {
7942 llvm::Type *ElemTy = StrTy->getElementType(i);
7943 uint64_t ElemOffset = Offset + Layout->getElementOffsetInBits(i);
7944 switch (ElemTy->getTypeID()) {
7945 case llvm::Type::StructTyID:
7946 addStruct(ElemOffset, cast<llvm::StructType>(ElemTy));
7948 case llvm::Type::FloatTyID:
7949 addFloat(ElemOffset, ElemTy, 32);
7951 case llvm::Type::DoubleTyID:
7952 addFloat(ElemOffset, ElemTy, 64);
7954 case llvm::Type::FP128TyID:
7955 addFloat(ElemOffset, ElemTy, 128);
7957 case llvm::Type::PointerTyID:
7958 if (ElemOffset % 64 == 0) {
7960 Elems.push_back(ElemTy);
7971 bool isUsableType(llvm::StructType *Ty)
const {
7972 return llvm::makeArrayRef(Elems) == Ty->elements();
7977 if (Elems.size() == 1)
7978 return Elems.front();
7980 return llvm::StructType::get(Context, Elems);
7995 if (Size > SizeLimit)
8000 Ty = EnumTy->getDecl()->getIntegerType();
8003 if (Size < 64 && Ty->isIntegerType())
8017 llvm::StructType *StrTy = dyn_cast<llvm::StructType>(
CGT.
ConvertType(Ty));
8022 CB.addStruct(0, StrTy);
8023 CB.pad(llvm::alignTo(CB.DL.getTypeSizeInBits(StrTy), 64));
8026 llvm::Type *CoerceTy = CB.isUsableType(StrTy) ? StrTy : CB.getType();
8045 llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
8055 llvm_unreachable(
"Unsupported ABI kind for va_arg");
8087 return Builder.
CreateBitCast(ArgAddr, ArgPtrTy,
"arg.addr");
8119 llvm::IntegerType *i8 = CGF.
Int8Ty;
8120 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
8121 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
8210 class TypeStringCache {
8211 enum Status {NonRecursive, Recursive,
Incomplete, IncompleteUsed};
8215 std::string Swapped;
8218 std::map<const IdentifierInfo *, struct Entry> Map;
8219 unsigned IncompleteCount;
8220 unsigned IncompleteUsedCount;
8222 TypeStringCache() : IncompleteCount(0), IncompleteUsedCount(0) {}
8232 class FieldEncoding {
8236 FieldEncoding(
bool b, SmallStringEnc &e) : HasName(b), Enc(e.c_str()) {}
8237 StringRef str() {
return Enc; }
8238 bool operator<(
const FieldEncoding &rhs)
const {
8239 if (HasName != rhs.HasName)
return HasName;
8240 return Enc < rhs.Enc;
8244 class XCoreABIInfo :
public DefaultABIInfo {
8252 mutable TypeStringCache TSC;
8256 void emitTargetMD(
const Decl *D, llvm::GlobalValue *GV,
8276 if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
8277 AI.setCoerceToType(ArgTy);
8278 llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
8282 switch (AI.getKind()) {
8286 llvm_unreachable(
"Unsupported ABI kind for va_arg");
8288 Val =
Address(llvm::UndefValue::get(ArgPtrTy), TypeAlign);
8296 ArgSize = ArgSize.
alignTo(SlotSize);
8321 std::string StubEnc) {
8325 assert( (E.Str.empty() || E.State == Recursive) &&
8326 "Incorrectly use of addIncomplete");
8327 assert(!StubEnc.empty() &&
"Passing an empty string to addIncomplete()");
8328 E.Swapped.swap(E.Str);
8329 E.Str.swap(StubEnc);
8338 bool TypeStringCache::removeIncomplete(
const IdentifierInfo *ID) {
8341 auto I = Map.find(ID);
8342 assert(I != Map.end() &&
"Entry not present");
8343 Entry &E = I->second;
8345 E.State == IncompleteUsed) &&
8346 "Entry must be an incomplete type");
8347 bool IsRecursive =
false;
8348 if (E.State == IncompleteUsed) {
8351 --IncompleteUsedCount;
8353 if (E.Swapped.empty())
8357 E.Swapped.swap(E.Str);
8359 E.State = Recursive;
8367 void TypeStringCache::addIfComplete(
const IdentifierInfo *ID, StringRef Str,
8369 if (!ID || IncompleteUsedCount)
8372 if (IsRecursive && !E.Str.empty()) {
8373 assert(E.State==Recursive && E.Str.size() == Str.size() &&
8374 "This is not the same Recursive entry");
8380 assert(E.Str.empty() &&
"Entry already present");
8382 E.State = IsRecursive? Recursive : NonRecursive;
8391 auto I = Map.find(ID);
8394 Entry &E = I->second;
8395 if (E.State == Recursive && IncompleteCount)
8400 E.State = IncompleteUsed;
8401 ++IncompleteUsedCount;
8422 void XCoreTargetCodeGenInfo::emitTargetMD(
const Decl *D, llvm::GlobalValue *GV,
8426 llvm::LLVMContext &Ctx = CGM.
getModule().getContext();
8427 llvm::Metadata *MDVals[] = {llvm::ConstantAsMetadata::get(GV),
8428 llvm::MDString::get(Ctx, Enc.str())};
8429 llvm::NamedMDNode *MD =
8430 CGM.
getModule().getOrInsertNamedMetadata(
"xcore.typestrings");
8431 MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
8444 unsigned getOpenCLKernelCallingConv()
const override;
8452 DefaultABIInfo SPIRABI(CGM.
getTypes());
8453 SPIRABI.computeInfo(FI);
8458 unsigned SPIRTargetCodeGenInfo::getOpenCLKernelCallingConv()
const {
8459 return llvm::CallingConv::SPIR_KERNEL;
8464 TypeStringCache &TSC);
8472 TypeStringCache &TSC) {
8473 for (
const auto *Field : RD->
fields()) {
8476 Enc += Field->getName();
8478 if (Field->isBitField()) {
8480 llvm::raw_svector_ostream OS(Enc);
8481 OS << Field->getBitWidthValue(CGM.
getContext());
8484 if (!
appendType(Enc, Field->getType(), CGM, TSC))
8486 if (Field->isBitField())
8489 FE.emplace_back(!Field->getName().empty(), Enc);
8501 StringRef TypeString = TSC.lookupStr(ID);
8502 if (!TypeString.empty()) {
8508 size_t Start = Enc.size();
8516 bool IsRecursive =
false;
8523 std::string StubEnc(Enc.substr(Start).str());
8525 TSC.addIncomplete(ID, std::move(StubEnc));
8527 (void) TSC.removeIncomplete(ID);
8530 IsRecursive = TSC.removeIncomplete(ID);
8534 std::sort(FE.begin(), FE.end());
8536 unsigned E = FE.size();
8537 for (
unsigned I = 0; I != E; ++I) {
8544 TSC.addIfComplete(ID, Enc.substr(Start), IsRecursive);
8550 TypeStringCache &TSC,
8553 StringRef TypeString = TSC.lookupStr(ID);
8554 if (!TypeString.empty()) {
8559 size_t Start = Enc.size();
8568 for (
auto I = ED->enumerator_begin(), E = ED->enumerator_end(); I != E;
8570 SmallStringEnc EnumEnc;
8572 EnumEnc += I->getName();
8574 I->getInitVal().toString(EnumEnc);
8576 FE.push_back(FieldEncoding(!I->getName().empty(), EnumEnc));
8578 std::sort(FE.begin(), FE.end());
8579 unsigned E = FE.size();
8580 for (
unsigned I = 0; I != E; ++I) {
8587 TSC.addIfComplete(ID, Enc.substr(Start),
false);
8595 static const char *
const Table[]={
"",
"c:",
"r:",
"cr:",
"v:",
"cv:",
"rv:",
"crv:"};
8603 Enc += Table[Lookup];
8608 const char *EncType;
8610 case BuiltinType::Void:
8613 case BuiltinType::Bool:
8616 case BuiltinType::Char_U:
8619 case BuiltinType::UChar:
8622 case BuiltinType::SChar:
8625 case BuiltinType::UShort:
8628 case BuiltinType::Short:
8631 case BuiltinType::UInt:
8634 case BuiltinType::Int:
8637 case BuiltinType::ULong:
8640 case BuiltinType::Long:
8643 case BuiltinType::ULongLong:
8646 case BuiltinType::LongLong:
8649 case BuiltinType::Float:
8652 case BuiltinType::Double:
8655 case BuiltinType::LongDouble:
8668 TypeStringCache &TSC) {
8680 TypeStringCache &TSC, StringRef NoSizeEnc) {
8685 CAT->getSize().toStringUnsigned(Enc);
8701 TypeStringCache &TSC) {
8708 auto I = FPT->param_type_begin();
8709 auto E = FPT->param_type_end();
8718 if (FPT->isVariadic())
8721 if (FPT->isVariadic())
8735 TypeStringCache &TSC) {
8772 if (
const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
8775 return appendType(Enc, FD->getType(), CGM, TSC);
8778 if (
const VarDecl *VD = dyn_cast<VarDecl>(D)) {
8781 QualType QT = VD->getType().getCanonicalType();
8799 return getTriple().supportsCOMDAT();
8803 if (TheTargetCodeGenInfo)
8804 return *TheTargetCodeGenInfo;
8808 this->TheTargetCodeGenInfo.reset(
P);
8813 switch (Triple.getArch()) {
8815 return SetCGInfo(
new DefaultTargetCodeGenInfo(Types));
8817 case llvm::Triple::le32:
8818 return SetCGInfo(
new PNaClTargetCodeGenInfo(Types));
8819 case llvm::Triple::mips:
8820 case llvm::Triple::mipsel:
8821 if (Triple.getOS() == llvm::Triple::NaCl)
8822 return SetCGInfo(
new PNaClTargetCodeGenInfo(Types));
8823 return SetCGInfo(
new MIPSTargetCodeGenInfo(Types,
true));
8825 case llvm::Triple::mips64:
8826 case llvm::Triple::mips64el:
8827 return SetCGInfo(
new MIPSTargetCodeGenInfo(Types,
false));
8829 case llvm::Triple::avr:
8830 return SetCGInfo(
new AVRTargetCodeGenInfo(Types));
8832 case llvm::Triple::aarch64:
8833 case llvm::Triple::aarch64_be: {
8834 AArch64ABIInfo::ABIKind
Kind = AArch64ABIInfo::AAPCS;
8835 if (
getTarget().getABI() ==
"darwinpcs")
8836 Kind = AArch64ABIInfo::DarwinPCS;
8837 else if (Triple.isOSWindows())
8839 new WindowsAArch64TargetCodeGenInfo(Types, AArch64ABIInfo::Win64));
8841 return SetCGInfo(
new AArch64TargetCodeGenInfo(Types, Kind));
8844 case llvm::Triple::wasm32:
8845 case llvm::Triple::wasm64:
8846 return SetCGInfo(
new WebAssemblyTargetCodeGenInfo(Types));
8848 case llvm::Triple::arm:
8849 case llvm::Triple::armeb:
8850 case llvm::Triple::thumb:
8851 case llvm::Triple::thumbeb: {
8852 if (Triple.getOS() == llvm::Triple::Win32) {
8854 new WindowsARMTargetCodeGenInfo(Types, ARMABIInfo::AAPCS_VFP));
8857 ARMABIInfo::ABIKind
Kind = ARMABIInfo::AAPCS;
8859 if (ABIStr ==
"apcs-gnu")
8860 Kind = ARMABIInfo::APCS;
8861 else if (ABIStr ==
"aapcs16")
8862 Kind = ARMABIInfo::AAPCS16_VFP;
8863 else if (CodeGenOpts.FloatABI ==
"hard" ||
8864 (CodeGenOpts.FloatABI !=
"soft" &&
8865 (Triple.getEnvironment() == llvm::Triple::GNUEABIHF ||
8866 Triple.getEnvironment() == llvm::Triple::MuslEABIHF ||
8867 Triple.getEnvironment() == llvm::Triple::EABIHF)))
8868 Kind = ARMABIInfo::AAPCS_VFP;
8870 return SetCGInfo(
new ARMTargetCodeGenInfo(Types, Kind));
8873 case llvm::Triple::ppc:
8875 new PPC32TargetCodeGenInfo(Types, CodeGenOpts.FloatABI ==
"soft"));
8876 case llvm::Triple::ppc64:
8877 if (Triple.isOSBinFormatELF()) {
8878 PPC64_SVR4_ABIInfo::ABIKind
Kind = PPC64_SVR4_ABIInfo::ELFv1;
8880 Kind = PPC64_SVR4_ABIInfo::ELFv2;
8882 bool IsSoftFloat = CodeGenOpts.FloatABI ==
"soft";
8884 return SetCGInfo(
new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX,
8887 return SetCGInfo(
new PPC64TargetCodeGenInfo(Types));
8888 case llvm::Triple::ppc64le: {
8889 assert(Triple.isOSBinFormatELF() &&
"PPC64 LE non-ELF not supported!");
8890 PPC64_SVR4_ABIInfo::ABIKind
Kind = PPC64_SVR4_ABIInfo::ELFv2;
8892 Kind = PPC64_SVR4_ABIInfo::ELFv1;
8894 bool IsSoftFloat = CodeGenOpts.FloatABI ==
"soft";
8896 return SetCGInfo(
new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX,
8900 case llvm::Triple::nvptx:
8901 case llvm::Triple::nvptx64:
8902 return SetCGInfo(
new NVPTXTargetCodeGenInfo(Types));
8904 case llvm::Triple::msp430:
8905 return SetCGInfo(
new MSP430TargetCodeGenInfo(Types));
8907 case llvm::Triple::systemz: {
8909 return SetCGInfo(
new SystemZTargetCodeGenInfo(Types, HasVector));
8912 case llvm::Triple::tce:
8913 case llvm::Triple::tcele:
8914 return SetCGInfo(
new TCETargetCodeGenInfo(Types));
8916 case llvm::Triple::x86: {
8917 bool IsDarwinVectorABI = Triple.isOSDarwin();
8918 bool RetSmallStructInRegABI =
8919 X86_32TargetCodeGenInfo::isStructReturnInRegABI(Triple, CodeGenOpts);
8920 bool IsWin32FloatStructABI = Triple.isOSWindows() && !Triple.isOSCygMing();
8922 if (Triple.getOS() == llvm::Triple::Win32) {
8923 return SetCGInfo(
new WinX86_32TargetCodeGenInfo(
8924 Types, IsDarwinVectorABI, RetSmallStructInRegABI,
8925 IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters));
8927 return SetCGInfo(
new X86_32TargetCodeGenInfo(
8928 Types, IsDarwinVectorABI, RetSmallStructInRegABI,
8929 IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters,
8930 CodeGenOpts.FloatABI ==
"soft"));
8934 case llvm::Triple::x86_64: {
8938 ? X86AVXABILevel::AVX512
8941 switch (Triple.getOS()) {
8942 case llvm::Triple::Win32:
8943 return SetCGInfo(
new WinX86_64TargetCodeGenInfo(Types, AVXLevel));
8944 case llvm::Triple::PS4:
8945 return SetCGInfo(
new PS4TargetCodeGenInfo(Types, AVXLevel));
8947 return SetCGInfo(
new X86_64TargetCodeGenInfo(Types, AVXLevel));
8950 case llvm::Triple::hexagon:
8951 return SetCGInfo(
new HexagonTargetCodeGenInfo(Types));
8952 case llvm::Triple::lanai:
8953 return SetCGInfo(
new LanaiTargetCodeGenInfo(Types));
8954 case llvm::Triple::r600:
8955 return SetCGInfo(
new AMDGPUTargetCodeGenInfo(Types));
8956 case llvm::Triple::amdgcn:
8957 return SetCGInfo(
new AMDGPUTargetCodeGenInfo(Types));
8958 case llvm::Triple::sparc:
8959 return SetCGInfo(
new SparcV8TargetCodeGenInfo(Types));
8960 case llvm::Triple::sparcv9:
8961 return SetCGInfo(
new SparcV9TargetCodeGenInfo(Types));
8962 case llvm::Triple::xcore:
8963 return SetCGInfo(
new XCoreTargetCodeGenInfo(Types));
8964 case llvm::Triple::spir:
8965 case llvm::Triple::spir64:
8966 return SetCGInfo(
new SPIRTargetCodeGenInfo(Types));
8977 llvm::Function *Invoke,
8979 auto *InvokeFT = Invoke->getFunctionType();
8981 for (
auto &
P : InvokeFT->params())
8982 ArgTys.push_back(
P);
8984 std::string Name = Invoke->getName().str() +
"_kernel";
8985 auto *FT = llvm::FunctionType::get(llvm::Type::getVoidTy(C), ArgTys,
false);
8988 auto IP = CGF.
Builder.saveIP();
8991 Builder.SetInsertPoint(BB);
8993 for (
auto &A : F->args())
8995 Builder.CreateCall(Invoke, Args);
8996 Builder.CreateRetVoid();
8997 Builder.restoreIP(IP);
9009 llvm::Function *AMDGPUTargetCodeGenInfo::createEnqueuedBlockKernel(
9015 auto *BlockTy = BlockLiteral->getType()->getPointerElementType();
9016 auto *InvokeFT = Invoke->getFunctionType();
9025 ArgTys.push_back(BlockTy);
9026 ArgTypeNames.push_back(llvm::MDString::get(C,
"__block_literal"));
9027 AddressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(0)));
9028 ArgBaseTypeNames.push_back(llvm::MDString::get(C,
"__block_literal"));
9029 ArgTypeQuals.push_back(llvm::MDString::get(C,
""));
9030 AccessQuals.push_back(llvm::MDString::get(C,
"none"));
9031 ArgNames.push_back(llvm::MDString::get(C,
"block_literal"));
9032 for (
unsigned I = 1, E = InvokeFT->getNumParams(); I < E; ++I) {
9033 ArgTys.push_back(InvokeFT->getParamType(I));
9034 ArgTypeNames.push_back(llvm::MDString::get(C,
"void*"));
9035 AddressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(3)));
9036 AccessQuals.push_back(llvm::MDString::get(C,
"none"));
9037 ArgBaseTypeNames.push_back(llvm::MDString::get(C,
"void*"));
9038 ArgTypeQuals.push_back(llvm::MDString::get(C,
""));
9040 llvm::MDString::get(C, (Twine(
"local_arg") + Twine(I)).str()));
9042 std::string Name = Invoke->getName().str() +
"_kernel";
9043 auto *FT = llvm::FunctionType::get(llvm::Type::getVoidTy(C), ArgTys,
false);
9046 F->addFnAttr(
"enqueued-block");
9047 auto IP = CGF.
Builder.saveIP();
9049 Builder.SetInsertPoint(BB);
9050 unsigned BlockAlign = CGF.
CGM.
getDataLayout().getPrefTypeAlignment(BlockTy);
9051 auto *BlockPtr = Builder.CreateAlloca(BlockTy,
nullptr);
9052 BlockPtr->setAlignment(BlockAlign);
9053 Builder.CreateAlignedStore(F->arg_begin(), BlockPtr, BlockAlign);
9054 auto *Cast = Builder.CreatePointerCast(BlockPtr, InvokeFT->getParamType(0));
9056 Args.push_back(Cast);
9057 for (
auto I = F->arg_begin() + 1, E = F->arg_end(); I != E; ++I)
9059 Builder.CreateCall(Invoke, Args);
9060 Builder.CreateRetVoid();
9061 Builder.restoreIP(IP);
9063 F->setMetadata(
"kernel_arg_addr_space", llvm::MDNode::get(C, AddressQuals));
9064 F->setMetadata(
"kernel_arg_access_qual", llvm::MDNode::get(C, AccessQuals));
9065 F->setMetadata(
"kernel_arg_type", llvm::MDNode::get(C, ArgTypeNames));
9066 F->setMetadata(
"kernel_arg_base_type",
9067 llvm::MDNode::get(C, ArgBaseTypeNames));
9068 F->setMetadata(
"kernel_arg_type_qual", llvm::MDNode::get(C, ArgTypeQuals));
9070 F->setMetadata(
"kernel_arg_name", llvm::MDNode::get(C, ArgNames));
const llvm::DataLayout & getDataLayout() const
CGCXXABI & getCXXABI() const
Ignore - Ignore the argument (treat as void).
bool isFloatingPoint() const
CharUnits alignTo(const CharUnits &Align) const
alignTo - Returns the next integer (mod 2**64) that is greater than or equal to this quantity and is ...
An instance of this class is created to represent a function declaration or definition.
void setEffectiveCallingConvention(unsigned Value)
External linkage, which indicates that the entity can be referred to from other translation units...
static ABIArgInfo getExtend(llvm::Type *T=nullptr)
static bool addFieldSizes(ASTContext &Context, const RecordDecl *RD, uint64_t &Size)
if(T->getSizeExpr()) TRY_TO(TraverseStmt(T -> getSizeExpr()))
PointerType - C99 6.7.5.1 - Pointer Declarators.
QualType getPointeeType() const
A (possibly-)qualified type.
bool isBlockPointerType() const
CodeGenTypes & getTypes()
bool isMemberPointerType() const
llvm::Type * ConvertTypeForMem(QualType T)
const CodeGenOptions & getCodeGenOpts() const
bool isUnsignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is unsigned or an enumeration types whose underlying ...
bool isHomogeneousAggregate(QualType Ty, const Type *&Base, uint64_t &Members) const
isHomogeneousAggregate - Return true if a type is an ELFv2 homogeneous aggregate. ...
static ABIArgInfo classifyType(CodeGenModule &CGM, CanQualType type, bool forReturn)
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D...
FunctionType - C99 6.7.5.3 - Function Declarators.
llvm::ConstantInt * getSize(CharUnits N)
const llvm::Triple & getTriple() const
Returns the target triple of the primary target.
CharUnits getBaseClassOffset(const CXXRecordDecl *Base) const
getBaseClassOffset - Get the offset, in chars, for the given base class.
bool isRealFloatingType() const
Floating point categories.
Extend - Valid only for integer argument types.
bool isRecordType() const
Decl - This represents one declaration (or definition), e.g.
static bool appendEnumType(SmallStringEnc &Enc, const EnumType *ET, TypeStringCache &TSC, const IdentifierInfo *ID)
Appends enum types to Enc and adds the encoding to the cache.
CharUnits getPointerSize() const
const RecordType * getAsStructureType() const
Direct - Pass the argument directly using the normal converted LLVM type, or by coercing to another s...
const llvm::DataLayout & getDataLayout() const
static const Type * isSingleElementStruct(QualType T, ASTContext &Context)
isSingleElementStruct - Determine if a structure is a "single element struct", i.e.
The base class of the type hierarchy.
const ABIInfo & getABIInfo() const
getABIInfo() - Returns ABI info helper for the target.
Represents an array type, per C99 6.7.5.2 - Array Declarators.
bool isRestrictQualified() const
Determine whether this type is restrict-qualified.
bool isZero() const
isZero - Test whether the quantity equals zero.
const TargetInfo & getTargetInfo() const
static bool appendType(SmallStringEnc &Enc, QualType QType, const CodeGen::CodeGenModule &CGM, TypeStringCache &TSC)
Handles the type's qualifier before dispatching a call to handle specific type encodings.
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
virtual ~TargetCodeGenInfo()
void setCanBeFlattened(bool Flatten)
QualType getElementType() const
const RecordType * getAsUnionType() const
NOTE: getAs*ArrayType are methods on ASTContext.
unsigned getTypeAlign(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in bits.
virtual bool shouldSignExtUnsignedType(QualType Ty) const
ASTContext & getContext() const
VarDecl - An instance of this class is created to represent a variable declaration or definition...
LangAS getLangASFromTargetAS(unsigned TargetAS)
bool isEnumeralType() const
const T * getAs() const
Member-template getAs<specific type>'.
bool hasPointerRepresentation() const
Whether this type is represented natively as a pointer.
bool supportsCOMDAT() const
LangAS
Defines the address space values used by the address space qualifier of QualType. ...
llvm::LLVMContext & getVMContext() const
void setCoerceToType(llvm::Type *T)
Address CreateConstInBoundsByteGEP(Address Addr, CharUnits Offset, const llvm::Twine &Name="")
Given a pointer to i8, adjust it by a given constant offset.
llvm::Value * getPointer() const
llvm::Type * ConvertTypeForMem(QualType T)
ConvertTypeForMem - Convert type T into a llvm::Type.
static ABIArgInfo getIgnore()
static bool isAggregateTypeForABI(QualType T)
bool hasFloatingRepresentation() const
Determine whether this type has a floating-point representation of some sort, e.g., it is a floating-point type or a vector thereof.
virtual unsigned getOpenCLKernelCallingConv() const
Get LLVM calling convention for OpenCL kernel.
RecordDecl - Represents a struct/union/class.
uint64_t getPointerWidth(unsigned AddrSpace) const
Return the width of pointers on this target, for the specified address space.
static ABIArgInfo coerceToIntArray(QualType Ty, ASTContext &Context, llvm::LLVMContext &LLVMContext)
CodeGen::CodeGenTypes & CGT
One of these records is kept for each identifier that is lexed.
Address getAddress() const
Indirect - Pass the argument indirectly via a hidden pointer with the specified alignment (0 indicate...
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
ABIArgInfo classifyArgumentType(CodeGenModule &CGM, CanQualType type)
Classify the rules for how to pass a particular type.
llvm::IntegerType * Int64Ty
RecordDecl * getDefinition() const
getDefinition - Returns the RecordDecl that actually defines this struct/union/class.
static llvm::Type * GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi, const llvm::DataLayout &TD)
GetX86_64ByValArgumentPair - Given a high and low type that can ideally be used as elements of a two ...
static CharUnits getTypeAllocSize(CodeGenModule &CGM, llvm::Type *type)
field_range fields() const
static Address EmitX86_64VAArgFromMemory(CodeGenFunction &CGF, Address VAListAddr, QualType Ty)
FieldDecl - An instance of this class is created by Sema::ActOnField to represent a member of a struc...
static ABIArgInfo getExtendInReg(llvm::Type *T=nullptr)
ABIArgInfo classifyReturnType(CodeGenModule &CGM, CanQualType type)
Classify the rules for how to return a particular type.
bool isReferenceType() const
bool isSpecificBuiltinType(unsigned K) const
Test for a particular builtin type.
static CharUnits Zero()
Zero - Construct a CharUnits quantity of zero.
static bool occupiesMoreThan(CodeGenTypes &cgt, ArrayRef< llvm::Type *> scalarTypes, unsigned maxAllRegisters)
Does the given lowering require more than the given number of registers when expanded?
ABIInfo(CodeGen::CodeGenTypes &cgt)
bool isIntegralOrEnumerationType() const
Determine whether this type is an integral or enumeration type.
static ABIArgInfo getIndirectInReg(CharUnits Alignment, bool ByVal=true, bool Realign=false)
virtual StringRef getABI() const
Get the ABI currently in use.
static ABIArgInfo getDirect(llvm::Type *T=nullptr, unsigned Offset=0, llvm::Type *Padding=nullptr, bool CanBeFlattened=true)
static bool hasScalarEvaluationKind(QualType T)
bool getHasRegParm() const
bool isBitField() const
Determines whether this field is a bitfield.
static ABIArgInfo getExpandWithPadding(bool PaddingInReg, llvm::Type *Padding)
static bool appendRecordType(SmallStringEnc &Enc, const RecordType *RT, const CodeGen::CodeGenModule &CGM, TypeStringCache &TSC, const IdentifierInfo *ID)
Appends structure and union types to Enc and adds encoding to cache.
Address CreateElementBitCast(Address Addr, llvm::Type *Ty, const llvm::Twine &Name="")
Cast the element type of the given address to a different type, preserving information like the align...
CharUnits - This is an opaque type for sizes expressed in character units.
const ArrayType * getAsArrayTypeUnsafe() const
A variant of getAs<> for array types which silently discards qualifiers from the outermost type...
CharUnits getAlignment() const
Return the alignment of this pointer.
static void rewriteInputConstraintReferences(unsigned FirstIn, unsigned NumNewOuts, std::string &AsmString)
Rewrite input constraint references after adding some output constraints.
static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty)
bool isVolatileQualified() const
Determine whether this type is volatile-qualified.
const_arg_iterator arg_begin() const
llvm::CallInst * CreateMemCpy(Address Dest, Address Src, llvm::Value *Size, bool IsVolatile=false)
ABIArgInfo - Helper class to encapsulate information about how a specific C type should be passed to or returned from a function.
const Type * getTypePtr() const
Retrieves a pointer to the underlying (unqualified) type.
field_iterator field_begin() const
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
static bool BitsContainNoUserData(QualType Ty, unsigned StartBit, unsigned EndBit, ASTContext &Context)
BitsContainNoUserData - Return true if the specified [start,end) bit range is known to either be off the end of the specified type or being in alignment padding.
static ABIArgInfo getExpand()
CharUnits getPointerAlign() const
bool isScalarType() const
llvm::AllocaInst * CreateTempAlloca(llvm::Type *Ty, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr)
CreateTempAlloca - This creates an alloca and inserts it into the entry block if ArraySize is nullptr...
const T * getTypePtr() const
Retrieve the underlying type pointer, which refers to a canonical type.
static QualType useFirstFieldIfTransparentUnion(QualType Ty)
Pass transparent unions as if they were the type of the first element.
virtual llvm::Value * performAddrSpaceCast(CodeGen::CodeGenFunction &CGF, llvm::Value *V, LangAS SrcAddr, LangAS DestAddr, llvm::Type *DestTy, bool IsNonNull=false) const
Perform address space cast of an expression of pointer type.
bool isTypeConstant(QualType QTy, bool ExcludeCtorDtor)
isTypeConstant - Determine whether an object of this type can be emitted as a constant.
Represents a K&R-style 'int foo()' function, which has no information available about its arguments...
static bool ContainsFloatAtOffset(llvm::Type *IRType, unsigned IROffset, const llvm::DataLayout &TD)
ContainsFloatAtOffset - Return true if the specified LLVM IR type has a float member at the specified...
CanQualType getReturnType() const
bool isPromotableIntegerType() const
More type predicates useful for type checking/promotion.
static CharUnits One()
One - Construct a CharUnits quantity of one.
ASTContext & getContext() const
Represents a prototype with parameter type info, e.g.
virtual CodeGen::Address EmitMSVAArg(CodeGen::CodeGenFunction &CGF, CodeGen::Address VAListAddr, QualType Ty) const
Emit the target dependent code to load a value of the given type from the Microsoft ABI va_list pointed to by VAListAddr.
const TargetCodeGenInfo & getTargetCodeGenInfo()
bool isComplexType() const
isComplexType() does not include complex integers (a GCC extension).
static bool extractFieldType(SmallVectorImpl< FieldEncoding > &FE, const RecordDecl *RD, const CodeGen::CodeGenModule &CGM, TypeStringCache &TSC)
Helper function for appendRecordType().
virtual void getDependentLibraryOption(llvm::StringRef Lib, llvm::SmallString< 24 > &Opt) const
Gets the linker options necessary to link a dependent library on this platform.
static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder, llvm::Value *Array, llvm::Value *Value, unsigned FirstIndex, unsigned LastIndex)
static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context)
void setAddress(Address address)
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
virtual void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M, ForDefinition_t IsForDefinition) const
setTargetAttributes - Provides a convenient hook to handle extra target-specific attributes for the g...
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
const llvm::fltSemantics & getLongDoubleFormat() const
Exposes information about the current target.
CodeGen::ABIArgInfo getNaturalAlignIndirect(QualType Ty, bool ByRef=true, bool Realign=false, llvm::Type *Padding=nullptr) const
A convenience method to return an indirect ABIArgInfo with an expected alignment equal to the ABI alignment of the given type.
QualType getElementType() const
QualType getVectorType(QualType VectorType, unsigned NumElts, VectorType::VectorKind VecKind) const
Return the unique reference to a vector type of the specified element type and size.
const FunctionProtoType * T
const IdentifierInfo * getBaseTypeIdentifier() const
Retrieves a pointer to the name of the base type.
static bool appendBuiltinType(SmallStringEnc &Enc, const BuiltinType *BT)
Appends built-in types to Enc.
field_iterator field_end() const
llvm::PointerType * getType() const
Return the type of the pointer value.
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
bool isAnyComplexType() const
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
static bool getTypeString(SmallStringEnc &Enc, const Decl *D, CodeGen::CodeGenModule &CGM, TypeStringCache &TSC)
The XCore ABI includes a type information section that communicates symbol type information to the linker.
unsigned getFieldCount() const
getFieldCount - Get the number of fields in the layout.
EnumDecl * getDefinition() const
char __ovld __cnfn min(char x, char y)
Returns y if y < x, otherwise it returns x.
llvm::CallingConv::ID RuntimeCC
llvm::LLVMContext & getLLVMContext()
bool isSignedIntegerType() const
Return true if this is an integer type that is signed, according to C99 6.2.5p4 [char, signed char, short, int, long..], or an enum decl which has a signed representation.
llvm::IntegerType * Int32Ty
CodeGen::ABIArgInfo getNaturalAlignIndirectInReg(QualType Ty, bool Realign=false) const
const CodeGenOptions & getCodeGenOpts() const
bool canHaveCoerceToType() const
CharUnits alignmentOfArrayElement(CharUnits elementSize) const
Given that this is the alignment of the first element of an array, return the minimum alignment of any element in the array.
bool getIndirectByVal() const
static Address emitVoidPtrDirectVAArg(CodeGenFunction &CGF, Address VAListAddr, llvm::Type *DirectTy, CharUnits DirectSize, CharUnits DirectAlign, CharUnits SlotSize, bool AllowHigherAlign)
Emit va_arg for a platform using the common void* representation, where arguments are simply emitted ...
Represents a GCC generic vector type.
ArraySizeModifier getSizeModifier() const
virtual unsigned getSizeOfUnwindException() const
Determines the size of struct _Unwind_Exception on this platform, in 8-bit units. ...
Implements C++ ABI-specific semantic analysis functions.
const TargetInfo & getTarget() const
const LangOptions & getLangOpts() const
ASTContext & getContext() const
bool isNull() const
Return true if this QualType doesn't point to a type yet.
virtual llvm::Constant * getNullPointer(const CodeGen::CodeGenModule &CGM, llvm::PointerType *T, QualType QT) const
Get target specific null pointer.
CallingConv
CallingConv - Specifies the calling convention that a function uses.
bool isConstQualified() const
Determine whether this type is const-qualified.
The l-value was considered opaque, so the alignment was determined from a type.
RecordDecl * getDecl() const
Pass it as a pointer to temporary memory.
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
bool isStructureOrClassType() const
static void appendQualifier(SmallStringEnc &Enc, QualType QT)
Appends type's qualifier to Enc.
static Address emitMergePHI(CodeGenFunction &CGF, Address Addr1, llvm::BasicBlock *Block1, Address Addr2, llvm::BasicBlock *Block2, const llvm::Twine &Name="")
static bool isEmptyField(ASTContext &Context, const FieldDecl *FD, bool AllowArrays)
isEmptyField - Return true iff the field is "empty", that is, it is an unnamed bit-field or an (array of) empty record(s).
Address CreateBitCast(Address Addr, llvm::Type *Ty, const llvm::Twine &Name="")
QualType getCanonicalType() const
bool isBuiltinType() const
Helper methods to distinguish type categories.
QualType getReturnType() const
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of enums...
bool isSRetAfterThis() const
LangAS getAddressSpace() const
Return the address space of this type.
unsigned getRegParm() const
const TargetInfo & getTarget() const
bool isUnnamedBitfield() const
Determines whether this is an unnamed bitfield.
static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays)
isEmptyRecord - Return true iff a structure contains only empty fields.
static bool appendFunctionType(SmallStringEnc &Enc, const FunctionType *FT, const CodeGen::CodeGenModule &CGM, TypeStringCache &TSC)
Appends a function encoding to Enc, calling appendType for the return type and the arguments...
SyncScope
Defines synch scope values used internally by clang.
const llvm::DataLayout & getDataLayout() const
void setArgStruct(llvm::StructType *Ty, CharUnits Align)
virtual void computeInfo(CodeGen::CGFunctionInfo &FI) const =0
const ConstantArrayType * getAsConstantArrayType(QualType T) const
const_arg_iterator arg_end() const
CoerceAndExpand - Only valid for aggregate argument types.
bool isMemberFunctionPointerType() const
llvm::LLVMContext & getLLVMContext()
bool operator<(DeclarationName LHS, DeclarationName RHS)
Ordering on two declaration names.
bool isTargetAddressSpace(LangAS AS)
EnumDecl * getDecl() const
bool isVectorType() const
TargetCodeGenInfo - This class organizes various target-specific code-generation issues, like target-specific attributes, builtins and so on.
InAlloca - Pass the argument directly using the LLVM inalloca attribute.
X86AVXABILevel
The AVX ABI level for X86 targets.
llvm::CallingConv::ID getRuntimeCC() const
Return the calling convention to use for system runtime functions.
bool hasFlexibleArrayMember() const
static llvm::Value * emitRoundPointerUpToAlignment(CodeGenFunction &CGF, llvm::Value *Ptr, CharUnits Align)
CanProxy< U > getAs() const
Retrieve a canonical type pointer with a different static type, upcasting or downcasting as needed...
std::pair< CharUnits, CharUnits > getTypeInfoInChars(const Type *T) const
llvm::Type * getPaddingType() const
llvm::CallingConv::ID BuiltinCC
StringRef getName() const
Return the actual identifier string.
const TargetInfo & getTarget() const
virtual CodeGen::Address EmitVAArg(CodeGen::CodeGenFunction &CGF, CodeGen::Address VAListAddr, QualType Ty) const =0
EmitVAArg - Emit the target dependent code to load a value of the given type from the va_list pointed to by VAListAddr.
CGFunctionInfo - Class to encapsulate the information about a function definition.
This class organizes the cross-function state that is used while generating LLVM code.
Dataflow Directional Tag Classes.
virtual LangAS getGlobalVarAddressSpace(CodeGenModule &CGM, const VarDecl *D) const
Get target favored AST address space of a global variable for languages other than OpenCL and CUDA...
A refining implementation of ABIInfo for targets that support swiftcall.
static bool addBaseAndFieldSizes(ASTContext &Context, const CXXRecordDecl *RD, uint64_t &Size)
virtual llvm::Function * createEnqueuedBlockKernel(CodeGenFunction &CGF, llvm::Function *BlockInvokeFunc, llvm::Value *BlockLiteral) const
Create an OpenCL kernel for an enqueued block.
static ABIArgInfo getDirectInReg(llvm::Type *T=nullptr)
Address CreateStructGEP(Address Addr, unsigned Index, CharUnits Offset, const llvm::Twine &Name="")
virtual bool isHomogeneousAggregateSmallEnough(const Type *Base, uint64_t Members) const
llvm::LoadInst * CreateAlignedLoad(llvm::Value *Addr, CharUnits Align, const llvm::Twine &Name="")
static bool appendArrayType(SmallStringEnc &Enc, QualType QT, const ArrayType *AT, const CodeGen::CodeGenModule &CGM, TypeStringCache &TSC, StringRef NoSizeEnc)
Appends array encoding to Enc before calling appendType for the element.
std::unique_ptr< DiagnosticConsumer > create(StringRef OutputFile, DiagnosticOptions *Diags, bool MergeChildRecords=false)
Returns a DiagnosticConsumer that serializes diagnostics to a bitcode file.
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
llvm::IntegerType * IntPtrTy
EnumDecl - Represents an enum.
virtual bool isNoProtoCallVariadic(const CodeGen::CallArgList &args, const FunctionNoProtoType *fnType) const
Determine whether a call to an unprototyped function under the given calling convention should use the variadic convention or the non-variadic convention.
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
bool isAggregateType() const
Determines whether the type is a C++ aggregate type or C aggregate or union type. ...
llvm::Module & getModule() const
virtual bool isLegalVectorTypeForSwift(CharUnits totalSize, llvm::Type *eltTy, unsigned elts) const
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
specific_decl_iterator - Iterates over a subrange of declarations stored in a DeclContext, providing only those that are of type SpecificDecl (or a class derived from it).
unsigned getIntWidth(QualType T) const
virtual llvm::Optional< LangAS > getConstantAddressSpace() const
Return an AST address space which can be used opportunistically for constant global memory...
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of structs/unions/classes.
Complex values, per C99 6.2.5p11.
Pass it using the normal C aggregate rules for the ABI, potentially introducing extra copies and pass...
Address CreateConstArrayGEP(Address Addr, uint64_t Index, CharUnits EltSize, const llvm::Twine &Name="")
Given addr = [n x T]* ...
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
Implements C++ ABI-specific code generation functions.
llvm::Type * getElementType() const
Return the type of the values stored in this address.
This class organizes the cross-module state that is used while lowering AST types to LLVM types...
llvm::PointerType * Int8PtrTy
CodeGen::CGCXXABI & getCXXABI() const
CodeGenOptions - Track various options which control how the code is optimized and passed to the back...
Expand - Only valid for aggregate argument types.
Internal linkage, which indicates that the entity can be referred to from within the translation unit...
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
static bool isArgInAlloca(const ABIArgInfo &Info)
static ABIArgInfo getInAlloca(unsigned FieldIndex)
ABIArgInfo & getReturnInfo()
Represents a base class of a C++ class.
char __ovld __cnfn max(char x, char y)
Returns y if x < y, otherwise it returns x.
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
ASTContext & getContext() const
Pass it on the stack using its defined layout.
static CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT, CGCXXABI &CXXABI)
CanQualType getCanonicalType(QualType T) const
Return the canonical (structural) type corresponding to the specified potentially non-canonical type ...
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
bool isMultipleOf(CharUnits N) const
Test whether this is a multiple of the other value.
int64_t toBits(CharUnits CharSize) const
Convert a size in characters to a size in bits.
virtual llvm::SyncScope::ID getLLVMSyncScopeID(SyncScope S, llvm::LLVMContext &C) const
Get the syncscope used in LLVM IR.
CallingConv getCallConv() const
unsigned getCallingConvention() const
getCallingConvention - Return the user specified calling convention, which has been translated into a...
Address CreateConstByteGEP(Address Addr, CharUnits Offset, const llvm::Twine &Name="")
Represents a C++ struct/union/class.
void EmitBranch(llvm::BasicBlock *Block)
EmitBranch - Emit a branch to the specified basic block from the current insert block, taking care to avoid creation of branches from dummy blocks.
TypeInfo getTypeInfo(const Type *T) const
Get the size and alignment of the specified complete type in bits.
llvm::Type * ConvertType(QualType T)
virtual RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const =0
Returns how an argument of the given record type should be passed.
QualType getPointerType(QualType T) const
Return the uniqued reference to the type for a pointer to the specified type.
This class is used for builtin types like 'int'.
static Address emitVoidPtrVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType ValueTy, bool IsIndirect, std::pair< CharUnits, CharUnits > ValueInfo, CharUnits SlotSizeAndAlign, bool AllowHigherAlign)
Emit va_arg for a platform using the common void* representation, where arguments are simply emitted ...
ABIInfo - Target specific hooks for defining how a type should be passed or returned from functions...
static bool appendPointerType(SmallStringEnc &Enc, const PointerType *PT, const CodeGen::CodeGenModule &CGM, TypeStringCache &TSC)
Appends a pointer encoding to Enc before calling appendType for the pointee.
uint64_t getTargetNullPointerValue(QualType QT) const
Get target-dependent integer value for null pointer which is used for constant folding.
virtual bool isHomogeneousAggregateBaseType(QualType Ty) const
bool isPointerType() const
unsigned getNumRequiredArgs() const
unsigned getDirectOffset() const
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
llvm::StoreInst * CreateAlignedStore(llvm::Value *Val, llvm::Value *Addr, CharUnits Align, bool IsVolatile=false)
void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI)
bool isFloatingType() const
LValue - This represents an lvalue references.
llvm::Type * getCoerceToType() const
void setInAllocaSRet(bool SRet)
unsigned getTargetAddressSpace(QualType T) const
RecordArgABI
Specify how one should pass an argument of a record type.
Address CreatePointerBitCastOrAddrSpaceCast(Address Addr, llvm::Type *Ty, const llvm::Twine &Name="")
static bool isIntegerLikeType(QualType Ty, ASTContext &Context, llvm::LLVMContext &VMContext)
static bool isSSEVectorType(ASTContext &Context, QualType Ty)
CallArgList - Type for representing both the value and type of arguments in a call.
Address CreateMemTemp(QualType T, const Twine &Name="tmp", bool CastToDefaultAddrSpace=true)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment...
const LangOptions & getLangOpts() const
static bool PPC64_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, llvm::Value *Address)
Represents the canonical version of C arrays with a specified constant size.
bool getIndirectRealign() const
static ABIArgInfo getIndirect(CharUnits Alignment, bool ByVal=true, bool Realign=false, llvm::Type *Padding=nullptr)
Attr - This represents one attribute.
Attempt to be ABI-compatible with code generated by Clang 3.8.x (SVN r257626).
QualType getIntTypeForBitwidth(unsigned DestWidth, unsigned Signed) const
getIntTypeForBitwidth - sets integer QualTy according to specified details: bitwidth, signed/unsigned.
static OMPLinearClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, OpenMPLinearClauseKind Modifier, SourceLocation ModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef< Expr *> VL, ArrayRef< Expr *> PL, ArrayRef< Expr *> IL, Expr *Step, Expr *CalcStep, Stmt *PreInit, Expr *PostUpdate)
Creates clause with a list of variables VL and a linear step Step.
const CodeGenOptions & getCodeGenOpts() const