25 #include "llvm/ADT/StringExtras.h" 26 #include "llvm/ADT/StringSwitch.h" 27 #include "llvm/ADT/Triple.h" 28 #include "llvm/ADT/Twine.h" 29 #include "llvm/IR/DataLayout.h" 30 #include "llvm/IR/Type.h" 31 #include "llvm/Support/raw_ostream.h" 34 using namespace clang;
35 using namespace CodeGen;
53 llvm::LLVMContext &LLVMContext) {
57 llvm::Type *IntType = llvm::Type::getIntNTy(LLVMContext, Alignment);
58 const uint64_t NumElements = (Size + Alignment - 1) / Alignment;
68 for (
unsigned I = FirstIndex; I <= LastIndex; ++I) {
70 Builder.CreateConstInBoundsGEP1_32(Builder.getInt8Ty(), Array, I);
84 ByRef, Realign, Padding);
115 unsigned maxAllRegisters) {
116 unsigned intCount = 0, fpCount = 0;
118 if (
type->isPointerTy()) {
120 }
else if (
auto intTy = dyn_cast<llvm::IntegerType>(
type)) {
122 intCount += (intTy->getBitWidth() + ptrWidth - 1) / ptrWidth;
124 assert(
type->isVectorTy() ||
type->isFloatingPointTy());
129 return (intCount + fpCount > maxAllRegisters);
134 unsigned numElts)
const {
161 if (UD->
hasAttr<TransparentUnionAttr>()) {
162 assert(!UD->
field_empty() &&
"sema created an empty transparent union");
200 uint64_t Members)
const {
209 raw_ostream &OS = llvm::errs();
210 OS <<
"(ABIArgInfo Kind=";
213 OS <<
"Direct Type=";
226 OS <<
"InAlloca Offset=" << getInAllocaFieldIndex();
229 OS <<
"Indirect Align=" << getIndirectAlign().getQuantity()
230 <<
" ByVal=" << getIndirectByVal()
231 <<
" Realign=" << getIndirectRealign();
236 case CoerceAndExpand:
237 OS <<
"CoerceAndExpand Type=";
238 getCoerceAndExpandType()->print(OS);
251 PtrAsInt = CGF.
Builder.CreateAdd(PtrAsInt,
253 PtrAsInt = CGF.
Builder.CreateAnd(PtrAsInt,
255 PtrAsInt = CGF.
Builder.CreateIntToPtr(PtrAsInt,
257 Ptr->getName() +
".aligned");
281 bool AllowHigherAlign) {
291 if (AllowHigherAlign && DirectAlign > SlotSize) {
308 !DirectTy->isStructTy()) {
331 std::pair<CharUnits, CharUnits> ValueInfo,
333 bool AllowHigherAlign) {
340 DirectSize = ValueInfo.first;
341 DirectAlign = ValueInfo.second;
347 DirectTy = DirectTy->getPointerTo(0);
350 DirectSize, DirectAlign,
363 Address Addr1, llvm::BasicBlock *Block1,
364 Address Addr2, llvm::BasicBlock *Block2,
365 const llvm::Twine &Name =
"") {
367 llvm::PHINode *PHI = CGF.
Builder.CreatePHI(Addr1.
getType(), 2, Name);
418 return llvm::CallingConv::SPIR_KERNEL;
422 llvm::PointerType *
T,
QualType QT)
const {
423 return llvm::ConstantPointerNull::get(T);
430 "Address space agnostic languages only");
439 if (
auto *C = dyn_cast<llvm::Constant>(Src))
440 return performAddrSpaceCast(CGF.
CGM, C, SrcAddr, DestAddr, DestTy);
450 return llvm::ConstantExpr::getPointerCast(Src, DestTy);
455 return C.getOrInsertSyncScopeID(
"");
473 if (AT->getSize() == 0)
475 FT = AT->getElementType();
486 if (isa<CXXRecordDecl>(RT->
getDecl()))
504 if (
const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
505 for (
const auto &I : CXXRD->bases())
509 for (
const auto *I : RD->
fields())
532 const Type *Found =
nullptr;
535 if (
const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
536 for (
const auto &I : CXXRD->bases()) {
554 for (
const auto *FD : RD->
fields()) {
568 if (AT->getSize().getZExtValue() != 1)
570 FT = AT->getElementType();
606 "Unexpected PaddingType seen in arginfo in generic VAArg emitter!");
609 "Unexpected IndirectRealign seen in arginfo in generic VAArg emitter!");
618 return Address(Addr, TyAlignForABI);
621 "Unexpected ArgInfo Kind in generic VAArg emitter!");
624 "Unexpected InReg seen in arginfo in generic VAArg emitter!");
626 "Unexpected PaddingType seen in arginfo in generic VAArg emitter!");
628 "Unexpected DirectOffset seen in arginfo in generic VAArg emitter!");
630 "Unexpected CoerceToType seen in arginfo in generic VAArg emitter!");
643 class DefaultABIInfo :
public ABIInfo {
654 I.info = classifyArgumentType(I.type);
683 Ty = EnumTy->getDecl()->getIntegerType();
698 RetTy = EnumTy->getDecl()->getIntegerType();
710 class WebAssemblyABIInfo final :
public DefaultABIInfo {
713 : DefaultABIInfo(CGT) {}
726 Arg.info = classifyArgumentType(Arg.type);
797 class PNaClABIInfo :
public ABIInfo {
842 Ty = EnumTy->getDecl()->getIntegerType();
862 RetTy = EnumTy->getDecl()->getIntegerType();
871 return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
872 cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
873 IRType->getScalarSizeInBits() != 64;
877 StringRef Constraint,
879 bool IsMMXCons = llvm::StringSwitch<bool>(Constraint)
880 .Cases(
"y",
"&y",
"^Ym",
true)
882 if (IsMMXCons && Ty->isVectorTy()) {
883 if (cast<llvm::VectorType>(Ty)->getBitWidth() != 64) {
899 if (BT->isFloatingPoint() && BT->getKind() != BuiltinType::Half) {
900 if (BT->getKind() == BuiltinType::LongDouble) {
902 &llvm::APFloat::x87DoubleExtended())
911 if (VecSize == 128 || VecSize == 256 || VecSize == 512)
/// Returns true if an aggregate has few enough members to be passed in
/// registers under the x86 vectorcall convention (at most four).
static bool isX86VectorCallAggregateSmallEnough(uint64_t NumMembers) {
  constexpr uint64_t MaxVectorCallMembers = 4;
  return NumMembers <= MaxVectorCallMembers;
}
937 CCState(
unsigned CC) : CC(CC), FreeRegs(0), FreeSSERegs(0) {}
941 unsigned FreeSSERegs;
946 VectorcallMaxParamNumAsReg = 6
956 static const unsigned MinABIStackAlignInBytes = 4;
958 bool IsDarwinVectorABI;
959 bool IsRetSmallStructInRegABI;
960 bool IsWin32StructABI;
963 unsigned DefaultNumRegisterParameters;
965 static bool isRegisterSize(
unsigned Size) {
966 return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
971 return isX86VectorTypeForVectorCall(
getContext(), Ty);
975 uint64_t NumMembers)
const override {
977 return isX86VectorCallAggregateSmallEnough(NumMembers);
989 unsigned getTypeStackAlignInBytes(
QualType Ty,
unsigned Align)
const;
997 bool updateFreeRegs(
QualType Ty, CCState &State)
const;
999 bool shouldAggregateUseDirect(
QualType Ty, CCState &State,
bool &InReg,
1000 bool &NeedsPadding)
const;
1001 bool shouldPrimitiveUseInReg(
QualType Ty, CCState &State)
const;
1003 bool canExpandIndirectArgument(
QualType Ty)
const;
1013 bool &UsedInAlloca)
const;
1022 bool RetSmallStructInRegABI,
bool Win32StructABI,
1023 unsigned NumRegisterParameters,
bool SoftFloatABI)
1024 :
SwiftABIInfo(CGT), IsDarwinVectorABI(DarwinVectorABI),
1025 IsRetSmallStructInRegABI(RetSmallStructInRegABI),
1026 IsWin32StructABI(Win32StructABI),
1027 IsSoftFloatABI(SoftFloatABI),
1029 DefaultNumRegisterParameters(NumRegisterParameters) {}
1031 bool shouldPassIndirectlyForSwift(
CharUnits totalSize,
1033 bool asReturnValue)
const override {
1041 bool isSwiftErrorInRegister()
const override {
1050 bool RetSmallStructInRegABI,
bool Win32StructABI,
1051 unsigned NumRegisterParameters,
bool SoftFloatABI)
1053 CGT, DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
1054 NumRegisterParameters, SoftFloatABI)) {}
1056 static bool isStructReturnInRegABI(
1059 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
1073 StringRef Constraint,
1075 return X86AdjustInlineAsmType(CGF, Constraint, Ty);
1079 std::string &Constraints,
1080 std::vector<llvm::Type *> &ResultRegTypes,
1081 std::vector<llvm::Type *> &ResultTruncRegTypes,
1082 std::vector<LValue> &ResultRegDests,
1083 std::string &AsmString,
1084 unsigned NumOutputs)
const override;
1088 unsigned Sig = (0xeb << 0) |
1092 return llvm::ConstantInt::get(CGM.
Int32Ty, Sig);
1095 StringRef getARCRetainAutoreleasedReturnValueMarker()
const override {
1096 return "movl\t%ebp, %ebp" 1097 "\t\t// marker for objc_retainAutoreleaseReturnValue";
1112 unsigned NumNewOuts,
1113 std::string &AsmString) {
1115 llvm::raw_string_ostream OS(Buf);
1117 while (Pos < AsmString.size()) {
1118 size_t DollarStart = AsmString.find(
'$', Pos);
1119 if (DollarStart == std::string::npos)
1120 DollarStart = AsmString.size();
1121 size_t DollarEnd = AsmString.find_first_not_of(
'$', DollarStart);
1122 if (DollarEnd == std::string::npos)
1123 DollarEnd = AsmString.size();
1124 OS << StringRef(&AsmString[Pos], DollarEnd - Pos);
1126 size_t NumDollars = DollarEnd - DollarStart;
1127 if (NumDollars % 2 != 0 && Pos < AsmString.size()) {
1129 size_t DigitStart = Pos;
1130 size_t DigitEnd = AsmString.find_first_not_of(
"0123456789", DigitStart);
1131 if (DigitEnd == std::string::npos)
1132 DigitEnd = AsmString.size();
1133 StringRef OperandStr(&AsmString[DigitStart], DigitEnd - DigitStart);
1134 unsigned OperandIndex;
1135 if (!OperandStr.getAsInteger(10, OperandIndex)) {
1136 if (OperandIndex >= FirstIn)
1137 OperandIndex += NumNewOuts;
1145 AsmString = std::move(OS.str());
1149 void X86_32TargetCodeGenInfo::addReturnRegisterOutputs(
1151 std::vector<llvm::Type *> &ResultRegTypes,
1152 std::vector<llvm::Type *> &ResultTruncRegTypes,
1153 std::vector<LValue> &ResultRegDests, std::string &AsmString,
1154 unsigned NumOutputs)
const {
1159 if (!Constraints.empty())
1161 if (RetWidth <= 32) {
1162 Constraints +=
"={eax}";
1163 ResultRegTypes.push_back(CGF.
Int32Ty);
1166 Constraints +=
"=A";
1167 ResultRegTypes.push_back(CGF.
Int64Ty);
1172 ResultTruncRegTypes.push_back(CoerceTy);
1176 CoerceTy->getPointerTo()));
1177 ResultRegDests.push_back(ReturnSlot);
1184 bool X86_32ABIInfo::shouldReturnTypeInRegister(
QualType Ty,
1190 if ((IsMCUABI && Size > 64) || (!IsMCUABI && !isRegisterSize(Size)))
1196 if (Size == 64 || Size == 128)
1211 return shouldReturnTypeInRegister(AT->getElementType(), Context);
1215 if (!RT)
return false;
1227 if (!shouldReturnTypeInRegister(FD->getType(), Context))
1236 Ty = CTy->getElementType();
1246 return Size == 32 || Size == 64;
1251 for (
const auto *FD : RD->
fields()) {
1261 if (FD->isBitField())
1286 bool X86_32ABIInfo::canExpandIndirectArgument(
QualType Ty)
const {
1293 if (
const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
1294 if (!IsWin32StructABI) {
1297 if (!CXXRD->isCLike())
1301 if (CXXRD->isDynamicClass())
1318 if (State.FreeRegs) {
1327 CCState &State)
const {
1332 uint64_t NumElts = 0;
1333 if ((State.CC == llvm::CallingConv::X86_VectorCall ||
1334 State.CC == llvm::CallingConv::X86_RegCall) &&
1342 if (IsDarwinVectorABI) {
1354 if ((Size == 8 || Size == 16 || Size == 32) ||
1355 (Size == 64 && VT->getNumElements() == 1))
1359 return getIndirectReturnResult(RetTy, State);
1368 if (RT->getDecl()->hasFlexibleArrayMember())
1369 return getIndirectReturnResult(RetTy, State);
1374 return getIndirectReturnResult(RetTy, State);
1382 if (shouldReturnTypeInRegister(RetTy,
getContext())) {
1391 if ((!IsWin32StructABI && SeltTy->isRealFloatingType())
1392 || SeltTy->hasPointerRepresentation())
1400 return getIndirectReturnResult(RetTy, State);
1405 RetTy = EnumTy->getDecl()->getIntegerType();
1422 if (
const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
1423 for (
const auto &I : CXXRD->bases())
1427 for (
const auto *i : RD->
fields()) {
1440 unsigned X86_32ABIInfo::getTypeStackAlignInBytes(
QualType Ty,
1441 unsigned Align)
const {
1444 if (Align <= MinABIStackAlignInBytes)
1448 if (!IsDarwinVectorABI) {
1450 return MinABIStackAlignInBytes;
1458 return MinABIStackAlignInBytes;
1462 CCState &State)
const {
1464 if (State.FreeRegs) {
1474 unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
1475 if (StackAlign == 0)
1480 bool Realign = TypeAlign > StackAlign;
1485 X86_32ABIInfo::Class X86_32ABIInfo::classify(
QualType Ty)
const {
1492 if (K == BuiltinType::Float || K == BuiltinType::Double)
1498 bool X86_32ABIInfo::updateFreeRegs(
QualType Ty, CCState &State)
const {
1499 if (!IsSoftFloatABI) {
1500 Class C = classify(Ty);
1506 unsigned SizeInRegs = (Size + 31) / 32;
1508 if (SizeInRegs == 0)
1512 if (SizeInRegs > State.FreeRegs) {
1521 if (SizeInRegs > State.FreeRegs || SizeInRegs > 2)
1525 State.FreeRegs -= SizeInRegs;
1529 bool X86_32ABIInfo::shouldAggregateUseDirect(
QualType Ty, CCState &State,
1531 bool &NeedsPadding)
const {
1538 NeedsPadding =
false;
1541 if (!updateFreeRegs(Ty, State))
1547 if (State.CC == llvm::CallingConv::X86_FastCall ||
1548 State.CC == llvm::CallingConv::X86_VectorCall ||
1549 State.CC == llvm::CallingConv::X86_RegCall) {
1550 if (
getContext().getTypeSize(Ty) <= 32 && State.FreeRegs)
1551 NeedsPadding =
true;
1559 bool X86_32ABIInfo::shouldPrimitiveUseInReg(
QualType Ty, CCState &State)
const {
1560 if (!updateFreeRegs(Ty, State))
1566 if (State.CC == llvm::CallingConv::X86_FastCall ||
1567 State.CC == llvm::CallingConv::X86_VectorCall ||
1568 State.CC == llvm::CallingConv::X86_RegCall) {
1580 CCState &State)
const {
1590 return getIndirectResult(Ty,
false, State);
1600 uint64_t NumElts = 0;
1601 if (State.CC == llvm::CallingConv::X86_RegCall &&
1604 if (State.FreeSSERegs >= NumElts) {
1605 State.FreeSSERegs -= NumElts;
1610 return getIndirectResult(Ty,
false, State);
1617 return getIndirectResult(Ty,
true, State);
1624 llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
1625 bool NeedsPadding =
false;
1627 if (shouldAggregateUseDirect(Ty, State, InReg, NeedsPadding)) {
1630 llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
1636 llvm::IntegerType *PaddingType = NeedsPadding ? Int32 :
nullptr;
1644 if (
getContext().getTypeSize(Ty) <= 4 * 32 &&
1645 (!IsMCUABI || State.FreeRegs == 0) && canExpandIndirectArgument(Ty))
1647 State.CC == llvm::CallingConv::X86_FastCall ||
1648 State.CC == llvm::CallingConv::X86_VectorCall ||
1649 State.CC == llvm::CallingConv::X86_RegCall,
1652 return getIndirectResult(Ty,
true, State);
1658 if (IsDarwinVectorABI) {
1660 if ((Size == 8 || Size == 16 || Size == 32) ||
1661 (Size == 64 && VT->getNumElements() == 1))
1674 Ty = EnumTy->getDecl()->getIntegerType();
1676 bool InReg = shouldPrimitiveUseInReg(Ty, State);
1689 void X86_32ABIInfo::computeVectorCallArgs(
CGFunctionInfo &FI, CCState &State,
1690 bool &UsedInAlloca)
const {
1704 uint64_t NumElts = 0;
1708 if (State.FreeSSERegs >= NumElts) {
1709 State.FreeSSERegs -= NumElts;
1721 uint64_t NumElts = 0;
1727 if (State.FreeSSERegs >= NumElts) {
1728 State.FreeSSERegs -= NumElts;
1729 I.info = getDirectX86Hva();
1731 I.info = getIndirectResult(Ty,
false, State);
1733 }
else if (!IsHva) {
1745 else if (State.CC == llvm::CallingConv::X86_FastCall)
1747 else if (State.CC == llvm::CallingConv::X86_VectorCall) {
1749 State.FreeSSERegs = 6;
1752 else if (State.CC == llvm::CallingConv::X86_RegCall) {
1754 State.FreeSSERegs = 8;
1756 State.FreeRegs = DefaultNumRegisterParameters;
1763 if (State.FreeRegs) {
1774 bool UsedInAlloca =
false;
1775 if (State.CC == llvm::CallingConv::X86_VectorCall) {
1776 computeVectorCallArgs(FI, State, UsedInAlloca);
1788 rewriteWithInAlloca(FI);
1798 assert(StackOffset.
isMultipleOf(FieldAlign) &&
"unaligned inalloca struct");
1805 StackOffset = FieldEnd.
alignTo(FieldAlign);
1806 if (StackOffset != FieldEnd) {
1807 CharUnits NumBytes = StackOffset - FieldEnd;
1809 Ty = llvm::ArrayType::get(Ty, NumBytes.
getQuantity());
1810 FrameFields.push_back(Ty);
1835 llvm_unreachable(
"invalid enum");
1838 void X86_32ABIInfo::rewriteWithInAlloca(
CGFunctionInfo &FI)
const {
1839 assert(IsWin32StructABI &&
"inalloca only supported on win32");
1856 addFieldToArgStruct(FrameFields, StackOffset, I->
info, I->
type);
1863 addFieldToArgStruct(FrameFields, StackOffset, Ret, PtrTy);
1873 for (; I != E; ++I) {
1875 addFieldToArgStruct(FrameFields, StackOffset, I->
info, I->
type);
1893 getTypeStackAlignInBytes(Ty,
TypeInfo.second.getQuantity()));
1900 bool X86_32TargetCodeGenInfo::isStructReturnInRegABI(
1902 assert(Triple.getArch() == llvm::Triple::x86);
1904 switch (Opts.getStructReturnConvention()) {
1913 if (Triple.isOSDarwin() || Triple.isOSIAMCU())
1916 switch (Triple.getOS()) {
1917 case llvm::Triple::DragonFly:
1918 case llvm::Triple::FreeBSD:
1919 case llvm::Triple::OpenBSD:
1920 case llvm::Triple::Win32:
1927 void X86_32TargetCodeGenInfo::setTargetAttributes(
1930 if (!IsForDefinition)
1932 if (
const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
1933 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
1935 llvm::Function *Fn = cast<llvm::Function>(GV);
1938 llvm::AttrBuilder B;
1939 B.addStackAlignmentAttr(16);
1940 Fn->addAttributes(llvm::AttributeList::FunctionIndex, B);
1942 if (FD->hasAttr<AnyX86InterruptAttr>()) {
1943 llvm::Function *Fn = cast<llvm::Function>(GV);
1944 Fn->setCallingConv(llvm::CallingConv::X86_INTR);
1949 bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
1972 Four8, Builder.CreateConstInBoundsGEP1_32(CGF.
Int8Ty, Address, 9),
1999 static unsigned getNativeVectorSizeForAVXABI(
X86AVXABILevel AVXLevel) {
2001 case X86AVXABILevel::AVX512:
2003 case X86AVXABILevel::AVX:
2008 llvm_unreachable(
"Unknown AVXLevel");
2033 static Class merge(Class Accum, Class Field);
2049 void postMerge(
unsigned AggregateSize, Class &Lo, Class &Hi)
const;
2075 void classify(
QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi,
2076 bool isNamedArg)
const;
2080 unsigned IROffset,
QualType SourceTy,
2081 unsigned SourceOffset)
const;
2083 unsigned IROffset,
QualType SourceTy,
2084 unsigned SourceOffset)
const;
2100 unsigned &neededInt,
unsigned &neededSSE,
2101 bool isNamedArg)
const;
2104 unsigned &NeededSSE)
const;
2107 unsigned &NeededSSE)
const;
2109 bool IsIllegalVectorType(
QualType Ty)
const;
2116 bool honorsRevision0_98()
const {
2122 bool classifyIntegerMMXAsSSE()
const {
2129 if (Triple.isOSDarwin() || Triple.getOS() == llvm::Triple::PS4)
2131 if (Triple.isOSFreeBSD() && Triple.getOSMajorVersion() >= 10)
2139 bool Has64BitPointers;
2144 Has64BitPointers(CGT.
getDataLayout().getPointerSize(0) == 8) {
2148 unsigned neededInt, neededSSE;
2154 if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
2155 return (vectorTy->getBitWidth() > 128);
2167 bool has64BitPointers()
const {
2168 return Has64BitPointers;
2171 bool shouldPassIndirectlyForSwift(
CharUnits totalSize,
2173 bool asReturnValue)
const override {
2176 bool isSwiftErrorInRegister()
const override {
2186 IsMingw64(
getTarget().getTriple().isWindowsGNUEnvironment()) {}
2195 return isX86VectorTypeForVectorCall(
getContext(), Ty);
2199 uint64_t NumMembers)
const override {
2201 return isX86VectorCallAggregateSmallEnough(NumMembers);
2204 bool shouldPassIndirectlyForSwift(
CharUnits totalSize,
2206 bool asReturnValue)
const override {
2210 bool isSwiftErrorInRegister()
const override {
2216 bool IsVectorCall,
bool IsRegCall)
const;
2219 void computeVectorCallArgs(
CGFunctionInfo &FI,
unsigned FreeSSERegs,
2220 bool IsVectorCall,
bool IsRegCall)
const;
2230 const X86_64ABIInfo &getABIInfo()
const {
2249 StringRef Constraint,
2251 return X86AdjustInlineAsmType(CGF, Constraint, Ty);
2254 bool isNoProtoCallVariadic(
const CallArgList &args,
2263 bool HasAVXType =
false;
2264 for (CallArgList::const_iterator
2265 it = args.begin(), ie = args.end(); it != ie; ++it) {
2266 if (getABIInfo().isPassedUsingAVXType(it->Ty)) {
2281 unsigned Sig = (0xeb << 0) |
2285 return llvm::ConstantInt::get(CGM.
Int32Ty, Sig);
2288 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
2291 if (!IsForDefinition)
2293 if (
const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
2294 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
2296 auto *Fn = cast<llvm::Function>(GV);
2299 llvm::AttrBuilder B;
2300 B.addStackAlignmentAttr(16);
2301 Fn->addAttributes(llvm::AttributeList::FunctionIndex, B);
2303 if (FD->hasAttr<AnyX86InterruptAttr>()) {
2304 llvm::Function *Fn = cast<llvm::Function>(GV);
2305 Fn->setCallingConv(llvm::CallingConv::X86_INTR);
2311 class PS4TargetCodeGenInfo :
public X86_64TargetCodeGenInfo {
2314 : X86_64TargetCodeGenInfo(CGT, AVXLevel) {}
2316 void getDependentLibraryOption(llvm::StringRef Lib,
2320 if (Lib.find(
" ") != StringRef::npos)
2321 Opt +=
"\"" + Lib.str() +
"\"";
2327 static std::string qualifyWindowsLibrary(llvm::StringRef Lib) {
2331 bool Quote = (Lib.find(
" ") != StringRef::npos);
2332 std::string ArgStr = Quote ?
"\"" :
"";
2334 if (!Lib.endswith_lower(
".lib"))
2336 ArgStr += Quote ?
"\"" :
"";
2340 class WinX86_32TargetCodeGenInfo :
public X86_32TargetCodeGenInfo {
2343 bool DarwinVectorABI,
bool RetSmallStructInRegABI,
bool Win32StructABI,
2344 unsigned NumRegisterParameters)
2345 : X86_32TargetCodeGenInfo(CGT, DarwinVectorABI, RetSmallStructInRegABI,
2346 Win32StructABI, NumRegisterParameters,
false) {}
2348 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
2352 void getDependentLibraryOption(llvm::StringRef Lib,
2354 Opt =
"/DEFAULTLIB:";
2355 Opt += qualifyWindowsLibrary(Lib);
2358 void getDetectMismatchOption(llvm::StringRef Name,
2359 llvm::StringRef
Value,
2361 Opt =
"/FAILIFMISMATCH:\"" + Name.str() +
"=" + Value.str() +
"\"";
2365 static void addStackProbeSizeTargetAttribute(
const Decl *D,
2366 llvm::GlobalValue *GV,
2368 if (D && isa<FunctionDecl>(D)) {
2370 llvm::Function *Fn = cast<llvm::Function>(GV);
2372 Fn->addFnAttr(
"stack-probe-size",
2378 void WinX86_32TargetCodeGenInfo::setTargetAttributes(
2381 X86_32TargetCodeGenInfo::setTargetAttributes(D, GV, CGM, IsForDefinition);
2382 if (!IsForDefinition)
2384 addStackProbeSizeTargetAttribute(D, GV, CGM);
2393 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
2411 void getDependentLibraryOption(llvm::StringRef Lib,
2413 Opt =
"/DEFAULTLIB:";
2414 Opt += qualifyWindowsLibrary(Lib);
2417 void getDetectMismatchOption(llvm::StringRef Name,
2418 llvm::StringRef
Value,
2420 Opt =
"/FAILIFMISMATCH:\"" + Name.str() +
"=" + Value.str() +
"\"";
2424 void WinX86_64TargetCodeGenInfo::setTargetAttributes(
2428 if (!IsForDefinition)
2430 if (
const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
2431 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
2433 auto *Fn = cast<llvm::Function>(GV);
2436 llvm::AttrBuilder B;
2437 B.addStackAlignmentAttr(16);
2438 Fn->addAttributes(llvm::AttributeList::FunctionIndex, B);
2440 if (FD->hasAttr<AnyX86InterruptAttr>()) {
2441 llvm::Function *Fn = cast<llvm::Function>(GV);
2442 Fn->setCallingConv(llvm::CallingConv::X86_INTR);
2446 addStackProbeSizeTargetAttribute(D, GV, CGM);
2450 void X86_64ABIInfo::postMerge(
unsigned AggregateSize, Class &Lo,
2475 if (Hi == X87Up && Lo != X87 && honorsRevision0_98())
2477 if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp))
2479 if (Hi == SSEUp && Lo != SSE)
2483 X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
2507 assert((Accum != Memory && Accum != ComplexX87) &&
2508 "Invalid accumulated classification during merge.");
2509 if (Accum == Field || Field == NoClass)
2511 if (Field == Memory)
2513 if (Accum == NoClass)
2515 if (Accum == Integer || Field == Integer)
2517 if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
2518 Accum == X87 || Accum == X87Up)
2523 void X86_64ABIInfo::classify(
QualType Ty, uint64_t OffsetBase,
2524 Class &Lo, Class &Hi,
bool isNamedArg)
const {
2535 Class &Current = OffsetBase < 64 ? Lo : Hi;
2541 if (k == BuiltinType::Void) {
2543 }
else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
2546 }
else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
2548 }
else if (k == BuiltinType::Float || k == BuiltinType::Double) {
2550 }
else if (k == BuiltinType::LongDouble) {
2552 if (LDF == &llvm::APFloat::IEEEquad()) {
2555 }
else if (LDF == &llvm::APFloat::x87DoubleExtended()) {
2558 }
else if (LDF == &llvm::APFloat::IEEEdouble()) {
2561 llvm_unreachable(
"unexpected long double representation!");
2570 classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi, isNamedArg);
2581 if (Has64BitPointers) {
2588 uint64_t EB_FuncPtr = (OffsetBase) / 64;
2589 uint64_t EB_ThisAdj = (OffsetBase + 64 - 1) / 64;
2590 if (EB_FuncPtr != EB_ThisAdj) {
2604 if (Size == 1 || Size == 8 || Size == 16 || Size == 32) {
2613 uint64_t EB_Lo = (OffsetBase) / 64;
2614 uint64_t EB_Hi = (OffsetBase + Size - 1) / 64;
2617 }
else if (Size == 64) {
2618 QualType ElementType = VT->getElementType();
2627 if (!classifyIntegerMMXAsSSE() &&
2638 if (OffsetBase && OffsetBase != 64)
2640 }
else if (Size == 128 ||
2641 (isNamedArg && Size <= getNativeVectorSizeForAVXABI(AVXLevel))) {
2669 else if (Size <= 128)
2677 if (LDF == &llvm::APFloat::IEEEquad())
2679 else if (LDF == &llvm::APFloat::x87DoubleExtended())
2680 Current = ComplexX87;
2681 else if (LDF == &llvm::APFloat::IEEEdouble())
2684 llvm_unreachable(
"unexpected long double representation!");
2689 uint64_t EB_Real = (OffsetBase) / 64;
2691 if (Hi == NoClass && EB_Real != EB_Imag)
2711 if (OffsetBase %
getContext().getTypeAlign(AT->getElementType()))
2718 uint64_t ArraySize = AT->getSize().getZExtValue();
2725 (Size != EltSize || Size > getNativeVectorSizeForAVXABI(AVXLevel)))
2728 for (uint64_t i=0,
Offset=OffsetBase; i<ArraySize; ++i,
Offset += EltSize) {
2729 Class FieldLo, FieldHi;
2730 classify(AT->getElementType(),
Offset, FieldLo, FieldHi, isNamedArg);
2731 Lo = merge(Lo, FieldLo);
2732 Hi = merge(Hi, FieldHi);
2733 if (Lo == Memory || Hi == Memory)
2737 postMerge(Size, Lo, Hi);
2738 assert((Hi != SSEUp || Lo == SSE) &&
"Invalid SSEUp array classification.");
2768 if (
const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
2769 for (
const auto &I : CXXRD->bases()) {
2770 assert(!I.isVirtual() && !I.getType()->isDependentType() &&
2771 "Unexpected base class!");
2773 cast<CXXRecordDecl>(I.getType()->getAs<
RecordType>()->getDecl());
2780 Class FieldLo, FieldHi;
2783 classify(I.getType(),
Offset, FieldLo, FieldHi, isNamedArg);
2784 Lo = merge(Lo, FieldLo);
2785 Hi = merge(Hi, FieldHi);
2786 if (Lo == Memory || Hi == Memory) {
2787 postMerge(Size, Lo, Hi);
2796 i != e; ++i, ++idx) {
2798 bool BitField = i->isBitField();
2801 if (BitField && i->isUnnamedBitfield())
2811 if (Size > 128 && (Size !=
getContext().getTypeSize(i->getType()) ||
2812 Size > getNativeVectorSizeForAVXABI(AVXLevel))) {
2814 postMerge(Size, Lo, Hi);
2818 if (!BitField && Offset %
getContext().getTypeAlign(i->getType())) {
2820 postMerge(Size, Lo, Hi);
2830 Class FieldLo, FieldHi;
2836 assert(!i->isUnnamedBitfield());
2838 uint64_t Size = i->getBitWidthValue(
getContext());
2840 uint64_t EB_Lo = Offset / 64;
2841 uint64_t EB_Hi = (Offset + Size - 1) / 64;
2844 assert(EB_Hi == EB_Lo &&
"Invalid classification, type > 16 bytes.");
2849 FieldHi = EB_Hi ? Integer : NoClass;
2852 classify(i->getType(),
Offset, FieldLo, FieldHi, isNamedArg);
2853 Lo = merge(Lo, FieldLo);
2854 Hi = merge(Hi, FieldHi);
2855 if (Lo == Memory || Hi == Memory)
2859 postMerge(Size, Lo, Hi);
2869 Ty = EnumTy->getDecl()->getIntegerType();
2878 bool X86_64ABIInfo::IsIllegalVectorType(
QualType Ty)
const {
2881 unsigned LargestVector = getNativeVectorSizeForAVXABI(AVXLevel);
2882 if (Size <= 64 || Size > LargestVector)
2890 unsigned freeIntRegs)
const {
2902 Ty = EnumTy->getDecl()->getIntegerType();
2936 if (freeIntRegs == 0) {
2941 if (Align == 8 && Size <= 64)
2958 if (isa<llvm::VectorType>(IRType) ||
2959 IRType->getTypeID() == llvm::Type::FP128TyID)
2964 assert((Size == 128 || Size == 256 || Size == 512) &&
"Invalid type found!");
2967 return llvm::VectorType::get(llvm::Type::getDoubleTy(
getVMContext()),
2983 unsigned TySize = (unsigned)Context.
getTypeSize(Ty);
2984 if (TySize <= StartBit)
2988 unsigned EltSize = (unsigned)Context.
getTypeSize(AT->getElementType());
2989 unsigned NumElts = (unsigned)AT->getSize().getZExtValue();
2992 for (
unsigned i = 0; i != NumElts; ++i) {
2994 unsigned EltOffset = i*EltSize;
2995 if (EltOffset >= EndBit)
break;
2997 unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset :0;
2999 EndBit-EltOffset, Context))
3011 if (
const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
3012 for (
const auto &I : CXXRD->bases()) {
3013 assert(!I.isVirtual() && !I.getType()->isDependentType() &&
3014 "Unexpected base class!");
3016 cast<CXXRecordDecl>(I.getType()->getAs<
RecordType>()->getDecl());
3020 if (BaseOffset >= EndBit)
continue;
3022 unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset :0;
3024 EndBit-BaseOffset, Context))
3035 i != e; ++i, ++idx) {
3039 if (FieldOffset >= EndBit)
break;
3041 unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset :0;
3060 const llvm::DataLayout &TD) {
3062 if (IROffset == 0 && IRType->isFloatTy())
3066 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
3067 const llvm::StructLayout *SL = TD.getStructLayout(STy);
3068 unsigned Elt = SL->getElementContainingOffset(IROffset);
3069 IROffset -= SL->getElementOffset(Elt);
3074 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
3076 unsigned EltSize = TD.getTypeAllocSize(EltTy);
3077 IROffset -= IROffset/EltSize*EltSize;
3088 GetSSETypeAtOffset(
llvm::Type *IRType,
unsigned IROffset,
3089 QualType SourceTy,
unsigned SourceOffset)
const {
3102 return llvm::VectorType::get(llvm::Type::getFloatTy(
getVMContext()), 2);
3123 GetINTEGERTypeAtOffset(
llvm::Type *IRType,
unsigned IROffset,
3124 QualType SourceTy,
unsigned SourceOffset)
const {
3127 if (IROffset == 0) {
3129 if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) ||
3130 IRType->isIntegerTy(64))
3139 if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
3140 IRType->isIntegerTy(32) ||
3141 (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) {
3142 unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 :
3143 cast<llvm::IntegerType>(IRType)->getBitWidth();
3151 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
3153 const llvm::StructLayout *SL =
getDataLayout().getStructLayout(STy);
3154 if (IROffset < SL->getSizeInBytes()) {
3155 unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
3156 IROffset -= SL->getElementOffset(FieldIdx);
3158 return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,
3159 SourceTy, SourceOffset);
3163 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
3166 unsigned EltOffset = IROffset/EltSize*EltSize;
3167 return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy,
3173 unsigned TySizeInBytes =
3176 assert(TySizeInBytes != SourceOffset &&
"Empty field?");
3181 std::min(TySizeInBytes-SourceOffset, 8U)*8);
3192 const llvm::DataLayout &TD) {
3197 unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
3198 unsigned HiAlign = TD.getABITypeAlignment(Hi);
3199 unsigned HiStart = llvm::alignTo(LoSize, HiAlign);
3200 assert(HiStart != 0 && HiStart <= 8 &&
"Invalid x86-64 argument pair!");
3212 if (Lo->isFloatTy())
3213 Lo = llvm::Type::getDoubleTy(Lo->getContext());
3215 assert((Lo->isIntegerTy() || Lo->isPointerTy())
3216 &&
"Invalid/unknown lo type");
3217 Lo = llvm::Type::getInt64Ty(Lo->getContext());
3221 llvm::StructType *Result = llvm::StructType::get(Lo, Hi);
3224 assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
3225 "Invalid x86-64 argument pair!");
3233 X86_64ABIInfo::Class Lo, Hi;
3234 classify(RetTy, 0, Lo, Hi,
true);
3237 assert((Hi != Memory || Lo == Memory) &&
"Invalid memory classification.");
3238 assert((Hi != SSEUp || Lo == SSE) &&
"Invalid SSEUp classification.");
3247 assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
3248 "Unknown missing lo part");
3253 llvm_unreachable(
"Invalid classification for lo word.");
3258 return getIndirectReturnResult(RetTy);
3263 ResType = GetINTEGERTypeAtOffset(
CGT.
ConvertType(RetTy), 0, RetTy, 0);
3267 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
3270 RetTy = EnumTy->getDecl()->getIntegerType();
3281 ResType = GetSSETypeAtOffset(
CGT.
ConvertType(RetTy), 0, RetTy, 0);
3294 assert(Hi == ComplexX87 &&
"Unexpected ComplexX87 classification.");
3295 ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(
getVMContext()),
3306 llvm_unreachable(
"Invalid classification for hi word.");
3313 HighPart = GetINTEGERTypeAtOffset(
CGT.
ConvertType(RetTy), 8, RetTy, 8);
3318 HighPart = GetSSETypeAtOffset(
CGT.
ConvertType(RetTy), 8, RetTy, 8);
3329 assert(Lo == SSE &&
"Unexpected SSEUp classification.");
3330 ResType = GetByteVectorType(RetTy);
3341 HighPart = GetSSETypeAtOffset(
CGT.
ConvertType(RetTy), 8, RetTy, 8);
3358 QualType Ty,
unsigned freeIntRegs,
unsigned &neededInt,
unsigned &neededSSE,
3364 X86_64ABIInfo::Class Lo, Hi;
3365 classify(Ty, 0, Lo, Hi, isNamedArg);
3369 assert((Hi != Memory || Lo == Memory) &&
"Invalid memory classification.");
3370 assert((Hi != SSEUp || Lo == SSE) &&
"Invalid SSEUp classification.");
3381 assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
3382 "Unknown missing lo part");
3395 return getIndirectResult(Ty, freeIntRegs);
3399 llvm_unreachable(
"Invalid classification for lo word.");
3412 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
3415 Ty = EnumTy->getDecl()->getIntegerType();
3429 ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0);
3443 llvm_unreachable(
"Invalid classification for hi word.");
3445 case NoClass:
break;
3450 HighPart = GetINTEGERTypeAtOffset(
CGT.
ConvertType(Ty), 8, Ty, 8);
3472 assert(Lo == SSE &&
"Unexpected SSEUp classification");
3473 ResType = GetByteVectorType(Ty);
3487 X86_64ABIInfo::classifyRegCallStructTypeImpl(
QualType Ty,
unsigned &NeededInt,
3488 unsigned &NeededSSE)
const {
3490 assert(RT &&
"classifyRegCallStructType only valid with struct types");
3492 if (RT->getDecl()->hasFlexibleArrayMember())
3493 return getIndirectReturnResult(Ty);
3496 if (
auto CXXRD = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
3497 if (CXXRD->isDynamicClass()) {
3498 NeededInt = NeededSSE = 0;
3499 return getIndirectReturnResult(Ty);
3502 for (
const auto &I : CXXRD->bases())
3503 if (classifyRegCallStructTypeImpl(I.getType(), NeededInt, NeededSSE)
3505 NeededInt = NeededSSE = 0;
3506 return getIndirectReturnResult(Ty);
3511 for (
const auto *FD : RT->getDecl()->fields()) {
3512 if (FD->getType()->isRecordType() && !FD->getType()->isUnionType()) {
3513 if (classifyRegCallStructTypeImpl(FD->getType(), NeededInt, NeededSSE)
3515 NeededInt = NeededSSE = 0;
3516 return getIndirectReturnResult(Ty);
3519 unsigned LocalNeededInt, LocalNeededSSE;
3521 LocalNeededSSE,
true)
3523 NeededInt = NeededSSE = 0;
3524 return getIndirectReturnResult(Ty);
3526 NeededInt += LocalNeededInt;
3527 NeededSSE += LocalNeededSSE;
3535 unsigned &NeededInt,
3536 unsigned &NeededSSE)
const {
3541 return classifyRegCallStructTypeImpl(Ty, NeededInt, NeededSSE);
3549 unsigned FreeIntRegs = IsRegCall ? 11 : 6;
3550 unsigned FreeSSERegs = IsRegCall ? 16 : 8;
3551 unsigned NeededInt, NeededSSE;
3557 classifyRegCallStructType(FI.
getReturnType(), NeededInt, NeededSSE);
3558 if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
3559 FreeIntRegs -= NeededInt;
3560 FreeSSERegs -= NeededSSE;
3589 it != ie; ++it, ++ArgNo) {
3590 bool IsNamedArg = ArgNo < NumRequiredArgs;
3592 if (IsRegCall && it->type->isStructureOrClassType())
3593 it->info = classifyRegCallStructType(it->type, NeededInt, NeededSSE);
3596 NeededSSE, IsNamedArg);
3602 if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
3603 FreeIntRegs -= NeededInt;
3604 FreeSSERegs -= NeededSSE;
3606 it->info = getIndirectResult(it->type, FreeIntRegs);
3632 llvm::PointerType::getUnqual(LTy));
3641 llvm::ConstantInt::get(CGF.
Int32Ty, (SizeInBytes + 7) & ~7);
3642 overflow_arg_area = CGF.
Builder.CreateGEP(overflow_arg_area, Offset,
3643 "overflow_arg_area.next");
3659 unsigned neededInt, neededSSE;
3667 if (!neededInt && !neededSSE)
3683 llvm::Value *gp_offset =
nullptr, *fp_offset =
nullptr;
3689 InRegs = llvm::ConstantInt::get(CGF.
Int32Ty, 48 - neededInt * 8);
3690 InRegs = CGF.
Builder.CreateICmpULE(gp_offset, InRegs,
"fits_in_gp");
3699 llvm::ConstantInt::get(CGF.
Int32Ty, 176 - neededSSE * 16);
3700 FitsInFP = CGF.
Builder.CreateICmpULE(fp_offset, FitsInFP,
"fits_in_fp");
3701 InRegs = InRegs ? CGF.
Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
3707 CGF.
Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
3729 if (neededInt && neededSSE) {
3731 assert(AI.
isDirect() &&
"Unexpected ABI info for mixed regs");
3735 assert(ST->getNumElements() == 2 &&
"Unexpected ABI info for mixed regs");
3738 assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) &&
3739 "Unexpected ABI info for mixed regs");
3740 llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
3741 llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
3744 llvm::Value *RegLoAddr = TyLo->isFPOrFPVectorTy() ? FPAddr : GPAddr;
3745 llvm::Value *RegHiAddr = TyLo->isFPOrFPVectorTy() ? GPAddr : FPAddr;
3764 }
else if (neededInt) {
3770 std::pair<CharUnits, CharUnits> SizeAlign =
3772 uint64_t TySize = SizeAlign.first.getQuantity();
3783 }
else if (neededSSE == 1) {
3788 assert(neededSSE == 2 &&
"Invalid number of needed registers!");
3801 llvm::StructType *ST = llvm::StructType::get(DoubleTy, DoubleTy);
3854 WinX86_64ABIInfo::reclassifyHvaArgType(
QualType Ty,
unsigned &FreeSSERegs,
3857 const Type *
Base =
nullptr;
3858 uint64_t NumElts = 0;
3862 FreeSSERegs -= NumElts;
3863 return getDirectX86Hva();
3869 bool IsReturnType,
bool IsVectorCall,
3870 bool IsRegCall)
const {
3876 Ty = EnumTy->getDecl()->getIntegerType();
3879 uint64_t Width = Info.
Width;
3884 if (!IsReturnType) {
3894 const Type *
Base =
nullptr;
3895 uint64_t NumElts = 0;
3898 if ((IsVectorCall || IsRegCall) &&
3901 if (FreeSSERegs >= NumElts) {
3902 FreeSSERegs -= NumElts;
3908 }
else if (IsVectorCall) {
3909 if (FreeSSERegs >= NumElts &&
3911 FreeSSERegs -= NumElts;
3913 }
else if (IsReturnType) {
3926 if (LLTy->isPointerTy() || LLTy->isIntegerTy())
3933 if (Width > 64 || !llvm::isPowerOf2_64(Width))
3943 if (BT && BT->
getKind() == BuiltinType::Bool)
3948 if (IsMingw64 && BT && BT->
getKind() == BuiltinType::LongDouble) {
3950 if (LDF == &llvm::APFloat::x87DoubleExtended())
3958 unsigned FreeSSERegs,
3960 bool IsRegCall)
const {
3965 if (Count < VectorcallMaxParamNumAsReg)
3966 I.info = classify(I.type, FreeSSERegs,
false, IsVectorCall, IsRegCall);
3970 unsigned ZeroSSERegsAvail = 0;
3971 I.info = classify(I.type, ZeroSSERegsAvail,
false,
3972 IsVectorCall, IsRegCall);
3978 I.info = reclassifyHvaArgType(I.type, FreeSSERegs, I.info);
3987 unsigned FreeSSERegs = 0;
3991 }
else if (IsRegCall) {
3998 IsVectorCall, IsRegCall);
4003 }
else if (IsRegCall) {
4009 computeVectorCallArgs(FI, FreeSSERegs, IsVectorCall, IsRegCall);
4012 I.info = classify(I.type, FreeSSERegs,
false, IsVectorCall, IsRegCall);
4020 bool IsIndirect =
false;
4026 IsIndirect = Width > 64 || !llvm::isPowerOf2_64(Width);
4038 class PPC32_SVR4_ABIInfo :
public DefaultABIInfo {
4039 bool IsSoftFloatABI;
4045 : DefaultABIInfo(CGT), IsSoftFloatABI(SoftFloatABI) {}
4069 Ty = CTy->getElementType();
4077 const Type *AlignTy =
nullptr;
4094 if (
getTarget().getTriple().isOSDarwin()) {
4096 TI.second = getParamTypeAlignment(Ty);
4104 const unsigned OverflowLimit = 8;
4132 if (isInt || IsSoftFloatABI) {
4141 if (isI64 || (isF64 && IsSoftFloatABI)) {
4142 NumRegs = Builder.CreateAdd(NumRegs, Builder.getInt8(1));
4143 NumRegs = Builder.CreateAnd(NumRegs, Builder.getInt8((uint8_t) ~1U));
4147 Builder.CreateICmpULT(NumRegs, Builder.getInt8(OverflowLimit),
"cond");
4153 Builder.CreateCondBr(CC, UsingRegs, UsingOverflow);
4156 if (isIndirect) DirectTy = DirectTy->getPointerTo(0);
4170 if (!(isInt || IsSoftFloatABI)) {
4179 Builder.CreateMul(NumRegs, Builder.getInt8(RegSize.
getQuantity()));
4187 Builder.CreateAdd(NumRegs,
4188 Builder.getInt8((isI64 || (isF64 && IsSoftFloatABI)) ? 2 : 1));
4199 Builder.
CreateStore(Builder.getInt8(OverflowLimit), NumRegsAddr);
4207 Size =
TypeInfo.first.alignTo(OverflowAreaAlign);
4218 if (Align > OverflowAreaAlign) {
4228 Builder.
CreateStore(OverflowArea.getPointer(), OverflowAreaAddr);
4255 llvm::IntegerType *i8 = CGF.
Int8Ty;
4256 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
4257 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
4258 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
4292 class PPC64_SVR4_ABIInfo :
public ABIInfo {
4300 static const unsigned GPRBits = 64;
4303 bool IsSoftFloatABI;
4307 bool IsQPXVectorTy(
const Type *Ty)
const {
4312 unsigned NumElements = VT->getNumElements();
4313 if (NumElements == 1)
4316 if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double)) {
4319 }
else if (VT->getElementType()->
4320 isSpecificBuiltinType(BuiltinType::Float)) {
4329 bool IsQPXVectorTy(
QualType Ty)
const {
4337 IsSoftFloatABI(SoftFloatABI) {}
4339 bool isPromotableTypeForABI(
QualType Ty)
const;
4347 uint64_t Members)
const override;
4365 if (IsQPXVectorTy(T) ||
4385 PPC64_SVR4_ABIInfo::ABIKind
Kind,
bool HasQPX,
4399 class PPC64TargetCodeGenInfo :
public DefaultTargetCodeGenInfo {
4401 PPC64TargetCodeGenInfo(
CodeGenTypes &
CGT) : DefaultTargetCodeGenInfo(CGT) {}
4417 PPC64_SVR4_ABIInfo::isPromotableTypeForABI(
QualType Ty)
const {
4420 Ty = EnumTy->getDecl()->getIntegerType();
4429 switch (BT->getKind()) {
4430 case BuiltinType::Int:
4431 case BuiltinType::UInt:
4445 Ty = CTy->getElementType();
4449 if (IsQPXVectorTy(Ty)) {
4460 const Type *AlignAsType =
nullptr;
4464 if (IsQPXVectorTy(EltType) || (EltType->
isVectorType() &&
4467 AlignAsType = EltType;
4471 const Type *
Base =
nullptr;
4472 uint64_t Members = 0;
4473 if (!AlignAsType &&
Kind == ELFv2 &&
4478 if (AlignAsType && IsQPXVectorTy(AlignAsType)) {
4483 }
else if (AlignAsType) {
4502 uint64_t &Members)
const {
4504 uint64_t NElements = AT->getSize().getZExtValue();
4509 Members *= NElements;
4518 if (
const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
4519 for (
const auto &I : CXXRD->bases()) {
4524 uint64_t FldMembers;
4528 Members += FldMembers;
4532 for (
const auto *FD : RD->
fields()) {
4537 if (AT->getSize().getZExtValue() == 0)
4539 FT = AT->getElementType();
4546 FD->isBitField() && FD->getBitWidthValue(
getContext()) == 0)
4549 uint64_t FldMembers;
4554 std::max(Members, FldMembers) : Members + FldMembers);
4568 Ty = CT->getElementType();
4584 QualType EltTy = VT->getElementType();
4585 unsigned NumElements =
4600 bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateBaseType(
QualType Ty)
const {
4604 if (BT->getKind() == BuiltinType::Float ||
4605 BT->getKind() == BuiltinType::Double ||
4606 BT->getKind() == BuiltinType::LongDouble) {
4619 bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateSmallEnough(
4620 const Type *
Base, uint64_t Members)
const {
4627 return Members * NumRegs <= 8;
4643 else if (Size < 128) {
4653 uint64_t ABIAlign = getParamTypeAlignment(Ty).getQuantity();
4657 const Type *Base =
nullptr;
4658 uint64_t Members = 0;
4659 if (
Kind == ELFv2 &&
4662 llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
4671 if (Bits > 0 && Bits <= 8 * GPRBits) {
4676 if (Bits <= GPRBits)
4678 llvm::IntegerType::get(
getVMContext(), llvm::alignTo(Bits, 8));
4682 uint64_t RegBits = ABIAlign * 8;
4683 uint64_t NumRegs = llvm::alignTo(Bits, RegBits) / RegBits;
4685 CoerceTy = llvm::ArrayType::get(RegTy, NumRegs);
4694 TyAlign > ABIAlign);
4697 return (isPromotableTypeForABI(Ty) ?
4715 else if (Size < 128) {
4723 const Type *Base =
nullptr;
4724 uint64_t Members = 0;
4725 if (
Kind == ELFv2 &&
4728 llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
4734 if (
Kind == ELFv2 && Bits <= 2 * GPRBits) {
4739 if (Bits > GPRBits) {
4740 CoerceTy = llvm::IntegerType::get(
getVMContext(), GPRBits);
4741 CoerceTy = llvm::StructType::get(CoerceTy, CoerceTy);
4744 llvm::IntegerType::get(
getVMContext(), llvm::alignTo(Bits, 8));
4752 return (isPromotableTypeForABI(RetTy) ?
4760 TypeInfo.second = getParamTypeAlignment(Ty);
4772 if (EltSize < SlotSize) {
4774 SlotSize * 2, SlotSize,
4781 SlotSize - EltSize);
4783 2 * SlotSize - EltSize);
4814 llvm::IntegerType *i8 = CGF.
Int8Ty;
4815 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
4816 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
4817 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
4854 PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable(
4890 ABIKind getABIKind()
const {
return Kind; }
4891 bool isDarwinPCS()
const {
return Kind == DarwinPCS; }
4897 uint64_t Members)
const override;
4899 bool isIllegalVectorType(
QualType Ty)
const;
4906 it.info = classifyArgumentType(it.type);
4917 return Kind == Win64 ?
EmitMSVAArg(CGF, VAListAddr, Ty)
4918 : isDarwinPCS() ? EmitDarwinVAArg(VAListAddr, Ty, CGF)
4919 : EmitAAPCSVAArg(VAListAddr, Ty, CGF);
4925 bool shouldPassIndirectlyForSwift(
CharUnits totalSize,
4927 bool asReturnValue)
const override {
4930 bool isSwiftErrorInRegister()
const override {
4935 unsigned elts)
const override;
4943 StringRef getARCRetainAutoreleasedReturnValueMarker()
const override {
4944 return "mov\tfp, fp\t\t// marker for objc_retainAutoreleaseReturnValue";
4951 bool doesReturnSlotInterfereWithArgs()
const override {
return false; }
4954 class WindowsAArch64TargetCodeGenInfo :
public AArch64TargetCodeGenInfo {
4956 WindowsAArch64TargetCodeGenInfo(
CodeGenTypes &
CGT, AArch64ABIInfo::ABIKind K)
4957 : AArch64TargetCodeGenInfo(CGT, K) {}
4959 void getDependentLibraryOption(llvm::StringRef Lib,
4961 Opt =
"/DEFAULTLIB:" + qualifyWindowsLibrary(Lib);
4964 void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef
Value,
4966 Opt =
"/FAILIFMISMATCH:\"" + Name.str() +
"=" + Value.str() +
"\"";
4975 if (isIllegalVectorType(Ty)) {
4988 llvm::VectorType::get(llvm::Type::getInt32Ty(
getVMContext()), 2);
4993 llvm::VectorType::get(llvm::Type::getInt32Ty(
getVMContext()), 4);
5002 Ty = EnumTy->getDecl()->getIntegerType();
5020 if (IsEmpty || Size == 0) {
5026 if (IsEmpty && Size == 0)
5032 const Type *Base =
nullptr;
5033 uint64_t Members = 0;
5043 if (
getTarget().isRenderScriptTarget()) {
5047 Size = llvm::alignTo(Size, 64);
5051 if (Alignment < 128 && Size == 128) {
5072 RetTy = EnumTy->getDecl()->getIntegerType();
5083 const Type *Base =
nullptr;
5084 uint64_t Members = 0;
5093 if (
getTarget().isRenderScriptTarget()) {
5097 Size = llvm::alignTo(Size, 64);
5101 if (Alignment < 128 && Size == 128) {
5112 bool AArch64ABIInfo::isIllegalVectorType(
QualType Ty)
const {
5115 unsigned NumElements = VT->getNumElements();
5118 if (!llvm::isPowerOf2_32(NumElements))
5120 return Size != 64 && (Size != 128 || NumElements == 1);
5125 bool AArch64ABIInfo::isLegalVectorTypeForSwift(
CharUnits totalSize,
5127 unsigned elts)
const {
5128 if (!llvm::isPowerOf2_32(elts))
5136 bool AArch64ABIInfo::isHomogeneousAggregateBaseType(
QualType Ty)
const {
5142 if (BT->isFloatingPoint())
5146 if (VecSize == 64 || VecSize == 128)
5152 bool AArch64ABIInfo::isHomogeneousAggregateSmallEnough(
const Type *Base,
5153 uint64_t Members)
const {
5154 return Members <= 4;
5165 BaseTy = llvm::PointerType::getUnqual(BaseTy);
5169 unsigned NumRegs = 1;
5170 if (llvm::ArrayType *ArrTy = dyn_cast<llvm::ArrayType>(BaseTy)) {
5171 BaseTy = ArrTy->getElementType();
5172 NumRegs = ArrTy->getNumElements();
5174 bool IsFPR = BaseTy->isFloatingPointTy() || BaseTy->isVectorTy();
5199 int RegSize = IsIndirect ? 8 : TyInfo.first.
getQuantity();
5208 RegSize = llvm::alignTo(RegSize, 8);
5217 RegSize = 16 * NumRegs;
5229 UsingStack = CGF.
Builder.CreateICmpSGE(
5230 reg_offs, llvm::ConstantInt::get(CGF.
Int32Ty, 0));
5232 CGF.
Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock);
5241 if (!IsFPR && !IsIndirect && TyAlign.
getQuantity() > 8) {
5244 reg_offs = CGF.
Builder.CreateAdd(
5245 reg_offs, llvm::ConstantInt::get(CGF.
Int32Ty, Align - 1),
5247 reg_offs = CGF.
Builder.CreateAnd(
5248 reg_offs, llvm::ConstantInt::get(CGF.
Int32Ty, -Align),
5257 NewOffset = CGF.
Builder.CreateAdd(
5258 reg_offs, llvm::ConstantInt::get(CGF.
Int32Ty, RegSize),
"new_reg_offs");
5264 InRegs = CGF.
Builder.CreateICmpSLE(
5265 NewOffset, llvm::ConstantInt::get(CGF.
Int32Ty, 0),
"inreg");
5267 CGF.
Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock);
5279 reg_top_offset,
"reg_top_p");
5281 Address BaseAddr(CGF.
Builder.CreateInBoundsGEP(reg_top, reg_offs),
5289 MemTy = llvm::PointerType::getUnqual(MemTy);
5292 const Type *Base =
nullptr;
5293 uint64_t NumMembers = 0;
5295 if (IsHFA && NumMembers > 1) {
5300 assert(!IsIndirect &&
"Homogeneous aggregates should be passed directly");
5303 llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers);
5305 std::max(TyAlign, BaseTyInfo.second));
5310 BaseTyInfo.first.getQuantity() < 16)
5311 Offset = 16 - BaseTyInfo.first.getQuantity();
5313 for (
unsigned i = 0; i < NumMembers; ++i) {
5331 CharUnits SlotSize = BaseAddr.getAlignment();
5334 TyInfo.first < SlotSize) {
5358 OnStackPtr = CGF.
Builder.CreatePtrToInt(OnStackPtr, CGF.
Int64Ty);
5360 OnStackPtr = CGF.
Builder.CreateAdd(
5361 OnStackPtr, llvm::ConstantInt::get(CGF.
Int64Ty, Align - 1),
5363 OnStackPtr = CGF.
Builder.CreateAnd(
5364 OnStackPtr, llvm::ConstantInt::get(CGF.
Int64Ty, -Align),
5369 Address OnStackAddr(OnStackPtr,
5376 StackSize = StackSlotSize;
5378 StackSize = TyInfo.first.
alignTo(StackSlotSize);
5382 CGF.
Builder.CreateInBoundsGEP(OnStackPtr, StackSizeC,
"new_stack");
5388 TyInfo.first < StackSlotSize) {
5403 OnStackAddr, OnStackBlock,
"vaargs.addr");
5435 bool IsIndirect =
false;
5436 if (TyInfo.first.getQuantity() > 16) {
5437 const Type *Base =
nullptr;
5438 uint64_t Members = 0;
5443 TyInfo, SlotSize,
true);
5478 bool isEABI()
const {
5479 switch (
getTarget().getTriple().getEnvironment()) {
5480 case llvm::Triple::Android:
5481 case llvm::Triple::EABI:
5482 case llvm::Triple::EABIHF:
5483 case llvm::Triple::GNUEABI:
5484 case llvm::Triple::GNUEABIHF:
5485 case llvm::Triple::MuslEABI:
5486 case llvm::Triple::MuslEABIHF:
5493 bool isEABIHF()
const {
5494 switch (
getTarget().getTriple().getEnvironment()) {
5495 case llvm::Triple::EABIHF:
5496 case llvm::Triple::GNUEABIHF:
5497 case llvm::Triple::MuslEABIHF:
5504 ABIKind getABIKind()
const {
return Kind; }
5509 bool isIllegalVectorType(
QualType Ty)
const;
5513 uint64_t Members)
const override;
5524 bool shouldPassIndirectlyForSwift(
CharUnits totalSize,
5526 bool asReturnValue)
const override {
5529 bool isSwiftErrorInRegister()
const override {
5533 unsigned elts)
const override;
5541 const ARMABIInfo &getABIInfo()
const {
5549 StringRef getARCRetainAutoreleasedReturnValueMarker()
const override {
5550 return "mov\tr7, r7\t\t// marker for objc_retainAutoreleaseReturnValue";
5562 unsigned getSizeOfUnwindException()
const override {
5563 if (getABIInfo().isEABI())
return 88;
5567 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
5570 if (!IsForDefinition)
5572 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
5576 const ARMInterruptAttr *
Attr = FD->
getAttr<ARMInterruptAttr>();
5581 switch (Attr->getInterrupt()) {
5582 case ARMInterruptAttr::Generic: Kind =
"";
break;
5583 case ARMInterruptAttr::IRQ: Kind =
"IRQ";
break;
5584 case ARMInterruptAttr::FIQ: Kind =
"FIQ";
break;
5585 case ARMInterruptAttr::SWI: Kind =
"SWI";
break;
5586 case ARMInterruptAttr::ABORT: Kind =
"ABORT";
break;
5587 case ARMInterruptAttr::UNDEF: Kind =
"UNDEF";
break;
5590 llvm::Function *Fn = cast<llvm::Function>(GV);
5592 Fn->addFnAttr(
"interrupt", Kind);
5594 ARMABIInfo::ABIKind ABI = cast<ARMABIInfo>(getABIInfo()).getABIKind();
5595 if (ABI == ARMABIInfo::APCS)
5601 llvm::AttrBuilder B;
5602 B.addStackAlignmentAttr(8);
5603 Fn->addAttributes(llvm::AttributeList::FunctionIndex, B);
5607 class WindowsARMTargetCodeGenInfo :
public ARMTargetCodeGenInfo {
5610 : ARMTargetCodeGenInfo(CGT, K) {}
5612 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
5616 void getDependentLibraryOption(llvm::StringRef Lib,
5618 Opt =
"/DEFAULTLIB:" + qualifyWindowsLibrary(Lib);
5621 void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef
Value,
5623 Opt =
"/FAILIFMISMATCH:\"" + Name.str() +
"=" + Value.str() +
"\"";
5627 void WindowsARMTargetCodeGenInfo::setTargetAttributes(
5630 ARMTargetCodeGenInfo::setTargetAttributes(D, GV, CGM, IsForDefinition);
5631 if (!IsForDefinition)
5633 addStackProbeSizeTargetAttribute(D, GV, CGM);
5657 if (isEABIHF() ||
getTarget().getTriple().isWatchABI())
5658 return llvm::CallingConv::ARM_AAPCS_VFP;
5660 return llvm::CallingConv::ARM_AAPCS;
5662 return llvm::CallingConv::ARM_APCS;
5668 switch (getABIKind()) {
5669 case APCS:
return llvm::CallingConv::ARM_APCS;
5670 case AAPCS:
return llvm::CallingConv::ARM_AAPCS;
5671 case AAPCS_VFP:
return llvm::CallingConv::ARM_AAPCS_VFP;
5672 case AAPCS16_VFP:
return llvm::CallingConv::ARM_AAPCS_VFP;
5674 llvm_unreachable(
"bad ABI kind");
5677 void ARMABIInfo::setCCs() {
5683 if (abiCC != getLLVMDefaultCC())
5695 if (abiCC != getLLVMDefaultCC())
5700 bool isVariadic)
const {
5708 bool IsEffectivelyAAPCS_VFP = getABIKind() == AAPCS_VFP && !isVariadic;
5713 if (isIllegalVectorType(Ty)) {
5737 llvm::Type *ResType = IsEffectivelyAAPCS_VFP ?
5746 Ty = EnumTy->getDecl()->getIntegerType();
5761 if (IsEffectivelyAAPCS_VFP) {
5764 const Type *Base =
nullptr;
5765 uint64_t Members = 0;
5767 assert(Base &&
"Base class should be set for homogeneous aggregate");
5771 }
else if (getABIKind() == ARMABIInfo::AAPCS16_VFP) {
5775 const Type *Base =
nullptr;
5776 uint64_t Members = 0;
5778 assert(Base && Members <= 4 &&
"unexpected homogeneous aggregate");
5785 if (getABIKind() == ARMABIInfo::AAPCS16_VFP &&
5798 uint64_t ABIAlign = 4;
5800 if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
5801 getABIKind() == ARMABIInfo::AAPCS)
5805 assert(getABIKind() != ARMABIInfo::AAPCS16_VFP &&
"unexpected byval");
5808 TyAlign > ABIAlign);
5813 if (
getTarget().isRenderScriptTarget()) {
5834 llvm::LLVMContext &VMContext) {
5866 if (!RT)
return false;
5877 bool HadField =
false;
5880 i != e; ++i, ++idx) {
5919 bool isVariadic)
const {
5920 bool IsEffectivelyAAPCS_VFP =
5921 (getABIKind() == AAPCS_VFP || getABIKind() == AAPCS16_VFP) && !isVariadic;
5935 llvm::Type *ResType = IsEffectivelyAAPCS_VFP ?
5944 RetTy = EnumTy->getDecl()->getIntegerType();
5951 if (getABIKind() == APCS) {
5984 if (IsEffectivelyAAPCS_VFP) {
5985 const Type *Base =
nullptr;
5986 uint64_t Members = 0;
5988 assert(Base &&
"Base class should be set for homogeneous aggregate");
6000 if (
getTarget().isRenderScriptTarget()) {
6013 }
else if (Size <= 128 && getABIKind() == AAPCS16_VFP) {
6016 llvm::ArrayType::get(Int32Ty, llvm::alignTo(Size, 32) / 32);
6024 bool ARMABIInfo::isIllegalVectorType(
QualType Ty)
const {
6032 unsigned NumElements = VT->getNumElements();
6034 if (!llvm::isPowerOf2_32(NumElements) && NumElements != 3)
6038 unsigned NumElements = VT->getNumElements();
6041 if (!llvm::isPowerOf2_32(NumElements))
6050 bool ARMABIInfo::isLegalVectorTypeForSwift(
CharUnits vectorSize,
6052 unsigned numElts)
const {
6053 if (!llvm::isPowerOf2_32(numElts))
6055 unsigned size =
getDataLayout().getTypeStoreSizeInBits(eltTy);
6064 bool ARMABIInfo::isHomogeneousAggregateBaseType(
QualType Ty)
const {
6068 if (BT->getKind() == BuiltinType::Float ||
6069 BT->getKind() == BuiltinType::Double ||
6070 BT->getKind() == BuiltinType::LongDouble)
6074 if (VecSize == 64 || VecSize == 128)
6080 bool ARMABIInfo::isHomogeneousAggregateSmallEnough(
const Type *Base,
6081 uint64_t Members)
const {
6082 return Members <= 4;
6097 CharUnits TyAlignForABI = TyInfo.second;
6100 bool IsIndirect =
false;
6101 const Type *Base =
nullptr;
6102 uint64_t Members = 0;
6109 getABIKind() == ARMABIInfo::AAPCS16_VFP &&
6117 }
else if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
6118 getABIKind() == ARMABIInfo::AAPCS) {
6121 }
else if (getABIKind() == ARMABIInfo::AAPCS16_VFP) {
6128 TyInfo.second = TyAlignForABI;
6140 class NVPTXABIInfo :
public ABIInfo {
6157 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
6164 static void addNVVMMetadata(llvm::Function *F, StringRef Name,
int Operand);
6177 RetTy = EnumTy->getDecl()->getIntegerType();
6186 Ty = EnumTy->getDecl()->getIntegerType();
6211 llvm_unreachable(
"NVPTX does not support varargs");
6214 void NVPTXTargetCodeGenInfo::setTargetAttributes(
6217 if (!IsForDefinition)
6219 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
6222 llvm::Function *F = cast<llvm::Function>(GV);
6228 if (FD->
hasAttr<OpenCLKernelAttr>()) {
6231 addNVVMMetadata(F,
"kernel", 1);
6233 F->addFnAttr(llvm::Attribute::NoInline);
6242 if (FD->
hasAttr<CUDAGlobalAttr>()) {
6244 addNVVMMetadata(F,
"kernel", 1);
6246 if (CUDALaunchBoundsAttr *
Attr = FD->
getAttr<CUDALaunchBoundsAttr>()) {
6248 llvm::APSInt MaxThreads(32);
6249 MaxThreads =
Attr->getMaxThreads()->EvaluateKnownConstInt(M.
getContext());
6251 addNVVMMetadata(F,
"maxntidx", MaxThreads.getExtValue());
6256 if (
Attr->getMinBlocks()) {
6257 llvm::APSInt MinBlocks(32);
6258 MinBlocks =
Attr->getMinBlocks()->EvaluateKnownConstInt(M.
getContext());
6261 addNVVMMetadata(F,
"minctasm", MinBlocks.getExtValue());
6267 void NVPTXTargetCodeGenInfo::addNVVMMetadata(llvm::Function *F, StringRef Name,
6269 llvm::Module *M = F->getParent();
6273 llvm::NamedMDNode *MD = M->getOrInsertNamedMetadata(
"nvvm.annotations");
6275 llvm::Metadata *MDVals[] = {
6276 llvm::ConstantAsMetadata::get(F), llvm::MDString::get(Ctx, Name),
6277 llvm::ConstantAsMetadata::get(
6278 llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), Operand))};
6280 MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
6297 bool isPromotableIntegerType(
QualType Ty)
const;
6298 bool isCompoundType(
QualType Ty)
const;
6299 bool isVectorArgumentType(
QualType Ty)
const;
6300 bool isFPArgumentType(
QualType Ty)
const;
6310 I.info = classifyArgumentType(I.type);
6316 bool shouldPassIndirectlyForSwift(
CharUnits totalSize,
6318 bool asReturnValue)
const override {
6321 bool isSwiftErrorInRegister()
const override {
6334 bool SystemZABIInfo::isPromotableIntegerType(
QualType Ty)
const {
6337 Ty = EnumTy->getDecl()->getIntegerType();
6345 switch (BT->getKind()) {
6346 case BuiltinType::Int:
6347 case BuiltinType::UInt:
6355 bool SystemZABIInfo::isCompoundType(
QualType Ty)
const {
6361 bool SystemZABIInfo::isVectorArgumentType(
QualType Ty)
const {
6362 return (HasVector &&
6367 bool SystemZABIInfo::isFPArgumentType(
QualType Ty)
const {
6369 switch (BT->getKind()) {
6370 case BuiltinType::Float:
6371 case BuiltinType::Double:
6386 if (
const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
6387 for (
const auto &I : CXXRD->bases()) {
6396 Found = GetSingleElementType(Base);
6400 for (
const auto *FD : RD->
fields()) {
6405 FD->isBitField() && FD->getBitWidthValue(
getContext()) == 0)
6412 Found = GetSingleElementType(FD->getType());
6443 bool InFPRs =
false;
6444 bool IsVector =
false;
6448 DirectTy = llvm::PointerType::getUnqual(DirectTy);
6453 InFPRs = ArgTy->isFloatTy() || ArgTy->isDoubleTy();
6454 IsVector = ArgTy->isVectorTy();
6455 UnpaddedSize = TyInfo.first;
6456 DirectAlign = TyInfo.second;
6459 if (IsVector && UnpaddedSize > PaddedSize)
6461 assert((UnpaddedSize <= PaddedSize) &&
"Invalid argument size.");
6463 CharUnits Padding = (PaddedSize - UnpaddedSize);
6467 llvm::ConstantInt::get(IndexTy, PaddedSize.
getQuantity());
6475 "overflow_arg_area_ptr");
6485 "overflow_arg_area");
6493 unsigned MaxRegs, RegCountField, RegSaveIndex;
6504 RegPadding = Padding;
6511 llvm::Value *MaxRegsV = llvm::ConstantInt::get(IndexTy, MaxRegs);
6518 CGF.
Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
6525 CGF.
Builder.CreateMul(RegCount, PaddedSizeV,
"scaled_reg_count");
6527 llvm::ConstantInt::get(IndexTy, RegSaveIndex * PaddedSize.
getQuantity()
6530 CGF.
Builder.CreateAdd(ScaledRegCount, RegBase,
"reg_offset");
6533 "reg_save_area_ptr");
6543 llvm::Value *One = llvm::ConstantInt::get(IndexTy, 1);
6545 CGF.
Builder.CreateAdd(RegCount, One,
"reg_count");
6566 "overflow_arg_area");
6573 MemAddr, InMemBlock,
"va_arg.addr");
6585 if (isVectorArgumentType(RetTy))
6589 return (isPromotableIntegerType(RetTy) ?
6599 if (isPromotableIntegerType(Ty))
6606 QualType SingleElementTy = GetSingleElementType(Ty);
6607 if (isVectorArgumentType(SingleElementTy) &&
6608 getContext().getTypeSize(SingleElementTy) == Size)
6612 if (Size != 8 && Size != 16 && Size != 32 && Size != 64)
6625 if (isFPArgumentType(SingleElementTy)) {
6626 assert(Size == 32 || Size == 64);
6637 if (isCompoundType(Ty))
6653 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
6660 void MSP430TargetCodeGenInfo::setTargetAttributes(
6663 if (!IsForDefinition)
6665 if (
const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
6666 if (
const MSP430InterruptAttr *attr = FD->getAttr<MSP430InterruptAttr>()) {
6668 llvm::Function *F = cast<llvm::Function>(GV);
6671 F->setCallingConv(llvm::CallingConv::MSP430_INTR);
6674 F->addFnAttr(llvm::Attribute::NoInline);
6677 unsigned Num = attr->getNumber() / 2;
6679 "__isr_" + Twine(Num), F);
6690 class MipsABIInfo :
public ABIInfo {
6692 unsigned MinABIStackAlignInBytes, StackAlignInBytes;
6693 void CoerceToIntArgs(uint64_t TySize,
6700 ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8),
6701 StackAlignInBytes(IsO32 ? 8 : 16) {}
6712 unsigned SizeOfUnwindException;
6716 SizeOfUnwindException(IsO32 ? 24 : 32) {}
6722 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
6725 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
6727 llvm::Function *Fn = cast<llvm::Function>(GV);
6729 if (FD->
hasAttr<MipsLongCallAttr>())
6730 Fn->addFnAttr(
"long-call");
6731 else if (FD->
hasAttr<MipsShortCallAttr>())
6732 Fn->addFnAttr(
"short-call");
6735 if (!IsForDefinition)
6738 if (FD->
hasAttr<Mips16Attr>()) {
6739 Fn->addFnAttr(
"mips16");
6741 else if (FD->
hasAttr<NoMips16Attr>()) {
6742 Fn->addFnAttr(
"nomips16");
6745 if (FD->
hasAttr<MicroMipsAttr>())
6746 Fn->addFnAttr(
"micromips");
6747 else if (FD->
hasAttr<NoMicroMipsAttr>())
6748 Fn->addFnAttr(
"nomicromips");
6750 const MipsInterruptAttr *
Attr = FD->
getAttr<MipsInterruptAttr>();
6755 switch (Attr->getInterrupt()) {
6756 case MipsInterruptAttr::eic: Kind =
"eic";
break;
6757 case MipsInterruptAttr::sw0: Kind =
"sw0";
break;
6758 case MipsInterruptAttr::sw1: Kind =
"sw1";
break;
6759 case MipsInterruptAttr::hw0: Kind =
"hw0";
break;
6760 case MipsInterruptAttr::hw1: Kind =
"hw1";
break;
6761 case MipsInterruptAttr::hw2: Kind =
"hw2";
break;
6762 case MipsInterruptAttr::hw3: Kind =
"hw3";
break;
6763 case MipsInterruptAttr::hw4: Kind =
"hw4";
break;
6764 case MipsInterruptAttr::hw5: Kind =
"hw5";
break;
6767 Fn->addFnAttr(
"interrupt", Kind);
6774 unsigned getSizeOfUnwindException()
const override {
6775 return SizeOfUnwindException;
6780 void MipsABIInfo::CoerceToIntArgs(
6782 llvm::IntegerType *IntTy =
6783 llvm::IntegerType::get(
getVMContext(), MinABIStackAlignInBytes * 8);
6786 for (
unsigned N = TySize / (MinABIStackAlignInBytes * 8); N; --N)
6787 ArgList.push_back(IntTy);
6790 unsigned R = TySize % (MinABIStackAlignInBytes * 8);
6793 ArgList.push_back(llvm::IntegerType::get(
getVMContext(), R));
6802 CoerceToIntArgs(TySize, ArgList);
6813 CoerceToIntArgs(TySize, ArgList);
6819 assert(!(TySize % 8) &&
"Size of structure must be multiple of 8.");
6821 uint64_t LastOffset = 0;
6823 llvm::IntegerType *I64 = llvm::IntegerType::get(
getVMContext(), 64);
6828 i != e; ++i, ++idx) {
6832 if (!BT || BT->
getKind() != BuiltinType::Double)
6840 for (
unsigned j = (Offset - LastOffset) / 64; j > 0; --j)
6841 ArgList.push_back(I64);
6844 ArgList.push_back(llvm::Type::getDoubleTy(
getVMContext()));
6845 LastOffset = Offset + 64;
6848 CoerceToIntArgs(TySize - LastOffset, IntArgList);
6849 ArgList.append(IntArgList.begin(), IntArgList.end());
6854 llvm::Type *MipsABIInfo::getPaddingType(uint64_t OrigOffset,
6856 if (OrigOffset + MinABIStackAlignInBytes > Offset)
6859 return llvm::IntegerType::get(
getVMContext(), (Offset - OrigOffset) * 8);
6866 uint64_t OrigOffset =
Offset;
6871 (uint64_t)StackAlignInBytes);
6872 unsigned CurrOffset = llvm::alignTo(Offset, Align);
6873 Offset = CurrOffset + llvm::alignTo(TySize, Align * 8) / 8;
6881 Offset = OrigOffset + MinABIStackAlignInBytes;
6890 getPaddingType(OrigOffset, CurrOffset));
6897 Ty = EnumTy->getDecl()->getIntegerType();
6904 nullptr, 0, IsO32 ?
nullptr : getPaddingType(OrigOffset, CurrOffset));
6908 MipsABIInfo::returnAggregateInRegs(
QualType RetTy, uint64_t Size)
const {
6928 for (; b != e; ++b) {
6945 CoerceToIntArgs(Size, RTList);
6957 if (!IsO32 && Size == 0)
6981 RetTy = EnumTy->getDecl()->getIntegerType();
7005 unsigned SlotSizeInBits = IsO32 ? 32 : 64;
7007 bool DidPromote =
false;
7027 TyInfo, ArgSlotSize,
true);
7050 bool MipsABIInfo::shouldSignExtUnsignedType(
QualType Ty)
const {
7098 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
7101 if (!IsForDefinition)
7103 const auto *FD = dyn_cast_or_null<FunctionDecl>(D);
7105 auto *Fn = cast<llvm::Function>(GV);
7107 if (FD->getAttr<AVRInterruptAttr>())
7108 Fn->addFnAttr(
"interrupt");
7110 if (FD->getAttr<AVRSignalAttr>())
7111 Fn->addFnAttr(
"signal");
7124 class TCETargetCodeGenInfo :
public DefaultTargetCodeGenInfo {
7127 : DefaultTargetCodeGenInfo(CGT) {}
7129 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
7134 void TCETargetCodeGenInfo::setTargetAttributes(
7137 if (!IsForDefinition)
7139 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
7142 llvm::Function *F = cast<llvm::Function>(GV);
7145 if (FD->
hasAttr<OpenCLKernelAttr>()) {
7147 F->addFnAttr(llvm::Attribute::NoInline);
7148 const ReqdWorkGroupSizeAttr *
Attr = FD->
getAttr<ReqdWorkGroupSizeAttr>();
7151 llvm::LLVMContext &Context = F->getContext();
7152 llvm::NamedMDNode *OpenCLMetadata =
7154 "opencl.kernel_wg_size_info");
7157 Operands.push_back(llvm::ConstantAsMetadata::get(F));
7160 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
7161 M.
Int32Ty, llvm::APInt(32, Attr->getXDim()))));
7163 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
7164 M.
Int32Ty, llvm::APInt(32, Attr->getYDim()))));
7166 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
7167 M.
Int32Ty, llvm::APInt(32, Attr->getZDim()))));
7173 llvm::ConstantAsMetadata::get(llvm::ConstantInt::getTrue(Context)));
7174 OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Operands));
7188 class HexagonABIInfo :
public ABIInfo {
7228 Ty = EnumTy->getDecl()->getIntegerType();
7266 RetTy = EnumTy->getDecl()->getIntegerType();
7306 class LanaiABIInfo :
public DefaultABIInfo {
7310 bool shouldUseInReg(
QualType Ty, CCState &State)
const;
7333 bool LanaiABIInfo::shouldUseInReg(
QualType Ty, CCState &State)
const {
7335 unsigned SizeInRegs = llvm::alignTo(Size, 32U) / 32U;
7337 if (SizeInRegs == 0)
7340 if (SizeInRegs > State.FreeRegs) {
7345 State.FreeRegs -= SizeInRegs;
7351 CCState &State)
const {
7353 if (State.FreeRegs) {
7361 const unsigned MinABIStackAlignInBytes = 4;
7365 MinABIStackAlignInBytes);
7369 CCState &State)
const {
7375 return getIndirectResult(Ty,
false, State);
7384 return getIndirectResult(Ty,
true, State);
7392 if (SizeInRegs <= State.FreeRegs) {
7393 llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
7395 llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
7396 State.FreeRegs -= SizeInRegs;
7401 return getIndirectResult(Ty,
true, State);
7406 Ty = EnumTy->getDecl()->getIntegerType();
7408 bool InReg = shouldUseInReg(Ty, State);
7433 class AMDGPUABIInfo final :
public DefaultABIInfo {
7435 static const unsigned MaxNumRegsForArgsRet = 16;
7437 unsigned numRegsForType(
QualType Ty)
const;
7441 uint64_t Members)
const override;
7445 DefaultABIInfo(CGT) {}
7454 bool AMDGPUABIInfo::isHomogeneousAggregateBaseType(
QualType Ty)
const {
7458 bool AMDGPUABIInfo::isHomogeneousAggregateSmallEnough(
7459 const Type *Base, uint64_t Members)
const {
7463 return Members * NumRegs <= MaxNumRegsForArgsRet;
7467 unsigned AMDGPUABIInfo::numRegsForType(
QualType Ty)
const {
7468 unsigned NumRegs = 0;
7473 QualType EltTy = VT->getElementType();
7478 return (VT->getNumElements() + 1) / 2;
7480 unsigned EltNumRegs = (EltSize + 31) / 32;
7481 return EltNumRegs * VT->getNumElements();
7489 QualType FieldTy = Field->getType();
7490 NumRegs += numRegsForType(FieldTy);
7496 return (
getContext().getTypeSize(Ty) + 31) / 32;
7505 unsigned NumRegsLeft = MaxNumRegsForArgsRet;
7507 if (CC == llvm::CallingConv::AMDGPU_KERNEL) {
7508 Arg.info = classifyKernelArgumentType(Arg.type);
7547 if (numRegsForType(RetTy) <= MaxNumRegsForArgsRet)
7574 unsigned &NumRegsLeft)
const {
7575 assert(NumRegsLeft <= MaxNumRegsForArgsRet &&
"register estimate underflow");
7604 unsigned NumRegs = (Size + 31) / 32;
7605 NumRegsLeft -=
std::min(NumRegsLeft, NumRegs);
7618 if (NumRegsLeft > 0) {
7619 unsigned NumRegs = numRegsForType(Ty);
7620 if (NumRegsLeft >= NumRegs) {
7621 NumRegsLeft -= NumRegs;
7630 unsigned NumRegs = numRegsForType(Ty);
7631 NumRegsLeft -=
std::min(NumRegs, NumRegsLeft);
7641 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
7644 unsigned getOpenCLKernelCallingConv()
const override;
7647 llvm::PointerType *T,
QualType QT)
const override;
7649 LangAS getASTAllocaAddressSpace()
const override {
7654 const VarDecl *D)
const override;
7656 llvm::LLVMContext &C)
const override;
7659 llvm::Function *BlockInvokeFunc,
7664 void AMDGPUTargetCodeGenInfo::setTargetAttributes(
7667 if (!IsForDefinition)
7669 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
7673 llvm::Function *F = cast<llvm::Function>(GV);
7676 FD->
getAttr<ReqdWorkGroupSizeAttr>() :
nullptr;
7677 const auto *FlatWGS = FD->
getAttr<AMDGPUFlatWorkGroupSizeAttr>();
7678 if (ReqdWGS || FlatWGS) {
7679 unsigned Min = FlatWGS ? FlatWGS->getMin() : 0;
7680 unsigned Max = FlatWGS ? FlatWGS->getMax() : 0;
7681 if (ReqdWGS && Min == 0 && Max == 0)
7682 Min = Max = ReqdWGS->getXDim() * ReqdWGS->getYDim() * ReqdWGS->getZDim();
7685 assert(Min <= Max &&
"Min must be less than or equal Max");
7687 std::string AttrVal = llvm::utostr(Min) +
"," + llvm::utostr(Max);
7688 F->addFnAttr(
"amdgpu-flat-work-group-size", AttrVal);
7690 assert(Max == 0 &&
"Max must be zero");
7693 if (
const auto *
Attr = FD->
getAttr<AMDGPUWavesPerEUAttr>()) {
7694 unsigned Min =
Attr->getMin();
7695 unsigned Max =
Attr->getMax();
7698 assert((Max == 0 || Min <= Max) &&
"Min must be less than or equal Max");
7700 std::string AttrVal = llvm::utostr(Min);
7702 AttrVal = AttrVal +
"," + llvm::utostr(Max);
7703 F->addFnAttr(
"amdgpu-waves-per-eu", AttrVal);
7705 assert(Max == 0 &&
"Max must be zero");
7708 if (
const auto *
Attr = FD->
getAttr<AMDGPUNumSGPRAttr>()) {
7709 unsigned NumSGPR =
Attr->getNumSGPR();
7712 F->addFnAttr(
"amdgpu-num-sgpr", llvm::utostr(NumSGPR));
7715 if (
const auto *
Attr = FD->
getAttr<AMDGPUNumVGPRAttr>()) {
7716 uint32_t NumVGPR =
Attr->getNumVGPR();
7719 F->addFnAttr(
"amdgpu-num-vgpr", llvm::utostr(NumVGPR));
7723 unsigned AMDGPUTargetCodeGenInfo::getOpenCLKernelCallingConv()
const {
7724 return llvm::CallingConv::AMDGPU_KERNEL;
7732 llvm::Constant *AMDGPUTargetCodeGenInfo::getNullPointer(
7736 return llvm::ConstantPointerNull::get(PT);
7739 auto NPT = llvm::PointerType::get(PT->getElementType(),
7741 return llvm::ConstantExpr::getAddrSpaceCast(
7742 llvm::ConstantPointerNull::get(NPT), PT);
7746 AMDGPUTargetCodeGenInfo::getGlobalVarAddressSpace(
CodeGenModule &CGM,
7750 "Address space agnostic languages only");
7754 return DefaultGlobalAS;
7763 return ConstAS.getValue();
7765 return DefaultGlobalAS;
7769 AMDGPUTargetCodeGenInfo::getLLVMSyncScopeID(
SyncScope S,
7770 llvm::LLVMContext &C)
const {
7785 return C.getOrInsertSyncScopeID(Name);
7795 class SparcV8ABIInfo :
public DefaultABIInfo {
7858 class SparcV9ABIInfo :
public ABIInfo {
7879 struct CoerceBuilder {
7880 llvm::LLVMContext &Context;
7881 const llvm::DataLayout &DL;
7886 CoerceBuilder(llvm::LLVMContext &c,
const llvm::DataLayout &dl)
7887 : Context(c), DL(dl), Size(0), InReg(
false) {}
7890 void pad(uint64_t ToSize) {
7891 assert(ToSize >= Size &&
"Cannot remove elements");
7896 uint64_t Aligned = llvm::alignTo(Size, 64);
7897 if (Aligned > Size && Aligned <= ToSize) {
7898 Elems.push_back(llvm::IntegerType::get(Context, Aligned - Size));
7903 while (Size + 64 <= ToSize) {
7904 Elems.push_back(llvm::Type::getInt64Ty(Context));
7909 if (Size < ToSize) {
7910 Elems.push_back(llvm::IntegerType::get(Context, ToSize - Size));
7924 Elems.push_back(Ty);
7925 Size = Offset + Bits;
7929 void addStruct(uint64_t Offset, llvm::StructType *StrTy) {
7930 const llvm::StructLayout *Layout = DL.getStructLayout(StrTy);
7931 for (
unsigned i = 0, e = StrTy->getNumElements(); i != e; ++i) {
7932 llvm::Type *ElemTy = StrTy->getElementType(i);
7933 uint64_t ElemOffset = Offset + Layout->getElementOffsetInBits(i);
7934 switch (ElemTy->getTypeID()) {
7935 case llvm::Type::StructTyID:
7936 addStruct(ElemOffset, cast<llvm::StructType>(ElemTy));
7938 case llvm::Type::FloatTyID:
7939 addFloat(ElemOffset, ElemTy, 32);
7941 case llvm::Type::DoubleTyID:
7942 addFloat(ElemOffset, ElemTy, 64);
7944 case llvm::Type::FP128TyID:
7945 addFloat(ElemOffset, ElemTy, 128);
7947 case llvm::Type::PointerTyID:
7948 if (ElemOffset % 64 == 0) {
7950 Elems.push_back(ElemTy);
7961 bool isUsableType(llvm::StructType *Ty)
const {
7962 return llvm::makeArrayRef(Elems) == Ty->elements();
7967 if (Elems.size() == 1)
7968 return Elems.front();
7970 return llvm::StructType::get(Context, Elems);
7985 if (Size > SizeLimit)
7990 Ty = EnumTy->getDecl()->getIntegerType();
7993 if (Size < 64 && Ty->isIntegerType())
8007 llvm::StructType *StrTy = dyn_cast<llvm::StructType>(
CGT.
ConvertType(Ty));
8012 CB.addStruct(0, StrTy);
8013 CB.pad(llvm::alignTo(CB.DL.getTypeSizeInBits(StrTy), 64));
8016 llvm::Type *CoerceTy = CB.isUsableType(StrTy) ? StrTy : CB.getType();
8035 llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
8045 llvm_unreachable(
"Unsupported ABI kind for va_arg");
8077 return Builder.
CreateBitCast(ArgAddr, ArgPtrTy,
"arg.addr");
8109 llvm::IntegerType *i8 = CGF.
Int8Ty;
8110 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
8111 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
8200 class TypeStringCache {
8201 enum Status {NonRecursive, Recursive,
Incomplete, IncompleteUsed};
8205 std::string Swapped;
8208 std::map<const IdentifierInfo *, struct Entry> Map;
8209 unsigned IncompleteCount;
8210 unsigned IncompleteUsedCount;
8212 TypeStringCache() : IncompleteCount(0), IncompleteUsedCount(0) {}
8222 class FieldEncoding {
8226 FieldEncoding(
bool b, SmallStringEnc &e) : HasName(b), Enc(e.c_str()) {}
8227 StringRef str() {
return Enc; }
8228 bool operator<(
const FieldEncoding &rhs)
const {
8229 if (HasName != rhs.HasName)
return HasName;
8230 return Enc < rhs.Enc;
8234 class XCoreABIInfo :
public DefaultABIInfo {
8242 mutable TypeStringCache TSC;
8246 void emitTargetMD(
const Decl *D, llvm::GlobalValue *GV,
8266 if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
8267 AI.setCoerceToType(ArgTy);
8268 llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
8272 switch (AI.getKind()) {
8276 llvm_unreachable(
"Unsupported ABI kind for va_arg");
8278 Val =
Address(llvm::UndefValue::get(ArgPtrTy), TypeAlign);
8286 ArgSize = ArgSize.
alignTo(SlotSize);
8311 std::string StubEnc) {
8315 assert( (E.Str.empty() || E.State == Recursive) &&
8316 "Incorrectly use of addIncomplete");
8317 assert(!StubEnc.empty() &&
"Passing an empty string to addIncomplete()");
8318 E.Swapped.swap(E.Str);
8319 E.Str.swap(StubEnc);
8328 bool TypeStringCache::removeIncomplete(
const IdentifierInfo *ID) {
8331 auto I = Map.find(ID);
8332 assert(I != Map.end() &&
"Entry not present");
8333 Entry &E = I->second;
8335 E.State == IncompleteUsed) &&
8336 "Entry must be an incomplete type");
8337 bool IsRecursive =
false;
8338 if (E.State == IncompleteUsed) {
8341 --IncompleteUsedCount;
8343 if (E.Swapped.empty())
8347 E.Swapped.swap(E.Str);
8349 E.State = Recursive;
8357 void TypeStringCache::addIfComplete(
const IdentifierInfo *ID, StringRef Str,
8359 if (!ID || IncompleteUsedCount)
8362 if (IsRecursive && !E.Str.empty()) {
8363 assert(E.State==Recursive && E.Str.size() == Str.size() &&
8364 "This is not the same Recursive entry");
8370 assert(E.Str.empty() &&
"Entry already present");
8372 E.State = IsRecursive? Recursive : NonRecursive;
8381 auto I = Map.find(ID);
8384 Entry &E = I->second;
8385 if (E.State == Recursive && IncompleteCount)
8390 E.State = IncompleteUsed;
8391 ++IncompleteUsedCount;
8412 void XCoreTargetCodeGenInfo::emitTargetMD(
const Decl *D, llvm::GlobalValue *GV,
8416 llvm::LLVMContext &Ctx = CGM.
getModule().getContext();
8417 llvm::Metadata *MDVals[] = {llvm::ConstantAsMetadata::get(GV),
8418 llvm::MDString::get(Ctx, Enc.str())};
8419 llvm::NamedMDNode *MD =
8420 CGM.
getModule().getOrInsertNamedMetadata(
"xcore.typestrings");
8421 MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
8434 unsigned getOpenCLKernelCallingConv()
const override;
8442 DefaultABIInfo SPIRABI(CGM.
getTypes());
8443 SPIRABI.computeInfo(FI);
8448 unsigned SPIRTargetCodeGenInfo::getOpenCLKernelCallingConv()
const {
8449 return llvm::CallingConv::SPIR_KERNEL;
8454 TypeStringCache &TSC);
8462 TypeStringCache &TSC) {
8463 for (
const auto *Field : RD->
fields()) {
8466 Enc += Field->getName();
8468 if (Field->isBitField()) {
8470 llvm::raw_svector_ostream OS(Enc);
8471 OS << Field->getBitWidthValue(CGM.
getContext());
8474 if (!
appendType(Enc, Field->getType(), CGM, TSC))
8476 if (Field->isBitField())
8479 FE.emplace_back(!Field->getName().empty(), Enc);
8491 StringRef TypeString = TSC.lookupStr(ID);
8492 if (!TypeString.empty()) {
8498 size_t Start = Enc.size();
8506 bool IsRecursive =
false;
8513 std::string StubEnc(Enc.substr(Start).str());
8515 TSC.addIncomplete(ID, std::move(StubEnc));
8517 (void) TSC.removeIncomplete(ID);
8520 IsRecursive = TSC.removeIncomplete(ID);
8524 std::sort(FE.begin(), FE.end());
8526 unsigned E = FE.size();
8527 for (
unsigned I = 0; I != E; ++I) {
8534 TSC.addIfComplete(ID, Enc.substr(Start), IsRecursive);
8540 TypeStringCache &TSC,
8543 StringRef TypeString = TSC.lookupStr(ID);
8544 if (!TypeString.empty()) {
8549 size_t Start = Enc.size();
8558 for (
auto I = ED->enumerator_begin(), E = ED->enumerator_end(); I != E;
8560 SmallStringEnc EnumEnc;
8562 EnumEnc += I->getName();
8564 I->getInitVal().toString(EnumEnc);
8566 FE.push_back(FieldEncoding(!I->getName().empty(), EnumEnc));
8568 std::sort(FE.begin(), FE.end());
8569 unsigned E = FE.size();
8570 for (
unsigned I = 0; I != E; ++I) {
8577 TSC.addIfComplete(ID, Enc.substr(Start),
false);
8585 static const char *
const Table[]={
"",
"c:",
"r:",
"cr:",
"v:",
"cv:",
"rv:",
"crv:"};
8593 Enc += Table[Lookup];
8598 const char *EncType;
8600 case BuiltinType::Void:
8603 case BuiltinType::Bool:
8606 case BuiltinType::Char_U:
8609 case BuiltinType::UChar:
8612 case BuiltinType::SChar:
8615 case BuiltinType::UShort:
8618 case BuiltinType::Short:
8621 case BuiltinType::UInt:
8624 case BuiltinType::Int:
8627 case BuiltinType::ULong:
8630 case BuiltinType::Long:
8633 case BuiltinType::ULongLong:
8636 case BuiltinType::LongLong:
8639 case BuiltinType::Float:
8642 case BuiltinType::Double:
8645 case BuiltinType::LongDouble:
8658 TypeStringCache &TSC) {
8670 TypeStringCache &TSC, StringRef NoSizeEnc) {
8675 CAT->getSize().toStringUnsigned(Enc);
8691 TypeStringCache &TSC) {
8698 auto I = FPT->param_type_begin();
8699 auto E = FPT->param_type_end();
8708 if (FPT->isVariadic())
8711 if (FPT->isVariadic())
8725 TypeStringCache &TSC) {
8762 if (
const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
8765 return appendType(Enc, FD->getType(), CGM, TSC);
8768 if (
const VarDecl *VD = dyn_cast<VarDecl>(D)) {
8771 QualType QT = VD->getType().getCanonicalType();
8789 return getTriple().supportsCOMDAT();
8793 if (TheTargetCodeGenInfo)
8794 return *TheTargetCodeGenInfo;
8798 this->TheTargetCodeGenInfo.reset(
P);
8803 switch (Triple.getArch()) {
8805 return SetCGInfo(
new DefaultTargetCodeGenInfo(Types));
8807 case llvm::Triple::le32:
8808 return SetCGInfo(
new PNaClTargetCodeGenInfo(Types));
8809 case llvm::Triple::mips:
8810 case llvm::Triple::mipsel:
8811 if (Triple.getOS() == llvm::Triple::NaCl)
8812 return SetCGInfo(
new PNaClTargetCodeGenInfo(Types));
8813 return SetCGInfo(
new MIPSTargetCodeGenInfo(Types,
true));
8815 case llvm::Triple::mips64:
8816 case llvm::Triple::mips64el:
8817 return SetCGInfo(
new MIPSTargetCodeGenInfo(Types,
false));
8819 case llvm::Triple::avr:
8820 return SetCGInfo(
new AVRTargetCodeGenInfo(Types));
8822 case llvm::Triple::aarch64:
8823 case llvm::Triple::aarch64_be: {
8824 AArch64ABIInfo::ABIKind
Kind = AArch64ABIInfo::AAPCS;
8825 if (
getTarget().getABI() ==
"darwinpcs")
8826 Kind = AArch64ABIInfo::DarwinPCS;
8827 else if (Triple.isOSWindows())
8829 new WindowsAArch64TargetCodeGenInfo(Types, AArch64ABIInfo::Win64));
8831 return SetCGInfo(
new AArch64TargetCodeGenInfo(Types, Kind));
8834 case llvm::Triple::wasm32:
8835 case llvm::Triple::wasm64:
8836 return SetCGInfo(
new WebAssemblyTargetCodeGenInfo(Types));
8838 case llvm::Triple::arm:
8839 case llvm::Triple::armeb:
8840 case llvm::Triple::thumb:
8841 case llvm::Triple::thumbeb: {
8842 if (Triple.getOS() == llvm::Triple::Win32) {
8844 new WindowsARMTargetCodeGenInfo(Types, ARMABIInfo::AAPCS_VFP));
8847 ARMABIInfo::ABIKind
Kind = ARMABIInfo::AAPCS;
8849 if (ABIStr ==
"apcs-gnu")
8850 Kind = ARMABIInfo::APCS;
8851 else if (ABIStr ==
"aapcs16")
8852 Kind = ARMABIInfo::AAPCS16_VFP;
8853 else if (CodeGenOpts.FloatABI ==
"hard" ||
8854 (CodeGenOpts.FloatABI !=
"soft" &&
8855 (Triple.getEnvironment() == llvm::Triple::GNUEABIHF ||
8856 Triple.getEnvironment() == llvm::Triple::MuslEABIHF ||
8857 Triple.getEnvironment() == llvm::Triple::EABIHF)))
8858 Kind = ARMABIInfo::AAPCS_VFP;
8860 return SetCGInfo(
new ARMTargetCodeGenInfo(Types, Kind));
8863 case llvm::Triple::ppc:
8865 new PPC32TargetCodeGenInfo(Types, CodeGenOpts.FloatABI ==
"soft"));
8866 case llvm::Triple::ppc64:
8867 if (Triple.isOSBinFormatELF()) {
8868 PPC64_SVR4_ABIInfo::ABIKind
Kind = PPC64_SVR4_ABIInfo::ELFv1;
8870 Kind = PPC64_SVR4_ABIInfo::ELFv2;
8872 bool IsSoftFloat = CodeGenOpts.FloatABI ==
"soft";
8874 return SetCGInfo(
new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX,
8877 return SetCGInfo(
new PPC64TargetCodeGenInfo(Types));
8878 case llvm::Triple::ppc64le: {
8879 assert(Triple.isOSBinFormatELF() &&
"PPC64 LE non-ELF not supported!");
8880 PPC64_SVR4_ABIInfo::ABIKind
Kind = PPC64_SVR4_ABIInfo::ELFv2;
8882 Kind = PPC64_SVR4_ABIInfo::ELFv1;
8884 bool IsSoftFloat = CodeGenOpts.FloatABI ==
"soft";
8886 return SetCGInfo(
new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX,
8890 case llvm::Triple::nvptx:
8891 case llvm::Triple::nvptx64:
8892 return SetCGInfo(
new NVPTXTargetCodeGenInfo(Types));
8894 case llvm::Triple::msp430:
8895 return SetCGInfo(
new MSP430TargetCodeGenInfo(Types));
8897 case llvm::Triple::systemz: {
8899 return SetCGInfo(
new SystemZTargetCodeGenInfo(Types, HasVector));
8902 case llvm::Triple::tce:
8903 case llvm::Triple::tcele:
8904 return SetCGInfo(
new TCETargetCodeGenInfo(Types));
8906 case llvm::Triple::x86: {
8907 bool IsDarwinVectorABI = Triple.isOSDarwin();
8908 bool RetSmallStructInRegABI =
8909 X86_32TargetCodeGenInfo::isStructReturnInRegABI(Triple, CodeGenOpts);
8910 bool IsWin32FloatStructABI = Triple.isOSWindows() && !Triple.isOSCygMing();
8912 if (Triple.getOS() == llvm::Triple::Win32) {
8913 return SetCGInfo(
new WinX86_32TargetCodeGenInfo(
8914 Types, IsDarwinVectorABI, RetSmallStructInRegABI,
8915 IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters));
8917 return SetCGInfo(
new X86_32TargetCodeGenInfo(
8918 Types, IsDarwinVectorABI, RetSmallStructInRegABI,
8919 IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters,
8920 CodeGenOpts.FloatABI ==
"soft"));
8924 case llvm::Triple::x86_64: {
8928 ? X86AVXABILevel::AVX512
8931 switch (Triple.getOS()) {
8932 case llvm::Triple::Win32:
8933 return SetCGInfo(
new WinX86_64TargetCodeGenInfo(Types, AVXLevel));
8934 case llvm::Triple::PS4:
8935 return SetCGInfo(
new PS4TargetCodeGenInfo(Types, AVXLevel));
8937 return SetCGInfo(
new X86_64TargetCodeGenInfo(Types, AVXLevel));
8940 case llvm::Triple::hexagon:
8941 return SetCGInfo(
new HexagonTargetCodeGenInfo(Types));
8942 case llvm::Triple::lanai:
8943 return SetCGInfo(
new LanaiTargetCodeGenInfo(Types));
8944 case llvm::Triple::r600:
8945 return SetCGInfo(
new AMDGPUTargetCodeGenInfo(Types));
8946 case llvm::Triple::amdgcn:
8947 return SetCGInfo(
new AMDGPUTargetCodeGenInfo(Types));
8948 case llvm::Triple::sparc:
8949 return SetCGInfo(
new SparcV8TargetCodeGenInfo(Types));
8950 case llvm::Triple::sparcv9:
8951 return SetCGInfo(
new SparcV9TargetCodeGenInfo(Types));
8952 case llvm::Triple::xcore:
8953 return SetCGInfo(
new XCoreTargetCodeGenInfo(Types));
8954 case llvm::Triple::spir:
8955 case llvm::Triple::spir64:
8956 return SetCGInfo(
new SPIRTargetCodeGenInfo(Types));
8967 llvm::Function *Invoke,
8969 auto *InvokeFT = Invoke->getFunctionType();
8971 for (
auto &
P : InvokeFT->params())
8972 ArgTys.push_back(
P);
8974 std::string Name = Invoke->getName().str() +
"_kernel";
8975 auto *FT = llvm::FunctionType::get(llvm::Type::getVoidTy(C), ArgTys,
false);
8978 auto IP = CGF.
Builder.saveIP();
8981 Builder.SetInsertPoint(BB);
8983 for (
auto &A : F->args())
8985 Builder.CreateCall(Invoke, Args);
8986 Builder.CreateRetVoid();
8987 Builder.restoreIP(IP);
8999 llvm::Function *AMDGPUTargetCodeGenInfo::createEnqueuedBlockKernel(
9005 auto *BlockTy = BlockLiteral->getType()->getPointerElementType();
9006 auto *InvokeFT = Invoke->getFunctionType();
9015 ArgTys.push_back(BlockTy);
9016 ArgTypeNames.push_back(llvm::MDString::get(C,
"__block_literal"));
9017 AddressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(0)));
9018 ArgBaseTypeNames.push_back(llvm::MDString::get(C,
"__block_literal"));
9019 ArgTypeQuals.push_back(llvm::MDString::get(C,
""));
9020 AccessQuals.push_back(llvm::MDString::get(C,
"none"));
9021 ArgNames.push_back(llvm::MDString::get(C,
"block_literal"));
9022 for (
unsigned I = 1, E = InvokeFT->getNumParams(); I < E; ++I) {
9023 ArgTys.push_back(InvokeFT->getParamType(I));
9024 ArgTypeNames.push_back(llvm::MDString::get(C,
"void*"));
9025 AddressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(3)));
9026 AccessQuals.push_back(llvm::MDString::get(C,
"none"));
9027 ArgBaseTypeNames.push_back(llvm::MDString::get(C,
"void*"));
9028 ArgTypeQuals.push_back(llvm::MDString::get(C,
""));
9030 llvm::MDString::get(C, (Twine(
"local_arg") + Twine(I)).str()));
9032 std::string Name = Invoke->getName().str() +
"_kernel";
9033 auto *FT = llvm::FunctionType::get(llvm::Type::getVoidTy(C), ArgTys,
false);
9036 F->addFnAttr(
"enqueued-block");
9037 auto IP = CGF.
Builder.saveIP();
9039 Builder.SetInsertPoint(BB);
9040 unsigned BlockAlign = CGF.
CGM.
getDataLayout().getPrefTypeAlignment(BlockTy);
9041 auto *BlockPtr = Builder.CreateAlloca(BlockTy,
nullptr);
9042 BlockPtr->setAlignment(BlockAlign);
9043 Builder.CreateAlignedStore(F->arg_begin(), BlockPtr, BlockAlign);
9044 auto *Cast = Builder.CreatePointerCast(BlockPtr, InvokeFT->getParamType(0));
9046 Args.push_back(Cast);
9047 for (
auto I = F->arg_begin() + 1, E = F->arg_end(); I != E; ++I)
9049 Builder.CreateCall(Invoke, Args);
9050 Builder.CreateRetVoid();
9051 Builder.restoreIP(IP);
9053 F->setMetadata(
"kernel_arg_addr_space", llvm::MDNode::get(C, AddressQuals));
9054 F->setMetadata(
"kernel_arg_access_qual", llvm::MDNode::get(C, AccessQuals));
9055 F->setMetadata(
"kernel_arg_type", llvm::MDNode::get(C, ArgTypeNames));
9056 F->setMetadata(
"kernel_arg_base_type",
9057 llvm::MDNode::get(C, ArgBaseTypeNames));
9058 F->setMetadata(
"kernel_arg_type_qual", llvm::MDNode::get(C, ArgTypeQuals));
9060 F->setMetadata(
"kernel_arg_name", llvm::MDNode::get(C, ArgNames));
const llvm::DataLayout & getDataLayout() const
CGCXXABI & getCXXABI() const
Ignore - Ignore the argument (treat as void).
bool isFloatingPoint() const
CharUnits alignTo(const CharUnits &Align) const
alignTo - Returns the next integer (mod 2**64) that is greater than or equal to this quantity and is ...
An instance of this class is created to represent a function declaration or definition.
void setEffectiveCallingConvention(unsigned Value)
External linkage, which indicates that the entity can be referred to from other translation units...
static ABIArgInfo getExtend(llvm::Type *T=nullptr)
static bool addFieldSizes(ASTContext &Context, const RecordDecl *RD, uint64_t &Size)
if(T->getSizeExpr()) TRY_TO(TraverseStmt(T -> getSizeExpr()))
PointerType - C99 6.7.5.1 - Pointer Declarators.
QualType getPointeeType() const
A (possibly-)qualified type.
bool isBlockPointerType() const
CodeGenTypes & getTypes()
bool isMemberPointerType() const
llvm::Type * ConvertTypeForMem(QualType T)
const CodeGenOptions & getCodeGenOpts() const
bool isUnsignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is unsigned or an enumeration types whose underlying ...
bool isHomogeneousAggregate(QualType Ty, const Type *&Base, uint64_t &Members) const
isHomogeneousAggregate - Return true if a type is an ELFv2 homogeneous aggregate. ...
static ABIArgInfo classifyType(CodeGenModule &CGM, CanQualType type, bool forReturn)
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D...
FunctionType - C99 6.7.5.3 - Function Declarators.
llvm::ConstantInt * getSize(CharUnits N)
const llvm::Triple & getTriple() const
Returns the target triple of the primary target.
CharUnits getBaseClassOffset(const CXXRecordDecl *Base) const
getBaseClassOffset - Get the offset, in chars, for the given base class.
bool isRealFloatingType() const
Floating point categories.
Extend - Valid only for integer argument types.
bool isRecordType() const
Decl - This represents one declaration (or definition), e.g.
static bool appendEnumType(SmallStringEnc &Enc, const EnumType *ET, TypeStringCache &TSC, const IdentifierInfo *ID)
Appends enum types to Enc and adds the encoding to the cache.
CharUnits getPointerSize() const
const RecordType * getAsStructureType() const
Direct - Pass the argument directly using the normal converted LLVM type, or by coercing to another s...
const llvm::DataLayout & getDataLayout() const
static const Type * isSingleElementStruct(QualType T, ASTContext &Context)
isSingleElementStruct - Determine if a structure is a "single element struct", i.e.
The base class of the type hierarchy.
const ABIInfo & getABIInfo() const
getABIInfo() - Returns ABI info helper for the target.
Represents an array type, per C99 6.7.5.2 - Array Declarators.
bool isRestrictQualified() const
Determine whether this type is restrict-qualified.
bool isZero() const
isZero - Test whether the quantity equals zero.
const TargetInfo & getTargetInfo() const
static bool appendType(SmallStringEnc &Enc, QualType QType, const CodeGen::CodeGenModule &CGM, TypeStringCache &TSC)
Handles the type's qualifier before dispatching a call to handle specific type encodings.
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
virtual ~TargetCodeGenInfo()
void setCanBeFlattened(bool Flatten)
QualType getElementType() const
const RecordType * getAsUnionType() const
NOTE: getAs*ArrayType are methods on ASTContext.
unsigned getTypeAlign(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in bits.
virtual bool shouldSignExtUnsignedType(QualType Ty) const
ASTContext & getContext() const
VarDecl - An instance of this class is created to represent a variable declaration or definition...
LangAS getLangASFromTargetAS(unsigned TargetAS)
bool isEnumeralType() const
const T * getAs() const
Member-template getAs<specific type>'.
bool hasPointerRepresentation() const
Whether this type is represented natively as a pointer.
bool supportsCOMDAT() const
LangAS
Defines the address space values used by the address space qualifier of QualType. ...
llvm::LLVMContext & getVMContext() const
void setCoerceToType(llvm::Type *T)
Address CreateConstInBoundsByteGEP(Address Addr, CharUnits Offset, const llvm::Twine &Name="")
Given a pointer to i8, adjust it by a given constant offset.
llvm::Value * getPointer() const
llvm::Type * ConvertTypeForMem(QualType T)
ConvertTypeForMem - Convert type T into a llvm::Type.
static ABIArgInfo getIgnore()
static bool isAggregateTypeForABI(QualType T)
bool hasFloatingRepresentation() const
Determine whether this type has a floating-point representation of some sort, e.g., it is a floating-point type or a vector thereof.
virtual unsigned getOpenCLKernelCallingConv() const
Get LLVM calling convention for OpenCL kernel.
RecordDecl - Represents a struct/union/class.
uint64_t getPointerWidth(unsigned AddrSpace) const
Return the width of pointers on this target, for the specified address space.
static ABIArgInfo coerceToIntArray(QualType Ty, ASTContext &Context, llvm::LLVMContext &LLVMContext)
CodeGen::CodeGenTypes & CGT
One of these records is kept for each identifier that is lexed.
Address getAddress() const
Indirect - Pass the argument indirectly via a hidden pointer with the specified alignment (0 indicate...
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
ABIArgInfo classifyArgumentType(CodeGenModule &CGM, CanQualType type)
Classify the rules for how to pass a particular type.
llvm::IntegerType * Int64Ty
RecordDecl * getDefinition() const
getDefinition - Returns the RecordDecl that actually defines this struct/union/class.
static llvm::Type * GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi, const llvm::DataLayout &TD)
GetX86_64ByValArgumentPair - Given a high and low type that can ideally be used as elements of a two ...
static CharUnits getTypeAllocSize(CodeGenModule &CGM, llvm::Type *type)
field_range fields() const
static Address EmitX86_64VAArgFromMemory(CodeGenFunction &CGF, Address VAListAddr, QualType Ty)
FieldDecl - An instance of this class is created by Sema::ActOnField to represent a member of a struc...
static ABIArgInfo getExtendInReg(llvm::Type *T=nullptr)
ABIArgInfo classifyReturnType(CodeGenModule &CGM, CanQualType type)
Classify the rules for how to return a particular type.
bool isReferenceType() const
bool isSpecificBuiltinType(unsigned K) const
Test for a particular builtin type.
static CharUnits Zero()
Zero - Construct a CharUnits quantity of zero.
static bool occupiesMoreThan(CodeGenTypes &cgt, ArrayRef< llvm::Type *> scalarTypes, unsigned maxAllRegisters)
Does the given lowering require more than the given number of registers when expanded?
ABIInfo(CodeGen::CodeGenTypes &cgt)
bool isIntegralOrEnumerationType() const
Determine whether this type is an integral or enumeration type.
static ABIArgInfo getIndirectInReg(CharUnits Alignment, bool ByVal=true, bool Realign=false)
virtual StringRef getABI() const
Get the ABI currently in use.
static ABIArgInfo getDirect(llvm::Type *T=nullptr, unsigned Offset=0, llvm::Type *Padding=nullptr, bool CanBeFlattened=true)
static bool hasScalarEvaluationKind(QualType T)
bool getHasRegParm() const
bool isBitField() const
Determines whether this field is a bitfield.
static ABIArgInfo getExpandWithPadding(bool PaddingInReg, llvm::Type *Padding)
static bool appendRecordType(SmallStringEnc &Enc, const RecordType *RT, const CodeGen::CodeGenModule &CGM, TypeStringCache &TSC, const IdentifierInfo *ID)
Appends structure and union types to Enc and adds encoding to cache.
Address CreateElementBitCast(Address Addr, llvm::Type *Ty, const llvm::Twine &Name="")
Cast the element type of the given address to a different type, preserving information like the align...
CharUnits - This is an opaque type for sizes expressed in character units.
const ArrayType * getAsArrayTypeUnsafe() const
A variant of getAs<> for array types which silently discards qualifiers from the outermost type...
CharUnits getAlignment() const
Return the alignment of this pointer.
static void rewriteInputConstraintReferences(unsigned FirstIn, unsigned NumNewOuts, std::string &AsmString)
Rewrite input constraint references after adding some output constraints.
static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty)
bool isVolatileQualified() const
Determine whether this type is volatile-qualified.
const_arg_iterator arg_begin() const
llvm::CallInst * CreateMemCpy(Address Dest, Address Src, llvm::Value *Size, bool IsVolatile=false)
ABIArgInfo - Helper class to encapsulate information about how a specific C type should be passed to ...
const Type * getTypePtr() const
Retrieves a pointer to the underlying (unqualified) type.
field_iterator field_begin() const
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
static bool BitsContainNoUserData(QualType Ty, unsigned StartBit, unsigned EndBit, ASTContext &Context)
BitsContainNoUserData - Return true if the specified [start,end) bit range is known to either be off ...
static ABIArgInfo getExpand()
CharUnits getPointerAlign() const
bool isScalarType() const
llvm::AllocaInst * CreateTempAlloca(llvm::Type *Ty, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr)
CreateTempAlloca - This creates an alloca and inserts it into the entry block if ArraySize is nullptr...
const T * getTypePtr() const
Retrieve the underlying type pointer, which refers to a canonical type.
static QualType useFirstFieldIfTransparentUnion(QualType Ty)
Pass transparent unions as if they were the type of the first element.
virtual llvm::Value * performAddrSpaceCast(CodeGen::CodeGenFunction &CGF, llvm::Value *V, LangAS SrcAddr, LangAS DestAddr, llvm::Type *DestTy, bool IsNonNull=false) const
Perform address space cast of an expression of pointer type.
bool isTypeConstant(QualType QTy, bool ExcludeCtorDtor)
isTypeConstant - Determine whether an object of this type can be emitted as a constant.
Represents a K&R-style 'int foo()' function, which has no information available about its arguments...
static bool ContainsFloatAtOffset(llvm::Type *IRType, unsigned IROffset, const llvm::DataLayout &TD)
ContainsFloatAtOffset - Return true if the specified LLVM IR type has a float member at the specified...
CanQualType getReturnType() const
bool isPromotableIntegerType() const
More type predicates useful for type checking/promotion.
static CharUnits One()
One - Construct a CharUnits quantity of one.
ASTContext & getContext() const
Represents a prototype with parameter type info, e.g.
virtual CodeGen::Address EmitMSVAArg(CodeGen::CodeGenFunction &CGF, CodeGen::Address VAListAddr, QualType Ty) const
Emit the target-dependent code to load a value of the given type from the given va_list address.
const TargetCodeGenInfo & getTargetCodeGenInfo()
bool isComplexType() const
isComplexType() does not include complex integers (a GCC extension).
static bool extractFieldType(SmallVectorImpl< FieldEncoding > &FE, const RecordDecl *RD, const CodeGen::CodeGenModule &CGM, TypeStringCache &TSC)
Helper function for appendRecordType().
virtual void getDependentLibraryOption(llvm::StringRef Lib, llvm::SmallString< 24 > &Opt) const
Gets the linker options necessary to link a dependent library on this platform.
static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder, llvm::Value *Array, llvm::Value *Value, unsigned FirstIndex, unsigned LastIndex)
static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context)
void setAddress(Address address)
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
virtual void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M, ForDefinition_t IsForDefinition) const
setTargetAttributes - Provides a convenient hook to handle extra target-specific attributes for the g...
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
const llvm::fltSemantics & getLongDoubleFormat() const
Exposes information about the current target.
CodeGen::ABIArgInfo getNaturalAlignIndirect(QualType Ty, bool ByRef=true, bool Realign=false, llvm::Type *Padding=nullptr) const
A convenience method to return an indirect ABIArgInfo with an expected alignment equal to the ABI ali...
QualType getElementType() const
QualType getVectorType(QualType VectorType, unsigned NumElts, VectorType::VectorKind VecKind) const
Return the unique reference to a vector type of the specified element type and size.
const FunctionProtoType * T
const IdentifierInfo * getBaseTypeIdentifier() const
Retrieves a pointer to the name of the base type.
static bool appendBuiltinType(SmallStringEnc &Enc, const BuiltinType *BT)
Appends built-in types to Enc.
field_iterator field_end() const
llvm::PointerType * getType() const
Return the type of the pointer value.
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
bool isAnyComplexType() const
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
static bool getTypeString(SmallStringEnc &Enc, const Decl *D, CodeGen::CodeGenModule &CGM, TypeStringCache &TSC)
The XCore ABI includes a type information section that communicates symbol type information to the li...
unsigned getFieldCount() const
getFieldCount - Get the number of fields in the layout.
EnumDecl * getDefinition() const
char __ovld __cnfn min(char x, char y)
Returns y if y < x, otherwise it returns x.
llvm::CallingConv::ID RuntimeCC
llvm::LLVMContext & getLLVMContext()
bool isSignedIntegerType() const
Return true if this is an integer type that is signed, according to C99 6.2.5p4 [char, signed char, short, int, long..], or an enum decl which has a signed representation.
llvm::IntegerType * Int32Ty
CodeGen::ABIArgInfo getNaturalAlignIndirectInReg(QualType Ty, bool Realign=false) const
const CodeGenOptions & getCodeGenOpts() const
bool canHaveCoerceToType() const
CharUnits alignmentOfArrayElement(CharUnits elementSize) const
Given that this is the alignment of the first element of an array, return the minimum alignment of an...
bool getIndirectByVal() const
static Address emitVoidPtrDirectVAArg(CodeGenFunction &CGF, Address VAListAddr, llvm::Type *DirectTy, CharUnits DirectSize, CharUnits DirectAlign, CharUnits SlotSize, bool AllowHigherAlign)
Emit va_arg for a platform using the common void* representation, where arguments are simply emitted ...
Represents a GCC generic vector type.
ArraySizeModifier getSizeModifier() const
virtual unsigned getSizeOfUnwindException() const
Determines the size of struct _Unwind_Exception on this platform, in 8-bit units. ...
Implements C++ ABI-specific semantic analysis functions.
const TargetInfo & getTarget() const
const LangOptions & getLangOpts() const
ASTContext & getContext() const
bool isNull() const
Return true if this QualType doesn't point to a type yet.
virtual llvm::Constant * getNullPointer(const CodeGen::CodeGenModule &CGM, llvm::PointerType *T, QualType QT) const
Get target specific null pointer.
bool isConstQualified() const
Determine whether this type is const-qualified.
The l-value was considered opaque, so the alignment was determined from a type.
RecordDecl * getDecl() const
Pass it as a pointer to temporary memory.
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
bool isStructureOrClassType() const
static void appendQualifier(SmallStringEnc &Enc, QualType QT)
Appends type's qualifier to Enc.
static Address emitMergePHI(CodeGenFunction &CGF, Address Addr1, llvm::BasicBlock *Block1, Address Addr2, llvm::BasicBlock *Block2, const llvm::Twine &Name="")
static bool isEmptyField(ASTContext &Context, const FieldDecl *FD, bool AllowArrays)
isEmptyField - Return true iff the field is "empty", that is, it is an unnamed bit-field or an (arra...
Address CreateBitCast(Address Addr, llvm::Type *Ty, const llvm::Twine &Name="")
QualType getCanonicalType() const
bool isBuiltinType() const
Helper methods to distinguish type categories.
QualType getReturnType() const
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of enums...
bool isSRetAfterThis() const
LangAS getAddressSpace() const
Return the address space of this type.
unsigned getRegParm() const
const TargetInfo & getTarget() const
bool isUnnamedBitfield() const
Determines whether this is an unnamed bitfield.
static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays)
isEmptyRecord - Return true iff a structure contains only empty fields.
static bool appendFunctionType(SmallStringEnc &Enc, const FunctionType *FT, const CodeGen::CodeGenModule &CGM, TypeStringCache &TSC)
Appends a function encoding to Enc, calling appendType for the return type and the arguments...
SyncScope
Defines synch scope values used internally by clang.
const llvm::DataLayout & getDataLayout() const
void setArgStruct(llvm::StructType *Ty, CharUnits Align)
virtual void computeInfo(CodeGen::CGFunctionInfo &FI) const =0
const ConstantArrayType * getAsConstantArrayType(QualType T) const
const_arg_iterator arg_end() const
CoerceAndExpand - Only valid for aggregate argument types.
bool isMemberFunctionPointerType() const
llvm::LLVMContext & getLLVMContext()
bool operator<(DeclarationName LHS, DeclarationName RHS)
Ordering on two declaration names.
bool isTargetAddressSpace(LangAS AS)
EnumDecl * getDecl() const
bool isVectorType() const
TargetCodeGenInfo - This class organizes various target-specific code generation issues, like target-specific attributes, builtins and so on.
InAlloca - Pass the argument directly using the LLVM inalloca attribute.
X86AVXABILevel
The AVX ABI level for X86 targets.
llvm::CallingConv::ID getRuntimeCC() const
Return the calling convention to use for system runtime functions.
bool hasFlexibleArrayMember() const
static llvm::Value * emitRoundPointerUpToAlignment(CodeGenFunction &CGF, llvm::Value *Ptr, CharUnits Align)
CanProxy< U > getAs() const
Retrieve a canonical type pointer with a different static type, upcasting or downcasting as needed...
std::pair< CharUnits, CharUnits > getTypeInfoInChars(const Type *T) const
llvm::Type * getPaddingType() const
llvm::CallingConv::ID BuiltinCC
StringRef getName() const
Return the actual identifier string.
const TargetInfo & getTarget() const
virtual CodeGen::Address EmitVAArg(CodeGen::CodeGenFunction &CGF, CodeGen::Address VAListAddr, QualType Ty) const =0
EmitVAArg - Emit the target-dependent code to load a value of the given type from the given va_list address.
CGFunctionInfo - Class to encapsulate the information about a function definition.
This class organizes the cross-function state that is used while generating LLVM code.
Dataflow Directional Tag Classes.
virtual LangAS getGlobalVarAddressSpace(CodeGenModule &CGM, const VarDecl *D) const
Get target favored AST address space of a global variable for languages other than OpenCL and CUDA...
A refining implementation of ABIInfo for targets that support swiftcall.
static bool addBaseAndFieldSizes(ASTContext &Context, const CXXRecordDecl *RD, uint64_t &Size)
virtual llvm::Function * createEnqueuedBlockKernel(CodeGenFunction &CGF, llvm::Function *BlockInvokeFunc, llvm::Value *BlockLiteral) const
Create an OpenCL kernel for an enqueued block.
static ABIArgInfo getDirectInReg(llvm::Type *T=nullptr)
Address CreateStructGEP(Address Addr, unsigned Index, CharUnits Offset, const llvm::Twine &Name="")
virtual bool isHomogeneousAggregateSmallEnough(const Type *Base, uint64_t Members) const
llvm::LoadInst * CreateAlignedLoad(llvm::Value *Addr, CharUnits Align, const llvm::Twine &Name="")
static bool appendArrayType(SmallStringEnc &Enc, QualType QT, const ArrayType *AT, const CodeGen::CodeGenModule &CGM, TypeStringCache &TSC, StringRef NoSizeEnc)
Appends array encoding to Enc before calling appendType for the element.
std::unique_ptr< DiagnosticConsumer > create(StringRef OutputFile, DiagnosticOptions *Diags, bool MergeChildRecords=false)
Returns a DiagnosticConsumer that serializes diagnostics to a bitcode file.
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
llvm::IntegerType * IntPtrTy
EnumDecl - Represents an enum.
virtual bool isNoProtoCallVariadic(const CodeGen::CallArgList &args, const FunctionNoProtoType *fnType) const
Determine whether a call to an unprototyped functions under the given calling convention should use t...
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
bool isAggregateType() const
Determines whether the type is a C++ aggregate type or C aggregate or union type. ...
llvm::Module & getModule() const
virtual bool isLegalVectorTypeForSwift(CharUnits totalSize, llvm::Type *eltTy, unsigned elts) const
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
specific_decl_iterator - Iterates over a subrange of declarations stored in a DeclContext, providing only those that are of type SpecificDecl (or a class derived from it).
unsigned getIntWidth(QualType T) const
virtual llvm::Optional< LangAS > getConstantAddressSpace() const
Return an AST address space which can be used opportunistically for constant global memory...
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of structs/unions/cl...
Complex values, per C99 6.2.5p11.
Pass it using the normal C aggregate rules for the ABI, potentially introducing extra copies and pass...
Address CreateConstArrayGEP(Address Addr, uint64_t Index, CharUnits EltSize, const llvm::Twine &Name="")
Given addr = [n x T]* ...
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
Implements C++ ABI-specific code generation functions.
llvm::Type * getElementType() const
Return the type of the values stored in this address.
This class organizes the cross-module state that is used while lowering AST types to LLVM types...
llvm::PointerType * Int8PtrTy
CodeGen::CGCXXABI & getCXXABI() const
CodeGenOptions - Track various options which control how the code is optimized and passed to the back...
Expand - Only valid for aggregate argument types.
Internal linkage, which indicates that the entity can be referred to from within the translation unit...
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
static bool isArgInAlloca(const ABIArgInfo &Info)
static ABIArgInfo getInAlloca(unsigned FieldIndex)
ABIArgInfo & getReturnInfo()
Represents a base class of a C++ class.
char __ovld __cnfn max(char x, char y)
Returns y if x < y, otherwise it returns x.
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
ASTContext & getContext() const
Pass it on the stack using its defined layout.
static CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT, CGCXXABI &CXXABI)
CanQualType getCanonicalType(QualType T) const
Return the canonical (structural) type corresponding to the specified potentially non-canonical type ...
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
bool isMultipleOf(CharUnits N) const
Test whether this is a multiple of the other value.
int64_t toBits(CharUnits CharSize) const
Convert a size in characters to a size in bits.
virtual llvm::SyncScope::ID getLLVMSyncScopeID(SyncScope S, llvm::LLVMContext &C) const
Get the syncscope used in LLVM IR.
CallingConv getCallConv() const
unsigned getCallingConvention() const
getCallingConvention - Return the user specified calling convention, which has been translated into a...
Address CreateConstByteGEP(Address Addr, CharUnits Offset, const llvm::Twine &Name="")
Represents a C++ struct/union/class.
void EmitBranch(llvm::BasicBlock *Block)
EmitBranch - Emit a branch to the specified basic block from the current insert block, taking care to avoid creation of branches from dummy blocks.
TypeInfo getTypeInfo(const Type *T) const
Get the size and alignment of the specified complete type in bits.
llvm::Type * ConvertType(QualType T)
virtual RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const =0
Returns how an argument of the given record type should be passed.
QualType getPointerType(QualType T) const
Return the uniqued reference to the type for a pointer to the specified type.
This class is used for builtin types like 'int'.
static Address emitVoidPtrVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType ValueTy, bool IsIndirect, std::pair< CharUnits, CharUnits > ValueInfo, CharUnits SlotSizeAndAlign, bool AllowHigherAlign)
Emit va_arg for a platform using the common void* representation, where arguments are simply emitted ...
ABIInfo - Target specific hooks for defining how a type should be passed or returned from functions...
static bool appendPointerType(SmallStringEnc &Enc, const PointerType *PT, const CodeGen::CodeGenModule &CGM, TypeStringCache &TSC)
Appends a pointer encoding to Enc before calling appendType for the pointee.
uint64_t getTargetNullPointerValue(QualType QT) const
Get target-dependent integer value for null pointer which is used for constant folding.
virtual bool isHomogeneousAggregateBaseType(QualType Ty) const
bool isPointerType() const
unsigned getNumRequiredArgs() const
unsigned getDirectOffset() const
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
llvm::StoreInst * CreateAlignedStore(llvm::Value *Val, llvm::Value *Addr, CharUnits Align, bool IsVolatile=false)
void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI)
bool isFloatingType() const
LValue - This represents an lvalue references.
llvm::Type * getCoerceToType() const
void setInAllocaSRet(bool SRet)
unsigned getTargetAddressSpace(QualType T) const
RecordArgABI
Specify how one should pass an argument of a record type.
Address CreatePointerBitCastOrAddrSpaceCast(Address Addr, llvm::Type *Ty, const llvm::Twine &Name="")
static bool isIntegerLikeType(QualType Ty, ASTContext &Context, llvm::LLVMContext &VMContext)
static bool isSSEVectorType(ASTContext &Context, QualType Ty)
CallArgList - Type for representing both the value and type of arguments in a call.
Address CreateMemTemp(QualType T, const Twine &Name="tmp", bool CastToDefaultAddrSpace=true)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment...
const LangOptions & getLangOpts() const
static bool PPC64_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, llvm::Value *Address)
Represents the canonical version of C arrays with a specified constant size.
bool getIndirectRealign() const
static ABIArgInfo getIndirect(CharUnits Alignment, bool ByVal=true, bool Realign=false, llvm::Type *Padding=nullptr)
Attr - This represents one attribute.
Attempt to be ABI-compatible with code generated by Clang 3.8.x (SVN r257626).
QualType getIntTypeForBitwidth(unsigned DestWidth, unsigned Signed) const
getIntTypeForBitwidth - sets integer QualTy according to specified details: bitwidth, signed/unsigned.
static OMPLinearClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, OpenMPLinearClauseKind Modifier, SourceLocation ModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef< Expr *> VL, ArrayRef< Expr *> PL, ArrayRef< Expr *> IL, Expr *Step, Expr *CalcStep, Stmt *PreInit, Expr *PostUpdate)
Creates clause with a list of variables VL and a linear step Step.
const CodeGenOptions & getCodeGenOpts() const