clang 8.0.0
CGCall.cpp
1 //===--- CGCall.cpp - Encapsulate calling convention details --------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // These classes wrap the information about a call or function
11 // definition used to handle ABI compliance.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "CGCall.h"
16 #include "ABIInfo.h"
17 #include "CGBlocks.h"
18 #include "CGCXXABI.h"
19 #include "CGCleanup.h"
20 #include "CodeGenFunction.h"
21 #include "CodeGenModule.h"
22 #include "TargetInfo.h"
23 #include "clang/AST/Decl.h"
24 #include "clang/AST/DeclCXX.h"
25 #include "clang/AST/DeclObjC.h"
26 #include "clang/Basic/CodeGenOptions.h"
27 #include "clang/Basic/TargetBuiltins.h"
28 #include "clang/Basic/TargetInfo.h"
29 #include "clang/CodeGen/CGFunctionInfo.h"
30 #include "clang/CodeGen/SwiftCallingConv.h"
31 #include "llvm/ADT/StringExtras.h"
32 #include "llvm/Transforms/Utils/Local.h"
33 #include "llvm/Analysis/ValueTracking.h"
34 #include "llvm/IR/Attributes.h"
35 #include "llvm/IR/CallSite.h"
36 #include "llvm/IR/CallingConv.h"
37 #include "llvm/IR/DataLayout.h"
38 #include "llvm/IR/InlineAsm.h"
39 #include "llvm/IR/IntrinsicInst.h"
40 #include "llvm/IR/Intrinsics.h"
41 using namespace clang;
42 using namespace CodeGen;
43 
44 /***/
45 
46 unsigned CodeGenTypes::ClangCallConvToLLVMCallConv(CallingConv CC) {
47  switch (CC) {
48  default: return llvm::CallingConv::C;
49  case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
50  case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
51  case CC_X86RegCall: return llvm::CallingConv::X86_RegCall;
52  case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
53  case CC_Win64: return llvm::CallingConv::Win64;
54  case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV;
55  case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
56  case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
57  case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
58  // TODO: Add support for __pascal to LLVM.
59  case CC_X86Pascal: return llvm::CallingConv::C;
60  // TODO: Add support for __vectorcall to LLVM.
61  case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall;
62  case CC_AArch64VectorCall: return llvm::CallingConv::AArch64_VectorCall;
63  case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC;
64  case CC_SpirKernel: return llvm::CallingConv::SPIR_KERNEL;
65  case CC_PreserveMost: return llvm::CallingConv::PreserveMost;
66  case CC_PreserveAll: return llvm::CallingConv::PreserveAll;
67  case CC_Swift: return llvm::CallingConv::Swift;
68  }
69 }
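// [Editorial example - not part of the original source] A minimal sketch of
// how a source-level calling convention flows through the switch above:
//
//   C source:      __attribute__((fastcall)) void f(int);
//   Sema records:  CC_X86FastCall on the function type
//   Mapped here:   CC_X86FastCall -> llvm::CallingConv::X86_FastCall
//   Emitted IR:    define x86_fastcallcc void @f(...)
//
// Conventions LLVM has no counterpart for (e.g. CC_X86Pascal, per the TODO
// above) are lowered to llvm::CallingConv::C instead.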
70 
71 /// Derives the 'this' type for codegen purposes, i.e. ignoring method CVR
72 /// qualification.
73 static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD,
74  const CXXMethodDecl *MD) {
75  QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
76  if (MD)
77  RecTy = Context.getAddrSpaceQualType(RecTy, MD->getTypeQualifiers().getAddressSpace());
78  return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
79 }
80 
81 /// Returns the canonical formal type of the given C++ method.
82 static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
83  return MD->getType()->getCanonicalTypeUnqualified()
84  .getAs<FunctionProtoType>();
85 }
86 
87 /// Returns the "extra-canonicalized" return type, which discards
88 /// qualifiers on the return type. Codegen doesn't care about them,
89 /// and it makes ABI code a little easier to be able to assume that
90 /// all parameter and return types are top-level unqualified.
91 static CanQualType GetReturnType(QualType RetTy) {
92  return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
93 }
94 
95 /// Arrange the argument and result information for a value of the given
96 /// unprototyped freestanding function type.
97 const CGFunctionInfo &
98 CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
99  // When translating an unprototyped function type, always use a
100  // variadic type.
101  return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(),
102  /*instanceMethod=*/false,
103  /*chainCall=*/false, None,
104  FTNP->getExtInfo(), {}, RequiredArgs(0));
105 }
106 
107 static void addExtParameterInfosForCall(
108  llvm::SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
109  const FunctionProtoType *proto,
110  unsigned prefixArgs,
111  unsigned totalArgs) {
112  assert(proto->hasExtParameterInfos());
113  assert(paramInfos.size() <= prefixArgs);
114  assert(proto->getNumParams() + prefixArgs <= totalArgs);
115 
116  paramInfos.reserve(totalArgs);
117 
118  // Add default infos for any prefix args that don't already have infos.
119  paramInfos.resize(prefixArgs);
120 
121  // Add infos for the prototype.
122  for (const auto &ParamInfo : proto->getExtParameterInfos()) {
123  paramInfos.push_back(ParamInfo);
124  // pass_object_size params have no parameter info.
125  if (ParamInfo.hasPassObjectSize())
126  paramInfos.emplace_back();
127  }
128 
129  assert(paramInfos.size() <= totalArgs &&
130  "Did we forget to insert pass_object_size args?");
131  // Add default infos for the variadic and/or suffix arguments.
132  paramInfos.resize(totalArgs);
133 }
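// [Editorial example - not part of the original source] Why the loop above
// appends an extra default-constructed ExtParameterInfo: a prototype such as
//
//   void g(void *p __attribute__((pass_object_size(0))));
//
// has one formal parameter but lowers to two IR-level arguments (the pointer
// plus an implicit size_t computed at each call site), so the info array is
// grown by one default entry to stay index-aligned with the expanded
// argument list.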
134 
135 /// Adds the formal parameters in FPT to the given prefix. If any parameter in
136 /// FPT has pass_object_size attrs, then we'll add parameters for those, too.
137 static void appendParameterTypes(const CodeGenTypes &CGT,
138  SmallVectorImpl<CanQualType> &prefix,
139  SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
140  CanQual<FunctionProtoType> FPT) {
141  // Fast path: don't touch param info if we don't need to.
142  if (!FPT->hasExtParameterInfos()) {
143  assert(paramInfos.empty() &&
144  "We have paramInfos, but the prototype doesn't?");
145  prefix.append(FPT->param_type_begin(), FPT->param_type_end());
146  return;
147  }
148 
149  unsigned PrefixSize = prefix.size();
150  // In the vast majority of cases, we'll have precisely FPT->getNumParams()
151  // parameters; the only thing that can change this is the presence of
152  // pass_object_size. So, we preallocate for the common case.
153  prefix.reserve(prefix.size() + FPT->getNumParams());
154 
155  auto ExtInfos = FPT->getExtParameterInfos();
156  assert(ExtInfos.size() == FPT->getNumParams());
157  for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) {
158  prefix.push_back(FPT->getParamType(I));
159  if (ExtInfos[I].hasPassObjectSize())
160  prefix.push_back(CGT.getContext().getSizeType());
161  }
162 
163  addExtParameterInfosForCall(paramInfos, FPT.getTypePtr(), PrefixSize,
164  prefix.size());
165 }
166 
167 /// Arrange the LLVM function layout for a value of the given function
168 /// type, on top of any implicit parameters already stored.
169 static const CGFunctionInfo &
170 arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod,
171  SmallVectorImpl<CanQualType> &prefix,
172  CanQual<FunctionProtoType> FTP,
173  const FunctionDecl *FD) {
174  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
175  RequiredArgs Required =
176  RequiredArgs::forPrototypePlus(FTP, prefix.size(), FD);
177  // FIXME: Kill copy.
178  appendParameterTypes(CGT, prefix, paramInfos, FTP);
179  CanQualType resultType = FTP->getReturnType().getUnqualifiedType();
180 
181  return CGT.arrangeLLVMFunctionInfo(resultType, instanceMethod,
182  /*chainCall=*/false, prefix,
183  FTP->getExtInfo(), paramInfos,
184  Required);
185 }
186 
187 /// Arrange the argument and result information for a value of the
188 /// given freestanding function type.
189 const CGFunctionInfo &
190 CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP,
191  const FunctionDecl *FD) {
192  SmallVector<CanQualType, 16> argTypes;
193  return ::arrangeLLVMFunctionInfo(*this, /*instanceMethod=*/false, argTypes,
194  FTP, FD);
195 }
196 
197 static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) {
198  // Set the appropriate calling convention for the Function.
199  if (D->hasAttr<StdCallAttr>())
200  return CC_X86StdCall;
201 
202  if (D->hasAttr<FastCallAttr>())
203  return CC_X86FastCall;
204 
205  if (D->hasAttr<RegCallAttr>())
206  return CC_X86RegCall;
207 
208  if (D->hasAttr<ThisCallAttr>())
209  return CC_X86ThisCall;
210 
211  if (D->hasAttr<VectorCallAttr>())
212  return CC_X86VectorCall;
213 
214  if (D->hasAttr<PascalAttr>())
215  return CC_X86Pascal;
216 
217  if (PcsAttr *PCS = D->getAttr<PcsAttr>())
218  return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);
219 
220  if (D->hasAttr<AArch64VectorPcsAttr>())
221  return CC_AArch64VectorCall;
222 
223  if (D->hasAttr<IntelOclBiccAttr>())
224  return CC_IntelOclBicc;
225 
226  if (D->hasAttr<MSABIAttr>())
227  return IsWindows ? CC_C : CC_Win64;
228 
229  if (D->hasAttr<SysVABIAttr>())
230  return IsWindows ? CC_X86_64SysV : CC_C;
231 
232  if (D->hasAttr<PreserveMostAttr>())
233  return CC_PreserveMost;
234 
235  if (D->hasAttr<PreserveAllAttr>())
236  return CC_PreserveAll;
237 
238  return CC_C;
239 }
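// [Editorial example - not part of the original source] For instance, a
// declaration such as
//
//   __attribute__((preserve_most)) void notify(void *ctx);
//
// yields CC_PreserveMost here, and a declaration with no convention
// attribute falls through to CC_C. Note that ms_abi/sysv_abi are relative
// to the target: each one degrades to CC_C on platforms where it is already
// the default convention, as the two IsWindows checks above show.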
240 
241 /// Arrange the argument and result information for a call to an
242 /// unknown C++ non-static member function of the given abstract type.
243 /// (Zero value of RD means we don't have any meaningful "this" argument type,
244 /// so fall back to a generic pointer type).
245 /// The member function must be an ordinary function, i.e. not a
246 /// constructor or destructor.
247 const CGFunctionInfo &
248 CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
249  const FunctionProtoType *FTP,
250  const CXXMethodDecl *MD) {
251  SmallVector<CanQualType, 16> argTypes;
252 
253  // Add the 'this' pointer.
254  if (RD)
255  argTypes.push_back(GetThisType(Context, RD, MD));
256  else
257  argTypes.push_back(Context.VoidPtrTy);
258 
259  return ::arrangeLLVMFunctionInfo(
260  *this, true, argTypes,
261  FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>(), MD);
262 }
263 
264 /// Set calling convention for CUDA/HIP kernel.
265 static void setCUDAKernelCallingConvention(CanQualType &FTy, CodeGenModule &CGM,
266  const FunctionDecl *FD) {
267  if (FD->hasAttr<CUDAGlobalAttr>()) {
268  const FunctionType *FT = FTy->getAs<FunctionType>();
269  CGM.getTargetCodeGenInfo().setCUDAKernelCallingConvention(FT);
270  FTy = FT->getCanonicalTypeUnqualified();
271  }
272 }
273 
274 /// Arrange the argument and result information for a declaration or
275 /// definition of the given C++ non-static member function. The
276 /// member function must be an ordinary function, i.e. not a
277 /// constructor or destructor.
278 const CGFunctionInfo &
279 CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
280  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
281  assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");
282 
283  CanQualType FT = GetFormalType(MD).getAs<Type>();
284  setCUDAKernelCallingConvention(FT, CGM, MD);
285  auto prototype = FT.getAs<FunctionProtoType>();
286 
287  if (MD->isInstance()) {
288  // The abstract case is perfectly fine.
289  const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD);
290  return arrangeCXXMethodType(ThisType, prototype.getTypePtr(), MD);
291  }
292 
293  return arrangeFreeFunctionType(prototype, MD);
294 }
295 
296 bool CodeGenTypes::inheritingCtorHasParams(
297  const InheritedConstructor &Inherited, CXXCtorType Type) {
298  // Parameters are unnecessary if we're constructing a base class subobject
299  // and the inherited constructor lives in a virtual base.
300  return Type == Ctor_Complete ||
301  !Inherited.getShadowDecl()->constructsVirtualBase() ||
302  !Target.getCXXABI().hasConstructorVariants();
303  }
304 
305 const CGFunctionInfo &
306 CodeGenTypes::arrangeCXXStructorDeclaration(const CXXMethodDecl *MD,
307  StructorType Type) {
308 
309  SmallVector<CanQualType, 16> argTypes;
310  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
311  argTypes.push_back(GetThisType(Context, MD->getParent(), MD));
312 
313  bool PassParams = true;
314 
315  GlobalDecl GD;
316  if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
317  GD = GlobalDecl(CD, toCXXCtorType(Type));
318 
319  // A base class inheriting constructor doesn't get forwarded arguments
320  // needed to construct a virtual base (or base class thereof).
321  if (auto Inherited = CD->getInheritedConstructor())
322  PassParams = inheritingCtorHasParams(Inherited, toCXXCtorType(Type));
323  } else {
324  auto *DD = dyn_cast<CXXDestructorDecl>(MD);
325  GD = GlobalDecl(DD, toCXXDtorType(Type));
326  }
327 
328  CanQual<FunctionProtoType> FTP = GetFormalType(MD);
329 
330  // Add the formal parameters.
331  if (PassParams)
332  appendParameterTypes(*this, argTypes, paramInfos, FTP);
333 
334  CGCXXABI::AddedStructorArgs AddedArgs =
335  TheCXXABI.buildStructorSignature(MD, Type, argTypes);
336  if (!paramInfos.empty()) {
337  // Note: prefix implies after the first param.
338  if (AddedArgs.Prefix)
339  paramInfos.insert(paramInfos.begin() + 1, AddedArgs.Prefix,
340  FunctionProtoType::ExtParameterInfo{});
341  if (AddedArgs.Suffix)
342  paramInfos.append(AddedArgs.Suffix,
343  FunctionProtoType::ExtParameterInfo{});
344  }
345 
346  RequiredArgs required =
347  (PassParams && MD->isVariadic() ? RequiredArgs(argTypes.size())
348  : RequiredArgs::All);
349 
350  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
351  CanQualType resultType = TheCXXABI.HasThisReturn(GD)
352  ? argTypes.front()
353  : TheCXXABI.hasMostDerivedReturn(GD)
354  ? CGM.getContext().VoidPtrTy
355  : Context.VoidTy;
356  return arrangeLLVMFunctionInfo(resultType, /*instanceMethod=*/true,
357  /*chainCall=*/false, argTypes, extInfo,
358  paramInfos, required);
359 }
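// [Editorial note - not part of the original source] The resultType
// selection above implements "this-return" ABIs: where
// TheCXXABI.HasThisReturn(GD) is true (e.g. constructors in the Microsoft
// C++ ABI, and ARM's Itanium variant for certain structors), the structor's
// IR return type becomes the 'this' pointer type rather than void, letting
// callers such as 'new S()' reuse the value coming back in the return
// register instead of re-materializing the object pointer.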
360 
361 static SmallVector<CanQualType, 16>
362 getArgTypesForCall(ASTContext &ctx, const CallArgList &args) {
363  SmallVector<CanQualType, 16> argTypes;
364  for (auto &arg : args)
365  argTypes.push_back(ctx.getCanonicalParamType(arg.Ty));
366  return argTypes;
367 }
368 
369 static SmallVector<CanQualType, 16>
370 getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args) {
371  SmallVector<CanQualType, 16> argTypes;
372  for (auto &arg : args)
373  argTypes.push_back(ctx.getCanonicalParamType(arg->getType()));
374  return argTypes;
375 }
376 
377 static llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16>
378 getExtParameterInfosForCall(const FunctionProtoType *proto,
379  unsigned prefixArgs, unsigned totalArgs) {
380  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> result;
381  if (proto->hasExtParameterInfos()) {
382  addExtParameterInfosForCall(result, proto, prefixArgs, totalArgs);
383  }
384  return result;
385 }
386 
387 /// Arrange a call to a C++ method, passing the given arguments.
388 ///
389 /// ExtraPrefixArgs is the number of ABI-specific args passed after the `this`
390 /// parameter.
391 /// ExtraSuffixArgs is the number of ABI-specific args passed at the end of
392 /// args.
393 /// PassProtoArgs indicates whether `args` has args for the parameters in the
394 /// given CXXConstructorDecl.
395 const CGFunctionInfo &
396 CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
397  const CXXConstructorDecl *D,
398  CXXCtorType CtorKind,
399  unsigned ExtraPrefixArgs,
400  unsigned ExtraSuffixArgs,
401  bool PassProtoArgs) {
402  // FIXME: Kill copy.
403  SmallVector<CanQualType, 16> ArgTypes;
404  for (const auto &Arg : args)
405  ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
406 
407  // +1 for implicit this, which should always be args[0].
408  unsigned TotalPrefixArgs = 1 + ExtraPrefixArgs;
409 
410  CanQual<FunctionProtoType> FPT = GetFormalType(D);
411  RequiredArgs Required =
412  RequiredArgs::forPrototypePlus(FPT, TotalPrefixArgs + ExtraSuffixArgs, D);
413  GlobalDecl GD(D, CtorKind);
414  CanQualType ResultType = TheCXXABI.HasThisReturn(GD)
415  ? ArgTypes.front()
416  : TheCXXABI.hasMostDerivedReturn(GD)
417  ? CGM.getContext().VoidPtrTy
418  : Context.VoidTy;
419 
420  FunctionType::ExtInfo Info = FPT->getExtInfo();
421  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> ParamInfos;
422  // If the prototype args are elided, we should only have ABI-specific args,
423  // which never have param info.
424  if (PassProtoArgs && FPT->hasExtParameterInfos()) {
425  // ABI-specific suffix arguments are treated the same as variadic arguments.
426  addExtParameterInfosForCall(ParamInfos, FPT.getTypePtr(), TotalPrefixArgs,
427  ArgTypes.size());
428  }
429  return arrangeLLVMFunctionInfo(ResultType, /*instanceMethod=*/true,
430  /*chainCall=*/false, ArgTypes, Info,
431  ParamInfos, Required);
432 }
433 
434 /// Arrange the argument and result information for the declaration or
435 /// definition of the given function.
436 const CGFunctionInfo &
437 CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
438  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
439  if (MD->isInstance())
440  return arrangeCXXMethodDeclaration(MD);
441 
442  CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();
443 
444  assert(isa<FunctionType>(FTy));
445  setCUDAKernelCallingConvention(FTy, CGM, FD);
446 
447  // When declaring a function without a prototype, always use a
448  // non-variadic type.
449  if (CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>()) {
450  return arrangeLLVMFunctionInfo(
451  noProto->getReturnType(), /*instanceMethod=*/false,
452  /*chainCall=*/false, None, noProto->getExtInfo(), {}, RequiredArgs::All);
453  }
454 
455  return arrangeFreeFunctionType(FTy.castAs<FunctionProtoType>(), FD);
456 }
457 
458 /// Arrange the argument and result information for the declaration or
459 /// definition of an Objective-C method.
460 const CGFunctionInfo &
461 CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
462  // It happens that this is the same as a call with no optional
463  // arguments, except also using the formal 'self' type.
464  return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
465 }
466 
467 /// Arrange the argument and result information for the function type
468 /// through which to perform a send to the given Objective-C method,
469 /// using the given receiver type. The receiver type is not always
470 /// the 'self' type of the method or even an Objective-C pointer type.
471 /// This is *not* the right method for actually performing such a
472 /// message send, due to the possibility of optional arguments.
473 const CGFunctionInfo &
474 CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
475  QualType receiverType) {
476  SmallVector<CanQualType, 16> argTys;
477  SmallVector<FunctionProtoType::ExtParameterInfo, 4> extParamInfos(2);
478  argTys.push_back(Context.getCanonicalParamType(receiverType));
479  argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
480  // FIXME: Kill copy?
481  for (const auto *I : MD->parameters()) {
482  argTys.push_back(Context.getCanonicalParamType(I->getType()));
483  auto extParamInfo = FunctionProtoType::ExtParameterInfo().withIsNoEscape(
484  I->hasAttr<NoEscapeAttr>());
485  extParamInfos.push_back(extParamInfo);
486  }
487 
488  FunctionType::ExtInfo einfo;
489  bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows();
490  einfo = einfo.withCallingConv(getCallingConventionForDecl(MD, IsWindows));
491 
492  if (getContext().getLangOpts().ObjCAutoRefCount &&
493  MD->hasAttr<NSReturnsRetainedAttr>())
494  einfo = einfo.withProducesResult(true);
495 
496  RequiredArgs required =
497  (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);
498 
499  return arrangeLLVMFunctionInfo(
500  GetReturnType(MD->getReturnType()), /*instanceMethod=*/false,
501  /*chainCall=*/false, argTys, einfo, extParamInfos, required);
502 }
503 
504 const CGFunctionInfo &
505 CodeGenTypes::arrangeUnprototypedObjCMessageSend(QualType returnType,
506  const CallArgList &args) {
507  auto argTypes = getArgTypesForCall(Context, args);
508  FunctionType::ExtInfo einfo;
509 
510  return arrangeLLVMFunctionInfo(
511  GetReturnType(returnType), /*instanceMethod=*/false,
512  /*chainCall=*/false, argTypes, einfo, {}, RequiredArgs::All);
513 }
514 
515 const CGFunctionInfo &
516 CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
517  // FIXME: Do we need to handle ObjCMethodDecl?
518  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
519 
520  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
521  return arrangeCXXStructorDeclaration(CD, StructorType::Complete);
522 
523  if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
524  return arrangeCXXStructorDeclaration(DD, StructorType::Complete);
525 
526  return arrangeFunctionDeclaration(FD);
527 }
528 
529 /// Arrange a thunk that takes 'this' as the first parameter followed by
530 /// varargs. Return a void pointer, regardless of the actual return type.
531 /// The body of the thunk will end in a musttail call to a function of the
532 /// correct type, and the caller will bitcast the function to the correct
533 /// prototype.
534 const CGFunctionInfo &
535 CodeGenTypes::arrangeUnprototypedMustTailThunk(const CXXMethodDecl *MD) {
536  assert(MD->isVirtual() && "only methods have thunks");
537  CanQual<FunctionProtoType> FTP = GetFormalType(MD);
538  CanQualType ArgTys[] = { GetThisType(Context, MD->getParent(), MD) };
539  return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false,
540  /*chainCall=*/false, ArgTys,
541  FTP->getExtInfo(), {}, RequiredArgs(1));
542 }
543 
544 const CGFunctionInfo &
545 CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD,
546  CXXCtorType CT) {
547  assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure);
548 
549  CanQual<FunctionProtoType> FTP = GetFormalType(CD);
550  SmallVector<CanQualType, 2> ArgTys;
551  const CXXRecordDecl *RD = CD->getParent();
552  ArgTys.push_back(GetThisType(Context, RD, CD));
553  if (CT == Ctor_CopyingClosure)
554  ArgTys.push_back(*FTP->param_type_begin());
555  if (RD->getNumVBases() > 0)
556  ArgTys.push_back(Context.IntTy);
557  CallingConv CC = Context.getDefaultCallingConvention(
558  /*IsVariadic=*/false, /*IsCXXMethod=*/true);
559  return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/true,
560  /*chainCall=*/false, ArgTys,
561  FunctionType::ExtInfo(CC), {},
562  RequiredArgs::All);
563 }
564 
565 /// Arrange a call as unto a free function, except possibly with an
566 /// additional number of formal parameters considered required.
567 static const CGFunctionInfo &
568 arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
569  CodeGenModule &CGM,
570  const CallArgList &args,
571  const FunctionType *fnType,
572  unsigned numExtraRequiredArgs,
573  bool chainCall) {
574  assert(args.size() >= numExtraRequiredArgs);
575 
576  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
575 
577 
578  // In most cases, there are no optional arguments.
579  RequiredArgs required = RequiredArgs::All;
580 
581  // If we have a variadic prototype, the required arguments are the
582  // extra prefix plus the arguments in the prototype.
583  if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
584  if (proto->isVariadic())
585  required = RequiredArgs(proto->getNumParams() + numExtraRequiredArgs);
586 
587  if (proto->hasExtParameterInfos())
588  addExtParameterInfosForCall(paramInfos, proto, numExtraRequiredArgs,
589  args.size());
590 
591  // If we don't have a prototype at all, but we're supposed to
592  // explicitly use the variadic convention for unprototyped calls,
593  // treat all of the arguments as required but preserve the nominal
594  // possibility of variadics.
595  } else if (CGM.getTargetCodeGenInfo()
596  .isNoProtoCallVariadic(args,
597  cast<FunctionNoProtoType>(fnType))) {
598  required = RequiredArgs(args.size());
599  }
600 
601  // FIXME: Kill copy.
602  SmallVector<CanQualType, 16> argTypes;
603  for (const auto &arg : args)
604  argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty));
605  return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()),
606  /*instanceMethod=*/false, chainCall,
607  argTypes, fnType->getExtInfo(), paramInfos,
608  required);
609 }
610 
611 /// Figure out the rules for calling a function with the given formal
612 /// type using the given arguments. The arguments are necessary
613 /// because the function might be unprototyped, in which case it's
614 /// target-dependent in crazy ways.
615 const CGFunctionInfo &
616 CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
617  const FunctionType *fnType,
618  bool chainCall) {
619  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType,
620  chainCall ? 1 : 0, chainCall);
621 }
622 
623 /// A block function is essentially a free function with an
624 /// extra implicit argument.
625 const CGFunctionInfo &
626 CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
627  const FunctionType *fnType) {
628  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1,
629  /*chainCall=*/false);
630 }
631 
632 const CGFunctionInfo &
633 CodeGenTypes::arrangeBlockFunctionDeclaration(const FunctionProtoType *proto,
634  const FunctionArgList &params) {
635  auto paramInfos = getExtParameterInfosForCall(proto, 1, params.size());
636  auto argTypes = getArgTypesForDeclaration(Context, params);
637 
638  return arrangeLLVMFunctionInfo(
639  GetReturnType(proto->getReturnType()),
640  /*instanceMethod*/ false, /*chainCall*/ false, argTypes,
641  proto->getExtInfo(), paramInfos,
642  RequiredArgs::forPrototypePlus(proto, 1, nullptr));
643 }
644 
645 const CGFunctionInfo &
646 CodeGenTypes::arrangeBuiltinFunctionCall(QualType resultType,
647  const CallArgList &args) {
648  // FIXME: Kill copy.
649  SmallVector<CanQualType, 16> argTypes;
650  for (const auto &Arg : args)
651  argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
652  return arrangeLLVMFunctionInfo(
653  GetReturnType(resultType), /*instanceMethod=*/false,
654  /*chainCall=*/false, argTypes, FunctionType::ExtInfo(),
655  /*paramInfos=*/ {}, RequiredArgs::All);
656 }
657 
658 const CGFunctionInfo &
659 CodeGenTypes::arrangeBuiltinFunctionDeclaration(QualType resultType,
660  const FunctionArgList &args) {
661  auto argTypes = getArgTypesForDeclaration(Context, args);
662 
663  return arrangeLLVMFunctionInfo(
664  GetReturnType(resultType), /*instanceMethod=*/false, /*chainCall=*/false,
665  argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
666 }
667 
668 const CGFunctionInfo &
669 CodeGenTypes::arrangeBuiltinFunctionDeclaration(CanQualType resultType,
670  ArrayRef<CanQualType> argTypes) {
671  return arrangeLLVMFunctionInfo(
672  resultType, /*instanceMethod=*/false, /*chainCall=*/false,
673  argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
674 }
675 
676 /// Arrange a call to a C++ method, passing the given arguments.
677 ///
678 /// numPrefixArgs is the number of ABI-specific prefix arguments we have. It
679 /// does not count `this`.
680 const CGFunctionInfo &
681 CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
682  const FunctionProtoType *proto,
683  RequiredArgs required,
684  unsigned numPrefixArgs) {
685  assert(numPrefixArgs + 1 <= args.size() &&
686  "Emitting a call with less args than the required prefix?");
687  // Add one to account for `this`. It's a bit awkward here, but we don't count
688  // `this` in similar places elsewhere.
689  auto paramInfos =
690  getExtParameterInfosForCall(proto, numPrefixArgs + 1, args.size());
691 
692  // FIXME: Kill copy.
693  auto argTypes = getArgTypesForCall(Context, args);
694 
695  FunctionType::ExtInfo info = proto->getExtInfo();
696  return arrangeLLVMFunctionInfo(
697  GetReturnType(proto->getReturnType()), /*instanceMethod=*/true,
698  /*chainCall=*/false, argTypes, info, paramInfos, required);
699 }
700 
701 const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
702  return arrangeLLVMFunctionInfo(
703  getContext().VoidTy, /*instanceMethod=*/false, /*chainCall=*/false,
704  None, FunctionType::ExtInfo(), {}, RequiredArgs::All);
705 }
706 
707 const CGFunctionInfo &
708 CodeGenTypes::arrangeCall(const CGFunctionInfo &signature,
709  const CallArgList &args) {
710  assert(signature.arg_size() <= args.size());
711  if (signature.arg_size() == args.size())
712  return signature;
713 
714  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
715  auto sigParamInfos = signature.getExtParameterInfos();
716  if (!sigParamInfos.empty()) {
717  paramInfos.append(sigParamInfos.begin(), sigParamInfos.end());
718  paramInfos.resize(args.size());
719  }
720 
721  auto argTypes = getArgTypesForCall(Context, args);
722 
723  assert(signature.getRequiredArgs().allowsOptionalArgs());
724  return arrangeLLVMFunctionInfo(signature.getReturnType(),
725  signature.isInstanceMethod(),
726  signature.isChainCall(),
727  argTypes,
728  signature.getExtInfo(),
729  paramInfos,
730  signature.getRequiredArgs());
731 }
732 
733 namespace clang {
734 namespace CodeGen {
735 void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI);
736 }
737 }
738 
739 /// Arrange the argument and result information for an abstract value
740 /// of a given function type. This is the method which all of the
741 /// above functions ultimately defer to.
742 const CGFunctionInfo &
743 CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
744  bool instanceMethod,
745  bool chainCall,
746  ArrayRef<CanQualType> argTypes,
747  FunctionType::ExtInfo info,
748  ArrayRef<FunctionProtoType::ExtParameterInfo> paramInfos,
749  RequiredArgs required) {
750  assert(llvm::all_of(argTypes,
751  [](CanQualType T) { return T.isCanonicalAsParam(); }));
752 
753  // Lookup or create unique function info.
754  llvm::FoldingSetNodeID ID;
755  CGFunctionInfo::Profile(ID, instanceMethod, chainCall, info, paramInfos,
756  required, resultType, argTypes);
757 
758  void *insertPos = nullptr;
759  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
760  if (FI)
761  return *FI;
762 
763  unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());
764 
765  // Construct the function info. We co-allocate the ArgInfos.
766  FI = CGFunctionInfo::create(CC, instanceMethod, chainCall, info,
767  paramInfos, resultType, argTypes, required);
768  FunctionInfos.InsertNode(FI, insertPos);
769 
770  bool inserted = FunctionsBeingProcessed.insert(FI).second;
771  (void)inserted;
772  assert(inserted && "Recursively being processed?");
773 
774  // Compute ABI information.
775  if (CC == llvm::CallingConv::SPIR_KERNEL) {
776  // Force target independent argument handling for the host visible
777  // kernel functions.
778  computeSPIRKernelABIInfo(CGM, *FI);
779  } else if (info.getCC() == CC_Swift) {
780  swiftcall::computeABIInfo(CGM, *FI);
781  } else {
782  getABIInfo().computeInfo(*FI);
783  }
784 
785  // Loop over all of the computed argument and return value info. If any of
786  // them are direct or extend without a specified coerce type, specify the
787  // default now.
788  ABIArgInfo &retInfo = FI->getReturnInfo();
789  if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr)
790  retInfo.setCoerceToType(ConvertType(FI->getReturnType()));
791 
792  for (auto &I : FI->arguments())
793  if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr)
794  I.info.setCoerceToType(ConvertType(I.type));
795 
796  bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
797  assert(erased && "Not in set?");
798 
799  return *FI;
800 }
801 
802 CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
803  bool instanceMethod,
804  bool chainCall,
805  const FunctionType::ExtInfo &info,
806  ArrayRef<ExtParameterInfo> paramInfos,
807  CanQualType resultType,
808  ArrayRef<CanQualType> argTypes,
809  RequiredArgs required) {
810  assert(paramInfos.empty() || paramInfos.size() == argTypes.size());
811 
812  void *buffer =
813  operator new(totalSizeToAlloc<ArgInfo, ExtParameterInfo>(
814  argTypes.size() + 1, paramInfos.size()));
815 
816  CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
817  FI->CallingConvention = llvmCC;
818  FI->EffectiveCallingConvention = llvmCC;
819  FI->ASTCallingConvention = info.getCC();
820  FI->InstanceMethod = instanceMethod;
821  FI->ChainCall = chainCall;
822  FI->NoReturn = info.getNoReturn();
823  FI->ReturnsRetained = info.getProducesResult();
824  FI->NoCallerSavedRegs = info.getNoCallerSavedRegs();
825  FI->NoCfCheck = info.getNoCfCheck();
826  FI->Required = required;
827  FI->HasRegParm = info.getHasRegParm();
828  FI->RegParm = info.getRegParm();
829  FI->ArgStruct = nullptr;
830  FI->ArgStructAlign = 0;
831  FI->NumArgs = argTypes.size();
832  FI->HasExtParameterInfos = !paramInfos.empty();
833  FI->getArgsBuffer()[0].type = resultType;
834  for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
835  FI->getArgsBuffer()[i + 1].type = argTypes[i];
836  for (unsigned i = 0, e = paramInfos.size(); i != e; ++i)
837  FI->getExtParameterInfosBuffer()[i] = paramInfos[i];
838  return FI;
839 }
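// [Editorial note - not part of the original source] The operator new call
// above co-allocates the trailing arrays, so the resulting layout is,
// schematically:
//
//   [ CGFunctionInfo | ArgInfo[0..NumArgs] | ExtParameterInfo[0..M-1] ]
//
// ArgInfo[0] carries the return type/info and ArgInfo[1..NumArgs] the
// arguments, which is why totalSizeToAlloc is passed argTypes.size() + 1.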
840 
841 /***/
842 
843 namespace {
844 // ABIArgInfo::Expand implementation.
845 
846 // Specifies the way QualType passed as ABIArgInfo::Expand is expanded.
847 struct TypeExpansion {
848  enum TypeExpansionKind {
849  // Elements of constant arrays are expanded recursively.
850  TEK_ConstantArray,
851  // Record fields are expanded recursively (but if record is a union, only
852  // the field with the largest size is expanded).
853  TEK_Record,
854  // For complex types, real and imaginary parts are expanded recursively.
855  TEK_Complex,
856  // All other types are not expandable.
857  TEK_None
858  };
859 
860  const TypeExpansionKind Kind;
861 
862  TypeExpansion(TypeExpansionKind K) : Kind(K) {}
863  virtual ~TypeExpansion() {}
864 };
865 
866 struct ConstantArrayExpansion : TypeExpansion {
867  QualType EltTy;
868  uint64_t NumElts;
869 
870  ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
871  : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
872  static bool classof(const TypeExpansion *TE) {
873  return TE->Kind == TEK_ConstantArray;
874  }
875 };
876 
877 struct RecordExpansion : TypeExpansion {
878  SmallVector<const CXXBaseSpecifier *, 1> Bases;
879 
880  SmallVector<const FieldDecl *, 1> Fields;
881 
882  RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases,
883  SmallVector<const FieldDecl *, 1> &&Fields)
884  : TypeExpansion(TEK_Record), Bases(std::move(Bases)),
885  Fields(std::move(Fields)) {}
886  static bool classof(const TypeExpansion *TE) {
887  return TE->Kind == TEK_Record;
888  }
889 };
890 
891 struct ComplexExpansion : TypeExpansion {
892  QualType EltTy;
893 
894  ComplexExpansion(QualType EltTy) : TypeExpansion(TEK_Complex), EltTy(EltTy) {}
895  static bool classof(const TypeExpansion *TE) {
896  return TE->Kind == TEK_Complex;
897  }
898 };
899 
900 struct NoExpansion : TypeExpansion {
901  NoExpansion() : TypeExpansion(TEK_None) {}
902  static bool classof(const TypeExpansion *TE) {
903  return TE->Kind == TEK_None;
904  }
905 };
906 } // namespace
907 
908 static std::unique_ptr<TypeExpansion>
909 getTypeExpansion(QualType Ty, const ASTContext &Context) {
910  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
911  return llvm::make_unique<ConstantArrayExpansion>(
912  AT->getElementType(), AT->getSize().getZExtValue());
913  }
914  if (const RecordType *RT = Ty->getAs<RecordType>()) {
915  SmallVector<const CXXBaseSpecifier *, 1> Bases;
916  SmallVector<const FieldDecl *, 1> Fields;
917  const RecordDecl *RD = RT->getDecl();
918  assert(!RD->hasFlexibleArrayMember() &&
919  "Cannot expand structure with flexible array.");
920  if (RD->isUnion()) {
921  // Unions can be here only in degenerate cases - all the fields are the
922  // same after flattening. Thus we have to use the "largest" field.
923  const FieldDecl *LargestFD = nullptr;
924  CharUnits UnionSize = CharUnits::Zero();
925 
926  for (const auto *FD : RD->fields()) {
927  if (FD->isZeroLengthBitField(Context))
928  continue;
929  assert(!FD->isBitField() &&
930  "Cannot expand structure with bit-field members.");
931  CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType());
932  if (UnionSize < FieldSize) {
933  UnionSize = FieldSize;
934  LargestFD = FD;
935  }
936  }
937  if (LargestFD)
938  Fields.push_back(LargestFD);
939  } else {
940  if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
941  assert(!CXXRD->isDynamicClass() &&
942  "cannot expand vtable pointers in dynamic classes");
943  for (const CXXBaseSpecifier &BS : CXXRD->bases())
944  Bases.push_back(&BS);
945  }
946 
947  for (const auto *FD : RD->fields()) {
948  if (FD->isZeroLengthBitField(Context))
949  continue;
950  assert(!FD->isBitField() &&
951  "Cannot expand structure with bit-field members.");
952  Fields.push_back(FD);
953  }
954  }
955  return llvm::make_unique<RecordExpansion>(std::move(Bases),
956  std::move(Fields));
957  }
958  if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
959  return llvm::make_unique<ComplexExpansion>(CT->getElementType());
960  }
961  return llvm::make_unique<NoExpansion>();
962 }
963 
964 static int getExpansionSize(QualType Ty, const ASTContext &Context) {
965  auto Exp = getTypeExpansion(Ty, Context);
966  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
967  return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context);
968  }
969  if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
970  int Res = 0;
971  for (auto BS : RExp->Bases)
972  Res += getExpansionSize(BS->getType(), Context);
973  for (auto FD : RExp->Fields)
974  Res += getExpansionSize(FD->getType(), Context);
975  return Res;
976  }
977  if (isa<ComplexExpansion>(Exp.get()))
978  return 2;
979  assert(isa<NoExpansion>(Exp.get()));
980  return 1;
981 }
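// [Editorial example - not part of the original source] A worked case for
// the recursion above, assuming a hypothetical C struct:
//
//   struct P { float xy[2]; _Complex double z; };
//
//   getExpansionSize(P) = 2 * 1   // float[2]: two scalar elements
//                       + 2       // _Complex double: real + imaginary
//                       = 4 expanded IR arguments in total.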
982 
983 void
984 CodeGenTypes::getExpandedTypes(QualType Ty,
985  SmallVectorImpl<llvm::Type *>::iterator &TI) {
986  auto Exp = getTypeExpansion(Ty, Context);
987  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
988  for (int i = 0, n = CAExp->NumElts; i < n; i++) {
989  getExpandedTypes(CAExp->EltTy, TI);
990  }
991  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
992  for (auto BS : RExp->Bases)
993  getExpandedTypes(BS->getType(), TI);
994  for (auto FD : RExp->Fields)
995  getExpandedTypes(FD->getType(), TI);
996  } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
997  llvm::Type *EltTy = ConvertType(CExp->EltTy);
998  *TI++ = EltTy;
999  *TI++ = EltTy;
1000  } else {
1001  assert(isa<NoExpansion>(Exp.get()));
1002  *TI++ = ConvertType(Ty);
1003  }
1004 }
1005 
1006 static void forConstantArrayExpansion(CodeGenFunction &CGF,
1007  ConstantArrayExpansion *CAE,
1008  Address BaseAddr,
1009  llvm::function_ref<void(Address)> Fn) {
1010  CharUnits EltSize = CGF.getContext().getTypeSizeInChars(CAE->EltTy);
1011  CharUnits EltAlign =
1012  BaseAddr.getAlignment().alignmentOfArrayElement(EltSize);
1013 
1014  for (int i = 0, n = CAE->NumElts; i < n; i++) {
1015  llvm::Value *EltAddr =
1016  CGF.Builder.CreateConstGEP2_32(nullptr, BaseAddr.getPointer(), 0, i);
1017  Fn(Address(EltAddr, EltAlign));
1018  }
1019 }
1020 
1021 void CodeGenFunction::ExpandTypeFromArgs(
1022  QualType Ty, LValue LV, SmallVectorImpl<llvm::Value *>::iterator &AI) {
1023  assert(LV.isSimple() &&
1024  "Unexpected non-simple lvalue during struct expansion.");
1025 
1026  auto Exp = getTypeExpansion(Ty, getContext());
1027  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
1028  forConstantArrayExpansion(*this, CAExp, LV.getAddress(),
1029  [&](Address EltAddr) {
1030  LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
1031  ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
1032  });
1033  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
1034  Address This = LV.getAddress();
1035  for (const CXXBaseSpecifier *BS : RExp->Bases) {
1036  // Perform a single step derived-to-base conversion.
1037  Address Base =
1038  GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
1039  /*NullCheckValue=*/false, SourceLocation());
1040  LValue SubLV = MakeAddrLValue(Base, BS->getType());
1041 
1042  // Recurse onto bases.
1043  ExpandTypeFromArgs(BS->getType(), SubLV, AI);
1044  }
1045  for (auto FD : RExp->Fields) {
1046  // FIXME: What are the right qualifiers here?
1047  LValue SubLV = EmitLValueForFieldInitialization(LV, FD);
1048  ExpandTypeFromArgs(FD->getType(), SubLV, AI);
1049  }
1050  } else if (isa<ComplexExpansion>(Exp.get())) {
1051  auto realValue = *AI++;
1052  auto imagValue = *AI++;
1053  EmitStoreOfComplex(ComplexPairTy(realValue, imagValue), LV, /*init*/ true);
1054  } else {
1055  assert(isa<NoExpansion>(Exp.get()));
1056  EmitStoreThroughLValue(RValue::get(*AI++), LV);
1057  }
1058 }
1059 
1060 void CodeGenFunction::ExpandTypeToArgs(
1061  QualType Ty, CallArg Arg, llvm::FunctionType *IRFuncTy,
1062  SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) {
1063  auto Exp = getTypeExpansion(Ty, getContext());
1064  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
1065  Address Addr = Arg.hasLValue() ? Arg.getKnownLValue().getAddress()
1066  : Arg.getKnownRValue().getAggregateAddress();
1067  forConstantArrayExpansion(
1068  *this, CAExp, Addr, [&](Address EltAddr) {
1069  CallArg EltArg = CallArg(
1070  convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation()),
1071  CAExp->EltTy);
1072  ExpandTypeToArgs(CAExp->EltTy, EltArg, IRFuncTy, IRCallArgs,
1073  IRCallArgPos);
1074  });
1075  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
1076  Address This = Arg.hasLValue() ? Arg.getKnownLValue().getAddress()
1077  : Arg.getKnownRValue().getAggregateAddress();
1078  for (const CXXBaseSpecifier *BS : RExp->Bases) {
1079  // Perform a single step derived-to-base conversion.
1080  Address Base =
1081  GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
1082  /*NullCheckValue=*/false, SourceLocation());
1083  CallArg BaseArg = CallArg(RValue::getAggregate(Base), BS->getType());
1084 
1085  // Recurse onto bases.
1086  ExpandTypeToArgs(BS->getType(), BaseArg, IRFuncTy, IRCallArgs,
1087  IRCallArgPos);
1088  }
1089 
1090  LValue LV = MakeAddrLValue(This, Ty);
1091  for (auto FD : RExp->Fields) {
1092  CallArg FldArg =
1093  CallArg(EmitRValueForField(LV, FD, SourceLocation()), FD->getType());
1094  ExpandTypeToArgs(FD->getType(), FldArg, IRFuncTy, IRCallArgs,
1095  IRCallArgPos);
1096  }
1097  } else if (isa<ComplexExpansion>(Exp.get())) {
1098  ComplexPairTy CV = Arg.getKnownRValue().getComplexVal();
1099  IRCallArgs[IRCallArgPos++] = CV.first;
1100  IRCallArgs[IRCallArgPos++] = CV.second;
1101  } else {
1102  assert(isa<NoExpansion>(Exp.get()));
1103  auto RV = Arg.getKnownRValue();
1104  assert(RV.isScalar() &&
1105  "Unexpected non-scalar rvalue during struct expansion.");
1106 
1107  // Insert a bitcast as needed.
1108  llvm::Value *V = RV.getScalarVal();
1109  if (IRCallArgPos < IRFuncTy->getNumParams() &&
1110  V->getType() != IRFuncTy->getParamType(IRCallArgPos))
1111  V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos));
1112 
1113  IRCallArgs[IRCallArgPos++] = V;
1114  }
1115 }
1116 
1117 /// Create a temporary allocation for the purposes of coercion.
1118 static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty,
1119  CharUnits MinAlign) {
1120  // Don't use an alignment that's worse than what LLVM would prefer.
1121  auto PrefAlign = CGF.CGM.getDataLayout().getPrefTypeAlignment(Ty);
1122  CharUnits Align = std::max(MinAlign, CharUnits::fromQuantity(PrefAlign));
1123 
1124  return CGF.CreateTempAlloca(Ty, Align);
1125 }
1126 
1127 /// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
1128 /// accessing some number of bytes out of it, try to gep into the struct to get
1129 /// at its inner goodness. Dive as deep as possible without entering an element
1130 /// with an in-memory size smaller than DstSize.
1131 static Address
1132 EnterStructPointerForCoercedAccess(Address SrcPtr,
1133  llvm::StructType *SrcSTy,
1134  uint64_t DstSize, CodeGenFunction &CGF) {
1135  // We can't dive into a zero-element struct.
1136  if (SrcSTy->getNumElements() == 0) return SrcPtr;
1137 
1138  llvm::Type *FirstElt = SrcSTy->getElementType(0);
1139 
1140  // If the first elt is at least as large as what we're looking for, or if the
1141  // first element is the same size as the whole struct, we can enter it. The
1142  // comparison must be made on the store size and not the alloca size. Using
1143  // the alloca size may overstate the size of the load.
1144  uint64_t FirstEltSize =
1145  CGF.CGM.getDataLayout().getTypeStoreSize(FirstElt);
1146  if (FirstEltSize < DstSize &&
1147  FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(SrcSTy))
1148  return SrcPtr;
1149 
1150  // GEP into the first element.
1151  SrcPtr = CGF.Builder.CreateStructGEP(SrcPtr, 0, CharUnits(), "coerce.dive");
1152 
1153  // If the first element is a struct, recurse.
1154  llvm::Type *SrcTy = SrcPtr.getElementType();
1155  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
1156  return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
1157 
1158  return SrcPtr;
1159 }
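// [Editorial example - not part of the original source] Sketch of the "dive"
// above: with SrcSTy = { { i32, i32 }, i8 } and DstSize = 8, the first
// element's store size (8 bytes for { i32, i32 }) is not smaller than
// DstSize, so the function GEPs into it and recurses, ending up at the inner
// pair; with DstSize = 16 the early-out triggers and the outer pointer is
// returned unchanged.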
1160 
1161 /// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
1162 /// are either integers or pointers. This does a truncation of the value if it
1163 /// is too large or a zero extension if it is too small.
1164 ///
1165 /// This behaves as if the value were coerced through memory, so on big-endian
1166 /// targets the high bits are preserved in a truncation, while little-endian
1167 /// targets preserve the low bits.
1168 static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
1169  llvm::Type *Ty,
1170  CodeGenFunction &CGF) {
1171  if (Val->getType() == Ty)
1172  return Val;
1173 
1174  if (isa<llvm::PointerType>(Val->getType())) {
1175  // If this is Pointer->Pointer avoid conversion to and from int.
1176  if (isa<llvm::PointerType>(Ty))
1177  return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");
1178 
1179  // Convert the pointer to an integer so we can play with its width.
1180  Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
1181  }
1182 
1183  llvm::Type *DestIntTy = Ty;
1184  if (isa<llvm::PointerType>(DestIntTy))
1185  DestIntTy = CGF.IntPtrTy;
1186 
1187  if (Val->getType() != DestIntTy) {
1188  const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
1189  if (DL.isBigEndian()) {
1190  // Preserve the high bits on big-endian targets.
1191  // That is what memory coercion does.
1192  uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
1193  uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);
1194 
1195  if (SrcSize > DstSize) {
1196  Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
1197  Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");
1198  } else {
1199  Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
1200  Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");
1201  }
1202  } else {
1203  // Little-endian targets preserve the low bits. No shifts required.
1204  Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
1205  }
1206  }
1207 
1208  if (isa<llvm::PointerType>(Ty))
1209  Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
1210  return Val;
1211 }
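// [Editorial example - not part of the original source] Endianness sketch
// for the shift logic above, narrowing an i64 value to i32 "as if through
// memory":
//
//   big-endian:     val >> 32, then trunc  -> keeps the high 32 bits, i.e.
//                                             the first four bytes in memory
//   little-endian:  plain trunc            -> keeps the low 32 bits, which
//                                             are also the first four bytes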
1212 
1213 
1214 
1215 /// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
1216 /// a pointer to an object of type \arg Ty, known to be aligned to
1217 /// \arg SrcAlign bytes.
1218 ///
1219 /// This safely handles the case when the src type is smaller than the
1220 /// destination type; in this situation the values of bits which are not
1221 /// present in the src are undefined.
1222 static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
1223  CodeGenFunction &CGF) {
1224  llvm::Type *SrcTy = Src.getElementType();
1225 
1226  // If SrcTy and Ty are the same, just do a load.
1227  if (SrcTy == Ty)
1228  return CGF.Builder.CreateLoad(Src);
1229 
1230  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);
1231 
1232  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
1233  Src = EnterStructPointerForCoercedAccess(Src, SrcSTy, DstSize, CGF);
1234  SrcTy = Src.getType()->getElementType();
1235  }
1236 
1237  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
1238 
1239  // If the source and destination are integer or pointer types, just do an
1240  // extension or truncation to the desired type.
1241  if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
1242  (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
1243  llvm::Value *Load = CGF.Builder.CreateLoad(Src);
1244  return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
1245  }
1246 
1247  // If load is legal, just bitcast the src pointer.
1248  if (SrcSize >= DstSize) {
1249  // Generally SrcSize is never greater than DstSize, since this means we are
1250  // losing bits. However, this can happen in cases where the structure has
1251  // additional padding, for example due to a user specified alignment.
1252  //
1253  // FIXME: Assert that we aren't truncating non-padding bits when we have
1254  // access to that information.
1255  Src = CGF.Builder.CreateBitCast(Src,
1256  Ty->getPointerTo(Src.getAddressSpace()));
1257  return CGF.Builder.CreateLoad(Src);
1258  }
1259 
1260  // Otherwise do coercion through memory. This is stupid, but simple.
1261  Address Tmp = CreateTempAllocaForCoercion(CGF, Ty, Src.getAlignment());
1262  Address Casted = CGF.Builder.CreateElementBitCast(Tmp, CGF.Int8Ty);
1263  Address SrcCasted = CGF.Builder.CreateElementBitCast(Src, CGF.Int8Ty);
1264  CGF.Builder.CreateMemCpy(Casted, SrcCasted,
1265  llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize),
1266  false);
1267  return CGF.Builder.CreateLoad(Tmp);
1268 }
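// [Editorial example - not part of the original source] The three strategies
// above, illustrated for a destination type Ty = i64:
//
//   SrcTy = i32          -> int/ptr path: load the i32, then widen it via
//                           CoerceIntOrPtrToIntOrPtr
//   SrcTy = <2 x float>  -> SrcSize >= DstSize: bitcast the pointer to i64*
//                           and load directly
//   SrcTy = { i16, i16 } -> SrcSize < DstSize: memcpy into a fresh i64
//                           temporary and load that; the missing high bits
//                           are undefined, as the doc comment warns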
1269 
1270 // Function to store a first-class aggregate into memory. We prefer to
1271 // store the elements rather than the aggregate to be more friendly to
1272 // fast-isel.
1273 // FIXME: Do we need to recurse here?
1274 static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
1275  Address Dest, bool DestIsVolatile) {
1276  // Prefer scalar stores to first-class aggregate stores.
1277  if (llvm::StructType *STy =
1278  dyn_cast<llvm::StructType>(Val->getType())) {
1279  const llvm::StructLayout *Layout =
1280  CGF.CGM.getDataLayout().getStructLayout(STy);
1281 
1282  for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1283  auto EltOffset = CharUnits::fromQuantity(Layout->getElementOffset(i));
1284  Address EltPtr = CGF.Builder.CreateStructGEP(Dest, i, EltOffset);
1285  llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
1286  CGF.Builder.CreateStore(Elt, EltPtr, DestIsVolatile);
1287  }
1288  } else {
1289  CGF.Builder.CreateStore(Val, Dest, DestIsVolatile);
1290  }
1291 }
1292 
1293 /// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
1294 /// where the source and destination may have different types. The
1295 /// destination is known to be aligned to \arg DstAlign bytes.
1296 ///
1297 /// This safely handles the case when the src type is larger than the
1298 /// destination type; the upper bits of the src will be lost.
1299 static void CreateCoercedStore(llvm::Value *Src,
1300  Address Dst,
1301  bool DstIsVolatile,
1302  CodeGenFunction &CGF) {
1303  llvm::Type *SrcTy = Src->getType();
1304  llvm::Type *DstTy = Dst.getType()->getElementType();
1305  if (SrcTy == DstTy) {
1306  CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
1307  return;
1308  }
1309 
1310  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
1311 
1312  if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
1313  Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy, SrcSize, CGF);
1314  DstTy = Dst.getType()->getElementType();
1315  }
1316 
1317  // If the source and destination are integer or pointer types, just do an
1318  // extension or truncation to the desired type.
1319  if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
1320  (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
1321  Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
1322  CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
1323  return;
1324  }
1325 
1326  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);
1327 
1328  // If store is legal, just bitcast the src pointer.
1329  if (SrcSize <= DstSize) {
1330  Dst = CGF.Builder.CreateElementBitCast(Dst, SrcTy);
1331  BuildAggStore(CGF, Src, Dst, DstIsVolatile);
1332  } else {
1333  // Otherwise do coercion through memory. This is stupid, but
1334  // simple.
1335 
1336  // Generally SrcSize is never greater than DstSize, since this means we are
1337  // losing bits. However, this can happen in cases where the structure has
1338  // additional padding, for example due to a user specified alignment.
1339  //
1340  // FIXME: Assert that we aren't truncating non-padding bits when we have
1341  // access to that information.
1342  Address Tmp = CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment());
1343  CGF.Builder.CreateStore(Src, Tmp);
1344  Address Casted = CGF.Builder.CreateElementBitCast(Tmp, CGF.Int8Ty);
1345  Address DstCasted = CGF.Builder.CreateElementBitCast(Dst, CGF.Int8Ty);
1346  CGF.Builder.CreateMemCpy(DstCasted, Casted,
1347  llvm::ConstantInt::get(CGF.IntPtrTy, DstSize),
1348  false);
1349  }
1350 }
1351 
1352 static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr,
1353  const ABIArgInfo &info) {
1354  if (unsigned offset = info.getDirectOffset()) {
1355  addr = CGF.Builder.CreateElementBitCast(addr, CGF.Int8Ty);
1356  addr = CGF.Builder.CreateConstInBoundsByteGEP(addr,
1357  CharUnits::fromQuantity(offset));
1358  addr = CGF.Builder.CreateElementBitCast(addr, info.getCoerceToType());
1359  }
1360  return addr;
1361 }
1362 
1363 namespace {
1364 
1365 /// Encapsulates information about the way function arguments from
1366 /// CGFunctionInfo should be passed to actual LLVM IR function.
1367 class ClangToLLVMArgMapping {
1368  static const unsigned InvalidIndex = ~0U;
1369  unsigned InallocaArgNo;
1370  unsigned SRetArgNo;
1371  unsigned TotalIRArgs;
1372 
1373  /// Arguments of the LLVM IR function corresponding to a single Clang argument.
1374  struct IRArgs {
1375  unsigned PaddingArgIndex;
1376  // Argument is expanded to IR arguments at positions
1377  // [FirstArgIndex, FirstArgIndex + NumberOfArgs).
1378  unsigned FirstArgIndex;
1379  unsigned NumberOfArgs;
1380 
1381  IRArgs()
1382  : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),
1383  NumberOfArgs(0) {}
1384  };
1385 
1386  SmallVector<IRArgs, 8> ArgInfo;
1387 
1388 public:
1389  ClangToLLVMArgMapping(const ASTContext &Context, const CGFunctionInfo &FI,
1390  bool OnlyRequiredArgs = false)
1391  : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
1392  ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
1393  construct(Context, FI, OnlyRequiredArgs);
1394  }
1395 
1396  bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; }
1397  unsigned getInallocaArgNo() const {
1398  assert(hasInallocaArg());
1399  return InallocaArgNo;
1400  }
1401 
1402  bool hasSRetArg() const { return SRetArgNo != InvalidIndex; }
1403  unsigned getSRetArgNo() const {
1404  assert(hasSRetArg());
1405  return SRetArgNo;
1406  }
1407 
1408  unsigned totalIRArgs() const { return TotalIRArgs; }
1409 
1410  bool hasPaddingArg(unsigned ArgNo) const {
1411  assert(ArgNo < ArgInfo.size());
1412  return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;
1413  }
1414  unsigned getPaddingArgNo(unsigned ArgNo) const {
1415  assert(hasPaddingArg(ArgNo));
1416  return ArgInfo[ArgNo].PaddingArgIndex;
1417  }
1418 
1419  /// Returns the index of the first IR argument corresponding to ArgNo,
1420  /// and the number of such IR arguments.
1421  std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const {
1422  assert(ArgNo < ArgInfo.size());
1423  return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
1424  ArgInfo[ArgNo].NumberOfArgs);
1425  }
1426 
1427 private:
1428  void construct(const ASTContext &Context, const CGFunctionInfo &FI,
1429  bool OnlyRequiredArgs);
1430 };
1431 
1432 void ClangToLLVMArgMapping::construct(const ASTContext &Context,
1433  const CGFunctionInfo &FI,
1434  bool OnlyRequiredArgs) {
1435  unsigned IRArgNo = 0;
1436  bool SwapThisWithSRet = false;
1437  const ABIArgInfo &RetAI = FI.getReturnInfo();
1438 
1439  if (RetAI.getKind() == ABIArgInfo::Indirect) {
1440  SwapThisWithSRet = RetAI.isSRetAfterThis();
1441  SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;
1442  }
1443 
1444  unsigned ArgNo = 0;
1445  unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size();
1446  for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs;
1447  ++I, ++ArgNo) {
1448  assert(I != FI.arg_end());
1449  QualType ArgType = I->type;
1450  const ABIArgInfo &AI = I->info;
1451  // Collect data about IR arguments corresponding to Clang argument ArgNo.
1452  auto &IRArgs = ArgInfo[ArgNo];
1453 
1454  if (AI.getPaddingType())
1455  IRArgs.PaddingArgIndex = IRArgNo++;
1456 
1457  switch (AI.getKind()) {
1458  case ABIArgInfo::Extend:
1459  case ABIArgInfo::Direct: {
1460  // FIXME: handle sseregparm someday...
1461  llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType());
1462  if (AI.isDirect() && AI.getCanBeFlattened() && STy) {
1463  IRArgs.NumberOfArgs = STy->getNumElements();
1464  } else {
1465  IRArgs.NumberOfArgs = 1;
1466  }
1467  break;
1468  }
1469  case ABIArgInfo::Indirect:
1470  IRArgs.NumberOfArgs = 1;
1471  break;
1472  case ABIArgInfo::Ignore:
1473  case ABIArgInfo::InAlloca:
1474  // Ignore and InAlloca don't have matching LLVM parameters.
1475  IRArgs.NumberOfArgs = 0;
1476  break;
1477  case ABIArgInfo::CoerceAndExpand:
1478  IRArgs.NumberOfArgs = AI.getCoerceAndExpandTypeSequence().size();
1479  break;
1480  case ABIArgInfo::Expand:
1481  IRArgs.NumberOfArgs = getExpansionSize(ArgType, Context);
1482  break;
1483  }
1484 
1485  if (IRArgs.NumberOfArgs > 0) {
1486  IRArgs.FirstArgIndex = IRArgNo;
1487  IRArgNo += IRArgs.NumberOfArgs;
1488  }
1489 
1490  // Skip over the sret parameter when it comes second. We already handled it
1491  // above.
1492  if (IRArgNo == 1 && SwapThisWithSRet)
1493  IRArgNo++;
1494  }
1495  assert(ArgNo == ArgInfo.size());
1496 
1497  if (FI.usesInAlloca())
1498  InallocaArgNo = IRArgNo++;
1499 
1500  TotalIRArgs = IRArgNo;
1501 }
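// [Editorial example - not part of the original source] Suppose a function
// returns a large struct indirectly (sret) and takes (int, _Complex float)
// with the complex argument expanded. The mapping built above would be:
//
//   IR arg 0: sret pointer    SRetArgNo = 0
//   IR arg 1: i32             Clang arg 0 -> FirstArgIndex 1, NumberOfArgs 1
//   IR arg 2: float (real)    Clang arg 1 -> FirstArgIndex 2, NumberOfArgs 2
//   IR arg 3: float (imag)
//
// totalIRArgs() would then report 4.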
1502 } // namespace
1503 
1504 /***/
1505 
1506 bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
1507  const auto &RI = FI.getReturnInfo();
1508  return RI.isIndirect() || (RI.isInAlloca() && RI.getInAllocaSRet());
1509 }
1510 
1511 bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) {
1512  return ReturnTypeUsesSRet(FI) &&
1513  getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs();
1514 }
1515 
1516 bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
1517  if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
1518  switch (BT->getKind()) {
1519  default:
1520  return false;
1521  case BuiltinType::Float:
1522  return getTarget().useObjCFPRetForRealType(TargetInfo::Float);
1523  case BuiltinType::Double:
1524  return getTarget().useObjCFPRetForRealType(TargetInfo::Double);
1525  case BuiltinType::LongDouble:
1526  return getTarget().useObjCFPRetForRealType(TargetInfo::LongDouble);
1527  }
1528  }
1529 
1530  return false;
1531 }
1532 
1533 bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
1534  if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
1535  if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
1536  if (BT->getKind() == BuiltinType::LongDouble)
1537  return getTarget().useObjCFP2RetForComplexLongDouble();
1538  }
1539  }
1540 
1541  return false;
1542 }
1543 
1544 llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
1545  const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
1546  return GetFunctionType(FI);
1547 }
1548 
1549 llvm::FunctionType *
1550 CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
1551 
1552  bool Inserted = FunctionsBeingProcessed.insert(&FI).second;
1553  (void)Inserted;
1554  assert(Inserted && "Recursively being processed?");
1555 
1556  llvm::Type *resultType = nullptr;
1557  const ABIArgInfo &retAI = FI.getReturnInfo();
1558  switch (retAI.getKind()) {
1559  case ABIArgInfo::Expand:
1560  llvm_unreachable("Invalid ABI kind for return argument");
1561 
1562  case ABIArgInfo::Extend:
1563  case ABIArgInfo::Direct:
1564  resultType = retAI.getCoerceToType();
1565  break;
1566 
1567  case ABIArgInfo::InAlloca:
1568  if (retAI.getInAllocaSRet()) {
1569  // sret things on win32 aren't void; they return the sret pointer.
1570  QualType ret = FI.getReturnType();
1571  llvm::Type *ty = ConvertType(ret);
1572  unsigned addressSpace = Context.getTargetAddressSpace(ret);
1573  resultType = llvm::PointerType::get(ty, addressSpace);
1574  } else {
1575  resultType = llvm::Type::getVoidTy(getLLVMContext());
1576  }
1577  break;
1578 
1579  case ABIArgInfo::Indirect:
1580  case ABIArgInfo::Ignore:
1581  resultType = llvm::Type::getVoidTy(getLLVMContext());
1582  break;
1583 
1584  case ABIArgInfo::CoerceAndExpand:
1585  resultType = retAI.getUnpaddedCoerceAndExpandType();
1586  break;
1587  }
1588 
1589  ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true);
1590  SmallVector<llvm::Type*, 8> ArgTypes(IRFunctionArgs.totalIRArgs());
1591 
1592  // Add type for sret argument.
1593  if (IRFunctionArgs.hasSRetArg()) {
1594  QualType Ret = FI.getReturnType();
1595  llvm::Type *Ty = ConvertType(Ret);
1596  unsigned AddressSpace = Context.getTargetAddressSpace(Ret);
1597  ArgTypes[IRFunctionArgs.getSRetArgNo()] =
1598  llvm::PointerType::get(Ty, AddressSpace);
1599  }
1600 
1601  // Add type for inalloca argument.
1602  if (IRFunctionArgs.hasInallocaArg()) {
1603  auto ArgStruct = FI.getArgStruct();
1604  assert(ArgStruct);
1605  ArgTypes[IRFunctionArgs.getInallocaArgNo()] = ArgStruct->getPointerTo();
1606  }
1607 
1608  // Add in all of the required arguments.
1609  unsigned ArgNo = 0;
1610  CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
1611  ie = it + FI.getNumRequiredArgs();
1612  for (; it != ie; ++it, ++ArgNo) {
1613  const ABIArgInfo &ArgInfo = it->info;
1614 
1615  // Insert a padding type to ensure proper alignment.
1616  if (IRFunctionArgs.hasPaddingArg(ArgNo))
1617  ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
1618  ArgInfo.getPaddingType();
1619 
1620  unsigned FirstIRArg, NumIRArgs;
1621  std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
1622 
1623  switch (ArgInfo.getKind()) {
1624  case ABIArgInfo::Ignore:
1625  case ABIArgInfo::InAlloca:
1626  assert(NumIRArgs == 0);
1627  break;
1628 
1629  case ABIArgInfo::Indirect: {
1630  assert(NumIRArgs == 1);
1631  // indirect arguments are always on the stack, which is alloca addr space.
1632  llvm::Type *LTy = ConvertTypeForMem(it->type);
1633  ArgTypes[FirstIRArg] = LTy->getPointerTo(
1634  CGM.getDataLayout().getAllocaAddrSpace());
1635  break;
1636  }
1637 
1638  case ABIArgInfo::Extend:
1639  case ABIArgInfo::Direct: {
1640  // Fast-isel and the optimizer generally like scalar values better than
1641  // FCAs, so we flatten them if this is safe to do for this argument.
1642  llvm::Type *argType = ArgInfo.getCoerceToType();
1643  llvm::StructType *st = dyn_cast<llvm::StructType>(argType);
1644  if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
1645  assert(NumIRArgs == st->getNumElements());
1646  for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
1647  ArgTypes[FirstIRArg + i] = st->getElementType(i);
1648  } else {
1649  assert(NumIRArgs == 1);
1650  ArgTypes[FirstIRArg] = argType;
1651  }
1652  break;
1653  }
1654 
1655  case ABIArgInfo::CoerceAndExpand: {
1656  auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1657  for (auto EltTy : ArgInfo.getCoerceAndExpandTypeSequence()) {
1658  *ArgTypesIter++ = EltTy;
1659  }
1660  assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1661  break;
1662  }
1663 
1664  case ABIArgInfo::Expand:
1665  auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1666  getExpandedTypes(it->type, ArgTypesIter);
1667  assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1668  break;
1669  }
1670  }
1671 
1672  bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
1673  assert(Erased && "Not in set?");
1674 
1675  return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic());
1676 }
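// Illustrative sketch (not part of the original source): how the ABIArgInfo
// kinds above shape an IR signature. Assuming an x86-64 SysV target, where
// small aggregates are coerced and large ones are passed indirectly, a C
// declaration such as
//
//   struct Small { int a, b; };        // Direct: coerced to i64
//   struct Big   { char buf[64]; };    // Indirect: passed by pointer
//   struct Big f(struct Small s);
//
// is lowered to roughly
//
//   define void @f(%struct.Big* sret %agg.result, i64 %s.coerce)
//
// where the sret slot comes from the hasSRetArg() path and the i64 from the
// Direct/coerce path handled in the loop above.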
1677 
1678 llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
1679  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
1680  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
1681 
1682  if (!isFuncTypeConvertible(FPT))
1683  return llvm::StructType::get(getLLVMContext());
1684 
1685  const CGFunctionInfo *Info;
1686  if (isa<CXXDestructorDecl>(MD))
1687  Info =
1688  &arrangeCXXStructorDeclaration(MD, StructorType::Complete);
1689  else
1690  Info = &arrangeCXXMethodDeclaration(MD);
1691  return GetFunctionType(*Info);
1692 }
1693 
1694 static void AddAttributesFromFunctionProtoType(ASTContext &Ctx,
1695  llvm::AttrBuilder &FuncAttrs,
1696  const FunctionProtoType *FPT) {
1697  if (!FPT)
1698  return;
1699 
1700  if (!isUnresolvedExceptionSpec(FPT->getExceptionSpecType()) &&
1701  FPT->isNothrow())
1702  FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1703 }
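// For example (a sketch, not from this file): given a prototype with a
// non-throwing exception specification, such as
//
//   void g() noexcept;
//
// FPT->isNothrow() is true, so the helper above adds the IR-level
// 'nounwind' attribute for g.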
1704 
1705 void CodeGenModule::ConstructDefaultFnAttrList(StringRef Name, bool HasOptnone,
1706  bool AttrOnCallSite,
1707  llvm::AttrBuilder &FuncAttrs) {
1708  // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed.
1709  if (!HasOptnone) {
1710  if (CodeGenOpts.OptimizeSize)
1711  FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
1712  if (CodeGenOpts.OptimizeSize == 2)
1713  FuncAttrs.addAttribute(llvm::Attribute::MinSize);
1714  }
1715 
1716  if (CodeGenOpts.DisableRedZone)
1717  FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
1718  if (CodeGenOpts.IndirectTlsSegRefs)
1719  FuncAttrs.addAttribute("indirect-tls-seg-refs");
1720  if (CodeGenOpts.NoImplicitFloat)
1721  FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);
1722 
1723  if (AttrOnCallSite) {
1724  // Attributes that should go on the call site only.
1725  if (!CodeGenOpts.SimplifyLibCalls ||
1726  CodeGenOpts.isNoBuiltinFunc(Name.data()))
1727  FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
1728  if (!CodeGenOpts.TrapFuncName.empty())
1729  FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName);
1730  } else {
1731  // Attributes that should go on the function, but not the call site.
1732  if (!CodeGenOpts.DisableFPElim) {
1733  FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
1734  } else if (CodeGenOpts.OmitLeafFramePointer) {
1735  FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
1736  FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
1737  } else {
1738  FuncAttrs.addAttribute("no-frame-pointer-elim", "true");
1739  FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
1740  }
1741 
1742  FuncAttrs.addAttribute("less-precise-fpmad",
1743  llvm::toStringRef(CodeGenOpts.LessPreciseFPMAD));
1744 
1745  if (CodeGenOpts.NullPointerIsValid)
1746  FuncAttrs.addAttribute("null-pointer-is-valid", "true");
1747  if (!CodeGenOpts.FPDenormalMode.empty())
1748  FuncAttrs.addAttribute("denormal-fp-math", CodeGenOpts.FPDenormalMode);
1749 
1750  FuncAttrs.addAttribute("no-trapping-math",
1751  llvm::toStringRef(CodeGenOpts.NoTrappingMath));
1752 
1753  // Strict (compliant) code is the default, so only add this attribute to
1754  // indicate that we are trying to work around a problem case.
1755  if (!CodeGenOpts.StrictFloatCastOverflow)
1756  FuncAttrs.addAttribute("strict-float-cast-overflow", "false");
1757 
1758  // TODO: Are these all needed?
1759  // unsafe/inf/nan/nsz are handled by instruction-level FastMathFlags.
1760  FuncAttrs.addAttribute("no-infs-fp-math",
1761  llvm::toStringRef(CodeGenOpts.NoInfsFPMath));
1762  FuncAttrs.addAttribute("no-nans-fp-math",
1763  llvm::toStringRef(CodeGenOpts.NoNaNsFPMath));
1764  FuncAttrs.addAttribute("unsafe-fp-math",
1765  llvm::toStringRef(CodeGenOpts.UnsafeFPMath));
1766  FuncAttrs.addAttribute("use-soft-float",
1767  llvm::toStringRef(CodeGenOpts.SoftFloat));
1768  FuncAttrs.addAttribute("stack-protector-buffer-size",
1769  llvm::utostr(CodeGenOpts.SSPBufferSize));
1770  FuncAttrs.addAttribute("no-signed-zeros-fp-math",
1771  llvm::toStringRef(CodeGenOpts.NoSignedZeros));
1772  FuncAttrs.addAttribute(
1773  "correctly-rounded-divide-sqrt-fp-math",
1774  llvm::toStringRef(CodeGenOpts.CorrectlyRoundedDivSqrt));
1775 
1776  if (getLangOpts().OpenCL)
1777  FuncAttrs.addAttribute("denorms-are-zero",
1778  llvm::toStringRef(CodeGenOpts.FlushDenorm));
1779 
1780  // TODO: Reciprocal estimate codegen options should apply to instructions?
1781  const std::vector<std::string> &Recips = CodeGenOpts.Reciprocals;
1782  if (!Recips.empty())
1783  FuncAttrs.addAttribute("reciprocal-estimates",
1784  llvm::join(Recips, ","));
1785 
1786  if (!CodeGenOpts.PreferVectorWidth.empty() &&
1787  CodeGenOpts.PreferVectorWidth != "none")
1788  FuncAttrs.addAttribute("prefer-vector-width",
1789  CodeGenOpts.PreferVectorWidth);
1790 
1791  if (CodeGenOpts.StackRealignment)
1792  FuncAttrs.addAttribute("stackrealign");
1793  if (CodeGenOpts.Backchain)
1794  FuncAttrs.addAttribute("backchain");
1795 
1796  // FIXME: The interaction of this attribute with the SLH command line flag
1797  // has not been determined.
1798  if (CodeGenOpts.SpeculativeLoadHardening)
1799  FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
1800  }
1801 
1802  if (getLangOpts().assumeFunctionsAreConvergent()) {
1803  // Conservatively, mark all functions and calls in CUDA and OpenCL as
1804  // convergent (meaning, they may call an intrinsically convergent op, such
1805  // as __syncthreads() / barrier(), and so can't have certain optimizations
1806  // applied around them). LLVM will remove this attribute where it safely
1807  // can.
1808  FuncAttrs.addAttribute(llvm::Attribute::Convergent);
1809  }
1810 
1811  if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice) {
1812  // Exceptions aren't supported in CUDA device code.
1813  FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1814 
1815  // Respect -fcuda-flush-denormals-to-zero.
1816  if (CodeGenOpts.FlushDenorm)
1817  FuncAttrs.addAttribute("nvptx-f32ftz", "true");
1818  }
1819 
1820  for (StringRef Attr : CodeGenOpts.DefaultFunctionAttrs) {
1821  StringRef Var, Value;
1822  std::tie(Var, Value) = Attr.split('=');
1823  FuncAttrs.addAttribute(Var, Value);
1824  }
1825 }
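// Rough mapping from driver options handled above to IR attributes, for
// orientation (assumes default target settings; not exhaustive):
//
//   -Os                      -> optsize
//   -Oz                      -> optsize minsize
//   -mno-red-zone            -> noredzone
//   -fno-omit-frame-pointer  -> "no-frame-pointer-elim"="true"
//   -fno-builtin             -> nobuiltin (call sites only)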
1826 
1827 void CodeGenModule::AddDefaultFnAttrs(llvm::Function &F) {
1828  llvm::AttrBuilder FuncAttrs;
1829  ConstructDefaultFnAttrList(F.getName(),
1830  F.hasFnAttribute(llvm::Attribute::OptimizeNone),
1831  /* AttrOnCallsite = */ false, FuncAttrs);
1832  F.addAttributes(llvm::AttributeList::FunctionIndex, FuncAttrs);
1833 }
1834 
1835 void CodeGenModule::ConstructAttributeList(
1836  StringRef Name, const CGFunctionInfo &FI, CGCalleeInfo CalleeInfo,
1837  llvm::AttributeList &AttrList, unsigned &CallingConv, bool AttrOnCallSite) {
1838  llvm::AttrBuilder FuncAttrs;
1839  llvm::AttrBuilder RetAttrs;
1840 
1841  CallingConv = FI.getEffectiveCallingConvention();
1842  if (FI.isNoReturn())
1843  FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1844 
1845  // If we have information about the function prototype, we can learn
1846  // attributes from there.
1847  AddAttributesFromFunctionProtoType(getContext(), FuncAttrs,
1848  CalleeInfo.getCalleeFunctionProtoType());
1849 
1850  const Decl *TargetDecl = CalleeInfo.getCalleeDecl().getDecl();
1851 
1852  bool HasOptnone = false;
1853  // FIXME: handle sseregparm someday...
1854  if (TargetDecl) {
1855  if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
1856  FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
1857  if (TargetDecl->hasAttr<NoThrowAttr>())
1858  FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1859  if (TargetDecl->hasAttr<NoReturnAttr>())
1860  FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1861  if (TargetDecl->hasAttr<ColdAttr>())
1862  FuncAttrs.addAttribute(llvm::Attribute::Cold);
1863  if (TargetDecl->hasAttr<NoDuplicateAttr>())
1864  FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);
1865  if (TargetDecl->hasAttr<ConvergentAttr>())
1866  FuncAttrs.addAttribute(llvm::Attribute::Convergent);
1867  if (TargetDecl->hasAttr<SpeculativeLoadHardeningAttr>())
1868  FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
1869 
1870  if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
1871  AddAttributesFromFunctionProtoType(
1872  getContext(), FuncAttrs, Fn->getType()->getAs<FunctionProtoType>());
1873  // Don't use [[noreturn]] or _Noreturn for a call to a virtual function.
1874  // These attributes are not inherited by overloads.
1875  const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn);
1876  if (Fn->isNoReturn() && !(AttrOnCallSite && MD && MD->isVirtual()))
1877  FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1878  }
1879 
1880  // 'const', 'pure' and 'noalias' attributed functions are also nounwind.
1881  if (TargetDecl->hasAttr<ConstAttr>()) {
1882  FuncAttrs.addAttribute(llvm::Attribute::ReadNone);
1883  FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1884  } else if (TargetDecl->hasAttr<PureAttr>()) {
1885  FuncAttrs.addAttribute(llvm::Attribute::ReadOnly);
1886  FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1887  } else if (TargetDecl->hasAttr<NoAliasAttr>()) {
1888  FuncAttrs.addAttribute(llvm::Attribute::ArgMemOnly);
1889  FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1890  }
1891  if (TargetDecl->hasAttr<RestrictAttr>())
1892  RetAttrs.addAttribute(llvm::Attribute::NoAlias);
1893  if (TargetDecl->hasAttr<ReturnsNonNullAttr>() &&
1894  !CodeGenOpts.NullPointerIsValid)
1895  RetAttrs.addAttribute(llvm::Attribute::NonNull);
1896  if (TargetDecl->hasAttr<AnyX86NoCallerSavedRegistersAttr>())
1897  FuncAttrs.addAttribute("no_caller_saved_registers");
1898  if (TargetDecl->hasAttr<AnyX86NoCfCheckAttr>())
1899  FuncAttrs.addAttribute(llvm::Attribute::NoCfCheck);
1900 
1901  HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>();
1902  if (auto *AllocSize = TargetDecl->getAttr<AllocSizeAttr>()) {
1903  Optional<unsigned> NumElemsParam;
1904  if (AllocSize->getNumElemsParam().isValid())
1905  NumElemsParam = AllocSize->getNumElemsParam().getLLVMIndex();
1906  FuncAttrs.addAllocSizeAttr(AllocSize->getElemSizeParam().getLLVMIndex(),
1907  NumElemsParam);
1908  }
1909  }
1910 
1911  ConstructDefaultFnAttrList(Name, HasOptnone, AttrOnCallSite, FuncAttrs);
1912 
1913  if (CodeGenOpts.EnableSegmentedStacks &&
1914  !(TargetDecl && TargetDecl->hasAttr<NoSplitStackAttr>()))
1915  FuncAttrs.addAttribute("split-stack");
1916 
1917  // Add NonLazyBind attribute to function declarations when -fno-plt
1918  // is used.
1919  if (TargetDecl && CodeGenOpts.NoPLT) {
1920  if (auto *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
1921  if (!Fn->isDefined() && !AttrOnCallSite) {
1922  FuncAttrs.addAttribute(llvm::Attribute::NonLazyBind);
1923  }
1924  }
1925  }
1926 
1927  if (TargetDecl && TargetDecl->hasAttr<OpenCLKernelAttr>()) {
1928  if (getLangOpts().OpenCLVersion <= 120) {
1929  // In OpenCL v1.2, work groups are always uniform.
1930  FuncAttrs.addAttribute("uniform-work-group-size", "true");
1931  } else {
1932  // In OpenCL v2.0, work groups may or may not be uniform. The
1933  // '-cl-uniform-work-group-size' compile option hints to the
1934  // compiler that the global work-size is a multiple of the
1935  // work-group size specified to clEnqueueNDRangeKernel
1936  // (i.e. work groups are uniform).
1937  FuncAttrs.addAttribute("uniform-work-group-size",
1938  llvm::toStringRef(CodeGenOpts.UniformWGSize));
1939  }
1940  }
1941 
1942  if (!AttrOnCallSite) {
1943  bool DisableTailCalls = false;
1944 
1945  if (CodeGenOpts.DisableTailCalls)
1946  DisableTailCalls = true;
1947  else if (TargetDecl) {
1948  if (TargetDecl->hasAttr<DisableTailCallsAttr>() ||
1949  TargetDecl->hasAttr<AnyX86InterruptAttr>())
1950  DisableTailCalls = true;
1951  else if (CodeGenOpts.NoEscapingBlockTailCalls) {
1952  if (const auto *BD = dyn_cast<BlockDecl>(TargetDecl))
1953  if (!BD->doesNotEscape())
1954  DisableTailCalls = true;
1955  }
1956  }
1957 
1958  FuncAttrs.addAttribute("disable-tail-calls",
1959  llvm::toStringRef(DisableTailCalls));
1960  GetCPUAndFeaturesAttributes(CalleeInfo.getCalleeDecl(), FuncAttrs);
1961  }
1962 
1963  ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI);
1964 
1965  QualType RetTy = FI.getReturnType();
1966  const ABIArgInfo &RetAI = FI.getReturnInfo();
1967  switch (RetAI.getKind()) {
1968  case ABIArgInfo::Extend:
1969  if (RetAI.isSignExt())
1970  RetAttrs.addAttribute(llvm::Attribute::SExt);
1971  else
1972  RetAttrs.addAttribute(llvm::Attribute::ZExt);
1973  LLVM_FALLTHROUGH;
1974  case ABIArgInfo::Direct:
1975  if (RetAI.getInReg())
1976  RetAttrs.addAttribute(llvm::Attribute::InReg);
1977  break;
1978  case ABIArgInfo::Ignore:
1979  break;
1980 
1981  case ABIArgInfo::InAlloca:
1982  case ABIArgInfo::Indirect: {
1983  // inalloca and sret disable readnone and readonly
1984  FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
1985  .removeAttribute(llvm::Attribute::ReadNone);
1986  break;
1987  }
1988 
1989  case ABIArgInfo::CoerceAndExpand:
1990  break;
1991 
1992  case ABIArgInfo::Expand:
1993  llvm_unreachable("Invalid ABI kind for return argument");
1994  }
1995 
1996  if (const auto *RefTy = RetTy->getAs<ReferenceType>()) {
1997  QualType PTy = RefTy->getPointeeType();
1998  if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
1999  RetAttrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
2000  .getQuantity());
2001  else if (getContext().getTargetAddressSpace(PTy) == 0 &&
2002  !CodeGenOpts.NullPointerIsValid)
2003  RetAttrs.addAttribute(llvm::Attribute::NonNull);
2004  }
2005 
2006  bool hasUsedSRet = false;
2007  SmallVector<llvm::AttributeSet, 4> ArgAttrs(IRFunctionArgs.totalIRArgs());
2008 
2009  // Attach attributes to sret.
2010  if (IRFunctionArgs.hasSRetArg()) {
2011  llvm::AttrBuilder SRETAttrs;
2012  if (!RetAI.getSuppressSRet())
2013  SRETAttrs.addAttribute(llvm::Attribute::StructRet);
2014  hasUsedSRet = true;
2015  if (RetAI.getInReg())
2016  SRETAttrs.addAttribute(llvm::Attribute::InReg);
2017  ArgAttrs[IRFunctionArgs.getSRetArgNo()] =
2018  llvm::AttributeSet::get(getLLVMContext(), SRETAttrs);
2019  }
2020 
2021  // Attach attributes to inalloca argument.
2022  if (IRFunctionArgs.hasInallocaArg()) {
2023  llvm::AttrBuilder Attrs;
2024  Attrs.addAttribute(llvm::Attribute::InAlloca);
2025  ArgAttrs[IRFunctionArgs.getInallocaArgNo()] =
2026  llvm::AttributeSet::get(getLLVMContext(), Attrs);
2027  }
2028 
2029  unsigned ArgNo = 0;
2030  for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(),
2031  E = FI.arg_end();
2032  I != E; ++I, ++ArgNo) {
2033  QualType ParamType = I->type;
2034  const ABIArgInfo &AI = I->info;
2035  llvm::AttrBuilder Attrs;
2036 
2037  // Add attribute for padding argument, if necessary.
2038  if (IRFunctionArgs.hasPaddingArg(ArgNo)) {
2039  if (AI.getPaddingInReg()) {
2040  ArgAttrs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
2041  llvm::AttributeSet::get(
2042  getLLVMContext(),
2043  llvm::AttrBuilder().addAttribute(llvm::Attribute::InReg));
2044  }
2045  }
2046 
2047  // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
2048  // have the corresponding parameter variable. It doesn't make
2049  // sense to do it here because parameters are so messed up.
2050  switch (AI.getKind()) {
2051  case ABIArgInfo::Extend:
2052  if (AI.isSignExt())
2053  Attrs.addAttribute(llvm::Attribute::SExt);
2054  else
2055  Attrs.addAttribute(llvm::Attribute::ZExt);
2056  LLVM_FALLTHROUGH;
2057  case ABIArgInfo::Direct:
2058  if (ArgNo == 0 && FI.isChainCall())
2059  Attrs.addAttribute(llvm::Attribute::Nest);
2060  else if (AI.getInReg())
2061  Attrs.addAttribute(llvm::Attribute::InReg);
2062  break;
2063 
2064  case ABIArgInfo::Indirect: {
2065  if (AI.getInReg())
2066  Attrs.addAttribute(llvm::Attribute::InReg);
2067 
2068  if (AI.getIndirectByVal())
2069  Attrs.addAttribute(llvm::Attribute::ByVal);
2070 
2071  CharUnits Align = AI.getIndirectAlign();
2072 
2073  // In a byval argument, it is important that the required
2074  // alignment of the type is honored, as LLVM might be creating a
2075  // *new* stack object, and needs to know what alignment to give
2076  // it. (Sometimes it can deduce a sensible alignment on its own,
2077  // but not if clang decides it must emit a packed struct, or the
2078  // user specifies increased alignment requirements.)
2079  //
2080  // This is different from indirect *not* byval, where the object
2081  // exists already, and the align attribute is purely
2082  // informative.
2083  assert(!Align.isZero());
2084 
2085  // For now, only add this when we have a byval argument.
2086  // TODO: be less lazy about updating test cases.
2087  if (AI.getIndirectByVal())
2088  Attrs.addAlignmentAttr(Align.getQuantity());
2089 
2090  // byval disables readnone and readonly.
2091  FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
2092  .removeAttribute(llvm::Attribute::ReadNone);
2093  break;
2094  }
2095  case ABIArgInfo::Ignore:
2096  case ABIArgInfo::Expand:
2098  break;
2099 
2100  case ABIArgInfo::InAlloca:
2101  // inalloca disables readnone and readonly.
2102  FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
2103  .removeAttribute(llvm::Attribute::ReadNone);
2104  continue;
2105  }
2106 
2107  if (const auto *RefTy = ParamType->getAs<ReferenceType>()) {
2108  QualType PTy = RefTy->getPointeeType();
2109  if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
2110  Attrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
2111  .getQuantity());
2112  else if (getContext().getTargetAddressSpace(PTy) == 0 &&
2113  !CodeGenOpts.NullPointerIsValid)
2114  Attrs.addAttribute(llvm::Attribute::NonNull);
2115  }
2116 
2117  switch (FI.getExtParameterInfo(ArgNo).getABI()) {
2118  case ParameterABI::Ordinary:
2119  break;
2120 
2121  case ParameterABI::SwiftIndirectResult: {
2122  // Add 'sret' if we haven't already used it for something, but
2123  // only if the result is void.
2124  if (!hasUsedSRet && RetTy->isVoidType()) {
2125  Attrs.addAttribute(llvm::Attribute::StructRet);
2126  hasUsedSRet = true;
2127  }
2128 
2129  // Add 'noalias' in either case.
2130  Attrs.addAttribute(llvm::Attribute::NoAlias);
2131 
2132  // Add 'dereferenceable' and 'alignment'.
2133  auto PTy = ParamType->getPointeeType();
2134  if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) {
2135  auto info = getContext().getTypeInfoInChars(PTy);
2136  Attrs.addDereferenceableAttr(info.first.getQuantity());
2137  Attrs.addAttribute(llvm::Attribute::getWithAlignment(getLLVMContext(),
2138  info.second.getQuantity()));
2139  }
2140  break;
2141  }
2142 
2143  case ParameterABI::SwiftErrorResult:
2144  Attrs.addAttribute(llvm::Attribute::SwiftError);
2145  break;
2146 
2147  case ParameterABI::SwiftContext:
2148  Attrs.addAttribute(llvm::Attribute::SwiftSelf);
2149  break;
2150  }
2151 
2152  if (FI.getExtParameterInfo(ArgNo).isNoEscape())
2153  Attrs.addAttribute(llvm::Attribute::NoCapture);
2154 
2155  if (Attrs.hasAttributes()) {
2156  unsigned FirstIRArg, NumIRArgs;
2157  std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
2158  for (unsigned i = 0; i < NumIRArgs; i++)
2159  ArgAttrs[FirstIRArg + i] =
2160  llvm::AttributeSet::get(getLLVMContext(), Attrs);
2161  }
2162  }
2163  assert(ArgNo == FI.arg_size());
2164 
2165  AttrList = llvm::AttributeList::get(
2166  getLLVMContext(), llvm::AttributeSet::get(getLLVMContext(), FuncAttrs),
2167  llvm::AttributeSet::get(getLLVMContext(), RetAttrs), ArgAttrs);
2168 }
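// Putting the pieces together (illustrative only): a declaration like
//
//   __attribute__((noreturn, cold)) void fail(const char *msg);
//
// would typically be emitted as roughly
//
//   declare void @fail(i8*) #0     ; #0 = { noreturn cold ... }
//
// with the function, return, and per-argument AttributeSets assembled from
// FuncAttrs, RetAttrs, and ArgAttrs exactly as above.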
2169 
2170 /// An argument came in as a promoted argument; demote it back to its
2171 /// declared type.
2172 static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
2173  const VarDecl *var,
2174  llvm::Value *value) {
2175  llvm::Type *varType = CGF.ConvertType(var->getType());
2176 
2177  // This can happen with promotions that actually don't change the
2178  // underlying type, like the enum promotions.
2179  if (value->getType() == varType) return value;
2180 
2181  assert((varType->isIntegerTy() || varType->isFloatingPointTy())
2182  && "unexpected promotion type");
2183 
2184  if (isa<llvm::IntegerType>(varType))
2185  return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");
2186 
2187  return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
2188 }
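// Example of the demotion above (a sketch): for a K&R-style definition
//
//   float f(x) float x; { return x; }
//
// the default argument promotions widen the argument to double, so the
// prolog receives a double and this helper emits
//
//   %arg.unpromote = fptrunc double %x to float
//
// to recover the declared parameter type. Integer promotions (e.g. char
// promoted to int) take the CreateTrunc path instead.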
2189 
2190 /// Returns the attribute (either parameter attribute, or function
2191 /// attribute), which declares argument ArgNo to be non-null.
2192 static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD,
2193  QualType ArgType, unsigned ArgNo) {
2194  // FIXME: __attribute__((nonnull)) can also be applied to:
2195  // - references to pointers, where the pointee is known to be
2196  // nonnull (apparently a Clang extension)
2197  // - transparent unions containing pointers
2198  // In the former case, LLVM IR cannot represent the constraint. In
2199  // the latter case, we have no guarantee that the transparent union
2200  // is in fact passed as a pointer.
2201  if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType())
2202  return nullptr;
2203  // First, check attribute on parameter itself.
2204  if (PVD) {
2205  if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>())
2206  return ParmNNAttr;
2207  }
2208  // Check function attributes.
2209  if (!FD)
2210  return nullptr;
2211  for (const auto *NNAttr : FD->specific_attrs<NonNullAttr>()) {
2212  if (NNAttr->isNonNull(ArgNo))
2213  return NNAttr;
2214  }
2215  return nullptr;
2216 }
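// Both attribute spellings the helper above recognizes, for reference:
//
//   void f(int *p __attribute__((nonnull)));      // on the parameter itself
//   __attribute__((nonnull(1))) void g(int *p);   // on the function, with a
//                                                 // 1-based argument index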
2217 
2218 namespace {
2219  struct CopyBackSwiftError final : EHScopeStack::Cleanup {
2220  Address Temp;
2221  Address Arg;
2222  CopyBackSwiftError(Address temp, Address arg) : Temp(temp), Arg(arg) {}
2223  void Emit(CodeGenFunction &CGF, Flags flags) override {
2224  llvm::Value *errorValue = CGF.Builder.CreateLoad(Temp);
2225  CGF.Builder.CreateStore(errorValue, Arg);
2226  }
2227  };
2228 }
2229 
2230 void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
2231  llvm::Function *Fn,
2232  const FunctionArgList &Args) {
2233  if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>())
2234  // Naked functions don't have prologues.
2235  return;
2236 
2237  // If this is an implicit-return-zero function, go ahead and
2238  // initialize the return value. TODO: it might be nice to have
2239  // a more general mechanism for this that didn't require synthesized
2240  // return statements.
2241  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
2242  if (FD->hasImplicitReturnZero()) {
2243  QualType RetTy = FD->getReturnType().getUnqualifiedType();
2244  llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
2245  llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
2246  Builder.CreateStore(Zero, ReturnValue);
2247  }
2248  }
2249 
2250  // FIXME: We no longer need the types from FunctionArgList; lift up and
2251  // simplify.
2252 
2253  ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI);
2254  // Flattened function arguments.
2255  SmallVector<llvm::Value *, 16> FnArgs;
2256  FnArgs.reserve(IRFunctionArgs.totalIRArgs());
2257  for (auto &Arg : Fn->args()) {
2258  FnArgs.push_back(&Arg);
2259  }
2260  assert(FnArgs.size() == IRFunctionArgs.totalIRArgs());
2261 
2262  // If we're using inalloca, all the memory arguments are GEPs off of the last
2263  // parameter, which is a pointer to the complete memory area.
2264  Address ArgStruct = Address::invalid();
2265  const llvm::StructLayout *ArgStructLayout = nullptr;
2266  if (IRFunctionArgs.hasInallocaArg()) {
2267  ArgStructLayout = CGM.getDataLayout().getStructLayout(FI.getArgStruct());
2268  ArgStruct = Address(FnArgs[IRFunctionArgs.getInallocaArgNo()],
2269  FI.getArgStructAlignment());
2270 
2271  assert(ArgStruct.getType() == FI.getArgStruct()->getPointerTo());
2272  }
2273 
2274  // Name the struct return parameter.
2275  if (IRFunctionArgs.hasSRetArg()) {
2276  auto AI = cast<llvm::Argument>(FnArgs[IRFunctionArgs.getSRetArgNo()]);
2277  AI->setName("agg.result");
2278  AI->addAttr(llvm::Attribute::NoAlias);
2279  }
2280 
2281  // Track if we received the parameter as a pointer (indirect, byval, or
2282  // inalloca). If we already have a pointer, EmitParmDecl doesn't need to copy it
2283  // into a local alloca for us.
2284  SmallVector<ParamValue, 16> ArgVals;
2285  ArgVals.reserve(Args.size());
2286 
2287  // Create a pointer value for every parameter declaration. This usually
2288  // entails copying one or more LLVM IR arguments into an alloca. Don't push
2289  // any cleanups or do anything that might unwind. We do that separately, so
2290  // we can push the cleanups in the correct order for the ABI.
2291  assert(FI.arg_size() == Args.size() &&
2292  "Mismatch between function signature & arguments.");
2293  unsigned ArgNo = 0;
2294  CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
2295  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
2296  i != e; ++i, ++info_it, ++ArgNo) {
2297  const VarDecl *Arg = *i;
2298  const ABIArgInfo &ArgI = info_it->info;
2299 
2300  bool isPromoted =
2301  isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
2302  // We are converting from ABIArgInfo type to VarDecl type directly, unless
2303  // the parameter is promoted. In this case we convert to
2304  // CGFunctionInfo::ArgInfo type with subsequent argument demotion.
2305  QualType Ty = isPromoted ? info_it->type : Arg->getType();
2306  assert(hasScalarEvaluationKind(Ty) ==
2307  hasScalarEvaluationKind(Arg->getType()));
2308 
2309  unsigned FirstIRArg, NumIRArgs;
2310  std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
2311 
2312  switch (ArgI.getKind()) {
2313  case ABIArgInfo::InAlloca: {
2314  assert(NumIRArgs == 0);
2315  auto FieldIndex = ArgI.getInAllocaFieldIndex();
2316  CharUnits FieldOffset =
2317  CharUnits::fromQuantity(ArgStructLayout->getElementOffset(FieldIndex));
2318  Address V = Builder.CreateStructGEP(ArgStruct, FieldIndex, FieldOffset,
2319  Arg->getName());
2320  ArgVals.push_back(ParamValue::forIndirect(V));
2321  break;
2322  }
2323 
2324  case ABIArgInfo::Indirect: {
2325  assert(NumIRArgs == 1);
2326  Address ParamAddr = Address(FnArgs[FirstIRArg], ArgI.getIndirectAlign());
2327 
2328  if (!hasScalarEvaluationKind(Ty)) {
2329  // Aggregates and complex variables are accessed by reference. All we
2330  // need to do is realign the value, if requested.
2331  Address V = ParamAddr;
2332  if (ArgI.getIndirectRealign()) {
2333  Address AlignedTemp = CreateMemTemp(Ty, "coerce");
2334 
2335  // Copy from the incoming argument pointer to the temporary with the
2336  // appropriate alignment.
2337  //
2338  // FIXME: We should have a common utility for generating an aggregate
2339  // copy.
2340  CharUnits Size = getContext().getTypeSizeInChars(Ty);
2341  auto SizeVal = llvm::ConstantInt::get(IntPtrTy, Size.getQuantity());
2342  Address Dst = Builder.CreateBitCast(AlignedTemp, Int8PtrTy);
2343  Address Src = Builder.CreateBitCast(ParamAddr, Int8PtrTy);
2344  Builder.CreateMemCpy(Dst, Src, SizeVal, false);
2345  V = AlignedTemp;
2346  }
2347  ArgVals.push_back(ParamValue::forIndirect(V));
2348  } else {
2349  // Load scalar value from indirect argument.
2350  llvm::Value *V =
2351  EmitLoadOfScalar(ParamAddr, false, Ty, Arg->getBeginLoc());
2352 
2353  if (isPromoted)
2354  V = emitArgumentDemotion(*this, Arg, V);
2355  ArgVals.push_back(ParamValue::forDirect(V));
2356  }
2357  break;
2358  }
2359 
2360  case ABIArgInfo::Extend:
2361  case ABIArgInfo::Direct: {
2362 
2363  // If we have the trivial case, handle it with no muss and fuss.
2364  if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
2365  ArgI.getCoerceToType() == ConvertType(Ty) &&
2366  ArgI.getDirectOffset() == 0) {
2367  assert(NumIRArgs == 1);
2368  llvm::Value *V = FnArgs[FirstIRArg];
2369  auto AI = cast<llvm::Argument>(V);
2370 
2371  if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {
2372  if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(),
2373  PVD->getFunctionScopeIndex()) &&
2374  !CGM.getCodeGenOpts().NullPointerIsValid)
2375  AI->addAttr(llvm::Attribute::NonNull);
2376 
2377  QualType OTy = PVD->getOriginalType();
2378  if (const auto *ArrTy =
2379  getContext().getAsConstantArrayType(OTy)) {
2380  // A C99 array parameter declaration with the static keyword also
2381  // indicates dereferenceability, and if the size is constant we can
2382  // use the dereferenceable attribute (which requires the size in
2383  // bytes).
2384  if (ArrTy->getSizeModifier() == ArrayType::Static) {
2385  QualType ETy = ArrTy->getElementType();
2386  uint64_t ArrSize = ArrTy->getSize().getZExtValue();
2387  if (!ETy->isIncompleteType() && ETy->isConstantSizeType() &&
2388  ArrSize) {
2389  llvm::AttrBuilder Attrs;
2390  Attrs.addDereferenceableAttr(
2391  getContext().getTypeSizeInChars(ETy).getQuantity()*ArrSize);
2392  AI->addAttrs(Attrs);
2393  } else if (getContext().getTargetAddressSpace(ETy) == 0 &&
2394  !CGM.getCodeGenOpts().NullPointerIsValid) {
2395  AI->addAttr(llvm::Attribute::NonNull);
2396  }
2397  }
2398  } else if (const auto *ArrTy =
2399  getContext().getAsVariableArrayType(OTy)) {
2400  // For C99 VLAs with the static keyword, we don't know the size so
2401  // we can't use the dereferenceable attribute, but in addrspace(0)
2402  // we know that it must be nonnull.
2403  if (ArrTy->getSizeModifier() == VariableArrayType::Static &&
2404  !getContext().getTargetAddressSpace(ArrTy->getElementType()) &&
2405  !CGM.getCodeGenOpts().NullPointerIsValid)
2406  AI->addAttr(llvm::Attribute::NonNull);
2407  }
2408 
2409  const auto *AVAttr = PVD->getAttr<AlignValueAttr>();
2410  if (!AVAttr)
2411  if (const auto *TOTy = dyn_cast<TypedefType>(OTy))
2412  AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>();
2413  if (AVAttr && !SanOpts.has(SanitizerKind::Alignment)) {
2414  // If alignment-assumption sanitizer is enabled, we do *not* add
2415  // alignment attribute here, but emit normal alignment assumption,
2416  // so the UBSAN check could function.
2417  llvm::Value *AlignmentValue =
2418  EmitScalarExpr(AVAttr->getAlignment());
2419  llvm::ConstantInt *AlignmentCI =
2420  cast<llvm::ConstantInt>(AlignmentValue);
2421  unsigned Alignment = std::min((unsigned)AlignmentCI->getZExtValue(),
2422  +llvm::Value::MaximumAlignment);
2423  AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr(Alignment));
2424  }
2425  }
2426 
2427  if (Arg->getType().isRestrictQualified())
2428  AI->addAttr(llvm::Attribute::NoAlias);
2429 
2430  // LLVM expects swifterror parameters to be used in very restricted
2431  // ways. Copy the value into a less-restricted temporary.
2432  if (FI.getExtParameterInfo(ArgNo).getABI()
2433  == ParameterABI::SwiftErrorResult) {
2434  QualType pointeeTy = Ty->getPointeeType();
2435  assert(pointeeTy->isPointerType());
2436  Address temp =
2437  CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
2438  Address arg = Address(V, getContext().getTypeAlignInChars(pointeeTy));
2439  llvm::Value *incomingErrorValue = Builder.CreateLoad(arg);
2440  Builder.CreateStore(incomingErrorValue, temp);
2441  V = temp.getPointer();
2442 
2443  // Push a cleanup to copy the value back at the end of the function.
2444  // The convention does not guarantee that the value will be written
2445  // back if the function exits with an unwind exception.
2446  EHStack.pushCleanup<CopyBackSwiftError>(NormalCleanup, temp, arg);
2447  }
2448 
2449  // Ensure the argument is the correct type.
2450  if (V->getType() != ArgI.getCoerceToType())
2451  V = Builder.CreateBitCast(V, ArgI.getCoerceToType());
2452 
2453  if (isPromoted)
2454  V = emitArgumentDemotion(*this, Arg, V);
2455 
2456  // Because of merging of function types from multiple decls it is
2457  // possible for the type of an argument to not match the corresponding
2458  // type in the function type. Since we are codegening the callee
2459  // in here, add a cast to the argument type.
2460  llvm::Type *LTy = ConvertType(Arg->getType());
2461  if (V->getType() != LTy)
2462  V = Builder.CreateBitCast(V, LTy);
2463 
2464  ArgVals.push_back(ParamValue::forDirect(V));
2465  break;
2466  }
2467 
2468  Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg),
2469  Arg->getName());
2470 
2471  // Pointer to store into.
2472  Address Ptr = emitAddressAtOffset(*this, Alloca, ArgI);
2473 
2474  // Fast-isel and the optimizer generally like scalar values better than
2475  // FCAs, so we flatten them if this is safe to do for this argument.
2476  llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
2477  if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy &&
2478  STy->getNumElements() > 1) {
2479  auto SrcLayout = CGM.getDataLayout().getStructLayout(STy);
2480  uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
2481  llvm::Type *DstTy = Ptr.getElementType();
2482  uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy);
2483 
2484  Address AddrToStoreInto = Address::invalid();
2485  if (SrcSize <= DstSize) {
2486  AddrToStoreInto = Builder.CreateElementBitCast(Ptr, STy);
2487  } else {
2488  AddrToStoreInto =
2489  CreateTempAlloca(STy, Alloca.getAlignment(), "coerce");
2490  }
2491 
2492  assert(STy->getNumElements() == NumIRArgs);
2493  for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
2494  auto AI = FnArgs[FirstIRArg + i];
2495  AI->setName(Arg->getName() + ".coerce" + Twine(i));
2496  auto Offset = CharUnits::fromQuantity(SrcLayout->getElementOffset(i));
2497  Address EltPtr =
2498  Builder.CreateStructGEP(AddrToStoreInto, i, Offset);
2499  Builder.CreateStore(AI, EltPtr);
2500  }
2501 
2502  if (SrcSize > DstSize) {
2503  Builder.CreateMemCpy(Ptr, AddrToStoreInto, DstSize);
2504  }
2505 
2506  } else {
2507  // Simple case, just do a coerced store of the argument into the alloca.
2508  assert(NumIRArgs == 1);
2509  auto AI = FnArgs[FirstIRArg];
2510  AI->setName(Arg->getName() + ".coerce");
2511  CreateCoercedStore(AI, Ptr, /*DestIsVolatile=*/false, *this);
2512  }
2513 
2514  // Match to what EmitParmDecl is expecting for this type.
2515  if (CodeGenFunction::hasScalarEvaluationKind(Ty)) {
2516  llvm::Value *V =
2517  EmitLoadOfScalar(Alloca, false, Ty, Arg->getBeginLoc());
2518  if (isPromoted)
2519  V = emitArgumentDemotion(*this, Arg, V);
2520  ArgVals.push_back(ParamValue::forDirect(V));
2521  } else {
2522  ArgVals.push_back(ParamValue::forIndirect(Alloca));
2523  }
2524  break;
2525  }
2526 
2527  case ABIArgInfo::CoerceAndExpand: {
2528  // Reconstruct into a temporary.
2529  Address alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
2530  ArgVals.push_back(ParamValue::forIndirect(alloca));
2531 
2532  auto coercionType = ArgI.getCoerceAndExpandType();
2533  alloca = Builder.CreateElementBitCast(alloca, coercionType);
2534  auto layout = CGM.getDataLayout().getStructLayout(coercionType);
2535 
2536  unsigned argIndex = FirstIRArg;
2537  for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
2538  llvm::Type *eltType = coercionType->getElementType(i);
2539  if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType))
2540  continue;
2541 
2542  auto eltAddr = Builder.CreateStructGEP(alloca, i, layout);
2543  auto elt = FnArgs[argIndex++];
2544  Builder.CreateStore(elt, eltAddr);
2545  }
2546  assert(argIndex == FirstIRArg + NumIRArgs);
2547  break;
2548  }
2549 
2550  case ABIArgInfo::Expand: {
2551  // If this structure was expanded into multiple arguments then
2552  // we need to create a temporary and reconstruct it from the
2553  // arguments.
2554  Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
2555  LValue LV = MakeAddrLValue(Alloca, Ty);
2556  ArgVals.push_back(ParamValue::forIndirect(Alloca));
2557 
2558  auto FnArgIter = FnArgs.begin() + FirstIRArg;
2559  ExpandTypeFromArgs(Ty, LV, FnArgIter);
2560  assert(FnArgIter == FnArgs.begin() + FirstIRArg + NumIRArgs);
2561  for (unsigned i = 0, e = NumIRArgs; i != e; ++i) {
2562  auto AI = FnArgs[FirstIRArg + i];
2563  AI->setName(Arg->getName() + "." + Twine(i));
2564  }
2565  break;
2566  }
2567 
2568  case ABIArgInfo::Ignore:
2569  assert(NumIRArgs == 0);
2570  // Initialize the local variable appropriately.
2571  if (!hasScalarEvaluationKind(Ty)) {
2572  ArgVals.push_back(ParamValue::forIndirect(CreateMemTemp(Ty)));
2573  } else {
2574  llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType()));
2575  ArgVals.push_back(ParamValue::forDirect(U));
2576  }
2577  break;
2578  }
2579  }
2580 
2581  if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
2582  for (int I = Args.size() - 1; I >= 0; --I)
2583  EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2584  } else {
2585  for (unsigned I = 0, E = Args.size(); I != E; ++I)
2586  EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2587  }
2588 }
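// Worked example for the Direct/flatten path above (illustrative; assumes
// x86-64 SysV): a parameter of type
//
//   struct P { double x, y; };
//
// arrives as two IR arguments, %p.coerce0 and %p.coerce1. The prolog
// creates an alloca for %struct.P, stores each .coerce piece into the
// matching element, and records the alloca as an indirect ParamValue for
// EmitParmDecl.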
2589 
2590 static void eraseUnusedBitCasts(llvm::Instruction *insn) {
2591  while (insn->use_empty()) {
2592  llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
2593  if (!bitcast) return;
2594 
2595  // This is "safe" because we would have used a ConstantExpr otherwise.
2596  insn = cast<llvm::Instruction>(bitcast->getOperand(0));
2597  bitcast->eraseFromParent();
2598  }
2599 }
2600 
2601 /// Try to emit a fused autorelease of a return result.
2602 static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
2603  llvm::Value *result) {
2604  // We must be immediately followed by the cast.
2605  llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
2606  if (BB->empty()) return nullptr;
2607  if (&BB->back() != result) return nullptr;
2608 
2609  llvm::Type *resultType = result->getType();
2610 
2611  // result is in a BasicBlock and is therefore an Instruction.
2612  llvm::Instruction *generator = cast<llvm::Instruction>(result);
2613 
2614  SmallVector<llvm::Instruction *, 4> InstsToKill;
2615 
2616  // Look for:
2617  // %generator = bitcast %type1* %generator2 to %type2*
2618  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
2619  // We would have emitted this as a constant if the operand weren't
2620  // an Instruction.
2621  generator = cast<llvm::Instruction>(bitcast->getOperand(0));
2622 
2623  // Require the generator to be immediately followed by the cast.
2624  if (generator->getNextNode() != bitcast)
2625  return nullptr;
2626 
2627  InstsToKill.push_back(bitcast);
2628  }
2629 
2630  // Look for:
2631  // %generator = call i8* @objc_retain(i8* %originalResult)
2632  // or
2633  // %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
2634  llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
2635  if (!call) return nullptr;
2636 
2637  bool doRetainAutorelease;
2638 
2639  if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints().objc_retain) {
2640  doRetainAutorelease = true;
2641  } else if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints()
2642  .objc_retainAutoreleasedReturnValue) {
2643  doRetainAutorelease = false;
2644 
2645  // If we emitted an assembly marker for this call (and the
2646  // ARCEntrypoints field should have been set if so), go looking
2647  // for that call. If we can't find it, we can't do this
2648  // optimization. But it should always be the immediately previous
2649  // instruction, unless we needed bitcasts around the call.
2650  if (CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker) {
2651  llvm::Instruction *prev = call->getPrevNode();
2652  assert(prev);
2653  if (isa<llvm::BitCastInst>(prev)) {
2654  prev = prev->getPrevNode();
2655  assert(prev);
2656  }
2657  assert(isa<llvm::CallInst>(prev));
2658  assert(cast<llvm::CallInst>(prev)->getCalledValue() ==
2659  CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker);
2660  InstsToKill.push_back(prev);
2661  }
2662  } else {
2663  return nullptr;
2664  }
2665 
2666  result = call->getArgOperand(0);
2667  InstsToKill.push_back(call);
2668 
2669  // Keep killing bitcasts, for sanity. Note that we no longer care
2670  // about precise ordering as long as there's exactly one use.
2671  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
2672  if (!bitcast->hasOneUse()) break;
2673  InstsToKill.push_back(bitcast);
2674  result = bitcast->getOperand(0);
2675  }
2676 
2677  // Delete all the unnecessary instructions, from latest to earliest.
2678  for (auto *I : InstsToKill)
2679  I->eraseFromParent();
2680 
2681  // Do the fused retain/autorelease if we were asked to.
2682  if (doRetainAutorelease)
2683  result = CGF.EmitARCRetainAutoreleaseReturnValue(result);
2684 
2685  // Cast back to the result type.
2686  return CGF.Builder.CreateBitCast(result, resultType);
2687 }
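// The IR shapes matched above, for orientation (names illustrative): a
// return sequence ending in
//
//   %1 = call i8* @objc_retain(i8* %0)
//   ret i8* %1
//
// is rewritten to a single fused @objc_retainAutoreleaseReturnValue(%0);
// when the tail call is @objc_retainAutoreleasedReturnValue instead, it is
// deleted outright, since it cancels against the autorelease-return that
// the caller of this helper would otherwise emit.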
2688 
2689 /// If this is a +1 of the value of an immutable 'self', remove it.
2690 static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
2691  llvm::Value *result) {
2692  // This is only applicable to a method with an immutable 'self'.
2693  const ObjCMethodDecl *method =
2694  dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
2695  if (!method) return nullptr;
2696  const VarDecl *self = method->getSelfDecl();
2697  if (!self->getType().isConstQualified()) return nullptr;
2698 
2699  // Look for a retain call.
2700  llvm::CallInst *retainCall =
2701  dyn_cast<llvm::CallInst>(result->stripPointerCasts());
2702  if (!retainCall ||
2703  retainCall->getCalledValue() != CGF.CGM.getObjCEntrypoints().objc_retain)
2704  return nullptr;
2705 
2706  // Look for an ordinary load of 'self'.
2707  llvm::Value *retainedValue = retainCall->getArgOperand(0);
2708  llvm::LoadInst *load =
2709  dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
2710  if (!load || load->isAtomic() || load->isVolatile() ||
2711  load->getPointerOperand() != CGF.GetAddrOfLocalVar(self).getPointer())
2712  return nullptr;
2713 
2714  // Okay! Burn it all down. This relies for correctness on the
2715  // assumption that the retain is emitted as part of the return and
2716  // that thereafter everything is used "linearly".
2717  llvm::Type *resultType = result->getType();
2718  eraseUnusedBitCasts(cast<llvm::Instruction>(result));
2719  assert(retainCall->use_empty());
2720  retainCall->eraseFromParent();
2721  eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));
2722 
2723  return CGF.Builder.CreateBitCast(load, resultType);
2724 }
2725 
2726 /// Emit an ARC autorelease of the result of a function.
2727 ///
2728 /// \return the value to actually return from the function
2729 static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
2730  llvm::Value *result) {
2731  // If we're returning 'self', kill the initial retain. This is a
2732  // heuristic attempt to "encourage correctness" in the really unfortunate
2733  // case where we have a return of self during a dealloc and we desperately
2734  // need to avoid the possible autorelease.
2735  if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
2736  return self;
2737 
2738  // At -O0, try to emit a fused retain/autorelease.
2739  if (CGF.shouldUseFusedARCCalls())
2740  if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
2741  return fused;
2742 
2743  return CGF.EmitARCAutoreleaseReturnValue(result);
2744 }
2745 
2746 /// Heuristically search for a dominating store to the return-value slot.
2747 static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
2748  // Check if a User is a store whose pointer operand is the ReturnValue.
2749  // We are looking for stores to the ReturnValue, not for stores of the
2750  // ReturnValue to some other location.
2751  auto GetStoreIfValid = [&CGF](llvm::User *U) -> llvm::StoreInst * {
2752  auto *SI = dyn_cast<llvm::StoreInst>(U);
2753  if (!SI || SI->getPointerOperand() != CGF.ReturnValue.getPointer())
2754  return nullptr;
2755  // These aren't actually possible for non-coerced returns, and we
2756  // only care about non-coerced returns on this code path.
2757  assert(!SI->isAtomic() && !SI->isVolatile());
2758  return SI;
2759  };
2760  // If there are multiple uses of the return-value slot, just check
2761  // for something immediately preceding the IP. Sometimes this can
2762  // happen with how we generate implicit-returns; it can also happen
2763  // with noreturn cleanups.
2764  if (!CGF.ReturnValue.getPointer()->hasOneUse()) {
2765  llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
2766  if (IP->empty()) return nullptr;
2767  llvm::Instruction *I = &IP->back();
2768 
2769  // Skip lifetime markers
2770  for (llvm::BasicBlock::reverse_iterator II = IP->rbegin(),
2771  IE = IP->rend();
2772  II != IE; ++II) {
2773  if (llvm::IntrinsicInst *Intrinsic =
2774  dyn_cast<llvm::IntrinsicInst>(&*II)) {
2775  if (Intrinsic->getIntrinsicID() == llvm::Intrinsic::lifetime_end) {
2776  const llvm::Value *CastAddr = Intrinsic->getArgOperand(1);
2777  ++II;
2778  if (II == IE)
2779  break;
2780  if (isa<llvm::BitCastInst>(&*II) && (CastAddr == &*II))
2781  continue;
2782  }
2783  }
2784  I = &*II;
2785  break;
2786  }
2787 
2788  return GetStoreIfValid(I);
2789  }
2790 
2791  llvm::StoreInst *store =
2792  GetStoreIfValid(CGF.ReturnValue.getPointer()->user_back());
2793  if (!store) return nullptr;
2794 
2795  // Now do a quick-and-dirty dominance check: just walk up the
2796  // single-predecessors chain from the current insertion point.
2797  llvm::BasicBlock *StoreBB = store->getParent();
2798  llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
2799  while (IP != StoreBB) {
2800  if (!(IP = IP->getSinglePredecessor()))
2801  return nullptr;
2802  }
2803 
2804  // Okay, the store's basic block dominates the insertion point; we
2805  // can do our thing.
2806  return store;
2807 }
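// Sketch of the case this helper enables (IR illustrative): with
//
//   store i32 %x, i32* %retval
//   ; insertion point immediately follows, so the store dominates
//
// the epilog can return %x directly, erase the store, and usually erase the
// %retval alloca as well.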
2808 
2809 void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
2810  bool EmitRetDbgLoc,
2811  SourceLocation EndLoc) {
2812  if (FI.isNoReturn()) {
2813  // Noreturn functions don't return.
2814  EmitUnreachable(EndLoc);
2815  return;
2816  }
2817 
2818  if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) {
2819  // Naked functions don't have epilogues.
2820  Builder.CreateUnreachable();
2821  return;
2822  }
2823 
2824  // Functions with no result always return void.
2825  if (!ReturnValue.isValid()) {
2826  Builder.CreateRetVoid();
2827  return;
2828  }
2829 
2830  llvm::DebugLoc RetDbgLoc;
2831  llvm::Value *RV = nullptr;
2832  QualType RetTy = FI.getReturnType();
2833  const ABIArgInfo &RetAI = FI.getReturnInfo();
2834 
2835  switch (RetAI.getKind()) {
2836  case ABIArgInfo::InAlloca:
2837  // Aggregates get evaluated directly into the destination. Sometimes we
2838  // need to return the sret value in a register, though.
2839  assert(hasAggregateEvaluationKind(RetTy));
2840  if (RetAI.getInAllocaSRet()) {
2841  llvm::Function::arg_iterator EI = CurFn->arg_end();
2842  --EI;
2843  llvm::Value *ArgStruct = &*EI;
2844  llvm::Value *SRet = Builder.CreateStructGEP(
2845  nullptr, ArgStruct, RetAI.getInAllocaFieldIndex());
2846  RV = Builder.CreateAlignedLoad(SRet, getPointerAlign(), "sret");
2847  }
2848  break;
2849 
2850  case ABIArgInfo::Indirect: {
2851  auto AI = CurFn->arg_begin();
2852  if (RetAI.isSRetAfterThis())
2853  ++AI;
2854  switch (getEvaluationKind(RetTy)) {
2855  case TEK_Complex: {
2856  ComplexPairTy RT =
2857  EmitLoadOfComplex(MakeAddrLValue(ReturnValue, RetTy), EndLoc);
2858  EmitStoreOfComplex(RT, MakeNaturalAlignAddrLValue(&*AI, RetTy),
2859  /*isInit*/ true);
2860  break;
2861  }
2862  case TEK_Aggregate:
2863  // Do nothing; aggregates get evaluated directly into the destination.
2864  break;
2865  case TEK_Scalar:
2866  EmitStoreOfScalar(Builder.CreateLoad(ReturnValue),
2867  MakeNaturalAlignAddrLValue(&*AI, RetTy),
2868  /*isInit*/ true);
2869  break;
2870  }
2871  break;
2872  }
2873 
2874  case ABIArgInfo::Extend:
2875  case ABIArgInfo::Direct:
2876  if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
2877  RetAI.getDirectOffset() == 0) {
2878  // The internal return value temp will always have pointer-to-return-type
2879  // type; just do a load.
2880 
2881  // If there is a dominating store to ReturnValue, we can elide
2882  // the load, zap the store, and usually zap the alloca.
2883  if (llvm::StoreInst *SI =
2884  findDominatingStoreToReturnValue(*this)) {
2885  // Reuse the debug location from the store unless there is
2886  // cleanup code to be emitted between the store and return
2887  // instruction.
2888  if (EmitRetDbgLoc && !AutoreleaseResult)
2889  RetDbgLoc = SI->getDebugLoc();
2890  // Get the stored value and nuke the now-dead store.
2891  RV = SI->getValueOperand();
2892  SI->eraseFromParent();
2893 
2894  // If that was the only use of the return value, nuke it as well now.
2895  auto returnValueInst = ReturnValue.getPointer();
2896  if (returnValueInst->use_empty()) {
2897  if (auto alloca = dyn_cast<llvm::AllocaInst>(returnValueInst)) {
2898  alloca->eraseFromParent();
2899  ReturnValue = Address::invalid();
2900  }
2901  }
2902 
2903  // Otherwise, we have to do a simple load.
2904  } else {
2905  RV = Builder.CreateLoad(ReturnValue);
2906  }
2907  } else {
2908  // If the value is offset in memory, apply the offset now.
2909  Address V = emitAddressAtOffset(*this, ReturnValue, RetAI);
2910 
2911  RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
2912  }
2913 
2914  // In ARC, end functions that return a retainable type with a call
2915  // to objc_autoreleaseReturnValue.
2916  if (AutoreleaseResult) {
2917 #ifndef NDEBUG
2918  // Type::isObjCRetainableType has to be called on a QualType that hasn't
2919  // been stripped of the typedefs, so we cannot use RetTy here. Get the
2920  // original return type of FunctionDecl, CurCodeDecl, and BlockDecl from
2921  // CurCodeDecl or BlockInfo.
2922  QualType RT;
2923 
2924  if (auto *FD = dyn_cast<FunctionDecl>(CurCodeDecl))
2925  RT = FD->getReturnType();
2926  else if (auto *MD = dyn_cast<ObjCMethodDecl>(CurCodeDecl))
2927  RT = MD->getReturnType();
2928  else if (isa<BlockDecl>(CurCodeDecl))
2929  RT = BlockInfo->BlockExpression->getFunctionType()->getReturnType();
2930  else
2931  llvm_unreachable("Unexpected function/method type");
2932 
2933  assert(getLangOpts().ObjCAutoRefCount &&
2934  !FI.isReturnsRetained() &&
2935  RT->isObjCRetainableType());
2936 #endif
2937  RV = emitAutoreleaseOfResult(*this, RV);
2938  }
2939 
2940  break;
2941 
2942  case ABIArgInfo::Ignore:
2943  break;
2944 
2945  case ABIArgInfo::CoerceAndExpand: {
2946  auto coercionType = RetAI.getCoerceAndExpandType();
2947  auto layout = CGM.getDataLayout().getStructLayout(coercionType);
2948 
2949  // Load all of the coerced elements out into results.
2950  llvm::SmallVector<llvm::Value*, 4> results;
2951  Address addr = Builder.CreateElementBitCast(ReturnValue, coercionType);
2952  for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
2953  auto coercedEltType = coercionType->getElementType(i);
2954  if (ABIArgInfo::isPaddingForCoerceAndExpand(coercedEltType))
2955  continue;
2956 
2957  auto eltAddr = Builder.CreateStructGEP(addr, i, layout);
2958  auto elt = Builder.CreateLoad(eltAddr);
2959  results.push_back(elt);
2960  }
2961 
2962  // If we have one result, it's the single direct result type.
2963  if (results.size() == 1) {
2964  RV = results[0];
2965 
2966  // Otherwise, we need to make a first-class aggregate.
2967  } else {
2968  // Construct a return type that lacks padding elements.
2969  llvm::Type *returnType = RetAI.getUnpaddedCoerceAndExpandType();
2970 
2971  RV = llvm::UndefValue::get(returnType);
2972  for (unsigned i = 0, e = results.size(); i != e; ++i) {
2973  RV = Builder.CreateInsertValue(RV, results[i], i);
2974  }
2975  }
2976  break;
2977  }
2978 
2979  case ABIArgInfo::Expand:
2980  llvm_unreachable("Invalid ABI kind for return argument");
2981  }
2982 
2983  llvm::Instruction *Ret;
2984  if (RV) {
2985  EmitReturnValueCheck(RV);
2986  Ret = Builder.CreateRet(RV);
2987  } else {
2988  Ret = Builder.CreateRetVoid();
2989  }
2990 
2991  if (RetDbgLoc)
2992  Ret->setDebugLoc(std::move(RetDbgLoc));
2993 }
2994 
2995 void CodeGenFunction::EmitReturnValueCheck(llvm::Value *RV) {
2996  // A current decl may not be available when emitting vtable thunks.
2997  if (!CurCodeDecl)
2998  return;
2999 
3000  ReturnsNonNullAttr *RetNNAttr = nullptr;
3001  if (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute))
3002  RetNNAttr = CurCodeDecl->getAttr<ReturnsNonNullAttr>();
3003 
3004  if (!RetNNAttr && !requiresReturnValueNullabilityCheck())
3005  return;
3006 
3007  // Prefer the returns_nonnull attribute if it's present.
3008  SourceLocation AttrLoc;
3009  SanitizerMask CheckKind;
3010  SanitizerHandler Handler;
3011  if (RetNNAttr) {
3012  assert(!requiresReturnValueNullabilityCheck() &&
3013  "Cannot check nullability and the nonnull attribute");
3014  AttrLoc = RetNNAttr->getLocation();
3015  CheckKind = SanitizerKind::ReturnsNonnullAttribute;
3016  Handler = SanitizerHandler::NonnullReturn;
3017  } else {
3018  if (auto *DD = dyn_cast<DeclaratorDecl>(CurCodeDecl))
3019  if (auto *TSI = DD->getTypeSourceInfo())
3020  if (auto FTL = TSI->getTypeLoc().castAs<FunctionTypeLoc>())
3021  AttrLoc = FTL.getReturnLoc().findNullabilityLoc();
3022  CheckKind = SanitizerKind::NullabilityReturn;
3023  Handler = SanitizerHandler::NullabilityReturn;
3024  }
3025 
3026  SanitizerScope SanScope(this);
3027 
3028  // Make sure the "return" source location is valid. If we're checking a
3029  // nullability annotation, make sure the preconditions for the check are met.
3030  llvm::BasicBlock *Check = createBasicBlock("nullcheck");
3031  llvm::BasicBlock *NoCheck = createBasicBlock("no.nullcheck");
3032  llvm::Value *SLocPtr = Builder.CreateLoad(ReturnLocation, "return.sloc.load");
3033  llvm::Value *CanNullCheck = Builder.CreateIsNotNull(SLocPtr);
3034  if (requiresReturnValueNullabilityCheck())
3035  CanNullCheck =
3036  Builder.CreateAnd(CanNullCheck, RetValNullabilityPrecondition);
3037  Builder.CreateCondBr(CanNullCheck, Check, NoCheck);
3038  EmitBlock(Check);
3039 
3040  // Now do the null check.
3041  llvm::Value *Cond = Builder.CreateIsNotNull(RV);
3042  llvm::Constant *StaticData[] = {EmitCheckSourceLocation(AttrLoc)};
3043  llvm::Value *DynamicData[] = {SLocPtr};
3044  EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, DynamicData);
3045 
3046  EmitBlock(NoCheck);
3047 
3048 #ifndef NDEBUG
3049  // The return location should not be used after the check has been emitted.
3050  ReturnLocation = Address::invalid();
3051 #endif
3052 }
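// Example trigger for the check above (a sketch):
//
//   __attribute__((returns_nonnull)) int *get(void) { return 0; }
//
// Compiled with -fsanitize=returns-nonnull-attribute, the epilog loads the
// recorded return location, branches on the returned pointer, and calls the
// nonnull-return handler when it is null.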
3053 
3054 static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) {
3055  const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
3056  return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory;
3057 }
3058 
3059 static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF,
3060  QualType Ty) {
3061  // FIXME: Generate IR in one pass, rather than going back and fixing up these
3062  // placeholders.
3063  llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty);
3064  llvm::Type *IRPtrTy = IRTy->getPointerTo();
3065  llvm::Value *Placeholder = llvm::UndefValue::get(IRPtrTy->getPointerTo());
3066 
3067  // FIXME: When we generate this IR in one pass, we shouldn't need
3068  // this win32-specific alignment hack.
3069  CharUnits Align = CharUnits::fromQuantity(4);
3070  Placeholder = CGF.Builder.CreateAlignedLoad(IRPtrTy, Placeholder, Align);
3071 
3072  return AggValueSlot::forAddr(Address(Placeholder, Align),
3073  Ty.getQualifiers(),
3074  AggValueSlot::IsNotDestructed,
3075  AggValueSlot::DoesNotNeedGCBarriers,
3076  AggValueSlot::IsNotAliased,
3077  AggValueSlot::DoesNotOverlap);
3078 }
3079 
3080 void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
3081  const VarDecl *param,
3082  SourceLocation loc) {
3083  // StartFunction converted the ABI-lowered parameter(s) into a
3084  // local alloca. We need to turn that into an r-value suitable
3085  // for EmitCall.
3086  Address local = GetAddrOfLocalVar(param);
3087 
3088  QualType type = param->getType();
3089 
3090  if (isInAllocaArgument(CGM.getCXXABI(), type)) {
3091  CGM.ErrorUnsupported(param, "forwarded non-trivially copyable parameter");
3092  }
3093 
3094  // GetAddrOfLocalVar returns a pointer-to-pointer for references,
3095  // but the argument needs to be the original pointer.
3096  if (type->isReferenceType()) {
3097  args.add(RValue::get(Builder.CreateLoad(local)), type);
3098 
3099  // In ARC, move out of consumed arguments so that the release cleanup
3100  // entered by StartFunction doesn't cause an over-release. This isn't
3101  // optimal -O0 code generation, but it should get cleaned up when
3102  // optimization is enabled. This also assumes that delegate calls are
3103  // performed exactly once for a set of arguments, but that should be safe.
3104  } else if (getLangOpts().ObjCAutoRefCount &&
3105  param->hasAttr<NSConsumedAttr>() &&
3106  type->isObjCRetainableType()) {
3107  llvm::Value *ptr = Builder.CreateLoad(local);
3108  auto null =
3109  llvm::ConstantPointerNull::get(cast<llvm::PointerType>(ptr->getType()));
3110  Builder.CreateStore(null, local);
3111  args.add(RValue::get(ptr), type);
3112 
3113  // For the most part, we just need to load the alloca, except that
3114  // aggregate r-values are actually pointers to temporaries.
3115  } else {
3116  args.add(convertTempToRValue(local, type, loc), type);
3117  }
3118 
3119  // Deactivate the cleanup for the callee-destructed param that was pushed.
3120  if (hasAggregateEvaluationKind(type) && !CurFuncIsThunk &&
3121  type->getAs<RecordType>()->getDecl()->isParamDestroyedInCallee() &&
3122  type.isDestructedType()) {
3123  EHScopeStack::stable_iterator cleanup =
3124  CalleeDestructedParamCleanups.lookup(cast<ParmVarDecl>(param));
3125  assert(cleanup.isValid() &&
3126  "cleanup for callee-destructed param not recorded");
3127  // This unreachable is a temporary marker which will be removed later.
3128  llvm::Instruction *isActive = Builder.CreateUnreachable();
3129  args.addArgCleanupDeactivation(cleanup, isActive);
3130  }
3131 }
3132 
3133 static bool isProvablyNull(llvm::Value *addr) {
3134  return isa<llvm::ConstantPointerNull>(addr);
3135 }
3136 
3137 /// Emit the actual writing-back of a writeback.
3138 static void emitWriteback(CodeGenFunction &CGF,
3139  const CallArgList::Writeback &writeback) {
3140  const LValue &srcLV = writeback.Source;
3141  Address srcAddr = srcLV.getAddress();
3142  assert(!isProvablyNull(srcAddr.getPointer()) &&
3143  "shouldn't have writeback for provably null argument");
3144 
3145  llvm::BasicBlock *contBB = nullptr;
3146 
3147  // If the argument wasn't provably non-null, we need to null check
3148  // before doing the store.
3149  bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(),
3150  CGF.CGM.getDataLayout());
3151  if (!provablyNonNull) {
3152  llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
3153  contBB = CGF.createBasicBlock("icr.done");
3154 
3155  llvm::Value *isNull =
3156  CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
3157  CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
3158  CGF.EmitBlock(writebackBB);
3159  }
3160 
3161  // Load the value to writeback.
3162  llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);
3163 
3164  // Cast it back, in case we're writing an id to a Foo* or something.
3165  value = CGF.Builder.CreateBitCast(value, srcAddr.getElementType(),
3166  "icr.writeback-cast");
3167 
3168  // Perform the writeback.
3169 
3170  // If we have a "to use" value, it's something we need to emit a use
3171  // of. This has to be carefully threaded in: if it's done after the
3172  // release it's potentially undefined behavior (and the optimizer
3173  // will ignore it), and if it happens before the retain then the
3174  // optimizer could move the release there.
3175  if (writeback.ToUse) {
3176  assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong);
3177 
3178  // Retain the new value. No need to block-copy here: the block's
3179  // being passed up the stack.
3180  value = CGF.EmitARCRetainNonBlock(value);
3181 
3182  // Emit the intrinsic use here.
3183  CGF.EmitARCIntrinsicUse(writeback.ToUse);
3184 
3185  // Load the old value (primitively).
3186  llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV, SourceLocation());
3187 
3188  // Put the new value in place (primitively).
3189  CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false);
3190 
3191  // Release the old value.
3192  CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime());
3193 
3194  // Otherwise, we can just do a normal lvalue store.
3195  } else {
3196  CGF.EmitStoreThroughLValue(RValue::get(value), srcLV);
3197  }
3198 
3199  // Jump to the continuation block.
3200  if (!provablyNonNull)
3201  CGF.EmitBlock(contBB);
3202 }
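// Illustrative sketch of the ordering this function establishes when a
// "to use" value is present (rough pseudo-IR; the names are descriptive,
// not the exact ARC intrinsics):
//
//   %new = retain(%temp)   ; retain the value loaded from the temporary
//   use(%toUse)            ; intrinsic use, threaded between retain ...
//   %old = load %src
//   store %new, %src
//   release(%old)          ; ... and release, so neither can move past it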
3203 
3204 static void emitWritebacks(CodeGenFunction &CGF,
3205  const CallArgList &args) {
3206  for (const auto &I : args.writebacks())
3207  emitWriteback(CGF, I);
3208 }
3209 
3210 static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF,
3211  const CallArgList &CallArgs) {
3212  ArrayRef<CallArgList::CallArgCleanup> Cleanups =
3213  CallArgs.getCleanupsToDeactivate();
3214  // Iterate in reverse to increase the likelihood of popping the cleanup.
3215  for (const auto &I : llvm::reverse(Cleanups)) {
3216  CGF.DeactivateCleanupBlock(I.Cleanup, I.IsActiveIP);
3217  I.IsActiveIP->eraseFromParent();
3218  }
3219 }
3220 
3221 static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) {
3222  if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens()))
3223  if (uop->getOpcode() == UO_AddrOf)
3224  return uop->getSubExpr();
3225  return nullptr;
3226 }
3227 
3228 /// Emit an argument that's being passed call-by-writeback. That is,
3229 /// we are passing the address of an __autoreleased temporary; it
3230 /// might be copy-initialized with the current value of the given
3231 /// address, but it will definitely be copied out of after the call.
3232 static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
3233  const ObjCIndirectCopyRestoreExpr *CRE) {
3234  LValue srcLV;
3235 
3236  // Make an optimistic effort to emit the address as an l-value.
3237  // This can fail if the argument expression is more complicated.
3238  if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) {
3239  srcLV = CGF.EmitLValue(lvExpr);
3240 
3241  // Otherwise, just emit it as a scalar.
3242  } else {
3243  Address srcAddr = CGF.EmitPointerWithAlignment(CRE->getSubExpr());
3244 
3245  QualType srcAddrType =
3246  CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
3247  srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType);
3248  }
3249  Address srcAddr = srcLV.getAddress();
3250 
3251  // The dest and src types don't necessarily match in LLVM terms
3252  // because of the crazy ObjC compatibility rules.
3253 
3254  llvm::PointerType *destType =
3255  cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));
3256 
3257  // If the address is a constant null, just pass the appropriate null.
3258  if (isProvablyNull(srcAddr.getPointer())) {
3259  args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
3260  CRE->getType());
3261  return;
3262  }
3263 
3264  // Create the temporary.
3265  Address temp = CGF.CreateTempAlloca(destType->getElementType(),
3266  CGF.getPointerAlign(),
3267  "icr.temp");
3268  // Loading an l-value can introduce a cleanup if the l-value is __weak,
3269  // and that cleanup will be conditional if we can't prove that the l-value
3270  // isn't null, so we need to register a dominating point so that the cleanups
3271  // system will make valid IR.
3272  CodeGenFunction::ConditionalEvaluation condEval(CGF);
3273 
3274  // Zero-initialize it if we're not doing a copy-initialization.
3275  bool shouldCopy = CRE->shouldCopy();
3276  if (!shouldCopy) {
3277  llvm::Value *null =
3278  llvm::ConstantPointerNull::get(
3279  cast<llvm::PointerType>(destType->getElementType()));
3280  CGF.Builder.CreateStore(null, temp);
3281  }
3282 
3283  llvm::BasicBlock *contBB = nullptr;
3284  llvm::BasicBlock *originBB = nullptr;
3285 
3286  // If the address is *not* known to be non-null, we need to switch.
3287  llvm::Value *finalArgument;
3288 
3289  bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(),
3290  CGF.CGM.getDataLayout());
3291  if (provablyNonNull) {
3292  finalArgument = temp.getPointer();
3293  } else {
3294  llvm::Value *isNull =
3295  CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
3296 
3297  finalArgument = CGF.Builder.CreateSelect(isNull,
3298  llvm::ConstantPointerNull::get(destType),
3299  temp.getPointer(), "icr.argument");
3300 
3301  // If we need to copy, then the load has to be conditional, which
3302  // means we need control flow.
3303  if (shouldCopy) {
3304  originBB = CGF.Builder.GetInsertBlock();
3305  contBB = CGF.createBasicBlock("icr.cont");
3306  llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
3307  CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
3308  CGF.EmitBlock(copyBB);
3309  condEval.begin(CGF);
3310  }
3311  }
3312 
3313  llvm::Value *valueToUse = nullptr;
3314 
3315  // Perform a copy if necessary.
3316  if (shouldCopy) {
3317  RValue srcRV = CGF.EmitLoadOfLValue(srcLV, SourceLocation());
3318  assert(srcRV.isScalar());
3319 
3320  llvm::Value *src = srcRV.getScalarVal();
3321  src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
3322  "icr.cast");
3323 
3324  // Use an ordinary store, not a store-to-lvalue.
3325  CGF.Builder.CreateStore(src, temp);
3326 
3327  // If optimization is enabled, and the value was held in a
3328  // __strong variable, we need to tell the optimizer that this
3329  // value has to stay alive until we're doing the store back.
3330  // This is because the temporary is effectively unretained,
3331  // and so otherwise we can violate the high-level semantics.
3332  if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 &&
3333  srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) {
3334  valueToUse = src;
3335  }
3336  }
3337 
3338  // Finish the control flow if we needed it.
3339  if (shouldCopy && !provablyNonNull) {
3340  llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock();
3341  CGF.EmitBlock(contBB);
3342 
3343  // Make a phi for the value to intrinsically use.
3344  if (valueToUse) {
3345  llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2,
3346  "icr.to-use");
3347  phiToUse->addIncoming(valueToUse, copyBB);
3348  phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()),
3349  originBB);
3350  valueToUse = phiToUse;
3351  }
3352 
3353  condEval.end(CGF);
3354  }
3355 
3356  args.addWriteback(srcLV, temp, valueToUse);
3357  args.add(RValue::get(finalArgument), CRE->getType());
3358 }
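// For illustration, an ARC call that takes this writeback path (hypothetical
// declarations, not from this file):
//
//   void getError(NSError **err);  // parameter is implicitly __autoreleasing
//   NSError *e = nil;              // __strong local
//   getError(&e);                  // Sema wraps '&e' in an
//                                  // ObjCIndirectCopyRestoreExpr: the call
//                                  // receives the address of the temporary
//                                  // created above, and the result is copied
//                                  // back into 'e' by the writeback.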
3359 
3360 void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) {
3361  assert(!StackBase);
3362 
3363  // Save the stack.
3364  llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave);
3365  StackBase = CGF.Builder.CreateCall(F, {}, "inalloca.save");
3366 }
3367 
3368 void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const {
3369  if (StackBase) {
3370  // Restore the stack after the call.
3371  llvm::Value *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
3372  CGF.Builder.CreateCall(F, StackBase);
3373  }
3374 }
3375 
3376 void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType,
3377  SourceLocation ArgLoc,
3378  AbstractCallee AC,
3379  unsigned ParmNum) {
3380  if (!AC.getDecl() || !(SanOpts.has(SanitizerKind::NonnullAttribute) ||
3381  SanOpts.has(SanitizerKind::NullabilityArg)))
3382  return;
3383 
3384  // The param decl may be missing in a variadic function.
3385  auto PVD = ParmNum < AC.getNumParams() ? AC.getParamDecl(ParmNum) : nullptr;
3386  unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum;
3387 
3388  // Prefer the nonnull attribute if it's present.
3389  const NonNullAttr *NNAttr = nullptr;
3390  if (SanOpts.has(SanitizerKind::NonnullAttribute))
3391  NNAttr = getNonNullAttr(AC.getDecl(), PVD, ArgType, ArgNo);
3392 
3393  bool CanCheckNullability = false;
3394  if (SanOpts.has(SanitizerKind::NullabilityArg) && !NNAttr && PVD) {
3395  auto Nullability = PVD->getType()->getNullability(getContext());
3396  CanCheckNullability = Nullability &&
3397  *Nullability == NullabilityKind::NonNull &&
3398  PVD->getTypeSourceInfo();
3399  }
3400 
3401  if (!NNAttr && !CanCheckNullability)
3402  return;
3403 
3404  SourceLocation AttrLoc;
3405  SanitizerMask CheckKind;
3406  SanitizerHandler Handler;
3407  if (NNAttr) {
3408  AttrLoc = NNAttr->getLocation();
3409  CheckKind = SanitizerKind::NonnullAttribute;
3410  Handler = SanitizerHandler::NonnullArg;
3411  } else {
3412  AttrLoc = PVD->getTypeSourceInfo()->getTypeLoc().findNullabilityLoc();
3413  CheckKind = SanitizerKind::NullabilityArg;
3414  Handler = SanitizerHandler::NullabilityArg;
3415  }
3416 
3417  SanitizerScope SanScope(this);
3418  assert(RV.isScalar());
3419  llvm::Value *V = RV.getScalarVal();
3420  llvm::Value *Cond =
3421  Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType()));
3422  llvm::Constant *StaticData[] = {
3423  EmitCheckSourceLocation(ArgLoc), EmitCheckSourceLocation(AttrLoc),
3424  llvm::ConstantInt::get(Int32Ty, ArgNo + 1),
3425  };
3426  EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, None);
3427 }
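// For illustration, either of these (hypothetical) declarations causes this
// check to fire at call sites under -fsanitize=nonnull-attribute or
// -fsanitize=nullability-arg:
//
//   void takePtr(void *p) __attribute__((nonnull(1)));
//   void takeNonnull(int *_Nonnull p);
//
// For a call takePtr(q), the emitted guard is an 'icmp ne q, null' feeding
// EmitCheck with the NonnullArg handler.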
3428 
3429 void CodeGenFunction::EmitCallArgs(
3430  CallArgList &Args, ArrayRef<QualType> ArgTypes,
3431  llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
3432  AbstractCallee AC, unsigned ParamsToSkip, EvaluationOrder Order) {
3433  assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin()));
3434 
3435  // We *have* to evaluate arguments from right to left in the MS C++ ABI,
3436  // because arguments are destroyed left to right in the callee. As a special
3437  // case, there are certain language constructs that require left-to-right
3438  // evaluation, and in those cases we consider the evaluation order requirement
3439  // to trump the "destruction order is reverse construction order" guarantee.
3440  bool LeftToRight =
3441  CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()
3442  ? Order == EvaluationOrder::ForceLeftToRight
3443  : Order != EvaluationOrder::ForceRightToLeft;
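  // For illustration: for f(g(), h()) under the MS C++ ABI, LeftToRight is
  // false by default, so h() is evaluated before g(); the std::reverse at
  // the bottom of this function then restores source order for the IR call.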
3444 
3445  auto MaybeEmitImplicitObjectSize = [&](unsigned I, const Expr *Arg,
3446  RValue EmittedArg) {
3447  if (!AC.hasFunctionDecl() || I >= AC.getNumParams())
3448  return;
3449  auto *PS = AC.getParamDecl(I)->getAttr<PassObjectSizeAttr>();
3450  if (PS == nullptr)
3451  return;
3452 
3453  const auto &Context = getContext();
3454  auto SizeTy = Context.getSizeType();
3455  auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy));
3456  assert(EmittedArg.getScalarVal() && "We emitted nothing for the arg?");
3457  llvm::Value *V = evaluateOrEmitBuiltinObjectSize(Arg, PS->getType(), T,
3458  EmittedArg.getScalarVal());
3459  Args.add(RValue::get(V), SizeTy);
3460  // If we're emitting args in reverse, be sure to do so with
3461  // pass_object_size, as well.
3462  if (!LeftToRight)
3463  std::swap(Args.back(), *(&Args.back() - 1));
3464  };
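  // For illustration, a (hypothetical) parameter that triggers the lambda
  // above:
  //
  //   void fill(char *buf __attribute__((pass_object_size(0))), char c);
  //
  // A call fill(arr, 'x') passes a hidden trailing size_t computed as if by
  // __builtin_object_size(arr, 0); that extra argument is what gets added
  // (and possibly swapped into place) here.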
3465 
3466  // Insert a stack save if we're going to need any inalloca args.
3467  bool HasInAllocaArgs = false;
3468  if (CGM.getTarget().getCXXABI().isMicrosoft()) {
3469  for (ArrayRef<QualType>::iterator I = ArgTypes.begin(), E = ArgTypes.end();
3470  I != E && !HasInAllocaArgs; ++I)
3471  HasInAllocaArgs = isInAllocaArgument(CGM.getCXXABI(), *I);
3472  if (HasInAllocaArgs) {
3473  assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
3474  Args.allocateArgumentMemory(*this);
3475  }
3476  }
3477 
3478  // Evaluate each argument in the appropriate order.
3479  size_t CallArgsStart = Args.size();
3480  for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
3481  unsigned Idx = LeftToRight ? I : E - I - 1;
3482  CallExpr::const_arg_iterator Arg = ArgRange.begin() + Idx;
3483  unsigned InitialArgSize = Args.size();
3484  // If *Arg is an ObjCIndirectCopyRestoreExpr, check that either the types of
3485  // the argument and parameter match or the objc method is parameterized.
3486  assert((!isa<ObjCIndirectCopyRestoreExpr>(*Arg) ||
3487  getContext().hasSameUnqualifiedType((*Arg)->getType(),
3488  ArgTypes[Idx]) ||
3489  (isa<ObjCMethodDecl>(AC.getDecl()) &&
3490  isObjCMethodWithTypeParams(cast<ObjCMethodDecl>(AC.getDecl())))) &&
3491  "Argument and parameter types don't match");
3492  EmitCallArg(Args, *Arg, ArgTypes[Idx]);
3493  // In particular, we depend on it being the last arg in Args, and the
3494  // objectsize bits depend on there only being one arg if !LeftToRight.
3495  assert(InitialArgSize + 1 == Args.size() &&
3496  "The code below depends on only adding one arg per EmitCallArg");
3497  (void)InitialArgSize;
3498  // Since pointer arguments are never emitted as LValue, it is safe to emit
3499  // the non-null argument check for r-values only.
3500  if (!Args.back().hasLValue()) {
3501  RValue RVArg = Args.back().getKnownRValue();
3502  EmitNonNullArgCheck(RVArg, ArgTypes[Idx], (*Arg)->getExprLoc(), AC,
3503  ParamsToSkip + Idx);
3504  // @llvm.objectsize should never have side-effects and shouldn't need
3505  // destruction/cleanups, so we can safely "emit" it after its arg,
3506  // regardless of right-to-leftness
3507  MaybeEmitImplicitObjectSize(Idx, *Arg, RVArg);
3508  }
3509  }
3510 
3511  if (!LeftToRight) {
3512  // Un-reverse the arguments we just evaluated so they match up with the LLVM
3513  // IR function.
3514  std::reverse(Args.begin() + CallArgsStart, Args.end());
3515  }
3516 }
3517 
3518 namespace {
3519 
3520 struct DestroyUnpassedArg final : EHScopeStack::Cleanup {
3521  DestroyUnpassedArg(Address Addr, QualType Ty)
3522  : Addr(Addr), Ty(Ty) {}
3523 
3524  Address Addr;
3525  QualType Ty;
3526 
3527  void Emit(CodeGenFunction &CGF, Flags flags) override {
3528  QualType::DestructionKind DtorKind = Ty.isDestructedType();
3529  if (DtorKind == QualType::DK_cxx_destructor) {
3530  const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor();
3531  assert(!Dtor->isTrivial());
3532  CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false,
3533  /*Delegating=*/false, Addr);
3534  } else {
3535  CGF.callCStructDestructor(CGF.MakeAddrLValue(Addr, Ty));
3536  }
3537  }
3538 };
3539 
3540 struct DisableDebugLocationUpdates {
3541  CodeGenFunction &CGF;
3542  bool disabledDebugInfo;
3543  DisableDebugLocationUpdates(CodeGenFunction &CGF, const Expr *E) : CGF(CGF) {
3544  if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(E) && CGF.getDebugInfo()))
3545  CGF.disableDebugInfo();
3546  }
3547  ~DisableDebugLocationUpdates() {
3548  if (disabledDebugInfo)
3549  CGF.enableDebugInfo();
3550  }
3551 };
3552 
3553 } // end anonymous namespace
3554 
3555 RValue CallArg::getRValue(CodeGenFunction &CGF) const {
3556  if (!HasLV)
3557  return RV;
3558  LValue Copy = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty), Ty);
3559  CGF.EmitAggregateCopy(Copy, LV, Ty, AggValueSlot::DoesNotOverlap,
3560  LV.isVolatile());
3561  IsUsed = true;
3562  return RValue::getAggregate(Copy.getAddress());
3563 }
3564 
3565 void CallArg::copyInto(CodeGenFunction &CGF, Address Addr) const {
3566  LValue Dst = CGF.MakeAddrLValue(Addr, Ty);
3567  if (!HasLV && RV.isScalar())
3568  CGF.EmitStoreOfScalar(RV.getScalarVal(), Dst, /*init=*/true);
3569  else if (!HasLV && RV.isComplex())
3570  CGF.EmitStoreOfComplex(RV.getComplexVal(), Dst, /*init=*/true);
3571  else {
3572  auto Addr = HasLV ? LV.getAddress() : RV.getAggregateAddress();
3573  LValue SrcLV = CGF.MakeAddrLValue(Addr, Ty);
3574  // We assume that call args are never copied into subobjects.
3575  CGF.EmitAggregateCopy(Dst, SrcLV, Ty, AggValueSlot::DoesNotOverlap,
3576  HasLV ? LV.isVolatileQualified()
3577  : RV.isVolatileQualified());
3578  }
3579  IsUsed = true;
3580 }
3581 
3582 void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
3583  QualType type) {
3584  DisableDebugLocationUpdates Dis(*this, E);
3585  if (const ObjCIndirectCopyRestoreExpr *CRE
3586  = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
3587  assert(getLangOpts().ObjCAutoRefCount);
3588  return emitWritebackArg(*this, args, CRE);
3589  }
3590 
3591  assert(type->isReferenceType() == E->isGLValue() &&
3592  "reference binding to unmaterialized r-value!");
3593 
3594  if (E->isGLValue()) {
3595  assert(E->getObjectKind() == OK_Ordinary);
3596  return args.add(EmitReferenceBindingToExpr(E), type);
3597  }
3598 
3599  bool HasAggregateEvalKind = hasAggregateEvaluationKind(type);
3600 
3601  // In the Microsoft C++ ABI, aggregate arguments are destructed by the callee.
3602  // However, we still have to push an EH-only cleanup in case we unwind before
3603  // we make it to the call.
3604  if (HasAggregateEvalKind &&
3605  CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
3606  // If we're using inalloca, use the argument memory. Otherwise, use a
3607  // temporary.
3608  AggValueSlot Slot;
3609  if (args.isUsingInAlloca())
3610  Slot = createPlaceholderSlot(*this, type);
3611  else
3612  Slot = CreateAggTemp(type, "agg.tmp");
3613 
3614  bool DestroyedInCallee = true, NeedsEHCleanup = true;
3615  if (const auto *RD = type->getAsCXXRecordDecl())
3616  DestroyedInCallee = RD->hasNonTrivialDestructor();
3617  else
3618  NeedsEHCleanup = needsEHCleanup(type.isDestructedType());
3619 
3620  if (DestroyedInCallee)
3621  Slot.setExternallyDestructed();
3622 
3623  EmitAggExpr(E, Slot);
3624  RValue RV = Slot.asRValue();
3625  args.add(RV, type);
3626 
3627  if (DestroyedInCallee && NeedsEHCleanup) {
3628  // Create a no-op GEP between the placeholder and the cleanup so we can
3629  // RAUW it successfully. It also serves as a marker of the first
3630  // instruction where the cleanup is active.
3631  pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddress(),
3632  type);
3633  // This unreachable is a temporary marker which will be removed later.
3634  llvm::Instruction *IsActive = Builder.CreateUnreachable();
3635  args.addArgCleanupDeactivation(EHStack.getInnermostEHScope(), IsActive);
3636  }
3637  return;
3638  }
3639 
3640  if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) &&
3641  cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
3642  LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
3643  assert(L.isSimple());
3644  args.addUncopiedAggregate(L, type);
3645  return;
3646  }
3647 
3648  args.add(EmitAnyExprToTemp(E), type);
3649 }
3650 
3651 QualType CodeGenFunction::getVarArgType(const Expr *Arg) {
3652  // System headers on Windows define NULL to 0 instead of 0LL on Win64. MSVC
3653  // implicitly widens null pointer constants that are arguments to varargs
3654  // functions to pointer-sized ints.
3655  if (!getTarget().getTriple().isOSWindows())
3656  return Arg->getType();
3657 
3658  if (Arg->getType()->isIntegerType() &&
3659  getContext().getTypeSize(Arg->getType()) <
3660  getContext().getTargetInfo().getPointerWidth(0) &&
3661  Arg->isNullPointerConstant(getContext(),
3662  Expr::NPC_ValueDependentIsNotNull)) {
3663  return getContext().getIntPtrType();
3664  }
3665 
3666  return Arg->getType();
3667 }
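// For illustration: on Win64, system headers define NULL as plain 0 (a
// 32-bit int), so a call like printf("%p", NULL) would otherwise push only
// four bytes into an eight-byte varargs slot; the promotion above widens
// such null constants to intptr_t, matching MSVC.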
3668 
3669 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
3670 // optimizer it can aggressively ignore unwind edges.
3671 void
3672 CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
3673  if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
3674  !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
3675  Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
3676  CGM.getNoObjCARCExceptionsMetadata());
3677 }
3678 
3679 /// Emits a call to the given no-arguments nounwind runtime function.
3680 llvm::CallInst *
3681 CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
3682  const llvm::Twine &name) {
3683  return EmitNounwindRuntimeCall(callee, None, name);
3684 }
3685 
3686 /// Emits a call to the given nounwind runtime function.
3687 llvm::CallInst *
3688 CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
3689  ArrayRef<llvm::Value *> args,
3690  const llvm::Twine &name) {
3691  llvm::CallInst *call = EmitRuntimeCall(callee, args, name);
3692  call->setDoesNotThrow();
3693  return call;
3694 }
3695 
3696 /// Emits a simple call (never an invoke) to the given no-arguments
3697 /// runtime function.
3698 llvm::CallInst *
3699 CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
3700  const llvm::Twine &name) {
3701  return EmitRuntimeCall(callee, None, name);
3702 }
3703 
3704 // Calls which may throw must have operand bundles indicating which funclet
3705 // they are nested within.
3706 SmallVector<llvm::OperandBundleDef, 1>
3707 CodeGenFunction::getBundlesForFunclet(llvm::Value *Callee) {
3708  SmallVector<llvm::OperandBundleDef, 1> BundleList;
3709  // There is no need for a funclet operand bundle if we aren't inside a
3710  // funclet.
3711  if (!CurrentFuncletPad)
3712  return BundleList;
3713 
3714  // Skip intrinsics which cannot throw.
3715  auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts());
3716  if (CalleeFn && CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow())
3717  return BundleList;
3718 
3719  BundleList.emplace_back("funclet", CurrentFuncletPad);
3720  return BundleList;
3721 }
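// For illustration, a call emitted inside a cleanup funclet carries the
// bundle built above, e.g.:
//
//   %pad = cleanuppad within none []
//   call void @dtor(i8* %p) [ "funclet"(token %pad) ]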
3722 
3723 /// Emits a simple call (never an invoke) to the given runtime function.
3724 llvm::CallInst *
3725 CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
3726  ArrayRef<llvm::Value *> args,
3727  const llvm::Twine &name) {
3728  llvm::CallInst *call =
3729  Builder.CreateCall(callee, args, getBundlesForFunclet(callee), name);
3730  call->setCallingConv(getRuntimeCC());
3731  return call;
3732 }
3733 
3734 /// Emits a call or invoke to the given noreturn runtime function.
3735 void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee,
3736  ArrayRef<llvm::Value*> args) {
3737  SmallVector<llvm::OperandBundleDef, 1> BundleList =
3738  getBundlesForFunclet(callee);
3739 
3740  if (getInvokeDest()) {
3741  llvm::InvokeInst *invoke =
3742  Builder.CreateInvoke(callee,
3743  getUnreachableBlock(),
3744  getInvokeDest(),
3745  args,
3746  BundleList);
3747  invoke->setDoesNotReturn();
3748  invoke->setCallingConv(getRuntimeCC());
3749  } else {
3750  llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList);
3751  call->setDoesNotReturn();
3752  call->setCallingConv(getRuntimeCC());
3753  Builder.CreateUnreachable();
3754  }
3755 }
3756 
3757 /// Emits a call or invoke instruction to the given nullary runtime function.
3758 llvm::CallSite
3759 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
3760  const Twine &name) {
3761  return EmitRuntimeCallOrInvoke(callee, None, name);
3762 }
3763 
3764 /// Emits a call or invoke instruction to the given runtime function.
3765 llvm::CallSite
3766 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
3767  ArrayRef<llvm::Value*> args,
3768  const Twine &name) {
3769  llvm::CallSite callSite = EmitCallOrInvoke(callee, args, name);
3770  callSite.setCallingConv(getRuntimeCC());
3771  return callSite;
3772 }
3773 
3774 /// Emits a call or invoke instruction to the given function, depending
3775 /// on the current state of the EH stack.
3776 llvm::CallSite
3777 CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
3778  ArrayRef<llvm::Value *> Args,
3779  const Twine &Name) {
3780  llvm::BasicBlock *InvokeDest = getInvokeDest();
3781  SmallVector<llvm::OperandBundleDef, 1> BundleList =
3782  getBundlesForFunclet(Callee);
3783 
3784  llvm::Instruction *Inst;
3785  if (!InvokeDest)
3786  Inst = Builder.CreateCall(Callee, Args, BundleList, Name);
3787  else {
3788  llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
3789  Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, BundleList,
3790  Name);
3791  EmitBlock(ContBB);
3792  }
3793 
3794  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
3795  // optimizer it can aggressively ignore unwind edges.
3796  if (CGM.getLangOpts().ObjCAutoRefCount)
3797  AddObjCARCExceptionMetadata(Inst);
3798 
3799  return llvm::CallSite(Inst);
3800 }
3801 
3802 void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
3803  llvm::Value *New) {
3804  DeferredReplacements.push_back(std::make_pair(Old, New));
3805 }
3806 
3807 RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
3808  const CGCallee &Callee,
3809  ReturnValueSlot ReturnValue,
3810  const CallArgList &CallArgs,
3811  llvm::Instruction **callOrInvoke,
3812  SourceLocation Loc) {
3813  // FIXME: We no longer need the types from CallArgs; lift up and simplify.
3814 
3815  assert(Callee.isOrdinary() || Callee.isVirtual());
3816 
3817  // Handle struct-return functions by passing a pointer to the
3818  // location that we would like to return into.
3819  QualType RetTy = CallInfo.getReturnType();
3820  const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
3821 
3822  llvm::FunctionType *IRFuncTy = Callee.getFunctionType();
3823 
3824  // 1. Set up the arguments.
3825 
3826  // If we're using inalloca, insert the allocation after the stack save.
3827  // FIXME: Do this earlier rather than hacking it in here!
3828  Address ArgMemory = Address::invalid();
3829  const llvm::StructLayout *ArgMemoryLayout = nullptr;
3830  if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) {
3831  const llvm::DataLayout &DL = CGM.getDataLayout();
3832  ArgMemoryLayout = DL.getStructLayout(ArgStruct);
3833  llvm::Instruction *IP = CallArgs.getStackBase();
3834  llvm::AllocaInst *AI;
3835  if (IP) {
3836  IP = IP->getNextNode();
3837  AI = new llvm::AllocaInst(ArgStruct, DL.getAllocaAddrSpace(),
3838  "argmem", IP);
3839  } else {
3840  AI = CreateTempAlloca(ArgStruct, "argmem");
3841  }
3842  auto Align = CallInfo.getArgStructAlignment();
3843  AI->setAlignment(Align.getQuantity());
3844  AI->setUsedWithInAlloca(true);
3845  assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
3846  ArgMemory = Address(AI, Align);
3847  }
3848 
3849  // Helper function to drill into the inalloca allocation.
3850  auto createInAllocaStructGEP = [&](unsigned FieldIndex) -> Address {
3851  auto FieldOffset =
3852  CharUnits::fromQuantity(ArgMemoryLayout->getElementOffset(FieldIndex));
3853  return Builder.CreateStructGEP(ArgMemory, FieldIndex, FieldOffset);
3854  };
3855 
3856  ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo);
3857  SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs());
3858 
3859  // If the call returns a temporary with struct return, create a temporary
3860  // alloca to hold the result, unless one is given to us.
3861  Address SRetPtr = Address::invalid();
3862  Address SRetAlloca = Address::invalid();
3863  llvm::Value *UnusedReturnSizePtr = nullptr;
3864  if (RetAI.isIndirect() || RetAI.isInAlloca() || RetAI.isCoerceAndExpand()) {
3865  if (!ReturnValue.isNull()) {
3866  SRetPtr = ReturnValue.getValue();
3867  } else {
3868  SRetPtr = CreateMemTemp(RetTy, "tmp", &SRetAlloca);
3869  if (HaveInsertPoint() && ReturnValue.isUnused()) {
3870  uint64_t size =
3871  CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(RetTy));
3872  UnusedReturnSizePtr = EmitLifetimeStart(size, SRetAlloca.getPointer());
3873  }
3874  }
3875  if (IRFunctionArgs.hasSRetArg()) {
3876  IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr.getPointer();
3877  } else if (RetAI.isInAlloca()) {
3878  Address Addr = createInAllocaStructGEP(RetAI.getInAllocaFieldIndex());
3879  Builder.CreateStore(SRetPtr.getPointer(), Addr);
3880  }
3881  }
3882 
3883  Address swiftErrorTemp = Address::invalid();
3884  Address swiftErrorArg = Address::invalid();
3885 
3886  // Translate all of the arguments as necessary to match the IR lowering.
3887  assert(CallInfo.arg_size() == CallArgs.size() &&
3888  "Mismatch between function signature & arguments.");
3889  unsigned ArgNo = 0;
3890  CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
3891  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
3892  I != E; ++I, ++info_it, ++ArgNo) {
3893  const ABIArgInfo &ArgInfo = info_it->info;
3894 
3895  // Insert a padding argument to ensure proper alignment.
3896  if (IRFunctionArgs.hasPaddingArg(ArgNo))
3897  IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
3898  llvm::UndefValue::get(ArgInfo.getPaddingType());
3899 
3900  unsigned FirstIRArg, NumIRArgs;
3901  std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
3902 
3903  switch (ArgInfo.getKind()) {
3904  case ABIArgInfo::InAlloca: {
3905  assert(NumIRArgs == 0);
3906  assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
3907  if (I->isAggregate()) {
3908  // Replace the placeholder with the appropriate argument slot GEP.
3909  Address Addr = I->hasLValue()
3910  ? I->getKnownLValue().getAddress()
3911  : I->getKnownRValue().getAggregateAddress();
3912  llvm::Instruction *Placeholder =
3913  cast<llvm::Instruction>(Addr.getPointer());
3914  CGBuilderTy::InsertPoint IP = Builder.saveIP();
3915  Builder.SetInsertPoint(Placeholder);
3916  Addr = createInAllocaStructGEP(ArgInfo.getInAllocaFieldIndex());
3917  Builder.restoreIP(IP);
3918  deferPlaceholderReplacement(Placeholder, Addr.getPointer());
3919  } else {
3920  // Store the RValue into the argument struct.
3921  Address Addr = createInAllocaStructGEP(ArgInfo.getInAllocaFieldIndex());
3922  unsigned AS = Addr.getType()->getPointerAddressSpace();
3923  llvm::Type *MemType = ConvertTypeForMem(I->Ty)->getPointerTo(AS);
3924  // There are some cases where a trivial bitcast is not avoidable. The
3925  // definition of a type later in a translation unit may change its type
3926  // from {}* to (%struct.foo*)*.
3927  if (Addr.getType() != MemType)
3928  Addr = Builder.CreateBitCast(Addr, MemType);
3929  I->copyInto(*this, Addr);
3930  }
3931  break;
3932  }
3933 
3934  case ABIArgInfo::Indirect: {
3935  assert(NumIRArgs == 1);
3936  if (!I->isAggregate()) {
3937  // Make a temporary alloca to pass the argument.
3938  Address Addr = CreateMemTempWithoutCast(
3939  I->Ty, ArgInfo.getIndirectAlign(), "indirect-arg-temp");
3940  IRCallArgs[FirstIRArg] = Addr.getPointer();
3941 
3942  I->copyInto(*this, Addr);
3943  } else {
3944  // We want to avoid creating an unnecessary temporary+copy here;
3945  // however, we need one in three cases:
3946  // 1. If the argument is not byval, and we are required to copy the
3947  // source. (This case doesn't occur on any common architecture.)
3948  // 2. If the argument is byval, RV is not sufficiently aligned, and
3949  // we cannot force it to be sufficiently aligned.
3950  // 3. If the argument is byval, but RV is not located in default
3951  // or alloca address space.
3952  Address Addr = I->hasLValue()
3953  ? I->getKnownLValue().getAddress()
3954  : I->getKnownRValue().getAggregateAddress();
3955  llvm::Value *V = Addr.getPointer();
3956  CharUnits Align = ArgInfo.getIndirectAlign();
3957  const llvm::DataLayout *TD = &CGM.getDataLayout();
3958 
3959  assert((FirstIRArg >= IRFuncTy->getNumParams() ||
3960  IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace() ==
3961  TD->getAllocaAddrSpace()) &&
3962  "indirect argument must be in alloca address space");
3963 
3964  bool NeedCopy = false;
3965 
3966  if (Addr.getAlignment() < Align &&
3967  llvm::getOrEnforceKnownAlignment(V, Align.getQuantity(), *TD) <
3968  Align.getQuantity()) {
3969  NeedCopy = true;
3970  } else if (I->hasLValue()) {
3971  auto LV = I->getKnownLValue();
3972  auto AS = LV.getAddressSpace();
3973 
3974  if ((!ArgInfo.getIndirectByVal() &&
3975  (LV.getAlignment() >=
3976  getContext().getTypeAlignInChars(I->Ty)))) {
3977  NeedCopy = true;
3978  }
3979  if (!getLangOpts().OpenCL) {
3980  if ((ArgInfo.getIndirectByVal() &&
3981  (AS != LangAS::Default &&
3982  AS != CGM.getASTAllocaAddressSpace()))) {
3983  NeedCopy = true;
3984  }
3985  }
3986  // For OpenCL even if RV is located in default or alloca address space
3987  // we don't want to perform address space cast for it.
3988  else if ((ArgInfo.getIndirectByVal() &&
3989  Addr.getType()->getAddressSpace() != IRFuncTy->
3990  getParamType(FirstIRArg)->getPointerAddressSpace())) {
3991  NeedCopy = true;
3992  }
3993  }
3994 
3995  if (NeedCopy) {
3996  // Create an aligned temporary, and copy to it.
3997  Address AI = CreateMemTempWithoutCast(
3998  I->Ty, ArgInfo.getIndirectAlign(), "byval-temp");
3999  IRCallArgs[FirstIRArg] = AI.getPointer();
4000  I->copyInto(*this, AI);
4001  } else {
4002  // Skip the extra memcpy call.
4003  auto *T = V->getType()->getPointerElementType()->getPointerTo(
4004  CGM.getDataLayout().getAllocaAddrSpace());
4005  IRCallArgs[FirstIRArg] = getTargetHooks().performAddrSpaceCast(
4006  *this, V, LangAS::Default, CGM.getASTAllocaAddressSpace(), T,
4007  true);
4008  }
4009  }
4010  break;
4011  }
4012 
4013  case ABIArgInfo::Ignore:
4014  assert(NumIRArgs == 0);
4015  break;
4016 
4017  case ABIArgInfo::Extend:
4018  case ABIArgInfo::Direct: {
4019  if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
4020  ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
4021  ArgInfo.getDirectOffset() == 0) {
4022  assert(NumIRArgs == 1);
4023  llvm::Value *V;
4024  if (!I->isAggregate())
4025  V = I->getKnownRValue().getScalarVal();
4026  else
4027  V = Builder.CreateLoad(
4028  I->hasLValue() ? I->getKnownLValue().getAddress()
4029  : I->getKnownRValue().getAggregateAddress());
4030 
4031  // Implement swifterror by copying into a new swifterror argument.
4032  // We'll write back in the normal path out of the call.
4033  if (CallInfo.getExtParameterInfo(ArgNo).getABI()
4034  == ParameterABI::SwiftErrorResult) {
4035  assert(!swiftErrorTemp.isValid() && "multiple swifterror args");
4036 
4037  QualType pointeeTy = I->Ty->getPointeeType();
4038  swiftErrorArg =
4039  Address(V, getContext().getTypeAlignInChars(pointeeTy));
4040 
4041  swiftErrorTemp =
4042  CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
4043  V = swiftErrorTemp.getPointer();
4044  cast<llvm::AllocaInst>(V)->setSwiftError(true);
4045 
4046  llvm::Value *errorValue = Builder.CreateLoad(swiftErrorArg);
4047  Builder.CreateStore(errorValue, swiftErrorTemp);
4048  }
4049 
4050  // We might have to widen integers, but we should never truncate.
4051  if (ArgInfo.getCoerceToType() != V->getType() &&
4052  V->getType()->isIntegerTy())
4053  V = Builder.CreateZExt(V, ArgInfo.getCoerceToType());
4054 
4055  // If the argument doesn't match, perform a bitcast to coerce it. This
4056  // can happen due to trivial type mismatches.
4057  if (FirstIRArg < IRFuncTy->getNumParams() &&
4058  V->getType() != IRFuncTy->getParamType(FirstIRArg))
4059  V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg));
4060 
4061  IRCallArgs[FirstIRArg] = V;
4062  break;
4063  }
4064 
4065  // FIXME: Avoid the conversion through memory if possible.
4066  Address Src = Address::invalid();
4067  if (!I->isAggregate()) {
4068  Src = CreateMemTemp(I->Ty, "coerce");
4069  I->copyInto(*this, Src);
4070  } else {
4071  Src = I->hasLValue() ? I->getKnownLValue().getAddress()
4072  : I->getKnownRValue().getAggregateAddress();
4073  }
4074 
4075  // If the value is offset in memory, apply the offset now.
4076  Src = emitAddressAtOffset(*this, Src, ArgInfo);
4077 
4078  // Fast-isel and the optimizer generally like scalar values better than
4079  // FCAs, so we flatten them if this is safe to do for this argument.
4080  llvm::StructType *STy =
4081  dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType());
4082  if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
4083  llvm::Type *SrcTy = Src.getType()->getElementType();
4084  uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
4085  uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);
4086 
4087  // If the source type is smaller than the destination type of the
4088  // coerce-to logic, copy the source value into a temp alloca the size
4089  // of the destination type to allow loading all of it. The bits past
4090  // the source value are left undef.
4091  if (SrcSize < DstSize) {
4092  Address TempAlloca
4093  = CreateTempAlloca(STy, Src.getAlignment(),
4094  Src.getName() + ".coerce");
4095  Builder.CreateMemCpy(TempAlloca, Src, SrcSize);
4096  Src = TempAlloca;
4097  } else {
4098  Src = Builder.CreateBitCast(Src,
4099  STy->getPointerTo(Src.getAddressSpace()));
4100  }
4101 
4102  auto SrcLayout = CGM.getDataLayout().getStructLayout(STy);
4103  assert(NumIRArgs == STy->getNumElements());
4104  for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
4105  auto Offset = CharUnits::fromQuantity(SrcLayout->getElementOffset(i));
4106  Address EltPtr = Builder.CreateStructGEP(Src, i, Offset);
4107  llvm::Value *LI = Builder.CreateLoad(EltPtr);
4108  IRCallArgs[FirstIRArg + i] = LI;
4109  }
4110  } else {
4111  // In the simple case, just pass the coerced loaded value.
4112  assert(NumIRArgs == 1);
4113  IRCallArgs[FirstIRArg] =
4114  CreateCoercedLoad(Src, ArgInfo.getCoerceToType(), *this);
4115  }
4116 
4117  break;
4118  }
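  // For illustration: if the coerce-to type is the LLVM struct { i32, i32 }
  // (say, for a small aggregate passed directly), the flattening above emits
  // two scalar loads and passes two i32 IR arguments rather than one
  // first-class aggregate value.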
4119 
4120  case ABIArgInfo::CoerceAndExpand: {
4121  auto coercionType = ArgInfo.getCoerceAndExpandType();
4122  auto layout = CGM.getDataLayout().getStructLayout(coercionType);
4123 
4124  llvm::Value *tempSize = nullptr;
4125  Address addr = Address::invalid();
4126  Address AllocaAddr = Address::invalid();
4127  if (I->isAggregate()) {
4128  addr = I->hasLValue() ? I->getKnownLValue().getAddress()
4129  : I->getKnownRValue().getAggregateAddress();
4130 
4131  } else {
4132  RValue RV = I->getKnownRValue();
4133  assert(RV.isScalar()); // complex should always just be direct
4134 
4135  llvm::Type *scalarType = RV.getScalarVal()->getType();
4136  auto scalarSize = CGM.getDataLayout().getTypeAllocSize(scalarType);
4137  auto scalarAlign = CGM.getDataLayout().getPrefTypeAlignment(scalarType);
4138 
4139  // Materialize to a temporary.
4140  addr = CreateTempAlloca(RV.getScalarVal()->getType(),
4141  CharUnits::fromQuantity(std::max(
4142  layout->getAlignment(), scalarAlign)),
4143  "tmp",
4144  /*ArraySize=*/nullptr, &AllocaAddr);
4145  tempSize = EmitLifetimeStart(scalarSize, AllocaAddr.getPointer());
4146 
4147  Builder.CreateStore(RV.getScalarVal(), addr);
4148  }
4149 
4150  addr = Builder.CreateElementBitCast(addr, coercionType);
4151 
4152  unsigned IRArgPos = FirstIRArg;
4153  for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
4154  llvm::Type *eltType = coercionType->getElementType(i);
4155  if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
4156  Address eltAddr = Builder.CreateStructGEP(addr, i, layout);
4157  llvm::Value *elt = Builder.CreateLoad(eltAddr);
4158  IRCallArgs[IRArgPos++] = elt;
4159  }
4160  assert(IRArgPos == FirstIRArg + NumIRArgs);
4161 
4162  if (tempSize) {
4163  EmitLifetimeEnd(tempSize, AllocaAddr.getPointer());
4164  }
4165 
4166  break;
4167  }
4168 
4169  case ABIArgInfo::Expand:
4170  unsigned IRArgPos = FirstIRArg;
4171  ExpandTypeToArgs(I->Ty, *I, IRFuncTy, IRCallArgs, IRArgPos);
4172  assert(IRArgPos == FirstIRArg + NumIRArgs);
4173  break;
4174  }
4175  }
4176 
4177  const CGCallee &ConcreteCallee = Callee.prepareConcreteCallee(*this);
4178  llvm::Value *CalleePtr = ConcreteCallee.getFunctionPointer();
4179 
4180  // If we're using inalloca, set up that argument.
4181  if (ArgMemory.isValid()) {
4182  llvm::Value *Arg = ArgMemory.getPointer();
4183  if (CallInfo.isVariadic()) {
4184  // When passing non-POD arguments by value to variadic functions, we will
4185  // end up with a variadic prototype and an inalloca call site. In such
4186  // cases, we can't do any parameter mismatch checks. Give up and bitcast
4187  // the callee.
4188  unsigned CalleeAS = CalleePtr->getType()->getPointerAddressSpace();
4189  auto FnTy = getTypes().GetFunctionType(CallInfo)->getPointerTo(CalleeAS);
4190  CalleePtr = Builder.CreateBitCast(CalleePtr, FnTy);
4191  } else {
4192  llvm::Type *LastParamTy =
4193  IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1);
4194  if (Arg->getType() != LastParamTy) {
4195 #ifndef NDEBUG
4196  // Assert that these structs have equivalent element types.
4197  llvm::StructType *FullTy = CallInfo.getArgStruct();
4198  llvm::StructType *DeclaredTy = cast<llvm::StructType>(
4199  cast<llvm::PointerType>(LastParamTy)->getElementType());
4200  assert(DeclaredTy->getNumElements() == FullTy->getNumElements());
4201  for (llvm::StructType::element_iterator DI = DeclaredTy->element_begin(),
4202  DE = DeclaredTy->element_end(),
4203  FI = FullTy->element_begin();
4204  DI != DE; ++DI, ++FI)
4205  assert(*DI == *FI);
4206 #endif
4207  Arg = Builder.CreateBitCast(Arg, LastParamTy);
4208  }
4209  }
4210  assert(IRFunctionArgs.hasInallocaArg());
4211  IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
4212  }
4213 
4214  // 2. Prepare the function pointer.
4215 
4216  // If the callee is a bitcast of a non-variadic function to have a
4217  // variadic function pointer type, check to see if we can remove the
4218  // bitcast. This comes up with unprototyped functions.
4219  //
4220  // This makes the IR nicer, but more importantly it ensures that we
4221  // can inline the function at -O0 if it is marked always_inline.
4222  auto simplifyVariadicCallee = [](llvm::Value *Ptr) -> llvm::Value* {
4223  llvm::FunctionType *CalleeFT =
4224  cast<llvm::FunctionType>(Ptr->getType()->getPointerElementType());
4225  if (!CalleeFT->isVarArg())
4226  return Ptr;
4227 
4228  llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Ptr);
4229  if (!CE || CE->getOpcode() != llvm::Instruction::BitCast)
4230  return Ptr;
4231 
4232  llvm::Function *OrigFn = dyn_cast<llvm::Function>(CE->getOperand(0));
4233  if (!OrigFn)
4234  return Ptr;
4235 
4236  llvm::FunctionType *OrigFT = OrigFn->getFunctionType();
4237 
4238  // If the original type is variadic, or if any of the component types
4239  // disagree, we cannot remove the cast.
4240  if (OrigFT->isVarArg() ||
4241  OrigFT->getNumParams() != CalleeFT->getNumParams() ||
4242  OrigFT->getReturnType() != CalleeFT->getReturnType())
4243  return Ptr;
4244 
4245  for (unsigned i = 0, e = OrigFT->getNumParams(); i != e; ++i)
4246  if (OrigFT->getParamType(i) != CalleeFT->getParamType(i))
4247  return Ptr;
4248 
4249  return OrigFn;
4250  };
4251  CalleePtr = simplifyVariadicCallee(CalleePtr);
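  // For illustration (hypothetical C input):
  //
  //   void f();   // unprototyped declaration
  //   f(1, 2);    // call site gets a variadic function pointer type
  //
  // Here the callee is a bitcast of the non-variadic @f; the lambda above
  // strips the cast so the call targets @f directly and stays inlinable
  // at -O0.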
4252 
4253  // 3. Perform the actual call.
4254 
4255  // Deactivate any cleanups that we're supposed to do immediately before
4256  // the call.
4257  if (!CallArgs.getCleanupsToDeactivate().empty())
4258  deactivateArgCleanupsBeforeCall(*this, CallArgs);
4259 
4260  // Assert that the arguments we computed match up. The IR verifier
4261  // will catch this, but this is a common enough source of problems
4262  // during IRGen changes that it's way better for debugging to catch
4263  // it ourselves here.
4264 #ifndef NDEBUG
4265  assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg());
4266  for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
4267  // An inalloca argument can have a different type.
4268  if (IRFunctionArgs.hasInallocaArg() &&
4269  i == IRFunctionArgs.getInallocaArgNo())
4270  continue;
4271  if (i < IRFuncTy->getNumParams())
4272  assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i));
4273  }
4274 #endif
4275 
4276  // Update the largest vector width if any arguments have vector types.
4277  for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
4278  if (auto *VT = dyn_cast<llvm::VectorType>(IRCallArgs[i]->getType()))
4279  LargestVectorWidth = std::max(LargestVectorWidth,
4280  VT->getPrimitiveSizeInBits());
4281  }
4282 
4283  // Compute the calling convention and attributes.
4284  unsigned CallingConv;
4285  llvm::AttributeList Attrs;
4286  CGM.ConstructAttributeList(CalleePtr->getName(), CallInfo,
4287  Callee.getAbstractInfo(), Attrs, CallingConv,
4288  /*AttrOnCallSite=*/true);
4289 
4290  // Apply some call-site-specific attributes.
4291  // TODO: work this into building the attribute set.
4292 
4293  // Apply always_inline to all calls within flatten functions.
4294  // FIXME: should this really take priority over __try, below?
4295  if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() &&
4296  !(Callee.getAbstractInfo().getCalleeDecl().getDecl() &&
4297  Callee.getAbstractInfo()
4298  .getCalleeDecl()
4299  .getDecl()
4300  ->hasAttr<NoInlineAttr>())) {
4301  Attrs =
4302  Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
4303  llvm::Attribute::AlwaysInline);
4304  }
4305 
4306  // Disable inlining inside SEH __try blocks.
4307  if (isSEHTryScope()) {
4308  Attrs =
4309  Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
4310  llvm::Attribute::NoInline);
4311  }
4312 
4313  // Decide whether to use a call or an invoke.
4314  bool CannotThrow;
4315  if (currentFunctionUsesSEHTry()) {
4316  // SEH cares about asynchronous exceptions, so everything can "throw."
4317  CannotThrow = false;
4318  } else if (isCleanupPadScope() &&
4319  EHPersonality::get(*this).isMSVCXXPersonality()) {
4320  // The MSVC++ personality will implicitly terminate the program if an
4321  // exception is thrown during a cleanup outside of a try/catch.
4322  // We don't need to model anything in IR to get this behavior.
4323  CannotThrow = true;
4324  } else {
4325  // Otherwise, nounwind call sites will never throw.
4326  CannotThrow = Attrs.hasAttribute(llvm::AttributeList::FunctionIndex,
4327  llvm::Attribute::NoUnwind);
4328  }
4329 
4330  // If we made a temporary, be sure to clean up after ourselves. Note that we
4331  // can't depend on being inside of an ExprWithCleanups, so we need to manually
4332  // pop this cleanup later on. Being eager about this is OK, since this
4333  // temporary is 'invisible' outside of the callee.
4334  if (UnusedReturnSizePtr)
4335  pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, SRetAlloca,
4336  UnusedReturnSizePtr);
4337 
4338  llvm::BasicBlock *InvokeDest = CannotThrow ? nullptr : getInvokeDest();
4339 
4340  SmallVector<llvm::OperandBundleDef, 1> BundleList =
4341  getBundlesForFunclet(CalleePtr);
4342 
4343  // Emit the actual call/invoke instruction.
4344  llvm::CallSite CS;
4345  if (!InvokeDest) {
4346  CS = Builder.CreateCall(CalleePtr, IRCallArgs, BundleList);
4347  } else {
4348  llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
4349  CS = Builder.CreateInvoke(CalleePtr, Cont, InvokeDest, IRCallArgs,
4350  BundleList);
4351  EmitBlock(Cont);
4352  }
4353  llvm::Instruction *CI = CS.getInstruction();
4354  if (callOrInvoke)
4355  *callOrInvoke = CI;
4356 
4357  // Apply the attributes and calling convention.
4358  CS.setAttributes(Attrs);
4359  CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
4360 
4361  // Apply various metadata.
4362 
4363  if (!CI->getType()->isVoidTy())
4364  CI->setName("call");
4365 
4366  // Update largest vector width from the return type.
4367  if (auto *VT = dyn_cast<llvm::VectorType>(CI->getType()))
4368  LargestVectorWidth = std::max(LargestVectorWidth,
4369  VT->getPrimitiveSizeInBits());
4370 
4371  // Insert instrumentation or attach profile metadata at indirect call sites.
4372  // For more details, see the comment before the definition of
4373  // IPVK_IndirectCallTarget in InstrProfData.inc.
4374  if (!CS.getCalledFunction())
4375  PGO.valueProfile(Builder, llvm::IPVK_IndirectCallTarget,
4376  CI, CalleePtr);
4377 
4378  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
4379  // optimizer it can aggressively ignore unwind edges.
4380  if (CGM.getLangOpts().ObjCAutoRefCount)
4381  AddObjCARCExceptionMetadata(CI);
4382 
4383  // Suppress tail calls if requested.
4384  if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CI)) {
4385  const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl().getDecl();
4386  if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>())
4387  Call->setTailCallKind(llvm::CallInst::TCK_NoTail);
4388  }
4389 
4390  // 4. Finish the call.
4391 
4392  // If the call doesn't return, finish the basic block and clear the
4393  // insertion point; this allows the rest of IRGen to discard
4394  // unreachable code.
4395  if (CS.doesNotReturn()) {
4396  if (UnusedReturnSizePtr)
4397  PopCleanupBlock();
4398 
4399  // Strip away the noreturn attribute to better diagnose unreachable UB.
4400  if (SanOpts.has(SanitizerKind::Unreachable)) {
4401  if (auto *F = CS.getCalledFunction())
4402  F->removeFnAttr(llvm::Attribute::NoReturn);
4403  CS.removeAttribute(llvm::AttributeList::FunctionIndex,
4404  llvm::Attribute::NoReturn);
4405  }
4406 
4407  EmitUnreachable(Loc);
4408  Builder.ClearInsertionPoint();
4409 
4410  // FIXME: For now, emit a dummy basic block because expr emitters
4411  // generally are not ready to handle emitting expressions at unreachable
4412  // points.
4413  EnsureInsertPoint();
4414 
4415  // Return a reasonable RValue.
4416  return GetUndefRValue(RetTy);
4417  }
4418 
4419  // Perform the swifterror writeback.
4420  if (swiftErrorTemp.isValid()) {
4421  llvm::Value *errorResult = Builder.CreateLoad(swiftErrorTemp);
4422  Builder.CreateStore(errorResult, swiftErrorArg);
4423  }
4424 
4425  // Emit any call-associated writebacks immediately. Arguably this
4426  // should happen after any return-value munging.
4427  if (CallArgs.hasWritebacks())
4428  emitWritebacks(*this, CallArgs);
4429 
4430  // The stack cleanup for inalloca arguments has to run out of the normal
4431  // lexical order, so deactivate it and run it manually here.
4432  CallArgs.freeArgumentMemory(*this);
4433 
4434  // Extract the return value.
4435  RValue Ret = [&] {
4436  switch (RetAI.getKind()) {
4437  case ABIArgInfo::CoerceAndExpand: {
4438  auto coercionType = RetAI.getCoerceAndExpandType();
4439  auto layout = CGM.getDataLayout().getStructLayout(coercionType);
4440 
4441  Address addr = SRetPtr;
4442  addr = Builder.CreateElementBitCast(addr, coercionType);
4443 
4444  assert(CI->getType() == RetAI.getUnpaddedCoerceAndExpandType());
4445  bool requiresExtract = isa<llvm::StructType>(CI->getType());
4446 
4447  unsigned unpaddedIndex = 0;
4448  for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
4449  llvm::Type *eltType = coercionType->getElementType(i);
4450  if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
4451  Address eltAddr = Builder.CreateStructGEP(addr, i, layout);
4452  llvm::Value *elt = CI;
4453  if (requiresExtract)
4454  elt = Builder.CreateExtractValue(elt, unpaddedIndex++);
4455  else
4456  assert(unpaddedIndex == 0);
4457  Builder.CreateStore(elt, eltAddr);
4458  }
4459  // FALLTHROUGH
4460  LLVM_FALLTHROUGH;
4461  }
4462 
4463  case ABIArgInfo::InAlloca:
4464  case ABIArgInfo::Indirect: {
4465  RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation());
4466  if (UnusedReturnSizePtr)
4467  PopCleanupBlock();
4468  return ret;
4469  }
4470 
4471  case ABIArgInfo::Ignore:
4472  // If we are ignoring an argument that had a result, make sure to
4473  // construct the appropriate return value for our caller.
4474  return GetUndefRValue(RetTy);
4475 
4476  case ABIArgInfo::Extend:
4477  case ABIArgInfo::Direct: {
4478  llvm::Type *RetIRTy = ConvertType(RetTy);
4479  if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
4480  switch (getEvaluationKind(RetTy)) {
4481  case TEK_Complex: {
4482  llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
4483  llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
4484  return RValue::getComplex(std::make_pair(Real, Imag));
4485  }
4486  case TEK_Aggregate: {
4487  Address DestPtr = ReturnValue.getValue();
4488  bool DestIsVolatile = ReturnValue.isVolatile();
4489 
4490  if (!DestPtr.isValid()) {
4491  DestPtr = CreateMemTemp(RetTy, "agg.tmp");
4492  DestIsVolatile = false;
4493  }
4494  BuildAggStore(*this, CI, DestPtr, DestIsVolatile);
4495  return RValue::getAggregate(DestPtr);
4496  }
4497  case TEK_Scalar: {
4498  // If the argument doesn't match, perform a bitcast to coerce it. This
4499  // can happen due to trivial type mismatches.
4500  llvm::Value *V = CI;
4501  if (V->getType() != RetIRTy)
4502  V = Builder.CreateBitCast(V, RetIRTy);
4503  return RValue::get(V);
4504  }
4505  }
4506  llvm_unreachable("bad evaluation kind");
4507  }
4508 
4509  Address DestPtr = ReturnValue.getValue();
4510  bool DestIsVolatile = ReturnValue.isVolatile();
4511 
4512  if (!DestPtr.isValid()) {
4513  DestPtr = CreateMemTemp(RetTy, "coerce");
4514  DestIsVolatile = false;
4515  }
4516 
4517  // If the value is offset in memory, apply the offset now.
4518  Address StorePtr = emitAddressAtOffset(*this, DestPtr, RetAI);
4519  CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
4520 
4521  return convertTempToRValue(DestPtr, RetTy, SourceLocation());
4522  }
4523 
4524  case ABIArgInfo::Expand:
4525  llvm_unreachable("Invalid ABI kind for return argument");
4526  }
4527 
4528  llvm_unreachable("Unhandled ABIArgInfo::Kind");
4529  } ();
4530 
4531  // Emit the assume_aligned check on the return value.
4532  const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl().getDecl();
4533  if (Ret.isScalar() && TargetDecl) {
4534  if (const auto *AA = TargetDecl->getAttr<AssumeAlignedAttr>()) {
4535  llvm::Value *OffsetValue = nullptr;
4536  if (const auto *Offset = AA->getOffset())
4537  OffsetValue = EmitScalarExpr(Offset);
4538 
4539  llvm::Value *Alignment = EmitScalarExpr(AA->getAlignment());
4540  llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(Alignment);
4541  EmitAlignmentAssumption(Ret.getScalarVal(), RetTy, Loc, AA->getLocation(),
4542  AlignmentCI->getZExtValue(), OffsetValue);
4543  } else if (const auto *AA = TargetDecl->getAttr<AllocAlignAttr>()) {
4544  llvm::Value *AlignmentVal = CallArgs[AA->getParamIndex().getLLVMIndex()]
4545  .getRValue(*this)
4546  .getScalarVal();
4547  EmitAlignmentAssumption(Ret.getScalarVal(), RetTy, Loc, AA->getLocation(),
4548  AlignmentVal);
4549  }
4550  }
4551 
4552  return Ret;
4553 }
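// For illustration, (hypothetical) declarations that feed the alignment
// assumptions emitted above:
//
//   void *make64(void) __attribute__((assume_aligned(64)));
//   void *makeAl(int align) __attribute__((alloc_align(1)));
//
// For make64(), the constant 64 comes from the AssumeAlignedAttr; for
// makeAl(n), the runtime value of the first argument is used as the
// assumed alignment of the returned pointer.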
4554 
4555 CGCallee CGCallee::prepareConcreteCallee(CodeGenFunction &CGF) const {
4556  if (isVirtual()) {
4557  const CallExpr *CE = getVirtualCallExpr();
4558  return CGF.CGM.getCXXABI().getVirtualFunctionPointer(
4559  CGF, getVirtualMethodDecl(), getThisAddress(), getFunctionType(),
4560  CE ? CE->getBeginLoc() : SourceLocation());
4561  }
4562 
4563  return *this;
4564 }
4565 
4566 /* VarArg handling */
4567 
4568 Address CodeGenFunction::EmitVAArg(VAArgExpr *VE, Address &VAListAddr) {
4569  VAListAddr = VE->isMicrosoftABI()
4570  ? EmitMSVAListRef(VE->getSubExpr())
4571  : EmitVAListRef(VE->getSubExpr());
4572  QualType Ty = VE->getType();
4573  if (VE->isMicrosoftABI())
4574  return CGM.getTypes().getABIInfo().EmitMSVAArg(*this, VAListAddr, Ty);
4575  return CGM.getTypes().getABIInfo().EmitVAArg(*this, VAListAddr, Ty);
4576 }
const CGFunctionInfo & arrangeBuiltinFunctionDeclaration(QualType resultType, const FunctionArgList &args)
A builtin function is a freestanding function using the default C conventions.
Definition: CGCall.cpp:659
const llvm::DataLayout & getDataLayout() const
static CanQual< Type > CreateUnsafe(QualType Other)
Builds a canonical type from a QualType.
ObjCIndirectCopyRestoreExpr - Represents the passing of a function argument by indirect copy-restore ...
Definition: ExprObjC.h:1518
CGCXXABI & getCXXABI() const
Definition: CodeGenTypes.h:176
Ignore - Ignore the argument (treat as void).
ReturnValueSlot - Contains the address where the return value of a function can be stored...
Definition: CGCall.h:361
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
QualType getAddrSpaceQualType(QualType T, LangAS AddressSpace) const
Return the uniqued reference to the type for an address space qualified type with the specified type ...
Represents a function declaration or definition.
Definition: Decl.h:1738
Address getAddress() const
Definition: CGValue.h:583
const CGFunctionInfo & arrangeBlockFunctionDeclaration(const FunctionProtoType *type, const FunctionArgList &args)
Block invocation functions are C functions with an implicit parameter.
Definition: CGCall.cpp:633
void EmitReturnValueCheck(llvm::Value *RV)
Emit a test that checks if the return value RV is nonnull.
Definition: CGCall.cpp:2995
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition: Type.h:2537
Complete object ctor.
Definition: ABI.h:26
CanQualType VoidPtrTy
Definition: ASTContext.h:1044
A (possibly-)qualified type.
Definition: Type.h:638
bool isBlockPointerType() const
Definition: Type.h:6304
bool ReturnTypeUsesSRet(const CGFunctionInfo &FI)
Return true iff the given type uses &#39;sret&#39; when used as a return type.
Definition: CGCall.cpp:1506
bool getNoCfCheck() const
Definition: Type.h:3515
llvm::Type * ConvertTypeForMem(QualType T)
const CodeGenOptions & getCodeGenOpts() const
bool isReturnsRetained() const
In ARC, whether this function retains its return value.
static void setCUDAKernelCallingConvention(CanQualType &FTy, CodeGenModule &CGM, const FunctionDecl *FD)
Set calling convention for CUDA/HIP kernel.
Definition: CGCall.cpp:265
static CanQual< FunctionProtoType > GetFormalType(const CXXMethodDecl *MD)
Returns the canonical formal type of the given C++ method.
Definition: CGCall.cpp:82
Address CreateMemTemp(QualType T, const Twine &Name="tmp", Address *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignmen and cas...
Definition: CGExpr.cpp:139
static void emitWriteback(CodeGenFunction &CGF, const CallArgList::Writeback &writeback)
Emit the actual writing-back of a writeback.
Definition: CGCall.cpp:3138
static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty, CharUnits MinAlign)
Create a temporary allocation for the purposes of coercion.
Definition: CGCall.cpp:1118
CXXDtorType getDtorType() const
Definition: GlobalDecl.h:76
static llvm::Value * emitAutoreleaseOfResult(CodeGenFunction &CGF, llvm::Value *result)
Emit an ARC autorelease of the result of a function.
Definition: CGCall.cpp:2729
static const CGFunctionInfo & arrangeFreeFunctionLikeCall(CodeGenTypes &CGT, CodeGenModule &CGM, const CallArgList &args, const FunctionType *fnType, unsigned numExtraRequiredArgs, bool chainCall)
Arrange a call as unto a free function, except possibly with an additional number of formal parameter...
Definition: CGCall.cpp:568
const ABIInfo & getABIInfo() const
Definition: CodeGenTypes.h:174
FunctionType - C99 6.7.5.3 - Function Declarators.
Definition: Type.h:3355
const CGFunctionInfo & arrangeFreeFunctionType(CanQual< FunctionProtoType > Ty, const FunctionDecl *FD)
Arrange the argument and result information for a value of the given freestanding function type...
Definition: CGCall.cpp:190
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee...
Definition: Type.cpp:505
const llvm::Triple & getTriple() const
Returns the target triple of the primary target.
Definition: TargetInfo.h:949
bool hasExtParameterInfos() const
Is there any interesting extra information for any of the parameters of this function type...
Definition: Type.h:4059
tooling::Replacements cleanup(const FormatStyle &Style, StringRef Code, ArrayRef< tooling::Range > Ranges, StringRef FileName="<stdin>")
Clean up any erroneous/redundant code in the given Ranges in Code.
Definition: Format.cpp:2228
unsigned getNumVBases() const
Retrieves the number of virtual base classes of this class.
Definition: DeclCXX.h:838
Extend - Valid only for integer argument types.
Address EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
EmitPointerWithAlignment - Given an expression with a pointer type, emit the value and compute our be...
Definition: CGExpr.cpp:1029
Address EmitVAArg(VAArgExpr *VE, Address &VAListAddr)
Generate code to get an argument from the passed-in pointer and update it accordingly.
Definition: CGCall.cpp:4568
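A hedged sketch of calling this entry point: VE is a VAArgExpr*, and VAListAddr is an in/out parameter that EmitVAArg initializes before updating, so it may start out invalid.
  Address VAListAddr = Address::invalid();
  // ArgAddr designates the argument fetched from the va_list.
  Address ArgAddr = CGF.EmitVAArg(VE, VAListAddr);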
static bool isProvablyNull(llvm::Value *addr)
Definition: CGCall.cpp:3133
Decl - This represents one declaration (or definition), e.g.
Definition: DeclBase.h:87
const CGFunctionInfo & arrangeCXXMethodType(const CXXRecordDecl *RD, const FunctionProtoType *FTP, const CXXMethodDecl *MD)
Arrange the argument and result information for a call to an unknown C++ non-static member function o...
Definition: CGCall.cpp:248
bool isVirtual() const
Definition: DeclCXX.h:2086
CGCallee prepareConcreteCallee(CodeGenFunction &CGF) const
If this is a delayed callee computation of some sort, prepare a concrete callee.
Definition: CGCall.cpp:4555
const Decl * CurCodeDecl
CurCodeDecl - This is the inner-most code context, which includes blocks.
Direct - Pass the argument directly using the normal converted LLVM type, or by coercing to another s...
const Expr * getSubExpr() const
Definition: Expr.h:4109
void addUncopiedAggregate(LValue LV, QualType type)
Definition: CGCall.h:287
bool isVolatile() const
Definition: CGValue.h:301
The base class of the type hierarchy.
Definition: Type.h:1407
void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false)
EmitStoreThroughLValue - Store the specified rvalue into the specified lvalue, where both are guarant...
Definition: CGExpr.cpp:1914
CanQual< T > getUnqualifiedType() const
Retrieve the unqualified form of this type.
static const NonNullAttr * getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD, QualType ArgType, unsigned ArgNo)
Returns the attribute (either parameter attribute, or function attribute), which declares argument Ar...
Definition: CGCall.cpp:2192
bool isRestrictQualified() const
Determine whether this type is restrict-qualified.
Definition: Type.h:6136
bool isZero() const
isZero - Test whether the quantity equals zero.
Definition: CharUnits.h:116
static int getExpansionSize(QualType Ty, const ASTContext &Context)
Definition: CGCall.cpp:964
const TargetInfo & getTargetInfo() const
Definition: ASTContext.h:690
const ParmVarDecl * getParamDecl(unsigned I) const
bool isFuncTypeConvertible(const FunctionType *FT)
isFuncTypeConvertible - Utility to check whether a function type can be converted to an LLVM type (i...
llvm::CallSite EmitCallOrInvoke(llvm::Value *Callee, ArrayRef< llvm::Value *> Args, const Twine &Name="")
Emits a call or invoke instruction to the given function, depending on the current state of the EH st...
Definition: CGCall.cpp:3777
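A sketch of this EH-aware call helper; Fn (an llvm::Value*) and a previously populated FnArgs vector are assumed to exist.
  // FnArgs converts implicitly to ArrayRef<llvm::Value *>.
  llvm::CallSite CS = CGF.EmitCallOrInvoke(Fn, FnArgs, "demo.call");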
llvm::Value * EmitARCRetainNonBlock(llvm::Value *value)
Retain the given object, with normal retain semantics.
Definition: CGObjC.cpp:2065
static llvm::SmallVector< FunctionProtoType::ExtParameterInfo, 16 > getExtParameterInfosForCall(const FunctionProtoType *proto, unsigned prefixArgs, unsigned totalArgs)
Definition: CGCall.cpp:378
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
Represents a C++ constructor within a class.
Definition: DeclCXX.h:2484
virtual AddedStructorArgs buildStructorSignature(const CXXMethodDecl *MD, StructorType T, SmallVectorImpl< CanQualType > &ArgTys)=0
Build the signature of the given constructor or destructor variant by adding any required parameters...
static llvm::Value * emitArgumentDemotion(CodeGenFunction &CGF, const VarDecl *var, llvm::Value *value)
An argument came in as a promoted argument; demote it back to its declared type.
Definition: CGCall.cpp:2172
bool hasWritebacks() const
Definition: CGCall.h:312
Default closure variant of a ctor.
Definition: ABI.h:30
Address GetAddrOfLocalVar(const VarDecl *VD)
GetAddrOfLocalVar - Return the address of a local variable.
RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee, ReturnValueSlot ReturnValue, const CallArgList &Args, llvm::Instruction **callOrInvoke, SourceLocation Loc)
EmitCall - Generate a call of the given function, expecting the given result type, and using the given argument list which specifies both the LLVM arguments and the types they were derived from.
Definition: CGCall.cpp:3807
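Putting the arrangement and emission steps together, a hedged sketch of a complete free-function call; Callee, Args, FnType and Loc are hypothetical values built with the helpers listed elsewhere on this page.
  const CGFunctionInfo &FnInfo =
      CGM.getTypes().arrangeFreeFunctionCall(Args, FnType, /*ChainCall=*/false);
  RValue Result = CGF.EmitCall(FnInfo, Callee, ReturnValueSlot(), Args,
                               /*callOrInvoke=*/nullptr, Loc);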
CanQualType getCanonicalParamType(QualType T) const
Return the canonical parameter type corresponding to the specific potentially non-canonical one...
Represents a variable declaration or definition.
Definition: Decl.h:813
static void addExtParameterInfosForCall(llvm::SmallVectorImpl< FunctionProtoType::ExtParameterInfo > &paramInfos, const FunctionProtoType *proto, unsigned prefixArgs, unsigned totalArgs)
Definition: CGCall.cpp:107
llvm::Instruction * getStackBase() const
Definition: CGCall.h:334
unsigned getNumParams() const
Definition: Type.h:3888
RAII object to set/unset CodeGenFunction::IsSanitizerScope.
llvm::Value * getFunctionPointer() const
Definition: CGCall.h:178
static llvm::Value * CreateCoercedLoad(Address Src, llvm::Type *Ty, CodeGenFunction &CGF)
CreateCoercedLoad - Create a load from Src whose result is coerced to type Ty.
Definition: CGCall.cpp:1222
const T * getAs() const
Member-template getAs<specific type>.
Definition: Type.h:6748
void setCoerceToType(llvm::Type *T)
ExtInfo withProducesResult(bool producesResult) const
Definition: Type.h:3544
ObjCMethodDecl - Represents an instance or class method declaration.
Definition: DeclObjC.h:139
void EmitNonNullArgCheck(RValue RV, QualType ArgType, SourceLocation ArgLoc, AbstractCallee AC, unsigned ParmNum)
Create a check for a function parameter that may potentially be declared as non-null.
Definition: CGCall.cpp:3376
Address CreateConstInBoundsByteGEP(Address Addr, CharUnits Offset, const llvm::Twine &Name="")
Given a pointer to i8, adjust it by a given constant offset.
Definition: CGBuilder.h:234
llvm::Value * getPointer() const
Definition: Address.h:38
const CGFunctionInfo & arrangeCXXStructorDeclaration(const CXXMethodDecl *MD, StructorType Type)
Definition: CGCall.cpp:306
Address getValue() const
Definition: CGCall.h:381
llvm::Type * ConvertTypeForMem(QualType T)
ConvertTypeForMem - Convert type T into a llvm::Type.
Represents a parameter to a function.
Definition: Decl.h:1550
unsigned getAddressSpace() const
Return the address space that this address resides in.
Definition: Address.h:57
void add(RValue rvalue, QualType type)
Definition: CGCall.h:285
unsigned ClangCallConvToLLVMCallConv(CallingConv CC)
Convert clang calling convention to LLVM calling convention.
Definition: CGCall.cpp:46
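For example, a sketch (FPT is an assumed const FunctionProtoType*) of translating the AST-level convention of a function type for use on an IR call:
  unsigned LLVMCC = ClangCallConvToLLVMCallConv(FPT->getExtInfo().getCC());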
virtual unsigned getOpenCLKernelCallingConv() const
Get LLVM calling convention for OpenCL kernel.
Definition: TargetInfo.cpp:420
Represents a struct/union/class.
Definition: Decl.h:3593
void freeArgumentMemory(CodeGenFunction &CGF) const
Definition: CGCall.cpp:3368
uint64_t getPointerWidth(unsigned AddrSpace) const
Return the width of pointers on this target, for the specified address space.
Definition: TargetInfo.h:348
An object to manage conditionally-evaluated expressions.
Description of a constructor that was inherited from a base class.
Definition: DeclCXX.h:2458
bool usesInAlloca() const
Return true if this function uses inalloca arguments.
TargetCXXABI getCXXABI() const
Get the C++ ABI currently in use.
Definition: TargetInfo.h:1018
static void emitWritebacks(CodeGenFunction &CGF, const CallArgList &args)
Definition: CGCall.cpp:3204
void EmitFunctionEpilog(const CGFunctionInfo &FI, bool EmitRetDbgLoc, SourceLocation EndLoc)
EmitFunctionEpilog - Emit the target specific LLVM code to return the given temporary.
Definition: CGCall.cpp:2809
bool isNothrow(bool ResultIfDependent=false) const
Determine whether this function type has a non-throwing exception specification.
Definition: Type.h:3997
Address getAddress() const
Definition: CGValue.h:327
unsigned getRegParm() const
Definition: Type.h:3518
Indirect - Pass the argument indirectly via a hidden pointer with the specified alignment (0 indicate...
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition: ASTContext.h:155
ArrayRef< ExtParameterInfo > getExtParameterInfos() const
Definition: Type.h:4063
llvm::Constant * objc_retainAutoreleasedReturnValue
id objc_retainAutoreleasedReturnValue(id);
field_range fields() const
Definition: Decl.h:3784
bool isVolatileQualified() const
Definition: CGValue.h:258
llvm::Value * EmitARCRetainAutoreleaseReturnValue(llvm::Value *value)
Do a fused retain/autorelease of the given object.
Definition: CGObjC.cpp:2295
Represents a member of a struct/union/class.
Definition: Decl.h:2579
CharUnits getAlignment() const
Definition: CGValue.h:316
RequiredArgs getRequiredArgs() const
bool isUsingInAlloca() const
Returns true if we're using an inalloca struct to pass arguments in memory.
Definition: CGCall.h:339
unsigned getFunctionScopeIndex() const
Returns the index of this parameter in its prototype or method scope.
Definition: Decl.h:1603
StructorType getFromDtorType(CXXDtorType T)
Definition: CodeGenTypes.h:103
llvm::CallInst * EmitRuntimeCall(llvm::Value *callee, const Twine &name="")
bool isOrdinary() const
Definition: CGCall.h:169
Qualifiers::ObjCLifetime getObjCLifetime() const
Definition: CGValue.h:266
CharUnits getArgStructAlignment() const
bool isReferenceType() const
Definition: Type.h:6308
Denotes a cleanup that should run when a scope is exited using exceptional control flow (a throw stat...
Definition: EHScopeStack.h:81
llvm::Value * EmitARCAutoreleaseReturnValue(llvm::Value *value)
Autorelease the given object.
Definition: CGObjC.cpp:2285
static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args, const ObjCIndirectCopyRestoreExpr *CRE)
Emit an argument that's being passed call-by-writeback.
Definition: CGCall.cpp:3232
static AggValueSlot forAddr(Address addr, Qualifiers quals, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed, IsSanitizerChecked_t isChecked=IsNotSanitizerChecked)
forAddr - Make a slot for an aggregate value.
Definition: CGValue.h:514
static CharUnits Zero()
Zero - Construct a CharUnits quantity of zero.
Definition: CharUnits.h:53
bool isVirtual() const
Definition: CGCall.h:187
static const EHPersonality & get(CodeGenModule &CGM, const FunctionDecl *FD)
SourceLocation getBeginLoc() const LLVM_READONLY
Definition: Decl.h:739
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
void addArgCleanupDeactivation(EHScopeStack::stable_iterator Cleanup, llvm::Instruction *IsActiveIP)
Definition: CGCall.h:321
bool getProducesResult() const
Definition: Type.h:3513
llvm::FunctionType * getFunctionType() const
Definition: CGCall.h:203
bool isGLValue() const
Definition: Expr.h:252
ARCPreciseLifetime_t isARCPreciseLifetime() const
Definition: CGValue.h:285
This parameter (which must have pointer type) uses the special Swift context-pointer ABI treatment...
static bool hasScalarEvaluationKind(QualType T)
static llvm::Value * tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF, llvm::Value *result)
Try to emit a fused autorelease of a return result.
Definition: CGCall.cpp:2602
void copyInto(CodeGenFunction &CGF, Address A) const
Definition: CGCall.cpp:3565
Address CreateElementBitCast(Address Addr, llvm::Type *Ty, const llvm::Twine &Name="")
Cast the element type of the given address to a different type, preserving information like the align...
Definition: CGBuilder.h:157
CharUnits - This is an opaque type for sizes expressed in character units.
Definition: CharUnits.h:38
llvm::StructType * getCoerceAndExpandType() const
bool hasConstructorVariants() const
Does this ABI have different entrypoints for complete-object and base-subobject constructors?
Definition: TargetCXXABI.h:215
Wrapper for source info for functions.
Definition: TypeLoc.h:1327
CharUnits getAlignment() const
Return the alignment of this pointer.
Definition: Address.h:67
virtual bool hasMostDerivedReturn(GlobalDecl GD) const
Definition: CGCXXABI.h:109
unsigned getInAllocaFieldIndex() const
const_arg_iterator arg_begin() const
CXXCtorType getCtorType() const
Definition: GlobalDecl.h:71
const CGFunctionInfo & arrangeCXXConstructorCall(const CallArgList &Args, const CXXConstructorDecl *D, CXXCtorType CtorKind, unsigned ExtraPrefixArgs, unsigned ExtraSuffixArgs, bool PassProtoArgs=true)
Arrange a call to a C++ method, passing the given arguments.
Definition: CGCall.cpp:396
LangAS getAddressSpace() const
Definition: Type.h:352
void ConstructAttributeList(StringRef Name, const CGFunctionInfo &Info, CGCalleeInfo CalleeInfo, llvm::AttributeList &Attrs, unsigned &CallingConv, bool AttrOnCallSite)
Get the LLVM attributes and calling convention to use for a particular function type.
Definition: CGCall.cpp:1835
llvm::CallInst * CreateMemCpy(Address Dest, Address Src, llvm::Value *Size, bool IsVolatile=false)
Definition: CGBuilder.h:274
ABIArgInfo - Helper class to encapsulate information about how a specific C type should be passed to ...
static void appendParameterTypes(const CodeGenTypes &CGT, SmallVectorImpl< CanQualType > &prefix, SmallVectorImpl< FunctionProtoType::ExtParameterInfo > &paramInfos, CanQual< FunctionProtoType > FPT)
Adds the formal parameters in FPT to the given prefix.
Definition: CGCall.cpp:137
const CGFunctionInfo & arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD, QualType receiverType)
Arrange the argument and result information for the function type through which to perform a send to ...
Definition: CGCall.cpp:474
const CGFunctionInfo & arrangeCall(const CGFunctionInfo &declFI, const CallArgList &args)
Given a function info for a declaration, return the function info for a call with the given arguments...
Definition: CGCall.cpp:708
Values of this type can never be null.
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
Denotes a cleanup that should run when a scope is exited using normal control flow (falling off the e...
Definition: EHScopeStack.h:85
bool isSimple() const
Definition: CGValue.h:252
const CGFunctionInfo & arrangeCXXMethodDeclaration(const CXXMethodDecl *MD)
C++ methods have some special rules and also have implicit parameters.
Definition: CGCall.cpp:279
bool isInstance() const
Definition: DeclCXX.h:2069
An ordinary object is located at an address in memory.
Definition: Specifiers.h:126
CXXDestructorDecl * getDestructor() const
Returns the destructor decl for this class.
Definition: DeclCXX.cpp:1697
llvm::AllocaInst * CreateTempAlloca(llvm::Type *Ty, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr)
CreateTempAlloca - This creates an alloca and inserts it into the entry block if ArraySize is nullptr...
Definition: CGExpr.cpp:106
FunctionType::ExtInfo getExtInfo() const
QualType getReturnType() const
Definition: DeclObjC.h:323
const CGFunctionInfo & arrangeLLVMFunctionInfo(CanQualType returnType, bool instanceMethod, bool chainCall, ArrayRef< CanQualType > argTypes, FunctionType::ExtInfo info, ArrayRef< FunctionProtoType::ExtParameterInfo > paramInfos, RequiredArgs args)
"Arrange" the LLVM information for a call or type with the given signature.
Definition: CGCall.cpp:743
bool getNoReturn() const
Definition: Type.h:3512
const T * getTypePtr() const
Retrieve the underlying type pointer, which refers to a canonical type.
Definition: CanonicalType.h:84
Address getAggregateAddress() const
getAggregateAddress() - Return the Address of the aggregate.
Definition: CGValue.h:71
static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD, const CXXMethodDecl *MD)
Derives the 'this' type for codegen purposes, i.e.
Definition: CGCall.cpp:73
bool getNoCallerSavedRegs() const
Definition: Type.h:3514
This parameter (which must have pointer-to-pointer type) uses the special Swift error-result ABI trea...
void EmitCallArg(CallArgList &args, const Expr *E, QualType ArgType)
EmitCallArg - Emit a single call argument.
Definition: CGCall.cpp:3582
const CGFunctionInfo & arrangeGlobalDeclaration(GlobalDecl GD)
Definition: CGCall.cpp:516
virtual void setCUDAKernelCallingConvention(const FunctionType *&FT) const
Definition: TargetInfo.h:305
ExtInfo withCallingConv(CallingConv cc) const
Definition: Type.h:3571
llvm::Value * EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, AlignmentSource Source=AlignmentSource::Type, bool isNontemporal=false)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
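Together with EmitStoreOfScalar listed earlier on this page, this gives the usual scalar round-trip; a sketch assuming Addresses Src and Dst, a QualType Ty, and a SourceLocation Loc:
  llvm::Value *V = CGF.EmitLoadOfScalar(Src, /*Volatile=*/false, Ty, Loc);
  CGF.EmitStoreOfScalar(V, Dst, /*Volatile=*/false, Ty);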
const CGFunctionInfo & arrangeUnprototypedObjCMessageSend(QualType returnType, const CallArgList &args)
Definition: CGCall.cpp:505
Represents a K&R-style 'int foo()' function, which has no information available about its arguments...
Definition: Type.h:3650
bool hasAttr() const
Definition: DeclBase.h:531
CanQualType getReturnType() const
Const iterator for iterating over Stmt * arrays that contain only Expr *.
Definition: Stmt.h:997
bool isValid() const
Definition: Address.h:36
std::pair< llvm::Value *, llvm::Value * > ComplexPairTy
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
Definition: Type.cpp:1613
Represents a prototype with parameter type info, e.g.
Definition: Type.h:3687
llvm::CallInst * EmitNounwindRuntimeCall(llvm::Value *callee, const Twine &name="")
bool isMicrosoftABI() const
Returns whether this is really a Win64 ABI va_arg expression.
Definition: Expr.h:4114
const TargetCodeGenInfo & getTargetCodeGenInfo()
RValue - This trivial value class is used to represent the result of an expression that is evaluated...
Definition: CGValue.h:39
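The three RValue kinds and their accessors, all listed on this page, compose as in this sketch; V, Real and Imag are assumed llvm::Value*s and AggAddr an Address:
  RValue Scalar = RValue::get(V);                 // single scalar value
  llvm::Value *Raw = Scalar.getScalarVal();
  RValue Cplx = RValue::getComplex(Real, Imag);   // real/imag pair
  ComplexPairTy Parts = Cplx.getComplexVal();
  RValue Agg = RValue::getAggregate(AggAddr);     // aggregate by address
  Address Where = Agg.getAggregateAddress();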
writeback_const_range writebacks() const
Definition: CGCall.h:317
void addWriteback(LValue srcLV, Address temporary, llvm::Value *toUse)
Definition: CGCall.h:306
void EmitDelegateCallArg(CallArgList &args, const VarDecl *param, SourceLocation loc)
EmitDelegateCallArg - We are performing a delegate call; that is, the current function is delegating ...
Definition: CGCall.cpp:3080
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition: CharUnits.h:179
Represents a call to the builtin function __builtin_va_arg.
Definition: Expr.h:4091
Address Temporary
The temporary alloca.
Definition: CGCall.h:271
virtual bool HasThisReturn(GlobalDecl GD) const
Returns true if the given constructor or destructor is one of the kinds that the ABI says returns 'this'...
Definition: CGCXXABI.h:107
unsigned Offset
Definition: Format.cpp:1631
llvm::Value * ToUse
A value to "use" after the writeback, or null.
Definition: CGCall.h:274
ExtParameterInfo withIsNoEscape(bool NoEscape) const
Definition: Type.h:3421
static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF, QualType Ty)
Definition: CGCall.cpp:3059
This represents one expression.
Definition: Expr.h:106
bool isVariadic() const
Whether this function is variadic.
Definition: Decl.cpp:2689
static Address invalid()
Definition: Address.h:35
llvm::Type * getUnpaddedCoerceAndExpandType() const
static bool isInAllocaArgument(CGCXXABI &ABI, QualType type)
Definition: CGCall.cpp:3054
bool useObjCFPRetForRealType(RealType T) const
Check whether the given real type should use the "fpret" flavor of Objective-C message passing on thi...
Definition: TargetInfo.h:705
static CanQualType GetReturnType(QualType RetTy)
Returns the "extra-canonicalized" return type, which discards qualifiers on the return type...
Definition: CGCall.cpp:91
std::pair< llvm::Value *, llvm::Value * > getComplexVal() const
getComplexVal - Return the real/imag components of this complex value.
Definition: CGValue.h:66
void EmitCallArgs(CallArgList &Args, const T *CallArgTypeInfo, llvm::iterator_range< CallExpr::const_arg_iterator > ArgRange, AbstractCallee AC=AbstractCallee(), unsigned ParamsToSkip=0, EvaluationOrder Order=EvaluationOrder::Default)
EmitCallArgs - Emit call arguments for a function.
const CGFunctionInfo & arrangeNullaryFunction()
A nullary function is a freestanding function of type 'void ()'.
Definition: CGCall.cpp:701
bool getHasRegParm() const
Definition: Type.h:3516
const T * castAs() const
Member-template castAs<specific type>.
Definition: Type.h:6811
bool isObjCRetainableType() const
Definition: Type.cpp:3921
Represents a C++ destructor within a class.
Definition: DeclCXX.h:2706
llvm::Constant * objc_retain
id objc_retain(id);
QualType getTagDeclType(const TagDecl *Decl) const
Return the unique reference to the type for the specified TagDecl (struct/union/class/enum) decl...
llvm::PointerType * getType() const
Return the type of the pointer value.
Definition: Address.h:44
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition: CharUnits.h:63
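A small self-contained sketch of the CharUnits quantities used throughout this file (the asserts are purely illustrative):
  CharUnits Align = CharUnits::fromQuantity(16);  // 16 bytes
  assert(!Align.isZero() && Align.getQuantity() == 16);
  assert(CharUnits::Zero().isZero());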
static bool isPaddingForCoerceAndExpand(llvm::Type *eltType)
static SmallVector< CanQualType, 16 > getArgTypesForCall(ASTContext &ctx, const CallArgList &args)
Definition: CGCall.cpp:362
static void eraseUnusedBitCasts(llvm::Instruction *insn)
Definition: CGCall.cpp:2590
SmallVector< llvm::OperandBundleDef, 1 > getBundlesForFunclet(llvm::Value *Callee)
Definition: CGCall.cpp:3707
A class for recording the number of arguments that a function signature requires. ...
bool ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI)
Return true iff the given type uses an argument slot when 'sret' is used as a return type...
Definition: CGCall.cpp:1511
const CGFunctionInfo & arrangeBuiltinFunctionCall(QualType resultType, const CallArgList &args)
Definition: CGCall.cpp:646
QualType getType() const
Definition: Expr.h:128
static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr, const ABIArgInfo &info)
Definition: CGCall.cpp:1352
Qualifiers getTypeQualifiers() const
Definition: DeclCXX.h:2188
const CGFunctionInfo & arrangeUnprototypedMustTailThunk(const CXXMethodDecl *MD)
Arrange a thunk that takes &#39;this&#39; as the first parameter followed by varargs.
Definition: CGCall.cpp:535
static llvm::StoreInst * findDominatingStoreToReturnValue(CodeGenFunction &CGF)
Heuristically search for a dominating store to the return-value slot.
Definition: CGCall.cpp:2747
CharUnits alignmentOfArrayElement(CharUnits elementSize) const
Given that this is the alignment of the first element of an array, return the minimum alignment of an...
Definition: CharUnits.h:197
void Profile(llvm::FoldingSetNodeID &ID)
UnaryOperator - This represents the unary-expression&#39;s (except sizeof and alignof), the postinc/postdec operators from postfix-expression, and various extensions.
Definition: Expr.h:1896
bool isTrivial() const
Whether this function is "trivial" in some specialized C++ senses.
Definition: Decl.h:2026
ASTContext & getContext() const
ImplicitParamDecl * getSelfDecl() const
Definition: DeclObjC.h:414
static llvm::Value * CoerceIntOrPtrToIntOrPtr(llvm::Value *Val, llvm::Type *Ty, CodeGenFunction &CGF)
CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both are either integers or p...
Definition: CGCall.cpp:1168
CallingConv
CallingConv - Specifies the calling convention that a function uses.
Definition: Specifiers.h:236
GlobalDecl - represents a global declaration.
Definition: GlobalDecl.h:35
static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val, Address Dest, bool DestIsVolatile)
Definition: CGCall.cpp:1274
ExprObjectKind getObjectKind() const
getObjectKind - The object kind that this expression produces.
Definition: Expr.h:412
CanQualType getCanonicalTypeUnqualified() const
LValue getKnownLValue() const
Definition: CGCall.h:240
The l-value was considered opaque, so the alignment was determined from a type.
RecordDecl * getDecl() const
Definition: Type.h:4380
unsigned getEffectiveCallingConvention() const
getEffectiveCallingConvention - Return the actual calling convention to use, which may depend on the ...
static void CreateCoercedStore(llvm::Value *Src, Address Dst, bool DstIsVolatile, CodeGenFunction &CGF)
CreateCoercedStore - Create a store to Dst from Src, coercing the value as needed.
Definition: CGCall.cpp:1299
Enumerates target-specific builtins in their own namespaces within namespace clang.
Address CreateBitCast(Address Addr, llvm::Type *Ty, const llvm::Twine &Name="")
Definition: CGBuilder.h:142
Assigning into this object requires the old value to be released and the new value to be retained...
Definition: Type.h:169
Kind
bool ReturnTypeUsesFPRet(QualType ResultType)
Return true iff the given type uses 'fpret' when used as a return type.
Definition: CGCall.cpp:1516
CanProxy< U > castAs() const
static const Expr * maybeGetUnaryAddrOfOperand(const Expr *E)
Definition: CGCall.cpp:3221
NullPointerConstantKind isNullPointerConstant(ASTContext &Ctx, NullPointerConstantValueDependence NPC) const
isNullPointerConstant - C99 6.3.2.3p3 - Test if this reduces down to a Null pointer constant...
Definition: Expr.cpp:3438
Encodes a location in the source.
QualType getReturnType() const
Definition: Type.h:3613
void EmitARCRelease(llvm::Value *value, ARCPreciseLifetime_t precise)
Release the given object.
Definition: CGObjC.cpp:2172
A saved depth on the scope stack.
Definition: EHScopeStack.h:107
bool inheritingCtorHasParams(const InheritedConstructor &Inherited, CXXCtorType Type)
Determine if a C++ inheriting constructor should have parameters matching those of its inherited cons...
Definition: CGCall.cpp:296
llvm::CallSite EmitRuntimeCallOrInvoke(llvm::Value *callee, ArrayRef< llvm::Value *> args, const Twine &name="")
Emits a call or invoke instruction to the given runtime function.
Definition: CGCall.cpp:3766
ParameterABI getABI() const
Return the ABI treatment of this parameter.
Definition: Type.h:3394
void DeactivateCleanupBlock(EHScopeStack::stable_iterator Cleanup, llvm::Instruction *DominatingIP)
DeactivateCleanupBlock - Deactivates the given cleanup block.
Definition: CGCleanup.cpp:1246
CallingConv getCC() const
Definition: Type.h:3525
const Decl * getDecl() const
Definition: GlobalDecl.h:69
QualType getObjCSelType() const
Retrieve the type that corresponds to the predefined Objective-C 'SEL' type.
Definition: ASTContext.h:1859
An aggregate value slot.
Definition: CGValue.h:437
virtual void computeInfo(CodeGen::CGFunctionInfo &FI) const =0
const CGFunctionInfo & arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD)
Objective-C methods are C functions with some implicit parameters.
Definition: CGCall.cpp:461
Represents a static or instance method of a struct/union/class.
Definition: DeclCXX.h:2041
void computeABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI)
Compute the ABI information of a swiftcall function.
const ConstantArrayType * getAsConstantArrayType(QualType T) const
Definition: ASTContext.h:2413
const_arg_iterator arg_end() const
llvm::StructType * getArgStruct() const
Get the struct type used to represent all the arguments in memory.
ObjCEntrypoints & getObjCEntrypoints() const
CoerceAndExpand - Only valid for aggregate argument types.
void allocateArgumentMemory(CodeGenFunction &CGF)
Definition: CGCall.cpp:3360
Specifies that a value-dependent expression should be considered to never be a null pointer constant...
Definition: Expr.h:719
CanQualType VoidTy
Definition: ASTContext.h:1016
llvm::InlineAsm * retainAutoreleasedReturnValueMarker
A void(void) inline asm to use to mark that the return value of a call will be immediately retain...
bool isAnyPointerType() const
Definition: Type.h:6300
An aligned address.
Definition: Address.h:25
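A sketch of pairing a raw pointer with its known alignment; it assumes this release's two-argument constructor Address(llvm::Value*, CharUnits) and an llvm::Value *Ptr in scope:
  Address A(Ptr, CharUnits::fromQuantity(8));
  assert(A.isValid() && !Address::invalid().isValid());
  llvm::Value *P = A.getPointer();      // the raw pointer
  CharUnits Align = A.getAlignment();   // its alignment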
DestructionKind isDestructedType() const
Returns a nonzero value if objects of this type require non-trivial work to clean up after...
Definition: Type.h:1152
bool useObjCFP2RetForComplexLongDouble() const
Check whether _Complex long double should use the "fp2ret" flavor of Objective-C message passing on t...
Definition: TargetInfo.h:711
llvm::LLVMContext & getLLVMContext()
Definition: CodeGenTypes.h:177
All available information about a concrete callee.
Definition: CGCall.h:67
static SmallVector< CanQualType, 16 > getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args)
Definition: CGCall.cpp:370
Complete object dtor.
Definition: ABI.h:36
InAlloca - Pass the argument directly using the LLVM inalloca attribute.
bool ReturnTypeUsesFP2Ret(QualType ResultType)
Return true iff the given type uses 'fp2ret' when used as a return type.
Definition: CGCall.cpp:1533
static void AddAttributesFromFunctionProtoType(ASTContext &Ctx, llvm::AttrBuilder &FuncAttrs, const FunctionProtoType *FPT)
Definition: CGCall.cpp:1694
bool hasFlexibleArrayMember() const
Definition: Decl.h:3647
ExceptionSpecificationType getExceptionSpecType() const
Get the kind of exception specification on this function.
Definition: Type.h:3922
CXXCtorType
C++ constructor types.
Definition: ABI.h:25
CanProxy< U > getAs() const
Retrieve a canonical type pointer with a different static type, upcasting or downcasting as needed...
const CGFunctionInfo & arrangeBlockFunctionCall(const CallArgList &args, const FunctionType *type)
A block function is essentially a free function with an extra implicit argument.
Definition: CGCall.cpp:626
std::pair< CharUnits, CharUnits > getTypeInfoInChars(const Type *T) const
llvm::Type * getPaddingType() const
void setExternallyDestructed(bool destructed=true)
Definition: CGValue.h:554
static Address EnterStructPointerForCoercedAccess(Address SrcPtr, llvm::StructType *SrcSTy, uint64_t DstSize, CodeGenFunction &CGF)
EnterStructPointerForCoercedAccess - Given a struct pointer that we are accessing some number of byte...
Definition: CGCall.cpp:1132
FunctionArgList - Type for representing both the decl and type of parameters to a function...
Definition: CGCall.h:356
bool getInAllocaSRet() const
Return true if this field of an inalloca struct should be returned to implement a struct return calli...
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
Definition: CGValue.h:59
CallingConv getDefaultCallingConvention(bool IsVariadic, bool IsCXXMethod) const
Retrieves the default calling convention for the current target.
const TargetInfo & getTarget() const
Definition: CodeGenTypes.h:175
CGFunctionInfo - Class to encapsulate the information about a function definition.
This class organizes the cross-function state that is used while generating LLVM code.
Dataflow Directional Tag Classes.
void EmitCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type, bool ForVirtualBase, bool Delegating, Address This)
Definition: CGClass.cpp:2389
ExtInfo getExtInfo() const
Definition: Type.h:3624
static RValue getComplex(llvm::Value *V1, llvm::Value *V2)
Definition: CGValue.h:93
CodeGenFunction::ComplexPairTy ComplexPairTy
Address CreateStructGEP(Address Addr, unsigned Index, CharUnits Offset, const llvm::Twine &Name="")
Definition: CGBuilder.h:172
CXXDtorType toCXXDtorType(StructorType T)
Definition: CodeGenTypes.h:91
LValue Source
The original argument.
Definition: CGCall.h:268
const CGFunctionInfo & arrangeFunctionDeclaration(const FunctionDecl *FD)
Free functions are functions that are compatible with an ordinary C function pointer type...
Definition: CGCall.cpp:437
void EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee, ArrayRef< llvm::Value *> args)
Emits a call or invoke to the given noreturn runtime function.
Definition: CGCall.cpp:3735
llvm::LoadInst * CreateAlignedLoad(llvm::Value *Addr, CharUnits Align, const llvm::Twine &Name="")
Definition: CGBuilder.h:91
static void forConstantArrayExpansion(CodeGenFunction &CGF, ConstantArrayExpansion *CAE, Address BaseAddr, llvm::function_ref< void(Address)> Fn)
Definition: CGCall.cpp:1006
ArrayRef< ExtParameterInfo > getExtParameterInfos() const
static const CGFunctionInfo & arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod, SmallVectorImpl< CanQualType > &prefix, CanQual< FunctionProtoType > FTP, const FunctionDecl *FD)
Arrange the LLVM function layout for a value of the given function type, on top of any implicit param...
Definition: CGCall.cpp:170
Interesting information about a specific parameter that can't simply be reflected in the parameter's type...
Definition: Type.h:3381
void EmitARCIntrinsicUse(ArrayRef< llvm::Value *> values)
Given a number of pointers, inform the optimizer that they're being intrinsically used up until this ...
Definition: CGObjC.cpp:1893
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
Definition: CGBuilder.h:70
const CXXRecordDecl * getParent() const
Returns the parent of this method declaration, which is the class in which this method is defined...
Definition: DeclCXX.h:2166
llvm::Function * getIntrinsic(unsigned IID, ArrayRef< llvm::Type *> Tys=None)
RValue getRValue(CodeGenFunction &CGF) const
Definition: CGCall.cpp:3555
static CGFunctionInfo * create(unsigned llvmCC, bool instanceMethod, bool chainCall, const FunctionType::ExtInfo &extInfo, ArrayRef< ExtParameterInfo > paramInfos, CanQualType resultType, ArrayRef< CanQualType > argTypes, RequiredArgs required)
Definition: CGCall.cpp:802
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Definition: CGBuilder.h:108
virtual bool isNoProtoCallVariadic(const CodeGen::CallArgList &args, const FunctionNoProtoType *fnType) const
Determine whether a call to an unprototyped function under the given calling convention should use t...
Definition: TargetInfo.cpp:401
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
uint64_t SanitizerMask
Definition: Sanitizers.h:26
ArrayRef< llvm::Type * > getCoerceAndExpandTypeSequence() const
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of structs/unions/cl...
Definition: Type.h:4370
Complex values, per C99 6.2.5p11.
Definition: Type.h:2477
StructorType getFromCtorType(CXXCtorType T)
Definition: CodeGenTypes.h:76
static bool classof(const OMPClause *T)
bool isConstantSizeType() const
Return true if this is not a variable sized type, according to the rules of C99 6.7.5p3.
Definition: Type.cpp:2021
QualType getCanonicalTypeInternal() const
Definition: Type.h:2355
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
Definition: Type.h:6578
void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
static llvm::Value * tryRemoveRetainOfSelf(CodeGenFunction &CGF, llvm::Value *result)
If this is a +1 of the value of an immutable 'self', remove it.
Definition: CGCall.cpp:2690
CharUnits getIndirectAlign() const
Implements C++ ABI-specific code generation functions.
Definition: CGCXXABI.h:44
T * getAttr() const
Definition: DeclBase.h:527
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition: Address.h:52
bool isMSVCXXPersonality() const
Definition: CGCleanup.h:645
This class organizes the cross-module state that is used while lowering AST types to LLVM types...
Definition: CodeGenTypes.h:119
llvm::StringRef getName() const
Return the IR name of the pointer value.
Definition: Address.h:62
Expand - Only valid for aggregate argument types.
Base for LValueReferenceType and RValueReferenceType.
Definition: Type.h:2673
void getExpandedTypes(QualType Ty, SmallVectorImpl< llvm::Type *>::iterator &TI)
getExpandedTypes - Expand the type Ty into its constituent LLVM argument types.
Definition: CGCall.cpp:984
static std::unique_ptr< TypeExpansion > getTypeExpansion(QualType Ty, const ASTContext &Context)
Definition: CGCall.cpp:909
bool isParamDestroyedInCallee() const
Definition: Decl.h:3731
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
Definition: CGStmt.cpp:443
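Combined with createBasicBlock listed above, this is the usual continuation pattern inside a CodeGenFunction member; a hedged sketch:
  llvm::BasicBlock *ContBB = CGF.createBasicBlock("cont");
  // ... emit conditional branches that target ContBB ...
  CGF.EmitBlock(ContBB);   // append the block and keep emitting there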
Represents a base class of a C++ class.
Definition: DeclCXX.h:192
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
Definition: ASTContext.h:2070
bool isIncompleteType(NamedDecl **Def=nullptr) const
Types are partitioned into 3 broad categories (C99 6.2.5p1): object types, function types...
Definition: Type.cpp:2031
ASTContext & getContext() const
Definition: CodeGenTypes.h:173
Pass it on the stack using its defined layout.
Definition: CGCXXABI.h:134
llvm::iterator_range< specific_attr_iterator< T > > specific_attrs() const
Definition: DeclBase.h:513
llvm::Type * GetFunctionTypeForVTable(GlobalDecl GD)
GetFunctionTypeForVTable - Get the LLVM function type for use in a vtable, given a CXXMethodDecl...
Definition: CGCall.cpp:1678
LangAS getAddressSpace() const
Definition: CGValue.h:314
Defines the C++ Decl subclasses, other than those for templates (found in DeclTemplate.h) and friends (in DeclFriend.h).
RValue getKnownRValue() const
Definition: CGCall.h:244
Represents a C++ struct/union/class.
Definition: DeclCXX.h:300
virtual const CXXRecordDecl * getThisArgumentTypeForMethod(const CXXMethodDecl *MD)
Get the type of the implicit "this" parameter used by a method.
Definition: CGCXXABI.h:338
bool isVoidType() const
Definition: Type.h:6544
llvm::Type * ConvertType(QualType T)
void EmitFunctionProlog(const CGFunctionInfo &FI, llvm::Function *Fn, const FunctionArgList &Args)
EmitFunctionProlog - Emit the target specific LLVM code to load the arguments for the given function...
Definition: CGCall.cpp:2230
Qualifiers getQualifiers() const
Retrieve the set of qualifiers applied to this type.
Definition: Type.h:6099
virtual RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const =0
Returns how an argument of the given record type should be passed.
const GlobalDecl getCalleeDecl() const
Definition: CGCall.h:63
LValue EmitLValue(const Expr *E)
EmitLValue - Emit code to compute a designator that specifies the location of the expression...
Definition: CGExpr.cpp:1236
Address ReturnValue
ReturnValue - The temporary alloca to hold the return value.
QualType getPointerType(QualType T) const
Return the uniqued reference to the type for a pointer to the specified type.
This class is used for builtin types like 'int'.
Definition: Type.h:2391
bool isVariadic() const
Definition: DeclObjC.h:427
bool shouldCopy() const
shouldCopy - True if we should do the 'copy' part of the copy-restore.
Definition: ExprObjC.h:1548
RValue EmitLoadOfLValue(LValue V, SourceLocation Loc)
EmitLoadOfLValue - Given an expression that represents a value lvalue, this method emits the address ...
Definition: CGExpr.cpp:1760
Copying closure variant of a ctor.
Definition: ABI.h:29
Defines the clang::TargetInfo interface.
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition: Expr.h:2396
StringRef getName() const
Get the name of identifier for this declaration as a StringRef.
Definition: Decl.h:276
static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows)
Definition: CGCall.cpp:197
bool hasLValue() const
Definition: CGCall.h:233
bool isCanonicalAsParam() const
Determines if this canonical type is furthermore canonical as a parameter.
CGCXXABI & getCXXABI() const
const CGFunctionInfo & arrangeMSCtorClosure(const CXXConstructorDecl *CD, CXXCtorType CT)
Definition: CGCall.cpp:545
__DEVICE__ int max(int __a, int __b)
CanQualType IntTy
Definition: ASTContext.h:1025
bool constructsVirtualBase() const
Returns true if the constructed base class is a virtual base class subobject of this declaration's cl...
Definition: DeclCXX.h:3332
static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF, const CallArgList &CallArgs)
Definition: CGCall.cpp:3210
void EmitAggregateCopy(LValue Dest, LValue Src, QualType EltTy, AggValueSlot::Overlap_t MayOverlap, bool isVolatile=false)
EmitAggregateCopy - Emit an aggregate copy.
Definition: CGExprAgg.cpp:1823
const FunctionProtoType * getCalleeFunctionProtoType() const
Definition: CGCall.h:60
QualType getIntPtrType() const
Return a type compatible with "intptr_t" (C99 7.18.1.4), as defined by the target.
static RValue get(llvm::Value *V)
Definition: CGValue.h:86
bool isUnion() const
Definition: Decl.h:3252
static RequiredArgs forPrototypePlus(const FunctionProtoType *prototype, unsigned additional, const FunctionDecl *FD)
Compute the arguments required by the given formal prototype, given that there may be some additional...
bool isPointerType() const
Definition: Type.h:6296
__DEVICE__ int min(int __a, int __b)
unsigned getNumRequiredArgs() const
unsigned getDirectOffset() const
const CGFunctionInfo & arrangeFreeFunctionCall(const CallArgList &Args, const FunctionType *Ty, bool ChainCall)
Figure out the rules for calling a function with the given formal type using the given arguments...
Definition: CGCall.cpp:616
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
CXXCtorType toCXXCtorType(StructorType T)
Definition: CodeGenTypes.h:64
void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI)
QualType getType() const
Definition: Decl.h:648
static RValue getAggregate(Address addr, bool isVolatile=false)
Definition: CGValue.h:107
bool isUnresolvedExceptionSpec(ExceptionSpecificationType ESpecType)
ExtParameterInfo getExtParameterInfo(unsigned argIndex) const
LValue - This represents an lvalue references.
Definition: CGValue.h:167
An abstract representation of regular/ObjC call/message targets.
SourceLocation getBeginLoc() const LLVM_READONLY
Definition: Expr.cpp:1428
Information for lazily generating a cleanup.
Definition: EHScopeStack.h:147
virtual CGCallee getVirtualFunctionPointer(CodeGenFunction &CGF, GlobalDecl GD, Address This, llvm::Type *Ty, SourceLocation Loc)=0
Build a virtual function pointer in the ABI-specific way.
RValue asRValue() const
Definition: CGValue.h:607
llvm::Type * getCoerceToType() const
CGCalleeInfo getAbstractInfo() const
Definition: CGCall.h:172
Notes how many arguments were added to the beginning (Prefix) and ending (Suffix) of an arg list...
Definition: CGCXXABI.h:300
unsigned getTargetAddressSpace(QualType T) const
Definition: ASTContext.h:2499
void AddDefaultFnAttrs(llvm::Function &F)
Adds attributes to F according to our CodeGenOptions and LangOptions, as though we had emitted it our...
Definition: CGCall.cpp:1827
CallArgList - Type for representing both the value and type of arguments in a call.
Definition: CGCall.h:260
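Building an argument list pairs each RValue with the QualType it was derived from; a sketch assuming an llvm::Value *V in scope:
  CallArgList Args;
  Args.add(RValue::get(V), CGF.getContext().IntTy);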
const CGFunctionInfo & arrangeCXXMethodCall(const CallArgList &args, const FunctionProtoType *type, RequiredArgs required, unsigned numPrefixArgs)
Arrange a call to a C++ method, passing the given arguments.
Definition: CGCall.cpp:681
Represents the canonical version of C arrays with a specified constant size.
Definition: Type.h:2872
Abstract information about a function or function prototype.
Definition: CGCall.h:45
A class which abstracts out some details necessary for making a call.
Definition: Type.h:3466
bool isScalar() const
Definition: CGValue.h:52
Attr - This represents one attribute.
Definition: Attr.h:44
This parameter (which must have pointer type) is a Swift indirect result parameter.
static QualType getParamType(Sema &SemaRef, ArrayRef< ResultCandidate > Candidates, unsigned N)
Get the type of the Nth parameter from a given set of overload candidates.
ConstructorUsingShadowDecl * getShadowDecl() const
Definition: DeclCXX.h:2470
ArrayRef< ParmVarDecl * > parameters() const
Definition: DeclObjC.h:367
Expr * IgnoreParens() LLVM_READONLY
IgnoreParens - Ignore parentheses.
Definition: Expr.cpp:2560
ArrayRef< CallArgCleanup > getCleanupsToDeactivate() const
Definition: CGCall.h:329
CanQualType getSizeType() const
Return the unique type for "size_t" (C99 7.17), defined in <stddef.h>.
static CharUnits getDeclAlign(Expr *E, CharUnits TypeAlign, ASTContext &Context)
A helper function to get the alignment of a Decl referred to by DeclRefExpr or MemberExpr.
llvm::FunctionType * GetFunctionType(const CGFunctionInfo &Info)
GetFunctionType - Get the LLVM function type for Info.
Definition: CGCall.cpp:1550
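A closing sketch tying the arrangement and lowering steps together; FD is an assumed const FunctionDecl*:
  const CGFunctionInfo &FI = CGM.getTypes().arrangeFunctionDeclaration(FD);
  llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);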