CodeGenFunction.cpp (clang 10.0.0git)
1 //===--- CodeGenFunction.cpp - Emit LLVM Code from ASTs for a Function ----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This coordinates the per-function state used while generating code.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "CodeGenFunction.h"
14 #include "CGBlocks.h"
15 #include "CGCUDARuntime.h"
16 #include "CGCXXABI.h"
17 #include "CGCleanup.h"
18 #include "CGDebugInfo.h"
19 #include "CGOpenMPRuntime.h"
20 #include "CodeGenModule.h"
21 #include "CodeGenPGO.h"
22 #include "TargetInfo.h"
23 #include "clang/AST/ASTContext.h"
24 #include "clang/AST/ASTLambda.h"
25 #include "clang/AST/Attr.h"
26 #include "clang/AST/Decl.h"
27 #include "clang/AST/DeclCXX.h"
28 #include "clang/AST/StmtCXX.h"
29 #include "clang/AST/StmtObjC.h"
30 #include "clang/Basic/Builtins.h"
31 #include "clang/Basic/CodeGenOptions.h"
32 #include "clang/Basic/TargetInfo.h"
33 #include "clang/CodeGen/CGFunctionInfo.h"
34 #include "clang/Frontend/FrontendDiagnostic.h"
35 #include "llvm/IR/DataLayout.h"
36 #include "llvm/IR/Dominators.h"
37 #include "llvm/IR/FPEnv.h"
38 #include "llvm/IR/IntrinsicInst.h"
39 #include "llvm/IR/Intrinsics.h"
40 #include "llvm/IR/MDBuilder.h"
41 #include "llvm/IR/Operator.h"
42 #include "llvm/Transforms/Utils/PromoteMemToReg.h"
43 using namespace clang;
44 using namespace CodeGen;
45 
46 /// shouldEmitLifetimeMarkers - Decide whether we need to emit the life-time
47 /// markers.
48 static bool shouldEmitLifetimeMarkers(const CodeGenOptions &CGOpts,
49  const LangOptions &LangOpts) {
50  if (CGOpts.DisableLifetimeMarkers)
51  return false;
52 
53  // Sanitizers may use markers.
54  if (CGOpts.SanitizeAddressUseAfterScope ||
55  LangOpts.Sanitize.has(SanitizerKind::HWAddress) ||
56  LangOpts.Sanitize.has(SanitizerKind::Memory))
57  return true;
58 
59  // For now, only in optimized builds.
60  return CGOpts.OptimizationLevel != 0;
61 }
62 
63 CodeGenFunction::CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext)
64  : CodeGenTypeCache(cgm), CGM(cgm), Target(cgm.getTarget()),
65  Builder(cgm, cgm.getModule().getContext(), llvm::ConstantFolder(),
66  CGBuilderInserterTy(this)),
67  SanOpts(CGM.getLangOpts().Sanitize), DebugInfo(CGM.getModuleDebugInfo()),
68  PGO(cgm), ShouldEmitLifetimeMarkers(shouldEmitLifetimeMarkers(
69  CGM.getCodeGenOpts(), CGM.getLangOpts())) {
70  if (!suppressNewContext)
71  CGM.getCXXABI().getMangleContext().startNewFunction();
72 
73  llvm::FastMathFlags FMF;
74  if (CGM.getLangOpts().FastMath)
75  FMF.setFast();
76  if (CGM.getLangOpts().FiniteMathOnly) {
77  FMF.setNoNaNs();
78  FMF.setNoInfs();
79  }
80  if (CGM.getCodeGenOpts().NoNaNsFPMath) {
81  FMF.setNoNaNs();
82  }
83  if (CGM.getCodeGenOpts().NoSignedZeros) {
84  FMF.setNoSignedZeros();
85  }
86  if (CGM.getCodeGenOpts().ReciprocalMath) {
87  FMF.setAllowReciprocal();
88  }
89  if (CGM.getCodeGenOpts().Reassociate) {
90  FMF.setAllowReassoc();
91  }
92  Builder.setFastMathFlags(FMF);
93  SetFPModel();
94 }
95 
96 CodeGenFunction::~CodeGenFunction() {
97  assert(LifetimeExtendedCleanupStack.empty() && "failed to emit a cleanup");
98 
99  // If there are any unclaimed block infos, go ahead and destroy them
100  // now. This can happen if IR-gen gets clever and skips evaluating
101  // something.
102  if (FirstBlockInfo)
103  destroyBlockInfos(FirstBlockInfo);
104 
105  if (getLangOpts().OpenMP && CurFn)
106  CGM.getOpenMPRuntime().functionFinished(*this);
107 }
108 
109 // Map the LangOption for rounding mode into
110 // the corresponding enum in the IR.
111 static llvm::fp::RoundingMode ToConstrainedRoundingMD(
112  LangOptions::FPRoundingModeKind Kind) {
113 
114  switch (Kind) {
115  case LangOptions::FPR_ToNearest: return llvm::fp::rmToNearest;
116  case LangOptions::FPR_Downward: return llvm::fp::rmDownward;
117  case LangOptions::FPR_Upward: return llvm::fp::rmUpward;
118  case LangOptions::FPR_TowardZero: return llvm::fp::rmTowardZero;
119  case LangOptions::FPR_Dynamic: return llvm::fp::rmDynamic;
120  }
121  llvm_unreachable("Unsupported FP RoundingMode");
122 }
123 
124 // Map the LangOption for exception behavior into
125 // the corresponding enum in the IR.
126 static llvm::fp::ExceptionBehavior ToConstrainedExceptMD(
127  LangOptions::FPExceptionModeKind Kind) {
128 
129  switch (Kind) {
130  case LangOptions::FPE_Ignore: return llvm::fp::ebIgnore;
131  case LangOptions::FPE_MayTrap: return llvm::fp::ebMayTrap;
132  case LangOptions::FPE_Strict: return llvm::fp::ebStrict;
133  }
134  llvm_unreachable("Unsupported FP Exception Behavior");
135 }
136 
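// SetFPModel (below) configures the IR builder from the language-level FP
// options: when the requested rounding mode or exception behavior differs from
// the defaults (round-to-nearest, exceptions ignored), the builder is put into
// constrained-FP mode so FP operations are emitted as constrained intrinsics.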
137 void CodeGenFunction::SetFPModel() {
138  auto fpRoundingMode = ToConstrainedRoundingMD(
139  getLangOpts().getFPRoundingMode());
140  auto fpExceptionBehavior = ToConstrainedExceptMD(
141  getLangOpts().getFPExceptionMode());
142 
143  if (fpExceptionBehavior == llvm::fp::ebIgnore &&
144  fpRoundingMode == llvm::fp::rmToNearest)
145  // Constrained intrinsics are not used.
146  ;
147  else {
148  Builder.setIsFPConstrained(true);
149  Builder.setDefaultConstrainedRounding(fpRoundingMode);
150  Builder.setDefaultConstrainedExcept(fpExceptionBehavior);
151  }
152 }
153 
154 CharUnits CodeGenFunction::getNaturalPointeeTypeAlignment(QualType T,
155  LValueBaseInfo *BaseInfo,
156  TBAAAccessInfo *TBAAInfo) {
157  return getNaturalTypeAlignment(T->getPointeeType(), BaseInfo, TBAAInfo,
158  /* forPointeeType= */ true);
159 }
160 
161 CharUnits CodeGenFunction::getNaturalTypeAlignment(QualType T,
162  LValueBaseInfo *BaseInfo,
163  TBAAAccessInfo *TBAAInfo,
164  bool forPointeeType) {
165  if (TBAAInfo)
166  *TBAAInfo = CGM.getTBAAAccessInfo(T);
167 
168  // Honor alignment typedef attributes even on incomplete types.
169  // We also honor them straight for C++ class types, even as pointees;
170  // there's an expressivity gap here.
171  if (auto TT = T->getAs<TypedefType>()) {
172  if (auto Align = TT->getDecl()->getMaxAlignment()) {
173  if (BaseInfo)
174  *BaseInfo = LValueBaseInfo(AlignmentSource::AttributedType);
175  return getContext().toCharUnitsFromBits(Align);
176  }
177  }
178 
179  if (BaseInfo)
180  *BaseInfo = LValueBaseInfo(AlignmentSource::Type);
181 
182  CharUnits Alignment;
183  if (T->isIncompleteType()) {
184  Alignment = CharUnits::One(); // Shouldn't be used, but pessimistic is best.
185  } else {
186  // For C++ class pointees, we don't know whether we're pointing at a
187  // base or a complete object, so we generally need to use the
188  // non-virtual alignment.
189  const CXXRecordDecl *RD;
190  if (forPointeeType && (RD = T->getAsCXXRecordDecl())) {
191  Alignment = CGM.getClassPointerAlignment(RD);
192  } else {
193  Alignment = getContext().getTypeAlignInChars(T);
194  if (T.getQualifiers().hasUnaligned())
195  Alignment = CharUnits::One();
196  }
197 
198  // Cap to the global maximum type alignment unless the alignment
199  // was somehow explicit on the type.
200  if (unsigned MaxAlign = getLangOpts().MaxTypeAlign) {
201  if (Alignment.getQuantity() > MaxAlign &&
202  !getContext().isAlignmentRequired(T))
203  Alignment = CharUnits::fromQuantity(MaxAlign);
204  }
205  }
206  return Alignment;
207 }
208 
209 LValue CodeGenFunction::MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T) {
210  LValueBaseInfo BaseInfo;
211  TBAAAccessInfo TBAAInfo;
212  CharUnits Alignment = getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo);
213  return LValue::MakeAddr(Address(V, Alignment), T, getContext(), BaseInfo,
214  TBAAInfo);
215 }
216 
217 /// Given a value of type T* that may not be to a complete object,
218 /// construct an l-value with the natural pointee alignment of T.
219 LValue
220 CodeGenFunction::MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T) {
221  LValueBaseInfo BaseInfo;
222  TBAAAccessInfo TBAAInfo;
223  CharUnits Align = getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo,
224  /* forPointeeType= */ true);
225  return MakeAddrLValue(Address(V, Align), T, BaseInfo, TBAAInfo);
226 }
227 
228 
229 llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) {
230  return CGM.getTypes().ConvertTypeForMem(T);
231 }
232 
233 llvm::Type *CodeGenFunction::ConvertType(QualType T) {
234  return CGM.getTypes().ConvertType(T);
235 }
236 
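/// Classify how IR generation evaluates values of a type: as a single scalar
/// (TEK_Scalar), as a real/imaginary pair (TEK_Complex), or in memory as an
/// aggregate (TEK_Aggregate).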
237 TypeEvaluationKind CodeGenFunction::getEvaluationKind(QualType type) {
238  type = type.getCanonicalType();
239  while (true) {
240  switch (type->getTypeClass()) {
241 #define TYPE(name, parent)
242 #define ABSTRACT_TYPE(name, parent)
243 #define NON_CANONICAL_TYPE(name, parent) case Type::name:
244 #define DEPENDENT_TYPE(name, parent) case Type::name:
245 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(name, parent) case Type::name:
246 #include "clang/AST/TypeNodes.inc"
247  llvm_unreachable("non-canonical or dependent type in IR-generation");
248 
249  case Type::Auto:
250  case Type::DeducedTemplateSpecialization:
251  llvm_unreachable("undeduced type in IR-generation");
252 
253  // Various scalar types.
254  case Type::Builtin:
255  case Type::Pointer:
256  case Type::BlockPointer:
257  case Type::LValueReference:
258  case Type::RValueReference:
259  case Type::MemberPointer:
260  case Type::Vector:
261  case Type::ExtVector:
262  case Type::FunctionProto:
263  case Type::FunctionNoProto:
264  case Type::Enum:
265  case Type::ObjCObjectPointer:
266  case Type::Pipe:
267  return TEK_Scalar;
268 
269  // Complexes.
270  case Type::Complex:
271  return TEK_Complex;
272 
273  // Arrays, records, and Objective-C objects.
274  case Type::ConstantArray:
275  case Type::IncompleteArray:
276  case Type::VariableArray:
277  case Type::Record:
278  case Type::ObjCObject:
279  case Type::ObjCInterface:
280  return TEK_Aggregate;
281 
282  // We operate on atomic values according to their underlying type.
283  case Type::Atomic:
284  type = cast<AtomicType>(type)->getValueType();
285  continue;
286  }
287  llvm_unreachable("unknown type kind!");
288  }
289 }
290 
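/// Emit the unified return block, folding it into the current block when
/// possible. Returns the debug location of the lone simple 'return' statement,
/// if any, so FinishFunction can attach it to the final 'ret' instruction.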
291 llvm::DebugLoc CodeGenFunction::EmitReturnBlock() {
292  // For cleanliness, we try to avoid emitting the return block for
293  // simple cases.
294  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
295 
296  if (CurBB) {
297  assert(!CurBB->getTerminator() && "Unexpected terminated block.");
298 
299  // We have a valid insert point, reuse it if it is empty or there are no
300  // explicit jumps to the return block.
301  if (CurBB->empty() || ReturnBlock.getBlock()->use_empty()) {
302  ReturnBlock.getBlock()->replaceAllUsesWith(CurBB);
303  delete ReturnBlock.getBlock();
304  ReturnBlock = JumpDest();
305  } else
306  EmitBlock(ReturnBlock.getBlock());
307  return llvm::DebugLoc();
308  }
309 
310  // Otherwise, if the return block is the target of a single direct
311  // branch then we can just put the code in that block instead. This
312  // cleans up functions which started with a unified return block.
313  if (ReturnBlock.getBlock()->hasOneUse()) {
314  llvm::BranchInst *BI =
315  dyn_cast<llvm::BranchInst>(*ReturnBlock.getBlock()->user_begin());
316  if (BI && BI->isUnconditional() &&
317  BI->getSuccessor(0) == ReturnBlock.getBlock()) {
318  // Record/return the DebugLoc of the simple 'return' expression to be used
319  // later by the actual 'ret' instruction.
320  llvm::DebugLoc Loc = BI->getDebugLoc();
321  Builder.SetInsertPoint(BI->getParent());
322  BI->eraseFromParent();
323  delete ReturnBlock.getBlock();
324  ReturnBlock = JumpDest();
325  return Loc;
326  }
327  }
328 
329  // FIXME: We are at an unreachable point, there is no reason to emit the block
330  // unless it has uses. However, we still need a place to put the debug
331  // region.end for now.
332 
333  EmitBlock(ReturnBlock.getBlock());
334  return llvm::DebugLoc();
335 }
336 
337 static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB) {
338  if (!BB) return;
339  if (!BB->use_empty())
340  return CGF.CurFn->getBasicBlockList().push_back(BB);
341  delete BB;
342 }
343 
344 void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
345  assert(BreakContinueStack.empty() &&
346  "mismatched push/pop in break/continue stack!");
347 
348  bool OnlySimpleReturnStmts = NumSimpleReturnExprs > 0
349  && NumSimpleReturnExprs == NumReturnExprs
350  && ReturnBlock.getBlock()->use_empty();
351  // Usually the return expression is evaluated before the cleanup
352  // code. If the function contains only a simple return statement,
353  // such as a constant, the location before the cleanup code becomes
354  // the last useful breakpoint in the function, because the simple
355  // return expression will be evaluated after the cleanup code. To be
356  // safe, set the debug location for cleanup code to the location of
357  // the return statement. Otherwise the cleanup code should be at the
358  // end of the function's lexical scope.
359  //
360  // If there are multiple branches to the return block, the branch
361  // instructions will get the location of the return statements and
362  // all will be fine.
363  if (CGDebugInfo *DI = getDebugInfo()) {
364  if (OnlySimpleReturnStmts)
365  DI->EmitLocation(Builder, LastStopPoint);
366  else
367  DI->EmitLocation(Builder, EndLoc);
368  }
369 
370  // Pop any cleanups that might have been associated with the
371  // parameters. Do this in whatever block we're currently in; it's
372  // important to do this before we enter the return block or return
373  // edges will be *really* confused.
374  bool HasCleanups = EHStack.stable_begin() != PrologueCleanupDepth;
375  bool HasOnlyLifetimeMarkers =
376  HasCleanups && EHStack.containsOnlyLifetimeMarkers(PrologueCleanupDepth);
377  bool EmitRetDbgLoc = !HasCleanups || HasOnlyLifetimeMarkers;
378  if (HasCleanups) {
379  // Make sure the line table doesn't jump back into the body for
380  // the ret after it's been at EndLoc.
381  Optional<ApplyDebugLocation> AL;
382  if (CGDebugInfo *DI = getDebugInfo()) {
383  if (OnlySimpleReturnStmts)
384  DI->EmitLocation(Builder, EndLoc);
385  else
386  // We may not have a valid end location. Try to apply it anyway, and
387  // fall back to an artificial location if needed.
388  AL = ApplyDebugLocation::CreateDefaultArtificial(*this, EndLoc);
389  }
390 
391  PopCleanupBlocks(PrologueCleanupDepth);
392  }
393 
394  // Emit function epilog (to return).
395  llvm::DebugLoc Loc = EmitReturnBlock();
396 
397  if (ShouldInstrumentFunction()) {
398  if (CGM.getCodeGenOpts().InstrumentFunctions)
399  CurFn->addFnAttr("instrument-function-exit", "__cyg_profile_func_exit");
400  if (CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining)
401  CurFn->addFnAttr("instrument-function-exit-inlined",
402  "__cyg_profile_func_exit");
403  }
404 
405  // Emit debug descriptor for function end.
406  if (CGDebugInfo *DI = getDebugInfo())
407  DI->EmitFunctionEnd(Builder, CurFn);
408 
409  // Reset the debug location to that of the simple 'return' expression, if any,
410  // rather than that of the end of the function's scope '}'.
411  ApplyDebugLocation AL(*this, Loc);
412  EmitFunctionEpilog(*CurFnInfo, EmitRetDbgLoc, EndLoc);
413  EmitEndEHSpec(CurCodeDecl);
414 
415  assert(EHStack.empty() &&
416  "did not remove all scopes from cleanup stack!");
417 
418  // If someone did an indirect goto, emit the indirect goto block at the end of
419  // the function.
420  if (IndirectBranch) {
421  EmitBlock(IndirectBranch->getParent());
422  Builder.ClearInsertionPoint();
423  }
424 
425  // If some of our locals escaped, insert a call to llvm.localescape in the
426  // entry block.
427  if (!EscapedLocals.empty()) {
428  // Invert the map from local to index into a simple vector. There should be
429  // no holes.
430  SmallVector<llvm::Value *, 4> EscapeArgs;
431  EscapeArgs.resize(EscapedLocals.size());
432  for (auto &Pair : EscapedLocals)
433  EscapeArgs[Pair.second] = Pair.first;
434  llvm::Function *FrameEscapeFn = llvm::Intrinsic::getDeclaration(
435  &CGM.getModule(), llvm::Intrinsic::localescape);
436  CGBuilderTy(*this, AllocaInsertPt).CreateCall(FrameEscapeFn, EscapeArgs);
437  }
438 
439  // Remove the AllocaInsertPt instruction, which is just a convenience for us.
440  llvm::Instruction *Ptr = AllocaInsertPt;
441  AllocaInsertPt = nullptr;
442  Ptr->eraseFromParent();
443 
444  // If someone took the address of a label but never did an indirect goto, we
445  // made a zero entry PHI node, which is illegal, zap it now.
446  if (IndirectBranch) {
447  llvm::PHINode *PN = cast<llvm::PHINode>(IndirectBranch->getAddress());
448  if (PN->getNumIncomingValues() == 0) {
449  PN->replaceAllUsesWith(llvm::UndefValue::get(PN->getType()));
450  PN->eraseFromParent();
451  }
452  }
453 
454  EmitIfUsed(*this, EHResumeBlock);
455  EmitIfUsed(*this, TerminateLandingPad);
456  EmitIfUsed(*this, TerminateHandler);
457  EmitIfUsed(*this, UnreachableBlock);
458 
459  for (const auto &FuncletAndParent : TerminateFunclets)
460  EmitIfUsed(*this, FuncletAndParent.second);
461 
462  if (CGM.getCodeGenOpts().EmitDeclMetadata)
463  EmitDeclMetadata();
464 
465  for (SmallVectorImpl<std::pair<llvm::Instruction *, llvm::Value *> >::iterator
466  I = DeferredReplacements.begin(),
467  E = DeferredReplacements.end();
468  I != E; ++I) {
469  I->first->replaceAllUsesWith(I->second);
470  I->first->eraseFromParent();
471  }
472 
473  // Eliminate CleanupDestSlot alloca by replacing it with SSA values and
474  // PHIs if the current function is a coroutine. We don't do it for all
475  // functions as it may result in slight increase in numbers of instructions
476  // if compiled with no optimizations. We do it for coroutine as the lifetime
477  // of CleanupDestSlot alloca make correct coroutine frame building very
478  // difficult.
479  if (NormalCleanupDest.isValid() && isCoroutine()) {
480  llvm::DominatorTree DT(*CurFn);
481  llvm::PromoteMemToReg(
482  cast<llvm::AllocaInst>(NormalCleanupDest.getPointer()), DT);
483  NormalCleanupDest = Address::invalid();
484  }
485 
486  // Scan function arguments for vector width.
487  for (llvm::Argument &A : CurFn->args())
488  if (auto *VT = dyn_cast<llvm::VectorType>(A.getType()))
489  LargestVectorWidth = std::max((uint64_t)LargestVectorWidth,
490  VT->getPrimitiveSizeInBits().getFixedSize());
491 
492  // Update vector width based on return type.
493  if (auto *VT = dyn_cast<llvm::VectorType>(CurFn->getReturnType()))
494  LargestVectorWidth = std::max((uint64_t)LargestVectorWidth,
495  VT->getPrimitiveSizeInBits().getFixedSize());
496 
497  // Add the required-vector-width attribute. This contains the max width from:
498  // 1. min-vector-width attribute used in the source program.
499  // 2. Any builtins used that have a vector width specified.
500  // 3. Values passed in and out of inline assembly.
501  // 4. Width of vector arguments and return types for this function.
502  // 5. Width of vector arguments and return types for functions called by this
503  // function.
504  CurFn->addFnAttr("min-legal-vector-width", llvm::utostr(LargestVectorWidth));
505 
506  // If we generated an unreachable return block, delete it now.
507  if (ReturnBlock.isValid() && ReturnBlock.getBlock()->use_empty()) {
508  Builder.ClearInsertionPoint();
509  ReturnBlock.getBlock()->eraseFromParent();
510  }
511  if (ReturnValue.isValid()) {
512  auto *RetAlloca = dyn_cast<llvm::AllocaInst>(ReturnValue.getPointer());
513  if (RetAlloca && RetAlloca->use_empty()) {
514  RetAlloca->eraseFromParent();
515  ReturnValue = Address::invalid();
516  }
517  }
518 }
519 
520 /// ShouldInstrumentFunction - Return true if the current function should be
521 /// instrumented with __cyg_profile_func_* calls
522 bool CodeGenFunction::ShouldInstrumentFunction() {
523  if (!CGM.getCodeGenOpts().InstrumentFunctions &&
524  !CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining &&
525  !CGM.getCodeGenOpts().InstrumentFunctionEntryBare)
526  return false;
527  if (!CurFuncDecl || CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>())
528  return false;
529  return true;
530 }
531 
532 /// ShouldXRayInstrument - Return true if the current function should be
533 /// instrumented with XRay nop sleds.
534 bool CodeGenFunction::ShouldXRayInstrumentFunction() const {
535  return CGM.getCodeGenOpts().XRayInstrumentFunctions;
536 }
537 
538 /// AlwaysEmitXRayCustomEvents - Return true if we should emit IR for calls to
539 /// the __xray_customevent(...) builtin calls, when doing XRay instrumentation.
540 bool CodeGenFunction::AlwaysEmitXRayCustomEvents() const {
541  return CGM.getCodeGenOpts().XRayInstrumentFunctions &&
542  (CGM.getCodeGenOpts().XRayAlwaysEmitCustomEvents ||
543  CGM.getCodeGenOpts().XRayInstrumentationBundle.Mask ==
544  XRayInstrKind::Custom);
545 }
546 
547 bool CodeGenFunction::AlwaysEmitXRayTypedEvents() const {
548  return CGM.getCodeGenOpts().XRayInstrumentFunctions &&
549  (CGM.getCodeGenOpts().XRayAlwaysEmitTypedEvents ||
550  CGM.getCodeGenOpts().XRayInstrumentationBundle.Mask ==
551  XRayInstrKind::Typed);
552 }
553 
554 llvm::Constant *
555 CodeGenFunction::EncodeAddrForUseInPrologue(llvm::Function *F,
556  llvm::Constant *Addr) {
557  // Addresses stored in prologue data can't require run-time fixups and must
558  // be PC-relative. Run-time fixups are undesirable because they necessitate
559  // writable text segments, which are unsafe. And absolute addresses are
560  // undesirable because they break PIE mode.
561 
562  // Add a layer of indirection through a private global. Taking its address
563  // won't result in a run-time fixup, even if Addr has linkonce_odr linkage.
564  auto *GV = new llvm::GlobalVariable(CGM.getModule(), Addr->getType(),
565  /*isConstant=*/true,
566  llvm::GlobalValue::PrivateLinkage, Addr);
567 
568  // Create a PC-relative address.
569  auto *GOTAsInt = llvm::ConstantExpr::getPtrToInt(GV, IntPtrTy);
570  auto *FuncAsInt = llvm::ConstantExpr::getPtrToInt(F, IntPtrTy);
571  auto *PCRelAsInt = llvm::ConstantExpr::getSub(GOTAsInt, FuncAsInt);
572  return (IntPtrTy == Int32Ty)
573  ? PCRelAsInt
574  : llvm::ConstantExpr::getTrunc(PCRelAsInt, Int32Ty);
575 }
576 
577 llvm::Value *
578 CodeGenFunction::DecodeAddrUsedInPrologue(llvm::Value *F,
579  llvm::Value *EncodedAddr) {
580  // Reconstruct the address of the global.
581  auto *PCRelAsInt = Builder.CreateSExt(EncodedAddr, IntPtrTy);
582  auto *FuncAsInt = Builder.CreatePtrToInt(F, IntPtrTy, "func_addr.int");
583  auto *GOTAsInt = Builder.CreateAdd(PCRelAsInt, FuncAsInt, "global_addr.int");
584  auto *GOTAddr = Builder.CreateIntToPtr(GOTAsInt, Int8PtrPtrTy, "global_addr");
585 
586  // Load the original pointer through the global.
587  return Builder.CreateLoad(Address(GOTAddr, getPointerAlign()),
588  "decoded_addr");
589 }
590 
591 void CodeGenFunction::EmitOpenCLKernelMetadata(const FunctionDecl *FD,
592  llvm::Function *Fn)
593 {
594  if (!FD->hasAttr<OpenCLKernelAttr>())
595  return;
596 
597  llvm::LLVMContext &Context = getLLVMContext();
598 
599  CGM.GenOpenCLArgMetadata(Fn, FD, this);
600 
601  if (const VecTypeHintAttr *A = FD->getAttr<VecTypeHintAttr>()) {
602  QualType HintQTy = A->getTypeHint();
603  const ExtVectorType *HintEltQTy = HintQTy->getAs<ExtVectorType>();
604  bool IsSignedInteger =
605  HintQTy->isSignedIntegerType() ||
606  (HintEltQTy && HintEltQTy->getElementType()->isSignedIntegerType());
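 // The !vec_type_hint metadata takes two operands: an undef value of the
 // hinted type, and an i32 that is 1 if the (element) type is signed and 0
 // otherwise.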
607  llvm::Metadata *AttrMDArgs[] = {
608  llvm::ConstantAsMetadata::get(llvm::UndefValue::get(
609  CGM.getTypes().ConvertType(A->getTypeHint()))),
610  llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
611  llvm::IntegerType::get(Context, 32),
612  llvm::APInt(32, (uint64_t)(IsSignedInteger ? 1 : 0))))};
613  Fn->setMetadata("vec_type_hint", llvm::MDNode::get(Context, AttrMDArgs));
614  }
615 
616  if (const WorkGroupSizeHintAttr *A = FD->getAttr<WorkGroupSizeHintAttr>()) {
617  llvm::Metadata *AttrMDArgs[] = {
618  llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())),
619  llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())),
620  llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))};
621  Fn->setMetadata("work_group_size_hint", llvm::MDNode::get(Context, AttrMDArgs));
622  }
623 
624  if (const ReqdWorkGroupSizeAttr *A = FD->getAttr<ReqdWorkGroupSizeAttr>()) {
625  llvm::Metadata *AttrMDArgs[] = {
626  llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())),
627  llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())),
628  llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))};
629  Fn->setMetadata("reqd_work_group_size", llvm::MDNode::get(Context, AttrMDArgs));
630  }
631 
632  if (const OpenCLIntelReqdSubGroupSizeAttr *A =
633  FD->getAttr<OpenCLIntelReqdSubGroupSizeAttr>()) {
634  llvm::Metadata *AttrMDArgs[] = {
635  llvm::ConstantAsMetadata::get(Builder.getInt32(A->getSubGroupSize()))};
636  Fn->setMetadata("intel_reqd_sub_group_size",
637  llvm::MDNode::get(Context, AttrMDArgs));
638  }
639 }
640 
641 /// Determine whether the function F ends with a return stmt.
642 static bool endsWithReturn(const Decl* F) {
643  const Stmt *Body = nullptr;
644  if (auto *FD = dyn_cast_or_null<FunctionDecl>(F))
645  Body = FD->getBody();
646  else if (auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(F))
647  Body = OMD->getBody();
648 
649  if (auto *CS = dyn_cast_or_null<CompoundStmt>(Body)) {
650  auto LastStmt = CS->body_rbegin();
651  if (LastStmt != CS->body_rend())
652  return isa<ReturnStmt>(*LastStmt);
653  }
654  return false;
655 }
656 
657 void CodeGenFunction::markAsIgnoreThreadCheckingAtRuntime(llvm::Function *Fn) {
658  if (SanOpts.has(SanitizerKind::Thread)) {
659  Fn->addFnAttr("sanitize_thread_no_checking_at_run_time");
660  Fn->removeFnAttr(llvm::Attribute::SanitizeThread);
661  }
662 }
663 
664 /// Check if the return value of this function requires sanitization.
665 bool CodeGenFunction::requiresReturnValueCheck() const {
666  return requiresReturnValueNullabilityCheck() ||
667  (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute) && CurCodeDecl &&
668  CurCodeDecl->getAttr<ReturnsNonNullAttr>());
669 }
670 
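/// Return true if D is a member function named 'allocate' taking a size_t and
/// optionally a 'const void *', i.e. an STL-style allocator allocate(); such
/// functions are exempted from the cfi-unrelated-cast check below.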
671 static bool matchesStlAllocatorFn(const Decl *D, const ASTContext &Ctx) {
672  auto *MD = dyn_cast_or_null<CXXMethodDecl>(D);
673  if (!MD || !MD->getDeclName().getAsIdentifierInfo() ||
674  !MD->getDeclName().getAsIdentifierInfo()->isStr("allocate") ||
675  (MD->getNumParams() != 1 && MD->getNumParams() != 2))
676  return false;
677 
678  if (MD->parameters()[0]->getType().getCanonicalType() != Ctx.getSizeType())
679  return false;
680 
681  if (MD->getNumParams() == 2) {
682  auto *PT = MD->parameters()[1]->getType()->getAs<PointerType>();
683  if (!PT || !PT->isVoidPointerType() ||
684  !PT->getPointeeType().isConstQualified())
685  return false;
686  }
687 
688  return true;
689 }
690 
691 /// Return the UBSan prologue signature for \p FD if one is available.
692 static llvm::Constant *getPrologueSignature(CodeGenModule &CGM,
693  const FunctionDecl *FD) {
694  if (const auto *MD = dyn_cast<CXXMethodDecl>(FD))
695  if (!MD->isStatic())
696  return nullptr;
697  return CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM);
698 }
699 
700 void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
701  llvm::Function *Fn,
702  const CGFunctionInfo &FnInfo,
703  const FunctionArgList &Args,
704  SourceLocation Loc,
705  SourceLocation StartLoc) {
706  assert(!CurFn &&
707  "Do not use a CodeGenFunction object for more than one function");
708 
709  const Decl *D = GD.getDecl();
710 
711  DidCallStackSave = false;
712  CurCodeDecl = D;
713  if (const auto *FD = dyn_cast_or_null<FunctionDecl>(D))
714  if (FD->usesSEHTry())
715  CurSEHParent = FD;
716  CurFuncDecl = (D ? D->getNonClosureContext() : nullptr);
717  FnRetTy = RetTy;
718  CurFn = Fn;
719  CurFnInfo = &FnInfo;
720  assert(CurFn->isDeclaration() && "Function already has body?");
721 
722  // If this function has been blacklisted for any of the enabled sanitizers,
723  // disable the sanitizer for the function.
724  do {
725 #define SANITIZER(NAME, ID) \
726  if (SanOpts.empty()) \
727  break; \
728  if (SanOpts.has(SanitizerKind::ID)) \
729  if (CGM.isInSanitizerBlacklist(SanitizerKind::ID, Fn, Loc)) \
730  SanOpts.set(SanitizerKind::ID, false);
731 
732 #include "clang/Basic/Sanitizers.def"
733 #undef SANITIZER
734  } while (0);
735 
736  if (D) {
737  // Apply the no_sanitize* attributes to SanOpts.
738  for (auto Attr : D->specific_attrs<NoSanitizeAttr>()) {
739  SanitizerMask mask = Attr->getMask();
740  SanOpts.Mask &= ~mask;
741  if (mask & SanitizerKind::Address)
742  SanOpts.set(SanitizerKind::KernelAddress, false);
743  if (mask & SanitizerKind::KernelAddress)
744  SanOpts.set(SanitizerKind::Address, false);
745  if (mask & SanitizerKind::HWAddress)
746  SanOpts.set(SanitizerKind::KernelHWAddress, false);
747  if (mask & SanitizerKind::KernelHWAddress)
748  SanOpts.set(SanitizerKind::HWAddress, false);
749  }
750  }
751 
752  // Apply sanitizer attributes to the function.
753  if (SanOpts.hasOneOf(SanitizerKind::Address | SanitizerKind::KernelAddress))
754  Fn->addFnAttr(llvm::Attribute::SanitizeAddress);
755  if (SanOpts.hasOneOf(SanitizerKind::HWAddress | SanitizerKind::KernelHWAddress))
756  Fn->addFnAttr(llvm::Attribute::SanitizeHWAddress);
757  if (SanOpts.has(SanitizerKind::MemTag))
758  Fn->addFnAttr(llvm::Attribute::SanitizeMemTag);
759  if (SanOpts.has(SanitizerKind::Thread))
760  Fn->addFnAttr(llvm::Attribute::SanitizeThread);
761  if (SanOpts.hasOneOf(SanitizerKind::Memory | SanitizerKind::KernelMemory))
762  Fn->addFnAttr(llvm::Attribute::SanitizeMemory);
763  if (SanOpts.has(SanitizerKind::SafeStack))
764  Fn->addFnAttr(llvm::Attribute::SafeStack);
765  if (SanOpts.has(SanitizerKind::ShadowCallStack))
766  Fn->addFnAttr(llvm::Attribute::ShadowCallStack);
767 
768  // Apply fuzzing attribute to the function.
769  if (SanOpts.hasOneOf(SanitizerKind::Fuzzer | SanitizerKind::FuzzerNoLink))
770  Fn->addFnAttr(llvm::Attribute::OptForFuzzing);
771 
772  // Ignore TSan memory accesses from within ObjC/ObjC++ dealloc, initialize,
773  // .cxx_destruct, __destroy_helper_block_ and all of their callees at run time.
774  if (SanOpts.has(SanitizerKind::Thread)) {
775  if (const auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(D)) {
776  IdentifierInfo *II = OMD->getSelector().getIdentifierInfoForSlot(0);
777  if (OMD->getMethodFamily() == OMF_dealloc ||
778  OMD->getMethodFamily() == OMF_initialize ||
779  (OMD->getSelector().isUnarySelector() && II->isStr(".cxx_destruct"))) {
780  markAsIgnoreThreadCheckingAtRuntime(Fn);
781  }
782  }
783  }
784 
785  // Ignore unrelated casts in STL allocate() since the allocator must cast
786  // from void* to T* before object initialization completes. Don't match on the
787  // namespace because not all allocators are in std::
788  if (D && SanOpts.has(SanitizerKind::CFIUnrelatedCast)) {
789  if (matchesStlAllocatorFn(D, getContext()))
790  SanOpts.Mask &= ~SanitizerKind::CFIUnrelatedCast;
791  }
792 
793  // Ignore null checks in coroutine functions since the coroutines passes
794  // are not aware of how to move the extra UBSan instructions across the split
795  // coroutine boundaries.
796  if (D && SanOpts.has(SanitizerKind::Null))
797  if (const auto *FD = dyn_cast<FunctionDecl>(D))
798  if (FD->getBody() &&
799  FD->getBody()->getStmtClass() == Stmt::CoroutineBodyStmtClass)
800  SanOpts.Mask &= ~SanitizerKind::Null;
801 
802  if (D) {
803  // Apply xray attributes to the function (as a string, for now)
804  if (const auto *XRayAttr = D->getAttr<XRayInstrumentAttr>()) {
805  if (CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
806  XRayInstrKind::Function)) {
807  if (XRayAttr->alwaysXRayInstrument() && ShouldXRayInstrumentFunction())
808  Fn->addFnAttr("function-instrument", "xray-always");
809  if (XRayAttr->neverXRayInstrument())
810  Fn->addFnAttr("function-instrument", "xray-never");
811  if (const auto *LogArgs = D->getAttr<XRayLogArgsAttr>())
812  if (ShouldXRayInstrumentFunction())
813  Fn->addFnAttr("xray-log-args",
814  llvm::utostr(LogArgs->getArgumentCount()));
815  }
816  } else {
817  if (ShouldXRayInstrumentFunction() && !CGM.imbueXRayAttrs(Fn, Loc))
818  Fn->addFnAttr(
819  "xray-instruction-threshold",
820  llvm::itostr(CGM.getCodeGenOpts().XRayInstructionThreshold));
821  }
822 
823  unsigned Count, Offset;
824  if (const auto *Attr = D->getAttr<PatchableFunctionEntryAttr>()) {
825  Count = Attr->getCount();
826  Offset = Attr->getOffset();
827  } else {
828  Count = CGM.getCodeGenOpts().PatchableFunctionEntryCount;
829  Offset = CGM.getCodeGenOpts().PatchableFunctionEntryOffset;
830  }
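 // 'patchable-function-entry' requests Count - Offset NOPs after the function
 // entry point and 'patchable-function-prefix' requests Offset NOPs before it,
 // matching -fpatchable-function-entry=N,M.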
831  if (Count && Offset <= Count) {
832  Fn->addFnAttr("patchable-function-entry", std::to_string(Count - Offset));
833  if (Offset)
834  Fn->addFnAttr("patchable-function-prefix", std::to_string(Offset));
835  }
836  }
837 
838  // Add no-jump-tables value.
839  Fn->addFnAttr("no-jump-tables",
840  llvm::toStringRef(CGM.getCodeGenOpts().NoUseJumpTables));
841 
842  // Add no-inline-line-tables value.
843  if (CGM.getCodeGenOpts().NoInlineLineTables)
844  Fn->addFnAttr("no-inline-line-tables");
845 
846  // Add profile-sample-accurate value.
847  if (CGM.getCodeGenOpts().ProfileSampleAccurate)
848  Fn->addFnAttr("profile-sample-accurate");
849 
850  if (D && D->hasAttr<CFICanonicalJumpTableAttr>())
851  Fn->addFnAttr("cfi-canonical-jump-table");
852 
853  if (getLangOpts().OpenCL) {
854  // Add metadata for a kernel function.
855  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
856  EmitOpenCLKernelMetadata(FD, Fn);
857  }
858 
859  // If we are checking function types, emit a function type signature as
860  // prologue data.
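 // The prologue data is a packed {signature, PC-relative RTTI pointer} pair
 // that the -fsanitize=function check reads at indirect call sites to verify
 // the callee's declared function type.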
861  if (SanOpts.has(SanitizerKind::Function)) {
862  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
863  if (llvm::Constant *PrologueSig = getPrologueSignature(CGM, FD)) {
864  // Remove any (C++17) exception specifications, to allow calling e.g. a
865  // noexcept function through a non-noexcept pointer.
866  auto ProtoTy =
867  getContext().getFunctionTypeWithExceptionSpec(FD->getType(),
868  EST_None);
869  llvm::Constant *FTRTTIConst =
870  CGM.GetAddrOfRTTIDescriptor(ProtoTy, /*ForEH=*/true);
871  llvm::Constant *FTRTTIConstEncoded =
872  EncodeAddrForUseInPrologue(Fn, FTRTTIConst);
873  llvm::Constant *PrologueStructElems[] = {PrologueSig,
874  FTRTTIConstEncoded};
875  llvm::Constant *PrologueStructConst =
876  llvm::ConstantStruct::getAnon(PrologueStructElems, /*Packed=*/true);
877  Fn->setPrologueData(PrologueStructConst);
878  }
879  }
880  }
881 
882  // If we're checking nullability, we need to know whether we can check the
883  // return value. Initialize the flag to 'true' and refine it in EmitParmDecl.
884  if (SanOpts.has(SanitizerKind::NullabilityReturn)) {
885  auto Nullability = FnRetTy->getNullability(getContext());
886  if (Nullability && *Nullability == NullabilityKind::NonNull) {
887  if (!(SanOpts.has(SanitizerKind::ReturnsNonnullAttribute) &&
888  CurCodeDecl && CurCodeDecl->getAttr<ReturnsNonNullAttr>()))
889  RetValNullabilityPrecondition =
890  llvm::ConstantInt::getTrue(getLLVMContext());
891  }
892  }
893 
894  // If we're in C++ mode and the function name is "main", it is guaranteed
895  // to be norecurse by the standard (3.6.1.3 "The function main shall not be
896  // used within a program").
897  if (getLangOpts().CPlusPlus)
898  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
899  if (FD->isMain())
900  Fn->addFnAttr(llvm::Attribute::NoRecurse);
901 
902  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
903  if (FD->usesFPIntrin())
904  Fn->addFnAttr(llvm::Attribute::StrictFP);
905 
906  // If a custom alignment is used, force realigning to this alignment on
907  // any main function which certainly will need it.
908  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
909  if ((FD->isMain() || FD->isMSVCRTEntryPoint()) &&
910  CGM.getCodeGenOpts().StackAlignment)
911  Fn->addFnAttr("stackrealign");
912 
913  llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);
914 
915  // Create a marker to make it easy to insert allocas into the entryblock
916  // later. Don't create this with the builder, because we don't want it
917  // folded.
918  llvm::Value *Undef = llvm::UndefValue::get(Int32Ty);
919  AllocaInsertPt = new llvm::BitCastInst(Undef, Int32Ty, "allocapt", EntryBB);
920 
921  ReturnBlock = getJumpDestInCurrentScope("return");
922 
923  Builder.SetInsertPoint(EntryBB);
924 
925  // If we're checking the return value, allocate space for a pointer to a
926  // precise source location of the checked return statement.
927  if (requiresReturnValueCheck()) {
928  ReturnLocation = CreateDefaultAlignTempAlloca(Int8PtrTy, "return.sloc.ptr");
929  InitTempAlloca(ReturnLocation, llvm::ConstantPointerNull::get(Int8PtrTy));
930  }
931 
932  // Emit subprogram debug descriptor.
933  if (CGDebugInfo *DI = getDebugInfo()) {
934  // Reconstruct the type from the argument list so that implicit parameters,
935  // such as 'this' and 'vtt', show up in the debug info. Preserve the calling
936  // convention.
937  CallingConv CC = CallingConv::CC_C;
938  if (auto *FD = dyn_cast_or_null<FunctionDecl>(D))
939  if (const auto *SrcFnTy = FD->getType()->getAs<FunctionType>())
940  CC = SrcFnTy->getCallConv();
941  SmallVector<QualType, 16> ArgTypes;
942  for (const VarDecl *VD : Args)
943  ArgTypes.push_back(VD->getType());
944  QualType FnType = getContext().getFunctionType(
945  RetTy, ArgTypes, FunctionProtoType::ExtProtoInfo(CC));
946  DI->EmitFunctionStart(GD, Loc, StartLoc, FnType, CurFn, CurFuncIsThunk,
947  Builder);
948  }
949 
950  if (ShouldInstrumentFunction()) {
951  if (CGM.getCodeGenOpts().InstrumentFunctions)
952  CurFn->addFnAttr("instrument-function-entry", "__cyg_profile_func_enter");
953  if (CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining)
954  CurFn->addFnAttr("instrument-function-entry-inlined",
955  "__cyg_profile_func_enter");
956  if (CGM.getCodeGenOpts().InstrumentFunctionEntryBare)
957  CurFn->addFnAttr("instrument-function-entry-inlined",
958  "__cyg_profile_func_enter_bare");
959  }
960 
961  // Since emitting the mcount call here impacts optimizations such as function
962  // inlining, we just add an attribute to insert an mcount call in the backend.
963  // The attribute "counting-function" is set to mcount function name which is
964  // architecture dependent.
965  if (CGM.getCodeGenOpts().InstrumentForProfiling) {
966  // Calls to fentry/mcount should not be generated if function has
967  // the no_instrument_function attribute.
968  if (!CurFuncDecl || !CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>()) {
969  if (CGM.getCodeGenOpts().CallFEntry)
970  Fn->addFnAttr("fentry-call", "true");
971  else {
972  Fn->addFnAttr("instrument-function-entry-inlined",
973  getTarget().getMCountName());
974  }
975  if (CGM.getCodeGenOpts().MNopMCount) {
976  if (!CGM.getCodeGenOpts().CallFEntry)
977  CGM.getDiags().Report(diag::err_opt_not_valid_without_opt)
978  << "-mnop-mcount" << "-mfentry";
979  Fn->addFnAttr("mnop-mcount");
980  }
981 
982  if (CGM.getCodeGenOpts().RecordMCount) {
983  if (!CGM.getCodeGenOpts().CallFEntry)
984  CGM.getDiags().Report(diag::err_opt_not_valid_without_opt)
985  << "-mrecord-mcount" << "-mfentry";
986  Fn->addFnAttr("mrecord-mcount");
987  }
988  }
989  }
990 
991  if (CGM.getCodeGenOpts().PackedStack) {
992  if (getContext().getTargetInfo().getTriple().getArch() !=
993  llvm::Triple::systemz)
994  CGM.getDiags().Report(diag::err_opt_not_valid_on_target)
995  << "-mpacked-stack";
996  Fn->addFnAttr("packed-stack");
997  }
998 
999  if (RetTy->isVoidType()) {
1000  // Void type; nothing to return.
1001  ReturnValue = Address::invalid();
1002 
1003  // Count the implicit return.
1004  if (!endsWithReturn(D))
1005  ++NumReturnExprs;
1006  } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect) {
1007  // Indirect return; emit returned value directly into sret slot.
1008  // This reduces code size, and affects correctness in C++.
1009  auto AI = CurFn->arg_begin();
1010  if (CurFnInfo->getReturnInfo().isSRetAfterThis())
1011  ++AI;
1012  ReturnValue = Address(&*AI, CurFnInfo->getReturnInfo().getIndirectAlign());
1013  if (!CurFnInfo->getReturnInfo().getIndirectByVal()) {
1014  ReturnValuePointer =
1015  CreateDefaultAlignTempAlloca(Int8PtrTy, "result.ptr");
1016  Builder.CreateStore(Builder.CreatePointerBitCastOrAddrSpaceCast(
1017  ReturnValue.getPointer(), Int8PtrTy),
1018  ReturnValuePointer);
1019  }
1020  } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::InAlloca &&
1021  !hasScalarEvaluationKind(CurFnInfo->getReturnType())) {
1022  // Load the sret pointer from the argument struct and return into that.
1023  unsigned Idx = CurFnInfo->getReturnInfo().getInAllocaFieldIndex();
1024  llvm::Function::arg_iterator EI = CurFn->arg_end();
1025  --EI;
1026  llvm::Value *Addr = Builder.CreateStructGEP(nullptr, &*EI, Idx);
1027  ReturnValuePointer = Address(Addr, getPointerAlign());
1028  Addr = Builder.CreateAlignedLoad(Addr, getPointerAlign(), "agg.result");
1029  ReturnValue = Address(Addr, getNaturalTypeAlignment(RetTy));
1030  } else {
1031  ReturnValue = CreateIRTemp(RetTy, "retval");
1032 
1033  // Tell the epilog emitter to autorelease the result. We do this
1034  // now so that various specialized functions can suppress it
1035  // during their IR-generation.
1036  if (getLangOpts().ObjCAutoRefCount &&
1037  !CurFnInfo->isReturnsRetained() &&
1038  RetTy->isObjCRetainableType())
1039  AutoreleaseResult = true;
1040  }
1041 
1043 
1044  PrologueCleanupDepth = EHStack.stable_begin();
1045 
1046  // Emit OpenMP specific initialization of the device functions.
1047  if (getLangOpts().OpenMP && CurCodeDecl)
1048  CGM.getOpenMPRuntime().emitFunctionProlog(*this, CurCodeDecl);
1049 
1050  EmitFunctionProlog(*CurFnInfo, CurFn, Args);
1051 
1052  if (D && isa<CXXMethodDecl>(D) && cast<CXXMethodDecl>(D)->isInstance()) {
1053  CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
1054  const CXXMethodDecl *MD = cast<CXXMethodDecl>(D);
1055  if (MD->getParent()->isLambda() &&
1056  MD->getOverloadedOperator() == OO_Call) {
1057  // We're in a lambda; figure out the captures.
1058  MD->getParent()->getCaptureFields(LambdaCaptureFields,
1059  LambdaThisCaptureField);
1060  if (LambdaThisCaptureField) {
1061  // If the lambda captures the object referred to by '*this' - either by
1062  // value or by reference, make sure CXXThisValue points to the correct
1063  // object.
1064 
1065  // Get the lvalue for the field (which is a copy of the enclosing object
1066  // or contains the address of the enclosing object).
1067  LValue ThisFieldLValue = EmitLValueForLambdaField(LambdaThisCaptureField);
1068  if (!LambdaThisCaptureField->getType()->isPointerType()) {
1069  // If the enclosing object was captured by value, just use its address.
1070  CXXThisValue = ThisFieldLValue.getAddress(*this).getPointer();
1071  } else {
1072  // Load the lvalue pointed to by the field, since '*this' was captured
1073  // by reference.
1074  CXXThisValue =
1075  EmitLoadOfLValue(ThisFieldLValue, SourceLocation()).getScalarVal();
1076  }
1077  }
1078  for (auto *FD : MD->getParent()->fields()) {
1079  if (FD->hasCapturedVLAType()) {
1080  auto *ExprArg = EmitLoadOfLValue(EmitLValueForLambdaField(FD),
1081  SourceLocation()).getScalarVal();
1082  auto VAT = FD->getCapturedVLAType();
1083  VLASizeMap[VAT->getSizeExpr()] = ExprArg;
1084  }
1085  }
1086  } else {
1087  // Not in a lambda; just use 'this' from the method.
1088  // FIXME: Should we generate a new load for each use of 'this'? The
1089  // fast register allocator would be happier...
1090  CXXThisValue = CXXABIThisValue;
1091  }
1092 
1093  // Check the 'this' pointer once per function, if it's available.
1094  if (CXXABIThisValue) {
1095  SanitizerSet SkippedChecks;
1096  SkippedChecks.set(SanitizerKind::ObjectSize, true);
1097  QualType ThisTy = MD->getThisType();
1098 
1099  // If this is the call operator of a lambda with no capture-default, it
1100  // may have a static invoker function, which may call this operator with
1101  // a null 'this' pointer.
1102  if (isLambdaCallOperator(MD) &&
1103  MD->getParent()->getLambdaCaptureDefault() == LCD_None)
1104  SkippedChecks.set(SanitizerKind::Null, true);
1105 
1106  EmitTypeCheck(isa<CXXConstructorDecl>(MD) ? TCK_ConstructorCall
1107  : TCK_MemberCall,
1108  Loc, CXXABIThisValue, ThisTy,
1109  getContext().getTypeAlignInChars(ThisTy->getPointeeType()),
1110  SkippedChecks);
1111  }
1112  }
1113 
1114  // If any of the arguments have a variably modified type, make sure to
1115  // emit the type size.
1116  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
1117  i != e; ++i) {
1118  const VarDecl *VD = *i;
1119 
1120  // Dig out the type as written from ParmVarDecls; it's unclear whether
1121  // the standard (C99 6.9.1p10) requires this, but we're following the
1122  // precedent set by gcc.
1123  QualType Ty;
1124  if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(VD))
1125  Ty = PVD->getOriginalType();
1126  else
1127  Ty = VD->getType();
1128 
1129  if (Ty->isVariablyModifiedType())
1130  EmitVariablyModifiedType(Ty);
1131  }
1132  // Emit a location at the end of the prologue.
1133  if (CGDebugInfo *DI = getDebugInfo())
1134  DI->EmitLocation(Builder, StartLoc);
1135 
1136  // TODO: Do we need to handle this in two places like we do with
1137  // target-features/target-cpu?
1138  if (CurFuncDecl)
1139  if (const auto *VecWidth = CurFuncDecl->getAttr<MinVectorWidthAttr>())
1140  LargestVectorWidth = VecWidth->getVectorWidth();
1141 }
1142 
1143 void CodeGenFunction::EmitFunctionBody(const Stmt *Body) {
1144  incrementProfileCounter(Body);
1145  if (const CompoundStmt *S = dyn_cast<CompoundStmt>(Body))
1146  EmitCompoundStmtWithoutScope(*S);
1147  else
1148  EmitStmt(Body);
1149 }
1150 
1151 /// When instrumenting to collect profile data, the counts for some blocks
1152 /// such as switch cases need to not include the fall-through counts, so
1153 /// emit a branch around the instrumentation code. When not instrumenting,
1154 /// this just calls EmitBlock().
1155 void CodeGenFunction::EmitBlockWithFallThrough(llvm::BasicBlock *BB,
1156  const Stmt *S) {
1157  llvm::BasicBlock *SkipCountBB = nullptr;
1158  if (HaveInsertPoint() && CGM.getCodeGenOpts().hasProfileClangInstr()) {
1159  // When instrumenting for profiling, the fallthrough to certain
1160  // statements needs to skip over the instrumentation code so that we
1161  // get an accurate count.
1162  SkipCountBB = createBasicBlock("skipcount");
1163  EmitBranch(SkipCountBB);
1164  }
1165  EmitBlock(BB);
1166  uint64_t CurrentCount = getCurrentProfileCount();
1167  incrementProfileCounter(S);
1168  setCurrentProfileCount(getCurrentProfileCount() + CurrentCount);
1169  if (SkipCountBB)
1170  EmitBlock(SkipCountBB);
1171 }
1172 
1173 /// Tries to mark the given function nounwind based on the
1174 /// non-existence of any throwing calls within it. We believe this is
1175 /// lightweight enough to do at -O0.
1176 static void TryMarkNoThrow(llvm::Function *F) {
1177  // LLVM treats 'nounwind' on a function as part of the type, so we
1178  // can't do this on functions that can be overwritten.
1179  if (F->isInterposable()) return;
1180 
1181  for (llvm::BasicBlock &BB : *F)
1182  for (llvm::Instruction &I : BB)
1183  if (I.mayThrow())
1184  return;
1185 
1186  F->setDoesNotThrow();
1187 }
1188 
1189 QualType CodeGenFunction::BuildFunctionArgList(GlobalDecl GD,
1190  FunctionArgList &Args) {
1191  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
1192  QualType ResTy = FD->getReturnType();
1193 
1194  const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
1195  if (MD && MD->isInstance()) {
1196  if (CGM.getCXXABI().HasThisReturn(GD))
1197  ResTy = MD->getThisType();
1198  else if (CGM.getCXXABI().hasMostDerivedReturn(GD))
1199  ResTy = CGM.getContext().VoidPtrTy;
1200  CGM.getCXXABI().buildThisParam(*this, Args);
1201  }
1202 
1203  // The base version of an inheriting constructor whose constructed base is a
1204  // virtual base is not passed any arguments (because it doesn't actually call
1205  // the inherited constructor).
1206  bool PassedParams = true;
1207  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
1208  if (auto Inherited = CD->getInheritedConstructor())
1209  PassedParams =
1210  getTypes().inheritingCtorHasParams(Inherited, GD.getCtorType());
1211 
1212  if (PassedParams) {
1213  for (auto *Param : FD->parameters()) {
1214  Args.push_back(Param);
1215  if (!Param->hasAttr<PassObjectSizeAttr>())
1216  continue;
1217 
1218  auto *Implicit = ImplicitParamDecl::Create(
1219  getContext(), Param->getDeclContext(), Param->getLocation(),
1220  /*Id=*/nullptr, getContext().getSizeType(), ImplicitParamDecl::Other);
1221  SizeArguments[Param] = Implicit;
1222  Args.push_back(Implicit);
1223  }
1224  }
1225 
1226  if (MD && (isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD)))
1227  CGM.getCXXABI().addImplicitStructorParams(*this, ResTy, Args);
1228 
1229  return ResTy;
1230 }
1231 
1232 static bool
1233 shouldUseUndefinedBehaviorReturnOptimization(const FunctionDecl *FD,
1234  const ASTContext &Context) {
1235  QualType T = FD->getReturnType();
1236  // Avoid the optimization for functions that return a record type with a
1237  // trivial destructor or another trivially copyable type.
1238  if (const RecordType *RT = T.getCanonicalType()->getAs<RecordType>()) {
1239  if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl()))
1240  return !ClassDecl->hasTrivialDestructor();
1241  }
1242  return !T.isTriviallyCopyableType(Context);
1243 }
1244 
1245 void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
1246  const CGFunctionInfo &FnInfo) {
1247  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
1248  CurGD = GD;
1249 
1250  FunctionArgList Args;
1251  QualType ResTy = BuildFunctionArgList(GD, Args);
1252 
1253  // Check if we should generate debug info for this function.
1254  if (FD->hasAttr<NoDebugAttr>())
1255  DebugInfo = nullptr; // disable debug info indefinitely for this function
1256 
1257  // The function might not have a body if we're generating thunks for a
1258  // function declaration.
1259  SourceRange BodyRange;
1260  if (Stmt *Body = FD->getBody())
1261  BodyRange = Body->getSourceRange();
1262  else
1263  BodyRange = FD->getLocation();
1264  CurEHLocation = BodyRange.getEnd();
1265 
1266  // Use the location of the start of the function to determine where
1267  // the function definition is located. By default use the location
1268  // of the declaration as the location for the subprogram. A function
1269  // may lack a declaration in the source code if it is created by code
1270  // gen. (examples: _GLOBAL__I_a, __cxx_global_array_dtor, thunk).
1271  SourceLocation Loc = FD->getLocation();
1272 
1273  // If this is a function specialization then use the pattern body
1274  // as the location for the function.
1275  if (const FunctionDecl *SpecDecl = FD->getTemplateInstantiationPattern())
1276  if (SpecDecl->hasBody(SpecDecl))
1277  Loc = SpecDecl->getLocation();
1278 
1279  Stmt *Body = FD->getBody();
1280 
1281  // Initialize helper which will detect jumps which can cause invalid lifetime
1282  // markers.
1283  if (Body && ShouldEmitLifetimeMarkers)
1284  Bypasses.Init(Body);
1285 
1286  // Emit the standard function prologue.
1287  StartFunction(GD, ResTy, Fn, FnInfo, Args, Loc, BodyRange.getBegin());
1288 
1289  // Generate the body of the function.
1290  PGO.assignRegionCounters(GD, CurFn);
1291  if (isa<CXXDestructorDecl>(FD))
1292  EmitDestructorBody(Args);
1293  else if (isa<CXXConstructorDecl>(FD))
1294  EmitConstructorBody(Args);
1295  else if (getLangOpts().CUDA &&
1296  !getLangOpts().CUDAIsDevice &&
1297  FD->hasAttr<CUDAGlobalAttr>())
1298  CGM.getCUDARuntime().emitDeviceStub(*this, Args);
1299  else if (isa<CXXMethodDecl>(FD) &&
1300  cast<CXXMethodDecl>(FD)->isLambdaStaticInvoker()) {
1301  // The lambda static invoker function is special, because it forwards or
1302  // clones the body of the function call operator (but is actually static).
1303  EmitLambdaStaticInvokeBody(cast<CXXMethodDecl>(FD));
1304  } else if (FD->isDefaulted() && isa<CXXMethodDecl>(FD) &&
1305  (cast<CXXMethodDecl>(FD)->isCopyAssignmentOperator() ||
1306  cast<CXXMethodDecl>(FD)->isMoveAssignmentOperator())) {
1307  // Implicit copy-assignment gets the same special treatment as implicit
1308  // copy-constructors.
1309  emitImplicitAssignmentOperatorBody(Args);
1310  } else if (Body) {
1311  EmitFunctionBody(Body);
1312  } else
1313  llvm_unreachable("no definition for emitted function");
1314 
1315  // C++11 [stmt.return]p2:
1316  // Flowing off the end of a function [...] results in undefined behavior in
1317  // a value-returning function.
1318  // C11 6.9.1p12:
1319  // If the '}' that terminates a function is reached, and the value of the
1320  // function call is used by the caller, the behavior is undefined.
1321  if (getLangOpts().CPlusPlus && !FD->hasImplicitReturnZero() && !SawAsmBlock &&
1322  !FD->getReturnType()->isVoidType() && Builder.GetInsertBlock()) {
1323  bool ShouldEmitUnreachable =
1324  CGM.getCodeGenOpts().StrictReturn ||
1325  shouldUseUndefinedBehaviorReturnOptimization(FD, getContext());
1326  if (SanOpts.has(SanitizerKind::Return)) {
1327  SanitizerScope SanScope(this);
1328  llvm::Value *IsFalse = Builder.getFalse();
1329  EmitCheck(std::make_pair(IsFalse, SanitizerKind::Return),
1330  SanitizerHandler::MissingReturn,
1331  EmitCheckSourceLocation(FD->getLocation()), None);
1332  } else if (ShouldEmitUnreachable) {
1333  if (CGM.getCodeGenOpts().OptimizationLevel == 0)
1334  EmitTrapCall(llvm::Intrinsic::trap);
1335  }
1336  if (SanOpts.has(SanitizerKind::Return) || ShouldEmitUnreachable) {
1337  Builder.CreateUnreachable();
1338  Builder.ClearInsertionPoint();
1339  }
1340  }
1341 
1342  // Emit the standard function epilogue.
1343  FinishFunction(BodyRange.getEnd());
1344 
1345  // If we haven't marked the function nothrow through other means, do
1346  // a quick pass now to see if we can.
1347  if (!CurFn->doesNotThrow())
1348  TryMarkNoThrow(CurFn);
1349 }
1350 
1351 /// ContainsLabel - Return true if the statement contains a label in it. If
1352 /// this statement is not executed normally, it not containing a label means
1353 /// that we can just remove the code.
1354 bool CodeGenFunction::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) {
1355  // Null statement, not a label!
1356  if (!S) return false;
1357 
1358  // If this is a label, we have to emit the code, consider something like:
1359  // if (0) { ... foo: bar(); } goto foo;
1360  //
1361  // TODO: If anyone cared, we could track __label__'s, since we know that you
1362  // can't jump to one from outside their declared region.
1363  if (isa<LabelStmt>(S))
1364  return true;
1365 
1366  // If this is a case/default statement, and we haven't seen a switch, we have
1367  // to emit the code.
1368  if (isa<SwitchCase>(S) && !IgnoreCaseStmts)
1369  return true;
1370 
1371  // If this is a switch statement, we want to ignore cases below it.
1372  if (isa<SwitchStmt>(S))
1373  IgnoreCaseStmts = true;
1374 
1375  // Scan subexpressions for verboten labels.
1376  for (const Stmt *SubStmt : S->children())
1377  if (ContainsLabel(SubStmt, IgnoreCaseStmts))
1378  return true;
1379 
1380  return false;
1381 }
1382 
1383 /// containsBreak - Return true if the statement contains a break out of it.
1384 /// If the statement (recursively) contains a switch or loop with a break
1385 /// inside of it, this is fine.
1386 bool CodeGenFunction::containsBreak(const Stmt *S) {
1387  // Null statement, not a label!
1388  if (!S) return false;
1389 
1390  // If this is a switch or loop that defines its own break scope, then we can
1391  // include it and anything inside of it.
1392  if (isa<SwitchStmt>(S) || isa<WhileStmt>(S) || isa<DoStmt>(S) ||
1393  isa<ForStmt>(S))
1394  return false;
1395 
1396  if (isa<BreakStmt>(S))
1397  return true;
1398 
1399  // Scan subexpressions for verboten breaks.
1400  for (const Stmt *SubStmt : S->children())
1401  if (containsBreak(SubStmt))
1402  return true;
1403 
1404  return false;
1405 }
1406 
1407 bool CodeGenFunction::mightAddDeclToScope(const Stmt *S) {
1408  if (!S) return false;
1409 
1410  // Some statement kinds add a scope and thus never add a decl to the current
1411  // scope. Note, this list is longer than the list of statements that might
1412  // have an unscoped decl nested within them, but this way is conservatively
1413  // correct even if more statement kinds are added.
1414  if (isa<IfStmt>(S) || isa<SwitchStmt>(S) || isa<WhileStmt>(S) ||
1415  isa<DoStmt>(S) || isa<ForStmt>(S) || isa<CompoundStmt>(S) ||
1416  isa<CXXForRangeStmt>(S) || isa<CXXTryStmt>(S) ||
1417  isa<ObjCForCollectionStmt>(S) || isa<ObjCAtTryStmt>(S))
1418  return false;
1419 
1420  if (isa<DeclStmt>(S))
1421  return true;
1422 
1423  for (const Stmt *SubStmt : S->children())
1424  if (mightAddDeclToScope(SubStmt))
1425  return true;
1426 
1427  return false;
1428 }
1429 
1430 /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
1431 /// to a constant, or if it does but contains a label, return false. If it
1432 /// constant folds return true and set the boolean result in Result.
1433 bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
1434  bool &ResultBool,
1435  bool AllowLabels) {
1436  llvm::APSInt ResultInt;
1437  if (!ConstantFoldsToSimpleInteger(Cond, ResultInt, AllowLabels))
1438  return false;
1439 
1440  ResultBool = ResultInt.getBoolValue();
1441  return true;
1442 }
1443 
1444 /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
1445 /// to a constant, or if it does but contains a label, return false. If it
1446 /// constant folds return true and set the folded value.
1447 bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
1448  llvm::APSInt &ResultInt,
1449  bool AllowLabels) {
1450  // FIXME: Rename and handle conversion of other evaluatable things
1451  // to bool.
1452  Expr::EvalResult Result;
1453  if (!Cond->EvaluateAsInt(Result, getContext()))
1454  return false; // Not foldable, not integer or not fully evaluatable.
1455 
1456  llvm::APSInt Int = Result.Val.getInt();
1457  if (!AllowLabels && CodeGenFunction::ContainsLabel(Cond))
1458  return false; // Contains a label.
1459 
1460  ResultInt = Int;
1461  return true;
1462 }
1463 
1464 
1465 
1466 /// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an if
1467 /// statement) to the specified blocks. Based on the condition, this might try
1468 /// to simplify the codegen of the conditional based on the branch.
1469 ///
1470 void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
1471  llvm::BasicBlock *TrueBlock,
1472  llvm::BasicBlock *FalseBlock,
1473  uint64_t TrueCount) {
1474  Cond = Cond->IgnoreParens();
1475 
1476  if (const BinaryOperator *CondBOp = dyn_cast<BinaryOperator>(Cond)) {
1477 
1478  // Handle X && Y in a condition.
1479  if (CondBOp->getOpcode() == BO_LAnd) {
1480  // If we have "1 && X", simplify the code. "0 && X" would have constant
1481  // folded if the case was simple enough.
1482  bool ConstantBool = false;
1483  if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
1484  ConstantBool) {
1485  // br(1 && X) -> br(X).
1486  incrementProfileCounter(CondBOp);
1487  return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock,
1488  TrueCount);
1489  }
1490 
1491  // If we have "X && 1", simplify the code to use an uncond branch.
1492  // "X && 0" would have been constant folded to 0.
1493  if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
1494  ConstantBool) {
1495  // br(X && 1) -> br(X).
1496  return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock,
1497  TrueCount);
1498  }
1499 
1500  // Emit the LHS as a conditional. If the LHS conditional is false, we
1501  // want to jump to the FalseBlock.
1502  llvm::BasicBlock *LHSTrue = createBasicBlock("land.lhs.true");
1503  // The counter tells us how often we evaluate RHS, and all of TrueCount
1504  // can be propagated to that branch.
1505  uint64_t RHSCount = getProfileCount(CondBOp->getRHS());
1506 
1507  ConditionalEvaluation eval(*this);
1508  {
1509  ApplyDebugLocation DL(*this, Cond);
1510  EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, FalseBlock, RHSCount);
1511  EmitBlock(LHSTrue);
1512  }
1513 
1514  incrementProfileCounter(CondBOp);
1515  setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));
1516 
1517  // Any temporaries created here are conditional.
1518  eval.begin(*this);
1519  EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock, TrueCount);
1520  eval.end(*this);
1521 
1522  return;
1523  }
1524 
1525  if (CondBOp->getOpcode() == BO_LOr) {
1526  // If we have "0 || X", simplify the code. "1 || X" would have constant
1527  // folded if the case was simple enough.
1528  bool ConstantBool = false;
1529  if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
1530  !ConstantBool) {
1531  // br(0 || X) -> br(X).
1532  incrementProfileCounter(CondBOp);
1533  return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock,
1534  TrueCount);
1535  }
1536 
1537  // If we have "X || 0", simplify the code to use an uncond branch.
1538  // "X || 1" would have been constant folded to 1.
1539  if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
1540  !ConstantBool) {
1541  // br(X || 0) -> br(X).
1542  return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock,
1543  TrueCount);
1544  }
1545 
1546  // Emit the LHS as a conditional. If the LHS conditional is true, we
1547  // want to jump to the TrueBlock.
1548  llvm::BasicBlock *LHSFalse = createBasicBlock("lor.lhs.false");
1549  // We have the count for entry to the RHS and for the whole expression
1550  // being true, so we can divvy up True count between the short circuit and
1551  // the RHS.
1552  uint64_t LHSCount =
1553  getCurrentProfileCount() - getProfileCount(CondBOp->getRHS());
1554  uint64_t RHSCount = TrueCount - LHSCount;
1555 
1556  ConditionalEvaluation eval(*this);
1557  {
1558  ApplyDebugLocation DL(*this, Cond);
1559  EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, LHSFalse, LHSCount);
1560  EmitBlock(LHSFalse);
1561  }
1562 
1563  incrementProfileCounter(CondBOp);
1564  setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));
1565 
1566  // Any temporaries created here are conditional.
1567  eval.begin(*this);
1568  EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock, RHSCount);
1569 
1570  eval.end(*this);
1571 
1572  return;
1573  }
1574  }
1575 
1576  if (const UnaryOperator *CondUOp = dyn_cast<UnaryOperator>(Cond)) {
1577  // br(!x, t, f) -> br(x, f, t)
1578  if (CondUOp->getOpcode() == UO_LNot) {
1579  // Negate the count.
1580  uint64_t FalseCount = getCurrentProfileCount() - TrueCount;
1581  // Negate the condition and swap the destination blocks.
1582  return EmitBranchOnBoolExpr(CondUOp->getSubExpr(), FalseBlock, TrueBlock,
1583  FalseCount);
1584  }
1585  }
1586 
1587  if (const ConditionalOperator *CondOp = dyn_cast<ConditionalOperator>(Cond)) {
1588  // br(c ? x : y, t, f) -> br(c, br(x, t, f), br(y, t, f))
1589  llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
1590  llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");
1591 
1592  ConditionalEvaluation cond(*this);
1593  EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock,
1594  getProfileCount(CondOp));
1595 
1596  // When computing PGO branch weights, we only know the overall count for
1597  // the true block. This code is essentially doing tail duplication of the
1598  // naive code-gen, introducing new edges for which counts are not
1599  // available. Divide the counts proportionally between the LHS and RHS of
1600  // the conditional operator.
1601  uint64_t LHSScaledTrueCount = 0;
1602  if (TrueCount) {
1603  double LHSRatio =
1604  getProfileCount(CondOp) / (double)getCurrentProfileCount();
1605  LHSScaledTrueCount = TrueCount * LHSRatio;
1606  }
1607 
1608  cond.begin(*this);
1609  EmitBlock(LHSBlock);
1610  incrementProfileCounter(CondOp);
1611  {
1612  ApplyDebugLocation DL(*this, Cond);
1613  EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock,
1614  LHSScaledTrueCount);
1615  }
1616  cond.end(*this);
1617 
1618  cond.begin(*this);
1619  EmitBlock(RHSBlock);
1620  EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock,
1621  TrueCount - LHSScaledTrueCount);
1622  cond.end(*this);
1623 
1624  return;
1625  }
1626 
1627  if (const CXXThrowExpr *Throw = dyn_cast<CXXThrowExpr>(Cond)) {
1628  // Conditional operator handling can give us a throw expression as a
1629  // condition for a case like:
1630  // br(c ? throw x : y, t, f) -> br(c, br(throw x, t, f), br(y, t, f))
1631  // Fold this to:
1632  // br(c, throw x, br(y, t, f))
1633  EmitCXXThrowExpr(Throw, /*KeepInsertionPoint*/false);
1634  return;
1635  }
1636 
1637  // If the branch has a condition wrapped by __builtin_unpredictable,
1638  // create metadata that specifies that the branch is unpredictable.
1639  // Don't bother if not optimizing because that metadata would not be used.
1640  llvm::MDNode *Unpredictable = nullptr;
1641  auto *Call = dyn_cast<CallExpr>(Cond->IgnoreImpCasts());
1642  if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
1643  auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl());
1644  if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
1645  llvm::MDBuilder MDHelper(getLLVMContext());
1646  Unpredictable = MDHelper.createUnpredictable();
1647  }
1648  }
1649 
1650  // Create branch weights based on the number of times we get here and the
1651  // number of times the condition should be true.
1652  uint64_t CurrentCount = std::max(getCurrentProfileCount(), TrueCount);
1653  llvm::MDNode *Weights =
1654  createProfileWeights(TrueCount, CurrentCount - TrueCount);
1655 
1656  // Emit the code with the fully general case.
1657  llvm::Value *CondV;
1658  {
1659  ApplyDebugLocation DL(*this, Cond);
1660  CondV = EvaluateExprAsBool(Cond);
1661  }
1662  Builder.CreateCondBr(CondV, TrueBlock, FalseBlock, Weights, Unpredictable);
1663 }
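
Illustrative example (not part of this file): a small C++ input of the kind this routine lowers. The function and variable names are invented; __builtin_unpredictable is a real Clang builtin. The "1 && x" operand folds away, "!cond" swaps the destination blocks, and the unpredictable-wrapped condition gets !unpredictable metadata when optimizing.

// branch-lowering.cpp -- hypothetical input exercising EmitBranchOnBoolExpr.
extern bool check(int);

int classify(int a, int b) {
  if (1 && check(a))                    // br(1 && X) -> br(X)
    return 1;
  if (!(a < b))                         // br(!x, t, f) -> br(x, f, t)
    return 2;
  if (__builtin_unpredictable(a == b))  // unpredictable metadata at -O1 and above
    return 3;
  return 0;
}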
1664 
1665 /// ErrorUnsupported - Print out an error that codegen doesn't support the
1666 /// specified stmt yet.
1667 void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type) {
1668  CGM.ErrorUnsupported(S, Type);
1669 }
1670 
1671 /// emitNonZeroVLAInit - Emit the "zero" initialization of a
1672 /// variable-length array whose elements have a non-zero bit-pattern.
1673 ///
1674 /// \param baseType the inner-most element type of the array
1675 /// \param src - a char* pointing to the bit-pattern for a single
1676 /// base element of the array
1677 /// \param sizeInChars - the total size of the VLA, in chars
1678 static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType,
1679  Address dest, Address src,
1680  llvm::Value *sizeInChars) {
1681  CGBuilderTy &Builder = CGF.Builder;
1682 
1683  CharUnits baseSize = CGF.getContext().getTypeSizeInChars(baseType);
1684  llvm::Value *baseSizeInChars
1685  = llvm::ConstantInt::get(CGF.IntPtrTy, baseSize.getQuantity());
1686 
1687  Address begin =
1688  Builder.CreateElementBitCast(dest, CGF.Int8Ty, "vla.begin");
1689  llvm::Value *end =
1690  Builder.CreateInBoundsGEP(begin.getPointer(), sizeInChars, "vla.end");
1691 
1692  llvm::BasicBlock *originBB = CGF.Builder.GetInsertBlock();
1693  llvm::BasicBlock *loopBB = CGF.createBasicBlock("vla-init.loop");
1694  llvm::BasicBlock *contBB = CGF.createBasicBlock("vla-init.cont");
1695 
1696  // Make a loop over the VLA. C99 guarantees that the VLA element
1697  // count must be nonzero.
1698  CGF.EmitBlock(loopBB);
1699 
1700  llvm::PHINode *cur = Builder.CreatePHI(begin.getType(), 2, "vla.cur");
1701  cur->addIncoming(begin.getPointer(), originBB);
1702 
1703  CharUnits curAlign =
1704  dest.getAlignment().alignmentOfArrayElement(baseSize);
1705 
1706  // memcpy the individual element bit-pattern.
1707  Builder.CreateMemCpy(Address(cur, curAlign), src, baseSizeInChars,
1708  /*volatile*/ false);
1709 
1710  // Go to the next element.
1711  llvm::Value *next =
1712  Builder.CreateInBoundsGEP(CGF.Int8Ty, cur, baseSizeInChars, "vla.next");
1713 
1714  // Leave if that's the end of the VLA.
1715  llvm::Value *done = Builder.CreateICmpEQ(next, end, "vla-init.isdone");
1716  Builder.CreateCondBr(done, contBB, loopBB);
1717  cur->addIncoming(next, loopBB);
1718 
1719  CGF.EmitBlock(contBB);
1720 }
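
Illustrative sketch (not part of this file): in plain C++ terms the loop built above behaves roughly like the helper below, copying one element-sized bit pattern at a time until the end pointer is reached. This only models the emitted IR; the function name and signature are invented.

#include <cstring>

// Rough model of the IR loop produced by emitNonZeroVLAInit.
static void nonZeroVLAInitModel(char *dest, const char *src,
                                unsigned long totalSize, unsigned long eltSize) {
  char *cur = dest;
  char *end = dest + totalSize;
  do {                              // C99 guarantees the element count is nonzero
    std::memcpy(cur, src, eltSize); // splat the single-element bit pattern
    cur += eltSize;
  } while (cur != end);
}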
1721 
1722 void
1723 CodeGenFunction::EmitNullInitialization(Address DestPtr, QualType Ty) {
1724  // Ignore empty classes in C++.
1725  if (getLangOpts().CPlusPlus) {
1726  if (const RecordType *RT = Ty->getAs<RecordType>()) {
1727  if (cast<CXXRecordDecl>(RT->getDecl())->isEmpty())
1728  return;
1729  }
1730  }
1731 
1732  // Cast the dest ptr to the appropriate i8 pointer type.
1733  if (DestPtr.getElementType() != Int8Ty)
1734  DestPtr = Builder.CreateElementBitCast(DestPtr, Int8Ty);
1735 
1736  // Get size and alignment info for this aggregate.
1737  CharUnits size = getContext().getTypeSizeInChars(Ty);
1738 
1739  llvm::Value *SizeVal;
1740  const VariableArrayType *vla;
1741 
1742  // Don't bother emitting a zero-byte memset.
1743  if (size.isZero()) {
1744  // But note that getTypeInfo returns 0 for a VLA.
1745  if (const VariableArrayType *vlaType =
1746  dyn_cast_or_null<VariableArrayType>(
1747  getContext().getAsArrayType(Ty))) {
1748  auto VlaSize = getVLASize(vlaType);
1749  SizeVal = VlaSize.NumElts;
1750  CharUnits eltSize = getContext().getTypeSizeInChars(VlaSize.Type);
1751  if (!eltSize.isOne())
1752  SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(eltSize));
1753  vla = vlaType;
1754  } else {
1755  return;
1756  }
1757  } else {
1758  SizeVal = CGM.getSize(size);
1759  vla = nullptr;
1760  }
1761 
1762  // If the type contains a pointer to data member we can't memset it to zero.
1763  // Instead, create a null constant and copy it to the destination.
1764  // TODO: there are other patterns besides zero that we can usefully memset,
1765  // like -1, which happens to be the pattern used by member-pointers.
1766  if (!CGM.getTypes().isZeroInitializable(Ty)) {
1767  // For a VLA, emit a single element, then splat that over the VLA.
1768  if (vla) Ty = getContext().getBaseElementType(vla);
1769 
1770  llvm::Constant *NullConstant = CGM.EmitNullConstant(Ty);
1771 
1772  llvm::GlobalVariable *NullVariable =
1773  new llvm::GlobalVariable(CGM.getModule(), NullConstant->getType(),
1774  /*isConstant=*/true,
1775  llvm::GlobalVariable::PrivateLinkage,
1776  NullConstant, Twine());
1777  CharUnits NullAlign = DestPtr.getAlignment();
1778  NullVariable->setAlignment(NullAlign.getAsAlign());
1779  Address SrcPtr(Builder.CreateBitCast(NullVariable, Builder.getInt8PtrTy()),
1780  NullAlign);
1781 
1782  if (vla) return emitNonZeroVLAInit(*this, Ty, DestPtr, SrcPtr, SizeVal);
1783 
1784  // Get and call the appropriate llvm.memcpy overload.
1785  Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, false);
1786  return;
1787  }
1788 
1789  // Otherwise, just memset the whole thing to zero. This is legal
1790  // because in LLVM, all default initializers (other than the ones we just
1791  // handled above) are guaranteed to have a bit pattern of all zeros.
1792  Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal, false);
1793 }
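
Illustrative example (not part of this file): under the Itanium C++ ABI a null pointer-to-data-member is the all-ones pattern, so a type containing one is not zero-initializable by memset and takes the copy-from-null-constant path above, while a plain aggregate of ints can be lowered to a memset. Type and function names are invented.

// null-init-paths.cpp -- hypothetical inputs for EmitNullInitialization.
struct Plain { int a, b; };     // all-zero null pattern -> memset path
struct HasMemPtr {
  int Plain::*member;           // null is -1 under the Itanium ABI -> memcpy path
};

void demo() {
  Plain p{};                    // may lower to a memset of the whole object
  HasMemPtr h{};                // copied from a private global holding the null constant
  (void)p; (void)h;
}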
1794 
1795 llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelDecl *L) {
1796  // Make sure that there is a block for the indirect goto.
1797  if (!IndirectBranch)
1798  GetIndirectGotoBlock();
1799 
1800  llvm::BasicBlock *BB = getJumpDestForLabel(L).getBlock();
1801 
1802  // Make sure the indirect branch includes all of the address-taken blocks.
1803  IndirectBranch->addDestination(BB);
1804  return llvm::BlockAddress::get(CurFn, BB);
1805 }
1806 
1807 llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock() {
1808  // If we already made the indirect branch for indirect goto, return its block.
1809  if (IndirectBranch) return IndirectBranch->getParent();
1810 
1811  CGBuilderTy TmpBuilder(*this, createBasicBlock("indirectgoto"));
1812 
1813  // Create the PHI node that indirect gotos will add entries to.
1814  llvm::Value *DestVal = TmpBuilder.CreatePHI(Int8PtrTy, 0,
1815  "indirect.goto.dest");
1816 
1817  // Create the indirect branch instruction.
1818  IndirectBranch = TmpBuilder.CreateIndirectBr(DestVal);
1819  return IndirectBranch->getParent();
1820 }
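
Illustrative example (not part of this file): the indirect-goto support above exists for the GNU labels-as-values extension, which Clang implements. Taking a label's address goes through GetAddrOfLabel, and the computed goto branches via the shared "indirectgoto" block. Names are invented.

// computed-goto.cpp -- hypothetical input using labels-as-values.
int dispatch(int op) {
  static void *table[] = { &&do_add, &&do_sub };  // each label address-taken
  int acc = 10;
  goto *table[op & 1];                            // branches through the indirectgoto block
do_add:
  return acc + 1;
do_sub:
  return acc - 1;
}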
1821 
1822 /// Computes the length of an array in elements, as well as the base
1823 /// element type and a properly-typed first element pointer.
1824 llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType,
1825  QualType &baseType,
1826  Address &addr) {
1827  const ArrayType *arrayType = origArrayType;
1828 
1829  // If it's a VLA, we have to load the stored size. Note that
1830  // this is the size of the VLA in bytes, not its size in elements.
1831  llvm::Value *numVLAElements = nullptr;
1832  if (isa<VariableArrayType>(arrayType)) {
1833  numVLAElements = getVLASize(cast<VariableArrayType>(arrayType)).NumElts;
1834 
1835  // Walk into all VLAs. This doesn't require changes to addr,
1836  // which has type T* where T is the first non-VLA element type.
1837  do {
1838  QualType elementType = arrayType->getElementType();
1839  arrayType = getContext().getAsArrayType(elementType);
1840 
1841  // If we only have VLA components, 'addr' requires no adjustment.
1842  if (!arrayType) {
1843  baseType = elementType;
1844  return numVLAElements;
1845  }
1846  } while (isa<VariableArrayType>(arrayType));
1847 
1848  // We get out here only if we find a constant array type
1849  // inside the VLA.
1850  }
1851 
1852  // We have some number of constant-length arrays, so addr should
1853  // have LLVM type [M x [N x [...]]]*. Build a GEP that walks
1854  // down to the first element of addr.
1855  SmallVector<llvm::Value*, 8> gepIndices;
1856 
1857  // GEP down to the array type.
1858  llvm::ConstantInt *zero = Builder.getInt32(0);
1859  gepIndices.push_back(zero);
1860 
1861  uint64_t countFromCLAs = 1;
1862  QualType eltType;
1863 
1864  llvm::ArrayType *llvmArrayType =
1865  dyn_cast<llvm::ArrayType>(addr.getElementType());
1866  while (llvmArrayType) {
1867  assert(isa<ConstantArrayType>(arrayType));
1868  assert(cast<ConstantArrayType>(arrayType)->getSize().getZExtValue()
1869  == llvmArrayType->getNumElements());
1870 
1871  gepIndices.push_back(zero);
1872  countFromCLAs *= llvmArrayType->getNumElements();
1873  eltType = arrayType->getElementType();
1874 
1875  llvmArrayType =
1876  dyn_cast<llvm::ArrayType>(llvmArrayType->getElementType());
1877  arrayType = getContext().getAsArrayType(arrayType->getElementType());
1878  assert((!llvmArrayType || arrayType) &&
1879  "LLVM and Clang types are out-of-synch");
1880  }
1881 
1882  if (arrayType) {
1883  // From this point onwards, the Clang array type has been emitted
1884  // as some other type (probably a packed struct). Compute the array
1885  // size, and just emit the 'begin' expression as a bitcast.
1886  while (arrayType) {
1887  countFromCLAs *=
1888  cast<ConstantArrayType>(arrayType)->getSize().getZExtValue();
1889  eltType = arrayType->getElementType();
1890  arrayType = getContext().getAsArrayType(eltType);
1891  }
1892 
1893  llvm::Type *baseType = ConvertType(eltType);
1894  addr = Builder.CreateElementBitCast(addr, baseType, "array.begin");
1895  } else {
1896  // Create the actual GEP.
1897  addr = Address(Builder.CreateInBoundsGEP(addr.getPointer(),
1898  gepIndices, "array.begin"),
1899  addr.getAlignment());
1900  }
1901 
1902  baseType = eltType;
1903 
1904  llvm::Value *numElements
1905  = llvm::ConstantInt::get(SizeTy, countFromCLAs);
1906 
1907  // If we had any VLA dimensions, factor them in.
1908  if (numVLAElements)
1909  numElements = Builder.CreateNUWMul(numVLAElements, numElements);
1910 
1911  return numElements;
1912 }
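
Illustrative example (not part of this file): for the declarations below, the two constant dimensions of "fixed" fold into countFromCLAs (3 * 4 = 12) and the returned pointer addresses its first int, while the VLA dimension of "vla" contributes a runtime multiplier on top of the constant inner dimension. Names are invented.

// array-length.cpp -- hypothetical inputs for emitArrayLength.
void fill(int n) {
  int fixed[3][4] = {};   // 12 elements, all from constant-length dimensions
  int vla[n][4];          // runtime factor n times the constant 4
  fixed[0][0] = 0;
  vla[0][0] = 0;
}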
1913 
1914 CodeGenFunction::VlaSizePair CodeGenFunction::getVLASize(QualType type) {
1915  const VariableArrayType *vla = getContext().getAsVariableArrayType(type);
1916  assert(vla && "type was not a variable array type!");
1917  return getVLASize(vla);
1918 }
1919 
1920 CodeGenFunction::VlaSizePair
1921 CodeGenFunction::getVLASize(const VariableArrayType *type) {
1922  // The number of elements so far; always size_t.
1923  llvm::Value *numElements = nullptr;
1924 
1925  QualType elementType;
1926  do {
1927  elementType = type->getElementType();
1928  llvm::Value *vlaSize = VLASizeMap[type->getSizeExpr()];
1929  assert(vlaSize && "no size for VLA!");
1930  assert(vlaSize->getType() == SizeTy);
1931 
1932  if (!numElements) {
1933  numElements = vlaSize;
1934  } else {
1935  // It's undefined behavior if this wraps around, so mark it that way.
1936  // FIXME: Teach -fsanitize=undefined to trap this.
1937  numElements = Builder.CreateNUWMul(numElements, vlaSize);
1938  }
1939  } while ((type = getContext().getAsVariableArrayType(elementType)));
1940 
1941  return { numElements, elementType };
1942 }
1943 
1944 CodeGenFunction::VlaSizePair
1945 CodeGenFunction::getVLAElements1D(QualType type) {
1946  const VariableArrayType *vla = getContext().getAsVariableArrayType(type);
1947  assert(vla && "type was not a variable array type!");
1948  return getVLAElements1D(vla);
1949 }
1950 
1951 CodeGenFunction::VlaSizePair
1952 CodeGenFunction::getVLAElements1D(const VariableArrayType *Vla) {
1953  llvm::Value *VlaSize = VLASizeMap[Vla->getSizeExpr()];
1954  assert(VlaSize && "no size for VLA!");
1955  assert(VlaSize->getType() == SizeTy);
1956  return { VlaSize, Vla->getElementType() };
1957 }
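
Illustrative example (not part of this file): for a multi-dimensional VLA the loop above multiplies the cached size of every variable dimension with a no-unsigned-wrap multiply, so the pair returned for "buf" below is (n * m, float). A minimal sketch with invented names.

// vla-size.cpp -- hypothetical input whose dimension sizes land in VLASizeMap.
float firstElement(int n, int m) {
  float buf[n][m];        // NumElts = n * m, element type float
  buf[0][0] = 1.0f;
  return buf[0][0];
}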
1958 
1959 void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
1960  assert(type->isVariablyModifiedType() &&
1961  "Must pass variably modified type to EmitVLASizes!");
1962 
1963  EnsureInsertPoint();
1964 
1965  // We're going to walk down into the type and look for VLA
1966  // expressions.
1967  do {
1968  assert(type->isVariablyModifiedType());
1969 
1970  const Type *ty = type.getTypePtr();
1971  switch (ty->getTypeClass()) {
1972 
1973 #define TYPE(Class, Base)
1974 #define ABSTRACT_TYPE(Class, Base)
1975 #define NON_CANONICAL_TYPE(Class, Base)
1976 #define DEPENDENT_TYPE(Class, Base) case Type::Class:
1977 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base)
1978 #include "clang/AST/TypeNodes.inc"
1979  llvm_unreachable("unexpected dependent type!");
1980 
1981  // These types are never variably-modified.
1982  case Type::Builtin:
1983  case Type::Complex:
1984  case Type::Vector:
1985  case Type::ExtVector:
1986  case Type::Record:
1987  case Type::Enum:
1988  case Type::Elaborated:
1989  case Type::TemplateSpecialization:
1990  case Type::ObjCTypeParam:
1991  case Type::ObjCObject:
1992  case Type::ObjCInterface:
1993  case Type::ObjCObjectPointer:
1994  llvm_unreachable("type class is never variably-modified!");
1995 
1996  case Type::Adjusted:
1997  type = cast<AdjustedType>(ty)->getAdjustedType();
1998  break;
1999 
2000  case Type::Decayed:
2001  type = cast<DecayedType>(ty)->getPointeeType();
2002  break;
2003 
2004  case Type::Pointer:
2005  type = cast<PointerType>(ty)->getPointeeType();
2006  break;
2007 
2008  case Type::BlockPointer:
2009  type = cast<BlockPointerType>(ty)->getPointeeType();
2010  break;
2011 
2012  case Type::LValueReference:
2013  case Type::RValueReference:
2014  type = cast<ReferenceType>(ty)->getPointeeType();
2015  break;
2016 
2017  case Type::MemberPointer:
2018  type = cast<MemberPointerType>(ty)->getPointeeType();
2019  break;
2020 
2021  case Type::ConstantArray:
2022  case Type::IncompleteArray:
2023  // Losing element qualification here is fine.
2024  type = cast<ArrayType>(ty)->getElementType();
2025  break;
2026 
2027  case Type::VariableArray: {
2028  // Losing element qualification here is fine.
2029  const VariableArrayType *vat = cast<VariableArrayType>(ty);
2030 
2031  // Unknown size indication requires no size computation.
2032  // Otherwise, evaluate and record it.
2033  if (const Expr *size = vat->getSizeExpr()) {
2034  // It's possible that we might have emitted this already,
2035  // e.g. with a typedef and a pointer to it.
2036  llvm::Value *&entry = VLASizeMap[size];
2037  if (!entry) {
2038  llvm::Value *Size = EmitScalarExpr(size);
2039 
2040  // C11 6.7.6.2p5:
2041  // If the size is an expression that is not an integer constant
2042  // expression [...] each time it is evaluated it shall have a value
2043  // greater than zero.
2044  if (SanOpts.has(SanitizerKind::VLABound) &&
2045  size->getType()->isSignedIntegerType()) {
2046  SanitizerScope SanScope(this);
2047  llvm::Value *Zero = llvm::Constant::getNullValue(Size->getType());
2048  llvm::Constant *StaticArgs[] = {
2049  EmitCheckSourceLocation(size->getBeginLoc()),
2050  EmitCheckTypeDescriptor(size->getType())};
2051  EmitCheck(std::make_pair(Builder.CreateICmpSGT(Size, Zero),
2052  SanitizerKind::VLABound),
2053  SanitizerHandler::VLABoundNotPositive, StaticArgs, Size);
2054  }
2055 
2056  // Always zexting here would be wrong if it weren't
2057  // undefined behavior to have a negative bound.
2058  entry = Builder.CreateIntCast(Size, SizeTy, /*signed*/ false);
2059  }
2060  }
2061  type = vat->getElementType();
2062  break;
2063  }
2064 
2065  case Type::FunctionProto:
2066  case Type::FunctionNoProto:
2067  type = cast<FunctionType>(ty)->getReturnType();
2068  break;
2069 
2070  case Type::Paren:
2071  case Type::TypeOf:
2072  case Type::UnaryTransform:
2073  case Type::Attributed:
2074  case Type::SubstTemplateTypeParm:
2075  case Type::PackExpansion:
2076  case Type::MacroQualified:
2077  // Keep walking after single level desugaring.
2078  type = type.getSingleStepDesugaredType(getContext());
2079  break;
2080 
2081  case Type::Typedef:
2082  case Type::Decltype:
2083  case Type::Auto:
2084  case Type::DeducedTemplateSpecialization:
2085  // Stop walking: nothing to do.
2086  return;
2087 
2088  case Type::TypeOfExpr:
2089  // Stop walking: emit typeof expression.
2090  EmitIgnoredExpr(cast<TypeOfExprType>(ty)->getUnderlyingExpr());
2091  return;
2092 
2093  case Type::Atomic:
2094  type = cast<AtomicType>(ty)->getValueType();
2095  break;
2096 
2097  case Type::Pipe:
2098  type = cast<PipeType>(ty)->getElementType();
2099  break;
2100  }
2101  } while (type->isVariablyModifiedType());
2102 }
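
Illustrative example (not part of this file): the walk above evaluates each VLA size expression exactly once and caches it in VLASizeMap, and with -fsanitize=vla-bound it also emits the positivity check required by C11 6.7.6.2p5. The helper name below is invented.

// vla-bound.cpp -- hypothetical input; compile with -fsanitize=vla-bound to get the check.
extern int nextSize();

void use() {
  int a[nextSize()];      // size expression evaluated once and cached
  a[0] = 0;               // with the sanitizer, a non-positive size is diagnosed at run time
}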
2103 
2104 Address CodeGenFunction::EmitVAListRef(const Expr* E) {
2105  if (getContext().getBuiltinVaListType()->isArrayType())
2106  return EmitPointerWithAlignment(E);
2107  return EmitLValue(E).getAddress(*this);
2108 }
2109 
2110 Address CodeGenFunction::EmitMSVAListRef(const Expr *E) {
2111  return EmitLValue(E).getAddress(*this);
2112 }
2113 
2114 void CodeGenFunction::EmitDeclRefExprDbgValue(const DeclRefExpr *E,
2115  const APValue &Init) {
2116  assert(Init.hasValue() && "Invalid DeclRefExpr initializer!");
2117  if (CGDebugInfo *Dbg = getDebugInfo())
2119  Dbg->EmitGlobalVariable(E->getDecl(), Init);
2120 }
2121 
2122 CodeGenFunction::PeepholeProtection
2123 CodeGenFunction::protectFromPeepholes(RValue rvalue) {
2124  // At the moment, the only aggressive peephole we do in IR gen
2125  // is trunc(zext) folding, but if we add more, we can easily
2126  // extend this protection.
2127 
2128  if (!rvalue.isScalar()) return PeepholeProtection();
2129  llvm::Value *value = rvalue.getScalarVal();
2130  if (!isa<llvm::ZExtInst>(value)) return PeepholeProtection();
2131 
2132  // Just make an extra bitcast.
2133  assert(HaveInsertPoint());
2134  llvm::Instruction *inst = new llvm::BitCastInst(value, value->getType(), "",
2135  Builder.GetInsertBlock());
2136 
2137  PeepholeProtection protection;
2138  protection.Inst = inst;
2139  return protection;
2140 }
2141 
2142 void CodeGenFunction::unprotectFromPeepholes(PeepholeProtection protection) {
2143  if (!protection.Inst) return;
2144 
2145  // In theory, we could try to duplicate the peepholes now, but whatever.
2146  protection.Inst->eraseFromParent();
2147 }
2148 
2149 void CodeGenFunction::EmitAlignmentAssumption(llvm::Value *PtrValue,
2150  QualType Ty, SourceLocation Loc,
2151  SourceLocation AssumptionLoc,
2152  llvm::Value *Alignment,
2153  llvm::Value *OffsetValue) {
2154  llvm::Value *TheCheck;
2155  llvm::Instruction *Assumption = Builder.CreateAlignmentAssumption(
2156  CGM.getDataLayout(), PtrValue, Alignment, OffsetValue, &TheCheck);
2157  if (SanOpts.has(SanitizerKind::Alignment)) {
2158  EmitAlignmentAssumptionCheck(PtrValue, Ty, Loc, AssumptionLoc, Alignment,
2159  OffsetValue, TheCheck, Assumption);
2160  }
2161 }
2162 
2163 void CodeGenFunction::EmitAlignmentAssumption(llvm::Value *PtrValue,
2164  const Expr *E,
2165  SourceLocation AssumptionLoc,
2166  llvm::Value *Alignment,
2167  llvm::Value *OffsetValue) {
2168  if (auto *CE = dyn_cast<CastExpr>(E))
2169  E = CE->getSubExprAsWritten();
2170  QualType Ty = E->getType();
2171  SourceLocation Loc = E->getExprLoc();
2172 
2173  EmitAlignmentAssumption(PtrValue, Ty, Loc, AssumptionLoc, Alignment,
2174  OffsetValue);
2175 }
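
Illustrative example (not part of this file): __builtin_assume_aligned, a real Clang/GCC builtin, is one way source code reaches these helpers; the cast pointer carries an llvm.assume-based alignment assumption, and with -fsanitize=alignment the assumption is additionally verified. Names are invented.

// assume-aligned.cpp -- hypothetical input producing an alignment assumption.
float sumFirstFour(const float *p) {
  const float *q = static_cast<const float *>(__builtin_assume_aligned(p, 16));
  return q[0] + q[1] + q[2] + q[3];   // optimizers may rely on the 16-byte assumption
}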
2176 
2177 llvm::Value *CodeGenFunction::EmitAnnotationCall(llvm::Function *AnnotationFn,
2178  llvm::Value *AnnotatedVal,
2179  StringRef AnnotationStr,
2180  SourceLocation Location) {
2181  llvm::Value *Args[4] = {
2182  AnnotatedVal,
2183  Builder.CreateBitCast(CGM.EmitAnnotationString(AnnotationStr), Int8PtrTy),
2184  Builder.CreateBitCast(CGM.EmitAnnotationUnit(Location), Int8PtrTy),
2185  CGM.EmitAnnotationLineNo(Location)
2186  };
2187  return Builder.CreateCall(AnnotationFn, Args);
2188 }
2189 
2190 void CodeGenFunction::EmitVarAnnotations(const VarDecl *D, llvm::Value *V) {
2191  assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
2192  // FIXME We create a new bitcast for every annotation because that's what
2193  // llvm-gcc was doing.
2194  for (const auto *I : D->specific_attrs<AnnotateAttr>())
2195  EmitAnnotationCall(CGM.getIntrinsic(llvm::Intrinsic::var_annotation),
2196  Builder.CreateBitCast(V, CGM.Int8PtrTy, V->getName()),
2197  I->getAnnotation(), D->getLocation());
2198 }
2199 
2200 Address CodeGenFunction::EmitFieldAnnotations(const FieldDecl *D,
2201  Address Addr) {
2202  assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
2203  llvm::Value *V = Addr.getPointer();
2204  llvm::Type *VTy = V->getType();
2205  llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::ptr_annotation,
2206  CGM.Int8PtrTy);
2207 
2208  for (const auto *I : D->specific_attrs<AnnotateAttr>()) {
2209  // FIXME Always emit the cast inst so we can differentiate between
2210  // annotation on the first field of a struct and annotation on the struct
2211  // itself.
2212  if (VTy != CGM.Int8PtrTy)
2213  V = Builder.CreateBitCast(V, CGM.Int8PtrTy);
2214  V = EmitAnnotationCall(F, V, I->getAnnotation(), D->getLocation());
2215  V = Builder.CreateBitCast(V, VTy);
2216  }
2217 
2218  return Address(V, Addr.getAlignment());
2219 }
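
Illustrative example (not part of this file): the annotate attribute is what feeds EmitVarAnnotations and EmitFieldAnnotations; an annotated local is wrapped in llvm.var.annotation and an annotated field access in llvm.ptr.annotation. Names and annotation strings are invented.

// annotations.cpp -- hypothetical input using __attribute__((annotate)).
struct Packet {
  int header __attribute__((annotate("checked_field")));
};

int readHeader(Packet *p) {
  int local __attribute__((annotate("scratch"))) = 0;  // llvm.var.annotation
  local = p->header;                                   // llvm.ptr.annotation on the field access
  return local;
}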
2220 
2222 
2223 CodeGenFunction::SanitizerScope::SanitizerScope(CodeGenFunction *CGF)
2224  : CGF(CGF) {
2225  assert(!CGF->IsSanitizerScope);
2226  CGF->IsSanitizerScope = true;
2227 }
2228 
2229 CodeGenFunction::SanitizerScope::~SanitizerScope() {
2230  CGF->IsSanitizerScope = false;
2231 }
2232 
2233 void CodeGenFunction::InsertHelper(llvm::Instruction *I,
2234  const llvm::Twine &Name,
2235  llvm::BasicBlock *BB,
2236  llvm::BasicBlock::iterator InsertPt) const {
2237  LoopStack.InsertHelper(I, Name, BB, InsertPt);
2238  if (IsSanitizerScope)
2239  CGM.getSanitizerMetadata()->disableSanitizerForInstruction(I);
2240 }
2241 
2242 void CGBuilderInserter::InsertHelper(
2243  llvm::Instruction *I, const llvm::Twine &Name, llvm::BasicBlock *BB,
2244  llvm::BasicBlock::iterator InsertPt) const {
2245  llvm::IRBuilderDefaultInserter::InsertHelper(I, Name, BB, InsertPt);
2246  if (CGF)
2247  CGF->InsertHelper(I, Name, BB, InsertPt);
2248 }
2249 
2250 static bool hasRequiredFeatures(const SmallVectorImpl<StringRef> &ReqFeatures,
2251  CodeGenModule &CGM, const FunctionDecl *FD,
2252  std::string &FirstMissing) {
2253  // If there aren't any required features listed then go ahead and return.
2254  if (ReqFeatures.empty())
2255  return false;
2256 
2257  // Now build up the set of caller features and verify that all the required
2258  // features are there.
2259  llvm::StringMap<bool> CallerFeatureMap;
2260  CGM.getContext().getFunctionFeatureMap(CallerFeatureMap, FD);
2261 
2262  // Every required feature must be present; a feature written as "a|b" is
2263  // satisfied when at least one of the '|'-separated alternatives is available.
2264  return std::all_of(
2265  ReqFeatures.begin(), ReqFeatures.end(), [&](StringRef Feature) {
2266  SmallVector<StringRef, 1> OrFeatures;
2267  Feature.split(OrFeatures, '|');
2268  return llvm::any_of(OrFeatures, [&](StringRef Feature) {
2269  if (!CallerFeatureMap.lookup(Feature)) {
2270  FirstMissing = Feature.str();
2271  return false;
2272  }
2273  return true;
2274  });
2275  });
2276 }
2277 
2278 // Emits an error if we don't have a valid set of target features for the
2279 // called function.
2280 void CodeGenFunction::checkTargetFeatures(const CallExpr *E,
2281  const FunctionDecl *TargetDecl) {
2282  return checkTargetFeatures(E->getBeginLoc(), TargetDecl);
2283 }
2284 
2285 // Emits an error if we don't have a valid set of target features for the
2286 // called function.
2287 void CodeGenFunction::checkTargetFeatures(SourceLocation Loc,
2288  const FunctionDecl *TargetDecl) {
2289  // Early exit if this is an indirect call.
2290  if (!TargetDecl)
2291  return;
2292 
2293  // Get the current enclosing function if it exists. If it doesn't
2294  // we can't check the target features anyhow.
2295  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl);
2296  if (!FD)
2297  return;
2298 
2299  // Grab the required features for the call. For a builtin this is listed in
2300  // the td file with the default cpu, for an always_inline function this is any
2301  // listed cpu and any listed features.
2302  unsigned BuiltinID = TargetDecl->getBuiltinID();
2303  std::string MissingFeature;
2304  if (BuiltinID) {
2305  SmallVector<StringRef, 1> ReqFeatures;
2306  const char *FeatureList =
2307  CGM.getContext().BuiltinInfo.getRequiredFeatures(BuiltinID);
2308  // Return if the builtin doesn't have any required features.
2309  if (!FeatureList || StringRef(FeatureList) == "")
2310  return;
2311  StringRef(FeatureList).split(ReqFeatures, ',');
2312  if (!hasRequiredFeatures(ReqFeatures, CGM, FD, MissingFeature))
2313  CGM.getDiags().Report(Loc, diag::err_builtin_needs_feature)
2314  << TargetDecl->getDeclName()
2315  << CGM.getContext().BuiltinInfo.getRequiredFeatures(BuiltinID);
2316 
2317  } else if (!TargetDecl->isMultiVersion() &&
2318  TargetDecl->hasAttr<TargetAttr>()) {
2319  // Get the required features for the callee.
2320 
2321  const TargetAttr *TD = TargetDecl->getAttr<TargetAttr>();
2322  ParsedTargetAttr ParsedAttr =
2323  CGM.getContext().filterFunctionTargetAttrs(TD);
2324 
2325  SmallVector<StringRef, 1> ReqFeatures;
2326  llvm::StringMap<bool> CalleeFeatureMap;
2327  CGM.getContext().getFunctionFeatureMap(CalleeFeatureMap,
2328  GlobalDecl(TargetDecl));
2329 
2330  for (const auto &F : ParsedAttr.Features) {
2331  if (F[0] == '+' && CalleeFeatureMap.lookup(F.substr(1)))
2332  ReqFeatures.push_back(StringRef(F).substr(1));
2333  }
2334 
2335  for (const auto &F : CalleeFeatureMap) {
2336  // Only positive features are "required".
2337  if (F.getValue())
2338  ReqFeatures.push_back(F.getKey());
2339  }
2340  if (!hasRequiredFeatures(ReqFeatures, CGM, FD, MissingFeature))
2341  CGM.getDiags().Report(Loc, diag::err_function_needs_feature)
2342  << FD->getDeclName() << TargetDecl->getDeclName() << MissingFeature;
2343  }
2344 }
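
Illustrative example (not part of this file): calling a target-specific builtin from a function that does not advertise the required feature is what trips the diagnostics above. A minimal x86-only sketch; the function name is invented and _mm256_add_ps is a real intrinsic.

// target-features.cpp -- hypothetical x86 input for checkTargetFeatures.
#include <immintrin.h>

__attribute__((target("avx")))
__m256 addVectors(__m256 a, __m256 b) {
  return _mm256_add_ps(a, b);   // caller advertises "avx", so the feature check passes
}
// Dropping the target("avx") attribute (and not passing -mavx) makes the same call
// fail the required-feature check and produce an error instead of IR.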
2345 
2346 void CodeGenFunction::EmitSanitizerStatReport(llvm::SanitizerStatKind SSK) {
2347  if (!CGM.getCodeGenOpts().SanitizeStats)
2348  return;
2349 
2350  llvm::IRBuilder<> IRB(Builder.GetInsertBlock(), Builder.GetInsertPoint());
2351  IRB.SetCurrentDebugLocation(Builder.getCurrentDebugLocation());
2352  CGM.getSanStats().create(IRB, SSK);
2353 }
2354 
2355 llvm::Value *
2356 CodeGenFunction::FormResolverCondition(const MultiVersionResolverOption &RO) {
2357  llvm::Value *Condition = nullptr;
2358 
2359  if (!RO.Conditions.Architecture.empty())
2360  Condition = EmitX86CpuIs(RO.Conditions.Architecture);
2361 
2362  if (!RO.Conditions.Features.empty()) {
2363  llvm::Value *FeatureCond = EmitX86CpuSupports(RO.Conditions.Features);
2364  Condition =
2365  Condition ? Builder.CreateAnd(Condition, FeatureCond) : FeatureCond;
2366  }
2367  return Condition;
2368 }
2369 
2370 static void CreateMultiVersionResolverReturn(CodeGenModule &CGM,
2371  llvm::Function *Resolver,
2372  CGBuilderTy &Builder,
2373  llvm::Function *FuncToReturn,
2374  bool SupportsIFunc) {
2375  if (SupportsIFunc) {
2376  Builder.CreateRet(FuncToReturn);
2377  return;
2378  }
2379 
2380  llvm::SmallVector<llvm::Value *, 10> Args;
2381  llvm::for_each(Resolver->args(),
2382  [&](llvm::Argument &Arg) { Args.push_back(&Arg); });
2383 
2384  llvm::CallInst *Result = Builder.CreateCall(FuncToReturn, Args);
2385  Result->setTailCallKind(llvm::CallInst::TCK_MustTail);
2386 
2387  if (Resolver->getReturnType()->isVoidTy())
2388  Builder.CreateRetVoid();
2389  else
2390  Builder.CreateRet(Result);
2391 }
2392 
2393 void CodeGenFunction::EmitMultiVersionResolver(
2394  llvm::Function *Resolver, ArrayRef<MultiVersionResolverOption> Options) {
2395  assert(getContext().getTargetInfo().getTriple().isX86() &&
2396  "Only implemented for x86 targets");
2397 
2398  bool SupportsIFunc = getContext().getTargetInfo().supportsIFunc();
2399 
2400  // Main function's basic block.
2401  llvm::BasicBlock *CurBlock = createBasicBlock("resolver_entry", Resolver);
2402  Builder.SetInsertPoint(CurBlock);
2403  EmitX86CpuInit();
2404 
2405  for (const MultiVersionResolverOption &RO : Options) {
2406  Builder.SetInsertPoint(CurBlock);
2407  llvm::Value *Condition = FormResolverCondition(RO);
2408 
2409  // The 'default' or 'generic' case.
2410  if (!Condition) {
2411  assert(&RO == Options.end() - 1 &&
2412  "Default or Generic case must be last");
2413  CreateMultiVersionResolverReturn(CGM, Resolver, Builder, RO.Function,
2414  SupportsIFunc);
2415  return;
2416  }
2417 
2418  llvm::BasicBlock *RetBlock = createBasicBlock("resolver_return", Resolver);
2419  CGBuilderTy RetBuilder(*this, RetBlock);
2420  CreateMultiVersionResolverReturn(CGM, Resolver, RetBuilder, RO.Function,
2421  SupportsIFunc);
2422  CurBlock = createBasicBlock("resolver_else", Resolver);
2423  Builder.CreateCondBr(Condition, RetBlock, CurBlock);
2424  }
2425 
2426  // If no generic/default, emit an unreachable.
2427  Builder.SetInsertPoint(CurBlock);
2428  llvm::CallInst *TrapCall = EmitTrapCall(llvm::Intrinsic::trap);
2429  TrapCall->setDoesNotReturn();
2430  TrapCall->setDoesNotThrow();
2431  Builder.CreateUnreachable();
2432  Builder.ClearInsertionPoint();
2433 }
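
Illustrative example (not part of this file): x86 function multiversioning with the target attribute is what this resolver is emitted for; on targets with ifunc support the resolver simply returns the chosen function, otherwise it forwards the arguments with a musttail call. Names are invented.

// multiversion.cpp -- hypothetical x86 input that gets a generated resolver.
__attribute__((target("avx2")))    int fastPath() { return 2; }
__attribute__((target("sse4.2")))  int fastPath() { return 1; }
__attribute__((target("default"))) int fastPath() { return 0; }

int callIt() { return fastPath(); }   // dispatched through the emitted resolver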
2434 
2435 // Loc - where the diagnostic will point, where in the source code this
2436 // alignment has failed.
2437 // SecondaryLoc - if present (will be present if sufficiently different from
2438 // Loc), the diagnostic will additionally point a "Note:" to this location.
2439 // It should be the location where the __attribute__((assume_aligned))
2440 // was written, e.g. on the callee's declaration.
2441 void CodeGenFunction::EmitAlignmentAssumptionCheck(
2442  llvm::Value *Ptr, QualType Ty, SourceLocation Loc,
2443  SourceLocation SecondaryLoc, llvm::Value *Alignment,
2444  llvm::Value *OffsetValue, llvm::Value *TheCheck,
2445  llvm::Instruction *Assumption) {
2446  assert(Assumption && isa<llvm::CallInst>(Assumption) &&
2447  cast<llvm::CallInst>(Assumption)->getCalledValue() ==
2448  llvm::Intrinsic::getDeclaration(
2449  Builder.GetInsertBlock()->getParent()->getParent(),
2450  llvm::Intrinsic::assume) &&
2451  "Assumption should be a call to llvm.assume().");
2452  assert(&(Builder.GetInsertBlock()->back()) == Assumption &&
2453  "Assumption should be the last instruction of the basic block, "
2454  "since the basic block is still being generated.");
2455 
2456  if (!SanOpts.has(SanitizerKind::Alignment))
2457  return;
2458 
2459  // Don't check pointers to volatile data. The behavior here is implementation-
2460  // defined.
2461  if (Ty->getPointeeType().isVolatileQualified())
2462  return;
2463 
2464  // We need to temporarily remove the assumption so we can insert the
2465  // sanitizer check before it, else the check will be dropped by optimizations.
2466  Assumption->removeFromParent();
2467 
2468  {
2469  SanitizerScope SanScope(this);
2470 
2471  if (!OffsetValue)
2472  OffsetValue = Builder.getInt1(0); // no offset.
2473 
2474  llvm::Constant *StaticData[] = {EmitCheckSourceLocation(Loc),
2475  EmitCheckSourceLocation(SecondaryLoc),
2476  EmitCheckTypeDescriptor(Ty)};
2477  llvm::Value *DynamicData[] = {EmitCheckValue(Ptr),
2478  EmitCheckValue(Alignment),
2479  EmitCheckValue(OffsetValue)};
2480  EmitCheck({std::make_pair(TheCheck, SanitizerKind::Alignment)},
2481  SanitizerHandler::AlignmentAssumption, StaticData, DynamicData);
2482  }
2483 
2484  // We are now in the (new, empty) "cont" basic block.
2485  // Reintroduce the assumption.
2486  Builder.Insert(Assumption);
2487  // FIXME: Assumption still has its original basic block as its Parent.
2488 }
2489 
2490 llvm::DebugLoc CodeGenFunction::SourceLocToDebugLoc(SourceLocation Location) {
2491  if (CGDebugInfo *DI = getDebugInfo())
2492  return DI->SourceLocToDebugLoc(Location);
2493 
2494  return llvm::DebugLoc();
2495 }
const llvm::DataLayout & getDataLayout() const
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
Address CreateStructGEP(Address Addr, unsigned Index, const llvm::Twine &Name="")
Definition: CGBuilder.h:178
Defines the clang::ASTContext interface.
Represents a function declaration or definition.
Definition: Decl.h:1783
LValue MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T)
Given a value of type T* that may not be to a complete object, construct an l-value with the natural ...
Other implicit parameter.
Definition: Decl.h:1555
no exception specification
if(T->getSizeExpr()) TRY_TO(TraverseStmt(T -> getSizeExpr()))
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition: Type.h:2614
CanQualType VoidPtrTy
Definition: ASTContext.h:1044
A (possibly-)qualified type.
Definition: Type.h:654
llvm::Type * ConvertTypeForMem(QualType T)
void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock, llvm::BasicBlock *FalseBlock, uint64_t TrueCount)
EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g.
const CodeGenOptions & getCodeGenOpts() const
bool isReturnsRetained() const
In ARC, whether this function retains its return value.
llvm::Constant * EmitCheckTypeDescriptor(QualType T)
Emit a description of a type in a format suitable for passing to a runtime sanitizer handler...
Definition: CGExpr.cpp:2808
bool HaveInsertPoint() const
HaveInsertPoint - True if an insertion point is defined.
llvm::Constant * EncodeAddrForUseInPrologue(llvm::Function *F, llvm::Constant *Addr)
Encode an address into a form suitable for use in a function prologue.
XRayInstrMask Mask
Definition: XRayInstr.h:64
Specialize PointerLikeTypeTraits to allow LazyGenerationalUpdatePtr to be placed into a PointerUnion...
Definition: Dominators.h:30
virtual void addImplicitStructorParams(CodeGenFunction &CGF, QualType &ResTy, FunctionArgList &Params)=0
Insert any ABI-specific implicit parameters into the parameter list for a function.
CharUnits getClassPointerAlignment(const CXXRecordDecl *CD)
Returns the assumed alignment of an opaque pointer to the given class.
Definition: CGClass.cpp:37
Stmt - This represents one statement.
Definition: Stmt.h:66
FunctionType - C99 6.7.5.3 - Function Declarators.
Definition: Type.h:3422
SanitizerSet Sanitize
Set of enabled sanitizers.
Definition: LangOptions.h:234
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee...
Definition: Type.cpp:557
bool isMain() const
Determines whether this function is "main", which is the entry point into an executable program...
Definition: Decl.cpp:2926
const llvm::Triple & getTriple() const
Returns the target triple of the primary target.
Definition: TargetInfo.h:994
static bool endsWithReturn(const Decl *F)
Determine whether the function F ends with a return stmt.
QualType getThisType() const
Return the type of the this pointer.
Definition: DeclCXX.cpp:2352
Checking the &#39;this&#39; pointer for a constructor call.
Address EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
EmitPointerWithAlignment - Given an expression with a pointer type, emit the value and compute our be...
Definition: CGExpr.cpp:1039
constexpr XRayInstrMask Typed
Definition: XRayInstr.h:40
Decl - This represents one declaration (or definition), e.g.
Definition: DeclBase.h:88
const Decl * CurCodeDecl
CurCodeDecl - This is the inner-most code context, which includes blocks.
void EmitLambdaStaticInvokeBody(const CXXMethodDecl *MD)
Definition: CGClass.cpp:2933
void EmitCheck(ArrayRef< std::pair< llvm::Value *, SanitizerMask >> Checked, SanitizerHandler Check, ArrayRef< llvm::Constant *> StaticArgs, ArrayRef< llvm::Value *> DynamicArgs)
Create a basic block that will either trap or call a handler function in the UBSan runtime with the p...
Definition: CGExpr.cpp:3022
The base class of the type hierarchy.
Definition: Type.h:1450
bool ShouldInstrumentFunction()
ShouldInstrumentFunction - Return true if the current function should be instrumented with __cyg_prof...
DiagnosticBuilder Report(SourceLocation Loc, unsigned DiagID)
Issue the message to the client.
Definition: Diagnostic.h:1300
bool hasValue() const
Definition: APValue.h:359
bool usesSEHTry() const
Indicates the function uses __try.
Definition: Decl.h:2233
void GenerateCode(GlobalDecl GD, llvm::Function *Fn, const CGFunctionInfo &FnInfo)
Represents an array type, per C99 6.7.5.2 - Array Declarators.
Definition: Type.h:2889
bool isZero() const
isZero - Test whether the quantity equals zero.
Definition: CharUnits.h:116
static bool hasRequiredFeatures(const SmallVectorImpl< StringRef > &ReqFeatures, CodeGenModule &CGM, const FunctionDecl *FD, std::string &FirstMissing)
stable_iterator stable_begin() const
Create a stable reference to the top of the EH stack.
Definition: EHScopeStack.h:378
const TargetInfo & getTargetInfo() const
Definition: ASTContext.h:707
void InsertHelper(llvm::Instruction *I, const llvm::Twine &Name, llvm::BasicBlock *BB, llvm::BasicBlock::iterator InsertPt) const
CGBuilder insert helper.
bool imbueXRayAttrs(llvm::Function *Fn, SourceLocation Loc, StringRef Category=StringRef()) const
Imbue XRay attributes to a function, applying the always/never attribute lists in the process...
constexpr XRayInstrMask Function
Definition: XRayInstr.h:38
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
Represents a C++ constructor within a class.
Definition: DeclCXX.h:2383
bool ConstantFoldsToSimpleInteger(const Expr *Cond, bool &Result, bool AllowLabels=false)
ConstantFoldsToSimpleInteger - If the specified expression does not fold to a constant, or if it does but contains a label, return false.
QualType getElementType() const
Definition: Type.h:2910
bool empty() const
Determines whether the exception-scopes stack is empty.
Definition: EHScopeStack.h:344
FPRoundingModeKind
Possible rounding modes.
Definition: LangOptions.h:198
This file provides some common utility functions for processing Lambda related AST Constructs...
TypeEvaluationKind
The kind of evaluation to perform on values of a particular type.
Represents a variable declaration or definition.
Definition: Decl.h:820
QualType getReturnType() const
Definition: Decl.h:2445
RAII object to set/unset CodeGenFunction::IsSanitizerScope.
const T * getAs() const
Member-template getAs<specific type>&#39;.
Definition: Type.h:7002
Extra information about a function prototype.
Definition: Type.h:3837
static bool shouldEmitLifetimeMarkers(const CodeGenOptions &CGOpts, const LangOptions &LangOpts)
shouldEmitLifetimeMarkers - Decide whether we need emit the life-time markers.
uint64_t getProfileCount(const Stmt *S)
Get the profiler&#39;s count for the given statement.
This class gathers all debug information during compilation and is responsible for emitting to llvm g...
Definition: CGDebugInfo.h:54
static bool mightAddDeclToScope(const Stmt *S)
Determine if the given statement might introduce a declaration into the current scope, by being a (possibly-labelled) DeclStmt.
DiagnosticsEngine & getDiags() const
llvm::CallInst * EmitTrapCall(llvm::Intrinsic::ID IntrID)
Emit a call to trap or debugtrap and attach function attribute "trap-func-name" if specified...
Definition: CGExpr.cpp:3322
void EmitVariablyModifiedType(QualType Ty)
EmitVLASize - Capture all the sizes for the VLA expressions in the given variably-modified type and s...
void setCurrentProfileCount(uint64_t Count)
Set the profiler&#39;s current count.
bool ShouldXRayInstrumentFunction() const
ShouldXRayInstrument - Return true if the current function should be instrumented with XRay nop sleds...
llvm::Value * getPointer() const
Definition: Address.h:37
virtual llvm::Constant * getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const
Return a constant used by UBSan as a signature to identify functions possessing type information...
Definition: TargetInfo.h:168
void markAsIgnoreThreadCheckingAtRuntime(llvm::Function *Fn)
Annotate the function with an attribute that disables TSan checking at runtime.
bool IsSanitizerScope
True if CodeGen currently emits code implementing sanitizer checks.
Defines the Objective-C statement AST node classes.
llvm::Type * ConvertTypeForMem(QualType T)
ConvertTypeForMem - Convert type T into a llvm::Type.
A C++ throw-expression (C++ [except.throw]).
Definition: ExprCXX.h:1140
bool supportsIFunc() const
Identify whether this target supports IFuncs.
Definition: TargetInfo.h:1152
Represents a parameter to a function.
Definition: Decl.h:1595
static void destroyBlockInfos(CGBlockInfo *info)
Destroy a chain of block layouts.
Definition: CGBlocks.cpp:891
void emitImplicitAssignmentOperatorBody(FunctionArgList &Args)
Definition: CGClass.cpp:1533
EHScopeStack::stable_iterator PrologueCleanupDepth
PrologueCleanupDepth - The cleanup depth enclosing all the cleanups associated with the parameters...
A jump destination is an abstract label, branching to which may require a jump out through normal cle...
JumpDest getJumpDestForLabel(const LabelDecl *S)
getBasicBlockForLabel - Return the LLVM basicblock that the specified label maps to.
Definition: CGStmt.cpp:528
llvm::DenseMap< const VarDecl *, FieldDecl * > LambdaCaptureFields
const TargetInfo & getTarget() const
An object to manage conditionally-evaluated expressions.
PeepholeProtection protectFromPeepholes(RValue rvalue)
protectFromPeepholes - Protect a value that we&#39;re intending to store to the side, but which will prob...
DeclarationName getDeclName() const
Get the actual, stored name of the declaration, which may be a special name.
Definition: Decl.h:272
void EmitAlignmentAssumptionCheck(llvm::Value *Ptr, QualType Ty, SourceLocation Loc, SourceLocation AssumptionLoc, llvm::Value *Alignment, llvm::Value *OffsetValue, llvm::Value *TheCheck, llvm::Instruction *Assumption)
One of these records is kept for each identifier that is lexed.
VlaSizePair getVLAElements1D(const VariableArrayType *vla)
Return the number of elements for a single dimension for the given array type.
CGBlockInfo * FirstBlockInfo
FirstBlockInfo - The head of a singly-linked-list of block layouts.
void EmitFunctionEpilog(const CGFunctionInfo &FI, bool EmitRetDbgLoc, SourceLocation EndLoc)
EmitFunctionEpilog - Emit the target specific LLVM code to return the given temporary.
Definition: CGCall.cpp:2828
static void CreateMultiVersionResolverReturn(CodeGenModule &CGM, llvm::Function *Resolver, CGBuilderTy &Builder, llvm::Function *FuncToReturn, bool SupportsIFunc)
bool isStr(const char(&Str)[StrLen]) const
Return true if this is the identifier for the specified string.
Indirect - Pass the argument indirectly via a hidden pointer with the specified alignment (0 indicate...
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition: ASTContext.h:168
LValue EmitLValueForLambdaField(const FieldDecl *Field)
Given that we are currently emitting a lambda, emit an l-value for one of its members.
Definition: CGExpr.cpp:3968
bool hasOneOf(SanitizerMask K) const
Check if one or more sanitizers are enabled.
Definition: Sanitizers.h:159
field_range fields() const
Definition: Decl.h:3963
Represents a member of a struct/union/class.
Definition: Decl.h:2729
bool usesFPIntrin() const
Indicates the function uses Floating Point constrained intrinsics.
Definition: Decl.h:2237
bool Zero(InterpState &S, CodePtr OpPC)
Definition: Interp.h:812
SanitizerMask Mask
Bitmask of enabled sanitizers.
Definition: Sanitizers.h:174
__DEVICE__ int max(int __a, int __b)
Keeps track of the various options that can be enabled, which controls the dialect of C or C++ that i...
Definition: LangOptions.h:53
void InitTempAlloca(Address Alloca, llvm::Value *Value)
InitTempAlloca - Provide an initial value for the given alloca which will be observable at all locati...
Definition: CGExpr.cpp:126
void disableSanitizerForInstruction(llvm::Instruction *I)
bool AlwaysEmitXRayTypedEvents() const
AlwaysEmitXRayTypedEvents - Return true if clang must unconditionally emit XRay typed event handling ...
ArrayRef< ParmVarDecl * > parameters() const
Definition: Decl.h:2399
Address CreateIRTemp(QualType T, const Twine &Name="tmp")
CreateIRTemp - Create a temporary IR object of the given type, with appropriate alignment.
Definition: CGExpr.cpp:134
virtual void EmitInstanceFunctionProlog(CodeGenFunction &CGF)=0
Emit the ABI-specific prolog for the function.
Is determined by runtime environment, corresponds to "round.dynamic".
Definition: LangOptions.h:208
CGCUDARuntime & getCUDARuntime()
Return a reference to the configured CUDA runtime.
static bool hasScalarEvaluationKind(QualType T)
Stmt * getBody(const FunctionDecl *&Definition) const
Retrieve the body (definition) of the function.
Definition: Decl.cpp:2894
Address CreateElementBitCast(Address Addr, llvm::Type *Ty, const llvm::Twine &Name="")
Cast the element type of the given address to a different type, preserving information like the align...
Definition: CGBuilder.h:156
CharUnits - This is an opaque type for sizes expressed in character units.
Definition: CharUnits.h:38
APValue Val
Val - This is the value the expression can be folded to.
Definition: Expr.h:588
bool isTriviallyCopyableType(const ASTContext &Context) const
Return true if this is a trivially copyable type (C++0x [basic.types]p9)
Definition: Type.cpp:2289
bool AlwaysEmitXRayCustomEvents() const
AlwaysEmitXRayCustomEvents - Return true if we must unconditionally emit XRay custom event handling c...
bool isOne() const
isOne - Test whether the quantity equals one.
Definition: CharUnits.h:119
TBAAAccessInfo getTBAAAccessInfo(QualType AccessType)
getTBAAAccessInfo - Get TBAA information that describes an access to an object of the given type...
CharUnits getAlignment() const
Return the alignment of this pointer.
Definition: Address.h:66
child_range children()
Definition: Stmt.cpp:224
virtual bool hasMostDerivedReturn(GlobalDecl GD) const
Definition: CGCXXABI.h:108
unsigned getInAllocaFieldIndex() const
A builtin binary operation expression such as "x + y" or "x <= y".
Definition: Expr.h:3434
bool isVolatileQualified() const
Determine whether this type is volatile-qualified.
Definition: Type.h:6326
The l-value was considered opaque, so the alignment was determined from a type, but that type was an ...
CXXCtorType getCtorType() const
Definition: GlobalDecl.h:79
llvm::CallInst * CreateMemCpy(Address Dest, Address Src, llvm::Value *Size, bool IsVolatile=false)
Definition: CGBuilder.h:274
bool containsOnlyLifetimeMarkers(stable_iterator Old) const
Definition: CGCleanup.cpp:141
bool isLambda() const
Determine whether this class describes a lambda function object.
Definition: DeclCXX.h:960
Values of this type can never be null.
Expr * getSizeExpr() const
Definition: Type.h:3058
const Type * getTypePtr() const
Retrieves a pointer to the underlying (unqualified) type.
Definition: Type.h:6256
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
void EmitIgnoredExpr(const Expr *E)
EmitIgnoredExpr - Emit an expression in a context which ignores the result.
Definition: CGExpr.cpp:182
void assignRegionCounters(GlobalDecl GD, llvm::Function *Fn)
Assign counters to regions and configure them for PGO of a given function.
Definition: CodeGenPGO.cpp:760
GlobalDecl CurGD
CurGD - The GlobalDecl for the current function being compiled.
bool isInstance() const
Definition: DeclCXX.h:1959
static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts=false)
ContainsLabel - Return true if the statement contains a label in it.
bool isAlignmentRequired(const Type *T) const
Determine if the alignment the type has was required using an alignment attribute.
llvm::SanitizerStatReport & getSanStats()
llvm::DebugLoc EmitReturnBlock()
Emit the unified return block, trying to avoid its emission when possible.
bool isLambdaCallOperator(const CXXMethodDecl *MD)
Definition: ASTLambda.h:27
void incrementProfileCounter(const Stmt *S, llvm::Value *StepV=nullptr)
Increment the profiler&#39;s counter for the given statement by StepV.
uint64_t getCurrentProfileCount()
Get the profiler&#39;s current count.
unsigned getBuiltinID(bool ConsiderWrapperFunctions=false) const
Returns a value indicating whether this function corresponds to a builtin function.
Definition: Decl.cpp:3173
Address NormalCleanupDest
i32s containing the indexes of the cleanup destinations.
virtual void emitDeviceStub(CodeGenFunction &CGF, FunctionArgList &Args)=0
Emits a kernel launch stub.
llvm::Value * EmitCheckValue(llvm::Value *V)
Convert a value into a format suitable for passing to a runtime sanitizer handler.
Definition: CGExpr.cpp:2851
Expr * IgnoreImpCasts() LLVM_READONLY
Skip past any implicit casts which might surround this expression until reaching a fixed point...
Definition: Expr.cpp:2975
Checking the &#39;this&#39; pointer for a call to a non-static member function.
static ImplicitParamDecl * Create(ASTContext &C, DeclContext *DC, SourceLocation IdLoc, IdentifierInfo *Id, QualType T, ImplicitParamKind ParamKind)
Create implicit parameter.
Definition: Decl.cpp:4685
void InsertHelper(llvm::Instruction *I) const
Function called by the CodeGenFunction when an instruction is created.
Definition: CGLoopInfo.cpp:765
Rounding toward +Inf, corresponds to "round.upward".
Definition: LangOptions.h:204
bool hasAttr() const
Definition: DeclBase.h:542
ConditionalOperator - The ?: ternary operator.
Definition: Expr.h:3732
CanQualType getReturnType() const
bool isValid() const
Definition: Address.h:35
static CharUnits One()
One - Construct a CharUnits quantity of one.
Definition: CharUnits.h:58
Contains information gathered from parsing the contents of TargetAttr.
Definition: Attr.h:333
CompoundStmt - This represents a group of statements like { stmt stmt }.
Definition: Stmt.h:1332
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
Definition: Type.cpp:1690
static llvm::fp::ExceptionBehavior ToConstrainedExceptMD(LangOptions::FPExceptionModeKind Kind)
const TargetCodeGenInfo & getTargetCodeGenInfo()
OverloadedOperatorKind getOverloadedOperator() const
getOverloadedOperator - Which C++ overloaded operator this function represents, if any...
Definition: Decl.cpp:3501
RValue - This trivial value class is used to represent the result of an expression that is evaluated...
Definition: CGValue.h:39
void EmitSanitizerStatReport(llvm::SanitizerStatKind SSK)
static llvm::fp::RoundingMode ToConstrainedRoundingMD(LangOptions::FPRoundingModeKind Kind)
static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB)
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition: CharUnits.h:179
llvm::Value * DecodeAddrUsedInPrologue(llvm::Value *F, llvm::Value *EncodedAddr)
Decode an address used in a function prologue, encoded by EncodeAddrForUseInPrologue.
Address CreateDefaultAlignTempAlloca(llvm::Type *Ty, const Twine &Name="tmp")
CreateDefaultAlignedTempAlloca - This creates an alloca with the default ABI alignment of the given L...
Definition: CGExpr.cpp:119
virtual bool HasThisReturn(GlobalDecl GD) const
Returns true if the given constructor or destructor is one of the kinds that the ABI says returns &#39;th...
Definition: CGCXXABI.h:106
unsigned Offset
Definition: Format.cpp:1827
static TypeEvaluationKind getEvaluationKind(QualType T)
getEvaluationKind - Return the TypeEvaluationKind of QualType T.
llvm::Constant * EmitAnnotationUnit(SourceLocation Loc)
Emit the annotation&#39;s translation unit.
llvm::BasicBlock * EHResumeBlock
EHResumeBlock - Unified block containing a call to llvm.eh.resume.
bool isMultiVersion() const
True if this function is considered a multiversioned function.
Definition: Decl.h:2351
This represents one expression.
Definition: Expr.h:108
bool isDefaulted() const
Whether this function is defaulted per C++0x.
Definition: Decl.h:2130
static Address invalid()
Definition: Address.h:34
Address getAddress(CodeGenFunction &CGF) const
Definition: CGValue.h:327
bool isObjCRetainableType() const
Definition: Type.cpp:4060
#define V(N, I)
Definition: ASTContext.h:2941
FPExceptionModeKind
Possible floating point exception behavior.
Definition: LangOptions.h:212
static void TryMarkNoThrow(llvm::Function *F)
Tries to mark the given function nounwind based on the non-existence of any throwing calls within it...
VlaSizePair getVLASize(const VariableArrayType *vla)
Returns an LLVM value that corresponds to the size, in non-variably-sized elements, of a variable length array type, plus that largest non-variably-sized element type.
const char * getRequiredFeatures(unsigned ID) const
Definition: Builtins.h:208
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition: CharUnits.h:63
const AstTypeMatcher< ArrayType > arrayType
Matches all kinds of arrays.
llvm::LLVMContext & getLLVMContext()
llvm::BasicBlock * GetIndirectGotoBlock()
void GenOpenCLArgMetadata(llvm::Function *FN, const FunctionDecl *FD=nullptr, CodeGenFunction *CGF=nullptr)
OpenCL v1.2 s5.6.4.6 allows the compiler to store kernel argument information in the program executab...
bool isSignedIntegerType() const
Return true if this is an integer type that is signed, according to C99 6.2.5p4 [char, signed char, short, int, long..], or an enum decl which has a signed representation.
Definition: Type.cpp:1928
QualType getType() const
Definition: Expr.h:137
void EmitConstructorBody(FunctionArgList &Args)
EmitConstructorBody - Emits the body of the current constructor.
Definition: CGClass.cpp:818
void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, llvm::Value *V, QualType Type, CharUnits Alignment=CharUnits::Zero(), SanitizerSet SkippedChecks=SanitizerSet(), llvm::Value *ArraySize=nullptr)
Emit a check that V is the address of storage of the appropriate size and alignment for an object of ...
Definition: CGExpr.cpp:653
CharUnits alignmentOfArrayElement(CharUnits elementSize) const
Given that this is the alignment of the first element of an array, return the minimum alignment of an...
Definition: CharUnits.h:201
llvm::Constant * EmitAnnotationString(StringRef Str)
Emit an annotation string.
LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T)
llvm::CallInst * CreateMemSet(Address Dest, llvm::Value *Value, llvm::Value *Size, bool IsVolatile=false)
Definition: CGBuilder.h:296
SourceLocation getEnd() const
QualType getFunctionTypeWithExceptionSpec(QualType Orig, const FunctionProtoType::ExceptionSpecInfo &ESI)
Get a function type and produce the equivalent function type with the specified exception specificati...
An object which temporarily prevents a value from being destroyed by aggressive peephole optimization...
UnaryOperator - This represents the unary-expression&#39;s (except sizeof and alignof), the postinc/postdec operators from postfix-expression, and various extensions.
Definition: Expr.h:2046
QualType getFunctionType(QualType ResultTy, ArrayRef< QualType > Args, const FunctionProtoType::ExtProtoInfo &EPI) const
Return a normal function type with a typed argument list.
Definition: ASTContext.h:1401
ValueDecl * getDecl()
Definition: Expr.h:1247
const LangOptions & getLangOpts() const
ASTContext & getContext() const
virtual void startNewFunction()
Definition: Mangle.h:75
CallingConv
CallingConv - Specifies the calling convention that a function uses.
Definition: Specifiers.h:265
GlobalDecl - represents a global declaration.
Definition: GlobalDecl.h:40
The l-value was considered opaque, so the alignment was determined from a type.
void PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize, std::initializer_list< llvm::Value **> ValuesToReload={})
Takes the old cleanup stack size and emits the cleanup blocks that have been added.
Definition: CGCleanup.cpp:417
void set(SanitizerMask K, bool Value)
Enable or disable a certain (single) sanitizer.
Definition: Sanitizers.h:162
bool SawAsmBlock
Whether we processed a Microsoft-style asm block during CodeGen.
Address CreateBitCast(Address Addr, llvm::Type *Ty, const llvm::Twine &Name="")
Definition: CGBuilder.h:141
Kind
QualType getCanonicalType() const
Definition: Type.h:6295
Encodes a location in the source.
void EnsureInsertPoint()
EnsureInsertPoint - Ensure that an insertion point is defined so that emitted IR has a place to go...
llvm::APSInt APSInt
ParsedTargetAttr filterFunctionTargetAttrs(const TargetAttr *TD) const
Parses the target attributes passed in, and returns only the ones that are valid feature names...
QualType getSingleStepDesugaredType(const ASTContext &Context) const
Return the specified type with one level of "sugar" removed from the type.
Definition: Type.h:967
llvm::Value * EvaluateExprAsBool(const Expr *E)
EvaluateExprAsBool - Perform the usual unary conversions on the specified expression and compare the ...
Definition: CGExpr.cpp:164
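In source terms, the comparison it performs is the ordinary C/C++ conversion to bool, for example:

  void example(int *p, double d) {
    if (p) { /* emitted as a compare: p != nullptr */ }
    if (d) { /* emitted as a compare: d != 0.0     */ }
  }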
bool isVariablyModifiedType() const
Whether this type is a variably-modified type (C99 6.7.5).
Definition: Type.h:2166
Rounding to nearest, corresponds to "round.tonearest".
Definition: LangOptions.h:200
bool inheritingCtorHasParams(const InheritedConstructor &Inherited, CXXCtorType Type)
Determine if a C++ inheriting constructor should have parameters matching those of its inherited cons...
Definition: CGCall.cpp:296
CharUnits getNaturalPointeeTypeAlignment(QualType T, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
QualType getElementType() const
Definition: Type.h:3270
const Decl * getDecl() const
Definition: GlobalDecl.h:77
Represents the declaration of a label.
Definition: Decl.h:451
ParsedAttr - Represents a syntactic attribute.
Definition: ParsedAttr.h:117
A scoped helper to set the current debug location to the specified location or preferred location of ...
Definition: CGDebugInfo.h:737
void InsertHelper(llvm::Instruction *I, const llvm::Twine &Name, llvm::BasicBlock *BB, llvm::BasicBlock::iterator InsertPt) const
This forwards to CodeGenFunction::InsertHelper.
QualType getBaseElementType(const ArrayType *VAT) const
Return the innermost element type of an array type.
Represents a static or instance method of a struct/union/class.
Definition: DeclCXX.h:1931
void EmitStmt(const Stmt *S, ArrayRef< const Attr *> Attrs=None)
EmitStmt - Emit the code for the statement.
Definition: CGStmt.cpp:45
SanitizerSet SanOpts
Sanitizers enabled for this function.
constexpr XRayInstrMask Custom
Definition: XRayInstr.h:39
llvm::Constant * GetAddrOfRTTIDescriptor(QualType Ty, bool ForEH=false)
Get the address of the RTTI descriptor for the given type.
const ArrayType * getAsArrayType(QualType T) const
Type Query functions.
Rounding toward -Inf, corresponds to "round.downward".
Definition: LangOptions.h:202
An aligned address.
Definition: Address.h:24
llvm::APInt APInt
Definition: Integral.h:27
void StartFunction(GlobalDecl GD, QualType RetTy, llvm::Function *Fn, const CGFunctionInfo &FnInfo, const FunctionArgList &Args, SourceLocation Loc=SourceLocation(), SourceLocation StartLoc=SourceLocation())
Emit code for the start of a function.
Address ReturnValuePointer
ReturnValuePointer - The temporary alloca to hold a pointer to sret.
XRayInstrSet XRayInstrumentationBundle
Set of XRay instrumentation kinds to emit.
TypeClass getTypeClass() const
Definition: Type.h:1876
MangleContext & getMangleContext()
Gets the mangle context.
Definition: CGCXXABI.h:96
void SetFPModel()
SetFPModel - Control floating point behavior via fp-model settings.
JumpDest getJumpDestInCurrentScope(llvm::BasicBlock *Target)
The given basic block lies in the current EH scope, but may be a target of a potentially scope-crossi...
llvm::Constant * EmitAnnotationLineNo(SourceLocation L)
Emit the annotation line number.
InAlloca - Pass the argument directly using the LLVM inalloca attribute.
CharUnits getNaturalTypeAlignment(QualType T, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, bool forPointeeType=false)
bool hasImplicitReturnZero() const
Whether falling off this function implicitly returns null/zero.
Definition: Decl.h:2158
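The canonical case is main, where falling off the end is defined to return 0 (C99 5.1.2.2.3, C++ [basic.start.main]):

  int main() {
    // no return statement: the emitted function still returns 0
  }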
Address EmitMSVAListRef(const Expr *E)
Emit a "reference" to a __builtin_ms_va_list; this is always the value of the expression, because a __builtin_ms_va_list is a pointer to a char.
const CGFunctionInfo * CurFnInfo
void EmitStartEHSpec(const Decl *D)
EmitStartEHSpec - Emit the start of the exception spec.
void FinishFunction(SourceLocation EndLoc=SourceLocation())
FinishFunction - Complete IR generation of the current function.
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition: Expr.cpp:224
This is an IRBuilder insertion helper that forwards to CodeGenFunction::InsertHelper, which adds necessary metadata to instructions.
Definition: CGBuilder.h:25
Address EmitVAListRef(const Expr *E)
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type, returning the result.
FunctionArgList - Type for representing both the decl and type of parameters to a function...
Definition: CGCall.h:355
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
Definition: CGValue.h:59
void ErrorUnsupported(const Stmt *S, const char *Type)
Print out an error that codegen doesn't support the specified stmt yet.
CGFunctionInfo - Class to encapsulate the information about a function definition.
This class organizes the cross-function state that is used while generating LLVM code.
CGOpenMPRuntime & getOpenMPRuntime()
Return a reference to the configured OpenMP runtime.
void EmitDeclRefExprDbgValue(const DeclRefExpr *E, const APValue &Init)
static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType, Address dest, Address src, llvm::Value *sizeInChars)
emitNonZeroVLAInit - Emit the "zero" initialization of a variable-length array whose elements have a ...
llvm::Value * EmitAnnotationCall(llvm::Function *AnnotationFn, llvm::Value *AnnotatedVal, StringRef AnnotationStr, SourceLocation Location)
Emit an annotation call (intrinsic).
Dataflow Directional Tag Classes.
static ApplyDebugLocation CreateDefaultArtificial(CodeGenFunction &CGF, SourceLocation TemporaryLocation)
Apply TemporaryLocation if it is valid.
Definition: CGDebugInfo.h:776
Assume that floating-point exceptions are masked.
Definition: LangOptions.h:214
Address EmitFieldAnnotations(const FieldDecl *D, Address V)
Emit field annotations for the given field & value.
EvalResult is a struct with detailed info about an evaluated expression.
Definition: Expr.h:586
static bool matchesStlAllocatorFn(const Decl *D, const ASTContext &Ctx)
void Init(const Stmt *Body)
Clear the object and pre-process for the given statement, usually function body statement.
llvm::LoadInst * CreateAlignedLoad(llvm::Value *Addr, CharUnits Align, const llvm::Twine &Name="")
Definition: CGBuilder.h:90
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
Definition: CGBuilder.h:69
FunctionDecl * getTemplateInstantiationPattern() const
Retrieve the function declaration from which this function could be instantiated, if it is an instant...
Definition: Decl.cpp:3612
StmtClass getStmtClass() const
Definition: Stmt.h:1109
void EmitFunctionBody(const Stmt *Body)
const CXXRecordDecl * getParent() const
Return the parent of this method declaration, which is the class in which this method is defined...
Definition: DeclCXX.h:2046
Decl * getNonClosureContext()
Find the innermost non-closure ancestor of this declaration, walking up through blocks, lambdas, etc.
Definition: DeclBase.cpp:994
bool isMSVCRTEntryPoint() const
Determines whether this function is a MSVCRT user defined entry point.
Definition: Decl.cpp:2934
void buildThisParam(CodeGenFunction &CGF, FunctionArgList &Params)
Build a parameter variable suitable for 'this'.
Definition: CGCXXABI.cpp:122
llvm::Constant * EmitNullConstant(QualType T)
Return the result of value-initializing the given type, i.e.
llvm::Function * getIntrinsic(unsigned IID, ArrayRef< llvm::Type *> Tys=None)
llvm::SmallVector< char, 256 > LifetimeExtendedCleanupStack
bool has(XRayInstrMask K) const
Definition: XRayInstr.h:46
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Definition: CGBuilder.h:107
llvm::Module & getModule() const
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
bool hasProfileClangInstr() const
Check if Clang profile instrumentation is on.
JumpDest ReturnBlock
ReturnBlock - Unified return block.
Rounding toward zero, corresponds to "round.towardzero".
Definition: LangOptions.h:206
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of structs/unions/cl...
Definition: Type.h:4495
void EmitBlockWithFallThrough(llvm::BasicBlock *BB, const Stmt *S)
When instrumenting to collect profile data, the counts for some blocks such as switch cases need to n...
llvm::DebugLoc SourceLocToDebugLoc(SourceLocation Location)
Converts Location to a DebugLoc, if debug information is enabled.
CodeGenTypes & getTypes() const
CharUnits getIndirectAlign() const
T * getAttr() const
Definition: DeclBase.h:538
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition: Address.h:51
llvm::AssertingVH< llvm::Instruction > AllocaInsertPt
AllocaInsertPoint - This is an instruction in the entry block before which we prefer to insert alloca...
ExtVectorType - Extended vector type.
Definition: Type.h:3354
QualType BuildFunctionArgList(GlobalDecl GD, FunctionArgList &Args)
CodeGenOptions - Track various options which control how the code is optimized and passed to the back...
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
Definition: CGStmt.cpp:473
bool hasReducedDebugInfo() const
Check if type and variable info should be emitted.
void checkTargetFeatures(const CallExpr *E, const FunctionDecl *TargetDecl)
Optional< NullabilityKind > getNullability(const ASTContext &context) const
Determine the nullability of the given type.
Definition: Type.cpp:3828
APValue - This class implements a discriminated union of [uninitialized] [APSInt] [APFloat]...
Definition: APValue.h:115
void getCaptureFields(llvm::DenseMap< const VarDecl *, FieldDecl *> &Captures, FieldDecl *&ThisCapture) const
For a closure type, retrieve the mapping from captured variables and this to the non-static data memb...
Definition: DeclCXX.cpp:1454
bool isIncompleteType(NamedDecl **Def=nullptr) const
Types are partitioned into 3 broad categories (C99 6.2.5p1): object types, function types...
Definition: Type.cpp:2115
llvm::ConstantInt * getSize(CharUnits numChars)
Emit the given number of characters as a value of type size_t.
const Decl * CurFuncDecl
CurFuncDecl - Holds the Decl for the current outermost non-closure context.
llvm::iterator_range< specific_attr_iterator< T > > specific_attrs() const
Definition: DeclBase.h:524
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
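A sketch of the conversion, assuming the common 8-bit char (the real query uses the target's char width):

  #include <cstdint>

  static int64_t toCharUnitsFromBitsSketch(int64_t bitSize) {
    const int64_t charWidth = 8; // assumption: 8-bit char; targets may differ
    return bitSize / charWidth;  // e.g. 32 bits -> 4 characters
  }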
Defines the C++ Decl subclasses, other than those for templates (found in DeclTemplate.h) and friends (in DeclFriend.h).
bool EvaluateAsInt(EvalResult &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsInt - Return true if this is a constant which we can fold and convert to an integer...
void unprotectFromPeepholes(PeepholeProtection protection)
void ErrorUnsupported(const Stmt *S, const char *Type)
ErrorUnsupported - Print out an error that codegen doesn't support the specified stmt yet...
bool hasUnaligned() const
Definition: Type.h:299
Represents a C++ struct/union/class.
Definition: DeclCXX.h:253
void EmitBranch(llvm::BasicBlock *Block)
EmitBranch - Emit a branch to the specified basic block from the current insert block, taking care to avoid creation of branches from dummy blocks.
Definition: CGStmt.cpp:493
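A rough sketch of the pattern this describes, as a hypothetical helper over the public LLVM IRBuilder API (not the clang code):

  #include "llvm/IR/IRBuilder.h"

  static void emitBranchSketch(llvm::IRBuilder<> &Builder,
                               llvm::BasicBlock *Target) {
    llvm::BasicBlock *Cur = Builder.GetInsertBlock();
    if (!Cur || Cur->getTerminator())
      return;                 // no live insert block, or already terminated
    Builder.CreateBr(Target); // unconditional fall-through branch
  }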
bool isVoidType() const
Definition: Type.h:6777
llvm::Type * ConvertType(QualType T)
void EmitFunctionProlog(const CGFunctionInfo &FI, llvm::Function *Fn, const FunctionArgList &Args)
EmitFunctionProlog - Emit the target specific LLVM code to load the arguments for the given function...
Definition: CGCall.cpp:2257
Qualifiers getQualifiers() const
Retrieve the set of qualifiers applied to this type.
Definition: Type.h:6283
static llvm::Constant * getPrologueSignature(CodeGenModule &CGM, const FunctionDecl *FD)
Return the UBSan prologue signature for FD if one is available.
LambdaCaptureDefault getLambdaCaptureDefault() const
Definition: DeclCXX.h:996
LValue EmitLValue(const Expr *E)
EmitLValue - Emit code to compute a designator that specifies the location of the expression...
Definition: CGExpr.cpp:1246
Address ReturnValue
ReturnValue - The temporary alloca to hold the return value.
Builtin::Context & BuiltinInfo
Definition: ASTContext.h:582
void getFunctionFeatureMap(llvm::StringMap< bool > &FeatureMap, const FunctionDecl *) const
bool AutoreleaseResult
In ARC, whether we should autorelease the return value.
RValue EmitLoadOfLValue(LValue V, SourceLocation Loc)
EmitLoadOfLValue - Given an expression that represents a value lvalue, this method emits the address ...
Definition: CGExpr.cpp:1777
bool has(SanitizerMask K) const
Check if a certain (single) sanitizer is enabled.
Definition: Sanitizers.h:153
Defines the clang::TargetInfo interface.
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition: Expr.h:2546
void EmitMultiVersionResolver(llvm::Function *Resolver, ArrayRef< MultiVersionResolverOption > Options)
CGCXXABI & getCXXABI() const
const VariableArrayType * getAsVariableArrayType(QualType T) const
Definition: ASTContext.h:2469
A reference to a declared variable, function, enum, etc.
Definition: Expr.h:1171
static bool shouldUseUndefinedBehaviorReturnOptimization(const FunctionDecl *FD, const ASTContext &Context)
void EmitDestructorBody(FunctionArgList &Args)
EmitDestructorBody - Emits the body of the current destructor.
Definition: CGClass.cpp:1420
bool isPointerType() const
Definition: Type.h:6504
This structure provides a set of types that are commonly used during IR emission. ...
void EmitEndEHSpec(const Decl *D)
EmitEndEHSpec - Emit the end of the exception spec.
struct clang::CodeGen::CodeGenFunction::MultiVersionResolverOption::Conds Conditions
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
llvm::Constant * EmitCheckSourceLocation(SourceLocation Loc)
Emit a description of a source location in a format suitable for passing to a runtime sanitizer handl...
Definition: CGExpr.cpp:2889
bool Null(InterpState &S, CodePtr OpPC)
Definition: Interp.h:818
Address EmitCompoundStmtWithoutScope(const CompoundStmt &S, bool GetLast=false, AggValueSlot AVS=AggValueSlot::ignored())
Definition: CGStmt.cpp:399
void EmitAlignmentAssumption(llvm::Value *PtrValue, QualType Ty, SourceLocation Loc, SourceLocation AssumptionLoc, llvm::Value *Alignment, llvm::Value *OffsetValue=nullptr)
QualType getType() const
Definition: Decl.h:630
A trivial tuple used to represent a source range.
LValue - This represents an lvalue reference.
Definition: CGValue.h:167
llvm::BlockAddress * GetAddrOfLabel(const LabelDecl *L)
SourceLocation getBeginLoc() const LLVM_READONLY
Definition: Expr.cpp:1531
void EmitVarAnnotations(const VarDecl *D, llvm::Value *V)
Emit local annotations for the local variable V, declared by D.
Represents a C array with a specified size that is not an integer-constant-expression.
Definition: Type.h:3039
Strictly preserve the floating-point exception semantics.
Definition: LangOptions.h:218
SanitizerMetadata * getSanitizerMetadata()
bool CurFuncIsThunk
In C++, whether we are code generating a thunk.
APSInt & getInt()
Definition: APValue.h:380
const LangOptions & getLangOpts() const
Address CreatePointerBitCastOrAddrSpaceCast(Address Addr, llvm::Type *Ty, const llvm::Twine &Name="")
Definition: CGBuilder.h:163
llvm::Value * emitArrayLength(const ArrayType *arrayType, QualType &baseType, Address &addr)
emitArrayLength - Compute the length of an array, even if it's a VLA, and drill down to the base elem...
static LValue MakeAddr(Address address, QualType type, ASTContext &Context, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Definition: CGValue.h:369
static bool containsBreak(const Stmt *S)
containsBreak - Return true if the statement contains a break out of it.
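A sketch of the recursion this implies, assuming the public clang AST API (a break inside a nested loop or switch targets that construct, so those children are not descended into):

  #include "clang/AST/Stmt.h"
  #include "llvm/Support/Casting.h"

  static bool containsBreakSketch(const clang::Stmt *S) {
    using namespace clang;
    if (!S)
      return false;
    // A nested loop or switch owns any break inside it.
    if (llvm::isa<SwitchStmt>(S) || llvm::isa<WhileStmt>(S) ||
        llvm::isa<DoStmt>(S) || llvm::isa<ForStmt>(S))
      return false;
    if (llvm::isa<BreakStmt>(S))
      return true;
    for (const Stmt *Child : S->children())
      if (containsBreakSketch(Child))
        return true;
    return false;
  }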
SourceLocation getBegin() const
bool isZeroInitializable(QualType T)
IsZeroInitializable - Return whether a type can be zero-initialized (in the C++ sense) with an LLVM z...
Defines enum values for all the target-independent builtin functions.
void EmitCXXThrowExpr(const CXXThrowExpr *E, bool KeepInsertionPoint=true)
bool isScalar() const
Definition: CGValue.h:52
Attr - This represents one attribute.
Definition: Attr.h:45
SourceLocation getLocation() const
Definition: DeclBase.h:429
void EmitNullInitialization(Address DestPtr, QualType Ty)
EmitNullInitialization - Generate code to set a value of the given type to null. If the type contains...
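In source terms, the emitted initialization corresponds to C++ value-initialization / C zero-initialization, for example:

  struct Point { int x; int y; double w; };

  Point makeOrigin() {
    Point p = {};  // every member set to its null value
    return p;
  }
  // Note: "null" is not always an all-zero bit pattern; e.g. a null
  // pointer-to-data-member is -1 under the Itanium C++ ABI.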
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point...
Definition: Expr.cpp:2991
Transformations do not cause new exceptions but may hide some.
Definition: LangOptions.h:216
CanQualType getSizeType() const
Return the unique type for "size_t" (C99 7.17), defined in <stddef.h>.