21 #include "llvm/ADT/DenseMap.h" 22 #include "llvm/IR/DataLayout.h" 23 #include "llvm/IR/Intrinsics.h" 24 #include "llvm/IR/Operator.h" 26 using namespace clang;
27 using namespace CodeGen;
34 uint64_t AtomicSizeInBits;
35 uint64_t ValueSizeInBits;
45 : CGF(CGF), AtomicSizeInBits(0), ValueSizeInBits(0),
52 ValueTy = ATy->getValueType();
57 uint64_t ValueAlignInBits;
58 uint64_t AtomicAlignInBits;
59 TypeInfo ValueTI = C.getTypeInfo(ValueTy);
60 ValueSizeInBits = ValueTI.
Width;
61 ValueAlignInBits = ValueTI.
Align;
63 TypeInfo AtomicTI = C.getTypeInfo(AtomicTy);
64 AtomicSizeInBits = AtomicTI.
Width;
65 AtomicAlignInBits = AtomicTI.
Align;
67 assert(ValueSizeInBits <= AtomicSizeInBits);
68 assert(ValueAlignInBits <= AtomicAlignInBits);
70 AtomicAlign = C.toCharUnitsFromBits(AtomicAlignInBits);
71 ValueAlign = C.toCharUnitsFromBits(ValueAlignInBits);
78 ValueSizeInBits = C.getTypeSize(ValueTy);
81 AtomicSizeInBits = C.toBits(
82 C.toCharUnitsFromBits(
Offset + OrigBFI.Size + C.getCharWidth() - 1)
86 (C.toCharUnitsFromBits(OrigBFI.Offset) / lvalue.
getAlignment()) *
88 VoidPtrAddr = CGF.
Builder.CreateConstGEP1_64(
89 VoidPtrAddr, OffsetInChars.getQuantity());
92 CGF.
Builder.getIntNTy(AtomicSizeInBits)->getPointerTo(),
93 "atomic_bitfield_base");
101 AtomicTy = C.getIntTypeForBitwidth(AtomicSizeInBits, OrigBFI.IsSigned);
102 if (AtomicTy.isNull()) {
105 C.toCharUnitsFromBits(AtomicSizeInBits).getQuantity());
112 ValueSizeInBits = C.getTypeSize(ValueTy);
114 AtomicSizeInBits = C.getTypeSize(AtomicTy);
120 ValueSizeInBits = C.getTypeSize(ValueTy);
124 AtomicSizeInBits = C.getTypeSize(AtomicTy);
128 UseLibcall = !C.getTargetInfo().hasBuiltinAtomic(
132 QualType getAtomicType()
const {
return AtomicTy; }
133 QualType getValueType()
const {
return ValueTy; }
134 CharUnits getAtomicAlignment()
const {
return AtomicAlign; }
135 CharUnits getValueAlignment()
const {
return ValueAlign; }
136 uint64_t getAtomicSizeInBits()
const {
return AtomicSizeInBits; }
137 uint64_t getValueSizeInBits()
const {
return ValueSizeInBits; }
139 bool shouldUseLibcall()
const {
return UseLibcall; }
140 const LValue &getAtomicLValue()
const {
return LVal; }
151 Address getAtomicAddress()
const {
152 return Address(getAtomicPointer(), getAtomicAlignment());
155 Address getAtomicAddressAsAtomicIntPointer()
const {
156 return emitCastToAtomicIntPointer(getAtomicAddress());
165 bool hasPadding()
const {
166 return (ValueSizeInBits != AtomicSizeInBits);
169 bool emitMemSetZeroIfNecessary()
const;
197 void emitCopyIntoMemory(
RValue rvalue)
const;
200 LValue projectValue()
const {
202 Address addr = getAtomicAddress();
213 bool AsValue, llvm::AtomicOrdering AO,
224 std::pair<RValue, llvm::Value *>
226 llvm::AtomicOrdering Success =
227 llvm::AtomicOrdering::SequentiallyConsistent,
228 llvm::AtomicOrdering Failure =
229 llvm::AtomicOrdering::SequentiallyConsistent,
230 bool IsWeak =
false);
235 void EmitAtomicUpdate(llvm::AtomicOrdering AO,
240 void EmitAtomicUpdate(llvm::AtomicOrdering AO,
RValue UpdateRVal,
247 Address CreateTempAlloca()
const;
253 void EmitAtomicLoadLibcall(
llvm::Value *AddForLoaded,
254 llvm::AtomicOrdering AO,
bool IsVolatile);
256 llvm::Value *EmitAtomicLoadOp(llvm::AtomicOrdering AO,
bool IsVolatile);
260 llvm::AtomicOrdering Success =
261 llvm::AtomicOrdering::SequentiallyConsistent,
262 llvm::AtomicOrdering Failure =
263 llvm::AtomicOrdering::SequentiallyConsistent);
265 std::pair<llvm::Value *, llvm::Value *> EmitAtomicCompareExchangeOp(
267 llvm::AtomicOrdering Success =
268 llvm::AtomicOrdering::SequentiallyConsistent,
269 llvm::AtomicOrdering Failure =
270 llvm::AtomicOrdering::SequentiallyConsistent,
271 bool IsWeak =
false);
274 EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
278 void EmitAtomicUpdateOp(llvm::AtomicOrdering AO,
282 void EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
RValue UpdateRVal,
285 void EmitAtomicUpdateOp(llvm::AtomicOrdering AO,
RValue UpdateRal,
290 Address AtomicInfo::CreateTempAlloca()
const {
292 (LVal.
isBitField() && ValueSizeInBits > AtomicSizeInBits) ? ValueTy
294 getAtomicAlignment(),
299 TempAlloca, getAtomicAddress().getType());
317 uint64_t expectedSize) {
318 return (CGM.
getDataLayout().getTypeStoreSize(type) * 8 == expectedSize);
326 if (hasPadding())
return true;
329 switch (getEvaluationKind()) {
336 AtomicSizeInBits / 2);
342 llvm_unreachable(
"bad evaluation kind");
345 bool AtomicInfo::emitMemSetZeroIfNecessary()
const {
348 if (!requiresMemSetZero(addr->getType()->getPointerElementType()))
352 addr, llvm::ConstantInt::get(CGF.
Int8Ty, 0),
362 llvm::AtomicOrdering SuccessOrder,
363 llvm::AtomicOrdering FailureOrder,
369 llvm::AtomicCmpXchgInst *Pair = CGF.
Builder.CreateAtomicCmpXchg(
370 Ptr.
getPointer(), Expected, Desired, SuccessOrder, FailureOrder,
373 Pair->setWeak(IsWeak);
382 llvm::BasicBlock *StoreExpectedBB =
387 llvm::BasicBlock *ContinueBB =
392 CGF.
Builder.CreateCondBr(Cmp, ContinueBB, StoreExpectedBB);
394 CGF.
Builder.SetInsertPoint(StoreExpectedBB);
398 CGF.
Builder.CreateBr(ContinueBB);
400 CGF.
Builder.SetInsertPoint(ContinueBB);
413 llvm::AtomicOrdering SuccessOrder,
415 llvm::AtomicOrdering FailureOrder;
416 if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {
417 auto FOS = FO->getSExtValue();
418 if (!llvm::isValidAtomicOrderingCABI(FOS))
419 FailureOrder = llvm::AtomicOrdering::Monotonic;
421 switch ((llvm::AtomicOrderingCABI)FOS) {
422 case llvm::AtomicOrderingCABI::relaxed:
423 case llvm::AtomicOrderingCABI::release:
424 case llvm::AtomicOrderingCABI::acq_rel:
425 FailureOrder = llvm::AtomicOrdering::Monotonic;
427 case llvm::AtomicOrderingCABI::consume:
428 case llvm::AtomicOrderingCABI::acquire:
429 FailureOrder = llvm::AtomicOrdering::Acquire;
431 case llvm::AtomicOrderingCABI::seq_cst:
432 FailureOrder = llvm::AtomicOrdering::SequentiallyConsistent;
435 if (isStrongerThan(FailureOrder, SuccessOrder)) {
439 llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(SuccessOrder);
442 FailureOrder, Scope);
447 llvm::BasicBlock *MonotonicBB =
nullptr, *AcquireBB =
nullptr,
450 if (SuccessOrder != llvm::AtomicOrdering::Monotonic &&
451 SuccessOrder != llvm::AtomicOrdering::Release)
453 if (SuccessOrder == llvm::AtomicOrdering::SequentiallyConsistent)
458 llvm::SwitchInst *SI = CGF.
Builder.CreateSwitch(FailureOrderVal, MonotonicBB);
465 CGF.
Builder.SetInsertPoint(MonotonicBB);
467 Size, SuccessOrder, llvm::AtomicOrdering::Monotonic, Scope);
471 CGF.
Builder.SetInsertPoint(AcquireBB);
473 Size, SuccessOrder, llvm::AtomicOrdering::Acquire, Scope);
475 SI->addCase(CGF.
Builder.getInt32((
int)llvm::AtomicOrderingCABI::consume),
477 SI->addCase(CGF.
Builder.getInt32((
int)llvm::AtomicOrderingCABI::acquire),
481 CGF.
Builder.SetInsertPoint(SeqCstBB);
483 llvm::AtomicOrdering::SequentiallyConsistent, Scope);
485 SI->addCase(CGF.
Builder.getInt32((
int)llvm::AtomicOrderingCABI::seq_cst),
489 CGF.
Builder.SetInsertPoint(ContBB);
495 uint64_t Size, llvm::AtomicOrdering Order,
497 llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
498 llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;
500 switch (E->
getOp()) {
501 case AtomicExpr::AO__c11_atomic_init:
502 case AtomicExpr::AO__opencl_atomic_init:
503 llvm_unreachable(
"Already handled!");
505 case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
506 case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
508 FailureOrder, Size, Order, Scope);
510 case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
511 case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
513 FailureOrder, Size, Order, Scope);
515 case AtomicExpr::AO__atomic_compare_exchange:
516 case AtomicExpr::AO__atomic_compare_exchange_n: {
517 if (llvm::ConstantInt *IsWeakC = dyn_cast<llvm::ConstantInt>(IsWeak)) {
519 Val1, Val2, FailureOrder, Size, Order, Scope);
522 llvm::BasicBlock *StrongBB =
525 llvm::BasicBlock *ContBB =
528 llvm::SwitchInst *SI = CGF.
Builder.CreateSwitch(IsWeak, WeakBB);
529 SI->addCase(CGF.
Builder.getInt1(
false), StrongBB);
531 CGF.
Builder.SetInsertPoint(StrongBB);
533 FailureOrder, Size, Order, Scope);
536 CGF.
Builder.SetInsertPoint(WeakBB);
538 FailureOrder, Size, Order, Scope);
541 CGF.
Builder.SetInsertPoint(ContBB);
545 case AtomicExpr::AO__c11_atomic_load:
546 case AtomicExpr::AO__opencl_atomic_load:
547 case AtomicExpr::AO__atomic_load_n:
548 case AtomicExpr::AO__atomic_load: {
550 Load->setAtomic(Order, Scope);
556 case AtomicExpr::AO__c11_atomic_store:
557 case AtomicExpr::AO__opencl_atomic_store:
558 case AtomicExpr::AO__atomic_store:
559 case AtomicExpr::AO__atomic_store_n: {
562 Store->setAtomic(Order, Scope);
567 case AtomicExpr::AO__c11_atomic_exchange:
568 case AtomicExpr::AO__opencl_atomic_exchange:
569 case AtomicExpr::AO__atomic_exchange_n:
570 case AtomicExpr::AO__atomic_exchange:
571 Op = llvm::AtomicRMWInst::Xchg;
574 case AtomicExpr::AO__atomic_add_fetch:
575 PostOp = llvm::Instruction::Add;
577 case AtomicExpr::AO__c11_atomic_fetch_add:
578 case AtomicExpr::AO__opencl_atomic_fetch_add:
579 case AtomicExpr::AO__atomic_fetch_add:
580 Op = llvm::AtomicRMWInst::Add;
583 case AtomicExpr::AO__atomic_sub_fetch:
584 PostOp = llvm::Instruction::Sub;
586 case AtomicExpr::AO__c11_atomic_fetch_sub:
587 case AtomicExpr::AO__opencl_atomic_fetch_sub:
588 case AtomicExpr::AO__atomic_fetch_sub:
589 Op = llvm::AtomicRMWInst::Sub;
592 case AtomicExpr::AO__opencl_atomic_fetch_min:
594 : llvm::AtomicRMWInst::UMin;
597 case AtomicExpr::AO__opencl_atomic_fetch_max:
599 : llvm::AtomicRMWInst::UMax;
602 case AtomicExpr::AO__atomic_and_fetch:
605 case AtomicExpr::AO__c11_atomic_fetch_and:
606 case AtomicExpr::AO__opencl_atomic_fetch_and:
607 case AtomicExpr::AO__atomic_fetch_and:
611 case AtomicExpr::AO__atomic_or_fetch:
612 PostOp = llvm::Instruction::Or;
614 case AtomicExpr::AO__c11_atomic_fetch_or:
615 case AtomicExpr::AO__opencl_atomic_fetch_or:
616 case AtomicExpr::AO__atomic_fetch_or:
617 Op = llvm::AtomicRMWInst::Or;
620 case AtomicExpr::AO__atomic_xor_fetch:
621 PostOp = llvm::Instruction::Xor;
623 case AtomicExpr::AO__c11_atomic_fetch_xor:
624 case AtomicExpr::AO__opencl_atomic_fetch_xor:
625 case AtomicExpr::AO__atomic_fetch_xor:
626 Op = llvm::AtomicRMWInst::Xor;
629 case AtomicExpr::AO__atomic_nand_fetch:
632 case AtomicExpr::AO__atomic_fetch_nand:
633 Op = llvm::AtomicRMWInst::Nand;
638 llvm::AtomicRMWInst *RMWI =
646 Result = CGF.
Builder.CreateBinOp(PostOp, RMWI, LoadVal1);
647 if (E->
getOp() == AtomicExpr::AO__atomic_nand_fetch)
648 Result = CGF.
Builder.CreateNot(Result);
665 uint64_t Size, llvm::AtomicOrdering Order,
672 EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
678 if (
auto SC = dyn_cast<llvm::ConstantInt>(Scope)) {
681 EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
688 auto Scopes = ScopeModel->getRuntimeValues();
689 llvm::DenseMap<unsigned, llvm::BasicBlock *> BB;
690 for (
auto S : Scopes)
693 llvm::BasicBlock *ContBB =
696 auto *SC = Builder.CreateIntCast(Scope, Builder.getInt32Ty(),
false);
699 auto FallBack = ScopeModel->getFallBackValue();
700 llvm::SwitchInst *SI = Builder.CreateSwitch(SC, BB[FallBack]);
701 for (
auto S : Scopes) {
704 SI->addCase(Builder.getInt32(S), B);
706 Builder.SetInsertPoint(B);
707 EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
711 Builder.CreateBr(ContBB);
714 Builder.SetInsertPoint(ContBB);
721 if (UseOptimizedLibcall) {
728 SizeInBits)->getPointerTo();
746 MemTy = AT->getValueType();
747 llvm::Value *IsWeak =
nullptr, *OrderFail =
nullptr;
755 std::tie(sizeChars, alignChars) = getContext().getTypeInfoInChars(AtomicTy);
757 unsigned MaxInlineWidthInBits = getTarget().getMaxAtomicInlineWidth();
758 bool UseLibcall = ((Ptr.
getAlignment() % sizeChars) != 0 ||
759 getContext().toBits(sizeChars) > MaxInlineWidthInBits);
761 if (E->
getOp() == AtomicExpr::AO__c11_atomic_init ||
762 E->
getOp() == AtomicExpr::AO__opencl_atomic_init) {
763 LValue lvalue = MakeAddrLValue(Ptr, AtomicTy);
764 EmitAtomicInit(E->
getVal1(), lvalue);
772 switch (E->
getOp()) {
773 case AtomicExpr::AO__c11_atomic_init:
774 case AtomicExpr::AO__opencl_atomic_init:
775 llvm_unreachable(
"Already handled above with EmitAtomicInit!");
777 case AtomicExpr::AO__c11_atomic_load:
778 case AtomicExpr::AO__opencl_atomic_load:
779 case AtomicExpr::AO__atomic_load_n:
782 case AtomicExpr::AO__atomic_load:
783 Dest = EmitPointerWithAlignment(E->
getVal1());
786 case AtomicExpr::AO__atomic_store:
787 Val1 = EmitPointerWithAlignment(E->
getVal1());
790 case AtomicExpr::AO__atomic_exchange:
791 Val1 = EmitPointerWithAlignment(E->
getVal1());
792 Dest = EmitPointerWithAlignment(E->
getVal2());
795 case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
796 case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
797 case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
798 case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
799 case AtomicExpr::AO__atomic_compare_exchange_n:
800 case AtomicExpr::AO__atomic_compare_exchange:
801 Val1 = EmitPointerWithAlignment(E->
getVal1());
802 if (E->
getOp() == AtomicExpr::AO__atomic_compare_exchange)
803 Val2 = EmitPointerWithAlignment(E->
getVal2());
807 if (E->
getOp() == AtomicExpr::AO__atomic_compare_exchange_n ||
808 E->
getOp() == AtomicExpr::AO__atomic_compare_exchange)
809 IsWeak = EmitScalarExpr(E->
getWeak());
812 case AtomicExpr::AO__c11_atomic_fetch_add:
813 case AtomicExpr::AO__c11_atomic_fetch_sub:
814 case AtomicExpr::AO__opencl_atomic_fetch_add:
815 case AtomicExpr::AO__opencl_atomic_fetch_sub:
825 Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
826 auto Temp = CreateMemTemp(Val1Ty,
".atomictmp");
828 EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Temp, Val1Ty));
832 case AtomicExpr::AO__atomic_fetch_add:
833 case AtomicExpr::AO__atomic_fetch_sub:
834 case AtomicExpr::AO__atomic_add_fetch:
835 case AtomicExpr::AO__atomic_sub_fetch:
836 case AtomicExpr::AO__c11_atomic_store:
837 case AtomicExpr::AO__c11_atomic_exchange:
838 case AtomicExpr::AO__opencl_atomic_store:
839 case AtomicExpr::AO__opencl_atomic_exchange:
840 case AtomicExpr::AO__atomic_store_n:
841 case AtomicExpr::AO__atomic_exchange_n:
842 case AtomicExpr::AO__c11_atomic_fetch_and:
843 case AtomicExpr::AO__c11_atomic_fetch_or:
844 case AtomicExpr::AO__c11_atomic_fetch_xor:
845 case AtomicExpr::AO__opencl_atomic_fetch_and:
846 case AtomicExpr::AO__opencl_atomic_fetch_or:
847 case AtomicExpr::AO__opencl_atomic_fetch_xor:
848 case AtomicExpr::AO__opencl_atomic_fetch_min:
849 case AtomicExpr::AO__opencl_atomic_fetch_max:
850 case AtomicExpr::AO__atomic_fetch_and:
851 case AtomicExpr::AO__atomic_fetch_or:
852 case AtomicExpr::AO__atomic_fetch_xor:
853 case AtomicExpr::AO__atomic_fetch_nand:
854 case AtomicExpr::AO__atomic_and_fetch:
855 case AtomicExpr::AO__atomic_or_fetch:
856 case AtomicExpr::AO__atomic_xor_fetch:
857 case AtomicExpr::AO__atomic_nand_fetch:
867 LValue AtomicVal = MakeAddrLValue(Ptr, AtomicTy);
868 AtomicInfo Atomics(*
this, AtomicVal);
870 Ptr = Atomics.emitCastToAtomicIntPointer(Ptr);
871 if (Val1.
isValid()) Val1 = Atomics.convertToAtomicIntPointer(Val1);
872 if (Val2.
isValid()) Val2 = Atomics.convertToAtomicIntPointer(Val2);
874 Dest = Atomics.emitCastToAtomicIntPointer(Dest);
876 Dest = CreateMemTemp(RValTy,
"cmpxchg.bool");
878 Dest = Atomics.emitCastToAtomicIntPointer(Atomics.CreateTempAlloca());
882 bool UseOptimizedLibcall =
false;
883 switch (E->
getOp()) {
884 case AtomicExpr::AO__c11_atomic_init:
885 case AtomicExpr::AO__opencl_atomic_init:
886 llvm_unreachable(
"Already handled above with EmitAtomicInit!");
888 case AtomicExpr::AO__c11_atomic_fetch_add:
889 case AtomicExpr::AO__opencl_atomic_fetch_add:
890 case AtomicExpr::AO__atomic_fetch_add:
891 case AtomicExpr::AO__c11_atomic_fetch_and:
892 case AtomicExpr::AO__opencl_atomic_fetch_and:
893 case AtomicExpr::AO__atomic_fetch_and:
894 case AtomicExpr::AO__c11_atomic_fetch_or:
895 case AtomicExpr::AO__opencl_atomic_fetch_or:
896 case AtomicExpr::AO__atomic_fetch_or:
897 case AtomicExpr::AO__atomic_fetch_nand:
898 case AtomicExpr::AO__c11_atomic_fetch_sub:
899 case AtomicExpr::AO__opencl_atomic_fetch_sub:
900 case AtomicExpr::AO__atomic_fetch_sub:
901 case AtomicExpr::AO__c11_atomic_fetch_xor:
902 case AtomicExpr::AO__opencl_atomic_fetch_xor:
903 case AtomicExpr::AO__opencl_atomic_fetch_min:
904 case AtomicExpr::AO__opencl_atomic_fetch_max:
905 case AtomicExpr::AO__atomic_fetch_xor:
906 case AtomicExpr::AO__atomic_add_fetch:
907 case AtomicExpr::AO__atomic_and_fetch:
908 case AtomicExpr::AO__atomic_nand_fetch:
909 case AtomicExpr::AO__atomic_or_fetch:
910 case AtomicExpr::AO__atomic_sub_fetch:
911 case AtomicExpr::AO__atomic_xor_fetch:
913 UseOptimizedLibcall =
true;
916 case AtomicExpr::AO__c11_atomic_load:
917 case AtomicExpr::AO__c11_atomic_store:
918 case AtomicExpr::AO__c11_atomic_exchange:
919 case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
920 case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
921 case AtomicExpr::AO__opencl_atomic_load:
922 case AtomicExpr::AO__opencl_atomic_store:
923 case AtomicExpr::AO__opencl_atomic_exchange:
924 case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
925 case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
926 case AtomicExpr::AO__atomic_load_n:
927 case AtomicExpr::AO__atomic_load:
928 case AtomicExpr::AO__atomic_store_n:
929 case AtomicExpr::AO__atomic_store:
930 case AtomicExpr::AO__atomic_exchange_n:
931 case AtomicExpr::AO__atomic_exchange:
932 case AtomicExpr::AO__atomic_compare_exchange_n:
933 case AtomicExpr::AO__atomic_compare_exchange:
935 if (Size == 1 || Size == 2 || Size == 4 || Size == 8)
936 UseOptimizedLibcall =
true;
941 if (!UseOptimizedLibcall) {
944 getContext().getSizeType());
952 auto AS = PT->getAs<
PointerType>()->getPointeeType().getAddressSpace();
956 auto T = V->getType();
957 auto *DestType =
T->getPointerElementType()->getPointerTo(DestAS);
959 return getTargetHooks().performAddrSpaceCast(
965 getContext().VoidPtrTy);
967 std::string LibCallName;
969 MemTy->
isPointerType() ? getContext().getIntPtrType() : MemTy;
971 bool HaveRetTy =
false;
972 llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;
973 switch (E->
getOp()) {
974 case AtomicExpr::AO__c11_atomic_init:
975 case AtomicExpr::AO__opencl_atomic_init:
976 llvm_unreachable(
"Already handled!");
985 case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
986 case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
987 case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
988 case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
989 case AtomicExpr::AO__atomic_compare_exchange:
990 case AtomicExpr::AO__atomic_compare_exchange_n:
991 LibCallName =
"__atomic_compare_exchange";
992 RetTy = getContext().BoolTy;
997 getContext().VoidPtrTy);
1006 case AtomicExpr::AO__c11_atomic_exchange:
1007 case AtomicExpr::AO__opencl_atomic_exchange:
1008 case AtomicExpr::AO__atomic_exchange_n:
1009 case AtomicExpr::AO__atomic_exchange:
1010 LibCallName =
"__atomic_exchange";
1016 case AtomicExpr::AO__c11_atomic_store:
1017 case AtomicExpr::AO__opencl_atomic_store:
1018 case AtomicExpr::AO__atomic_store:
1019 case AtomicExpr::AO__atomic_store_n:
1020 LibCallName =
"__atomic_store";
1021 RetTy = getContext().VoidTy;
1028 case AtomicExpr::AO__c11_atomic_load:
1029 case AtomicExpr::AO__opencl_atomic_load:
1030 case AtomicExpr::AO__atomic_load:
1031 case AtomicExpr::AO__atomic_load_n:
1032 LibCallName =
"__atomic_load";
1036 case AtomicExpr::AO__atomic_add_fetch:
1037 PostOp = llvm::Instruction::Add;
1039 case AtomicExpr::AO__c11_atomic_fetch_add:
1040 case AtomicExpr::AO__opencl_atomic_fetch_add:
1041 case AtomicExpr::AO__atomic_fetch_add:
1042 LibCallName =
"__atomic_fetch_add";
1048 case AtomicExpr::AO__atomic_and_fetch:
1051 case AtomicExpr::AO__c11_atomic_fetch_and:
1052 case AtomicExpr::AO__opencl_atomic_fetch_and:
1053 case AtomicExpr::AO__atomic_fetch_and:
1054 LibCallName =
"__atomic_fetch_and";
1060 case AtomicExpr::AO__atomic_or_fetch:
1061 PostOp = llvm::Instruction::Or;
1063 case AtomicExpr::AO__c11_atomic_fetch_or:
1064 case AtomicExpr::AO__opencl_atomic_fetch_or:
1065 case AtomicExpr::AO__atomic_fetch_or:
1066 LibCallName =
"__atomic_fetch_or";
1072 case AtomicExpr::AO__atomic_sub_fetch:
1073 PostOp = llvm::Instruction::Sub;
1075 case AtomicExpr::AO__c11_atomic_fetch_sub:
1076 case AtomicExpr::AO__opencl_atomic_fetch_sub:
1077 case AtomicExpr::AO__atomic_fetch_sub:
1078 LibCallName =
"__atomic_fetch_sub";
1084 case AtomicExpr::AO__atomic_xor_fetch:
1085 PostOp = llvm::Instruction::Xor;
1087 case AtomicExpr::AO__c11_atomic_fetch_xor:
1088 case AtomicExpr::AO__opencl_atomic_fetch_xor:
1089 case AtomicExpr::AO__atomic_fetch_xor:
1090 LibCallName =
"__atomic_fetch_xor";
1094 case AtomicExpr::AO__opencl_atomic_fetch_min:
1096 ?
"__atomic_fetch_min" 1097 :
"__atomic_fetch_umin";
1101 case AtomicExpr::AO__opencl_atomic_fetch_max:
1103 ?
"__atomic_fetch_max" 1104 :
"__atomic_fetch_umax";
1110 case AtomicExpr::AO__atomic_nand_fetch:
1113 case AtomicExpr::AO__atomic_fetch_nand:
1114 LibCallName =
"__atomic_fetch_nand";
1121 LibCallName = std::string(
"__opencl") +
1122 StringRef(LibCallName).drop_front(1).str();
1126 if (UseOptimizedLibcall)
1127 LibCallName +=
"_" + llvm::utostr(Size);
1130 if (UseOptimizedLibcall) {
1133 RetTy = getContext().getIntTypeForBitwidth(
1134 getContext().toBits(sizeChars),
false);
1137 RetTy = getContext().VoidTy;
1139 getContext().VoidPtrTy);
1144 getContext().IntTy);
1151 assert(UseOptimizedLibcall || !PostOp);
1163 llvm::Value *LoadVal1 = Args[1].RV.getScalarVal();
1164 ResVal = Builder.CreateBinOp(PostOp, ResVal, LoadVal1);
1166 if (E->
getOp() == AtomicExpr::AO__atomic_nand_fetch)
1167 ResVal = Builder.CreateNot(ResVal);
1169 Builder.CreateStore(
1171 Builder.CreateBitCast(Dest, ResVal->
getType()->getPointerTo()));
1177 return convertTempToRValue(
1178 Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo()),
1182 bool IsStore = E->
getOp() == AtomicExpr::AO__c11_atomic_store ||
1183 E->
getOp() == AtomicExpr::AO__opencl_atomic_store ||
1184 E->
getOp() == AtomicExpr::AO__atomic_store ||
1185 E->
getOp() == AtomicExpr::AO__atomic_store_n;
1186 bool IsLoad = E->
getOp() == AtomicExpr::AO__c11_atomic_load ||
1187 E->
getOp() == AtomicExpr::AO__opencl_atomic_load ||
1188 E->
getOp() == AtomicExpr::AO__atomic_load ||
1189 E->
getOp() == AtomicExpr::AO__atomic_load_n;
1191 if (isa<llvm::ConstantInt>(Order)) {
1192 auto ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
1195 if (llvm::isValidAtomicOrderingCABI(ord))
1196 switch ((llvm::AtomicOrderingCABI)ord) {
1197 case llvm::AtomicOrderingCABI::relaxed:
1198 EmitAtomicOp(*
this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1199 llvm::AtomicOrdering::Monotonic, Scope);
1201 case llvm::AtomicOrderingCABI::consume:
1202 case llvm::AtomicOrderingCABI::acquire:
1205 EmitAtomicOp(*
this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1206 llvm::AtomicOrdering::Acquire, Scope);
1208 case llvm::AtomicOrderingCABI::release:
1211 EmitAtomicOp(*
this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1212 llvm::AtomicOrdering::Release, Scope);
1214 case llvm::AtomicOrderingCABI::acq_rel:
1215 if (IsLoad || IsStore)
1217 EmitAtomicOp(*
this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1218 llvm::AtomicOrdering::AcquireRelease, Scope);
1220 case llvm::AtomicOrderingCABI::seq_cst:
1221 EmitAtomicOp(*
this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1222 llvm::AtomicOrdering::SequentiallyConsistent, Scope);
1228 return convertTempToRValue(
1229 Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo(
1237 llvm::BasicBlock *MonotonicBB =
nullptr, *AcquireBB =
nullptr,
1238 *ReleaseBB =
nullptr, *AcqRelBB =
nullptr,
1239 *SeqCstBB =
nullptr;
1240 MonotonicBB = createBasicBlock(
"monotonic", CurFn);
1242 AcquireBB = createBasicBlock(
"acquire", CurFn);
1244 ReleaseBB = createBasicBlock(
"release", CurFn);
1245 if (!IsLoad && !IsStore)
1246 AcqRelBB = createBasicBlock(
"acqrel", CurFn);
1247 SeqCstBB = createBasicBlock(
"seqcst", CurFn);
1248 llvm::BasicBlock *ContBB = createBasicBlock(
"atomic.continue", CurFn);
1254 Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(),
false);
1255 llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);
1258 Builder.SetInsertPoint(MonotonicBB);
1259 EmitAtomicOp(*
this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1260 llvm::AtomicOrdering::Monotonic, Scope);
1261 Builder.CreateBr(ContBB);
1263 Builder.SetInsertPoint(AcquireBB);
1264 EmitAtomicOp(*
this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1265 llvm::AtomicOrdering::Acquire, Scope);
1266 Builder.CreateBr(ContBB);
1267 SI->addCase(Builder.getInt32((
int)llvm::AtomicOrderingCABI::consume),
1269 SI->addCase(Builder.getInt32((
int)llvm::AtomicOrderingCABI::acquire),
1273 Builder.SetInsertPoint(ReleaseBB);
1274 EmitAtomicOp(*
this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1275 llvm::AtomicOrdering::Release, Scope);
1276 Builder.CreateBr(ContBB);
1277 SI->addCase(Builder.getInt32((
int)llvm::AtomicOrderingCABI::release),
1280 if (!IsLoad && !IsStore) {
1281 Builder.SetInsertPoint(AcqRelBB);
1282 EmitAtomicOp(*
this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1283 llvm::AtomicOrdering::AcquireRelease, Scope);
1284 Builder.CreateBr(ContBB);
1285 SI->addCase(Builder.getInt32((
int)llvm::AtomicOrderingCABI::acq_rel),
1288 Builder.SetInsertPoint(SeqCstBB);
1289 EmitAtomicOp(*
this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1290 llvm::AtomicOrdering::SequentiallyConsistent, Scope);
1291 Builder.CreateBr(ContBB);
1292 SI->addCase(Builder.getInt32((
int)llvm::AtomicOrderingCABI::seq_cst),
1296 Builder.SetInsertPoint(ContBB);
1300 assert(Atomics.getValueSizeInBits() <= Atomics.getAtomicSizeInBits());
1301 return convertTempToRValue(
1302 Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo(
1307 Address AtomicInfo::emitCastToAtomicIntPointer(
Address addr)
const {
1308 unsigned addrspace =
1309 cast<llvm::PointerType>(addr.
getPointer()->getType())->getAddressSpace();
1310 llvm::IntegerType *ty =
1315 Address AtomicInfo::convertToAtomicIntPointer(
Address Addr)
const {
1318 if (SourceSizeInBits != AtomicSizeInBits) {
1319 Address Tmp = CreateTempAlloca();
1321 std::min(AtomicSizeInBits, SourceSizeInBits) / 8);
1325 return emitCastToAtomicIntPointer(Addr);
1331 bool asValue)
const {
1364 bool AsValue)
const {
1366 assert(IntVal->getType()->isIntegerTy() &&
"Expected integer value");
1372 auto *ValTy = AsValue
1374 : getAtomicAddress().getType()->getPointerElementType();
1375 if (ValTy->isIntegerTy()) {
1376 assert(IntVal->getType() == ValTy &&
"Different integer types.");
1378 }
else if (ValTy->isPointerTy())
1380 else if (llvm::CastInst::isBitCastable(IntVal->getType(), ValTy))
1387 bool TempIsVolatile =
false;
1393 Temp = CreateTempAlloca();
1397 Address CastTemp = emitCastToAtomicIntPointer(Temp);
1399 ->setVolatile(TempIsVolatile);
1401 return convertAtomicTempToRValue(Temp, ResultSlot, Loc, AsValue);
1404 void AtomicInfo::EmitAtomicLoadLibcall(
llvm::Value *AddForLoaded,
1405 llvm::AtomicOrdering AO,
bool) {
1419 llvm::Value *AtomicInfo::EmitAtomicLoadOp(llvm::AtomicOrdering AO,
1422 Address Addr = getAtomicAddressAsAtomicIntPointer();
1424 Load->setAtomic(AO);
1428 Load->setVolatile(
true);
1437 if (!CGM.getCodeGenOpts().MSVolatile)
return false;
1438 AtomicInfo AI(*
this, LV);
1441 bool AtomicIsInline = !AI.shouldUseLibcall();
1443 if (getContext().getTypeSize(LV.
getType()) >
1444 getContext().getTypeSize(getContext().getIntPtrType()))
1446 return IsVolatile && AtomicIsInline;
1451 llvm::AtomicOrdering AO;
1454 AO = llvm::AtomicOrdering::SequentiallyConsistent;
1456 AO = llvm::AtomicOrdering::Acquire;
1459 return EmitAtomicLoad(LV, SL, AO, IsVolatile, Slot);
1463 bool AsValue, llvm::AtomicOrdering AO,
1466 if (shouldUseLibcall()) {
1472 TempAddr = CreateTempAlloca();
1474 EmitAtomicLoadLibcall(TempAddr.
getPointer(), AO, IsVolatile);
1478 return convertAtomicTempToRValue(TempAddr, ResultSlot, Loc, AsValue);
1482 auto *Load = EmitAtomicLoadOp(AO, IsVolatile);
1490 return ConvertIntToValueOrAtomic(Load, ResultSlot, Loc, AsValue);
1496 llvm::AtomicOrdering AO,
bool IsVolatile,
1498 AtomicInfo Atomics(*
this, src);
1499 return Atomics.EmitAtomicLoad(resultSlot, loc,
true, AO,
1505 void AtomicInfo::emitCopyIntoMemory(
RValue rvalue)
const {
1522 emitMemSetZeroIfNecessary();
1525 LValue TempLVal = projectValue();
1538 Address AtomicInfo::materializeRValue(
RValue rvalue)
const {
1546 AtomicInfo Atomics(CGF, TempLV);
1547 Atomics.emitCopyIntoMemory(rvalue);
1556 if (isa<llvm::IntegerType>(Value->getType()))
1559 llvm::IntegerType *InputIntTy = llvm::IntegerType::get(
1561 LVal.
isSimple() ? getValueSizeInBits() : getAtomicSizeInBits());
1562 if (isa<llvm::PointerType>(Value->getType()))
1563 return CGF.
Builder.CreatePtrToInt(Value, InputIntTy);
1564 else if (llvm::BitCastInst::isBitCastable(Value->getType(), InputIntTy))
1570 Address Addr = materializeRValue(RVal);
1573 Addr = emitCastToAtomicIntPointer(Addr);
1577 std::pair<llvm::Value *, llvm::Value *> AtomicInfo::EmitAtomicCompareExchangeOp(
1579 llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure,
bool IsWeak) {
1581 Address Addr = getAtomicAddressAsAtomicIntPointer();
1583 ExpectedVal, DesiredVal,
1587 Inst->setWeak(IsWeak);
1590 auto *PreviousVal = CGF.
Builder.CreateExtractValue(Inst, 0);
1591 auto *SuccessFailureVal = CGF.
Builder.CreateExtractValue(Inst, 1);
1592 return std::make_pair(PreviousVal, SuccessFailureVal);
1596 AtomicInfo::EmitAtomicCompareExchangeLibcall(
llvm::Value *ExpectedAddr,
1598 llvm::AtomicOrdering Success,
1599 llvm::AtomicOrdering Failure) {
1611 llvm::ConstantInt::get(CGF.
IntTy, (
int)llvm::toCABI(Success))),
1614 llvm::ConstantInt::get(CGF.
IntTy, (
int)llvm::toCABI(Failure))),
1622 std::pair<RValue, llvm::Value *> AtomicInfo::EmitAtomicCompareExchange(
1624 llvm::AtomicOrdering Failure,
bool IsWeak) {
1625 if (isStrongerThan(Failure, Success))
1628 Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(Success);
1631 if (shouldUseLibcall()) {
1633 Address ExpectedAddr = materializeRValue(Expected);
1634 Address DesiredAddr = materializeRValue(Desired);
1635 auto *Res = EmitAtomicCompareExchangeLibcall(ExpectedAddr.
getPointer(),
1638 return std::make_pair(
1646 auto *ExpectedVal = convertRValueToInt(Expected);
1647 auto *DesiredVal = convertRValueToInt(Desired);
1648 auto Res = EmitAtomicCompareExchangeOp(ExpectedVal, DesiredVal, Success,
1650 return std::make_pair(
1661 LValue AtomicLVal = Atomics.getAtomicLValue();
1668 Address Ptr = Atomics.materializeRValue(OldRVal);
1701 RValue NewRVal = UpdateOp(UpRVal);
1711 void AtomicInfo::EmitAtomicUpdateLibcall(
1712 llvm::AtomicOrdering AO,
const llvm::function_ref<
RValue(
RValue)> &UpdateOp,
1714 auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1716 Address ExpectedAddr = CreateTempAlloca();
1718 EmitAtomicLoadLibcall(ExpectedAddr.
getPointer(), AO, IsVolatile);
1722 Address DesiredAddr = CreateTempAlloca();
1724 requiresMemSetZero(getAtomicAddress().getElementType())) {
1728 auto OldRVal = convertAtomicTempToRValue(ExpectedAddr,
1733 EmitAtomicCompareExchangeLibcall(ExpectedAddr.
getPointer(),
1736 CGF.
Builder.CreateCondBr(Res, ExitBB, ContBB);
1740 void AtomicInfo::EmitAtomicUpdateOp(
1741 llvm::AtomicOrdering AO,
const llvm::function_ref<
RValue(
RValue)> &UpdateOp,
1743 auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1746 auto *OldVal = EmitAtomicLoadOp(AO, IsVolatile);
1750 auto *CurBB = CGF.
Builder.GetInsertBlock();
1752 llvm::PHINode *PHI = CGF.
Builder.CreatePHI(OldVal->getType(),
1754 PHI->addIncoming(OldVal, CurBB);
1755 Address NewAtomicAddr = CreateTempAlloca();
1756 Address NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
1758 requiresMemSetZero(getAtomicAddress().getElementType())) {
1766 auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
1767 PHI->addIncoming(Res.first, CGF.
Builder.GetInsertBlock());
1768 CGF.
Builder.CreateCondBr(Res.second, ExitBB, ContBB);
1774 LValue AtomicLVal = Atomics.getAtomicLValue();
1798 void AtomicInfo::EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
1799 RValue UpdateRVal,
bool IsVolatile) {
1800 auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1802 Address ExpectedAddr = CreateTempAlloca();
1804 EmitAtomicLoadLibcall(ExpectedAddr.
getPointer(), AO, IsVolatile);
1808 Address DesiredAddr = CreateTempAlloca();
1810 requiresMemSetZero(getAtomicAddress().getElementType())) {
1816 EmitAtomicCompareExchangeLibcall(ExpectedAddr.
getPointer(),
1819 CGF.
Builder.CreateCondBr(Res, ExitBB, ContBB);
1823 void AtomicInfo::EmitAtomicUpdateOp(llvm::AtomicOrdering AO,
RValue UpdateRVal,
1825 auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1828 auto *OldVal = EmitAtomicLoadOp(AO, IsVolatile);
1832 auto *CurBB = CGF.
Builder.GetInsertBlock();
1834 llvm::PHINode *PHI = CGF.
Builder.CreatePHI(OldVal->getType(),
1836 PHI->addIncoming(OldVal, CurBB);
1837 Address NewAtomicAddr = CreateTempAlloca();
1838 Address NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
1840 requiresMemSetZero(getAtomicAddress().getElementType())) {
1846 auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
1847 PHI->addIncoming(Res.first, CGF.
Builder.GetInsertBlock());
1848 CGF.
Builder.CreateCondBr(Res.second, ExitBB, ContBB);
1852 void AtomicInfo::EmitAtomicUpdate(
1853 llvm::AtomicOrdering AO,
const llvm::function_ref<
RValue(
RValue)> &UpdateOp,
1855 if (shouldUseLibcall()) {
1856 EmitAtomicUpdateLibcall(AO, UpdateOp, IsVolatile);
1858 EmitAtomicUpdateOp(AO, UpdateOp, IsVolatile);
1862 void AtomicInfo::EmitAtomicUpdate(llvm::AtomicOrdering AO,
RValue UpdateRVal,
1864 if (shouldUseLibcall()) {
1865 EmitAtomicUpdateLibcall(AO, UpdateRVal, IsVolatile);
1867 EmitAtomicUpdateOp(AO, UpdateRVal, IsVolatile);
1874 llvm::AtomicOrdering AO;
1876 AO = llvm::AtomicOrdering::SequentiallyConsistent;
1878 AO = llvm::AtomicOrdering::Release;
1881 return EmitAtomicStore(rvalue, lvalue, AO, IsVolatile, isInit);
1890 llvm::AtomicOrdering AO,
bool IsVolatile,
1898 AtomicInfo atomics(*
this, dest);
1899 LValue LVal = atomics.getAtomicLValue();
1904 atomics.emitCopyIntoMemory(rvalue);
1909 if (atomics.shouldUseLibcall()) {
1911 Address srcAddr = atomics.materializeRValue(rvalue);
1916 getContext().getSizeType());
1917 args.
add(
RValue::get(EmitCastToVoidPtr(atomics.getAtomicPointer())),
1918 getContext().VoidPtrTy);
1920 getContext().VoidPtrTy);
1922 RValue::get(llvm::ConstantInt::get(IntTy, (
int)llvm::toCABI(AO))),
1923 getContext().IntTy);
1929 llvm::Value *intValue = atomics.convertRValueToInt(rvalue);
1933 atomics.emitCastToAtomicIntPointer(atomics.getAtomicAddress());
1934 intValue = Builder.CreateIntCast(
1936 llvm::StoreInst *store = Builder.CreateStore(intValue, addr);
1940 store->setAtomic(AO);
1944 store->setVolatile(
true);
1945 CGM.DecorateInstructionWithTBAA(store, dest.
getTBAAInfo());
1950 atomics.EmitAtomicUpdate(AO, rvalue, IsVolatile);
1957 llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure,
bool IsWeak,
1967 AtomicInfo Atomics(*
this, Obj);
1969 return Atomics.EmitAtomicCompareExchange(Expected, Desired, Success, Failure,
1974 LValue LVal, llvm::AtomicOrdering AO,
1975 const llvm::function_ref<
RValue(
RValue)> &UpdateOp,
bool IsVolatile) {
1976 AtomicInfo Atomics(*
this, LVal);
1977 Atomics.EmitAtomicUpdate(AO, UpdateOp, IsVolatile);
1981 AtomicInfo atomics(*
this, dest);
1983 switch (atomics.getEvaluationKind()) {
1999 bool Zeroed =
false;
2001 Zeroed = atomics.emitMemSetZeroIfNecessary();
2002 dest = atomics.projectValue();
2013 EmitAggExpr(init, slot);
2017 llvm_unreachable(
"bad evaluation kind");
const llvm::DataLayout & getDataLayout() const
llvm::Value * getVectorPointer() const
ReturnValueSlot - Contains the address where the return value of a function can be stored...
Defines the clang::ASTContext interface.
std::pair< RValue, llvm::Value * > EmitAtomicCompareExchange(LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc, llvm::AtomicOrdering Success=llvm::AtomicOrdering::SequentiallyConsistent, llvm::AtomicOrdering Failure=llvm::AtomicOrdering::SequentiallyConsistent, bool IsWeak=false, AggValueSlot Slot=AggValueSlot::ignored())
Emit a compare-and-exchange operation for an atomic type.
llvm::IntegerType * IntTy
int
Address getAddress() const
PointerType - C99 6.7.5.1 - Pointer Declarators.
A (possibly-)qualified type.
CodeGenTypes & getTypes()
llvm::Type * ConvertTypeForMem(QualType T)
llvm::LLVMContext & getLLVMContext()
static AggValueSlot forLValue(const LValue &LV, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, IsZeroed_t isZeroed=IsNotZeroed)
void setAlignment(CharUnits A)
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee...
const void * Store
Store - This opaque type encapsulates an immutable mapping from locations to values.
void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false)
EmitStoreThroughLValue - Store the specified rvalue into the specified lvalue, where both are guarant...
bool isZero() const
isZero - Test whether the quantity equals zero.
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
void EmitAtomicStore(RValue rvalue, LValue lvalue, bool isInit)
static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest, Address Ptr, Address Val1, Address Val2, llvm::Value *IsWeak, llvm::Value *FailureOrder, uint64_t Size, llvm::AtomicOrdering Order, llvm::SyncScope::ID Scope)
RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee, ReturnValueSlot ReturnValue, const CallArgList &Args, llvm::Instruction **callOrInvoke, SourceLocation Loc)
EmitCall - Generate a call of the given function, expecting the given result type, and using the given argument list which specifies both the LLVM arguments and the types they were derived from.
TypeEvaluationKind
The kind of evaluation to perform on values of a particular type.
const T * getAs() const
Member-template getAs<specific type>.
static LValue MakeVectorElt(Address vecAddress, llvm::Value *Idx, QualType type, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
llvm::Value * getPointer() const
unsigned getAddressSpace() const
Return the address space that this address resides in.
Address getAddress() const
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
bool isVolatileQualified() const
CharUnits getAlignment() const
void EmitAtomicUpdate(LValue LVal, llvm::AtomicOrdering AO, const llvm::function_ref< RValue(RValue)> &UpdateOp, bool IsVolatile)
RValue EmitLoadOfExtVectorElementLValue(LValue V)
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
unsigned Size
The total size of the bit-field, in bits.
CharUnits - This is an opaque type for sizes expressed in character units.
CharUnits StorageOffset
The offset of the bitfield storage from the start of the struct.
CharUnits getAlignment() const
Return the alignment of this pointer.
static LValue MakeExtVectorElt(Address vecAddress, llvm::Constant *Elts, QualType type, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
llvm::CallInst * CreateMemCpy(Address Dest, Address Src, llvm::Value *Size, bool IsVolatile=false)
Scope - A scope is a transient data structure that is used while parsing the program.
static void EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics, RValue OldRVal, const llvm::function_ref< RValue(RValue)> &UpdateOp, Address DesiredAddr)
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
void EmitAtomicInit(Expr *E, LValue lvalue)
unsigned Offset
The offset within a contiguous run of bitfields that are represented as a single "field" within the L...
RValue EmitAtomicExpr(AtomicExpr *E)
RValue convertTempToRValue(Address addr, QualType type, SourceLocation Loc)
Given the address of a temporary variable, produce an r-value of its type.
Address getAggregateAddress() const
getAggregateAddr() - Return the Value* of the address of the aggregate.
static bool isFullSizeType(CodeGenModule &CGM, llvm::Type *type, uint64_t expectedSize)
Does a store of the given IR type modify the full expected width?
llvm::Constant * CreateRuntimeFunction(llvm::FunctionType *Ty, StringRef Name, llvm::AttributeList ExtraAttrs=llvm::AttributeList(), bool Local=false)
Create a new runtime function with the specified type and name.
llvm::Value * EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, AlignmentSource Source=AlignmentSource::Type, bool isNontemporal=false)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
std::pair< llvm::Value *, llvm::Value * > ComplexPairTy
ASTContext & getContext() const
void EmitAnyExprToMem(const Expr *E, Address Location, Qualifiers Quals, bool IsInitializer)
EmitAnyExprToMem - Emits the code necessary to evaluate an arbitrary expression into the given memory...
LValueBaseInfo getBaseInfo() const
RValue - This trivial value class is used to represent the result of an expression that is evaluated...
Address getExtVectorAddress() const
static std::unique_ptr< AtomicScopeModel > getScopeModel(AtomicOp Op)
Get atomic scope model for the atomic op code.
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
static TypeEvaluationKind getEvaluationKind(QualType T)
getEvaluationKind - Return the TypeEvaluationKind of QualType T.
Expr - This represents one expression.
llvm::StringRef getAsString(SyncScope S)
static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak, Address Dest, Address Ptr, Address Val1, Address Val2, uint64_t Size, llvm::AtomicOrdering SuccessOrder, llvm::AtomicOrdering FailureOrder, llvm::SyncScope::ID Scope)
const FunctionProtoType * T
std::pair< llvm::Value *, llvm::Value * > getComplexVal() const
getComplexVal - Return the real/imag components of this complex value.
static CGCallee forDirect(llvm::Constant *functionPtr, const CGCalleeInfo &abstractInfo=CGCalleeInfo())
llvm::PointerType * getType() const
Return the type of the pointer value.
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
llvm::Value * EmitToMemory(llvm::Value *Value, QualType Ty)
EmitToMemory - Change a scalar value from its value representation to its in-memory representation...
void add(RValue rvalue, QualType type, bool needscopy=false)
static LValue MakeBitfield(Address Addr, const CGBitFieldInfo &Info, QualType type, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Create a new object to represent a bit-field access.
char __ovld __cnfn min(char x, char y)
Returns y if y < x, otherwise it returns x.
llvm::LLVMContext & getLLVMContext()
const CGFunctionInfo & arrangeBuiltinFunctionCall(QualType resultType, const CallArgList &args)
bool isSignedIntegerType() const
Return true if this is an integer type that is signed, according to C99 6.2.5p4 [char, signed char, short, int, long..], or an enum decl which has a signed representation.
TBAAAccessInfo getTBAAInfo() const
llvm::CallInst * CreateMemSet(Address Dest, llvm::Value *Value, llvm::Value *Size, bool IsVolatile=false)
Represents a GCC generic vector type.
llvm::Value * EmitCastToVoidPtr(llvm::Value *value)
Emit a cast to void* in the appropriate address space.
RValue EmitLoadOfBitfieldLValue(LValue LV, SourceLocation Loc)
RValue EmitAtomicLoad(LValue LV, SourceLocation SL, AggValueSlot Slot=AggValueSlot::ignored())
The l-value was considered opaque, so the alignment was determined from a type.
Address CreateBitCast(Address Addr, llvm::Type *Ty, const llvm::Twine &Name="")
Encodes a location in the source.
bool LValueIsSuitableForInlineAtomic(LValue Src)
An LValue is a candidate for having its loads and stores be made atomic if we are operating under /vo...
llvm::Value * EmitFromMemory(llvm::Value *Value, QualType Ty)
EmitFromMemory - Change a scalar value from its memory representation to its value representation...
const CGBitFieldInfo & getBitFieldInfo() const
QualType getExtVectorType(QualType VectorType, unsigned NumElts) const
Return the unique reference to an extended vector type of the specified element type and size...
AtomicExpr - Variadic atomic builtins: __atomic_exchange, __atomic_fetch_*, __atomic_load, __atomic_store, and __atomic_compare_exchange_*, for the similarly-named C++11 instructions, and __c11 variants for <stdatomic.h>, and corresponding __opencl_atomic_* for OpenCL 2.0.
static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak, Address Dest, Address Ptr, Address Val1, Address Val2, llvm::Value *FailureOrderVal, uint64_t Size, llvm::AtomicOrdering SuccessOrder, llvm::SyncScope::ID Scope)
Given an ordering required on success, emit all possible cmpxchg instructions to cope with the provid...
const TargetCodeGenInfo & getTargetHooks() const
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
CGFunctionInfo - Class to encapsulate the information about a function definition.
This class organizes the cross-function state that is used while generating LLVM code.
Dataflow Directional Tag Classes.
static RValue getComplex(llvm::Value *V1, llvm::Value *V2)
static AggValueSlot ignored()
ignored - Returns an aggregate value slot indicating that the aggregate value is being ignored...
Address CreateStructGEP(Address Addr, unsigned Index, CharUnits Offset, const llvm::Twine &Name="")
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
unsigned StorageSize
The storage size in bits which should be used when accessing this bitfield.
void EmitAggregateCopy(Address DestPtr, Address SrcPtr, QualType EltTy, bool isVolatile=false, bool isAssignment=false)
EmitAggregateCopy - Emit an aggregate copy.
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
llvm::Value * getExtVectorPointer() const
Expr * getOrderFail() const
bool isVolatileQualified() const
static void AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args, bool UseOptimizedLibcall, llvm::Value *Val, QualType ValTy, SourceLocation Loc, CharUnits SizeInChars)
void DecorateInstructionWithTBAA(llvm::Instruction *Inst, TBAAAccessInfo TBAAInfo)
DecorateInstructionWithTBAA - Decorate the instruction with a TBAA tag.
void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
llvm::Type * getElementType() const
Return the type of the values stored in this address.
bool isAtomicType() const
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
llvm::ConstantInt * getSize(CharUnits numChars)
Emit the given number of characters as a value of type size_t.
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
int64_t toBits(CharUnits CharSize) const
Convert a size in characters to a size in bits.
virtual llvm::SyncScope::ID getLLVMSyncScopeID(SyncScope S, llvm::LLVMContext &C) const
Get the syncscope used in LLVM IR.
QualType getUnqualifiedType() const
Retrieve the unqualified variant of the given type, removing as little sugar as possible.
llvm::Value * getBitFieldPointer() const
true
A convenience builder class for complex constant initializers, especially for anonymous global struct...
Qualifiers getQualifiers() const
Retrieve the set of qualifiers applied to this type.
QualType getPointerType(QualType T) const
Return the uniqued reference to the type for a pointer to the specified type.
RValue EmitLoadOfLValue(LValue V, SourceLocation Loc)
EmitLoadOfLValue - Given an expression that represents a value lvalue, this method emits the address ...
static RValue get(llvm::Value *V)
bool isPointerType() const
bool isExtVectorElt() const
static RValue emitAtomicLibcall(CodeGenFunction &CGF, StringRef fnName, QualType resultType, CallArgList &args)
static RValue getAggregate(Address addr, bool isVolatile=false)
LValue - This represents an lvalue reference.
QualType getValueType() const
Address CreatePointerBitCastOrAddrSpaceCast(Address Addr, llvm::Type *Ty, const llvm::Twine &Name="")
static LValue MakeAddr(Address address, QualType type, ASTContext &Context, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
llvm::Value * getVectorIdx() const
CallArgList - Type for representing both the value and type of arguments in a call.
Address CreateMemTemp(QualType T, const Twine &Name="tmp", bool CastToDefaultAddrSpace=true)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment...
static Address EmitValToTemp(CodeGenFunction &CGF, Expr *E)
llvm::Value * getPointer() const
llvm::Constant * getExtVectorElts() const
Structure with information about how a bitfield should be accessed.
QualType getIntTypeForBitwidth(unsigned DestWidth, unsigned Signed) const
getIntTypeForBitwidth - sets integer QualTy according to specified details: bitwidth, signed/unsigned.
CanQualType getSizeType() const
Return the unique type for "size_t" (C99 7.17), defined in <stddef.h>.
llvm::FunctionType * GetFunctionType(const CGFunctionInfo &Info)
GetFunctionType - Get the LLVM function type for.