#include "llvm/ADT/DenseMap.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"

using namespace clang;
using namespace CodeGen;
namespace {
  class AtomicInfo {
    CodeGenFunction &CGF;
    // ...
    uint64_t AtomicSizeInBits;
    uint64_t ValueSizeInBits;
    // ...
  public:
    AtomicInfo(CodeGenFunction &CGF, LValue &lvalue)
        : CGF(CGF), AtomicSizeInBits(0), ValueSizeInBits(0),
          EvaluationKind(TEK_Scalar), UseLibcall(true) {
      assert(!lvalue.isGlobalReg());
      ASTContext &C = CGF.getContext();
      if (lvalue.isSimple()) {
        AtomicTy = lvalue.getType();
        if (auto *ATy = AtomicTy->getAs<AtomicType>())
          ValueTy = ATy->getValueType();
        else
          ValueTy = AtomicTy;
        EvaluationKind = CGF.getEvaluationKind(ValueTy);

        uint64_t ValueAlignInBits;
        uint64_t AtomicAlignInBits;
        TypeInfo ValueTI = C.getTypeInfo(ValueTy);
        ValueSizeInBits = ValueTI.Width;
        ValueAlignInBits = ValueTI.Align;

        TypeInfo AtomicTI = C.getTypeInfo(AtomicTy);
        AtomicSizeInBits = AtomicTI.Width;
        AtomicAlignInBits = AtomicTI.Align;

        assert(ValueSizeInBits <= AtomicSizeInBits);
        assert(ValueAlignInBits <= AtomicAlignInBits);

        AtomicAlign = C.toCharUnitsFromBits(AtomicAlignInBits);
        ValueAlign = C.toCharUnitsFromBits(ValueAlignInBits);
        // ...
      } else if (lvalue.isBitField()) {
        ValueTy = lvalue.getType();
        ValueSizeInBits = C.getTypeSize(ValueTy);
        auto &OrigBFI = lvalue.getBitFieldInfo();
        auto Offset = OrigBFI.Offset % C.toBits(lvalue.getAlignment());
        AtomicSizeInBits = C.toBits(
            C.toCharUnitsFromBits(Offset + OrigBFI.Size + C.getCharWidth() - 1)
                .alignTo(lvalue.getAlignment()));
        auto VoidPtrAddr = CGF.EmitCastToVoidPtr(lvalue.getBitFieldPointer());
        auto OffsetInChars =
            (C.toCharUnitsFromBits(OrigBFI.Offset) / lvalue.getAlignment()) *
            lvalue.getAlignment();
        VoidPtrAddr = CGF.Builder.CreateConstGEP1_64(
            VoidPtrAddr, OffsetInChars.getQuantity());
        auto Addr = CGF.Builder.CreateBitCast(
            VoidPtrAddr,
            CGF.Builder.getIntNTy(AtomicSizeInBits)->getPointerTo(),
            "atomic_bitfield_base");
        // ...
        AtomicTy = C.getIntTypeForBitwidth(AtomicSizeInBits, OrigBFI.IsSigned);
        if (AtomicTy.isNull()) {
          llvm::APInt Size(
              /*numBits=*/32,
              C.toCharUnitsFromBits(AtomicSizeInBits).getQuantity());
          // ...
        }
        // ...
      } else if (lvalue.isVectorElt()) {
        // ...
        ValueSizeInBits = C.getTypeSize(ValueTy);
        AtomicTy = lvalue.getType();
        AtomicSizeInBits = C.getTypeSize(AtomicTy);
        // ...
      } else {
        assert(lvalue.isExtVectorElt());
        // ...
        ValueSizeInBits = C.getTypeSize(ValueTy);
        // ...
        AtomicSizeInBits = C.getTypeSize(AtomicTy);
        // ...
      }
      UseLibcall = !C.getTargetInfo().hasBuiltinAtomic(
          AtomicSizeInBits, C.toBits(lvalue.getAlignment()));
    }
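
    // Illustration (not from the original file): on a typical 64-bit target
    // whose TargetInfo reports builtin atomics up to 16 bytes at natural
    // alignment, this constructor computes, e.g.:
    //
    //   _Atomic int Counter;         // 4 bytes, 4-aligned -> UseLibcall false
    //   struct Big { char b[24]; };
    //   _Atomic struct Big Blob;     // 24 bytes -> UseLibcall true; all
    //                                // operations lower to __atomic_* calls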
    QualType getAtomicType() const { return AtomicTy; }
    QualType getValueType() const { return ValueTy; }
    CharUnits getAtomicAlignment() const { return AtomicAlign; }
    uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; }
    uint64_t getValueSizeInBits() const { return ValueSizeInBits; }
    TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; }
    bool shouldUseLibcall() const { return UseLibcall; }
    const LValue &getAtomicLValue() const { return LVal; }
    // ...
    Address getAtomicAddress() const {
      return Address(getAtomicPointer(), getAtomicAlignment());
    }

    Address getAtomicAddressAsAtomicIntPointer() const {
      return emitCastToAtomicIntPointer(getAtomicAddress());
    }

    /// Is the atomic size larger than the underlying value type?
    bool hasPadding() const {
      return (ValueSizeInBits != AtomicSizeInBits);
    }

    bool emitMemSetZeroIfNecessary() const;

    // ...

    /// Copies an r-value into memory as part of storing to an atomic type.
    void emitCopyIntoMemory(RValue rvalue) const;

    /// Project an l-value down to the value field.
    LValue projectValue() const {
      assert(LVal.isSimple());
      Address addr = getAtomicAddress();
      if (hasPadding())
        addr = CGF.Builder.CreateStructGEP(addr, 0);

      return LValue::MakeAddr(addr, getValueType(), CGF.getContext(),
                              LVal.getBaseInfo(), LVal.getTBAAInfo());
    }
    /// Emits an atomic load as either an LLVM instruction or a libcall.
    RValue EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
                          bool AsValue, llvm::AtomicOrdering AO,
                          bool IsVolatile);

    /// Emits an atomic compare-and-exchange as either an LLVM instruction
    /// or a libcall.
    std::pair<RValue, llvm::Value *>
    EmitAtomicCompareExchange(RValue Expected, RValue Desired,
                              llvm::AtomicOrdering Success =
                                  llvm::AtomicOrdering::SequentiallyConsistent,
                              llvm::AtomicOrdering Failure =
                                  llvm::AtomicOrdering::SequentiallyConsistent,
                              bool IsWeak = false);

    /// Emits an atomic update as either an LLVM instruction or a libcall.
    void EmitAtomicUpdate(llvm::AtomicOrdering AO,
                          const llvm::function_ref<RValue(RValue)> &UpdateOp,
                          bool IsVolatile);
    void EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
                          bool IsVolatile);

    /// Creates a temporary alloca of the atomic type.
    Address CreateTempAlloca() const;

  private:
    // ...
    void EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
                               llvm::AtomicOrdering AO, bool IsVolatile);
    llvm::Value *EmitAtomicLoadOp(llvm::AtomicOrdering AO, bool IsVolatile);
    llvm::Value *EmitAtomicCompareExchangeLibcall(
        llvm::Value *ExpectedAddr, llvm::Value *DesiredAddr,
        llvm::AtomicOrdering Success =
            llvm::AtomicOrdering::SequentiallyConsistent,
        llvm::AtomicOrdering Failure =
            llvm::AtomicOrdering::SequentiallyConsistent);
    std::pair<llvm::Value *, llvm::Value *> EmitAtomicCompareExchangeOp(
        llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
        llvm::AtomicOrdering Success =
            llvm::AtomicOrdering::SequentiallyConsistent,
        llvm::AtomicOrdering Failure =
            llvm::AtomicOrdering::SequentiallyConsistent,
        bool IsWeak = false);
    void EmitAtomicUpdateLibcall(
        llvm::AtomicOrdering AO,
        const llvm::function_ref<RValue(RValue)> &UpdateOp, bool IsVolatile);
    void EmitAtomicUpdateOp(llvm::AtomicOrdering AO,
                            const llvm::function_ref<RValue(RValue)> &UpdateOp,
                            bool IsVolatile);
    void EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO, RValue UpdateRVal,
                                 bool IsVolatile);
    void EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
                            bool IsVolatile);
  };
} // namespace
Address AtomicInfo::CreateTempAlloca() const {
  Address TempAlloca = CGF.CreateMemTemp(
      (LVal.isBitField() && ValueSizeInBits > AtomicSizeInBits) ? ValueTy
                                                                : AtomicTy,
      getAtomicAlignment(),
      "atomic-temp");
  // Cast to pointer to value type for bit-fields.
  if (LVal.isBitField())
    return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
        TempAlloca, getAtomicAddress().getType());
  return TempAlloca;
}
/// Does a store of the given IR type modify the full expected width?
static bool isFullSizeType(CodeGenModule &CGM, llvm::Type *type,
                           uint64_t expectedSize) {
  return (CGM.getDataLayout().getTypeStoreSize(type) * 8 == expectedSize);
}
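
// Illustration (not from the original file; sizes assume x86-64): `long
// double` is an x86_fp80 whose store size is 10 bytes, while
// _Atomic(long double) is padded to 16 bytes. isFullSizeType() is therefore
// false, and requiresMemSetZero() below reports that the padding bytes must
// be zeroed so cmpxchg-based updates can compare whole objects bitwise.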
/// Does the atomic type require memsetting to zero before initialization?
bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {
  // If the atomic type has size padding, we definitely need a memset.
  if (hasPadding()) return true;

  // Otherwise, do some simple heuristics to try to avoid it:
  switch (getEvaluationKind()) {
  case TEK_Scalar:
    return !isFullSizeType(CGF.CGM, type, AtomicSizeInBits);
  case TEK_Complex:
    return !isFullSizeType(CGF.CGM, type->getStructElementType(0),
                           AtomicSizeInBits / 2);
  // Padding in structs has an undefined bit pattern.  User beware.
  case TEK_Aggregate:
    return false;
  }
  llvm_unreachable("bad evaluation kind");
}
bool AtomicInfo::emitMemSetZeroIfNecessary() const {
  assert(LVal.isSimple());
  llvm::Value *addr = LVal.getPointer(CGF);
  if (!requiresMemSetZero(addr->getType()->getPointerElementType()))
    return false;

  CGF.Builder.CreateMemSet(
      addr, llvm::ConstantInt::get(CGF.Int8Ty, 0),
      CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits).getQuantity(),
      LVal.getAlignment().getAsAlign());
  return true;
}
static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
                              Address Dest, Address Ptr, Address Val1,
                              Address Val2, uint64_t Size,
                              llvm::AtomicOrdering SuccessOrder,
                              llvm::AtomicOrdering FailureOrder,
                              llvm::SyncScope::ID Scope) {
  llvm::Value *Expected = CGF.Builder.CreateLoad(Val1);
  llvm::Value *Desired = CGF.Builder.CreateLoad(Val2);

  llvm::AtomicCmpXchgInst *Pair = CGF.Builder.CreateAtomicCmpXchg(
      Ptr.getPointer(), Expected, Desired, SuccessOrder, FailureOrder, Scope);
  Pair->setVolatile(E->isVolatile());
  Pair->setWeak(IsWeak);

  // Cmp holds the result of the compare-exchange: true on success.
  llvm::Value *Old = CGF.Builder.CreateExtractValue(Pair, 0);
  llvm::Value *Cmp = CGF.Builder.CreateExtractValue(Pair, 1);

  // On failure, store the value actually observed back into Expected.
  llvm::BasicBlock *StoreExpectedBB =
      CGF.createBasicBlock("cmpxchg.store_expected", CGF.CurFn);
  llvm::BasicBlock *ContinueBB =
      CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);
  CGF.Builder.CreateCondBr(Cmp, ContinueBB, StoreExpectedBB);

  CGF.Builder.SetInsertPoint(StoreExpectedBB);
  CGF.Builder.CreateStore(Old, Val1);
  CGF.Builder.CreateBr(ContinueBB);

  CGF.Builder.SetInsertPoint(ContinueBB);
  CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
}
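
// Illustration (not from the original file): at the source level,
//
//   _Bool ok = __c11_atomic_compare_exchange_strong(&obj, &expected, desired,
//                                                   __ATOMIC_SEQ_CST,
//                                                   __ATOMIC_SEQ_CST);
//
// produces one `cmpxchg` plus the two blocks created above: the failure edge
// stores the observed value back through &expected, and both edges rejoin at
// cmpxchg.continue where `ok` is materialized.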
/// Given an ordering required on success, emit all possible cmpxchg
/// instructions to cope with the provided (but possibly only dynamically
/// known) FailureOrder.
static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
                                        bool IsWeak, Address Dest, Address Ptr,
                                        Address Val1, Address Val2,
                                        llvm::Value *FailureOrderVal,
                                        uint64_t Size,
                                        llvm::AtomicOrdering SuccessOrder,
                                        llvm::SyncScope::ID Scope) {
  llvm::AtomicOrdering FailureOrder;
  if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {
    auto FOS = FO->getSExtValue();
    if (!llvm::isValidAtomicOrderingCABI(FOS))
      FailureOrder = llvm::AtomicOrdering::Monotonic;
    else
      switch ((llvm::AtomicOrderingCABI)FOS) {
      case llvm::AtomicOrderingCABI::relaxed:
      case llvm::AtomicOrderingCABI::release:
      case llvm::AtomicOrderingCABI::acq_rel:
        FailureOrder = llvm::AtomicOrdering::Monotonic;
        break;
      case llvm::AtomicOrderingCABI::consume:
      case llvm::AtomicOrderingCABI::acquire:
        FailureOrder = llvm::AtomicOrdering::Acquire;
        break;
      case llvm::AtomicOrderingCABI::seq_cst:
        FailureOrder = llvm::AtomicOrdering::SequentiallyConsistent;
        break;
      }
    if (isStrongerThan(FailureOrder, SuccessOrder)) {
      // Don't assert on undefined behavior: "the failure argument shall be
      // no stronger than the success argument".
      FailureOrder =
          llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(SuccessOrder);
    }
    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
                      FailureOrder, Scope);
    return;
  }

  // Create all the relevant BB's.
  llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
                   *SeqCstBB = nullptr;
  MonotonicBB = CGF.createBasicBlock("monotonic_fail", CGF.CurFn);
  if (SuccessOrder != llvm::AtomicOrdering::Monotonic &&
      SuccessOrder != llvm::AtomicOrdering::Release)
    AcquireBB = CGF.createBasicBlock("acquire_fail", CGF.CurFn);
  if (SuccessOrder == llvm::AtomicOrdering::SequentiallyConsistent)
    SeqCstBB = CGF.createBasicBlock("seqcst_fail", CGF.CurFn);

  llvm::BasicBlock *ContBB = CGF.createBasicBlock("atomic.continue", CGF.CurFn);

  // MonotonicBB is arbitrarily chosen as the default case.
  llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(FailureOrderVal, MonotonicBB);

  // Emit all the different atomics.
  CGF.Builder.SetInsertPoint(MonotonicBB);
  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
                    Size, SuccessOrder, llvm::AtomicOrdering::Monotonic, Scope);
  CGF.Builder.CreateBr(ContBB);

  if (AcquireBB) {
    CGF.Builder.SetInsertPoint(AcquireBB);
    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
                      Size, SuccessOrder, llvm::AtomicOrdering::Acquire, Scope);
    CGF.Builder.CreateBr(ContBB);
    SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
                AcquireBB);
    SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
                AcquireBB);
  }
  if (SeqCstBB) {
    CGF.Builder.SetInsertPoint(SeqCstBB);
    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
                      llvm::AtomicOrdering::SequentiallyConsistent, Scope);
    CGF.Builder.CreateBr(ContBB);
    SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
                SeqCstBB);
  }

  CGF.Builder.SetInsertPoint(ContBB);
}
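
// Illustration (not from the original file): C11 makes a failure ordering
// stronger than the success ordering undefined, so rather than asserting,
//
//   __c11_atomic_compare_exchange_strong(&x, &e, d, __ATOMIC_RELAXED,
//                                        __ATOMIC_SEQ_CST);
//
// is clamped above via getStrongestFailureOrdering(Monotonic); the failure
// ordering silently degrades to relaxed.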
/// Duplicate the atomic min/max operation in conventional IR for the builtin
/// variants that return the new rather than the original value.
static llvm::Value *EmitPostAtomicMinMax(CGBuilderTy &Builder,
                                         AtomicExpr::AtomicOp Op,
                                         bool IsSigned,
                                         llvm::Value *OldVal,
                                         llvm::Value *RHS) {
  llvm::CmpInst::Predicate Pred;
  switch (Op) {
  default:
    llvm_unreachable("Unexpected min/max operation");
  case AtomicExpr::AO__atomic_max_fetch:
    Pred = IsSigned ? llvm::CmpInst::ICMP_SGT : llvm::CmpInst::ICMP_UGT;
    break;
  case AtomicExpr::AO__atomic_min_fetch:
    Pred = IsSigned ? llvm::CmpInst::ICMP_SLT : llvm::CmpInst::ICMP_ULT;
    break;
  }
  llvm::Value *Cmp = Builder.CreateICmp(Pred, OldVal, RHS, "tst");
  return Builder.CreateSelect(Cmp, OldVal, RHS, "newval");
}
static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
                         Address Ptr, Address Val1, Address Val2,
                         llvm::Value *IsWeak, llvm::Value *FailureOrder,
                         uint64_t Size, llvm::AtomicOrdering Order,
                         llvm::SyncScope::ID Scope) {
  llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
  bool PostOpMinMax = false;
  unsigned PostOp = 0;

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
  case AtomicExpr::AO__opencl_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
    emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
                                FailureOrder, Size, Order, Scope);
    return;
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
    emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
                                FailureOrder, Size, Order, Scope);
    return;
  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n: {
    if (llvm::ConstantInt *IsWeakC = dyn_cast<llvm::ConstantInt>(IsWeak)) {
      emitAtomicCmpXchgFailureSet(CGF, E, IsWeakC->getZExtValue(), Dest, Ptr,
                                  Val1, Val2, FailureOrder, Size, Order, Scope);
    } else {
      // Create all the relevant BB's.
      llvm::BasicBlock *StrongBB =
          CGF.createBasicBlock("cmpxchg.strong", CGF.CurFn);
      llvm::BasicBlock *WeakBB =
          CGF.createBasicBlock("cmpxchg.weak", CGF.CurFn);
      llvm::BasicBlock *ContBB =
          CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);

      llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(IsWeak, WeakBB);
      SI->addCase(CGF.Builder.getInt1(false), StrongBB);

      CGF.Builder.SetInsertPoint(StrongBB);
      emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
                                  FailureOrder, Size, Order, Scope);
      CGF.Builder.CreateBr(ContBB);

      CGF.Builder.SetInsertPoint(WeakBB);
      emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
                                  FailureOrder, Size, Order, Scope);
      CGF.Builder.CreateBr(ContBB);

      CGF.Builder.SetInsertPoint(ContBB);
    }
    return;
  }
  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__opencl_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__atomic_load: {
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
    Load->setAtomic(Order, Scope);
    Load->setVolatile(E->isVolatile());
    CGF.Builder.CreateStore(Load, Dest);
    return;
  }

  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__opencl_atomic_store:
  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__atomic_store_n: {
    llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
    llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
    Store->setAtomic(Order, Scope);
    Store->setVolatile(E->isVolatile());
    return;
  }

  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__opencl_atomic_exchange:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__atomic_exchange:
    Op = llvm::AtomicRMWInst::Xchg;
    break;
  case AtomicExpr::AO__atomic_add_fetch:
    PostOp = llvm::Instruction::Add;
    LLVM_FALLTHROUGH;
  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__opencl_atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_add:
    Op = llvm::AtomicRMWInst::Add;
    break;

  case AtomicExpr::AO__atomic_sub_fetch:
    PostOp = llvm::Instruction::Sub;
    LLVM_FALLTHROUGH;
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__opencl_atomic_fetch_sub:
  case AtomicExpr::AO__atomic_fetch_sub:
    Op = llvm::AtomicRMWInst::Sub;
    break;

  case AtomicExpr::AO__atomic_min_fetch:
    PostOpMinMax = true;
    LLVM_FALLTHROUGH;
  case AtomicExpr::AO__c11_atomic_fetch_min:
  case AtomicExpr::AO__opencl_atomic_fetch_min:
  case AtomicExpr::AO__atomic_fetch_min:
    Op = E->getValueType()->isSignedIntegerType() ? llvm::AtomicRMWInst::Min
                                                  : llvm::AtomicRMWInst::UMin;
    break;

  case AtomicExpr::AO__atomic_max_fetch:
    PostOpMinMax = true;
    LLVM_FALLTHROUGH;
  case AtomicExpr::AO__c11_atomic_fetch_max:
  case AtomicExpr::AO__opencl_atomic_fetch_max:
  case AtomicExpr::AO__atomic_fetch_max:
    Op = E->getValueType()->isSignedIntegerType() ? llvm::AtomicRMWInst::Max
                                                  : llvm::AtomicRMWInst::UMax;
    break;

  case AtomicExpr::AO__atomic_and_fetch:
    PostOp = llvm::Instruction::And;
    LLVM_FALLTHROUGH;
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__opencl_atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_and:
    Op = llvm::AtomicRMWInst::And;
    break;

  case AtomicExpr::AO__atomic_or_fetch:
    PostOp = llvm::Instruction::Or;
    LLVM_FALLTHROUGH;
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__opencl_atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_or:
    Op = llvm::AtomicRMWInst::Or;
    break;

  case AtomicExpr::AO__atomic_xor_fetch:
    PostOp = llvm::Instruction::Xor;
    LLVM_FALLTHROUGH;
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__opencl_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_xor:
    Op = llvm::AtomicRMWInst::Xor;
    break;

  case AtomicExpr::AO__atomic_nand_fetch:
    PostOp = llvm::Instruction::And; // the NOT is special cased below
    LLVM_FALLTHROUGH;
  case AtomicExpr::AO__atomic_fetch_nand:
    Op = llvm::AtomicRMWInst::Nand;
    break;
  }
  llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);

  llvm::AtomicRMWInst *RMWI =
      CGF.Builder.CreateAtomicRMW(Op, Ptr.getPointer(), LoadVal1, Order, Scope);
  RMWI->setVolatile(E->isVolatile());

  // For __atomic_*_fetch operations, perform the operation again to
  // determine the value which was written.
  llvm::Value *Result = RMWI;
  if (PostOpMinMax)
    Result = EmitPostAtomicMinMax(CGF.Builder, E->getOp(),
                                  E->getValueType()->isSignedIntegerType(),
                                  RMWI, LoadVal1);
  if (PostOp)
    Result = CGF.Builder.CreateBinOp((llvm::Instruction::BinaryOps)PostOp, RMWI,
                                     LoadVal1);
  if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
    Result = CGF.Builder.CreateNot(Result);
  CGF.Builder.CreateStore(Result, Dest);
}
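
// Illustration (not from the original file): __atomic_nand_fetch needs both
// the post-op AND and the final NOT, since nand(old, v) == ~(old & v):
//
//   unsigned char r = __atomic_nand_fetch(&c, m, __ATOMIC_RELAXED);
//   // old = atomicrmw nand(&c, m);  r = ~(old & m)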
static void
EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *Expr, Address Dest, Address Ptr,
             Address Val1, Address Val2, llvm::Value *IsWeak,
             llvm::Value *FailureOrder, uint64_t Size,
             llvm::AtomicOrdering Order, llvm::Value *Scope) {
  auto ScopeModel = Expr->getScopeModel();

  // LLVM atomic instructions always have a synch scope. If the clang atomic
  // expression has no scope operand, use the default LLVM synch scope.
  if (!ScopeModel) {
    EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
                 Order, CGF.CGM.getLLVMContext().getOrInsertSyncScopeID(""));
    return;
  }

  // Handle constant scope.
  if (auto SC = dyn_cast<llvm::ConstantInt>(Scope)) {
    auto SCID = CGF.getTargetHooks().getLLVMSyncScopeID(
        CGF.CGM.getLangOpts(), ScopeModel->map(SC->getZExtValue()),
        Order, CGF.CGM.getLLVMContext());
    EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
                 Order, SCID);
    return;
  }

  // Handle non-constant scope.
  auto &Builder = CGF.Builder;
  auto Scopes = ScopeModel->getRuntimeValues();
  llvm::DenseMap<unsigned, llvm::BasicBlock *> BB;
  for (auto S : Scopes)
    BB[S] = CGF.createBasicBlock(getAsString(ScopeModel->map(S)), CGF.CurFn);

  llvm::BasicBlock *ContBB =
      CGF.createBasicBlock("atomic.scope.continue", CGF.CurFn);

  auto *SC = Builder.CreateIntCast(Scope, Builder.getInt32Ty(), false);
  // If an unsupported synch scope is encountered at run time, assume a
  // fallback synch scope value.
  auto FallBack = ScopeModel->getFallBackValue();
  llvm::SwitchInst *SI = Builder.CreateSwitch(SC, BB[FallBack]);
  for (auto S : Scopes) {
    auto *B = BB[S];
    if (S != FallBack)
      SI->addCase(Builder.getInt32(S), B);

    Builder.SetInsertPoint(B);
    EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
                 Order,
                 CGF.getTargetHooks().getLLVMSyncScopeID(CGF.CGM.getLangOpts(),
                                                         ScopeModel->map(S),
                                                         Order,
                                                         CGF.getLLVMContext()));
    Builder.CreateBr(ContBB);
  }

  Builder.SetInsertPoint(ContBB);
}
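
// Illustration (not from the original file): OpenCL 2.0 atomics carry a
// memory-scope operand; when it is not a compile-time constant, the switch
// above dispatches on the runtime value:
//
//   atomic_fetch_add_explicit(p, 1, memory_order_relaxed, scope);
//   // one basic block per supported scope (work_group, device, ...), each
//   // emitting the same RMW with a different LLVM syncscope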
static void
AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args,
                  bool UseOptimizedLibcall, llvm::Value *Val, QualType ValTy,
                  SourceLocation Loc, CharUnits SizeInChars) {
  if (UseOptimizedLibcall) {
    // Load the value and pass it to the function directly, coerced into an
    // appropriately sized integer type.
    CharUnits Align = CGF.getContext().getTypeAlignInChars(ValTy);
    int64_t SizeInBits = CGF.getContext().toBits(SizeInChars);
    ValTy =
        CGF.getContext().getIntTypeForBitwidth(SizeInBits, /*Signed=*/false);
    llvm::Type *IPtrTy = llvm::IntegerType::get(CGF.getLLVMContext(),
                                                SizeInBits)->getPointerTo();
    Address Ptr = Address(CGF.Builder.CreateBitCast(Val, IPtrTy), Align);
    Val = CGF.EmitLoadOfScalar(Ptr, false,
                               CGF.getContext().getPointerType(ValTy), Loc);
    Args.add(RValue::get(Val), ValTy);
  } else {
    // Non-optimized functions always take a reference.
    Args.add(RValue::get(CGF.EmitCastToVoidPtr(Val)),
             CGF.getContext().VoidPtrTy);
  }
}
RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
  QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
  QualType MemTy = AtomicTy;
  if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
    MemTy = AT->getValueType();
  llvm::Value *IsWeak = nullptr, *OrderFail = nullptr;

  Address Val1 = Address::invalid();
  Address Val2 = Address::invalid();
  Address Dest = Address::invalid();
  Address Ptr = EmitPointerWithAlignment(E->getPtr());

  if (E->getOp() == AtomicExpr::AO__c11_atomic_init ||
      E->getOp() == AtomicExpr::AO__opencl_atomic_init) {
    LValue lvalue = MakeAddrLValue(Ptr, AtomicTy);
    EmitAtomicInit(E->getVal1(), lvalue);
    return RValue::get(nullptr);
  }

  CharUnits sizeChars, alignChars;
  std::tie(sizeChars, alignChars) = getContext().getTypeInfoInChars(AtomicTy);
  uint64_t Size = sizeChars.getQuantity();
  unsigned MaxInlineWidthInBits = getTarget().getMaxAtomicInlineWidth();

  bool Oversized = getContext().toBits(sizeChars) > MaxInlineWidthInBits;
  bool Misaligned = (Ptr.getAlignment() % sizeChars) != 0;
  bool UseLibcall = Misaligned | Oversized;
  if (UseLibcall) {
    CGM.getDiags().Report(E->getBeginLoc(), diag::warn_atomic_op_misaligned);
  }
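
  // Illustration (not from the original file): packing can defeat the inline
  // path regardless of size:
  //
  //   struct __attribute__((packed)) S { char c; _Atomic int a; };
  //   // &s.a may be only 1-byte aligned, so Misaligned is true and the
  //   // operation is emitted as a libcall (with the warning above).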
  llvm::Value *Order = EmitScalarExpr(E->getOrder());
  llvm::Value *Scope =
      E->getScopeModel() ? EmitScalarExpr(E->getScope()) : nullptr;

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
  case AtomicExpr::AO__opencl_atomic_init:
    llvm_unreachable("Already handled above with EmitAtomicInit!");

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__opencl_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
    break;

  case AtomicExpr::AO__atomic_load:
    Dest = EmitPointerWithAlignment(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_store:
    Val1 = EmitPointerWithAlignment(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_exchange:
    Val1 = EmitPointerWithAlignment(E->getVal1());
    Dest = EmitPointerWithAlignment(E->getVal2());
    break;

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
  case AtomicExpr::AO__atomic_compare_exchange_n:
  case AtomicExpr::AO__atomic_compare_exchange:
    Val1 = EmitPointerWithAlignment(E->getVal1());
    if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
      Val2 = EmitPointerWithAlignment(E->getVal2());
    else
      Val2 = EmitValToTemp(*this, E->getVal2());
    OrderFail = EmitScalarExpr(E->getOrderFail());
    if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange_n ||
        E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
      IsWeak = EmitScalarExpr(E->getWeak());
    break;

  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__opencl_atomic_fetch_add:
  case AtomicExpr::AO__opencl_atomic_fetch_sub:
    if (MemTy->isPointerType()) {
      // For pointer arithmetic, we're required to do a bit of math:
      // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
      QualType Val1Ty = E->getVal1()->getType();
      llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
      CharUnits PointeeIncAmt =
          getContext().getTypeSizeInChars(MemTy->getPointeeType());
      Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
      auto Temp = CreateMemTemp(Val1Ty, ".atomictmp");
      Val1 = Temp;
      EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Temp, Val1Ty));
      break;
    }
    LLVM_FALLTHROUGH;
  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__atomic_sub_fetch:
  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__opencl_atomic_store:
  case AtomicExpr::AO__opencl_atomic_exchange:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__c11_atomic_fetch_max:
  case AtomicExpr::AO__c11_atomic_fetch_min:
  case AtomicExpr::AO__opencl_atomic_fetch_and:
  case AtomicExpr::AO__opencl_atomic_fetch_or:
  case AtomicExpr::AO__opencl_atomic_fetch_xor:
  case AtomicExpr::AO__opencl_atomic_fetch_min:
  case AtomicExpr::AO__opencl_atomic_fetch_max:
  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__atomic_nand_fetch:
  case AtomicExpr::AO__atomic_max_fetch:
  case AtomicExpr::AO__atomic_min_fetch:
  case AtomicExpr::AO__atomic_fetch_max:
  case AtomicExpr::AO__atomic_fetch_min:
    Val1 = EmitValToTemp(*this, E->getVal1());
    break;
  }
  QualType RValTy = E->getType().getUnqualifiedType();

  // The inlined atomics only function on iN types, where N is a power of 2. We
  // need to make sure the pointer is suitably aligned and then cast it to iN*.
  LValue AtomicVal = MakeAddrLValue(Ptr, AtomicTy);
  AtomicInfo Atomics(*this, AtomicVal);

  Ptr = Atomics.emitCastToAtomicIntPointer(Ptr);
  if (Val1.isValid()) Val1 = Atomics.convertToAtomicIntPointer(Val1);
  if (Val2.isValid()) Val2 = Atomics.convertToAtomicIntPointer(Val2);
  if (Dest.isValid())
    Dest = Atomics.emitCastToAtomicIntPointer(Dest);
  else if (E->isCmpXChg())
    Dest = CreateMemTemp(RValTy, "cmpxchg.bool");
  else if (!RValTy->isVoidType())
    Dest = Atomics.emitCastToAtomicIntPointer(Atomics.CreateTempAlloca());
  // Use a library call.  See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
  if (UseLibcall) {
    bool UseOptimizedLibcall = false;
    switch (E->getOp()) {
    case AtomicExpr::AO__c11_atomic_init:
    case AtomicExpr::AO__opencl_atomic_init:
      llvm_unreachable("Already handled above with EmitAtomicInit!");

    case AtomicExpr::AO__c11_atomic_fetch_add:
    case AtomicExpr::AO__opencl_atomic_fetch_add:
    case AtomicExpr::AO__atomic_fetch_add:
    case AtomicExpr::AO__c11_atomic_fetch_and:
    case AtomicExpr::AO__opencl_atomic_fetch_and:
    case AtomicExpr::AO__atomic_fetch_and:
    case AtomicExpr::AO__c11_atomic_fetch_or:
    case AtomicExpr::AO__opencl_atomic_fetch_or:
    case AtomicExpr::AO__atomic_fetch_or:
    case AtomicExpr::AO__atomic_fetch_nand:
    case AtomicExpr::AO__c11_atomic_fetch_sub:
    case AtomicExpr::AO__opencl_atomic_fetch_sub:
    case AtomicExpr::AO__atomic_fetch_sub:
    case AtomicExpr::AO__c11_atomic_fetch_xor:
    case AtomicExpr::AO__opencl_atomic_fetch_xor:
    case AtomicExpr::AO__opencl_atomic_fetch_min:
    case AtomicExpr::AO__opencl_atomic_fetch_max:
    case AtomicExpr::AO__atomic_fetch_xor:
    case AtomicExpr::AO__c11_atomic_fetch_max:
    case AtomicExpr::AO__c11_atomic_fetch_min:
    case AtomicExpr::AO__atomic_add_fetch:
    case AtomicExpr::AO__atomic_and_fetch:
    case AtomicExpr::AO__atomic_nand_fetch:
    case AtomicExpr::AO__atomic_or_fetch:
    case AtomicExpr::AO__atomic_sub_fetch:
    case AtomicExpr::AO__atomic_xor_fetch:
    case AtomicExpr::AO__atomic_fetch_max:
    case AtomicExpr::AO__atomic_fetch_min:
    case AtomicExpr::AO__atomic_max_fetch:
    case AtomicExpr::AO__atomic_min_fetch:
      // For these, only library calls for certain sizes exist.
      UseOptimizedLibcall = true;
      break;

    case AtomicExpr::AO__atomic_load:
    case AtomicExpr::AO__atomic_store:
    case AtomicExpr::AO__atomic_exchange:
    case AtomicExpr::AO__atomic_compare_exchange:
      // Use the generic version if we don't know that the operand will be
      // suitably aligned for the optimized version.
      if (Misaligned)
        break;
      LLVM_FALLTHROUGH;
    case AtomicExpr::AO__c11_atomic_load:
    case AtomicExpr::AO__c11_atomic_store:
    case AtomicExpr::AO__c11_atomic_exchange:
    case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    case AtomicExpr::AO__opencl_atomic_load:
    case AtomicExpr::AO__opencl_atomic_store:
    case AtomicExpr::AO__opencl_atomic_exchange:
    case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
    case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
    case AtomicExpr::AO__atomic_load_n:
    case AtomicExpr::AO__atomic_store_n:
    case AtomicExpr::AO__atomic_exchange_n:
    case AtomicExpr::AO__atomic_compare_exchange_n:
      // Only use optimized library calls for sizes for which they exist.
      if (Size == 1 || Size == 2 || Size == 4 || Size == 8)
        UseOptimizedLibcall = true;
      break;
    }
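
    // Illustration (not from the original file): this flag selects between
    // the size-suffixed and the generic runtime entry points, e.g.:
    //
    //   _Atomic long long v;                          // 8 bytes, power of 2
    //   __c11_atomic_fetch_add(&v, 1, __ATOMIC_SEQ_CST);
    //   //   -> __atomic_fetch_add_8(&v, 1, order)
    //   // a 24-byte exchange instead uses the generic, pointer-based
    //   //   __atomic_exchange(24, &obj, &val, &ret, order)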
    CallArgList Args;
    if (!UseOptimizedLibcall) {
      // For non-optimized library calls, the size is the first parameter.
      Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
               getContext().getSizeType());
    }
    // The atomic address is the second parameter.
    // The OpenCL atomic library functions only accept pointer arguments to
    // generic address space.
    auto CastToGenericAddrSpace = [&](llvm::Value *V, QualType PT) {
      if (!E->isOpenCL())
        return V;
      auto AS = PT->castAs<PointerType>()->getPointeeType().getAddressSpace();
      if (AS == LangAS::opencl_generic)
        return V;
      auto DestAS = getContext().getTargetAddressSpace(LangAS::opencl_generic);
      auto T = V->getType();
      auto *DestType = T->getPointerElementType()->getPointerTo(DestAS);

      return getTargetHooks().performAddrSpaceCast(
          *this, V, AS, LangAS::opencl_generic, DestType, false);
    };

    Args.add(RValue::get(CastToGenericAddrSpace(
                 EmitCastToVoidPtr(Ptr.getPointer()), E->getPtr()->getType())),
             getContext().VoidPtrTy);
    std::string LibCallName;
    QualType LoweredMemTy =
        MemTy->isPointerType() ? getContext().getIntPtrType() : MemTy;
    QualType RetTy;
    bool HaveRetTy = false;
    llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;
    bool PostOpMinMax = false;
    switch (E->getOp()) {
    case AtomicExpr::AO__c11_atomic_init:
    case AtomicExpr::AO__opencl_atomic_init:
      llvm_unreachable("Already handled!");
    // There is only one libcall for compare and exchange, because there is no
    // optimisation benefit possible from a libcall version of a weak compare
    // and exchange.
    // bool __atomic_compare_exchange(size_t size, void *mem, void *expected,
    //                                void *desired, int success, int failure)
    case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
    case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
    case AtomicExpr::AO__atomic_compare_exchange:
    case AtomicExpr::AO__atomic_compare_exchange_n:
      LibCallName = "__atomic_compare_exchange";
      RetTy = getContext().BoolTy;
      HaveRetTy = true;
      Args.add(RValue::get(CastToGenericAddrSpace(
                   EmitCastToVoidPtr(Val1.getPointer()),
                   E->getVal1()->getType())),
               getContext().VoidPtrTy);
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val2.getPointer(),
                        MemTy, E->getExprLoc(), sizeChars);
      Args.add(RValue::get(Order), getContext().IntTy);
      Order = OrderFail;
      break;
    // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
    //                        int order)
    case AtomicExpr::AO__c11_atomic_exchange:
    case AtomicExpr::AO__opencl_atomic_exchange:
    case AtomicExpr::AO__atomic_exchange_n:
    case AtomicExpr::AO__atomic_exchange:
      LibCallName = "__atomic_exchange";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
                        MemTy, E->getExprLoc(), sizeChars);
      break;
    // void __atomic_store(size_t size, void *mem, void *val, int order)
    case AtomicExpr::AO__c11_atomic_store:
    case AtomicExpr::AO__opencl_atomic_store:
    case AtomicExpr::AO__atomic_store:
    case AtomicExpr::AO__atomic_store_n:
      LibCallName = "__atomic_store";
      RetTy = getContext().VoidTy;
      HaveRetTy = true;
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
                        MemTy, E->getExprLoc(), sizeChars);
      break;
    // void __atomic_load(size_t size, void *mem, void *return, int order)
    case AtomicExpr::AO__c11_atomic_load:
    case AtomicExpr::AO__opencl_atomic_load:
    case AtomicExpr::AO__atomic_load:
    case AtomicExpr::AO__atomic_load_n:
      LibCallName = "__atomic_load";
      break;
    // T __atomic_add_fetch_N(T *mem, T val, int order)
    // T __atomic_fetch_add_N(T *mem, T val, int order)
    case AtomicExpr::AO__atomic_add_fetch:
      PostOp = llvm::Instruction::Add;
      LLVM_FALLTHROUGH;
    case AtomicExpr::AO__c11_atomic_fetch_add:
    case AtomicExpr::AO__opencl_atomic_fetch_add:
    case AtomicExpr::AO__atomic_fetch_add:
      LibCallName = "__atomic_fetch_add";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
                        LoweredMemTy, E->getExprLoc(), sizeChars);
      break;
    case AtomicExpr::AO__atomic_and_fetch:
      PostOp = llvm::Instruction::And;
      LLVM_FALLTHROUGH;
    case AtomicExpr::AO__c11_atomic_fetch_and:
    case AtomicExpr::AO__opencl_atomic_fetch_and:
    case AtomicExpr::AO__atomic_fetch_and:
      LibCallName = "__atomic_fetch_and";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
                        MemTy, E->getExprLoc(), sizeChars);
      break;
    case AtomicExpr::AO__atomic_or_fetch:
      PostOp = llvm::Instruction::Or;
      LLVM_FALLTHROUGH;
    case AtomicExpr::AO__c11_atomic_fetch_or:
    case AtomicExpr::AO__opencl_atomic_fetch_or:
    case AtomicExpr::AO__atomic_fetch_or:
      LibCallName = "__atomic_fetch_or";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
                        MemTy, E->getExprLoc(), sizeChars);
      break;
    case AtomicExpr::AO__atomic_sub_fetch:
      PostOp = llvm::Instruction::Sub;
      LLVM_FALLTHROUGH;
    case AtomicExpr::AO__c11_atomic_fetch_sub:
    case AtomicExpr::AO__opencl_atomic_fetch_sub:
    case AtomicExpr::AO__atomic_fetch_sub:
      LibCallName = "__atomic_fetch_sub";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
                        LoweredMemTy, E->getExprLoc(), sizeChars);
      break;
    case AtomicExpr::AO__atomic_xor_fetch:
      PostOp = llvm::Instruction::Xor;
      LLVM_FALLTHROUGH;
    case AtomicExpr::AO__c11_atomic_fetch_xor:
    case AtomicExpr::AO__opencl_atomic_fetch_xor:
    case AtomicExpr::AO__atomic_fetch_xor:
      LibCallName = "__atomic_fetch_xor";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
                        MemTy, E->getExprLoc(), sizeChars);
      break;
    case AtomicExpr::AO__atomic_min_fetch:
      PostOpMinMax = true;
      LLVM_FALLTHROUGH;
    case AtomicExpr::AO__c11_atomic_fetch_min:
    case AtomicExpr::AO__atomic_fetch_min:
    case AtomicExpr::AO__opencl_atomic_fetch_min:
      LibCallName = E->getValueType()->isSignedIntegerType()
                        ? "__atomic_fetch_min"
                        : "__atomic_fetch_umin";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
                        LoweredMemTy, E->getExprLoc(), sizeChars);
      break;
    case AtomicExpr::AO__atomic_max_fetch:
      PostOpMinMax = true;
      LLVM_FALLTHROUGH;
    case AtomicExpr::AO__c11_atomic_fetch_max:
    case AtomicExpr::AO__atomic_fetch_max:
    case AtomicExpr::AO__opencl_atomic_fetch_max:
      LibCallName = E->getValueType()->isSignedIntegerType()
                        ? "__atomic_fetch_max"
                        : "__atomic_fetch_umax";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
                        LoweredMemTy, E->getExprLoc(), sizeChars);
      break;
    // T __atomic_nand_fetch_N(T *mem, T val, int order)
    // T __atomic_fetch_nand_N(T *mem, T val, int order)
    case AtomicExpr::AO__atomic_nand_fetch:
      PostOp = llvm::Instruction::And; // the NOT is special cased below
      LLVM_FALLTHROUGH;
    case AtomicExpr::AO__atomic_fetch_nand:
      LibCallName = "__atomic_fetch_nand";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
                        MemTy, E->getExprLoc(), sizeChars);
      break;
    }
    if (E->isOpenCL()) {
      LibCallName = std::string("__opencl") +
          StringRef(LibCallName).drop_front(1).str();
    }
    // Optimized functions have the size in their name.
    if (UseOptimizedLibcall)
      LibCallName += "_" + llvm::utostr(Size);
    // By default, assume we return a value of the atomic type.
    if (!HaveRetTy) {
      if (UseOptimizedLibcall) {
        // Value is returned directly as an appropriately sized integer.
        RetTy = getContext().getIntTypeForBitwidth(
            getContext().toBits(sizeChars), /*Signed=*/false);
      } else {
        // Value is returned through a parameter before the order.
        RetTy = getContext().VoidTy;
        Args.add(RValue::get(EmitCastToVoidPtr(Dest.getPointer())),
                 getContext().VoidPtrTy);
      }
    }
    // The order is always the last parameter.
    Args.add(RValue::get(Order),
             getContext().IntTy);
    if (E->isOpenCL())
      Args.add(RValue::get(Scope), getContext().IntTy);
    // PostOp is only needed for the atomic_*_fetch operations, and thus is
    // only supported by the optimized-libcall codepath.
    assert(UseOptimizedLibcall || (!PostOp && !PostOpMinMax));

    RValue Res = emitAtomicLibcall(*this, LibCallName, RetTy, Args);
    // The value is returned directly from the libcall.
    if (E->isCmpXChg())
      return Res;

    // The value is returned directly for optimized libcalls, but the expr
    // provided an out-param.
    if (UseOptimizedLibcall && Res.getScalarVal()) {
      llvm::Value *ResVal = Res.getScalarVal();
      if (PostOpMinMax) {
        llvm::Value *LoadVal1 = Args[1].getRValue(*this).getScalarVal();
        ResVal = EmitPostAtomicMinMax(Builder, E->getOp(),
                                      E->getValueType()->isSignedIntegerType(),
                                      ResVal, LoadVal1);
      } else if (PostOp) {
        llvm::Value *LoadVal1 = Args[1].getRValue(*this).getScalarVal();
        ResVal = Builder.CreateBinOp(PostOp, ResVal, LoadVal1);
      }
      if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
        ResVal = Builder.CreateNot(ResVal);

      Builder.CreateStore(
          ResVal,
          Builder.CreateBitCast(Dest, ResVal->getType()->getPointerTo()));
    }

    if (RValTy->isVoidType())
      return RValue::get(nullptr);

    return convertTempToRValue(
        Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo()),
        RValTy, E->getExprLoc());
  }
  bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
                 E->getOp() == AtomicExpr::AO__opencl_atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store_n;
  bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
                E->getOp() == AtomicExpr::AO__opencl_atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load_n;
  if (isa<llvm::ConstantInt>(Order)) {
    auto ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
    // We should not ever get to a case where the ordering isn't a valid CABI
    // value, but it's hard to enforce that in general.
    if (llvm::isValidAtomicOrderingCABI(ord))
      switch ((llvm::AtomicOrderingCABI)ord) {
      case llvm::AtomicOrderingCABI::relaxed:
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                     llvm::AtomicOrdering::Monotonic, Scope);
        break;
      case llvm::AtomicOrderingCABI::consume:
      case llvm::AtomicOrderingCABI::acquire:
        if (IsStore)
          break; // Avoid crashing on code with undefined behavior
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                     llvm::AtomicOrdering::Acquire, Scope);
        break;
      case llvm::AtomicOrderingCABI::release:
        if (IsLoad)
          break; // Avoid crashing on code with undefined behavior
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                     llvm::AtomicOrdering::Release, Scope);
        break;
      case llvm::AtomicOrderingCABI::acq_rel:
        if (IsLoad || IsStore)
          break; // Avoid crashing on code with undefined behavior
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                     llvm::AtomicOrdering::AcquireRelease, Scope);
        break;
      case llvm::AtomicOrderingCABI::seq_cst:
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                     llvm::AtomicOrdering::SequentiallyConsistent, Scope);
        break;
      }
    if (RValTy->isVoidType())
      return RValue::get(nullptr);

    return convertTempToRValue(
        Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo(
                                        Dest.getAddressSpace())),
        RValTy, E->getExprLoc());
  }
  // Long case, when Order isn't obviously constant.

  // Create all the relevant BB's.
  llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
                   *ReleaseBB = nullptr, *AcqRelBB = nullptr,
                   *SeqCstBB = nullptr;
  MonotonicBB = createBasicBlock("monotonic", CurFn);
  if (!IsStore)
    AcquireBB = createBasicBlock("acquire", CurFn);
  if (!IsLoad)
    ReleaseBB = createBasicBlock("release", CurFn);
  if (!IsLoad && !IsStore)
    AcqRelBB = createBasicBlock("acqrel", CurFn);
  SeqCstBB = createBasicBlock("seqcst", CurFn);
  llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

  // Create the switch for the split.
  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
  // doesn't matter unless someone is crazy enough to use something that
  // doesn't fold to a constant for the ordering.
  Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
  llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);

  // Emit all the different atomics.
  Builder.SetInsertPoint(MonotonicBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
               llvm::AtomicOrdering::Monotonic, Scope);
  Builder.CreateBr(ContBB);
  if (!IsStore) {
    Builder.SetInsertPoint(AcquireBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                 llvm::AtomicOrdering::Acquire, Scope);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
                AcquireBB);
    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
                AcquireBB);
  }
  if (!IsLoad) {
    Builder.SetInsertPoint(ReleaseBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                 llvm::AtomicOrdering::Release, Scope);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::release),
                ReleaseBB);
  }
  if (!IsLoad && !IsStore) {
    Builder.SetInsertPoint(AcqRelBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                 llvm::AtomicOrdering::AcquireRelease, Scope);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acq_rel),
                AcqRelBB);
  }
  Builder.SetInsertPoint(SeqCstBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
               llvm::AtomicOrdering::SequentiallyConsistent, Scope);
  Builder.CreateBr(ContBB);
  SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
              SeqCstBB);

  // Cleanup and return.
  Builder.SetInsertPoint(ContBB);
  if (RValTy->isVoidType())
    return RValue::get(nullptr);

  assert(Atomics.getValueSizeInBits() <= Atomics.getAtomicSizeInBits());
  return convertTempToRValue(
      Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo(
                                      Dest.getAddressSpace())),
      RValTy, E->getExprLoc());
}
Address AtomicInfo::emitCastToAtomicIntPointer(Address addr) const {
  unsigned addrspace =
      cast<llvm::PointerType>(addr.getPointer()->getType())->getAddressSpace();
  llvm::IntegerType *ty =
      llvm::IntegerType::get(CGF.getLLVMContext(), AtomicSizeInBits);
  return CGF.Builder.CreateBitCast(addr, ty->getPointerTo(addrspace));
}
Address AtomicInfo::convertToAtomicIntPointer(Address Addr) const {
  llvm::Type *Ty = Addr.getElementType();
  uint64_t SourceSizeInBits = CGF.CGM.getDataLayout().getTypeSizeInBits(Ty);
  if (SourceSizeInBits != AtomicSizeInBits) {
    Address Tmp = CreateTempAlloca();
    CGF.Builder.CreateMemCpy(Tmp, Addr,
                             std::min(AtomicSizeInBits, SourceSizeInBits) / 8);
    Addr = Tmp;
  }

  return emitCastToAtomicIntPointer(Addr);
}
RValue AtomicInfo::convertAtomicTempToRValue(Address addr,
                                             AggValueSlot resultSlot,
                                             SourceLocation loc,
                                             bool asValue) const {
  // ... (body elided in this excerpt)
}
RValue AtomicInfo::ConvertIntToValueOrAtomic(llvm::Value *IntVal,
                                             AggValueSlot ResultSlot,
                                             SourceLocation Loc,
                                             bool AsValue) const {
  // Try not to go through memory in some easy cases.
  assert(IntVal->getType()->isIntegerTy() && "Expected integer value");
  if (getEvaluationKind() == TEK_Scalar &&
      (((!LVal.isBitField() ||
         LVal.getBitFieldInfo().Size == ValueSizeInBits) &&
        !hasPadding()) ||
       !AsValue)) {
    auto *ValTy = AsValue
                      ? CGF.ConvertTypeForMem(ValueTy)
                      : getAtomicAddress().getType()->getPointerElementType();
    if (ValTy->isIntegerTy()) {
      assert(IntVal->getType() == ValTy && "Different integer types.");
      return RValue::get(CGF.EmitFromMemory(IntVal, ValueTy));
    } else if (ValTy->isPointerTy())
      return RValue::get(CGF.Builder.CreateIntToPtr(IntVal, ValTy));
    else if (llvm::CastInst::isBitCastable(IntVal->getType(), ValTy))
      return RValue::get(CGF.Builder.CreateBitCast(IntVal, ValTy));
  }

  // Create a temporary big enough to hold the atomic integer.
  Address Temp = Address::invalid();
  bool TempIsVolatile = false;
  if (AsValue && getEvaluationKind() == TEK_Aggregate) {
    assert(!ResultSlot.isIgnored());
    Temp = ResultSlot.getAddress();
    TempIsVolatile = ResultSlot.isVolatile();
  } else {
    Temp = CreateTempAlloca();
  }

  // Slam the integer into the temporary.
  Address CastTemp = emitCastToAtomicIntPointer(Temp);
  CGF.Builder.CreateStore(IntVal, CastTemp)
      ->setVolatile(TempIsVolatile);

  return convertAtomicTempToRValue(Temp, ResultSlot, Loc, AsValue);
}
void AtomicInfo::EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
                                       llvm::AtomicOrdering AO, bool) {
  // void __atomic_load(size_t size, void *mem, void *return, int order);
  CallArgList Args;
  // ... (size, source, destination, and ordering arguments)
  emitAtomicLibcall(CGF, "__atomic_load", CGF.getContext().VoidTy, Args);
}
llvm::Value *AtomicInfo::EmitAtomicLoadOp(llvm::AtomicOrdering AO,
                                          bool IsVolatile) {
  // Okay, we're doing this natively.
  Address Addr = getAtomicAddressAsAtomicIntPointer();
  llvm::LoadInst *Load = CGF.Builder.CreateLoad(Addr, "atomic-load");
  Load->setAtomic(AO);

  // Other decoration.
  if (IsVolatile)
    Load->setVolatile(true);
  CGF.CGM.DecorateInstructionWithTBAA(Load, LVal.getTBAAInfo());
  return Load;
}
/// An LValue is a candidate for having its loads and stores be made atomic if
/// we are operating under /volatile:ms *and* the LValue itself is volatile and
/// performing such an operation can be performed without a libcall.
bool CodeGenFunction::LValueIsSuitableForInlineAtomic(LValue LV) {
  if (!CGM.getCodeGenOpts().MSVolatile) return false;
  AtomicInfo AI(*this, LV);
  bool IsVolatile = LV.isVolatile() || hasVolatileMember(LV.getType());
  // An atomic is inline if we don't need to use a libcall.
  bool AtomicIsInline = !AI.shouldUseLibcall();
  // MSVC doesn't seem to do this for types wider than a pointer.
  if (getContext().getTypeSize(LV.getType()) >
      getContext().getTypeSize(getContext().getIntPtrType()))
    return false;
  return IsVolatile && AtomicIsInline;
}
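
// Illustration (not from the original file): under the MSVC-compatible
// /volatile:ms model, an ordinary volatile access such as
//
//   volatile long Flag;
//   long v = Flag;   // emitted as an atomic acquire load
//   Flag = 1;        // emitted as an atomic release store
//
// passes this predicate as long as the type is lock-free and no wider than a
// pointer; otherwise the usual non-atomic volatile lowering is kept.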
RValue CodeGenFunction::EmitAtomicLoad(LValue LV, SourceLocation SL,
                                       AggValueSlot Slot) {
  llvm::AtomicOrdering AO;
  bool IsVolatile = LV.isVolatileQualified();
  if (LV.getType()->isAtomicType()) {
    AO = llvm::AtomicOrdering::SequentiallyConsistent;
  } else {
    AO = llvm::AtomicOrdering::Acquire;
    IsVolatile = true;
  }
  return EmitAtomicLoad(LV, SL, AO, IsVolatile, Slot);
}
RValue AtomicInfo::EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
                                  bool AsValue, llvm::AtomicOrdering AO,
                                  bool IsVolatile) {
  // Check whether we should use a library call.
  if (shouldUseLibcall()) {
    Address TempAddr = Address::invalid();
    if (LVal.isSimple() && !ResultSlot.isIgnored()) {
      assert(getEvaluationKind() == TEK_Aggregate);
      TempAddr = ResultSlot.getAddress();
    } else
      TempAddr = CreateTempAlloca();

    EmitAtomicLoadLibcall(TempAddr.getPointer(), AO, IsVolatile);

    // Okay, turn that back into the original value or whole atomic (for
    // non-simple lvalues) type.
    return convertAtomicTempToRValue(TempAddr, ResultSlot, Loc, AsValue);
  }

  // Okay, we're doing this natively.
  auto *Load = EmitAtomicLoadOp(AO, IsVolatile);

  // If we're ignoring an aggregate return, don't do anything.
  if (getEvaluationKind() == TEK_Aggregate && ResultSlot.isIgnored())
    return RValue::getAggregate(Address::invalid(), false);

  // Turn that back into the original value or atomic type.
  return ConvertIntToValueOrAtomic(Load, ResultSlot, Loc, AsValue);
}
/// Emit a load from an l-value of atomic type.  Note that the r-value
/// we produce is an r-value of the atomic *value* type.
RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
                                       llvm::AtomicOrdering AO, bool IsVolatile,
                                       AggValueSlot resultSlot) {
  AtomicInfo Atomics(*this, src);
  return Atomics.EmitAtomicLoad(resultSlot, loc, /*AsValue=*/true, AO,
                                IsVolatile);
}
/// Copy an r-value into memory as part of storing to an atomic type.
void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
  assert(LVal.isSimple());
  // ... (aggregate r-values are copied as-is with EmitAggregateCopy)

  // Zero out the buffer if necessary.
  emitMemSetZeroIfNecessary();

  // Drill past the padding if present.
  LValue TempLVal = projectValue();

  // Okay, store the rvalue in.
  if (rvalue.isScalar())
    CGF.EmitStoreOfScalar(rvalue.getScalarVal(), TempLVal, /*init*/ true);
  else
    CGF.EmitStoreOfComplex(rvalue.getComplexVal(), TempLVal, /*init*/ true);
}
/// Materialize an r-value into memory for the purposes of storing it
/// to an atomic type.
Address AtomicInfo::materializeRValue(RValue rvalue) const {
  // Aggregate r-values are already in memory.
  if (rvalue.isAggregate())
    return rvalue.getAggregateAddress();

  // Otherwise, make a temporary and materialize into it.
  LValue TempLV = CGF.MakeAddrLValue(CreateTempAlloca(), getAtomicType());
  AtomicInfo Atomics(CGF, TempLV);
  Atomics.emitCopyIntoMemory(rvalue);
  return TempLV.getAddress(CGF);
}
llvm::Value *AtomicInfo::convertRValueToInt(RValue RVal) const {
  // If we've got a scalar value of the right size, try to avoid going
  // through memory.
  if (RVal.isScalar() && (!hasPadding() || !LVal.isSimple())) {
    llvm::Value *Value = RVal.getScalarVal();
    if (isa<llvm::IntegerType>(Value->getType()))
      return CGF.EmitToMemory(Value, ValueTy);
    else {
      llvm::IntegerType *InputIntTy = llvm::IntegerType::get(
          CGF.getLLVMContext(),
          LVal.isSimple() ? getValueSizeInBits() : getAtomicSizeInBits());
      if (isa<llvm::PointerType>(Value->getType()))
        return CGF.Builder.CreatePtrToInt(Value, InputIntTy);
      else if (llvm::BitCastInst::isBitCastable(Value->getType(), InputIntTy))
        return CGF.Builder.CreateBitCast(Value, InputIntTy);
    }
  }
  // Otherwise, we need to go through memory.
  // Put the r-value in memory.
  Address Addr = materializeRValue(RVal);

  // Cast the temporary to the atomic int type and pull a value out.
  Addr = emitCastToAtomicIntPointer(Addr);
  return CGF.Builder.CreateLoad(Addr);
}
std::pair<llvm::Value *, llvm::Value *> AtomicInfo::EmitAtomicCompareExchangeOp(
    llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
    llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak) {
  // Do the atomic store.
  Address Addr = getAtomicAddressAsAtomicIntPointer();
  auto *Inst = CGF.Builder.CreateAtomicCmpXchg(Addr.getPointer(),
                                               ExpectedVal, DesiredVal,
                                               Success, Failure);
  // Other decoration.
  Inst->setVolatile(LVal.isVolatileQualified());
  Inst->setWeak(IsWeak);

  // Okay, turn that back into the original value type.
  auto *PreviousVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/0);
  auto *SuccessFailureVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/1);
  return std::make_pair(PreviousVal, SuccessFailureVal);
}
llvm::Value *
AtomicInfo::EmitAtomicCompareExchangeLibcall(llvm::Value *ExpectedAddr,
                                             llvm::Value *DesiredAddr,
                                             llvm::AtomicOrdering Success,
                                             llvm::AtomicOrdering Failure) {
  // bool __atomic_compare_exchange(size_t size, void *obj, void *expected,
  //                                void *desired, int success, int failure);
  CallArgList Args;
  // ... (size, object, expected, and desired arguments)
  Args.add(RValue::get(llvm::ConstantInt::get(
               CGF.IntTy, (int)llvm::toCABI(Success))),
           CGF.getContext().IntTy);
  Args.add(RValue::get(llvm::ConstantInt::get(
               CGF.IntTy, (int)llvm::toCABI(Failure))),
           CGF.getContext().IntTy);
  auto SuccessFailureRVal = emitAtomicLibcall(CGF, "__atomic_compare_exchange",
                                              CGF.getContext().BoolTy, Args);

  return SuccessFailureRVal.getScalarVal();
}
std::pair<RValue, llvm::Value *> AtomicInfo::EmitAtomicCompareExchange(
    RValue Expected, RValue Desired, llvm::AtomicOrdering Success,
    llvm::AtomicOrdering Failure, bool IsWeak) {
  if (isStrongerThan(Failure, Success))
    // Don't assert on undefined behavior: "the failure argument shall be no
    // stronger than the success argument".
    Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(Success);

  // Check whether we should use a library call.
  if (shouldUseLibcall()) {
    // Produce a source address.
    Address ExpectedAddr = materializeRValue(Expected);
    Address DesiredAddr = materializeRValue(Desired);
    auto *Res = EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
                                                 DesiredAddr.getPointer(),
                                                 Success, Failure);
    return std::make_pair(
        convertAtomicTempToRValue(ExpectedAddr, AggValueSlot::ignored(),
                                  SourceLocation(), /*AsValue=*/false),
        Res);
  }

  // If we've got a scalar value of the right size, try to avoid going
  // through memory.
  auto *ExpectedVal = convertRValueToInt(Expected);
  auto *DesiredVal = convertRValueToInt(Desired);
  auto Res = EmitAtomicCompareExchangeOp(ExpectedVal, DesiredVal, Success,
                                         Failure, IsWeak);
  return std::make_pair(
      ConvertIntToValueOrAtomic(Res.first, AggValueSlot::ignored(),
                                SourceLocation(), /*AsValue=*/false),
      Res.second);
}
static void
EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics, RValue OldRVal,
                      const llvm::function_ref<RValue(RValue)> &UpdateOp,
                      Address DesiredAddr) {
  RValue UpRVal;
  LValue AtomicLVal = Atomics.getAtomicLValue();
  LValue DesiredLVal;
  if (AtomicLVal.isSimple()) {
    UpRVal = OldRVal;
    DesiredLVal = CGF.MakeAddrLValue(DesiredAddr, AtomicLVal.getType());
  } else {
    // Build a new lvalue for the temp address.
    Address Ptr = Atomics.materializeRValue(OldRVal);
    // ... (re-create the bit-field / vector-element lvalue over Ptr and
    //      DesiredAddr, then load UpRVal from it)
  }
  // Store the new value in the corresponding memory area.
  RValue NewRVal = UpdateOp(UpRVal);
  if (NewRVal.isScalar()) {
    CGF.EmitStoreThroughLValue(NewRVal, DesiredLVal);
  } else {
    assert(NewRVal.isComplex());
    CGF.EmitStoreOfComplex(NewRVal.getComplexVal(), DesiredLVal,
                           /*isInit=*/false);
  }
}
void AtomicInfo::EmitAtomicUpdateLibcall(
    llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
    bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  Address ExpectedAddr = CreateTempAlloca();

  EmitAtomicLoadLibcall(ExpectedAddr.getPointer(), AO, IsVolatile);
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
  CGF.EmitBlock(ContBB);
  Address DesiredAddr = CreateTempAlloca();
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(getAtomicAddress().getElementType())) {
    auto *OldVal = CGF.Builder.CreateLoad(ExpectedAddr);
    CGF.Builder.CreateStore(OldVal, DesiredAddr);
  }
  auto OldRVal = convertAtomicTempToRValue(ExpectedAddr,
                                           AggValueSlot::ignored(),
                                           SourceLocation(), /*AsValue=*/false);
  EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, DesiredAddr);
  auto *Res =
      EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
                                       DesiredAddr.getPointer(), AO, Failure);
  CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}
void AtomicInfo::EmitAtomicUpdateOp(
    llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
    bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  // Do the atomic load.
  auto *OldVal = EmitAtomicLoadOp(AO, IsVolatile);
  // For non-simple lvalues perform a compare-and-swap loop.
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
  auto *CurBB = CGF.Builder.GetInsertBlock();
  CGF.EmitBlock(ContBB);
  llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
                                             /*NumReservedValues=*/2);
  PHI->addIncoming(OldVal, CurBB);
  Address NewAtomicAddr = CreateTempAlloca();
  Address NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(getAtomicAddress().getElementType())) {
    CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
  }
  auto OldRVal = ConvertIntToValueOrAtomic(PHI, AggValueSlot::ignored(),
                                           SourceLocation(), /*AsValue=*/false);
  EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, NewAtomicAddr);
  auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr);
  // Try to write the new value using a cmpxchg operation.
  auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
  PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
  CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}
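
// Illustration (not from the original file): the function above emits the
// classic compare-and-swap loop used when no single atomicrmw matches the
// update (e.g. an OpenMP `#pragma omp atomic` with a general expression):
//
//   old = atomic load x
// cont:
//   phi     = [old, entry], [cas.old, cont]
//   desired = UpdateOp(phi)
//   cas.old, ok = cmpxchg x, phi, desired
//   br ok, exit, cont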
static void EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics,
                                  RValue UpdateRVal, Address DesiredAddr) {
  LValue AtomicLVal = Atomics.getAtomicLValue();
  // ... (build a DesiredLVal over DesiredAddr mirroring the original
  //      bit-field / vector-element lvalue, then store UpdateRVal to it)
}

void AtomicInfo::EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
                                         RValue UpdateRVal, bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  Address ExpectedAddr = CreateTempAlloca();

  EmitAtomicLoadLibcall(ExpectedAddr.getPointer(), AO, IsVolatile);
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
  CGF.EmitBlock(ContBB);
  Address DesiredAddr = CreateTempAlloca();
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(getAtomicAddress().getElementType())) {
    auto *OldVal = CGF.Builder.CreateLoad(ExpectedAddr);
    CGF.Builder.CreateStore(OldVal, DesiredAddr);
  }
  EmitAtomicUpdateValue(CGF, *this, UpdateRVal, DesiredAddr);
  auto *Res =
      EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
                                       DesiredAddr.getPointer(), AO, Failure);
  CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}
void AtomicInfo::EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
                                    bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  // Do the atomic load.
  auto *OldVal = EmitAtomicLoadOp(AO, IsVolatile);
  // For non-simple lvalues perform a compare-and-swap loop.
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
  auto *CurBB = CGF.Builder.GetInsertBlock();
  CGF.EmitBlock(ContBB);
  llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
                                             /*NumReservedValues=*/2);
  PHI->addIncoming(OldVal, CurBB);
  Address NewAtomicAddr = CreateTempAlloca();
  Address NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(getAtomicAddress().getElementType())) {
    CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
  }
  EmitAtomicUpdateValue(CGF, *this, UpdateRVal, NewAtomicAddr);
  auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr);
  // Try to write the new value using a cmpxchg operation.
  auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
  PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
  CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}
void AtomicInfo::EmitAtomicUpdate(
    llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
    bool IsVolatile) {
  if (shouldUseLibcall()) {
    EmitAtomicUpdateLibcall(AO, UpdateOp, IsVolatile);
  } else {
    EmitAtomicUpdateOp(AO, UpdateOp, IsVolatile);
  }
}

void AtomicInfo::EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
                                  bool IsVolatile) {
  if (shouldUseLibcall()) {
    EmitAtomicUpdateLibcall(AO, UpdateRVal, IsVolatile);
  } else {
    EmitAtomicUpdateOp(AO, UpdateRVal, IsVolatile);
  }
}
void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue lvalue,
                                      bool isInit) {
  bool IsVolatile = lvalue.isVolatileQualified();
  llvm::AtomicOrdering AO;
  if (lvalue.getType()->isAtomicType()) {
    AO = llvm::AtomicOrdering::SequentiallyConsistent;
  } else {
    AO = llvm::AtomicOrdering::Release;
    IsVolatile = true;
  }
  return EmitAtomicStore(rvalue, lvalue, AO, IsVolatile, isInit);
}
/// Emit a store to an l-value of atomic type. The r-value is expected to be
/// an r-value *of the atomic type*; for aggregates this includes storage for
/// any necessary padding.
void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest,
                                      llvm::AtomicOrdering AO, bool IsVolatile,
                                      bool isInit) {
  // If this is an aggregate r-value, it should agree in type except
  // maybe for address-space qualification.
  assert(!rvalue.isAggregate() ||
         rvalue.getAggregateAddress().getElementType() ==
             dest.getAddress(*this).getElementType());

  AtomicInfo atomics(*this, dest);
  LValue LVal = atomics.getAtomicLValue();

  if (LVal.isSimple()) {
    // If this is an initialization, just put the value there normally.
    if (isInit) {
      atomics.emitCopyIntoMemory(rvalue);
      return;
    }

    // Check whether we should use a library call.
    if (atomics.shouldUseLibcall()) {
      // Produce a source address.
      Address srcAddr = atomics.materializeRValue(rvalue);

      // void __atomic_store(size_t size, void *mem, void *val, int order)
      CallArgList args;
      args.add(RValue::get(atomics.getAtomicSizeValue()),
               getContext().getSizeType());
      args.add(RValue::get(EmitCastToVoidPtr(atomics.getAtomicPointer())),
               getContext().VoidPtrTy);
      args.add(RValue::get(EmitCastToVoidPtr(srcAddr.getPointer())),
               getContext().VoidPtrTy);
      args.add(
          RValue::get(llvm::ConstantInt::get(IntTy, (int)llvm::toCABI(AO))),
          getContext().IntTy);
      emitAtomicLibcall(*this, "__atomic_store", getContext().VoidTy, args);
      return;
    }

    // Okay, we're doing this natively.
    llvm::Value *intValue = atomics.convertRValueToInt(rvalue);

    // Do the atomic store.
    Address addr =
        atomics.emitCastToAtomicIntPointer(atomics.getAtomicAddress());
    intValue = Builder.CreateIntCast(
        intValue, addr.getElementType(), /*isSigned=*/false);
    llvm::StoreInst *store = Builder.CreateStore(intValue, addr);

    // Initializations don't need to be atomic.
    if (!isInit)
      store->setAtomic(AO);

    // Other decoration.
    if (IsVolatile)
      store->setVolatile(true);
    CGM.DecorateInstructionWithTBAA(store, dest.getTBAAInfo());
    return;
  }

  // Emit a simple atomic update operation.
  atomics.EmitAtomicUpdate(AO, rvalue, IsVolatile);
}
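
// Illustration (not from the original file): for a lock-free type the native
// path above folds to a single instruction:
//
//   _Atomic int g;
//   __c11_atomic_store(&g, 42, __ATOMIC_RELEASE);
//   //   -> store atomic i32 42, i32* @g release, align 4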
/// Emit a compare-and-exchange op for atomic type.
std::pair<RValue, llvm::Value *> CodeGenFunction::EmitAtomicCompareExchange(
    LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc,
    llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak,
    AggValueSlot Slot) {
  // If these are aggregate r-values, they should agree in type except
  // maybe for address-space qualification.
  // ...
  AtomicInfo Atomics(*this, Obj);

  return Atomics.EmitAtomicCompareExchange(Expected, Desired, Success, Failure,
                                           IsWeak);
}
void CodeGenFunction::EmitAtomicUpdate(
    LValue LVal, llvm::AtomicOrdering AO,
    const llvm::function_ref<RValue(RValue)> &UpdateOp, bool IsVolatile) {
  AtomicInfo Atomics(*this, LVal);
  Atomics.EmitAtomicUpdate(AO, UpdateOp, IsVolatile);
}
void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
  AtomicInfo atomics(*this, dest);

  switch (atomics.getEvaluationKind()) {
  case TEK_Scalar: {
    llvm::Value *value = EmitScalarExpr(init);
    atomics.emitCopyIntoMemory(RValue::get(value));
    return;
  }

  case TEK_Complex: {
    ComplexPairTy value = EmitComplexExpr(init);
    atomics.emitCopyIntoMemory(RValue::getComplex(value));
    return;
  }

  case TEK_Aggregate: {
    // Fix up the destination if the initializer isn't an expression
    // of atomic type.
    bool Zeroed = false;
    if (!init->getType()->isAtomicType()) {
      Zeroed = atomics.emitMemSetZeroIfNecessary();
      dest = atomics.projectValue();
    }

    // Evaluate the expression directly into the destination.
    AggValueSlot slot = AggValueSlot::forLValue(
        dest, *this, AggValueSlot::IsNotDestructed,
        AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased,
        AggValueSlot::DoesNotOverlap,
        Zeroed ? AggValueSlot::IsZeroed : AggValueSlot::IsNotZeroed);

    EmitAggExpr(init, slot);
    return;
  }
  }
  llvm_unreachable("bad evaluation kind");
}