[CodeGen] Simplify the way lifetime of block captures is extended

Rather than pushing inactive cleanups for the block captures at the
entry of a full expression and activating them during the creation of
the block literal, just call pushLifetimeExtendedDestroy to ensure the
cleanups are popped at the end of the scope enclosing the block
expression.
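
For illustration, a minimal sketch of the user-visible effect (Objective-C
compiled with -fobjc-arc -fblocks; use_block is a hypothetical helper):

  extern void use_block(void (^)(void));

  void test(id x) {
    {
      use_block(^{ (void)x; }); // 'x' is retained into the on-stack block
      // The block's strong capture of 'x' is now released here, at the end
      // of the scope enclosing the block expression, rather than at the end
      // of the full expression containing the block literal.
    }
  }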

rdar://problem/63996471

Differential Revision: https://reviews.llvm.org/D81624
Akira Hatanaka 2020-06-03 16:41:50 -07:00
parent 8d8ec55035
commit c9a52de002
21 changed files with 148 additions and 245 deletions


@@ -36,7 +36,7 @@ CGBlockInfo::CGBlockInfo(const BlockDecl *block, StringRef name)
: Name(name), CXXThisIndex(0), CanBeGlobal(false), NeedsCopyDispose(false),
HasCXXObject(false), UsesStret(false), HasCapturedVariableLayout(false),
CapturesNonExternalType(false), LocalAddress(Address::invalid()),
StructureType(nullptr), Block(block), DominatingIP(nullptr) {
StructureType(nullptr), Block(block) {
// Skip asm prefix, if any. 'name' is usually taken directly from
// the mangled name of the enclosing function.
@@ -775,151 +775,23 @@ static void computeBlockInfo(CodeGenModule &CGM, CodeGenFunction *CGF,
llvm::StructType::get(CGM.getLLVMContext(), elementTypes, true);
}
/// Enter the scope of a block. This should be run at the entrance to
/// a full-expression so that the block's cleanups are pushed at the
/// right place in the stack.
static void enterBlockScope(CodeGenFunction &CGF, BlockDecl *block) {
assert(CGF.HaveInsertPoint());
// Allocate the block info and place it at the head of the list.
CGBlockInfo &blockInfo =
*new CGBlockInfo(block, CGF.CurFn->getName());
blockInfo.NextBlockInfo = CGF.FirstBlockInfo;
CGF.FirstBlockInfo = &blockInfo;
// Compute information about the layout, etc., of this block,
// pushing cleanups as necessary.
computeBlockInfo(CGF.CGM, &CGF, blockInfo);
// Nothing else to do if it can be global.
if (blockInfo.CanBeGlobal) return;
// Make the allocation for the block.
blockInfo.LocalAddress = CGF.CreateTempAlloca(blockInfo.StructureType,
blockInfo.BlockAlign, "block");
// If there are cleanups to emit, enter them (but inactive).
if (!blockInfo.NeedsCopyDispose) return;
// Walk through the captures (in order) and find the ones not
// captured by constant.
for (const auto &CI : block->captures()) {
// Ignore __block captures; there's nothing special in the
// on-stack block that we need to do for them.
if (CI.isByRef()) continue;
// Ignore variables that are constant-captured.
const VarDecl *variable = CI.getVariable();
CGBlockInfo::Capture &capture = blockInfo.getCapture(variable);
if (capture.isConstant()) continue;
// Ignore objects that aren't destructed.
QualType VT = getCaptureFieldType(CGF, CI);
QualType::DestructionKind dtorKind = VT.isDestructedType();
if (dtorKind == QualType::DK_none) continue;
CodeGenFunction::Destroyer *destroyer;
// Block captures count as local values and have imprecise semantics.
// They also can't be arrays, so we don't need to worry about that.
//
// For const-qualified captures, emit clang.arc.use to ensure the captured
// object doesn't get released while we are still depending on its validity
// within the block.
if (VT.isConstQualified() &&
VT.getObjCLifetime() == Qualifiers::OCL_Strong &&
CGF.CGM.getCodeGenOpts().OptimizationLevel != 0) {
assert(CGF.CGM.getLangOpts().ObjCAutoRefCount &&
"expected ObjC ARC to be enabled");
destroyer = CodeGenFunction::emitARCIntrinsicUse;
} else if (dtorKind == QualType::DK_objc_strong_lifetime) {
destroyer = CodeGenFunction::destroyARCStrongImprecise;
} else {
destroyer = CGF.getDestroyer(dtorKind);
}
// GEP down to the address.
Address addr =
CGF.Builder.CreateStructGEP(blockInfo.LocalAddress, capture.getIndex());
// We can use that GEP as the dominating IP.
if (!blockInfo.DominatingIP)
blockInfo.DominatingIP = cast<llvm::Instruction>(addr.getPointer());
CleanupKind cleanupKind = InactiveNormalCleanup;
bool useArrayEHCleanup = CGF.needsEHCleanup(dtorKind);
if (useArrayEHCleanup)
cleanupKind = InactiveNormalAndEHCleanup;
CGF.pushDestroy(cleanupKind, addr, VT,
destroyer, useArrayEHCleanup);
// Remember where that cleanup was.
capture.setCleanup(CGF.EHStack.stable_begin());
}
}
/// Enter a full-expression with a non-trivial number of objects to
/// clean up.
void CodeGenFunction::enterNonTrivialFullExpression(const FullExpr *E) {
if (const auto EWC = dyn_cast<ExprWithCleanups>(E)) {
assert(EWC->getNumObjects() != 0);
for (const ExprWithCleanups::CleanupObject &C : EWC->getObjects())
if (auto *BD = C.dyn_cast<BlockDecl *>())
enterBlockScope(*this, BD);
}
}
/// Find the layout for the given block in a linked list and remove it.
static CGBlockInfo *findAndRemoveBlockInfo(CGBlockInfo **head,
const BlockDecl *block) {
while (true) {
assert(head && *head);
CGBlockInfo *cur = *head;
// If this is the block we're looking for, splice it out of the list.
if (cur->getBlockDecl() == block) {
*head = cur->NextBlockInfo;
return cur;
}
head = &cur->NextBlockInfo;
}
}
/// Destroy a chain of block layouts.
void CodeGenFunction::destroyBlockInfos(CGBlockInfo *head) {
assert(head && "destroying an empty chain");
do {
CGBlockInfo *cur = head;
head = cur->NextBlockInfo;
delete cur;
} while (head != nullptr);
}
/// Emit a block literal expression in the current function.
llvm::Value *CodeGenFunction::EmitBlockLiteral(const BlockExpr *blockExpr) {
// If the block has no captures, we won't have a pre-computed
// layout for it.
if (!blockExpr->getBlockDecl()->hasCaptures()) {
if (!blockExpr->getBlockDecl()->hasCaptures())
// The block literal is emitted as a global variable, and the block invoke
// function has to be extracted from its initializer.
if (llvm::Constant *Block = CGM.getAddrOfGlobalBlockIfEmitted(blockExpr)) {
if (llvm::Constant *Block = CGM.getAddrOfGlobalBlockIfEmitted(blockExpr))
return Block;
}
CGBlockInfo blockInfo(blockExpr->getBlockDecl(), CurFn->getName());
computeBlockInfo(CGM, this, blockInfo);
blockInfo.BlockExpression = blockExpr;
return EmitBlockLiteral(blockInfo);
}
// Find the block info for this block and take ownership of it.
std::unique_ptr<CGBlockInfo> blockInfo;
blockInfo.reset(findAndRemoveBlockInfo(&FirstBlockInfo,
blockExpr->getBlockDecl()));
blockInfo->BlockExpression = blockExpr;
return EmitBlockLiteral(*blockInfo);
CGBlockInfo blockInfo(blockExpr->getBlockDecl(), CurFn->getName());
computeBlockInfo(CGM, this, blockInfo);
blockInfo.BlockExpression = blockExpr;
if (!blockInfo.CanBeGlobal)
blockInfo.LocalAddress = CreateTempAlloca(blockInfo.StructureType,
blockInfo.BlockAlign, "block");
return EmitBlockLiteral(blockInfo);
}
llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
@@ -1161,12 +1033,64 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
/*captured by init*/ false);
}
// Activate the cleanup if layout pushed one.
if (!CI.isByRef()) {
EHScopeStack::stable_iterator cleanup = capture.getCleanup();
if (cleanup.isValid())
ActivateCleanupBlock(cleanup, blockInfo.DominatingIP);
// Push a cleanup for the capture if necessary.
if (!blockInfo.NeedsCopyDispose)
continue;
// Ignore __block captures; there's nothing special in the on-stack block
// that we need to do for them.
if (CI.isByRef())
continue;
// Ignore objects that aren't destructed.
QualType::DestructionKind dtorKind = type.isDestructedType();
if (dtorKind == QualType::DK_none)
continue;
CodeGenFunction::Destroyer *destroyer;
// Block captures count as local values and have imprecise semantics.
// They also can't be arrays, so we don't need to worry about that.
//
// For const-qualified captures, emit clang.arc.use to ensure the captured
// object doesn't get released while we are still depending on its validity
// within the block.
if (type.isConstQualified() &&
type.getObjCLifetime() == Qualifiers::OCL_Strong &&
CGM.getCodeGenOpts().OptimizationLevel != 0) {
assert(CGM.getLangOpts().ObjCAutoRefCount &&
"expected ObjC ARC to be enabled");
destroyer = emitARCIntrinsicUse;
} else if (dtorKind == QualType::DK_objc_strong_lifetime) {
destroyer = destroyARCStrongImprecise;
} else {
destroyer = getDestroyer(dtorKind);
}
CleanupKind cleanupKind = NormalCleanup;
bool useArrayEHCleanup = needsEHCleanup(dtorKind);
if (useArrayEHCleanup)
cleanupKind = NormalAndEHCleanup;
// Extend the lifetime of the capture to the end of the scope enclosing the
// block expression except when the block decl is in the list of RetExpr's
// cleanup objects, in which case its lifetime ends after the full
// expression.
auto IsBlockDeclInRetExpr = [&]() {
auto *EWC = llvm::dyn_cast_or_null<ExprWithCleanups>(RetExpr);
if (EWC)
for (auto &C : EWC->getObjects())
if (auto *BD = C.dyn_cast<BlockDecl *>())
if (BD == blockDecl)
return true;
return false;
};
if (IsBlockDeclInRetExpr())
pushDestroy(cleanupKind, blockField, type, destroyer, useArrayEHCleanup);
else
pushLifetimeExtendedDestroy(cleanupKind, blockField, type, destroyer,
useArrayEHCleanup);
}
// Cast to the converted block-pointer type, which happens (somewhat


@@ -257,10 +257,6 @@ public:
// This could be zero if no forced alignment is required.
CharUnits BlockHeaderForcedGapSize;
/// An instruction which dominates the full-expression that the
/// block is inside.
llvm::Instruction *DominatingIP;
/// The next block in the block-info chain. Invalid if this block
/// info is not part of the CGF's block-info chain, which is true
/// if it corresponds to a global block or a block whose expression


@@ -179,12 +179,10 @@ void *EHScopeStack::pushCleanup(CleanupKind Kind, size_t Size) {
char *Buffer = allocate(EHCleanupScope::getSizeForCleanupSize(Size));
bool IsNormalCleanup = Kind & NormalCleanup;
bool IsEHCleanup = Kind & EHCleanup;
bool IsActive = !(Kind & InactiveCleanup);
bool IsLifetimeMarker = Kind & LifetimeMarker;
EHCleanupScope *Scope =
new (Buffer) EHCleanupScope(IsNormalCleanup,
IsEHCleanup,
IsActive,
Size,
BranchFixups.size(),
InnermostNormalCleanup,


@@ -284,8 +284,8 @@ public:
return sizeof(EHCleanupScope) + CleanupBits.CleanupSize;
}
EHCleanupScope(bool isNormal, bool isEH, bool isActive,
unsigned cleanupSize, unsigned fixupDepth,
EHCleanupScope(bool isNormal, bool isEH, unsigned cleanupSize,
unsigned fixupDepth,
EHScopeStack::stable_iterator enclosingNormal,
EHScopeStack::stable_iterator enclosingEH)
: EHScope(EHScope::Cleanup, enclosingEH),
@@ -293,7 +293,7 @@ public:
ActiveFlag(nullptr), ExtInfo(nullptr), FixupDepth(fixupDepth) {
CleanupBits.IsNormalCleanup = isNormal;
CleanupBits.IsEHCleanup = isEH;
CleanupBits.IsActive = isActive;
CleanupBits.IsActive = true;
CleanupBits.IsLifetimeMarker = false;
CleanupBits.TestFlagInNormalCleanup = false;
CleanupBits.TestFlagInEHCleanup = false;


@@ -762,10 +762,9 @@ void CodeGenFunction::EmitScalarInit(const Expr *init, const ValueDecl *D,
// If we're emitting a value with lifetime, we have to do the
// initialization *before* we leave the cleanup scopes.
if (const FullExpr *fe = dyn_cast<FullExpr>(init)) {
enterFullExpression(fe);
if (const FullExpr *fe = dyn_cast<FullExpr>(init))
init = fe->getSubExpr();
}
CodeGenFunction::RunCleanupsScope Scope(*this);
// We have to maintain the illusion that the variable is


@@ -1330,7 +1330,6 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
case Expr::ExprWithCleanupsClass: {
const auto *cleanups = cast<ExprWithCleanups>(E);
enterFullExpression(cleanups);
RunCleanupsScope Scope(*this);
LValue LV = EmitLValue(cleanups->getSubExpr());
if (LV.isSimple()) {


@@ -1349,7 +1349,6 @@ AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) {
}
void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
CGF.enterFullExpression(E);
CodeGenFunction::RunCleanupsScope cleanups(CGF);
Visit(E->getSubExpr());
}


@@ -222,7 +222,6 @@ public:
return Visit(DIE->getExpr());
}
ComplexPairTy VisitExprWithCleanups(ExprWithCleanups *E) {
CGF.enterFullExpression(E);
CodeGenFunction::RunCleanupsScope Scope(CGF);
ComplexPairTy Vals = Visit(E->getSubExpr());
// Defend against dominance problems caused by jumps out of expression


@@ -2342,7 +2342,6 @@ Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) {
}
Value *ScalarExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
CGF.enterFullExpression(E);
CodeGenFunction::RunCleanupsScope Scope(CGF);
Value *V = Visit(E->getSubExpr());
// Defend against dominance problems caused by jumps out of expression


@@ -3256,7 +3256,6 @@ static llvm::Value *emitARCRetainLoadOfScalar(CodeGenFunction &CGF,
llvm::Value *CodeGenFunction::EmitARCRetainScalarExpr(const Expr *e) {
// The retain needs to happen within the full-expression.
if (const ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(e)) {
enterFullExpression(cleanups);
RunCleanupsScope scope(*this);
return EmitARCRetainScalarExpr(cleanups->getSubExpr());
}
@@ -3272,7 +3271,6 @@ llvm::Value *
CodeGenFunction::EmitARCRetainAutoreleaseScalarExpr(const Expr *e) {
// The retain needs to happen within the full-expression.
if (const ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(e)) {
enterFullExpression(cleanups);
RunCleanupsScope scope(*this);
return EmitARCRetainAutoreleaseScalarExpr(cleanups->getSubExpr());
}
@@ -3383,7 +3381,6 @@ static llvm::Value *emitARCUnsafeUnretainedScalarExpr(CodeGenFunction &CGF,
llvm::Value *CodeGenFunction::EmitARCUnsafeUnretainedScalarExpr(const Expr *e) {
// Look through full-expressions.
if (const ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(e)) {
enterFullExpression(cleanups);
RunCleanupsScope scope(*this);
return emitARCUnsafeUnretainedScalarExpr(*this, cleanups->getSubExpr());
}


@@ -1070,6 +1070,19 @@ void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
EmitBranchThroughCleanup(ReturnBlock);
}
namespace {
// RAII struct used to save and restore a return statement's result expression.
struct SaveRetExprRAII {
SaveRetExprRAII(const Expr *RetExpr, CodeGenFunction &CGF)
: OldRetExpr(CGF.RetExpr), CGF(CGF) {
CGF.RetExpr = RetExpr;
}
~SaveRetExprRAII() { CGF.RetExpr = OldRetExpr; }
const Expr *OldRetExpr;
CodeGenFunction &CGF;
};
} // namespace
/// EmitReturnStmt - Note that due to GCC extensions, this can have an operand
/// if the function returns void, or may be missing one if the function returns
/// non-void. Fun stuff :).
@@ -1095,15 +1108,19 @@ void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
// Emit the result value, even if unused, to evaluate the side effects.
const Expr *RV = S.getRetValue();
// Treat block literals in a return expression as if they appeared
// in their own scope. This permits a small, easily-implemented
// exception to our over-conservative rules about not jumping to
// statements following block literals with non-trivial cleanups.
// Record the result expression of the return statement. The recorded
// expression is used to determine whether a block capture's lifetime should
// end at the end of the full expression as opposed to the end of the scope
// enclosing the block expression.
//
// This permits a small, easily-implemented exception to our over-conservative
// rules about not jumping to statements following block literals with
// non-trivial cleanups.
SaveRetExprRAII SaveRetExpr(RV, *this);
RunCleanupsScope cleanupScope(*this);
if (const FullExpr *fe = dyn_cast_or_null<FullExpr>(RV)) {
enterFullExpression(fe);
if (const FullExpr *fe = dyn_cast_or_null<FullExpr>(RV))
RV = fe->getSubExpr();
}
// FIXME: Clean this up by using an LValue for ReturnTemp,
// EmitStoreThroughLValue, and EmitAnyExpr.
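
A hedged sketch of the exception being recorded here (Objective-C ARC; the
function name is illustrative): when the block literal belongs to the return
statement's own result expression, its capture cleanup stays a plain
pushDestroy and runs at the end of the full expression, once ARC has copied
the returned block off the stack, so no lifetime extension is needed:

  id (^returnsBlock(id x))(void) {
    // The block is in the return statement's result expression, so the
    // strong capture of 'x' is released at the end of the full expression,
    // after the returned block has been copied to the heap.
    return ^{ return x; };
  }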


@@ -5259,15 +5259,6 @@ void CodeGenFunction::EmitOMPAtomicDirective(const OMPAtomicDirective &S) {
}
const Stmt *CS = S.getInnermostCapturedStmt()->IgnoreContainers();
if (const auto *FE = dyn_cast<FullExpr>(CS))
enterFullExpression(FE);
// Processing for statements under 'atomic capture'.
if (const auto *Compound = dyn_cast<CompoundStmt>(CS)) {
for (const Stmt *C : Compound->body()) {
if (const auto *FE = dyn_cast<FullExpr>(C))
enterFullExpression(FE);
}
}
auto &&CodeGen = [&S, Kind, AO, CS](CodeGenFunction &CGF,
PrePostActionTy &) {


@@ -79,12 +79,6 @@ CodeGenFunction::CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext)
CodeGenFunction::~CodeGenFunction() {
assert(LifetimeExtendedCleanupStack.empty() && "failed to emit a cleanup");
// If there are any unclaimed block infos, go ahead and destroy them
// now. This can happen if IR-gen gets clever and skips evaluating
// something.
if (FirstBlockInfo)
destroyBlockInfos(FirstBlockInfo);
if (getLangOpts().OpenMP && CurFn)
CGM.getOpenMPRuntime().functionFinished(*this);


@@ -444,6 +444,10 @@ public:
/// This is invalid if sret is not in use.
Address ReturnValuePointer = Address::invalid();
/// If a return statement is being visited, this holds the return statement's
/// result expression.
const Expr *RetExpr = nullptr;
/// Return true if a label was seen in the current scope.
bool hasLabelBeenSeenInCurrentScope() const {
if (CurLexicalScope)
@@ -648,9 +652,6 @@ public:
unsigned NextCleanupDestIndex = 1;
/// FirstBlockInfo - The head of a singly-linked-list of block layouts.
CGBlockInfo *FirstBlockInfo = nullptr;
/// EHResumeBlock - Unified block containing a call to llvm.eh.resume.
llvm::BasicBlock *EHResumeBlock = nullptr;
@@ -1925,7 +1926,6 @@ public:
/// information about the block, including the block invoke function, the
/// captured variables, etc.
llvm::Value *EmitBlockLiteral(const BlockExpr *);
static void destroyBlockInfos(CGBlockInfo *info);
llvm::Function *GenerateBlockFunction(GlobalDecl GD,
const CGBlockInfo &Info,
@@ -4227,14 +4227,6 @@ public:
void EmitSynthesizedCXXCopyCtor(Address Dest, Address Src, const Expr *Exp);
void enterFullExpression(const FullExpr *E) {
if (const auto *EWC = dyn_cast<ExprWithCleanups>(E))
if (EWC->getNumObjects() == 0)
return;
enterNonTrivialFullExpression(E);
}
void enterNonTrivialFullExpression(const FullExpr *E);
void EmitCXXThrowExpr(const CXXThrowExpr *E, bool KeepInsertionPoint = true);
RValue EmitAtomicExpr(AtomicExpr *E);


@@ -85,11 +85,6 @@ enum CleanupKind : unsigned {
NormalAndEHCleanup = EHCleanup | NormalCleanup,
InactiveCleanup = 0x4,
InactiveEHCleanup = EHCleanup | InactiveCleanup,
InactiveNormalCleanup = NormalCleanup | InactiveCleanup,
InactiveNormalAndEHCleanup = NormalAndEHCleanup | InactiveCleanup,
LifetimeMarker = 0x8,
NormalEHLifetimeMarker = LifetimeMarker | NormalAndEHCleanup,
};


@@ -103,12 +103,11 @@ namespace test_block_in_lambda {
// CHECK-LABEL: define internal void @"_ZZN20test_block_in_lambda4testENS_1AEENK3$_0clEv"(
// CHECK: [[BLOCK:%.*]] = alloca [[BLOCK_T:<{.*}>]], align 8
// CHECK: [[THIS:%.*]] = load [[LAMBDA_T:%.*]]*, [[LAMBDA_T:%.*]]**
// CHECK: [[TO_DESTROY:%.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[BLOCK]], i32 0, i32 5
// CHECK: [[T0:%.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[BLOCK]], i32 0, i32 5
// CHECK: [[BLOCK_CAPTURED:%.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[BLOCK]], i32 0, i32 5
// CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds [[LAMBDA_T]], [[LAMBDA_T]]* [[THIS]], i32 0, i32 0
// CHECK-NEXT: call void @_ZN20test_block_in_lambda1AC1ERKS0_({{.*}}* [[T0]], {{.*}}* nonnull align {{[0-9]+}} dereferenceable({{[0-9]+}}) [[T1]])
// CHECK-NEXT: call void @_ZN20test_block_in_lambda1AC1ERKS0_({{.*}}* [[BLOCK_CAPTURED]], {{.*}}* nonnull align {{[0-9]+}} dereferenceable({{[0-9]+}}) [[T1]])
// CHECK-NEXT: [[T0:%.*]] = bitcast [[BLOCK_T]]* [[BLOCK]] to void ()*
// CHECK-NEXT: call void @_ZN20test_block_in_lambda9takeBlockEU13block_pointerFvvE(void ()* [[T0]])
// CHECK-NEXT: call void @_ZN20test_block_in_lambda1AD1Ev({{.*}}* [[TO_DESTROY]])
// CHECK-NEXT: call void @_ZN20test_block_in_lambda1AD1Ev({{.*}}* [[BLOCK_CAPTURED]])
// CHECK-NEXT: ret void
}


@@ -156,10 +156,10 @@ namespace test5 {
// CHECK-NEXT: [[B:%.*]] = alloca void ()*, align 8
// CHECK-NEXT: [[BLOCK:%.*]] = alloca [[BLOCK_T:.*]], align 8
// CHECK-NEXT: [[CLEANUP_ACTIVE:%.*]] = alloca i1
// CHECK-NEXT: [[COND_CLEANUP_SAVE:%.*]] = alloca [[A]]*, align 8
// CHECK-NEXT: [[T0:%.*]] = zext i1
// CHECK-NEXT: store i8 [[T0]], i8* [[COND]], align 1
// CHECK-NEXT: call void @_ZN5test51AC1Ev([[A]]* [[X]])
// CHECK-NEXT: [[CLEANUP_ADDR:%.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[BLOCK]], i32 0, i32 5
// CHECK-NEXT: [[T0:%.*]] = load i8, i8* [[COND]], align 1
// CHECK-NEXT: [[T1:%.*]] = trunc i8 [[T0]] to i1
// CHECK-NEXT: store i1 false, i1* [[CLEANUP_ACTIVE]]
@@ -169,6 +169,7 @@ namespace test5 {
// CHECK: [[CAPTURE:%.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[BLOCK]], i32 0, i32 5
// CHECK-NEXT: call void @_ZN5test51AC1ERKS0_([[A]]* [[CAPTURE]], [[A]]* nonnull align {{[0-9]+}} dereferenceable({{[0-9]+}}) [[X]])
// CHECK-NEXT: store i1 true, i1* [[CLEANUP_ACTIVE]]
// CHECK-NEXT: store [[A]]* [[CAPTURE]], [[A]]** [[COND_CLEANUP_SAVE]], align 8
// CHECK-NEXT: bitcast [[BLOCK_T]]* [[BLOCK]] to void ()*
// CHECK-NEXT: br label
// CHECK: br label
@@ -178,7 +179,8 @@ namespace test5 {
// CHECK-NEXT: call void @_ZN5test511doWithBlockEU13block_pointerFvvE(
// CHECK-NEXT: [[T0:%.*]] = load i1, i1* [[CLEANUP_ACTIVE]]
// CHECK-NEXT: br i1 [[T0]]
// CHECK: call void @_ZN5test51AD1Ev([[A]]* [[CLEANUP_ADDR]])
// CHECK: [[T3:%.*]] = load [[A]]*, [[A]]** [[COND_CLEANUP_SAVE]], align 8
// CHECK-NEXT: call void @_ZN5test51AD1Ev([[A]]* [[T3]])
// CHECK-NEXT: br label
// CHECK: call void @_ZN5test51AD1Ev([[A]]* [[X]])
// CHECK-NEXT: ret void


@@ -34,14 +34,13 @@ void test2(id x) {
// CHECK-NEXT: [[BLOCK:%.*]] = alloca [[BLOCK_T:<{.*}>]],
// CHECK-NEXT: [[PARM:%.*]] = call i8* @llvm.objc.retain(i8* {{%.*}})
// CHECK-NEXT: store i8* [[PARM]], i8** [[X]]
// CHECK-NEXT: [[SLOTREL:%.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[BLOCK]], i32 0, i32 5
// CHECK: [[SLOT:%.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[BLOCK]], i32 0, i32 5
// CHECK-NEXT: [[T0:%.*]] = load i8*, i8** [[X]],
// CHECK-NEXT: [[T1:%.*]] = call i8* @llvm.objc.retain(i8* [[T0]])
// CHECK-NEXT: store i8* [[T1]], i8** [[SLOT]],
// CHECK-NEXT: bitcast
// CHECK-NEXT: call void @test2_helper(
// CHECK-NEXT: [[T0:%.*]] = load i8*, i8** [[SLOTREL]]
// CHECK-NEXT: [[T0:%.*]] = load i8*, i8** [[SLOT]]
// CHECK-NEXT: call void @llvm.objc.release(i8* [[T0]]) [[NUW]], !clang.imprecise_release
// CHECK-NEXT: [[T0:%.*]] = load i8*, i8** [[X]]
// CHECK-NEXT: call void @llvm.objc.release(i8* [[T0]]) [[NUW]], !clang.imprecise_release
@@ -296,13 +295,12 @@ void test7(void) {
// CHECK-NEXT: [[BLOCK:%.*]] = alloca [[BLOCK_T:<{.*}>]],
// CHECK: store
// CHECK-NEXT: store
// CHECK: [[D0:%.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[BLOCK]], i32 0, i32 5
// CHECK: [[T0:%.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[BLOCK]], i32 0, i32 5
// CHECK-NEXT: [[T1:%.*]] = load [[TEST8]]*, [[TEST8]]** [[SELF]],
// CHECK-NEXT: store %0* [[T1]], %0** [[T0]]
// CHECK-NEXT: bitcast [[BLOCK_T]]* [[BLOCK]] to
// CHECK: call void @test8_helper(
// CHECK-NEXT: [[T2:%.*]] = load [[TEST8]]*, [[TEST8]]** [[D0]]
// CHECK-NEXT: [[T2:%.*]] = load [[TEST8]]*, [[TEST8]]** [[T0]]
// CHECK-NEXT: call void (...) @llvm.objc.clang.arc.use([[TEST8]]* [[T2]])
// CHECK: ret void
@@ -498,11 +496,11 @@ void test13(id x) {
// CHECK-NEXT: [[B:%.*]] = alloca void ()*, align 8
// CHECK-NEXT: [[BLOCK:%.*]] = alloca [[BLOCK_T:.*]], align 8
// CHECK-NEXT: [[CLEANUP_ACTIVE:%.*]] = alloca i1
// CHECK-NEXT: [[COND_CLEANUP_SAVE:%.*]] = alloca i8**,
// CHECK-NEXT: [[T0:%.*]] = call i8* @llvm.objc.retain(i8* {{%.*}})
// CHECK-NEXT: store i8* [[T0]], i8** [[X]], align 8
// CHECK-NEXT: [[BPTR1:%.*]] = bitcast void ()** [[B]] to i8*
// CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 8, i8* [[BPTR1]])
// CHECK-NEXT: [[CLEANUP_ADDR:%.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[BLOCK]], i32 0, i32 5
// CHECK-NEXT: [[T0:%.*]] = load i8*, i8** [[X]], align 8
// CHECK-NEXT: [[T1:%.*]] = icmp ne i8* [[T0]], null
// CHECK-NEXT: store i1 false, i1* [[CLEANUP_ACTIVE]]
@@ -514,6 +512,7 @@ void test13(id x) {
// CHECK-NEXT: [[T1:%.*]] = call i8* @llvm.objc.retain(i8* [[T0]])
// CHECK-NEXT: store i8* [[T1]], i8** [[CAPTURE]], align 8
// CHECK-NEXT: store i1 true, i1* [[CLEANUP_ACTIVE]]
// CHECK-NEXT: store i8** [[CAPTURE]], i8*** [[COND_CLEANUP_SAVE]], align 8
// CHECK-NEXT: bitcast [[BLOCK_T]]* [[BLOCK]] to void ()*
// CHECK-NEXT: br label
// CHECK: br label
@@ -530,7 +529,8 @@ void test13(id x) {
// CHECK-NEXT: [[T0:%.*]] = load i1, i1* [[CLEANUP_ACTIVE]]
// CHECK-NEXT: br i1 [[T0]]
// CHECK: [[T0:%.*]] = load i8*, i8** [[CLEANUP_ADDR]]
// CHECK: [[V12:%.*]] = load i8**, i8*** [[COND_CLEANUP_SAVE]], align 8
// CHECK: [[T0:%.*]] = load i8*, i8** [[V12]]
// CHECK-NEXT: call void @llvm.objc.release(i8* [[T0]])
// CHECK-NEXT: br label
@@ -562,7 +562,6 @@ void test16() {
// CHECK-NEXT: [[BLOCK:%.*]] = alloca [[BLOCK_T:<{.*}>]],
// CHECK-NEXT: [[BLKVARPTR1:%.*]] = bitcast void ()** [[BLKVAR]] to i8*
// CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 8, i8* [[BLKVARPTR1]])
// CHECK-NEXT: [[SLOTREL:%.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[BLOCK]], i32 0, i32 5
// CHECK-NEXT: store void ()* null, void ()** [[BLKVAR]], align 8
}
@@ -588,37 +587,31 @@ id (^test17(id self, int which))(void) {
// CHECK-NEXT: store i8* [[T0]], i8** [[SELF]], align
// CHECK-NOT: objc_retain
// CHECK-NOT: objc_release
// CHECK: [[DESTROY:%.*]] = getelementptr inbounds [[BLOCK]], [[BLOCK]]* [[B0]], i32 0, i32 5
// CHECK-NOT: objc_retain
// CHECK-NOT: objc_release
// CHECK: [[T0:%.*]] = getelementptr inbounds [[BLOCK]], [[BLOCK]]* [[B0]], i32 0, i32 5
// CHECK: [[CAPTURED:%.*]] = getelementptr inbounds [[BLOCK]], [[BLOCK]]* [[B0]], i32 0, i32 5
// CHECK-NEXT: [[T1:%.*]] = load i8*, i8** [[SELF]], align
// CHECK-NEXT: [[T2:%.*]] = call i8* @llvm.objc.retain(i8* [[T1]])
// CHECK-NEXT: store i8* [[T2]], i8** [[T0]],
// CHECK-NEXT: store i8* [[T2]], i8** [[CAPTURED]],
// CHECK-NEXT: [[T0:%.*]] = bitcast [[BLOCK]]* [[B0]] to i8* ()*
// CHECK-NEXT: [[T1:%.*]] = bitcast i8* ()* [[T0]] to i8*
// CHECK-NEXT: [[T2:%.*]] = call i8* @llvm.objc.retainBlock(i8* [[T1]])
// CHECK-NEXT: [[T3:%.*]] = bitcast i8* [[T2]] to i8* ()*
// CHECK-NEXT: store i8* ()* [[T3]], i8* ()** [[RET]]
// CHECK-NEXT: [[T0:%.*]] = load i8*, i8** [[DESTROY]]
// CHECK-NEXT: [[T0:%.*]] = load i8*, i8** [[CAPTURED]]
// CHECK-NEXT: call void @llvm.objc.release(i8* [[T0]])
// CHECK-NEXT: store i32
// CHECK-NEXT: br label
// CHECK-NOT: objc_retain
// CHECK-NOT: objc_release
// CHECK: [[DESTROY:%.*]] = getelementptr inbounds [[BLOCK]], [[BLOCK]]* [[B1]], i32 0, i32 5
// CHECK-NOT: objc_retain
// CHECK-NOT: objc_release
// CHECK: [[T0:%.*]] = getelementptr inbounds [[BLOCK]], [[BLOCK]]* [[B1]], i32 0, i32 5
// CHECK: [[CAPTURED:%.*]] = getelementptr inbounds [[BLOCK]], [[BLOCK]]* [[B1]], i32 0, i32 5
// CHECK-NEXT: [[T1:%.*]] = load i8*, i8** [[SELF]], align
// CHECK-NEXT: [[T2:%.*]] = call i8* @llvm.objc.retain(i8* [[T1]])
// CHECK-NEXT: store i8* [[T2]], i8** [[T0]],
// CHECK-NEXT: store i8* [[T2]], i8** [[CAPTURED]],
// CHECK-NEXT: [[T0:%.*]] = bitcast [[BLOCK]]* [[B1]] to i8* ()*
// CHECK-NEXT: [[T1:%.*]] = bitcast i8* ()* [[T0]] to i8*
// CHECK-NEXT: [[T2:%.*]] = call i8* @llvm.objc.retainBlock(i8* [[T1]])
// CHECK-NEXT: [[T3:%.*]] = bitcast i8* [[T2]] to i8* ()*
// CHECK-NEXT: store i8* ()* [[T3]], i8* ()** [[RET]]
// CHECK-NEXT: [[T0:%.*]] = load i8*, i8** [[DESTROY]]
// CHECK-NEXT: [[T0:%.*]] = load i8*, i8** [[CAPTURED]]
// CHECK-NEXT: call void @llvm.objc.release(i8* [[T0]])
// CHECK-NEXT: store i32
// CHECK-NEXT: br label
@@ -629,7 +622,6 @@ void test18(id x) {
// CHECK-UNOPT-NEXT: [[BLOCK:%.*]] = alloca [[BLOCK_T:<{.*}>]],
// CHECK-UNOPT-NEXT: store i8* null, i8** [[X]]
// CHECK-UNOPT-NEXT: call void @llvm.objc.storeStrong(i8** [[X]],
// CHECK-UNOPT-NEXT: [[SLOTREL:%.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[BLOCK]], i32 0, i32 5
// CHECK-UNOPT: %[[BLOCK_DESCRIPTOR:.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[BLOCK]], i32 0, i32 4
// CHECK-UNOPT: store %[[STRUCT_BLOCK_DESCRIPTOR]]* bitcast ({ i64, i64, i8*, i8*, i8*, i64 }* @[[BLOCK_DESCRIPTOR_TMP44]] to %[[STRUCT_BLOCK_DESCRIPTOR]]*), %[[STRUCT_BLOCK_DESCRIPTOR]]** %[[BLOCK_DESCRIPTOR]], align 8
// CHECK-UNOPT: [[SLOT:%.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[BLOCK]], i32 0, i32 5
@@ -638,7 +630,7 @@ void test18(id x) {
// CHECK-UNOPT-NEXT: store i8* [[T1]], i8** [[SLOT]],
// CHECK-UNOPT-NEXT: bitcast
// CHECK-UNOPT-NEXT: call void @test18_helper(
// CHECK-UNOPT-NEXT: call void @llvm.objc.storeStrong(i8** [[SLOTREL]], i8* null) [[NUW:#[0-9]+]]
// CHECK-UNOPT-NEXT: call void @llvm.objc.storeStrong(i8** [[SLOT]], i8* null) [[NUW:#[0-9]+]]
// CHECK-UNOPT-NEXT: call void @llvm.objc.storeStrong(i8** [[X]], i8* null) [[NUW]]
// CHECK-UNOPT-NEXT: ret void
extern void test18_helper(id (^)(void));
@@ -672,7 +664,6 @@ void test19(void (^b)(void)) {
// CHECK-NEXT: store void ()* [[T2]], void ()** [[B]]
// Block setup. We skip most of this. Note the bare retain.
// CHECK-NEXT: [[SLOTREL:%.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[BLOCK]], i32 0, i32 5
// CHECK: %[[BLOCK_DESCRIPTOR:.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[BLOCK]], i32 0, i32 4
// CHECK: store %[[STRUCT_BLOCK_DESCRIPTOR]]* bitcast ({ i64, i64, i8*, i8*, i8*, i64 }* @[[BLOCK_DESCRIPTOR_TMP48]] to %[[STRUCT_BLOCK_DESCRIPTOR]]*), %[[STRUCT_BLOCK_DESCRIPTOR]]** %[[BLOCK_DESCRIPTOR]], align 8
// CHECK: [[SLOT:%.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[BLOCK]], i32 0, i32 5
@@ -688,7 +679,7 @@ void test19(void (^b)(void)) {
test19_sink(^(int x) { b(); });
// Block teardown.
// CHECK-NEXT: [[T0:%.*]] = load void ()*, void ()** [[SLOTREL]]
// CHECK-NEXT: [[T0:%.*]] = load void ()*, void ()** [[SLOT]]
// CHECK-NEXT: [[T1:%.*]] = bitcast void ()* [[T0]] to i8*
// CHECK-NEXT: call void @llvm.objc.release(i8* [[T1]])
@@ -705,11 +696,10 @@ void test19(void (^b)(void)) {
// CHECK-NEXT: [[BLOCK:%.*]] = alloca <[[BLOCKTY:.*]]>
// CHECK-NEXT: [[RETAINEDX:%.*]] = call i8* @llvm.objc.retain(i8* %{{.*}})
// CHECK-NEXT: store i8* [[RETAINEDX]], i8** [[XADDR]]
// CHECK-NEXT: [[CAPTUREFIELD:%.*]] = getelementptr inbounds <[[BLOCKTY]]>, <[[BLOCKTY]]>* [[BLOCK]], i32 0, i32 5
// CHECK: [[BLOCKCAPTURED:%.*]] = getelementptr inbounds <[[BLOCKTY]]>, <[[BLOCKTY]]>* [[BLOCK]], i32 0, i32 5
// CHECK: [[CAPTURED:%.*]] = load i8*, i8** [[XADDR]]
// CHECK: store i8* [[CAPTURED]], i8** [[BLOCKCAPTURED]]
// CHECK: [[CAPTURE:%.*]] = load i8*, i8** [[CAPTUREFIELD]]
// CHECK: [[CAPTURE:%.*]] = load i8*, i8** [[BLOCKCAPTURED]]
// CHECK-NEXT: call void (...) @llvm.objc.clang.arc.use(i8* [[CAPTURE]])
// CHECK-NEXT: [[X:%.*]] = load i8*, i8** [[XADDR]]
// CHECK-NEXT: call void @llvm.objc.release(i8* [[X]])
@@ -718,12 +708,11 @@ void test19(void (^b)(void)) {
// CHECK-UNOPT-LABEL: define void @test20(
// CHECK-UNOPT: [[XADDR:%.*]] = alloca i8*
// CHECK-UNOPT-NEXT: [[BLOCK:%.*]] = alloca <[[BLOCKTY:.*]]>
// CHECK-UNOPT: [[CAPTUREFIELD:%.*]] = getelementptr inbounds <[[BLOCKTY]]>, <[[BLOCKTY]]>* [[BLOCK]], i32 0, i32 5
// CHECK-UNOPT: [[BLOCKCAPTURED:%.*]] = getelementptr inbounds <[[BLOCKTY]]>, <[[BLOCKTY]]>* [[BLOCK]], i32 0, i32 5
// CHECK-UNOPT: [[CAPTURED:%.*]] = load i8*, i8** [[XADDR]]
// CHECK-UNOPT: [[RETAINED:%.*]] = call i8* @llvm.objc.retain(i8* [[CAPTURED]])
// CHECK-UNOPT: store i8* [[RETAINED]], i8** [[BLOCKCAPTURED]]
// CHECK-UNOPT: call void @llvm.objc.storeStrong(i8** [[CAPTUREFIELD]], i8* null)
// CHECK-UNOPT: call void @llvm.objc.storeStrong(i8** [[BLOCKCAPTURED]], i8* null)
void test20_callee(void (^)());
void test20(const id x) {
@@ -740,5 +729,23 @@ void test21(id x) {
test21_callee(1, ^{ (void)x; });
}
// The lifetime of 'x', which is captured by the block in the statement
// expression, should be extended.
// CHECK-COMMON-LABEL: define i8* @test22(
// CHECK-COMMON: %[[BLOCK_CAPTURED:.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %{{.*}}*, i8* }>, <{ i8*, i32, i32, i8*, %{{.*}}*, i8* }>* %{{.*}}, i32 0, i32 5
// CHECK-COMMON: %[[V3:.*]] = call i8* @llvm.objc.retain(i8* %{{.*}})
// CHECK-COMMON: store i8* %[[V3]], i8** %[[BLOCK_CAPTURED]], align 8
// CHECK-COMMON: call void @test22_1()
// CHECK-UNOPT: call void @llvm.objc.storeStrong(i8** %[[BLOCK_CAPTURED]], i8* null)
// CHECK: %[[V15:.*]] = load i8*, i8** %[[BLOCK_CAPTURED]], align 8
// CHECK: call void @llvm.objc.release(i8* %[[V15]])
id test22(int c, id x) {
extern id test22_0(void);
extern void test22_1(void);
return c ? test22_0() : ({ id (^b)(void) = ^{ return x; }; test22_1(); b(); });
}
// CHECK: attributes [[NUW]] = { nounwind }
// CHECK-UNOPT: attributes [[NUW]] = { nounwind }


@@ -65,14 +65,13 @@ void test0(NSArray *array) {
// CHECK-LP64-NEXT: [[T3:%.*]] = load i8*, i8** [[T2]]
// CHECK-LP64-NEXT: store i8* [[T3]], i8** [[X]]
// CHECK-LP64: [[D0:%.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[BLOCK]], i32 0, i32 5
// CHECK-LP64: [[T0:%.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[BLOCK]], i32 0, i32 5
// CHECK-LP64: [[CAPTURED:%.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[BLOCK]], i32 0, i32 5
// CHECK-LP64-NEXT: [[T1:%.*]] = load i8*, i8** [[X]]
// CHECK-LP64-NEXT: [[T2:%.*]] = call i8* @llvm.objc.retain(i8* [[T1]])
// CHECK-LP64-NEXT: store i8* [[T2]], i8** [[T0]]
// CHECK-LP64-NEXT: store i8* [[T2]], i8** [[CAPTURED]]
// CHECK-LP64-NEXT: [[BLOCK1:%.*]] = bitcast [[BLOCK_T]]* [[BLOCK]]
// CHECK-LP64-NEXT: call void @use_block(void ()* [[BLOCK1]])
// CHECK-LP64-NEXT: call void @llvm.objc.storeStrong(i8** [[D0]], i8* null)
// CHECK-LP64-NEXT: call void @llvm.objc.storeStrong(i8** [[CAPTURED]], i8* null)
// CHECK-LP64-NOT: call void (...) @llvm.objc.clang.arc.use(
// CHECK-LP64-OPT: [[D0:%.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[BLOCK]], i64 0, i32 5
@@ -118,12 +117,11 @@ void test1(NSArray *array) {
// CHECK-LP64-NEXT: [[T3:%.*]] = load i8*, i8** [[T2]]
// CHECK-LP64-NEXT: call i8* @llvm.objc.initWeak(i8** [[X]], i8* [[T3]])
// CHECK-LP64: [[D0:%.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[BLOCK]], i32 0, i32 5
// CHECK-LP64: [[T0:%.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[BLOCK]], i32 0, i32 5
// CHECK-LP64-NEXT: call void @llvm.objc.copyWeak(i8** [[T0]], i8** [[X]])
// CHECK-LP64-NEXT: [[T1:%.*]] = bitcast [[BLOCK_T]]* [[BLOCK]] to
// CHECK-LP64: call void @use_block
// CHECK-LP64-NEXT: call void @llvm.objc.destroyWeak(i8** [[D0]])
// CHECK-LP64-NEXT: call void @llvm.objc.destroyWeak(i8** [[T0]])
// CHECK-LP64-NEXT: call void @llvm.objc.destroyWeak(i8** [[X]])
// rdar://problem/9817306
@@ -207,7 +205,6 @@ NSArray *array4;
// CHECK-LP64: [[SELF_ADDR:%.*]] = alloca [[TY:%.*]]*,
// CHECK-LP64: [[BLOCK:%.*]] = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, [[TY]]* }>,
// CHECK-LP64: store [[TY]]* %self, [[TY]]** [[SELF_ADDR]]
// CHECK-LP64: [[T0:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, [[TY]]* }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, [[TY]]* }>* [[BLOCK]], i32 0, i32 5
// CHECK-LP64: [[BC:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, [[TY]]* }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, [[TY]]* }>* [[BLOCK]], i32 0, i32 5
// CHECK-LP64: [[T1:%.*]] = load [[TY]]*, [[TY]]** [[SELF_ADDR]]
// CHECK-LP64: [[T2:%.*]] = bitcast [[TY]]* [[T1]] to i8*
@@ -218,7 +215,7 @@ NSArray *array4;
// CHECK-LP64-OPT: [[BLOCK:%.*]] = alloca [[BLOCK_T:<{.*}>]],
// CHECK-LP64-OPT: [[T0:%.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[BLOCK]], i64 0, i32 5
// CHECK-LP64: [[T5:%.*]] = bitcast [[TY]]** [[T0]] to i8**
// CHECK-LP64: [[T5:%.*]] = bitcast [[TY]]** [[BC]] to i8**
// CHECK-LP64: call void @llvm.objc.storeStrong(i8** [[T5]], i8* null)
// CHECK-LP64-NOT: call void (...) @llvm.objc.clang.arc.use([[TY]]* [[T5]])
// CHECK-LP64: switch i32 {{%.*}}, label %[[UNREACHABLE:.*]] [


@@ -95,7 +95,6 @@ void test5(BlockTy2 b, int *p) {
// CHECK-NOARC: store i8* %[[B]], i8** %[[B_ADDR]], align 8
// CHECK-ARC: store i8* null, i8** %[[B_ADDR]], align 8
// CHECK-ARC: call void @llvm.objc.storeStrong(i8** %[[B_ADDR]], i8* %[[B]])
// CHECK-ARC: %[[V0:.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %[[STRUCT_BLOCK_DESCRIPTOR]]*, i8* }>, <{ i8*, i32, i32, i8*, %[[STRUCT_BLOCK_DESCRIPTOR]]*, i8* }>* %[[BLOCK]], i32 0, i32 5
// CHECK: %[[BLOCK_ISA:.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %[[STRUCT_BLOCK_DESCRIPTOR]]*, i8* }>, <{ i8*, i32, i32, i8*, %[[STRUCT_BLOCK_DESCRIPTOR]]*, i8* }>* %[[BLOCK]], i32 0, i32 0
// CHECK: store i8* bitcast (i8** @_NSConcreteGlobalBlock to i8*), i8** %[[BLOCK_ISA]], align 8
// CHECK: %[[BLOCK_FLAGS:.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %[[STRUCT_BLOCK_DESCRIPTOR]]*, i8* }>, <{ i8*, i32, i32, i8*, %[[STRUCT_BLOCK_DESCRIPTOR]]*, i8* }>* %[[BLOCK]], i32 0, i32 1
@@ -109,7 +108,7 @@ void test5(BlockTy2 b, int *p) {
// CHECK-ARC: %[[V3:.*]] = call i8* @llvm.objc.retain(i8* %[[V2]])
// CHECK-ARC: store i8* %[[V3]], i8** %[[BLOCK_CAPTURED]], align 8
// CHECK: call void @noescapeFunc0(
// CHECK-ARC: call void @llvm.objc.storeStrong(i8** %[[V0]], i8* null)
// CHECK-ARC: call void @llvm.objc.storeStrong(i8** %[[BLOCK_CAPTURED]], i8* null)
// CHECK-ARC: call void @llvm.objc.storeStrong(i8** %[[B_ADDR]], i8* null)
// Non-escaping blocks don't need copy/dispose helper functions.


@@ -95,7 +95,7 @@ kernel void device_side_enqueue(global int *a, global int *b, int i) {
// COMMON: [[WAIT_EVNT:%[0-9]+]] = addrspacecast %opencl.clk_event_t{{.*}}** %event_wait_list to %opencl.clk_event_t{{.*}}* addrspace(4)*
// COMMON: [[EVNT:%[0-9]+]] = addrspacecast %opencl.clk_event_t{{.*}}** %clk_event to %opencl.clk_event_t{{.*}}* addrspace(4)*
// COMMON: store i8 addrspace(4)* addrspacecast (i8* bitcast (void (i8 addrspace(4)*)* [[INVL2:@__device_side_enqueue_block_invoke[^ ]*]] to i8*) to i8 addrspace(4)*), i8 addrspace(4)** %block.invoke
// COMMON: [[BL:%[0-9]+]] = bitcast <{ i32, i32, i8 addrspace(4)*, i32{{.*}}, i32{{.*}}, i32{{.*}} }>* %block3 to %struct.__opencl_block_literal_generic*
// COMMON: [[BL:%[0-9]+]] = bitcast <{ i32, i32, i8 addrspace(4)*, i32{{.*}}, i32{{.*}}, i32{{.*}} }>* %block4 to %struct.__opencl_block_literal_generic*
// COMMON: [[BL_I8:%[0-9]+]] = addrspacecast %struct.__opencl_block_literal_generic* [[BL]] to i8 addrspace(4)*
// COMMON-LABEL: call i32 @__enqueue_kernel_basic_events
// COMMON-SAME: (%opencl.queue_t{{.*}}* [[DEF_Q]], i32 [[FLAGS]], %struct.ndrange_t* {{.*}}, i32 2, %opencl.clk_event_t{{.*}}* addrspace(4)* [[WAIT_EVNT]], %opencl.clk_event_t{{.*}}* addrspace(4)* [[EVNT]],