[OPENMP] Fix for http://llvm.org/PR24674: assertion failure and abort trap
Fix processing of shared variables with reference types in OpenMP constructs. Previously, if the variable was not marked in one of the private clauses, the reference to this variable was emitted incorrectly and caused an assertion later. llvm-svn: 246846
This commit is contained in:
parent
cb405bf311
commit
caacd53dde
|
@ -1942,11 +1942,17 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
|
|||
if (auto *FD = LambdaCaptureFields.lookup(VD))
|
||||
return EmitCapturedFieldLValue(*this, FD, CXXABIThisValue);
|
||||
else if (CapturedStmtInfo) {
|
||||
if (auto *V = LocalDeclMap.lookup(VD))
|
||||
if (auto *V = LocalDeclMap.lookup(VD)) {
|
||||
if (VD->getType()->isReferenceType()) {
|
||||
llvm::LoadInst *LI = Builder.CreateLoad(V);
|
||||
LI->setAlignment(Alignment.getQuantity());
|
||||
V = LI;
|
||||
return MakeNaturalAlignAddrLValue(V, T);
|
||||
}
|
||||
return MakeAddrLValue(V, T, Alignment);
|
||||
else
|
||||
return EmitCapturedFieldLValue(*this, CapturedStmtInfo->lookup(VD),
|
||||
CapturedStmtInfo->getContextValue());
|
||||
}
|
||||
return EmitCapturedFieldLValue(*this, CapturedStmtInfo->lookup(VD),
|
||||
CapturedStmtInfo->getContextValue());
|
||||
}
|
||||
assert(isa<BlockDecl>(CurCodeDecl));
|
||||
return MakeAddrLValue(GetAddrOfBlockDecl(VD, VD->hasAttr<BlocksAttr>()),
|
||||
|
|
|
@ -383,6 +383,12 @@ void CodeGenFunction::EmitOMPLastprivateClauseFinal(
|
|||
auto *OriginalAddr = GetAddrOfLocalVar(DestVD);
|
||||
// Get the address of the private variable.
|
||||
auto *PrivateAddr = GetAddrOfLocalVar(PrivateVD);
|
||||
if (PrivateVD->getType()->isReferenceType())
|
||||
PrivateAddr =
|
||||
EmitLoadOfLValue(MakeNaturalAlignAddrLValue(
|
||||
PrivateAddr, PrivateVD->getType()),
|
||||
(*IRef)->getExprLoc())
|
||||
.getScalarVal();
|
||||
EmitOMPCopy(*this, Type, OriginalAddr, PrivateAddr, DestVD, SrcVD,
|
||||
AssignOp);
|
||||
}
|
||||
|
|
|
@ -584,7 +584,15 @@ public:
|
|||
if (SavedLocals.count(LocalVD) > 0) return false;
|
||||
SavedLocals[LocalVD] = CGF.LocalDeclMap.lookup(LocalVD);
|
||||
CGF.LocalDeclMap.erase(LocalVD);
|
||||
SavedPrivates[LocalVD] = PrivateGen();
|
||||
auto *V = PrivateGen();
|
||||
QualType VarTy = LocalVD->getType();
|
||||
if (VarTy->isReferenceType()) {
|
||||
auto *TempAlloca = CGF.CreateMemTemp(VarTy);
|
||||
LValue RefLVal = CGF.MakeNaturalAlignAddrLValue(TempAlloca, VarTy);
|
||||
CGF.EmitStoreOfScalar(V, RefLVal);
|
||||
V = TempAlloca;
|
||||
}
|
||||
SavedPrivates[LocalVD] = V;
|
||||
CGF.LocalDeclMap[LocalVD] = SavedLocals[LocalVD];
|
||||
return true;
|
||||
}
|
||||
|
|
|
@ -37,6 +37,23 @@ int main() {
|
|||
return a;
|
||||
}
|
||||
|
||||
struct S {
|
||||
int a;
|
||||
};
|
||||
// CHECK-LABEL: critical_ref
|
||||
void critical_ref(S &s) {
|
||||
// CHECK: [[S_ADDR:%.+]] = alloca %struct.S*,
|
||||
// CHECK: [[S_REF:%.+]] = load %struct.S*, %struct.S** [[S_ADDR]],
|
||||
// CHECK: [[S_A_REF:%.+]] = getelementptr inbounds %struct.S, %struct.S* [[S_REF]], i32 0, i32 0
|
||||
++s.a;
|
||||
// CHECK: call void @__kmpc_critical(
|
||||
#pragma omp critical
|
||||
// CHECK: [[S_REF:%.+]] = load %struct.S*, %struct.S** [[S_ADDR]],
|
||||
// CHECK: [[S_A_REF:%.+]] = getelementptr inbounds %struct.S, %struct.S* [[S_REF]], i32 0, i32 0
|
||||
++s.a;
|
||||
// CHECK: call void @__kmpc_end_critical(
|
||||
}
|
||||
|
||||
// CHECK-LABEL: parallel_critical
|
||||
// TERM_DEBUG-LABEL: parallel_critical
|
||||
void parallel_critical() {
|
||||
|
|
|
@ -16,6 +16,7 @@ struct St {
|
|||
};
|
||||
|
||||
volatile int g = 1212;
|
||||
volatile int &g1 = g;
|
||||
|
||||
template <class T>
|
||||
struct S {
|
||||
|
@ -71,7 +72,7 @@ int main() {
|
|||
// LAMBDA: define{{.*}} internal{{.*}} void [[OUTER_LAMBDA]](
|
||||
// LAMBDA: call void {{.+}} @__kmpc_fork_call({{.+}}, i32 1, {{.+}}* [[OMP_REGION:@.+]] to {{.+}}, i8* %{{.+}})
|
||||
#pragma omp parallel
|
||||
#pragma omp for firstprivate(g)
|
||||
#pragma omp for firstprivate(g, g1)
|
||||
for (int i = 0; i < 2; ++i) {
|
||||
// LAMBDA: define{{.*}} internal{{.*}} void [[OMP_REGION]](i32* %{{.+}}, i32* %{{.+}}, %{{.+}}* [[ARG:%.+]])
|
||||
// Skip temp vars for loop
|
||||
|
@ -85,6 +86,7 @@ int main() {
|
|||
// LAMBDA: store i{{[0-9]+}} [[G_VAL]], i{{[0-9]+}}* [[G_PRIVATE_ADDR]]
|
||||
// LAMBDA: call void @__kmpc_barrier(
|
||||
g = 1;
|
||||
g1 = 1;
|
||||
// LAMBDA: call void @__kmpc_for_static_init_4(
|
||||
// LAMBDA: store i{{[0-9]+}} 1, i{{[0-9]+}}* [[G_PRIVATE_ADDR]],
|
||||
// LAMBDA: [[G_PRIVATE_ADDR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG:%.+]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
|
||||
|
@ -96,6 +98,7 @@ int main() {
|
|||
// LAMBDA: define {{.+}} void [[INNER_LAMBDA]](%{{.+}}* [[ARG_PTR:%.+]])
|
||||
// LAMBDA: store %{{.+}}* [[ARG_PTR]], %{{.+}}** [[ARG_PTR_REF:%.+]],
|
||||
g = 2;
|
||||
g1 = 2;
|
||||
// LAMBDA: [[ARG_PTR:%.+]] = load %{{.+}}*, %{{.+}}** [[ARG_PTR_REF]]
|
||||
// LAMBDA: [[G_PTR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG_PTR]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
|
||||
// LAMBDA: [[G_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[G_PTR_REF]]
|
||||
|
@ -112,7 +115,7 @@ int main() {
|
|||
// BLOCKS: define{{.*}} internal{{.*}} void {{.+}}(i8*
|
||||
// BLOCKS: call void {{.+}} @__kmpc_fork_call({{.+}}, i32 1, {{.+}}* [[OMP_REGION:@.+]] to {{.+}}, i8* %{{.+}})
|
||||
#pragma omp parallel
|
||||
#pragma omp for firstprivate(g)
|
||||
#pragma omp for firstprivate(g, g1)
|
||||
for (int i = 0; i < 2; ++i) {
|
||||
// BLOCKS: define{{.*}} internal{{.*}} void [[OMP_REGION]](i32* %{{.+}}, i32* %{{.+}}, %{{.+}}* [[ARG:%.+]])
|
||||
// Skip temp vars for loop
|
||||
|
@ -126,6 +129,7 @@ int main() {
|
|||
// BLOCKS: store i{{[0-9]+}} [[G_VAL]], i{{[0-9]+}}* [[G_PRIVATE_ADDR]]
|
||||
// BLOCKS: call void @__kmpc_barrier(
|
||||
g = 1;
|
||||
g1 =1;
|
||||
// BLOCKS: call void @__kmpc_for_static_init_4(
|
||||
// BLOCKS: store i{{[0-9]+}} 1, i{{[0-9]+}}* [[G_PRIVATE_ADDR]],
|
||||
// BLOCKS-NOT: [[G]]{{[[^:word:]]}}
|
||||
|
@ -137,6 +141,7 @@ int main() {
|
|||
^{
|
||||
// BLOCKS: define {{.+}} void {{@.+}}(i8*
|
||||
g = 2;
|
||||
g1 = 2;
|
||||
// BLOCKS-NOT: [[G]]{{[[^:word:]]}}
|
||||
// BLOCKS: store i{{[0-9]+}} 2, i{{[0-9]+}}*
|
||||
// BLOCKS-NOT: [[G]]{{[[^:word:]]}}
|
||||
|
|
|
@ -19,6 +19,7 @@ struct S {
|
|||
};
|
||||
|
||||
volatile int g = 1212;
|
||||
volatile int &g1 = g;
|
||||
float f;
|
||||
char cnt;
|
||||
|
||||
|
@ -62,7 +63,7 @@ int main() {
|
|||
// LAMBDA: define{{.*}} internal{{.*}} void [[OUTER_LAMBDA]](
|
||||
// LAMBDA: call void {{.+}} @__kmpc_fork_call({{.+}}, i32 1, {{.+}}* [[OMP_REGION:@.+]] to {{.+}}, i8* %{{.+}})
|
||||
#pragma omp parallel
|
||||
#pragma omp for lastprivate(g)
|
||||
#pragma omp for lastprivate(g, g1)
|
||||
for (int i = 0; i < 2; ++i) {
|
||||
// LAMBDA: define{{.*}} internal{{.*}} void [[OMP_REGION]](i32* %{{.+}}, i32* %{{.+}}, %{{.+}}* [[ARG:%.+]])
|
||||
// LAMBDA: alloca i{{[0-9]+}},
|
||||
|
@ -81,6 +82,7 @@ int main() {
|
|||
// LAMBDA: call void [[INNER_LAMBDA:@.+]](%{{.+}}* [[ARG]])
|
||||
// LAMBDA: call void @__kmpc_for_static_fini(%{{.+}}* @{{.+}}, i32 [[GTID]])
|
||||
g = 1;
|
||||
g1 = 1;
|
||||
// Check for final copying of private values back to original vars.
|
||||
// LAMBDA: [[IS_LAST_VAL:%.+]] = load i32, i32* [[IS_LAST_ADDR]],
|
||||
// LAMBDA: [[IS_LAST_ITER:%.+]] = icmp ne i32 [[IS_LAST_VAL]], 0
|
||||
|
@ -98,6 +100,7 @@ int main() {
|
|||
// LAMBDA: define {{.+}} void [[INNER_LAMBDA]](%{{.+}}* [[ARG_PTR:%.+]])
|
||||
// LAMBDA: store %{{.+}}* [[ARG_PTR]], %{{.+}}** [[ARG_PTR_REF:%.+]],
|
||||
g = 2;
|
||||
g1 = 2;
|
||||
// LAMBDA: [[ARG_PTR:%.+]] = load %{{.+}}*, %{{.+}}** [[ARG_PTR_REF]]
|
||||
// LAMBDA: [[G_PTR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG_PTR]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
|
||||
// LAMBDA: [[G_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[G_PTR_REF]]
|
||||
|
@ -114,7 +117,7 @@ int main() {
|
|||
// BLOCKS: define{{.*}} internal{{.*}} void {{.+}}(i8*
|
||||
// BLOCKS: call void {{.+}} @__kmpc_fork_call({{.+}}, i32 1, {{.+}}* [[OMP_REGION:@.+]] to {{.+}}, i8* %{{.+}})
|
||||
#pragma omp parallel
|
||||
#pragma omp for lastprivate(g)
|
||||
#pragma omp for lastprivate(g, g1)
|
||||
for (int i = 0; i < 2; ++i) {
|
||||
// BLOCKS: define{{.*}} internal{{.*}} void [[OMP_REGION]](i32* %{{.+}}, i32* %{{.+}}, %{{.+}}* [[ARG:%.+]])
|
||||
// BLOCKS: alloca i{{[0-9]+}},
|
||||
|
@ -134,6 +137,7 @@ int main() {
|
|||
// BLOCKS: call void {{%.+}}(i8
|
||||
// BLOCKS: call void @__kmpc_for_static_fini(%{{.+}}* @{{.+}}, i32 [[GTID]])
|
||||
g = 1;
|
||||
g1 = 1;
|
||||
// Check for final copying of private values back to original vars.
|
||||
// BLOCKS: [[IS_LAST_VAL:%.+]] = load i32, i32* [[IS_LAST_ADDR]],
|
||||
// BLOCKS: [[IS_LAST_ITER:%.+]] = icmp ne i32 [[IS_LAST_VAL]], 0
|
||||
|
@ -148,9 +152,11 @@ int main() {
|
|||
// BLOCKS: [[LAST_DONE]]
|
||||
// BLOCKS: call i32 @__kmpc_cancel_barrier(%{{.+}}* @{{.+}}, i{{[0-9]+}} [[GTID]])
|
||||
g = 1;
|
||||
g1 = 1;
|
||||
^{
|
||||
// BLOCKS: define {{.+}} void {{@.+}}(i8*
|
||||
g = 2;
|
||||
g1 = 1;
|
||||
// BLOCKS-NOT: [[G]]{{[[^:word:]]}}
|
||||
// BLOCKS: store i{{[0-9]+}} 2, i{{[0-9]+}}*
|
||||
// BLOCKS-NOT: [[G]]{{[[^:word:]]}}
|
||||
|
@ -408,6 +414,7 @@ int main() {
|
|||
// CHECK: [[VEC_PRIV:%.+]] = alloca [2 x i{{[0-9]+}}],
|
||||
// CHECK: [[S_ARR_PRIV:%.+]] = alloca [2 x [[S_INT_TY]]],
|
||||
// CHECK: [[VAR_PRIV:%.+]] = alloca [[S_INT_TY]],
|
||||
// CHECK: [[VAR_PRIV_REF:%.+]] = alloca [[S_INT_TY]]*,
|
||||
// CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_REF:%.+]]
|
||||
|
||||
// Check for default initialization.
|
||||
|
@ -424,6 +431,7 @@ int main() {
|
|||
// CHECK: [[VAR_REF_PTR:%.+]] = getelementptr inbounds [[CAP_TMAIN_TY]], [[CAP_TMAIN_TY]]* %{{.+}}, i{{[0-9]+}} 0, i{{[0-9]+}} 3
|
||||
// CHECK: [[VAR_REF:%.+]] = load [[S_INT_TY]]*, [[S_INT_TY]]** [[VAR_REF_PTR]],
|
||||
// CHECK: call {{.*}} [[S_INT_TY_DEF_CONSTR]]([[S_INT_TY]]* [[VAR_PRIV]])
|
||||
// CHECK: store [[S_INT_TY]]* [[VAR_PRIV]], [[S_INT_TY]]** [[VAR_PRIV_REF]]
|
||||
// CHECK: call {{.+}} @__kmpc_for_static_init_4(%{{.+}}* @{{.+}}, i32 %{{.+}}, i32 34, i32* [[IS_LAST_ADDR:%.+]], i32* %{{.+}}, i32* %{{.+}}, i32* %{{.+}}, i32 1, i32 1)
|
||||
// <Skip loop body>
|
||||
// CHECK: call void @__kmpc_for_static_fini(%{{.+}}* @{{.+}}, i32 %{{.+}})
|
||||
|
@ -456,7 +464,8 @@ int main() {
|
|||
// CHECK: [[S_ARR_BODY_DONE]]
|
||||
|
||||
// original var=private_var;
|
||||
// CHECK: call {{.*}} [[S_INT_TY_COPY_ASSIGN:@.+]]([[S_INT_TY]]* [[VAR_REF]], [[S_INT_TY]]* {{.*}} [[VAR_PRIV]])
|
||||
// CHECK: [[VAR_PRIV1:%.+]] = load [[S_INT_TY]]*, [[S_INT_TY]]** [[VAR_PRIV_REF]],
|
||||
// CHECK: call {{.*}} [[S_INT_TY_COPY_ASSIGN:@.+]]([[S_INT_TY]]* [[VAR_REF]], [[S_INT_TY]]* {{.*}} [[VAR_PRIV1]])
|
||||
// CHECK: br label %[[LAST_DONE]]
|
||||
// CHECK: [[LAST_DONE]]
|
||||
// CHECK-DAG: call void [[S_INT_TY_DESTR]]([[S_INT_TY]]* [[VAR_PRIV]])
|
||||
|
|
|
@ -19,6 +19,7 @@ struct S {
|
|||
};
|
||||
|
||||
volatile int g = 1212;
|
||||
volatile int &g1 = g;
|
||||
float f;
|
||||
char cnt;
|
||||
|
||||
|
@ -51,7 +52,7 @@ int main() {
|
|||
// LAMBDA: define{{.*}} internal{{.*}} void [[OUTER_LAMBDA]](
|
||||
// LAMBDA: call void {{.+}} @__kmpc_fork_call({{.+}}, i32 1, {{.+}}* [[OMP_REGION:@.+]] to {{.+}}, i8* %{{.+}})
|
||||
#pragma omp parallel
|
||||
#pragma omp for linear(g:5)
|
||||
#pragma omp for linear(g, g1:5)
|
||||
for (int i = 0; i < 2; ++i) {
|
||||
// LAMBDA: define{{.*}} internal{{.*}} void [[OMP_REGION]](i32* %{{.+}}, i32* %{{.+}}, %{{.+}}* [[ARG:%.+]])
|
||||
// LAMBDA: alloca i{{[0-9]+}},
|
||||
|
@ -61,6 +62,7 @@ int main() {
|
|||
// LAMBDA: alloca i{{[0-9]+}},
|
||||
// LAMBDA: alloca i{{[0-9]+}},
|
||||
// LAMBDA: alloca i{{[0-9]+}},
|
||||
// LAMBDA: alloca i{{[0-9]+}},
|
||||
// LAMBDA: [[G_PRIVATE_ADDR:%.+]] = alloca i{{[0-9]+}},
|
||||
// LAMBDA: store %{{.+}}* [[ARG]], %{{.+}}** [[ARG_REF:%.+]],
|
||||
// LAMBDA: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** %{{.+}}
|
||||
|
@ -79,11 +81,13 @@ int main() {
|
|||
// LAMBDA: call void [[INNER_LAMBDA:@.+]](%{{.+}}* [[ARG]])
|
||||
// LAMBDA: call void @__kmpc_for_static_fini(%{{.+}}* @{{.+}}, i32 [[GTID]])
|
||||
g += 5;
|
||||
g1 += 5;
|
||||
// LAMBDA: call i32 @__kmpc_cancel_barrier(%{{.+}}* @{{.+}}, i{{[0-9]+}} [[GTID]])
|
||||
[&]() {
|
||||
// LAMBDA: define {{.+}} void [[INNER_LAMBDA]](%{{.+}}* [[ARG_PTR:%.+]])
|
||||
// LAMBDA: store %{{.+}}* [[ARG_PTR]], %{{.+}}** [[ARG_PTR_REF:%.+]],
|
||||
g = 2;
|
||||
g1 = 2;
|
||||
// LAMBDA: [[ARG_PTR:%.+]] = load %{{.+}}*, %{{.+}}** [[ARG_PTR_REF]]
|
||||
// LAMBDA: [[G_PTR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG_PTR]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
|
||||
// LAMBDA: [[G_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[G_PTR_REF]]
|
||||
|
@ -100,7 +104,7 @@ int main() {
|
|||
// BLOCKS: define{{.*}} internal{{.*}} void {{.+}}(i8*
|
||||
// BLOCKS: call void {{.+}} @__kmpc_fork_call({{.+}}, i32 1, {{.+}}* [[OMP_REGION:@.+]] to {{.+}}, i8* %{{.+}})
|
||||
#pragma omp parallel
|
||||
#pragma omp for linear(g:5)
|
||||
#pragma omp for linear(g, g1:5)
|
||||
for (int i = 0; i < 2; ++i) {
|
||||
// BLOCKS: define{{.*}} internal{{.*}} void [[OMP_REGION]](i32* %{{.+}}, i32* %{{.+}}, %{{.+}}* [[ARG:%.+]])
|
||||
// BLOCKS: alloca i{{[0-9]+}},
|
||||
|
@ -110,6 +114,7 @@ int main() {
|
|||
// BLOCKS: alloca i{{[0-9]+}},
|
||||
// BLOCKS: alloca i{{[0-9]+}},
|
||||
// BLOCKS: alloca i{{[0-9]+}},
|
||||
// BLOCKS: alloca i{{[0-9]+}},
|
||||
// BLOCKS: [[G_PRIVATE_ADDR:%.+]] = alloca i{{[0-9]+}},
|
||||
// BLOCKS: store %{{.+}}* [[ARG]], %{{.+}}** [[ARG_REF:%.+]],
|
||||
// BLOCKS: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** %{{.+}}
|
||||
|
@ -129,11 +134,14 @@ int main() {
|
|||
// BLOCKS: call void {{%.+}}(i8
|
||||
// BLOCKS: call void @__kmpc_for_static_fini(%{{.+}}* @{{.+}}, i32 [[GTID]])
|
||||
g += 5;
|
||||
g1 += 5;
|
||||
// BLOCKS: call i32 @__kmpc_cancel_barrier(%{{.+}}* @{{.+}}, i{{[0-9]+}} [[GTID]])
|
||||
g = 1;
|
||||
g1 = 5;
|
||||
^{
|
||||
// BLOCKS: define {{.+}} void {{@.+}}(i8*
|
||||
g = 2;
|
||||
g1 = 2;
|
||||
// BLOCKS-NOT: [[G]]{{[[^:word:]]}}
|
||||
// BLOCKS: store i{{[0-9]+}} 2, i{{[0-9]+}}*
|
||||
// BLOCKS-NOT: [[G]]{{[[^:word:]]}}
|
||||
|
@ -227,6 +235,7 @@ int main() {
|
|||
// CHECK: alloca i{{[0-9]+}},
|
||||
// CHECK: [[PVAR_PRIV:%.+]] = alloca i32*,
|
||||
// CHECK: [[LVAR_PRIV:%.+]] = alloca i32,
|
||||
// CHECK: [[LVAR_PRIV_REF:%.+]] = alloca i32*,
|
||||
// CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_REF:%.+]]
|
||||
|
||||
// Check for default initialization.
|
||||
|
@ -238,6 +247,8 @@ int main() {
|
|||
// CHECK: [[LVAR_REF:%.+]] = load i32*, i32** [[LVAR_PTR_REF]],
|
||||
// CHECK: [[LVAR_VAL:%.+]] = load i32, i32* [[LVAR_REF]],
|
||||
// CHECK: store i32 [[LVAR_VAL]], i32* [[LVAR_START]],
|
||||
// CHECK: store i32* [[LVAR_PRIV]], i32** [[LVAR_PRIV_REF]],
|
||||
|
||||
// CHECK: call {{.+}} @__kmpc_for_static_init_4(%{{.+}}* @{{.+}}, i32 [[GTID:%.+]], i32 34, i32* [[IS_LAST_ADDR:%.+]], i32* %{{.+}}, i32* %{{.+}}, i32* %{{.+}}, i32 1, i32 1)
|
||||
// CHECK: [[PVAR_VAL:%.+]] = load i32*, i32** [[PVAR_START]],
|
||||
// CHECK: [[CNT:%.+]] = load i32, i32*
|
||||
|
@ -253,6 +264,7 @@ int main() {
|
|||
// CHECK: [[PVAR_VAL:%.+]] = load i32*, i32** [[PVAR_PRIV]]
|
||||
// CHECK: [[PTR:%.+]] = getelementptr inbounds i32, i32* [[PVAR_VAL]], i32 1
|
||||
// CHECK: store i32* [[PTR]], i32** [[PVAR_PRIV]],
|
||||
// CHECK: [[LVAR_PRIV:%.+]] = load i32*, i32** [[LVAR_PRIV_REF]],
|
||||
// CHECK: [[LVAR_VAL:%.+]] = load i32, i32* [[LVAR_PRIV]],
|
||||
// CHECK: [[ADD:%.+]] = add nsw i32 [[LVAR_VAL]], 1
|
||||
// CHECK: store i32 [[ADD]], i32* [[LVAR_PRIV]],
|
||||
|
|
|
@ -18,6 +18,7 @@ struct S {
|
|||
};
|
||||
|
||||
volatile double g;
|
||||
volatile double &g1 = g;
|
||||
|
||||
// CHECK: [[S_FLOAT_TY:%.+]] = type { float }
|
||||
// CHECK: [[CAP_MAIN_TY:%.+]] = type { i8 }
|
||||
|
@ -30,7 +31,7 @@ T tmain() {
|
|||
T t_var = T();
|
||||
T vec[] = {1, 2};
|
||||
S<T> s_arr[] = {1, 2};
|
||||
S<T> var(3);
|
||||
S<T> &var = test;
|
||||
#pragma omp parallel
|
||||
#pragma omp for private(t_var, vec, s_arr, s_arr, var, var)
|
||||
for (int i = 0; i < 2; ++i) {
|
||||
|
@ -49,12 +50,13 @@ int main() {
|
|||
// LAMBDA: define{{.*}} internal{{.*}} void [[OUTER_LAMBDA]](
|
||||
// LAMBDA: call {{.*}}void {{.+}} @__kmpc_fork_call({{.+}}, i32 1, {{.+}}* [[OMP_REGION:@.+]] to {{.+}}, i8* %{{.+}})
|
||||
#pragma omp parallel
|
||||
#pragma omp for private(g)
|
||||
#pragma omp for private(g, g1)
|
||||
for (int i = 0; i < 2; ++i) {
|
||||
// LAMBDA: define{{.*}} internal{{.*}} void [[OMP_REGION]](i32* %{{.+}}, i32* %{{.+}}, %{{.+}}* [[ARG:%.+]])
|
||||
// LAMBDA: [[G_PRIVATE_ADDR:%.+]] = alloca double,
|
||||
// LAMBDA: store %{{.+}}* [[ARG]], %{{.+}}** [[ARG_REF:%.+]],
|
||||
g = 1;
|
||||
g1 = 1;
|
||||
// LAMBDA: call {{.*}}void @__kmpc_for_static_init_4(
|
||||
// LAMBDA: store double 1.0{{.+}}, double* [[G_PRIVATE_ADDR]],
|
||||
// LAMBDA: [[G_PRIVATE_ADDR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG:%.+]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
|
||||
|
@ -65,6 +67,7 @@ int main() {
|
|||
// LAMBDA: define {{.+}} void [[INNER_LAMBDA]](%{{.+}}* [[ARG_PTR:%.+]])
|
||||
// LAMBDA: store %{{.+}}* [[ARG_PTR]], %{{.+}}** [[ARG_PTR_REF:%.+]],
|
||||
g = 2;
|
||||
g1 = 2;
|
||||
// LAMBDA: [[ARG_PTR:%.+]] = load %{{.+}}*, %{{.+}}** [[ARG_PTR_REF]]
|
||||
// LAMBDA: [[G_PTR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG_PTR]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
|
||||
// LAMBDA: [[G_REF:%.+]] = load double*, double** [[G_PTR_REF]]
|
||||
|
@ -81,12 +84,13 @@ int main() {
|
|||
// BLOCKS: define{{.*}} internal{{.*}} void {{.+}}(i8*
|
||||
// BLOCKS: call {{.*}}void {{.+}} @__kmpc_fork_call({{.+}}, i32 1, {{.+}}* [[OMP_REGION:@.+]] to {{.+}}, i8* {{.+}})
|
||||
#pragma omp parallel
|
||||
#pragma omp for private(g)
|
||||
#pragma omp for private(g, g1)
|
||||
for (int i = 0; i < 2; ++i) {
|
||||
// BLOCKS: define{{.*}} internal{{.*}} void [[OMP_REGION]](i32* %{{.+}}, i32* %{{.+}}, %{{.+}}* [[ARG:%.+]])
|
||||
// BLOCKS: [[G_PRIVATE_ADDR:%.+]] = alloca double,
|
||||
// BLOCKS: store %{{.+}}* [[ARG]], %{{.+}}** [[ARG_REF:%.+]],
|
||||
g = 1;
|
||||
g1 = 1;
|
||||
// BLOCKS: call {{.*}}void @__kmpc_for_static_init_4(
|
||||
// BLOCKS: store double 1.0{{.+}}, double* [[G_PRIVATE_ADDR]],
|
||||
// BLOCKS-NOT: [[G]]{{[[^:word:]]}}
|
||||
|
@ -97,6 +101,7 @@ int main() {
|
|||
^{
|
||||
// BLOCKS: define {{.+}} void {{@.+}}(i8*
|
||||
g = 2;
|
||||
g1 = 2;
|
||||
// BLOCKS-NOT: [[G]]{{[[^:word:]]}}
|
||||
// BLOCKS: store double 2.0{{.+}}, double*
|
||||
// BLOCKS-NOT: [[G]]{{[[^:word:]]}}
|
||||
|
@ -110,7 +115,7 @@ int main() {
|
|||
int t_var = 0;
|
||||
int vec[] = {1, 2};
|
||||
S<float> s_arr[] = {1, 2};
|
||||
S<float> var(3);
|
||||
S<float> &var = test;
|
||||
#pragma omp parallel
|
||||
#pragma omp for private(t_var, vec, s_arr, s_arr, var, var)
|
||||
for (int i = 0; i < 2; ++i) {
|
||||
|
|
|
@ -8,7 +8,8 @@
|
|||
#ifndef HEADER
|
||||
#define HEADER
|
||||
|
||||
volatile double g;
|
||||
volatile double g, g_orig;
|
||||
volatile double &g1 = g_orig;
|
||||
|
||||
template <class T>
|
||||
struct S {
|
||||
|
@ -36,7 +37,8 @@ T tmain() {
|
|||
T t_var = T(), t_var1;
|
||||
T vec[] = {1, 2};
|
||||
S<T> s_arr[] = {1, 2};
|
||||
S<T> var(3), var1;
|
||||
S<T> &var = test;
|
||||
S<T> var1;
|
||||
#pragma omp parallel
|
||||
#pragma omp for reduction(+:t_var) reduction(&:var) reduction(&& : var1) reduction(min: t_var1) nowait
|
||||
for (int i = 0; i < 2; ++i) {
|
||||
|
@ -61,24 +63,25 @@ int main() {
|
|||
// LAMBDA: define{{.*}} internal{{.*}} void [[OUTER_LAMBDA]](
|
||||
// LAMBDA: call void {{.+}} @__kmpc_fork_call({{.+}}, i32 1, {{.+}}* [[OMP_REGION:@.+]] to {{.+}}, i8* %{{.+}})
|
||||
#pragma omp parallel
|
||||
#pragma omp for reduction(+:g)
|
||||
#pragma omp for reduction(+:g, g1)
|
||||
for (int i = 0; i < 2; ++i) {
|
||||
// LAMBDA: define{{.*}} internal{{.*}} void [[OMP_REGION]](i32* %{{.+}}, i32* %{{.+}}, %{{.+}}* %{{.+}})
|
||||
// LAMBDA: [[G_PRIVATE_ADDR:%.+]] = alloca double,
|
||||
|
||||
// Reduction list for runtime.
|
||||
// LAMBDA: [[RED_LIST:%.+]] = alloca [1 x i8*],
|
||||
// LAMBDA: [[RED_LIST:%.+]] = alloca [2 x i8*],
|
||||
|
||||
// LAMBDA: store double 0.0{{.+}}, double* [[G_PRIVATE_ADDR]]
|
||||
// LAMBDA: call void @__kmpc_for_static_init_4(
|
||||
g = 1;
|
||||
g1 = 1;
|
||||
// LAMBDA: store double 1.0{{.+}}, double* [[G_PRIVATE_ADDR]],
|
||||
// LAMBDA: [[G_PRIVATE_ADDR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG:%.+]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
|
||||
// LAMBDA: store double* [[G_PRIVATE_ADDR]], double** [[G_PRIVATE_ADDR_REF]]
|
||||
// LAMBDA: call void [[INNER_LAMBDA:@.+]](%{{.+}}* [[ARG]])
|
||||
// LAMBDA: call void @__kmpc_for_static_fini(
|
||||
|
||||
// LAMBDA: [[G_PRIV_REF:%.+]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[RED_LIST]], i32 0, i32 0
|
||||
// LAMBDA: [[G_PRIV_REF:%.+]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[RED_LIST]], i32 0, i32 0
|
||||
// LAMBDA: [[BITCAST:%.+]] = bitcast double* [[G_PRIVATE_ADDR]] to i8*
|
||||
// LAMBDA: store i8* [[BITCAST]], i8** [[G_PRIV_REF]],
|
||||
// LAMBDA: call i32 @__kmpc_reduce(
|
||||
|
@ -104,6 +107,7 @@ int main() {
|
|||
// LAMBDA: define {{.+}} void [[INNER_LAMBDA]](%{{.+}}* [[ARG_PTR:%.+]])
|
||||
// LAMBDA: store %{{.+}}* [[ARG_PTR]], %{{.+}}** [[ARG_PTR_REF:%.+]],
|
||||
g = 2;
|
||||
g1 = 2;
|
||||
// LAMBDA: [[ARG_PTR:%.+]] = load %{{.+}}*, %{{.+}}** [[ARG_PTR_REF]]
|
||||
// LAMBDA: [[G_PTR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG_PTR]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
|
||||
// LAMBDA: [[G_REF:%.+]] = load double*, double** [[G_PTR_REF]]
|
||||
|
@ -120,16 +124,17 @@ int main() {
|
|||
// BLOCKS: define{{.*}} internal{{.*}} void {{.+}}(i8*
|
||||
// BLOCKS: call void {{.+}} @__kmpc_fork_call({{.+}}, i32 1, {{.+}}* [[OMP_REGION:@.+]] to {{.+}}, i8* %{{.+}})
|
||||
#pragma omp parallel
|
||||
#pragma omp for reduction(-:g)
|
||||
#pragma omp for reduction(-:g, g1)
|
||||
for (int i = 0; i < 2; ++i) {
|
||||
// BLOCKS: define{{.*}} internal{{.*}} void [[OMP_REGION]](i32* %{{.+}}, i32* %{{.+}}, %{{.+}}* %{{.+}})
|
||||
// BLOCKS: [[G_PRIVATE_ADDR:%.+]] = alloca double,
|
||||
|
||||
// Reduction list for runtime.
|
||||
// BLOCKS: [[RED_LIST:%.+]] = alloca [1 x i8*],
|
||||
// BLOCKS: [[RED_LIST:%.+]] = alloca [2 x i8*],
|
||||
|
||||
// BLOCKS: store double 0.0{{.+}}, double* [[G_PRIVATE_ADDR]]
|
||||
g = 1;
|
||||
g1 = 1;
|
||||
// BLOCKS: call void @__kmpc_for_static_init_4(
|
||||
// BLOCKS: store double 1.0{{.+}}, double* [[G_PRIVATE_ADDR]],
|
||||
// BLOCKS-NOT: [[G]]{{[[^:word:]]}}
|
||||
|
@ -138,7 +143,7 @@ int main() {
|
|||
// BLOCKS: call void {{%.+}}(i8
|
||||
// BLOCKS: call void @__kmpc_for_static_fini(
|
||||
|
||||
// BLOCKS: [[G_PRIV_REF:%.+]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[RED_LIST]], i32 0, i32 0
|
||||
// BLOCKS: [[G_PRIV_REF:%.+]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[RED_LIST]], i32 0, i32 0
|
||||
// BLOCKS: [[BITCAST:%.+]] = bitcast double* [[G_PRIVATE_ADDR]] to i8*
|
||||
// BLOCKS: store i8* [[BITCAST]], i8** [[G_PRIV_REF]],
|
||||
// BLOCKS: call i32 @__kmpc_reduce(
|
||||
|
@ -163,6 +168,7 @@ int main() {
|
|||
^{
|
||||
// BLOCKS: define {{.+}} void {{@.+}}(i8*
|
||||
g = 2;
|
||||
g1 = 2;
|
||||
// BLOCKS-NOT: [[G]]{{[[^:word:]]}}
|
||||
// BLOCKS: store double 2.0{{.+}}, double*
|
||||
// BLOCKS-NOT: [[G]]{{[[^:word:]]}}
|
||||
|
@ -176,7 +182,8 @@ int main() {
|
|||
float t_var = 0, t_var1;
|
||||
int vec[] = {1, 2};
|
||||
S<float> s_arr[] = {1, 2};
|
||||
S<float> var(3), var1;
|
||||
S<float> &var = test;
|
||||
S<float> var1;
|
||||
#pragma omp parallel
|
||||
#pragma omp for reduction(+:t_var) reduction(&:var) reduction(&& : var1) reduction(min: t_var1)
|
||||
for (int i = 0; i < 2; ++i) {
|
||||
|
|
|
@ -525,8 +525,10 @@ void linear(float *a) {
|
|||
//
|
||||
// Update linear vars after loop, as the loop was operating on a private version.
|
||||
// CHECK: [[K_REF:%.+]] = load i64*, i64** [[K_ADDR]],
|
||||
// CHECK: store i64* [[K_REF]], i64** [[K_PRIV_REF:%.+]],
|
||||
// CHECK: [[LIN0_2:%.+]] = load i64, i64* [[LIN0]]
|
||||
// CHECK-NEXT: [[LIN_ADD2:%.+]] = add nsw i64 [[LIN0_2]], 27
|
||||
// CHECK-NEXT: [[K_REF:%.+]] = load i64*, i64** [[K_PRIV_REF]],
|
||||
// CHECK-NEXT: store i64 [[LIN_ADD2]], i64* [[K_REF]]
|
||||
//
|
||||
|
||||
|
@ -566,8 +568,10 @@ void linear(float *a) {
|
|||
//
|
||||
// Update linear vars after loop, as the loop was operating on a private version.
|
||||
// CHECK: [[K_REF:%.+]] = load i64*, i64** [[K_ADDR]],
|
||||
// CHECK: store i64* [[K_REF]], i64** [[K_PRIV_REF:%.+]],
|
||||
// CHECK: [[LIN0_2:%.+]] = load i64, i64* [[LIN0]]
|
||||
// CHECK-NEXT: [[LIN_ADD2:%.+]] = add nsw i64 [[LIN0_2]], 27
|
||||
// CHECK-NEXT: [[K_REF:%.+]] = load i64*, i64** [[K_PRIV_REF]],
|
||||
// CHECK-NEXT: store i64 [[LIN_ADD2]], i64* [[K_REF]]
|
||||
//
|
||||
#pragma omp simd linear(uval(k) : 3)
|
||||
|
|
|
@ -38,7 +38,7 @@ int main() {
|
|||
// CHECK-DAG: [[C_ADDR:%.+]] = alloca [[TEST_CLASS_TY]]
|
||||
char a;
|
||||
char a2[2];
|
||||
TestClass c;
|
||||
TestClass &c = tc;
|
||||
|
||||
// CHECK: [[GTID:%.+]] = call i32 @__kmpc_global_thread_num([[IDENT_T_TY]]* [[DEFAULT_LOC:@.+]])
|
||||
// CHECK-DAG: [[DID_IT:%.+]] = alloca i32,
|
||||
|
@ -81,8 +81,7 @@ int main() {
|
|||
// CHECK: [[A_PTR_REF:%.+]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[COPY_LIST]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
|
||||
// CHECK: store i8* [[A_ADDR]], i8** [[A_PTR_REF]],
|
||||
// CHECK: [[C_PTR_REF:%.+]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[COPY_LIST]], i{{[0-9]+}} 0, i{{[0-9]+}} 1
|
||||
// CHECK: [[C_PTR_REF_VOID_PTR:%.+]] = bitcast [[TEST_CLASS_TY]]* [[C_ADDR]] to i8*
|
||||
// CHECK: store i8* [[C_PTR_REF_VOID_PTR]], i8** [[C_PTR_REF]],
|
||||
// CHECK: store i8* {{.+}}, i8** [[C_PTR_REF]],
|
||||
// CHECK: [[TC_PTR_REF:%.+]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[COPY_LIST]], i{{[0-9]+}} 0, i{{[0-9]+}} 2
|
||||
// CHECK: [[TC_THREADPRIVATE_ADDR_VOID_PTR:%.+]] = call{{.*}} i8* @__kmpc_threadprivate_cached
|
||||
// CHECK: [[TC_THREADPRIVATE_ADDR:%.+]] = bitcast i8* [[TC_THREADPRIVATE_ADDR_VOID_PTR]] to [[TEST_CLASS_TY]]*
|
||||
|
|
Loading…
Reference in New Issue