[SROA] Don't de-atomic volatile loads and stores
Volatile loads and stores are made visible in global state regardless of what memory is involved. It is not correct to disregard the ordering and synchronization scope because it is possible to synchronize with memory operations performed by hardware. This partially addresses PR23737. llvm-svn: 242126
This commit is contained in:
parent
4ca1903696
commit
62690b1952
|
@@ -2593,13 +2593,21 @@ private:
       V = rewriteIntegerLoad(LI);
     } else if (NewBeginOffset == NewAllocaBeginOffset &&
                canConvertValue(DL, NewAllocaTy, LI.getType())) {
-      V = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), LI.isVolatile(),
-                                LI.getName());
+      LoadInst *NewLI = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
+                                              LI.isVolatile(), LI.getName());
+      if (LI.isVolatile())
+        NewLI->setAtomic(LI.getOrdering(), LI.getSynchScope());
+
+      V = NewLI;
     } else {
       Type *LTy = TargetTy->getPointerTo();
-      V = IRB.CreateAlignedLoad(getNewAllocaSlicePtr(IRB, LTy),
-                                getSliceAlign(TargetTy), LI.isVolatile(),
-                                LI.getName());
+      LoadInst *NewLI = IRB.CreateAlignedLoad(getNewAllocaSlicePtr(IRB, LTy),
+                                              getSliceAlign(TargetTy),
+                                              LI.isVolatile(), LI.getName());
+      if (LI.isVolatile())
+        NewLI->setAtomic(LI.getOrdering(), LI.getSynchScope());
+
+      V = NewLI;
       IsPtrAdjusted = true;
     }
     V = convertValue(DL, IRB, V, TargetTy);
@@ -2722,7 +2730,8 @@ private:
       NewSI = IRB.CreateAlignedStore(V, NewPtr, getSliceAlign(V->getType()),
                                      SI.isVolatile());
     }
-    (void)NewSI;
+    if (SI.isVolatile())
+      NewSI->setAtomic(SI.getOrdering(), SI.getSynchScope());
     Pass.DeadInsts.insert(&SI);
     deleteIfTriviallyDead(OldOp);

@@ -1595,3 +1595,14 @@ entry:
   store i32 %load, i32* %a.gep1
   ret void
 }
+
+define void @PR23737() {
+; CHECK-LABEL: @PR23737(
+; CHECK: store atomic volatile {{.*}} seq_cst
+; CHECK: load atomic volatile {{.*}} seq_cst
+entry:
+  %ptr = alloca i64, align 8
+  store atomic volatile i64 0, i64* %ptr seq_cst, align 8
+  %load = load atomic volatile i64, i64* %ptr seq_cst, align 8
+  ret void
+}
Loading…
Reference in New Issue