[RISCV] Add codegen support for atomic load/stores with RV32A

Fences are inserted according to Table A.6 in the current draft of version 2.3
of the RISC-V Instruction Set Manual, which incorporates the memory model
changes and definitions contributed by the RISC-V Memory Consistency Model
task group.

Instruction selection failures will now occur for 8/16/32-bit atomicrmw and 
cmpxchg operations when targeting RV32IA until lowering for these operations 
is added in a follow-on patch.
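
For reference, the load/store rows of that mapping, as exercised by this
patch, reduce to the following standalone C++ sketch (an editor's
illustration; the Ord enum and helper names are not LLVM's):

#include <cstdio>

// Subset of orderings relevant to atomic loads and stores.
enum class Ord { Monotonic, Acquire, Release, SeqCst };

// Fence emitted before the access ("" means none).
const char *leadingFence(bool IsStore, Ord O) {
  if (!IsStore && O == Ord::SeqCst)
    return "fence rw, rw"; // seq_cst load: full leading fence
  if (IsStore && (O == Ord::Release || O == Ord::SeqCst))
    return "fence rw, w"; // release/seq_cst store: order prior accesses
  return "";
}

// Fence emitted after the access.
const char *trailingFence(bool IsStore, Ord O) {
  if (!IsStore && (O == Ord::Acquire || O == Ord::SeqCst))
    return "fence r, rw"; // acquire/seq_cst load: order later accesses
  return "";
}

int main() {
  printf("load seq_cst:  %s ; lw ; %s\n", leadingFence(false, Ord::SeqCst),
         trailingFence(false, Ord::SeqCst));
  printf("store release: %s ; sw\n", leadingFence(true, Ord::Release));
  return 0;
}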

Differential Revision: https://reviews.llvm.org/D47589

llvm-svn: 334591
Alex Bradbury 2018-06-13 12:04:51 +00:00
parent dc790dd5d0
commit 96f492d7df
5 changed files with 273 additions and 2 deletions

lib/Target/RISCV/RISCVISelLowering.cpp

@@ -137,8 +137,10 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
   setOperationAction(ISD::BlockAddress, XLenVT, Custom);
   setOperationAction(ISD::ConstantPool, XLenVT, Custom);
-  // Atomic operations aren't supported in the base RV32I ISA.
-  setMaxAtomicSizeInBitsSupported(0);
+  if (Subtarget.hasStdExtA())
+    setMaxAtomicSizeInBitsSupported(Subtarget.getXLen());
+  else
+    setMaxAtomicSizeInBitsSupported(0);
   setBooleanContents(ZeroOrOneBooleanContent);
@@ -1553,3 +1555,21 @@ RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}

Instruction *RISCVTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
                                                   Instruction *Inst,
                                                   AtomicOrdering Ord) const {
  if (isa<LoadInst>(Inst) && Ord == AtomicOrdering::SequentiallyConsistent)
    return Builder.CreateFence(Ord);
  if (isa<StoreInst>(Inst) && isReleaseOrStronger(Ord))
    return Builder.CreateFence(AtomicOrdering::Release);
  return nullptr;
}

Instruction *RISCVTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
                                                    Instruction *Inst,
                                                    AtomicOrdering Ord) const {
  if (isa<LoadInst>(Inst) && isAcquireOrStronger(Ord))
    return Builder.CreateFence(AtomicOrdering::Acquire);
  return nullptr;
}
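
Together with shouldInsertFencesForAtomic() in RISCVISelLowering.h below,
these hooks make AtomicExpandPass rewrite an atomic load/store into a
monotonic access bracketed by fences. The effect is equivalent to the
following source-level rewrite (an editor's illustration in standard C++,
not part of the patch):

#include <atomic>

int load_seq_cst(const std::atomic<int> &A) {
  // Lowered on RV32IA as: fence rw, rw ; lw ; fence r, rw
  std::atomic_thread_fence(std::memory_order_seq_cst); // leading fence
  int V = A.load(std::memory_order_relaxed);           // plain lw
  std::atomic_thread_fence(std::memory_order_acquire); // trailing fence
  return V;
}

void store_release(std::atomic<int> &A, int V) {
  // Lowered on RV32IA as: fence rw, w ; sw (no trailing fence needed)
  std::atomic_thread_fence(std::memory_order_release); // leading fence
  A.store(V, std::memory_order_relaxed);               // plain sw
}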

lib/Target/RISCV/RISCVISelLowering.h

@@ -66,6 +66,14 @@ public:
  EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                         EVT VT) const override;

  bool shouldInsertFencesForAtomic(const Instruction *I) const override {
    return isa<LoadInst>(I) || isa<StoreInst>(I);
  }
  Instruction *emitLeadingFence(IRBuilder<> &Builder, Instruction *Inst,
                                AtomicOrdering Ord) const override;
  Instruction *emitTrailingFence(IRBuilder<> &Builder, Instruction *Inst,
                                 AtomicOrdering Ord) const override;

private:
  void analyzeInputArgs(MachineFunction &MF, CCState &CCInfo,
                        const SmallVectorImpl<ISD::InputArg> &Ins,

lib/Target/RISCV/RISCVInstrInfo.td

@@ -757,6 +757,12 @@ def : Pat<(atomic_fence (i32 6), (imm)), (FENCE_TSO)>;
// fence seq_cst -> fence rw, rw
def : Pat<(atomic_fence (i32 7), (imm)), (FENCE 0b11, 0b11)>;
// Lowering for atomic load and store is defined in RISCVInstrInfoA.td.
// Although these are lowered to fence+load/store instructions defined in the
// base RV32I/RV64I ISA, this lowering is only used when the A extension is
// present. This is necessary as it isn't valid to mix __atomic_* libcalls
// with inline atomic operations for the same object.
/// Other pseudo-instructions
// Pessimistically assume the stack pointer will be clobbered
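
To make the constraint above concrete: the lock-based __atomic_* libcalls
may guarantee atomicity via a lock that an inline fence+load/store sequence
never acquires, so the two schemes provide no mutual exclusion against each
other. A minimal sketch of the failure mode (hypothetical libcall
implementation; real libatomic differs in detail):

#include <mutex>

static std::mutex AtomicLock; // hypothetical lock used by the libcall path
static int SharedObj;

// Libcall-style access: atomicity comes only from holding the lock.
int libcall_load(const int *P) {
  std::lock_guard<std::mutex> G(AtomicLock);
  return *P;
}

// Inline-style access: a bare store that never touches AtomicLock.
// A thread calling libcall_load(&SharedObj) while another thread runs
// inline_store(&SharedObj, ...) gets no protection from the lock, so the
// same object must be accessed through one scheme consistently.
void inline_store(int *P, int V) { *P = V; }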

lib/Target/RISCV/RISCVInstrInfoA.td

@@ -75,3 +75,23 @@ defm AMOMAX_D : AMO_rr_aq_rl<0b10100, 0b011, "amomax.d">;
defm AMOMINU_D : AMO_rr_aq_rl<0b11000, 0b011, "amominu.d">;
defm AMOMAXU_D : AMO_rr_aq_rl<0b11100, 0b011, "amomaxu.d">;
} // Predicates = [HasStdExtA, IsRV64]

//===----------------------------------------------------------------------===//
// Pseudo-instructions and codegen patterns
//===----------------------------------------------------------------------===//

let Predicates = [HasStdExtA] in {

/// Atomic loads and stores

// Fences will be inserted for atomic load/stores according to the logic in
// RISCVTargetLowering::{emitLeadingFence,emitTrailingFence}.
defm : LdPat<atomic_load_8, LB>;
defm : LdPat<atomic_load_16, LH>;
defm : LdPat<atomic_load_32, LW>;

defm : StPat<atomic_store_8, SB, GPR>;
defm : StPat<atomic_store_16, SH, GPR>;
defm : StPat<atomic_store_32, SW, GPR>;

} // Predicates = [HasStdExtA]

test/CodeGen/RISCV/atomic-load-store.ll

@@ -1,6 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV32I %s
; RUN: llc -mtriple=riscv32 -mattr=+a -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV32IA %s
define i8 @atomic_load_i8_unordered(i8 *%a) nounwind {
; RV32I-LABEL: atomic_load_i8_unordered:
@@ -12,6 +14,11 @@ define i8 @atomic_load_i8_unordered(i8 *%a) nounwind {
; RV32I-NEXT: lw ra, 12(sp)
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IA-LABEL: atomic_load_i8_unordered:
; RV32IA: # %bb.0:
; RV32IA-NEXT: lb a0, 0(a0)
; RV32IA-NEXT: ret
%1 = load atomic i8, i8* %a unordered, align 1
ret i8 %1
}
@@ -26,6 +33,11 @@ define i8 @atomic_load_i8_monotonic(i8 *%a) nounwind {
; RV32I-NEXT: lw ra, 12(sp)
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IA-LABEL: atomic_load_i8_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: lb a0, 0(a0)
; RV32IA-NEXT: ret
%1 = load atomic i8, i8* %a monotonic, align 1
ret i8 %1
}
@@ -40,6 +52,12 @@ define i8 @atomic_load_i8_acquire(i8 *%a) nounwind {
; RV32I-NEXT: lw ra, 12(sp)
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IA-LABEL: atomic_load_i8_acquire:
; RV32IA: # %bb.0:
; RV32IA-NEXT: lb a0, 0(a0)
; RV32IA-NEXT: fence r, rw
; RV32IA-NEXT: ret
%1 = load atomic i8, i8* %a acquire, align 1
ret i8 %1
}
@@ -54,6 +72,13 @@ define i8 @atomic_load_i8_seq_cst(i8 *%a) nounwind {
; RV32I-NEXT: lw ra, 12(sp)
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IA-LABEL: atomic_load_i8_seq_cst:
; RV32IA: # %bb.0:
; RV32IA-NEXT: fence rw, rw
; RV32IA-NEXT: lb a0, 0(a0)
; RV32IA-NEXT: fence r, rw
; RV32IA-NEXT: ret
%1 = load atomic i8, i8* %a seq_cst, align 1
ret i8 %1
}
@@ -68,6 +93,11 @@ define i16 @atomic_load_i16_unordered(i16 *%a) nounwind {
; RV32I-NEXT: lw ra, 12(sp)
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IA-LABEL: atomic_load_i16_unordered:
; RV32IA: # %bb.0:
; RV32IA-NEXT: lh a0, 0(a0)
; RV32IA-NEXT: ret
%1 = load atomic i16, i16* %a unordered, align 2
ret i16 %1
}
@@ -82,6 +112,11 @@ define i16 @atomic_load_i16_monotonic(i16 *%a) nounwind {
; RV32I-NEXT: lw ra, 12(sp)
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IA-LABEL: atomic_load_i16_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: lh a0, 0(a0)
; RV32IA-NEXT: ret
%1 = load atomic i16, i16* %a monotonic, align 2
ret i16 %1
}
@@ -96,6 +131,12 @@ define i16 @atomic_load_i16_acquire(i16 *%a) nounwind {
; RV32I-NEXT: lw ra, 12(sp)
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IA-LABEL: atomic_load_i16_acquire:
; RV32IA: # %bb.0:
; RV32IA-NEXT: lh a0, 0(a0)
; RV32IA-NEXT: fence r, rw
; RV32IA-NEXT: ret
%1 = load atomic i16, i16* %a acquire, align 2
ret i16 %1
}
@@ -110,6 +151,13 @@ define i16 @atomic_load_i16_seq_cst(i16 *%a) nounwind {
; RV32I-NEXT: lw ra, 12(sp)
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IA-LABEL: atomic_load_i16_seq_cst:
; RV32IA: # %bb.0:
; RV32IA-NEXT: fence rw, rw
; RV32IA-NEXT: lh a0, 0(a0)
; RV32IA-NEXT: fence r, rw
; RV32IA-NEXT: ret
%1 = load atomic i16, i16* %a seq_cst, align 2
ret i16 %1
}
@@ -124,6 +172,11 @@ define i32 @atomic_load_i32_unordered(i32 *%a) nounwind {
; RV32I-NEXT: lw ra, 12(sp)
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IA-LABEL: atomic_load_i32_unordered:
; RV32IA: # %bb.0:
; RV32IA-NEXT: lw a0, 0(a0)
; RV32IA-NEXT: ret
%1 = load atomic i32, i32* %a unordered, align 4
ret i32 %1
}
@@ -138,6 +191,11 @@ define i32 @atomic_load_i32_monotonic(i32 *%a) nounwind {
; RV32I-NEXT: lw ra, 12(sp)
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IA-LABEL: atomic_load_i32_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: lw a0, 0(a0)
; RV32IA-NEXT: ret
%1 = load atomic i32, i32* %a monotonic, align 4
ret i32 %1
}
@@ -152,6 +210,12 @@ define i32 @atomic_load_i32_acquire(i32 *%a) nounwind {
; RV32I-NEXT: lw ra, 12(sp)
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IA-LABEL: atomic_load_i32_acquire:
; RV32IA: # %bb.0:
; RV32IA-NEXT: lw a0, 0(a0)
; RV32IA-NEXT: fence r, rw
; RV32IA-NEXT: ret
%1 = load atomic i32, i32* %a acquire, align 4
ret i32 %1
}
@@ -166,6 +230,13 @@ define i32 @atomic_load_i32_seq_cst(i32 *%a) nounwind {
; RV32I-NEXT: lw ra, 12(sp)
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IA-LABEL: atomic_load_i32_seq_cst:
; RV32IA: # %bb.0:
; RV32IA-NEXT: fence rw, rw
; RV32IA-NEXT: lw a0, 0(a0)
; RV32IA-NEXT: fence r, rw
; RV32IA-NEXT: ret
%1 = load atomic i32, i32* %a seq_cst, align 4
ret i32 %1
}
@@ -180,6 +251,16 @@ define i64 @atomic_load_i64_unordered(i64 *%a) nounwind {
; RV32I-NEXT: lw ra, 12(sp)
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IA-LABEL: atomic_load_i64_unordered:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -16
; RV32IA-NEXT: sw ra, 12(sp)
; RV32IA-NEXT: mv a1, zero
; RV32IA-NEXT: call __atomic_load_8
; RV32IA-NEXT: lw ra, 12(sp)
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
%1 = load atomic i64, i64* %a unordered, align 8
ret i64 %1
}
@@ -194,6 +275,16 @@ define i64 @atomic_load_i64_monotonic(i64 *%a) nounwind {
; RV32I-NEXT: lw ra, 12(sp)
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IA-LABEL: atomic_load_i64_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -16
; RV32IA-NEXT: sw ra, 12(sp)
; RV32IA-NEXT: mv a1, zero
; RV32IA-NEXT: call __atomic_load_8
; RV32IA-NEXT: lw ra, 12(sp)
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
%1 = load atomic i64, i64* %a monotonic, align 8
ret i64 %1
}
@@ -208,6 +299,16 @@ define i64 @atomic_load_i64_acquire(i64 *%a) nounwind {
; RV32I-NEXT: lw ra, 12(sp)
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IA-LABEL: atomic_load_i64_acquire:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -16
; RV32IA-NEXT: sw ra, 12(sp)
; RV32IA-NEXT: addi a1, zero, 2
; RV32IA-NEXT: call __atomic_load_8
; RV32IA-NEXT: lw ra, 12(sp)
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
%1 = load atomic i64, i64* %a acquire, align 8
ret i64 %1
}
@@ -222,6 +323,16 @@ define i64 @atomic_load_i64_seq_cst(i64 *%a) nounwind {
; RV32I-NEXT: lw ra, 12(sp)
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IA-LABEL: atomic_load_i64_seq_cst:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -16
; RV32IA-NEXT: sw ra, 12(sp)
; RV32IA-NEXT: addi a1, zero, 5
; RV32IA-NEXT: call __atomic_load_8
; RV32IA-NEXT: lw ra, 12(sp)
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
%1 = load atomic i64, i64* %a seq_cst, align 8
ret i64 %1
}
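
In the RV32IA blocks above, i64 atomics still become __atomic_load_8
libcalls, and the constant materialized into a1 is the C ABI memory-order
encoding passed as the libcall's final argument: zero for unordered and
monotonic, 2 for acquire, 5 for seq_cst (the i64 stores later in this file
pass 3 for release in a3). For reference, these are the __ATOMIC_* values
predefined by Clang and GCC:

// C/C++ ABI memory-order encodings taken by the __atomic_* libcalls.
enum MemOrderABI {
  MO_RELAXED = 0, // unordered/monotonic: mv a1, zero
  MO_CONSUME = 1,
  MO_ACQUIRE = 2, // addi a1, zero, 2
  MO_RELEASE = 3, // addi a3, zero, 3 (i64 stores)
  MO_ACQ_REL = 4,
  MO_SEQ_CST = 5  // addi a1, zero, 5
};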
@@ -236,6 +347,11 @@ define void @atomic_store_i8_unordered(i8 *%a, i8 %b) nounwind {
; RV32I-NEXT: lw ra, 12(sp)
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IA-LABEL: atomic_store_i8_unordered:
; RV32IA: # %bb.0:
; RV32IA-NEXT: sb a1, 0(a0)
; RV32IA-NEXT: ret
store atomic i8 %b, i8* %a unordered, align 1
ret void
}
@@ -250,6 +366,11 @@ define void @atomic_store_i8_monotonic(i8 *%a, i8 %b) nounwind {
; RV32I-NEXT: lw ra, 12(sp)
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IA-LABEL: atomic_store_i8_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: sb a1, 0(a0)
; RV32IA-NEXT: ret
store atomic i8 %b, i8* %a monotonic, align 1
ret void
}
@@ -264,6 +385,12 @@ define void @atomic_store_i8_release(i8 *%a, i8 %b) nounwind {
; RV32I-NEXT: lw ra, 12(sp)
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IA-LABEL: atomic_store_i8_release:
; RV32IA: # %bb.0:
; RV32IA-NEXT: fence rw, w
; RV32IA-NEXT: sb a1, 0(a0)
; RV32IA-NEXT: ret
store atomic i8 %b, i8* %a release, align 1
ret void
}
@@ -278,6 +405,12 @@ define void @atomic_store_i8_seq_cst(i8 *%a, i8 %b) nounwind {
; RV32I-NEXT: lw ra, 12(sp)
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IA-LABEL: atomic_store_i8_seq_cst:
; RV32IA: # %bb.0:
; RV32IA-NEXT: fence rw, w
; RV32IA-NEXT: sb a1, 0(a0)
; RV32IA-NEXT: ret
store atomic i8 %b, i8* %a seq_cst, align 1
ret void
}
@@ -292,6 +425,11 @@ define void @atomic_store_i16_unordered(i16 *%a, i16 %b) nounwind {
; RV32I-NEXT: lw ra, 12(sp)
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IA-LABEL: atomic_store_i16_unordered:
; RV32IA: # %bb.0:
; RV32IA-NEXT: sh a1, 0(a0)
; RV32IA-NEXT: ret
store atomic i16 %b, i16* %a unordered, align 2
ret void
}
@@ -306,6 +444,11 @@ define void @atomic_store_i16_monotonic(i16 *%a, i16 %b) nounwind {
; RV32I-NEXT: lw ra, 12(sp)
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IA-LABEL: atomic_store_i16_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: sh a1, 0(a0)
; RV32IA-NEXT: ret
store atomic i16 %b, i16* %a monotonic, align 2
ret void
}
@@ -320,6 +463,12 @@ define void @atomic_store_i16_release(i16 *%a, i16 %b) nounwind {
; RV32I-NEXT: lw ra, 12(sp)
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IA-LABEL: atomic_store_i16_release:
; RV32IA: # %bb.0:
; RV32IA-NEXT: fence rw, w
; RV32IA-NEXT: sh a1, 0(a0)
; RV32IA-NEXT: ret
store atomic i16 %b, i16* %a release, align 2
ret void
}
@@ -334,6 +483,12 @@ define void @atomic_store_i16_seq_cst(i16 *%a, i16 %b) nounwind {
; RV32I-NEXT: lw ra, 12(sp)
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IA-LABEL: atomic_store_i16_seq_cst:
; RV32IA: # %bb.0:
; RV32IA-NEXT: fence rw, w
; RV32IA-NEXT: sh a1, 0(a0)
; RV32IA-NEXT: ret
store atomic i16 %b, i16* %a seq_cst, align 2
ret void
}
@@ -348,6 +503,11 @@ define void @atomic_store_i32_unordered(i32 *%a, i32 %b) nounwind {
; RV32I-NEXT: lw ra, 12(sp)
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IA-LABEL: atomic_store_i32_unordered:
; RV32IA: # %bb.0:
; RV32IA-NEXT: sw a1, 0(a0)
; RV32IA-NEXT: ret
store atomic i32 %b, i32* %a unordered, align 4
ret void
}
@@ -362,6 +522,11 @@ define void @atomic_store_i32_monotonic(i32 *%a, i32 %b) nounwind {
; RV32I-NEXT: lw ra, 12(sp)
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IA-LABEL: atomic_store_i32_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: sw a1, 0(a0)
; RV32IA-NEXT: ret
store atomic i32 %b, i32* %a monotonic, align 4
ret void
}
@@ -376,6 +541,12 @@ define void @atomic_store_i32_release(i32 *%a, i32 %b) nounwind {
; RV32I-NEXT: lw ra, 12(sp)
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IA-LABEL: atomic_store_i32_release:
; RV32IA: # %bb.0:
; RV32IA-NEXT: fence rw, w
; RV32IA-NEXT: sw a1, 0(a0)
; RV32IA-NEXT: ret
store atomic i32 %b, i32* %a release, align 4
ret void
}
@@ -390,6 +561,12 @@ define void @atomic_store_i32_seq_cst(i32 *%a, i32 %b) nounwind {
; RV32I-NEXT: lw ra, 12(sp)
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IA-LABEL: atomic_store_i32_seq_cst:
; RV32IA: # %bb.0:
; RV32IA-NEXT: fence rw, w
; RV32IA-NEXT: sw a1, 0(a0)
; RV32IA-NEXT: ret
store atomic i32 %b, i32* %a seq_cst, align 4
ret void
}
@@ -404,6 +581,16 @@ define void @atomic_store_i64_unordered(i64 *%a, i64 %b) nounwind {
; RV32I-NEXT: lw ra, 12(sp)
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IA-LABEL: atomic_store_i64_unordered:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -16
; RV32IA-NEXT: sw ra, 12(sp)
; RV32IA-NEXT: mv a3, zero
; RV32IA-NEXT: call __atomic_store_8
; RV32IA-NEXT: lw ra, 12(sp)
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
store atomic i64 %b, i64* %a unordered, align 8
ret void
}
@@ -418,6 +605,16 @@ define void @atomic_store_i64_monotonic(i64 *%a, i64 %b) nounwind {
; RV32I-NEXT: lw ra, 12(sp)
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IA-LABEL: atomic_store_i64_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -16
; RV32IA-NEXT: sw ra, 12(sp)
; RV32IA-NEXT: mv a3, zero
; RV32IA-NEXT: call __atomic_store_8
; RV32IA-NEXT: lw ra, 12(sp)
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
store atomic i64 %b, i64* %a monotonic, align 8
ret void
}
@@ -432,6 +629,16 @@ define void @atomic_store_i64_release(i64 *%a, i64 %b) nounwind {
; RV32I-NEXT: lw ra, 12(sp)
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IA-LABEL: atomic_store_i64_release:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -16
; RV32IA-NEXT: sw ra, 12(sp)
; RV32IA-NEXT: addi a3, zero, 3
; RV32IA-NEXT: call __atomic_store_8
; RV32IA-NEXT: lw ra, 12(sp)
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
store atomic i64 %b, i64* %a release, align 8
ret void
}
@@ -446,6 +653,16 @@ define void @atomic_store_i64_seq_cst(i64 *%a, i64 %b) nounwind {
; RV32I-NEXT: lw ra, 12(sp)
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IA-LABEL: atomic_store_i64_seq_cst:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -16
; RV32IA-NEXT: sw ra, 12(sp)
; RV32IA-NEXT: addi a3, zero, 5
; RV32IA-NEXT: call __atomic_store_8
; RV32IA-NEXT: lw ra, 12(sp)
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
store atomic i64 %b, i64* %a seq_cst, align 8
ret void
}