diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 10538ffc27f1..fe8cebca9426 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -16360,6 +16360,20 @@ void X86TargetLowering::ReplaceNodeResults(SDNode *N,
     Results.push_back(EFLAGS.getValue(1));
     return;
   }
+  case ISD::ATOMIC_SWAP:
+  case ISD::ATOMIC_LOAD_ADD:
+  case ISD::ATOMIC_LOAD_SUB:
+  case ISD::ATOMIC_LOAD_AND:
+  case ISD::ATOMIC_LOAD_OR:
+  case ISD::ATOMIC_LOAD_XOR:
+  case ISD::ATOMIC_LOAD_NAND:
+  case ISD::ATOMIC_LOAD_MIN:
+  case ISD::ATOMIC_LOAD_MAX:
+  case ISD::ATOMIC_LOAD_UMIN:
+  case ISD::ATOMIC_LOAD_UMAX:
+    // Delegate to generic TypeLegalization. Situations we can really handle
+    // should have already been dealt with by X86AtomicExpand.cpp.
+    break;
   case ISD::ATOMIC_LOAD: {
     ReplaceATOMIC_LOAD(N, Results, DAG);
     return;
diff --git a/llvm/test/CodeGen/X86/atomic-ops-ancient-64.ll b/llvm/test/CodeGen/X86/atomic-ops-ancient-64.ll
new file mode 100644
index 000000000000..18749b902871
--- /dev/null
+++ b/llvm/test/CodeGen/X86/atomic-ops-ancient-64.ll
@@ -0,0 +1,43 @@
+; RUN: llc -mtriple=i386-linux-gnu %s -o - | FileCheck %s
+
+define i64 @test_add(i64* %addr, i64 %inc) {
+; CHECK-LABEL: test_add:
+; CHECK: calll __sync_fetch_and_add_8
+  %old = atomicrmw add i64* %addr, i64 %inc seq_cst
+  ret i64 %old
+}
+
+define i64 @test_sub(i64* %addr, i64 %inc) {
+; CHECK-LABEL: test_sub:
+; CHECK: calll __sync_fetch_and_sub_8
+  %old = atomicrmw sub i64* %addr, i64 %inc seq_cst
+  ret i64 %old
+}
+
+define i64 @test_and(i64* %andr, i64 %inc) {
+; CHECK-LABEL: test_and:
+; CHECK: calll __sync_fetch_and_and_8
+  %old = atomicrmw and i64* %andr, i64 %inc seq_cst
+  ret i64 %old
+}
+
+define i64 @test_or(i64* %orr, i64 %inc) {
+; CHECK-LABEL: test_or:
+; CHECK: calll __sync_fetch_and_or_8
+  %old = atomicrmw or i64* %orr, i64 %inc seq_cst
+  ret i64 %old
+}
+
+define i64 @test_xor(i64* %xorr, i64 %inc) {
+; CHECK-LABEL: test_xor:
+; CHECK: calll __sync_fetch_and_xor_8
+  %old = atomicrmw xor i64* %xorr, i64 %inc seq_cst
+  ret i64 %old
+}
+
+define i64 @test_nand(i64* %nandr, i64 %inc) {
+; CHECK-LABEL: test_nand:
+; CHECK: calll __sync_fetch_and_nand_8
+  %old = atomicrmw nand i64* %nandr, i64 %inc seq_cst
+  ret i64 %old
+}