Add remaining 64-bit atomic patterns for x86-64.

llvm-svn: 55029
commit 6f765f392c (parent 847ebb90b8)
Author: Dale Johannesen
Date:   2008-08-20 00:48:50 +00:00
2 changed files with 60 additions and 0 deletions
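
For context, the patterns added below are what 64-bit atomic read-modify-write operations in C/C++ lower to on x86-64: the compiler turns each builtin into one of the atomic_load_* SelectionDAG nodes matched by the new ATOM*64 pseudos. A minimal, illustrative source example (the function name demo64 and the use of GCC-style __sync builtins are assumptions for illustration, not part of the commit):

    #include <cstdint>

    // Illustrative only: each builtin below is lowered to an atomic
    // SelectionDAG node (atomic_load_and, atomic_load_or, atomic_load_xor)
    // that one of the new ATOM*64 pseudo-instruction patterns matches.
    uint64_t demo64(uint64_t *p, uint64_t v) {
      uint64_t a = __sync_fetch_and_and(p, v);  // -> ATOMAND64
      uint64_t o = __sync_fetch_and_or(p, v);   // -> ATOMOR64
      uint64_t x = __sync_fetch_and_xor(p, v);  // -> ATOMXOR64
      return a ^ o ^ x;
    }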


@@ -6568,6 +6568,38 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
                                                X86::NOT8r, X86::AL,
                                                X86::GR8RegisterClass, true);
   // FIXME: There are no CMOV8 instructions; MIN/MAX need some other way.
+  case X86::ATOMAND64:
+    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND64rr,
+                                               X86::AND64ri32, X86::MOV64rm,
+                                               X86::LCMPXCHG64, X86::MOV64rr,
+                                               X86::NOT64r, X86::RAX,
+                                               X86::GR64RegisterClass);
+  case X86::ATOMOR64:
+    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR64rr,
+                                               X86::OR64ri32, X86::MOV64rm,
+                                               X86::LCMPXCHG64, X86::MOV64rr,
+                                               X86::NOT64r, X86::RAX,
+                                               X86::GR64RegisterClass);
+  case X86::ATOMXOR64:
+    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::XOR64rr,
+                                               X86::XOR64ri32, X86::MOV64rm,
+                                               X86::LCMPXCHG64, X86::MOV64rr,
+                                               X86::NOT64r, X86::RAX,
+                                               X86::GR64RegisterClass);
+  case X86::ATOMNAND64:
+    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND64rr,
+                                               X86::AND64ri32, X86::MOV64rm,
+                                               X86::LCMPXCHG64, X86::MOV64rr,
+                                               X86::NOT64r, X86::RAX,
+                                               X86::GR64RegisterClass, true);
+  case X86::ATOMMIN64:
+    return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVL64rr);
+  case X86::ATOMMAX64:
+    return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVG64rr);
+  case X86::ATOMUMIN64:
+    return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVB64rr);
+  case X86::ATOMUMAX64:
+    return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVA64rr);
   }
 }
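
x86-64 has no instruction that both performs a bitwise read-modify-write and returns the old value (plain lock-prefixed AND does not return it), so EmitAtomicBitwiseWithCustomInserter expands each pseudo into a load / op / lock cmpxchg retry loop built from the opcodes passed above (MOV64rm, AND64rr, MOV64rr, LCMPXCHG64; NOT64r is used only when the trailing true flag requests the extra inversion for NAND). A minimal sketch of the semantics in C++, with the illustrative name fetch_and64 (an assumption, not from the commit):

    #include <atomic>
    #include <cstdint>

    // Illustrative only: the behavior ATOMAND64 must implement, written as
    // the same compare-exchange retry loop the custom inserter emits.
    uint64_t fetch_and64(std::atomic<uint64_t> &mem, uint64_t val) {
      uint64_t old = mem.load();                 // MOV64rm: t1 = [ptr]
      // LCMPXCHG64 compares [ptr] with RAX (holding 'old') and stores
      // old & val on a match; on failure, compare_exchange_weak refreshes
      // 'old' with the current memory value and the loop retries.
      while (!mem.compare_exchange_weak(old, old & val)) {
      }
      return old;                                // result is the old value
    }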


@@ -1148,6 +1148,34 @@ def XCHG64rm : RI<0x87, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$ptr,GR64:$val)
                   [(set GR64:$dst, (atomic_swap_64 addr:$ptr, GR64:$val))]>;
 }
 
+// Atomic exchange, and, or, xor
+let Constraints = "$val = $dst", Defs = [EFLAGS],
+    usesCustomDAGSchedInserter = 1 in {
+def ATOMAND64 : I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
+               "#ATOMAND64 PSEUDO!",
+               [(set GR64:$dst, (atomic_load_and addr:$ptr, GR64:$val))]>;
+def ATOMOR64 : I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
+               "#ATOMOR64 PSEUDO!",
+               [(set GR64:$dst, (atomic_load_or addr:$ptr, GR64:$val))]>;
+def ATOMXOR64 : I<0, Pseudo,(outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
+               "#ATOMXOR64 PSEUDO!",
+               [(set GR64:$dst, (atomic_load_xor addr:$ptr, GR64:$val))]>;
+def ATOMNAND64 : I<0, Pseudo,(outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
+               "#ATOMNAND64 PSEUDO!",
+               [(set GR64:$dst, (atomic_load_nand addr:$ptr, GR64:$val))]>;
+def ATOMMIN64: I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val),
+               "#ATOMMIN64 PSEUDO!",
+               [(set GR64:$dst, (atomic_load_min addr:$ptr, GR64:$val))]>;
+def ATOMMAX64: I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
+               "#ATOMMAX64 PSEUDO!",
+               [(set GR64:$dst, (atomic_load_max addr:$ptr, GR64:$val))]>;
+def ATOMUMIN64: I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
+               "#ATOMUMIN64 PSEUDO!",
+               [(set GR64:$dst, (atomic_load_umin addr:$ptr, GR64:$val))]>;
+def ATOMUMAX64: I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
+               "#ATOMUMAX64 PSEUDO!",
+               [(set GR64:$dst, (atomic_load_umax addr:$ptr, GR64:$val))]>;
+}
 
 //===----------------------------------------------------------------------===//
 // Non-Instruction Patterns
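
There is likewise no 64-bit atomic min/max instruction, which is why the switch in the first hunk hands EmitAtomicMinMaxWithCustomInserter the matching conditional move for each pseudo defined above: CMOVL64rr for signed min, CMOVG64rr for signed max, CMOVB64rr and CMOVA64rr for the unsigned pair. A hedged C++ sketch of the signed-max case (the name fetch_max64 is illustrative, not from the commit):

    #include <atomic>
    #include <cstdint>

    // Illustrative only: what ATOMMAX64 computes. The select below is what
    // CMP + CMOVG64rr expresses inside the lock-cmpxchg retry loop.
    int64_t fetch_max64(std::atomic<int64_t> &mem, int64_t val) {
      int64_t old = mem.load();
      while (!mem.compare_exchange_weak(old, old > val ? old : val)) {
        // cmpxchg failed: 'old' was refreshed; recompute the max and retry.
      }
      return old;  // atomic_load_max yields the old value
    }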