completely eliminate the MOV16r0 'instruction'. The only
interesting part of this is the divrem changes, which are
already tested by CodeGen/X86/divrem.ll.

llvm-svn: 91975
Chris Lattner 2009-12-23 01:45:04 +00:00
parent 230a5d527e
commit 518b037620
4 changed files with 11 additions and 19 deletions

@@ -355,10 +355,6 @@ void X86MCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
   case X86::LEA64_32r: // Handle 'subreg rewriting' for the lea64_32mem operand.
     lower_lea64_32mem(&OutMI, 1);
     break;
-  case X86::MOV16r0:
-    OutMI.setOpcode(X86::MOV32r0);
-    lower_subreg32(&OutMI, 0);
-    break;
   case X86::MOVZX16rr8:
     OutMI.setOpcode(X86::MOVZX32rr8);
     lower_subreg32(&OutMI, 0);
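The deleted case above was the old MC-lowering shim for MOV16r0. Roughly (an illustrative sketch, not the patch's code; lower_subreg32 is the helper defined earlier in this file), it rewrote the pseudo into the 32-bit zero idiom at the last possible moment by swapping the opcode and widening the destination register to its 32-bit super-register:

// Illustrative sketch only: what the removed MOV16r0 case amounted to.
// The helper name lowerMOV16r0 is hypothetical; the register widening is
// assumed to go through getX86SubSuperRegister (e.g. DX -> EDX).
static void lowerMOV16r0(MCInst &OutMI) {
  OutMI.setOpcode(X86::MOV32r0);                   // switch to the 32-bit xor pseudo
  unsigned Reg = OutMI.getOperand(0).getReg();
  OutMI.getOperand(0).setReg(getX86SubSuperRegister(Reg, MVT::i32));
}

Once the selector (next file) never creates MOV16r0 in the first place, this late rewrite has nothing left to do and can be deleted.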

@@ -1867,27 +1867,28 @@ SDNode *X86DAGToDAGISel::Select(SDValue N) {
       }
     }
-    unsigned LoReg, HiReg;
+    unsigned LoReg, HiReg, ClrReg;
     unsigned ClrOpcode, SExtOpcode;
+    EVT ClrVT = NVT;
     switch (NVT.getSimpleVT().SimpleTy) {
     default: llvm_unreachable("Unsupported VT!");
     case MVT::i8:
-      LoReg = X86::AL; HiReg = X86::AH;
+      LoReg = X86::AL; ClrReg = HiReg = X86::AH;
       ClrOpcode = 0;
       SExtOpcode = X86::CBW;
       break;
     case MVT::i16:
       LoReg = X86::AX; HiReg = X86::DX;
-      ClrOpcode = X86::MOV16r0;
+      ClrOpcode = X86::MOV32r0; ClrReg = X86::EDX; ClrVT = MVT::i32;
       SExtOpcode = X86::CWD;
       break;
     case MVT::i32:
-      LoReg = X86::EAX; HiReg = X86::EDX;
+      LoReg = X86::EAX; ClrReg = HiReg = X86::EDX;
       ClrOpcode = X86::MOV32r0;
       SExtOpcode = X86::CDQ;
       break;
     case MVT::i64:
-      LoReg = X86::RAX; HiReg = X86::RDX;
+      LoReg = X86::RAX; ClrReg = HiReg = X86::RDX;
       ClrOpcode = ~0U; // NOT USED.
       SExtOpcode = X86::CQO;
       break;
@@ -1942,10 +1943,10 @@ SDNode *X86DAGToDAGISel::Select(SDValue N) {
                                            MVT::i64, Zero, ClrNode, SubRegNo),
                     0);
         } else {
-          ClrNode = SDValue(CurDAG->getMachineNode(ClrOpcode, dl, NVT), 0);
+          ClrNode = SDValue(CurDAG->getMachineNode(ClrOpcode, dl, ClrVT), 0);
         }
-        InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, HiReg,
+        InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, ClrReg,
                                       ClrNode, InFlag).getValue(1);
       }
     }
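For the i16 divide/remainder path, the two hunks above boil down to the following (a minimal sketch assuming CurDAG, dl and InFlag as in the surrounding Select() code): the zero for the high half of the dividend is now materialized as a 32-bit MOV32r0 and copied into EDX, whose DX subregister is what the 16-bit DIV actually reads, so MOV16r0 is never needed.

// Minimal sketch of the new i16 clear (the values of ClrOpcode, ClrVT and
// ClrReg are taken from the MVT::i16 case above); not a verbatim excerpt.
SDValue ClrNode =
    SDValue(CurDAG->getMachineNode(X86::MOV32r0, dl, MVT::i32), 0);
InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, X86::EDX,
                              ClrNode, InFlag).getValue(1);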

@@ -1018,13 +1018,11 @@ void X86InstrInfo::reMaterialize(MachineBasicBlock &MBB,
   switch (Opc) {
   default: break;
   case X86::MOV8r0:
-  case X86::MOV16r0:
   case X86::MOV32r0: {
     if (!isSafeToClobberEFLAGS(MBB, I)) {
       switch (Opc) {
       default: break;
       case X86::MOV8r0: Opc = X86::MOV8ri; break;
-      case X86::MOV16r0: Opc = X86::MOV16ri; break;
       case X86::MOV32r0: Opc = X86::MOV32ri; break;
       }
       Clone = false;
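
The MOV*r0 pseudos are xors and therefore clobber EFLAGS, which is why reMaterialize keeps the fallback shown above. As a rough sketch (the parameter name Orig is an assumption about the surrounding function, not text from the patch):

// Rough sketch of the remaining fallback: if EFLAGS is live across the
// rematerialization point, the zero is re-emitted as a move-immediate
// instead of cloning the flag-clobbering xor pseudo.
unsigned Opc = Orig->getOpcode();
if ((Opc == X86::MOV8r0 || Opc == X86::MOV32r0) &&
    !isSafeToClobberEFLAGS(MBB, I))
  Opc = (Opc == X86::MOV8r0) ? X86::MOV8ri : X86::MOV32ri; // then emit Opc with immediate 0
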
@@ -2292,9 +2290,7 @@ X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
     OpcodeTablePtr = &RegOp2MemOpTable2Addr;
     isTwoAddrFold = true;
   } else if (i == 0) { // If operand 0
-    if (MI->getOpcode() == X86::MOV16r0)
-      NewMI = MakeM0Inst(*this, X86::MOV16mi, MOs, MI);
-    else if (MI->getOpcode() == X86::MOV32r0)
+    if (MI->getOpcode() == X86::MOV32r0)
       NewMI = MakeM0Inst(*this, X86::MOV32mi, MOs, MI);
     else if (MI->getOpcode() == X86::MOV8r0)
       NewMI = MakeM0Inst(*this, X86::MOV8mi, MOs, MI);
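
The fold above exists because an xor-based zero has no memory form: when the register defined by MOV32r0 (or MOV8r0) is spilled, the whole instruction becomes a direct store of the immediate 0. A rough sketch of what the MakeM0Inst call produces (a hypothetical standalone helper, not the one defined in X86InstrInfo.cpp; it assumes the usual LLVM codegen headers):

// Hypothetical helper illustrating the fold: build "mov $0, <mem>" from the
// folded address operands (MOs) followed by the zero immediate.
static MachineInstr *buildStoreOfZero(const TargetInstrInfo &TII,
                                      MachineFunction &MF, DebugLoc DL,
                                      unsigned StoreOpc, // e.g. X86::MOV32mi
                                      const SmallVectorImpl<MachineOperand> &MOs) {
  MachineInstrBuilder MIB = BuildMI(MF, DL, TII.get(StoreOpc));
  for (unsigned i = 0, e = MOs.size(); i != e; ++i)
    MIB.addOperand(MOs[i]); // the memory address (base, scale, index, disp, segment)
  MIB.addImm(0);            // the value being stored
  return MIB;
}
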
@@ -2563,7 +2559,6 @@ bool X86InstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
   } else if (OpNum == 0) { // If operand 0
     switch (Opc) {
     case X86::MOV8r0:
-    case X86::MOV16r0:
     case X86::MOV32r0:
       return true;
     default: break;

@@ -3707,8 +3707,8 @@ def MOV8r0 : I<0x30, MRMInitReg, (outs GR8 :$dst), (ins),
 // Use xorl instead of xorw since we don't care about the high 16 bits,
 // it's smaller, and it avoids a partial-register update.
-def MOV16r0 : I<0x31, MRMInitReg, (outs GR16:$dst), (ins),
-                "", [/*(set GR16:$dst, 0)*/]>;
+//def MOV16r0 : I<0x31, MRMInitReg, (outs GR16:$dst), (ins),
+//              "", [/*(set GR16:$dst, 0)*/]>;
 }
 let AddedComplexity = 1 in
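
The retained comment above is the whole rationale. As a tiny standalone illustration (plain C++, not LLVM code): only the low 16 bits of the zero matter, so clearing the full 32-bit register with xorl is just as correct as xorw, one byte shorter (no 0x66 operand-size prefix), and it writes the whole register, so later reads of the 16-bit subregister see no partial-register update.

#include <cassert>
#include <cstdint>

int main() {
  uint32_t edx = 0;                         // what MOV32r0 (xorl %edx,%edx) leaves in EDX
  uint16_t dx = static_cast<uint16_t>(edx); // the DX view a 16-bit consumer reads
  assert(dx == 0);                          // identical result to the old MOV16r0
  return 0;
}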