Add direct support for the isnan intrinsic, implementing test/Regression/CodeGen/X86/isnan.llx testcase

llvm-svn: 14141
Chris Lattner 2004-06-11 04:31:10 +00:00
parent a0cfedef3a
commit 26a964f88e
1 changed file with 22 additions and 9 deletions

@@ -1628,6 +1628,7 @@ void ISel::LowerUnknownIntrinsicFunctionCalls(Function &F) {
   case Intrinsic::frameaddress:
   case Intrinsic::memcpy:
   case Intrinsic::memset:
+  case Intrinsic::isnan:
   case Intrinsic::readport:
   case Intrinsic::writeport:
     // We directly implement these intrinsics
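
Note: this first hunk only registers isnan in the list of intrinsics the selector emits inline, so LowerUnknownIntrinsicFunctionCalls leaves the call in place for visitIntrinsicCall instead of handing it to the generic IntrinsicLowering fallback. A minimal self-contained sketch of that dispatch pattern (hypothetical enum and function names, not the LLVM API):

    // Intrinsics listed in the switch are kept for the instruction
    // selector to implement directly; everything else falls through
    // to the generic call-lowering path.
    enum class IntrinsicID { FrameAddress, Memcpy, Memset, IsNan,
                             ReadPort, WritePort, Other };

    bool isDirectlySelected(IntrinsicID ID) {
      switch (ID) {
      case IntrinsicID::FrameAddress:
      case IntrinsicID::Memcpy:
      case IntrinsicID::Memset:
      case IntrinsicID::IsNan:      // the case this commit adds
      case IntrinsicID::ReadPort:
      case IntrinsicID::WritePort:
        return true;                // selector emits these inline
      default:
        return false;               // rewrite via IntrinsicLowering instead
      }
    }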
@@ -1636,19 +1637,19 @@ void ISel::LowerUnknownIntrinsicFunctionCalls(Function &F) {
     // On X86, memory operations are in-order. Lower this intrinsic
     // into a volatile load.
     Instruction *Before = CI->getPrev();
-    LoadInst * LI = new LoadInst (CI->getOperand(1), "", true, CI);
-    CI->replaceAllUsesWith (LI);
-    BB->getInstList().erase (CI);
+    LoadInst * LI = new LoadInst(CI->getOperand(1), "", true, CI);
+    CI->replaceAllUsesWith(LI);
+    BB->getInstList().erase(CI);
     break;
   }
   case Intrinsic::writeio: {
     // On X86, memory operations are in-order. Lower this intrinsic
     // into a volatile store.
     Instruction *Before = CI->getPrev();
-    StoreInst * LI = new StoreInst (CI->getOperand(1),
-                                    CI->getOperand(2), true, CI);
-    CI->replaceAllUsesWith (LI);
-    BB->getInstList().erase (CI);
+    StoreInst *LI = new StoreInst(CI->getOperand(1),
+                                  CI->getOperand(2), true, CI);
+    CI->replaceAllUsesWith(LI);
+    BB->getInstList().erase(CI);
     break;
   }
   default:
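
Note: the readio/writeio rewrites above rely on volatile memory semantics to preserve device-access ordering, as the comments say. A short portable sketch of why the load must be volatile (readMmioRegister is a hypothetical name):

    #include <cstdint>

    // A volatile read is an observable side effect: the compiler must
    // emit it exactly once, in program order, and may not fold or hoist
    // it -- matching the in-order device access llvm.readio promises.
    uint32_t readMmioRegister(volatile const uint32_t *Reg) {
      return *Reg;   // lowered to a real load every time it executes
    }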
@@ -1656,12 +1657,11 @@ void ISel::LowerUnknownIntrinsicFunctionCalls(Function &F) {
   Instruction *Before = CI->getPrev();
   TM.getIntrinsicLowering().LowerIntrinsicCall(CI);
   if (Before) {        // Move iterator to instruction after call
     I = Before; ++I;
   } else {
     I = BB->begin();
   }
 }
 }
 void ISel::visitIntrinsicCall(Intrinsic::ID ID, CallInst &CI) {
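
Note: LowerIntrinsicCall erases CI, invalidating the loop's iterator, which is why the code above snapshots CI->getPrev() beforehand and resumes either one past it or at BB->begin(). A self-contained sketch of the same idiom over std::list (eraseNegatives is a hypothetical stand-in, not the ISel code):

    #include <iterator>
    #include <list>

    // Erasing the current element invalidates I, so remember its
    // predecessor first and recompute the resume point afterwards,
    // falling back to begin() when the erased element was the first.
    void eraseNegatives(std::list<int> &BB) {
      for (auto I = BB.begin(); I != BB.end();) {
        if (*I < 0) {                      // stand-in for "unknown intrinsic"
          bool AtFront = (I == BB.begin());
          auto Before = AtFront ? BB.end() : std::prev(I);
          BB.erase(I);                     // like LowerIntrinsicCall erasing CI
          I = AtFront ? BB.begin() : std::next(Before);
        } else {
          ++I;
        }
      }
    }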
@@ -1698,6 +1698,19 @@ void ISel::visitIntrinsicCall(Intrinsic::ID ID, CallInst &CI) {
   }
   return;
+  case Intrinsic::isnan:
+    TmpReg1 = getReg(CI.getOperand(1));
+    if (0) { // for processors prior to the P6
+      BuildMI(BB, X86::FpUCOM, 2).addReg(TmpReg1).addReg(TmpReg1);
+      BuildMI(BB, X86::FNSTSW8r, 0);
+      BuildMI(BB, X86::SAHF, 1);
+    } else {
+      BuildMI(BB, X86::FpUCOMI, 2).addReg(TmpReg1).addReg(TmpReg1);
+    }
+    TmpReg2 = getReg(CI);
+    BuildMI(BB, X86::SETPr, 0, TmpReg2);
+    return;
   case Intrinsic::memcpy: {
     assert(CI.getNumOperands() == 5 && "Illegal llvm.memcpy call!");
     unsigned Align = 1;
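
Note: the new lowering leans on x87 flag semantics: FUCOMI compares st(0) against itself without faulting on quiet NaNs, an unordered result sets ZF, PF, and CF, and PF is raised only in the unordered case, so SETP yields exactly the isnan bit. FUCOMI/FCOMI first appeared with the P6 (Pentium Pro), which is why the disabled branch routes the flags through FNSTSW/SAHF for older processors. A portable sketch of the predicate being materialized (isNanBySelfCompare is a hypothetical name):

    #include <cassert>
    #include <cmath>

    // An unordered self-compare is true exactly when the operand is NaN;
    // on x86 this is the parity-flag condition the SETP byte captures.
    bool isNanBySelfCompare(double X) {
      return X != X;   // comparisons involving NaN are unordered
    }

    int main() {
      assert(isNanBySelfCompare(std::nan("")));
      assert(!isNanBySelfCompare(1.0 / 3.0));
      return 0;
    }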