diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index 96f815f599b8..4de6e35b2e97 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -1538,19 +1538,22 @@ TargetLowering::SimplifySetCC(MVT VT, SDValue N0, SDValue N1,
         N0.getOperand(0).getNode()->hasOneUse() &&
         isa<ConstantSDNode>(N0.getOperand(1))) {
       LoadSDNode *Lod = cast<LoadSDNode>(N0.getOperand(0));
-      uint64_t Mask = cast<ConstantSDNode>(N0.getOperand(1))->getZExtValue();
       uint64_t bestMask = 0;
       unsigned bestWidth = 0, bestOffset = 0;
-      if (!Lod->isVolatile() && Lod->isUnindexed()) {
+      if (!Lod->isVolatile() && Lod->isUnindexed() &&
+          // FIXME: This uses getZExtValue() below so it only works on i64 and
+          // below.
+          N0.getValueType().getSizeInBits() <= 64) {
         unsigned origWidth = N0.getValueType().getSizeInBits();
         // We can narrow (e.g.) 16-bit extending loads on 32-bit target to
         // 8 bits, but have to be careful...
         if (Lod->getExtensionType() != ISD::NON_EXTLOAD)
           origWidth = Lod->getMemoryVT().getSizeInBits();
+        uint64_t Mask = cast<ConstantSDNode>(N0.getOperand(1))->getZExtValue();
         for (unsigned width = origWidth / 2; width>=8; width /= 2) {
           uint64_t newMask = (1ULL << width) - 1;
           for (unsigned offset=0; offset<origWidth/width; offset++) {
             if ((newMask & Mask) == Mask) {
               if (!TD->isLittleEndian())
                 bestOffset = (origWidth/width - offset - 1) * (width/8);
               else
diff --git a/llvm/test/CodeGen/Generic/2009-04-28-i128-cmp-crash.ll b/llvm/test/CodeGen/Generic/2009-04-28-i128-cmp-crash.ll
new file mode 100644
index 000000000000..405a6a8d6e90
--- /dev/null
+++ b/llvm/test/CodeGen/Generic/2009-04-28-i128-cmp-crash.ll
@@ -0,0 +1,16 @@
+; RUN: llvm-as < %s | llc
+; rdar://6836460
+
+define i32 @test(i128* %P) nounwind {
+entry:
+  %tmp48 = load i128* %P
+  %and49 = and i128 %tmp48, 18446744073709551616 ; <i128> [#uses=1]
+  %tobool = icmp ne i128 %and49, 0               ; <i1> [#uses=1]
+  br i1 %tobool, label %if.then50, label %if.end61
+
+if.then50:                                       ; preds = %if.then20
+  ret i32 1241
+
+if.end61:                                        ; preds = %if.then50, %if.then20, %entry
+  ret i32 123
+}
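
For context, below is a small standalone C++ sketch (not LLVM code; findNarrowedLoad
and its parameters are invented for illustration) of the sub-word search the patched
block performs. It operates on uint64_t masks, which is exactly why the patch must
now reject constants wider than 64 bits before calling getZExtValue(): on an i128
mask such as 2^64 (the value in the new test), the underlying APInt asserts inside
getZExtValue(), which is the crash the FIXME and the new guard address.

// Sketch only: models the narrowing search in SimplifySetCC above.
// findNarrowedLoad is a hypothetical name, not an LLVM API.
#include <cstdint>
#include <cstdio>

// Finds the narrowest naturally aligned sub-word of an origWidth-bit load
// that still contains every set bit of Mask. Requires origWidth <= 64 --
// the same precondition the patch adds before extracting the mask.
static bool findNarrowedLoad(uint64_t Mask, unsigned origWidth,
                             bool littleEndian,
                             unsigned &bestWidth, unsigned &bestOffset) {
  bool found = false;
  for (unsigned width = origWidth / 2; width >= 8; width /= 2) {
    uint64_t newMask = (1ULL << width) - 1;     // window of 'width' low bits
    for (unsigned offset = 0; offset < origWidth / width; offset++) {
      if ((newMask & Mask) == Mask) {           // all set bits in this window?
        bestWidth = width;                      // narrower widths overwrite
        bestOffset = littleEndian
                         ? offset * (width / 8)
                         : (origWidth / width - offset - 1) * (width / 8);
        found = true;
        break;                                  // try the next narrower width
      }
      newMask <<= width;                        // slide the window up
    }
  }
  return found;
}

int main() {
  unsigned w = 0, off = 0;
  // Mask 0xFF00 inside a 32-bit load: expect an 8-bit load at byte offset 1
  // on a little-endian target.
  if (findNarrowedLoad(0xFF00, 32, /*littleEndian=*/true, w, off))
    printf("narrow to %u bits at byte offset %u\n", w, off);
  return 0;
}

On a big-endian target the byte offset is mirrored, since the low-order window of
the value sits at the higher byte address; that is the origWidth/width - offset - 1
term in both the patch and the sketch.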