Revert "[APInt] Fix a few places that use APInt::getRawData to operate within the normal API."

This reverts commits r301105, r301104, r301103 and r301101, as a follow-up to
the previous revert, which broke even more bots.

For reference:
Revert "[APInt] Use operator<<= where possible. NFC"
Revert "[APInt] Use operator<<= instead of shl where possible. NFC"
Revert "[APInt] Use ashInPlace where possible."

PR32754.

llvm-svn: 301111
Renato Golin 2017-04-23 12:15:30 +00:00
parent cc4a9120f6
commit 4abfb3d741
14 changed files with 38 additions and 46 deletions
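
Note (not part of the commit): the reverted commits had switched call sites from APInt's copying shift functions to its in-place operators; the hunks below undo that. A minimal sketch of the two styles, using made-up values:

    #include "llvm/ADT/APInt.h"
    using llvm::APInt;

    void shiftStyles() {
      APInt Mask(32, 0xFF);
      // In-place style introduced by the reverted commits: modifies Mask directly.
      Mask <<= 4;
      // Copying style this revert restores at the call sites below: shl() returns
      // a new value that is assigned back.
      Mask = Mask.shl(4);
    }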

include/llvm/ADT/APInt.h

@@ -876,13 +876,6 @@ public:
     return *this;
   }
-  /// \brief Left-shift assignment function.
-  ///
-  /// Shifts *this left by shiftAmt and assigns the result to *this.
-  ///
-  /// \returns *this after shifting left by ShiftAmt
-  APInt &operator<<=(const APInt &ShiftAmt);
   /// @}
   /// \name Binary Operators
   /// @{
@@ -964,11 +957,7 @@ public:
   /// \brief Left-shift function.
   ///
   /// Left-shift this APInt by shiftAmt.
-  APInt shl(const APInt &ShiftAmt) const {
-    APInt R(*this);
-    R <<= ShiftAmt;
-    return R;
-  }
+  APInt shl(const APInt &shiftAmt) const;
   /// \brief Rotate left by rotateAmt.
   APInt rotl(const APInt &rotateAmt) const;

lib/CodeGen/MIRParser/MIParser.cpp

@@ -1949,7 +1949,8 @@ bool MIParser::getHexUint(APInt &Result) {
     return true;
   StringRef V = S.substr(2);
   APInt A(V.size()*4, V, 16);
-  Result = A.zextOrTrunc(A.getActiveBits());
+  Result = APInt(A.getActiveBits(),
+                 ArrayRef<uint64_t>(A.getRawData(), A.getNumWords()));
   return false;
 }
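
Note (illustrative, not part of the commit): both forms in the hunk above shrink the parsed value to its active bits. A rough sketch of the equivalence, assuming a nonzero A so that getActiveBits() is positive:

    #include "llvm/ADT/APInt.h"
    #include "llvm/ADT/ArrayRef.h"
    #include <cstdint>
    using namespace llvm;

    APInt shrinkToActiveBits(const APInt &A) {
      // Reverted (normal-API) form: zero-extend or truncate to the active width.
      APInt Narrow = A.zextOrTrunc(A.getActiveBits());
      // Restored form: rebuild the value from its raw 64-bit words at the new width.
      APInt Same(A.getActiveBits(),
                 ArrayRef<uint64_t>(A.getRawData(), A.getNumWords()));
      return Narrow; // both hold the same bits
    }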

lib/CodeGen/SelectionDAG/DAGCombiner.cpp

@@ -5343,7 +5343,7 @@ SDValue DAGCombiner::visitSHL(SDNode *N) {
       APInt Mask = APInt::getHighBitsSet(OpSizeInBits, OpSizeInBits - c1);
       SDValue Shift;
       if (c2 > c1) {
-        Mask <<= c2 - c1;
+        Mask = Mask.shl(c2 - c1);
         SDLoc DL(N);
         Shift = DAG.getNode(ISD::SHL, DL, VT, N0.getOperand(0),
                             DAG.getConstant(c2 - c1, DL, N1.getValueType()));

lib/CodeGen/SelectionDAG/LegalizeDAG.cpp

@@ -2589,7 +2589,7 @@ SDValue SelectionDAGLegalize::ExpandBITREVERSE(SDValue Op, const SDLoc &dl) {
         DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(I - J, dl, SHVT));
     APInt Shift(Sz, 1);
-    Shift <<= J;
+    Shift = Shift.shl(J);
     Tmp2 = DAG.getNode(ISD::AND, dl, VT, Tmp2, DAG.getConstant(Shift, dl, VT));
     Tmp = DAG.getNode(ISD::OR, dl, VT, Tmp, Tmp2);
   }

lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp

@@ -158,7 +158,9 @@ SDValue DAGTypeLegalizer::SoftenFloatRes_ConstantFP(SDNode *N, unsigned ResNo) {
   // and the low 64 bits here.
   if (DAG.getDataLayout().isBigEndian() &&
       CN->getValueType(0).getSimpleVT() == llvm::MVT::ppcf128) {
-    APInt Val = CN->getValueAPF().bitcastToAPInt().rotl(64);
+    uint64_t words[2] = { CN->getValueAPF().bitcastToAPInt().getRawData()[1],
+                          CN->getValueAPF().bitcastToAPInt().getRawData()[0] };
+    APInt Val(128, words);
     return DAG.getConstant(Val, SDLoc(CN),
                            TLI.getTypeToTransformTo(*DAG.getContext(),
                                                     CN->getValueType(0)));
@@ -1058,10 +1060,10 @@ void DAGTypeLegalizer::ExpandFloatRes_ConstantFP(SDNode *N, SDValue &Lo,
   APInt C = cast<ConstantFPSDNode>(N)->getValueAPF().bitcastToAPInt();
   SDLoc dl(N);
   Lo = DAG.getConstantFP(APFloat(DAG.EVTToAPFloatSemantics(NVT),
-                                 C.extractBits(64, 64)),
+                                 APInt(64, C.getRawData()[1])),
                          dl, NVT);
   Hi = DAG.getConstantFP(APFloat(DAG.EVTToAPFloatSemantics(NVT),
-                                 C.extractBits(64, 0)),
+                                 APInt(64, C.getRawData()[0])),
                          dl, NVT);
 }
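
Note (illustrative, not part of the commit): the ppcf128 hunks above split a 128-bit constant into 64-bit halves. Roughly, the two spellings relate as follows, assuming C is 128 bits wide and the least-significant-word-first layout of getRawData():

    #include "llvm/ADT/APInt.h"
    using llvm::APInt;

    void splitHalves(const APInt &C) {  // C assumed to be 128 bits wide
      // Reverted (normal-API) form: extract 64 bits starting at bit 64 / bit 0.
      APInt HiA = C.extractBits(64, 64);
      APInt LoA = C.extractBits(64, 0);
      // Restored form: index the raw words directly; word 1 holds bits 64..127.
      APInt HiB(64, C.getRawData()[1]);
      APInt LoB(64, C.getRawData()[0]);
      (void)HiA; (void)LoA; (void)HiB; (void)LoB;
    }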

lib/CodeGen/SelectionDAG/SelectionDAG.cpp

@@ -2323,8 +2323,8 @@ void SelectionDAG::computeKnownBits(SDValue Op, APInt &KnownZero,
     if (const APInt *ShAmt = getValidShiftAmountConstant(Op)) {
       computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, DemandedElts,
                        Depth + 1);
-      KnownZero <<= *ShAmt;
-      KnownOne <<= *ShAmt;
+      KnownZero = KnownZero << *ShAmt;
+      KnownOne = KnownOne << *ShAmt;
       // Low bits are known zero.
       KnownZero.setLowBits(ShAmt->getZExtValue());
     }
@@ -4160,7 +4160,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
     auto SignExtendInReg = [&](APInt Val, llvm::EVT ConstantVT) {
       unsigned FromBits = EVT.getScalarSizeInBits();
       Val <<= Val.getBitWidth() - FromBits;
-      Val.ashrInPlace(Val.getBitWidth() - FromBits);
+      Val = Val.ashr(Val.getBitWidth() - FromBits);
       return getConstant(Val, DL, ConstantVT);
     };
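
Note (illustrative, not part of the commit): the SignExtendInReg lambda above sign-extends the low FromBits bits of Val; the revert only changes whether the arithmetic shift back down happens in place. A small sketch with a hypothetical wrapper name:

    #include "llvm/ADT/APInt.h"
    using llvm::APInt;

    APInt signExtendInReg(APInt Val, unsigned FromBits) {
      unsigned Shift = Val.getBitWidth() - FromBits;
      Val <<= Shift;          // move the low FromBits bits to the top
      // Reverted in-place form: Val.ashrInPlace(Shift);
      Val = Val.ashr(Shift);  // restored copying form: arithmetic shift back down
      return Val;
    }

For example, signExtendInReg(APInt(32, 0x80), 8) yields 0xFFFFFF80.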

lib/CodeGen/SelectionDAG/TargetLowering.cpp

@@ -1714,7 +1714,7 @@ SDValue TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
               bestWidth = width;
               break;
             }
-            newMask <<= width;
+            newMask = newMask << width;
           }
         }
       }
@@ -2981,7 +2981,7 @@ static SDValue BuildExactSDIV(const TargetLowering &TLI, SDValue Op1, APInt d,
     Flags.setExact(true);
     Op1 = DAG.getNode(ISD::SRA, dl, Op1.getValueType(), Op1, Amt, &Flags);
     Created.push_back(Op1.getNode());
-    d.ashrInPlace(ShAmt);
+    d = d.ashr(ShAmt);
   }
   // Calculate the multiplicative inverse, using Newton's method.

lib/ExecutionEngine/Interpreter/Execution.cpp

@@ -1565,7 +1565,7 @@ GenericValue Interpreter::executeBitCastInst(Value *SrcVal, Type *DstTy,
         Tmp = Tmp.zext(SrcBitSize);
         Tmp = TempSrc.AggregateVal[SrcElt++].IntVal;
         Tmp = Tmp.zext(DstBitSize);
-        Tmp <<= ShiftAmt;
+        Tmp = Tmp.shl(ShiftAmt);
         ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize;
         Elt.IntVal |= Tmp;
       }

lib/Support/APInt.cpp

@@ -844,7 +844,7 @@ APInt llvm::APIntOps::RoundDoubleToAPInt(double Double, unsigned width) {
   // Otherwise, we have to shift the mantissa bits up to the right location
   APInt Tmp(width, mantissa);
-  Tmp <<= (unsigned)exp - 52;
+  Tmp = Tmp.shl((unsigned)exp - 52);
   return isNeg ? -Tmp : Tmp;
 }
@@ -1128,10 +1128,9 @@ void APInt::lshrSlowCase(unsigned ShiftAmt) {
 /// Left-shift this APInt by shiftAmt.
 /// @brief Left-shift function.
-APInt &APInt::operator<<=(const APInt &shiftAmt) {
+APInt APInt::shl(const APInt &shiftAmt) const {
   // It's undefined behavior in C to shift by BitWidth or greater.
-  *this <<= (unsigned)shiftAmt.getLimitedValue(BitWidth);
-  return *this;
+  return shl((unsigned)shiftAmt.getLimitedValue(BitWidth));
 }
 void APInt::shlSlowCase(unsigned ShiftAmt) {
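
Note (illustrative, not part of the commit): both the removed operator<<=(const APInt &) and the restored shl(const APInt &) clamp the shift amount with getLimitedValue(BitWidth), because shifting by BitWidth or more is undefined behavior in C++. A small sketch of the effect:

    #include "llvm/ADT/APInt.h"
    using llvm::APInt;

    void clampedShift() {
      APInt V(8, 1);            // 8-bit value 0x01
      APInt HugeAmt(64, 1000);  // far larger than the 8-bit width
      // The amount is limited to 8, so the result is simply 0 rather than UB.
      APInt R = V.shl(HugeAmt);
      (void)R;
    }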

lib/Target/AArch64/AArch64ISelDAGToDAG.cpp

@@ -1852,17 +1852,17 @@ static void getUsefulBitsFromBitfieldMoveOpd(SDValue Op, APInt &UsefulBits,
   OpUsefulBits = 1;
   if (MSB >= Imm) {
-    OpUsefulBits <<= MSB - Imm + 1;
+    OpUsefulBits = OpUsefulBits.shl(MSB - Imm + 1);
     --OpUsefulBits;
     // The interesting part will be in the lower part of the result
     getUsefulBits(Op, OpUsefulBits, Depth + 1);
     // The interesting part was starting at Imm in the argument
-    OpUsefulBits <<= Imm;
+    OpUsefulBits = OpUsefulBits.shl(Imm);
   } else {
-    OpUsefulBits <<= MSB + 1;
+    OpUsefulBits = OpUsefulBits.shl(MSB + 1);
     --OpUsefulBits;
     // The interesting part will be shifted in the result
-    OpUsefulBits <<= OpUsefulBits.getBitWidth() - Imm;
+    OpUsefulBits = OpUsefulBits.shl(OpUsefulBits.getBitWidth() - Imm);
     getUsefulBits(Op, OpUsefulBits, Depth + 1);
     // The interesting part was at zero in the argument
     OpUsefulBits.lshrInPlace(OpUsefulBits.getBitWidth() - Imm);
@@ -1892,7 +1892,7 @@ static void getUsefulBitsFromOrWithShiftedReg(SDValue Op, APInt &UsefulBits,
   if (AArch64_AM::getShiftType(ShiftTypeAndValue) == AArch64_AM::LSL) {
     // Shift Left
     uint64_t ShiftAmt = AArch64_AM::getShiftValue(ShiftTypeAndValue);
-    Mask <<= ShiftAmt;
+    Mask = Mask.shl(ShiftAmt);
     getUsefulBits(Op, Mask, Depth + 1);
     Mask.lshrInPlace(ShiftAmt);
   } else if (AArch64_AM::getShiftType(ShiftTypeAndValue) == AArch64_AM::LSR) {
@@ -1902,7 +1902,7 @@ static void getUsefulBitsFromOrWithShiftedReg(SDValue Op, APInt &UsefulBits,
     uint64_t ShiftAmt = AArch64_AM::getShiftValue(ShiftTypeAndValue);
     Mask.lshrInPlace(ShiftAmt);
     getUsefulBits(Op, Mask, Depth + 1);
-    Mask <<= ShiftAmt;
+    Mask = Mask.shl(ShiftAmt);
   } else
     return;
@@ -1930,13 +1930,13 @@ static void getUsefulBitsFromBFM(SDValue Op, SDValue Orig, APInt &UsefulBits,
     uint64_t Width = MSB - Imm + 1;
     uint64_t LSB = Imm;
-    OpUsefulBits <<= Width;
+    OpUsefulBits = OpUsefulBits.shl(Width);
     --OpUsefulBits;
     if (Op.getOperand(1) == Orig) {
       // Copy the low bits from the result to bits starting from LSB.
       Mask = ResultUsefulBits & OpUsefulBits;
-      Mask <<= LSB;
+      Mask = Mask.shl(LSB);
     }
     if (Op.getOperand(0) == Orig)
@@ -1947,9 +1947,9 @@ static void getUsefulBitsFromBFM(SDValue Op, SDValue Orig, APInt &UsefulBits,
     uint64_t Width = MSB + 1;
     uint64_t LSB = UsefulBits.getBitWidth() - Imm;
-    OpUsefulBits <<= Width;
+    OpUsefulBits = OpUsefulBits.shl(Width);
     --OpUsefulBits;
-    OpUsefulBits <<= LSB;
+    OpUsefulBits = OpUsefulBits.shl(LSB);
     if (Op.getOperand(1) == Orig) {
       // Copy the bits from the result to the zero bits.

lib/Target/Hexagon/HexagonMCInstLower.cpp

@@ -124,7 +124,7 @@ void llvm::HexagonLowerToMC(const MCInstrInfo &MCII, const MachineInstr *MI,
       // FP immediates are used only when setting GPRs, so they may be dealt
       // with like regular immediates from this point on.
       auto Expr = HexagonMCExpr::create(
-          MCConstantExpr::create(Val.bitcastToAPInt().getZExtValue(),
+          MCConstantExpr::create(*Val.bitcastToAPInt().getRawData(),
                                  AP.OutContext),
           AP.OutContext);
       HexagonMCInstrInfo::setMustExtend(*Expr, MustExtend);
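
Note (illustrative, not part of the commit): in the hunk above the two accessors agree for FP immediates whose bit pattern fits in 64 bits, but they are not interchangeable in general:

    #include "llvm/ADT/APInt.h"
    using llvm::APInt;

    void lowWordAccessors(const APInt &V) {
      // Restored form: take the lowest 64-bit word unconditionally.
      uint64_t Raw = *V.getRawData();
      // Reverted (normal-API) form: same value, but asserts that the whole
      // value fits in 64 bits (getActiveBits() <= 64).
      uint64_t Zext = V.getZExtValue();
      (void)Raw; (void)Zext;
    }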

lib/Target/X86/X86ISelLowering.cpp

@@ -26717,8 +26717,8 @@ void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
       DAG.computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth + 1);
       unsigned ShAmt = ShiftImm->getZExtValue();
       if (Opc == X86ISD::VSHLI) {
-        KnownZero <<= ShAmt;
-        KnownOne <<= ShAmt;
+        KnownZero = KnownZero << ShAmt;
+        KnownOne = KnownOne << ShAmt;
         // Low bits are known zero.
         KnownZero.setLowBits(ShAmt);
       } else {
@@ -31054,7 +31054,8 @@ static SDValue combineShiftLeft(SDNode *N, SelectionDAG &DAG) {
       N0.getOperand(1).getOpcode() == ISD::Constant) {
     SDValue N00 = N0.getOperand(0);
     APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
-    Mask <<= N1C->getAPIntValue();
+    const APInt &ShAmt = N1C->getAPIntValue();
+    Mask = Mask.shl(ShAmt);
     bool MaskOK = false;
     // We can handle cases concerning bit-widening nodes containing setcc_c if
     // we carefully interrogate the mask to make sure we are semantics
@@ -31264,9 +31265,9 @@ static SDValue combineVectorShiftImm(SDNode *N, SelectionDAG &DAG,
     unsigned ShiftImm = ShiftVal.getZExtValue();
     for (APInt &Elt : EltBits) {
       if (X86ISD::VSHLI == Opcode)
-        Elt <<= ShiftImm;
+        Elt = Elt.shl(ShiftImm);
       else if (X86ISD::VSRAI == Opcode)
-        Elt.ashrInPlace(ShiftImm);
+        Elt = Elt.ashr(ShiftImm);
       else
         Elt.lshrInPlace(ShiftImm);
     }

lib/Transforms/InstCombine/InstCombineCalls.cpp

@@ -378,7 +378,7 @@ static Value *simplifyX86immShift(const IntrinsicInst &II,
       for (unsigned i = 0; i != NumSubElts; ++i) {
         unsigned SubEltIdx = (NumSubElts - 1) - i;
         auto SubElt = cast<ConstantInt>(CDV->getElementAsConstant(SubEltIdx));
-        Count <<= BitWidth;
+        Count = Count.shl(BitWidth);
         Count |= SubElt->getValue().zextOrTrunc(64);
       }
     }

lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp

@@ -1562,7 +1562,7 @@ Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
       for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
         APInt LaneElts = OpUndefElts.lshr(InnerVWidthPerLane * Lane);
         LaneElts = LaneElts.getLoBits(InnerVWidthPerLane);
-        LaneElts <<= InnerVWidthPerLane * (2 * Lane + OpNum);
+        LaneElts = LaneElts.shl(InnerVWidthPerLane * (2 * Lane + OpNum));
         UndefElts |= LaneElts;
       }
     }