Replace "fallthrough" comments with LLVM_FALLTHROUGH

This is a mechanical change that replaces switch-case comments such as
"fallthrough", "fall-through", or "fall-thru" with the LLVM_FALLTHROUGH
macro.

llvm-svn: 278902
Justin Bogner 2016-08-17 05:10:15 +00:00
parent 0dace2d3a1
commit b03fd12cef
60 changed files with 160 additions and 134 deletions
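For context, LLVM_FALLTHROUGH is defined in llvm/Support/Compiler.h and expands to an explicit fallthrough attribute (such as [[clang::fallthrough]]) when the compiler supports one, and to nothing otherwise, which lets tooling verify an intent that a plain comment cannot convey. Below is a minimal sketch of the before/after pattern this commit applies; the classify() function and its switch are illustrative only and do not come from any of the changed files (building it assumes the LLVM headers are on the include path).

#include "llvm/Support/Compiler.h" // defines LLVM_FALLTHROUGH

int classify(int Kind, int Value) {
  switch (Kind) {
  case 0:
    if (Value < 0)
      break;
    // Before this change the intent was recorded only as a comment, e.g.
    //   // fall-through
    // which no compiler or static analyzer can check.
    LLVM_FALLTHROUGH; // deliberately continue into case 1
  case 1:
    return Value + 1;
  default:
    return Value;
  }
  return 0;
}

With the annotation in place, a build using Clang's -Wimplicit-fallthrough (where supported) still flags any case that falls through without either a break or the macro, making the fallthrough checkable rather than purely documentary.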

View File

@ -2773,7 +2773,7 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
Q.CxtI, Q.DT);
if (!KnownNonNegative)
break;
// fall-through
LLVM_FALLTHROUGH;
case ICmpInst::ICMP_EQ:
case ICmpInst::ICMP_UGT:
case ICmpInst::ICMP_UGE:
@ -2784,7 +2784,7 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
Q.CxtI, Q.DT);
if (!KnownNonNegative)
break;
// fall-through
LLVM_FALLTHROUGH;
case ICmpInst::ICMP_NE:
case ICmpInst::ICMP_ULT:
case ICmpInst::ICMP_ULE:
@ -2804,7 +2804,7 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
Q.CxtI, Q.DT);
if (!KnownNonNegative)
break;
// fall-through
LLVM_FALLTHROUGH;
case ICmpInst::ICMP_NE:
case ICmpInst::ICMP_UGT:
case ICmpInst::ICMP_UGE:
@ -2815,7 +2815,7 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
Q.CxtI, Q.DT);
if (!KnownNonNegative)
break;
// fall-through
LLVM_FALLTHROUGH;
case ICmpInst::ICMP_EQ:
case ICmpInst::ICMP_ULT:
case ICmpInst::ICMP_ULE:
@ -2877,7 +2877,7 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
case Instruction::LShr:
if (ICmpInst::isSigned(Pred))
break;
// fall-through
LLVM_FALLTHROUGH;
case Instruction::SDiv:
case Instruction::AShr:
if (!LBO->isExact() || !RBO->isExact())

View File

@ -537,7 +537,7 @@ bool TargetLibraryInfoImpl::isValidProtoForLibFunc(const FunctionType &FTy,
--NumParams;
if (!IsSizeTTy(FTy.getParamType(NumParams)))
return false;
// fallthrough
LLVM_FALLTHROUGH;
case LibFunc::strcpy:
case LibFunc::stpcpy:
return (NumParams == 2 && FTy.getReturnType() == FTy.getParamType(0) &&
@ -549,7 +549,7 @@ bool TargetLibraryInfoImpl::isValidProtoForLibFunc(const FunctionType &FTy,
--NumParams;
if (!IsSizeTTy(FTy.getParamType(NumParams)))
return false;
// fallthrough
LLVM_FALLTHROUGH;
case LibFunc::strncpy:
case LibFunc::stpncpy:
return (NumParams == 3 && FTy.getReturnType() == FTy.getParamType(0) &&
@ -642,7 +642,7 @@ bool TargetLibraryInfoImpl::isValidProtoForLibFunc(const FunctionType &FTy,
--NumParams;
if (!IsSizeTTy(FTy.getParamType(NumParams)))
return false;
// fallthrough
LLVM_FALLTHROUGH;
case LibFunc::memcpy:
case LibFunc::mempcpy:
case LibFunc::memmove:
@ -655,7 +655,7 @@ bool TargetLibraryInfoImpl::isValidProtoForLibFunc(const FunctionType &FTy,
--NumParams;
if (!IsSizeTTy(FTy.getParamType(NumParams)))
return false;
// fallthrough
LLVM_FALLTHROUGH;
case LibFunc::memset:
return (NumParams == 3 && FTy.getReturnType() == FTy.getParamType(0) &&
FTy.getParamType(0)->isPointerTy() &&

View File

@ -2271,7 +2271,7 @@ std::error_code BitcodeReader::parseMetadata(bool ModuleLevel) {
}
case bitc::METADATA_DISTINCT_NODE:
IsDistinct = true;
// fallthrough...
LLVM_FALLTHROUGH;
case bitc::METADATA_NODE: {
SmallVector<Metadata *, 8> Elts;
Elts.reserve(Record.size());
@ -3355,7 +3355,7 @@ std::error_code BitcodeReader::parseUseLists() {
break;
case bitc::USELIST_CODE_BB:
IsBB = true;
// fallthrough
LLVM_FALLTHROUGH;
case bitc::USELIST_CODE_DEFAULT: {
unsigned RecordLength = Record.size();
if (RecordLength < 3)

View File

@ -1583,7 +1583,7 @@ bool MIParser::parseMachineOperand(MachineOperand &Dest,
lex();
break;
}
// fallthrough
LLVM_FALLTHROUGH;
default:
// FIXME: Parse the MCSymbol machine operand.
return error("expected a machine operand");

View File

@ -145,7 +145,7 @@ ScoreboardHazardRecognizer::getHazardType(SUnit *SU, int Stalls) {
case InstrStage::Required:
// Required FUs conflict with both reserved and required ones
freeUnits &= ~ReservedScoreboard[StageCycle];
// FALLTHROUGH
LLVM_FALLTHROUGH;
case InstrStage::Reserved:
// Reserved FUs can conflict only with required ones.
freeUnits &= ~RequiredScoreboard[StageCycle];
@ -197,7 +197,7 @@ void ScoreboardHazardRecognizer::EmitInstruction(SUnit *SU) {
case InstrStage::Required:
// Required FUs conflict with both reserved and required ones
freeUnits &= ~ReservedScoreboard[cycle + i];
// FALLTHROUGH
LLVM_FALLTHROUGH;
case InstrStage::Reserved:
// Reserved FUs can conflict only with required ones.
freeUnits &= ~RequiredScoreboard[cycle + i];

View File

@ -801,7 +801,7 @@ void SelectionDAGLegalize::LegalizeLoadOps(SDNode *Node) {
default: llvm_unreachable("This action is not supported yet!");
case TargetLowering::Custom:
isCustom = true;
// FALLTHROUGH
LLVM_FALLTHROUGH;
case TargetLowering::Legal: {
Value = SDValue(Node, 0);
Chain = SDValue(Node, 1);
@ -1598,6 +1598,7 @@ bool SelectionDAGLegalize::LegalizeSetCCCondCode(EVT VT, SDValue &LHS,
break;
}
// Fallthrough if we are unsigned integer.
LLVM_FALLTHROUGH;
case ISD::SETLE:
case ISD::SETGT:
case ISD::SETGE:

View File

@ -1776,7 +1776,7 @@ void DAGTypeLegalizer::ExpandIntRes_ADDSUB(SDNode *N,
switch (BoolType) {
case TargetLoweringBase::UndefinedBooleanContent:
OVF = DAG.getNode(ISD::AND, dl, NVT, DAG.getConstant(1, dl, NVT), OVF);
// Fallthrough
LLVM_FALLTHROUGH;
case TargetLoweringBase::ZeroOrOneBooleanContent:
Hi = DAG.getNode(N->getOpcode(), dl, NVT, Hi, OVF);
break;

View File

@ -2481,7 +2481,7 @@ void SelectionDAG::computeKnownBits(SDValue Op, APInt &KnownZero,
default:
if (Op.getOpcode() < ISD::BUILTIN_OP_END)
break;
// Fallthrough
LLVM_FALLTHROUGH;
case ISD::INTRINSIC_WO_CHAIN:
case ISD::INTRINSIC_W_CHAIN:
case ISD::INTRINSIC_VOID:
@ -3868,7 +3868,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
// Handle undef ^ undef -> 0 special case. This is a common
// idiom (misuse).
return getConstant(0, DL, VT);
// fallthrough
LLVM_FALLTHROUGH;
case ISD::ADD:
case ISD::ADDC:
case ISD::ADDE:

View File

@ -483,7 +483,7 @@ void TargetPassConfig::addPassesToHandleExceptions() {
// pad is shared by multiple invokes and is also a target of a normal
// edge from elsewhere.
addPass(createSjLjEHPreparePass());
// FALLTHROUGH
LLVM_FALLTHROUGH;
case ExceptionHandling::DwarfCFI:
case ExceptionHandling::ARM:
addPass(createDwarfEHPass(TM));

View File

@ -97,7 +97,8 @@ public:
(void)p;
assert((*p & 0x3B000000) == 0x39000000 &&
"Only expected load / store instructions.");
} // fall-through
LLVM_FALLTHROUGH;
}
case MachO::ARM64_RELOC_PAGEOFF12: {
// Verify that the relocation points to one of the expected load / store
// or add / sub instructions.
@ -196,7 +197,8 @@ public:
assert((*p & 0x3B000000) == 0x39000000 &&
"Only expected load / store instructions.");
(void)p;
} // fall-through
LLVM_FALLTHROUGH;
}
case MachO::ARM64_RELOC_PAGEOFF12: {
// Verify that the relocation points to one of the expected load / store
// or add / sub instructions.

View File

@ -925,7 +925,7 @@ Constant *llvm::ConstantFoldBinaryInstruction(unsigned Opcode,
// Handle undef ^ undef -> 0 special case. This is a common
// idiom (misuse).
return Constant::getNullValue(C1->getType());
// Fallthrough
LLVM_FALLTHROUGH;
case Instruction::Add:
case Instruction::Sub:
return UndefValue::get(C1->getType());

View File

@ -265,7 +265,7 @@ bool InlineAsm::Verify(FunctionType *Ty, StringRef ConstStr) {
break;
}
++NumIndirect;
// FALLTHROUGH for Indirect Outputs.
LLVM_FALLTHROUGH; // We fall through for Indirect Outputs.
case InlineAsm::isInput:
if (NumClobbers) return false; // inputs before clobbers.
++NumInputs;

View File

@ -449,7 +449,7 @@ static Value *stripPointerCastsAndOffsets(Value *V) {
case PSK_InBoundsConstantIndices:
if (!GEP->hasAllConstantIndices())
return V;
// fallthrough
LLVM_FALLTHROUGH;
case PSK_InBounds:
if (!GEP->isInBounds())
return V;
@ -848,7 +848,7 @@ void ValueHandleBase::ValueIsRAUWd(Value *Old, Value *New) {
// virtual (or inline) interface to handle this though, so instead we make
// the TrackingVH accessors guarantee that a client never sees this value.
// FALLTHROUGH
LLVM_FALLTHROUGH;
case Weak:
// Weak goes to the new value, which will unlink it from Old's list.
Entry->operator=(New);

View File

@ -36,7 +36,7 @@ Expected<std::unique_ptr<SymbolicFile>> SymbolicFile::createSymbolicFile(
case sys::fs::file_magic::bitcode:
if (Context)
return errorOrToExpected(IRObjectFile::create(Object, *Context));
// Fallthrough
LLVM_FALLTHROUGH;
case sys::fs::file_magic::unknown:
case sys::fs::file_magic::archive:
case sys::fs::file_magic::macho_universal_binary:

View File

@ -317,7 +317,7 @@ static std::string getOptionHelpName(const OptTable &Opts, OptSpecifier Id) {
case Option::SeparateClass: case Option::JoinedOrSeparateClass:
case Option::RemainingArgsClass: case Option::RemainingArgsJoinedClass:
Name += ' ';
// FALLTHROUGH
LLVM_FALLTHROUGH;
case Option::JoinedClass: case Option::CommaJoinedClass:
case Option::JoinedAndSeparateClass:
if (const char *MetaVarName = Opts.getOptionMetaVar(Id))

View File

@ -155,7 +155,7 @@ tgtok::TokKind TGLexer::LexToken() {
case '0': case '1':
if (NextChar == 'b')
return LexNumber();
// Fallthrough
LLVM_FALLTHROUGH;
case '2': case '3': case '4': case '5':
case '6': case '7': case '8': case '9':
case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':

View File

@ -1286,6 +1286,7 @@ Init *TGParser::ParseSimpleValue(Record *CurRec, RecTy *ItemType,
continue;
}
// Fallthrough to try convert this to a bit.
LLVM_FALLTHROUGH;
}
// All other values must be convertible to just a single bit.
Init *Bit = Vals[i]->convertInitializerTo(BitRecTy::get());

View File

@ -1179,7 +1179,8 @@ static void changeVectorFPCCToAArch64CC(ISD::CondCode CC,
changeFPCCToAArch64CC(CC, CondCode, CondCode2);
break;
case ISD::SETUO:
Invert = true; // Fallthrough
Invert = true;
LLVM_FALLTHROUGH;
case ISD::SETO:
CondCode = AArch64CC::MI;
CondCode2 = AArch64CC::GE;
@ -6720,8 +6721,8 @@ static SDValue EmitVectorComparison(SDValue LHS, SDValue RHS,
case AArch64CC::LT:
if (!NoNans)
return SDValue();
// If we ignore NaNs then we can use to the MI implementation.
// Fallthrough.
// If we ignore NaNs then we can use to the MI implementation.
LLVM_FALLTHROUGH;
case AArch64CC::MI:
if (IsZero)
return DAG.getNode(AArch64ISD::FCMLTz, dl, VT, LHS);

View File

@ -375,7 +375,8 @@ static unsigned canFoldIntoCSel(const MachineRegisterInfo &MRI, unsigned VReg,
// if NZCV is used, do not fold.
if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) == -1)
return 0;
// fall-through to ADDXri and ADDWri.
// fall-through to ADDXri and ADDWri.
LLVM_FALLTHROUGH;
case AArch64::ADDXri:
case AArch64::ADDWri:
// add x, 1 -> csinc.
@ -402,7 +403,8 @@ static unsigned canFoldIntoCSel(const MachineRegisterInfo &MRI, unsigned VReg,
// if NZCV is used, do not fold.
if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) == -1)
return 0;
// fall-through to SUBXrr and SUBWrr.
// fall-through to SUBXrr and SUBWrr.
LLVM_FALLTHROUGH;
case AArch64::SUBXrr:
case AArch64::SUBWrr: {
// neg x -> csneg, represented as sub dst, xzr, src.

View File

@ -3455,7 +3455,7 @@ bool AArch64AsmParser::validateInstruction(MCInst &Inst,
if (RI->isSubRegisterEq(Rn, Rt2))
return Error(Loc[1], "unpredictable LDP instruction, writeback base "
"is also a destination");
// FALLTHROUGH
LLVM_FALLTHROUGH;
}
case AArch64::LDPDi:
case AArch64::LDPQi:

View File

@ -1097,7 +1097,7 @@ static DecodeStatus DecodeExclusiveLdStInstruction(llvm::MCInst &Inst,
case AArch64::STXRB:
case AArch64::STXRH:
DecodeGPR32RegisterClass(Inst, Rs, Addr, Decoder);
// FALLTHROUGH
LLVM_FALLTHROUGH;
case AArch64::LDARW:
case AArch64::LDARB:
case AArch64::LDARH:
@ -1121,7 +1121,7 @@ static DecodeStatus DecodeExclusiveLdStInstruction(llvm::MCInst &Inst,
case AArch64::STLXRX:
case AArch64::STXRX:
DecodeGPR32RegisterClass(Inst, Rs, Addr, Decoder);
// FALLTHROUGH
LLVM_FALLTHROUGH;
case AArch64::LDARX:
case AArch64::LDAXRX:
case AArch64::LDXRX:
@ -1133,7 +1133,7 @@ static DecodeStatus DecodeExclusiveLdStInstruction(llvm::MCInst &Inst,
case AArch64::STLXPW:
case AArch64::STXPW:
DecodeGPR32RegisterClass(Inst, Rs, Addr, Decoder);
// FALLTHROUGH
LLVM_FALLTHROUGH;
case AArch64::LDAXPW:
case AArch64::LDXPW:
DecodeGPR32RegisterClass(Inst, Rt, Addr, Decoder);
@ -1142,7 +1142,7 @@ static DecodeStatus DecodeExclusiveLdStInstruction(llvm::MCInst &Inst,
case AArch64::STLXPX:
case AArch64::STXPX:
DecodeGPR32RegisterClass(Inst, Rs, Addr, Decoder);
// FALLTHROUGH
LLVM_FALLTHROUGH;
case AArch64::LDAXPX:
case AArch64::LDXPX:
DecodeGPR64RegisterClass(Inst, Rt, Addr, Decoder);
@ -1218,7 +1218,7 @@ static DecodeStatus DecodePairLdStInstruction(llvm::MCInst &Inst, uint32_t insn,
case AArch64::STPXpre:
case AArch64::LDPSWpre:
NeedsDisjointWritebackTransfer = true;
// Fallthrough
LLVM_FALLTHROUGH;
case AArch64::LDNPXi:
case AArch64::STNPXi:
case AArch64::LDPXi:
@ -1232,7 +1232,7 @@ static DecodeStatus DecodePairLdStInstruction(llvm::MCInst &Inst, uint32_t insn,
case AArch64::LDPWpre:
case AArch64::STPWpre:
NeedsDisjointWritebackTransfer = true;
// Fallthrough
LLVM_FALLTHROUGH;
case AArch64::LDNPWi:
case AArch64::STNPWi:
case AArch64::LDPWi:

View File

@ -586,9 +586,10 @@ void R600TargetLowering::ReplaceNodeResults(SDNode *N,
Results.push_back(lowerFP_TO_UINT(N->getOperand(0), DAG));
return;
}
// Fall-through. Since we don't care about out of bounds values
// we can use FP_TO_SINT for uints too. The DAGLegalizer code for uint
// considers some extra cases which are not necessary here.
// Since we don't care about out of bounds values we can use FP_TO_SINT for
// uints too. The DAGLegalizer code for uint considers some extra cases
// which are not necessary here.
LLVM_FALLTHROUGH;
case ISD::FP_TO_SINT: {
if (N->getValueType(0) == MVT::i1) {
Results.push_back(lowerFP_TO_SINT(N->getOperand(0), DAG));

View File

@ -2389,7 +2389,7 @@ SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
// have the same legalization requires ments as global and private
// loads.
//
// Fall-through
LLVM_FALLTHROUGH;
case AMDGPUAS::GLOBAL_ADDRESS:
case AMDGPUAS::FLAT_ADDRESS:
if (NumElements > 4)

View File

@ -1721,7 +1721,7 @@ bool SIInstrInfo::verifyInstruction(const MachineInstr &MI,
ErrInfo = "Expected immediate, but got non-immediate";
return false;
}
// Fall-through
LLVM_FALLTHROUGH;
default:
continue;
}

View File

@ -249,7 +249,7 @@ bool ARMAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNum,
<< "]";
return false;
}
// Fallthrough
LLVM_FALLTHROUGH;
case 'c': // Don't print "#" before an immediate operand.
if (!MI->getOperand(OpNum).isImm())
return true;

View File

@ -684,7 +684,7 @@ initializeFunctionInfo(const std::vector<MachineInstr*> &CPEMIs) {
case ARM::Bcc:
isCond = true;
UOpc = ARM::B;
// Fallthrough
LLVM_FALLTHROUGH;
case ARM::B:
Bits = 24;
Scale = 4;

View File

@ -1075,7 +1075,8 @@ bool ARMFastISel::ARMEmitStore(MVT VT, unsigned SrcReg, Address &Addr,
TII.get(Opc), Res)
.addReg(SrcReg).addImm(1));
SrcReg = Res;
} // Fallthrough here.
LLVM_FALLTHROUGH;
}
case MVT::i8:
if (isThumb2) {
if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
@ -1848,7 +1849,7 @@ CCAssignFn *ARMFastISel::CCAssignFnForCall(CallingConv::ID CC,
// For AAPCS ABI targets, just use VFP variant of the calling convention.
return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
}
// Fallthrough
LLVM_FALLTHROUGH;
case CallingConv::C:
case CallingConv::CXX_FAST_TLS:
// Use target triple & subtarget features to do actual dispatch.

View File

@ -356,7 +356,7 @@ void ARMFrameLowering::emitPrologue(MachineFunction &MF,
GPRCS2Size += 4;
break;
}
// fallthrough
LLVM_FALLTHROUGH;
case ARM::R0:
case ARM::R1:
case ARM::R2:
@ -559,7 +559,7 @@ void ARMFrameLowering::emitPrologue(MachineFunction &MF,
case ARM::R12:
if (STI.splitFramePushPop())
break;
// fallthrough
LLVM_FALLTHROUGH;
case ARM::R0:
case ARM::R1:
case ARM::R2:
@ -1558,7 +1558,7 @@ void ARMFrameLowering::determineCalleeSaves(MachineFunction &MF,
switch (Reg) {
case ARM::LR:
LRSpilled = true;
// Fallthrough
LLVM_FALLTHROUGH;
case ARM::R0: case ARM::R1:
case ARM::R2: case ARM::R3:
case ARM::R4: case ARM::R5:

View File

@ -4420,7 +4420,7 @@ SelectInlineAsmMemoryOperand(const SDValue &Op, unsigned ConstraintID,
case InlineAsm::Constraint_i:
// FIXME: It seems strange that 'i' is needed here since it's supposed to
// be an immediate and not a memory constraint.
// Fallthrough.
LLVM_FALLTHROUGH;
case InlineAsm::Constraint_m:
case InlineAsm::Constraint_o:
case InlineAsm::Constraint_Q:

View File

@ -4906,22 +4906,22 @@ static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) {
switch (SetCCOpcode) {
default: llvm_unreachable("Illegal FP comparison");
case ISD::SETUNE:
case ISD::SETNE: Invert = true; // Fallthrough
case ISD::SETNE: Invert = true; LLVM_FALLTHROUGH;
case ISD::SETOEQ:
case ISD::SETEQ: Opc = ARMISD::VCEQ; break;
case ISD::SETOLT:
case ISD::SETLT: Swap = true; // Fallthrough
case ISD::SETLT: Swap = true; LLVM_FALLTHROUGH;
case ISD::SETOGT:
case ISD::SETGT: Opc = ARMISD::VCGT; break;
case ISD::SETOLE:
case ISD::SETLE: Swap = true; // Fallthrough
case ISD::SETLE: Swap = true; LLVM_FALLTHROUGH;
case ISD::SETOGE:
case ISD::SETGE: Opc = ARMISD::VCGE; break;
case ISD::SETUGE: Swap = true; // Fallthrough
case ISD::SETUGE: Swap = true; LLVM_FALLTHROUGH;
case ISD::SETULE: Invert = true; Opc = ARMISD::VCGT; break;
case ISD::SETUGT: Swap = true; // Fallthrough
case ISD::SETUGT: Swap = true; LLVM_FALLTHROUGH;
case ISD::SETULT: Invert = true; Opc = ARMISD::VCGE; break;
case ISD::SETUEQ: Invert = true; // Fallthrough
case ISD::SETUEQ: Invert = true; LLVM_FALLTHROUGH;
case ISD::SETONE:
// Expand this to (OLT | OGT).
TmpOp0 = Op0;
@ -4930,7 +4930,9 @@ static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) {
Op0 = DAG.getNode(ARMISD::VCGT, dl, CmpVT, TmpOp1, TmpOp0);
Op1 = DAG.getNode(ARMISD::VCGT, dl, CmpVT, TmpOp0, TmpOp1);
break;
case ISD::SETUO: Invert = true; // Fallthrough
case ISD::SETUO:
Invert = true;
LLVM_FALLTHROUGH;
case ISD::SETO:
// Expand this to (OLT | OGE).
TmpOp0 = Op0;

View File

@ -5425,7 +5425,7 @@ bool ARMAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
return false;
}
// w/ a ':' after the '#', it's just like a plain ':'.
// FALLTHROUGH
LLVM_FALLTHROUGH;
}
case AsmToken::Colon: {
S = Parser.getTok().getLoc();

View File

@ -375,7 +375,7 @@ unsigned ARMAsmBackend::adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
case ARM::fixup_arm_movt_hi16:
if (!IsPCRel)
Value >>= 16;
// Fallthrough
LLVM_FALLTHROUGH;
case ARM::fixup_arm_movw_lo16: {
unsigned Hi4 = (Value & 0xF000) >> 12;
unsigned Lo12 = Value & 0x0FFF;
@ -387,7 +387,7 @@ unsigned ARMAsmBackend::adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
case ARM::fixup_t2_movt_hi16:
if (!IsPCRel)
Value >>= 16;
// Fallthrough
LLVM_FALLTHROUGH;
case ARM::fixup_t2_movw_lo16: {
unsigned Hi4 = (Value & 0xF000) >> 12;
unsigned i = (Value & 0x800) >> 11;
@ -403,7 +403,7 @@ unsigned ARMAsmBackend::adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
case ARM::fixup_arm_ldst_pcrel_12:
// ARM PC-relative values are offset by 8.
Value -= 4;
// FALLTHROUGH
LLVM_FALLTHROUGH;
case ARM::fixup_t2_ldst_pcrel_12: {
// Offset by 4, adjusted by two due to the half-word ordering of thumb.
Value -= 4;

View File

@ -1493,7 +1493,7 @@ getT2SORegOpValue(const MCInst &MI, unsigned OpIdx,
case ARM_AM::lsl: SBits = 0x0; break;
case ARM_AM::lsr: SBits = 0x2; break;
case ARM_AM::asr: SBits = 0x4; break;
case ARM_AM::rrx: // FALLTHROUGH
case ARM_AM::rrx: LLVM_FALLTHROUGH;
case ARM_AM::ror: SBits = 0x6; break;
}

View File

@ -208,7 +208,7 @@ RecordARMScatteredHalfRelocation(MachObjectWriter *Writer,
if (Asm.isThumbFunc(A))
FixedValue &= 0xfffffffe;
MovtBit = 1;
// Fallthrough
LLVM_FALLTHROUGH;
case ARM::fixup_t2_movw_lo16:
ThumbBit = 1;
break;

View File

@ -154,7 +154,7 @@ void Thumb1FrameLowering::emitPrologue(MachineFunction &MF,
GPRCS2Size += 4;
break;
}
// fallthrough
LLVM_FALLTHROUGH;
case ARM::R4:
case ARM::R5:
case ARM::R6:

View File

@ -651,7 +651,7 @@ Thumb2SizeReduce::ReduceSpecial(MachineBasicBlock &MBB, MachineInstr *MI,
case ARM::t2ADDSri: {
if (ReduceTo2Addr(MBB, MI, Entry, LiveCPSR, IsSelfLoop))
return true;
// fallthrough
LLVM_FALLTHROUGH;
}
case ARM::t2ADDSrr:
return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop);

View File

@ -172,7 +172,7 @@ void AVRRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
Opcode = AVR::ADIWRdK;
break;
}
// Fallthrough
LLVM_FALLTHROUGH;
}
default: {
// This opcode will get expanded into a pair of subi/sbci.

View File

@ -569,8 +569,8 @@ public:
if (!Resolved) {
switch ((unsigned)Fixup.getKind()) {
case fixup_Hexagon_B22_PCREL:
// GetFixupCount assumes B22 won't relax
// Fallthrough
// GetFixupCount assumes B22 won't relax
LLVM_FALLTHROUGH;
default:
return false;
break;

View File

@ -215,7 +215,7 @@ bool HexagonShuffler::check() {
break;
case HexagonII::TypeJR:
++jumpr;
// Fall-through.
LLVM_FALLTHROUGH;
case HexagonII::TypeJ:
++jumps;
break;

View File

@ -807,7 +807,8 @@ static SDValue EmitCMP(SDValue &LHS, SDValue &RHS, SDValue &TargetCC,
std::swap(LHS, RHS);
break;
case ISD::SETULE:
std::swap(LHS, RHS); // FALLTHROUGH
std::swap(LHS, RHS);
LLVM_FALLTHROUGH;
case ISD::SETUGE:
// Turn lhs u>= rhs with lhs constant into rhs u< lhs+1, this allows us to
// fold constant into instruction.
@ -820,7 +821,8 @@ static SDValue EmitCMP(SDValue &LHS, SDValue &RHS, SDValue &TargetCC,
TCC = MSP430CC::COND_HS; // aka COND_C
break;
case ISD::SETUGT:
std::swap(LHS, RHS); // FALLTHROUGH
std::swap(LHS, RHS);
LLVM_FALLTHROUGH;
case ISD::SETULT:
// Turn lhs u< rhs with lhs constant into rhs u>= lhs+1, this allows us to
// fold constant into instruction.
@ -833,7 +835,8 @@ static SDValue EmitCMP(SDValue &LHS, SDValue &RHS, SDValue &TargetCC,
TCC = MSP430CC::COND_LO; // aka COND_NC
break;
case ISD::SETLE:
std::swap(LHS, RHS); // FALLTHROUGH
std::swap(LHS, RHS);
LLVM_FALLTHROUGH;
case ISD::SETGE:
// Turn lhs >= rhs with lhs constant into rhs < lhs+1, this allows us to
// fold constant into instruction.
@ -846,7 +849,8 @@ static SDValue EmitCMP(SDValue &LHS, SDValue &RHS, SDValue &TargetCC,
TCC = MSP430CC::COND_GE;
break;
case ISD::SETGT:
std::swap(LHS, RHS); // FALLTHROUGH
std::swap(LHS, RHS);
LLVM_FALLTHROUGH;
case ISD::SETLT:
// Turn lhs < rhs with lhs constant into rhs >= lhs+1, this allows us to
// fold constant into instruction.

View File

@ -1662,7 +1662,7 @@ static DecodeStatus DecodeMemMMImm12(MCInst &Inst,
break;
case Mips::SC_MM:
Inst.addOperand(MCOperand::createReg(Reg));
// fallthrough
LLVM_FALLTHROUGH;
default:
Inst.addOperand(MCOperand::createReg(Reg));
if (Inst.getOpcode() == Mips::LWP_MM || Inst.getOpcode() == Mips::SWP_MM ||

View File

@ -531,7 +531,7 @@ bool MipsELFObjectWriter::needsRelocateWithSymbol(const MCSymbol &Sym,
case ELF::R_MIPS_GPREL32:
if (cast<MCSymbolELF>(Sym).getOther() & ELF::STO_MIPS_MICROMIPS)
return true;
// fallthrough
LLVM_FALLTHROUGH;
case ELF::R_MIPS_26:
case ELF::R_MIPS_64:
case ELF::R_MIPS_GPREL16:

View File

@ -2765,19 +2765,19 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
break;
case CCValAssign::SExtUpper:
UseUpperBits = true;
// Fallthrough
LLVM_FALLTHROUGH;
case CCValAssign::SExt:
Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, LocVT, Arg);
break;
case CCValAssign::ZExtUpper:
UseUpperBits = true;
// Fallthrough
LLVM_FALLTHROUGH;
case CCValAssign::ZExt:
Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, LocVT, Arg);
break;
case CCValAssign::AExtUpper:
UseUpperBits = true;
// Fallthrough
LLVM_FALLTHROUGH;
case CCValAssign::AExt:
Arg = DAG.getNode(ISD::ANY_EXTEND, DL, LocVT, Arg);
break;
@ -3235,19 +3235,19 @@ MipsTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
break;
case CCValAssign::AExtUpper:
UseUpperBits = true;
// Fallthrough
LLVM_FALLTHROUGH;
case CCValAssign::AExt:
Val = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Val);
break;
case CCValAssign::ZExtUpper:
UseUpperBits = true;
// Fallthrough
LLVM_FALLTHROUGH;
case CCValAssign::ZExt:
Val = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Val);
break;
case CCValAssign::SExtUpper:
UseUpperBits = true;
// Fallthrough
LLVM_FALLTHROUGH;
case CCValAssign::SExt:
Val = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Val);
break;

View File

@ -1571,7 +1571,8 @@ bool PPCAsmParser::ParseOperand(OperandVector &Operands) {
return false;
}
}
// Fall-through to process non-register-name identifiers as expression.
// Fall-through to process non-register-name identifiers as expression.
LLVM_FALLTHROUGH;
// All other expressions
case AsmToken::LParen:
case AsmToken::Plus:
@ -1644,7 +1645,7 @@ bool PPCAsmParser::ParseOperand(OperandVector &Operands) {
break;
}
}
// Fall-through..
LLVM_FALLTHROUGH;
default:
return Error(S, "invalid memory operand");

View File

@ -3575,7 +3575,8 @@ void PPCDAGToDAGISel::PeepholeCROps() {
Op.getOperand(0) == Op.getOperand(1))
Op2Not = true;
}
} // fallthrough
LLVM_FALLTHROUGH;
}
case PPC::BC:
case PPC::BCn:
case PPC::SELECT_I4:

View File

@ -3748,7 +3748,7 @@ SDValue PPCTargetLowering::LowerFormalArguments_Darwin(
ArgOffset += PtrByteSize;
break;
}
// FALLTHROUGH
LLVM_FALLTHROUGH;
case MVT::i64: // PPC64
if (GPR_idx != Num_GPR_Regs) {
unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);

View File

@ -131,12 +131,12 @@ int PPCTTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
return TTI::TCC_Free;
case Instruction::And:
RunFree = true; // (for the rotate-and-mask instructions)
// Fallthrough...
LLVM_FALLTHROUGH;
case Instruction::Add:
case Instruction::Or:
case Instruction::Xor:
ShiftedFree = true;
// Fallthrough...
LLVM_FALLTHROUGH;
case Instruction::Sub:
case Instruction::Mul:
case Instruction::Shl:
@ -147,7 +147,8 @@ int PPCTTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
case Instruction::ICmp:
UnsignedFree = true;
ImmIdx = 1;
// Fallthrough... (zero comparisons can use record-form instructions)
// Zero comparisons can use record-form instructions.
LLVM_FALLTHROUGH;
case Instruction::Select:
ZeroFree = true;
break;

View File

@ -182,18 +182,18 @@ getX86ConditionCode(CmpInst::Predicate Predicate) {
default: break;
// Floating-point Predicates
case CmpInst::FCMP_UEQ: CC = X86::COND_E; break;
case CmpInst::FCMP_OLT: NeedSwap = true; // fall-through
case CmpInst::FCMP_OLT: NeedSwap = true; LLVM_FALLTHROUGH;
case CmpInst::FCMP_OGT: CC = X86::COND_A; break;
case CmpInst::FCMP_OLE: NeedSwap = true; // fall-through
case CmpInst::FCMP_OLE: NeedSwap = true; LLVM_FALLTHROUGH;
case CmpInst::FCMP_OGE: CC = X86::COND_AE; break;
case CmpInst::FCMP_UGT: NeedSwap = true; // fall-through
case CmpInst::FCMP_UGT: NeedSwap = true; LLVM_FALLTHROUGH;
case CmpInst::FCMP_ULT: CC = X86::COND_B; break;
case CmpInst::FCMP_UGE: NeedSwap = true; // fall-through
case CmpInst::FCMP_UGE: NeedSwap = true; LLVM_FALLTHROUGH;
case CmpInst::FCMP_ULE: CC = X86::COND_BE; break;
case CmpInst::FCMP_ONE: CC = X86::COND_NE; break;
case CmpInst::FCMP_UNO: CC = X86::COND_P; break;
case CmpInst::FCMP_ORD: CC = X86::COND_NP; break;
case CmpInst::FCMP_OEQ: // fall-through
case CmpInst::FCMP_OEQ: LLVM_FALLTHROUGH;
case CmpInst::FCMP_UNE: CC = X86::COND_INVALID; break;
// Integer Predicates
@ -229,15 +229,15 @@ getX86SSEConditionCode(CmpInst::Predicate Predicate) {
switch (Predicate) {
default: llvm_unreachable("Unexpected predicate");
case CmpInst::FCMP_OEQ: CC = 0; break;
case CmpInst::FCMP_OGT: NeedSwap = true; // fall-through
case CmpInst::FCMP_OGT: NeedSwap = true; LLVM_FALLTHROUGH;
case CmpInst::FCMP_OLT: CC = 1; break;
case CmpInst::FCMP_OGE: NeedSwap = true; // fall-through
case CmpInst::FCMP_OGE: NeedSwap = true; LLVM_FALLTHROUGH;
case CmpInst::FCMP_OLE: CC = 2; break;
case CmpInst::FCMP_UNO: CC = 3; break;
case CmpInst::FCMP_UNE: CC = 4; break;
case CmpInst::FCMP_ULE: NeedSwap = true; // fall-through
case CmpInst::FCMP_ULE: NeedSwap = true; LLVM_FALLTHROUGH;
case CmpInst::FCMP_UGE: CC = 5; break;
case CmpInst::FCMP_ULT: NeedSwap = true; // fall-through
case CmpInst::FCMP_ULT: NeedSwap = true; LLVM_FALLTHROUGH;
case CmpInst::FCMP_UGT: CC = 6; break;
case CmpInst::FCMP_ORD: CC = 7; break;
case CmpInst::FCMP_UEQ:
@ -518,8 +518,8 @@ bool X86FastISel::X86FastEmitStore(EVT VT, unsigned ValReg, bool ValIsKill,
TII.get(X86::AND8ri), AndResult)
.addReg(ValReg, getKillRegState(ValIsKill)).addImm(1);
ValReg = AndResult;
LLVM_FALLTHROUGH; // handle i1 as i8.
}
// FALLTHROUGH, handling i1 as i8.
case MVT::i8: Opc = X86::MOV8mr; break;
case MVT::i16: Opc = X86::MOV16mr; break;
case MVT::i32:
@ -659,7 +659,9 @@ bool X86FastISel::X86FastEmitStore(EVT VT, const Value *Val,
bool Signed = true;
switch (VT.getSimpleVT().SimpleTy) {
default: break;
case MVT::i1: Signed = false; // FALLTHROUGH to handle as i8.
case MVT::i1:
Signed = false;
LLVM_FALLTHROUGH; // Handle as i8.
case MVT::i8: Opc = X86::MOV8mi; break;
case MVT::i16: Opc = X86::MOV16mi; break;
case MVT::i32: Opc = X86::MOV32mi; break;
@ -1601,7 +1603,8 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) {
switch (Predicate) {
default: break;
case CmpInst::FCMP_OEQ:
std::swap(TrueMBB, FalseMBB); // fall-through
std::swap(TrueMBB, FalseMBB);
LLVM_FALLTHROUGH;
case CmpInst::FCMP_UNE:
NeedExtraBranch = true;
Predicate = CmpInst::FCMP_ONE;

View File

@ -2700,7 +2700,7 @@ SelectInlineAsmMemoryOperand(const SDValue &Op, unsigned ConstraintID,
case InlineAsm::Constraint_i:
// FIXME: It seems strange that 'i' is needed here since it's supposed to
// be an immediate and not a memory constraint.
// Fallthrough.
LLVM_FALLTHROUGH;
case InlineAsm::Constraint_o: // offsetable ??
case InlineAsm::Constraint_v: // not offsetable ??
case InlineAsm::Constraint_m: // memory

View File

@ -7567,7 +7567,7 @@ static SDValue lowerVectorShuffleAsBlend(const SDLoc &DL, MVT VT, SDValue V1,
case MVT::v4i64:
case MVT::v8i32:
assert(Subtarget.hasAVX2() && "256-bit integer blends require AVX2!");
// FALLTHROUGH
LLVM_FALLTHROUGH;
case MVT::v2i64:
case MVT::v4i32:
// If we have AVX2 it is faster to use VPBLENDD when the shuffle fits into
@ -7583,7 +7583,7 @@ static SDValue lowerVectorShuffleAsBlend(const SDLoc &DL, MVT VT, SDValue V1,
VT, DAG.getNode(X86ISD::BLENDI, DL, BlendVT, V1, V2,
DAG.getConstant(BlendMask, DL, MVT::i8)));
}
// FALLTHROUGH
LLVM_FALLTHROUGH;
case MVT::v8i16: {
// For integer shuffles we need to expand the mask and cast the inputs to
// v8i16s prior to blending.
@ -7609,8 +7609,8 @@ static SDValue lowerVectorShuffleAsBlend(const SDLoc &DL, MVT VT, SDValue V1,
return DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
DAG.getConstant(BlendMask, DL, MVT::i8));
}
LLVM_FALLTHROUGH;
}
// FALLTHROUGH
case MVT::v16i8:
case MVT::v32i8: {
assert((VT.is128BitVector() || Subtarget.hasAVX2()) &&
@ -15383,19 +15383,19 @@ static int translateX86FSETCC(ISD::CondCode SetCCOpcode, SDValue &Op0,
case ISD::SETOEQ:
case ISD::SETEQ: SSECC = 0; break;
case ISD::SETOGT:
case ISD::SETGT: Swap = true; // Fallthrough
case ISD::SETGT: Swap = true; LLVM_FALLTHROUGH;
case ISD::SETLT:
case ISD::SETOLT: SSECC = 1; break;
case ISD::SETOGE:
case ISD::SETGE: Swap = true; // Fallthrough
case ISD::SETGE: Swap = true; LLVM_FALLTHROUGH;
case ISD::SETLE:
case ISD::SETOLE: SSECC = 2; break;
case ISD::SETUO: SSECC = 3; break;
case ISD::SETUNE:
case ISD::SETNE: SSECC = 4; break;
case ISD::SETULE: Swap = true; // Fallthrough
case ISD::SETULE: Swap = true; LLVM_FALLTHROUGH;
case ISD::SETUGE: SSECC = 5; break;
case ISD::SETULT: Swap = true; // Fallthrough
case ISD::SETULT: Swap = true; LLVM_FALLTHROUGH;
case ISD::SETUGT: SSECC = 6; break;
case ISD::SETO: SSECC = 7; break;
case ISD::SETUEQ:
@ -15501,12 +15501,12 @@ static SDValue LowerIntVSETCC_AVX512(SDValue Op, SelectionDAG &DAG) {
case ISD::SETNE: SSECC = 4; break;
case ISD::SETEQ: Opc = X86ISD::PCMPEQM; break;
case ISD::SETUGT: SSECC = 6; Unsigned = true; break;
case ISD::SETLT: Swap = true; //fall-through
case ISD::SETLT: Swap = true; LLVM_FALLTHROUGH;
case ISD::SETGT: Opc = X86ISD::PCMPGTM; break;
case ISD::SETULT: SSECC = 1; Unsigned = true; break;
case ISD::SETUGE: SSECC = 5; Unsigned = true; break; //NLT
case ISD::SETGE: Swap = true; SSECC = 2; break; // LE + swap
case ISD::SETULE: Unsigned = true; //fall-through
case ISD::SETULE: Unsigned = true; LLVM_FALLTHROUGH;
case ISD::SETLE: SSECC = 2; break;
}
@ -18267,7 +18267,8 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, const X86Subtarget &Subtarget
case Intrinsic::x86_avx_vtestz_pd:
case Intrinsic::x86_avx_vtestz_ps_256:
case Intrinsic::x86_avx_vtestz_pd_256:
IsTestPacked = true; // Fallthrough
IsTestPacked = true;
LLVM_FALLTHROUGH;
case Intrinsic::x86_sse41_ptestz:
case Intrinsic::x86_avx_ptestz_256:
// ZF = 1
@ -18277,7 +18278,8 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, const X86Subtarget &Subtarget
case Intrinsic::x86_avx_vtestc_pd:
case Intrinsic::x86_avx_vtestc_ps_256:
case Intrinsic::x86_avx_vtestc_pd_256:
IsTestPacked = true; // Fallthrough
IsTestPacked = true;
LLVM_FALLTHROUGH;
case Intrinsic::x86_sse41_ptestc:
case Intrinsic::x86_avx_ptestc_256:
// CF = 1
@ -18287,7 +18289,8 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, const X86Subtarget &Subtarget
case Intrinsic::x86_avx_vtestnzc_pd:
case Intrinsic::x86_avx_vtestnzc_ps_256:
case Intrinsic::x86_avx_vtestnzc_pd_256:
IsTestPacked = true; // Fallthrough
IsTestPacked = true;
LLVM_FALLTHROUGH;
case Intrinsic::x86_sse41_ptestnzc:
case Intrinsic::x86_avx_ptestnzc_256:
// ZF and CF = 0
@ -24759,7 +24762,7 @@ void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
// These nodes' second result is a boolean.
if (Op.getResNo() == 0)
break;
// Fallthrough
LLVM_FALLTHROUGH;
case X86ISD::SETCC:
KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
break;
@ -25946,7 +25949,7 @@ combineRedundantDWordShuffle(SDValue N, MutableArrayRef<int> Mask,
Chain.push_back(V);
// Fallthrough!
LLVM_FALLTHROUGH;
case ISD::BITCAST:
V = V.getOperand(0);
continue;
@ -27705,7 +27708,7 @@ static bool checkBoolTestAndOrSetCCCombine(SDValue Cond, X86::CondCode &CC0,
case ISD::AND:
case X86ISD::AND:
isAnd = true;
// fallthru
LLVM_FALLTHROUGH;
case ISD::OR:
case X86ISD::OR:
SetCC0 = Cond->getOperand(0);
@ -31675,7 +31678,7 @@ bool X86TargetLowering::IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const {
case ISD::OR:
case ISD::XOR:
Commute = true;
// fallthrough
LLVM_FALLTHROUGH;
case ISD::SUB: {
SDValue N0 = Op.getOperand(0);
SDValue N1 = Op.getOperand(1);

View File

@ -151,13 +151,14 @@ X86RegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC,
// If VLX isn't support we shouldn't inflate to these classes.
if (!Subtarget.hasVLX())
break;
// Fallthrough. The VLX check above passed, AVX512 check below will pass.
// The VLX check above passed, AVX512 check below will pass.
LLVM_FALLTHROUGH;
case X86::VR128XRegClassID:
case X86::VR256XRegClassID:
// If AVX-512 isn't support we shouldn't inflate to these classes.
if (!Subtarget.hasAVX512())
break;
// Fallthrough.
LLVM_FALLTHROUGH;
case X86::GR8RegClassID:
case X86::GR16RegClassID:
case X86::GR32RegClassID:

View File

@ -1413,7 +1413,7 @@ int X86TTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
// immediates here as the normal path expects bit 31 to be sign extended.
if (Idx == 1 && Imm.getBitWidth() == 64 && isUInt<32>(Imm.getZExtValue()))
return TTI::TCC_Free;
// Fallthrough
LLVM_FALLTHROUGH;
case Instruction::Add:
case Instruction::Sub:
case Instruction::Mul:

View File

@ -2289,7 +2289,7 @@ Instruction *InstCombiner::foldICmpWithConstant(ICmpInst &ICI) {
case Instruction::UDiv:
if (Instruction *I = foldICmpUDivConstant(ICI, LHSI, RHSV))
return I;
// fall-through
LLVM_FALLTHROUGH;
case Instruction::SDiv:
if (Instruction *I = foldICmpDivConstant(ICI, LHSI, RHSV))
return I;
@ -4165,7 +4165,7 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
case Instruction::LShr:
if (I.isSigned())
break;
// fall-through
LLVM_FALLTHROUGH;
case Instruction::SDiv:
case Instruction::AShr:
if (!BO0->isExact() || !BO1->isExact())

View File

@ -511,7 +511,7 @@ static ConstantInt *createOrdering(IRBuilder<> *IRB, AtomicOrdering ord) {
switch (ord) {
case AtomicOrdering::NotAtomic:
llvm_unreachable("unexpected atomic ordering!");
case AtomicOrdering::Unordered: // Fall-through.
case AtomicOrdering::Unordered: LLVM_FALLTHROUGH;
case AtomicOrdering::Monotonic: v = 0; break;
// Not specified yet:
// case AtomicOrdering::Consume: v = 1; break;

View File

@ -423,7 +423,7 @@ bool ObjCARCContract::tryToPeepholeInstruction(
if (!optimizeRetainCall(F, Inst))
return false;
// If we succeed in our optimization, fall through.
// FALLTHROUGH
LLVM_FALLTHROUGH;
case ARCInstKind::RetainRV:
case ARCInstKind::ClaimRV: {
// If we're compiling for a target which needs a special inline-asm

View File

@ -275,7 +275,7 @@ InductiveRangeCheck::parseRangeCheckICmp(Loop *L, ICmpInst *ICI,
case ICmpInst::ICMP_SLE:
std::swap(LHS, RHS);
// fallthrough
LLVM_FALLTHROUGH;
case ICmpInst::ICMP_SGE:
if (match(RHS, m_ConstantInt<0>())) {
Index = LHS;
@ -285,7 +285,7 @@ InductiveRangeCheck::parseRangeCheckICmp(Loop *L, ICmpInst *ICI,
case ICmpInst::ICMP_SLT:
std::swap(LHS, RHS);
// fallthrough
LLVM_FALLTHROUGH;
case ICmpInst::ICMP_SGT:
if (match(RHS, m_ConstantInt<-1>())) {
Index = LHS;
@ -301,7 +301,7 @@ InductiveRangeCheck::parseRangeCheckICmp(Loop *L, ICmpInst *ICI,
case ICmpInst::ICMP_ULT:
std::swap(LHS, RHS);
// fallthrough
LLVM_FALLTHROUGH;
case ICmpInst::ICMP_UGT:
if (IsNonNegativeAndNotLoopVarying(LHS)) {
Index = RHS;

View File

@ -501,7 +501,8 @@ static bool shouldSpeculateInstrs(BasicBlock::iterator Begin,
// GEPs are cheap if all indices are constant.
if (!cast<GEPOperator>(I)->hasAllConstantIndices())
return false;
// fall-thru to increment case
// fall-thru to increment case
LLVM_FALLTHROUGH;
case Instruction::Add:
case Instruction::Sub:
case Instruction::And:

View File

@ -770,7 +770,7 @@ static void copyMetadata(Instruction *DstInst, const Instruction *SrcInst,
MD.second = NewMD;
}
}
// fallthrough.
LLVM_FALLTHROUGH;
case LLVMContext::MD_make_implicit:
case LLVMContext::MD_dbg:
DstInst->setMetadata(MD.first, MD.second);

View File

@ -1565,7 +1565,7 @@ PointerIntPair<DeclContext *, 1> DeclContextTree::getChildDeclContext(
!DIE->getAttributeValueAsUnsignedConstant(&U.getOrigUnit(),
dwarf::DW_AT_external, 0))
return PointerIntPair<DeclContext *, 1>(nullptr);
// Fallthrough
LLVM_FALLTHROUGH;
case dwarf::DW_TAG_member:
case dwarf::DW_TAG_namespace:
case dwarf::DW_TAG_structure_type:

View File

@ -1656,7 +1656,7 @@ void ELFDumper<ELFT>::printValue(uint64_t Type, uint64_t Value) {
OS << "RELA";
break;
}
// Fallthrough.
LLVM_FALLTHROUGH;
case DT_PLTGOT:
case DT_HASH:
case DT_STRTAB: