[Hexagon] Add patterns for funnel shifts

llvm-svn: 349770
This commit is contained in:
Krzysztof Parzyszek 2018-12-20 16:39:20 +00:00
parent d363f08426
commit 30c42e2ab6
4 changed files with 375 additions and 30 deletions

View File

@@ -1359,6 +1359,11 @@ HexagonTargetLowering::HexagonTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::BSWAP, MVT::i32, Legal);
setOperationAction(ISD::BSWAP, MVT::i64, Legal);
setOperationAction(ISD::FSHL, MVT::i32, Legal);
setOperationAction(ISD::FSHL, MVT::i64, Legal);
setOperationAction(ISD::FSHR, MVT::i32, Legal);
setOperationAction(ISD::FSHR, MVT::i64, Legal);
for (unsigned IntExpOp :
{ISD::SDIV, ISD::UDIV, ISD::SREM, ISD::UREM,
ISD::SDIVREM, ISD::UDIVREM, ISD::ROTL, ISD::ROTR,
@@ -1538,8 +1543,10 @@ HexagonTargetLowering::HexagonTargetLowering(const TargetMachine &TM,
// Subtarget-specific operation actions.
//
if (Subtarget.hasV60Ops()) {
setOperationAction(ISD::ROTL, MVT::i32, Custom);
setOperationAction(ISD::ROTL, MVT::i64, Custom);
setOperationAction(ISD::ROTL, MVT::i32, Legal);
setOperationAction(ISD::ROTL, MVT::i64, Legal);
setOperationAction(ISD::ROTR, MVT::i32, Legal);
setOperationAction(ISD::ROTR, MVT::i64, Legal);
}
if (Subtarget.hasV66Ops()) {
setOperationAction(ISD::FADD, MVT::f64, Legal);

View File

@@ -177,6 +177,11 @@ def UDEC32: SDNodeXForm<imm, [{
return CurDAG->getTargetConstant(V-32, SDLoc(N), MVT::i32);
}]>;
class Subi<int From>: SDNodeXForm<imm,
"int32_t V = " # From # " - N->getSExtValue();" #
"return CurDAG->getTargetConstant(V, SDLoc(N), MVT::i32);"
>;
def Log2_32: SDNodeXForm<imm, [{
uint32_t V = N->getZExtValue();
return CurDAG->getTargetConstant(Log2_32(V), SDLoc(N), MVT::i32);
@@ -995,10 +1000,90 @@ def: OpR_RR_pat<S2_asr_r_p, Sra, i64, I64, I32>;
def: OpR_RR_pat<S2_lsr_r_p, Srl, i64, I64, I32>;
def: OpR_RR_pat<S2_asl_r_p, Shl, i64, I64, I32>;
let Predicates = [HasV60] in {
// Funnel shifts.
def IsMul8_U3: PatLeaf<(i32 imm), [{
uint64_t V = N->getZExtValue();
return V % 8 == 0 && isUInt<3>(V / 8);
}]>;
def Divu8: SDNodeXForm<imm, [{
return CurDAG->getTargetConstant(N->getZExtValue() / 8, SDLoc(N), MVT::i32);
}]>;
// Funnel shift-left.
def FShl32i: OutPatFrag<(ops node:$Rs, node:$Rt, node:$S),
(HiReg (S2_asl_i_p (Combinew $Rs, $Rt), $S))>;
def FShl32r: OutPatFrag<(ops node:$Rs, node:$Rt, node:$Ru),
(HiReg (S2_asl_r_p (Combinew $Rs, $Rt), $Ru))>;
def FShl64i: OutPatFrag<(ops node:$Rs, node:$Rt, node:$S),
(S2_lsr_i_p_or (S2_asl_i_p $Rt, $S), $Rs, (Subi<64> $S))>;
def FShl64r: OutPatFrag<(ops node:$Rs, node:$Rt, node:$Ru),
(S2_lsr_r_p_or (S2_asl_r_p $Rt, $Ru), $Rs, (A2_subri 64, $Ru))>;
// Combined SDNodeXForm: (Divu8 (Subi<64> $S))
def Divu64_8: SDNodeXForm<imm, [{
return CurDAG->getTargetConstant((64 - N->getSExtValue()) / 8,
SDLoc(N), MVT::i32);
}]>;
// Special cases:
let AddedComplexity = 100 in {
def: Pat<(fshl I32:$Rs, I32:$Rt, (i32 16)),
(A2_combine_lh I32:$Rs, I32:$Rt)>;
def: Pat<(fshl I64:$Rs, I64:$Rt, IsMul8_U3:$S),
(S2_valignib I64:$Rs, I64:$Rt, (Divu64_8 $S))>;
}
let Predicates = [HasV60], AddedComplexity = 50 in {
def: OpR_RI_pat<S6_rol_i_r, Rol, i32, I32, u5_0ImmPred>;
def: OpR_RI_pat<S6_rol_i_p, Rol, i64, I64, u6_0ImmPred>;
}
let AddedComplexity = 30 in {
def: Pat<(rotl I32:$Rs, u5_0ImmPred:$S), (FShl32i $Rs, $Rs, imm:$S)>;
def: Pat<(rotl I64:$Rs, u6_0ImmPred:$S), (FShl64i $Rs, $Rs, imm:$S)>;
def: Pat<(fshl I32:$Rs, I32:$Rt, u5_0ImmPred:$S), (FShl32i $Rs, $Rt, imm:$S)>;
def: Pat<(fshl I64:$Rs, I64:$Rt, u6_0ImmPred:$S), (FShl64i $Rs, $Rt, imm:$S)>;
}
def: Pat<(rotl I32:$Rs, I32:$Rt), (FShl32r $Rs, $Rs, $Rt)>;
def: Pat<(rotl I64:$Rs, I32:$Rt), (FShl64r $Rs, $Rs, $Rt)>;
def: Pat<(fshl I32:$Rs, I32:$Rt, I32:$Ru), (FShl32r $Rs, $Rt, $Ru)>;
def: Pat<(fshl I64:$Rs, I64:$Rt, I32:$Ru), (FShl64r $Rs, $Rt, $Ru)>;
// Funnel shift-right.
def FShr32i: OutPatFrag<(ops node:$Rs, node:$Rt, node:$S),
(LoReg (S2_lsr_i_p (Combinew $Rs, $Rt), $S))>;
def FShr32r: OutPatFrag<(ops node:$Rs, node:$Rt, node:$Ru),
(LoReg (S2_lsr_r_p (Combinew $Rs, $Rt), $Ru))>;
def FShr64i: OutPatFrag<(ops node:$Rs, node:$Rt, node:$S),
(S2_asl_i_p_or (S2_lsr_i_p $Rt, $S), $Rs, (Subi<64> $S))>;
def FShr64r: OutPatFrag<(ops node:$Rs, node:$Rt, node:$Ru),
(S2_asl_r_p_or (S2_lsr_r_p $Rt, $Ru), $Rs, (A2_subri 64, $Ru))>;
// Special cases:
let AddedComplexity = 100 in {
def: Pat<(fshr I32:$Rs, I32:$Rt, (i32 16)),
(A2_combine_lh I32:$Rs, I32:$Rt)>;
def: Pat<(fshr I64:$Rs, I64:$Rt, IsMul8_U3:$S),
(S2_valignib I64:$Rs, I64:$Rt, (Divu8 $S))>;
}
let Predicates = [HasV60], AddedComplexity = 50 in {
def: Pat<(rotr I32:$Rs, u5_0ImmPred:$S), (S6_rol_i_r I32:$Rs, (Subi<32> $S))>;
def: Pat<(rotr I64:$Rs, u6_0ImmPred:$S), (S6_rol_i_p I64:$Rs, (Subi<64> $S))>;
}
let AddedComplexity = 30 in {
def: Pat<(rotr I32:$Rs, u5_0ImmPred:$S), (FShr32i $Rs, $Rs, imm:$S)>;
def: Pat<(rotr I64:$Rs, u6_0ImmPred:$S), (FShr64i $Rs, $Rs, imm:$S)>;
def: Pat<(fshr I32:$Rs, I32:$Rt, u5_0ImmPred:$S), (FShr32i $Rs, $Rt, imm:$S)>;
def: Pat<(fshr I64:$Rs, I64:$Rt, u6_0ImmPred:$S), (FShr64i $Rs, $Rt, imm:$S)>;
}
def: Pat<(rotr I32:$Rs, I32:$Rt), (FShr32r $Rs, $Rs, $Rt)>;
def: Pat<(rotr I64:$Rs, I32:$Rt), (FShr64r $Rs, $Rs, $Rt)>;
def: Pat<(fshr I32:$Rs, I32:$Rt, I32:$Ru), (FShr32r $Rs, $Rt, $Ru)>;
def: Pat<(fshr I64:$Rs, I64:$Rt, I32:$Ru), (FShr64r $Rs, $Rt, $Ru)>;
def: Pat<(sra (add (sra I32:$Rs, u5_0ImmPred:$u5), 1), (i32 1)),
(S2_asr_i_r_rnd I32:$Rs, imm:$u5)>;
@@ -1183,6 +1268,7 @@ def: Pat<(HexagonVASL V2I16:$Rs, I32:$Rt),
def: Pat<(HexagonVLSR V2I16:$Rs, I32:$Rt),
(LoReg (S2_lsr_i_vh (ToAext64 $Rs), I32:$Rt))>;
// --(9) Arithmetic/bitwise ----------------------------------------------
//

View File

@@ -0,0 +1,265 @@
; RUN: llc -march=hexagon < %s | FileCheck %s

; Check lowering of the funnel-shift intrinsics (llvm.fshl/llvm.fshr) on
; Hexagon V60:
;   - i32 fshl/fshr        -> combine(rs,rt) + 64-bit asl/lsr, take one half
;   - i64 fshl/fshr        -> asl|lsr shift-or pair (sub(#64,rN) for the
;                             complementary amount in the register case)
;   - same-operand (rotate) forms by immediate -> rol
;   - i64 shifts by a multiple of 8 -> valignb
; NOTE(review): every register produced by "sub(#64,...)" is captured as a
; FileCheck variable and reused in the following or-shift check, rather than
; hard-coding an allocated register number.

; CHECK-LABEL: f0:
; CHECK: r[[R00:[0-9]+]]:[[R01:[0-9]+]] = combine(r0,r1)
; CHECK: r[[R02:[0-9]+]]:[[R03:[0-9]+]] = asl(r[[R00]]:[[R01]],#17)
define i32 @f0(i32 %a0, i32 %a1) #1 {
b0:
  %v0 = tail call i32 @llvm.fshl.i32(i32 %a0, i32 %a1, i32 17)
  ret i32 %v0
}

; CHECK-LABEL: f1:
; CHECK: r[[R10:[0-9]+]]:[[R11:[0-9]+]] = combine(r0,r1)
; CHECK: r[[R12:[0-9]+]]:[[R13:[0-9]+]] = asl(r[[R10]]:[[R11]],r2)
define i32 @f1(i32 %a0, i32 %a1, i32 %a2) #1 {
b0:
  %v0 = tail call i32 @llvm.fshl.i32(i32 %a0, i32 %a1, i32 %a2)
  ret i32 %v0
}

; CHECK-LABEL: f2:
; CHECK: r[[R20:[0-9]+]]:[[R21:[0-9]+]] = asl(r3:2,#17)
; CHECK: r[[R20]]:[[R21]] |= lsr(r1:0,#47)
define i64 @f2(i64 %a0, i64 %a1) #1 {
b0:
  %v0 = tail call i64 @llvm.fshl.i64(i64 %a0, i64 %a1, i64 17)
  ret i64 %v0
}

; CHECK-LABEL: f3:
; CHECK: r[[R30:[0-9]+]]:[[R31:[0-9]+]] = asl(r3:2,r4)
; CHECK: r[[R32:[0-9]+]] = sub(#64,r4)
; CHECK: r[[R30]]:[[R31]] |= lsr(r1:0,r[[R32]])
define i64 @f3(i64 %a0, i64 %a1, i64 %a2) #1 {
b0:
  %v0 = tail call i64 @llvm.fshl.i64(i64 %a0, i64 %a1, i64 %a2)
  ret i64 %v0
}

; CHECK-LABEL: f4:
; CHECK: r[[R40:[0-9]+]]:[[R41:[0-9]+]] = combine(r0,r1)
; CHECK: r[[R42:[0-9]+]]:[[R43:[0-9]+]] = lsr(r[[R40]]:[[R41]],#17)
define i32 @f4(i32 %a0, i32 %a1) #1 {
b0:
  %v0 = tail call i32 @llvm.fshr.i32(i32 %a0, i32 %a1, i32 17)
  ret i32 %v0
}

; CHECK-LABEL: f5:
; CHECK: r[[R50:[0-9]+]]:[[R51:[0-9]+]] = combine(r0,r1)
; CHECK: r[[R52:[0-9]+]]:[[R53:[0-9]+]] = lsr(r[[R50]]:[[R51]],r2)
define i32 @f5(i32 %a0, i32 %a1, i32 %a2) #1 {
b0:
  %v0 = tail call i32 @llvm.fshr.i32(i32 %a0, i32 %a1, i32 %a2)
  ret i32 %v0
}

; CHECK-LABEL: f6:
; CHECK: r[[R60:[0-9]+]]:[[R61:[0-9]+]] = lsr(r3:2,#17)
; CHECK: r[[R60]]:[[R61]] |= asl(r1:0,#47)
define i64 @f6(i64 %a0, i64 %a1) #1 {
b0:
  %v0 = tail call i64 @llvm.fshr.i64(i64 %a0, i64 %a1, i64 17)
  ret i64 %v0
}

; CHECK-LABEL: f7:
; CHECK: r[[R70:[0-9]+]]:[[R71:[0-9]+]] = lsr(r3:2,r4)
; CHECK: r[[R72:[0-9]+]] = sub(#64,r4)
; CHECK: r[[R70]]:[[R71]] |= asl(r1:0,r[[R72]])
define i64 @f7(i64 %a0, i64 %a1, i64 %a2) #1 {
b0:
  %v0 = tail call i64 @llvm.fshr.i64(i64 %a0, i64 %a1, i64 %a2)
  ret i64 %v0
}

; CHECK-LABEL: f8:
; CHECK: r[[R80:[0-9]+]] = rol(r0,#17)
define i32 @f8(i32 %a0) #1 {
b0:
  %v0 = tail call i32 @llvm.fshl.i32(i32 %a0, i32 %a0, i32 17)
  ret i32 %v0
}

; CHECK-LABEL: f9:
; CHECK: r[[R90:[0-9]+]]:[[R91:[0-9]+]] = combine(r0,r0)
; CHECK: r[[R92:[0-9]+]]:[[R93:[0-9]+]] = asl(r[[R90]]:[[R91]],r1)
define i32 @f9(i32 %a0, i32 %a1) #1 {
b0:
  %v0 = tail call i32 @llvm.fshl.i32(i32 %a0, i32 %a0, i32 %a1)
  ret i32 %v0
}

; CHECK-LABEL: f10:
; CHECK: r[[RA0:[0-9]+]]:[[RA1:[0-9]+]] = rol(r1:0,#17)
define i64 @f10(i64 %a0, i64 %a1) #1 {
b0:
  %v0 = tail call i64 @llvm.fshl.i64(i64 %a0, i64 %a0, i64 17)
  ret i64 %v0
}

; CHECK-LABEL: f11:
; CHECK: r[[RB0:[0-9]+]]:[[RB1:[0-9]+]] = asl(r1:0,r2)
; CHECK: r[[RB2:[0-9]+]] = sub(#64,r2)
; CHECK: r[[RB0]]:[[RB1]] |= lsr(r1:0,r[[RB2]])
define i64 @f11(i64 %a0, i64 %a1) #1 {
b0:
  %v0 = tail call i64 @llvm.fshl.i64(i64 %a0, i64 %a0, i64 %a1)
  ret i64 %v0
}

; CHECK-LABEL: f12:
; CHECK: r[[RC0:[0-9]+]] = rol(r0,#15)
define i32 @f12(i32 %a0, i32 %a1) #1 {
b0:
  %v0 = tail call i32 @llvm.fshr.i32(i32 %a0, i32 %a0, i32 17)
  ret i32 %v0
}

; CHECK-LABEL: f13:
; CHECK: r[[RD0:[0-9]+]]:[[RD1:[0-9]+]] = combine(r0,r0)
; CHECK: r[[RD2:[0-9]+]]:[[RD3:[0-9]+]] = lsr(r[[RD0]]:[[RD1]],r1)
define i32 @f13(i32 %a0, i32 %a1) #1 {
b0:
  %v0 = tail call i32 @llvm.fshr.i32(i32 %a0, i32 %a0, i32 %a1)
  ret i32 %v0
}

; CHECK-LABEL: f14:
; CHECK: r[[RE0:[0-9]+]]:[[RE1:[0-9]+]] = rol(r1:0,#47)
define i64 @f14(i64 %a0, i64 %a1) #1 {
b0:
  %v0 = tail call i64 @llvm.fshr.i64(i64 %a0, i64 %a0, i64 17)
  ret i64 %v0
}

; CHECK-LABEL: f15:
; CHECK: r[[RF0:[0-9]+]]:[[RF1:[0-9]+]] = lsr(r1:0,r2)
; CHECK: r[[RF2:[0-9]+]] = sub(#64,r2)
; CHECK: r[[RF0]]:[[RF1]] |= asl(r1:0,r[[RF2]])
define i64 @f15(i64 %a0, i64 %a1) #1 {
b0:
  %v0 = tail call i64 @llvm.fshr.i64(i64 %a0, i64 %a0, i64 %a1)
  ret i64 %v0
}

; i64 funnel shifts by a multiple of 8 should become valignb.
; CHECK-LABEL: f16:
; CHECK: r[[RG0:[0-9]+]]:[[RG1:[0-9]+]] = valignb(r1:0,r3:2,#7)
define i64 @f16(i64 %a0, i64 %a1) #1 {
b0:
  %v0 = tail call i64 @llvm.fshl.i64(i64 %a0, i64 %a1, i64 8)
  ret i64 %v0
}

; CHECK-LABEL: f17:
; CHECK: r[[RH0:[0-9]+]]:[[RH1:[0-9]+]] = valignb(r1:0,r3:2,#6)
define i64 @f17(i64 %a0, i64 %a1) #1 {
b0:
  %v0 = tail call i64 @llvm.fshl.i64(i64 %a0, i64 %a1, i64 16)
  ret i64 %v0
}

; CHECK-LABEL: f18:
; CHECK: r[[RI0:[0-9]+]]:[[RI1:[0-9]+]] = valignb(r1:0,r3:2,#5)
define i64 @f18(i64 %a0, i64 %a1) #1 {
b0:
  %v0 = tail call i64 @llvm.fshl.i64(i64 %a0, i64 %a1, i64 24)
  ret i64 %v0
}

; CHECK-LABEL: f19:
; CHECK: r[[RJ0:[0-9]+]]:[[RJ1:[0-9]+]] = valignb(r1:0,r3:2,#4)
define i64 @f19(i64 %a0, i64 %a1) #1 {
b0:
  %v0 = tail call i64 @llvm.fshl.i64(i64 %a0, i64 %a1, i64 32)
  ret i64 %v0
}

; CHECK-LABEL: f20:
; CHECK: r[[RK0:[0-9]+]]:[[RK1:[0-9]+]] = valignb(r1:0,r3:2,#3)
define i64 @f20(i64 %a0, i64 %a1) #1 {
b0:
  %v0 = tail call i64 @llvm.fshl.i64(i64 %a0, i64 %a1, i64 40)
  ret i64 %v0
}

; CHECK-LABEL: f21:
; CHECK: r[[RL0:[0-9]+]]:[[RL1:[0-9]+]] = valignb(r1:0,r3:2,#2)
define i64 @f21(i64 %a0, i64 %a1) #1 {
b0:
  %v0 = tail call i64 @llvm.fshl.i64(i64 %a0, i64 %a1, i64 48)
  ret i64 %v0
}

; CHECK-LABEL: f22:
; CHECK: r[[RM0:[0-9]+]]:[[RM1:[0-9]+]] = valignb(r1:0,r3:2,#1)
define i64 @f22(i64 %a0, i64 %a1) #1 {
b0:
  %v0 = tail call i64 @llvm.fshl.i64(i64 %a0, i64 %a1, i64 56)
  ret i64 %v0
}

; CHECK-LABEL: f23:
; CHECK: r[[RN0:[0-9]+]]:[[RN1:[0-9]+]] = valignb(r1:0,r3:2,#1)
define i64 @f23(i64 %a0, i64 %a1) #1 {
b0:
  %v0 = tail call i64 @llvm.fshr.i64(i64 %a0, i64 %a1, i64 8)
  ret i64 %v0
}

; CHECK-LABEL: f24:
; CHECK: r[[RO0:[0-9]+]]:[[RO1:[0-9]+]] = valignb(r1:0,r3:2,#2)
define i64 @f24(i64 %a0, i64 %a1) #1 {
b0:
  %v0 = tail call i64 @llvm.fshr.i64(i64 %a0, i64 %a1, i64 16)
  ret i64 %v0
}

; CHECK-LABEL: f25:
; CHECK: r[[RP0:[0-9]+]]:[[RP1:[0-9]+]] = valignb(r1:0,r3:2,#3)
define i64 @f25(i64 %a0, i64 %a1) #1 {
b0:
  %v0 = tail call i64 @llvm.fshr.i64(i64 %a0, i64 %a1, i64 24)
  ret i64 %v0
}

; CHECK-LABEL: f26:
; CHECK: r[[RQ0:[0-9]+]]:[[RQ1:[0-9]+]] = valignb(r1:0,r3:2,#4)
define i64 @f26(i64 %a0, i64 %a1) #1 {
b0:
  %v0 = tail call i64 @llvm.fshr.i64(i64 %a0, i64 %a1, i64 32)
  ret i64 %v0
}

; CHECK-LABEL: f27:
; CHECK: r[[RR0:[0-9]+]]:[[RR1:[0-9]+]] = valignb(r1:0,r3:2,#5)
define i64 @f27(i64 %a0, i64 %a1) #1 {
b0:
  %v0 = tail call i64 @llvm.fshr.i64(i64 %a0, i64 %a1, i64 40)
  ret i64 %v0
}

; CHECK-LABEL: f28:
; CHECK: r[[RS0:[0-9]+]]:[[RS1:[0-9]+]] = valignb(r1:0,r3:2,#6)
define i64 @f28(i64 %a0, i64 %a1) #1 {
b0:
  %v0 = tail call i64 @llvm.fshr.i64(i64 %a0, i64 %a1, i64 48)
  ret i64 %v0
}

; CHECK-LABEL: f29:
; CHECK: r[[RT0:[0-9]+]]:[[RT1:[0-9]+]] = valignb(r1:0,r3:2,#7)
define i64 @f29(i64 %a0, i64 %a1) #1 {
b0:
  %v0 = tail call i64 @llvm.fshr.i64(i64 %a0, i64 %a1, i64 56)
  ret i64 %v0
}

declare i32 @llvm.fshl.i32(i32, i32, i32) #0
declare i32 @llvm.fshr.i32(i32, i32, i32) #0
declare i64 @llvm.fshl.i64(i64, i64, i64) #0
declare i64 @llvm.fshr.i64(i64, i64, i64) #0

attributes #0 = { nounwind readnone speculatable }
attributes #1 = { nounwind readnone "target-cpu"="hexagonv60" "target-features"="-packets" }

View File

@@ -13,13 +13,10 @@ b0:
}
; CHECK-LABEL: f1
; No variable-shift rotates. Check for the default expansion code.
; This is a rotate left by %a1(r1).
; CHECK: r[[R10:[0-9]+]] = sub(#32,r1)
; CHECK: r[[R11:[0-9]+]] = and(r1,#31)
; CHECK: r[[R12:[0-9]+]] = and(r[[R10]],#31)
; CHECK: r[[R13:[0-9]+]] = asl(r0,r[[R11]])
; CHECK: r[[R13]] |= lsr(r0,r[[R12]])
; This is a rotate left by %a1(r1). Use register-pair shift to implement it.
; CHECK: r[[R10:[0-9]+]]:[[R11:[0-9]+]] = combine(r0,r0)
; CHECK: r[[R12:[0-9]+]]:[[R13:[0-9]+]] = asl(r[[R10]]:[[R11]],r1)
; CHECK: r0 = r[[R12]]
define i32 @f1(i32 %a0, i32 %a1) #0 {
b0:
%v0 = shl i32 %a0, %a1
@@ -40,13 +37,9 @@ b0:
}
; CHECK-LABEL: f3
; No variable-shift rotates. Check for the default expansion code.
; This is a rotate right by %a1(r1) that became a rotate left by 32-%a1.
; CHECK: r[[R30:[0-9]+]] = sub(#32,r1)
; CHECK: r[[R31:[0-9]+]] = and(r1,#31)
; CHECK: r[[R32:[0-9]+]] = and(r[[R30]],#31)
; CHECK: r[[R33:[0-9]+]] = asl(r0,r[[R32]])
; CHECK: r[[R33]] |= lsr(r0,r[[R31]])
; This is a rotate right by %a1(r1). Use register-pair shift to implement it.
; CHECK: r[[R30:[0-9]+]]:[[R31:[0-9]+]] = combine(r0,r0)
; CHECK: r[[R32:[0-9]+]]:[[R33:[0-9]+]] = lsr(r[[R30]]:[[R31]],r1)
define i32 @f3(i32 %a0, i32 %a1) #0 {
b0:
%v0 = lshr i32 %a0, %a1
@@ -67,13 +60,10 @@ b0:
}
; CHECK-LABEL: f5
; No variable-shift rotates. Check for the default expansion code.
; This is a rotate left by %a1(r2).
; CHECK: r[[R50:[0-9]+]] = sub(#64,r2)
; CHECK: r[[R51:[0-9]+]] = and(r2,#63)
; CHECK: r[[R52:[0-9]+]] = and(r[[R50]],#63)
; CHECK: r[[R53:[0-9]+]]:[[R54:[0-9]+]] = asl(r1:0,r[[R51]])
; CHECK: r[[R53]]:[[R54]] |= lsr(r1:0,r[[R52]])
; CHECK: r[[R50:[0-9]+]]:[[R51:[0-9]+]] = asl(r1:0,r2)
; CHECK: r[[R52:[0-9]+]] = sub(#64,r2)
; CHECK: r[[R50]]:[[R51]] |= lsr(r1:0,r[[R52]])
define i64 @f5(i64 %a0, i32 %a1) #0 {
b0:
%v0 = zext i32 %a1 to i64
@@ -96,13 +86,10 @@ b0:
}
; CHECK-LABEL: f7
; No variable-shift rotates. Check for the default expansion code.
; This is a rotate right by %a1(r2) that became a rotate left by 64-%a1.
; CHECK: r[[R70:[0-9]+]] = sub(#64,r2)
; CHECK: r[[R71:[0-9]+]] = and(r2,#63)
; CHECK: r[[R72:[0-9]+]] = and(r[[R70]],#63)
; CHECK: r[[R73:[0-9]+]]:[[R75:[0-9]+]] = asl(r1:0,r[[R72]])
; CHECK: r[[R73]]:[[R75]] |= lsr(r1:0,r[[R71]])
; This is a rotate right by %a1(r2).
; CHECK: r[[R70:[0-9]+]]:[[R71:[0-9]+]] = lsr(r1:0,r2)
; CHECK: r[[R72:[0-9]+]] = sub(#64,r2)
; CHECK: r[[R70]]:[[R71]] |= asl(r1:0,r[[R72]])
define i64 @f7(i64 %a0, i32 %a1) #0 {
b0:
%v0 = zext i32 %a1 to i64