[X86] Use memory form of shift right by 1 when the rotl immediate is one less than the operation size.

An earlier commit already did this for the register form.

llvm-svn: 295626
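
For reference, the equivalence the new patterns rely on: rotating an N-bit value left by N-1 gives the same result as rotating it right by 1, and the by-1 memory form (opcodes 0xD0/0xD1 in the .td hunk below) carries no immediate byte, unlike the 0xC1 immediate form. A small stand-alone C sketch of that equivalence, written with the same shift/or idiom the tests below use; this is illustrative only and not part of the commit:

#include <assert.h>
#include <stdint.h>

/* Rotate left by 63 and rotate right by 1, expressed as shift + shift + or,
   the same idiom the rotate.ll-style tests below feed to the compiler. */
static uint64_t rotl_by_63(uint64_t x) { return (x << 63) | (x >> 1); }
static uint64_t rotr_by_1(uint64_t x)  { return (x >> 1) | (x << 63); }

int main(void) {
  /* Both helpers reduce to the same shifts, so they agree for every input;
     the same holds for 8/16/32-bit widths with rotate amounts 7/15/31. */
  uint64_t samples[] = {0u, 1u, 0x8000000000000000ULL, 0xDEADBEEFCAFEF00DULL};
  for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); ++i)
    assert(rotl_by_63(samples[i]) == rotr_by_1(samples[i]));
  return 0;
}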
Craig Topper 2017-02-20 00:37:23 +00:00
parent 0f14411b57
commit c184b671d9
2 changed files with 11 additions and 11 deletions


@@ -662,19 +662,19 @@ def ROR64mi : RIi8<0xC1, MRM1m, (outs), (ins i64mem:$dst, u8imm:$src),
// Rotate by 1
def ROR8m1 : I<0xD0, MRM1m, (outs), (ins i8mem :$dst),
"ror{b}\t$dst",
- [(store (rotr (loadi8 addr:$dst), (i8 1)), addr:$dst)],
+ [(store (rotl (loadi8 addr:$dst), (i8 7)), addr:$dst)],
IIC_SR>;
def ROR16m1 : I<0xD1, MRM1m, (outs), (ins i16mem:$dst),
"ror{w}\t$dst",
- [(store (rotr (loadi16 addr:$dst), (i8 1)), addr:$dst)],
+ [(store (rotl (loadi16 addr:$dst), (i8 15)), addr:$dst)],
IIC_SR>, OpSize16;
def ROR32m1 : I<0xD1, MRM1m, (outs), (ins i32mem:$dst),
"ror{l}\t$dst",
- [(store (rotr (loadi32 addr:$dst), (i8 1)), addr:$dst)],
+ [(store (rotl (loadi32 addr:$dst), (i8 31)), addr:$dst)],
IIC_SR>, OpSize32;
def ROR64m1 : RI<0xD1, MRM1m, (outs), (ins i64mem:$dst),
"ror{q}\t$dst",
- [(store (rotr (loadi64 addr:$dst), (i8 1)), addr:$dst)],
+ [(store (rotl (loadi64 addr:$dst), (i8 63)), addr:$dst)],
IIC_SR>;
} // SchedRW


@@ -558,7 +558,7 @@ define void @rotr1_64_mem(i64* %Aptr) nounwind {
; 64-LABEL: rotr1_64_mem:
; 64: # BB#0:
- ; 64-NEXT: rolq $63, (%rdi)
+ ; 64-NEXT: rorq (%rdi)
; 64-NEXT: retq
%A = load i64, i64 *%Aptr
%B = shl i64 %A, 63
@@ -572,12 +572,12 @@ define void @rotr1_32_mem(i32* %Aptr) nounwind {
; 32-LABEL: rotr1_32_mem:
; 32: # BB#0:
; 32-NEXT: movl 4(%esp), %eax
- ; 32-NEXT: roll $31, (%eax)
+ ; 32-NEXT: rorl (%eax)
; 32-NEXT: retl
;
; 64-LABEL: rotr1_32_mem:
; 64: # BB#0:
- ; 64-NEXT: roll $31, (%rdi)
+ ; 64-NEXT: rorl (%rdi)
; 64-NEXT: retq
%A = load i32, i32 *%Aptr
%B = shl i32 %A, 31
@@ -591,12 +591,12 @@ define void @rotr1_16_mem(i16* %Aptr) nounwind {
; 32-LABEL: rotr1_16_mem:
; 32: # BB#0:
; 32-NEXT: movl 4(%esp), %eax
- ; 32-NEXT: rolw $15, (%eax)
+ ; 32-NEXT: rorw (%eax)
; 32-NEXT: retl
;
; 64-LABEL: rotr1_16_mem:
; 64: # BB#0:
- ; 64-NEXT: rolw $15, (%rdi)
+ ; 64-NEXT: rorw (%rdi)
; 64-NEXT: retq
%A = load i16, i16 *%Aptr
%B = shl i16 %A, 15
@@ -610,12 +610,12 @@ define void @rotr1_8_mem(i8* %Aptr) nounwind {
; 32-LABEL: rotr1_8_mem:
; 32: # BB#0:
; 32-NEXT: movl 4(%esp), %eax
- ; 32-NEXT: rolb $7, (%eax)
+ ; 32-NEXT: rorb (%eax)
; 32-NEXT: retl
;
; 64-LABEL: rotr1_8_mem:
; 64: # BB#0:
- ; 64-NEXT: rolb $7, (%rdi)
+ ; 64-NEXT: rorb (%rdi)
; 64-NEXT: retq
%A = load i8, i8 *%Aptr
%B = shl i8 %A, 7