; llvm/test/CodeGen/SPARC/basictest.ll
; RUN: llc < %s -march=sparc | FileCheck %s
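
; llc compiles this file for 32-bit SPARC and FileCheck verifies the emitted
; assembly against the CHECK lines. These functions are all small enough to be
; compiled as leaf functions (no save/restore register-window shift), which is
; why the CHECK patterns expect arguments and results in the %o registers.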
define i32 @test0(i32 %X) {
  %tmp.1 = add i32 %X, 1
  ret i32 %tmp.1
; CHECK-LABEL: test0:
; CHECK: add %o0, 1, %o0
}

;; xnor tests: an xor whose result (or one operand) is inverted with
;; "xor ..., -1" should fold into a single xnor instruction.
define i32 @test1(i32 %X, i32 %Y) {
  %A = xor i32 %X, %Y
  %B = xor i32 %A, -1
  ret i32 %B
; CHECK-LABEL: test1:
; CHECK: xnor %o0, %o1, %o0
}

define i32 @test2(i32 %X, i32 %Y) {
  %A = xor i32 %X, -1
  %B = xor i32 %A, %Y
  ret i32 %B
; CHECK-LABEL: test2:
; CHECK: xnor %o0, %o1, %o0
}
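
; %g0 is hardwired to zero on SPARC, so stores of the constant 0 should use it
; directly rather than materializing zero in a scratch register.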
; CHECK-LABEL: store_zero:
; CHECK: st %g0, [%o0]
; CHECK: st %g0, [%o1+4]
define i32 @store_zero(i32* %a, i32* %b) {
entry:
  store i32 0, i32* %a, align 4
  %0 = getelementptr inbounds i32, i32* %b, i32 1
  store i32 0, i32* %0, align 4
  ret i32 0
}
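
; SDIV divides the 64-bit value formed by the Y register (high word) and the
; first operand (low word). For a signed 32-bit divide, Y must hold the sign
; extension of the dividend, hence the sra/wr pair before the sdiv.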
; CHECK-LABEL: signed_divide:
; CHECK: sra %o0, 31, %o2
; CHECK: wr %g0, %o2, %y
; CHECK: sdiv %o0, %o1, %o0
define i32 @signed_divide(i32 %a, i32 %b) {
  %r = sdiv i32 %a, %b
  ret i32 %r
}
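
; For an unsigned divide the high word of the dividend is zero, so Y is
; cleared by writing %g0 to it.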
; CHECK-LABEL: unsigned_divide:
; CHECK: wr %g0, %g0, %y
; CHECK: udiv %o0, %o1, %o0
define i32 @unsigned_divide(i32 %a, i32 %b) {
  %r = udiv i32 %a, %b
  ret i32 %r
}
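
; A 32x32->32 multiply only needs the low word of the product, which smul
; leaves directly in the destination register.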
; CHECK-LABEL: multiply_32x32:
; CHECK: smul %o0, %o1, %o0
define i32 @multiply_32x32(i32 %a, i32 %b) {
  %r = mul i32 %a, %b
  ret i32 %r
}
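
; smul also deposits the high word of the full 64-bit product in the Y
; register, so the widening multiply reads it back with rd %y to build the
; i64 result.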
; CHECK-LABEL: signed_multiply_32x32_64:
; CHECK: smul %o0, %o1, %o1
; CHECK: rd %y, %o0
define i64 @signed_multiply_32x32_64(i32 %a, i32 %b) {
  %xa = sext i32 %a to i64
  %xb = sext i32 %b to i64
  %r = mul i64 %xa, %xb
  ret i64 %r
}
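
; Same pattern with umul for zero-extended operands; see the FIXME below about
; the extra smul in the current output.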
; CHECK-LABEL: unsigned_multiply_32x32_64:
; FIXME: the smul in the output is totally redundant and should not be there.
; CHECK: smul %o0, %o1, %o2
; CHECK: umul %o0, %o1, %o0
; CHECK: rd %y, %o0
; CHECK: retl
; CHECK: mov %o2, %o1
define i64 @unsigned_multiply_32x32_64(i32 %a, i32 %b) {
  %xa = zext i32 %a to i64
  %xb = zext i32 %b to i64
  %r = mul i64 %xa, %xb
  ret i64 %r
}
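
; On 32-bit SPARC, ldd/std move a 64-bit quantity through an even/odd pair of
; 32-bit registers (modeled in the backend as v2i32), and the i64 add is
; expanded into an addcc/addxcc pair on the two halves.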
; CHECK-LABEL: load_store_64bit:
; CHECK: ldd [%o0], %o2
; CHECK: addcc %o3, 3, %o5
; CHECK: addxcc %o2, 0, %o4
; CHECK: retl
; CHECK: std %o4, [%o1]
define void @load_store_64bit(i64* %x, i64* %y) {
entry:
  %0 = load i64, i64* %x
  %add = add nsw i64 %0, 3
  store i64 %add, i64* %y
  ret void
}