Merge commit '1bbee3e217d75e7bc3bfe5d8c1b35e776fce96e6' into sync-cg_gcc-2023-06-19

Antoni Boucher 2023-06-19 18:51:02 -04:00
parent 876a7d8d6f
commit 38c16e9862
23 changed files with 244 additions and 264 deletions

View File

@ -20,9 +20,9 @@ jobs:
matrix:
libgccjit_version:
- { gcc: "libgccjit.so", artifacts_branch: "master" }
commands: [
"--test-successful-rustc --nb-parts 2 --current-part 0",
"--test-successful-rustc --nb-parts 2 --current-part 1",
cargo_runner: [
"sde -future -rtm_mode full --",
"",
]
steps:
@ -36,6 +36,20 @@ jobs:
- name: Install packages
run: sudo apt-get install ninja-build ripgrep
- name: Install Intel Software Development Emulator
if: ${{ matrix.cargo_runner }}
run: |
mkdir intel-sde
cd intel-sde
dir=sde-external-9.14.0-2022-10-25-lin
file=$dir.tar.xz
wget https://downloadmirror.intel.com/751535/$file
tar xvf $file
sudo mkdir /usr/share/intel-sde
sudo cp -r $dir/* /usr/share/intel-sde
sudo ln -s /usr/share/intel-sde/sde /usr/bin/sde
sudo ln -s /usr/share/intel-sde/sde64 /usr/bin/sde64
- name: Download artifact
uses: dawidd6/action-download-artifact@v2
with:
@ -91,6 +105,10 @@ jobs:
./prepare_build.sh
./build.sh --release --release-sysroot
cargo test
- name: Clean
if: ${{ !matrix.cargo_runner }}
run: |
./clean_all.sh
- name: Prepare dependencies
@ -107,10 +125,18 @@ jobs:
args: --release
- name: Run tests
if: ${{ !matrix.cargo_runner }}
run: |
./test.sh --release --clean --release-sysroot --build-sysroot --mini-tests --std-tests --test-libcore
- name: Run stdarch tests
if: ${{ !matrix.cargo_runner }}
run: |
cd build_sysroot/sysroot_src/library/stdarch/
CHANNEL=release TARGET=x86_64-unknown-linux-gnu ../../../../cargo.sh test
- name: Run stdarch tests
if: ${{ matrix.cargo_runner }}
run: |
cd build_sysroot/sysroot_src/library/stdarch/
STDARCH_TEST_EVERYTHING=1 CHANNEL=release CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_RUNNER="${{ matrix.cargo_runner }}" TARGET=x86_64-unknown-linux-gnu ../../../../cargo.sh test -- --skip rtm --skip tbm --skip sse4a

.gitignore vendored
View File

@ -23,3 +23,5 @@ benchmarks
tools/llvm-project
tools/llvmint
tools/llvmint-2
# The `llvm` folder is generated by the `tools/generate_intrinsics.py` script to update intrinsics.
llvm

View File

@ -193,7 +193,7 @@ Using git-subtree with `rustc` requires a patched git to make it work.
The PR that is needed is [here](https://github.com/gitgitgadget/git/pull/493).
Use the following instructions to install it:
```
```bash
git clone git@github.com:tqc/git.git
cd git
git checkout tqc/subtree
@ -204,6 +204,21 @@ make
cp git-subtree ~/bin
```
Then, do a sync with this command:
```bash
PATH="$HOME/bin:$PATH" ~/bin/git-subtree push -P compiler/rustc_codegen_gcc/ ../rustc_codegen_gcc/ sync_branch_name
cd ../rustc_codegen_gcc
git checkout master
git pull
git checkout sync_branch_name
git merge master
```
TODO: write a script that does the above.
https://rust-lang.zulipchat.com/#narrow/stream/301329-t-devtools/topic/subtree.20madness/near/258877725
### How to use [mem-trace](https://github.com/antoyo/mem-trace)
`rustc` needs to be built without `jemalloc` so that `mem-trace` can overload `malloc`: since `jemalloc` is linked statically, an `LD_PRELOAD`-ed library wouldn't have a chance to intercept the calls to `malloc`.

View File

@ -9,6 +9,7 @@ compiler_builtins = "0.1"
alloc = { path = "./sysroot_src/library/alloc" }
std = { path = "./sysroot_src/library/std", features = ["panic_unwind", "backtrace"] }
test = { path = "./sysroot_src/library/test" }
proc_macro = { path = "./sysroot_src/library/proc_macro" }
[patch.crates-io]
rustc-std-workspace-core = { path = "./sysroot_src/library/rustc-std-workspace-core" }

View File

@ -29,10 +29,10 @@ git config user.name || git config user.name "None"
git commit -m "Initial commit" -q
for file in $(ls ../../patches/ | grep -v patcha); do
echo "[GIT] apply" $file
git apply ../../patches/$file
git add -A
git commit --no-gpg-sign -m "Patch $file"
echo "[GIT] apply" $file
git apply ../../patches/$file
git add -A
git commit --no-gpg-sign -m "Patch $file"
done
popd

View File

@ -451,6 +451,9 @@ pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
drop_in_place(to_drop);
}
#[lang = "unpin"]
pub auto trait Unpin {}
#[lang = "deref"]
pub trait Deref {
type Target: ?Sized;
@ -488,10 +491,23 @@ pub struct Box<T: ?Sized, A: Allocator = Global>(Unique<T>, A);
impl<T: ?Sized + Unsize<U>, U: ?Sized, A: Allocator> CoerceUnsized<Box<U, A>> for Box<T, A> {}
impl<T> Box<T> {
pub fn new(val: T) -> Box<T> {
unsafe {
let size = intrinsics::size_of::<T>();
let ptr = libc::malloc(size);
intrinsics::copy(&val as *const T as *const u8, ptr, size);
Box(Unique { pointer: NonNull(ptr as *const T), _marker: PhantomData }, Global)
}
}
}
impl<T: ?Sized, A: Allocator> Drop for Box<T, A> {
fn drop(&mut self) {
// inner value is dropped by compiler
libc::free(self.pointer.0 as *mut u8);
// inner value is dropped by compiler.
unsafe {
libc::free(self.0.pointer.0 as *mut u8);
}
}
}
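For reference, a standalone sketch of the allocation pattern the mini_core `Box` above relies on: the payload is copied into a `malloc`'d block on construction and released with `free` on drop. `MyBox` is an illustrative stand-in (it has to drop its payload itself, whereas the real lang-item `Box` has the inner value dropped by the compiler), and the example assumes the `libc` crate is available.
```rust
use std::{mem, ptr};

// Illustrative stand-in for the mini_core Box: a raw pointer to a malloc'd payload.
struct MyBox<T>(*mut T);

impl<T> MyBox<T> {
    fn new(val: T) -> MyBox<T> {
        unsafe {
            // Allocate space for T and move the value into it, as Box::new does above.
            let ptr = libc::malloc(mem::size_of::<T>()) as *mut T;
            ptr::write(ptr, val);
            MyBox(ptr)
        }
    }
}

impl<T> Drop for MyBox<T> {
    fn drop(&mut self) {
        unsafe {
            // Unlike the lang-item Box, drop the payload explicitly, then free the block.
            ptr::drop_in_place(self.0);
            libc::free(self.0 as *mut libc::c_void);
        }
    }
}

fn main() {
    let b = MyBox::new(41 + 1);
    unsafe { assert_eq!(*b.0, 42); }
}
```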

View File

@ -168,6 +168,9 @@ fn main() {
world as Box<dyn SomeTrait>;
assert_eq!(intrinsics::bitreverse(0b10101000u8), 0b00010101u8);
assert_eq!(intrinsics::bitreverse(0xddccu16), 0x33bbu16);
assert_eq!(intrinsics::bitreverse(0xffee_ddccu32), 0x33bb77ffu32);
assert_eq!(intrinsics::bitreverse(0x1234_5678_ffee_ddccu64), 0x33bb77ff1e6a2c48u64);
assert_eq!(intrinsics::bswap(0xabu8), 0xabu8);
assert_eq!(intrinsics::bswap(0xddccu16), 0xccddu16);

View File

@ -58,6 +58,7 @@ fn main() {
assert_eq!(0b0000000000000000000000000010000010000000000000000000000000000000_0000000000100000000000000000000000001000000000000100000000000000u128.leading_zeros(), 26);
assert_eq!(0b0000000000000000000000000010000000000000000000000000000000000000_0000000000000000000000000000000000001000000000000000000010000000u128.trailing_zeros(), 7);
assert_eq!(0x1234_5678_ffee_ddcc_1234_5678_ffee_ddccu128.reverse_bits(), 0x33bb77ff1e6a2c4833bb77ff1e6a2c48u128);
let _d = 0i128.checked_div(2i128);
let _d = 0u128.checked_div(2u128);

View File

@ -54,8 +54,8 @@ tests/ui/issues/issue-40883.rs
tests/ui/issues/issue-43853.rs
tests/ui/issues/issue-47364.rs
tests/ui/macros/rfc-2011-nicer-assert-messages/assert-without-captures-does-not-create-unnecessary-code.rs
tests/ui/rfc-2091-track-caller/std-panic-locations.rs
tests/ui/rfcs/rfc1857-drop-order.rs
tests/ui/rfcs/rfc-2091-track-caller/std-panic-locations.rs
tests/ui/rfcs/rfc-1857-stabilize-drop-order/drop-order.rs
tests/ui/simd/issue-17170.rs
tests/ui/simd/issue-39720.rs
tests/ui/simd/issue-89193.rs
@ -66,3 +66,5 @@ tests/ui/generator/panic-safe.rs
tests/ui/issues/issue-14875.rs
tests/ui/issues/issue-29948.rs
tests/ui/panic-while-printing.rs
tests/ui/enum-discriminant/get_discr.rs
tests/ui/panics/nested_panic_caught.rs

View File

@ -1,49 +0,0 @@
From dd82e95c9de212524e14fc60155de1ae40156dfc Mon Sep 17 00:00:00 2001
From: bjorn3 <bjorn3@users.noreply.github.com>
Date: Sun, 24 Nov 2019 15:34:06 +0100
Subject: [PATCH] [core] Ignore failing tests
---
library/core/tests/iter.rs | 4 ++++
library/core/tests/num/bignum.rs | 10 ++++++++++
library/core/tests/num/mod.rs | 5 +++--
library/core/tests/time.rs | 1 +
4 files changed, 18 insertions(+), 2 deletions(-)
diff --git a/library/core/tests/array.rs b/library/core/tests/array.rs
index 4bc44e9..8e3c7a4 100644
--- a/library/core/tests/array.rs
+++ b/library/core/tests/array.rs
@@ -242,6 +242,7 @@ fn iterator_drops() {
assert_eq!(i.get(), 5);
}
+/*
// This test does not work on targets without panic=unwind support.
// To work around this problem, the test is marked as should_panic, so it will
// be automagically skipped on unsuitable targets, such as
@@ -283,6 +284,7 @@ fn array_default_impl_avoids_leaks_on_panic() {
assert_eq!(COUNTER.load(Relaxed), 0);
panic!("test succeeded")
}
+*/
#[test]
fn empty_array_is_always_default() {
@@ -304,6 +304,7 @@ fn array_map() {
assert_eq!(b, [1, 2, 3]);
}
+/*
// See note on above test for why `should_panic` is used.
#[test]
#[should_panic(expected = "test succeeded")]
@@ -332,6 +333,7 @@ fn array_map_drop_safety() {
assert_eq!(DROPPED.load(Ordering::SeqCst), num_to_create);
panic!("test succeeded")
}
+*/
#[test]
fn cell_allows_array_cycle() {
-- 2.21.0 (Apple Git-122)

View File

@ -1,3 +1,3 @@
[toolchain]
channel = "nightly-2023-03-02"
channel = "nightly-2023-06-19"
components = ["rust-src", "rustc-dev", "llvm-tools-preview"]

View File

@ -518,7 +518,6 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
OperandValue::Immediate(op.tmp_var.to_rvalue()).store(self, place);
}
}
}
}

View File

@ -2,9 +2,13 @@
use gccjit::FnAttribute;
use gccjit::Function;
use rustc_attr::InstructionSetAttr;
#[cfg(feature="master")]
use rustc_attr::InlineAttr;
use rustc_codegen_ssa::target_features::tied_target_features;
use rustc_data_structures::fx::FxHashMap;
use rustc_middle::ty;
#[cfg(feature="master")]
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use rustc_session::Session;
use rustc_span::symbol::sym;
use smallvec::{smallvec, SmallVec};
@ -67,6 +71,24 @@ fn to_gcc_features<'a>(sess: &Session, s: &'a str) -> SmallVec<[&'a str; 2]> {
}
}
/// Get GCC attribute for the provided inline heuristic.
#[cfg(feature="master")]
#[inline]
fn inline_attr<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, inline: InlineAttr) -> Option<FnAttribute<'gcc>> {
match inline {
InlineAttr::Hint => Some(FnAttribute::Inline),
InlineAttr::Always => Some(FnAttribute::AlwaysInline),
InlineAttr::Never => {
if cx.sess().target.arch != "amdgpu" {
Some(FnAttribute::NoInline)
} else {
None
}
}
InlineAttr::None => None,
}
}
/// Composite function which sets GCC attributes for a function depending on its AST (`#[attribute]`)
/// attributes.
pub fn from_fn_attrs<'gcc, 'tcx>(
@ -77,6 +99,23 @@ pub fn from_fn_attrs<'gcc, 'tcx>(
) {
let codegen_fn_attrs = cx.tcx.codegen_fn_attrs(instance.def_id());
#[cfg(feature="master")]
{
let inline =
if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::NAKED) {
InlineAttr::Never
}
else if codegen_fn_attrs.inline == InlineAttr::None && instance.def.requires_inline(cx.tcx) {
InlineAttr::Hint
}
else {
codegen_fn_attrs.inline
};
if let Some(attr) = inline_attr(cx, inline) {
func.add_attribute(attr);
}
}
let function_features =
codegen_fn_attrs.target_features.iter().map(|features| features.as_str()).collect::<Vec<&str>>();
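Taken together, the two hunks above first decide the effective inline level for a function (naked functions are forced to `InlineAttr::Never`; functions rustc requires to be inlined get at least a hint) and then map that level to a GCC function attribute, skipping `noinline` on amdgpu. A hedged, standalone restatement of that decision logic, with local names and plain strings standing in for the rustc and gccjit types:
```rust
// Illustrative stand-ins for rustc_attr::InlineAttr and gccjit::FnAttribute.
#[derive(Clone, Copy, PartialEq, Debug)]
enum Inline { None, Hint, Always, Never }

fn effective_inline(is_naked: bool, requested: Inline, requires_inline: bool) -> Inline {
    if is_naked {
        // Naked functions must never be inlined.
        Inline::Never
    } else if requested == Inline::None && requires_inline {
        // Instances rustc requires to be inlined get at least a hint.
        Inline::Hint
    } else {
        requested
    }
}

fn gcc_inline_attr(inline: Inline, target_arch: &str) -> Option<&'static str> {
    match inline {
        Inline::Hint => Some("inline"),
        Inline::Always => Some("always_inline"),
        // amdgpu does not accept noinline, so no attribute is emitted there.
        Inline::Never if target_arch != "amdgpu" => Some("noinline"),
        _ => None,
    }
}

fn main() {
    assert_eq!(gcc_inline_attr(effective_inline(true, Inline::Always, false), "x86_64"), Some("noinline"));
    assert_eq!(gcc_inline_attr(effective_inline(false, Inline::None, true), "x86_64"), Some("inline"));
    assert_eq!(gcc_inline_attr(effective_inline(false, Inline::Never, false), "amdgpu"), None);
}
```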

View File

@ -181,6 +181,8 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
})
.collect();
debug_assert_eq!(casted_args.len(), args.len());
Cow::Owned(casted_args)
}
@ -207,7 +209,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
let func_name = format!("{:?}", func_ptr);
let casted_args: Vec<_> = param_types
let mut casted_args: Vec<_> = param_types
.into_iter()
.zip(args.iter())
.enumerate()
@ -237,6 +239,11 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
})
.collect();
// NOTE: to take variadic functions into account, append the remaining arguments as-is.
for i in casted_args.len()..args.len() {
casted_args.push(args[i]);
}
Cow::Owned(casted_args)
}
@ -280,8 +287,17 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
}
}
fn function_ptr_call(&mut self, func_ptr: RValue<'gcc>, args: &[RValue<'gcc>], _funclet: Option<&Funclet>) -> RValue<'gcc> {
let gcc_func = func_ptr.get_type().dyncast_function_ptr_type().expect("function ptr");
fn function_ptr_call(&mut self, typ: Type<'gcc>, mut func_ptr: RValue<'gcc>, args: &[RValue<'gcc>], _funclet: Option<&Funclet>) -> RValue<'gcc> {
let gcc_func =
match func_ptr.get_type().dyncast_function_ptr_type() {
Some(func) => func,
None => {
// NOTE: due to opaque pointers now being used, we need to cast here.
let new_func_type = typ.dyncast_function_ptr_type().expect("function ptr");
func_ptr = self.context.new_cast(None, func_ptr, typ);
new_func_type
},
};
let func_name = format!("{:?}", func_ptr);
let previous_arg_count = args.len();
let orig_args = args;
@ -424,16 +440,17 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
self.llbb().end_with_void_return(None)
}
fn ret(&mut self, value: RValue<'gcc>) {
let value =
fn ret(&mut self, mut value: RValue<'gcc>) {
if self.structs_as_pointer.borrow().contains(&value) {
// NOTE: hack to work around a limitation of the rustc API: see comment on
// CodegenCx.structs_as_pointer
value.dereference(None).to_rvalue()
value = value.dereference(None).to_rvalue();
}
let expected_return_type = self.current_func().get_return_type();
if !expected_return_type.is_compatible_with(value.get_type()) {
// NOTE: due to opaque pointers now being used, we need to cast here.
value = self.context.new_cast(None, value, expected_return_type);
}
else {
value
};
self.llbb().end_with_return(None, value);
}
@ -719,17 +736,25 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
unimplemented!();
}
fn load(&mut self, pointee_ty: Type<'gcc>, ptr: RValue<'gcc>, _align: Align) -> RValue<'gcc> {
fn load(&mut self, pointee_ty: Type<'gcc>, ptr: RValue<'gcc>, align: Align) -> RValue<'gcc> {
let block = self.llbb();
let function = block.get_function();
// NOTE: instead of returning the dereference here, we have to assign it to a variable in
// the current basic block. Otherwise, it could be used in another basic block, causing a
// dereference after a drop, for instance.
// TODO(antoyo): handle align of the load instruction.
let ptr = self.context.new_cast(None, ptr, pointee_ty.make_pointer());
// FIXME(antoyo): this checks that we don't call get_aligned() a second time on a type.
// Ideally, we shouldn't need to do this check.
let aligned_type =
if pointee_ty == self.cx.u128_type || pointee_ty == self.cx.i128_type {
pointee_ty
}
else {
pointee_ty.get_aligned(align.bytes())
};
let ptr = self.context.new_cast(None, ptr, aligned_type.make_pointer());
let deref = ptr.dereference(None).to_rvalue();
unsafe { RETURN_VALUE_COUNT += 1 };
let loaded_value = function.new_local(None, pointee_ty, &format!("loadedValue{}", unsafe { RETURN_VALUE_COUNT }));
let loaded_value = function.new_local(None, aligned_type, &format!("loadedValue{}", unsafe { RETURN_VALUE_COUNT }));
block.add_assignment(None, loaded_value, deref);
loaded_value.to_rvalue()
}
@ -909,7 +934,9 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
self.context.new_bitcast(None, result, ptr_type)
}
fn inbounds_gep(&mut self, _typ: Type<'gcc>, ptr: RValue<'gcc>, indices: &[RValue<'gcc>]) -> RValue<'gcc> {
fn inbounds_gep(&mut self, typ: Type<'gcc>, ptr: RValue<'gcc>, indices: &[RValue<'gcc>]) -> RValue<'gcc> {
// NOTE: due to opaque pointers now being used, we need to cast here.
let ptr = self.context.new_cast(None, ptr, typ.make_pointer());
// NOTE: array indexing is always considered in bounds in GCC (TODO(antoyo): to be verified).
let mut indices = indices.into_iter();
let index = indices.next().expect("first index in inbounds_gep");
@ -938,6 +965,8 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
element.get_address(None)
}
else if let Some(struct_type) = value_type.is_struct() {
// NOTE: due to opaque pointers now being used, we need to bitcast here.
let ptr = self.bitcast_if_needed(ptr, value_type.make_pointer());
ptr.dereference_field(None, struct_type.get_field(idx as i32)).get_address(None)
}
else {
@ -1356,7 +1385,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
fn call(
&mut self,
_typ: Type<'gcc>,
typ: Type<'gcc>,
_fn_attrs: Option<&CodegenFnAttrs>,
fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>,
func: RValue<'gcc>,
@ -1370,7 +1399,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
}
else {
// If it's not a function that was defined, it's a function pointer.
self.function_ptr_call(func, args, funclet)
self.function_ptr_call(typ, func, args, funclet)
};
if let Some(_fn_abi) = fn_abi {
// TODO(bjorn3): Apply function attributes
@ -1843,7 +1872,8 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
#[cfg(feature="master")]
let (cond, element_type) = {
let then_val_vector_type = then_val.get_type().dyncast_vector().expect("vector type");
// TODO(antoyo): dyncast_vector should not require a call to unqualified.
let then_val_vector_type = then_val.get_type().unqualified().dyncast_vector().expect("vector type");
let then_val_element_type = then_val_vector_type.get_element_type();
let then_val_element_size = then_val_element_type.get_size();

View File

@ -1,5 +1,5 @@
#[cfg(feature = "master")]
use gccjit::FnAttribute;
use gccjit::{FnAttribute, VarAttribute, Visibility};
use gccjit::{Function, GlobalKind, LValue, RValue, ToRValue};
use rustc_codegen_ssa::traits::{BaseTypeMethods, ConstMethods, DerivedTypeMethods, StaticMethods};
use rustc_middle::span_bug;
@ -234,7 +234,8 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
);
if !self.tcx.is_reachable_non_generic(def_id) {
// TODO(antoyo): set visibility.
#[cfg(feature = "master")]
global.add_attribute(VarAttribute::Visibility(Visibility::Hidden));
}
global

View File

@ -132,7 +132,7 @@ fn declare_raw_fn<'gcc>(cx: &CodegenCx<'gcc, '_>, name: &str, _callconv: () /*ll
pub fn mangle_name(name: &str) -> String {
name.replace(|char: char| {
if !char.is_alphanumeric() && char != '_' {
debug_assert!("$.".contains(char), "Unsupported char in function name: {}", char);
debug_assert!("$.*".contains(char), "Unsupported char in function name {}: {}", name, char);
true
}
else {
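The hunk above is truncated; for reference, a self-contained sketch of the mangling it performs, assuming (as in the surrounding code) that every unsupported character is replaced by `_` and that only `$`, `.` and `*` are expected to appear:
```rust
fn mangle_name(name: &str) -> String {
    name.replace(|char: char| {
        if !char.is_alphanumeric() && char != '_' {
            // Anything other than '$', '.' or '*' trips the assertion in debug builds.
            debug_assert!("$.*".contains(char), "Unsupported char in function name {}: {}", name, char);
            true
        } else {
            false
        }
    }, "_")
}

fn main() {
    assert_eq!(mangle_name("llvm.x86.sse2.pause"), "llvm_x86_sse2_pause");
    assert_eq!(mangle_name("__rust$alloc"), "__rust_alloc");
}
```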

View File

@ -2967,10 +2967,6 @@ match name {
"llvm.nvvm.clz.ll" => "__nvvm_clz_ll",
"llvm.nvvm.cos.approx.f" => "__nvvm_cos_approx_f",
"llvm.nvvm.cos.approx.ftz.f" => "__nvvm_cos_approx_ftz_f",
"llvm.nvvm.cp.async.ca.shared.global.16" => "__nvvm_cp_async_ca_shared_global_16",
"llvm.nvvm.cp.async.ca.shared.global.4" => "__nvvm_cp_async_ca_shared_global_4",
"llvm.nvvm.cp.async.ca.shared.global.8" => "__nvvm_cp_async_ca_shared_global_8",
"llvm.nvvm.cp.async.cg.shared.global.16" => "__nvvm_cp_async_cg_shared_global_16",
"llvm.nvvm.cp.async.commit.group" => "__nvvm_cp_async_commit_group",
"llvm.nvvm.cp.async.mbarrier.arrive" => "__nvvm_cp_async_mbarrier_arrive",
"llvm.nvvm.cp.async.mbarrier.arrive.noinc" => "__nvvm_cp_async_mbarrier_arrive_noinc",
@ -3086,18 +3082,8 @@ match name {
"llvm.nvvm.fma.rn.f16" => "__nvvm_fma_rn_f16",
"llvm.nvvm.fma.rn.f16x2" => "__nvvm_fma_rn_f16x2",
"llvm.nvvm.fma.rn.ftz.f" => "__nvvm_fma_rn_ftz_f",
"llvm.nvvm.fma.rn.ftz.f16" => "__nvvm_fma_rn_ftz_f16",
"llvm.nvvm.fma.rn.ftz.f16x2" => "__nvvm_fma_rn_ftz_f16x2",
"llvm.nvvm.fma.rn.ftz.relu.f16" => "__nvvm_fma_rn_ftz_relu_f16",
"llvm.nvvm.fma.rn.ftz.relu.f16x2" => "__nvvm_fma_rn_ftz_relu_f16x2",
"llvm.nvvm.fma.rn.ftz.sat.f16" => "__nvvm_fma_rn_ftz_sat_f16",
"llvm.nvvm.fma.rn.ftz.sat.f16x2" => "__nvvm_fma_rn_ftz_sat_f16x2",
"llvm.nvvm.fma.rn.relu.bf16" => "__nvvm_fma_rn_relu_bf16",
"llvm.nvvm.fma.rn.relu.bf16x2" => "__nvvm_fma_rn_relu_bf16x2",
"llvm.nvvm.fma.rn.relu.f16" => "__nvvm_fma_rn_relu_f16",
"llvm.nvvm.fma.rn.relu.f16x2" => "__nvvm_fma_rn_relu_f16x2",
"llvm.nvvm.fma.rn.sat.f16" => "__nvvm_fma_rn_sat_f16",
"llvm.nvvm.fma.rn.sat.f16x2" => "__nvvm_fma_rn_sat_f16x2",
"llvm.nvvm.fma.rp.d" => "__nvvm_fma_rp_d",
"llvm.nvvm.fma.rp.f" => "__nvvm_fma_rp_f",
"llvm.nvvm.fma.rp.ftz.f" => "__nvvm_fma_rp_ftz_f",
@ -3111,32 +3097,18 @@ match name {
"llvm.nvvm.fmax.f16" => "__nvvm_fmax_f16",
"llvm.nvvm.fmax.f16x2" => "__nvvm_fmax_f16x2",
"llvm.nvvm.fmax.ftz.f" => "__nvvm_fmax_ftz_f",
"llvm.nvvm.fmax.ftz.f16" => "__nvvm_fmax_ftz_f16",
"llvm.nvvm.fmax.ftz.f16x2" => "__nvvm_fmax_ftz_f16x2",
"llvm.nvvm.fmax.ftz.nan.f" => "__nvvm_fmax_ftz_nan_f",
"llvm.nvvm.fmax.ftz.nan.f16" => "__nvvm_fmax_ftz_nan_f16",
"llvm.nvvm.fmax.ftz.nan.f16x2" => "__nvvm_fmax_ftz_nan_f16x2",
"llvm.nvvm.fmax.ftz.nan.xorsign.abs.f" => "__nvvm_fmax_ftz_nan_xorsign_abs_f",
"llvm.nvvm.fmax.ftz.nan.xorsign.abs.f16" => "__nvvm_fmax_ftz_nan_xorsign_abs_f16",
"llvm.nvvm.fmax.ftz.nan.xorsign.abs.f16x2" => "__nvvm_fmax_ftz_nan_xorsign_abs_f16x2",
"llvm.nvvm.fmax.ftz.xorsign.abs.f" => "__nvvm_fmax_ftz_xorsign_abs_f",
"llvm.nvvm.fmax.ftz.xorsign.abs.f16" => "__nvvm_fmax_ftz_xorsign_abs_f16",
"llvm.nvvm.fmax.ftz.xorsign.abs.f16x2" => "__nvvm_fmax_ftz_xorsign_abs_f16x2",
"llvm.nvvm.fmax.nan.bf16" => "__nvvm_fmax_nan_bf16",
"llvm.nvvm.fmax.nan.bf16x2" => "__nvvm_fmax_nan_bf16x2",
"llvm.nvvm.fmax.nan.f" => "__nvvm_fmax_nan_f",
"llvm.nvvm.fmax.nan.f16" => "__nvvm_fmax_nan_f16",
"llvm.nvvm.fmax.nan.f16x2" => "__nvvm_fmax_nan_f16x2",
"llvm.nvvm.fmax.nan.xorsign.abs.bf16" => "__nvvm_fmax_nan_xorsign_abs_bf16",
"llvm.nvvm.fmax.nan.xorsign.abs.bf16x2" => "__nvvm_fmax_nan_xorsign_abs_bf16x2",
"llvm.nvvm.fmax.nan.xorsign.abs.f" => "__nvvm_fmax_nan_xorsign_abs_f",
"llvm.nvvm.fmax.nan.xorsign.abs.f16" => "__nvvm_fmax_nan_xorsign_abs_f16",
"llvm.nvvm.fmax.nan.xorsign.abs.f16x2" => "__nvvm_fmax_nan_xorsign_abs_f16x2",
"llvm.nvvm.fmax.xorsign.abs.bf16" => "__nvvm_fmax_xorsign_abs_bf16",
"llvm.nvvm.fmax.xorsign.abs.bf16x2" => "__nvvm_fmax_xorsign_abs_bf16x2",
"llvm.nvvm.fmax.xorsign.abs.f" => "__nvvm_fmax_xorsign_abs_f",
"llvm.nvvm.fmax.xorsign.abs.f16" => "__nvvm_fmax_xorsign_abs_f16",
"llvm.nvvm.fmax.xorsign.abs.f16x2" => "__nvvm_fmax_xorsign_abs_f16x2",
"llvm.nvvm.fmin.bf16" => "__nvvm_fmin_bf16",
"llvm.nvvm.fmin.bf16x2" => "__nvvm_fmin_bf16x2",
"llvm.nvvm.fmin.d" => "__nvvm_fmin_d",
@ -3144,32 +3116,18 @@ match name {
"llvm.nvvm.fmin.f16" => "__nvvm_fmin_f16",
"llvm.nvvm.fmin.f16x2" => "__nvvm_fmin_f16x2",
"llvm.nvvm.fmin.ftz.f" => "__nvvm_fmin_ftz_f",
"llvm.nvvm.fmin.ftz.f16" => "__nvvm_fmin_ftz_f16",
"llvm.nvvm.fmin.ftz.f16x2" => "__nvvm_fmin_ftz_f16x2",
"llvm.nvvm.fmin.ftz.nan.f" => "__nvvm_fmin_ftz_nan_f",
"llvm.nvvm.fmin.ftz.nan.f16" => "__nvvm_fmin_ftz_nan_f16",
"llvm.nvvm.fmin.ftz.nan.f16x2" => "__nvvm_fmin_ftz_nan_f16x2",
"llvm.nvvm.fmin.ftz.nan.xorsign.abs.f" => "__nvvm_fmin_ftz_nan_xorsign_abs_f",
"llvm.nvvm.fmin.ftz.nan.xorsign.abs.f16" => "__nvvm_fmin_ftz_nan_xorsign_abs_f16",
"llvm.nvvm.fmin.ftz.nan.xorsign.abs.f16x2" => "__nvvm_fmin_ftz_nan_xorsign_abs_f16x2",
"llvm.nvvm.fmin.ftz.xorsign.abs.f" => "__nvvm_fmin_ftz_xorsign_abs_f",
"llvm.nvvm.fmin.ftz.xorsign.abs.f16" => "__nvvm_fmin_ftz_xorsign_abs_f16",
"llvm.nvvm.fmin.ftz.xorsign.abs.f16x2" => "__nvvm_fmin_ftz_xorsign_abs_f16x2",
"llvm.nvvm.fmin.nan.bf16" => "__nvvm_fmin_nan_bf16",
"llvm.nvvm.fmin.nan.bf16x2" => "__nvvm_fmin_nan_bf16x2",
"llvm.nvvm.fmin.nan.f" => "__nvvm_fmin_nan_f",
"llvm.nvvm.fmin.nan.f16" => "__nvvm_fmin_nan_f16",
"llvm.nvvm.fmin.nan.f16x2" => "__nvvm_fmin_nan_f16x2",
"llvm.nvvm.fmin.nan.xorsign.abs.bf16" => "__nvvm_fmin_nan_xorsign_abs_bf16",
"llvm.nvvm.fmin.nan.xorsign.abs.bf16x2" => "__nvvm_fmin_nan_xorsign_abs_bf16x2",
"llvm.nvvm.fmin.nan.xorsign.abs.f" => "__nvvm_fmin_nan_xorsign_abs_f",
"llvm.nvvm.fmin.nan.xorsign.abs.f16" => "__nvvm_fmin_nan_xorsign_abs_f16",
"llvm.nvvm.fmin.nan.xorsign.abs.f16x2" => "__nvvm_fmin_nan_xorsign_abs_f16x2",
"llvm.nvvm.fmin.xorsign.abs.bf16" => "__nvvm_fmin_xorsign_abs_bf16",
"llvm.nvvm.fmin.xorsign.abs.bf16x2" => "__nvvm_fmin_xorsign_abs_bf16x2",
"llvm.nvvm.fmin.xorsign.abs.f" => "__nvvm_fmin_xorsign_abs_f",
"llvm.nvvm.fmin.xorsign.abs.f16" => "__nvvm_fmin_xorsign_abs_f16",
"llvm.nvvm.fmin.xorsign.abs.f16x2" => "__nvvm_fmin_xorsign_abs_f16x2",
"llvm.nvvm.fns" => "__nvvm_fns",
"llvm.nvvm.h2f" => "__nvvm_h2f",
"llvm.nvvm.i2d.rm" => "__nvvm_i2d_rm",
@ -7895,6 +7853,10 @@ match name {
"llvm.x86.subborrow.u64" => "__builtin_ia32_subborrow_u64",
"llvm.x86.tbm.bextri.u32" => "__builtin_ia32_bextri_u32",
"llvm.x86.tbm.bextri.u64" => "__builtin_ia32_bextri_u64",
"llvm.x86.tcmmimfp16ps" => "__builtin_ia32_tcmmimfp16ps",
"llvm.x86.tcmmimfp16ps.internal" => "__builtin_ia32_tcmmimfp16ps_internal",
"llvm.x86.tcmmrlfp16ps" => "__builtin_ia32_tcmmrlfp16ps",
"llvm.x86.tcmmrlfp16ps.internal" => "__builtin_ia32_tcmmrlfp16ps_internal",
"llvm.x86.tdpbf16ps" => "__builtin_ia32_tdpbf16ps",
"llvm.x86.tdpbf16ps.internal" => "__builtin_ia32_tdpbf16ps_internal",
"llvm.x86.tdpbssd" => "__builtin_ia32_tdpbssd",

View File

@ -313,6 +313,13 @@ pub fn adjust_intrinsic_arguments<'a, 'b, 'gcc, 'tcx>(builder: &Builder<'a, 'gcc
let new_args = args.to_vec();
args = vec![new_args[1], new_args[0], new_args[2], new_args[3], new_args[4]].into();
},
"__builtin_ia32_vpshrdv_v8di" | "__builtin_ia32_vpshrdv_v4di" | "__builtin_ia32_vpshrdv_v2di" |
"__builtin_ia32_vpshrdv_v16si" | "__builtin_ia32_vpshrdv_v8si" | "__builtin_ia32_vpshrdv_v4si" |
"__builtin_ia32_vpshrdv_v32hi" | "__builtin_ia32_vpshrdv_v16hi" | "__builtin_ia32_vpshrdv_v8hi" => {
// The first two arguments are reversed, compared to LLVM.
let new_args = args.to_vec();
args = vec![new_args[1], new_args[0], new_args[2]].into();
},
_ => (),
}
}

View File

@ -551,141 +551,52 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
let context = &self.cx.context;
let result =
match width {
8 => {
8 | 16 | 32 | 64 => {
let mask = ((1u128 << width) - 1) as u64;
let (m0, m1, m2) = if width > 16 {
(
context.new_rvalue_from_long(typ, (0x5555555555555555u64 & mask) as i64),
context.new_rvalue_from_long(typ, (0x3333333333333333u64 & mask) as i64),
context.new_rvalue_from_long(typ, (0x0f0f0f0f0f0f0f0fu64 & mask) as i64),
)
} else {
(
context.new_rvalue_from_int(typ, (0x5555u64 & mask) as i32),
context.new_rvalue_from_int(typ, (0x3333u64 & mask) as i32),
context.new_rvalue_from_int(typ, (0x0f0fu64 & mask) as i32),
)
};
let one = context.new_rvalue_from_int(typ, 1);
let two = context.new_rvalue_from_int(typ, 2);
let four = context.new_rvalue_from_int(typ, 4);
// First step.
let left = self.and(value, context.new_rvalue_from_int(typ, 0xF0));
let left = self.lshr(left, context.new_rvalue_from_int(typ, 4));
let right = self.and(value, context.new_rvalue_from_int(typ, 0x0F));
let right = self.shl(right, context.new_rvalue_from_int(typ, 4));
let left = self.lshr(value, one);
let left = self.and(left, m0);
let right = self.and(value, m0);
let right = self.shl(right, one);
let step1 = self.or(left, right);
// Second step.
let left = self.and(step1, context.new_rvalue_from_int(typ, 0xCC));
let left = self.lshr(left, context.new_rvalue_from_int(typ, 2));
let right = self.and(step1, context.new_rvalue_from_int(typ, 0x33));
let right = self.shl(right, context.new_rvalue_from_int(typ, 2));
let left = self.lshr(step1, two);
let left = self.and(left, m1);
let right = self.and(step1, m1);
let right = self.shl(right, two);
let step2 = self.or(left, right);
// Third step.
let left = self.and(step2, context.new_rvalue_from_int(typ, 0xAA));
let left = self.lshr(left, context.new_rvalue_from_int(typ, 1));
let right = self.and(step2, context.new_rvalue_from_int(typ, 0x55));
let right = self.shl(right, context.new_rvalue_from_int(typ, 1));
let left = self.lshr(step2, four);
let left = self.and(left, m2);
let right = self.and(step2, m2);
let right = self.shl(right, four);
let step3 = self.or(left, right);
// Fourth step.
if width == 8 {
step3
},
16 => {
// First step.
let left = self.and(value, context.new_rvalue_from_int(typ, 0x5555));
let left = self.shl(left, context.new_rvalue_from_int(typ, 1));
let right = self.and(value, context.new_rvalue_from_int(typ, 0xAAAA));
let right = self.lshr(right, context.new_rvalue_from_int(typ, 1));
let step1 = self.or(left, right);
// Second step.
let left = self.and(step1, context.new_rvalue_from_int(typ, 0x3333));
let left = self.shl(left, context.new_rvalue_from_int(typ, 2));
let right = self.and(step1, context.new_rvalue_from_int(typ, 0xCCCC));
let right = self.lshr(right, context.new_rvalue_from_int(typ, 2));
let step2 = self.or(left, right);
// Third step.
let left = self.and(step2, context.new_rvalue_from_int(typ, 0x0F0F));
let left = self.shl(left, context.new_rvalue_from_int(typ, 4));
let right = self.and(step2, context.new_rvalue_from_int(typ, 0xF0F0));
let right = self.lshr(right, context.new_rvalue_from_int(typ, 4));
let step3 = self.or(left, right);
// Fourth step.
let left = self.and(step3, context.new_rvalue_from_int(typ, 0x00FF));
let left = self.shl(left, context.new_rvalue_from_int(typ, 8));
let right = self.and(step3, context.new_rvalue_from_int(typ, 0xFF00));
let right = self.lshr(right, context.new_rvalue_from_int(typ, 8));
let step4 = self.or(left, right);
step4
},
32 => {
// TODO(antoyo): Refactor with other implementations.
// First step.
let left = self.and(value, context.new_rvalue_from_long(typ, 0x55555555));
let left = self.shl(left, context.new_rvalue_from_long(typ, 1));
let right = self.and(value, context.new_rvalue_from_long(typ, 0xAAAAAAAA));
let right = self.lshr(right, context.new_rvalue_from_long(typ, 1));
let step1 = self.or(left, right);
// Second step.
let left = self.and(step1, context.new_rvalue_from_long(typ, 0x33333333));
let left = self.shl(left, context.new_rvalue_from_long(typ, 2));
let right = self.and(step1, context.new_rvalue_from_long(typ, 0xCCCCCCCC));
let right = self.lshr(right, context.new_rvalue_from_long(typ, 2));
let step2 = self.or(left, right);
// Third step.
let left = self.and(step2, context.new_rvalue_from_long(typ, 0x0F0F0F0F));
let left = self.shl(left, context.new_rvalue_from_long(typ, 4));
let right = self.and(step2, context.new_rvalue_from_long(typ, 0xF0F0F0F0));
let right = self.lshr(right, context.new_rvalue_from_long(typ, 4));
let step3 = self.or(left, right);
// Fourth step.
let left = self.and(step3, context.new_rvalue_from_long(typ, 0x00FF00FF));
let left = self.shl(left, context.new_rvalue_from_long(typ, 8));
let right = self.and(step3, context.new_rvalue_from_long(typ, 0xFF00FF00));
let right = self.lshr(right, context.new_rvalue_from_long(typ, 8));
let step4 = self.or(left, right);
// Fifth step.
let left = self.and(step4, context.new_rvalue_from_long(typ, 0x0000FFFF));
let left = self.shl(left, context.new_rvalue_from_long(typ, 16));
let right = self.and(step4, context.new_rvalue_from_long(typ, 0xFFFF0000));
let right = self.lshr(right, context.new_rvalue_from_long(typ, 16));
let step5 = self.or(left, right);
step5
},
64 => {
// First step.
let left = self.shl(value, context.new_rvalue_from_long(typ, 32));
let right = self.lshr(value, context.new_rvalue_from_long(typ, 32));
let step1 = self.or(left, right);
// Second step.
let left = self.and(step1, context.new_rvalue_from_long(typ, 0x0001FFFF0001FFFF));
let left = self.shl(left, context.new_rvalue_from_long(typ, 15));
let right = self.and(step1, context.new_rvalue_from_long(typ, 0xFFFE0000FFFE0000u64 as i64)); // TODO(antoyo): transmute the number instead?
let right = self.lshr(right, context.new_rvalue_from_long(typ, 17));
let step2 = self.or(left, right);
// Third step.
let left = self.lshr(step2, context.new_rvalue_from_long(typ, 10));
let left = self.xor(step2, left);
let temp = self.and(left, context.new_rvalue_from_long(typ, 0x003F801F003F801F));
let left = self.shl(temp, context.new_rvalue_from_long(typ, 10));
let left = self.or(temp, left);
let step3 = self.xor(left, step2);
// Fourth step.
let left = self.lshr(step3, context.new_rvalue_from_long(typ, 4));
let left = self.xor(step3, left);
let temp = self.and(left, context.new_rvalue_from_long(typ, 0x0E0384210E038421));
let left = self.shl(temp, context.new_rvalue_from_long(typ, 4));
let left = self.or(temp, left);
let step4 = self.xor(left, step3);
// Fifth step.
let left = self.lshr(step4, context.new_rvalue_from_long(typ, 2));
let left = self.xor(step4, left);
let temp = self.and(left, context.new_rvalue_from_long(typ, 0x2248884222488842));
let left = self.shl(temp, context.new_rvalue_from_long(typ, 2));
let left = self.or(temp, left);
let step5 = self.xor(left, step4);
step5
} else {
self.gcc_bswap(step3, width)
}
},
128 => {
// TODO(antoyo): find a more efficient implementation?
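The rewritten arm above collapses the separate 8-, 16-, 32- and 64-bit implementations into one mask-based routine: swap adjacent bits, then bit pairs, then nibbles, and finish with a byte swap for widths larger than 8. A plain-Rust, scalar sketch of that algorithm (the real code emits the same steps through the gccjit builder):
```rust
fn bit_reverse(value: u64, width: u32) -> u64 {
    // Masks restricted to the requested width, as in the builder code above.
    let mask = if width == 64 { u64::MAX } else { (1u64 << width) - 1 };
    let m0 = 0x5555_5555_5555_5555_u64 & mask;
    let m1 = 0x3333_3333_3333_3333_u64 & mask;
    let m2 = 0x0f0f_0f0f_0f0f_0f0f_u64 & mask;
    // First step: swap adjacent bits.
    let step1 = ((value >> 1) & m0) | ((value & m0) << 1);
    // Second step: swap bit pairs.
    let step2 = ((step1 >> 2) & m1) | ((step1 & m1) << 2);
    // Third step: swap nibbles.
    let step3 = ((step2 >> 4) & m2) | ((step2 & m2) << 4);
    if width == 8 {
        step3
    } else {
        // Fourth step: byte swap, restricted to the low `width` bits.
        step3.swap_bytes() >> (64 - width)
    }
}

fn main() {
    assert_eq!(bit_reverse(0b1010_1000, 8), 0b0001_0101);
    assert_eq!(bit_reverse(0xddcc, 16), 0x33bb);
    assert_eq!(bit_reverse(0xffee_ddcc, 32), 0x33bb_77ff);
}
```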

View File

@ -165,10 +165,15 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
InvalidMonomorphizationReturnIntegerType { span, name, ret_ty, out_ty }
);
let arg1 = args[0].immediate();
// NOTE: the same Rust vector type can map to distinct libgccjit types which don't
// compare as equal, so bitcast.
// FIXME(antoyo): allow comparing vector types as equal in libgccjit.
let arg2 = bx.context.new_bitcast(None, args[1].immediate(), arg1.get_type());
return Ok(compare_simd_types(
bx,
args[0].immediate(),
args[1].immediate(),
arg1,
arg2,
in_elem,
llret_ty,
cmp_op,
@ -341,7 +346,8 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
// endian and MSB-first for big endian.
let vector = args[0].immediate();
let vector_type = vector.get_type().dyncast_vector().expect("vector type");
// TODO(antoyo): dyncast_vector should not require a call to unqualified.
let vector_type = vector.get_type().unqualified().dyncast_vector().expect("vector type");
let elem_type = vector_type.get_element_type();
let expected_int_bits = in_len.max(8);
@ -848,7 +854,8 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
(true, true) => {
// Algorithm from: https://codereview.stackexchange.com/questions/115869/saturated-signed-addition
// TODO(antoyo): improve using conditional operators if possible.
let arg_type = lhs.get_type();
// TODO(antoyo): dyncast_vector should not require a call to unqualified.
let arg_type = lhs.get_type().unqualified();
// TODO(antoyo): convert lhs and rhs to unsigned.
let sum = lhs + rhs;
let vector_type = arg_type.dyncast_vector().expect("vector type");
@ -878,7 +885,8 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
res & cmp
},
(true, false) => {
let arg_type = lhs.get_type();
// TODO(antoyo): dyncast_vector should not require a call to unqualified.
let arg_type = lhs.get_type().unqualified();
// TODO(antoyo): this uses the same algorithm as saturating add, but adds the
// negative of the right operand. Find a proper subtraction algorithm.
let rhs = bx.context.new_unary_op(None, UnaryOp::Minus, arg_type, rhs);
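For context, the saturating-add arm touched above follows the linked algorithm: detect signed overflow from the operand and result signs, then clamp to the appropriate extreme. A scalar plain-Rust sketch of that idea (the real code operates lane-wise on vector values through gccjit, and the subtraction case reuses it by negating the right operand):
```rust
fn saturating_add_i32(lhs: i32, rhs: i32) -> i32 {
    let sum = lhs.wrapping_add(rhs);
    // Overflow occurred iff both operands share a sign and the wrapped sum has the other sign.
    let overflowed = (lhs >= 0) == (rhs >= 0) && (sum >= 0) != (lhs >= 0);
    if overflowed {
        // Clamp towards the sign of the operands.
        if lhs >= 0 { i32::MAX } else { i32::MIN }
    } else {
        sum
    }
}

fn main() {
    assert_eq!(saturating_add_i32(i32::MAX, 1), i32::MAX);
    assert_eq!(saturating_add_i32(i32::MIN, -1), i32::MIN);
    assert_eq!(saturating_add_i32(40, 2), 42);
}
```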

View File

@ -111,6 +111,8 @@ impl CodegenBackend for GccCodegenBackend {
}
fn init(&self, sess: &Session) {
#[cfg(feature="master")]
gccjit::set_global_personality_function_name(b"rust_eh_personality\0");
if sess.lto() != Lto::No {
sess.emit_warning(LTONotSupported {});
}

View File

@ -383,8 +383,8 @@ impl<'gcc, 'tcx> LayoutTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
unimplemented!();
}
fn fn_decl_backend_type(&self, _fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> Type<'gcc> {
// FIXME(antoyo): return correct type.
self.type_void()
fn fn_decl_backend_type(&self, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> Type<'gcc> {
let (return_type, param_types, variadic, _) = fn_abi.gcc_type(self);
self.context.new_function_pointer_type(None, return_type, &param_types, variadic)
}
}

View File

@ -214,12 +214,14 @@ function setup_rustc() {
rm config.toml || true
cat > config.toml <<EOF
changelog-seen = 2
[rust]
codegen-backends = []
deny-warnings = false
[build]
cargo = "$(which cargo)"
cargo = "$(rustup which cargo)"
local-rebuild = true
rustc = "$HOME/.rustup/toolchains/$rust_toolchain-$TARGET_TRIPLE/bin/rustc"
@ -237,7 +239,7 @@ EOF
function asm_tests() {
setup_rustc
echo "[TEST] rustc test suite"
echo "[TEST] rustc asm test suite"
RUSTC_ARGS="-Zpanic-abort-tests -Csymbol-mangling-version=v0 -Zcodegen-backend="$(pwd)"/../target/"$CHANNEL"/librustc_codegen_gcc."$dylib_ext" --sysroot "$(pwd)"/../build_sysroot/sysroot -Cpanic=abort"
COMPILETEST_FORCE_STAGE0=1 ./x.py test --run always --stage 0 tests/assembly/asm --rustc-args "$RUSTC_ARGS"
}
@ -338,6 +340,8 @@ function test_rustc() {
for test in $(rg -i --files-with-matches "//(\[\w+\])?~|// error-pattern:|// build-fail|// run-fail|-Cllvm-args" tests/ui); do
rm $test
done
rm tests/ui/consts/const_cmp_type_id.rs
rm tests/ui/consts/issue-73976-monomorphic.rs
git checkout -- tests/ui/issues/auxiliary/issue-3136-a.rs # contains //~ERROR, but shouldn't be removed