Merge from rustc

The Miri Conjob Bot 2024-01-26 05:09:55 +00:00
commit b07da7103f
254 changed files with 4580 additions and 2955 deletions


@@ -212,9 +212,9 @@ checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711"
 [[package]]
 name = "askama"
-version = "0.12.0"
+version = "0.12.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "47cbc3cf73fa8d9833727bbee4835ba5c421a0d65b72daf9a7b5d0e0f9cfb57e"
+checksum = "b79091df18a97caea757e28cd2d5fda49c6cd4bd01ddffd7ff01ace0c0ad2c28"
 dependencies = [
  "askama_derive",
  "askama_escape",
@@ -222,14 +222,14 @@ dependencies = [
 [[package]]
 name = "askama_derive"
-version = "0.12.1"
+version = "0.12.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c22fbe0413545c098358e56966ff22cdd039e10215ae213cfbd65032b119fc94"
+checksum = "19fe8d6cb13c4714962c072ea496f3392015f0989b1a2847bb4b2d9effd71d83"
 dependencies = [
+ "askama_parser",
  "basic-toml",
  "mime",
  "mime_guess",
- "nom",
  "proc-macro2",
  "quote",
  "serde",
@@ -242,6 +242,15 @@ version = "0.10.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "619743e34b5ba4e9703bba34deac3427c72507c7159f5fd030aea8cac0cfe341"

+[[package]]
+name = "askama_parser"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "acb1161c6b64d1c3d83108213c2a2533a342ac225aabd0bda218278c2ddb00c0"
+dependencies = [
+ "nom",
+]
+
 [[package]]
 name = "autocfg"
 version = "1.1.0"
@@ -3654,10 +3663,10 @@ version = "0.0.0"
 dependencies = [
  "arrayvec",
  "bitflags 2.4.1",
+ "either",
  "elsa",
  "ena",
  "indexmap",
- "itertools",
  "jobserver",
  "libc",
  "measureme",


@@ -375,11 +375,11 @@ pub fn walk_item<'a, V: Visitor<'a>>(visitor: &mut V, item: &'a Item) {
         }
         ItemKind::MacCall(mac) => visitor.visit_mac_call(mac),
         ItemKind::MacroDef(ts) => visitor.visit_mac_def(ts, item.id),
-        ItemKind::Delegation(box Delegation { id: _, qself, path, body }) => {
+        ItemKind::Delegation(box Delegation { id, qself, path, body }) => {
             if let Some(qself) = qself {
                 visitor.visit_ty(&qself.ty);
             }
-            walk_path(visitor, path);
+            visitor.visit_path(path, *id);
             if let Some(body) = body {
                 visitor.visit_block(body);
             }
@@ -502,7 +502,7 @@ where
         }
         GenericArgs::Parenthesized(data) => {
             walk_list!(visitor, visit_ty, &data.inputs);
-            walk_fn_ret_ty(visitor, &data.output);
+            visitor.visit_fn_ret_ty(&data.output);
         }
     }
 }
@@ -713,11 +713,11 @@ pub fn walk_assoc_item<'a, V: Visitor<'a>>(visitor: &mut V, item: &'a AssocItem,
         AssocItemKind::MacCall(mac) => {
             visitor.visit_mac_call(mac);
         }
-        AssocItemKind::Delegation(box Delegation { id: _, qself, path, body }) => {
+        AssocItemKind::Delegation(box Delegation { id, qself, path, body }) => {
             if let Some(qself) = qself {
                 visitor.visit_ty(&qself.ty);
             }
-            walk_path(visitor, path);
+            visitor.visit_path(path, *id);
             if let Some(body) = body {
                 visitor.visit_block(body);
             }


@@ -19,19 +19,6 @@ pub fn expand_deriving_eq(
 ) {
     let span = cx.with_def_site_ctxt(span);

-    let structural_trait_def = TraitDef {
-        span,
-        path: path_std!(marker::StructuralEq),
-        skip_path_as_bound: true, // crucial!
-        needs_copy_as_bound_if_packed: false,
-        additional_bounds: Vec::new(),
-        supports_unions: true,
-        methods: Vec::new(),
-        associated_types: Vec::new(),
-        is_const: false,
-    };
-    structural_trait_def.expand(cx, mitem, item, push);
-
     let trait_def = TraitDef {
         span,
         path: path_std!(cmp::Eq),


@@ -104,9 +104,6 @@ unsafe impl<T: ?Sized> Freeze for &mut T {}
 #[lang = "structural_peq"]
 pub trait StructuralPartialEq {}

-#[lang = "structural_teq"]
-pub trait StructuralEq {}
-
 #[lang = "not"]
 pub trait Not {
     type Output;


@@ -443,6 +443,12 @@ fn codegen_regular_intrinsic_call<'tcx>(
             ret.write_cvalue(fx, a);
         }

+        sym::is_val_statically_known => {
+            intrinsic_args!(fx, args => (_a); intrinsic);
+
+            let res = fx.bcx.ins().iconst(types::I8, 0);
+            ret.write_cvalue(fx, CValue::by_val(res, ret.layout()));
+        }
+
         sym::breakpoint => {
             intrinsic_args!(fx, args => (); intrinsic);


@@ -100,9 +100,6 @@ unsafe impl<T: ?Sized> Freeze for &mut T {}
 #[lang = "structural_peq"]
 pub trait StructuralPartialEq {}

-#[lang = "structural_teq"]
-pub trait StructuralEq {}
-
 #[lang = "not"]
 pub trait Not {
     type Output;


@@ -196,15 +196,16 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
         let mut functions = FxHashMap::default();
         let builtins = [
-            "__builtin_unreachable", "abort", "__builtin_expect", "__builtin_add_overflow", "__builtin_mul_overflow",
-            "__builtin_saddll_overflow", /*"__builtin_sadd_overflow",*/ "__builtin_smulll_overflow", /*"__builtin_smul_overflow",*/
+            "__builtin_unreachable", "abort", "__builtin_expect", /*"__builtin_expect_with_probability",*/
+            "__builtin_constant_p", "__builtin_add_overflow", "__builtin_mul_overflow", "__builtin_saddll_overflow",
+            /*"__builtin_sadd_overflow",*/ "__builtin_smulll_overflow", /*"__builtin_smul_overflow",*/
             "__builtin_ssubll_overflow", /*"__builtin_ssub_overflow",*/ "__builtin_sub_overflow", "__builtin_uaddll_overflow",
             "__builtin_uadd_overflow", "__builtin_umulll_overflow", "__builtin_umul_overflow", "__builtin_usubll_overflow",
             "__builtin_usub_overflow", "sqrtf", "sqrt", "__builtin_powif", "__builtin_powi", "sinf", "sin", "cosf", "cos",
             "powf", "pow", "expf", "exp", "exp2f", "exp2", "logf", "log", "log10f", "log10", "log2f", "log2", "fmaf",
             "fma", "fabsf", "fabs", "fminf", "fmin", "fmaxf", "fmax", "copysignf", "copysign", "floorf", "floor", "ceilf",
             "ceil", "truncf", "trunc", "rintf", "rint", "nearbyintf", "nearbyint", "roundf", "round",
+            "__builtin_expect_with_probability",
         ];

         for builtin in builtins.iter() {


@@ -123,6 +123,12 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
             sym::unlikely => {
                 self.expect(args[0].immediate(), false)
             }
+            sym::is_val_statically_known => {
+                let a = args[0].immediate();
+                let builtin = self.context.get_builtin_function("__builtin_constant_p");
+                let res = self.context.new_call(None, builtin, &[a]);
+                self.icmp(IntPredicate::IntEQ, res, self.const_i32(0))
+            }
             kw::Try => {
                 try_intrinsic(
                     self,


@@ -61,9 +61,6 @@ mod libc {
 #[lang = "structural_peq"]
 pub trait StructuralPartialEq {}

-#[lang = "structural_teq"]
-pub trait StructuralEq {}
-
 #[lang = "drop_in_place"]
 #[allow(unconditional_recursion)]
 pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {


@@ -916,6 +916,20 @@ impl<'ll> CodegenCx<'ll, '_> {
         ifn!("llvm.lifetime.start.p0i8", fn(t_i64, ptr) -> void);
         ifn!("llvm.lifetime.end.p0i8", fn(t_i64, ptr) -> void);

+        // FIXME: This is an infinitesimally small portion of the types you can
+        // pass to this intrinsic, if we can ever lazily register intrinsics we
+        // should register these when they're used, that way any type can be
+        // passed.
+        ifn!("llvm.is.constant.i1", fn(i1) -> i1);
+        ifn!("llvm.is.constant.i8", fn(t_i8) -> i1);
+        ifn!("llvm.is.constant.i16", fn(t_i16) -> i1);
+        ifn!("llvm.is.constant.i32", fn(t_i32) -> i1);
+        ifn!("llvm.is.constant.i64", fn(t_i64) -> i1);
+        ifn!("llvm.is.constant.i128", fn(t_i128) -> i1);
+        ifn!("llvm.is.constant.isize", fn(t_isize) -> i1);
+        ifn!("llvm.is.constant.f32", fn(t_f32) -> i1);
+        ifn!("llvm.is.constant.f64", fn(t_f64) -> i1);
+
         ifn!("llvm.expect.i1", fn(i1, i1) -> i1);
         ifn!("llvm.eh.typeid.for", fn(ptr) -> t_i32);
         ifn!("llvm.localescape", fn(...) -> void);


@@ -119,6 +119,10 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
             sym::likely => {
                 self.call_intrinsic("llvm.expect.i1", &[args[0].immediate(), self.const_bool(true)])
             }
+            sym::is_val_statically_known => self.call_intrinsic(
+                &format!("llvm.is.constant.{:?}", args[0].layout.immediate_llvm_type(self.cx)),
+                &[args[0].immediate()],
+            ),
             sym::unlikely => self
                 .call_intrinsic("llvm.expect.i1", &[args[0].immediate(), self.const_bool(false)]),
             kw::Try => {


@@ -52,6 +52,15 @@ use std::path::{Path, PathBuf};
 use std::process::{ExitStatus, Output, Stdio};
 use std::{env, fmt, fs, io, mem, str};

+#[derive(Default)]
+pub struct SearchPaths(OnceCell<Vec<PathBuf>>);
+
+impl SearchPaths {
+    pub(super) fn get(&self, sess: &Session) -> &[PathBuf] {
+        self.0.get_or_init(|| archive_search_paths(sess))
+    }
+}
+
 pub fn ensure_removed(dcx: &DiagCtxt, path: &Path) {
     if let Err(e) = fs::remove_file(path) {
         if e.kind() != io::ErrorKind::NotFound {
@@ -1265,7 +1274,7 @@ fn link_sanitizer_runtime(
         let path = find_sanitizer_runtime(sess, &filename);
         let rpath = path.to_str().expect("non-utf8 component in path");
         linker.args(&["-Wl,-rpath", "-Xlinker", rpath]);
-        linker.link_dylib(&filename, false, true);
+        linker.link_dylib_by_name(&filename, false, true);
     } else if sess.target.is_like_msvc && flavor == LinkerFlavor::Msvc(Lld::No) && name == "asan" {
         // MSVC provides the `/INFERASANLIBS` argument to automatically find the
         // compatible ASAN library.
@@ -1273,7 +1282,7 @@ fn link_sanitizer_runtime(
     } else {
         let filename = format!("librustc{channel}_rt.{name}.a");
         let path = find_sanitizer_runtime(sess, &filename).join(&filename);
-        linker.link_whole_rlib(&path);
+        linker.link_staticlib_by_path(&path, true);
     }
 }
@@ -2445,7 +2454,7 @@ fn add_native_libs_from_crate(
     archive_builder_builder: &dyn ArchiveBuilderBuilder,
     codegen_results: &CodegenResults,
     tmpdir: &Path,
-    search_paths: &OnceCell<Vec<PathBuf>>,
+    search_paths: &SearchPaths,
     bundled_libs: &FxHashSet<Symbol>,
     cnum: CrateNum,
     link_static: bool,
@@ -2505,28 +2514,16 @@ fn add_native_libs_from_crate(
                     if let Some(filename) = lib.filename {
                         // If rlib contains native libs as archives, they are unpacked to tmpdir.
                         let path = tmpdir.join(filename.as_str());
-                        if whole_archive {
-                            cmd.link_whole_rlib(&path);
-                        } else {
-                            cmd.link_rlib(&path);
-                        }
+                        cmd.link_staticlib_by_path(&path, whole_archive);
                     }
                 } else {
-                    if whole_archive {
-                        cmd.link_whole_staticlib(
-                            name,
-                            verbatim,
-                            search_paths.get_or_init(|| archive_search_paths(sess)),
-                        );
-                    } else {
-                        cmd.link_staticlib(name, verbatim)
-                    }
+                    cmd.link_staticlib_by_name(name, verbatim, whole_archive, search_paths);
                 }
             }
         }
         NativeLibKind::Dylib { as_needed } => {
             if link_dynamic {
-                cmd.link_dylib(name, verbatim, as_needed.unwrap_or(true))
+                cmd.link_dylib_by_name(name, verbatim, as_needed.unwrap_or(true))
             }
         }
         NativeLibKind::Unspecified => {
@@ -2534,17 +2531,17 @@ fn add_native_libs_from_crate(
             // link kind is unspecified.
             if !link_output_kind.can_link_dylib() && !sess.target.crt_static_allows_dylibs {
                 if link_static {
-                    cmd.link_staticlib(name, verbatim)
+                    cmd.link_staticlib_by_name(name, verbatim, false, search_paths);
                 }
             } else {
                 if link_dynamic {
-                    cmd.link_dylib(name, verbatim, true);
+                    cmd.link_dylib_by_name(name, verbatim, true);
                 }
             }
         }
         NativeLibKind::Framework { as_needed } => {
             if link_dynamic {
-                cmd.link_framework(name, as_needed.unwrap_or(true))
+                cmd.link_framework_by_name(name, verbatim, as_needed.unwrap_or(true))
             }
         }
         NativeLibKind::RawDylib => {
@@ -2581,7 +2578,7 @@ fn add_local_native_libraries(
         }
     }

-    let search_paths = OnceCell::new();
+    let search_paths = SearchPaths::default();
     // All static and dynamic native library dependencies are linked to the local crate.
     let link_static = true;
     let link_dynamic = true;
@@ -2623,7 +2620,7 @@ fn add_upstream_rust_crates<'a>(
         .find(|(ty, _)| *ty == crate_type)
         .expect("failed to find crate type in dependency format list");

-    let search_paths = OnceCell::new();
+    let search_paths = SearchPaths::default();
     for &cnum in &codegen_results.crate_info.used_crates {
         // We may not pass all crates through to the linker. Some crates may appear statically in
         // an existing dylib, meaning we'll pick up all the symbols from the dylib.
@@ -2698,7 +2695,7 @@ fn add_upstream_native_libraries(
     tmpdir: &Path,
     link_output_kind: LinkOutputKind,
 ) {
-    let search_path = OnceCell::new();
+    let search_paths = SearchPaths::default();
     for &cnum in &codegen_results.crate_info.used_crates {
         // Static libraries are not linked here, they are linked in `add_upstream_rust_crates`.
         // FIXME: Merge this function to `add_upstream_rust_crates` so that all native libraries
@@ -2720,7 +2717,7 @@ fn add_upstream_native_libraries(
             archive_builder_builder,
             codegen_results,
             tmpdir,
-            &search_path,
+            &search_paths,
             &Default::default(),
             cnum,
             link_static,
@@ -2791,7 +2788,7 @@ fn add_static_crate<'a>(
         } else {
             fix_windows_verbatim_for_gcc(path)
         };
-        cmd.link_rlib(&rlib_path);
+        cmd.link_staticlib_by_path(&rlib_path, false);
     };

     if !are_upstream_rust_objects_already_included(sess)
@@ -2859,13 +2856,24 @@ fn add_dynamic_crate(cmd: &mut dyn Linker, sess: &Session, cratepath: &Path) {
     // Just need to tell the linker about where the library lives and
     // what its name is
     let parent = cratepath.parent();
+
+    // When producing a dll, the MSVC linker may not actually emit a
+    // `foo.lib` file if the dll doesn't actually export any symbols, so we
+    // check to see if the file is there and just omit linking to it if it's
+    // not present.
+    if sess.target.is_like_msvc && !cratepath.with_extension("dll.lib").exists() {
+        return;
+    }
+
     if let Some(dir) = parent {
         cmd.include_path(&rehome_sysroot_lib_dir(sess, dir));
     }
-    let stem = cratepath.file_stem().unwrap().to_str().unwrap();
+    // "<dir>/name.dll -> name.dll" on windows-msvc
+    // "<dir>/name.dll -> name" on windows-gnu
+    // "<dir>/libname.<ext> -> name" elsewhere
+    let stem = if sess.target.is_like_msvc { cratepath.file_name() } else { cratepath.file_stem() };
+    let stem = stem.unwrap().to_str().unwrap();
     // Convert library file-stem into a cc -l argument.
     let prefix = if stem.starts_with("lib") && !sess.target.is_like_windows { 3 } else { 0 };
-    cmd.link_rust_dylib(&stem[prefix..], parent.unwrap_or_else(|| Path::new("")));
+    cmd.link_dylib_by_name(&stem[prefix..], false, true);
 }

 fn relevant_lib(sess: &Session, lib: &NativeLib) -> bool {
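
A note on the new SearchPaths wrapper introduced at the top of this file: it memoizes the archive search paths so they are computed at most once per link invocation, and only if some native library actually needs them. The same pattern in isolation, as a hedged sketch (std-only; compute_paths here is a made-up stand-in for archive_search_paths):

use std::cell::OnceCell;
use std::path::PathBuf;

#[derive(Default)]
struct SearchPaths(OnceCell<Vec<PathBuf>>);

impl SearchPaths {
    // The first call runs `compute_paths`; later calls return the cached slice.
    fn get(&self, compute_paths: impl FnOnce() -> Vec<PathBuf>) -> &[PathBuf] {
        self.0.get_or_init(compute_paths)
    }
}

fn main() {
    let paths = SearchPaths::default();
    let first = paths.get(|| vec![PathBuf::from("/usr/lib")]);
    // The closure below never runs: the cell is already initialized.
    let again = paths.get(|| unreachable!("already initialized"));
    assert_eq!(first, again);
}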


@@ -1,5 +1,6 @@
 use super::command::Command;
 use super::symbol_export;
+use crate::back::link::SearchPaths;
 use crate::errors;

 use rustc_span::symbol::sym;
@@ -166,13 +167,18 @@ pub fn get_linker<'a>(
 pub trait Linker {
     fn cmd(&mut self) -> &mut Command;
     fn set_output_kind(&mut self, output_kind: LinkOutputKind, out_filename: &Path);
-    fn link_dylib(&mut self, lib: &str, verbatim: bool, as_needed: bool);
-    fn link_rust_dylib(&mut self, lib: &str, path: &Path);
-    fn link_framework(&mut self, framework: &str, as_needed: bool);
-    fn link_staticlib(&mut self, lib: &str, verbatim: bool);
-    fn link_rlib(&mut self, lib: &Path);
-    fn link_whole_rlib(&mut self, lib: &Path);
-    fn link_whole_staticlib(&mut self, lib: &str, verbatim: bool, search_path: &[PathBuf]);
+    fn link_dylib_by_name(&mut self, name: &str, verbatim: bool, as_needed: bool);
+    fn link_framework_by_name(&mut self, _name: &str, _verbatim: bool, _as_needed: bool) {
+        bug!("framework linked with unsupported linker")
+    }
+    fn link_staticlib_by_name(
+        &mut self,
+        name: &str,
+        verbatim: bool,
+        whole_archive: bool,
+        search_paths: &SearchPaths,
+    );
+    fn link_staticlib_by_path(&mut self, path: &Path, whole_archive: bool);
     fn include_path(&mut self, path: &Path);
     fn framework_path(&mut self, path: &Path);
     fn output_filename(&mut self, path: &Path);
@@ -432,8 +438,8 @@ impl<'a> Linker for GccLinker<'a> {
         }
     }

-    fn link_dylib(&mut self, lib: &str, verbatim: bool, as_needed: bool) {
-        if self.sess.target.os == "illumos" && lib == "c" {
+    fn link_dylib_by_name(&mut self, name: &str, verbatim: bool, as_needed: bool) {
+        if self.sess.target.os == "illumos" && name == "c" {
             // libc will be added via late_link_args on illumos so that it will
             // appear last in the library search order.
             // FIXME: This should be replaced by a more complete and generic
@@ -454,7 +460,7 @@ impl<'a> Linker for GccLinker<'a> {
             }
         }
         self.hint_dynamic();
-        self.cmd.arg(format!("-l{}{lib}", if verbatim && self.is_gnu { ":" } else { "" },));
+        self.cmd.arg(format!("-l{}{name}", if verbatim && self.is_gnu { ":" } else { "" },));
         if !as_needed {
             if self.sess.target.is_like_osx {
                 // See above FIXME comment
@@ -463,14 +469,56 @@ impl<'a> Linker for GccLinker<'a> {
             }
         }
     }

-    fn link_staticlib(&mut self, lib: &str, verbatim: bool) {
-        self.hint_static();
-        self.cmd.arg(format!("-l{}{lib}", if verbatim && self.is_gnu { ":" } else { "" },));
-    }
-
-    fn link_rlib(&mut self, lib: &Path) {
-        self.hint_static();
-        self.cmd.arg(lib);
-    }
+    fn link_framework_by_name(&mut self, name: &str, _verbatim: bool, as_needed: bool) {
+        self.hint_dynamic();
+        if !as_needed {
+            // FIXME(81490): ld64 as of macOS 11 supports the -needed_framework
+            // flag but we have no way to detect that here.
+            // self.cmd.arg("-needed_framework").arg(name);
+            self.sess.dcx().emit_warn(errors::Ld64UnimplementedModifier);
+        }
+        self.cmd.arg("-framework").arg(name);
+    }
+
+    fn link_staticlib_by_name(
+        &mut self,
+        name: &str,
+        verbatim: bool,
+        whole_archive: bool,
+        search_paths: &SearchPaths,
+    ) {
+        self.hint_static();
+        let colon = if verbatim && self.is_gnu { ":" } else { "" };
+        if !whole_archive {
+            self.cmd.arg(format!("-l{colon}{name}"));
+        } else if self.sess.target.is_like_osx {
+            // -force_load is the macOS equivalent of --whole-archive, but it
+            // involves passing the full path to the library to link.
+            self.linker_arg("-force_load");
+            let search_paths = search_paths.get(self.sess);
+            self.linker_arg(find_native_static_library(name, verbatim, search_paths, self.sess));
+        } else {
+            self.linker_arg("--whole-archive");
+            self.cmd.arg(format!("-l{colon}{name}"));
+            self.linker_arg("--no-whole-archive");
+        }
+    }
+
+    fn link_staticlib_by_path(&mut self, path: &Path, whole_archive: bool) {
+        self.hint_static();
+        if !whole_archive {
+            self.cmd.arg(path);
+        } else if self.sess.target.is_like_osx {
+            self.linker_arg("-force_load");
+            self.linker_arg(path);
+        } else {
+            self.linker_arg("--whole-archive");
+            self.linker_arg(path);
+            self.linker_arg("--no-whole-archive");
+        }
+    }

     fn include_path(&mut self, path: &Path) {
         self.cmd.arg("-L").arg(path);
     }
@@ -493,55 +541,6 @@ impl<'a> Linker for GccLinker<'a> {
         self.linker_args(&["-z", "norelro"]);
     }

-    fn link_rust_dylib(&mut self, lib: &str, _path: &Path) {
-        self.hint_dynamic();
-        self.cmd.arg(format!("-l{lib}"));
-    }
-
-    fn link_framework(&mut self, framework: &str, as_needed: bool) {
-        self.hint_dynamic();
-        if !as_needed {
-            // FIXME(81490): ld64 as of macOS 11 supports the -needed_framework
-            // flag but we have no way to detect that here.
-            // self.cmd.arg("-needed_framework").arg(framework);
-            self.sess.dcx().emit_warn(errors::Ld64UnimplementedModifier);
-        }
-        self.cmd.arg("-framework").arg(framework);
-    }
-
-    // Here we explicitly ask that the entire archive is included into the
-    // result artifact. For more details see #15460, but the gist is that
-    // the linker will strip away any unused objects in the archive if we
-    // don't otherwise explicitly reference them. This can occur for
-    // libraries which are just providing bindings, libraries with generic
-    // functions, etc.
-    fn link_whole_staticlib(&mut self, lib: &str, verbatim: bool, search_path: &[PathBuf]) {
-        self.hint_static();
-        let target = &self.sess.target;
-        if !target.is_like_osx {
-            self.linker_arg("--whole-archive");
-            self.cmd.arg(format!("-l{}{lib}", if verbatim && self.is_gnu { ":" } else { "" },));
-            self.linker_arg("--no-whole-archive");
-        } else {
-            // -force_load is the macOS equivalent of --whole-archive, but it
-            // involves passing the full path to the library to link.
-            self.linker_arg("-force_load");
-            let lib = find_native_static_library(lib, verbatim, search_path, self.sess);
-            self.linker_arg(&lib);
-        }
-    }
-
-    fn link_whole_rlib(&mut self, lib: &Path) {
-        self.hint_static();
-        if self.sess.target.is_like_osx {
-            self.linker_arg("-force_load");
-            self.linker_arg(&lib);
-        } else {
-            self.linker_args(&[OsString::from("--whole-archive"), lib.into()]);
-            self.linker_arg("--no-whole-archive");
-        }
-    }
-
     fn gc_sections(&mut self, keep_metadata: bool) {
         // The dead_strip option to the linker specifies that functions and data
         // unreachable by the entry point will be removed. This is quite useful
@@ -821,9 +820,32 @@ impl<'a> Linker for MsvcLinker<'a> {
         }
     }

-    fn link_rlib(&mut self, lib: &Path) {
-        self.cmd.arg(lib);
+    fn link_dylib_by_name(&mut self, name: &str, verbatim: bool, _as_needed: bool) {
+        self.cmd.arg(format!("{}{}", name, if verbatim { "" } else { ".lib" }));
     }
+
+    fn link_staticlib_by_name(
+        &mut self,
+        name: &str,
+        verbatim: bool,
+        whole_archive: bool,
+        _search_paths: &SearchPaths,
+    ) {
+        let prefix = if whole_archive { "/WHOLEARCHIVE:" } else { "" };
+        let suffix = if verbatim { "" } else { ".lib" };
+        self.cmd.arg(format!("{prefix}{name}{suffix}"));
+    }
+
+    fn link_staticlib_by_path(&mut self, path: &Path, whole_archive: bool) {
+        if !whole_archive {
+            self.cmd.arg(path);
+        } else {
+            let mut arg = OsString::from("/WHOLEARCHIVE:");
+            arg.push(path);
+            self.cmd.arg(arg);
+        }
+    }
+
     fn add_object(&mut self, path: &Path) {
         self.cmd.arg(path);
     }
@@ -845,25 +867,6 @@ impl<'a> Linker for MsvcLinker<'a> {
         self.cmd.arg("/OPT:NOREF,NOICF");
     }

-    fn link_dylib(&mut self, lib: &str, verbatim: bool, _as_needed: bool) {
-        self.cmd.arg(format!("{}{}", lib, if verbatim { "" } else { ".lib" }));
-    }
-
-    fn link_rust_dylib(&mut self, lib: &str, path: &Path) {
-        // When producing a dll, the MSVC linker may not actually emit a
-        // `foo.lib` file if the dll doesn't actually export any symbols, so we
-        // check to see if the file is there and just omit linking to it if it's
-        // not present.
-        let name = format!("{lib}.dll.lib");
-        if path.join(&name).exists() {
-            self.cmd.arg(name);
-        }
-    }
-
-    fn link_staticlib(&mut self, lib: &str, verbatim: bool) {
-        self.cmd.arg(format!("{}{}", lib, if verbatim { "" } else { ".lib" }));
-    }
-
     fn full_relro(&mut self) {
         // noop
     }
@@ -899,18 +902,7 @@ impl<'a> Linker for MsvcLinker<'a> {
     fn framework_path(&mut self, _path: &Path) {
         bug!("frameworks are not supported on windows")
     }
-    fn link_framework(&mut self, _framework: &str, _as_needed: bool) {
-        bug!("frameworks are not supported on windows")
-    }
-
-    fn link_whole_staticlib(&mut self, lib: &str, verbatim: bool, _search_path: &[PathBuf]) {
-        self.cmd.arg(format!("/WHOLEARCHIVE:{}{}", lib, if verbatim { "" } else { ".lib" }));
-    }
-    fn link_whole_rlib(&mut self, path: &Path) {
-        let mut arg = OsString::from("/WHOLEARCHIVE:");
-        arg.push(path);
-        self.cmd.arg(arg);
-    }
+
     fn optimize(&mut self) {
         // Needs more investigation of `/OPT` arguments
     }
@@ -1057,12 +1049,27 @@ impl<'a> Linker for EmLinker<'a> {
     fn set_output_kind(&mut self, _output_kind: LinkOutputKind, _out_filename: &Path) {}

-    fn include_path(&mut self, path: &Path) {
-        self.cmd.arg("-L").arg(path);
+    fn link_dylib_by_name(&mut self, name: &str, _verbatim: bool, _as_needed: bool) {
+        // Emscripten always links statically
+        self.cmd.arg("-l").arg(name);
     }

-    fn link_staticlib(&mut self, lib: &str, _verbatim: bool) {
-        self.cmd.arg("-l").arg(lib);
+    fn link_staticlib_by_name(
+        &mut self,
+        name: &str,
+        _verbatim: bool,
+        _whole_archive: bool,
+        _search_paths: &SearchPaths,
+    ) {
+        self.cmd.arg("-l").arg(name);
+    }
+
+    fn link_staticlib_by_path(&mut self, path: &Path, _whole_archive: bool) {
+        self.cmd.arg(path);
+    }
+
+    fn include_path(&mut self, path: &Path) {
+        self.cmd.arg("-L").arg(path);
     }

     fn output_filename(&mut self, path: &Path) {
@@ -1073,29 +1080,6 @@ impl<'a> Linker for EmLinker<'a> {
         self.cmd.arg(path);
     }

-    fn link_dylib(&mut self, lib: &str, verbatim: bool, _as_needed: bool) {
-        // Emscripten always links statically
-        self.link_staticlib(lib, verbatim);
-    }
-
-    fn link_whole_staticlib(&mut self, lib: &str, verbatim: bool, _search_path: &[PathBuf]) {
-        // not supported?
-        self.link_staticlib(lib, verbatim);
-    }
-
-    fn link_whole_rlib(&mut self, lib: &Path) {
-        // not supported?
-        self.link_rlib(lib);
-    }
-
-    fn link_rust_dylib(&mut self, lib: &str, _path: &Path) {
-        self.link_dylib(lib, false, true);
-    }
-
-    fn link_rlib(&mut self, lib: &Path) {
-        self.add_object(lib);
-    }
-
     fn full_relro(&mut self) {
         // noop
     }
@@ -1112,10 +1096,6 @@ impl<'a> Linker for EmLinker<'a> {
         bug!("frameworks are not supported on Emscripten")
     }

-    fn link_framework(&mut self, _framework: &str, _as_needed: bool) {
-        bug!("frameworks are not supported on Emscripten")
-    }
-
     fn gc_sections(&mut self, _keep_metadata: bool) {
         // noop
     }
@@ -1249,16 +1229,30 @@ impl<'a> Linker for WasmLd<'a> {
         }
     }

-    fn link_dylib(&mut self, lib: &str, _verbatim: bool, _as_needed: bool) {
-        self.cmd.arg("-l").arg(lib);
+    fn link_dylib_by_name(&mut self, name: &str, _verbatim: bool, _as_needed: bool) {
+        self.cmd.arg("-l").arg(name);
     }

-    fn link_staticlib(&mut self, lib: &str, _verbatim: bool) {
-        self.cmd.arg("-l").arg(lib);
+    fn link_staticlib_by_name(
+        &mut self,
+        name: &str,
+        _verbatim: bool,
+        whole_archive: bool,
+        _search_paths: &SearchPaths,
+    ) {
+        if !whole_archive {
+            self.cmd.arg("-l").arg(name);
+        } else {
+            self.cmd.arg("--whole-archive").arg("-l").arg(name).arg("--no-whole-archive");
+        }
     }

-    fn link_rlib(&mut self, lib: &Path) {
-        self.cmd.arg(lib);
+    fn link_staticlib_by_path(&mut self, path: &Path, whole_archive: bool) {
+        if !whole_archive {
+            self.cmd.arg(path);
+        } else {
+            self.cmd.arg("--whole-archive").arg(path).arg("--no-whole-archive");
+        }
     }

     fn include_path(&mut self, path: &Path) {
@@ -1283,22 +1277,6 @@ impl<'a> Linker for WasmLd<'a> {
     fn no_relro(&mut self) {}

-    fn link_rust_dylib(&mut self, lib: &str, _path: &Path) {
-        self.cmd.arg("-l").arg(lib);
-    }
-
-    fn link_framework(&mut self, _framework: &str, _as_needed: bool) {
-        panic!("frameworks not supported")
-    }
-
-    fn link_whole_staticlib(&mut self, lib: &str, _verbatim: bool, _search_path: &[PathBuf]) {
-        self.cmd.arg("--whole-archive").arg("-l").arg(lib).arg("--no-whole-archive");
-    }
-
-    fn link_whole_rlib(&mut self, lib: &Path) {
-        self.cmd.arg("--whole-archive").arg(lib).arg("--no-whole-archive");
-    }
-
     fn gc_sections(&mut self, _keep_metadata: bool) {
         self.cmd.arg("--gc-sections");
     }
@@ -1398,17 +1376,40 @@ pub struct L4Bender<'a> {
 }

 impl<'a> Linker for L4Bender<'a> {
-    fn link_dylib(&mut self, _lib: &str, _verbatim: bool, _as_needed: bool) {
+    fn cmd(&mut self) -> &mut Command {
+        &mut self.cmd
+    }
+
+    fn set_output_kind(&mut self, _output_kind: LinkOutputKind, _out_filename: &Path) {}
+
+    fn link_dylib_by_name(&mut self, _name: &str, _verbatim: bool, _as_needed: bool) {
         bug!("dylibs are not supported on L4Re");
     }

-    fn link_staticlib(&mut self, lib: &str, _verbatim: bool) {
+    fn link_staticlib_by_name(
+        &mut self,
+        name: &str,
+        _verbatim: bool,
+        whole_archive: bool,
+        _search_paths: &SearchPaths,
+    ) {
         self.hint_static();
-        self.cmd.arg(format!("-PC{lib}"));
+        if !whole_archive {
+            self.cmd.arg(format!("-PC{name}"));
+        } else {
+            self.cmd.arg("--whole-archive").arg(format!("-l{name}")).arg("--no-whole-archive");
+        }
     }

-    fn link_rlib(&mut self, lib: &Path) {
+    fn link_staticlib_by_path(&mut self, path: &Path, whole_archive: bool) {
         self.hint_static();
-        self.cmd.arg(lib);
+        if !whole_archive {
+            self.cmd.arg(path);
+        } else {
+            self.cmd.arg("--whole-archive").arg(path).arg("--no-whole-archive");
+        }
     }

     fn include_path(&mut self, path: &Path) {
         self.cmd.arg("-L").arg(path);
     }
@@ -1436,31 +1437,6 @@ impl<'a> Linker for L4Bender<'a> {
         self.cmd.arg("-z").arg("norelro");
     }

-    fn cmd(&mut self) -> &mut Command {
-        &mut self.cmd
-    }
-
-    fn set_output_kind(&mut self, _output_kind: LinkOutputKind, _out_filename: &Path) {}
-
-    fn link_rust_dylib(&mut self, _: &str, _: &Path) {
-        panic!("Rust dylibs not supported");
-    }
-
-    fn link_framework(&mut self, _framework: &str, _as_needed: bool) {
-        bug!("frameworks not supported on L4Re");
-    }
-
-    fn link_whole_staticlib(&mut self, lib: &str, _verbatim: bool, _search_path: &[PathBuf]) {
-        self.hint_static();
-        self.cmd.arg("--whole-archive").arg(format!("-l{lib}"));
-        self.cmd.arg("--no-whole-archive");
-    }
-
-    fn link_whole_rlib(&mut self, lib: &Path) {
-        self.hint_static();
-        self.cmd.arg("--whole-archive").arg(lib).arg("--no-whole-archive");
-    }
-
     fn gc_sections(&mut self, keep_metadata: bool) {
         if !keep_metadata {
             self.cmd.arg("--gc-sections");
@@ -1571,19 +1547,56 @@ impl<'a> AixLinker<'a> {
 }

 impl<'a> Linker for AixLinker<'a> {
-    fn link_dylib(&mut self, lib: &str, _verbatim: bool, _as_needed: bool) {
+    fn cmd(&mut self) -> &mut Command {
+        &mut self.cmd
+    }
+
+    fn set_output_kind(&mut self, output_kind: LinkOutputKind, out_filename: &Path) {
+        match output_kind {
+            LinkOutputKind::DynamicDylib => {
+                self.hint_dynamic();
+                self.build_dylib(out_filename);
+            }
+            LinkOutputKind::StaticDylib => {
+                self.hint_static();
+                self.build_dylib(out_filename);
+            }
+            _ => {}
+        }
+    }
+
+    fn link_dylib_by_name(&mut self, name: &str, _verbatim: bool, _as_needed: bool) {
         self.hint_dynamic();
-        self.cmd.arg(format!("-l{lib}"));
+        self.cmd.arg(format!("-l{name}"));
     }

-    fn link_staticlib(&mut self, lib: &str, _verbatim: bool) {
+    fn link_staticlib_by_name(
+        &mut self,
+        name: &str,
+        verbatim: bool,
+        whole_archive: bool,
+        search_paths: &SearchPaths,
+    ) {
         self.hint_static();
-        self.cmd.arg(format!("-l{lib}"));
+        if !whole_archive {
+            self.cmd.arg(format!("-l{name}"));
+        } else {
+            let mut arg = OsString::from("-bkeepfile:");
+            let search_path = search_paths.get(self.sess);
+            arg.push(find_native_static_library(name, verbatim, search_path, self.sess));
+            self.cmd.arg(arg);
+        }
     }

-    fn link_rlib(&mut self, lib: &Path) {
+    fn link_staticlib_by_path(&mut self, path: &Path, whole_archive: bool) {
         self.hint_static();
-        self.cmd.arg(lib);
+        if !whole_archive {
+            self.cmd.arg(path);
+        } else {
+            let mut arg = OsString::from("-bkeepfile:");
+            arg.push(path);
+            self.cmd.arg(arg);
+        }
     }

     fn include_path(&mut self, path: &Path) {
@@ -1608,44 +1621,6 @@ impl<'a> Linker for AixLinker<'a> {
     fn no_relro(&mut self) {}

-    fn cmd(&mut self) -> &mut Command {
-        &mut self.cmd
-    }
-
-    fn set_output_kind(&mut self, output_kind: LinkOutputKind, out_filename: &Path) {
-        match output_kind {
-            LinkOutputKind::DynamicDylib => {
-                self.hint_dynamic();
-                self.build_dylib(out_filename);
-            }
-            LinkOutputKind::StaticDylib => {
-                self.hint_static();
-                self.build_dylib(out_filename);
-            }
-            _ => {}
-        }
-    }
-
-    fn link_rust_dylib(&mut self, lib: &str, _: &Path) {
-        self.hint_dynamic();
-        self.cmd.arg(format!("-l{lib}"));
-    }
-
-    fn link_framework(&mut self, _framework: &str, _as_needed: bool) {
-        bug!("frameworks not supported on AIX");
-    }
-
-    fn link_whole_staticlib(&mut self, lib: &str, verbatim: bool, search_path: &[PathBuf]) {
-        self.hint_static();
-        let lib = find_native_static_library(lib, verbatim, search_path, self.sess);
-        self.cmd.arg(format!("-bkeepfile:{}", lib.to_str().unwrap()));
-    }
-
-    fn link_whole_rlib(&mut self, lib: &Path) {
-        self.hint_static();
-        self.cmd.arg(format!("-bkeepfile:{}", lib.to_str().unwrap()));
-    }
-
     fn gc_sections(&mut self, _keep_metadata: bool) {
         self.cmd.arg("-bgc");
     }
@@ -1810,11 +1785,21 @@ impl<'a> Linker for PtxLinker<'a> {
     fn set_output_kind(&mut self, _output_kind: LinkOutputKind, _out_filename: &Path) {}

-    fn link_rlib(&mut self, path: &Path) {
-        self.cmd.arg("--rlib").arg(path);
+    fn link_dylib_by_name(&mut self, _name: &str, _verbatim: bool, _as_needed: bool) {
+        panic!("external dylibs not supported")
     }

-    fn link_whole_rlib(&mut self, path: &Path) {
+    fn link_staticlib_by_name(
+        &mut self,
+        _name: &str,
+        _verbatim: bool,
+        _whole_archive: bool,
+        _search_paths: &SearchPaths,
+    ) {
+        panic!("staticlibs not supported")
+    }
+
+    fn link_staticlib_by_path(&mut self, path: &Path, _whole_archive: bool) {
         self.cmd.arg("--rlib").arg(path);
     }
@@ -1844,30 +1829,10 @@ impl<'a> Linker for PtxLinker<'a> {
         self.cmd.arg("-o").arg(path);
     }

-    fn link_dylib(&mut self, _lib: &str, _verbatim: bool, _as_needed: bool) {
-        panic!("external dylibs not supported")
-    }
-
-    fn link_rust_dylib(&mut self, _lib: &str, _path: &Path) {
-        panic!("external dylibs not supported")
-    }
-
-    fn link_staticlib(&mut self, _lib: &str, _verbatim: bool) {
-        panic!("staticlibs not supported")
-    }
-
-    fn link_whole_staticlib(&mut self, _lib: &str, _verbatim: bool, _search_path: &[PathBuf]) {
-        panic!("staticlibs not supported")
-    }
-
     fn framework_path(&mut self, _path: &Path) {
         panic!("frameworks not supported")
     }

-    fn link_framework(&mut self, _framework: &str, _as_needed: bool) {
-        panic!("frameworks not supported")
-    }
-
     fn full_relro(&mut self) {}

     fn partial_relro(&mut self) {}
@@ -1907,11 +1872,21 @@ impl<'a> Linker for BpfLinker<'a> {
     fn set_output_kind(&mut self, _output_kind: LinkOutputKind, _out_filename: &Path) {}

-    fn link_rlib(&mut self, path: &Path) {
-        self.cmd.arg(path);
+    fn link_dylib_by_name(&mut self, _name: &str, _verbatim: bool, _as_needed: bool) {
+        panic!("external dylibs not supported")
     }

-    fn link_whole_rlib(&mut self, path: &Path) {
+    fn link_staticlib_by_name(
+        &mut self,
+        _name: &str,
+        _verbatim: bool,
+        _whole_archive: bool,
+        _search_paths: &SearchPaths,
+    ) {
+        panic!("staticlibs not supported")
+    }
+
+    fn link_staticlib_by_path(&mut self, path: &Path, _whole_archive: bool) {
         self.cmd.arg(path);
     }
@@ -1942,30 +1917,10 @@ impl<'a> Linker for BpfLinker<'a> {
         self.cmd.arg("-o").arg(path);
     }

-    fn link_dylib(&mut self, _lib: &str, _verbatim: bool, _as_needed: bool) {
-        panic!("external dylibs not supported")
-    }
-
-    fn link_rust_dylib(&mut self, _lib: &str, _path: &Path) {
-        panic!("external dylibs not supported")
-    }
-
-    fn link_staticlib(&mut self, _lib: &str, _verbatim: bool) {
-        panic!("staticlibs not supported")
-    }
-
-    fn link_whole_staticlib(&mut self, _lib: &str, _verbatim: bool, _search_path: &[PathBuf]) {
-        panic!("staticlibs not supported")
-    }
-
     fn framework_path(&mut self, _path: &Path) {
         panic!("frameworks not supported")
     }

-    fn link_framework(&mut self, _framework: &str, _as_needed: bool) {
-        panic!("frameworks not supported")
-    }
-
     fn full_relro(&mut self) {}

     fn partial_relro(&mut self) {}
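
To summarize the refactor in this file: the per-flavor methods (link_rlib, link_whole_rlib, link_staticlib, link_whole_staticlib, link_dylib, link_rust_dylib, link_framework) collapse into link_staticlib_by_name/link_staticlib_by_path/link_dylib_by_name/link_framework_by_name with an explicit whole_archive flag, and link_framework_by_name gets a default body that calls bug!, so only framework-capable linkers override it. A tiny stand-alone sketch of that default-method shape (illustrative only, not rustc's real types):

// A trimmed-down trait mirroring the shape of the new Linker methods.
trait TinyLinker {
    fn link_staticlib_by_name(&mut self, name: &str, whole_archive: bool);

    // Most linkers keep this default; only framework-capable ones override it.
    fn link_framework_by_name(&mut self, _name: &str) {
        panic!("framework linked with unsupported linker")
    }
}

struct GnuLike {
    args: Vec<String>,
}

impl TinyLinker for GnuLike {
    fn link_staticlib_by_name(&mut self, name: &str, whole_archive: bool) {
        if whole_archive {
            self.args.push("--whole-archive".into());
        }
        self.args.push(format!("-l{name}"));
        if whole_archive {
            self.args.push("--no-whole-archive".into());
        }
    }
    // `link_framework_by_name` keeps the panicking default.
}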


@@ -531,6 +531,11 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir,
                 )?;
             }
         }
+        // The intrinsic represents whether the value is known to the optimizer (LLVM).
+        // We're not doing any optimizations here, so there is no optimizer that could know the value.
+        // (We know the value here in the machine of course, but this is the runtime of that code,
+        // not the optimization stage.)
+        sym::is_val_statically_known => ecx.write_scalar(Scalar::from_bool(false), dest)?,
         _ => {
             throw_unsup_format!(
                 "intrinsic `{intrinsic_name}` is not supported at compile-time"

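For readers unfamiliar with the new is_val_statically_known intrinsic that appears throughout this merge: it lets library code pick a cheaper code path when an argument happens to be a compile-time constant, and both paths must compute the same result, since the intrinsic is free to answer either way (as the comment above notes, const eval always answers false). A minimal usage sketch, not part of this commit; the intrinsic is nightly-only behind core_intrinsics, and whether the call needs unsafe is an assumption here:

#![feature(core_intrinsics)]

// Sketch: use a loop the optimizer can fully unroll when `exp` is a
// compile-time constant, and a generic fallback otherwise. Both branches
// compute the same value, so the intrinsic may legitimately return
// either `true` or `false`.
pub fn pow(base: u32, exp: u32) -> u32 {
    // Assumption: the call is wrapped in `unsafe`; drop the block if the
    // intrinsic is declared safe in your toolchain.
    if unsafe { core::intrinsics::is_val_statically_known(exp) } {
        let mut acc = 1u32;
        for _ in 0..exp {
            acc = acc.wrapping_mul(base);
        }
        acc
    } else {
        base.wrapping_pow(exp)
    }
}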

@@ -138,7 +138,7 @@ pub(crate) fn const_to_valtree_inner<'tcx>(
         }
         // Trait objects are not allowed in type level constants, as we have no concept for
         // resolving their backing type, even if we can do that at const eval time. We may
-        // hypothetically be able to allow `dyn StructuralEq` trait objects in the future,
+        // hypothetically be able to allow `dyn StructuralPartialEq` trait objects in the future,
         // but it is unclear if this is useful.
         ty::Dynamic(..) => Err(ValTreeCreationError::NonSupportedType),


@@ -864,9 +864,6 @@ impl<'tcx> ReportErrorExt for InvalidProgramInfo<'tcx> {
             InvalidProgramInfo::FnAbiAdjustForForeignAbi(_) => {
                 rustc_middle::error::middle_adjust_for_foreign_abi_error
             }
-            InvalidProgramInfo::ConstPropNonsense => {
-                panic!("We had const-prop nonsense, this should never be printed")
-            }
         }
     }

     fn add_args<G: EmissionGuarantee>(
@@ -875,9 +872,7 @@ impl<'tcx> ReportErrorExt for InvalidProgramInfo<'tcx> {
         builder: &mut DiagnosticBuilder<'_, G>,
     ) {
         match self {
-            InvalidProgramInfo::TooGeneric
-            | InvalidProgramInfo::AlreadyReported(_)
-            | InvalidProgramInfo::ConstPropNonsense => {}
+            InvalidProgramInfo::TooGeneric | InvalidProgramInfo::AlreadyReported(_) => {}
             InvalidProgramInfo::Layout(e) => {
                 // The level doesn't matter, `diag` is consumed without it being used.
                 let dummy_level = Level::Bug;


@@ -1131,13 +1131,15 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         span: Option<Span>,
         layout: Option<TyAndLayout<'tcx>>,
     ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
-        let const_val = val.eval(*self.tcx, self.param_env, span).map_err(|err| {
-            // FIXME: somehow this is reachable even when POST_MONO_CHECKS is on.
-            // Are we not always populating `required_consts`?
-            err.emit_note(*self.tcx);
-            err
-        })?;
-        self.const_val_to_op(const_val, val.ty(), layout)
+        M::eval_mir_constant(self, *val, span, layout, |ecx, val, span, layout| {
+            let const_val = val.eval(*ecx.tcx, ecx.param_env, span).map_err(|err| {
+                // FIXME: somehow this is reachable even when POST_MONO_CHECKS is on.
+                // Are we not always populating `required_consts`?
+                err.emit_note(*ecx.tcx);
+                err
+            })?;
+            ecx.const_val_to_op(const_val, val.ty(), layout)
+        })
     }

     #[must_use]


@@ -13,6 +13,7 @@ use rustc_middle::query::TyCtxtAt;
 use rustc_middle::ty;
 use rustc_middle::ty::layout::TyAndLayout;
 use rustc_span::def_id::DefId;
+use rustc_span::Span;
 use rustc_target::abi::{Align, Size};
 use rustc_target::spec::abi::Abi as CallAbi;
@@ -510,6 +511,27 @@ pub trait Machine<'mir, 'tcx: 'mir>: Sized {
     ) -> InterpResult<'tcx> {
         Ok(())
     }

+    /// Evaluate the given constant. The `eval` function will do all the required evaluation,
+    /// but this hook has the chance to do some pre/postprocessing.
+    #[inline(always)]
+    fn eval_mir_constant<F>(
+        ecx: &InterpCx<'mir, 'tcx, Self>,
+        val: mir::Const<'tcx>,
+        span: Option<Span>,
+        layout: Option<TyAndLayout<'tcx>>,
+        eval: F,
+    ) -> InterpResult<'tcx, OpTy<'tcx, Self::Provenance>>
+    where
+        F: Fn(
+            &InterpCx<'mir, 'tcx, Self>,
+            mir::Const<'tcx>,
+            Option<Span>,
+            Option<TyAndLayout<'tcx>>,
+        ) -> InterpResult<'tcx, OpTy<'tcx, Self::Provenance>>,
+    {
+        eval(ecx, val, span, layout)
+    }
 }

 /// A lot of the flexibility above is just needed for `Miri`, but all "compile-time" machines


@@ -643,11 +643,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         let layout = self.layout_of_local(frame, local, layout)?;
         let op = *frame.locals[local].access()?;
         if matches!(op, Operand::Immediate(_)) {
-            if layout.is_unsized() {
-                // ConstProp marks *all* locals as `Immediate::Uninit` since it cannot
-                // efficiently check whether they are sized. We have to catch that case here.
-                throw_inval!(ConstPropNonsense);
-            }
+            assert!(!layout.is_unsized());
         }
         Ok(OpTy { op, layout })
     }


@@ -519,11 +519,7 @@ where
         } else {
             // Unsized `Local` isn't okay (we cannot store the metadata).
             match frame_ref.locals[local].access()? {
-                Operand::Immediate(_) => {
-                    // ConstProp marks *all* locals as `Immediate::Uninit` since it cannot
-                    // efficiently check whether they are sized. We have to catch that case here.
-                    throw_inval!(ConstPropNonsense);
-                }
+                Operand::Immediate(_) => bug!(),
                 Operand::Indirect(mplace) => Place::Ptr(*mplace),
             }
         };
@@ -816,17 +812,8 @@ where
         // avoid force_allocation.
         let src = match self.read_immediate_raw(src)? {
             Right(src_val) => {
-                // FIXME(const_prop): Const-prop can possibly evaluate an
-                // unsized copy operation when it thinks that the type is
-                // actually sized, due to a trivially false where-clause
-                // predicate like `where Self: Sized` with `Self = dyn Trait`.
-                // See #102553 for an example of such a predicate.
-                if src.layout().is_unsized() {
-                    throw_inval!(ConstPropNonsense);
-                }
-                if dest.layout().is_unsized() {
-                    throw_inval!(ConstPropNonsense);
-                }
+                assert!(!src.layout().is_unsized());
+                assert!(!dest.layout().is_unsized());
                 assert_eq!(src.layout().size, dest.layout().size);
                 // Yay, we got a value that we can write directly.
                 return if layout_compat {


@@ -153,11 +153,7 @@ where
         // Offset may need adjustment for unsized fields.
         let (meta, offset) = if field_layout.is_unsized() {
-            if base.layout().is_sized() {
-                // An unsized field of a sized type? Sure...
-                // But const-prop actually feeds us such nonsense MIR! (see test `const_prop/issue-86351.rs`)
-                throw_inval!(ConstPropNonsense);
-            }
+            assert!(!base.layout().is_sized());
             let base_meta = base.meta();
             // Re-use parent metadata to determine dynamic field layout.
             // With custom DSTS, this *will* execute user-defined code, but the same
@@ -205,29 +201,26 @@ where
         // see https://github.com/rust-lang/rust/issues/93688#issuecomment-1032929496.)
         // So we just "offset" by 0.
         let layout = base.layout().for_variant(self, variant);
-        if layout.abi.is_uninhabited() {
-            // `read_discriminant` should have excluded uninhabited variants... but ConstProp calls
-            // us on dead code.
-            // In the future we might want to allow this to permit code like this:
-            // (this is a Rust/MIR pseudocode mix)
-            // ```
-            // enum Option2 {
-            //     Some(i32, !),
-            //     None,
-            // }
-            //
-            // fn panic() -> ! { panic!() }
-            //
-            // let x: Option2;
-            // x.Some.0 = 42;
-            // x.Some.1 = panic();
-            // SetDiscriminant(x, Some);
-            // ```
-            // However, for now we don't generate such MIR, and this check here *has* found real
-            // bugs (see https://github.com/rust-lang/rust/issues/115145), so we will keep rejecting
-            // it.
-            throw_inval!(ConstPropNonsense)
-        }
+        // In the future we might want to allow this to permit code like this:
+        // (this is a Rust/MIR pseudocode mix)
+        // ```
+        // enum Option2 {
+        //     Some(i32, !),
+        //     None,
+        // }
+        //
+        // fn panic() -> ! { panic!() }
+        //
+        // let x: Option2;
+        // x.Some.0 = 42;
+        // x.Some.1 = panic();
+        // SetDiscriminant(x, Some);
+        // ```
+        // However, for now we don't generate such MIR, and this check here *has* found real
+        // bugs (see https://github.com/rust-lang/rust/issues/115145), so we will keep rejecting
+        // it.
+        assert!(!layout.abi.is_uninhabited());
         // This cannot be `transmute` as variants *can* have a smaller size than the entire enum.
         base.offset(Size::ZERO, layout, self)
     }


@@ -14,7 +14,7 @@ pub use self::type_name::type_name;
 /// Classify whether an operator is "left-homogeneous", i.e., the LHS has the
 /// same type as the result.
 #[inline]
-pub(crate) fn binop_left_homogeneous(op: mir::BinOp) -> bool {
+pub fn binop_left_homogeneous(op: mir::BinOp) -> bool {
     use rustc_middle::mir::BinOp::*;
     match op {
         Add | AddUnchecked | Sub | SubUnchecked | Mul | MulUnchecked | Div | Rem | BitXor
@@ -26,7 +26,7 @@ pub(crate) fn binop_left_homogeneous(op: mir::BinOp) -> bool {
 /// Classify whether an operator is "right-homogeneous", i.e., the RHS has the
 /// same type as the LHS.
 #[inline]
-pub(crate) fn binop_right_homogeneous(op: mir::BinOp) -> bool {
+pub fn binop_right_homogeneous(op: mir::BinOp) -> bool {
     use rustc_middle::mir::BinOp::*;
     match op {
         Add | AddUnchecked | Sub | SubUnchecked | Mul | MulUnchecked | Div | Rem | BitXor


@@ -7,10 +7,10 @@ edition = "2021"
 # tidy-alphabetical-start
 arrayvec = { version = "0.7", default-features = false }
 bitflags = "2.4.1"
+either = "1.0"
 elsa = "=1.7.1"
 ena = "0.14.2"
 indexmap = { version = "2.0.0" }
-itertools = "0.11"
 jobserver_crate = { version = "0.1.27", package = "jobserver" }
 libc = "0.2"
 measureme = "11"


@@ -3,7 +3,7 @@ use crate::fx::{FxHashMap, FxHasher};
 use crate::sync::{is_dyn_thread_safe, CacheAligned};
 use crate::sync::{Lock, LockGuard, Mode};
 #[cfg(parallel_compiler)]
-use itertools::Either;
+use either::Either;
 use std::borrow::Borrow;
 use std::collections::hash_map::RawEntryMut;
 use std::hash::{Hash, Hasher};


@@ -1,6 +1,6 @@
 use crate::fx::FxHashMap;
 use arrayvec::ArrayVec;
-use itertools::Either;
+use either::Either;
 use std::fmt;
 use std::hash::Hash;
 use std::ops::Index;
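
Context for swapping itertools::Either for either::Either in the two imports above (and in the Cargo manifests earlier): itertools re-exports Either from the either crate, so depending on either directly drops the heavier dependency without changing behavior, and Either still acts as an iterator whenever both variants do. A small sketch of the type in use (assumes the either crate as a dependency; not part of this commit):

use either::Either;

// Return one of two differently shaped iterators behind a single type.
fn evens_or_all(only_evens: bool, max: u32) -> impl Iterator<Item = u32> {
    if only_evens {
        Either::Left((0..max).filter(|n| n % 2 == 0))
    } else {
        Either::Right(0..max)
    }
}

fn main() {
    assert_eq!(evens_or_all(true, 5).collect::<Vec<_>>(), vec![0, 2, 4]);
    assert_eq!(evens_or_all(false, 3).collect::<Vec<_>>(), vec![0, 1, 2]);
}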


@@ -143,8 +143,6 @@ language_item_table! {
     Unsize, sym::unsize, unsize_trait, Target::Trait, GenericRequirement::Minimum(1);
     /// Trait injected by `#[derive(PartialEq)]`, (i.e. "Partial EQ").
     StructuralPeq, sym::structural_peq, structural_peq_trait, Target::Trait, GenericRequirement::None;
-    /// Trait injected by `#[derive(Eq)]`, (i.e. "Total EQ"; no, I will not apologize).
-    StructuralTeq, sym::structural_teq, structural_teq_trait, Target::Trait, GenericRequirement::None;
     Copy, sym::copy, copy_trait, Target::Trait, GenericRequirement::Exact(0);
     Clone, sym::clone, clone_trait, Target::Trait, GenericRequirement::None;
     Sync, sym::sync, sync_trait, Target::Trait, GenericRequirement::Exact(0);
@@ -208,6 +206,10 @@ language_item_table! {
     FnMut, sym::fn_mut, fn_mut_trait, Target::Trait, GenericRequirement::Exact(1);
     FnOnce, sym::fn_once, fn_once_trait, Target::Trait, GenericRequirement::Exact(1);

+    AsyncFn, sym::async_fn, async_fn_trait, Target::Trait, GenericRequirement::Exact(1);
+    AsyncFnMut, sym::async_fn_mut, async_fn_mut_trait, Target::Trait, GenericRequirement::Exact(1);
+    AsyncFnOnce, sym::async_fn_once, async_fn_once_trait, Target::Trait, GenericRequirement::Exact(1);
+
     FnOnceOutput, sym::fn_once_output, fn_once_output, Target::AssocTy, GenericRequirement::None;

     Iterator, sym::iterator, iterator_trait, Target::Trait, GenericRequirement::Exact(0);


@@ -453,6 +453,8 @@ pub fn check_intrinsic_type(tcx: TyCtxt<'_>, it: &hir::ForeignItem<'_>) {
             sym::black_box => (1, vec![param(0)], param(0)),

+            sym::is_val_statically_known => (1, vec![param(0)], tcx.types.bool),
+
             sym::const_eval_select => (4, vec![param(0), param(1), param(2)], param(3)),

             sym::vtable_size | sym::vtable_align => {


@ -25,14 +25,21 @@ use rustc_trait_selection::traits::ObligationCtxt;
use rustc_trait_selection::traits::{self, ObligationCause}; use rustc_trait_selection::traits::{self, ObligationCause};
use std::collections::BTreeMap; use std::collections::BTreeMap;
pub fn check_trait(tcx: TyCtxt<'_>, trait_def_id: DefId) { pub fn check_trait(tcx: TyCtxt<'_>, trait_def_id: DefId) -> Result<(), ErrorGuaranteed> {
let lang_items = tcx.lang_items(); let lang_items = tcx.lang_items();
Checker { tcx, trait_def_id } let checker = Checker { tcx, trait_def_id };
.check(lang_items.drop_trait(), visit_implementation_of_drop) let mut res = checker.check(lang_items.drop_trait(), visit_implementation_of_drop);
.check(lang_items.copy_trait(), visit_implementation_of_copy) res = res.and(checker.check(lang_items.copy_trait(), visit_implementation_of_copy));
.check(lang_items.const_param_ty_trait(), visit_implementation_of_const_param_ty) res = res.and(
.check(lang_items.coerce_unsized_trait(), visit_implementation_of_coerce_unsized) checker.check(lang_items.const_param_ty_trait(), visit_implementation_of_const_param_ty),
.check(lang_items.dispatch_from_dyn_trait(), visit_implementation_of_dispatch_from_dyn); );
res = res.and(
checker.check(lang_items.coerce_unsized_trait(), visit_implementation_of_coerce_unsized),
);
res.and(
checker
.check(lang_items.dispatch_from_dyn_trait(), visit_implementation_of_dispatch_from_dyn),
)
} }
struct Checker<'tcx> { struct Checker<'tcx> {
@ -41,33 +48,40 @@ struct Checker<'tcx> {
} }
impl<'tcx> Checker<'tcx> { impl<'tcx> Checker<'tcx> {
fn check<F>(&self, trait_def_id: Option<DefId>, mut f: F) -> &Self fn check<F>(&self, trait_def_id: Option<DefId>, mut f: F) -> Result<(), ErrorGuaranteed>
where where
F: FnMut(TyCtxt<'tcx>, LocalDefId), F: FnMut(TyCtxt<'tcx>, LocalDefId) -> Result<(), ErrorGuaranteed>,
{ {
let mut res = Ok(());
if Some(self.trait_def_id) == trait_def_id { if Some(self.trait_def_id) == trait_def_id {
for &impl_def_id in self.tcx.hir().trait_impls(self.trait_def_id) { for &impl_def_id in self.tcx.hir().trait_impls(self.trait_def_id) {
f(self.tcx, impl_def_id); res = res.and(f(self.tcx, impl_def_id));
} }
} }
self res
} }
} }
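The refactor threads `Result<(), ErrorGuaranteed>` through every builtin-trait check: each check still runs for its diagnostic side effects, and `Result::and` keeps the first error so the caller knows something failed. A minimal sketch of that accumulation pattern, with plain `String` errors standing in for `ErrorGuaranteed` (none of the names below are rustc APIs):

```rust
fn check_a() -> Result<(), String> {
    Ok(())
}

fn check_b() -> Result<(), String> {
    Err("first failure".to_string())
}

fn check_c() -> Result<(), String> {
    Err("second failure".to_string())
}

fn check_all() -> Result<(), String> {
    let mut res = Ok(());
    // Every check is evaluated (`Result::and` takes an already-computed value,
    // unlike `and_then` with a closure), so all diagnostics still get emitted;
    // the earliest `Err` is the one that survives.
    res = res.and(check_a());
    res = res.and(check_b());
    res = res.and(check_c());
    res
}

fn main() {
    assert_eq!(check_all(), Err("first failure".to_string()));
}
```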
fn visit_implementation_of_drop(tcx: TyCtxt<'_>, impl_did: LocalDefId) { fn visit_implementation_of_drop(
tcx: TyCtxt<'_>,
impl_did: LocalDefId,
) -> Result<(), ErrorGuaranteed> {
// Destructors only work on local ADT types. // Destructors only work on local ADT types.
match tcx.type_of(impl_did).instantiate_identity().kind() { match tcx.type_of(impl_did).instantiate_identity().kind() {
ty::Adt(def, _) if def.did().is_local() => return, ty::Adt(def, _) if def.did().is_local() => return Ok(()),
ty::Error(_) => return, ty::Error(_) => return Ok(()),
_ => {} _ => {}
} }
let impl_ = tcx.hir().expect_item(impl_did).expect_impl(); let impl_ = tcx.hir().expect_item(impl_did).expect_impl();
tcx.dcx().emit_err(errors::DropImplOnWrongItem { span: impl_.self_ty.span }); Err(tcx.dcx().emit_err(errors::DropImplOnWrongItem { span: impl_.self_ty.span }))
} }
fn visit_implementation_of_copy(tcx: TyCtxt<'_>, impl_did: LocalDefId) { fn visit_implementation_of_copy(
tcx: TyCtxt<'_>,
impl_did: LocalDefId,
) -> Result<(), ErrorGuaranteed> {
debug!("visit_implementation_of_copy: impl_did={:?}", impl_did); debug!("visit_implementation_of_copy: impl_did={:?}", impl_did);
let self_type = tcx.type_of(impl_did).instantiate_identity(); let self_type = tcx.type_of(impl_did).instantiate_identity();
@ -79,59 +93,68 @@ fn visit_implementation_of_copy(tcx: TyCtxt<'_>, impl_did: LocalDefId) {
debug!("visit_implementation_of_copy: self_type={:?} (free)", self_type); debug!("visit_implementation_of_copy: self_type={:?} (free)", self_type);
let span = match tcx.hir().expect_item(impl_did).expect_impl() { let span = match tcx.hir().expect_item(impl_did).expect_impl() {
hir::Impl { polarity: hir::ImplPolarity::Negative(_), .. } => return, hir::Impl { polarity: hir::ImplPolarity::Negative(_), .. } => return Ok(()),
hir::Impl { self_ty, .. } => self_ty.span, hir::Impl { self_ty, .. } => self_ty.span,
}; };
let cause = traits::ObligationCause::misc(span, impl_did); let cause = traits::ObligationCause::misc(span, impl_did);
match type_allowed_to_implement_copy(tcx, param_env, self_type, cause) { match type_allowed_to_implement_copy(tcx, param_env, self_type, cause) {
Ok(()) => {} Ok(()) => Ok(()),
Err(CopyImplementationError::InfringingFields(fields)) => { Err(CopyImplementationError::InfringingFields(fields)) => {
infringing_fields_error(tcx, fields, LangItem::Copy, impl_did, span); Err(infringing_fields_error(tcx, fields, LangItem::Copy, impl_did, span))
} }
Err(CopyImplementationError::NotAnAdt) => { Err(CopyImplementationError::NotAnAdt) => {
tcx.dcx().emit_err(errors::CopyImplOnNonAdt { span }); Err(tcx.dcx().emit_err(errors::CopyImplOnNonAdt { span }))
} }
Err(CopyImplementationError::HasDestructor) => { Err(CopyImplementationError::HasDestructor) => {
tcx.dcx().emit_err(errors::CopyImplOnTypeWithDtor { span }); Err(tcx.dcx().emit_err(errors::CopyImplOnTypeWithDtor { span }))
} }
} }
} }
fn visit_implementation_of_const_param_ty(tcx: TyCtxt<'_>, impl_did: LocalDefId) { fn visit_implementation_of_const_param_ty(
tcx: TyCtxt<'_>,
impl_did: LocalDefId,
) -> Result<(), ErrorGuaranteed> {
let self_type = tcx.type_of(impl_did).instantiate_identity(); let self_type = tcx.type_of(impl_did).instantiate_identity();
assert!(!self_type.has_escaping_bound_vars()); assert!(!self_type.has_escaping_bound_vars());
let param_env = tcx.param_env(impl_did); let param_env = tcx.param_env(impl_did);
let span = match tcx.hir().expect_item(impl_did).expect_impl() { let span = match tcx.hir().expect_item(impl_did).expect_impl() {
hir::Impl { polarity: hir::ImplPolarity::Negative(_), .. } => return, hir::Impl { polarity: hir::ImplPolarity::Negative(_), .. } => return Ok(()),
impl_ => impl_.self_ty.span, impl_ => impl_.self_ty.span,
}; };
let cause = traits::ObligationCause::misc(span, impl_did); let cause = traits::ObligationCause::misc(span, impl_did);
match type_allowed_to_implement_const_param_ty(tcx, param_env, self_type, cause) { match type_allowed_to_implement_const_param_ty(tcx, param_env, self_type, cause) {
Ok(()) => {} Ok(()) => Ok(()),
Err(ConstParamTyImplementationError::InfrigingFields(fields)) => { Err(ConstParamTyImplementationError::InfrigingFields(fields)) => {
infringing_fields_error(tcx, fields, LangItem::ConstParamTy, impl_did, span); Err(infringing_fields_error(tcx, fields, LangItem::ConstParamTy, impl_did, span))
} }
Err(ConstParamTyImplementationError::NotAnAdtOrBuiltinAllowed) => { Err(ConstParamTyImplementationError::NotAnAdtOrBuiltinAllowed) => {
tcx.dcx().emit_err(errors::ConstParamTyImplOnNonAdt { span }); Err(tcx.dcx().emit_err(errors::ConstParamTyImplOnNonAdt { span }))
} }
} }
} }
fn visit_implementation_of_coerce_unsized(tcx: TyCtxt<'_>, impl_did: LocalDefId) { fn visit_implementation_of_coerce_unsized(
tcx: TyCtxt<'_>,
impl_did: LocalDefId,
) -> Result<(), ErrorGuaranteed> {
debug!("visit_implementation_of_coerce_unsized: impl_did={:?}", impl_did); debug!("visit_implementation_of_coerce_unsized: impl_did={:?}", impl_did);
// Just compute this for the side-effects, in particular reporting // Just compute this for the side-effects, in particular reporting
// errors; other parts of the code may demand it for the info of // errors; other parts of the code may demand it for the info of
// course. // course.
let span = tcx.def_span(impl_did); let span = tcx.def_span(impl_did);
tcx.at(span).coerce_unsized_info(impl_did); tcx.at(span).ensure().coerce_unsized_info(impl_did)
} }
fn visit_implementation_of_dispatch_from_dyn(tcx: TyCtxt<'_>, impl_did: LocalDefId) { fn visit_implementation_of_dispatch_from_dyn(
tcx: TyCtxt<'_>,
impl_did: LocalDefId,
) -> Result<(), ErrorGuaranteed> {
debug!("visit_implementation_of_dispatch_from_dyn: impl_did={:?}", impl_did); debug!("visit_implementation_of_dispatch_from_dyn: impl_did={:?}", impl_did);
let span = tcx.def_span(impl_did); let span = tcx.def_span(impl_did);
@ -166,26 +189,28 @@ fn visit_implementation_of_dispatch_from_dyn(tcx: TyCtxt<'_>, impl_did: LocalDef
match (source.kind(), target.kind()) { match (source.kind(), target.kind()) {
(&Ref(r_a, _, mutbl_a), Ref(r_b, _, mutbl_b)) (&Ref(r_a, _, mutbl_a), Ref(r_b, _, mutbl_b))
if infcx.at(&cause, param_env).eq(DefineOpaqueTypes::No, r_a, *r_b).is_ok() if infcx.at(&cause, param_env).eq(DefineOpaqueTypes::No, r_a, *r_b).is_ok()
&& mutbl_a == *mutbl_b => {} && mutbl_a == *mutbl_b =>
(&RawPtr(tm_a), &RawPtr(tm_b)) if tm_a.mutbl == tm_b.mutbl => (), {
Ok(())
}
(&RawPtr(tm_a), &RawPtr(tm_b)) if tm_a.mutbl == tm_b.mutbl => Ok(()),
(&Adt(def_a, args_a), &Adt(def_b, args_b)) if def_a.is_struct() && def_b.is_struct() => { (&Adt(def_a, args_a), &Adt(def_b, args_b)) if def_a.is_struct() && def_b.is_struct() => {
if def_a != def_b { if def_a != def_b {
let source_path = tcx.def_path_str(def_a.did()); let source_path = tcx.def_path_str(def_a.did());
let target_path = tcx.def_path_str(def_b.did()); let target_path = tcx.def_path_str(def_b.did());
tcx.dcx().emit_err(errors::DispatchFromDynCoercion { return Err(tcx.dcx().emit_err(errors::DispatchFromDynCoercion {
span, span,
trait_name: "DispatchFromDyn", trait_name: "DispatchFromDyn",
note: true, note: true,
source_path, source_path,
target_path, target_path,
}); }));
return;
} }
let mut res = Ok(());
if def_a.repr().c() || def_a.repr().packed() { if def_a.repr().c() || def_a.repr().packed() {
tcx.dcx().emit_err(errors::DispatchFromDynRepr { span }); res = Err(tcx.dcx().emit_err(errors::DispatchFromDynRepr { span }));
} }
let fields = &def_a.non_enum_variant().fields; let fields = &def_a.non_enum_variant().fields;
@ -207,11 +232,11 @@ fn visit_implementation_of_dispatch_from_dyn(tcx: TyCtxt<'_>, impl_did: LocalDef
infcx.at(&cause, param_env).eq(DefineOpaqueTypes::No, ty_a, ty_b) infcx.at(&cause, param_env).eq(DefineOpaqueTypes::No, ty_a, ty_b)
{ {
if ok.obligations.is_empty() { if ok.obligations.is_empty() {
tcx.dcx().emit_err(errors::DispatchFromDynZST { res = Err(tcx.dcx().emit_err(errors::DispatchFromDynZST {
span, span,
name: field.name, name: field.name,
ty: ty_a, ty: ty_a,
}); }));
return false; return false;
} }
@ -222,13 +247,13 @@ fn visit_implementation_of_dispatch_from_dyn(tcx: TyCtxt<'_>, impl_did: LocalDef
.collect::<Vec<_>>(); .collect::<Vec<_>>();
if coerced_fields.is_empty() { if coerced_fields.is_empty() {
tcx.dcx().emit_err(errors::DispatchFromDynSingle { res = Err(tcx.dcx().emit_err(errors::DispatchFromDynSingle {
span, span,
trait_name: "DispatchFromDyn", trait_name: "DispatchFromDyn",
note: true, note: true,
}); }));
} else if coerced_fields.len() > 1 { } else if coerced_fields.len() > 1 {
tcx.dcx().emit_err(errors::DispatchFromDynMulti { res = Err(tcx.dcx().emit_err(errors::DispatchFromDynMulti {
span, span,
coercions_note: true, coercions_note: true,
number: coerced_fields.len(), number: coerced_fields.len(),
@ -244,7 +269,7 @@ fn visit_implementation_of_dispatch_from_dyn(tcx: TyCtxt<'_>, impl_did: LocalDef
}) })
.collect::<Vec<_>>() .collect::<Vec<_>>()
.join(", "), .join(", "),
}); }));
} else { } else {
let ocx = ObligationCtxt::new(&infcx); let ocx = ObligationCtxt::new(&infcx);
for field in coerced_fields { for field in coerced_fields {
@ -261,21 +286,25 @@ fn visit_implementation_of_dispatch_from_dyn(tcx: TyCtxt<'_>, impl_did: LocalDef
} }
let errors = ocx.select_all_or_error(); let errors = ocx.select_all_or_error();
if !errors.is_empty() { if !errors.is_empty() {
infcx.err_ctxt().report_fulfillment_errors(errors); res = Err(infcx.err_ctxt().report_fulfillment_errors(errors));
} }
// Finally, resolve all regions. // Finally, resolve all regions.
let outlives_env = OutlivesEnvironment::new(param_env); let outlives_env = OutlivesEnvironment::new(param_env);
let _ = ocx.resolve_regions_and_report_errors(impl_did, &outlives_env); res = res.and(ocx.resolve_regions_and_report_errors(impl_did, &outlives_env));
} }
res
} }
_ => { _ => Err(tcx
tcx.dcx().emit_err(errors::CoerceUnsizedMay { span, trait_name: "DispatchFromDyn" }); .dcx()
} .emit_err(errors::CoerceUnsizedMay { span, trait_name: "DispatchFromDyn" })),
} }
} }
pub fn coerce_unsized_info<'tcx>(tcx: TyCtxt<'tcx>, impl_did: LocalDefId) -> CoerceUnsizedInfo { pub fn coerce_unsized_info<'tcx>(
tcx: TyCtxt<'tcx>,
impl_did: LocalDefId,
) -> Result<CoerceUnsizedInfo, ErrorGuaranteed> {
debug!("compute_coerce_unsized_info(impl_did={:?})", impl_did); debug!("compute_coerce_unsized_info(impl_did={:?})", impl_did);
let span = tcx.def_span(impl_did); let span = tcx.def_span(impl_did);
@ -292,8 +321,6 @@ pub fn coerce_unsized_info<'tcx>(tcx: TyCtxt<'tcx>, impl_did: LocalDefId) -> Coe
let param_env = tcx.param_env(impl_did); let param_env = tcx.param_env(impl_did);
assert!(!source.has_escaping_bound_vars()); assert!(!source.has_escaping_bound_vars());
let err_info = CoerceUnsizedInfo { custom_kind: None };
debug!("visit_implementation_of_coerce_unsized: {:?} -> {:?} (free)", source, target); debug!("visit_implementation_of_coerce_unsized: {:?} -> {:?} (free)", source, target);
let infcx = tcx.infer_ctxt().build(); let infcx = tcx.infer_ctxt().build();
@ -337,14 +364,13 @@ pub fn coerce_unsized_info<'tcx>(tcx: TyCtxt<'tcx>, impl_did: LocalDefId) -> Coe
if def_a != def_b { if def_a != def_b {
let source_path = tcx.def_path_str(def_a.did()); let source_path = tcx.def_path_str(def_a.did());
let target_path = tcx.def_path_str(def_b.did()); let target_path = tcx.def_path_str(def_b.did());
tcx.dcx().emit_err(errors::DispatchFromDynSame { return Err(tcx.dcx().emit_err(errors::DispatchFromDynSame {
span, span,
trait_name: "CoerceUnsized", trait_name: "CoerceUnsized",
note: true, note: true,
source_path, source_path,
target_path, target_path,
}); }));
return err_info;
} }
// Here we are considering a case of converting // Here we are considering a case of converting
@ -419,12 +445,11 @@ pub fn coerce_unsized_info<'tcx>(tcx: TyCtxt<'tcx>, impl_did: LocalDefId) -> Coe
.collect::<Vec<_>>(); .collect::<Vec<_>>();
if diff_fields.is_empty() { if diff_fields.is_empty() {
tcx.dcx().emit_err(errors::CoerceUnsizedOneField { return Err(tcx.dcx().emit_err(errors::CoerceUnsizedOneField {
span, span,
trait_name: "CoerceUnsized", trait_name: "CoerceUnsized",
note: true, note: true,
}); }));
return err_info;
} else if diff_fields.len() > 1 { } else if diff_fields.len() > 1 {
let item = tcx.hir().expect_item(impl_did); let item = tcx.hir().expect_item(impl_did);
let span = if let ItemKind::Impl(hir::Impl { of_trait: Some(t), .. }) = &item.kind { let span = if let ItemKind::Impl(hir::Impl { of_trait: Some(t), .. }) = &item.kind {
@ -433,7 +458,7 @@ pub fn coerce_unsized_info<'tcx>(tcx: TyCtxt<'tcx>, impl_did: LocalDefId) -> Coe
tcx.def_span(impl_did) tcx.def_span(impl_did)
}; };
tcx.dcx().emit_err(errors::CoerceUnsizedMulti { return Err(tcx.dcx().emit_err(errors::CoerceUnsizedMulti {
span, span,
coercions_note: true, coercions_note: true,
number: diff_fields.len(), number: diff_fields.len(),
@ -442,9 +467,7 @@ pub fn coerce_unsized_info<'tcx>(tcx: TyCtxt<'tcx>, impl_did: LocalDefId) -> Coe
.map(|&(i, a, b)| format!("`{}` (`{}` to `{}`)", fields[i].name, a, b)) .map(|&(i, a, b)| format!("`{}` (`{}` to `{}`)", fields[i].name, a, b))
.collect::<Vec<_>>() .collect::<Vec<_>>()
.join(", "), .join(", "),
}); }));
return err_info;
} }
let (i, a, b) = diff_fields[0]; let (i, a, b) = diff_fields[0];
@ -453,8 +476,9 @@ pub fn coerce_unsized_info<'tcx>(tcx: TyCtxt<'tcx>, impl_did: LocalDefId) -> Coe
} }
_ => { _ => {
tcx.dcx().emit_err(errors::DispatchFromDynStruct { span, trait_name: "CoerceUnsized" }); return Err(tcx
return err_info; .dcx()
.emit_err(errors::DispatchFromDynStruct { span, trait_name: "CoerceUnsized" }));
} }
}; };
@ -477,7 +501,7 @@ pub fn coerce_unsized_info<'tcx>(tcx: TyCtxt<'tcx>, impl_did: LocalDefId) -> Coe
let outlives_env = OutlivesEnvironment::new(param_env); let outlives_env = OutlivesEnvironment::new(param_env);
let _ = ocx.resolve_regions_and_report_errors(impl_did, &outlives_env); let _ = ocx.resolve_regions_and_report_errors(impl_did, &outlives_env);
CoerceUnsizedInfo { custom_kind: kind } Ok(CoerceUnsizedInfo { custom_kind: kind })
} }
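For reference, the shape of user code this checker accepts is the classic smart-pointer pattern: exactly one field changes type between the source and target impl, and that field's type must itself support the coercion. A minimal nightly sketch, not taken from this commit (the `MyBox` type is invented; the feature gates are the ones the unstable traits require):

```rust
#![feature(coerce_unsized)]
#![feature(unsize)]

use std::marker::Unsize;
use std::ops::CoerceUnsized;

// A toy smart pointer: only the `value` field changes type during the
// coercion, which is exactly what the checker above requires.
struct MyBox<T: ?Sized> {
    value: Box<T>,
}

impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<MyBox<U>> for MyBox<T> {}

fn main() {
    let concrete: MyBox<[i32; 3]> = MyBox { value: Box::new([1, 2, 3]) };
    // Unsizing coercion driven by the impl above: MyBox<[i32; 3]> -> MyBox<[i32]>.
    let erased: MyBox<[i32]> = concrete;
    assert_eq!(erased.value.len(), 3);
}
```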
fn infringing_fields_error( fn infringing_fields_error(

View File

@ -10,6 +10,7 @@ use rustc_errors::{error_code, struct_span_code_err};
use rustc_hir::def_id::{DefId, LocalDefId}; use rustc_hir::def_id::{DefId, LocalDefId};
use rustc_middle::query::Providers; use rustc_middle::query::Providers;
use rustc_middle::ty::{self, TyCtxt, TypeVisitableExt}; use rustc_middle::ty::{self, TyCtxt, TypeVisitableExt};
use rustc_span::ErrorGuaranteed;
use rustc_trait_selection::traits; use rustc_trait_selection::traits;
mod builtin; mod builtin;
@ -18,7 +19,11 @@ mod inherent_impls_overlap;
mod orphan; mod orphan;
mod unsafety; mod unsafety;
fn check_impl(tcx: TyCtxt<'_>, impl_def_id: LocalDefId, trait_ref: ty::TraitRef<'_>) { fn check_impl(
tcx: TyCtxt<'_>,
impl_def_id: LocalDefId,
trait_ref: ty::TraitRef<'_>,
) -> Result<(), ErrorGuaranteed> {
debug!( debug!(
"(checking implementation) adding impl for trait '{:?}', item '{}'", "(checking implementation) adding impl for trait '{:?}', item '{}'",
trait_ref, trait_ref,
@ -28,18 +33,18 @@ fn check_impl(tcx: TyCtxt<'_>, impl_def_id: LocalDefId, trait_ref: ty::TraitRef<
// Skip impls where one of the self type is an error type. // Skip impls where one of the self type is an error type.
// This occurs with e.g., resolve failures (#30589). // This occurs with e.g., resolve failures (#30589).
if trait_ref.references_error() { if trait_ref.references_error() {
return; return Ok(());
} }
enforce_trait_manually_implementable(tcx, impl_def_id, trait_ref.def_id); enforce_trait_manually_implementable(tcx, impl_def_id, trait_ref.def_id)
enforce_empty_impls_for_marker_traits(tcx, impl_def_id, trait_ref.def_id); .and(enforce_empty_impls_for_marker_traits(tcx, impl_def_id, trait_ref.def_id))
} }
fn enforce_trait_manually_implementable( fn enforce_trait_manually_implementable(
tcx: TyCtxt<'_>, tcx: TyCtxt<'_>,
impl_def_id: LocalDefId, impl_def_id: LocalDefId,
trait_def_id: DefId, trait_def_id: DefId,
) { ) -> Result<(), ErrorGuaranteed> {
let impl_header_span = tcx.def_span(impl_def_id); let impl_header_span = tcx.def_span(impl_def_id);
// Disallow *all* explicit impls of traits marked `#[rustc_deny_explicit_impl]` // Disallow *all* explicit impls of traits marked `#[rustc_deny_explicit_impl]`
@ -59,18 +64,17 @@ fn enforce_trait_manually_implementable(
err.code(error_code!(E0328)); err.code(error_code!(E0328));
} }
err.emit(); return Err(err.emit());
return;
} }
if let ty::trait_def::TraitSpecializationKind::AlwaysApplicable = if let ty::trait_def::TraitSpecializationKind::AlwaysApplicable =
tcx.trait_def(trait_def_id).specialization_kind tcx.trait_def(trait_def_id).specialization_kind
{ {
if !tcx.features().specialization && !tcx.features().min_specialization { if !tcx.features().specialization && !tcx.features().min_specialization {
tcx.dcx().emit_err(errors::SpecializationTrait { span: impl_header_span }); return Err(tcx.dcx().emit_err(errors::SpecializationTrait { span: impl_header_span }));
return;
} }
} }
Ok(())
} }
/// We allow impls of marker traits to overlap, so they can't override impls /// We allow impls of marker traits to overlap, so they can't override impls
@ -79,22 +83,22 @@ fn enforce_empty_impls_for_marker_traits(
tcx: TyCtxt<'_>, tcx: TyCtxt<'_>,
impl_def_id: LocalDefId, impl_def_id: LocalDefId,
trait_def_id: DefId, trait_def_id: DefId,
) { ) -> Result<(), ErrorGuaranteed> {
if !tcx.trait_def(trait_def_id).is_marker { if !tcx.trait_def(trait_def_id).is_marker {
return; return Ok(());
} }
if tcx.associated_item_def_ids(trait_def_id).is_empty() { if tcx.associated_item_def_ids(trait_def_id).is_empty() {
return; return Ok(());
} }
struct_span_code_err!( Err(struct_span_code_err!(
tcx.dcx(), tcx.dcx(),
tcx.def_span(impl_def_id), tcx.def_span(impl_def_id),
E0715, E0715,
"impls for marker traits cannot contain items" "impls for marker traits cannot contain items"
) )
.emit(); .emit())
} }
pub fn provide(providers: &mut Providers) { pub fn provide(providers: &mut Providers) {
@ -115,23 +119,23 @@ pub fn provide(providers: &mut Providers) {
}; };
} }
fn coherent_trait(tcx: TyCtxt<'_>, def_id: DefId) { fn coherent_trait(tcx: TyCtxt<'_>, def_id: DefId) -> Result<(), ErrorGuaranteed> {
// Trigger building the specialization graph for the trait. This will detect and report any // Trigger building the specialization graph for the trait. This will detect and report any
// overlap errors. // overlap errors.
tcx.ensure().specialization_graph_of(def_id); let mut res = tcx.ensure().specialization_graph_of(def_id);
let impls = tcx.hir().trait_impls(def_id); let impls = tcx.hir().trait_impls(def_id);
for &impl_def_id in impls { for &impl_def_id in impls {
let trait_ref = tcx.impl_trait_ref(impl_def_id).unwrap().instantiate_identity(); let trait_ref = tcx.impl_trait_ref(impl_def_id).unwrap().instantiate_identity();
check_impl(tcx, impl_def_id, trait_ref); res = res.and(check_impl(tcx, impl_def_id, trait_ref));
check_object_overlap(tcx, impl_def_id, trait_ref); res = res.and(check_object_overlap(tcx, impl_def_id, trait_ref));
unsafety::check_item(tcx, impl_def_id); res = res.and(unsafety::check_item(tcx, impl_def_id));
tcx.ensure().orphan_check_impl(impl_def_id); res = res.and(tcx.ensure().orphan_check_impl(impl_def_id));
} }
builtin::check_trait(tcx, def_id); res.and(builtin::check_trait(tcx, def_id))
} }
/// Checks whether an impl overlaps with the automatic `impl Trait for dyn Trait`. /// Checks whether an impl overlaps with the automatic `impl Trait for dyn Trait`.
@ -139,12 +143,12 @@ fn check_object_overlap<'tcx>(
tcx: TyCtxt<'tcx>, tcx: TyCtxt<'tcx>,
impl_def_id: LocalDefId, impl_def_id: LocalDefId,
trait_ref: ty::TraitRef<'tcx>, trait_ref: ty::TraitRef<'tcx>,
) { ) -> Result<(), ErrorGuaranteed> {
let trait_def_id = trait_ref.def_id; let trait_def_id = trait_ref.def_id;
if trait_ref.references_error() { if trait_ref.references_error() {
debug!("coherence: skipping impl {:?} with error {:?}", impl_def_id, trait_ref); debug!("coherence: skipping impl {:?} with error {:?}", impl_def_id, trait_ref);
return; return Ok(());
} }
// check for overlap with the automatic `impl Trait for dyn Trait` // check for overlap with the automatic `impl Trait for dyn Trait`
@ -173,7 +177,7 @@ fn check_object_overlap<'tcx>(
let mut supertrait_def_ids = traits::supertrait_def_ids(tcx, component_def_id); let mut supertrait_def_ids = traits::supertrait_def_ids(tcx, component_def_id);
if supertrait_def_ids.any(|d| d == trait_def_id) { if supertrait_def_ids.any(|d| d == trait_def_id) {
let span = tcx.def_span(impl_def_id); let span = tcx.def_span(impl_def_id);
struct_span_code_err!( return Err(struct_span_code_err!(
tcx.dcx(), tcx.dcx(),
span, span,
E0371, E0371,
@ -189,9 +193,10 @@ fn check_object_overlap<'tcx>(
tcx.def_path_str(trait_def_id) tcx.def_path_str(trait_def_id)
), ),
) )
.emit(); .emit());
} }
} }
} }
} }
Ok(())
} }

View File

@ -6,8 +6,9 @@ use rustc_hir as hir;
use rustc_hir::Unsafety; use rustc_hir::Unsafety;
use rustc_middle::ty::TyCtxt; use rustc_middle::ty::TyCtxt;
use rustc_span::def_id::LocalDefId; use rustc_span::def_id::LocalDefId;
use rustc_span::ErrorGuaranteed;
pub(super) fn check_item(tcx: TyCtxt<'_>, def_id: LocalDefId) { pub(super) fn check_item(tcx: TyCtxt<'_>, def_id: LocalDefId) -> Result<(), ErrorGuaranteed> {
let item = tcx.hir().expect_item(def_id); let item = tcx.hir().expect_item(def_id);
let impl_ = item.expect_impl(); let impl_ = item.expect_impl();
@ -18,7 +19,7 @@ pub(super) fn check_item(tcx: TyCtxt<'_>, def_id: LocalDefId) {
impl_.generics.params.iter().find(|p| p.pure_wrt_drop).map(|_| "may_dangle"); impl_.generics.params.iter().find(|p| p.pure_wrt_drop).map(|_| "may_dangle");
match (trait_def.unsafety, unsafe_attr, impl_.unsafety, impl_.polarity) { match (trait_def.unsafety, unsafe_attr, impl_.unsafety, impl_.polarity) {
(Unsafety::Normal, None, Unsafety::Unsafe, hir::ImplPolarity::Positive) => { (Unsafety::Normal, None, Unsafety::Unsafe, hir::ImplPolarity::Positive) => {
struct_span_code_err!( return Err(struct_span_code_err!(
tcx.dcx(), tcx.dcx(),
tcx.def_span(def_id), tcx.def_span(def_id),
E0199, E0199,
@ -31,11 +32,11 @@ pub(super) fn check_item(tcx: TyCtxt<'_>, def_id: LocalDefId) {
"", "",
rustc_errors::Applicability::MachineApplicable, rustc_errors::Applicability::MachineApplicable,
) )
.emit(); .emit());
} }
(Unsafety::Unsafe, _, Unsafety::Normal, hir::ImplPolarity::Positive) => { (Unsafety::Unsafe, _, Unsafety::Normal, hir::ImplPolarity::Positive) => {
struct_span_code_err!( return Err(struct_span_code_err!(
tcx.dcx(), tcx.dcx(),
tcx.def_span(def_id), tcx.def_span(def_id),
E0200, E0200,
@ -54,11 +55,11 @@ pub(super) fn check_item(tcx: TyCtxt<'_>, def_id: LocalDefId) {
"unsafe ", "unsafe ",
rustc_errors::Applicability::MaybeIncorrect, rustc_errors::Applicability::MaybeIncorrect,
) )
.emit(); .emit());
} }
(Unsafety::Normal, Some(attr_name), Unsafety::Normal, hir::ImplPolarity::Positive) => { (Unsafety::Normal, Some(attr_name), Unsafety::Normal, hir::ImplPolarity::Positive) => {
struct_span_code_err!( return Err(struct_span_code_err!(
tcx.dcx(), tcx.dcx(),
tcx.def_span(def_id), tcx.def_span(def_id),
E0569, E0569,
@ -77,7 +78,7 @@ pub(super) fn check_item(tcx: TyCtxt<'_>, def_id: LocalDefId) {
"unsafe ", "unsafe ",
rustc_errors::Applicability::MaybeIncorrect, rustc_errors::Applicability::MaybeIncorrect,
) )
.emit(); .emit());
} }
(_, _, Unsafety::Unsafe, hir::ImplPolarity::Negative(_)) => { (_, _, Unsafety::Unsafe, hir::ImplPolarity::Negative(_)) => {
@ -92,4 +93,5 @@ pub(super) fn check_item(tcx: TyCtxt<'_>, def_id: LocalDefId) {
} }
} }
} }
Ok(())
} }
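The match above keeps the `unsafe` bookkeeping between a trait and its impls in sync: an `unsafe trait` needs an `unsafe impl` (E0200), a normal trait must not be implemented with `unsafe impl` (E0199), and `#[may_dangle]` generics force an `unsafe impl` (E0569). A tiny illustration of the accepted pairing (the `Zeroable` trait is invented for the example):

```rust
// An `unsafe trait` states a contract the compiler cannot verify; the
// implementor vouches for it with `unsafe impl`.
unsafe trait Zeroable {}

struct Flags(u8);

// Accepted pairing: unsafe trait + unsafe impl. Writing a plain
// `impl Zeroable for Flags {}` would be E0200, and marking an impl of a
// normal trait as `unsafe` would be E0199.
unsafe impl Zeroable for Flags {}

fn main() {
    // Nothing to run; the point is that the items above type-check.
    let _ = Flags(0);
}
```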

View File

@ -172,19 +172,15 @@ pub fn check_crate(tcx: TyCtxt<'_>) -> Result<(), ErrorGuaranteed> {
tcx.sess.time("coherence_checking", || { tcx.sess.time("coherence_checking", || {
// Check impls constrain their parameters // Check impls constrain their parameters
let res = let mut res =
tcx.hir().try_par_for_each_module(|module| tcx.ensure().check_mod_impl_wf(module)); tcx.hir().try_par_for_each_module(|module| tcx.ensure().check_mod_impl_wf(module));
// FIXME(matthewjasper) We shouldn't need to use `track_errors` anywhere in this function for &trait_def_id in tcx.all_local_trait_impls(()).keys() {
// or the compiler in general. res = res.and(tcx.ensure().coherent_trait(trait_def_id));
res.and(tcx.sess.track_errors(|| { }
for &trait_def_id in tcx.all_local_trait_impls(()).keys() {
tcx.ensure().coherent_trait(trait_def_id);
}
}))
// these queries are executed for side-effects (error reporting): // these queries are executed for side-effects (error reporting):
.and(tcx.ensure().crate_inherent_impls(())) res.and(tcx.ensure().crate_inherent_impls(()))
.and(tcx.ensure().crate_inherent_impls_overlap_check(())) .and(tcx.ensure().crate_inherent_impls_overlap_check(()))
})?; })?;
if tcx.features().rustc_attrs { if tcx.features().rustc_attrs {

View File

@ -220,6 +220,17 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
(self.tcx.lang_items().fn_trait(), Ident::with_dummy_span(sym::call), true), (self.tcx.lang_items().fn_trait(), Ident::with_dummy_span(sym::call), true),
(self.tcx.lang_items().fn_mut_trait(), Ident::with_dummy_span(sym::call_mut), true), (self.tcx.lang_items().fn_mut_trait(), Ident::with_dummy_span(sym::call_mut), true),
(self.tcx.lang_items().fn_once_trait(), Ident::with_dummy_span(sym::call_once), false), (self.tcx.lang_items().fn_once_trait(), Ident::with_dummy_span(sym::call_once), false),
(self.tcx.lang_items().async_fn_trait(), Ident::with_dummy_span(sym::async_call), true),
(
self.tcx.lang_items().async_fn_mut_trait(),
Ident::with_dummy_span(sym::async_call_mut),
true,
),
(
self.tcx.lang_items().async_fn_once_trait(),
Ident::with_dummy_span(sym::async_call_once),
false,
),
] { ] {
let Some(trait_def_id) = opt_trait_def_id else { continue }; let Some(trait_def_id) = opt_trait_def_id else { continue };

View File

@ -294,6 +294,75 @@ impl<T> Trait<T> for X {
); );
} }
} }
(ty::Dynamic(t, _, ty::DynKind::Dyn), ty::Alias(ty::Opaque, alias))
if let Some(def_id) = t.principal_def_id()
&& tcx.explicit_item_bounds(alias.def_id).skip_binder().iter().any(
|(pred, _span)| match pred.kind().skip_binder() {
ty::ClauseKind::Trait(trait_predicate)
if trait_predicate.polarity
== ty::ImplPolarity::Positive =>
{
trait_predicate.def_id() == def_id
}
_ => false,
},
) =>
{
diag.help(format!(
"you can box the `{}` to coerce it to `Box<{}>`, but you'll have to \
change the expected type as well",
values.found, values.expected,
));
}
(ty::Dynamic(t, _, ty::DynKind::Dyn), _)
if let Some(def_id) = t.principal_def_id() =>
{
let mut impl_def_ids = vec![];
tcx.for_each_relevant_impl(def_id, values.found, |did| {
impl_def_ids.push(did)
});
if let [_] = &impl_def_ids[..] {
let trait_name = tcx.item_name(def_id);
diag.help(format!(
"`{}` implements `{trait_name}` so you could box the found value \
and coerce it to the trait object `Box<dyn {trait_name}>`, but you \
will have to change the expected type as well",
values.found,
));
}
}
(_, ty::Dynamic(t, _, ty::DynKind::Dyn))
if let Some(def_id) = t.principal_def_id() =>
{
let mut impl_def_ids = vec![];
tcx.for_each_relevant_impl(def_id, values.expected, |did| {
impl_def_ids.push(did)
});
if let [_] = &impl_def_ids[..] {
let trait_name = tcx.item_name(def_id);
diag.help(format!(
"`{}` implements `{trait_name}` so you could change the expected \
type to `Box<dyn {trait_name}>`",
values.expected,
));
}
}
(ty::Dynamic(t, _, ty::DynKind::DynStar), _)
if let Some(def_id) = t.principal_def_id() =>
{
let mut impl_def_ids = vec![];
tcx.for_each_relevant_impl(def_id, values.found, |did| {
impl_def_ids.push(did)
});
if let [_] = &impl_def_ids[..] {
let trait_name = tcx.item_name(def_id);
diag.help(format!(
"`{}` implements `{trait_name}`, `#[feature(dyn_star)]` is likely \
not enabled; that feature is currently incomplete",
values.found,
));
}
}
(_, ty::Alias(ty::Opaque, opaque_ty)) (_, ty::Alias(ty::Opaque, opaque_ty))
| (ty::Alias(ty::Opaque, opaque_ty), _) => { | (ty::Alias(ty::Opaque, opaque_ty), _) => {
if opaque_ty.def_id.is_local() if opaque_ty.def_id.is_local()
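The new arms attach `help` notes when a mismatch pairs a `dyn Trait` with a type that implements that trait, steering the user toward boxing. A rough illustration of the situation they describe, not taken from this commit (the `Animal`/`Dog` names are invented, and the failing variant is only shown in a comment):

```rust
trait Animal {
    fn name(&self) -> &'static str;
}

struct Dog;

impl Animal for Dog {
    fn name(&self) -> &'static str {
        "dog"
    }
}

// When an expected `dyn Animal` meets a found `Dog`, the new help notes point
// out that `Dog` implements `Animal`, so boxing the value and changing the
// expected type to `Box<dyn Animal>` is the usual way out:
fn fetch() -> Box<dyn Animal> {
    Box::new(Dog)
}

fn main() {
    assert_eq!(fetch().name(), "dog");
}
```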

View File

@ -120,7 +120,8 @@ struct QueryModifiers {
/// Forward the result on ensure if the query gets recomputed, and /// Forward the result on ensure if the query gets recomputed, and
/// return `Ok(())` otherwise. Only applicable to queries returning /// return `Ok(())` otherwise. Only applicable to queries returning
/// `Result<(), ErrorGuaranteed>` /// `Result<T, ErrorGuaranteed>`. The `T` is not returned from `ensure`
/// invocations.
ensure_forwards_result_if_red: Option<Ident>, ensure_forwards_result_if_red: Option<Ident>,
} }
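For readers outside the query system: `ensure_forwards_result_if_red` lets `tcx.ensure()` callers run a query purely for its side effects while still learning whether it failed, which the `coerce_unsized_info` and `coherent_trait` changes below rely on. A schematic sketch of that contract, not the real query machinery (`Guar` stands in for `ErrorGuaranteed` and the cache type is invented):

```rust
use std::collections::HashMap;

/// Stand-in for `ErrorGuaranteed`: proof that an error was already emitted.
#[derive(Clone, Debug, PartialEq)]
struct Guar;

struct Cache<T> {
    map: HashMap<u32, Result<T, Guar>>,
}

impl<T: Clone> Cache<T> {
    /// Full query: compute (or reuse) the value and hand it back.
    fn get(&mut self, key: u32, compute: impl FnOnce() -> Result<T, Guar>) -> Result<T, Guar> {
        self.map.entry(key).or_insert_with(compute).clone()
    }

    /// `ensure`-style call: run the query for its side effects only, but still
    /// forward whether it failed; that is what the modifier buys.
    fn ensure(&mut self, key: u32, compute: impl FnOnce() -> Result<T, Guar>) -> Result<(), Guar> {
        self.get(key, compute).map(|_| ())
    }
}

fn main() {
    let mut cache = Cache { map: HashMap::new() };
    assert_eq!(cache.ensure(1, || Ok(42)), Ok(()));
    assert_eq!(cache.ensure(2, || Err(Guar)), Err(Guar));
    // The cached value itself is still there for callers that want it.
    assert_eq!(cache.get(1, || unreachable!()), Ok(42));
}
```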

View File

@ -236,7 +236,14 @@ provide! { tcx, def_id, other, cdata,
impl_polarity => { table_direct } impl_polarity => { table_direct }
defaultness => { table_direct } defaultness => { table_direct }
constness => { table_direct } constness => { table_direct }
coerce_unsized_info => { table } coerce_unsized_info => {
Ok(cdata
.root
.tables
.coerce_unsized_info
.get(cdata, def_id.index)
.map(|lazy| lazy.decode((cdata, tcx)))
.process_decoded(tcx, || panic!("{def_id:?} does not have coerce_unsized_info"))) }
mir_const_qualif => { table } mir_const_qualif => { table }
rendered_const => { table } rendered_const => { table }
asyncness => { table_direct } asyncness => { table_direct }

View File

@ -1994,7 +1994,7 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> {
// if this is an impl of `CoerceUnsized`, create its // if this is an impl of `CoerceUnsized`, create its
// "unsized info", else just store None // "unsized info", else just store None
if Some(trait_ref.def_id) == tcx.lang_items().coerce_unsized_trait() { if Some(trait_ref.def_id) == tcx.lang_items().coerce_unsized_trait() {
let coerce_unsized_info = tcx.coerce_unsized_info(def_id); let coerce_unsized_info = tcx.coerce_unsized_info(def_id).unwrap();
record!(self.tables.coerce_unsized_info[def_id] <- coerce_unsized_info); record!(self.tables.coerce_unsized_info[def_id] <- coerce_unsized_info);
} }
} }

View File

@ -200,8 +200,6 @@ pub enum InvalidProgramInfo<'tcx> {
/// (which unfortunately typeck does not reject). /// (which unfortunately typeck does not reject).
/// Not using `FnAbiError` as that contains a nested `LayoutError`. /// Not using `FnAbiError` as that contains a nested `LayoutError`.
FnAbiAdjustForForeignAbi(call::AdjustForForeignAbiError), FnAbiAdjustForForeignAbi(call::AdjustForForeignAbiError),
/// We are runnning into a nonsense situation due to ConstProp violating our invariants.
ConstPropNonsense,
} }
/// Details of why a pointer had to be in-bounds. /// Details of why a pointer had to be in-bounds.

View File

@ -244,18 +244,23 @@ impl<'tcx> MirSource<'tcx> {
} }
} }
/// Additional information carried by a MIR body when it is lowered from a coroutine.
/// This information is modified as it is lowered during the `StateTransform` MIR pass,
/// so not all fields will be active at a given time. For example, the `yield_ty` is
/// taken out of the field after yields are turned into returns, and the `coroutine_drop`
/// body is only populated after the state transform pass.
#[derive(Clone, TyEncodable, TyDecodable, Debug, HashStable, TypeFoldable, TypeVisitable)] #[derive(Clone, TyEncodable, TyDecodable, Debug, HashStable, TypeFoldable, TypeVisitable)]
pub struct CoroutineInfo<'tcx> { pub struct CoroutineInfo<'tcx> {
/// The yield type of the function, if it is a coroutine. /// The yield type of the function. This field is removed after the state transform pass.
pub yield_ty: Option<Ty<'tcx>>, pub yield_ty: Option<Ty<'tcx>>,
/// The resume type of the function, if it is a coroutine. /// The resume type of the function. This field is removed after the state transform pass.
pub resume_ty: Option<Ty<'tcx>>, pub resume_ty: Option<Ty<'tcx>>,
/// Coroutine drop glue. /// Coroutine drop glue. This field is populated after the state transform pass.
pub coroutine_drop: Option<Body<'tcx>>, pub coroutine_drop: Option<Body<'tcx>>,
/// The layout of a coroutine. Produced by the state transformation. /// The layout of a coroutine. This field is populated after the state transform pass.
pub coroutine_layout: Option<CoroutineLayout<'tcx>>, pub coroutine_layout: Option<CoroutineLayout<'tcx>>,
/// If this is a coroutine then record the type of source expression that caused this coroutine /// If this is a coroutine then record the type of source expression that caused this coroutine
@ -303,6 +308,12 @@ pub struct Body<'tcx> {
/// and used for debuginfo. Indexed by a `SourceScope`. /// and used for debuginfo. Indexed by a `SourceScope`.
pub source_scopes: IndexVec<SourceScope, SourceScopeData<'tcx>>, pub source_scopes: IndexVec<SourceScope, SourceScopeData<'tcx>>,
/// Additional information carried by a MIR body when it is lowered from a coroutine.
///
/// Note that the coroutine drop shim, any promoted consts, and other synthetic MIR
/// bodies that come from processing a coroutine body are not typically coroutines
/// themselves, and should probably set this to `None` to avoid carrying redundant
/// information.
pub coroutine: Option<Box<CoroutineInfo<'tcx>>>, pub coroutine: Option<Box<CoroutineInfo<'tcx>>>,
/// Declarations of locals. /// Declarations of locals.

View File

@ -1,6 +1,7 @@
use crate::mir; use crate::mir;
use crate::query::CyclePlaceholder; use crate::query::CyclePlaceholder;
use crate::traits; use crate::traits;
use crate::ty::adjustment::CoerceUnsizedInfo;
use crate::ty::{self, Ty}; use crate::ty::{self, Ty};
use std::intrinsics::transmute_unchecked; use std::intrinsics::transmute_unchecked;
use std::mem::{size_of, MaybeUninit}; use std::mem::{size_of, MaybeUninit};
@ -105,6 +106,10 @@ impl EraseType for Result<Option<ty::Instance<'_>>, rustc_errors::ErrorGuarantee
[u8; size_of::<Result<Option<ty::Instance<'static>>, rustc_errors::ErrorGuaranteed>>()]; [u8; size_of::<Result<Option<ty::Instance<'static>>, rustc_errors::ErrorGuaranteed>>()];
} }
impl EraseType for Result<CoerceUnsizedInfo, rustc_errors::ErrorGuaranteed> {
type Result = [u8; size_of::<Result<CoerceUnsizedInfo, rustc_errors::ErrorGuaranteed>>()];
}
impl EraseType for Result<Option<ty::EarlyBinder<ty::Const<'_>>>, rustc_errors::ErrorGuaranteed> { impl EraseType for Result<Option<ty::EarlyBinder<ty::Const<'_>>>, rustc_errors::ErrorGuaranteed> {
type Result = [u8; size_of::< type Result = [u8; size_of::<
Result<Option<ty::EarlyBinder<ty::Const<'static>>>, rustc_errors::ErrorGuaranteed>, Result<Option<ty::EarlyBinder<ty::Const<'static>>>, rustc_errors::ErrorGuaranteed>,

View File

@ -977,10 +977,11 @@ rustc_queries! {
} }
/// Caches `CoerceUnsized` kinds for impls on custom types. /// Caches `CoerceUnsized` kinds for impls on custom types.
query coerce_unsized_info(key: DefId) -> ty::adjustment::CoerceUnsizedInfo { query coerce_unsized_info(key: DefId) -> Result<ty::adjustment::CoerceUnsizedInfo, ErrorGuaranteed> {
desc { |tcx| "computing CoerceUnsized info for `{}`", tcx.def_path_str(key) } desc { |tcx| "computing CoerceUnsized info for `{}`", tcx.def_path_str(key) }
cache_on_disk_if { key.is_local() } cache_on_disk_if { key.is_local() }
separate_provide_extern separate_provide_extern
ensure_forwards_result_if_red
} }
query typeck(key: LocalDefId) -> &'tcx ty::TypeckResults<'tcx> { query typeck(key: LocalDefId) -> &'tcx ty::TypeckResults<'tcx> {
@ -1000,8 +1001,9 @@ rustc_queries! {
desc { |tcx| "checking whether `{}` has a body", tcx.def_path_str(def_id) } desc { |tcx| "checking whether `{}` has a body", tcx.def_path_str(def_id) }
} }
query coherent_trait(def_id: DefId) -> () { query coherent_trait(def_id: DefId) -> Result<(), ErrorGuaranteed> {
desc { |tcx| "coherence checking all impls of trait `{}`", tcx.def_path_str(def_id) } desc { |tcx| "coherence checking all impls of trait `{}`", tcx.def_path_str(def_id) }
ensure_forwards_result_if_red
} }
/// Borrow-checks the function body. If this is a closure, returns /// Borrow-checks the function body. If this is a closure, returns
@ -1032,6 +1034,7 @@ rustc_queries! {
"checking whether impl `{}` follows the orphan rules", "checking whether impl `{}` follows the orphan rules",
tcx.def_path_str(key), tcx.def_path_str(key),
} }
ensure_forwards_result_if_red
} }
/// Check whether the function has any recursion that could cause the inliner to trigger /// Check whether the function has any recursion that could cause the inliner to trigger
@ -1300,6 +1303,7 @@ rustc_queries! {
query specialization_graph_of(trait_id: DefId) -> Result<&'tcx specialization_graph::Graph, ErrorGuaranteed> { query specialization_graph_of(trait_id: DefId) -> Result<&'tcx specialization_graph::Graph, ErrorGuaranteed> {
desc { |tcx| "building specialization graph of trait `{}`", tcx.def_path_str(trait_id) } desc { |tcx| "building specialization graph of trait `{}`", tcx.def_path_str(trait_id) }
cache_on_disk_if { true } cache_on_disk_if { true }
ensure_forwards_result_if_red
} }
query object_safety_violations(trait_id: DefId) -> &'tcx [ObjectSafetyViolation] { query object_safety_violations(trait_id: DefId) -> &'tcx [ObjectSafetyViolation] {
desc { |tcx| "determining object safety of trait `{}`", tcx.def_path_str(trait_id) } desc { |tcx| "determining object safety of trait `{}`", tcx.def_path_str(trait_id) }
@ -1356,9 +1360,9 @@ rustc_queries! {
/// ///
/// This is only correct for ADTs. Call `is_structural_eq_shallow` to handle all types /// This is only correct for ADTs. Call `is_structural_eq_shallow` to handle all types
/// correctly. /// correctly.
query has_structural_eq_impls(ty: Ty<'tcx>) -> bool { query has_structural_eq_impl(ty: Ty<'tcx>) -> bool {
desc { desc {
"computing whether `{}` implements `PartialStructuralEq` and `StructuralEq`", "computing whether `{}` implements `StructuralPartialEq`",
ty ty
} }
} }

View File

@ -20,6 +20,7 @@ use std::marker::PhantomData;
use std::mem; use std::mem;
use std::num::NonZeroUsize; use std::num::NonZeroUsize;
use std::ops::{ControlFlow, Deref}; use std::ops::{ControlFlow, Deref};
use std::ptr::NonNull;
/// An entity in the Rust type system, which can be one of /// An entity in the Rust type system, which can be one of
/// several kinds (types, lifetimes, and consts). /// several kinds (types, lifetimes, and consts).
@ -31,10 +32,29 @@ use std::ops::{ControlFlow, Deref};
/// `Region` and `Const` are all interned. /// `Region` and `Const` are all interned.
#[derive(Copy, Clone, PartialEq, Eq, Hash)] #[derive(Copy, Clone, PartialEq, Eq, Hash)]
pub struct GenericArg<'tcx> { pub struct GenericArg<'tcx> {
ptr: NonZeroUsize, ptr: NonNull<()>,
marker: PhantomData<(Ty<'tcx>, ty::Region<'tcx>, ty::Const<'tcx>)>, marker: PhantomData<(Ty<'tcx>, ty::Region<'tcx>, ty::Const<'tcx>)>,
} }
#[cfg(parallel_compiler)]
unsafe impl<'tcx> rustc_data_structures::sync::DynSend for GenericArg<'tcx> where
&'tcx (Ty<'tcx>, ty::Region<'tcx>, ty::Const<'tcx>): rustc_data_structures::sync::DynSend
{
}
#[cfg(parallel_compiler)]
unsafe impl<'tcx> rustc_data_structures::sync::DynSync for GenericArg<'tcx> where
&'tcx (Ty<'tcx>, ty::Region<'tcx>, ty::Const<'tcx>): rustc_data_structures::sync::DynSync
{
}
unsafe impl<'tcx> Send for GenericArg<'tcx> where
&'tcx (Ty<'tcx>, ty::Region<'tcx>, ty::Const<'tcx>): Send
{
}
unsafe impl<'tcx> Sync for GenericArg<'tcx> where
&'tcx (Ty<'tcx>, ty::Region<'tcx>, ty::Const<'tcx>): Sync
{
}
impl<'tcx> IntoDiagnosticArg for GenericArg<'tcx> { impl<'tcx> IntoDiagnosticArg for GenericArg<'tcx> {
fn into_diagnostic_arg(self) -> DiagnosticArgValue<'static> { fn into_diagnostic_arg(self) -> DiagnosticArgValue<'static> {
self.to_string().into_diagnostic_arg() self.to_string().into_diagnostic_arg()
@ -60,21 +80,21 @@ impl<'tcx> GenericArgKind<'tcx> {
GenericArgKind::Lifetime(lt) => { GenericArgKind::Lifetime(lt) => {
// Ensure we can use the tag bits. // Ensure we can use the tag bits.
assert_eq!(mem::align_of_val(&*lt.0.0) & TAG_MASK, 0); assert_eq!(mem::align_of_val(&*lt.0.0) & TAG_MASK, 0);
(REGION_TAG, lt.0.0 as *const ty::RegionKind<'tcx> as usize) (REGION_TAG, NonNull::from(lt.0.0).cast())
} }
GenericArgKind::Type(ty) => { GenericArgKind::Type(ty) => {
// Ensure we can use the tag bits. // Ensure we can use the tag bits.
assert_eq!(mem::align_of_val(&*ty.0.0) & TAG_MASK, 0); assert_eq!(mem::align_of_val(&*ty.0.0) & TAG_MASK, 0);
(TYPE_TAG, ty.0.0 as *const WithCachedTypeInfo<ty::TyKind<'tcx>> as usize) (TYPE_TAG, NonNull::from(ty.0.0).cast())
} }
GenericArgKind::Const(ct) => { GenericArgKind::Const(ct) => {
// Ensure we can use the tag bits. // Ensure we can use the tag bits.
assert_eq!(mem::align_of_val(&*ct.0.0) & TAG_MASK, 0); assert_eq!(mem::align_of_val(&*ct.0.0) & TAG_MASK, 0);
(CONST_TAG, ct.0.0 as *const WithCachedTypeInfo<ty::ConstData<'tcx>> as usize) (CONST_TAG, NonNull::from(ct.0.0).cast())
} }
}; };
GenericArg { ptr: unsafe { NonZeroUsize::new_unchecked(ptr | tag) }, marker: PhantomData } GenericArg { ptr: ptr.map_addr(|addr| addr | tag), marker: PhantomData }
} }
} }
@ -123,20 +143,22 @@ impl<'tcx> From<ty::Term<'tcx>> for GenericArg<'tcx> {
impl<'tcx> GenericArg<'tcx> { impl<'tcx> GenericArg<'tcx> {
#[inline] #[inline]
pub fn unpack(self) -> GenericArgKind<'tcx> { pub fn unpack(self) -> GenericArgKind<'tcx> {
let ptr = self.ptr.get(); let ptr = unsafe {
self.ptr.map_addr(|addr| NonZeroUsize::new_unchecked(addr.get() & !TAG_MASK))
};
// SAFETY: use of `Interned::new_unchecked` here is ok because these // SAFETY: use of `Interned::new_unchecked` here is ok because these
// pointers were originally created from `Interned` types in `pack()`, // pointers were originally created from `Interned` types in `pack()`,
// and this is just going in the other direction. // and this is just going in the other direction.
unsafe { unsafe {
match ptr & TAG_MASK { match self.ptr.addr().get() & TAG_MASK {
REGION_TAG => GenericArgKind::Lifetime(ty::Region(Interned::new_unchecked( REGION_TAG => GenericArgKind::Lifetime(ty::Region(Interned::new_unchecked(
&*((ptr & !TAG_MASK) as *const ty::RegionKind<'tcx>), ptr.cast::<ty::RegionKind<'tcx>>().as_ref(),
))), ))),
TYPE_TAG => GenericArgKind::Type(Ty(Interned::new_unchecked( TYPE_TAG => GenericArgKind::Type(Ty(Interned::new_unchecked(
&*((ptr & !TAG_MASK) as *const WithCachedTypeInfo<ty::TyKind<'tcx>>), ptr.cast::<WithCachedTypeInfo<ty::TyKind<'tcx>>>().as_ref(),
))), ))),
CONST_TAG => GenericArgKind::Const(ty::Const(Interned::new_unchecked( CONST_TAG => GenericArgKind::Const(ty::Const(Interned::new_unchecked(
&*((ptr & !TAG_MASK) as *const WithCachedTypeInfo<ty::ConstData<'tcx>>), ptr.cast::<WithCachedTypeInfo<ty::ConstData<'tcx>>>().as_ref(),
))), ))),
_ => intrinsics::unreachable(), _ => intrinsics::unreachable(),
} }
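The switch from `NonZeroUsize` to `NonNull` keeps the tag-bit packing but stops laundering pointers through bare integers, so pointer provenance survives. A self-contained sketch of the same technique, not rustc code; it assumes the strict-provenance pointer methods `addr`/`map_addr`, which on a nightly contemporary with this merge sit behind `#![feature(strict_provenance)]`:

```rust
const TAG_MASK: usize = 0b11;
const INT_TAG: usize = 0b00;
const FLOAT_TAG: usize = 0b01;

enum Unpacked<'a> {
    Int(&'a u64),
    Float(&'a f64),
}

/// Fold the tag into the pointer's low bits without ever leaving pointer land.
fn pack(value: Unpacked<'_>) -> *const () {
    let (tag, ptr) = match value {
        Unpacked::Int(r) => (INT_TAG, r as *const u64 as *const ()),
        Unpacked::Float(r) => (FLOAT_TAG, r as *const f64 as *const ()),
    };
    // Both referents are 8-byte aligned, so the two low bits are known to be free.
    assert_eq!(ptr.addr() & TAG_MASK, 0);
    ptr.map_addr(|addr| addr | tag)
}

/// Safety: `ptr` must come from `pack` and the referent must still be live.
unsafe fn unpack<'a>(ptr: *const ()) -> Unpacked<'a> {
    let untagged = ptr.map_addr(|addr| addr & !TAG_MASK);
    // SAFETY: delegated to the caller; the tag tells us which type lives there.
    unsafe {
        match ptr.addr() & TAG_MASK {
            INT_TAG => Unpacked::Int(&*untagged.cast::<u64>()),
            FLOAT_TAG => Unpacked::Float(&*untagged.cast::<f64>()),
            _ => unreachable!(),
        }
    }
}

fn main() {
    let (x, y) = (42u64, 1.5f64);
    let (a, b) = (pack(Unpacked::Int(&x)), pack(Unpacked::Float(&y)));
    unsafe {
        match unpack(a) {
            Unpacked::Int(v) => assert_eq!(*v, 42),
            _ => unreachable!(),
        }
        match unpack(b) {
            Unpacked::Float(v) => assert_eq!(*v, 1.5),
            _ => unreachable!(),
        }
    }
}
```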

View File

@ -63,6 +63,7 @@ use std::marker::PhantomData;
use std::mem; use std::mem;
use std::num::NonZeroUsize; use std::num::NonZeroUsize;
use std::ops::ControlFlow; use std::ops::ControlFlow;
use std::ptr::NonNull;
use std::{fmt, str}; use std::{fmt, str};
pub use crate::ty::diagnostics::*; pub use crate::ty::diagnostics::*;
@ -848,10 +849,23 @@ pub type PolyCoercePredicate<'tcx> = ty::Binder<'tcx, CoercePredicate<'tcx>>;
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Term<'tcx> { pub struct Term<'tcx> {
ptr: NonZeroUsize, ptr: NonNull<()>,
marker: PhantomData<(Ty<'tcx>, Const<'tcx>)>, marker: PhantomData<(Ty<'tcx>, Const<'tcx>)>,
} }
#[cfg(parallel_compiler)]
unsafe impl<'tcx> rustc_data_structures::sync::DynSend for Term<'tcx> where
&'tcx (Ty<'tcx>, Const<'tcx>): rustc_data_structures::sync::DynSend
{
}
#[cfg(parallel_compiler)]
unsafe impl<'tcx> rustc_data_structures::sync::DynSync for Term<'tcx> where
&'tcx (Ty<'tcx>, Const<'tcx>): rustc_data_structures::sync::DynSync
{
}
unsafe impl<'tcx> Send for Term<'tcx> where &'tcx (Ty<'tcx>, Const<'tcx>): Send {}
unsafe impl<'tcx> Sync for Term<'tcx> where &'tcx (Ty<'tcx>, Const<'tcx>): Sync {}
impl Debug for Term<'_> { impl Debug for Term<'_> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let data = if let Some(ty) = self.ty() { let data = if let Some(ty) = self.ty() {
@ -914,17 +928,19 @@ impl<'tcx, D: TyDecoder<I = TyCtxt<'tcx>>> Decodable<D> for Term<'tcx> {
impl<'tcx> Term<'tcx> { impl<'tcx> Term<'tcx> {
#[inline] #[inline]
pub fn unpack(self) -> TermKind<'tcx> { pub fn unpack(self) -> TermKind<'tcx> {
let ptr = self.ptr.get(); let ptr = unsafe {
self.ptr.map_addr(|addr| NonZeroUsize::new_unchecked(addr.get() & !TAG_MASK))
};
// SAFETY: use of `Interned::new_unchecked` here is ok because these // SAFETY: use of `Interned::new_unchecked` here is ok because these
// pointers were originally created from `Interned` types in `pack()`, // pointers were originally created from `Interned` types in `pack()`,
// and this is just going in the other direction. // and this is just going in the other direction.
unsafe { unsafe {
match ptr & TAG_MASK { match self.ptr.addr().get() & TAG_MASK {
TYPE_TAG => TermKind::Ty(Ty(Interned::new_unchecked( TYPE_TAG => TermKind::Ty(Ty(Interned::new_unchecked(
&*((ptr & !TAG_MASK) as *const WithCachedTypeInfo<ty::TyKind<'tcx>>), ptr.cast::<WithCachedTypeInfo<ty::TyKind<'tcx>>>().as_ref(),
))), ))),
CONST_TAG => TermKind::Const(ty::Const(Interned::new_unchecked( CONST_TAG => TermKind::Const(ty::Const(Interned::new_unchecked(
&*((ptr & !TAG_MASK) as *const WithCachedTypeInfo<ty::ConstData<'tcx>>), ptr.cast::<WithCachedTypeInfo<ty::ConstData<'tcx>>>().as_ref(),
))), ))),
_ => core::intrinsics::unreachable(), _ => core::intrinsics::unreachable(),
} }
@ -986,16 +1002,16 @@ impl<'tcx> TermKind<'tcx> {
TermKind::Ty(ty) => { TermKind::Ty(ty) => {
// Ensure we can use the tag bits. // Ensure we can use the tag bits.
assert_eq!(mem::align_of_val(&*ty.0.0) & TAG_MASK, 0); assert_eq!(mem::align_of_val(&*ty.0.0) & TAG_MASK, 0);
(TYPE_TAG, ty.0.0 as *const WithCachedTypeInfo<ty::TyKind<'tcx>> as usize) (TYPE_TAG, NonNull::from(ty.0.0).cast())
} }
TermKind::Const(ct) => { TermKind::Const(ct) => {
// Ensure we can use the tag bits. // Ensure we can use the tag bits.
assert_eq!(mem::align_of_val(&*ct.0.0) & TAG_MASK, 0); assert_eq!(mem::align_of_val(&*ct.0.0) & TAG_MASK, 0);
(CONST_TAG, ct.0.0 as *const WithCachedTypeInfo<ty::ConstData<'tcx>> as usize) (CONST_TAG, NonNull::from(ct.0.0).cast())
} }
}; };
Term { ptr: unsafe { NonZeroUsize::new_unchecked(ptr | tag) }, marker: PhantomData } Term { ptr: ptr.map_addr(|addr| addr | tag), marker: PhantomData }
} }
} }

View File

@ -350,7 +350,7 @@ impl<'tcx> TyCtxt<'tcx> {
validate: impl Fn(Self, DefId) -> Result<(), ErrorGuaranteed>, validate: impl Fn(Self, DefId) -> Result<(), ErrorGuaranteed>,
) -> Option<ty::Destructor> { ) -> Option<ty::Destructor> {
let drop_trait = self.lang_items().drop_trait()?; let drop_trait = self.lang_items().drop_trait()?;
self.ensure().coherent_trait(drop_trait); self.ensure().coherent_trait(drop_trait).ok()?;
let ty = self.type_of(adt_did).instantiate_identity(); let ty = self.type_of(adt_did).instantiate_identity();
let mut dtor_candidate = None; let mut dtor_candidate = None;
@ -1249,19 +1249,18 @@ impl<'tcx> Ty<'tcx> {
/// Primitive types (`u32`, `str`) have structural equality by definition. For composite data /// Primitive types (`u32`, `str`) have structural equality by definition. For composite data
/// types, equality for the type as a whole is structural when it is the same as equality /// types, equality for the type as a whole is structural when it is the same as equality
/// between all components (fields, array elements, etc.) of that type. For ADTs, structural /// between all components (fields, array elements, etc.) of that type. For ADTs, structural
/// equality is indicated by an implementation of `PartialStructuralEq` and `StructuralEq` for /// equality is indicated by an implementation of `StructuralPartialEq` for that type.
/// that type.
/// ///
/// This function is "shallow" because it may return `true` for a composite type whose fields /// This function is "shallow" because it may return `true` for a composite type whose fields
/// are not `StructuralEq`. For example, `[T; 4]` has structural equality regardless of `T` /// are not `StructuralPartialEq`. For example, `[T; 4]` has structural equality regardless of `T`
/// because equality for arrays is determined by the equality of each array element. If you /// because equality for arrays is determined by the equality of each array element. If you
/// want to know whether a given call to `PartialEq::eq` will proceed structurally all the way /// want to know whether a given call to `PartialEq::eq` will proceed structurally all the way
/// down, you will need to use a type visitor. /// down, you will need to use a type visitor.
#[inline] #[inline]
pub fn is_structural_eq_shallow(self, tcx: TyCtxt<'tcx>) -> bool { pub fn is_structural_eq_shallow(self, tcx: TyCtxt<'tcx>) -> bool {
match self.kind() { match self.kind() {
// Look for an impl of both `PartialStructuralEq` and `StructuralEq`. // Look for an impl of `StructuralPartialEq`.
ty::Adt(..) => tcx.has_structural_eq_impls(self), ty::Adt(..) => tcx.has_structural_eq_impl(self),
// Primitive types that satisfy `Eq`. // Primitive types that satisfy `Eq`.
ty::Bool | ty::Char | ty::Int(_) | ty::Uint(_) | ty::Str | ty::Never => true, ty::Bool | ty::Char | ty::Int(_) | ty::Uint(_) | ty::Str | ty::Never => true,

View File

@ -110,7 +110,7 @@ mir_build_extern_static_requires_unsafe_unsafe_op_in_unsafe_fn_allowed =
mir_build_float_pattern = floating-point types cannot be used in patterns mir_build_float_pattern = floating-point types cannot be used in patterns
mir_build_indirect_structural_match = mir_build_indirect_structural_match =
to use a constant of type `{$non_sm_ty}` in a pattern, `{$non_sm_ty}` must be annotated with `#[derive(PartialEq, Eq)]` to use a constant of type `{$non_sm_ty}` in a pattern, `{$non_sm_ty}` must be annotated with `#[derive(PartialEq)]`
mir_build_inform_irrefutable = `let` bindings require an "irrefutable pattern", like a `struct` or an `enum` with only one variant mir_build_inform_irrefutable = `let` bindings require an "irrefutable pattern", like a `struct` or an `enum` with only one variant
@ -254,7 +254,7 @@ mir_build_non_partial_eq_match =
to use a constant of type `{$non_peq_ty}` in a pattern, the type must implement `PartialEq` to use a constant of type `{$non_peq_ty}` in a pattern, the type must implement `PartialEq`
mir_build_nontrivial_structural_match = mir_build_nontrivial_structural_match =
to use a constant of type `{$non_sm_ty}` in a pattern, the constant's initializer must be trivial or `{$non_sm_ty}` must be annotated with `#[derive(PartialEq, Eq)]` to use a constant of type `{$non_sm_ty}` in a pattern, the constant's initializer must be trivial or `{$non_sm_ty}` must be annotated with `#[derive(PartialEq)]`
mir_build_pattern_not_covered = refutable pattern in {$origin} mir_build_pattern_not_covered = refutable pattern in {$origin}
.pattern_ty = the matched value is of type `{$pattern_ty}` .pattern_ty = the matched value is of type `{$pattern_ty}`
@ -297,9 +297,9 @@ mir_build_trailing_irrefutable_let_patterns = trailing irrefutable {$count ->
} into the body } into the body
mir_build_type_not_structural = mir_build_type_not_structural =
to use a constant of type `{$non_sm_ty}` in a pattern, `{$non_sm_ty}` must be annotated with `#[derive(PartialEq, Eq)]` to use a constant of type `{$non_sm_ty}` in a pattern, `{$non_sm_ty}` must be annotated with `#[derive(PartialEq)]`
mir_build_type_not_structural_more_info = see https://doc.rust-lang.org/stable/std/marker/trait.StructuralEq.html for details mir_build_type_not_structural_more_info = see https://doc.rust-lang.org/stable/std/marker/trait.StructuralPartialEq.html for details
mir_build_type_not_structural_tip = the traits must be derived, manual `impl`s are not sufficient mir_build_type_not_structural_tip = the traits must be derived, manual `impl`s are not sufficient

View File

@ -1,21 +1,12 @@
//! Propagates constants for early reporting of statically known //! Propagates constants for early reporting of statically known
//! assertion failures //! assertion failures
use rustc_const_eval::interpret::{
self, compile_time_machine, AllocId, ConstAllocation, FnArg, Frame, ImmTy, InterpCx,
InterpResult, OpTy, PlaceTy, Pointer,
};
use rustc_data_structures::fx::FxHashSet;
use rustc_index::bit_set::BitSet; use rustc_index::bit_set::BitSet;
use rustc_index::IndexVec; use rustc_index::IndexVec;
use rustc_middle::mir::visit::{MutatingUseContext, NonMutatingUseContext, PlaceContext, Visitor}; use rustc_middle::mir::visit::{MutatingUseContext, NonMutatingUseContext, PlaceContext, Visitor};
use rustc_middle::mir::*; use rustc_middle::mir::*;
use rustc_middle::query::TyCtxtAt; use rustc_middle::ty::{ParamEnv, TyCtxt};
use rustc_middle::ty::layout::TyAndLayout;
use rustc_middle::ty::{self, ParamEnv, TyCtxt};
use rustc_span::def_id::DefId;
use rustc_target::abi::Size; use rustc_target::abi::Size;
use rustc_target::spec::abi::Abi as CallAbi;
/// The maximum number of bytes that we'll allocate space for a local or the return value. /// The maximum number of bytes that we'll allocate space for a local or the return value.
/// Needed for #66397, because otherwise we eval into large places and that can cause OOM or just /// Needed for #66397, because otherwise we eval into large places and that can cause OOM or just
@ -49,162 +40,6 @@ pub(crate) macro throw_machine_stop_str($($tt:tt)*) {{
throw_machine_stop!(Zst) throw_machine_stop!(Zst)
}} }}
pub(crate) struct ConstPropMachine<'mir, 'tcx> {
/// The virtual call stack.
stack: Vec<Frame<'mir, 'tcx>>,
pub written_only_inside_own_block_locals: FxHashSet<Local>,
pub can_const_prop: IndexVec<Local, ConstPropMode>,
}
impl ConstPropMachine<'_, '_> {
pub fn new(can_const_prop: IndexVec<Local, ConstPropMode>) -> Self {
Self {
stack: Vec::new(),
written_only_inside_own_block_locals: Default::default(),
can_const_prop,
}
}
}
impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for ConstPropMachine<'mir, 'tcx> {
compile_time_machine!(<'mir, 'tcx>);
const PANIC_ON_ALLOC_FAIL: bool = true; // all allocations are small (see `MAX_ALLOC_LIMIT`)
const POST_MONO_CHECKS: bool = false; // this MIR is still generic!
type MemoryKind = !;
#[inline(always)]
fn enforce_alignment(_ecx: &InterpCx<'mir, 'tcx, Self>) -> bool {
false // no reason to enforce alignment
}
#[inline(always)]
fn enforce_validity(_ecx: &InterpCx<'mir, 'tcx, Self>, _layout: TyAndLayout<'tcx>) -> bool {
false // for now, we don't enforce validity
}
fn load_mir(
_ecx: &InterpCx<'mir, 'tcx, Self>,
_instance: ty::InstanceDef<'tcx>,
) -> InterpResult<'tcx, &'tcx Body<'tcx>> {
throw_machine_stop_str!("calling functions isn't supported in ConstProp")
}
fn panic_nounwind(_ecx: &mut InterpCx<'mir, 'tcx, Self>, _msg: &str) -> InterpResult<'tcx> {
throw_machine_stop_str!("panicking isn't supported in ConstProp")
}
fn find_mir_or_eval_fn(
_ecx: &mut InterpCx<'mir, 'tcx, Self>,
_instance: ty::Instance<'tcx>,
_abi: CallAbi,
_args: &[FnArg<'tcx>],
_destination: &PlaceTy<'tcx>,
_target: Option<BasicBlock>,
_unwind: UnwindAction,
) -> InterpResult<'tcx, Option<(&'mir Body<'tcx>, ty::Instance<'tcx>)>> {
Ok(None)
}
fn call_intrinsic(
_ecx: &mut InterpCx<'mir, 'tcx, Self>,
_instance: ty::Instance<'tcx>,
_args: &[OpTy<'tcx>],
_destination: &PlaceTy<'tcx>,
_target: Option<BasicBlock>,
_unwind: UnwindAction,
) -> InterpResult<'tcx> {
throw_machine_stop_str!("calling intrinsics isn't supported in ConstProp")
}
fn assert_panic(
_ecx: &mut InterpCx<'mir, 'tcx, Self>,
_msg: &rustc_middle::mir::AssertMessage<'tcx>,
_unwind: rustc_middle::mir::UnwindAction,
) -> InterpResult<'tcx> {
bug!("panics terminators are not evaluated in ConstProp")
}
fn binary_ptr_op(
_ecx: &InterpCx<'mir, 'tcx, Self>,
_bin_op: BinOp,
_left: &ImmTy<'tcx>,
_right: &ImmTy<'tcx>,
) -> InterpResult<'tcx, (ImmTy<'tcx>, bool)> {
// We can't do this because aliasing of memory can differ between const eval and llvm
throw_machine_stop_str!("pointer arithmetic or comparisons aren't supported in ConstProp")
}
fn before_access_local_mut<'a>(
ecx: &'a mut InterpCx<'mir, 'tcx, Self>,
frame: usize,
local: Local,
) -> InterpResult<'tcx> {
assert_eq!(frame, 0);
match ecx.machine.can_const_prop[local] {
ConstPropMode::NoPropagation => {
throw_machine_stop_str!(
"tried to write to a local that is marked as not propagatable"
)
}
ConstPropMode::OnlyInsideOwnBlock => {
ecx.machine.written_only_inside_own_block_locals.insert(local);
}
ConstPropMode::FullConstProp => {}
}
Ok(())
}
fn before_access_global(
_tcx: TyCtxtAt<'tcx>,
_machine: &Self,
_alloc_id: AllocId,
alloc: ConstAllocation<'tcx>,
_static_def_id: Option<DefId>,
is_write: bool,
) -> InterpResult<'tcx> {
if is_write {
throw_machine_stop_str!("can't write to global");
}
// If the static allocation is mutable, then we can't const prop it as its content
// might be different at runtime.
if alloc.inner().mutability.is_mut() {
throw_machine_stop_str!("can't access mutable globals in ConstProp");
}
Ok(())
}
#[inline(always)]
fn expose_ptr(_ecx: &mut InterpCx<'mir, 'tcx, Self>, _ptr: Pointer) -> InterpResult<'tcx> {
throw_machine_stop_str!("exposing pointers isn't supported in ConstProp")
}
#[inline(always)]
fn init_frame_extra(
_ecx: &mut InterpCx<'mir, 'tcx, Self>,
frame: Frame<'mir, 'tcx>,
) -> InterpResult<'tcx, Frame<'mir, 'tcx>> {
Ok(frame)
}
#[inline(always)]
fn stack<'a>(
ecx: &'a InterpCx<'mir, 'tcx, Self>,
) -> &'a [Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>] {
&ecx.machine.stack
}
#[inline(always)]
fn stack_mut<'a>(
ecx: &'a mut InterpCx<'mir, 'tcx, Self>,
) -> &'a mut Vec<Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>> {
&mut ecx.machine.stack
}
}
/// The mode that `ConstProp` is allowed to run in for a given `Local`. /// The mode that `ConstProp` is allowed to run in for a given `Local`.
#[derive(Clone, Copy, Debug, PartialEq)] #[derive(Clone, Copy, Debug, PartialEq)]
pub enum ConstPropMode { pub enum ConstPropMode {

View File

@ -3,37 +3,26 @@
use std::fmt::Debug; use std::fmt::Debug;
use either::Left; use rustc_const_eval::interpret::{ImmTy, Projectable};
use rustc_const_eval::interpret::{InterpCx, InterpResult, Scalar};
use rustc_const_eval::interpret::Immediate; use rustc_data_structures::fx::FxHashSet;
use rustc_const_eval::interpret::{
InterpCx, InterpResult, MemoryKind, OpTy, Scalar, StackPopCleanup,
};
use rustc_const_eval::ReportErrorExt;
use rustc_hir::def::DefKind; use rustc_hir::def::DefKind;
use rustc_hir::HirId; use rustc_hir::HirId;
use rustc_index::bit_set::BitSet; use rustc_index::bit_set::BitSet;
use rustc_index::{Idx, IndexVec};
use rustc_middle::mir::visit::Visitor; use rustc_middle::mir::visit::Visitor;
use rustc_middle::mir::*; use rustc_middle::mir::*;
use rustc_middle::ty::layout::{LayoutError, LayoutOf, LayoutOfHelpers, TyAndLayout}; use rustc_middle::ty::layout::{LayoutError, LayoutOf, LayoutOfHelpers, TyAndLayout};
use rustc_middle::ty::GenericArgs; use rustc_middle::ty::{self, ConstInt, ParamEnv, ScalarInt, Ty, TyCtxt, TypeVisitableExt};
use rustc_middle::ty::{
self, ConstInt, Instance, ParamEnv, ScalarInt, Ty, TyCtxt, TypeVisitableExt,
};
use rustc_span::Span; use rustc_span::Span;
use rustc_target::abi::{HasDataLayout, Size, TargetDataLayout}; use rustc_target::abi::{Abi, FieldIdx, HasDataLayout, Size, TargetDataLayout, VariantIdx};
use crate::const_prop::CanConstProp; use crate::const_prop::CanConstProp;
use crate::const_prop::ConstPropMachine;
use crate::const_prop::ConstPropMode; use crate::const_prop::ConstPropMode;
use crate::errors::AssertLint; use crate::dataflow_const_prop::DummyMachine;
use crate::errors::{AssertLint, AssertLintKind};
use crate::MirLint; use crate::MirLint;
/// The maximum number of bytes that we'll allocate space for a local or the return value.
/// Needed for #66397, because otherwise we eval into large places and that can cause OOM or just
/// Severely regress performance.
const MAX_ALLOC_LIMIT: u64 = 1024;
pub struct ConstPropLint; pub struct ConstPropLint;
impl<'tcx> MirLint<'tcx> for ConstPropLint { impl<'tcx> MirLint<'tcx> for ConstPropLint {
@ -81,11 +70,85 @@ impl<'tcx> MirLint<'tcx> for ConstPropLint {
/// Finds optimization opportunities on the MIR. /// Finds optimization opportunities on the MIR.
struct ConstPropagator<'mir, 'tcx> { struct ConstPropagator<'mir, 'tcx> {
ecx: InterpCx<'mir, 'tcx, ConstPropMachine<'mir, 'tcx>>, ecx: InterpCx<'mir, 'tcx, DummyMachine>,
tcx: TyCtxt<'tcx>, tcx: TyCtxt<'tcx>,
param_env: ParamEnv<'tcx>, param_env: ParamEnv<'tcx>,
worklist: Vec<BasicBlock>, worklist: Vec<BasicBlock>,
visited_blocks: BitSet<BasicBlock>, visited_blocks: BitSet<BasicBlock>,
locals: IndexVec<Local, Value<'tcx>>,
body: &'mir Body<'tcx>,
written_only_inside_own_block_locals: FxHashSet<Local>,
can_const_prop: IndexVec<Local, ConstPropMode>,
}
#[derive(Debug, Clone)]
enum Value<'tcx> {
Immediate(ImmTy<'tcx>),
Aggregate { variant: VariantIdx, fields: IndexVec<FieldIdx, Value<'tcx>> },
Uninit,
}
impl<'tcx> From<ImmTy<'tcx>> for Value<'tcx> {
fn from(v: ImmTy<'tcx>) -> Self {
Self::Immediate(v)
}
}
impl<'tcx> Value<'tcx> {
fn project(
&self,
proj: &[PlaceElem<'tcx>],
prop: &ConstPropagator<'_, 'tcx>,
) -> Option<&Value<'tcx>> {
let mut this = self;
for proj in proj {
this = match (*proj, this) {
(PlaceElem::Field(idx, _), Value::Aggregate { fields, .. }) => {
fields.get(idx).unwrap_or(&Value::Uninit)
}
(PlaceElem::Index(idx), Value::Aggregate { fields, .. }) => {
let idx = prop.get_const(idx.into())?.immediate()?;
let idx = prop.ecx.read_target_usize(idx).ok()?;
fields.get(FieldIdx::from_u32(idx.try_into().ok()?)).unwrap_or(&Value::Uninit)
}
(
PlaceElem::ConstantIndex { offset, min_length: _, from_end: false },
Value::Aggregate { fields, .. },
) => fields
.get(FieldIdx::from_u32(offset.try_into().ok()?))
.unwrap_or(&Value::Uninit),
_ => return None,
};
}
Some(this)
}
fn project_mut(&mut self, proj: &[PlaceElem<'_>]) -> Option<&mut Value<'tcx>> {
let mut this = self;
for proj in proj {
this = match (proj, this) {
(PlaceElem::Field(idx, _), Value::Aggregate { fields, .. }) => {
fields.ensure_contains_elem(*idx, || Value::Uninit)
}
(PlaceElem::Field(..), val @ Value::Uninit) => {
*val = Value::Aggregate {
variant: VariantIdx::new(0),
fields: Default::default(),
};
val.project_mut(&[*proj])?
}
_ => return None,
};
}
Some(this)
}
fn immediate(&self) -> Option<&ImmTy<'tcx>> {
match self {
Value::Immediate(op) => Some(op),
_ => None,
}
}
} }
impl<'tcx> LayoutOfHelpers<'tcx> for ConstPropagator<'_, 'tcx> { impl<'tcx> LayoutOfHelpers<'tcx> for ConstPropagator<'_, 'tcx> {
@ -121,49 +184,10 @@ impl<'tcx> ty::layout::HasParamEnv<'tcx> for ConstPropagator<'_, 'tcx> {
impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> { impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
fn new(body: &'mir Body<'tcx>, tcx: TyCtxt<'tcx>) -> ConstPropagator<'mir, 'tcx> { fn new(body: &'mir Body<'tcx>, tcx: TyCtxt<'tcx>) -> ConstPropagator<'mir, 'tcx> {
let def_id = body.source.def_id(); let def_id = body.source.def_id();
let args = &GenericArgs::identity_for_item(tcx, def_id);
let param_env = tcx.param_env_reveal_all_normalized(def_id); let param_env = tcx.param_env_reveal_all_normalized(def_id);
let can_const_prop = CanConstProp::check(tcx, param_env, body); let can_const_prop = CanConstProp::check(tcx, param_env, body);
let mut ecx = InterpCx::new( let ecx = InterpCx::new(tcx, tcx.def_span(def_id), param_env, DummyMachine);
tcx,
tcx.def_span(def_id),
param_env,
ConstPropMachine::new(can_const_prop),
);
let ret_layout = ecx
.layout_of(body.bound_return_ty().instantiate(tcx, args))
.ok()
// Don't bother allocating memory for large values.
// I don't know how return types can seem to be unsized but this happens in the
// `type/type-unsatisfiable.rs` test.
.filter(|ret_layout| {
ret_layout.is_sized() && ret_layout.size < Size::from_bytes(MAX_ALLOC_LIMIT)
})
.unwrap_or_else(|| ecx.layout_of(tcx.types.unit).unwrap());
let ret = ecx
.allocate(ret_layout, MemoryKind::Stack)
.expect("couldn't perform small allocation")
.into();
ecx.push_stack_frame(
Instance::new(def_id, args),
body,
&ret,
StackPopCleanup::Root { cleanup: false },
)
.expect("failed to push initial stack frame");
for local in body.local_decls.indices() {
// Mark everything initially live.
// This is somewhat dicey since some of them might be unsized and it is incoherent to
// mark those as live... We rely on `local_to_place`/`local_to_op` in the interpreter
// stopping us before those unsized immediates can cause issues deeper in the
// interpreter.
ecx.frame_mut().locals[local].make_live_uninit();
}
ConstPropagator { ConstPropagator {
ecx, ecx,
@ -171,61 +195,47 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
param_env, param_env,
worklist: vec![START_BLOCK], worklist: vec![START_BLOCK],
visited_blocks: BitSet::new_empty(body.basic_blocks.len()), visited_blocks: BitSet::new_empty(body.basic_blocks.len()),
locals: IndexVec::from_elem_n(Value::Uninit, body.local_decls.len()),
body,
can_const_prop,
written_only_inside_own_block_locals: Default::default(),
} }
} }
fn body(&self) -> &'mir Body<'tcx> {
self.ecx.frame().body
}
fn local_decls(&self) -> &'mir LocalDecls<'tcx> { fn local_decls(&self) -> &'mir LocalDecls<'tcx> {
&self.body().local_decls &self.body.local_decls
} }
fn get_const(&self, place: Place<'tcx>) -> Option<OpTy<'tcx>> { fn get_const(&self, place: Place<'tcx>) -> Option<&Value<'tcx>> {
let op = match self.ecx.eval_place_to_op(place, None) { self.locals[place.local].project(&place.projection, self)
Ok(op) => {
if op
.as_mplace_or_imm()
.right()
.is_some_and(|imm| matches!(*imm, Immediate::Uninit))
{
// Make sure nobody accidentally uses this value.
return None;
}
op
}
Err(e) => {
trace!("get_const failed: {:?}", e.into_kind().debug());
return None;
}
};
// Try to read the local as an immediate so that if it is representable as a scalar, we can
// handle it as such, but otherwise, just return the value as is.
Some(match self.ecx.read_immediate_raw(&op) {
Ok(Left(imm)) => imm.into(),
_ => op,
})
} }
/// Remove `local` from the pool of `Locals`. Allows writing to them, /// Remove `local` from the pool of `Locals`. Allows writing to them,
/// but not reading from them anymore. /// but not reading from them anymore.
fn remove_const(ecx: &mut InterpCx<'mir, 'tcx, ConstPropMachine<'mir, 'tcx>>, local: Local) { fn remove_const(&mut self, local: Local) {
ecx.frame_mut().locals[local].make_live_uninit(); self.locals[local] = Value::Uninit;
ecx.machine.written_only_inside_own_block_locals.remove(&local); self.written_only_inside_own_block_locals.remove(&local);
}
fn access_mut(&mut self, place: &Place<'_>) -> Option<&mut Value<'tcx>> {
match self.can_const_prop[place.local] {
ConstPropMode::NoPropagation => return None,
ConstPropMode::OnlyInsideOwnBlock => {
self.written_only_inside_own_block_locals.insert(place.local);
}
ConstPropMode::FullConstProp => {}
}
self.locals[place.local].project_mut(place.projection)
} }
fn lint_root(&self, source_info: SourceInfo) -> Option<HirId> { fn lint_root(&self, source_info: SourceInfo) -> Option<HirId> {
source_info.scope.lint_root(&self.body().source_scopes) source_info.scope.lint_root(&self.body.source_scopes)
} }
fn use_ecx<F, T>(&mut self, location: Location, f: F) -> Option<T> fn use_ecx<F, T>(&mut self, f: F) -> Option<T>
where where
F: FnOnce(&mut Self) -> InterpResult<'tcx, T>, F: FnOnce(&mut Self) -> InterpResult<'tcx, T>,
{ {
// Overwrite the PC -- whatever the interpreter does to it does not make any sense anyway.
self.ecx.frame_mut().loc = Left(location);
match f(self) { match f(self) {
Ok(val) => Some(val), Ok(val) => Some(val),
Err(error) => { Err(error) => {
@ -244,7 +254,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
} }
/// Returns the value, if any, of evaluating `c`. /// Returns the value, if any, of evaluating `c`.
fn eval_constant(&mut self, c: &ConstOperand<'tcx>, location: Location) -> Option<OpTy<'tcx>> { fn eval_constant(&mut self, c: &ConstOperand<'tcx>) -> Option<ImmTy<'tcx>> {
// FIXME we need to revisit this for #67176 // FIXME we need to revisit this for #67176
if c.has_param() { if c.has_param() {
return None; return None;
@ -258,46 +268,62 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
// manually normalized. // manually normalized.
let val = self.tcx.try_normalize_erasing_regions(self.param_env, c.const_).ok()?; let val = self.tcx.try_normalize_erasing_regions(self.param_env, c.const_).ok()?;
self.use_ecx(location, |this| this.ecx.eval_mir_constant(&val, Some(c.span), None)) self.use_ecx(|this| this.ecx.eval_mir_constant(&val, Some(c.span), None))?
.as_mplace_or_imm()
.right()
} }
/// Returns the value, if any, of evaluating `place`. /// Returns the value, if any, of evaluating `place`.
fn eval_place(&mut self, place: Place<'tcx>, location: Location) -> Option<OpTy<'tcx>> { #[instrument(level = "trace", skip(self), ret)]
trace!("eval_place(place={:?})", place); fn eval_place(&mut self, place: Place<'tcx>) -> Option<ImmTy<'tcx>> {
self.use_ecx(location, |this| this.ecx.eval_place_to_op(place, None)) match self.get_const(place)? {
Value::Immediate(imm) => Some(imm.clone()),
Value::Aggregate { .. } => None,
Value::Uninit => None,
}
} }
/// Returns the value, if any, of evaluating `op`. Calls upon `eval_constant` /// Returns the value, if any, of evaluating `op`. Calls upon `eval_constant`
/// or `eval_place`, depending on the variant of `Operand` used. /// or `eval_place`, depending on the variant of `Operand` used.
fn eval_operand(&mut self, op: &Operand<'tcx>, location: Location) -> Option<OpTy<'tcx>> { fn eval_operand(&mut self, op: &Operand<'tcx>) -> Option<ImmTy<'tcx>> {
match *op { match *op {
Operand::Constant(ref c) => self.eval_constant(c, location), Operand::Constant(ref c) => self.eval_constant(c),
Operand::Move(place) | Operand::Copy(place) => self.eval_place(place, location), Operand::Move(place) | Operand::Copy(place) => self.eval_place(place),
} }
} }
fn report_assert_as_lint(&self, source_info: &SourceInfo, lint: AssertLint<impl Debug>) { fn report_assert_as_lint(
&self,
location: Location,
lint_kind: AssertLintKind,
assert_kind: AssertKind<impl Debug>,
) {
let source_info = self.body.source_info(location);
if let Some(lint_root) = self.lint_root(*source_info) { if let Some(lint_root) = self.lint_root(*source_info) {
self.tcx.emit_node_span_lint(lint.lint(), lint_root, source_info.span, lint); let span = source_info.span;
self.tcx.emit_node_span_lint(
lint_kind.lint(),
lint_root,
span,
AssertLint { span, assert_kind, lint_kind },
);
} }
} }
fn check_unary_op(&mut self, op: UnOp, arg: &Operand<'tcx>, location: Location) -> Option<()> { fn check_unary_op(&mut self, op: UnOp, arg: &Operand<'tcx>, location: Location) -> Option<()> {
if let (val, true) = self.use_ecx(location, |this| { let arg = self.eval_operand(arg)?;
let val = this.ecx.read_immediate(&this.ecx.eval_operand(arg, None)?)?; if let (val, true) = self.use_ecx(|this| {
let val = this.ecx.read_immediate(&arg)?;
let (_res, overflow) = this.ecx.overflowing_unary_op(op, &val)?; let (_res, overflow) = this.ecx.overflowing_unary_op(op, &val)?;
Ok((val, overflow)) Ok((val, overflow))
})? { })? {
// `AssertKind` only has an `OverflowNeg` variant, so make sure that is // `AssertKind` only has an `OverflowNeg` variant, so make sure that is
// appropriate to use. // appropriate to use.
assert_eq!(op, UnOp::Neg, "Neg is the only UnOp that can overflow"); assert_eq!(op, UnOp::Neg, "Neg is the only UnOp that can overflow");
let source_info = self.body().source_info(location);
self.report_assert_as_lint( self.report_assert_as_lint(
source_info, location,
AssertLint::ArithmeticOverflow( AssertLintKind::ArithmeticOverflow,
source_info.span, AssertKind::OverflowNeg(val.to_const_int()),
AssertKind::OverflowNeg(val.to_const_int()),
),
); );
return None; return None;
} }
@ -312,11 +338,10 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
right: &Operand<'tcx>, right: &Operand<'tcx>,
location: Location, location: Location,
) -> Option<()> { ) -> Option<()> {
let r = self.use_ecx(location, |this| { let r =
this.ecx.read_immediate(&this.ecx.eval_operand(right, None)?) self.eval_operand(right).and_then(|r| self.use_ecx(|this| this.ecx.read_immediate(&r)));
}); let l =
let l = self self.eval_operand(left).and_then(|l| self.use_ecx(|this| this.ecx.read_immediate(&l)));
.use_ecx(location, |this| this.ecx.read_immediate(&this.ecx.eval_operand(left, None)?));
// Check for exceeding shifts *even if* we cannot evaluate the LHS. // Check for exceeding shifts *even if* we cannot evaluate the LHS.
if matches!(op, BinOp::Shr | BinOp::Shl) { if matches!(op, BinOp::Shr | BinOp::Shl) {
let r = r.clone()?; let r = r.clone()?;
@ -328,7 +353,6 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
let r_bits = r.to_scalar().to_bits(right_size).ok(); let r_bits = r.to_scalar().to_bits(right_size).ok();
if r_bits.is_some_and(|b| b >= left_size.bits() as u128) { if r_bits.is_some_and(|b| b >= left_size.bits() as u128) {
debug!("check_binary_op: reporting assert for {:?}", location); debug!("check_binary_op: reporting assert for {:?}", location);
let source_info = self.body().source_info(location);
let panic = AssertKind::Overflow( let panic = AssertKind::Overflow(
op, op,
match l { match l {
@ -342,27 +366,21 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
}, },
r.to_const_int(), r.to_const_int(),
); );
self.report_assert_as_lint( self.report_assert_as_lint(location, AssertLintKind::ArithmeticOverflow, panic);
source_info,
AssertLint::ArithmeticOverflow(source_info.span, panic),
);
return None; return None;
} }
} }
if let (Some(l), Some(r)) = (l, r) { if let (Some(l), Some(r)) = (l, r) {
// The remaining operators are handled through `overflowing_binary_op`. // The remaining operators are handled through `overflowing_binary_op`.
if self.use_ecx(location, |this| { if self.use_ecx(|this| {
let (_res, overflow) = this.ecx.overflowing_binary_op(op, &l, &r)?; let (_res, overflow) = this.ecx.overflowing_binary_op(op, &l, &r)?;
Ok(overflow) Ok(overflow)
})? { })? {
let source_info = self.body().source_info(location);
self.report_assert_as_lint( self.report_assert_as_lint(
source_info, location,
AssertLint::ArithmeticOverflow( AssertLintKind::ArithmeticOverflow,
source_info.span, AssertKind::Overflow(op, l.to_const_int(), r.to_const_int()),
AssertKind::Overflow(op, l.to_const_int(), r.to_const_int()),
),
); );
return None; return None;
} }
@ -411,7 +429,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
// value the local has right now. // value the local has right now.
// Thus, all locals that have their reference taken // Thus, all locals that have their reference taken
// must not take part in propagation. // must not take part in propagation.
Self::remove_const(&mut self.ecx, place.local); self.remove_const(place.local);
return None; return None;
} }
@ -453,17 +471,17 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
cond: &Operand<'tcx>, cond: &Operand<'tcx>,
location: Location, location: Location,
) -> Option<!> { ) -> Option<!> {
let value = &self.eval_operand(cond, location)?; let value = &self.eval_operand(cond)?;
trace!("assertion on {:?} should be {:?}", value, expected); trace!("assertion on {:?} should be {:?}", value, expected);
let expected = Scalar::from_bool(expected); let expected = Scalar::from_bool(expected);
let value_const = self.use_ecx(location, |this| this.ecx.read_scalar(value))?; let value_const = self.use_ecx(|this| this.ecx.read_scalar(value))?;
if expected != value_const { if expected != value_const {
// Poison all places this operand references so that further code // Poison all places this operand references so that further code
// doesn't use the invalid value // doesn't use the invalid value
if let Some(place) = cond.place() { if let Some(place) = cond.place() {
Self::remove_const(&mut self.ecx, place.local); self.remove_const(place.local);
} }
enum DbgVal<T> { enum DbgVal<T> {
@ -481,7 +499,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
let mut eval_to_int = |op| { let mut eval_to_int = |op| {
// This can be `None` if the lhs wasn't const propagated and we just // This can be `None` if the lhs wasn't const propagated and we just
// triggered the assert on the value of the rhs. // triggered the assert on the value of the rhs.
self.eval_operand(op, location) self.eval_operand(op)
.and_then(|op| self.ecx.read_immediate(&op).ok()) .and_then(|op| self.ecx.read_immediate(&op).ok())
.map_or(DbgVal::Underscore, |op| DbgVal::Val(op.to_const_int())) .map_or(DbgVal::Underscore, |op| DbgVal::Val(op.to_const_int()))
}; };
@ -503,11 +521,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
// Need proper const propagator for these. // Need proper const propagator for these.
_ => return None, _ => return None,
}; };
let source_info = self.body().source_info(location); self.report_assert_as_lint(location, AssertLintKind::UnconditionalPanic, msg);
self.report_assert_as_lint(
source_info,
AssertLint::UnconditionalPanic(source_info.span, msg),
);
} }
None None
@ -515,16 +529,176 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
fn ensure_not_propagated(&self, local: Local) { fn ensure_not_propagated(&self, local: Local) {
if cfg!(debug_assertions) { if cfg!(debug_assertions) {
let val = self.get_const(local.into());
assert!( assert!(
self.get_const(local.into()).is_none() matches!(val, Some(Value::Uninit))
|| self || self
.layout_of(self.local_decls()[local].ty) .layout_of(self.local_decls()[local].ty)
.map_or(true, |layout| layout.is_zst()), .map_or(true, |layout| layout.is_zst()),
"failed to remove values for `{local:?}`, value={:?}", "failed to remove values for `{local:?}`, value={val:?}",
self.get_const(local.into()),
) )
} }
} }
#[instrument(level = "trace", skip(self), ret)]
fn eval_rvalue(
&mut self,
rvalue: &Rvalue<'tcx>,
location: Location,
dest: &Place<'tcx>,
) -> Option<()> {
if !dest.projection.is_empty() {
return None;
}
use rustc_middle::mir::Rvalue::*;
let layout = self.ecx.layout_of(dest.ty(self.body, self.tcx).ty).ok()?;
trace!(?layout);
let val: Value<'_> = match *rvalue {
ThreadLocalRef(_) => return None,
Use(ref operand) => self.eval_operand(operand)?.into(),
CopyForDeref(place) => self.eval_place(place)?.into(),
BinaryOp(bin_op, box (ref left, ref right)) => {
let left = self.eval_operand(left)?;
let left = self.use_ecx(|this| this.ecx.read_immediate(&left))?;
let right = self.eval_operand(right)?;
let right = self.use_ecx(|this| this.ecx.read_immediate(&right))?;
let val =
self.use_ecx(|this| this.ecx.wrapping_binary_op(bin_op, &left, &right))?;
val.into()
}
CheckedBinaryOp(bin_op, box (ref left, ref right)) => {
let left = self.eval_operand(left)?;
let left = self.use_ecx(|this| this.ecx.read_immediate(&left))?;
let right = self.eval_operand(right)?;
let right = self.use_ecx(|this| this.ecx.read_immediate(&right))?;
let (val, overflowed) =
self.use_ecx(|this| this.ecx.overflowing_binary_op(bin_op, &left, &right))?;
let overflowed = ImmTy::from_bool(overflowed, self.tcx);
Value::Aggregate {
variant: VariantIdx::new(0),
fields: [Value::from(val), overflowed.into()].into_iter().collect(),
}
}
UnaryOp(un_op, ref operand) => {
let operand = self.eval_operand(operand)?;
let val = self.use_ecx(|this| this.ecx.read_immediate(&operand))?;
let val = self.use_ecx(|this| this.ecx.wrapping_unary_op(un_op, &val))?;
val.into()
}
Aggregate(ref kind, ref fields) => Value::Aggregate {
fields: fields
.iter()
.map(|field| self.eval_operand(field).map_or(Value::Uninit, Value::Immediate))
.collect(),
variant: match **kind {
AggregateKind::Adt(_, variant, _, _, _) => variant,
AggregateKind::Array(_)
| AggregateKind::Tuple
| AggregateKind::Closure(_, _)
| AggregateKind::Coroutine(_, _) => VariantIdx::new(0),
},
},
Repeat(ref op, n) => {
trace!(?op, ?n);
return None;
}
Len(place) => {
let len = match self.get_const(place)? {
Value::Immediate(src) => src.len(&self.ecx).ok()?,
Value::Aggregate { fields, .. } => fields.len() as u64,
Value::Uninit => match place.ty(self.local_decls(), self.tcx).ty.kind() {
ty::Array(_, n) => n.try_eval_target_usize(self.tcx, self.param_env)?,
_ => return None,
},
};
ImmTy::from_scalar(Scalar::from_target_usize(len, self), layout).into()
}
Ref(..) | AddressOf(..) => return None,
NullaryOp(ref null_op, ty) => {
let op_layout = self.use_ecx(|this| this.ecx.layout_of(ty))?;
let val = match null_op {
NullOp::SizeOf => op_layout.size.bytes(),
NullOp::AlignOf => op_layout.align.abi.bytes(),
NullOp::OffsetOf(fields) => {
op_layout.offset_of_subfield(self, fields.iter()).bytes()
}
};
ImmTy::from_scalar(Scalar::from_target_usize(val, self), layout).into()
}
ShallowInitBox(..) => return None,
Cast(ref kind, ref value, to) => match kind {
CastKind::IntToInt | CastKind::IntToFloat => {
let value = self.eval_operand(value)?;
let value = self.ecx.read_immediate(&value).ok()?;
let to = self.ecx.layout_of(to).ok()?;
let res = self.ecx.int_to_int_or_float(&value, to).ok()?;
res.into()
}
CastKind::FloatToFloat | CastKind::FloatToInt => {
let value = self.eval_operand(value)?;
let value = self.ecx.read_immediate(&value).ok()?;
let to = self.ecx.layout_of(to).ok()?;
let res = self.ecx.float_to_float_or_int(&value, to).ok()?;
res.into()
}
CastKind::Transmute => {
let value = self.eval_operand(value)?;
let to = self.ecx.layout_of(to).ok()?;
// `offset` for immediates only supports scalar/scalar-pair ABIs,
// so bail out if the target is not one.
match (value.layout.abi, to.abi) {
(Abi::Scalar(..), Abi::Scalar(..)) => {}
(Abi::ScalarPair(..), Abi::ScalarPair(..)) => {}
_ => return None,
}
value.offset(Size::ZERO, to, &self.ecx).ok()?.into()
}
_ => return None,
},
Discriminant(place) => {
let variant = match self.get_const(place)? {
Value::Immediate(op) => {
let op = op.clone();
self.use_ecx(|this| this.ecx.read_discriminant(&op))?
}
Value::Aggregate { variant, .. } => *variant,
Value::Uninit => return None,
};
let imm = self.use_ecx(|this| {
this.ecx.discriminant_for_variant(
place.ty(this.local_decls(), this.tcx).ty,
variant,
)
})?;
imm.into()
}
};
trace!(?val);
*self.access_mut(dest)? = val;
Some(())
}
} }
impl<'tcx> Visitor<'tcx> for ConstPropagator<'_, 'tcx> { impl<'tcx> Visitor<'tcx> for ConstPropagator<'_, 'tcx> {
@ -546,7 +720,7 @@ impl<'tcx> Visitor<'tcx> for ConstPropagator<'_, 'tcx> {
fn visit_constant(&mut self, constant: &ConstOperand<'tcx>, location: Location) { fn visit_constant(&mut self, constant: &ConstOperand<'tcx>, location: Location) {
trace!("visit_constant: {:?}", constant); trace!("visit_constant: {:?}", constant);
self.super_constant(constant, location); self.super_constant(constant, location);
self.eval_constant(constant, location); self.eval_constant(constant);
} }
fn visit_assign(&mut self, place: &Place<'tcx>, rvalue: &Rvalue<'tcx>, location: Location) { fn visit_assign(&mut self, place: &Place<'tcx>, rvalue: &Rvalue<'tcx>, location: Location) {
@ -554,15 +728,12 @@ impl<'tcx> Visitor<'tcx> for ConstPropagator<'_, 'tcx> {
let Some(()) = self.check_rvalue(rvalue, location) else { return }; let Some(()) = self.check_rvalue(rvalue, location) else { return };
match self.ecx.machine.can_const_prop[place.local] { match self.can_const_prop[place.local] {
// Do nothing if the place is indirect. // Do nothing if the place is indirect.
_ if place.is_indirect() => {} _ if place.is_indirect() => {}
ConstPropMode::NoPropagation => self.ensure_not_propagated(place.local), ConstPropMode::NoPropagation => self.ensure_not_propagated(place.local),
ConstPropMode::OnlyInsideOwnBlock | ConstPropMode::FullConstProp => { ConstPropMode::OnlyInsideOwnBlock | ConstPropMode::FullConstProp => {
if self if self.eval_rvalue(rvalue, location, place).is_none() {
.use_ecx(location, |this| this.ecx.eval_rvalue_into_place(rvalue, *place))
.is_none()
{
// Const prop failed, so erase the destination, ensuring that whatever happens // Const prop failed, so erase the destination, ensuring that whatever happens
// from here on, does not know about the previous value. // from here on, does not know about the previous value.
// This is important in case we have // This is important in case we have
@ -578,7 +749,7 @@ impl<'tcx> Visitor<'tcx> for ConstPropagator<'_, 'tcx> {
Nuking the entire site from orbit, it's the only way to be sure", Nuking the entire site from orbit, it's the only way to be sure",
place, place,
); );
Self::remove_const(&mut self.ecx, place.local); self.remove_const(place.local);
} }
} }
} }
@ -592,28 +763,24 @@ impl<'tcx> Visitor<'tcx> for ConstPropagator<'_, 'tcx> {
self.super_statement(statement, location); self.super_statement(statement, location);
match statement.kind { match statement.kind {
StatementKind::SetDiscriminant { ref place, .. } => { StatementKind::SetDiscriminant { ref place, variant_index } => {
match self.ecx.machine.can_const_prop[place.local] { match self.can_const_prop[place.local] {
// Do nothing if the place is indirect. // Do nothing if the place is indirect.
_ if place.is_indirect() => {} _ if place.is_indirect() => {}
ConstPropMode::NoPropagation => self.ensure_not_propagated(place.local), ConstPropMode::NoPropagation => self.ensure_not_propagated(place.local),
ConstPropMode::FullConstProp | ConstPropMode::OnlyInsideOwnBlock => { ConstPropMode::FullConstProp | ConstPropMode::OnlyInsideOwnBlock => {
if self.use_ecx(location, |this| this.ecx.statement(statement)).is_some() { match self.access_mut(place) {
trace!("propped discriminant into {:?}", place); Some(Value::Aggregate { variant, .. }) => *variant = variant_index,
} else { _ => self.remove_const(place.local),
Self::remove_const(&mut self.ecx, place.local);
} }
} }
} }
} }
StatementKind::StorageLive(local) => { StatementKind::StorageLive(local) => {
let frame = self.ecx.frame_mut(); self.remove_const(local);
frame.locals[local].make_live_uninit();
} }
StatementKind::StorageDead(local) => { StatementKind::StorageDead(local) => {
let frame = self.ecx.frame_mut(); self.remove_const(local);
// We don't actually track liveness, so the local remains live. But forget its value.
frame.locals[local].make_live_uninit();
} }
_ => {} _ => {}
} }
@ -626,9 +793,8 @@ impl<'tcx> Visitor<'tcx> for ConstPropagator<'_, 'tcx> {
self.check_assertion(*expected, msg, cond, location); self.check_assertion(*expected, msg, cond, location);
} }
TerminatorKind::SwitchInt { ref discr, ref targets } => { TerminatorKind::SwitchInt { ref discr, ref targets } => {
if let Some(ref value) = self.eval_operand(discr, location) if let Some(ref value) = self.eval_operand(discr)
&& let Some(value_const) = && let Some(value_const) = self.use_ecx(|this| this.ecx.read_scalar(value))
self.use_ecx(location, |this| this.ecx.read_scalar(value))
&& let Ok(constant) = value_const.try_to_int() && let Ok(constant) = value_const.try_to_int()
&& let Ok(constant) = constant.to_bits(constant.size()) && let Ok(constant) = constant.to_bits(constant.size())
{ {
@ -665,7 +831,7 @@ impl<'tcx> Visitor<'tcx> for ConstPropagator<'_, 'tcx> {
// which were modified in the current block. // which were modified in the current block.
// Take it out of the ecx so we can get a mutable reference to the ecx for `remove_const`. // Take it out of the ecx so we can get a mutable reference to the ecx for `remove_const`.
let mut written_only_inside_own_block_locals = let mut written_only_inside_own_block_locals =
std::mem::take(&mut self.ecx.machine.written_only_inside_own_block_locals); std::mem::take(&mut self.written_only_inside_own_block_locals);
// This loop can get very hot for some bodies: it checks each local in each bb. // This loop can get very hot for some bodies: it checks each local in each bb.
// To avoid this quadratic behaviour, we only clear the locals that were modified inside // To avoid this quadratic behaviour, we only clear the locals that were modified inside
@ -673,17 +839,13 @@ impl<'tcx> Visitor<'tcx> for ConstPropagator<'_, 'tcx> {
// The order in which we remove consts does not matter. // The order in which we remove consts does not matter.
#[allow(rustc::potential_query_instability)] #[allow(rustc::potential_query_instability)]
for local in written_only_inside_own_block_locals.drain() { for local in written_only_inside_own_block_locals.drain() {
debug_assert_eq!( debug_assert_eq!(self.can_const_prop[local], ConstPropMode::OnlyInsideOwnBlock);
self.ecx.machine.can_const_prop[local], self.remove_const(local);
ConstPropMode::OnlyInsideOwnBlock
);
Self::remove_const(&mut self.ecx, local);
} }
self.ecx.machine.written_only_inside_own_block_locals = self.written_only_inside_own_block_locals = written_only_inside_own_block_locals;
written_only_inside_own_block_locals;
if cfg!(debug_assertions) { if cfg!(debug_assertions) {
for (local, &mode) in self.ecx.machine.can_const_prop.iter_enumerated() { for (local, &mode) in self.can_const_prop.iter_enumerated() {
match mode { match mode {
ConstPropMode::FullConstProp => {} ConstPropMode::FullConstProp => {}
ConstPropMode::NoPropagation | ConstPropMode::OnlyInsideOwnBlock => { ConstPropMode::NoPropagation | ConstPropMode::OnlyInsideOwnBlock => {
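
The hunk above replaces the interpreter-backed machine with a plain per-local table: each local is an `Immediate`, an `Aggregate` of fields, or `Uninit`, and place projections are resolved by walking that table (`project` for reads, `project_mut` for writes that materialize missing aggregates). The standalone sketch below illustrates the same idea with simplified, made-up types (`Value`, `Proj`, an `i64` payload); it is not the compiler's code, which stores `ImmTy`s and walks MIR `PlaceElem`s.

```rust
// Minimal sketch of a projection-based value table, assuming simplified types.
#[derive(Debug, Clone)]
enum Value {
    Immediate(i64),
    Aggregate { variant: usize, fields: Vec<Value> },
    Uninit,
}

#[derive(Debug, Clone, Copy)]
enum Proj {
    Field(usize),
}

impl Value {
    /// Read-only projection: walk the field path, treating missing fields as `Uninit`.
    fn project(&self, projs: &[Proj]) -> Option<&Value> {
        let mut this = self;
        for proj in projs {
            this = match (*proj, this) {
                (Proj::Field(idx), Value::Aggregate { fields, .. }) => {
                    fields.get(idx).unwrap_or(&Value::Uninit)
                }
                _ => return None,
            };
        }
        Some(this)
    }

    /// Mutable projection: materialize empty aggregates on demand so a write
    /// to a field of an uninitialized local has somewhere to land.
    fn project_mut(&mut self, projs: &[Proj]) -> Option<&mut Value> {
        let mut this = self;
        for proj in projs {
            this = match (*proj, this) {
                (Proj::Field(idx), Value::Aggregate { fields, .. }) => {
                    if fields.len() <= idx {
                        fields.resize(idx + 1, Value::Uninit);
                    }
                    &mut fields[idx]
                }
                (Proj::Field(_), val @ Value::Uninit) => {
                    *val = Value::Aggregate { variant: 0, fields: Vec::new() };
                    val.project_mut(&[*proj])?
                }
                _ => return None,
            };
        }
        Some(this)
    }
}

fn main() {
    // Model `local.0.1 = 7` followed by a read of the same place.
    let mut local = Value::Uninit;
    *local.project_mut(&[Proj::Field(0), Proj::Field(1)]).unwrap() = Value::Immediate(7);
    println!("{:?}", local.project(&[Proj::Field(0), Proj::Field(1)]));
}
```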

View File

@ -1231,7 +1231,12 @@ fn create_coroutine_drop_shim<'tcx>(
drop_clean: BasicBlock, drop_clean: BasicBlock,
) -> Body<'tcx> { ) -> Body<'tcx> {
let mut body = body.clone(); let mut body = body.clone();
body.arg_count = 1; // make sure the resume argument is not included here // Take the coroutine info out of the body, since the drop shim is
// not a coroutine body itself; it just has its drop built out of it.
let _ = body.coroutine.take();
// Make sure the resume argument is not included here, since we're
// building a body for `drop_in_place`.
body.arg_count = 1;
let source_info = SourceInfo::outermost(body.span); let source_info = SourceInfo::outermost(body.span);

View File

@ -59,167 +59,154 @@ impl<'tcx> MirPass<'tcx> for InstrumentCoverage {
_ => {} _ => {}
} }
trace!("InstrumentCoverage starting for {def_id:?}"); instrument_function_for_coverage(tcx, mir_body);
Instrumentor::new(tcx, mir_body).inject_counters();
trace!("InstrumentCoverage done for {def_id:?}");
} }
} }
struct Instrumentor<'a, 'tcx> { fn instrument_function_for_coverage<'tcx>(tcx: TyCtxt<'tcx>, mir_body: &mut mir::Body<'tcx>) {
let def_id = mir_body.source.def_id();
let _span = debug_span!("instrument_function_for_coverage", ?def_id).entered();
let hir_info = extract_hir_info(tcx, def_id.expect_local());
let basic_coverage_blocks = CoverageGraph::from_mir(mir_body);
////////////////////////////////////////////////////
// Compute coverage spans from the `CoverageGraph`.
let Some(coverage_spans) =
spans::generate_coverage_spans(mir_body, &hir_info, &basic_coverage_blocks)
else {
// No relevant spans were found in MIR, so skip instrumenting this function.
return;
};
////////////////////////////////////////////////////
// Create an optimized mix of `Counter`s and `Expression`s for the `CoverageGraph`. Ensure
// every coverage span has a `Counter` or `Expression` assigned to its `BasicCoverageBlock`
// and all `Expression` dependencies (operands) are also generated, for any other
// `BasicCoverageBlock`s not already associated with a coverage span.
let bcb_has_coverage_spans = |bcb| coverage_spans.bcb_has_coverage_spans(bcb);
let coverage_counters =
CoverageCounters::make_bcb_counters(&basic_coverage_blocks, bcb_has_coverage_spans);
let mappings = create_mappings(tcx, &hir_info, &coverage_spans, &coverage_counters);
if mappings.is_empty() {
// No spans could be converted into valid mappings, so skip this function.
debug!("no spans could be converted into valid mappings; skipping");
return;
}
inject_coverage_statements(
mir_body,
&basic_coverage_blocks,
bcb_has_coverage_spans,
&coverage_counters,
);
mir_body.function_coverage_info = Some(Box::new(FunctionCoverageInfo {
function_source_hash: hir_info.function_source_hash,
num_counters: coverage_counters.num_counters(),
expressions: coverage_counters.into_expressions(),
mappings,
}));
}
/// For each coverage span extracted from MIR, create a corresponding
/// mapping.
///
/// Precondition: All BCBs corresponding to those spans have been given
/// coverage counters.
fn create_mappings<'tcx>(
tcx: TyCtxt<'tcx>, tcx: TyCtxt<'tcx>,
mir_body: &'a mut mir::Body<'tcx>, hir_info: &ExtractedHirInfo,
hir_info: ExtractedHirInfo, coverage_spans: &CoverageSpans,
basic_coverage_blocks: CoverageGraph, coverage_counters: &CoverageCounters,
) -> Vec<Mapping> {
let source_map = tcx.sess.source_map();
let body_span = hir_info.body_span;
let source_file = source_map.lookup_source_file(body_span.lo());
use rustc_session::RemapFileNameExt;
let file_name = Symbol::intern(&source_file.name.for_codegen(tcx.sess).to_string_lossy());
let term_for_bcb = |bcb| {
coverage_counters
.bcb_counter(bcb)
.expect("all BCBs with spans were given counters")
.as_term()
};
coverage_spans
.all_bcb_mappings()
.filter_map(|&BcbMapping { kind: bcb_mapping_kind, span }| {
let kind = match bcb_mapping_kind {
BcbMappingKind::Code(bcb) => MappingKind::Code(term_for_bcb(bcb)),
};
let code_region = make_code_region(source_map, file_name, span, body_span)?;
Some(Mapping { kind, code_region })
})
.collect::<Vec<_>>()
} }
impl<'a, 'tcx> Instrumentor<'a, 'tcx> { /// For each BCB node or BCB edge that has an associated coverage counter,
fn new(tcx: TyCtxt<'tcx>, mir_body: &'a mut mir::Body<'tcx>) -> Self { /// inject any necessary coverage statements into MIR.
let hir_info = extract_hir_info(tcx, mir_body.source.def_id().expect_local()); fn inject_coverage_statements<'tcx>(
mir_body: &mut mir::Body<'tcx>,
debug!(?hir_info, "instrumenting {:?}", mir_body.source.def_id()); basic_coverage_blocks: &CoverageGraph,
bcb_has_coverage_spans: impl Fn(BasicCoverageBlock) -> bool,
let basic_coverage_blocks = CoverageGraph::from_mir(mir_body); coverage_counters: &CoverageCounters,
) {
Self { tcx, mir_body, hir_info, basic_coverage_blocks } // Process the counters associated with BCB nodes.
for (bcb, counter_kind) in coverage_counters.bcb_node_counters() {
let do_inject = match counter_kind {
// Counter-increment statements always need to be injected.
BcbCounter::Counter { .. } => true,
// The only purpose of expression-used statements is to detect
// when a mapping is unreachable, so we only inject them for
// expressions with one or more mappings.
BcbCounter::Expression { .. } => bcb_has_coverage_spans(bcb),
};
if do_inject {
inject_statement(
mir_body,
make_mir_coverage_kind(counter_kind),
basic_coverage_blocks[bcb].leader_bb(),
);
}
} }
fn inject_counters(&'a mut self) { // Process the counters associated with BCB edges.
//////////////////////////////////////////////////// for (from_bcb, to_bcb, counter_kind) in coverage_counters.bcb_edge_counters() {
// Compute coverage spans from the `CoverageGraph`. let do_inject = match counter_kind {
let Some(coverage_spans) = CoverageSpans::generate_coverage_spans( // Counter-increment statements always need to be injected.
self.mir_body, BcbCounter::Counter { .. } => true,
&self.hir_info, // BCB-edge expressions never have mappings, so they never need
&self.basic_coverage_blocks, // a corresponding statement.
) else { BcbCounter::Expression { .. } => false,
// No relevant spans were found in MIR, so skip instrumenting this function.
return;
}; };
if !do_inject {
continue;
}
//////////////////////////////////////////////////// // We need to inject a coverage statement into a new BB between the
// Create an optimized mix of `Counter`s and `Expression`s for the `CoverageGraph`. Ensure // last BB of `from_bcb` and the first BB of `to_bcb`.
// every coverage span has a `Counter` or `Expression` assigned to its `BasicCoverageBlock` let from_bb = basic_coverage_blocks[from_bcb].last_bb();
// and all `Expression` dependencies (operands) are also generated, for any other let to_bb = basic_coverage_blocks[to_bcb].leader_bb();
// `BasicCoverageBlock`s not already associated with a coverage span.
let bcb_has_coverage_spans = |bcb| coverage_spans.bcb_has_coverage_spans(bcb); let new_bb = inject_edge_counter_basic_block(mir_body, from_bb, to_bb);
let coverage_counters = CoverageCounters::make_bcb_counters( debug!(
&self.basic_coverage_blocks, "Edge {from_bcb:?} (last {from_bb:?}) -> {to_bcb:?} (leader {to_bb:?}) \
bcb_has_coverage_spans, requires a new MIR BasicBlock {new_bb:?} for edge counter {counter_kind:?}",
); );
let mappings = self.create_mappings(&coverage_spans, &coverage_counters); // Inject a counter into the newly-created BB.
if mappings.is_empty() { inject_statement(mir_body, make_mir_coverage_kind(counter_kind), new_bb);
// No spans could be converted into valid mappings, so skip this function.
debug!("no spans could be converted into valid mappings; skipping");
return;
}
self.inject_coverage_statements(bcb_has_coverage_spans, &coverage_counters);
self.mir_body.function_coverage_info = Some(Box::new(FunctionCoverageInfo {
function_source_hash: self.hir_info.function_source_hash,
num_counters: coverage_counters.num_counters(),
expressions: coverage_counters.into_expressions(),
mappings,
}));
} }
}
/// For each coverage span extracted from MIR, create a corresponding fn make_mir_coverage_kind(counter_kind: &BcbCounter) -> CoverageKind {
/// mapping. match *counter_kind {
/// BcbCounter::Counter { id } => CoverageKind::CounterIncrement { id },
/// Precondition: All BCBs corresponding to those spans have been given BcbCounter::Expression { id } => CoverageKind::ExpressionUsed { id },
/// coverage counters.
fn create_mappings(
&self,
coverage_spans: &CoverageSpans,
coverage_counters: &CoverageCounters,
) -> Vec<Mapping> {
let source_map = self.tcx.sess.source_map();
let body_span = self.hir_info.body_span;
let source_file = source_map.lookup_source_file(body_span.lo());
use rustc_session::RemapFileNameExt;
let file_name =
Symbol::intern(&source_file.name.for_codegen(self.tcx.sess).to_string_lossy());
let term_for_bcb = |bcb| {
coverage_counters
.bcb_counter(bcb)
.expect("all BCBs with spans were given counters")
.as_term()
};
coverage_spans
.all_bcb_mappings()
.filter_map(|&BcbMapping { kind: bcb_mapping_kind, span }| {
let kind = match bcb_mapping_kind {
BcbMappingKind::Code(bcb) => MappingKind::Code(term_for_bcb(bcb)),
};
let code_region = make_code_region(source_map, file_name, span, body_span)?;
Some(Mapping { kind, code_region })
})
.collect::<Vec<_>>()
}
/// For each BCB node or BCB edge that has an associated coverage counter,
/// inject any necessary coverage statements into MIR.
fn inject_coverage_statements(
&mut self,
bcb_has_coverage_spans: impl Fn(BasicCoverageBlock) -> bool,
coverage_counters: &CoverageCounters,
) {
// Process the counters associated with BCB nodes.
for (bcb, counter_kind) in coverage_counters.bcb_node_counters() {
let do_inject = match counter_kind {
// Counter-increment statements always need to be injected.
BcbCounter::Counter { .. } => true,
// The only purpose of expression-used statements is to detect
// when a mapping is unreachable, so we only inject them for
// expressions with one or more mappings.
BcbCounter::Expression { .. } => bcb_has_coverage_spans(bcb),
};
if do_inject {
inject_statement(
self.mir_body,
self.make_mir_coverage_kind(counter_kind),
self.basic_coverage_blocks[bcb].leader_bb(),
);
}
}
// Process the counters associated with BCB edges.
for (from_bcb, to_bcb, counter_kind) in coverage_counters.bcb_edge_counters() {
let do_inject = match counter_kind {
// Counter-increment statements always need to be injected.
BcbCounter::Counter { .. } => true,
// BCB-edge expressions never have mappings, so they never need
// a corresponding statement.
BcbCounter::Expression { .. } => false,
};
if !do_inject {
continue;
}
// We need to inject a coverage statement into a new BB between the
// last BB of `from_bcb` and the first BB of `to_bcb`.
let from_bb = self.basic_coverage_blocks[from_bcb].last_bb();
let to_bb = self.basic_coverage_blocks[to_bcb].leader_bb();
let new_bb = inject_edge_counter_basic_block(self.mir_body, from_bb, to_bb);
debug!(
"Edge {from_bcb:?} (last {from_bb:?}) -> {to_bcb:?} (leader {to_bb:?}) \
requires a new MIR BasicBlock {new_bb:?} for edge counter {counter_kind:?}",
);
// Inject a counter into the newly-created BB.
inject_statement(self.mir_body, self.make_mir_coverage_kind(counter_kind), new_bb);
}
}
fn make_mir_coverage_kind(&self, counter_kind: &BcbCounter) -> CoverageKind {
match *counter_kind {
BcbCounter::Counter { id } => CoverageKind::CounterIncrement { id },
BcbCounter::Expression { id } => CoverageKind::ExpressionUsed { id },
}
} }
} }
@ -329,7 +316,7 @@ fn make_code_region(
start_line = source_map.doctest_offset_line(&file.name, start_line); start_line = source_map.doctest_offset_line(&file.name, start_line);
end_line = source_map.doctest_offset_line(&file.name, end_line); end_line = source_map.doctest_offset_line(&file.name, end_line);
Some(CodeRegion { check_code_region(CodeRegion {
file_name, file_name,
start_line: start_line as u32, start_line: start_line as u32,
start_col: start_col as u32, start_col: start_col as u32,
@ -338,6 +325,39 @@ fn make_code_region(
}) })
} }
/// If `llvm-cov` sees a code region that is improperly ordered (end < start),
/// it will immediately exit with a fatal error. To prevent that from happening,
/// discard regions that are improperly ordered, or might be interpreted in a
/// way that makes them improperly ordered.
fn check_code_region(code_region: CodeRegion) -> Option<CodeRegion> {
let CodeRegion { file_name: _, start_line, start_col, end_line, end_col } = code_region;
// Line/column coordinates are supposed to be 1-based. If we ever emit
// coordinates of 0, `llvm-cov` might misinterpret them.
let all_nonzero = [start_line, start_col, end_line, end_col].into_iter().all(|x| x != 0);
// Coverage mappings use the high bit of `end_col` to indicate that a
// region is actually a "gap" region, so make sure it's unset.
let end_col_has_high_bit_unset = (end_col & (1 << 31)) == 0;
// If a region is improperly ordered (end < start), `llvm-cov` will exit
// with a fatal error, which is inconvenient for users and hard to debug.
let is_ordered = (start_line, start_col) <= (end_line, end_col);
if all_nonzero && end_col_has_high_bit_unset && is_ordered {
Some(code_region)
} else {
debug!(
?code_region,
?all_nonzero,
?end_col_has_high_bit_unset,
?is_ordered,
"Skipping code region that would be misinterpreted or rejected by LLVM"
);
// If this happens in a debug build, ICE to make it easier to notice.
debug_assert!(false, "Improper code region: {code_region:?}");
None
}
}
fn is_eligible_for_coverage(tcx: TyCtxt<'_>, def_id: LocalDefId) -> bool { fn is_eligible_for_coverage(tcx: TyCtxt<'_>, def_id: LocalDefId) -> bool {
// Only instrument functions, methods, and closures (not constants since they are evaluated // Only instrument functions, methods, and closures (not constants since they are evaluated
// at compile time by Miri). // at compile time by Miri).
@ -351,7 +371,18 @@ fn is_eligible_for_coverage(tcx: TyCtxt<'_>, def_id: LocalDefId) -> bool {
return false; return false;
} }
// Don't instrument functions with `#[automatically_derived]` on their
// enclosing impl block, on the assumption that most users won't care about
// coverage for derived impls.
if let Some(impl_of) = tcx.impl_of_method(def_id.to_def_id())
&& tcx.is_automatically_derived(impl_of)
{
trace!("InstrumentCoverage skipped for {def_id:?} (automatically derived)");
return false;
}
if tcx.codegen_fn_attrs(def_id).flags.contains(CodegenFnAttrFlags::NO_COVERAGE) { if tcx.codegen_fn_attrs(def_id).flags.contains(CodegenFnAttrFlags::NO_COVERAGE) {
trace!("InstrumentCoverage skipped for {def_id:?} (`#[coverage(off)]`)");
return false; return false;
} }
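
The new `check_code_region` helper drops any region `llvm-cov` could misread: a zero line or column (coordinates are 1-based), an `end_col` with its high bit set (reserved for gap regions), or an end that precedes its start. A small self-contained sketch of those three checks, using a plain `Region` struct as a stand-in for the compiler's `CodeRegion`:

```rust
/// Simplified stand-in for the compiler's `CodeRegion`; only for illustration.
#[derive(Debug)]
struct Region {
    start_line: u32,
    start_col: u32,
    end_line: u32,
    end_col: u32,
}

/// Keep a region only if llvm-cov can interpret it unambiguously.
fn check_region(r: Region) -> Option<Region> {
    // 1. Coordinates are 1-based; a zero would be misread.
    let all_nonzero =
        [r.start_line, r.start_col, r.end_line, r.end_col].into_iter().all(|x| x != 0);
    // 2. The high bit of `end_col` marks "gap" regions, so it must be clear.
    let high_bit_clear = r.end_col & (1 << 31) == 0;
    // 3. llvm-cov exits with a fatal error on regions with end < start.
    let is_ordered = (r.start_line, r.start_col) <= (r.end_line, r.end_col);
    (all_nonzero && high_bit_clear && is_ordered).then_some(r)
}

fn main() {
    assert!(check_region(Region { start_line: 3, start_col: 5, end_line: 3, end_col: 20 }).is_some());
    // End precedes start: discarded rather than handed to llvm-cov.
    assert!(check_region(Region { start_line: 4, start_col: 9, end_line: 3, end_col: 1 }).is_none());
}
```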

View File

@ -26,45 +26,6 @@ pub(super) struct CoverageSpans {
} }
impl CoverageSpans { impl CoverageSpans {
/// Extracts coverage-relevant spans from MIR, and associates them with
/// their corresponding BCBs.
///
/// Returns `None` if no coverage-relevant spans could be extracted.
pub(super) fn generate_coverage_spans(
mir_body: &mir::Body<'_>,
hir_info: &ExtractedHirInfo,
basic_coverage_blocks: &CoverageGraph,
) -> Option<Self> {
let mut mappings = vec![];
let coverage_spans = CoverageSpansGenerator::generate_coverage_spans(
mir_body,
hir_info,
basic_coverage_blocks,
);
mappings.extend(coverage_spans.into_iter().map(|CoverageSpan { bcb, span, .. }| {
// Each span produced by the generator represents an ordinary code region.
BcbMapping { kind: BcbMappingKind::Code(bcb), span }
}));
if mappings.is_empty() {
return None;
}
// Identify which BCBs have one or more mappings.
let mut bcb_has_mappings = BitSet::new_empty(basic_coverage_blocks.num_nodes());
let mut insert = |bcb| {
bcb_has_mappings.insert(bcb);
};
for &BcbMapping { kind, span: _ } in &mappings {
match kind {
BcbMappingKind::Code(bcb) => insert(bcb),
}
}
Some(Self { bcb_has_mappings, mappings })
}
pub(super) fn bcb_has_coverage_spans(&self, bcb: BasicCoverageBlock) -> bool { pub(super) fn bcb_has_coverage_spans(&self, bcb: BasicCoverageBlock) -> bool {
self.bcb_has_mappings.contains(bcb) self.bcb_has_mappings.contains(bcb)
} }
@ -74,6 +35,43 @@ impl CoverageSpans {
} }
} }
/// Extracts coverage-relevant spans from MIR, and associates them with
/// their corresponding BCBs.
///
/// Returns `None` if no coverage-relevant spans could be extracted.
pub(super) fn generate_coverage_spans(
mir_body: &mir::Body<'_>,
hir_info: &ExtractedHirInfo,
basic_coverage_blocks: &CoverageGraph,
) -> Option<CoverageSpans> {
let mut mappings = vec![];
let sorted_spans =
from_mir::mir_to_initial_sorted_coverage_spans(mir_body, hir_info, basic_coverage_blocks);
let coverage_spans = SpansRefiner::refine_sorted_spans(basic_coverage_blocks, sorted_spans);
mappings.extend(coverage_spans.into_iter().map(|CoverageSpan { bcb, span, .. }| {
// Each span produced by the generator represents an ordinary code region.
BcbMapping { kind: BcbMappingKind::Code(bcb), span }
}));
if mappings.is_empty() {
return None;
}
// Identify which BCBs have one or more mappings.
let mut bcb_has_mappings = BitSet::new_empty(basic_coverage_blocks.num_nodes());
let mut insert = |bcb| {
bcb_has_mappings.insert(bcb);
};
for &BcbMapping { kind, span: _ } in &mappings {
match kind {
BcbMappingKind::Code(bcb) => insert(bcb),
}
}
Some(CoverageSpans { bcb_has_mappings, mappings })
}
/// A BCB is deconstructed into one or more `Span`s. Each `Span` maps to a `CoverageSpan` that /// A BCB is deconstructed into one or more `Span`s. Each `Span` maps to a `CoverageSpan` that
/// references the originating BCB and one or more MIR `Statement`s and/or `Terminator`s. /// references the originating BCB and one or more MIR `Statement`s and/or `Terminator`s.
/// Initially, the `Span`s come from the `Statement`s and `Terminator`s, but subsequent /// Initially, the `Span`s come from the `Statement`s and `Terminator`s, but subsequent
@ -130,7 +128,7 @@ impl CoverageSpan {
/// * Merge spans that represent continuous (both in source code and control flow), non-branching /// * Merge spans that represent continuous (both in source code and control flow), non-branching
/// execution /// execution
/// * Carve out (leave uncovered) any span that will be counted by another MIR (notably, closures) /// * Carve out (leave uncovered) any span that will be counted by another MIR (notably, closures)
struct CoverageSpansGenerator<'a> { struct SpansRefiner<'a> {
/// The BasicCoverageBlock Control Flow Graph (BCB CFG). /// The BasicCoverageBlock Control Flow Graph (BCB CFG).
basic_coverage_blocks: &'a CoverageGraph, basic_coverage_blocks: &'a CoverageGraph,
@ -173,40 +171,15 @@ struct CoverageSpansGenerator<'a> {
refined_spans: Vec<CoverageSpan>, refined_spans: Vec<CoverageSpan>,
} }
impl<'a> CoverageSpansGenerator<'a> { impl<'a> SpansRefiner<'a> {
/// Generate a minimal set of `CoverageSpan`s, each representing a contiguous code region to be /// Takes the initial list of (sorted) spans extracted from MIR, and "refines"
/// counted. /// them by merging compatible adjacent spans, removing redundant spans,
/// /// and carving holes in spans when they overlap in unwanted ways.
/// The basic steps are: fn refine_sorted_spans(
///
/// 1. Extract an initial set of spans from the `Statement`s and `Terminator`s of each
/// `BasicCoverageBlockData`.
/// 2. Sort the spans by span.lo() (starting position). Spans that start at the same position
/// are sorted with longer spans before shorter spans; and equal spans are sorted
/// (deterministically) based on "dominator" relationship (if any).
/// 3. Traverse the spans in sorted order to identify spans that can be dropped (for instance,
/// if another span or spans are already counting the same code region), or should be merged
/// into a broader combined span (because it represents a contiguous, non-branching, and
/// uninterrupted region of source code).
///
/// Closures are exposed in their enclosing functions as `Assign` `Rvalue`s, and since
/// closures have their own MIR, their `Span` in their enclosing function should be left
/// "uncovered".
///
/// Note the resulting vector of `CoverageSpan`s may not be fully sorted (and does not need
/// to be).
pub(super) fn generate_coverage_spans(
mir_body: &mir::Body<'_>,
hir_info: &ExtractedHirInfo,
basic_coverage_blocks: &'a CoverageGraph, basic_coverage_blocks: &'a CoverageGraph,
sorted_spans: Vec<CoverageSpan>,
) -> Vec<CoverageSpan> { ) -> Vec<CoverageSpan> {
let sorted_spans = from_mir::mir_to_initial_sorted_coverage_spans( let this = Self {
mir_body,
hir_info,
basic_coverage_blocks,
);
let coverage_spans = Self {
basic_coverage_blocks, basic_coverage_blocks,
sorted_spans_iter: sorted_spans.into_iter(), sorted_spans_iter: sorted_spans.into_iter(),
some_curr: None, some_curr: None,
@ -217,7 +190,7 @@ impl<'a> CoverageSpansGenerator<'a> {
refined_spans: Vec::with_capacity(basic_coverage_blocks.num_nodes() * 2), refined_spans: Vec::with_capacity(basic_coverage_blocks.num_nodes() * 2),
}; };
coverage_spans.to_refined_spans() this.to_refined_spans()
} }
/// Iterate through the sorted `CoverageSpan`s, and return the refined list of merged and /// Iterate through the sorted `CoverageSpan`s, and return the refined list of merged and
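
The renamed `SpansRefiner` keeps the documented strategy: sort the raw spans by start position with longer spans first, then walk them in order, merging contiguous spans and dropping ones already covered. The sketch below shows only that sort-and-merge core on plain byte ranges; the real refiner additionally uses dominator information and carves out closure spans, which this omits.

```rust
/// Minimal stand-in for a coverage span: just a start/end byte range.
#[derive(Debug, Clone, Copy, PartialEq)]
struct Span {
    lo: u32,
    hi: u32,
}

/// Sort spans by start (longer spans first at equal starts), then merge
/// spans that touch or overlap into a single refined region.
fn refine(mut spans: Vec<Span>) -> Vec<Span> {
    spans.sort_by(|a, b| (a.lo, b.hi).cmp(&(b.lo, a.hi)));
    let mut refined: Vec<Span> = Vec::with_capacity(spans.len());
    for span in spans {
        match refined.last_mut() {
            // Contiguous or overlapping with the previous span: extend it.
            Some(prev) if span.lo <= prev.hi => prev.hi = prev.hi.max(span.hi),
            // Otherwise start a new refined span.
            _ => refined.push(span),
        }
    }
    refined
}

fn main() {
    let spans = vec![
        Span { lo: 10, hi: 14 },
        Span { lo: 0, hi: 4 },
        Span { lo: 4, hi: 9 },
    ];
    // [0,4] and [4,9] merge; [10,14] stays separate.
    assert_eq!(refine(spans), vec![Span { lo: 0, hi: 9 }, Span { lo: 10, hi: 14 }]);
}
```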

View File

@ -12,6 +12,12 @@ use crate::coverage::graph::{
use crate::coverage::spans::CoverageSpan; use crate::coverage::spans::CoverageSpan;
use crate::coverage::ExtractedHirInfo; use crate::coverage::ExtractedHirInfo;
/// Traverses the MIR body to produce an initial collection of coverage-relevant
/// spans, each associated with a node in the coverage graph (BCB) and possibly
/// other metadata.
///
/// The returned spans are sorted in a specific order that is expected by the
/// subsequent span-refinement step.
pub(super) fn mir_to_initial_sorted_coverage_spans( pub(super) fn mir_to_initial_sorted_coverage_spans(
mir_body: &mir::Body<'_>, mir_body: &mir::Body<'_>,
hir_info: &ExtractedHirInfo, hir_info: &ExtractedHirInfo,

View File

@ -403,7 +403,12 @@ impl<'a, 'tcx> ConstAnalysis<'a, 'tcx> {
operand, operand,
&mut |elem, op| match elem { &mut |elem, op| match elem {
TrackElem::Field(idx) => self.ecx.project_field(op, idx.as_usize()).ok(), TrackElem::Field(idx) => self.ecx.project_field(op, idx.as_usize()).ok(),
TrackElem::Variant(idx) => self.ecx.project_downcast(op, idx).ok(), TrackElem::Variant(idx) => {
if op.layout.for_variant(&self.ecx, idx).abi.is_uninhabited() {
return None;
}
self.ecx.project_downcast(op, idx).ok()
}
TrackElem::Discriminant => { TrackElem::Discriminant => {
let variant = self.ecx.read_discriminant(op).ok()?; let variant = self.ecx.read_discriminant(op).ok()?;
let discr_value = let discr_value =

View File

@ -201,45 +201,39 @@ impl<'a> DecorateLint<'a, ()> for UnsafeOpInUnsafeFn {
} }
} }
pub(crate) enum AssertLint<P> { pub(crate) struct AssertLint<P> {
ArithmeticOverflow(Span, AssertKind<P>), pub span: Span,
UnconditionalPanic(Span, AssertKind<P>), pub assert_kind: AssertKind<P>,
pub lint_kind: AssertLintKind,
}
pub(crate) enum AssertLintKind {
ArithmeticOverflow,
UnconditionalPanic,
} }
impl<'a, P: std::fmt::Debug> DecorateLint<'a, ()> for AssertLint<P> { impl<'a, P: std::fmt::Debug> DecorateLint<'a, ()> for AssertLint<P> {
fn decorate_lint<'b>(self, diag: &'b mut DiagnosticBuilder<'a, ()>) { fn decorate_lint<'b>(self, diag: &'b mut DiagnosticBuilder<'a, ()>) {
let span = self.span(); let message = self.assert_kind.diagnostic_message();
let assert_kind = self.panic(); self.assert_kind.add_args(&mut |name, value| {
let message = assert_kind.diagnostic_message();
assert_kind.add_args(&mut |name, value| {
diag.arg(name, value); diag.arg(name, value);
}); });
diag.span_label(span, message); diag.span_label(self.span, message);
} }
fn msg(&self) -> DiagnosticMessage { fn msg(&self) -> DiagnosticMessage {
match self { match self.lint_kind {
AssertLint::ArithmeticOverflow(..) => fluent::mir_transform_arithmetic_overflow, AssertLintKind::ArithmeticOverflow => fluent::mir_transform_arithmetic_overflow,
AssertLint::UnconditionalPanic(..) => fluent::mir_transform_operation_will_panic, AssertLintKind::UnconditionalPanic => fluent::mir_transform_operation_will_panic,
} }
} }
} }
impl<P> AssertLint<P> { impl AssertLintKind {
pub fn lint(&self) -> &'static Lint { pub fn lint(&self) -> &'static Lint {
match self { match self {
AssertLint::ArithmeticOverflow(..) => lint::builtin::ARITHMETIC_OVERFLOW, AssertLintKind::ArithmeticOverflow => lint::builtin::ARITHMETIC_OVERFLOW,
AssertLint::UnconditionalPanic(..) => lint::builtin::UNCONDITIONAL_PANIC, AssertLintKind::UnconditionalPanic => lint::builtin::UNCONDITIONAL_PANIC,
}
}
pub fn span(&self) -> Span {
match self {
AssertLint::ArithmeticOverflow(sp, _) | AssertLint::UnconditionalPanic(sp, _) => *sp,
}
}
pub fn panic(self) -> AssertKind<P> {
match self {
AssertLint::ArithmeticOverflow(_, p) | AssertLint::UnconditionalPanic(_, p) => p,
} }
} }
} }
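The hunk above replaces an enum whose variants all carried the same `(Span, AssertKind)` payload with a struct holding the shared fields plus a small `AssertLintKind` enum, removing the `span()`/`panic()` accessor boilerplate. A hedged, standalone sketch of the same refactoring pattern (all names below are invented, not rustc's):

// Before: `enum Lint { Overflow(Span, Payload), Panic(Span, Payload) }` plus
// accessor methods. After: shared fields live on a struct and only the
// discriminant stays an enum.
struct AssertDiagnostic {
    span: (usize, usize),
    message: String,
    kind: Kind,
}

enum Kind {
    ArithmeticOverflow,
    UnconditionalPanic,
}

impl Kind {
    fn lint_name(&self) -> &'static str {
        match self {
            Kind::ArithmeticOverflow => "arithmetic_overflow",
            Kind::UnconditionalPanic => "unconditional_panic",
        }
    }
}

fn main() {
    let d = AssertDiagnostic {
        span: (10, 20),
        message: "this operation will overflow".to_string(),
        kind: Kind::ArithmeticOverflow,
    };
    // Shared fields are read directly; only the kind needs a match.
    println!("{} at {:?}: {}", d.kind.lint_name(), d.span, d.message);
}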

View File

@ -1113,7 +1113,13 @@ fn find_vtable_types_for_unsizing<'tcx>(
assert_eq!(source_adt_def, target_adt_def); assert_eq!(source_adt_def, target_adt_def);
let CustomCoerceUnsized::Struct(coerce_index) = let CustomCoerceUnsized::Struct(coerce_index) =
crate::custom_coerce_unsize_info(tcx, source_ty, target_ty); match crate::custom_coerce_unsize_info(tcx, source_ty, target_ty) {
Ok(ccu) => ccu,
Err(e) => {
let e = Ty::new_error(tcx.tcx, e);
return (e, e);
}
};
let source_fields = &source_adt_def.non_enum_variant().fields; let source_fields = &source_adt_def.non_enum_variant().fields;
let target_fields = &target_adt_def.non_enum_variant().fields; let target_fields = &target_adt_def.non_enum_variant().fields;

View File

@ -15,6 +15,7 @@ use rustc_middle::query::{Providers, TyCtxtAt};
use rustc_middle::traits; use rustc_middle::traits;
use rustc_middle::ty::adjustment::CustomCoerceUnsized; use rustc_middle::ty::adjustment::CustomCoerceUnsized;
use rustc_middle::ty::{self, Ty}; use rustc_middle::ty::{self, Ty};
use rustc_span::ErrorGuaranteed;
mod collector; mod collector;
mod errors; mod errors;
@ -28,7 +29,7 @@ fn custom_coerce_unsize_info<'tcx>(
tcx: TyCtxtAt<'tcx>, tcx: TyCtxtAt<'tcx>,
source_ty: Ty<'tcx>, source_ty: Ty<'tcx>,
target_ty: Ty<'tcx>, target_ty: Ty<'tcx>,
) -> CustomCoerceUnsized { ) -> Result<CustomCoerceUnsized, ErrorGuaranteed> {
let trait_ref = ty::TraitRef::from_lang_item( let trait_ref = ty::TraitRef::from_lang_item(
tcx.tcx, tcx.tcx,
LangItem::CoerceUnsized, LangItem::CoerceUnsized,
@ -40,7 +41,7 @@ fn custom_coerce_unsize_info<'tcx>(
Ok(traits::ImplSource::UserDefined(traits::ImplSourceUserDefinedData { Ok(traits::ImplSource::UserDefined(traits::ImplSourceUserDefinedData {
impl_def_id, impl_def_id,
.. ..
})) => tcx.coerce_unsized_info(impl_def_id).custom_kind.unwrap(), })) => Ok(tcx.coerce_unsized_info(impl_def_id)?.custom_kind.unwrap()),
impl_source => { impl_source => {
bug!("invalid `CoerceUnsized` impl_source: {:?}", impl_source); bug!("invalid `CoerceUnsized` impl_source: {:?}", impl_source);
} }

View File

@ -391,12 +391,18 @@ impl IntRange {
/// first. /// first.
impl fmt::Debug for IntRange { impl fmt::Debug for IntRange {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
if let Finite(lo) = self.lo { if self.is_singleton() {
// Only finite ranges can be singletons.
let Finite(lo) = self.lo else { unreachable!() };
write!(f, "{lo}")?; write!(f, "{lo}")?;
} } else {
write!(f, "{}", RangeEnd::Excluded)?; if let Finite(lo) = self.lo {
if let Finite(hi) = self.hi { write!(f, "{lo}")?;
write!(f, "{hi}")?; }
write!(f, "{}", RangeEnd::Excluded)?;
if let Finite(hi) = self.hi {
write!(f, "{hi}")?;
}
} }
Ok(()) Ok(())
} }
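A minimal standalone analogue of the `Debug` change above: print a singleton range as its single value, and any other range with an exclusive `..` end. The `Bound` and `Range` types here are invented for the sketch, not the compiler's.

use std::fmt;

#[derive(Clone, Copy)]
enum Bound {
    NegInf,
    Finite(i128),
    PosInf,
}

struct Range {
    lo: Bound,
    hi: Bound,
}

impl Range {
    // Half-open range covering exactly one value, mirroring `is_singleton`.
    fn is_singleton(&self) -> bool {
        matches!((self.lo, self.hi), (Bound::Finite(lo), Bound::Finite(hi)) if hi == lo + 1)
    }
}

impl fmt::Debug for Range {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        if self.is_singleton() {
            // Only finite ranges can be singletons.
            let Bound::Finite(lo) = self.lo else { unreachable!() };
            write!(f, "{lo}")
        } else {
            if let Bound::Finite(lo) = self.lo {
                write!(f, "{lo}")?;
            }
            write!(f, "..")?;
            if let Bound::Finite(hi) = self.hi {
                write!(f, "{hi}")?;
            }
            Ok(())
        }
    }
}

fn main() {
    assert_eq!(format!("{:?}", Range { lo: Bound::Finite(5), hi: Bound::Finite(6) }), "5");
    assert_eq!(format!("{:?}", Range { lo: Bound::Finite(0), hi: Bound::PosInf }), "0..");
    let _ = Bound::NegInf; // keep the unbounded-low variant used in this sketch
}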

View File

@ -101,15 +101,23 @@ pub trait TypeCx: Sized + fmt::Debug {
/// The types of the fields for this constructor. The result must have a length of /// The types of the fields for this constructor. The result must have a length of
/// `ctor_arity()`. /// `ctor_arity()`.
fn ctor_sub_tys(&self, ctor: &Constructor<Self>, ty: &Self::Ty) -> &[Self::Ty]; fn ctor_sub_tys<'a>(
&'a self,
ctor: &'a Constructor<Self>,
ty: &'a Self::Ty,
) -> impl Iterator<Item = Self::Ty> + ExactSizeIterator + Captures<'a>;
/// The set of all the constructors for `ty`. /// The set of all the constructors for `ty`.
/// ///
/// This must follow the invariants of `ConstructorSet` /// This must follow the invariants of `ConstructorSet`
fn ctors_for_ty(&self, ty: &Self::Ty) -> Result<ConstructorSet<Self>, Self::Error>; fn ctors_for_ty(&self, ty: &Self::Ty) -> Result<ConstructorSet<Self>, Self::Error>;
/// Best-effort `Debug` implementation. /// Write the name of the variant represented by `pat`. Used for the best-effort `Debug` impl of
fn debug_pat(f: &mut fmt::Formatter<'_>, pat: &DeconstructedPat<'_, Self>) -> fmt::Result; /// `DeconstructedPat`. Only invoked when `pat.ctor()` is `Struct | Variant(_) | UnionField`.
fn write_variant_name(
f: &mut fmt::Formatter<'_>,
pat: &crate::pat::DeconstructedPat<'_, Self>,
) -> fmt::Result;
/// Raise a bug. /// Raise a bug.
fn bug(&self, fmt: fmt::Arguments<'_>) -> !; fn bug(&self, fmt: fmt::Arguments<'_>) -> !;
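The `ctor_sub_tys` signature above switches from returning a slice to returning `impl Iterator + ExactSizeIterator + Captures<'a>`; the empty `Captures` marker trait is the usual workaround for letting a return-position `impl Trait` capture a lifetime that is not otherwise named in its bounds. A hedged, self-contained sketch of that pattern (the `Captures` trait is written out locally here, mirroring the shape of `rustc_data_structures::captures::Captures`; the surrounding types are invented):

// Empty marker trait: mentioning `Captures<'a>` in the bounds lets the opaque
// return type borrow data for `'a` without naming `'a` anywhere else.
pub trait Captures<'a> {}
impl<'a, T: ?Sized> Captures<'a> for T {}

struct Fields {
    tys: Vec<String>,
}

impl Fields {
    fn sub_tys<'a>(
        &'a self,
        prefix: &'a str,
    ) -> impl Iterator<Item = String> + ExactSizeIterator + Captures<'a> {
        // The returned iterator borrows both `self.tys` and `prefix` for `'a`.
        self.tys.iter().map(move |t| format!("{prefix}{t}"))
    }
}

fn main() {
    let f = Fields { tys: vec!["u8".into(), "bool".into()] };
    let it = f.sub_tys("field: ");
    assert_eq!(it.len(), 2); // `ExactSizeIterator` is preserved through `map`.
    for s in it {
        println!("{s}");
    }
}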

View File

@ -142,7 +142,75 @@ impl<'p, Cx: TypeCx> DeconstructedPat<'p, Cx> {
/// This is best effort and not good enough for a `Display` impl. /// This is best effort and not good enough for a `Display` impl.
impl<'p, Cx: TypeCx> fmt::Debug for DeconstructedPat<'p, Cx> { impl<'p, Cx: TypeCx> fmt::Debug for DeconstructedPat<'p, Cx> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
Cx::debug_pat(f, self) let pat = self;
let mut first = true;
let mut start_or_continue = |s| {
if first {
first = false;
""
} else {
s
}
};
let mut start_or_comma = || start_or_continue(", ");
match pat.ctor() {
Struct | Variant(_) | UnionField => {
Cx::write_variant_name(f, pat)?;
// Without `cx`, we can't know which field corresponds to which, so we can't
// get the names of the fields. Instead we just display everything as a tuple
// struct, which should be good enough.
write!(f, "(")?;
for p in pat.iter_fields() {
write!(f, "{}", start_or_comma())?;
write!(f, "{p:?}")?;
}
write!(f, ")")
}
// Note: given the expansion of `&str` patterns done in `expand_pattern`, we should
// be careful to detect strings here. However a string literal pattern will never
// be reported as a non-exhaustiveness witness, so we can ignore this issue.
Ref => {
let subpattern = pat.iter_fields().next().unwrap();
write!(f, "&{:?}", subpattern)
}
Slice(slice) => {
let mut subpatterns = pat.iter_fields();
write!(f, "[")?;
match slice.kind {
SliceKind::FixedLen(_) => {
for p in subpatterns {
write!(f, "{}{:?}", start_or_comma(), p)?;
}
}
SliceKind::VarLen(prefix_len, _) => {
for p in subpatterns.by_ref().take(prefix_len) {
write!(f, "{}{:?}", start_or_comma(), p)?;
}
write!(f, "{}", start_or_comma())?;
write!(f, "..")?;
for p in subpatterns {
write!(f, "{}{:?}", start_or_comma(), p)?;
}
}
}
write!(f, "]")
}
Bool(b) => write!(f, "{b}"),
// Best-effort, will render signed ranges incorrectly
IntRange(range) => write!(f, "{range:?}"),
F32Range(lo, hi, end) => write!(f, "{lo}{end}{hi}"),
F64Range(lo, hi, end) => write!(f, "{lo}{end}{hi}"),
Str(value) => write!(f, "{value:?}"),
Opaque(..) => write!(f, "<constant pattern>"),
Or => {
for pat in pat.iter_fields() {
write!(f, "{}{:?}", start_or_continue(" | "), pat)?;
}
Ok(())
}
Wildcard | Missing { .. } | NonExhaustive | Hidden => write!(f, "_ : {:?}", pat.ty()),
}
} }
} }
@ -241,8 +309,7 @@ impl<Cx: TypeCx> WitnessPat<Cx> {
/// For example, if `ctor` is a `Constructor::Variant` for `Option::Some`, we get the pattern /// For example, if `ctor` is a `Constructor::Variant` for `Option::Some`, we get the pattern
/// `Some(_)`. /// `Some(_)`.
pub(crate) fn wild_from_ctor(pcx: &PlaceCtxt<'_, Cx>, ctor: Constructor<Cx>) -> Self { pub(crate) fn wild_from_ctor(pcx: &PlaceCtxt<'_, Cx>, ctor: Constructor<Cx>) -> Self {
let field_tys = pcx.ctor_sub_tys(&ctor); let fields = pcx.ctor_sub_tys(&ctor).map(|ty| Self::wildcard(ty)).collect();
let fields = field_tys.iter().cloned().map(|ty| Self::wildcard(ty)).collect();
Self::new(ctor, fields, pcx.ty.clone()) Self::new(ctor, fields, pcx.ty.clone())
} }

View File

@ -3,7 +3,6 @@ use std::fmt;
use std::iter::once; use std::iter::once;
use rustc_arena::{DroplessArena, TypedArena}; use rustc_arena::{DroplessArena, TypedArena};
use rustc_data_structures::captures::Captures;
use rustc_hir::def_id::DefId; use rustc_hir::def_id::DefId;
use rustc_hir::HirId; use rustc_hir::HirId;
use rustc_index::{Idx, IndexVec}; use rustc_index::{Idx, IndexVec};
@ -20,7 +19,7 @@ use rustc_target::abi::{FieldIdx, Integer, VariantIdx, FIRST_VARIANT};
use crate::constructor::{ use crate::constructor::{
IntRange, MaybeInfiniteInt, OpaqueId, RangeEnd, Slice, SliceKind, VariantVisibility, IntRange, MaybeInfiniteInt, OpaqueId, RangeEnd, Slice, SliceKind, VariantVisibility,
}; };
use crate::{errors, TypeCx}; use crate::{errors, Captures, TypeCx};
use crate::constructor::Constructor::*; use crate::constructor::Constructor::*;
@ -210,11 +209,11 @@ impl<'p, 'tcx> RustcMatchCheckCtxt<'p, 'tcx> {
/// Returns the types of the fields for a given constructor. The result must have a length of /// Returns the types of the fields for a given constructor. The result must have a length of
/// `ctor.arity()`. /// `ctor.arity()`.
#[instrument(level = "trace", skip(self))] #[instrument(level = "trace", skip(self))]
pub(crate) fn ctor_sub_tys( pub(crate) fn ctor_sub_tys<'a>(
&self, &'a self,
ctor: &Constructor<'p, 'tcx>, ctor: &'a Constructor<'p, 'tcx>,
ty: RevealedTy<'tcx>, ty: RevealedTy<'tcx>,
) -> &[RevealedTy<'tcx>] { ) -> impl Iterator<Item = RevealedTy<'tcx>> + ExactSizeIterator + Captures<'a> {
fn reveal_and_alloc<'a, 'tcx>( fn reveal_and_alloc<'a, 'tcx>(
cx: &'a RustcMatchCheckCtxt<'_, 'tcx>, cx: &'a RustcMatchCheckCtxt<'_, 'tcx>,
iter: impl Iterator<Item = Ty<'tcx>>, iter: impl Iterator<Item = Ty<'tcx>>,
@ -222,7 +221,7 @@ impl<'p, 'tcx> RustcMatchCheckCtxt<'p, 'tcx> {
cx.dropless_arena.alloc_from_iter(iter.map(|ty| cx.reveal_opaque_ty(ty))) cx.dropless_arena.alloc_from_iter(iter.map(|ty| cx.reveal_opaque_ty(ty)))
} }
let cx = self; let cx = self;
match ctor { let slice = match ctor {
Struct | Variant(_) | UnionField => match ty.kind() { Struct | Variant(_) | UnionField => match ty.kind() {
ty::Tuple(fs) => reveal_and_alloc(cx, fs.iter()), ty::Tuple(fs) => reveal_and_alloc(cx, fs.iter()),
ty::Adt(adt, args) => { ty::Adt(adt, args) => {
@ -263,7 +262,8 @@ impl<'p, 'tcx> RustcMatchCheckCtxt<'p, 'tcx> {
Or => { Or => {
bug!("called `Fields::wildcards` on an `Or` ctor") bug!("called `Fields::wildcards` on an `Or` ctor")
} }
} };
slice.iter().copied()
} }
/// The number of fields for this constructor. /// The number of fields for this constructor.
@ -850,103 +850,6 @@ impl<'p, 'tcx> RustcMatchCheckCtxt<'p, 'tcx> {
Pat { ty: pat.ty().inner(), span: DUMMY_SP, kind } Pat { ty: pat.ty().inner(), span: DUMMY_SP, kind }
} }
/// Best-effort `Debug` implementation.
pub(crate) fn debug_pat(
f: &mut fmt::Formatter<'_>,
pat: &crate::pat::DeconstructedPat<'_, Self>,
) -> fmt::Result {
let mut first = true;
let mut start_or_continue = |s| {
if first {
first = false;
""
} else {
s
}
};
let mut start_or_comma = || start_or_continue(", ");
match pat.ctor() {
Struct | Variant(_) | UnionField => match pat.ty().kind() {
ty::Adt(def, _) if def.is_box() => {
// Without `box_patterns`, the only legal pattern of type `Box` is `_` (outside
// of `std`). So this branch is only reachable when the feature is enabled and
// the pattern is a box pattern.
let subpattern = pat.iter_fields().next().unwrap();
write!(f, "box {subpattern:?}")
}
ty::Adt(..) | ty::Tuple(..) => {
let variant =
match pat.ty().kind() {
ty::Adt(adt, _) => Some(adt.variant(
RustcMatchCheckCtxt::variant_index_for_adt(pat.ctor(), *adt),
)),
ty::Tuple(_) => None,
_ => unreachable!(),
};
if let Some(variant) = variant {
write!(f, "{}", variant.name)?;
}
// Without `cx`, we can't know which field corresponds to which, so we can't
// get the names of the fields. Instead we just display everything as a tuple
// struct, which should be good enough.
write!(f, "(")?;
for p in pat.iter_fields() {
write!(f, "{}", start_or_comma())?;
write!(f, "{p:?}")?;
}
write!(f, ")")
}
_ => write!(f, "_"),
},
// Note: given the expansion of `&str` patterns done in `expand_pattern`, we should
// be careful to detect strings here. However a string literal pattern will never
// be reported as a non-exhaustiveness witness, so we can ignore this issue.
Ref => {
let subpattern = pat.iter_fields().next().unwrap();
write!(f, "&{:?}", subpattern)
}
Slice(slice) => {
let mut subpatterns = pat.iter_fields();
write!(f, "[")?;
match slice.kind {
SliceKind::FixedLen(_) => {
for p in subpatterns {
write!(f, "{}{:?}", start_or_comma(), p)?;
}
}
SliceKind::VarLen(prefix_len, _) => {
for p in subpatterns.by_ref().take(prefix_len) {
write!(f, "{}{:?}", start_or_comma(), p)?;
}
write!(f, "{}", start_or_comma())?;
write!(f, "..")?;
for p in subpatterns {
write!(f, "{}{:?}", start_or_comma(), p)?;
}
}
}
write!(f, "]")
}
Bool(b) => write!(f, "{b}"),
// Best-effort, will render signed ranges incorrectly
IntRange(range) => write!(f, "{range:?}"),
F32Range(lo, hi, end) => write!(f, "{lo}{end}{hi}"),
F64Range(lo, hi, end) => write!(f, "{lo}{end}{hi}"),
Str(value) => write!(f, "{value}"),
Opaque(..) => write!(f, "<constant pattern>"),
Or => {
for pat in pat.iter_fields() {
write!(f, "{}{:?}", start_or_continue(" | "), pat)?;
}
Ok(())
}
Wildcard | Missing { .. } | NonExhaustive | Hidden => write!(f, "_ : {:?}", pat.ty()),
}
}
} }
impl<'p, 'tcx> TypeCx for RustcMatchCheckCtxt<'p, 'tcx> { impl<'p, 'tcx> TypeCx for RustcMatchCheckCtxt<'p, 'tcx> {
@ -964,11 +867,11 @@ impl<'p, 'tcx> TypeCx for RustcMatchCheckCtxt<'p, 'tcx> {
fn ctor_arity(&self, ctor: &crate::constructor::Constructor<Self>, ty: &Self::Ty) -> usize { fn ctor_arity(&self, ctor: &crate::constructor::Constructor<Self>, ty: &Self::Ty) -> usize {
self.ctor_arity(ctor, *ty) self.ctor_arity(ctor, *ty)
} }
fn ctor_sub_tys( fn ctor_sub_tys<'a>(
&self, &'a self,
ctor: &crate::constructor::Constructor<Self>, ctor: &'a crate::constructor::Constructor<Self>,
ty: &Self::Ty, ty: &'a Self::Ty,
) -> &[Self::Ty] { ) -> impl Iterator<Item = Self::Ty> + ExactSizeIterator + Captures<'a> {
self.ctor_sub_tys(ctor, *ty) self.ctor_sub_tys(ctor, *ty)
} }
fn ctors_for_ty( fn ctors_for_ty(
@ -978,12 +881,21 @@ impl<'p, 'tcx> TypeCx for RustcMatchCheckCtxt<'p, 'tcx> {
self.ctors_for_ty(*ty) self.ctors_for_ty(*ty)
} }
fn debug_pat( fn write_variant_name(
f: &mut fmt::Formatter<'_>, f: &mut fmt::Formatter<'_>,
pat: &crate::pat::DeconstructedPat<'_, Self>, pat: &crate::pat::DeconstructedPat<'_, Self>,
) -> fmt::Result { ) -> fmt::Result {
Self::debug_pat(f, pat) if let ty::Adt(adt, _) = pat.ty().kind() {
if adt.is_box() {
write!(f, "Box")?
} else {
let variant = adt.variant(Self::variant_index_for_adt(pat.ctor(), *adt));
write!(f, "{}", variant.name)?;
}
}
Ok(())
} }
fn bug(&self, fmt: fmt::Arguments<'_>) -> ! { fn bug(&self, fmt: fmt::Arguments<'_>) -> ! {
span_bug!(self.scrut_span, "{}", fmt) span_bug!(self.scrut_span, "{}", fmt)
} }

View File

@ -750,7 +750,10 @@ impl<'a, Cx: TypeCx> PlaceCtxt<'a, Cx> {
pub(crate) fn ctor_arity(&self, ctor: &Constructor<Cx>) -> usize { pub(crate) fn ctor_arity(&self, ctor: &Constructor<Cx>) -> usize {
self.mcx.tycx.ctor_arity(ctor, self.ty) self.mcx.tycx.ctor_arity(ctor, self.ty)
} }
pub(crate) fn ctor_sub_tys(&self, ctor: &Constructor<Cx>) -> &[Cx::Ty] { pub(crate) fn ctor_sub_tys(
&'a self,
ctor: &'a Constructor<Cx>,
) -> impl Iterator<Item = Cx::Ty> + ExactSizeIterator + Captures<'a> {
self.mcx.tycx.ctor_sub_tys(ctor, self.ty) self.mcx.tycx.ctor_sub_tys(ctor, self.ty)
} }
pub(crate) fn ctors_for_ty(&self) -> Result<ConstructorSet<Cx>, Cx::Error> { pub(crate) fn ctors_for_ty(&self) -> Result<ConstructorSet<Cx>, Cx::Error> {
@ -1058,8 +1061,7 @@ impl<'p, Cx: TypeCx> Matrix<'p, Cx> {
) -> Matrix<'p, Cx> { ) -> Matrix<'p, Cx> {
let ctor_sub_tys = pcx.ctor_sub_tys(ctor); let ctor_sub_tys = pcx.ctor_sub_tys(ctor);
let arity = ctor_sub_tys.len(); let arity = ctor_sub_tys.len();
let specialized_place_ty = let specialized_place_ty = ctor_sub_tys.chain(self.place_ty[1..].iter().cloned()).collect();
ctor_sub_tys.iter().chain(self.place_ty[1..].iter()).cloned().collect();
let ctor_sub_validity = self.place_validity[0].specialize(ctor); let ctor_sub_validity = self.place_validity[0].specialize(ctor);
let specialized_place_validity = std::iter::repeat(ctor_sub_validity) let specialized_place_validity = std::iter::repeat(ctor_sub_validity)
.take(arity) .take(arity)

View File

@ -24,13 +24,13 @@ use rustc_hir::def::{DefKind, Res};
use rustc_hir::def_id::{DefId, LocalDefId, LocalModDefId, CRATE_DEF_ID}; use rustc_hir::def_id::{DefId, LocalDefId, LocalModDefId, CRATE_DEF_ID};
use rustc_hir::intravisit::{self, Visitor}; use rustc_hir::intravisit::{self, Visitor};
use rustc_hir::{AssocItemKind, ForeignItemKind, ItemId, PatKind}; use rustc_hir::{AssocItemKind, ForeignItemKind, ItemId, PatKind};
use rustc_middle::bug;
use rustc_middle::hir::nested_filter; use rustc_middle::hir::nested_filter;
use rustc_middle::middle::privacy::{EffectiveVisibilities, EffectiveVisibility, Level}; use rustc_middle::middle::privacy::{EffectiveVisibilities, EffectiveVisibility, Level};
use rustc_middle::query::Providers; use rustc_middle::query::Providers;
use rustc_middle::ty::GenericArgs; use rustc_middle::ty::GenericArgs;
use rustc_middle::ty::{self, Const, GenericParamDefKind}; use rustc_middle::ty::{self, Const, GenericParamDefKind};
use rustc_middle::ty::{TraitRef, Ty, TyCtxt, TypeSuperVisitable, TypeVisitable, TypeVisitor}; use rustc_middle::ty::{TraitRef, Ty, TyCtxt, TypeSuperVisitable, TypeVisitable, TypeVisitor};
use rustc_middle::{bug, span_bug};
use rustc_session::lint; use rustc_session::lint;
use rustc_span::hygiene::Transparency; use rustc_span::hygiene::Transparency;
use rustc_span::symbol::{kw, sym, Ident}; use rustc_span::symbol::{kw, sym, Ident};
@ -1064,29 +1064,22 @@ impl<'tcx> Visitor<'tcx> for NamePrivacyVisitor<'tcx> {
struct TypePrivacyVisitor<'tcx> { struct TypePrivacyVisitor<'tcx> {
tcx: TyCtxt<'tcx>, tcx: TyCtxt<'tcx>,
module_def_id: LocalModDefId,
maybe_typeck_results: Option<&'tcx ty::TypeckResults<'tcx>>, maybe_typeck_results: Option<&'tcx ty::TypeckResults<'tcx>>,
current_item: LocalDefId,
span: Span, span: Span,
} }
impl<'tcx> TypePrivacyVisitor<'tcx> { impl<'tcx> TypePrivacyVisitor<'tcx> {
/// Gets the type-checking results for the current body.
/// As this will ICE if called outside bodies, only call when working with
/// `Expr` or `Pat` nodes (they are guaranteed to be found only in bodies).
#[track_caller]
fn typeck_results(&self) -> &'tcx ty::TypeckResults<'tcx> {
self.maybe_typeck_results
.expect("`TypePrivacyVisitor::typeck_results` called outside of body")
}
fn item_is_accessible(&self, did: DefId) -> bool { fn item_is_accessible(&self, did: DefId) -> bool {
self.tcx.visibility(did).is_accessible_from(self.current_item, self.tcx) self.tcx.visibility(did).is_accessible_from(self.module_def_id, self.tcx)
} }
// Take node-id of an expression or pattern and check its type for privacy. // Take node-id of an expression or pattern and check its type for privacy.
fn check_expr_pat_type(&mut self, id: hir::HirId, span: Span) -> bool { fn check_expr_pat_type(&mut self, id: hir::HirId, span: Span) -> bool {
self.span = span; self.span = span;
let typeck_results = self.typeck_results(); let typeck_results = self
.maybe_typeck_results
.unwrap_or_else(|| span_bug!(span, "`hir::Expr` or `hir::Pat` outside of a body"));
let result: ControlFlow<()> = try { let result: ControlFlow<()> = try {
self.visit(typeck_results.node_type(id))?; self.visit(typeck_results.node_type(id))?;
self.visit(typeck_results.node_args(id))?; self.visit(typeck_results.node_args(id))?;
@ -1107,35 +1100,13 @@ impl<'tcx> TypePrivacyVisitor<'tcx> {
} }
impl<'tcx> Visitor<'tcx> for TypePrivacyVisitor<'tcx> { impl<'tcx> Visitor<'tcx> for TypePrivacyVisitor<'tcx> {
type NestedFilter = nested_filter::All; fn visit_nested_body(&mut self, body_id: hir::BodyId) {
/// We want to visit items in the context of their containing
/// module and so forth, so supply a crate for doing a deep walk.
fn nested_visit_map(&mut self) -> Self::Map {
self.tcx.hir()
}
fn visit_mod(&mut self, _m: &'tcx hir::Mod<'tcx>, _s: Span, _n: hir::HirId) {
// Don't visit nested modules, since we run a separate visitor walk
// for each module in `effective_visibilities`
}
fn visit_nested_body(&mut self, body: hir::BodyId) {
let old_maybe_typeck_results = let old_maybe_typeck_results =
self.maybe_typeck_results.replace(self.tcx.typeck_body(body)); self.maybe_typeck_results.replace(self.tcx.typeck_body(body_id));
let body = self.tcx.hir().body(body); self.visit_body(self.tcx.hir().body(body_id));
self.visit_body(body);
self.maybe_typeck_results = old_maybe_typeck_results; self.maybe_typeck_results = old_maybe_typeck_results;
} }
fn visit_generic_arg(&mut self, generic_arg: &'tcx hir::GenericArg<'tcx>) {
match generic_arg {
hir::GenericArg::Type(t) => self.visit_ty(t),
hir::GenericArg::Infer(inf) => self.visit_infer(inf),
hir::GenericArg::Lifetime(_) | hir::GenericArg::Const(_) => {}
}
}
fn visit_ty(&mut self, hir_ty: &'tcx hir::Ty<'tcx>) { fn visit_ty(&mut self, hir_ty: &'tcx hir::Ty<'tcx>) {
self.span = hir_ty.span; self.span = hir_ty.span;
if let Some(typeck_results) = self.maybe_typeck_results { if let Some(typeck_results) = self.maybe_typeck_results {
@ -1163,19 +1134,19 @@ impl<'tcx> Visitor<'tcx> for TypePrivacyVisitor<'tcx> {
return; return;
} }
} else { } else {
// We don't do anything for const infers here. // FIXME: check types of const infers here.
} }
} else { } else {
bug!("visit_infer without typeck_results"); span_bug!(self.span, "`hir::InferArg` outside of a body");
} }
intravisit::walk_inf(self, inf); intravisit::walk_inf(self, inf);
} }
fn visit_trait_ref(&mut self, trait_ref: &'tcx hir::TraitRef<'tcx>) { fn visit_trait_ref(&mut self, trait_ref: &'tcx hir::TraitRef<'tcx>) {
self.span = trait_ref.path.span; self.span = trait_ref.path.span;
if self.maybe_typeck_results.is_none() { if self.maybe_typeck_results.is_some() {
// Avoid calling `hir_trait_to_predicates` in bodies, it will ICE. // Privacy of traits in bodies is checked as a part of trait object types.
// The traits' privacy in bodies is already checked as a part of trait object types. } else {
let bounds = rustc_hir_analysis::hir_trait_to_predicates( let bounds = rustc_hir_analysis::hir_trait_to_predicates(
self.tcx, self.tcx,
trait_ref, trait_ref,
@ -1223,7 +1194,10 @@ impl<'tcx> Visitor<'tcx> for TypePrivacyVisitor<'tcx> {
hir::ExprKind::MethodCall(segment, ..) => { hir::ExprKind::MethodCall(segment, ..) => {
// Method calls have to be checked specially. // Method calls have to be checked specially.
self.span = segment.ident.span; self.span = segment.ident.span;
if let Some(def_id) = self.typeck_results().type_dependent_def_id(expr.hir_id) { let typeck_results = self
.maybe_typeck_results
.unwrap_or_else(|| span_bug!(self.span, "`hir::Expr` outside of a body"));
if let Some(def_id) = typeck_results.type_dependent_def_id(expr.hir_id) {
if self.visit(self.tcx.type_of(def_id).instantiate_identity()).is_break() { if self.visit(self.tcx.type_of(def_id).instantiate_identity()).is_break() {
return; return;
} }
@ -1251,9 +1225,13 @@ impl<'tcx> Visitor<'tcx> for TypePrivacyVisitor<'tcx> {
Res::Def(kind, def_id) => Some((kind, def_id)), Res::Def(kind, def_id) => Some((kind, def_id)),
_ => None, _ => None,
}, },
hir::QPath::TypeRelative(..) | hir::QPath::LangItem(..) => self hir::QPath::TypeRelative(..) | hir::QPath::LangItem(..) => {
.maybe_typeck_results match self.maybe_typeck_results {
.and_then(|typeck_results| typeck_results.type_dependent_def(id)), Some(typeck_results) => typeck_results.type_dependent_def(id),
// FIXME: Check type-relative associated types in signatures.
None => None,
}
}
}; };
let def = def.filter(|(kind, _)| { let def = def.filter(|(kind, _)| {
matches!( matches!(
@ -1307,15 +1285,6 @@ impl<'tcx> Visitor<'tcx> for TypePrivacyVisitor<'tcx> {
intravisit::walk_local(self, local); intravisit::walk_local(self, local);
} }
// Check types in item interfaces.
fn visit_item(&mut self, item: &'tcx hir::Item<'tcx>) {
let orig_current_item = mem::replace(&mut self.current_item, item.owner_id.def_id);
let old_maybe_typeck_results = self.maybe_typeck_results.take();
intravisit::walk_item(self, item);
self.maybe_typeck_results = old_maybe_typeck_results;
self.current_item = orig_current_item;
}
} }
impl<'tcx> DefIdVisitor<'tcx> for TypePrivacyVisitor<'tcx> { impl<'tcx> DefIdVisitor<'tcx> for TypePrivacyVisitor<'tcx> {
@ -1785,13 +1754,8 @@ fn check_mod_privacy(tcx: TyCtxt<'_>, module_def_id: LocalModDefId) {
// Check privacy of explicitly written types and traits as well as // Check privacy of explicitly written types and traits as well as
// inferred types of expressions and patterns. // inferred types of expressions and patterns.
let mut visitor = TypePrivacyVisitor { let mut visitor = TypePrivacyVisitor { tcx, module_def_id, maybe_typeck_results: None, span };
tcx, tcx.hir().visit_item_likes_in_module(module_def_id, &mut visitor);
maybe_typeck_results: None,
current_item: module_def_id.to_local_def_id(),
span,
};
intravisit::walk_mod(&mut visitor, module, hir_id);
} }
fn effective_visibilities(tcx: TyCtxt<'_>, (): ()) -> &EffectiveVisibilities { fn effective_visibilities(tcx: TyCtxt<'_>, (): ()) -> &EffectiveVisibilities {

View File

@ -332,20 +332,6 @@ impl Session {
} }
} }
// FIXME(matthewjasper) Remove this method, it should never be needed.
pub fn track_errors<F, T>(&self, f: F) -> Result<T, ErrorGuaranteed>
where
F: FnOnce() -> T,
{
let old_count = self.dcx().err_count();
let result = f();
if self.dcx().err_count() == old_count {
Ok(result)
} else {
Err(self.dcx().delayed_bug("`self.err_count()` changed but an error was not emitted"))
}
}
/// Used for code paths of expensive computations that should only take place when /// Used for code paths of expensive computations that should only take place when
/// warnings or errors are emitted. If no messages are emitted ("good path"), then /// warnings or errors are emitted. If no messages are emitted ("good path"), then
/// it's likely a bug. /// it's likely a bug.
@ -1524,16 +1510,25 @@ pub trait RemapFileNameExt {
where where
Self: 'a; Self: 'a;
fn for_scope(&self, sess: &Session, scopes: RemapPathScopeComponents) -> Self::Output<'_>; /// Returns a possibly remapped filename based on the passed scope and remap cli options.
///
/// One and only one scope should be passed to this method. For anything related to
/// "codegen" see the [`RemapFileNameExt::for_codegen`] method.
fn for_scope(&self, sess: &Session, scope: RemapPathScopeComponents) -> Self::Output<'_>;
/// Return a possibly remapped filename, to be used in "codegen" related parts.
fn for_codegen(&self, sess: &Session) -> Self::Output<'_>; fn for_codegen(&self, sess: &Session) -> Self::Output<'_>;
} }
impl RemapFileNameExt for rustc_span::FileName { impl RemapFileNameExt for rustc_span::FileName {
type Output<'a> = rustc_span::FileNameDisplay<'a>; type Output<'a> = rustc_span::FileNameDisplay<'a>;
fn for_scope(&self, sess: &Session, scopes: RemapPathScopeComponents) -> Self::Output<'_> { fn for_scope(&self, sess: &Session, scope: RemapPathScopeComponents) -> Self::Output<'_> {
if sess.opts.unstable_opts.remap_path_scope.contains(scopes) { assert!(
scope.bits().count_ones() == 1,
"one and only one scope should be passed to for_scope"
);
if sess.opts.unstable_opts.remap_path_scope.contains(scope) {
self.prefer_remapped_unconditionaly() self.prefer_remapped_unconditionaly()
} else { } else {
self.prefer_local() self.prefer_local()
@ -1552,8 +1547,12 @@ impl RemapFileNameExt for rustc_span::FileName {
impl RemapFileNameExt for rustc_span::RealFileName { impl RemapFileNameExt for rustc_span::RealFileName {
type Output<'a> = &'a Path; type Output<'a> = &'a Path;
fn for_scope(&self, sess: &Session, scopes: RemapPathScopeComponents) -> Self::Output<'_> { fn for_scope(&self, sess: &Session, scope: RemapPathScopeComponents) -> Self::Output<'_> {
if sess.opts.unstable_opts.remap_path_scope.contains(scopes) { assert!(
scope.bits().count_ones() == 1,
"one and only one scope should be passed to for_scope"
);
if sess.opts.unstable_opts.remap_path_scope.contains(scope) {
self.remapped_path_if_available() self.remapped_path_if_available()
} else { } else {
self.local_path_if_available() self.local_path_if_available()
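The `for_scope` change above adds an assertion that exactly one scope bit is passed. A hedged sketch of the same check using a bare `u32` bitmask in place of the real `RemapPathScopeComponents` bitflags type (the constants and paths below are invented):

const MACRO: u32 = 1 << 0;
const DIAGNOSTICS: u32 = 1 << 1;
const DEBUGINFO: u32 = 1 << 2;

fn for_scope(remap_scopes: u32, scope: u32, remapped: &str, local: &str) -> String {
    // One and only one scope bit may be passed in.
    assert!(scope.count_ones() == 1, "one and only one scope should be passed to for_scope");
    if remap_scopes & scope != 0 {
        remapped.to_string()
    } else {
        local.to_string()
    }
}

fn main() {
    let enabled = MACRO | DEBUGINFO;
    assert_eq!(for_scope(enabled, MACRO, "/remapped/lib.rs", "src/lib.rs"), "/remapped/lib.rs");
    assert_eq!(for_scope(enabled, DIAGNOSTICS, "/remapped/lib.rs", "src/lib.rs"), "src/lib.rs");
}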

View File

@ -310,7 +310,6 @@ symbols! {
Some, Some,
SpanCtxt, SpanCtxt,
String, String,
StructuralEq,
StructuralPartialEq, StructuralPartialEq,
SubdiagnosticMessage, SubdiagnosticMessage,
Sync, Sync,
@ -425,8 +424,14 @@ symbols! {
assume, assume,
assume_init, assume_init,
async_await, async_await,
async_call,
async_call_mut,
async_call_once,
async_closure, async_closure,
async_fn,
async_fn_in_trait, async_fn_in_trait,
async_fn_mut,
async_fn_once,
async_fn_track_caller, async_fn_track_caller,
async_for_loop, async_for_loop,
async_iterator, async_iterator,
@ -910,6 +915,7 @@ symbols! {
io_stderr, io_stderr,
io_stdout, io_stdout,
irrefutable_let_patterns, irrefutable_let_patterns,
is_val_statically_known,
isa_attribute, isa_attribute,
isize, isize,
issue, issue,
@ -1618,7 +1624,6 @@ symbols! {
struct_variant, struct_variant,
structural_match, structural_match,
structural_peq, structural_peq,
structural_teq,
sub, sub,
sub_assign, sub_assign,
sub_with_overflow, sub_with_overflow,
@ -1793,6 +1798,7 @@ symbols! {
warn, warn,
wasm_abi, wasm_abi,
wasm_import_module, wasm_import_module,
wasm_preview2,
wasm_target_feature, wasm_target_feature,
while_let, while_let,
windows, windows,

View File

@ -38,9 +38,6 @@ pub fn options() -> TargetOptions {
// supposed to be imported and have all other symbols generate errors if // supposed to be imported and have all other symbols generate errors if
// they remain undefined. // they remain undefined.
concat!($prefix, "--allow-undefined"), concat!($prefix, "--allow-undefined"),
// Rust code should never have warnings, and warnings are often
// indicative of bugs, let's prevent them.
concat!($prefix, "--fatal-warnings"),
// LLD only implements C++-like demangling, which doesn't match our own // LLD only implements C++-like demangling, which doesn't match our own
// mangling scheme. Tell LLD to not demangle anything and leave it up to // mangling scheme. Tell LLD to not demangle anything and leave it up to
// us to demangle these symbols later. Currently rustc does not perform // us to demangle these symbols later. Currently rustc does not perform

View File

@ -1574,6 +1574,7 @@ supported_targets! {
("wasm32-unknown-emscripten", wasm32_unknown_emscripten), ("wasm32-unknown-emscripten", wasm32_unknown_emscripten),
("wasm32-unknown-unknown", wasm32_unknown_unknown), ("wasm32-unknown-unknown", wasm32_unknown_unknown),
("wasm32-wasi", wasm32_wasi), ("wasm32-wasi", wasm32_wasi),
("wasm32-wasi-preview2", wasm32_wasi_preview2),
("wasm32-wasi-preview1-threads", wasm32_wasi_preview1_threads), ("wasm32-wasi-preview1-threads", wasm32_wasi_preview1_threads),
("wasm64-unknown-unknown", wasm64_unknown_unknown), ("wasm64-unknown-unknown", wasm64_unknown_unknown),

View File

@ -5,10 +5,7 @@ use crate::spec::{
pub fn target() -> Target { pub fn target() -> Target {
// Reset flags for non-Em flavors back to empty to satisfy sanity checking tests. // Reset flags for non-Em flavors back to empty to satisfy sanity checking tests.
let pre_link_args = LinkArgs::new(); let pre_link_args = LinkArgs::new();
let post_link_args = TargetOptions::link_args( let post_link_args = TargetOptions::link_args(LinkerFlavor::EmCc, &["-sABORTING_MALLOC=0"]);
LinkerFlavor::EmCc,
&["-sABORTING_MALLOC=0", "-Wl,--fatal-warnings"],
);
let opts = TargetOptions { let opts = TargetOptions {
os: "emscripten".into(), os: "emscripten".into(),

View File

@ -72,11 +72,12 @@
//! best we can with this target. Don't start relying on too much here unless //! best we can with this target. Don't start relying on too much here unless
//! you know what you're getting in to! //! you know what you're getting in to!
use crate::spec::{base, crt_objects, Cc, LinkSelfContainedDefault, LinkerFlavor, Target}; use crate::spec::{base, crt_objects, cvs, Cc, LinkSelfContainedDefault, LinkerFlavor, Target};
pub fn target() -> Target { pub fn target() -> Target {
let mut options = base::wasm::options(); let mut options = base::wasm::options();
options.families = cvs!["wasm", "wasi"];
options.os = "wasi".into(); options.os = "wasi".into();
options.add_pre_link_args( options.add_pre_link_args(

View File

@ -0,0 +1,64 @@
//! The `wasm32-wasi-preview2` target is the next evolution of the
//! wasm32-wasi target. While the wasi specification is still under
//! active development, the preview 2 iteration is considered an "island
//! of stability" that should allow users to rely on it indefinitely.
//!
//! The `wasi` target is a proposal to define a standardized set of WebAssembly
//! component imports that allow it to interoperate with the host system in a
//! standardized way. This set of imports is intended to empower WebAssembly
//! binaries with host capabilities such as filesystem access, network access, etc.
//!
//! Wasi Preview 2 relies on the WebAssembly component model which is an extension of
//! the core WebAssembly specification which allows interoperability between WebAssembly
//! modules (known as "components") through high-level, shared-nothing APIs instead of the
//! low-level, shared-everything linear memory model of the core WebAssembly specification.
//!
//! You can see more about wasi at <https://wasi.dev> and the component model at
//! <https://github.com/WebAssembly/component-model>.
use crate::spec::crt_objects;
use crate::spec::LinkSelfContainedDefault;
use crate::spec::{base, Target};
pub fn target() -> Target {
let mut options = base::wasm::options();
options.os = "wasi".into();
options.env = "preview2".into();
options.linker = Some("wasm-component-ld".into());
options.pre_link_objects_self_contained = crt_objects::pre_wasi_self_contained();
options.post_link_objects_self_contained = crt_objects::post_wasi_self_contained();
// FIXME: Figure out cases in which WASM needs to link with a native toolchain.
options.link_self_contained = LinkSelfContainedDefault::True;
// Right now this is a bit of a workaround: the target is said to have a
// static crt by default, which we take as a signal to use the bundled crt.
// If that's turned off, the system's crt will be used instead. This means
// default usage of this target doesn't need an external compiler, but it
// remains interoperable with an external compiler if configured correctly.
options.crt_static_default = true;
options.crt_static_respected = true;
// Allow `+crt-static` to create a "cdylib" output which is just a wasm file
// without a main function.
options.crt_static_allows_dylibs = true;
// WASI's `sys::args::init` function ignores its arguments; instead,
// `args::args()` makes the WASI API calls itself.
options.main_needs_argc_argv = false;
// And, WASI mangles the name of "main" to distinguish between different
// signatures.
options.entry_name = "__main_void".into();
Target {
llvm_target: "wasm32-unknown-unknown".into(),
pointer_width: 32,
data_layout: "e-m:e-p:32:32-p10:8:8-p20:8:8-i64:64-n32:64-S128-ni:1:10:20".into(),
arch: "wasm32".into(),
options,
}
}

View File

@ -7,7 +7,7 @@ use super::{
use crate::errors; use crate::errors;
use crate::infer::InferCtxt; use crate::infer::InferCtxt;
use crate::traits::{NormalizeExt, ObligationCtxt}; use crate::traits::{ImplDerivedObligationCause, NormalizeExt, ObligationCtxt};
use hir::def::CtorOf; use hir::def::CtorOf;
use rustc_data_structures::fx::FxHashSet; use rustc_data_structures::fx::FxHashSet;
@ -2973,7 +2973,7 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
| ObligationCauseCode::ObjectTypeBound(..) => {} | ObligationCauseCode::ObjectTypeBound(..) => {}
ObligationCauseCode::RustCall => { ObligationCauseCode::RustCall => {
if let Some(pred) = predicate.to_opt_poly_trait_pred() if let Some(pred) = predicate.to_opt_poly_trait_pred()
&& Some(pred.def_id()) == self.tcx.lang_items().sized_trait() && Some(pred.def_id()) == tcx.lang_items().sized_trait()
{ {
err.note("argument required to be sized due to `extern \"rust-call\"` ABI"); err.note("argument required to be sized due to `extern \"rust-call\"` ABI");
} }
@ -3022,15 +3022,15 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
let def_id = trait_pred.def_id(); let def_id = trait_pred.def_id();
let visible_item = if let Some(local) = def_id.as_local() { let visible_item = if let Some(local) = def_id.as_local() {
// Check for local traits being reachable. // Check for local traits being reachable.
let vis = &self.tcx.resolutions(()).effective_visibilities; let vis = &tcx.resolutions(()).effective_visibilities;
// Account for non-`pub` traits in the root of the local crate. // Account for non-`pub` traits in the root of the local crate.
let is_locally_reachable = self.tcx.parent(def_id).is_crate_root(); let is_locally_reachable = tcx.parent(def_id).is_crate_root();
vis.is_reachable(local) || is_locally_reachable vis.is_reachable(local) || is_locally_reachable
} else { } else {
// Check for foreign traits being reachable. // Check for foreign traits being reachable.
self.tcx.visible_parent_map(()).get(&def_id).is_some() tcx.visible_parent_map(()).get(&def_id).is_some()
}; };
if Some(def_id) == self.tcx.lang_items().sized_trait() if Some(def_id) == tcx.lang_items().sized_trait()
&& let Some(hir::Node::TraitItem(hir::TraitItem { && let Some(hir::Node::TraitItem(hir::TraitItem {
ident, ident,
kind: hir::TraitItemKind::Type(bounds, None), kind: hir::TraitItemKind::Type(bounds, None),
@ -3039,7 +3039,7 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
// Do not suggest relaxing if there is an explicit `Sized` obligation. // Do not suggest relaxing if there is an explicit `Sized` obligation.
&& !bounds.iter() && !bounds.iter()
.filter_map(|bound| bound.trait_ref()) .filter_map(|bound| bound.trait_ref())
.any(|tr| tr.trait_def_id() == self.tcx.lang_items().sized_trait()) .any(|tr| tr.trait_def_id() == tcx.lang_items().sized_trait())
{ {
let (span, separator) = if let [.., last] = bounds { let (span, separator) = if let [.., last] = bounds {
(last.span().shrink_to_hi(), " +") (last.span().shrink_to_hi(), " +")
@ -3102,10 +3102,8 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
} }
ObligationCauseCode::Coercion { source, target } => { ObligationCauseCode::Coercion { source, target } => {
let mut file = None; let mut file = None;
let source = let source = tcx.short_ty_string(self.resolve_vars_if_possible(source), &mut file);
self.tcx.short_ty_string(self.resolve_vars_if_possible(source), &mut file); let target = tcx.short_ty_string(self.resolve_vars_if_possible(target), &mut file);
let target =
self.tcx.short_ty_string(self.resolve_vars_if_possible(target), &mut file);
err.note(with_forced_trimmed_paths!(format!( err.note(with_forced_trimmed_paths!(format!(
"required for the cast from `{source}` to `{target}`", "required for the cast from `{source}` to `{target}`",
))); )));
@ -3155,10 +3153,10 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
} else { } else {
// FIXME: we may suggest array::repeat instead // FIXME: we may suggest array::repeat instead
err.help("consider using `core::array::from_fn` to initialize the array"); err.help("consider using `core::array::from_fn` to initialize the array");
err.help("see https://doc.rust-lang.org/stable/std/array/fn.from_fn.html# for more information"); err.help("see https://doc.rust-lang.org/stable/std/array/fn.from_fn.html for more information");
} }
if self.tcx.sess.is_nightly_build() if tcx.sess.is_nightly_build()
&& matches!(is_constable, IsConstable::Fn | IsConstable::Ctor) && matches!(is_constable, IsConstable::Fn | IsConstable::Ctor)
{ {
err.help( err.help(
@ -3168,8 +3166,8 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
} }
} }
ObligationCauseCode::VariableType(hir_id) => { ObligationCauseCode::VariableType(hir_id) => {
let parent_node = self.tcx.hir().parent_id(hir_id); let parent_node = tcx.hir().parent_id(hir_id);
match self.tcx.opt_hir_node(parent_node) { match tcx.opt_hir_node(parent_node) {
Some(Node::Local(hir::Local { ty: Some(ty), .. })) => { Some(Node::Local(hir::Local { ty: Some(ty), .. })) => {
err.span_suggestion_verbose( err.span_suggestion_verbose(
ty.span.shrink_to_lo(), ty.span.shrink_to_lo(),
@ -3207,7 +3205,7 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
err.note("all local variables must have a statically known size"); err.note("all local variables must have a statically known size");
} }
} }
if !self.tcx.features().unsized_locals { if !tcx.features().unsized_locals {
err.help("unsized locals are gated as an unstable feature"); err.help("unsized locals are gated as an unstable feature");
} }
} }
@ -3289,7 +3287,7 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
err.note("all function arguments must have a statically known size"); err.note("all function arguments must have a statically known size");
} }
if tcx.sess.opts.unstable_features.is_nightly_build() if tcx.sess.opts.unstable_features.is_nightly_build()
&& !self.tcx.features().unsized_fn_params && !tcx.features().unsized_fn_params
{ {
err.help("unsized fn params are gated as an unstable feature"); err.help("unsized fn params are gated as an unstable feature");
} }
@ -3358,7 +3356,7 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
"all values captured by value by a closure must have a statically known size", "all values captured by value by a closure must have a statically known size",
); );
let hir::ExprKind::Closure(closure) = let hir::ExprKind::Closure(closure) =
self.tcx.hir_node_by_def_id(closure_def_id).expect_expr().kind tcx.hir_node_by_def_id(closure_def_id).expect_expr().kind
else { else {
bug!("expected closure in SizedClosureCapture obligation"); bug!("expected closure in SizedClosureCapture obligation");
}; };
@ -3369,7 +3367,7 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
} }
} }
ObligationCauseCode::SizedCoroutineInterior(coroutine_def_id) => { ObligationCauseCode::SizedCoroutineInterior(coroutine_def_id) => {
let what = match self.tcx.coroutine_kind(coroutine_def_id) { let what = match tcx.coroutine_kind(coroutine_def_id) {
None None
| Some(hir::CoroutineKind::Coroutine(_)) | Some(hir::CoroutineKind::Coroutine(_))
| Some(hir::CoroutineKind::Desugared(hir::CoroutineDesugaring::Gen, _)) => { | Some(hir::CoroutineKind::Desugared(hir::CoroutineDesugaring::Gen, _)) => {
@ -3420,10 +3418,10 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
'print: { 'print: {
if !is_upvar_tys_infer_tuple { if !is_upvar_tys_infer_tuple {
let mut file = None; let mut file = None;
let ty_str = self.tcx.short_ty_string(ty, &mut file); let ty_str = tcx.short_ty_string(ty, &mut file);
let msg = format!("required because it appears within the type `{ty_str}`"); let msg = format!("required because it appears within the type `{ty_str}`");
match ty.kind() { match ty.kind() {
ty::Adt(def, _) => match self.tcx.opt_item_ident(def.did()) { ty::Adt(def, _) => match tcx.opt_item_ident(def.did()) {
Some(ident) => err.span_note(ident.span, msg), Some(ident) => err.span_note(ident.span, msg),
None => err.note(msg), None => err.note(msg),
}, },
@ -3446,7 +3444,7 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
{ {
break 'print; break 'print;
} }
err.span_note(self.tcx.def_span(def_id), msg) err.span_note(tcx.def_span(def_id), msg)
} }
ty::CoroutineWitness(def_id, args) => { ty::CoroutineWitness(def_id, args) => {
use std::fmt::Write; use std::fmt::Write;
@ -3463,7 +3461,7 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
err.note(msg.trim_end_matches(", ").to_string()) err.note(msg.trim_end_matches(", ").to_string())
} }
ty::Coroutine(def_id, _) => { ty::Coroutine(def_id, _) => {
let sp = self.tcx.def_span(def_id); let sp = tcx.def_span(def_id);
// Special-case this to say "async block" instead of `[static coroutine]`. // Special-case this to say "async block" instead of `[static coroutine]`.
let kind = tcx.coroutine_kind(def_id).unwrap(); let kind = tcx.coroutine_kind(def_id).unwrap();
@ -3475,7 +3473,7 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
) )
} }
ty::Closure(def_id, _) => err.span_note( ty::Closure(def_id, _) => err.span_note(
self.tcx.def_span(def_id), tcx.def_span(def_id),
"required because it's used within this closure", "required because it's used within this closure",
), ),
ty::Str => err.note("`str` is considered to contain a `[u8]` slice for auto trait purposes"), ty::Str => err.note("`str` is considered to contain a `[u8]` slice for auto trait purposes"),
@ -3519,14 +3517,12 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
self.resolve_vars_if_possible(data.derived.parent_trait_pred); self.resolve_vars_if_possible(data.derived.parent_trait_pred);
let parent_def_id = parent_trait_pred.def_id(); let parent_def_id = parent_trait_pred.def_id();
let mut file = None; let mut file = None;
let self_ty = let self_ty_str =
self.tcx.short_ty_string(parent_trait_pred.skip_binder().self_ty(), &mut file); tcx.short_ty_string(parent_trait_pred.skip_binder().self_ty(), &mut file);
let msg = format!( let trait_name = parent_trait_pred.print_modifiers_and_trait_path().to_string();
"required for `{self_ty}` to implement `{}`", let msg = format!("required for `{self_ty_str}` to implement `{trait_name}`");
parent_trait_pred.print_modifiers_and_trait_path()
);
let mut is_auto_trait = false; let mut is_auto_trait = false;
match self.tcx.hir().get_if_local(data.impl_or_alias_def_id) { match tcx.hir().get_if_local(data.impl_or_alias_def_id) {
Some(Node::Item(hir::Item { Some(Node::Item(hir::Item {
kind: hir::ItemKind::Trait(is_auto, ..), kind: hir::ItemKind::Trait(is_auto, ..),
ident, ident,
@ -3538,7 +3534,7 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
err.span_note(ident.span, msg); err.span_note(ident.span, msg);
} }
Some(Node::Item(hir::Item { Some(Node::Item(hir::Item {
kind: hir::ItemKind::Impl(hir::Impl { of_trait, self_ty, .. }), kind: hir::ItemKind::Impl(hir::Impl { of_trait, self_ty, generics, .. }),
.. ..
})) => { })) => {
let mut spans = Vec::with_capacity(2); let mut spans = Vec::with_capacity(2);
@ -3565,6 +3561,15 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
); );
} }
err.span_note(spans, msg); err.span_note(spans, msg);
point_at_assoc_type_restriction(
tcx,
err,
&self_ty_str,
&trait_name,
predicate,
&generics,
&data,
);
} }
_ => { _ => {
err.note(msg); err.note(msg);
@ -3618,9 +3623,8 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
pluralize!(count) pluralize!(count)
)); ));
let mut file = None; let mut file = None;
let self_ty = self let self_ty =
.tcx tcx.short_ty_string(parent_trait_pred.skip_binder().self_ty(), &mut file);
.short_ty_string(parent_trait_pred.skip_binder().self_ty(), &mut file);
err.note(format!( err.note(format!(
"required for `{self_ty}` to implement `{}`", "required for `{self_ty}` to implement `{}`",
parent_trait_pred.print_modifiers_and_trait_path() parent_trait_pred.print_modifiers_and_trait_path()
@ -3678,10 +3682,7 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
multispan.push_span_label(span, "required by this bound"); multispan.push_span_label(span, "required by this bound");
err.span_note( err.span_note(
multispan, multispan,
format!( format!("required by a bound on the type alias `{}`", tcx.item_name(def_id)),
"required by a bound on the type alias `{}`",
self.infcx.tcx.item_name(def_id)
),
); );
} }
ObligationCauseCode::FunctionArgumentObligation { ObligationCauseCode::FunctionArgumentObligation {
@ -3712,25 +3713,23 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
}); });
} }
ObligationCauseCode::CompareImplItemObligation { trait_item_def_id, kind, .. } => { ObligationCauseCode::CompareImplItemObligation { trait_item_def_id, kind, .. } => {
let item_name = self.tcx.item_name(trait_item_def_id); let item_name = tcx.item_name(trait_item_def_id);
let msg = format!( let msg = format!(
"the requirement `{predicate}` appears on the `impl`'s {kind} \ "the requirement `{predicate}` appears on the `impl`'s {kind} \
`{item_name}` but not on the corresponding trait's {kind}", `{item_name}` but not on the corresponding trait's {kind}",
); );
let sp = self let sp = tcx
.tcx
.opt_item_ident(trait_item_def_id) .opt_item_ident(trait_item_def_id)
.map(|i| i.span) .map(|i| i.span)
.unwrap_or_else(|| self.tcx.def_span(trait_item_def_id)); .unwrap_or_else(|| tcx.def_span(trait_item_def_id));
let mut assoc_span: MultiSpan = sp.into(); let mut assoc_span: MultiSpan = sp.into();
assoc_span.push_span_label( assoc_span.push_span_label(
sp, sp,
format!("this trait's {kind} doesn't have the requirement `{predicate}`"), format!("this trait's {kind} doesn't have the requirement `{predicate}`"),
); );
if let Some(ident) = self if let Some(ident) = tcx
.tcx
.opt_associated_item(trait_item_def_id) .opt_associated_item(trait_item_def_id)
.and_then(|i| self.tcx.opt_item_ident(i.container_id(self.tcx))) .and_then(|i| tcx.opt_item_ident(i.container_id(tcx)))
{ {
assoc_span.push_span_label(ident.span, "in this trait"); assoc_span.push_span_label(ident.span, "in this trait");
} }
@ -4820,6 +4819,29 @@ fn hint_missing_borrow<'tcx>(
} }
} }
/// Collect all the paths that reference `Self`.
/// Used to suggest replacing associated types with an explicit type in `where` clauses.
#[derive(Debug)]
pub struct SelfVisitor<'v> {
pub paths: Vec<&'v hir::Ty<'v>>,
pub name: Option<Symbol>,
}
impl<'v> Visitor<'v> for SelfVisitor<'v> {
fn visit_ty(&mut self, ty: &'v hir::Ty<'v>) {
if let hir::TyKind::Path(path) = ty.kind
&& let hir::QPath::TypeRelative(inner_ty, segment) = path
&& (Some(segment.ident.name) == self.name || self.name.is_none())
&& let hir::TyKind::Path(inner_path) = inner_ty.kind
&& let hir::QPath::Resolved(None, inner_path) = inner_path
&& let Res::SelfTyAlias { .. } = inner_path.res
{
self.paths.push(ty);
}
hir::intravisit::walk_ty(self, ty);
}
}
/// Collect all the returned expressions within the input expression. /// Collect all the returned expressions within the input expression.
/// Used to point at the return spans when we want to suggest some change to them. /// Used to point at the return spans when we want to suggest some change to them.
#[derive(Default)] #[derive(Default)]
@ -5064,6 +5086,134 @@ pub fn suggest_desugaring_async_fn_to_impl_future_in_trait<'tcx>(
Some(sugg) Some(sugg)
} }
/// On `impl` evaluation cycles, look for `Self::AssocTy` restrictions in `where` clauses, explain
/// they are not allowed and if possible suggest alternatives.
fn point_at_assoc_type_restriction(
tcx: TyCtxt<'_>,
err: &mut Diagnostic,
self_ty_str: &str,
trait_name: &str,
predicate: ty::Predicate<'_>,
generics: &hir::Generics<'_>,
data: &ImplDerivedObligationCause<'_>,
) {
let ty::PredicateKind::Clause(clause) = predicate.kind().skip_binder() else {
return;
};
let ty::ClauseKind::Projection(proj) = clause else {
return;
};
let name = tcx.item_name(proj.projection_ty.def_id);
let mut predicates = generics.predicates.iter().peekable();
let mut prev: Option<&hir::WhereBoundPredicate<'_>> = None;
while let Some(pred) = predicates.next() {
let hir::WherePredicate::BoundPredicate(pred) = pred else {
continue;
};
let mut bounds = pred.bounds.iter().peekable();
while let Some(bound) = bounds.next() {
let Some(trait_ref) = bound.trait_ref() else {
continue;
};
if bound.span() != data.span {
continue;
}
if let hir::TyKind::Path(path) = pred.bounded_ty.kind
&& let hir::QPath::TypeRelative(ty, segment) = path
&& segment.ident.name == name
&& let hir::TyKind::Path(inner_path) = ty.kind
&& let hir::QPath::Resolved(None, inner_path) = inner_path
&& let Res::SelfTyAlias { .. } = inner_path.res
{
// The following block is to determine the right span to delete for this bound
// that will leave valid code after the suggestion is applied.
let span = if pred.origin == hir::PredicateOrigin::WhereClause
&& generics
.predicates
.iter()
.filter(|p| {
matches!(
p,
hir::WherePredicate::BoundPredicate(p)
if hir::PredicateOrigin::WhereClause == p.origin
)
})
.count()
== 1
{
// There's only one `where` bound, that needs to be removed. Remove the whole
// `where` clause.
generics.where_clause_span
} else if let Some(hir::WherePredicate::BoundPredicate(next)) = predicates.peek()
&& pred.origin == next.origin
{
// There's another bound, include the comma for the current one.
pred.span.until(next.span)
} else if let Some(prev) = prev
&& pred.origin == prev.origin
{
// Last bound, try to remove the previous comma.
prev.span.shrink_to_hi().to(pred.span)
} else if pred.origin == hir::PredicateOrigin::WhereClause {
pred.span.with_hi(generics.where_clause_span.hi())
} else {
pred.span
};
err.span_suggestion_verbose(
span,
"associated type for the current `impl` cannot be restricted in `where` \
clauses, remove this bound",
"",
Applicability::MaybeIncorrect,
);
}
if let Some(new) =
tcx.associated_items(data.impl_or_alias_def_id).find_by_name_and_kind(
tcx,
Ident::with_dummy_span(name),
ty::AssocKind::Type,
data.impl_or_alias_def_id,
)
{
// The associated type is specified in the `impl` we're
// looking at. Point at it.
let span = tcx.def_span(new.def_id);
err.span_label(
span,
format!(
"associated type `<{self_ty_str} as {trait_name}>::{name}` is specified \
here",
),
);
// Search for the associated type `Self::{name}`, get
// its type and suggest replacing the bound with it.
let mut visitor = SelfVisitor { paths: vec![], name: Some(name) };
visitor.visit_trait_ref(trait_ref);
for path in visitor.paths {
err.span_suggestion_verbose(
path.span,
"replace the associated type with the type specified in this `impl`",
tcx.type_of(new.def_id).skip_binder().to_string(),
Applicability::MachineApplicable,
);
}
} else {
let mut visitor = SelfVisitor { paths: vec![], name: None };
visitor.visit_trait_ref(trait_ref);
let span: MultiSpan =
visitor.paths.iter().map(|p| p.span).collect::<Vec<Span>>().into();
err.span_note(
span,
"associated types for the current `impl` cannot be restricted in `where` \
clauses",
);
}
}
prev = Some(pred);
}
}
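For context, a hypothetical example (all names invented) of the code shape that `point_at_assoc_type_restriction` diagnoses, with the offending `where Self::Assoc: ...` form shown in comments and the suggested rewrite compiled:

trait Trait {
    type Assoc;
    fn get(&self) -> Self::Assoc;
}

struct Thing;

// This form is what the diagnostic points at and suggests rewriting:
//
//     impl Trait for Thing
//     where
//         Self::Assoc: Copy, // the current impl's associated type cannot be restricted here
//     {
//         type Assoc = u32;
//         fn get(&self) -> u32 { 0 }
//     }
//
// The suggestion is to name the concrete type specified in the `impl` instead
// of `Self::Assoc`, or to drop the bound entirely, as done here:
impl Trait for Thing {
    type Assoc = u32;
    fn get(&self) -> u32 {
        0
    }
}

fn main() {
    assert_eq!(Thing.get(), 0);
}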
fn get_deref_type_and_refs(mut ty: Ty<'_>) -> (Ty<'_>, Vec<hir::Mutability>) { fn get_deref_type_and_refs(mut ty: Ty<'_>) -> (Ty<'_>, Vec<hir::Mutability>) {
let mut refs = vec![]; let mut refs = vec![];

View File

@ -39,7 +39,7 @@ pub fn search_for_structural_match_violation<'tcx>(
/// This implements the traversal over the structure of a given type to try to /// This implements the traversal over the structure of a given type to try to
/// find instances of ADTs (specifically structs or enums) that do not implement /// find instances of ADTs (specifically structs or enums) that do not implement
/// the structural-match traits (`StructuralPartialEq` and `StructuralEq`). /// `StructuralPartialEq`.
struct Search<'tcx> { struct Search<'tcx> {
span: Span, span: Span,

View File

@ -6,13 +6,12 @@ use rustc_infer::infer::TyCtxtInferExt;
use rustc_trait_selection::traits::{ObligationCause, ObligationCtxt}; use rustc_trait_selection::traits::{ObligationCause, ObligationCtxt};
/// This method returns true if and only if `adt_ty` itself has been marked as /// This method returns true if and only if `adt_ty` itself has been marked as
/// eligible for structural-match: namely, if it implements both /// eligible for structural-match: namely, if it implements
/// `StructuralPartialEq` and `StructuralEq` (which are respectively injected by /// `StructuralPartialEq` (which is injected by `#[derive(PartialEq)]`).
/// `#[derive(PartialEq)]` and `#[derive(Eq)]`).
/// ///
/// Note that this does *not* recursively check if the substructure of `adt_ty` /// Note that this does *not* recursively check if the substructure of `adt_ty`
/// implements the traits. /// implements the trait.
fn has_structural_eq_impls<'tcx>(tcx: TyCtxt<'tcx>, adt_ty: Ty<'tcx>) -> bool { fn has_structural_eq_impl<'tcx>(tcx: TyCtxt<'tcx>, adt_ty: Ty<'tcx>) -> bool {
let infcx = &tcx.infer_ctxt().build(); let infcx = &tcx.infer_ctxt().build();
let cause = ObligationCause::dummy(); let cause = ObligationCause::dummy();
@ -21,11 +20,6 @@ fn has_structural_eq_impls<'tcx>(tcx: TyCtxt<'tcx>, adt_ty: Ty<'tcx>) -> bool {
let structural_peq_def_id = let structural_peq_def_id =
infcx.tcx.require_lang_item(LangItem::StructuralPeq, Some(cause.span)); infcx.tcx.require_lang_item(LangItem::StructuralPeq, Some(cause.span));
ocx.register_bound(cause.clone(), ty::ParamEnv::empty(), adt_ty, structural_peq_def_id); ocx.register_bound(cause.clone(), ty::ParamEnv::empty(), adt_ty, structural_peq_def_id);
// for now, require `#[derive(Eq)]`. (Doing so is a hack to work around
// the type `for<'a> fn(&'a ())` failing to implement `Eq` itself.)
let structural_teq_def_id =
infcx.tcx.require_lang_item(LangItem::StructuralTeq, Some(cause.span));
ocx.register_bound(cause, ty::ParamEnv::empty(), adt_ty, structural_teq_def_id);
// We deliberately skip *reporting* fulfillment errors (via // We deliberately skip *reporting* fulfillment errors (via
// `report_fulfillment_errors`), for two reasons: // `report_fulfillment_errors`), for two reasons:
@ -40,5 +34,5 @@ fn has_structural_eq_impls<'tcx>(tcx: TyCtxt<'tcx>, adt_ty: Ty<'tcx>) -> bool {
} }
pub(crate) fn provide(providers: &mut Providers) { pub(crate) fn provide(providers: &mut Providers) {
providers.has_structural_eq_impls = has_structural_eq_impls; providers.has_structural_eq_impl = has_structural_eq_impl;
} }
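With `StructuralEq` dropped, structural match is keyed off `StructuralPartialEq` alone, i.e. off `#[derive(PartialEq)]`. A small illustrative example (not part of this diff) of what that property gates, namely using a `const` of such a type as a pattern:

#[derive(PartialEq, Eq)]
struct Point {
    x: i32,
    y: i32,
}

const ORIGIN: Point = Point { x: 0, y: 0 };

fn is_origin(p: Point) -> bool {
    // Using `ORIGIN` as a pattern requires `Point` to be structurally
    // matchable, which the derive provides.
    matches!(p, ORIGIN)
}

fn main() {
    assert!(is_origin(Point { x: 0, y: 0 }));
    assert!(!is_origin(Point { x: 1, y: 0 }));
}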

File diff suppressed because it is too large


@ -2349,112 +2349,151 @@ fn test_cursor() {
let map = BTreeMap::from([(1, 'a'), (2, 'b'), (3, 'c')]); let map = BTreeMap::from([(1, 'a'), (2, 'b'), (3, 'c')]);
let mut cur = map.lower_bound(Bound::Unbounded); let mut cur = map.lower_bound(Bound::Unbounded);
assert_eq!(cur.key(), Some(&1)); assert_eq!(cur.peek_next(), Some((&1, &'a')));
cur.move_next();
assert_eq!(cur.key(), Some(&2));
assert_eq!(cur.peek_next(), Some((&3, &'c')));
cur.move_prev();
assert_eq!(cur.key(), Some(&1));
assert_eq!(cur.peek_prev(), None); assert_eq!(cur.peek_prev(), None);
assert_eq!(cur.prev(), None);
assert_eq!(cur.next(), Some((&1, &'a')));
assert_eq!(cur.next(), Some((&2, &'b')));
assert_eq!(cur.peek_next(), Some((&3, &'c')));
assert_eq!(cur.prev(), Some((&2, &'b')));
assert_eq!(cur.peek_prev(), Some((&1, &'a')));
let mut cur = map.upper_bound(Bound::Excluded(&1)); let mut cur = map.upper_bound(Bound::Excluded(&1));
assert_eq!(cur.key(), None); assert_eq!(cur.peek_prev(), None);
cur.move_next(); assert_eq!(cur.next(), Some((&1, &'a')));
assert_eq!(cur.key(), Some(&1)); assert_eq!(cur.prev(), Some((&1, &'a')));
cur.move_prev();
assert_eq!(cur.key(), None);
assert_eq!(cur.peek_prev(), Some((&3, &'c')));
} }
#[test] #[test]
fn test_cursor_mut() { fn test_cursor_mut() {
let mut map = BTreeMap::from([(1, 'a'), (3, 'c'), (5, 'e')]); let mut map = BTreeMap::from([(1, 'a'), (3, 'c'), (5, 'e')]);
let mut cur = map.lower_bound_mut(Bound::Excluded(&3)); let mut cur = map.lower_bound_mut(Bound::Excluded(&3));
assert_eq!(cur.key(), Some(&5)); assert_eq!(cur.peek_next(), Some((&5, &mut 'e')));
cur.insert_before(4, 'd'); assert_eq!(cur.peek_prev(), Some((&3, &mut 'c')));
assert_eq!(cur.key(), Some(&5));
cur.insert_before(4, 'd').unwrap();
assert_eq!(cur.peek_next(), Some((&5, &mut 'e')));
assert_eq!(cur.peek_prev(), Some((&4, &mut 'd'))); assert_eq!(cur.peek_prev(), Some((&4, &mut 'd')));
cur.move_next();
assert_eq!(cur.key(), None); assert_eq!(cur.next(), Some((&5, &mut 'e')));
cur.insert_before(6, 'f'); assert_eq!(cur.peek_next(), None);
assert_eq!(cur.key(), None); assert_eq!(cur.peek_prev(), Some((&5, &mut 'e')));
assert_eq!(cur.remove_current(), None); cur.insert_before(6, 'f').unwrap();
assert_eq!(cur.key(), None); assert_eq!(cur.peek_next(), None);
cur.insert_after(0, '?'); assert_eq!(cur.peek_prev(), Some((&6, &mut 'f')));
assert_eq!(cur.key(), None); assert_eq!(cur.remove_prev(), Some((6, 'f')));
assert_eq!(map, BTreeMap::from([(0, '?'), (1, 'a'), (3, 'c'), (4, 'd'), (5, 'e'), (6, 'f')])); assert_eq!(cur.remove_prev(), Some((5, 'e')));
assert_eq!(cur.remove_next(), None);
assert_eq!(map, BTreeMap::from([(1, 'a'), (3, 'c'), (4, 'd')]));
let mut cur = map.upper_bound_mut(Bound::Included(&5)); let mut cur = map.upper_bound_mut(Bound::Included(&5));
assert_eq!(cur.key(), Some(&5)); assert_eq!(cur.peek_next(), None);
assert_eq!(cur.remove_current(), Some((5, 'e'))); assert_eq!(cur.prev(), Some((&4, &mut 'd')));
assert_eq!(cur.key(), Some(&6)); assert_eq!(cur.peek_next(), Some((&4, &mut 'd')));
assert_eq!(cur.remove_current_and_move_back(), Some((6, 'f'))); assert_eq!(cur.peek_prev(), Some((&3, &mut 'c')));
assert_eq!(cur.key(), Some(&4)); assert_eq!(cur.remove_next(), Some((4, 'd')));
assert_eq!(map, BTreeMap::from([(0, '?'), (1, 'a'), (3, 'c'), (4, 'd')])); assert_eq!(map, BTreeMap::from([(1, 'a'), (3, 'c')]));
}
#[test]
fn test_cursor_mut_key() {
let mut map = BTreeMap::from([(1, 'a'), (3, 'c'), (5, 'e')]);
let mut cur = unsafe { map.lower_bound_mut(Bound::Excluded(&3)).with_mutable_key() };
assert_eq!(cur.peek_next(), Some((&mut 5, &mut 'e')));
assert_eq!(cur.peek_prev(), Some((&mut 3, &mut 'c')));
cur.insert_before(4, 'd').unwrap();
assert_eq!(cur.peek_next(), Some((&mut 5, &mut 'e')));
assert_eq!(cur.peek_prev(), Some((&mut 4, &mut 'd')));
assert_eq!(cur.next(), Some((&mut 5, &mut 'e')));
assert_eq!(cur.peek_next(), None);
assert_eq!(cur.peek_prev(), Some((&mut 5, &mut 'e')));
cur.insert_before(6, 'f').unwrap();
assert_eq!(cur.peek_next(), None);
assert_eq!(cur.peek_prev(), Some((&mut 6, &mut 'f')));
assert_eq!(cur.remove_prev(), Some((6, 'f')));
assert_eq!(cur.remove_prev(), Some((5, 'e')));
assert_eq!(cur.remove_next(), None);
assert_eq!(map, BTreeMap::from([(1, 'a'), (3, 'c'), (4, 'd')]));
let mut cur = unsafe { map.upper_bound_mut(Bound::Included(&5)).with_mutable_key() };
assert_eq!(cur.peek_next(), None);
assert_eq!(cur.prev(), Some((&mut 4, &mut 'd')));
assert_eq!(cur.peek_next(), Some((&mut 4, &mut 'd')));
assert_eq!(cur.peek_prev(), Some((&mut 3, &mut 'c')));
assert_eq!(cur.remove_next(), Some((4, 'd')));
assert_eq!(map, BTreeMap::from([(1, 'a'), (3, 'c')]));
}
#[test]
fn test_cursor_empty() {
let mut map = BTreeMap::new();
let mut cur = map.lower_bound_mut(Bound::Excluded(&3));
assert_eq!(cur.peek_next(), None);
assert_eq!(cur.peek_prev(), None);
cur.insert_after(0, 0).unwrap();
assert_eq!(cur.peek_next(), Some((&0, &mut 0)));
assert_eq!(cur.peek_prev(), None);
assert_eq!(map, BTreeMap::from([(0, 0)]));
} }
#[should_panic(expected = "key must be ordered above the previous element")]
#[test] #[test]
fn test_cursor_mut_insert_before_1() { fn test_cursor_mut_insert_before_1() {
let mut map = BTreeMap::from([(1, 'a'), (2, 'b'), (3, 'c')]); let mut map = BTreeMap::from([(1, 'a'), (2, 'b'), (3, 'c')]);
let mut cur = map.upper_bound_mut(Bound::Included(&2)); let mut cur = map.upper_bound_mut(Bound::Included(&2));
cur.insert_before(0, 'd'); cur.insert_before(0, 'd').unwrap_err();
} }
#[should_panic(expected = "key must be ordered above the previous element")]
#[test] #[test]
fn test_cursor_mut_insert_before_2() { fn test_cursor_mut_insert_before_2() {
let mut map = BTreeMap::from([(1, 'a'), (2, 'b'), (3, 'c')]); let mut map = BTreeMap::from([(1, 'a'), (2, 'b'), (3, 'c')]);
let mut cur = map.upper_bound_mut(Bound::Included(&2)); let mut cur = map.upper_bound_mut(Bound::Included(&2));
cur.insert_before(1, 'd'); cur.insert_before(1, 'd').unwrap_err();
} }
#[should_panic(expected = "key must be ordered below the current element")]
#[test] #[test]
fn test_cursor_mut_insert_before_3() { fn test_cursor_mut_insert_before_3() {
let mut map = BTreeMap::from([(1, 'a'), (2, 'b'), (3, 'c')]); let mut map = BTreeMap::from([(1, 'a'), (2, 'b'), (3, 'c')]);
let mut cur = map.upper_bound_mut(Bound::Included(&2)); let mut cur = map.upper_bound_mut(Bound::Included(&2));
cur.insert_before(2, 'd'); cur.insert_before(2, 'd').unwrap_err();
} }
#[should_panic(expected = "key must be ordered below the current element")]
#[test] #[test]
fn test_cursor_mut_insert_before_4() { fn test_cursor_mut_insert_before_4() {
let mut map = BTreeMap::from([(1, 'a'), (2, 'b'), (3, 'c')]); let mut map = BTreeMap::from([(1, 'a'), (2, 'b'), (3, 'c')]);
let mut cur = map.upper_bound_mut(Bound::Included(&2)); let mut cur = map.upper_bound_mut(Bound::Included(&2));
cur.insert_before(3, 'd'); cur.insert_before(3, 'd').unwrap_err();
} }
#[should_panic(expected = "key must be ordered above the current element")]
#[test] #[test]
fn test_cursor_mut_insert_after_1() { fn test_cursor_mut_insert_after_1() {
let mut map = BTreeMap::from([(1, 'a'), (2, 'b'), (3, 'c')]); let mut map = BTreeMap::from([(1, 'a'), (2, 'b'), (3, 'c')]);
let mut cur = map.upper_bound_mut(Bound::Included(&2)); let mut cur = map.upper_bound_mut(Bound::Included(&2));
cur.insert_after(1, 'd'); cur.insert_after(1, 'd').unwrap_err();
} }
#[should_panic(expected = "key must be ordered above the current element")]
#[test] #[test]
fn test_cursor_mut_insert_after_2() { fn test_cursor_mut_insert_after_2() {
let mut map = BTreeMap::from([(1, 'a'), (2, 'b'), (3, 'c')]); let mut map = BTreeMap::from([(1, 'a'), (2, 'b'), (3, 'c')]);
let mut cur = map.upper_bound_mut(Bound::Included(&2)); let mut cur = map.upper_bound_mut(Bound::Included(&2));
cur.insert_after(2, 'd'); cur.insert_after(2, 'd').unwrap_err();
} }
#[should_panic(expected = "key must be ordered below the next element")]
#[test] #[test]
fn test_cursor_mut_insert_after_3() { fn test_cursor_mut_insert_after_3() {
let mut map = BTreeMap::from([(1, 'a'), (2, 'b'), (3, 'c')]); let mut map = BTreeMap::from([(1, 'a'), (2, 'b'), (3, 'c')]);
let mut cur = map.upper_bound_mut(Bound::Included(&2)); let mut cur = map.upper_bound_mut(Bound::Included(&2));
cur.insert_after(3, 'd'); cur.insert_after(3, 'd').unwrap_err();
} }
#[should_panic(expected = "key must be ordered below the next element")]
#[test] #[test]
fn test_cursor_mut_insert_after_4() { fn test_cursor_mut_insert_after_4() {
let mut map = BTreeMap::from([(1, 'a'), (2, 'b'), (3, 'c')]); let mut map = BTreeMap::from([(1, 'a'), (2, 'b'), (3, 'c')]);
let mut cur = map.upper_bound_mut(Bound::Included(&2)); let mut cur = map.upper_bound_mut(Bound::Included(&2));
cur.insert_after(4, 'd'); cur.insert_after(4, 'd').unwrap_err();
} }
#[test] #[test]
@ -2462,14 +2501,14 @@ fn cursor_peek_prev_agrees_with_cursor_mut() {
let mut map = BTreeMap::from([(1, 1), (2, 2), (3, 3)]); let mut map = BTreeMap::from([(1, 1), (2, 2), (3, 3)]);
let cursor = map.lower_bound(Bound::Excluded(&3)); let cursor = map.lower_bound(Bound::Excluded(&3));
assert!(cursor.key().is_none()); assert!(cursor.peek_next().is_none());
let prev = cursor.peek_prev(); let prev = cursor.peek_prev();
assert_matches!(prev, Some((&3, _))); assert_matches!(prev, Some((&3, _)));
// Shadow names so the two parts of this test match. // Shadow names so the two parts of this test match.
let mut cursor = map.lower_bound_mut(Bound::Excluded(&3)); let mut cursor = map.lower_bound_mut(Bound::Excluded(&3));
assert!(cursor.key().is_none()); assert!(cursor.peek_next().is_none());
let prev = cursor.peek_prev(); let prev = cursor.peek_prev();
assert_matches!(prev, Some((&3, _))); assert_matches!(prev, Some((&3, _)));


@ -648,17 +648,36 @@ impl<K, V, Type> NodeRef<marker::Owned, K, V, Type> {
impl<'a, K: 'a, V: 'a> NodeRef<marker::Mut<'a>, K, V, marker::Leaf> { impl<'a, K: 'a, V: 'a> NodeRef<marker::Mut<'a>, K, V, marker::Leaf> {
/// Adds a key-value pair to the end of the node, and returns /// Adds a key-value pair to the end of the node, and returns
/// the mutable reference of the inserted value. /// a handle to the inserted value.
pub fn push(&mut self, key: K, val: V) -> &mut V { ///
/// # Safety
///
/// The returned handle has an unbound lifetime.
pub unsafe fn push_with_handle<'b>(
&mut self,
key: K,
val: V,
) -> Handle<NodeRef<marker::Mut<'b>, K, V, marker::Leaf>, marker::KV> {
let len = self.len_mut(); let len = self.len_mut();
let idx = usize::from(*len); let idx = usize::from(*len);
assert!(idx < CAPACITY); assert!(idx < CAPACITY);
*len += 1; *len += 1;
unsafe { unsafe {
self.key_area_mut(idx).write(key); self.key_area_mut(idx).write(key);
self.val_area_mut(idx).write(val) self.val_area_mut(idx).write(val);
Handle::new_kv(
NodeRef { height: self.height, node: self.node, _marker: PhantomData },
idx,
)
} }
} }
/// Adds a key-value pair to the end of the node, and returns
/// the mutable reference of the inserted value.
pub fn push(&mut self, key: K, val: V) -> *mut V {
// SAFETY: The unbound handle is no longer accessible.
unsafe { self.push_with_handle(key, val).into_val_mut() }
}
} }
impl<'a, K: 'a, V: 'a> NodeRef<marker::Mut<'a>, K, V, marker::Internal> { impl<'a, K: 'a, V: 'a> NodeRef<marker::Mut<'a>, K, V, marker::Internal> {
@ -1100,10 +1119,10 @@ impl<'a, K: 'a, V: 'a, NodeType> Handle<NodeRef<marker::Mut<'a>, K, V, NodeType>
unsafe { leaf.vals.get_unchecked_mut(self.idx).assume_init_mut() } unsafe { leaf.vals.get_unchecked_mut(self.idx).assume_init_mut() }
} }
pub fn into_kv_valmut(self) -> (&'a K, &'a mut V) { pub fn into_kv_mut(self) -> (&'a mut K, &'a mut V) {
debug_assert!(self.idx < self.node.len()); debug_assert!(self.idx < self.node.len());
let leaf = self.node.into_leaf_mut(); let leaf = self.node.into_leaf_mut();
let k = unsafe { leaf.keys.get_unchecked(self.idx).assume_init_ref() }; let k = unsafe { leaf.keys.get_unchecked_mut(self.idx).assume_init_mut() };
let v = unsafe { leaf.vals.get_unchecked_mut(self.idx).assume_init_mut() }; let v = unsafe { leaf.vals.get_unchecked_mut(self.idx).assume_init_mut() };
(k, v) (k, v)
} }


@ -2517,6 +2517,66 @@ extern "rust-intrinsic" {
where where
G: FnOnce<ARG, Output = RET>, G: FnOnce<ARG, Output = RET>,
F: FnOnce<ARG, Output = RET>; F: FnOnce<ARG, Output = RET>;
/// Returns whether the argument's value is statically known at
/// compile-time.
///
/// This is useful when there is a way of writing the code that will
/// be *faster* when some variables have known values, but *slower*
/// in the general case: an `if is_val_statically_known(var)` can be used
/// to select between these two variants. The `if` will be optimized away
/// and only the desired branch remains.
///
/// Formally speaking, this function non-deterministically returns `true`
/// or `false`, and the caller has to ensure sound behavior for both cases.
/// In other words, the following code has *Undefined Behavior*:
///
/// ```
/// #![feature(is_val_statically_known)]
/// #![feature(core_intrinsics)]
/// # #![allow(internal_features)]
/// use std::hint::unreachable_unchecked;
/// use std::intrinsics::is_val_statically_known;
///
/// unsafe {
/// if !is_val_statically_known(0) { unreachable_unchecked(); }
/// }
/// ```
///
/// This also means that the following code's behavior is unspecified; it
/// may panic, or it may not:
///
/// ```no_run
/// #![feature(is_val_statically_known)]
/// #![feature(core_intrinsics)]
/// # #![allow(internal_features)]
/// use std::intrinsics::is_val_statically_known;
///
/// unsafe {
/// assert_eq!(is_val_statically_known(0), is_val_statically_known(0));
/// }
/// ```
///
/// Unsafe code may not rely on `is_val_statically_known` returning any
/// particular value, ever. However, the compiler will generally make it
/// return `true` only if the value of the argument is actually known.
///
/// When calling this in a `const fn`, both paths must be semantically
/// equivalent, that is, the result of the `true` branch and the `false`
/// branch must return the same value and have the same side-effects *no
/// matter what*.
#[rustc_const_unstable(feature = "is_val_statically_known", issue = "none")]
#[rustc_nounwind]
#[cfg(not(bootstrap))]
pub fn is_val_statically_known<T: Copy>(arg: T) -> bool;
}
// FIXME: Seems using `unstable` here completely ignores `rustc_allow_const_fn_unstable`
// and thus compiling stage0 core doesn't work.
#[rustc_const_stable(feature = "is_val_statically_known", since = "0.0.0")]
#[cfg(bootstrap)]
pub const unsafe fn is_val_statically_known<T: Copy>(_arg: T) -> bool {
false
} }
// Some functions are defined here because they accidentally got made // Some functions are defined here because they accidentally got made


@ -200,6 +200,7 @@
// //
// Language features: // Language features:
// tidy-alphabetical-start // tidy-alphabetical-start
#![cfg_attr(not(bootstrap), feature(is_val_statically_known))]
#![feature(abi_unadjusted)] #![feature(abi_unadjusted)]
#![feature(adt_const_params)] #![feature(adt_const_params)]
#![feature(allow_internal_unsafe)] #![feature(allow_internal_unsafe)]


@ -187,7 +187,7 @@ pub trait Unsize<T: ?Sized> {
/// Required trait for constants used in pattern matches. /// Required trait for constants used in pattern matches.
/// ///
/// Any type that derives `PartialEq` automatically implements this trait, /// Any type that derives `PartialEq` automatically implements this trait,
/// *regardless* of whether its type-parameters implement `Eq`. /// *regardless* of whether its type-parameters implement `PartialEq`.
/// ///
/// If a `const` item contains some type that does not implement this trait, /// If a `const` item contains some type that does not implement this trait,
/// then that type either (1.) does not implement `PartialEq` (which means the /// then that type either (1.) does not implement `PartialEq` (which means the
@ -200,7 +200,7 @@ pub trait Unsize<T: ?Sized> {
/// a pattern match. /// a pattern match.
/// ///
/// See also the [structural match RFC][RFC1445], and [issue 63438] which /// See also the [structural match RFC][RFC1445], and [issue 63438] which
/// motivated migrating from attribute-based design to this trait. /// motivated migrating from an attribute-based design to this trait.
/// ///
/// [RFC1445]: https://github.com/rust-lang/rfcs/blob/master/text/1445-restrict-constants-in-patterns.md /// [RFC1445]: https://github.com/rust-lang/rfcs/blob/master/text/1445-restrict-constants-in-patterns.md
/// [issue 63438]: https://github.com/rust-lang/rust/issues/63438 /// [issue 63438]: https://github.com/rust-lang/rust/issues/63438
@ -218,7 +218,7 @@ marker_impls! {
isize, i8, i16, i32, i64, i128, isize, i8, i16, i32, i64, i128,
bool, bool,
char, char,
str /* Technically requires `[u8]: StructuralEq` */, str /* Technically requires `[u8]: StructuralPartialEq` */,
(), (),
{T, const N: usize} [T; N], {T, const N: usize} [T; N],
{T} [T], {T} [T],
@ -275,6 +275,7 @@ marker_impls! {
#[unstable(feature = "structural_match", issue = "31434")] #[unstable(feature = "structural_match", issue = "31434")]
#[diagnostic::on_unimplemented(message = "the type `{Self}` does not `#[derive(Eq)]`")] #[diagnostic::on_unimplemented(message = "the type `{Self}` does not `#[derive(Eq)]`")]
#[lang = "structural_teq"] #[lang = "structural_teq"]
#[cfg(bootstrap)]
pub trait StructuralEq { pub trait StructuralEq {
// Empty. // Empty.
} }
@ -282,6 +283,7 @@ pub trait StructuralEq {
// FIXME: Remove special cases of these types from the compiler pattern checking code and always check `T: StructuralEq` instead // FIXME: Remove special cases of these types from the compiler pattern checking code and always check `T: StructuralEq` instead
marker_impls! { marker_impls! {
#[unstable(feature = "structural_match", issue = "31434")] #[unstable(feature = "structural_match", issue = "31434")]
#[cfg(bootstrap)]
StructuralEq for StructuralEq for
usize, u8, u16, u32, u64, u128, usize, u8, u16, u32, u64, u128,
isize, i8, i16, i32, i64, i128, isize, i8, i16, i32, i64, i128,
@ -859,6 +861,7 @@ impl<T: ?Sized> Default for PhantomData<T> {
impl<T: ?Sized> StructuralPartialEq for PhantomData<T> {} impl<T: ?Sized> StructuralPartialEq for PhantomData<T> {}
#[unstable(feature = "structural_match", issue = "31434")] #[unstable(feature = "structural_match", issue = "31434")]
#[cfg(bootstrap)]
impl<T: ?Sized> StructuralEq for PhantomData<T> {} impl<T: ?Sized> StructuralEq for PhantomData<T> {}
/// Compiler-internal trait used to indicate the type of enum discriminants. /// Compiler-internal trait used to indicate the type of enum discriminants.
@ -1038,6 +1041,20 @@ pub trait PointerLike {}
#[unstable(feature = "adt_const_params", issue = "95174")] #[unstable(feature = "adt_const_params", issue = "95174")]
#[diagnostic::on_unimplemented(message = "`{Self}` can't be used as a const parameter type")] #[diagnostic::on_unimplemented(message = "`{Self}` can't be used as a const parameter type")]
#[allow(multiple_supertrait_upcastable)] #[allow(multiple_supertrait_upcastable)]
#[cfg(not(bootstrap))]
pub trait ConstParamTy: StructuralPartialEq + Eq {}
/// A marker for types which can be used as types of `const` generic parameters.
///
/// These types must have a proper equivalence relation (`Eq`) and it must be automatically
/// derived (`StructuralPartialEq`). There's a hard-coded check in the compiler ensuring
/// that all fields are also `ConstParamTy`, which implies that recursively, all fields
/// are `StructuralPartialEq`.
#[lang = "const_param_ty"]
#[unstable(feature = "adt_const_params", issue = "95174")]
#[rustc_on_unimplemented(message = "`{Self}` can't be used as a const parameter type")]
#[allow(multiple_supertrait_upcastable)]
#[cfg(bootstrap)]
pub trait ConstParamTy: StructuralEq + StructuralPartialEq + Eq {} pub trait ConstParamTy: StructuralEq + StructuralPartialEq + Eq {}
/// Derive macro generating an impl of the trait `ConstParamTy`. /// Derive macro generating an impl of the trait `ConstParamTy`.
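For context, a hedged sketch (not part of this diff) of what `ConstParamTy` enables on nightly: deriving it lets a user-defined type appear as the type of a `const` generic parameter.

#![feature(adt_const_params)]
#![allow(incomplete_features)]

use std::marker::ConstParamTy;

#[derive(PartialEq, Eq, ConstParamTy)]
struct Mode {
    verbose: bool,
}

// `Mode` can now be used as the type of a const generic parameter.
fn run<const M: Mode>() -> bool {
    M.verbose
}

fn main() {
    assert!(run::<{ Mode { verbose: true } }>());
}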


@ -1374,26 +1374,59 @@ macro_rules! int_impl {
#[rustc_const_stable(feature = "const_int_pow", since = "1.50.0")] #[rustc_const_stable(feature = "const_int_pow", since = "1.50.0")]
#[must_use = "this returns the result of the operation, \ #[must_use = "this returns the result of the operation, \
without modifying the original"] without modifying the original"]
#[rustc_allow_const_fn_unstable(is_val_statically_known, const_int_unchecked_arith)]
#[inline] #[inline]
pub const fn checked_pow(self, mut exp: u32) -> Option<Self> { pub const fn checked_pow(self, mut exp: u32) -> Option<Self> {
if exp == 0 { // SAFETY: This path has the same behavior as the other.
return Some(1); if unsafe { intrinsics::is_val_statically_known(self) }
} && self.unsigned_abs().is_power_of_two()
let mut base = self; {
let mut acc: Self = 1; if self == 1 { // Avoid divide by zero
return Some(1);
while exp > 1 {
if (exp & 1) == 1 {
acc = try_opt!(acc.checked_mul(base));
} }
exp /= 2; if self == -1 { // Avoid divide by zero
base = try_opt!(base.checked_mul(base)); return Some(if exp & 1 != 0 { -1 } else { 1 });
}
// SAFETY: We just checked this is a power of two. and above zero.
let power_used = unsafe { intrinsics::cttz_nonzero(self.wrapping_abs()) as u32 };
if exp > Self::BITS / power_used { return None; } // Division of constants is free
// SAFETY: exp <= Self::BITS / power_used
let res = unsafe { intrinsics::unchecked_shl(
1 as Self,
intrinsics::unchecked_mul(power_used, exp) as Self
)};
// LLVM doesn't always optimize out the checks
// at the ir level.
let sign = self.is_negative() && exp & 1 != 0;
if !sign && res == Self::MIN {
None
} else if sign {
Some(res.wrapping_neg())
} else {
Some(res)
}
} else {
if exp == 0 {
return Some(1);
}
let mut base = self;
let mut acc: Self = 1;
while exp > 1 {
if (exp & 1) == 1 {
acc = try_opt!(acc.checked_mul(base));
}
exp /= 2;
base = try_opt!(base.checked_mul(base));
}
// since exp!=0, finally the exp must be 1.
// Deal with the final bit of the exponent separately, since
// squaring the base afterwards is not necessary and may cause a
// needless overflow.
acc.checked_mul(base)
} }
// since exp!=0, finally the exp must be 1.
// Deal with the final bit of the exponent separately, since
// squaring the base afterwards is not necessary and may cause a
// needless overflow.
acc.checked_mul(base)
} }
/// Strict exponentiation. Computes `self.pow(exp)`, panicking if /// Strict exponentiation. Computes `self.pow(exp)`, panicking if
@ -2058,27 +2091,58 @@ macro_rules! int_impl {
#[rustc_const_stable(feature = "const_int_pow", since = "1.50.0")] #[rustc_const_stable(feature = "const_int_pow", since = "1.50.0")]
#[must_use = "this returns the result of the operation, \ #[must_use = "this returns the result of the operation, \
without modifying the original"] without modifying the original"]
#[rustc_allow_const_fn_unstable(is_val_statically_known, const_int_unchecked_arith)]
#[inline] #[inline]
pub const fn wrapping_pow(self, mut exp: u32) -> Self { pub const fn wrapping_pow(self, mut exp: u32) -> Self {
if exp == 0 { // SAFETY: This path has the same behavior as the other.
return 1; if unsafe { intrinsics::is_val_statically_known(self) }
} && self.unsigned_abs().is_power_of_two()
let mut base = self; {
let mut acc: Self = 1; if self == 1 { // Avoid divide by zero
return 1;
while exp > 1 {
if (exp & 1) == 1 {
acc = acc.wrapping_mul(base);
} }
exp /= 2; if self == -1 { // Avoid divide by zero
base = base.wrapping_mul(base); return if exp & 1 != 0 { -1 } else { 1 };
} }
// SAFETY: We just checked this is a power of two. and above zero.
let power_used = unsafe { intrinsics::cttz_nonzero(self.wrapping_abs()) as u32 };
if exp > Self::BITS / power_used { return 0; } // Division of constants is free
// since exp!=0, finally the exp must be 1. // SAFETY: exp <= Self::BITS / power_used
// Deal with the final bit of the exponent separately, since let res = unsafe { intrinsics::unchecked_shl(
// squaring the base afterwards is not necessary and may cause a 1 as Self,
// needless overflow. intrinsics::unchecked_mul(power_used, exp) as Self
acc.wrapping_mul(base) )};
// LLVM doesn't always optimize out the checks
// at the ir level.
let sign = self.is_negative() && exp & 1 != 0;
if sign {
res.wrapping_neg()
} else {
res
}
} else {
if exp == 0 {
return 1;
}
let mut base = self;
let mut acc: Self = 1;
while exp > 1 {
if (exp & 1) == 1 {
acc = acc.wrapping_mul(base);
}
exp /= 2;
base = base.wrapping_mul(base);
}
// since exp!=0, finally the exp must be 1.
// Deal with the final bit of the exponent separately, since
// squaring the base afterwards is not necessary and may cause a
// needless overflow.
acc.wrapping_mul(base)
}
} }
/// Calculates `self` + `rhs` /// Calculates `self` + `rhs`
@ -2561,36 +2625,68 @@ macro_rules! int_impl {
#[rustc_const_stable(feature = "const_int_pow", since = "1.50.0")] #[rustc_const_stable(feature = "const_int_pow", since = "1.50.0")]
#[must_use = "this returns the result of the operation, \ #[must_use = "this returns the result of the operation, \
without modifying the original"] without modifying the original"]
#[rustc_allow_const_fn_unstable(is_val_statically_known, const_int_unchecked_arith)]
#[inline] #[inline]
pub const fn overflowing_pow(self, mut exp: u32) -> (Self, bool) { pub const fn overflowing_pow(self, mut exp: u32) -> (Self, bool) {
if exp == 0 { // SAFETY: This path has the same behavior as the other.
return (1,false); if unsafe { intrinsics::is_val_statically_known(self) }
} && self.unsigned_abs().is_power_of_two()
let mut base = self; {
let mut acc: Self = 1; if self == 1 { // Avoid divide by zero
let mut overflown = false; return (1, false);
// Scratch space for storing results of overflowing_mul. }
let mut r; if self == -1 { // Avoid divide by zero
return (if exp & 1 != 0 { -1 } else { 1 }, false);
}
// SAFETY: We just checked this is a power of two, and therefore above zero.
let power_used = unsafe { intrinsics::cttz_nonzero(self.wrapping_abs()) as u32 };
if exp > Self::BITS / power_used { return (0, true); } // Division of constants is free
while exp > 1 { // SAFETY: exp <= Self::BITS / power_used
if (exp & 1) == 1 { let res = unsafe { intrinsics::unchecked_shl(
r = acc.overflowing_mul(base); 1 as Self,
acc = r.0; intrinsics::unchecked_mul(power_used, exp) as Self
)};
// LLVM doesn't always optimize out the checks
// at the ir level.
let sign = self.is_negative() && exp & 1 != 0;
let overflow = res == Self::MIN;
if sign {
(res.wrapping_neg(), overflow)
} else {
(res, overflow)
}
} else {
if exp == 0 {
return (1,false);
}
let mut base = self;
let mut acc: Self = 1;
let mut overflown = false;
// Scratch space for storing results of overflowing_mul.
let mut r;
while exp > 1 {
if (exp & 1) == 1 {
r = acc.overflowing_mul(base);
acc = r.0;
overflown |= r.1;
}
exp /= 2;
r = base.overflowing_mul(base);
base = r.0;
overflown |= r.1; overflown |= r.1;
} }
exp /= 2;
r = base.overflowing_mul(base);
base = r.0;
overflown |= r.1;
}
// since exp!=0, finally the exp must be 1. // since exp!=0, finally the exp must be 1.
// Deal with the final bit of the exponent separately, since // Deal with the final bit of the exponent separately, since
// squaring the base afterwards is not necessary and may cause a // squaring the base afterwards is not necessary and may cause a
// needless overflow. // needless overflow.
r = acc.overflowing_mul(base); r = acc.overflowing_mul(base);
r.1 |= overflown; r.1 |= overflown;
r r
}
} }
/// Raises self to the power of `exp`, using exponentiation by squaring. /// Raises self to the power of `exp`, using exponentiation by squaring.
@ -2608,28 +2704,68 @@ macro_rules! int_impl {
#[rustc_const_stable(feature = "const_int_pow", since = "1.50.0")] #[rustc_const_stable(feature = "const_int_pow", since = "1.50.0")]
#[must_use = "this returns the result of the operation, \ #[must_use = "this returns the result of the operation, \
without modifying the original"] without modifying the original"]
#[rustc_allow_const_fn_unstable(is_val_statically_known, const_int_unchecked_arith)]
#[inline] #[inline]
#[rustc_inherit_overflow_checks] #[rustc_inherit_overflow_checks]
#[track_caller] // Hides the hackish overflow check for powers of two.
pub const fn pow(self, mut exp: u32) -> Self { pub const fn pow(self, mut exp: u32) -> Self {
if exp == 0 { // SAFETY: This path has the same behavior as the other.
return 1; if unsafe { intrinsics::is_val_statically_known(self) }
} && self.unsigned_abs().is_power_of_two()
let mut base = self; {
let mut acc = 1; if self == 1 { // Avoid divide by zero
return 1;
while exp > 1 { }
if (exp & 1) == 1 { if self == -1 { // Avoid divide by zero
acc = acc * base; return if exp & 1 != 0 { -1 } else { 1 };
}
// SAFETY: We just checked this is a power of two, and therefore above zero.
let power_used = unsafe { intrinsics::cttz_nonzero(self.wrapping_abs()) as u32 };
if exp > Self::BITS / power_used { // Division of constants is free
#[allow(arithmetic_overflow)]
return Self::MAX * Self::MAX * 0;
} }
exp /= 2;
base = base * base;
}
// since exp!=0, finally the exp must be 1. // SAFETY: exp <= Self::BITS / power_used
// Deal with the final bit of the exponent separately, since let res = unsafe { intrinsics::unchecked_shl(
// squaring the base afterwards is not necessary and may cause a 1 as Self,
// needless overflow. intrinsics::unchecked_mul(power_used, exp) as Self
acc * base )};
// LLVM doesn't always optimize out the checks
// at the ir level.
let sign = self.is_negative() && exp & 1 != 0;
#[allow(arithmetic_overflow)]
if !sign && res == Self::MIN {
// So it panics.
_ = Self::MAX * Self::MAX;
}
if sign {
res.wrapping_neg()
} else {
res
}
} else {
if exp == 0 {
return 1;
}
let mut base = self;
let mut acc = 1;
while exp > 1 {
if (exp & 1) == 1 {
acc = acc * base;
}
exp /= 2;
base = base * base;
}
// since exp!=0, finally the exp must be 1.
// Deal with the final bit of the exponent separately, since
// squaring the base afterwards is not necessary and may cause a
// needless overflow.
acc * base
}
} }
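Some illustrative checks (not from this diff) that the signed power-of-two fast path must agree with plain exponentiation by squaring, including negative bases and the `i32::MIN` edge:

fn main() {
    assert_eq!((-2i32).pow(5), -32);
    assert_eq!(2i32.checked_pow(30), Some(1 << 30));
    assert_eq!(2i32.checked_pow(31), None); // 2^31 does not fit in i32
    assert_eq!((-2i32).checked_pow(31), Some(i32::MIN)); // -2^31 does fit
    assert_eq!(2i32.wrapping_pow(32), 0);
    assert_eq!(2i32.overflowing_pow(32), (0, true));
}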
/// Returns the square root of the number, rounded down. /// Returns the square root of the number, rounded down.


@ -288,6 +288,43 @@ macro_rules! nonzero_integer {
unsafe { intrinsics::cttz_nonzero(self.get() as $UnsignedPrimitive) as u32 } unsafe { intrinsics::cttz_nonzero(self.get() as $UnsignedPrimitive) as u32 }
} }
/// Returns the number of ones in the binary representation of `self`.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// #![feature(non_zero_count_ones)]
/// # fn main() { test().unwrap(); }
/// # fn test() -> Option<()> {
#[doc = concat!("# use std::num::{self, ", stringify!($Ty), "};")]
///
/// let one = num::NonZeroU32::new(1)?;
/// let three = num::NonZeroU32::new(3)?;
#[doc = concat!("let a = ", stringify!($Ty), "::new(0b100_0000)?;")]
#[doc = concat!("let b = ", stringify!($Ty), "::new(0b100_0011)?;")]
///
/// assert_eq!(a.count_ones(), one);
/// assert_eq!(b.count_ones(), three);
/// # Some(())
/// # }
/// ```
///
#[unstable(feature = "non_zero_count_ones", issue = "120287")]
#[rustc_const_unstable(feature = "non_zero_count_ones", issue = "120287")]
#[doc(alias = "popcount")]
#[doc(alias = "popcnt")]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline(always)]
pub const fn count_ones(self) -> NonZeroU32 {
// SAFETY:
// `self` is non-zero, which means it has at least one bit set, which means
// that the result of `count_ones` is non-zero.
unsafe { NonZeroU32::new_unchecked(self.get().count_ones()) }
}
nonzero_integer_signedness_dependent_methods! { nonzero_integer_signedness_dependent_methods! {
Self = $Ty, Self = $Ty,
Primitive = $signedness $Int, Primitive = $signedness $Int,


@ -1364,28 +1364,49 @@ macro_rules! uint_impl {
#[rustc_const_stable(feature = "const_int_pow", since = "1.50.0")] #[rustc_const_stable(feature = "const_int_pow", since = "1.50.0")]
#[must_use = "this returns the result of the operation, \ #[must_use = "this returns the result of the operation, \
without modifying the original"] without modifying the original"]
#[rustc_allow_const_fn_unstable(is_val_statically_known, const_int_unchecked_arith)]
#[inline] #[inline]
pub const fn checked_pow(self, mut exp: u32) -> Option<Self> { pub const fn checked_pow(self, mut exp: u32) -> Option<Self> {
if exp == 0 { // SAFETY: This path has the same behavior as the other.
return Some(1); if unsafe { intrinsics::is_val_statically_known(self) }
} && self.is_power_of_two()
let mut base = self; {
let mut acc: Self = 1; if self == 1 { // Avoid divide by zero
return Some(1);
while exp > 1 {
if (exp & 1) == 1 {
acc = try_opt!(acc.checked_mul(base));
} }
exp /= 2; // SAFETY: We just checked this is a power of two. and above zero.
base = try_opt!(base.checked_mul(base)); let power_used = unsafe { intrinsics::cttz_nonzero(self) as u32 };
if exp > Self::BITS / power_used { return None; } // Division of constants is free
// SAFETY: exp <= Self::BITS / power_used
unsafe { Some(intrinsics::unchecked_shl(
1 as Self,
intrinsics::unchecked_mul(power_used, exp) as Self
)) }
// LLVM doesn't always optimize out the checks
// at the ir level.
} else {
if exp == 0 {
return Some(1);
}
let mut base = self;
let mut acc: Self = 1;
while exp > 1 {
if (exp & 1) == 1 {
acc = try_opt!(acc.checked_mul(base));
}
exp /= 2;
base = try_opt!(base.checked_mul(base));
}
// since exp!=0, finally the exp must be 1.
// Deal with the final bit of the exponent separately, since
// squaring the base afterwards is not necessary and may cause a
// needless overflow.
acc.checked_mul(base)
} }
// since exp!=0, finally the exp must be 1.
// Deal with the final bit of the exponent separately, since
// squaring the base afterwards is not necessary and may cause a
// needless overflow.
acc.checked_mul(base)
} }
/// Strict exponentiation. Computes `self.pow(exp)`, panicking if /// Strict exponentiation. Computes `self.pow(exp)`, panicking if
@ -1887,27 +1908,48 @@ macro_rules! uint_impl {
#[rustc_const_stable(feature = "const_int_pow", since = "1.50.0")] #[rustc_const_stable(feature = "const_int_pow", since = "1.50.0")]
#[must_use = "this returns the result of the operation, \ #[must_use = "this returns the result of the operation, \
without modifying the original"] without modifying the original"]
#[rustc_allow_const_fn_unstable(is_val_statically_known, const_int_unchecked_arith)]
#[inline] #[inline]
pub const fn wrapping_pow(self, mut exp: u32) -> Self { pub const fn wrapping_pow(self, mut exp: u32) -> Self {
if exp == 0 { // SAFETY: This path has the same behavior as the other.
return 1; if unsafe { intrinsics::is_val_statically_known(self) }
} && self.is_power_of_two()
let mut base = self; {
let mut acc: Self = 1; if self == 1 { // Avoid divide by zero
return 1;
while exp > 1 {
if (exp & 1) == 1 {
acc = acc.wrapping_mul(base);
} }
exp /= 2; // SAFETY: We just checked this is a power of two. and above zero.
base = base.wrapping_mul(base); let power_used = unsafe { intrinsics::cttz_nonzero(self) as u32 };
} if exp > Self::BITS / power_used { return 0; } // Division of constants is free
// since exp!=0, finally the exp must be 1. // SAFETY: exp <= Self::BITS / power_used
// Deal with the final bit of the exponent separately, since unsafe { intrinsics::unchecked_shl(
// squaring the base afterwards is not necessary and may cause a 1 as Self,
// needless overflow. intrinsics::unchecked_mul(power_used, exp) as Self
acc.wrapping_mul(base) )}
// LLVM doesn't always optimize out the checks
// at the ir level.
} else {
if exp == 0 {
return 1;
}
let mut base = self;
let mut acc: Self = 1;
while exp > 1 {
if (exp & 1) == 1 {
acc = acc.wrapping_mul(base);
}
exp /= 2;
base = base.wrapping_mul(base);
}
// since exp!=0, finally the exp must be 1.
// Deal with the final bit of the exponent separately, since
// squaring the base afterwards is not necessary and may cause a
// needless overflow.
acc.wrapping_mul(base)
}
} }
/// Calculates `self` + `rhs` /// Calculates `self` + `rhs`
@ -2341,37 +2383,58 @@ macro_rules! uint_impl {
#[rustc_const_stable(feature = "const_int_pow", since = "1.50.0")] #[rustc_const_stable(feature = "const_int_pow", since = "1.50.0")]
#[must_use = "this returns the result of the operation, \ #[must_use = "this returns the result of the operation, \
without modifying the original"] without modifying the original"]
#[rustc_allow_const_fn_unstable(is_val_statically_known, const_int_unchecked_arith)]
#[inline] #[inline]
pub const fn overflowing_pow(self, mut exp: u32) -> (Self, bool) { pub const fn overflowing_pow(self, mut exp: u32) -> (Self, bool) {
if exp == 0{ // SAFETY: This path has the same behavior as the other.
return (1,false); if unsafe { intrinsics::is_val_statically_known(self) }
} && self.is_power_of_two()
let mut base = self; {
let mut acc: Self = 1; if self == 1 { // Avoid divide by zero
let mut overflown = false; return (1, false);
// Scratch space for storing results of overflowing_mul. }
let mut r; // SAFETY: We just checked this is a power of two. and above zero.
let power_used = unsafe { intrinsics::cttz_nonzero(self) as u32 };
if exp > Self::BITS / power_used { return (0, true); } // Division of constants is free
while exp > 1 { // SAFETY: exp <= Self::BITS / power_used
if (exp & 1) == 1 { unsafe { (intrinsics::unchecked_shl(
r = acc.overflowing_mul(base); 1 as Self,
acc = r.0; intrinsics::unchecked_mul(power_used, exp) as Self
), false) }
// LLVM doesn't always optimize out the checks
// at the ir level.
} else {
if exp == 0{
return (1,false);
}
let mut base = self;
let mut acc: Self = 1;
let mut overflown = false;
// Scratch space for storing results of overflowing_mul.
let mut r;
while exp > 1 {
if (exp & 1) == 1 {
r = acc.overflowing_mul(base);
acc = r.0;
overflown |= r.1;
}
exp /= 2;
r = base.overflowing_mul(base);
base = r.0;
overflown |= r.1; overflown |= r.1;
} }
exp /= 2;
r = base.overflowing_mul(base); // since exp!=0, finally the exp must be 1.
base = r.0; // Deal with the final bit of the exponent separately, since
overflown |= r.1; // squaring the base afterwards is not necessary and may cause a
// needless overflow.
r = acc.overflowing_mul(base);
r.1 |= overflown;
r
} }
// since exp!=0, finally the exp must be 1.
// Deal with the final bit of the exponent separately, since
// squaring the base afterwards is not necessary and may cause a
// needless overflow.
r = acc.overflowing_mul(base);
r.1 |= overflown;
r
} }
/// Raises self to the power of `exp`, using exponentiation by squaring. /// Raises self to the power of `exp`, using exponentiation by squaring.
@ -2387,28 +2450,64 @@ macro_rules! uint_impl {
#[rustc_const_stable(feature = "const_int_pow", since = "1.50.0")] #[rustc_const_stable(feature = "const_int_pow", since = "1.50.0")]
#[must_use = "this returns the result of the operation, \ #[must_use = "this returns the result of the operation, \
without modifying the original"] without modifying the original"]
#[rustc_allow_const_fn_unstable(is_val_statically_known, const_int_unchecked_arith)]
#[inline] #[inline]
#[rustc_inherit_overflow_checks] #[rustc_inherit_overflow_checks]
#[track_caller] // Hides the hackish overflow check for powers of two.
pub const fn pow(self, mut exp: u32) -> Self { pub const fn pow(self, mut exp: u32) -> Self {
if exp == 0 { // LLVM now knows that `self` is a constant value, but not a
return 1; // constant in Rust. This allows us to compute the power used at
} // compile-time.
let mut base = self; //
let mut acc = 1; // This will likely add a branch in debug builds, but this should
// be ok.
while exp > 1 { //
if (exp & 1) == 1 { // This is a massive performance boost in release builds as you can
acc = acc * base; // get the power of a power of two and the exponent through a `shl`
// instruction, but we must add a couple more checks for parity with
// our own `pow`.
// SAFETY: This path has the same behavior as the other.
if unsafe { intrinsics::is_val_statically_known(self) }
&& self.is_power_of_two()
{
if self == 1 { // Avoid divide by zero
return 1;
}
// SAFETY: We just checked this is a power of two, and therefore above zero.
let power_used = unsafe { intrinsics::cttz_nonzero(self) as u32 };
if exp > Self::BITS / power_used { // Division of constants is free
#[allow(arithmetic_overflow)]
return Self::MAX * Self::MAX * 0;
} }
exp /= 2;
base = base * base;
}
// since exp!=0, finally the exp must be 1. // SAFETY: exp <= Self::BITS / power_used
// Deal with the final bit of the exponent separately, since unsafe { intrinsics::unchecked_shl(
// squaring the base afterwards is not necessary and may cause a 1 as Self,
// needless overflow. intrinsics::unchecked_mul(power_used, exp) as Self
acc * base )}
// LLVM doesn't always optimize out the checks
// at the ir level.
} else {
if exp == 0 {
return 1;
}
let mut base = self;
let mut acc = 1;
while exp > 1 {
if (exp & 1) == 1 {
acc = acc * base;
}
exp /= 2;
base = base * base;
}
// since exp!=0, finally the exp must be 1.
// Deal with the final bit of the exponent separately, since
// squaring the base afterwards is not necessary and may cause a
// needless overflow.
acc * base
}
} }
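And the unsigned counterpart: illustrative checks (not from this diff) that for a power-of-two base `pow` reduces to a shift, and that the overflow behavior of each variant is preserved:

fn main() {
    assert_eq!(8u32.pow(5), 1u32 << (3 * 5)); // 8 = 2^3, so 8^5 = 2^15
    assert_eq!(1u32.pow(u32::MAX), 1);        // the `self == 1` special case
    assert_eq!(2u32.checked_pow(31), Some(1 << 31));
    assert_eq!(2u32.checked_pow(32), None);   // 2^32 does not fit in u32
    assert_eq!(2u32.wrapping_pow(33), 0);
    assert_eq!(2u32.overflowing_pow(33), (0, true));
}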
/// Returns the square root of the number, rounded down. /// Returns the square root of the number, rounded down.


@ -0,0 +1,108 @@
use crate::future::Future;
use crate::marker::Tuple;
/// An async-aware version of the [`Fn`](crate::ops::Fn) trait.
///
/// All `async fn` and functions returning futures implement this trait.
#[unstable(feature = "async_fn_traits", issue = "none")]
#[rustc_paren_sugar]
#[fundamental]
#[must_use = "async closures are lazy and do nothing unless called"]
#[cfg_attr(not(bootstrap), lang = "async_fn")]
pub trait AsyncFn<Args: Tuple>: AsyncFnMut<Args> {
/// Future returned by [`AsyncFn::async_call`].
#[unstable(feature = "async_fn_traits", issue = "none")]
type CallFuture<'a>: Future<Output = Self::Output>
where
Self: 'a;
/// Call the [`AsyncFn`], returning a future which may borrow from the called closure.
#[unstable(feature = "async_fn_traits", issue = "none")]
extern "rust-call" fn async_call(&self, args: Args) -> Self::CallFuture<'_>;
}
/// An async-aware version of the [`FnMut`](crate::ops::FnMut) trait.
///
/// All `async fn` and functions returning futures implement this trait.
#[unstable(feature = "async_fn_traits", issue = "none")]
#[rustc_paren_sugar]
#[fundamental]
#[must_use = "async closures are lazy and do nothing unless called"]
#[cfg_attr(not(bootstrap), lang = "async_fn_mut")]
pub trait AsyncFnMut<Args: Tuple>: AsyncFnOnce<Args> {
/// Future returned by [`AsyncFnMut::async_call_mut`].
#[unstable(feature = "async_fn_traits", issue = "none")]
type CallMutFuture<'a>: Future<Output = Self::Output>
where
Self: 'a;
/// Call the [`AsyncFnMut`], returning a future which may borrow from the called closure.
#[unstable(feature = "async_fn_traits", issue = "none")]
extern "rust-call" fn async_call_mut(&mut self, args: Args) -> Self::CallMutFuture<'_>;
}
/// An async-aware version of the [`FnOnce`](crate::ops::FnOnce) trait.
///
/// All `async fn` and functions returning futures implement this trait.
#[unstable(feature = "async_fn_traits", issue = "none")]
#[rustc_paren_sugar]
#[fundamental]
#[must_use = "async closures are lazy and do nothing unless called"]
#[cfg_attr(not(bootstrap), lang = "async_fn_once")]
pub trait AsyncFnOnce<Args: Tuple> {
/// Future returned by [`AsyncFnOnce::async_call_once`].
#[unstable(feature = "async_fn_traits", issue = "none")]
type CallOnceFuture: Future<Output = Self::Output>;
/// Output type of the called closure's future.
#[unstable(feature = "async_fn_traits", issue = "none")]
type Output;
/// Call the [`AsyncFnOnce`], returning a future which may move out of the called closure.
#[unstable(feature = "async_fn_traits", issue = "none")]
extern "rust-call" fn async_call_once(self, args: Args) -> Self::CallOnceFuture;
}
mod impls {
use super::{AsyncFn, AsyncFnMut, AsyncFnOnce};
use crate::future::Future;
use crate::marker::Tuple;
#[unstable(feature = "async_fn_traits", issue = "none")]
impl<F: Fn<A>, A: Tuple> AsyncFn<A> for F
where
<F as FnOnce<A>>::Output: Future,
{
type CallFuture<'a> = <F as FnOnce<A>>::Output where Self: 'a;
extern "rust-call" fn async_call(&self, args: A) -> Self::CallFuture<'_> {
self.call(args)
}
}
#[unstable(feature = "async_fn_traits", issue = "none")]
impl<F: FnMut<A>, A: Tuple> AsyncFnMut<A> for F
where
<F as FnOnce<A>>::Output: Future,
{
type CallMutFuture<'a> = <F as FnOnce<A>>::Output where Self: 'a;
extern "rust-call" fn async_call_mut(&mut self, args: A) -> Self::CallMutFuture<'_> {
self.call_mut(args)
}
}
#[unstable(feature = "async_fn_traits", issue = "none")]
impl<F: FnOnce<A>, A: Tuple> AsyncFnOnce<A> for F
where
<F as FnOnce<A>>::Output: Future,
{
type CallOnceFuture = <F as FnOnce<A>>::Output;
type Output = <<F as FnOnce<A>>::Output as Future>::Output;
extern "rust-call" fn async_call_once(self, args: A) -> Self::CallOnceFuture {
self.call_once(args)
}
}
}
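A rough, nightly-only sketch (not part of this diff) of the blanket impls above in action: an ordinary closure whose output is a future satisfies an `AsyncFnOnce` bound. The exact set of feature gates is an assumption.

#![feature(async_fn_traits, unboxed_closures)]

use std::ops::AsyncFnOnce;

// Angle-bracket syntax is used here instead of the paren sugar, hence `unboxed_closures`.
fn assert_async_fn_once<F: AsyncFnOnce<(i32,)>>(_: F) {}

fn main() {
    // `|x| async move { x + 1 }` is an `FnOnce` returning a future, so the
    // blanket impl of `AsyncFnOnce` applies.
    assert_async_fn_once(|x: i32| async move { x + 1 });
}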


@ -139,6 +139,7 @@
#![stable(feature = "rust1", since = "1.0.0")] #![stable(feature = "rust1", since = "1.0.0")]
mod arith; mod arith;
mod async_function;
mod bit; mod bit;
mod control_flow; mod control_flow;
mod coroutine; mod coroutine;
@ -173,6 +174,9 @@ pub use self::drop::Drop;
#[stable(feature = "rust1", since = "1.0.0")] #[stable(feature = "rust1", since = "1.0.0")]
pub use self::function::{Fn, FnMut, FnOnce}; pub use self::function::{Fn, FnMut, FnOnce};
#[unstable(feature = "async_fn_traits", issue = "none")]
pub use self::async_function::{AsyncFn, AsyncFnMut, AsyncFnOnce};
#[stable(feature = "rust1", since = "1.0.0")] #[stable(feature = "rust1", since = "1.0.0")]
pub use self::index::{Index, IndexMut}; pub use self::index::{Index, IndexMut};


@ -220,7 +220,7 @@ impl<T: ?Sized> *const T {
/// provenance. (Reconstructing address space information, if required, is your responsibility.) /// provenance. (Reconstructing address space information, if required, is your responsibility.)
/// ///
/// Using this method means that code is *not* following [Strict /// Using this method means that code is *not* following [Strict
/// Provenance][../index.html#strict-provenance] rules. Supporting /// Provenance][super#strict-provenance] rules. Supporting
/// [`from_exposed_addr`][] complicates specification and reasoning and may not be supported by /// [`from_exposed_addr`][] complicates specification and reasoning and may not be supported by
/// tools that help you to stay conformant with the Rust memory model, so it is recommended to /// tools that help you to stay conformant with the Rust memory model, so it is recommended to
/// use [`addr`][pointer::addr] wherever possible. /// use [`addr`][pointer::addr] wherever possible.
@ -232,7 +232,7 @@ impl<T: ?Sized> *const T {
/// available. /// available.
/// ///
/// It is unclear whether this method can be given a satisfying unambiguous specification. This /// It is unclear whether this method can be given a satisfying unambiguous specification. This
/// API and its claimed semantics are part of [Exposed Provenance][../index.html#exposed-provenance]. /// API and its claimed semantics are part of [Exposed Provenance][super#exposed-provenance].
/// ///
/// [`from_exposed_addr`]: from_exposed_addr /// [`from_exposed_addr`]: from_exposed_addr
#[must_use] #[must_use]


@ -649,7 +649,7 @@ pub const fn invalid_mut<T>(addr: usize) -> *mut T {
/// address makes sense in the address space that this pointer will be used with. /// address makes sense in the address space that this pointer will be used with.
/// ///
/// Using this function means that code is *not* following [Strict /// Using this function means that code is *not* following [Strict
/// Provenance][../index.html#strict-provenance] rules. "Guessing" a /// Provenance][self#strict-provenance] rules. "Guessing" a
/// suitable provenance complicates specification and reasoning and may not be supported by /// suitable provenance complicates specification and reasoning and may not be supported by
/// tools that help you to stay conformant with the Rust memory model, so it is recommended to /// tools that help you to stay conformant with the Rust memory model, so it is recommended to
/// use [`with_addr`][pointer::with_addr] wherever possible. /// use [`with_addr`][pointer::with_addr] wherever possible.
@ -660,7 +660,7 @@ pub const fn invalid_mut<T>(addr: usize) -> *mut T {
/// pointer has to pick up. /// pointer has to pick up.
/// ///
/// It is unclear whether this function can be given a satisfying unambiguous specification. This /// It is unclear whether this function can be given a satisfying unambiguous specification. This
/// API and its claimed semantics are part of [Exposed Provenance][../index.html#exposed-provenance]. /// API and its claimed semantics are part of [Exposed Provenance][self#exposed-provenance].
#[must_use] #[must_use]
#[inline(always)] #[inline(always)]
#[unstable(feature = "exposed_provenance", issue = "95228")] #[unstable(feature = "exposed_provenance", issue = "95228")]
@ -689,7 +689,7 @@ where
/// address makes sense in the address space that this pointer will be used with. /// address makes sense in the address space that this pointer will be used with.
/// ///
/// Using this function means that code is *not* following [Strict /// Using this function means that code is *not* following [Strict
/// Provenance][../index.html#strict-provenance] rules. "Guessing" a /// Provenance][self#strict-provenance] rules. "Guessing" a
/// suitable provenance complicates specification and reasoning and may not be supported by /// suitable provenance complicates specification and reasoning and may not be supported by
/// tools that help you to stay conformant with the Rust memory model, so it is recommended to /// tools that help you to stay conformant with the Rust memory model, so it is recommended to
/// use [`with_addr`][pointer::with_addr] wherever possible. /// use [`with_addr`][pointer::with_addr] wherever possible.
@ -700,7 +700,7 @@ where
/// pointer has to pick up. /// pointer has to pick up.
/// ///
/// It is unclear whether this function can be given a satisfying unambiguous specification. This /// It is unclear whether this function can be given a satisfying unambiguous specification. This
/// API and its claimed semantics are part of [Exposed Provenance][../index.html#exposed-provenance]. /// API and its claimed semantics are part of [Exposed Provenance][self#exposed-provenance].
#[must_use] #[must_use]
#[inline(always)] #[inline(always)]
#[unstable(feature = "exposed_provenance", issue = "95228")] #[unstable(feature = "exposed_provenance", issue = "95228")]


@ -227,7 +227,7 @@ impl<T: ?Sized> *mut T {
/// provenance. (Reconstructing address space information, if required, is your responsibility.) /// provenance. (Reconstructing address space information, if required, is your responsibility.)
/// ///
/// Using this method means that code is *not* following [Strict /// Using this method means that code is *not* following [Strict
/// Provenance][../index.html#strict-provenance] rules. Supporting /// Provenance][super#strict-provenance] rules. Supporting
/// [`from_exposed_addr_mut`][] complicates specification and reasoning and may not be supported /// [`from_exposed_addr_mut`][] complicates specification and reasoning and may not be supported
/// by tools that help you to stay conformant with the Rust memory model, so it is recommended /// by tools that help you to stay conformant with the Rust memory model, so it is recommended
/// to use [`addr`][pointer::addr] wherever possible. /// to use [`addr`][pointer::addr] wherever possible.
@ -239,7 +239,7 @@ impl<T: ?Sized> *mut T {
/// available. /// available.
/// ///
/// It is unclear whether this method can be given a satisfying unambiguous specification. This /// It is unclear whether this method can be given a satisfying unambiguous specification. This
/// API and its claimed semantics are part of [Exposed Provenance][../index.html#exposed-provenance]. /// API and its claimed semantics are part of [Exposed Provenance][super#exposed-provenance].
/// ///
/// [`from_exposed_addr_mut`]: from_exposed_addr_mut /// [`from_exposed_addr_mut`]: from_exposed_addr_mut
#[must_use] #[must_use]
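For reference, a hedged sketch (not from this diff) of the Exposed Provenance round trip these docs now link to; the `exposed_provenance` gate name matches the attributes above, but treat the exact API spelling on this nightly as an assumption.

#![feature(exposed_provenance)]

fn main() {
    let x = 42u32;
    let p: *const u32 = &x;

    // Expose the pointer's provenance and keep only its address.
    let addr: usize = p.expose_addr();

    // Later, reconstruct a pointer that picks up some previously exposed provenance.
    let q = std::ptr::from_exposed_addr::<u32>(addr);

    // SAFETY: `q` addresses `x`, which is still live and initialized.
    assert_eq!(unsafe { *q }, 42);
}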


@ -2,7 +2,7 @@
use crate::cmp::Ordering::{self, *}; use crate::cmp::Ordering::{self, *};
use crate::marker::ConstParamTy; use crate::marker::ConstParamTy;
use crate::marker::{StructuralEq, StructuralPartialEq}; use crate::marker::StructuralPartialEq;
// Recursive macro for implementing n-ary tuple functions and operations // Recursive macro for implementing n-ary tuple functions and operations
// //
@ -64,7 +64,8 @@ macro_rules! tuple_impls {
maybe_tuple_doc! { maybe_tuple_doc! {
$($T)+ @ $($T)+ @
#[unstable(feature = "structural_match", issue = "31434")] #[unstable(feature = "structural_match", issue = "31434")]
impl<$($T),+> StructuralEq for ($($T,)+) #[cfg(bootstrap)]
impl<$($T),+> crate::marker::StructuralEq for ($($T,)+)
{} {}
} }


@ -85,6 +85,9 @@ pub mod linux;
#[cfg(any(target_os = "wasi", doc))] #[cfg(any(target_os = "wasi", doc))]
pub mod wasi; pub mod wasi;
#[cfg(any(all(target_os = "wasi", target_env = "preview2"), doc))]
pub mod wasi_preview2;
// windows // windows
#[cfg(not(all( #[cfg(not(all(
doc, doc,


@ -28,7 +28,8 @@
//! [`OsStr`]: crate::ffi::OsStr //! [`OsStr`]: crate::ffi::OsStr
//! [`OsString`]: crate::ffi::OsString //! [`OsString`]: crate::ffi::OsString
#![stable(feature = "rust1", since = "1.0.0")] #![cfg_attr(not(target_env = "preview2"), stable(feature = "rust1", since = "1.0.0"))]
#![cfg_attr(target_env = "preview2", unstable(feature = "wasm_preview2", issue = "none"))]
#![deny(unsafe_op_in_unsafe_fn)] #![deny(unsafe_op_in_unsafe_fn)]
#![doc(cfg(target_os = "wasi"))] #![doc(cfg(target_os = "wasi"))]


@ -0,0 +1,5 @@
//! Platform-specific extensions to `std` for Preview 2 of the WebAssembly System Interface (WASI).
//!
//! This module is currently empty, but will be filled over time as wasi-libc support for WASI Preview 2 is stabilized.
#![stable(feature = "raw_ext", since = "1.1.0")]


@ -40,6 +40,9 @@ cfg_if::cfg_if! {
} else if #[cfg(target_os = "wasi")] { } else if #[cfg(target_os = "wasi")] {
mod wasi; mod wasi;
pub use self::wasi::*; pub use self::wasi::*;
} else if #[cfg(all(target_os = "wasi", target_env = "preview2"))] {
mod wasi_preview2;
pub use self::wasi_preview2::*;
} else if #[cfg(target_family = "wasm")] { } else if #[cfg(target_family = "wasm")] {
mod wasm; mod wasm;
pub use self::wasm::*; pub use self::wasm::*;


@ -364,7 +364,4 @@ pub mod netc {
pub sin6_flowinfo: u32, pub sin6_flowinfo: u32,
pub sin6_scope_id: u32, pub sin6_scope_id: u32,
} }
#[derive(Copy, Clone)]
pub struct sockaddr {}
} }


@ -0,0 +1,123 @@
use crate::io as std_io;
use crate::mem;
#[inline]
pub fn is_interrupted(errno: i32) -> bool {
errno == wasi::ERRNO_INTR.raw().into()
}
pub fn decode_error_kind(errno: i32) -> std_io::ErrorKind {
use std_io::ErrorKind;
let Ok(errno) = u16::try_from(errno) else {
return ErrorKind::Uncategorized;
};
macro_rules! match_errno {
($($($errno:ident)|+ => $errkind:ident),*, _ => $wildcard:ident $(,)?) => {
match errno {
$(e if $(e == ::wasi::$errno.raw())||+ => ErrorKind::$errkind),*,
_ => ErrorKind::$wildcard,
}
};
}
match_errno! {
ERRNO_2BIG => ArgumentListTooLong,
ERRNO_ACCES => PermissionDenied,
ERRNO_ADDRINUSE => AddrInUse,
ERRNO_ADDRNOTAVAIL => AddrNotAvailable,
ERRNO_AFNOSUPPORT => Unsupported,
ERRNO_AGAIN => WouldBlock,
// ALREADY => "connection already in progress",
// BADF => "bad file descriptor",
// BADMSG => "bad message",
ERRNO_BUSY => ResourceBusy,
// CANCELED => "operation canceled",
// CHILD => "no child processes",
ERRNO_CONNABORTED => ConnectionAborted,
ERRNO_CONNREFUSED => ConnectionRefused,
ERRNO_CONNRESET => ConnectionReset,
ERRNO_DEADLK => Deadlock,
// DESTADDRREQ => "destination address required",
ERRNO_DOM => InvalidInput,
// DQUOT => /* reserved */,
ERRNO_EXIST => AlreadyExists,
// FAULT => "bad address",
ERRNO_FBIG => FileTooLarge,
ERRNO_HOSTUNREACH => HostUnreachable,
// IDRM => "identifier removed",
// ILSEQ => "illegal byte sequence",
// INPROGRESS => "operation in progress",
ERRNO_INTR => Interrupted,
ERRNO_INVAL => InvalidInput,
ERRNO_IO => Uncategorized,
// ISCONN => "socket is connected",
ERRNO_ISDIR => IsADirectory,
ERRNO_LOOP => FilesystemLoop,
// MFILE => "file descriptor value too large",
ERRNO_MLINK => TooManyLinks,
// MSGSIZE => "message too large",
// MULTIHOP => /* reserved */,
ERRNO_NAMETOOLONG => InvalidFilename,
ERRNO_NETDOWN => NetworkDown,
// NETRESET => "connection aborted by network",
ERRNO_NETUNREACH => NetworkUnreachable,
// NFILE => "too many files open in system",
// NOBUFS => "no buffer space available",
ERRNO_NODEV => NotFound,
ERRNO_NOENT => NotFound,
// NOEXEC => "executable file format error",
// NOLCK => "no locks available",
// NOLINK => /* reserved */,
ERRNO_NOMEM => OutOfMemory,
// NOMSG => "no message of the desired type",
// NOPROTOOPT => "protocol not available",
ERRNO_NOSPC => StorageFull,
ERRNO_NOSYS => Unsupported,
ERRNO_NOTCONN => NotConnected,
ERRNO_NOTDIR => NotADirectory,
ERRNO_NOTEMPTY => DirectoryNotEmpty,
// NOTRECOVERABLE => "state not recoverable",
// NOTSOCK => "not a socket",
ERRNO_NOTSUP => Unsupported,
// NOTTY => "inappropriate I/O control operation",
ERRNO_NXIO => NotFound,
// OVERFLOW => "value too large to be stored in data type",
// OWNERDEAD => "previous owner died",
ERRNO_PERM => PermissionDenied,
ERRNO_PIPE => BrokenPipe,
// PROTO => "protocol error",
ERRNO_PROTONOSUPPORT => Unsupported,
// PROTOTYPE => "protocol wrong type for socket",
// RANGE => "result too large",
ERRNO_ROFS => ReadOnlyFilesystem,
ERRNO_SPIPE => NotSeekable,
ERRNO_SRCH => NotFound,
// STALE => /* reserved */,
ERRNO_TIMEDOUT => TimedOut,
ERRNO_TXTBSY => ResourceBusy,
ERRNO_XDEV => CrossesDevices,
ERRNO_NOTCAPABLE => PermissionDenied,
_ => Uncategorized,
}
}
pub fn abort_internal() -> ! {
unsafe { libc::abort() }
}
pub fn hashmap_random_keys() -> (u64, u64) {
let mut ret = (0u64, 0u64);
unsafe {
let base = &mut ret as *mut (u64, u64) as *mut u8;
let len = mem::size_of_val(&ret);
wasi::random_get(base, len).expect("random_get failure");
}
return ret;
}
#[inline]
pub(crate) fn err2io(err: wasi::Errno) -> std_io::Error {
std_io::Error::from_raw_os_error(err.raw().into())
}
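
For readers skimming this new `helpers.rs`: the `match_errno!` macro above simply expands into a chain of guarded match arms. A hand-written illustration of what a couple of arms become is sketched below; it assumes the `wasi` crate as a dependency, and `ErrorKind::Other` stands in for std's internal `Uncategorized`.

```rust
// Hand-written illustration (not the actual expansion) of two arms of the
// `match_errno!` macro above. Assumes the `wasi` crate is available;
// `ErrorKind::Other` stands in for std's internal `Uncategorized`.
use std::io::ErrorKind;

fn decode_two(errno: u16) -> ErrorKind {
    match errno {
        // A single-constant arm such as `ERRNO_ACCES => PermissionDenied`.
        e if e == wasi::ERRNO_ACCES.raw() => ErrorKind::PermissionDenied,
        // The macro grammar also allows `A | B => kind`, which becomes an
        // or-guard like this one.
        e if e == wasi::ERRNO_NOENT.raw() || e == wasi::ERRNO_NODEV.raw() => {
            ErrorKind::NotFound
        }
        _ => ErrorKind::Other,
    }
}

fn main() {
    assert_eq!(decode_two(wasi::ERRNO_ACCES.raw()), ErrorKind::PermissionDenied);
}
```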

View File

@ -14,9 +14,6 @@
//! compiling for wasm. That way it's a compile time error for something that's //! compiling for wasm. That way it's a compile time error for something that's
//! guaranteed to be a runtime error! //! guaranteed to be a runtime error!
use crate::io as std_io;
use crate::mem;
#[path = "../unix/alloc.rs"] #[path = "../unix/alloc.rs"]
pub mod alloc; pub mod alloc;
pub mod args; pub mod args;
@ -72,123 +69,12 @@ cfg_if::cfg_if! {
mod common; mod common;
pub use common::*; pub use common::*;
#[inline] mod helpers;
pub fn is_interrupted(errno: i32) -> bool { // These exports are listed individually to work around Rust's glob import
errno == wasi::ERRNO_INTR.raw().into() // conflict rules. If we glob export `helpers` and `common` together, then
} // the compiler complains about conflicts.
pub use helpers::abort_internal;
pub fn decode_error_kind(errno: i32) -> std_io::ErrorKind { pub use helpers::decode_error_kind;
use std_io::ErrorKind; use helpers::err2io;
pub use helpers::hashmap_random_keys;
let Ok(errno) = u16::try_from(errno) else { pub use helpers::is_interrupted;
return ErrorKind::Uncategorized;
};
macro_rules! match_errno {
($($($errno:ident)|+ => $errkind:ident),*, _ => $wildcard:ident $(,)?) => {
match errno {
$(e if $(e == ::wasi::$errno.raw())||+ => ErrorKind::$errkind),*,
_ => ErrorKind::$wildcard,
}
};
}
match_errno! {
ERRNO_2BIG => ArgumentListTooLong,
ERRNO_ACCES => PermissionDenied,
ERRNO_ADDRINUSE => AddrInUse,
ERRNO_ADDRNOTAVAIL => AddrNotAvailable,
ERRNO_AFNOSUPPORT => Unsupported,
ERRNO_AGAIN => WouldBlock,
// ALREADY => "connection already in progress",
// BADF => "bad file descriptor",
// BADMSG => "bad message",
ERRNO_BUSY => ResourceBusy,
// CANCELED => "operation canceled",
// CHILD => "no child processes",
ERRNO_CONNABORTED => ConnectionAborted,
ERRNO_CONNREFUSED => ConnectionRefused,
ERRNO_CONNRESET => ConnectionReset,
ERRNO_DEADLK => Deadlock,
// DESTADDRREQ => "destination address required",
ERRNO_DOM => InvalidInput,
// DQUOT => /* reserved */,
ERRNO_EXIST => AlreadyExists,
// FAULT => "bad address",
ERRNO_FBIG => FileTooLarge,
ERRNO_HOSTUNREACH => HostUnreachable,
// IDRM => "identifier removed",
// ILSEQ => "illegal byte sequence",
// INPROGRESS => "operation in progress",
ERRNO_INTR => Interrupted,
ERRNO_INVAL => InvalidInput,
ERRNO_IO => Uncategorized,
// ISCONN => "socket is connected",
ERRNO_ISDIR => IsADirectory,
ERRNO_LOOP => FilesystemLoop,
// MFILE => "file descriptor value too large",
ERRNO_MLINK => TooManyLinks,
// MSGSIZE => "message too large",
// MULTIHOP => /* reserved */,
ERRNO_NAMETOOLONG => InvalidFilename,
ERRNO_NETDOWN => NetworkDown,
// NETRESET => "connection aborted by network",
ERRNO_NETUNREACH => NetworkUnreachable,
// NFILE => "too many files open in system",
// NOBUFS => "no buffer space available",
ERRNO_NODEV => NotFound,
ERRNO_NOENT => NotFound,
// NOEXEC => "executable file format error",
// NOLCK => "no locks available",
// NOLINK => /* reserved */,
ERRNO_NOMEM => OutOfMemory,
// NOMSG => "no message of the desired type",
// NOPROTOOPT => "protocol not available",
ERRNO_NOSPC => StorageFull,
ERRNO_NOSYS => Unsupported,
ERRNO_NOTCONN => NotConnected,
ERRNO_NOTDIR => NotADirectory,
ERRNO_NOTEMPTY => DirectoryNotEmpty,
// NOTRECOVERABLE => "state not recoverable",
// NOTSOCK => "not a socket",
ERRNO_NOTSUP => Unsupported,
// NOTTY => "inappropriate I/O control operation",
ERRNO_NXIO => NotFound,
// OVERFLOW => "value too large to be stored in data type",
// OWNERDEAD => "previous owner died",
ERRNO_PERM => PermissionDenied,
ERRNO_PIPE => BrokenPipe,
// PROTO => "protocol error",
ERRNO_PROTONOSUPPORT => Unsupported,
// PROTOTYPE => "protocol wrong type for socket",
// RANGE => "result too large",
ERRNO_ROFS => ReadOnlyFilesystem,
ERRNO_SPIPE => NotSeekable,
ERRNO_SRCH => NotFound,
// STALE => /* reserved */,
ERRNO_TIMEDOUT => TimedOut,
ERRNO_TXTBSY => ResourceBusy,
ERRNO_XDEV => CrossesDevices,
ERRNO_NOTCAPABLE => PermissionDenied,
_ => Uncategorized,
}
}
pub fn abort_internal() -> ! {
unsafe { libc::abort() }
}
pub fn hashmap_random_keys() -> (u64, u64) {
let mut ret = (0u64, 0u64);
unsafe {
let base = &mut ret as *mut (u64, u64) as *mut u8;
let len = mem::size_of_val(&ret);
wasi::random_get(base, len).expect("random_get failure");
}
return ret;
}
#[inline]
fn err2io(err: wasi::Errno) -> std_io::Error {
std_io::Error::from_raw_os_error(err.raw().into())
}
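
The comment in the replacement `mod.rs` about glob-import conflicts refers to Rust's name-resolution rule that two glob re-exports of the same name are ambiguous at their use sites, while an explicit item import is not. A minimal sketch of that shape (the module bodies here are stand-ins, not the real std code):

```rust
// Minimal illustration of the conflict the comment describes. The module
// bodies are stand-ins; only the import shapes matter.
mod common {
    pub fn abort_internal() -> &'static str { "common" }
}
mod helpers {
    pub fn abort_internal() -> &'static str { "helpers" }
}

pub use common::*;
// A second glob, `pub use helpers::*;`, would make `abort_internal`
// ambiguous wherever it is used, so the item is re-exported by name:
pub use helpers::abort_internal;

fn main() {
    // Explicit imports shadow glob imports, so this resolves to `helpers`.
    println!("{}", abort_internal());
}
```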

View File

@ -0,0 +1,78 @@
//! System bindings for the wasi preview 2 target.
//!
//! This is the next evolution of the original wasi target, and is intended to
//! replace that target over time.
//!
//! To begin with, this target mirrors the wasi target one-to-one, but over
//! time this will change significantly.
#[path = "../unix/alloc.rs"]
pub mod alloc;
#[path = "../wasi/args.rs"]
pub mod args;
#[path = "../unix/cmath.rs"]
pub mod cmath;
#[path = "../wasi/env.rs"]
pub mod env;
#[path = "../wasi/fd.rs"]
pub mod fd;
#[path = "../wasi/fs.rs"]
pub mod fs;
#[allow(unused)]
#[path = "../wasm/atomics/futex.rs"]
pub mod futex;
#[path = "../wasi/io.rs"]
pub mod io;
#[path = "../wasi/net.rs"]
pub mod net;
#[path = "../wasi/os.rs"]
pub mod os;
#[path = "../unix/os_str.rs"]
pub mod os_str;
#[path = "../unix/path.rs"]
pub mod path;
#[path = "../unsupported/pipe.rs"]
pub mod pipe;
#[path = "../unsupported/process.rs"]
pub mod process;
#[path = "../wasi/stdio.rs"]
pub mod stdio;
#[path = "../wasi/thread.rs"]
pub mod thread;
#[path = "../unsupported/thread_local_dtor.rs"]
pub mod thread_local_dtor;
#[path = "../unsupported/thread_local_key.rs"]
pub mod thread_local_key;
#[path = "../wasi/time.rs"]
pub mod time;
cfg_if::cfg_if! {
if #[cfg(target_feature = "atomics")] {
compile_error!("The wasm32-wasi-preview2 target does not support atomics");
} else {
#[path = "../unsupported/locks/mod.rs"]
pub mod locks;
#[path = "../unsupported/once.rs"]
pub mod once;
#[path = "../unsupported/thread_parking.rs"]
pub mod thread_parking;
}
}
#[path = "../unsupported/common.rs"]
#[deny(unsafe_op_in_unsafe_fn)]
#[allow(unused)]
mod common;
pub use common::*;
#[path = "../wasi/helpers.rs"]
mod helpers;
// These exports are listed individually to work around Rust's glob import
// conflict rules. If we glob export `helpers` and `common` together, then
// the compiler complains about conflicts.
pub use helpers::abort_internal;
pub use helpers::decode_error_kind;
use helpers::err2io;
pub use helpers::hashmap_random_keys;
pub use helpers::is_interrupted;

View File

@ -367,10 +367,13 @@ fn copy_self_contained_objects(
let srcdir = builder let srcdir = builder
.wasi_root(target) .wasi_root(target)
.unwrap_or_else(|| { .unwrap_or_else(|| {
panic!("Target {:?} does not have a \"wasi-root\" key", target.triple) panic!(
"Target {:?} does not have a \"wasi-root\" key in Config.toml",
target.triple
)
}) })
.join("lib") .join("lib")
.join(target.to_string().replace("-preview1", "")); .join(target.to_string().replace("-preview1", "").replace("-preview2", ""));
for &obj in &["libc.a", "crt1-command.o", "crt1-reactor.o"] { for &obj in &["libc.a", "crt1-command.o", "crt1-reactor.o"] {
copy_and_stamp( copy_and_stamp(
builder, builder,
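
The chained `.replace("-preview1", "").replace("-preview2", "")` above normalizes the target triple into the name of the wasi sysroot lib directory. A small sketch of that normalization, with `wasi_libdir_name` as a hypothetical helper name and the triples taken as examples:

```rust
// Sketch of the directory-name normalization done above: both preview
// suffixes are stripped before locating the wasi sysroot lib directory.
fn wasi_libdir_name(triple: &str) -> String {
    triple.replace("-preview1", "").replace("-preview2", "")
}

fn main() {
    assert_eq!(wasi_libdir_name("wasm32-wasi-preview1-threads"), "wasm32-wasi-threads");
    assert_eq!(wasi_libdir_name("wasm32-wasi-preview2"), "wasm32-wasi");
    println!("ok");
}
```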

View File

@ -88,7 +88,7 @@ const EXTRA_CHECK_CFGS: &[(Option<Mode>, &str, Option<&[&'static str]>)] = &[
(Some(Mode::Std), "no_sync", None), (Some(Mode::Std), "no_sync", None),
(Some(Mode::Std), "backtrace_in_libstd", None), (Some(Mode::Std), "backtrace_in_libstd", None),
/* Extra values not defined in the built-in targets yet, but used in std */ /* Extra values not defined in the built-in targets yet, but used in std */
(Some(Mode::Std), "target_env", Some(&["libnx"])), (Some(Mode::Std), "target_env", Some(&["libnx", "preview2"])),
// (Some(Mode::Std), "target_os", Some(&[])), // (Some(Mode::Std), "target_os", Some(&[])),
// #[cfg(bootstrap)] zkvm // #[cfg(bootstrap)] zkvm
(Some(Mode::Std), "target_os", Some(&["zkvm"])), (Some(Mode::Std), "target_os", Some(&["zkvm"])),

View File

@ -59,6 +59,7 @@
- [*-unknown-openbsd](platform-support/openbsd.md) - [*-unknown-openbsd](platform-support/openbsd.md)
- [\*-unknown-uefi](platform-support/unknown-uefi.md) - [\*-unknown-uefi](platform-support/unknown-uefi.md)
- [wasm32-wasi-preview1-threads](platform-support/wasm32-wasi-preview1-threads.md) - [wasm32-wasi-preview1-threads](platform-support/wasm32-wasi-preview1-threads.md)
- [wasm32-wasi-preview2](platform-support/wasm32-wasi-preview2.md)
- [wasm64-unknown-unknown](platform-support/wasm64-unknown-unknown.md) - [wasm64-unknown-unknown](platform-support/wasm64-unknown-unknown.md)
- [\*-win7-windows-msvc](platform-support/win7-windows-msvc.md) - [\*-win7-windows-msvc](platform-support/win7-windows-msvc.md)
- [x86_64-fortanix-unknown-sgx](platform-support/x86_64-fortanix-unknown-sgx.md) - [x86_64-fortanix-unknown-sgx](platform-support/x86_64-fortanix-unknown-sgx.md)

View File

@ -360,6 +360,7 @@ target | std | host | notes
`thumbv7a-pc-windows-msvc` | ? | | `thumbv7a-pc-windows-msvc` | ? | |
`thumbv7a-uwp-windows-msvc` | ✓ | | `thumbv7a-uwp-windows-msvc` | ✓ | |
`thumbv7neon-unknown-linux-musleabihf` | ? | | Thumb2-mode ARMv7-A Linux with NEON, MUSL `thumbv7neon-unknown-linux-musleabihf` | ? | | Thumb2-mode ARMv7-A Linux with NEON, MUSL
[`wasm32-wasi-preview2`](platform-support/wasm32-wasi-preview2.md) | ✓ | | WebAssembly
[`wasm64-unknown-unknown`](platform-support/wasm64-unknown-unknown.md) | ? | | WebAssembly [`wasm64-unknown-unknown`](platform-support/wasm64-unknown-unknown.md) | ? | | WebAssembly
`x86_64-apple-ios-macabi` | ✓ | | Apple Catalyst on x86_64 `x86_64-apple-ios-macabi` | ✓ | | Apple Catalyst on x86_64
[`x86_64-apple-tvos`](platform-support/apple-tvos.md) | ? | | x86 64-bit tvOS [`x86_64-apple-tvos`](platform-support/apple-tvos.md) | ? | | x86 64-bit tvOS

View File

@ -0,0 +1,30 @@
# `wasm32-wasi-preview2`
**Tier: 3**
The `wasm32-wasi-preview2` target is a new and, as of January 2024, still
experimental target. It extends the `wasm32-wasi-preview1` target (originally
known as `wasm32-wasi`) and is the next step in the evolution of wasi (the
[WebAssembly System Interface](https://wasi.dev)). It uses the WebAssembly
[component model] to provide a standardized set of syscalls that give
WebAssembly binaries access to native host capabilities.
[component model]: https://github.com/WebAssembly/component-model
## Target maintainers
- Alex Crichton, https://github.com/alexcrichton
- Ryan Levick, https://github.com/rylev
## Requirements
This target is cross-compiled. It fully supports `std`.
## Platform requirements
The WebAssembly runtime must support the wasi preview 2 API set.
Because this target is not yet stable, only a few engines currently
implement wasi preview 2, for example:
* Wasmtime - `-W component-model`

View File

@ -36,6 +36,10 @@ For a full history of changes in the Rust 2024 style edition, see the git
history of the style guide. Notable changes in the Rust 2024 style edition history of the style guide. Notable changes in the Rust 2024 style edition
include: include:
- [#114764](https://github.com/rust-lang/rust/pull/114764) A delimited
  expression that is the last member of another delimited expression is
  generally combinable, regardless of the number of members. Previously this
  only applied when there was exactly one member (except for closures with
  explicit blocks).
- Miscellaneous `rustfmt` bugfixes. - Miscellaneous `rustfmt` bugfixes.
- Use version-sort (sort `x8`, `x16`, `x32`, `x64`, `x128` in that order). - Use version-sort (sort `x8`, `x16`, `x32`, `x64`, `x128` in that order).
- Change "ASCIIbetical" sort to Unicode-aware "non-lowercase before lowercase". - Change "ASCIIbetical" sort to Unicode-aware "non-lowercase before lowercase".

View File

@ -818,11 +818,11 @@ E.g., `&&Some(foo)` matches, `Foo(4, Bar)` does not.
## Combinable expressions ## Combinable expressions
Where a function call has a single argument, and that argument is formatted When the last argument in a function call is formatted across
across multiple-lines, format the outer call as if it were a single-line call, multiple-lines, format the outer call as if it were a single-line call,
if the result fits. Apply the same combining behaviour to any similar if the result fits. Apply the same combining behaviour to any similar
expressions which have multi-line, block-indented lists of sub-expressions expressions which have multi-line, block-indented lists of sub-expressions
delimited by parentheses (e.g., macros or tuple struct literals). E.g., delimited by parentheses, brackets, or braces. E.g.,
```rust ```rust
foo(bar( foo(bar(
@ -848,20 +848,61 @@ let arr = [combinable(
an_expr, an_expr,
another_expr, another_expr,
)]; )];
let x = Thing(an_expr, another_expr, match cond {
A => 1,
B => 2,
});
let x = format!("Stuff: {}", [
an_expr,
another_expr,
]);
let x = func(an_expr, another_expr, SomeStruct {
field: this_is_long,
another_field: 123,
});
``` ```
Apply this behavior recursively. Apply this behavior recursively.
For a function with multiple arguments, if the last argument is a multi-line If the last argument is a multi-line closure with an explicit block,
closure with an explicit block, there are no other closure arguments, and all only apply the combining behavior if there are no other closure arguments.
the arguments and the first line of the closure fit on the first line, use the
same combining behavior:
```rust ```rust
// Combinable
foo(first_arg, x, |param| { foo(first_arg, x, |param| {
action(); action();
foo(param) foo(param)
}) })
// Not combinable, because the closure is not the last argument
foo(
first_arg,
|param| {
action();
foo(param)
},
whatever,
)
// Not combinable, because the first line of the closure does not fit
foo(
first_arg,
x,
move |very_long_param_causing_line_to_overflow| -> Bar {
action();
foo(param)
},
)
// Not combinable, because there is more than one closure argument
foo(
first_arg,
|x| x.bar(),
|param| {
action();
foo(param)
},
)
``` ```
## Ranges ## Ranges

Some files were not shown because too many files have changed in this diff