Auto merge of #85022 - JohnTitor:rollup-2q4706v, r=JohnTitor

Rollup of 9 pull requests

Successful merges:

 - #84779 (Add support for --test-args to cargotest)
 - #84781 (Don't check bootstrap artifacts by default)
 - #84787 (bump deps)
 - #84815 (Update coverage docs and command line help)
 - #84875 (Removes unneeded check of `#[no_coverage]` in mapgen)
 - #84897 (Coverage instruments closure bodies in macros (not the macro body))
 - #84911 (Retry clang+llvm download)
 - #84972 (CTFE inbounds-error-messages tweak)
 - #84990 (Sort rustdoc-gui tests)

Failed merges:

r? `@ghost`
`@rustbot` modify labels: rollup
bors 2021-05-07 07:44:53 +00:00
commit ac888e8675
32 changed files with 688 additions and 469 deletions

View File

@ -864,24 +864,24 @@ dependencies = [
[[package]]
name = "curl"
version = "0.4.34"
version = "0.4.36"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e268162af1a5fe89917ae25ba3b0a77c8da752bdc58e7dbb4f15b91fbd33756e"
checksum = "d0bac9f84ca0977c4d9b8db998689de55b9e976656a6bc87fada2ca710d504c7"
dependencies = [
"curl-sys",
"libc",
"openssl-probe",
"openssl-sys",
"schannel",
"socket2",
"socket2 0.4.0",
"winapi 0.3.9",
]
[[package]]
name = "curl-sys"
version = "0.4.39+curl-7.74.0"
version = "0.4.42+curl-7.76.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "07a8ce861e7b68a0b394e814d7ee9f1b2750ff8bd10372c6ad3bacc10e86f874"
checksum = "4636d8d6109c842707018a104051436bffb8991ea20b2d1293db70b6e0ee4c7c"
dependencies = [
"cc",
"libc",
@ -2273,7 +2273,7 @@ version = "0.3.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5a33c1b55807fbed163481b5ba66db4b2fa6cde694a5027be10fb724206c5897"
dependencies = [
"socket2",
"socket2 0.3.16",
"winapi 0.3.9",
]
@ -2390,15 +2390,15 @@ dependencies = [
[[package]]
name = "openssl"
version = "0.10.30"
version = "0.10.33"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8d575eff3665419f9b83678ff2815858ad9d11567e082f5ac1814baba4e2bcb4"
checksum = "a61075b62a23fef5a29815de7536d940aa35ce96d18ce0cc5076272db678a577"
dependencies = [
"bitflags",
"cfg-if 0.1.10",
"cfg-if 1.0.0",
"foreign-types",
"lazy_static",
"libc",
"once_cell",
"openssl-sys",
]
@ -2410,18 +2410,18 @@ checksum = "77af24da69f9d9341038eba93a073b1fdaaa1b788221b00a69bce9e762cb32de"
[[package]]
name = "openssl-src"
version = "111.12.0+1.1.1h"
version = "111.15.0+1.1.1k"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "858a4132194f8570a7ee9eb8629e85b23cbc4565f2d4a162e87556e5956abf61"
checksum = "b1a5f6ae2ac04393b217ea9f700cd04fa9bf3d93fae2872069f3d15d908af70a"
dependencies = [
"cc",
]
[[package]]
name = "openssl-sys"
version = "0.9.58"
version = "0.9.61"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a842db4709b604f0fe5d1170ae3565899be2ad3d9cbc72dedc789ac0511f78de"
checksum = "313752393519e876837e09e1fa183ddef0be7735868dced3196f4472d536277f"
dependencies = [
"autocfg",
"cc",
@ -4863,6 +4863,16 @@ dependencies = [
"winapi 0.3.9",
]
[[package]]
name = "socket2"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9e3dfc207c526015c632472a77be09cf1b6e46866581aecae5cc38fb4235dea2"
dependencies = [
"libc",
"winapi 0.3.9",
]
[[package]]
name = "stable_deref_trait"
version = "1.2.0"

View File

@ -8,7 +8,6 @@ use rustc_codegen_ssa::traits::{ConstMethods, CoverageInfoMethods};
use rustc_data_structures::fx::{FxHashMap, FxHashSet, FxIndexSet};
use rustc_hir::def_id::{DefId, DefIdSet, LOCAL_CRATE};
use rustc_llvm::RustString;
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use rustc_middle::mir::coverage::CodeRegion;
use rustc_span::Symbol;
@ -249,7 +248,7 @@ fn save_function_record(
///
/// We can find the unused functions (including generic functions) by the set difference of all MIR
/// `DefId`s (`tcx` query `mir_keys`) minus the codegenned `DefId`s (`tcx` query
/// `collect_and_partition_mono_items`).
/// `codegened_and_inlined_items`).
///
/// *HOWEVER* the codegenned `DefId`s are partitioned across multiple `CodegenUnit`s (CGUs), and
/// this function is processing a `function_coverage_map` for the functions (`Instance`/`DefId`)
@ -281,11 +280,8 @@ fn add_unused_functions<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>) {
let mut unused_def_ids_by_file: FxHashMap<Symbol, Vec<DefId>> = FxHashMap::default();
for &non_codegenned_def_id in all_def_ids.difference(codegenned_def_ids) {
let codegen_fn_attrs = tcx.codegen_fn_attrs(non_codegenned_def_id);
if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::NO_COVERAGE) {
continue;
}
// Make sure the non-codegenned (unused) function has a file_name
// Make sure the non-codegenned (unused) function has at least one MIR
// `Coverage` statement with a code region, and return its file name.
if let Some(non_codegenned_file_name) = tcx.covered_file_name(non_codegenned_def_id) {
let def_ids =
unused_def_ids_by_file.entry(*non_codegenned_file_name).or_insert_with(Vec::new);
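The doc comment above frames unused-function detection as a set difference: all `DefId`s that have MIR, minus the `DefId`s that were actually codegenned. The standalone sketch below illustrates only that set-difference idea, using plain `HashSet`s and made-up numeric IDs in place of the real `DefId`s and `tcx` queries.

```rust
use std::collections::HashSet;

// Minimal sketch of the set-difference idea described above. The integers
// stand in for `DefId`s; the real code gets these sets from `tcx` queries.
fn main() {
    let all_mir_ids: HashSet<u32> = [1, 2, 3, 4, 5].iter().copied().collect();
    let codegenned_ids: HashSet<u32> = [1, 3, 5].iter().copied().collect();

    // Everything that has MIR but was never codegenned is an "unused function"
    // candidate that still needs a (zero-count) coverage record.
    let mut unused: Vec<u32> = all_mir_ids.difference(&codegenned_ids).copied().collect();
    unused.sort_unstable();
    assert_eq!(unused, vec![2, 4]);
}
```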

View File

@ -306,6 +306,9 @@ impl fmt::Display for UndefinedBehaviorInfo<'_> {
ptr.alloc_id,
allocation_size.bytes()
),
DanglingIntPointer(_, CheckInAllocMsg::InboundsTest) => {
write!(f, "null pointer is not allowed for this operation")
}
DanglingIntPointer(i, msg) => {
write!(f, "{} failed: 0x{:x} is not a valid pointer", msg, i)
}
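For context, the new match arm above customizes the message printed when const evaluation hits a null pointer where a real pointer is required. A minimal, hypothetical reproducer of that general class of error (it is deliberately rejected at compile time, and not necessarily through this exact variant) might look like:

```rust
// Hypothetical reproducer: dereferencing a null pointer in a constant is
// rejected during const evaluation, producing a dangling/null-pointer error
// of the kind formatted above. This program intentionally fails to compile.
const VALUE: i32 = unsafe { *std::ptr::null::<i32>() };

fn main() {
    println!("{}", VALUE);
}
```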

View File

@ -526,7 +526,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
min_ptr,
Size::from_bytes(size),
None,
CheckInAllocMsg::InboundsTest,
CheckInAllocMsg::PointerArithmeticTest,
)?;
Ok(offset_ptr)
}

View File

@ -369,6 +369,7 @@ pub trait Machine<'mir, 'tcx>: Sized {
) -> InterpResult<'tcx, Pointer<Self::PointerTag>> {
Err((if int == 0 {
// This is UB, seriously.
// (`DanglingIntPointer` with these exact arguments has special printing code.)
err_ub!(DanglingIntPointer(0, CheckInAllocMsg::InboundsTest))
} else {
// This is just something we cannot support during const-eval.

View File

@ -330,7 +330,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
vtable,
3 * self.ecx.tcx.data_layout.pointer_size, // drop, size, align
Some(self.ecx.tcx.data_layout.pointer_align.abi),
CheckInAllocMsg::InboundsTest,
CheckInAllocMsg::InboundsTest, // will be replaced by the validity message anyway
),
self.path,
err_ub!(DanglingIntPointer(..)) |
@ -416,7 +416,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
place.ptr,
size,
Some(align),
CheckInAllocMsg::InboundsTest,
CheckInAllocMsg::InboundsTest, // will be replaced by the validity message anyway
),
self.path,
err_ub!(AlignmentCheckFailed { required, has }) =>

View File

@ -32,7 +32,7 @@ use rustc_middle::mir::{
use rustc_middle::ty::TyCtxt;
use rustc_span::def_id::DefId;
use rustc_span::source_map::SourceMap;
use rustc_span::{CharPos, Pos, SourceFile, Span, Symbol};
use rustc_span::{CharPos, ExpnKind, Pos, SourceFile, Span, Symbol};
/// A simple error message wrapper for `coverage::Error`s.
#[derive(Debug)]
@ -113,8 +113,29 @@ struct Instrumentor<'a, 'tcx> {
impl<'a, 'tcx> Instrumentor<'a, 'tcx> {
fn new(pass_name: &'a str, tcx: TyCtxt<'tcx>, mir_body: &'a mut mir::Body<'tcx>) -> Self {
let source_map = tcx.sess.source_map();
let (some_fn_sig, hir_body) = fn_sig_and_body(tcx, mir_body.source.def_id());
let body_span = hir_body.value.span;
let def_id = mir_body.source.def_id();
let (some_fn_sig, hir_body) = fn_sig_and_body(tcx, def_id);
let mut body_span = hir_body.value.span;
if tcx.is_closure(def_id) {
// If the MIR function is a closure, and if the closure body span
// starts from a macro, but its content is not in that macro, try
// to find a non-macro callsite, and instrument the spans there
// instead.
loop {
let expn_data = body_span.ctxt().outer_expn_data();
if expn_data.is_root() {
break;
}
if let ExpnKind::Macro(..) = expn_data.kind {
body_span = expn_data.call_site;
} else {
break;
}
}
}
let source_file = source_map.lookup_source_file(body_span.lo());
let fn_sig_span = match some_fn_sig.filter(|fn_sig| {
fn_sig.span.ctxt() == body_span.ctxt()
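The comment in the new code above concerns closures whose body span originates inside a macro expansion. The example below is a simplified, hypothetical illustration of that shape (the coverage tests in this PR use a similar `on_error!`-style macro); it is not code from the compiler.

```rust
// Hypothetical illustration: the closure is written inside a macro, so its
// body span starts in the macro expansion. The instrumentor change above walks
// `outer_expn_data()` up to the macro call site so instrumented spans land in
// this file rather than inside the macro definition.
macro_rules! make_closure {
    ($msg:expr) => {
        |x: i32| {
            println!("{}: {}", $msg, x);
            x + 1
        }
    };
}

fn main() {
    let bump = make_closure!("value");
    let out: Vec<i32> = (0..3).map(bump).collect();
    assert_eq!(out, vec![1, 2, 3]);
}
```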

View File

@ -1080,12 +1080,12 @@ options! {DebuggingOptions, DebuggingSetter, basic_debugging_options,
"gather statistics about the input (default: no)"),
instrument_coverage: Option<InstrumentCoverage> = (None, parse_instrument_coverage, [TRACKED],
"instrument the generated code to support LLVM source-based code coverage \
reports (note, the compiler build config must include `profiler = true`, \
and is mutually exclusive with `-C profile-generate`/`-C profile-use`); \
implies `-Z symbol-mangling-version=v0`; disables/overrides some Rust \
optimizations. Optional values are: `=all` (default coverage), \
`=except-unused-generics`, `=except-unused-functions`, or `=off` \
(default: instrument-coverage=off)"),
reports (note, the compiler build config must include `profiler = true`); \
implies `-Z symbol-mangling-version=v0`. Optional values are:
`=all` (implicit value)
`=except-unused-generics`
`=except-unused-functions`
`=off` (default)"),
instrument_mcount: bool = (false, parse_bool, [TRACKED],
"insert function instrument code for mcount-based tracing (default: no)"),
keep_hygiene_data: bool = (false, parse_bool, [UNTRACKED],

View File

@ -280,7 +280,7 @@ impl Step for CodegenBackend {
}
macro_rules! tool_check_step {
($name:ident, $path:literal, $($alias:literal, )* $source_type:path) => {
($name:ident, $path:literal, $($alias:literal, )* $source_type:path $(, $default:literal )?) => {
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct $name {
pub target: TargetSelection,
@ -289,7 +289,7 @@ macro_rules! tool_check_step {
impl Step for $name {
type Output = ();
const ONLY_HOSTS: bool = true;
const DEFAULT: bool = true;
const DEFAULT: bool = true $( && $default )?;
fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
run.paths(&[ $path, $($alias),* ])
@ -368,7 +368,7 @@ tool_check_step!(Rustdoc, "src/tools/rustdoc", "src/librustdoc", SourceType::InT
// rejected.
tool_check_step!(Clippy, "src/tools/clippy", SourceType::InTree);
tool_check_step!(Bootstrap, "src/bootstrap", SourceType::InTree);
tool_check_step!(Bootstrap, "src/bootstrap", SourceType::InTree, false);
/// Cargo's output path for the standard library in a given stage, compiled
/// by a particular compiler for the specified target.
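The macro change above adds an optional trailing `$default:literal` parameter so that a single invocation (`Bootstrap`) can opt out of `DEFAULT: bool = true`. A small, self-contained sketch of that same optional-parameter pattern, with illustrative step names, is:

```rust
// Standalone sketch of the optional trailing literal pattern used by
// `tool_check_step!` above: omitting the literal leaves the constant `true`;
// passing `false` ANDs it in and turns the default off.
macro_rules! step {
    ($name:ident $(, $default:literal)?) => {
        struct $name;
        impl $name {
            const DEFAULT: bool = true $( && $default )?;
        }
    };
}

step!(Rustdoc);          // keeps DEFAULT = true
step!(Bootstrap, false); // opts out: DEFAULT = true && false = false

fn main() {
    assert!(Rustdoc::DEFAULT);
    assert!(!Bootstrap::DEFAULT);
}
```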

View File

@ -183,6 +183,7 @@ impl Step for Cargotest {
builder,
cmd.arg(&cargo)
.arg(&out_dir)
.args(builder.config.cmd.test_args())
.env("RUSTC", builder.rustc(compiler))
.env("RUSTDOC", builder.rustdoc(compiler)),
);
@ -830,6 +831,7 @@ impl Step for RustdocGUI {
command.arg("src/test/rustdoc-gui/lib.rs").arg("-o").arg(&out_dir);
builder.run(&mut command);
let mut tests = Vec::new();
for file in fs::read_dir("src/test/rustdoc-gui").unwrap() {
let file = file.unwrap();
let file_path = file.path();
@ -838,13 +840,17 @@ impl Step for RustdocGUI {
if !file_name.to_str().unwrap().ends_with(".goml") {
continue;
}
tests.push(file_path);
}
tests.sort_unstable();
for test in tests {
let mut command = Command::new(&nodejs);
command
.arg("src/tools/rustdoc-gui/tester.js")
.arg("--doc-folder")
.arg(out_dir.join("test_docs"))
.arg("--test-file")
.arg(file_path);
.arg(test);
builder.run(&mut command);
}
} else {

View File

@ -18,7 +18,8 @@ if isMacOS; then
bindir="$(xcode-select --print-path)/Toolchains/XcodeDefault.xctoolchain/usr/bin"
else
file="${MIRRORS_BASE}/clang%2Bllvm-${LLVM_VERSION}-x86_64-apple-darwin.tar.xz"
curl -f "${file}" | tar xJf -
retry curl -f "${file}" -o "clang+llvm-${LLVM_VERSION}-x86_64-apple-darwin.tar.xz"
tar xJf "clang+llvm-${LLVM_VERSION}-x86_64-apple-darwin.tar.xz"
bindir="$(pwd)/clang+llvm-${LLVM_VERSION}-x86_64-apple-darwin/bin"
fi
@ -48,7 +49,8 @@ elif isWindows && [[ ${CUSTOM_MINGW-0} -ne 1 ]]; then
mkdir -p citools/clang-rust
cd citools
curl -f "${MIRRORS_BASE}/LLVM-${LLVM_VERSION}-win64.exe" -o "LLVM-${LLVM_VERSION}-win64.exe"
retry curl -f "${MIRRORS_BASE}/LLVM-${LLVM_VERSION}-win64.exe" \
-o "LLVM-${LLVM_VERSION}-win64.exe"
7z x -oclang-rust/ "LLVM-${LLVM_VERSION}-win64.exe"
ciCommandSetEnv RUST_CONFIGURE_ARGS \
"${RUST_CONFIGURE_ARGS} --set llvm.clang-cl=$(pwd)/clang-rust/bin/clang-cl.exe"

View File

@ -0,0 +1,346 @@
# `instrument-coverage`
The tracking issue for this feature is: [#79121].
[#79121]: https://github.com/rust-lang/rust/issues/79121
---
## Introduction
The Rust compiler includes two code coverage implementations:
- A GCC-compatible, gcov-based coverage implementation, enabled with `-Z profile`, which derives coverage data based on DebugInfo.
- A source-based code coverage implementation, enabled with `-Z instrument-coverage`, which uses LLVM's native, efficient coverage instrumentation to generate very precise coverage data.
This document describes how to enable and use the LLVM instrumentation-based coverage, via the `-Z instrument-coverage` compiler flag.
## How it works
When `-Z instrument-coverage` is enabled, the Rust compiler enhances rust-based libraries and binaries by:
- Automatically injecting calls to an LLVM intrinsic ([`llvm.instrprof.increment`]), at functions and branches in compiled code, to increment counters when conditional sections of code are executed.
- Embedding additional information in the data section of each library and binary (using the [LLVM Code Coverage Mapping Format] _Version 4_, supported _only_ in LLVM 11 and up), to define the code regions (start and end positions in the source code) being counted.
When running a coverage-instrumented program, the counter values are written to a `profraw` file at program termination. LLVM bundles tools that read the counter results, combine those results with the coverage map (embedded in the program binary), and generate coverage reports in multiple formats.
[`llvm.instrprof.increment`]: https://llvm.org/docs/LangRef.html#llvm-instrprof-increment-intrinsic
[llvm code coverage mapping format]: https://llvm.org/docs/CoverageMappingFormat.html
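As a concrete (and simplified) illustration of what gets counted, consider the small function below; the comments describe where counters conceptually apply, not the exact regions the compiler emits.

```rust
// With `-Z instrument-coverage`, the compiler injects `llvm.instrprof.increment`
// calls so that the function body and each branch arm bump separate counters,
// and the embedded coverage map ties those counters back to source regions.
fn classify(n: i32) -> &'static str {
    if n < 0 {
        "negative" // counted only when this arm actually runs
    } else if n == 0 {
        "zero"
    } else {
        "positive"
    }
}

fn main() {
    // Only the arms taken at runtime accumulate non-zero counts; the others
    // appear in the report with a count of 0.
    println!("{}", classify(3));
}
```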
> **Note**: `-Z instrument-coverage` also automatically enables `-Z symbol-mangling-version=v0` (tracking issue [#60705]). The `v0` symbol mangler is strongly recommended, but be aware that it is also experimental. The `v0` mangling can be overridden by explicitly adding `-Z symbol-mangling-version=legacy`.
[#60705]: https://github.com/rust-lang/rust/issues/60705
## Enable coverage profiling in the Rust compiler
Rust's source-based code coverage requires the Rust "profiler runtime". Without it, compiling with `-Z instrument-coverage` generates an error that the profiler runtime is missing.
The Rust `nightly` distribution channel includes the profiler runtime, by default.
> **Important**: If you are building the Rust compiler from the source distribution, the profiler runtime is _not_ enabled in the default `config.toml.example`. Edit your `config.toml` file and ensure the `profiler` feature is set to `true` (either under the `[build]` section, or under the settings for an individual `[target.<triple>]`):
>
> ```toml
> # Build the profiler runtime (required when compiling with options that depend
> # on this runtime, such as `-C profile-generate` or `-Z instrument-coverage`).
> profiler = true
> ```
### Building the demangler
LLVM coverage reporting tools generate results that can include function names and other symbol references, and the raw coverage results report symbols using the compiler's "mangled" version of the symbol names, which can be difficult to interpret. To work around this issue, LLVM coverage tools also support a user-specified symbol name demangler.
One option for a Rust demangler is [`rustfilt`], which can be installed with:
```shell
cargo install rustfilt
```
Another option, if you are building from the Rust compiler source distribution, is to use the `rust-demangler` tool included in the Rust source distribution, which can be built with:
```shell
$ ./x.py build rust-demangler
```
[`rustfilt`]: https://crates.io/crates/rustfilt
## Compiling with coverage enabled
Set the `-Z instrument-coverage` compiler flag in order to enable LLVM source-based code coverage profiling.
The default option generates coverage for all functions, including unused (never called) functions and generics. The compiler flag supports an optional value to tailor this behavior. (See [`-Z instrument-coverage=<options>`](#-z-instrument-coverageoptions), below.)
With `cargo`, you can instrument your program binary _and_ dependencies at the same time.
For example (if your project's Cargo.toml builds a binary by default):
```shell
$ cd your-project
$ cargo clean
$ RUSTFLAGS="-Z instrument-coverage" cargo build
```
If `cargo` is not configured to use your `profiler`-enabled version of `rustc`, set the path explicitly via the `RUSTC` environment variable. Here is another example, using a `stage1` build of `rustc` to compile an `example` binary (from the [`json5format`] crate):
```shell
$ RUSTC=$HOME/rust/build/x86_64-unknown-linux-gnu/stage1/bin/rustc \
RUSTFLAGS="-Z instrument-coverage" \
cargo build --example formatjson5
```
> **Note**: Some compiler options, combined with `-Z instrument-coverage`, can produce LLVM IR and/or linked binaries that are incompatible with LLVM coverage maps. For example, coverage requires references to actual functions in LLVM IR. If any covered function is optimized out, the coverage tools may not be able to process the coverage results. If you need to pass additional options with coverage enabled, test them early, to confirm you will get the coverage results you expect.
## Running the instrumented binary to generate raw coverage profiling data
In the previous example, `cargo` generated the coverage-instrumented binary `formatjson5`:
```shell
$ echo "{some: 'thing'}" | target/debug/examples/formatjson5 -
```
```json5
{
some: "thing",
}
```
After running this program, a new file, `default.profraw`, should be in the current working directory. It's often preferable to set a specific file name or path. You can change the output file using the environment variable `LLVM_PROFILE_FILE`:
```shell
$ echo "{some: 'thing'}" \
| LLVM_PROFILE_FILE="formatjson5.profraw" target/debug/examples/formatjson5 -
...
$ ls formatjson5.profraw
formatjson5.profraw
```
If `LLVM_PROFILE_FILE` contains a path to a non-existent directory, the missing directory structure will be created. Additionally, the following special pattern strings are rewritten:
- `%p` - The process ID.
- `%h` - The hostname of the machine running the program.
- `%t` - The value of the TMPDIR environment variable.
- `%Nm` - the instrumented binary's signature: The runtime creates a pool of N raw profiles, used for on-line profile merging. The runtime takes care of selecting a raw profile from the pool, locking it, and updating it before the program exits. `N` must be between `1` and `9`, and defaults to `1` if omitted (with simply `%m`).
- `%c` - Does not add anything to the filename, but enables a mode (on some platforms, including Darwin) in which profile counter updates are continuously synced to a file. This means that if the instrumented program crashes, or is killed by a signal, perfect coverage information can still be recovered.
## Installing LLVM coverage tools
LLVM supplies two tools—`llvm-profdata` and `llvm-cov`—that process coverage data and generate reports. There are several ways to find and/or install these tools, but note that the coverage mapping data generated by the Rust compiler requires LLVM version 11 or higher (`llvm-cov --version` typically shows the tool's LLVM version number):
- The LLVM tools may be installed (or installable) directly to your OS (such as via `apt-get`, for Linux).
- If you are building the Rust compiler from source, you can optionally use the bundled LLVM tools, built from source. Those tool binaries can typically be found in your build platform directory at something like: `rust/build/x86_64-unknown-linux-gnu/llvm/bin/llvm-*`.
- You can install compatible versions of these tools via `rustup`.
The `rustup` option is guaranteed to install a compatible version of the LLVM tools, but they can be hard to find. We recommend [`cargo-binutils`], which installs Rust-specific wrappers around these and other LLVM tools, so you can invoke them via `cargo` commands!
```shell
$ rustup component add llvm-tools-preview
$ cargo install cargo-binutils
$ cargo profdata -- --help # note the additional "--" preceding the tool-specific arguments
```
[`cargo-binutils`]: https://crates.io/crates/cargo-binutils
## Creating coverage reports
Raw profiles have to be indexed before they can be used to generate coverage reports. This is done using [`llvm-profdata merge`] (or `cargo profdata -- merge`), which can combine multiple raw profiles and index them at the same time:
```shell
$ llvm-profdata merge -sparse formatjson5.profraw -o formatjson5.profdata
```
Finally, the `.profdata` file is used, in combination with the coverage map (from the program binary), to generate coverage reports using [`llvm-cov report`] (or `cargo cov -- report`), for coverage summaries; and [`llvm-cov show`] (or `cargo cov -- show`), to see detailed coverage of lines and regions (character ranges) overlaid on the original source code.
These commands have several display and filtering options. For example:
```shell
$ llvm-cov show -Xdemangler=rustfilt target/debug/examples/formatjson5 \
-instr-profile=formatjson5.profdata \
-show-line-counts-or-regions \
-show-instantiations \
-name=add_quoted_string
```
<img alt="Screenshot of sample `llvm-cov show` result, for function add_quoted_string" src="img/llvm-cov-show-01.png" class="center"/>
<br/>
<br/>
Some of the more notable options in this example include:
- `--Xdemangler=rustfilt` - the command name or path used to demangle Rust symbols (`rustfilt` in the example, but this could also be a path to the `rust-demangler` tool)
- `target/debug/examples/formatjson5` - the instrumented binary (from which to extract the coverage map)
- `--instr-profile=<path-to-file>.profdata` - the location of the `.profdata` file created by `llvm-profdata merge` (from the `.profraw` file generated by the instrumented binary)
- `--name=<exact-function-name>` - to show coverage for a specific function (or, consider using another filter option, such as `--name-regex=<pattern>`)
[`llvm-profdata merge`]: https://llvm.org/docs/CommandGuide/llvm-profdata.html#profdata-merge
[`llvm-cov report`]: https://llvm.org/docs/CommandGuide/llvm-cov.html#llvm-cov-report
[`llvm-cov show`]: https://llvm.org/docs/CommandGuide/llvm-cov.html#llvm-cov-show
> **Note**: Coverage can also be disabled on an individual function by annotating the function with the [`no_coverage` attribute] (which requires the feature flag `#![feature(no_coverage)]`).
[`no_coverage` attribute]: ../language-features/no-coverage.md
## Interpreting reports
There are four statistics tracked in a coverage summary:
- Function coverage is the percentage of functions that have been executed at least once. A function is considered to be executed if any of its instantiations are executed.
- Instantiation coverage is the percentage of function instantiations that have been executed at least once. Generic functions and functions generated from macros are two kinds of functions that may have multiple instantiations.
- Line coverage is the percentage of code lines that have been executed at least once. Only executable lines within function bodies are considered to be code lines.
- Region coverage is the percentage of code regions that have been executed at least once. A code region may span multiple lines: for example, in a large function body with no control flow. In other cases, a single line can contain multiple code regions: `return x || (y && z)` has countable code regions for `x` (which may resolve the expression, if `x` is `true`), `|| (y && z)` (executed only if `x` was `false`), and `return` (executed in either situation).
Of these four statistics, function coverage is usually the least granular while region coverage is the most granular. The project-wide totals for each statistic are listed in the summary.
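To make region granularity concrete, the sketch below annotates the `return x || (y && z)` example from above; the exact region boundaries are chosen by the compiler, so treat the comments as an approximation.

```rust
// Approximate code regions on a single line, following the example above:
// `x`, `|| (y && z)`, and the `return` itself are separately countable.
fn check(x: bool, y: bool, z: bool) -> bool {
    // Calling `check(true, false, false)` exercises the `x` and `return`
    // regions but leaves `|| (y && z)` with a count of 0 (short-circuited).
    return x || (y && z);
}

fn main() {
    assert!(check(true, false, false));
}
```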
## Test coverage
A typical use case for coverage analysis is test coverage. Rust's source-based coverage tools can both measure your tests' code coverage as a percentage and pinpoint functions and branches not tested.
The following example (using the [`json5format`] crate, for demonstration purposes) shows how to generate and analyze coverage results for all tests in a crate.
Since `cargo test` both builds and runs the tests, we set both the additional `RUSTFLAGS`, to add the `-Z instrument-coverage` flag, and `LLVM_PROFILE_FILE`, to set a custom filename for the raw profiling data generated during the test runs. Since there may be more than one test binary, apply `%m` in the filename pattern. This generates unique names for each test binary. (Otherwise, each executed test binary would overwrite the coverage results from the previous binary.)
```shell
$ RUSTFLAGS="-Z instrument-coverage" \
LLVM_PROFILE_FILE="json5format-%m.profraw" \
cargo test --tests
```
Make note of the test binary file paths, displayed after the word "`Running`" in the test output:
```text
...
Compiling json5format v0.1.3 ($HOME/json5format)
Finished test [unoptimized + debuginfo] target(s) in 14.60s
Running target/debug/deps/json5format-fececd4653271682
running 25 tests
...
test result: ok. 25 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out
Running target/debug/deps/lib-30768f9c53506dc5
running 31 tests
...
test result: ok. 31 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out
```
You should have one or more `.profraw` files now, one for each test binary. Run the `profdata` tool to merge them:
```shell
$ cargo profdata -- merge \
-sparse json5format-*.profraw -o json5format.profdata
```
Then run the `cov` tool, with the `profdata` file and all test binaries:
```shell
$ cargo cov -- report \
--use-color --ignore-filename-regex='/.cargo/registry' \
--instr-profile=json5format.profdata \
--object target/debug/deps/lib-30768f9c53506dc5 \
--object target/debug/deps/json5format-fececd4653271682
$ cargo cov -- show \
--use-color --ignore-filename-regex='/.cargo/registry' \
--instr-profile=json5format.profdata \
--object target/debug/deps/lib-30768f9c53506dc5 \
--object target/debug/deps/json5format-fececd4653271682 \
--show-instantiations --show-line-counts-or-regions \
--Xdemangler=rustfilt | less -R
```
> **Note**: The command line option `--ignore-filename-regex=/.cargo/registry` excludes the sources for dependencies from the coverage results.
### Tips for listing the binaries automatically
For `bash` users, one suggested way to automatically complete the `cov` command with the list of binaries is with a command like:
```bash
$ cargo cov -- report \
$( \
for file in \
$( \
RUSTFLAGS="-Z instrument-coverage" \
cargo test --tests --no-run --message-format=json \
| jq -r "select(.profile.test == true) | .filenames[]" \
| grep -v dSYM - \
); \
do \
printf "%s %s " -object $file; \
done \
) \
--instr-profile=json5format.profdata --summary-only # and/or other options
```
Adding `--no-run --message-format=json` to the _same_ `cargo test` command used to run
the tests (including the same environment variables and flags) generates output in a JSON
format that `jq` can easily query.
The `printf` command takes this list and generates the `--object <binary>` arguments
for each listed test binary.
### Including doc tests
The previous examples run `cargo test` with `--tests`, which excludes doc tests.[^79417]
To include doc tests in the coverage results, drop the `--tests` flag, and apply the
`-Z instrument-coverage` flag, and some doc-test-specific options in the
`RUSTDOCFLAGS` environment variable. (The `cargo profdata` command does not change.)
```bash
$ RUSTFLAGS="-Z instrument-coverage" \
RUSTDOCFLAGS="-Z instrument-coverage -Z unstable-options --persist-doctests target/debug/doctestbins" \
LLVM_PROFILE_FILE="json5format-%m.profraw" \
cargo test
$ cargo profdata -- merge \
-sparse json5format-*.profraw -o json5format.profdata
```
The `-Z unstable-options --persist-doctests` flag is required, to save the test binaries
(with their coverage maps) for `llvm-cov`.
```bash
$ cargo cov -- report \
$( \
for file in \
$( \
RUSTFLAGS="-Z instrument-coverage" \
RUSTDOCFLAGS="-Z instrument-coverage -Z unstable-options --persist-doctests target/debug/doctestbins" \
cargo test --no-run --message-format=json \
| jq -r "select(.profile.test == true) | .filenames[]" \
| grep -v dSYM - \
) \
target/debug/doctestbins/*/rust_out; \
do \
[[ -x $file ]] && printf "%s %s " -object $file; \
done \
) \
--instr-profile=json5format.profdata --summary-only # and/or other options
```
> **Note**: The differences in this `cargo cov` command, compared with the version without
> doc tests, include:
- The `cargo test ... --no-run` command is updated with the same environment variables
and flags used to _build_ the tests, _including_ the doc tests. (`LLVM_PROFILE_FILE`
is only used when _running_ the tests.)
- The file glob pattern `target/debug/doctestbins/*/rust_out` adds the `rust_out`
binaries generated for doc tests (note, however, that some `rust_out` files may not
be executable binaries).
- `[[ -x $file ]] &&` filters the files passed on to the `printf`, to include only
executable binaries.
[^79417]:
There is ongoing work to resolve a known issue
[(#79417)](https://github.com/rust-lang/rust/issues/79417) that doc test coverage
generates incorrect source line numbers in `llvm-cov show` results.
## `-Z instrument-coverage=<options>`
- `-Z instrument-coverage=all`: Instrument all functions, including unused functions and unused generics. (This is the same as `-Z instrument-coverage`, with no value.)
- `-Z instrument-coverage=except-unused-generics`: Instrument all functions except unused generics.
- `-Z instrument-coverage=except-unused-functions`: Instrument only used (called) functions and instantiated generic functions.
- `-Z instrument-coverage=off`: Do not instrument any functions. (This is the same as simply not including the `-Z instrument-coverage` option.)
## Other references
Rust's implementation and workflow for source-based code coverage is based on the same library and tools used to implement [source-based code coverage in Clang]. (This document is partially based on the Clang guide.)
[source-based code coverage in clang]: https://clang.llvm.org/docs/SourceBasedCodeCoverage.html
[`json5format`]: https://crates.io/crates/json5format

View File

@ -1,327 +1,5 @@
# `source-based-code-coverage`
The tracking issue for this feature is: [#79121].
See compiler flag [`-Z instrument-coverage`].
------------------------
## Introduction
The Rust compiler includes two code coverage implementations:
* A GCC-compatible, gcov-based coverage implementation, enabled with [`-Zprofile`], which operates on DebugInfo.
* A source-based code coverage implementation, enabled with `-Zinstrument-coverage`, which uses LLVM's native coverage instrumentation to generate very precise coverage data.
This document describes how to enable and use the LLVM instrumentation-based coverage, via the `-Zinstrument-coverage` compiler flag.
## How it works
When `-Zinstrument-coverage` is enabled, the Rust compiler enhances rust-based libraries and binaries by:
* Automatically injecting calls to an LLVM intrinsic ([`llvm.instrprof.increment`]), at functions and branches in compiled code, to increment counters when conditional sections of code are executed.
* Embedding additional information in the data section of each library and binary (using the [LLVM Code Coverage Mapping Format] _Version 4_, supported _only_ in LLVM 11 and up), to define the code regions (start and end positions in the source code) being counted.
When running a coverage-instrumented program, the counter values are written to a `profraw` file at program termination. LLVM bundles tools that read the counter results, combine those results with the coverage map (embedded in the program binary), and generate coverage reports in multiple formats.
## Enable coverage profiling in the Rust compiler
Rust's source-based code coverage requires the Rust "profiler runtime". Without it, compiling with `-Zinstrument-coverage` generates an error that the profiler runtime is missing.
The Rust `nightly` distribution channel should include the profiler runtime, by default.
*IMPORTANT:* If you are building the Rust compiler from the source distribution, the profiler runtime is *not* enabled in the default `config.toml.example`. Edit your `config.toml` file and ensure the `profiler` feature is set to `true`:
```toml
# Build the profiler runtime (required when compiling with options that depend
# on this runtime, such as `-C profile-generate` or `-Z instrument-coverage`).
profiler = true
```
If changed, rebuild the Rust compiler (see [rustc-dev-guide-how-to-build-and-run]).
### Building the demangler
LLVM coverage reporting tools generate results that can include function names and other symbol references, and the raw coverage results report symbols using the compiler's "mangled" version of the symbol names, which can be difficult to interpret. To work around this issue, LLVM coverage tools also support a user-specified symbol name demangler.
One option for a Rust demangler is [`rustfilt`], which can be installed with:
```shell
cargo install rustfilt
```
Another option, if you are building from the Rust compiler source distribution, is to use the `rust-demangler` tool included in the Rust source distribution, which can be built with:
```shell
$ ./x.py build rust-demangler
```
## Compiling with coverage enabled
Set the `-Zinstrument-coverage` compiler flag in order to enable LLVM source-based code coverage profiling.
With `cargo`, you can instrument your program binary *and* dependencies at the same time.
For example (if your project's Cargo.toml builds a binary by default):
```shell
$ cd your-project
$ cargo clean
$ RUSTFLAGS="-Zinstrument-coverage" cargo build
```
If `cargo` is not configured to use your `profiler`-enabled version of `rustc`, set the path explicitly via the `RUSTC` environment variable. Here is another example, using a `stage1` build of `rustc` to compile an `example` binary (from the [`json5format`] crate):
```shell
$ RUSTC=$HOME/rust/build/x86_64-unknown-linux-gnu/stage1/bin/rustc \
RUSTFLAGS="-Zinstrument-coverage" \
cargo build --example formatjson5
```
Note that some compiler options, combined with `-Zinstrument-coverage`, can produce LLVM IR and/or linked binaries that are incompatible with LLVM coverage maps. For example, coverage requires references to actual functions in LLVM IR. If any covered function is optimized out, the coverage tools may not be able to process the coverage results. If you need to pass additional options, with coverage enabled, test them early, to confirm you will get the coverage results you expect.
## Running the instrumented binary to generate raw coverage profiling data
In the previous example, `cargo` generated the coverage-instrumented binary `formatjson5`:
```shell
$ echo "{some: 'thing'}" | target/debug/examples/formatjson5 -
```
```json5
{
some: 'thing',
}
```
After running this program, a new file, `default.profraw`, should be in the current working directory. It's often preferable to set a specific file name or path. You can change the output file using the environment variable `LLVM_PROFILE_FILE`:
```shell
$ echo "{some: 'thing'}" \
| LLVM_PROFILE_FILE="formatjson5.profraw" target/debug/examples/formatjson5 -
...
$ ls formatjson5.profraw
formatjson5.profraw
```
If `LLVM_PROFILE_FILE` contains a path to a non-existent directory, the missing directory structure will be created. Additionally, the following special pattern strings are rewritten:
* `%p` - The process ID.
* `%h` - The hostname of the machine running the program.
* `%t` - The value of the TMPDIR environment variable.
* `%Nm` - the instrumented binary's signature: The runtime creates a pool of N raw profiles, used for on-line profile merging. The runtime takes care of selecting a raw profile from the pool, locking it, and updating it before the program exits. `N` must be between `1` and `9`, and defaults to `1` if omitted (with simply `%m`).
* `%c` - Does not add anything to the filename, but enables a mode (on some platforms, including Darwin) in which profile counter updates are continuously synced to a file. This means that if the instrumented program crashes, or is killed by a signal, perfect coverage information can still be recovered.
## Installing LLVM coverage tools
LLVM supplies two tools—`llvm-profdata` and `llvm-cov`—that process coverage data and generate reports. There are several ways to find and/or install these tools, but note that the coverage mapping data generated by the Rust compiler requires LLVM version 11 or higher (`llvm-cov --version` typically shows the tool's LLVM version number):
* The LLVM tools may be installed (or installable) directly to your OS (such as via `apt-get`, for Linux).
* If you are building the Rust compiler from source, you can optionally use the bundled LLVM tools, built from source. Those tool binaries can typically be found in your build platform directory at something like: `rust/build/x86_64-unknown-linux-gnu/llvm/bin/llvm-*`.
* You can install compatible versions of these tools via `rustup`.
The `rustup` option is guaranteed to install a compatible version of the LLVM tools, but they can be hard to find. We recommend [`cargo-binutils`], which installs Rust-specific wrappers around these and other LLVM tools, so you can invoke them via `cargo` commands!
```shell
$ rustup component add llvm-tools-preview
$ cargo install cargo-binutils
$ cargo profdata -- --help # note the additional "--" preceding the tool-specific arguments
```
## Creating coverage reports
Raw profiles have to be indexed before they can be used to generate coverage reports. This is done using [`llvm-profdata merge`] (or `cargo profdata -- merge`), which can combine multiple raw profiles and index them at the same time:
```shell
$ llvm-profdata merge -sparse formatjson5.profraw -o formatjson5.profdata
```
Finally, the `.profdata` file is used, in combination with the coverage map (from the program binary), to generate coverage reports using [`llvm-cov report`] (or `cargo cov -- report`), for coverage summaries; and [`llvm-cov show`] (or `cargo cov -- show`), to see detailed coverage of lines and regions (character ranges) overlaid on the original source code.
These commands have several display and filtering options. For example:
```shell
$ llvm-cov show -Xdemangler=rustfilt target/debug/examples/formatjson5 \
-instr-profile=formatjson5.profdata \
-show-line-counts-or-regions \
-show-instantiations \
-name=add_quoted_string
```
<img alt="Screenshot of sample `llvm-cov show` result, for function add_quoted_string" src="img/llvm-cov-show-01.png" class="center"/>
<br/>
<br/>
Some of the more notable options in this example include:
* `--Xdemangler=rustfilt` - the command name or path used to demangle Rust symbols (`rustfilt` in the example, but this could also be a path to the `rust-demangler` tool)
* `target/debug/examples/formatjson5` - the instrumented binary (from which to extract the coverage map)
* `--instr-profile=<path-to-file>.profdata` - the location of the `.profdata` file created by `llvm-profdata merge` (from the `.profraw` file generated by the instrumented binary)
* `--name=<exact-function-name>` - to show coverage for a specific function (or, consider using another filter option, such as `--name-regex=<pattern>`)
## Interpreting reports
There are four statistics tracked in a coverage summary:
* Function coverage is the percentage of functions that have been executed at least once. A function is considered to be executed if any of its instantiations are executed.
* Instantiation coverage is the percentage of function instantiations that have been executed at least once. Generic functions and functions generated from macros are two kinds of functions that may have multiple instantiations.
* Line coverage is the percentage of code lines that have been executed at least once. Only executable lines within function bodies are considered to be code lines.
* Region coverage is the percentage of code regions that have been executed at least once. A code region may span multiple lines: for example, in a large function body with no control flow. In other cases, a single line can contain multiple code regions: `return x || (y && z)` has countable code regions for `x` (which may resolve the expression, if `x` is `true`), `|| (y && z)` (executed only if `x` was `false`), and `return` (executed in either situation).
Of these four statistics, function coverage is usually the least granular while region coverage is the most granular. The project-wide totals for each statistic are listed in the summary.
## Test coverage
A typical use case for coverage analysis is test coverage. Rust's source-based coverage tools can both measure your tests' code coverage as a percentage and pinpoint functions and branches not tested.
The following example (using the [`json5format`] crate, for demonstration purposes) shows how to generate and analyze coverage results for all tests in a crate.
Since `cargo test` both builds and runs the tests, we set both the additional `RUSTFLAGS`, to add the `-Zinstrument-coverage` flag, and `LLVM_PROFILE_FILE`, to set a custom filename for the raw profiling data generated during the test runs. Since there may be more than one test binary, apply `%m` in the filename pattern. This generates unique names for each test binary. (Otherwise, each executed test binary would overwrite the coverage results from the previous binary.)
```shell
$ RUSTFLAGS="-Zinstrument-coverage" \
LLVM_PROFILE_FILE="json5format-%m.profraw" \
cargo test --tests
```
Make note of the test binary file paths, displayed after the word "`Running`" in the test output:
```text
...
Compiling json5format v0.1.3 ($HOME/json5format)
Finished test [unoptimized + debuginfo] target(s) in 14.60s
Running target/debug/deps/json5format-fececd4653271682
running 25 tests
...
test result: ok. 25 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out
Running target/debug/deps/lib-30768f9c53506dc5
running 31 tests
...
test result: ok. 31 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out
```
You should have one or more `.profraw` files now, one for each test binary. Run the `profdata` tool to merge them:
```shell
$ cargo profdata -- merge \
-sparse json5format-*.profraw -o json5format.profdata
```
Then run the `cov` tool, with the `profdata` file and all test binaries:
```shell
$ cargo cov -- report \
--use-color --ignore-filename-regex='/.cargo/registry' \
--instr-profile=json5format.profdata \
--object target/debug/deps/lib-30768f9c53506dc5 \
--object target/debug/deps/json5format-fececd4653271682
$ cargo cov -- show \
--use-color --ignore-filename-regex='/.cargo/registry' \
--instr-profile=json5format.profdata \
--object target/debug/deps/lib-30768f9c53506dc5 \
--object target/debug/deps/json5format-fececd4653271682 \
--show-instantiations --show-line-counts-or-regions \
--Xdemangler=rustfilt | less -R
```
_Note the command line option `--ignore-filename-regex=/.cargo/registry`, which excludes the sources for dependencies from the coverage results._
### Tips for listing the binaries automatically
For `bash` users, one suggested way to automatically complete the `cov` command with the list of binaries is with a command like:
```bash
$ cargo cov -- report \
$( \
for file in \
$( \
RUSTFLAGS="-Zinstrument-coverage" \
cargo test --tests --no-run --message-format=json \
| jq -r "select(.profile.test == true) | .filenames[]" \
| grep -v dSYM - \
); \
do \
printf "%s %s " -object $file; \
done \
) \
--instr-profile=json5format.profdata --summary-only # and/or other options
```
Adding `--no-run --message-format=json` to the _same_ `cargo test` command used to run
the tests (including the same environment variables and flags) generates output in a JSON
format that `jq` can easily query.
The `printf` command takes this list and generates the `--object <binary>` arguments
for each listed test binary.
### Including doc tests
The previous examples run `cargo test` with `--tests`, which excludes doc tests.[^79417]
To include doc tests in the coverage results, drop the `--tests` flag, and apply the
`-Zinstrument-coverage` flag, and some doc-test-specific options in the
`RUSTDOCFLAGS` environment variable. (The `cargo profdata` command does not change.)
```bash
$ RUSTFLAGS="-Zinstrument-coverage" \
RUSTDOCFLAGS="-Zinstrument-coverage -Zunstable-options --persist-doctests target/debug/doctestbins" \
LLVM_PROFILE_FILE="json5format-%m.profraw" \
cargo test
$ cargo profdata -- merge \
-sparse json5format-*.profraw -o json5format.profdata
```
The `-Zunstable-options --persist-doctests` flag is required, to save the test binaries
(with their coverage maps) for `llvm-cov`.
```bash
$ cargo cov -- report \
$( \
for file in \
$( \
RUSTFLAGS="-Zinstrument-coverage" \
RUSTDOCFLAGS="-Zinstrument-coverage -Zunstable-options --persist-doctests target/debug/doctestbins" \
cargo test --no-run --message-format=json \
| jq -r "select(.profile.test == true) | .filenames[]" \
| grep -v dSYM - \
) \
target/debug/doctestbins/*/rust_out; \
do \
[[ -x $file ]] && printf "%s %s " -object $file; \
done \
) \
--instr-profile=json5format.profdata --summary-only # and/or other options
```
Note, the differences in this `cargo cov` command, compared with the version without
doc tests, include:
* The `cargo test ... --no-run` command is updated with the same environment variables
and flags used to _build_ the tests, _including_ the doc tests. (`LLVM_PROFILE_FILE`
is only used when _running_ the tests.)
* The file glob pattern `target/debug/doctestbins/*/rust_out` adds the `rust_out`
binaries generated for doc tests (note, however, that some `rust_out` files may not
be executable binaries).
* `[[ -x $file ]] &&` filters the files passed on to the `printf`, to include only
executable binaries.
[^79417]: There is ongoing work to resolve a known issue
[(#79417)](https://github.com/rust-lang/rust/issues/79417) that doc test coverage
generates incorrect source line numbers in `llvm-cov show` results.
## Other references
Rust's implementation and workflow for source-based code coverage is based on the same library and tools used to implement [source-based code coverage in Clang]. (This document is partially based on the Clang guide.)
[#79121]: https://github.com/rust-lang/rust/issues/79121
[`-Zprofile`]: profile.md
[`llvm.instrprof.increment`]: https://llvm.org/docs/LangRef.html#llvm-instrprof-increment-intrinsic
[LLVM Code Coverage Mapping Format]: https://llvm.org/docs/CoverageMappingFormat.html
[rustc-dev-guide-how-to-build-and-run]: https://rustc-dev-guide.rust-lang.org/building/how-to-build-and-run.html
[`rustfilt`]: https://crates.io/crates/rustfilt
[`json5format`]: https://crates.io/crates/json5format
[`cargo-binutils`]: https://crates.io/crates/cargo-binutils
[`llvm-profdata merge`]: https://llvm.org/docs/CommandGuide/llvm-profdata.html#profdata-merge
[`llvm-cov report`]: https://llvm.org/docs/CommandGuide/llvm-cov.html#llvm-cov-report
[`llvm-cov show`]: https://llvm.org/docs/CommandGuide/llvm-cov.html#llvm-cov-show
[source-based code coverage in Clang]: https://clang.llvm.org/docs/SourceBasedCodeCoverage.html
[`-z instrument-coverage`]: ./instrument-coverage.html

View File

@ -0,0 +1,30 @@
# `no_coverage`
The tracking issue for this feature is: [#84605]
[#84605]: https://github.com/rust-lang/rust/issues/84605
---
The `no_coverage` attribute can be used to selectively disable coverage
instrumentation in an annotated function. This might be useful to:
- Avoid instrumentation overhead in a performance critical function
- Avoid generating coverage for a function that is not meant to be executed,
but still target 100% coverage for the rest of the program.
## Example
```rust
#![feature(no_coverage)]
// `foo()` will get coverage instrumentation (by default)
fn foo() {
// ...
}
#[no_coverage]
fn bar() {
// ...
}
```

View File

@ -14,9 +14,9 @@
14| 1| }
15| 1|}
16| |
17| |// FIXME(#83985): The auto-generated closure in an async function is failing to include
18| |// the println!() and `let` assignment lines in the coverage code region(s), as it does in the
19| |// non-async function above, unless the `println!()` is inside a covered block.
17| |
18| |
19| |
20| 1|async fn async_func() {
21| 1| println!("async_func was covered");
22| 1| let b = true;
@ -26,9 +26,9 @@
^0
26| 1|}
27| |
28| |// FIXME(#83985): As above, this async function only has the `println!()` macro call, which is not
29| |// showing coverage, so the entire async closure _appears_ uncovered; but this is not exactly true.
30| |// It's only certain kinds of lines and/or their context that results in missing coverage.
28| |
29| |
30| |
31| 1|async fn async_func_just_println() {
32| 1| println!("async_func_just_println was covered");
33| 1|}

View File

@ -37,7 +37,7 @@
37| 0| countdown = 10;
38| 0| }
39| 0| "alt string 2".to_owned()
40| 1| };
40| 0| };
41| 1| println!(
42| 1| "The string or alt: {}"
43| 1| ,
@ -125,36 +125,98 @@
125| 0| countdown = 10;
126| 0| }
127| 0| "closure should be unused".to_owned()
128| 1| };
129| 1|
128| 0| };
129| |
130| 1| let mut countdown = 10;
131| 1| let _short_unused_closure = | _unused_arg: u8 | countdown += 1;
^0
132| 1|
133| 1| // Macros can sometimes confuse the coverage results. Compare this next assignment, with an
134| 1| // unused closure that invokes the `println!()` macro, with the closure assignment above, that
135| 1| // does not use a macro. The closure above correctly shows `0` executions.
136| 1| let _short_unused_closure = | _unused_arg: u8 | println!("not called");
137| 1| // The closure assignment above is executed, with a line count of `1`, but the `println!()`
138| 1| // could not have been called, and yet, there is no indication that it wasn't...
139| 1|
140| 1| // ...but adding block braces gives the expected result, showing the block was not executed.
132| |
133| |
134| 1| let short_used_covered_closure_macro = | used_arg: u8 | println!("called");
135| 1| let short_used_not_covered_closure_macro = | used_arg: u8 | println!("not called");
^0
136| 1| let _short_unused_closure_macro = | _unused_arg: u8 | println!("not called");
^0
137| |
138| |
139| |
140| |
141| 1| let _short_unused_closure_block = | _unused_arg: u8 | { println!("not called") };
^0
142| 1|
142| |
143| 1| let _shortish_unused_closure = | _unused_arg: u8 | {
144| 0| println!("not called")
145| 1| };
146| 1|
145| 0| };
146| |
147| 1| let _as_short_unused_closure = |
148| | _unused_arg: u8
149| 1| | { println!("not called") };
^0
150| 1|
149| 0| | { println!("not called") };
150| |
151| 1| let _almost_as_short_unused_closure = |
152| | _unused_arg: u8
153| 1| | { println!("not called") }
^0
154| 1| ;
155| 1|}
153| 0| | { println!("not called") }
154| | ;
155| |
156| |
157| |
158| |
159| |
160| 1| let _short_unused_closure_line_break_no_block = | _unused_arg: u8 |
161| 0|println!("not called")
162| | ;
163| |
164| 1| let _short_unused_closure_line_break_no_block2 =
165| | | _unused_arg: u8 |
166| 0| println!(
167| 0| "not called"
168| 0| )
169| | ;
170| |
171| 1| let short_used_not_covered_closure_line_break_no_block_embedded_branch =
172| 1| | _unused_arg: u8 |
173| 0| println!(
174| 0| "not called: {}",
175| 0| if is_true { "check" } else { "me" }
176| 0| )
177| | ;
178| |
179| 1| let short_used_not_covered_closure_line_break_block_embedded_branch =
180| 1| | _unused_arg: u8 |
181| 0| {
182| 0| println!(
183| 0| "not called: {}",
184| 0| if is_true { "check" } else { "me" }
185| | )
186| 0| }
187| | ;
188| |
189| 1| let short_used_covered_closure_line_break_no_block_embedded_branch =
190| 1| | _unused_arg: u8 |
191| 1| println!(
192| 1| "not called: {}",
193| 1| if is_true { "check" } else { "me" }
^0
194| 1| )
195| | ;
196| |
197| 1| let short_used_covered_closure_line_break_block_embedded_branch =
198| 1| | _unused_arg: u8 |
199| 1| {
200| 1| println!(
201| 1| "not called: {}",
202| 1| if is_true { "check" } else { "me" }
^0
203| | )
204| 1| }
205| | ;
206| |
207| 1| if is_false {
208| 0| short_used_not_covered_closure_macro(0);
209| 0| short_used_not_covered_closure_line_break_no_block_embedded_branch(0);
210| 0| short_used_not_covered_closure_line_break_block_embedded_branch(0);
211| 1| }
212| 1| short_used_covered_closure_macro(0);
213| 1| short_used_covered_closure_line_break_no_block_embedded_branch(0);
214| 1| short_used_covered_closure_line_break_block_embedded_branch(0);
215| 1|}

View File

@ -14,15 +14,15 @@
14| |
15| |macro_rules! on_error {
16| | ($value:expr, $error_message:expr) => {
17| 0| $value.or_else(|e| {
18| 0| let message = format!($error_message, e);
19| 0| if message.len() > 0 {
20| 0| println!("{}", message);
21| 0| Ok(String::from("ok"))
17| | $value.or_else(|e| { // FIXME(85000): no coverage in closure macros
18| | let message = format!($error_message, e);
19| | if message.len() > 0 {
20| | println!("{}", message);
21| | Ok(String::from("ok"))
22| | } else {
23| 0| bail!("error");
23| | bail!("error");
24| | }
25| 0| })
25| | })
26| | };
27| |}
28| |

View File

@ -14,15 +14,15 @@
14| |
15| |macro_rules! on_error {
16| | ($value:expr, $error_message:expr) => {
17| 0| $value.or_else(|e| {
18| 0| let message = format!($error_message, e);
19| 0| if message.len() > 0 {
20| 0| println!("{}", message);
21| 0| Ok(String::from("ok"))
17| | $value.or_else(|e| { // FIXME(85000): no coverage in closure macros
18| | let message = format!($error_message, e);
19| | if message.len() > 0 {
20| | println!("{}", message);
21| | Ok(String::from("ok"))
22| | } else {
23| 0| bail!("error");
23| | bail!("error");
24| | }
25| 0| })
25| | })
26| | };
27| |}
28| |

View File

@ -11,8 +11,27 @@
11| | println!("called but not covered");
12| |}
13| |
14| 1|fn main() {
15| 1| do_not_add_coverage_1();
16| 1| do_not_add_coverage_2();
17| 1|}
14| |#[no_coverage]
15| |fn do_not_add_coverage_not_called() {
16| | println!("not called and not covered");
17| |}
18| |
19| 1|fn add_coverage_1() {
20| 1| println!("called and covered");
21| 1|}
22| |
23| 1|fn add_coverage_2() {
24| 1| println!("called and covered");
25| 1|}
26| |
27| 0|fn add_coverage_not_called() {
28| 0| println!("not called but covered");
29| 0|}
30| |
31| 1|fn main() {
32| 1| do_not_add_coverage_1();
33| 1| do_not_add_coverage_2();
34| 1| add_coverage_1();
35| 1| add_coverage_2();
36| 1|}

View File

@ -29,22 +29,4 @@
29| |// 2. Since the `panic_unwind.rs` test is allowed to unwind, it is also allowed to execute the
30| |// normal program exit cleanup, including writing out the current values of the coverage
31| |// counters.
32| |// 3. The coverage results show (interestingly) that the `panic!()` call did execute, but it does
33| |// not show coverage of the `if countdown == 1` branch in `main()` that calls
34| |// `might_panic(true)` (causing the call to `panic!()`).
35| |// 4. The reason `main()`s `if countdown == 1` branch, calling `might_panic(true)`, appears
36| |// "uncovered" is, InstrumentCoverage (intentionally) treats `TerminatorKind::Call` terminators
37| |// as non-branching, because when a program executes normally, they always are. Errors handled
38| |// via the try `?` operator produce error handling branches that *are* treated as branches in
39| |// coverage results. By treating calls without try `?` operators as non-branching (assumed to
40| |// return normally and continue) the coverage graph can be simplified, producing smaller,
41| |// faster binaries, and cleaner coverage results.
42| |// 5. The reason the coverage results actually show `panic!()` was called is most likely because
43| |// `panic!()` is a macro, not a simple function call, and there are other `Statement`s and/or
44| |// `Terminator`s that execute with a coverage counter before the panic and unwind occur.
45| |// 6. Since the common practice is not to use `panic!()` for error handling, the coverage
46| |// implementation avoids incurring an additional cost (in program size and execution time) to
47| |// improve coverage results for an event that is generally not "supposed" to happen.
48| |// 7. FIXME(#78544): This issue describes a feature request for a proposed option to enable
49| |// more accurate coverage results for tests that intentionally panic.

View File

@ -14,9 +14,9 @@ fn non_async_func() {
}
}
// FIXME(#83985): The auto-generated closure in an async function is failing to include
// the println!() and `let` assignment lines in the coverage code region(s), as it does in the
// non-async function above, unless the `println!()` is inside a covered block.
async fn async_func() {
println!("async_func was covered");
let b = true;
@ -25,9 +25,9 @@ async fn async_func() {
}
}
// FIXME(#83985): As above, this async function only has the `println!()` macro call, which is not
// showing coverage, so the entire async closure _appears_ uncovered; but this is not exactly true.
// It's only certain kinds of lines and/or their context that results in missing coverage.
async fn async_func_just_println() {
println!("async_func_just_println was covered");
}
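
The FIXME notes above say the async body's `println!()` and `let` lines drop out of the coverage regions unless the macro call sits inside a covered block. A minimal standalone sketch of that wrapped-in-a-block shape (not taken from this commit; the tiny `block_on` is an invented stand-in so the snippet runs without external crates and is not how these coverage tests drive their futures):

use std::future::Future;
use std::pin::Pin;
use std::ptr;
use std::task::{Context, Poll, RawWaker, RawWakerVTable, Waker};

// Mirrors the pattern described in FIXME(#83985): the macro call sits inside an explicit
// block, which is the case the comment says does get reported as covered.
async fn async_func_with_block() {
    {
        println!("expected to show up as covered");
    }
}

// Hand-rolled single-poll executor so the sketch is self-contained.
fn block_on<F: Future>(mut fut: F) -> F::Output {
    fn raw_waker() -> RawWaker {
        fn no_op(_: *const ()) {}
        fn clone(_: *const ()) -> RawWaker {
            raw_waker()
        }
        let vtable = &RawWakerVTable::new(clone, no_op, no_op, no_op);
        RawWaker::new(ptr::null(), vtable)
    }
    let waker = unsafe { Waker::from_raw(raw_waker()) };
    let mut cx = Context::from_waker(&waker);
    // Safe: `fut` is a local that is never moved again after being pinned.
    let mut fut = unsafe { Pin::new_unchecked(&mut fut) };
    match fut.as_mut().poll(&mut cx) {
        Poll::Ready(out) => out,
        Poll::Pending => panic!("no await points, so the future finishes in one poll"),
    }
}

fn main() {
    block_on(async_func_with_block());
}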

View File

@ -130,14 +130,14 @@ fn main() {
let mut countdown = 10;
let _short_unused_closure = | _unused_arg: u8 | countdown += 1;
// Macros can sometimes confuse the coverage results. Compare this next assignment, with an
// unused closure that invokes the `println!()` macro, with the closure assignment above, that
// does not use a macro. The closure above correctly shows `0` executions.
let _short_unused_closure = | _unused_arg: u8 | println!("not called");
// The closure assignment above is executed, with a line count of `1`, but the `println!()`
// could not have been called, and yet, there is no indication that it wasn't...
// ...but adding block braces gives the expected result, showing the block was not executed.
let short_used_covered_closure_macro = | used_arg: u8 | println!("called");
let short_used_not_covered_closure_macro = | used_arg: u8 | println!("not called");
let _short_unused_closure_macro = | _unused_arg: u8 | println!("not called");
let _short_unused_closure_block = | _unused_arg: u8 | { println!("not called") };
let _shortish_unused_closure = | _unused_arg: u8 | {
@ -152,4 +152,64 @@ fn main() {
_unused_arg: u8
| { println!("not called") }
;
let _short_unused_closure_line_break_no_block = | _unused_arg: u8 |
println!("not called")
;
let _short_unused_closure_line_break_no_block2 =
| _unused_arg: u8 |
println!(
"not called"
)
;
let short_used_not_covered_closure_line_break_no_block_embedded_branch =
| _unused_arg: u8 |
println!(
"not called: {}",
if is_true { "check" } else { "me" }
)
;
let short_used_not_covered_closure_line_break_block_embedded_branch =
| _unused_arg: u8 |
{
println!(
"not called: {}",
if is_true { "check" } else { "me" }
)
}
;
let short_used_covered_closure_line_break_no_block_embedded_branch =
| _unused_arg: u8 |
println!(
"not called: {}",
if is_true { "check" } else { "me" }
)
;
let short_used_covered_closure_line_break_block_embedded_branch =
| _unused_arg: u8 |
{
println!(
"not called: {}",
if is_true { "check" } else { "me" }
)
}
;
if is_false {
short_used_not_covered_closure_macro(0);
short_used_not_covered_closure_line_break_no_block_embedded_branch(0);
short_used_not_covered_closure_line_break_block_embedded_branch(0);
}
short_used_covered_closure_macro(0);
short_used_covered_closure_line_break_no_block_embedded_branch(0);
short_used_covered_closure_line_break_block_embedded_branch(0);
}
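
As the comments above explain, a bare `println!()` body can make an unused closure look executed, while adding block braces restores the expected `0` count. A minimal standalone contrast (not from this test; names are invented):

fn main() {
    // Bare macro-call body: per the comments above, the reported line count can suggest
    // execution even though the closure is never called.
    let _unused_macro_body = |_unused_arg: u8| println!("not called");
    // Block-braced body: the block gets its own region and is reported as not executed.
    let _unused_block_body = |_unused_arg: u8| { println!("not called") };
}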

View File

@ -14,7 +14,7 @@ macro_rules! bail {
macro_rules! on_error {
($value:expr, $error_message:expr) => {
$value.or_else(|e| {
$value.or_else(|e| { // FIXME(85000): no coverage in closure macros
let message = format!($error_message, e);
if message.len() > 0 {
println!("{}", message);

View File

@ -14,7 +14,7 @@ macro_rules! bail {
macro_rules! on_error {
($value:expr, $error_message:expr) => {
$value.or_else(|e| {
$value.or_else(|e| { // FIXME(85000): no coverage in closure macros
let message = format!($error_message, e);
if message.len() > 0 {
println!("{}", message);

View File

@ -11,7 +11,26 @@ fn do_not_add_coverage_2() {
println!("called but not covered");
}
#[no_coverage]
fn do_not_add_coverage_not_called() {
println!("not called and not covered");
}
fn add_coverage_1() {
println!("called and covered");
}
fn add_coverage_2() {
println!("called and covered");
}
fn add_coverage_not_called() {
println!("not called but covered");
}
fn main() {
do_not_add_coverage_1();
do_not_add_coverage_2();
add_coverage_1();
add_coverage_2();
}

View File

@ -29,21 +29,3 @@ fn main() -> Result<(), u8> {
// 2. Since the `panic_unwind.rs` test is allowed to unwind, it is also allowed to execute the
// normal program exit cleanup, including writing out the current values of the coverage
// counters.
// 3. The coverage results show (interestingly) that the `panic!()` call did execute, but it does
// not show coverage of the `if countdown == 1` branch in `main()` that calls
// `might_panic(true)` (causing the call to `panic!()`).
// 4. The reason `main()`s `if countdown == 1` branch, calling `might_panic(true)`, appears
// "uncovered" is, InstrumentCoverage (intentionally) treats `TerminatorKind::Call` terminators
// as non-branching, because when a program executes normally, they always are. Errors handled
// via the try `?` operator produce error handling branches that *are* treated as branches in
// coverage results. By treating calls without try `?` operators as non-branching (assumed to
// return normally and continue) the coverage graph can be simplified, producing smaller,
// faster binaries, and cleaner coverage results.
// 5. The reason the coverage results actually show `panic!()` was called is most likely because
// `panic!()` is a macro, not a simple function call, and there are other `Statement`s and/or
// `Terminator`s that execute with a coverage counter before the panic and unwind occur.
// 6. Since the common practice is not to use `panic!()` for error handling, the coverage
// implementation avoids incurring an additional cost (in program size and execution time) to
// improve coverage results for an event that is generally not "supposed" to happen.
// 7. FIXME(#78544): This issue describes a feature request for a proposed option to enable
// more accurate coverage results for tests that intentionally panic.
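
The notes being removed here hinge on one distinction: a call whose result is propagated with the try `?` operator contributes an explicit error-handling branch to the coverage graph, while a plain call terminator is treated as non-branching. A minimal standalone sketch of that contrast (not taken from this test):

fn might_fail(fail: bool) -> Result<(), u8> {
    if fail { Err(1) } else { Ok(()) }
}

fn propagates() -> Result<(), u8> {
    // The `?` operator expands to a match on the Result, so the error arm is a real
    // branch in the coverage graph and is reported as covered or uncovered on its own.
    might_fail(false)?;
    Ok(())
}

fn plain_call() {
    // A bare call is a `TerminatorKind::Call`; per the notes above, InstrumentCoverage
    // treats it as non-branching (assumed to return normally and continue).
    let _ = might_fail(false);
}

fn main() -> Result<(), u8> {
    propagates()?;
    plain_call();
    Ok(())
}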

View File

@ -296,7 +296,7 @@ error[E0080]: could not evaluate static initializer
--> $DIR/ub-wide-ptr.rs:135:5
|
LL | mem::transmute::<_, &dyn Trait>((&92u8, 0usize))
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ inbounds test failed: 0x0 is not a valid pointer
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ null pointer is not allowed for this operation
error[E0080]: could not evaluate static initializer
--> $DIR/ub-wide-ptr.rs:139:5

View File

@ -296,7 +296,7 @@ error[E0080]: could not evaluate static initializer
--> $DIR/ub-wide-ptr.rs:135:5
|
LL | mem::transmute::<_, &dyn Trait>((&92u8, 0usize))
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ inbounds test failed: 0x0 is not a valid pointer
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ null pointer is not allowed for this operation
error[E0080]: could not evaluate static initializer
--> $DIR/ub-wide-ptr.rs:139:5

View File

@ -74,7 +74,7 @@ error: any use of this value will cause an error
LL | unsafe { intrinsics::ptr_offset_from(self, origin) }
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
| |
| inbounds test failed: 0x0 is not a valid pointer
| null pointer is not allowed for this operation
| inside `ptr::const_ptr::<impl *const u8>::offset_from` at $SRC_DIR/core/src/ptr/const_ptr.rs:LL:COL
| inside `OFFSET_FROM_NULL` at $DIR/offset_from_ub.rs:36:14
|

View File

@ -23,7 +23,7 @@ error: any use of this value will cause an error
LL | unsafe { intrinsics::offset(self, count) }
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
| |
| inbounds test failed: pointer must be in-bounds at offset 2, but is outside bounds of allocN which has size 1
| pointer arithmetic failed: pointer must be in-bounds at offset 2, but is outside bounds of allocN which has size 1
| inside `ptr::const_ptr::<impl *const u8>::offset` at $SRC_DIR/core/src/ptr/const_ptr.rs:LL:COL
| inside `AFTER_END` at $DIR/offset_ub.rs:7:43
|
@ -41,7 +41,7 @@ error: any use of this value will cause an error
LL | unsafe { intrinsics::offset(self, count) }
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
| |
| inbounds test failed: pointer must be in-bounds at offset 101, but is outside bounds of allocN which has size 100
| pointer arithmetic failed: pointer must be in-bounds at offset 101, but is outside bounds of allocN which has size 100
| inside `ptr::const_ptr::<impl *const u8>::offset` at $SRC_DIR/core/src/ptr/const_ptr.rs:LL:COL
| inside `AFTER_ARRAY` at $DIR/offset_ub.rs:8:45
|
@ -131,7 +131,7 @@ error: any use of this value will cause an error
LL | unsafe { intrinsics::offset(self, count) }
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
| |
| inbounds test failed: pointer must be in-bounds at offset 1, but is outside bounds of allocN which has size 0
| pointer arithmetic failed: pointer must be in-bounds at offset 1, but is outside bounds of allocN which has size 0
| inside `ptr::const_ptr::<impl *const u8>::offset` at $SRC_DIR/core/src/ptr/const_ptr.rs:LL:COL
| inside `ZERO_SIZED_ALLOC` at $DIR/offset_ub.rs:15:50
|
@ -167,7 +167,7 @@ error: any use of this value will cause an error
LL | unsafe { intrinsics::offset(self, count) }
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
| |
| inbounds test failed: 0x0 is not a valid pointer
| pointer arithmetic failed: 0x0 is not a valid pointer
| inside `ptr::const_ptr::<impl *const u8>::offset` at $SRC_DIR/core/src/ptr/const_ptr.rs:LL:COL
| inside `NULL_OFFSET_ZERO` at $DIR/offset_ub.rs:19:50
|
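
These expected-output updates only rename the message prefix ("inbounds test failed" becomes "pointer arithmetic failed" or "null pointer is not allowed for this operation", depending on the operation); the failing constants themselves are unchanged. A rough sketch of the kind of constant that trips the pointer-arithmetic check (an illustration, not the actual offset_ub.rs test):

// Not the actual offset_ub.rs test: offsetting a 1-byte allocation to offset 2 reproduces
// the "must be in-bounds at offset 2, but is outside bounds of ... which has size 1" case.
// On recent compilers this is a hard E0080 error; on nightlies from this PR's era it
// surfaced as the "any use of this value will cause an error" lint shown above, and the
// const `offset` call may also have needed a feature gate (assumption).
const AFTER_END: *const u8 = unsafe { (&1u8 as *const u8).offset(2) };

fn main() {
    let _ = AFTER_END;
}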

View File

@ -4,7 +4,7 @@ error: any use of this value will cause an error
LL | unsafe { intrinsics::offset(self, count) }
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
| |
| inbounds test failed: pointer must be in-bounds at offset $TWO_WORDS, but is outside bounds of alloc2 which has size $WORD
| pointer arithmetic failed: pointer must be in-bounds at offset $TWO_WORDS, but is outside bounds of alloc2 which has size $WORD
| inside `ptr::const_ptr::<impl *const usize>::offset` at $SRC_DIR/core/src/ptr/const_ptr.rs:LL:COL
| inside `_` at $DIR/ptr_comparisons.rs:61:34
|

View File

@ -85,7 +85,9 @@ fn main() {
let cargo = &Path::new(cargo);
for test in TEST_REPOS.iter().rev() {
test_repo(cargo, out_dir, test);
if args[3..].is_empty() || args[3..].iter().any(|s| s.contains(test.name)) {
test_repo(cargo, out_dir, test);
}
}
}
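
The added guard keeps the old behaviour when no extra arguments reach the tool (every repo is tested) and otherwise runs only the repos whose name is mentioned by one of `args[3..]`, which is presumably where the `--test-args` values from this rollup's cargotest change end up. A standalone sketch of that filter rule (illustrative names, not the cargotest source):

// Mirrors the direction of the check above: an argument matches when it contains the
// repo name, and an empty filter list means "run everything".
fn should_test(repo_name: &str, filters: &[String]) -> bool {
    filters.is_empty() || filters.iter().any(|s| s.contains(repo_name))
}

fn main() {
    let filters = vec![String::from("repo-a")];
    for name in &["repo-a", "repo-b"] {
        println!("{}: {}", name, should_test(name, &filters));
    }
}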