interpret: support for per-byte provenance

Ralf Jung 2022-11-06 13:00:09 +01:00
parent 452cf4f710
commit 2cef9e3d19
18 changed files with 506 additions and 291 deletions
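The core of the change: provenance is no longer forced to pointer-size granularity. An allocation now tracks whole-pointer provenance and single-byte provenance in two disjoint sorted maps (see the new provenance_map.rs below). The following is a minimal standalone sketch of that lookup scheme, using BTreeMap and hard-coded 8-byte pointers rather than the actual rustc types; names here are illustrative only.

// Standalone sketch (not the rustc types): two disjoint sorted maps, assuming 8-byte pointers.
// A whole pointer costs one `ptrs` entry; a byte that lost its neighbours gets a `bytes` entry.
use std::collections::BTreeMap;

struct ProvenanceMapSketch {
    /// Entry at `offset` covers the whole pointer `offset..offset + 8`;
    /// entries are at least 8 bytes apart.
    ptrs: BTreeMap<u64, u32>,
    /// Entry at `offset` covers only that single byte; disjoint from `ptrs`.
    bytes: BTreeMap<u64, u32>,
}

impl ProvenanceMapSketch {
    /// Provenance of a single byte: a pointer starting up to 7 bytes earlier
    /// still covers it; otherwise fall back to the per-byte map.
    fn get(&self, offset: u64) -> Option<u32> {
        let earliest = offset.saturating_sub(7);
        self.ptrs
            .range(earliest..=offset)
            .next()
            .map(|(_, &prov)| prov)
            .or_else(|| self.bytes.get(&offset).copied())
    }
}

fn main() {
    let mut map = ProvenanceMapSketch { ptrs: BTreeMap::new(), bytes: BTreeMap::new() };
    map.ptrs.insert(8, 42); // a pointer with provenance 42 stored at offsets 8..16
    map.bytes.insert(20, 7); // a lone byte carrying provenance 7
    assert_eq!(map.get(12), Some(42)); // byte 12 is covered by the pointer at 8..16
    assert_eq!(map.get(20), Some(7));
    assert_eq!(map.get(16), None);
}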

View File

@ -26,7 +26,7 @@ use std::ops::Range;
pub fn const_alloc_to_llvm<'ll>(cx: &CodegenCx<'ll, '_>, alloc: ConstAllocation<'_>) -> &'ll Value {
let alloc = alloc.inner();
let mut llvals = Vec::with_capacity(alloc.provenance().len() + 1);
let mut llvals = Vec::with_capacity(alloc.provenance().ptrs().len() + 1);
let dl = cx.data_layout();
let pointer_size = dl.pointer_size.bytes() as usize;
@ -78,7 +78,7 @@ pub fn const_alloc_to_llvm<'ll>(cx: &CodegenCx<'ll, '_>, alloc: ConstAllocation<
}
let mut next_offset = 0;
for &(offset, alloc_id) in alloc.provenance().iter() {
for &(offset, alloc_id) in alloc.provenance().ptrs().iter() {
let offset = offset.bytes();
assert_eq!(offset as usize as u64, offset);
let offset = offset as usize;
@ -489,7 +489,7 @@ impl<'ll> StaticMethods for CodegenCx<'ll, '_> {
// happens to be zero. Instead, we should only check the value of defined bytes
// and set all undefined bytes to zero if this allocation is headed for the
// BSS.
let all_bytes_are_zero = alloc.provenance().is_empty()
let all_bytes_are_zero = alloc.provenance().ptrs().is_empty()
&& alloc
.inspect_with_uninit_and_ptr_outside_interpreter(0..alloc.len())
.iter()
@ -513,7 +513,7 @@ impl<'ll> StaticMethods for CodegenCx<'ll, '_> {
section.as_str().as_ptr().cast(),
section.as_str().len() as c_uint,
);
assert!(alloc.provenance().is_empty());
assert!(alloc.provenance().ptrs().is_empty());
// The `inspect` method is okay here because we checked for provenance, and
// because we are doing this access to inspect the final interpreter state (not

View File

@ -134,7 +134,7 @@ fn intern_shallow<'rt, 'mir, 'tcx, M: CompileTimeMachine<'mir, 'tcx, const_eval:
alloc.mutability = Mutability::Not;
};
// link the alloc id to the actual allocation
leftover_allocations.extend(alloc.provenance().iter().map(|&(_, alloc_id)| alloc_id));
leftover_allocations.extend(alloc.provenance().ptrs().iter().map(|&(_, alloc_id)| alloc_id));
let alloc = tcx.intern_const_alloc(alloc);
tcx.set_alloc_id_memory(alloc_id, alloc);
None
@ -439,7 +439,7 @@ pub fn intern_const_alloc_recursive<
}
let alloc = tcx.intern_const_alloc(alloc);
tcx.set_alloc_id_memory(alloc_id, alloc);
for &(_, alloc_id) in alloc.inner().provenance().iter() {
for &(_, alloc_id) in alloc.inner().provenance().ptrs().iter() {
if leftover_allocations.insert(alloc_id) {
todo.push(alloc_id);
}

View File

@ -302,8 +302,6 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
.into());
};
debug!(?alloc);
if alloc.mutability == Mutability::Not {
throw_ub_format!("deallocating immutable allocation {alloc_id:?}");
}
@ -797,7 +795,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// This is a new allocation, add the allocations it points to to `todo`.
if let Some((_, alloc)) = self.memory.alloc_map.get(id) {
todo.extend(
alloc.provenance().values().filter_map(|prov| prov.get_alloc_id()),
alloc.provenance().provenances().filter_map(|prov| prov.get_alloc_id()),
);
}
}
@ -833,7 +831,8 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> std::fmt::Debug for DumpAllocs<'a,
allocs_to_print: &mut VecDeque<AllocId>,
alloc: &Allocation<Prov, Extra>,
) -> std::fmt::Result {
for alloc_id in alloc.provenance().values().filter_map(|prov| prov.get_alloc_id()) {
for alloc_id in alloc.provenance().provenances().filter_map(|prov| prov.get_alloc_id())
{
allocs_to_print.push_back(alloc_id);
}
write!(fmt, "{}", display_allocation(tcx, alloc))
@ -962,7 +961,7 @@ impl<'tcx, 'a, Prov: Provenance, Extra> AllocRef<'a, 'tcx, Prov, Extra> {
/// Returns whether the allocation has provenance anywhere in the range of the `AllocRef`.
pub(crate) fn has_provenance(&self) -> bool {
self.alloc.range_has_provenance(&self.tcx, self.range)
!self.alloc.provenance().range_empty(self.range, &self.tcx)
}
}
@ -1060,7 +1059,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// Source alloc preparations and access hooks.
let Some((src_alloc_id, src_offset, src_prov)) = src_parts else {
// Zero-sized *source*, that means dst is also zero-sized and we have nothing to do.
// Zero-sized *source*, that means dest is also zero-sized and we have nothing to do.
return Ok(());
};
let src_alloc = self.get_alloc_raw(src_alloc_id)?;
@ -1079,22 +1078,18 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
return Ok(());
};
// Checks provenance edges on the src, which needs to happen before
// `prepare_provenance_copy`.
if src_alloc.range_has_provenance(&tcx, alloc_range(src_range.start, Size::ZERO)) {
throw_unsup!(PartialPointerCopy(Pointer::new(src_alloc_id, src_range.start)));
}
if src_alloc.range_has_provenance(&tcx, alloc_range(src_range.end(), Size::ZERO)) {
throw_unsup!(PartialPointerCopy(Pointer::new(src_alloc_id, src_range.end())));
}
// Prepare getting source provenance.
let src_bytes = src_alloc.get_bytes_unchecked(src_range).as_ptr(); // raw ptr, so we can also get a ptr to the destination allocation
// first copy the provenance to a temporary buffer, because
// `get_bytes_mut` will clear the provenance, which is correct,
// since we don't want to keep any provenance at the target.
let provenance =
src_alloc.prepare_provenance_copy(self, src_range, dest_offset, num_copies);
// This will also error if copying partial provenance is not supported.
let provenance = src_alloc
.provenance()
.prepare_copy(src_range, dest_offset, num_copies, self)
.map_err(|e| e.to_interp_error(dest_alloc_id))?;
// Prepare a copy of the initialization mask.
let compressed = src_alloc.compress_uninit_range(src_range);
let init = src_alloc.compress_uninit_range(src_range);
// Destination alloc preparations and access hooks.
let (dest_alloc, extra) = self.get_alloc_raw_mut(dest_alloc_id)?;
@ -1111,7 +1106,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
.map_err(|e| e.to_interp_error(dest_alloc_id))?
.as_mut_ptr();
if compressed.no_bytes_init() {
if init.no_bytes_init() {
// Fast path: If all bytes are `uninit` then there is nothing to copy. The target range
// is marked as uninitialized but we otherwise omit changing the byte representation which may
// be arbitrary for uninitialized bytes.
@ -1161,12 +1156,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// now fill in all the "init" data
dest_alloc.mark_compressed_init_range(
&compressed,
&init,
alloc_range(dest_offset, size), // just a single copy (i.e., not full `dest_range`)
num_copies,
);
// copy the provenance to the destination
dest_alloc.mark_provenance_range(provenance);
dest_alloc.provenance_apply_copy(provenance);
Ok(())
}

View File

@ -159,7 +159,7 @@ fn maybe_check_static_with_link_section(tcx: TyCtxt<'_>, id: LocalDefId) {
// the consumer's responsibility to ensure all bytes that have been read
// have defined values.
if let Ok(alloc) = tcx.eval_static_initializer(id.to_def_id())
&& alloc.inner().provenance().len() != 0
&& alloc.inner().provenance().ptrs().len() != 0
{
let msg = "statics with a custom `#[link_section]` must be a \
simple list of bytes on the wasm target with no \

View File

@ -1,16 +1,17 @@
//! The virtual memory representation of the MIR interpreter.
mod provenance_map;
use std::borrow::Cow;
use std::convert::{TryFrom, TryInto};
use std::fmt;
use std::hash;
use std::iter;
use std::ops::{Deref, Range};
use std::ops::Range;
use std::ptr;
use rustc_ast::Mutability;
use rustc_data_structures::intern::Interned;
use rustc_data_structures::sorted_map::SortedMap;
use rustc_span::DUMMY_SP;
use rustc_target::abi::{Align, HasDataLayout, Size};
@ -20,6 +21,7 @@ use super::{
UnsupportedOpInfo,
};
use crate::ty;
use provenance_map::*;
/// This type represents an Allocation in the Miri/CTFE core engine.
///
@ -271,10 +273,10 @@ impl Allocation {
) -> Result<Allocation<Prov, Extra>, Err> {
// Compute new pointer provenance, which also adjusts the bytes.
let mut bytes = self.bytes;
let mut new_provenance = Vec::with_capacity(self.provenance.0.len());
let mut new_provenance = Vec::with_capacity(self.provenance.ptrs().len());
let ptr_size = cx.data_layout().pointer_size.bytes_usize();
let endian = cx.data_layout().endian;
for &(offset, alloc_id) in self.provenance.iter() {
for &(offset, alloc_id) in self.provenance.ptrs().iter() {
let idx = offset.bytes_usize();
let ptr_bytes = &mut bytes[idx..idx + ptr_size];
let bits = read_target_uint(endian, ptr_bytes).unwrap();
@ -286,7 +288,7 @@ impl Allocation {
// Create allocation.
Ok(Allocation {
bytes,
provenance: ProvenanceMap::from_presorted(new_provenance),
provenance: ProvenanceMap::from_presorted_ptrs(new_provenance),
init_mask: self.init_mask,
align: self.align,
mutability: self.mutability,
@ -351,7 +353,7 @@ impl<Prov: Provenance, Extra> Allocation<Prov, Extra> {
) -> AllocResult<&[u8]> {
self.check_init(range)?;
if !Prov::OFFSET_IS_ADDR {
if self.range_has_provenance(cx, range) {
if !self.provenance.range_empty(range, cx) {
return Err(AllocError::ReadPointerAsBytes);
}
}
@ -370,7 +372,7 @@ impl<Prov: Provenance, Extra> Allocation<Prov, Extra> {
range: AllocRange,
) -> AllocResult<&mut [u8]> {
self.mark_init(range, true);
self.clear_provenance(cx, range)?;
self.provenance.clear(range, cx)?;
Ok(&mut self.bytes[range.start.bytes_usize()..range.end().bytes_usize()])
}
@ -382,7 +384,7 @@ impl<Prov: Provenance, Extra> Allocation<Prov, Extra> {
range: AllocRange,
) -> AllocResult<*mut [u8]> {
self.mark_init(range, true);
self.clear_provenance(cx, range)?;
self.provenance.clear(range, cx)?;
assert!(range.end().bytes_usize() <= self.bytes.len()); // need to do our own bounds-check
let begin_ptr = self.bytes.as_mut_ptr().wrapping_add(range.start.bytes_usize());
@ -423,7 +425,7 @@ impl<Prov: Provenance, Extra> Allocation<Prov, Extra> {
// When reading data with provenance, the easy case is finding provenance exactly where we
// are reading, then we can put data and provenance back together and return that.
if let Some(&prov) = self.provenance.get(&range.start) {
if let Some(prov) = self.provenance.get_ptr(range.start) {
// Now we can return the bits, with their appropriate provenance.
let ptr = Pointer::new(prov, Size::from_bytes(bits));
return Ok(Scalar::from_pointer(ptr, cx));
@ -431,10 +433,9 @@ impl<Prov: Provenance, Extra> Allocation<Prov, Extra> {
// If we can work on pointers byte-wise, join the byte-wise provenances.
if Prov::OFFSET_IS_ADDR {
let mut prov = self.offset_get_provenance(cx, range.start);
for offset in 1..range.size.bytes() {
let this_prov =
self.offset_get_provenance(cx, range.start + Size::from_bytes(offset));
let mut prov = self.provenance.get(range.start, cx);
for offset in Size::from_bytes(1)..range.size {
let this_prov = self.provenance.get(range.start + offset, cx);
prov = Prov::join(prov, this_prov);
}
// Now use this provenance.
@ -452,7 +453,7 @@ impl<Prov: Provenance, Extra> Allocation<Prov, Extra> {
// Fallback path for when we cannot treat provenance bytewise or ignore it.
assert!(!Prov::OFFSET_IS_ADDR);
if self.range_has_provenance(cx, range) {
if !self.provenance.range_empty(range, cx) {
return Err(AllocError::ReadPointerAsBytes);
}
// There is no provenance, we can just return the bits.
@ -466,7 +467,6 @@ impl<Prov: Provenance, Extra> Allocation<Prov, Extra> {
///
/// It is the caller's responsibility to check bounds and alignment beforehand.
/// Most likely, you want to call `InterpCx::write_scalar` instead of this method.
#[instrument(skip(self, cx), level = "debug")]
pub fn write_scalar(
&mut self,
cx: &impl HasDataLayout,
@ -491,7 +491,8 @@ impl<Prov: Provenance, Extra> Allocation<Prov, Extra> {
// See if we have to also store some provenance.
if let Some(provenance) = provenance {
self.provenance.0.insert(range.start, provenance);
assert_eq!(range.size, cx.data_layout().pointer_size);
self.provenance.insert_ptr(range.start, provenance, cx);
}
Ok(())
@ -500,171 +501,18 @@ impl<Prov: Provenance, Extra> Allocation<Prov, Extra> {
/// Write "uninit" to the given memory range.
pub fn write_uninit(&mut self, cx: &impl HasDataLayout, range: AllocRange) -> AllocResult {
self.mark_init(range, false);
self.clear_provenance(cx, range)?;
self.provenance.clear(range, cx)?;
return Ok(());
}
}
/// Provenance.
impl<Prov: Copy, Extra> Allocation<Prov, Extra> {
/// Returns all provenance overlapping with the given pointer-offset pair.
fn range_get_provenance(&self, cx: &impl HasDataLayout, range: AllocRange) -> &[(Size, Prov)] {
// We have to go back `pointer_size - 1` bytes, as that one would still overlap with
// the beginning of this range.
let start = range.start.bytes().saturating_sub(cx.data_layout().pointer_size.bytes() - 1);
self.provenance.range(Size::from_bytes(start)..range.end())
}
/// Get the provenance of a single byte.
fn offset_get_provenance(&self, cx: &impl HasDataLayout, offset: Size) -> Option<Prov> {
let prov = self.range_get_provenance(cx, alloc_range(offset, Size::from_bytes(1)));
assert!(prov.len() <= 1);
prov.first().map(|(_offset, prov)| *prov)
}
/// Returns whether this allocation has provenance overlapping with the given range.
///
/// Note: this function exists to allow `range_get_provenance` to be private, in order to somewhat
/// limit access to provenance outside of the `Allocation` abstraction.
///
pub fn range_has_provenance(&self, cx: &impl HasDataLayout, range: AllocRange) -> bool {
!self.range_get_provenance(cx, range).is_empty()
}
/// Removes all provenance inside the given range.
/// If there is provenance overlapping with the edges, it
/// is removed as well *and* the bytes it covers are marked as
/// uninitialized. This is a somewhat odd "spooky action at a distance",
/// but it allows strictly more code to run than if we would just error
/// immediately in that case.
fn clear_provenance(&mut self, cx: &impl HasDataLayout, range: AllocRange) -> AllocResult
where
Prov: Provenance,
{
// Find the start and end of the given range and its outermost provenance.
let (first, last) = {
// Find all provenance overlapping the given range.
let provenance = self.range_get_provenance(cx, range);
if provenance.is_empty() {
return Ok(());
}
(
provenance.first().unwrap().0,
provenance.last().unwrap().0 + cx.data_layout().pointer_size,
)
};
let start = range.start;
let end = range.end();
// We need to handle clearing the provenance from parts of a pointer.
// FIXME: Miri should preserve partial provenance; see
// https://github.com/rust-lang/miri/issues/2181.
if first < start {
if Prov::ERR_ON_PARTIAL_PTR_OVERWRITE {
return Err(AllocError::PartialPointerOverwrite(first));
}
warn!(
"Partial pointer overwrite! De-initializing memory at offsets {first:?}..{start:?}."
);
self.init_mask.set_range(first, start, false);
}
if last > end {
if Prov::ERR_ON_PARTIAL_PTR_OVERWRITE {
return Err(AllocError::PartialPointerOverwrite(
last - cx.data_layout().pointer_size,
));
}
warn!(
"Partial pointer overwrite! De-initializing memory at offsets {end:?}..{last:?}."
);
self.init_mask.set_range(end, last, false);
}
// Forget all the provenance.
// Since provenance do not overlap, we know that removing until `last` (exclusive) is fine,
// i.e., this will not remove any other provenance just after the ones we care about.
self.provenance.0.remove_range(first..last);
Ok(())
}
}
/// Stores the provenance information of pointers stored in memory.
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, TyEncodable, TyDecodable)]
pub struct ProvenanceMap<Prov = AllocId>(SortedMap<Size, Prov>);
impl<Prov> ProvenanceMap<Prov> {
pub fn new() -> Self {
ProvenanceMap(SortedMap::new())
}
// The caller must guarantee that the given provenance list is already sorted
// by address and contain no duplicates.
pub fn from_presorted(r: Vec<(Size, Prov)>) -> Self {
ProvenanceMap(SortedMap::from_presorted_elements(r))
}
}
impl<Prov> Deref for ProvenanceMap<Prov> {
type Target = SortedMap<Size, Prov>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
/// A partial, owned list of provenance to transfer into another allocation.
///
/// Offsets are already adjusted to the destination allocation.
pub struct AllocationProvenance<Prov> {
dest_provenance: Vec<(Size, Prov)>,
}
impl<Prov: Copy, Extra> Allocation<Prov, Extra> {
pub fn prepare_provenance_copy(
&self,
cx: &impl HasDataLayout,
src: AllocRange,
dest: Size,
count: u64,
) -> AllocationProvenance<Prov> {
let provenance = self.range_get_provenance(cx, src);
if provenance.is_empty() {
return AllocationProvenance { dest_provenance: Vec::new() };
}
let size = src.size;
let mut new_provenance = Vec::with_capacity(provenance.len() * (count as usize));
// If `count` is large, this is rather wasteful -- we are allocating a big array here, which
// is mostly filled with redundant information since it's just N copies of the same `Prov`s
// at slightly adjusted offsets. The reason we do this is so that in `mark_provenance_range`
// we can use `insert_presorted`. That wouldn't work with an `Iterator` that just produces
// the right sequence of provenance for all N copies.
for i in 0..count {
new_provenance.extend(provenance.iter().map(|&(offset, reloc)| {
// compute offset for current repetition
let dest_offset = dest + size * i; // `Size` operations
(
// shift offsets from source allocation to destination allocation
(offset + dest_offset) - src.start, // `Size` operations
reloc,
)
}));
}
AllocationProvenance { dest_provenance: new_provenance }
}
/// Applies a provenance copy.
/// The affected range, as defined in the parameters to `prepare_provenance_copy` is expected
/// The affected range, as defined in the parameters to `provenance().prepare_copy` is expected
/// to be clear of provenance.
///
/// This is dangerous to use as it can violate internal `Allocation` invariants!
/// It only exists to support an efficient implementation of `mem_copy_repeatedly`.
pub fn mark_provenance_range(&mut self, provenance: AllocationProvenance<Prov>) {
self.provenance.0.insert_presorted(provenance.dest_provenance);
pub fn provenance_apply_copy(&mut self, copy: ProvenanceCopy<Prov>) {
self.provenance.apply_copy(copy)
}
}

View File

@ -0,0 +1,274 @@
//! Store the provenance for each byte in the range, with a more efficient
//! representation for the common case where PTR_SIZE consecutive bytes have the same provenance.
use std::cmp;
use rustc_data_structures::sorted_map::SortedMap;
use rustc_target::abi::{HasDataLayout, Size};
use super::{alloc_range, AllocError, AllocId, AllocRange, AllocResult, Provenance};
/// Stores the provenance information of pointers stored in memory.
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, TyEncodable, TyDecodable)]
#[derive(HashStable)]
pub struct ProvenanceMap<Prov = AllocId> {
/// Provenance in this map applies from the given offset for an entire pointer-size worth of
/// bytes. Two entries in this map are always at least a pointer size apart.
ptrs: SortedMap<Size, Prov>,
/// Provenance in this map only applies to the given single byte.
/// This map is disjoint from the previous.
bytes: SortedMap<Size, Prov>,
}
impl<Prov> ProvenanceMap<Prov> {
pub fn new() -> Self {
ProvenanceMap { ptrs: SortedMap::new(), bytes: SortedMap::new() }
}
/// The caller must guarantee that the given provenance list is already sorted
/// by address and contains no duplicates.
pub fn from_presorted_ptrs(r: Vec<(Size, Prov)>) -> Self {
ProvenanceMap { ptrs: SortedMap::from_presorted_elements(r), bytes: SortedMap::new() }
}
}
impl ProvenanceMap {
/// Give access to the ptr-sized provenances (which can also be thought of as relocations, and
/// indeed that is how codegen treats them).
///
/// Only exposed with `AllocId` provenance, since it panics if there is bytewise provenance.
pub fn ptrs(&self) -> &SortedMap<Size, AllocId> {
assert!(self.bytes.is_empty());
&self.ptrs
}
}
impl<Prov: Provenance> ProvenanceMap<Prov> {
/// Returns all ptr-sized provenance in the given range.
/// If the range has length 0, returns provenance that crosses the edge between `start-1` and
/// `start`.
fn range_get_ptrs(&self, range: AllocRange, cx: &impl HasDataLayout) -> &[(Size, Prov)] {
// We have to go back `pointer_size - 1` bytes, as that one would still overlap with
// the beginning of this range.
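// For example, with 8-byte pointers, a query for a range starting at offset 10 also has to
// consider a pointer stored at offset 3, since it covers bytes 3..11.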
let adjusted_start = Size::from_bytes(
range.start.bytes().saturating_sub(cx.data_layout().pointer_size.bytes() - 1),
);
self.ptrs.range(adjusted_start..range.end())
}
/// Returns all byte-wise provenance in the given range.
fn range_get_bytes(&self, range: AllocRange) -> &[(Size, Prov)] {
self.bytes.range(range.start..range.end())
}
/// Get the provenance of a single byte.
pub fn get(&self, offset: Size, cx: &impl HasDataLayout) -> Option<Prov> {
let prov = self.range_get_ptrs(alloc_range(offset, Size::from_bytes(1)), cx);
debug_assert!(prov.len() <= 1);
if let Some(entry) = prov.first() {
// If it overlaps with this byte, it is on this byte.
debug_assert!(self.bytes.get(&offset).is_none());
Some(entry.1)
} else {
// Look up per-byte provenance.
self.bytes.get(&offset).copied()
}
}
/// Check if there is ptr-sized provenance at the given index.
/// Does not mean anything for bytewise provenance! But can be useful as an optimization.
pub fn get_ptr(&self, offset: Size) -> Option<Prov> {
self.ptrs.get(&offset).copied()
}
/// Returns whether the given range contains no provenance (neither ptr-sized nor bytewise).
///
/// Note: this function exists to allow `range_get_ptrs` and `range_get_bytes` to be private,
/// in order to somewhat limit access to provenance outside of the `Allocation` abstraction.
pub fn range_empty(&self, range: AllocRange, cx: &impl HasDataLayout) -> bool {
self.range_get_ptrs(range, cx).is_empty() && self.range_get_bytes(range).is_empty()
}
/// Yields all the provenances stored in this map.
pub fn provenances(&self) -> impl Iterator<Item = Prov> + '_ {
self.ptrs.values().chain(self.bytes.values()).copied()
}
pub fn insert_ptr(&mut self, offset: Size, prov: Prov, cx: &impl HasDataLayout) {
debug_assert!(self.range_empty(alloc_range(offset, cx.data_layout().pointer_size), cx));
self.ptrs.insert(offset, prov);
}
/// Removes all provenance inside the given range.
/// If there is provenance overlapping with the edges, this might result in an error.
pub fn clear(&mut self, range: AllocRange, cx: &impl HasDataLayout) -> AllocResult {
let start = range.start;
let end = range.end();
// Clear the bytewise part -- this is easy.
self.bytes.remove_range(start..end);
// For the ptr-sized part, find the first (inclusive) and last (exclusive) byte of
// provenance that overlaps with the given range.
let (first, last) = {
// Find all provenance overlapping the given range.
let provenance = self.range_get_ptrs(range, cx);
if provenance.is_empty() {
// No provenance in this range, we are done.
return Ok(());
}
(
provenance.first().unwrap().0,
provenance.last().unwrap().0 + cx.data_layout().pointer_size,
)
};
// We need to handle clearing the provenance from parts of a pointer.
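// For example (with 8-byte pointers and `OFFSET_IS_ADDR`): clearing bytes 10..12 while a
// pointer sits at offset 8 keeps its provenance as bytewise entries for bytes 8..10 and
// 12..16; only the cleared bytes 10..12 lose it.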
if first < start {
if !Prov::OFFSET_IS_ADDR {
// We can't split up the provenance into less than a pointer.
return Err(AllocError::PartialPointerOverwrite(first));
}
// Insert the remaining part in the bytewise provenance.
let prov = self.ptrs[&first];
for offset in first..start {
self.bytes.insert(offset, prov);
}
}
if last > end {
let begin_of_last = last - cx.data_layout().pointer_size;
if !Prov::OFFSET_IS_ADDR {
// We can't split up the provenance into less than a pointer.
return Err(AllocError::PartialPointerOverwrite(begin_of_last));
}
// Insert the remaining part in the bytewise provenance.
let prov = self.ptrs[&begin_of_last];
for offset in end..last {
self.bytes.insert(offset, prov);
}
}
// Forget all the provenance.
// Since provenance do not overlap, we know that removing until `last` (exclusive) is fine,
// i.e., this will not remove any other provenance just after the ones we care about.
self.ptrs.remove_range(first..last);
Ok(())
}
}
/// A partial, owned list of provenance to transfer into another allocation.
///
/// Offsets are already adjusted to the destination allocation.
pub struct ProvenanceCopy<Prov> {
dest_ptrs: Vec<(Size, Prov)>,
dest_bytes: Vec<(Size, Prov)>,
}
impl<Prov: Provenance> ProvenanceMap<Prov> {
#[instrument(skip(self, cx), level = "debug")]
pub fn prepare_copy(
&self,
src: AllocRange,
dest: Size,
count: u64,
cx: &impl HasDataLayout,
) -> AllocResult<ProvenanceCopy<Prov>> {
let shift_offset = move |idx, offset| {
// compute offset for current repetition
let dest_offset = dest + src.size * idx; // `Size` operations
// shift offsets from source allocation to destination allocation
(offset - src.start) + dest_offset // `Size` operations
};
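// For example, copying `src = 4..8` to `dest = 100` with `count = 3` maps a source offset
// of 5 to 101, 105, and 109 across the three repetitions.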
let ptr_size = cx.data_layout().pointer_size;
// # Pointer-sized provenances
// Get the provenances that are entirely within this range.
// (Different from `range_get_ptrs` which asks if they overlap the range.)
let ptrs = if src.size < ptr_size {
// This isn't even large enough to contain a pointer.
&[]
} else {
let adjusted_end =
Size::from_bytes(src.end().bytes().saturating_sub(ptr_size.bytes() - 1));
self.ptrs.range(src.start..adjusted_end)
};
// Buffer for the new list.
let mut dest_ptrs = Vec::with_capacity(ptrs.len() * (count as usize));
// If `count` is large, this is rather wasteful -- we are allocating a big array here, which
// is mostly filled with redundant information since it's just N copies of the same `Prov`s
// at slightly adjusted offsets. The reason we do this is so that in `apply_copy`
// we can use `insert_presorted`. That wouldn't work with an `Iterator` that just produces
// the right sequence of provenance for all N copies.
// Basically, this large array would have to be created anyway in the target allocation.
for i in 0..count {
dest_ptrs.extend(ptrs.iter().map(|&(offset, reloc)| (shift_offset(i, offset), reloc)));
}
// # Byte-sized provenances
let mut bytes = Vec::new();
// First, if there is a part of a pointer at the start, add that.
if let Some(entry) = self.range_get_ptrs(alloc_range(src.start, Size::ZERO), cx).first() {
if !Prov::OFFSET_IS_ADDR {
// We can't split up the provenance into less than a pointer.
return Err(AllocError::PartialPointerCopy(entry.0));
}
trace!("start overlapping entry: {entry:?}");
// For really small copies, make sure we don't run off the end of the `src` range.
let entry_end = cmp::min(entry.0 + ptr_size, src.end());
for offset in src.start..entry_end {
bytes.push((offset, entry.1));
}
} else {
trace!("no start overlapping entry");
}
// Then the main part, bytewise provenance from `self.bytes`.
bytes.extend(self.bytes.range(src.start..src.end()));
// And finally possibly parts of a pointer at the end.
if let Some(entry) = self.range_get_ptrs(alloc_range(src.end(), Size::ZERO), cx).first() {
if !Prov::OFFSET_IS_ADDR {
// We can't split up the provenance into less than a pointer.
return Err(AllocError::PartialPointerCopy(entry.0));
}
trace!("end overlapping entry: {entry:?}");
// For really small copies, make sure we don't start before `src` does.
let entry_start = cmp::max(entry.0, src.start);
for offset in entry_start..src.end() {
if bytes.last().map_or(true, |bytes_entry| bytes_entry.0 < offset) {
// The last entry, if it exists, has a lower offset than us.
bytes.push((offset, entry.1));
} else {
// There already is an entry for this offset in there! This can happen when the
// start and end range checks actually end up hitting the same pointer, so we
// already added this in the "pointer at the start" part above.
assert!(entry.0 <= src.start);
}
}
} else {
trace!("no end overlapping entry");
}
trace!("byte provenances: {bytes:?}");
// And again a buffer for the new list on the target side.
let mut dest_bytes = Vec::with_capacity(bytes.len() * (count as usize));
for i in 0..count {
dest_bytes
.extend(bytes.iter().map(|&(offset, reloc)| (shift_offset(i, offset), reloc)));
}
Ok(ProvenanceCopy { dest_ptrs, dest_bytes })
}
/// Applies a provenance copy.
/// The affected range, as defined in the parameters to `prepare_copy` is expected
/// to be clear of provenance.
///
/// This is dangerous to use as it can violate internal `Allocation` invariants!
/// It only exists to support an efficient implementation of `mem_copy_repeatedly`.
pub fn apply_copy(&mut self, copy: ProvenanceCopy<Prov>) {
self.ptrs.insert_presorted(copy.dest_ptrs);
self.bytes.insert_presorted(copy.dest_bytes);
}
}
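To make the offset arithmetic in `prepare_copy` concrete, here is a small self-contained model (plain vectors instead of `SortedMap`, hypothetical names) of how entries are shifted from the source range into `count` repetitions at the destination; `apply_copy` then just splices such a pre-shifted list in with `insert_presorted`.

// Toy model of the `prepare_copy` offset shifting; `u64` offsets and `u32`
// provenance stand in for `Size` and the real provenance types.
fn prepare_copy_model(
    src_entries: &[(u64, u32)], // (offset, provenance) pairs inside the source range
    src_start: u64,
    src_size: u64,
    dest: u64,
    count: u64,
) -> Vec<(u64, u32)> {
    let mut out = Vec::with_capacity(src_entries.len() * count as usize);
    for i in 0..count {
        // Same formula as `shift_offset` above: relative position in the source,
        // plus the start of the i-th copy at the destination.
        out.extend(
            src_entries.iter().map(|&(off, p)| ((off - src_start) + dest + src_size * i, p)),
        );
    }
    out
}

fn main() {
    // One provenance entry at source offset 5, source range 4..8, copied 3 times to offset 100.
    let copied = prepare_copy_model(&[(5, 7)], 4, 4, 100, 3);
    assert_eq!(copied, vec![(101, 7), (105, 7), (109, 7)]);
}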

View File

@ -127,8 +127,8 @@ pub use self::error::{
pub use self::value::{get_slice_bytes, ConstAlloc, ConstValue, Scalar};
pub use self::allocation::{
alloc_range, AllocRange, Allocation, ConstAllocation, InitChunk, InitChunkIter, InitMask,
ProvenanceMap,
alloc_range, AllocError, AllocRange, AllocResult, Allocation, ConstAllocation, InitChunk,
InitChunkIter, InitMask,
};
pub use self::pointer::{Pointer, PointerArithmetic, Provenance};

View File

@ -103,8 +103,7 @@ impl<T: HasDataLayout> PointerArithmetic for T {}
/// This trait abstracts over the kind of provenance that is associated with a `Pointer`. It is
/// mostly opaque; the `Machine` trait extends it with some more operations that also have access to
/// some global state.
/// We don't actually care about this `Debug` bound (we use `Provenance::fmt` to format the entire
/// pointer), but `derive` adds some unnecessary bounds.
/// The `Debug` rendering is used to display bare provenance, and for the default impl of `fmt`.
pub trait Provenance: Copy + fmt::Debug {
/// Says whether the `offset` field of `Pointer`s with this provenance is the actual physical address.
/// - If `false`, the offset *must* be relative. This means the bytes representing a pointer are
@ -115,14 +114,23 @@ pub trait Provenance: Copy + fmt::Debug {
/// pointer, and implement ptr-to-int transmutation by stripping provenance.
const OFFSET_IS_ADDR: bool;
/// We also use this trait to control whether to abort execution when a pointer is being partially overwritten
/// (this avoids a separate trait in `allocation.rs` just for this purpose).
const ERR_ON_PARTIAL_PTR_OVERWRITE: bool;
/// Determines how a pointer should be printed.
///
/// Default impl is only good for when `OFFSET_IS_ADDR == true`.
fn fmt(ptr: &Pointer<Self>, f: &mut fmt::Formatter<'_>) -> fmt::Result
where
Self: Sized;
Self: Sized,
{
assert!(Self::OFFSET_IS_ADDR);
let (prov, addr) = ptr.into_parts(); // address is absolute
write!(f, "{:#x}", addr.bytes())?;
if f.alternate() {
write!(f, "{prov:#?}")?;
} else {
write!(f, "{prov:?}")?;
}
Ok(())
}
/// If `OFFSET_IS_ADDR == false`, provenance must always be able to
/// identify the allocation this ptr points to (i.e., this must return `Some`).
@ -139,9 +147,6 @@ impl Provenance for AllocId {
// so ptr-to-int casts are not possible (since we do not know the global physical offset).
const OFFSET_IS_ADDR: bool = false;
// For now, do not allow this, so that we keep our options open.
const ERR_ON_PARTIAL_PTR_OVERWRITE: bool = true;
fn fmt(ptr: &Pointer<Self>, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// Forward `alternate` flag to `alloc_id` printing.
if f.alternate() {

View File

@ -685,7 +685,7 @@ pub fn write_allocations<'tcx>(
fn alloc_ids_from_alloc(
alloc: ConstAllocation<'_>,
) -> impl DoubleEndedIterator<Item = AllocId> + '_ {
alloc.inner().provenance().values().map(|id| *id)
alloc.inner().provenance().ptrs().values().map(|id| *id)
}
fn alloc_ids_from_const_val(val: ConstValue<'_>) -> impl Iterator<Item = AllocId> + '_ {
@ -882,7 +882,7 @@ fn write_allocation_bytes<'tcx, Prov: Provenance, Extra>(
if i != line_start {
write!(w, " ")?;
}
if let Some(&prov) = alloc.provenance().get(&i) {
if let Some(prov) = alloc.provenance().get_ptr(i) {
// Memory with provenance must be defined
assert!(alloc.init_mask().is_range_initialized(i, i + ptr_size).is_ok());
let j = i.bytes_usize();
@ -904,9 +904,9 @@ fn write_allocation_bytes<'tcx, Prov: Provenance, Extra>(
let overflow = ptr_size - remainder;
let remainder_width = provenance_width(remainder.bytes_usize()) - 2;
let overflow_width = provenance_width(overflow.bytes_usize() - 1) + 1;
ascii.push('╾');
for _ in 0..remainder.bytes() - 1 {
ascii.push('─');
ascii.push('╾'); // HEAVY LEFT AND LIGHT RIGHT
for _ in 1..remainder.bytes() {
ascii.push('─'); // LIGHT HORIZONTAL
}
if overflow_width > remainder_width && overflow_width >= target.len() {
// The case where the provenance fits into the part in the next line
@ -926,7 +926,7 @@ fn write_allocation_bytes<'tcx, Prov: Provenance, Extra>(
for _ in 0..overflow.bytes() - 1 {
ascii.push('─');
}
ascii.push('╼');
ascii.push('╼'); // LIGHT LEFT AND HEAVY RIGHT
i += ptr_size;
continue;
} else {
@ -941,6 +941,16 @@ fn write_allocation_bytes<'tcx, Prov: Provenance, Extra>(
ascii.push('╼');
i += ptr_size;
}
} else if let Some(prov) = alloc.provenance().get(i, &tcx) {
// Memory with provenance must be defined
assert!(alloc.init_mask().is_range_initialized(i, i + Size::from_bytes(1)).is_ok());
ascii.push('━'); // HEAVY HORIZONTAL
// We have two characters to display this, which is obviously not enough.
// Format is similar to "oversized" above.
let j = i.bytes_usize();
let c = alloc.inspect_with_uninit_and_ptr_outside_interpreter(j..j + 1)[0];
write!(w, "╾{:02x}{:#?} (1 ptr byte)╼", c, prov)?;
i += Size::from_bytes(1);
} else if alloc.init_mask().is_range_initialized(i, i + Size::from_bytes(1)).is_ok() {
let j = i.bytes_usize();

View File

@ -112,19 +112,6 @@ impl<'a> HashStable<StableHashingContext<'a>> for mir::interpret::AllocId {
}
}
// `Relocations` with default type parameters is a sorted map.
impl<'a, Prov> HashStable<StableHashingContext<'a>> for mir::interpret::ProvenanceMap<Prov>
where
Prov: HashStable<StableHashingContext<'a>>,
{
fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
self.len().hash_stable(hcx, hasher);
for reloc in self.iter() {
reloc.hash_stable(hcx, hasher);
}
}
}
impl<'a> ToStableHashKey<StableHashingContext<'a>> for region::Scope {
type KeyType = region::Scope;

View File

@ -456,7 +456,7 @@ fn collect_items_rec<'tcx>(
recursion_depth_reset = None;
if let Ok(alloc) = tcx.eval_static_initializer(def_id) {
for &id in alloc.inner().provenance().values() {
for &id in alloc.inner().provenance().ptrs().values() {
collect_miri(tcx, id, &mut neighbors);
}
}
@ -1404,7 +1404,7 @@ fn collect_miri<'tcx>(tcx: TyCtxt<'tcx>, alloc_id: AllocId, output: &mut MonoIte
}
GlobalAlloc::Memory(alloc) => {
trace!("collecting {:?} with {:#?}", alloc_id, alloc);
for &inner in alloc.inner().provenance().values() {
for &inner in alloc.inner().provenance().ptrs().values() {
rustc_data_structures::stack::ensure_sufficient_stack(|| {
collect_miri(tcx, inner, output);
});
@ -1443,7 +1443,7 @@ fn collect_const_value<'tcx>(
match value {
ConstValue::Scalar(Scalar::Ptr(ptr, _size)) => collect_miri(tcx, ptr.provenance, output),
ConstValue::Slice { data: alloc, start: _, end: _ } | ConstValue::ByRef { alloc, .. } => {
for &id in alloc.inner().provenance().values() {
for &id in alloc.inner().provenance().ptrs().values() {
collect_miri(tcx, id, output);
}
}

View File

@ -133,7 +133,7 @@ impl fmt::Display for MiriMemoryKind {
}
/// Pointer provenance.
#[derive(Debug, Clone, Copy)]
#[derive(Clone, Copy)]
pub enum Provenance {
Concrete {
alloc_id: AllocId,
@ -176,18 +176,9 @@ static_assert_size!(Pointer<Provenance>, 24);
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
static_assert_size!(Scalar<Provenance>, 32);
impl interpret::Provenance for Provenance {
/// We use absolute addresses in the `offset` of a `Pointer<Provenance>`.
const OFFSET_IS_ADDR: bool = true;
/// We cannot err on partial overwrites, it happens too often in practice (due to unions).
const ERR_ON_PARTIAL_PTR_OVERWRITE: bool = false;
fn fmt(ptr: &Pointer<Self>, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let (prov, addr) = ptr.into_parts(); // address is absolute
write!(f, "{:#x}", addr.bytes())?;
match prov {
impl fmt::Debug for Provenance {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Provenance::Concrete { alloc_id, sb } => {
// Forward `alternate` flag to `alloc_id` printing.
if f.alternate() {
@ -202,9 +193,13 @@ impl interpret::Provenance for Provenance {
write!(f, "[wildcard]")?;
}
}
Ok(())
}
}
impl interpret::Provenance for Provenance {
/// We use absolute addresses in the `offset` of a `Pointer<Provenance>`.
const OFFSET_IS_ADDR: bool = true;
fn get_alloc_id(self) -> Option<AllocId> {
match self {

View File

@ -127,7 +127,7 @@ impl VisitTags for Operand<Provenance> {
impl VisitTags for Allocation<Provenance, AllocExtra> {
fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) {
for (_size, prov) in self.provenance().iter() {
for prov in self.provenance().provenances() {
prov.visit_tags(visit);
}

View File

@ -1,21 +0,0 @@
//@normalize-stderr-test: "\+0x[48]" -> "+HALF_PTR"
#![allow(dead_code)]
// We use packed structs to get around alignment restrictions
#[repr(packed)]
struct Data {
pad: u8,
ptr: &'static i32,
}
static G: i32 = 0;
fn main() {
let mut d = Data { pad: 0, ptr: &G };
// Get a pointer to the beginning of the Data struct (one u8 byte, then the pointer bytes).
let d_alias = &mut d as *mut _ as *mut *const u8;
unsafe {
let _x = d_alias.read_unaligned(); //~ERROR: unable to copy parts of a pointer
}
}

View File

@ -1,14 +0,0 @@
error: unsupported operation: unable to copy parts of a pointer from memory at ALLOC+HALF_PTR
--> $DIR/copy_half_a_pointer.rs:LL:CC
|
LL | let _x = d_alias.read_unaligned();
| ^^^^^^^^^^^^^^^^^^^^^^^^ unable to copy parts of a pointer from memory at ALLOC+HALF_PTR
|
= help: this is likely not a bug in the program; it indicates that the program performed an operation that the interpreter does not support
= note: BACKTRACE:
= note: inside `main` at $DIR/copy_half_a_pointer.rs:LL:CC
note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
error: aborting due to previous error

View File

@ -2,16 +2,13 @@
//@compile-flags: -Zmiri-disable-alignment-check -Zmiri-disable-stacked-borrows -Zmiri-disable-validation
// Test what happens when we overwrite parts of a pointer.
// Also see <https://github.com/rust-lang/miri/issues/2181>.
fn main() {
let mut p = &42;
unsafe {
let ptr: *mut _ = &mut p;
*(ptr as *mut u8) = 123; // if we ever support 8 bit pointers, this is gonna cause
// "attempted to interpret some raw bytes as a pointer address" instead of
// "attempted to read undefined bytes"
*(ptr as *mut u8) = 123; // this removes provenance from one of the bytes, meaning the entire ptr is considered to have no provenance.
}
let x = *p; //~ ERROR: this operation requires initialized memory
let x = *p; //~ ERROR: no provenance
panic!("this should never print: {}", x);
}

View File

@ -1,8 +1,8 @@
error: Undefined Behavior: using uninitialized data, but this operation requires initialized memory
error: Undefined Behavior: dereferencing pointer failed: $HEX[noalloc] is a dangling pointer (it has no provenance)
--> $DIR/pointer_partial_overwrite.rs:LL:CC
|
LL | let x = *p;
| ^^ using uninitialized data, but this operation requires initialized memory
| ^^ dereferencing pointer failed: $HEX[noalloc] is a dangling pointer (it has no provenance)
|
= help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
= help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information

View File

@ -0,0 +1,139 @@
#![feature(strict_provenance)]
#![feature(pointer_byte_offsets)]
use std::{mem, ptr};
const PTR_SIZE: usize = mem::size_of::<&i32>();
fn main() {
basic();
partial_overwrite_then_restore();
bytewise_ptr_methods();
bytewise_custom_memcpy();
bytewise_custom_memcpy_chunked();
}
/// Some basic smoke tests for provenance.
fn basic() {
let x = &42;
let ptr = x as *const i32;
let addr: usize = unsafe { mem::transmute(ptr) }; // an integer without provenance
// But we can give provenance back via `with_addr`.
let ptr_back = ptr.with_addr(addr);
assert_eq!(unsafe { *ptr_back }, 42);
// It is preserved by MaybeUninit.
let addr_mu: mem::MaybeUninit<usize> = unsafe { mem::transmute(ptr) };
let ptr_back: *const i32 = unsafe { mem::transmute(addr_mu) };
assert_eq!(unsafe { *ptr_back }, 42);
}
/// Overwrite one byte of a pointer, then restore it.
fn partial_overwrite_then_restore() {
unsafe fn ptr_bytes<'x>(ptr: &'x mut *const i32) -> &'x mut [mem::MaybeUninit<u8>; PTR_SIZE] {
mem::transmute(ptr)
}
// Returns a value with the same provenance as `x` but 0 for the integer value.
// `x` must be initialized.
unsafe fn zero_with_provenance(x: mem::MaybeUninit<u8>) -> mem::MaybeUninit<u8> {
let ptr = [x; PTR_SIZE];
let ptr: *const i32 = mem::transmute(ptr);
let mut ptr = ptr.with_addr(0);
ptr_bytes(&mut ptr)[0]
}
unsafe {
let ptr = &42;
let mut ptr = ptr as *const i32;
// Get a bytewise view of the pointer.
let ptr_bytes = ptr_bytes(&mut ptr);
// The highest bytes must be 0 for this to work.
let hi = if cfg!(target_endian = "little") { ptr_bytes.len() - 1 } else { 0 };
assert_eq!(*ptr_bytes[hi].as_ptr().cast::<u8>(), 0);
// Overwrite provenance on the last byte.
ptr_bytes[hi] = mem::MaybeUninit::new(0);
// Restore it from another byte.
ptr_bytes[hi] = zero_with_provenance(ptr_bytes[1]);
// Now ptr should be good again.
assert_eq!(*ptr, 42);
}
}
fn bytewise_ptr_methods() {
let mut ptr1 = &1;
let mut ptr2 = &2;
// Swap them, bytewise.
unsafe {
ptr::swap_nonoverlapping(
&mut ptr1 as *mut _ as *mut mem::MaybeUninit<u8>,
&mut ptr2 as *mut _ as *mut mem::MaybeUninit<u8>,
mem::size_of::<&i32>(),
);
}
// Make sure they still work.
assert_eq!(*ptr1, 2);
assert_eq!(*ptr2, 1);
// TODO: also test ptr::swap, ptr::copy, ptr::copy_nonoverlapping.
}
fn bytewise_custom_memcpy() {
unsafe fn memcpy<T>(to: *mut T, from: *const T) {
let to = to.cast::<mem::MaybeUninit<u8>>();
let from = from.cast::<mem::MaybeUninit<u8>>();
for i in 0..mem::size_of::<T>() {
let b = from.add(i).read();
to.add(i).write(b);
}
}
let ptr1 = &1;
let mut ptr2 = &2;
// Copy, bytewise.
unsafe { memcpy(&mut ptr2, &ptr1) };
// Make sure they still work.
assert_eq!(*ptr1, 1);
assert_eq!(*ptr2, 1);
}
fn bytewise_custom_memcpy_chunked() {
unsafe fn memcpy<T>(to: *mut T, from: *const T) {
assert!(mem::size_of::<T>() % mem::size_of::<usize>() == 0);
let count = mem::size_of::<T>() / mem::size_of::<usize>();
let to = to.cast::<mem::MaybeUninit<usize>>();
let from = from.cast::<mem::MaybeUninit<usize>>();
for i in 0..count {
let b = from.add(i).read();
to.add(i).write(b);
}
}
// Prepare an array where pointers are stored at... interesting... offsets.
let mut data = [0usize; 2 * PTR_SIZE];
let mut offsets = vec![];
for i in 0..mem::size_of::<usize>() {
// We have 2*PTR_SIZE room for each of these pointers.
let base = i * 2 * PTR_SIZE;
// This one is mis-aligned by `i`.
let offset = base + i;
offsets.push(offset);
// Store it there.
unsafe { data.as_mut_ptr().byte_add(offset).cast::<&i32>().write_unaligned(&42) };
}
// Now memcpy that.
let mut data2 = [0usize; 2 * PTR_SIZE];
unsafe { memcpy(&mut data2, &data) };
// And check the result.
for &offset in &offsets {
let ptr = unsafe { data2.as_ptr().byte_add(offset).cast::<&i32>().read_unaligned() };
assert_eq!(*ptr, 42);
}
}