Centralize bounds, alignment and NULL checking for memory accesses in one function: memory.check_ptr_access

That function also takes care of converting a Scalar to a Pointer, should that be needed. Not all accesses need that, though: if the access has size 0, None is returned.
Everyone accessing memory based on a Scalar should use this method to get the Pointer they need.

All operations on the Allocation work on Pointer inputs and expect all the checks to have happened (and will ICE if the bounds are violated).
The operations on Memory work on Scalar inputs and do the checks themselves.

The only other public method to check pointers is memory.ptr_may_be_null, which is needed in a few places.
With this, we can make all the other methods (tests for a pointer being in-bounds and checking alignment) private helper methods, used to implement the two public methods.
That makes the public API surface much easier to use and harder to misuse.

While I am at it, this also removes the assumption that the vtable part of a `dyn Trait`-fat-pointer is a `Pointer` (as opposed to a pointer cast to an integer, stored as raw bits).
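As a usage sketch (hedged: `sptr`, `size`, and `align` stand in for whatever the caller has at hand; the shape mirrors the new `read_bytes` in the diff below), the pattern callers are expected to follow is:

    // `check_ptr_access` does the bounds, alignment and NULL checks in one place.
    // `None` means the access is zero-sized and nothing is left to do;
    // `Some(ptr)` is a fully checked `Pointer` for the actual access.
    let ptr = match self.check_ptr_access(sptr, size, align)? {
        Some(ptr) => ptr,
        None => return Ok(&[]), // zero-sized access
    };
    // `Allocation`-level operations may now assume the checks happened,
    // and will ICE rather than error if they did not.
    self.get(ptr.alloc_id)?.get_bytes(self, ptr, size)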
Ralf Jung 2019-06-23 14:26:36 +02:00
parent 305930cffe
commit c50b9d197f
8 changed files with 215 additions and 152 deletions


@@ -8,7 +8,7 @@ use crate::ty::layout::{Size, Align};
use syntax::ast::Mutability;
use std::{iter, fmt::{self, Display}};
use crate::mir;
use std::ops::{Deref, DerefMut};
use std::ops::{Range, Deref, DerefMut};
use rustc_data_structures::sorted_map::SortedMap;
use rustc_macros::HashStable;
use rustc_target::abi::HasDataLayout;
@@ -146,37 +146,30 @@ impl<Tag> Allocation<Tag> {
impl<'tcx> ::serialize::UseSpecializedDecodable for &'tcx Allocation {}
/// Alignment and bounds checks
impl<'tcx, Tag, Extra> Allocation<Tag, Extra> {
/// Checks if the pointer is "in-bounds". Notice that a pointer pointing at the end
/// of an allocation (i.e., at the first *inaccessible* location) *is* considered
/// in-bounds! This follows C's/LLVM's rules.
/// If you want to check bounds before doing a memory access, better use `check_bounds`.
fn check_bounds_ptr(
&self,
ptr: Pointer<Tag>,
msg: CheckInAllocMsg,
) -> InterpResult<'tcx> {
let allocation_size = self.bytes.len() as u64;
ptr.check_in_alloc(Size::from_bytes(allocation_size), msg)
}
/// Checks if the memory range beginning at `ptr` and of size `size` is "in-bounds".
#[inline(always)]
pub fn check_bounds(
&self,
cx: &impl HasDataLayout,
ptr: Pointer<Tag>,
size: Size,
msg: CheckInAllocMsg,
) -> InterpResult<'tcx> {
// if ptr.offset is in bounds, then so is ptr (because offset checks for overflow)
self.check_bounds_ptr(ptr.offset(size, cx)?, msg)
}
}
/// Byte accessors
impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
/// Just a small local helper function to avoid a bit of code repetition.
/// Returns the range of this allocation that the access covers.
#[inline]
fn check_bounds(
&self,
offset: Size,
size: Size
) -> Range<usize> {
let end = offset + size; // this does overflow checking
assert_eq!(
end.bytes() as usize as u64, end.bytes(),
"cannot handle this access on this host architecture"
);
let end = end.bytes() as usize;
assert!(
end <= self.bytes.len(),
"Out-of-bounds access at offset {}, size {} in allocation of size {}",
offset.bytes(), size.bytes(), self.bytes.len()
);
(offset.bytes() as usize)..end
}
/// The last argument controls whether we error out when there are undefined
/// or pointer bytes. You should never call this; call `get_bytes` or
/// `get_bytes_with_undef_and_ptr` instead.
@@ -184,16 +177,17 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
/// This function also guarantees that the resulting pointer will remain stable
/// even when new allocations are pushed to the `HashMap`. `copy_repeatedly` relies
/// on that.
///
/// It is the caller's responsibility to check bounds and alignment beforehand.
fn get_bytes_internal(
&self,
cx: &impl HasDataLayout,
ptr: Pointer<Tag>,
size: Size,
check_defined_and_ptr: bool,
msg: CheckInAllocMsg,
) -> InterpResult<'tcx, &[u8]>
{
self.check_bounds(cx, ptr, size, msg)?;
let range = self.check_bounds(ptr.offset, size);
if check_defined_and_ptr {
self.check_defined(ptr, size)?;
@@ -205,12 +199,13 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
AllocationExtra::memory_read(self, ptr, size)?;
assert_eq!(ptr.offset.bytes() as usize as u64, ptr.offset.bytes());
assert_eq!(size.bytes() as usize as u64, size.bytes());
let offset = ptr.offset.bytes() as usize;
Ok(&self.bytes[offset..offset + size.bytes() as usize])
Ok(&self.bytes[range])
}
/// Check that these bytes are initialized and not pointer bytes, and then return them
/// as a slice.
///
/// It is the caller's responsibility to check bounds and alignment beforehand.
#[inline]
pub fn get_bytes(
&self,
@@ -219,11 +214,13 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
size: Size,
) -> InterpResult<'tcx, &[u8]>
{
self.get_bytes_internal(cx, ptr, size, true, CheckInAllocMsg::MemoryAccessTest)
self.get_bytes_internal(cx, ptr, size, true)
}
/// It is the caller's responsibility to handle undefined and pointer bytes.
/// However, this still checks that there are no relocations on the *edges*.
///
/// It is the caller's responsibility to check bounds and alignment beforehand.
#[inline]
pub fn get_bytes_with_undef_and_ptr(
&self,
@@ -232,11 +229,13 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
size: Size,
) -> InterpResult<'tcx, &[u8]>
{
self.get_bytes_internal(cx, ptr, size, false, CheckInAllocMsg::MemoryAccessTest)
self.get_bytes_internal(cx, ptr, size, false)
}
/// Just calling this already marks everything as defined and removes relocations,
/// so be sure to actually put data there!
///
/// It is the caller's responsibility to check bounds and alignment beforehand.
pub fn get_bytes_mut(
&mut self,
cx: &impl HasDataLayout,
@@ -244,18 +243,14 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
size: Size,
) -> InterpResult<'tcx, &mut [u8]>
{
assert_ne!(size.bytes(), 0, "0-sized accesses should never even get a `Pointer`");
self.check_bounds(cx, ptr, size, CheckInAllocMsg::MemoryAccessTest)?;
let range = self.check_bounds(ptr.offset, size);
self.mark_definedness(ptr, size, true);
self.clear_relocations(cx, ptr, size)?;
AllocationExtra::memory_written(self, ptr, size)?;
assert_eq!(ptr.offset.bytes() as usize as u64, ptr.offset.bytes());
assert_eq!(size.bytes() as usize as u64, size.bytes());
let offset = ptr.offset.bytes() as usize;
Ok(&mut self.bytes[offset..offset + size.bytes() as usize])
Ok(&mut self.bytes[range])
}
}
@@ -276,9 +271,10 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
let size_with_null = Size::from_bytes((size + 1) as u64);
// Go through `get_bytes` for checks and AllocationExtra hooks.
// We read the null, so we include it in the request, but we want it removed
// from the result!
// from the result, so we do subslicing.
Ok(&self.get_bytes(cx, ptr, size_with_null)?[..size])
}
// This includes the case where `offset` is out-of-bounds to begin with.
None => err!(UnterminatedCString(ptr.erase_tag())),
}
}
@@ -306,7 +302,7 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
/// Writes `src` to the memory starting at `ptr.offset`.
///
/// Will do bounds checks on the allocation.
/// It is the caller's responsibility to check bounds and alignment beforehand.
pub fn write_bytes(
&mut self,
cx: &impl HasDataLayout,
@@ -320,6 +316,8 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
}
/// Sets `count` bytes starting at `ptr.offset` with `val`. Basically `memset`.
///
/// It is the caller's responsibility to check bounds and alignment beforehand.
pub fn write_repeat(
&mut self,
cx: &impl HasDataLayout,
@@ -342,7 +340,7 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
/// * in order to obtain a `Pointer` we need to check for ZSTness anyway due to integer pointers
/// being valid for ZSTs
///
/// Note: This function does not do *any* alignment checks, you need to do these before calling
/// It is the caller's responsibility to check bounds and alignment beforehand.
pub fn read_scalar(
&self,
cx: &impl HasDataLayout,
@@ -378,7 +376,9 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
Ok(ScalarMaybeUndef::Scalar(Scalar::from_uint(bits, size)))
}
/// Note: This function does not do *any* alignment checks, you need to do these before calling
/// Read a pointer-sized scalar.
///
/// It is the caller's responsibility to check bounds and alignment beforehand.
pub fn read_ptr_sized(
&self,
cx: &impl HasDataLayout,
@@ -395,7 +395,7 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
/// * in order to obtain a `Pointer` we need to check for ZSTness anyway due to integer pointers
/// being valid for ZSTs
///
/// Note: This function does not do *any* alignment checks, you need to do these before calling
/// It is the caller's responsibility to check bounds and alignment beforehand.
pub fn write_scalar(
&mut self,
cx: &impl HasDataLayout,
@@ -435,7 +435,9 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
Ok(())
}
/// Note: This function does not do *any* alignment checks, you need to do these before calling
/// Write a pointer-sized scalar.
///
/// It is the caller's responsibility to check bounds and alignment beforehand.
pub fn write_ptr_sized(
&mut self,
cx: &impl HasDataLayout,

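For intuition, here is a self-contained sketch of the offset-to-`Range` pattern the new private `check_bounds` helper uses (simplified: `Size` becomes a plain `u64` and the overflow check is spelled out; the names are illustrative, not the rustc API):

    use std::ops::Range;

    // Overflow-safe bounds check: compute `offset + size` with checked
    // arithmetic, verify the end fits within the allocation, and hand back
    // the byte range to slice with.
    fn check_bounds(len: usize, offset: u64, size: u64) -> Range<usize> {
        let end = offset.checked_add(size).expect("offset + size overflowed");
        assert!(end <= len as u64, "out-of-bounds access");
        offset as usize..end as usize
    }

    fn main() {
        let bytes = [0u8; 16];
        let range = check_bounds(bytes.len(), 4, 8);
        assert_eq!(&bytes[range], &[0u8; 8][..]);
    }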

@@ -437,7 +437,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpretCx<'mir, 'tcx, M> {
Ok(Some((size.align_to(align), align)))
}
ty::Dynamic(..) => {
let vtable = metadata.expect("dyn trait fat ptr must have vtable").to_ptr()?;
let vtable = metadata.expect("dyn trait fat ptr must have vtable");
// the second entry in the vtable is the dynamic size of the object.
Ok(Some(self.read_size_and_align_from_vtable(vtable)?))
}


@@ -250,7 +250,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
/// Checks that the pointer is aligned AND non-NULL. This supports ZSTs in two ways:
/// You can pass a scalar, and a `Pointer` does not have to actually still be allocated.
pub fn check_align(
fn check_align(
&self,
ptr: Scalar<M::PointerTag>,
required_align: Align
@@ -260,7 +260,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
Err(ptr) => {
// check this is not NULL -- which we can ensure only if this is in-bounds
// of some (potentially dead) allocation.
let align = self.check_bounds_ptr(ptr, InboundsCheck::MaybeDead,
let align = self.check_ptr_bounds(ptr, InboundsCheck::MaybeDead,
CheckInAllocMsg::NullPointerTest)?;
(ptr.offset.bytes(), align)
}
@@ -291,12 +291,13 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
}
}
/// Checks if the pointer is "in-bounds". Notice that a pointer pointing at the end
/// of an allocation (i.e., at the first *inaccessible* location) *is* considered
/// in-bounds! This follows C's/LLVM's rules.
/// If you want to check bounds before doing a memory access, better first obtain
/// an `Allocation` and call `check_bounds`.
pub fn check_bounds_ptr(
/// Checks if the pointer is "in-bounds" of *some* (live or dead) allocation. Notice that
/// a pointer pointing at the end of an allocation (i.e., at the first *inaccessible* location)
/// *is* considered in-bounds! This follows C's/LLVM's rules.
/// `liveness` can be used to rule out dead allocations. Testing in-bounds with a dead
/// allocation is useful e.g. to exclude the possibility of this pointer being NULL.
/// If you want to check bounds before doing a memory access, call `check_ptr_access`.
fn check_ptr_bounds(
&self,
ptr: Pointer<M::PointerTag>,
liveness: InboundsCheck,
@@ -306,6 +307,77 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
ptr.check_in_alloc(allocation_size, msg)?;
Ok(align)
}
/// Check if the given scalar is allowed to do a memory access of given `size`
/// and `align`. On success, returns `None` for zero-sized accesses (where
/// nothing else is left to do) and a `Pointer` to use for the actual access otherwise.
/// Crucially, if the input is a `Pointer`, we will test it for liveness
/// *even if* the size is 0.
///
/// Everyone accessing memory based on a `Scalar` should use this method to get the
/// `Pointer` they need. And even if you already have a `Pointer`, call this method
/// to make sure it is sufficiently aligned and not dangling. Not doing that may
/// cause ICEs.
pub fn check_ptr_access(
&self,
sptr: Scalar<M::PointerTag>,
size: Size,
align: Align,
) -> InterpResult<'tcx, Option<Pointer<M::PointerTag>>> {
// Normalize to a `Pointer` if we definitely need one.
let normalized = if size.bytes() == 0 {
// Can be an integer, just take what we got.
sptr
} else {
// A "real" access, we must get a pointer.
Scalar::Ptr(self.force_ptr(sptr)?)
};
Ok(match normalized.to_bits_or_ptr(self.pointer_size(), self) {
Ok(bits) => {
let bits = bits as u64; // it's ptr-sized
assert!(size.bytes() == 0);
// Must be non-NULL and aligned.
if bits == 0 {
return err!(InvalidNullPointerUsage);
}
if bits % align.bytes() != 0 {
let bits_pow1 = 1 << bits.trailing_zeros();
return err!(AlignmentCheckFailed {
has: Align::from_bytes(bits_pow1).unwrap(),
required: align,
});
}
None
}
Err(ptr) => {
// Test bounds.
self.check_ptr_bounds(
ptr.offset(size, self)?,
InboundsCheck::Live,
CheckInAllocMsg::MemoryAccessTest,
)?;
// Test align and non-NULL.
self.check_align(ptr.into(), align)?;
// FIXME: Alignment check is too strict, depending on the base address that
// got picked we might be aligned even if this check fails.
// We instead have to fall back to converting to an integer and checking
// the "real" alignment.
// We can still be zero-sized in this branch, in which case we have to
// return `None`.
if size.bytes() == 0 { None } else { Some(ptr) }
}
})
}
/// Test if the pointer might be NULL.
pub fn ptr_may_be_null(
&self,
ptr: Pointer<M::PointerTag>,
) -> bool {
self.check_ptr_bounds(ptr, InboundsCheck::MaybeDead, CheckInAllocMsg::NullPointerTest)
.is_err()
}
}
/// Allocation accessors
@@ -629,24 +701,22 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
}
}
/// Byte Accessors
/// Reading and writing.
impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
/// Performs appropriate bounds checks.
pub fn read_bytes(
&self,
ptr: Scalar<M::PointerTag>,
size: Size,
) -> InterpResult<'tcx, &[u8]> {
if size.bytes() == 0 {
Ok(&[])
} else {
let ptr = self.force_ptr(ptr)?;
self.get(ptr.alloc_id)?.get_bytes(self, ptr, size)
}
let ptr = match self.check_ptr_access(ptr, size, Align::from_bytes(1).unwrap())? {
Some(ptr) => ptr,
None => return Ok(&[]), // zero-sized access
};
self.get(ptr.alloc_id)?.get_bytes(self, ptr, size)
}
}
/// Reading and writing.
impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
/// Performs appropriate bounds checks.
pub fn copy(
&mut self,
src: Scalar<M::PointerTag>,
@@ -659,6 +729,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
self.copy_repeatedly(src, src_align, dest, dest_align, size, 1, nonoverlapping)
}
/// Performs appropriate bounds checks.
pub fn copy_repeatedly(
&mut self,
src: Scalar<M::PointerTag>,
@@ -669,15 +740,14 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
length: u64,
nonoverlapping: bool,
) -> InterpResult<'tcx> {
self.check_align(src, src_align)?;
self.check_align(dest, dest_align)?;
if size.bytes() == 0 {
// Nothing to do for ZST, other than checking alignment and
// non-NULLness which already happened.
return Ok(());
}
let src = self.force_ptr(src)?;
let dest = self.force_ptr(dest)?;
// We need to check *both* before early-aborting due to the size being 0.
let (src, dest) = match (self.check_ptr_access(src, size, src_align)?,
self.check_ptr_access(dest, size * length, dest_align)?)
{
(Some(src), Some(dest)) => (src, dest),
// One of the two sizes is 0.
_ => return Ok(()),
};
// first copy the relocations to a temporary buffer, because
// `get_bytes_mut` will clear the relocations, which is correct,

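One detail worth spelling out: in the integer branch of `check_ptr_access`, the alignment error reports the actual alignment of the address, derived from its trailing zeros. A self-contained sketch of that computation (hypothetical helper name, not the rustc API):

    // The largest alignment an integer address can guarantee is the largest
    // power of two dividing it, i.e. 1 << trailing_zeros.
    fn largest_alignment(addr: u64) -> u64 {
        assert_ne!(addr, 0, "NULL is rejected before this point");
        1 << addr.trailing_zeros()
    }

    fn main() {
        assert_eq!(largest_alignment(24), 8); // 24 = 8 * 3
        assert_eq!(largest_alignment(20), 4); // 20 = 4 * 5
        // An access requiring 8-byte alignment at address 20 fails:
        assert_ne!(20 % 8, 0);
    }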

@@ -9,9 +9,9 @@ use rustc::ty::layout::{
};
use rustc::mir::interpret::{
GlobalId, AllocId, CheckInAllocMsg,
GlobalId, AllocId,
ConstValue, Pointer, Scalar,
InterpResult, InterpError, InboundsCheck,
InterpResult, InterpError,
sign_extend, truncate,
};
use super::{
@@ -226,19 +226,14 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpretCx<'mir, 'tcx, M> {
}
let (ptr, ptr_align) = mplace.to_scalar_ptr_align();
if mplace.layout.is_zst() {
// Not all ZSTs have a layout we would handle below, so just short-circuit them
// all here.
self.memory.check_align(ptr, ptr_align)?;
return Ok(Some(ImmTy {
let ptr = match self.memory.check_ptr_access(ptr, mplace.layout.size, ptr_align)? {
Some(ptr) => ptr,
None => return Ok(Some(ImmTy {
imm: Immediate::Scalar(Scalar::zst().into()),
layout: mplace.layout,
}));
}
})), // zero-sized access
};
// check for integer pointers before alignment to report better errors
let ptr = self.force_ptr(ptr)?;
self.memory.check_align(ptr.into(), ptr_align)?;
match mplace.layout.abi {
layout::Abi::Scalar(..) => {
let scalar = self.memory
@@ -250,17 +245,18 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpretCx<'mir, 'tcx, M> {
}))
}
layout::Abi::ScalarPair(ref a, ref b) => {
// We checked `ptr_align` above, so all fields will have the alignment they need.
// We would anyway check against `ptr_align.restrict_for_offset(b_offset)`,
// which `ptr.offset(b_offset)` cannot possibly fail to satisfy.
let (a, b) = (&a.value, &b.value);
let (a_size, b_size) = (a.size(self), b.size(self));
let a_ptr = ptr;
let b_offset = a_size.align_to(b.align(self).abi);
assert!(b_offset.bytes() > 0); // we later use the offset to test which field to use
assert!(b_offset.bytes() > 0); // we later use the offset to tell apart the fields
let b_ptr = ptr.offset(b_offset, self)?;
let a_val = self.memory
.get(ptr.alloc_id)?
.read_scalar(self, a_ptr, a_size)?;
let b_align = ptr_align.restrict_for_offset(b_offset);
self.memory.check_align(b_ptr.into(), b_align)?;
let b_val = self.memory
.get(ptr.alloc_id)?
.read_scalar(self, b_ptr, b_size)?;
@@ -639,8 +635,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpretCx<'mir, 'tcx, M> {
Err(ptr) => {
// The niche must be just 0 (which an inbounds pointer value never is)
let ptr_valid = niche_start == 0 && variants_start == variants_end &&
self.memory.check_bounds_ptr(ptr, InboundsCheck::MaybeDead,
CheckInAllocMsg::NullPointerTest).is_ok();
!self.memory.ptr_may_be_null(ptr);
if !ptr_valid {
return err!(InvalidDiscriminant(raw_discr.erase_tag().into()));
}

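The `b_offset` computation in this hunk relies on `align_to`, which rounds the first field's size up to the second field's alignment; a minimal self-contained sketch (assuming a power-of-two `align`):

    // Round `size` up to the next multiple of `align` (a power of two).
    fn align_to(size: u64, align: u64) -> u64 {
        (size + align - 1) & !(align - 1)
    }

    fn main() {
        // For a pair like (u8, u32), the second field starts at
        // align_to(1, 4) = 4 -- never 0, which is why `b_offset` can be
        // used to tell the two fields apart.
        assert_eq!(align_to(1, 4), 4);
        assert_eq!(align_to(4, 4), 4);
        assert_eq!(align_to(5, 8), 8);
    }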

@@ -222,9 +222,9 @@ impl<'tcx, Tag> MPlaceTy<'tcx, Tag> {
}
#[inline]
pub(super) fn vtable(self) -> InterpResult<'tcx, Pointer<Tag>> {
pub(super) fn vtable(self) -> Scalar<Tag> {
match self.layout.ty.sty {
ty::Dynamic(..) => self.mplace.meta.unwrap().to_ptr(),
ty::Dynamic(..) => self.mplace.meta.unwrap(),
_ => bug!("vtable not supported on type {:?}", self.layout.ty),
}
}
@@ -746,15 +746,13 @@ where
// type things are read at. In case `src_val` is a `ScalarPair`, we don't do any magic here
// to handle padding properly, which is only correct if we never look at this data with the
// wrong type.
assert!(!dest.layout.is_unsized());
// Nothing to do for ZSTs, other than checking alignment
if dest.layout.is_zst() {
return self.memory.check_align(ptr, ptr_align);
}
let ptr = match self.memory.check_ptr_access(ptr, dest.layout.size, ptr_align)? {
Some(ptr) => ptr,
None => return Ok(()), // zero-sized access
};
// check for integer pointers before alignment to report better errors
let ptr = self.force_ptr(ptr)?;
self.memory.check_align(ptr.into(), ptr_align)?;
let tcx = &*self.tcx;
// FIXME: We should check that there are dest.layout.size many bytes available in
// memory. The code below is not sufficient, with enough padding it might not
@@ -771,6 +769,9 @@ where
)
}
Immediate::ScalarPair(a_val, b_val) => {
// We checked `ptr_align` above, so all fields will have the alignment they need.
// We would anyway check against `ptr_align.restrict_for_offset(b_offset)`,
// which `ptr.offset(b_offset)` cannot possibly fail to satisfy.
let (a, b) = match dest.layout.abi {
layout::Abi::ScalarPair(ref a, ref b) => (&a.value, &b.value),
_ => bug!("write_immediate_to_mplace: invalid ScalarPair layout: {:#?}",
@@ -778,11 +779,8 @@ where
};
let (a_size, b_size) = (a.size(self), b.size(self));
let b_offset = a_size.align_to(b.align(self).abi);
let b_align = ptr_align.restrict_for_offset(b_offset);
let b_ptr = ptr.offset(b_offset, self)?;
self.memory.check_align(b_ptr.into(), b_align)?;
// It is tempting to verify `b_offset` against `layout.fields.offset(1)`,
// but that does not work: We could be a newtype around a pair, then the
// fields do not match the `ScalarPair` components.
@@ -1053,7 +1051,7 @@ where
/// Also return some more information so drop doesn't have to run the same code twice.
pub(super) fn unpack_dyn_trait(&self, mplace: MPlaceTy<'tcx, M::PointerTag>)
-> InterpResult<'tcx, (ty::Instance<'tcx>, MPlaceTy<'tcx, M::PointerTag>)> {
let vtable = mplace.vtable()?; // also sanity checks the type
let vtable = mplace.vtable(); // also sanity checks the type
let (instance, ty) = self.read_drop_type_from_vtable(vtable)?;
let layout = self.layout_of(ty)?;

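The comment about `ptr_align.restrict_for_offset(b_offset)` deserves a worked example. Here is a simplified standalone model (an assumption about the semantics, not the rustc implementation): the alignment known for `base + offset` is the base alignment capped by the largest power of two dividing the offset, so once the base's alignment is verified, the per-field check cannot fail.

    // Alignment known for `base + offset`: the base alignment, capped by
    // the largest power of two dividing `offset`.
    fn restrict_for_offset(align: u64, offset: u64) -> u64 {
        if offset == 0 { align } else { align.min(1u64 << offset.trailing_zeros()) }
    }

    fn main() {
        // An 8-aligned base with a field at offset 4 yields a 4-aligned
        // field -- a check that cannot fail once the base's 8-alignment
        // has been verified.
        assert_eq!(restrict_for_offset(8, 4), 4);
        assert_eq!(restrict_for_offset(8, 16), 8);
    }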

@@ -425,12 +425,15 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpretCx<'mir, 'tcx, M> {
}
};
// Find and consult vtable
let vtable = receiver_place.vtable()?;
self.memory.check_align(vtable.into(), self.tcx.data_layout.pointer_align.abi)?;
let fn_ptr = self.memory.get(vtable.alloc_id)?.read_ptr_sized(
self,
vtable.offset(ptr_size * (idx as u64 + 3), self)?,
)?.to_ptr()?;
let vtable = receiver_place.vtable();
let vtable_slot = vtable.ptr_offset(ptr_size * (idx as u64 + 3), self)?;
let vtable_slot = self.memory.check_ptr_access(
vtable_slot,
ptr_size,
self.tcx.data_layout.pointer_align.abi,
)?.expect("cannot be a ZST");
let fn_ptr = self.memory.get(vtable_slot.alloc_id)?
.read_ptr_sized(self, vtable_slot)?.to_ptr()?;
let instance = self.memory.get_fn(fn_ptr)?;
// `*mut receiver_place.layout.ty` is almost the layout that we

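The `idx as u64 + 3` skips the three vtable header words (drop fn, size, align, per the comments elsewhere in this commit) before the method pointers; a tiny self-contained sketch of the offset arithmetic:

    // Vtable layout assumed here: [drop_fn, size, align, method 0, ...].
    fn vtable_slot_offset(ptr_size: u64, method_idx: u64) -> u64 {
        ptr_size * (method_idx + 3)
    }

    fn main() {
        // On a 64-bit target, method 0 lives at byte offset 24.
        assert_eq!(vtable_slot_offset(8, 0), 24);
    }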

@@ -101,10 +101,14 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpretCx<'mir, 'tcx, M> {
/// Returns the drop fn instance as well as the actual dynamic type
pub fn read_drop_type_from_vtable(
&self,
vtable: Pointer<M::PointerTag>,
vtable: Scalar<M::PointerTag>,
) -> InterpResult<'tcx, (ty::Instance<'tcx>, Ty<'tcx>)> {
// we don't care about the pointee type, we just want a pointer
self.memory.check_align(vtable.into(), self.tcx.data_layout.pointer_align.abi)?;
let vtable = self.memory.check_ptr_access(
vtable,
self.tcx.data_layout.pointer_size,
self.tcx.data_layout.pointer_align.abi,
)?.expect("cannot be a ZST");
let drop_fn = self.memory
.get(vtable.alloc_id)?
.read_ptr_sized(self, vtable)?
@@ -113,17 +117,23 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpretCx<'mir, 'tcx, M> {
trace!("Found drop fn: {:?}", drop_instance);
let fn_sig = drop_instance.ty(*self.tcx).fn_sig(*self.tcx);
let fn_sig = self.tcx.normalize_erasing_late_bound_regions(self.param_env, &fn_sig);
// the drop function takes *mut T where T is the type being dropped, so get that
// The drop function takes `*mut T` where `T` is the type being dropped, so get that.
let ty = fn_sig.inputs()[0].builtin_deref(true).unwrap().ty;
Ok((drop_instance, ty))
}
pub fn read_size_and_align_from_vtable(
&self,
vtable: Pointer<M::PointerTag>,
vtable: Scalar<M::PointerTag>,
) -> InterpResult<'tcx, (Size, Align)> {
let pointer_size = self.pointer_size();
self.memory.check_align(vtable.into(), self.tcx.data_layout.pointer_align.abi)?;
// We check for size = 3*ptr_size, that covers the drop fn (unused here),
// the size, and the align.
let vtable = self.memory.check_ptr_access(
vtable,
3*pointer_size,
self.tcx.data_layout.pointer_align.abi,
)?.expect("cannot be a ZST");
let alloc = self.memory.get(vtable.alloc_id)?;
let size = alloc.read_ptr_sized(self, vtable.offset(pointer_size, self)?)?
.to_bits(pointer_size)? as u64;

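With that layout (word 0 = drop fn, word 1 = size, word 2 = align), the code above reads the size at offset `pointer_size`, and past the end of this hunk the align at `2 * pointer_size`; a simplified standalone model:

    // Model the vtable as a slice of pointer-sized words:
    // word 0 = drop fn, word 1 = size, word 2 = align.
    fn size_and_align(vtable: &[u64]) -> (u64, u64) {
        (vtable[1], vtable[2])
    }

    fn main() {
        let vtable = [0xdead_beef, 16, 8]; // drop fn (opaque), size, align
        assert_eq!(size_and_align(&vtable), (16, 8));
    }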

@@ -3,11 +3,11 @@ use std::ops::RangeInclusive;
use syntax_pos::symbol::{sym, Symbol};
use rustc::hir;
use rustc::ty::layout::{self, Size, Align, TyLayout, LayoutOf, VariantIdx};
use rustc::ty::layout::{self, TyLayout, LayoutOf, VariantIdx};
use rustc::ty;
use rustc_data_structures::fx::FxHashSet;
use rustc::mir::interpret::{
Scalar, GlobalAlloc, InterpResult, InterpError, CheckInAllocMsg,
GlobalAlloc, InterpResult, InterpError,
};
use std::hash::Hash;
@@ -365,8 +365,12 @@ impl<'rt, 'mir, 'tcx, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
let tail = self.ecx.tcx.struct_tail(layout.ty);
match tail.sty {
ty::Dynamic(..) => {
let vtable = try_validation!(meta.unwrap().to_ptr(),
"non-pointer vtable in fat pointer", self.path);
let vtable = meta.unwrap();
try_validation!(self.ecx.memory.check_ptr_access(
vtable,
3*self.ecx.tcx.data_layout.pointer_size, // drop, size, align
self.ecx.tcx.data_layout.pointer_align.abi,
), "dangling or unaligned vtable pointer", self.path);
try_validation!(self.ecx.read_drop_type_from_vtable(vtable),
"invalid drop fn in vtable", self.path);
try_validation!(self.ecx.read_size_and_align_from_vtable(vtable),
@@ -384,14 +388,14 @@ impl<'rt, 'mir, 'tcx, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
bug!("Unexpected unsized type tail: {:?}", tail),
}
}
// Make sure this is non-NULL and aligned
// Make sure this is dereferencable and all.
let (size, align) = self.ecx.size_and_align_of(meta, layout)?
// for the purpose of validity, consider foreign types to have
// alignment and size determined by the layout (size will be 0,
// alignment should take attributes into account).
.unwrap_or_else(|| (layout.size, layout.align.abi));
match self.ecx.memory.check_align(ptr, align) {
Ok(_) => {},
let ptr: Option<_> = match self.ecx.memory.check_ptr_access(ptr, size, align) {
Ok(ptr) => ptr,
Err(err) => {
info!("{:?} is not aligned to {:?}", ptr, align);
match err.kind {
@@ -403,21 +407,16 @@ impl<'rt, 'mir, 'tcx, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
required.bytes(), has.bytes()), self.path),
_ =>
return validation_failure!(
"dangling (out-of-bounds) reference (might be NULL at \
run-time)",
"dangling (not entirely in bounds) reference",
self.path
),
}
}
}
};
// Recursive checking
if let Some(ref mut ref_tracking) = self.ref_tracking_for_consts {
let place = self.ecx.ref_to_mplace(value)?;
// FIXME(RalfJ): check ZST for inbound pointers
if size != Size::ZERO {
// Non-ZST also have to be dereferencable
let ptr = try_validation!(place.ptr.to_ptr(),
"integer pointer in non-ZST reference", self.path);
if let Some(ptr) = ptr { // not a ZST
// Skip validation entirely for some external statics
let alloc_kind = self.ecx.tcx.alloc_map.lock().get(ptr.alloc_id);
if let Some(GlobalAlloc::Static(did)) = alloc_kind {
@@ -429,18 +428,10 @@ impl<'rt, 'mir, 'tcx, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
return Ok(());
}
}
// Maintain the invariant that the place we are checking is
// already verified to be in-bounds.
try_validation!(
self.ecx.memory
.get(ptr.alloc_id)?
.check_bounds(self.ecx, ptr, size, CheckInAllocMsg::InboundsTest),
"dangling (not entirely in bounds) reference", self.path);
}
// Check if we have encountered this pointer+layout combination
// before. Proceed recursively even for integer pointers, no
// reason to skip them! They are (recursively) valid for some ZST,
// but not for others (e.g., `!` is a ZST).
// before. Proceed recursively even for ZST, no
// reason to skip them! E.g., `!` is a ZST and we want to validate it.
let path = &self.path;
ref_tracking.track(place, || {
// We need to clone the path anyway, make sure it gets created
@@ -499,14 +490,8 @@ impl<'rt, 'mir, 'tcx, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
let bits = match value.to_bits_or_ptr(op.layout.size, self.ecx) {
Err(ptr) => {
if lo == 1 && hi == max_hi {
// only NULL is not allowed.
// We can call `check_align` to check non-NULL-ness, but have to also look
// for function pointers.
let non_null =
self.ecx.memory.check_align(
Scalar::Ptr(ptr), Align::from_bytes(1).unwrap()
).is_ok();
if !non_null {
// Only NULL is the niche. So make sure the ptr is NOT NULL.
if self.ecx.memory.ptr_may_be_null(ptr) {
// These conditions are just here to improve the diagnostics so we can
// differentiate between null pointers and dangling pointers
if self.ref_tracking_for_consts.is_some() &&