Auto merge of #126793 - saethlin:mono-rawvec, r=scottmcm

Apply "polymorphization at home" to RawVec

The idea here is to move all the logic in RawVec into functions on an inner type that is generic only over the allocator, taking the element's layout (size and alignment) as an explicit parameter. Because most of the code is then monomorphized once per allocator rather than once per element type, this should eliminate the recurring problem where small tweaks to RawVec produce large swings in compile times.

This uncovered https://github.com/rust-lang/rust-clippy/issues/12979, so I've modified the relevant test in a way that tries to preserve the spirit of the test without tripping the ICE.
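
As a rough sketch of the pattern (hypothetical names, not the actual std code): the type-generic wrapper stays thin and only computes `Layout::new::<T>()`, while a non-generic inner struct holds the real logic and takes the element layout as a runtime value.

```rust
use std::alloc::Layout;
use std::marker::PhantomData;

// Non-generic core: one monomorphized copy, shared by every element type.
struct RawInner {
    cap: usize,
}

impl RawInner {
    // The element layout arrives as a value, not as a type parameter.
    fn amortized_cap(&self, len: usize, additional: usize, elem_layout: Layout) -> usize {
        let required = len.checked_add(additional).expect("capacity overflow");
        // A rough stand-in for the "min non-zero cap" policy in the real code.
        let floor = if elem_layout.size() == 1 { 8 } else { 4 };
        required.max(self.cap * 2).max(floor)
    }
}

// Thin generic wrapper: the only `T`-dependent work is producing the layout.
struct Raw<T> {
    inner: RawInner,
    _marker: PhantomData<T>,
}

impl<T> Raw<T> {
    fn amortized_cap(&self, len: usize, additional: usize) -> usize {
        self.inner.amortized_cap(len, additional, Layout::new::<T>())
    }
}

fn main() {
    let bytes = Raw::<u8> { inner: RawInner { cap: 0 }, _marker: PhantomData };
    assert_eq!(bytes.amortized_cap(0, 1), 8); // 1-byte elements jump straight to 8
    let words = Raw::<u64> { inner: RawInner { cap: 0 }, _marker: PhantomData };
    assert_eq!(words.amortized_cap(0, 1), 4); // moderate-sized elements jump to 4
}
```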
bors 2024-08-12 01:47:06 +00:00
commit 13f8a57cfb
15 changed files with 563 additions and 316 deletions


@ -1,7 +1,7 @@
#![unstable(feature = "raw_vec_internals", reason = "unstable const warnings", issue = "none")]
use core::alloc::LayoutError;
use core::mem::{self, ManuallyDrop, MaybeUninit, SizedTypeProperties};
use core::marker::PhantomData;
use core::mem::{ManuallyDrop, MaybeUninit, SizedTypeProperties};
use core::ptr::{self, NonNull, Unique};
use core::{cmp, hint};
@ -40,6 +40,13 @@ struct Cap(usize);
impl Cap {
const ZERO: Cap = unsafe { Cap(0) };
/// `Cap(cap)`, except if `T` is a ZST then `Cap::ZERO`.
///
/// # Safety
///
/// `cap` must be <= `isize::MAX`.
unsafe fn new<T>(cap: usize) -> Self {
if T::IS_ZST { Cap::ZERO } else { unsafe { Self(cap) } }
}
}
/// A low-level utility for more ergonomically allocating, reallocating, and deallocating
@ -66,7 +73,19 @@ impl Cap {
/// `Box<[T]>`, since `capacity()` won't yield the length.
#[allow(missing_debug_implementations)]
pub(crate) struct RawVec<T, A: Allocator = Global> {
ptr: Unique<T>,
inner: RawVecInner<A>,
_marker: PhantomData<T>,
}
/// Like a `RawVec`, but only generic over the allocator, not the type.
///
/// As such, all the methods need the layout passed-in as a parameter.
///
/// Having this separation reduces the amount of code we need to monomorphize,
/// as most operations don't need the actual type, just its layout.
#[allow(missing_debug_implementations)]
struct RawVecInner<A: Allocator = Global> {
ptr: Unique<u8>,
/// Never used for ZSTs; it's `capacity()`'s responsibility to return usize::MAX in that case.
///
/// # Safety
@ -90,8 +109,9 @@ impl<T> RawVec<T, Global> {
/// `RawVec` with capacity `usize::MAX`. Useful for implementing
/// delayed allocation.
#[must_use]
#[rustc_const_stable(feature = "raw_vec_internals_const", since = "1.81")]
pub const fn new() -> Self {
Self::new_in(Global)
Self { inner: RawVecInner::new::<T>(), _marker: PhantomData }
}
/// Creates a `RawVec` (on the system heap) with exactly the
@ -113,10 +133,7 @@ impl<T> RawVec<T, Global> {
#[must_use]
#[inline]
pub fn with_capacity(capacity: usize) -> Self {
match Self::try_allocate_in(capacity, AllocInit::Uninitialized, Global) {
Ok(res) => res,
Err(err) => handle_error(err),
}
Self { inner: RawVecInner::with_capacity(capacity, T::LAYOUT), _marker: PhantomData }
}
/// Like `with_capacity`, but guarantees the buffer is zeroed.
@ -124,29 +141,56 @@ impl<T> RawVec<T, Global> {
#[must_use]
#[inline]
pub fn with_capacity_zeroed(capacity: usize) -> Self {
Self::with_capacity_zeroed_in(capacity, Global)
Self {
inner: RawVecInner::with_capacity_zeroed_in(capacity, Global, T::LAYOUT),
_marker: PhantomData,
}
}
}
impl RawVecInner<Global> {
#[must_use]
#[rustc_const_stable(feature = "raw_vec_internals_const", since = "1.81")]
const fn new<T>() -> Self {
Self::new_in(Global, core::mem::align_of::<T>())
}
#[cfg(not(any(no_global_oom_handling, test)))]
#[must_use]
#[inline]
fn with_capacity(capacity: usize, elem_layout: Layout) -> Self {
match Self::try_allocate_in(capacity, AllocInit::Uninitialized, Global, elem_layout) {
Ok(res) => res,
Err(err) => handle_error(err),
}
}
}
// Tiny Vecs are dumb. Skip to:
// - 8 if the element size is 1, because any heap allocator is likely
// to round up a request of less than 8 bytes to at least 8 bytes.
// - 4 if elements are moderate-sized (<= 1 KiB).
// - 1 otherwise, to avoid wasting too much space for very short Vecs.
const fn min_non_zero_cap(size: usize) -> usize {
if size == 1 {
8
} else if size <= 1024 {
4
} else {
1
}
}
impl<T, A: Allocator> RawVec<T, A> {
// Tiny Vecs are dumb. Skip to:
// - 8 if the element size is 1, because any heap allocator is likely
// to round up a request of less than 8 bytes to at least 8 bytes.
// - 4 if elements are moderate-sized (<= 1 KiB).
// - 1 otherwise, to avoid wasting too much space for very short Vecs.
pub(crate) const MIN_NON_ZERO_CAP: usize = if mem::size_of::<T>() == 1 {
8
} else if mem::size_of::<T>() <= 1024 {
4
} else {
1
};
#[cfg(not(no_global_oom_handling))]
pub(crate) const MIN_NON_ZERO_CAP: usize = min_non_zero_cap(size_of::<T>());
/// Like `new`, but parameterized over the choice of allocator for
/// the returned `RawVec`.
#[inline]
#[rustc_const_stable(feature = "raw_vec_internals_const", since = "1.81")]
pub const fn new_in(alloc: A) -> Self {
// `cap: 0` means "unallocated". zero-sized types are ignored.
Self { ptr: Unique::dangling(), cap: Cap::ZERO, alloc }
Self { inner: RawVecInner::new_in(alloc, align_of::<T>()), _marker: PhantomData }
}
/// Like `with_capacity`, but parameterized over the choice of
@ -154,9 +198,9 @@ impl<T, A: Allocator> RawVec<T, A> {
#[cfg(not(no_global_oom_handling))]
#[inline]
pub fn with_capacity_in(capacity: usize, alloc: A) -> Self {
match Self::try_allocate_in(capacity, AllocInit::Uninitialized, alloc) {
Ok(res) => res,
Err(err) => handle_error(err),
Self {
inner: RawVecInner::with_capacity_in(capacity, alloc, T::LAYOUT),
_marker: PhantomData,
}
}
@ -164,7 +208,10 @@ impl<T, A: Allocator> RawVec<T, A> {
/// allocator for the returned `RawVec`.
#[inline]
pub fn try_with_capacity_in(capacity: usize, alloc: A) -> Result<Self, TryReserveError> {
Self::try_allocate_in(capacity, AllocInit::Uninitialized, alloc)
match RawVecInner::try_with_capacity_in(capacity, alloc, T::LAYOUT) {
Ok(inner) => Ok(Self { inner, _marker: PhantomData }),
Err(e) => Err(e),
}
}
/// Like `with_capacity_zeroed`, but parameterized over the choice
@ -172,9 +219,9 @@ impl<T, A: Allocator> RawVec<T, A> {
#[cfg(not(no_global_oom_handling))]
#[inline]
pub fn with_capacity_zeroed_in(capacity: usize, alloc: A) -> Self {
match Self::try_allocate_in(capacity, AllocInit::Zeroed, alloc) {
Ok(res) => res,
Err(err) => handle_error(err),
Self {
inner: RawVecInner::with_capacity_zeroed_in(capacity, alloc, T::LAYOUT),
_marker: PhantomData,
}
}
@ -200,45 +247,7 @@ impl<T, A: Allocator> RawVec<T, A> {
let me = ManuallyDrop::new(self);
unsafe {
let slice = ptr::slice_from_raw_parts_mut(me.ptr() as *mut MaybeUninit<T>, len);
Box::from_raw_in(slice, ptr::read(&me.alloc))
}
}
fn try_allocate_in(
capacity: usize,
init: AllocInit,
alloc: A,
) -> Result<Self, TryReserveError> {
// Don't allocate here because `Drop` will not deallocate when `capacity` is 0.
if T::IS_ZST || capacity == 0 {
Ok(Self::new_in(alloc))
} else {
// We avoid `unwrap_or_else` here because it bloats the amount of
// LLVM IR generated.
let layout = match Layout::array::<T>(capacity) {
Ok(layout) => layout,
Err(_) => return Err(CapacityOverflow.into()),
};
if let Err(err) = alloc_guard(layout.size()) {
return Err(err);
}
let result = match init {
AllocInit::Uninitialized => alloc.allocate(layout),
#[cfg(not(no_global_oom_handling))]
AllocInit::Zeroed => alloc.allocate_zeroed(layout),
};
let ptr = match result {
Ok(ptr) => ptr,
Err(_) => return Err(AllocError { layout, non_exhaustive: () }.into()),
};
// Allocators currently return a `NonNull<[u8]>` whose length
// matches the size requested. If that ever changes, the capacity
// here should change to `ptr.len() / mem::size_of::<T>()`.
Ok(Self { ptr: Unique::from(ptr.cast()), cap: unsafe { Cap(capacity) }, alloc })
Box::from_raw_in(slice, ptr::read(&me.inner.alloc))
}
}
@ -254,8 +263,15 @@ impl<T, A: Allocator> RawVec<T, A> {
/// guaranteed.
#[inline]
pub unsafe fn from_raw_parts_in(ptr: *mut T, capacity: usize, alloc: A) -> Self {
let cap = if T::IS_ZST { Cap::ZERO } else { unsafe { Cap(capacity) } };
Self { ptr: unsafe { Unique::new_unchecked(ptr) }, cap, alloc }
// SAFETY: Precondition passed to the caller
unsafe {
let ptr = ptr.cast();
let capacity = Cap::new::<T>(capacity);
Self {
inner: RawVecInner::from_raw_parts_in(ptr, capacity, alloc),
_marker: PhantomData,
}
}
}
/// A convenience method for hoisting the non-null precondition out of [`RawVec::from_raw_parts_in`].
@ -264,9 +280,13 @@ impl<T, A: Allocator> RawVec<T, A> {
///
/// See [`RawVec::from_raw_parts_in`].
#[inline]
pub(crate) unsafe fn from_nonnull_in(ptr: NonNull<T>, capacity: usize, alloc: A) -> Self {
let cap = if T::IS_ZST { Cap::ZERO } else { unsafe { Cap(capacity) } };
Self { ptr: Unique::from(ptr), cap, alloc }
pub unsafe fn from_nonnull_in(ptr: NonNull<T>, capacity: usize, alloc: A) -> Self {
// SAFETY: Precondition passed to the caller
unsafe {
let ptr = ptr.cast();
let capacity = Cap::new::<T>(capacity);
Self { inner: RawVecInner::from_nonnull_in(ptr, capacity, alloc), _marker: PhantomData }
}
}
/// Gets a raw pointer to the start of the allocation. Note that this is
@ -274,43 +294,26 @@ impl<T, A: Allocator> RawVec<T, A> {
/// be careful.
#[inline]
pub fn ptr(&self) -> *mut T {
self.ptr.as_ptr()
self.inner.ptr()
}
#[inline]
pub fn non_null(&self) -> NonNull<T> {
NonNull::from(self.ptr)
self.inner.non_null()
}
/// Gets the capacity of the allocation.
///
/// This will always be `usize::MAX` if `T` is zero-sized.
#[inline(always)]
#[inline]
pub fn capacity(&self) -> usize {
if T::IS_ZST { usize::MAX } else { self.cap.0 }
self.inner.capacity(size_of::<T>())
}
/// Returns a shared reference to the allocator backing this `RawVec`.
#[inline]
pub fn allocator(&self) -> &A {
&self.alloc
}
fn current_memory(&self) -> Option<(NonNull<u8>, Layout)> {
if T::IS_ZST || self.cap.0 == 0 {
None
} else {
// We could use Layout::array here which ensures the absence of isize and usize overflows
// and could hypothetically handle differences between stride and size, but this memory
// has already been allocated so we know it can't overflow and currently Rust does not
// support such types. So we can do better by skipping some checks and avoid an unwrap.
const { assert!(mem::size_of::<T>() % mem::align_of::<T>() == 0) };
unsafe {
let align = mem::align_of::<T>();
let size = mem::size_of::<T>().unchecked_mul(self.cap.0);
let layout = Layout::from_size_align_unchecked(size, align);
Some((self.ptr.cast().into(), layout))
}
}
self.inner.allocator()
}
/// Ensures that the buffer contains at least enough space to hold `len +
@ -335,24 +338,7 @@ impl<T, A: Allocator> RawVec<T, A> {
#[cfg(not(no_global_oom_handling))]
#[inline]
pub fn reserve(&mut self, len: usize, additional: usize) {
// Callers expect this function to be very cheap when there is already sufficient capacity.
// Therefore, we move all the resizing and error-handling logic from grow_amortized and
// handle_reserve behind a call, while making sure that this function is likely to be
// inlined as just a comparison and a call if the comparison fails.
#[cold]
fn do_reserve_and_handle<T, A: Allocator>(
slf: &mut RawVec<T, A>,
len: usize,
additional: usize,
) {
if let Err(err) = slf.grow_amortized(len, additional) {
handle_error(err);
}
}
if self.needs_to_grow(len, additional) {
do_reserve_and_handle(self, len, additional);
}
self.inner.reserve(len, additional, T::LAYOUT)
}
/// A specialized version of `self.reserve(len, 1)` which requires the
@ -360,21 +346,12 @@ impl<T, A: Allocator> RawVec<T, A> {
#[cfg(not(no_global_oom_handling))]
#[inline(never)]
pub fn grow_one(&mut self) {
if let Err(err) = self.grow_amortized(self.cap.0, 1) {
handle_error(err);
}
self.inner.grow_one(T::LAYOUT)
}
/// The same as `reserve`, but returns on errors instead of panicking or aborting.
pub fn try_reserve(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> {
if self.needs_to_grow(len, additional) {
self.grow_amortized(len, additional)?;
}
unsafe {
// Inform the optimizer that the reservation has succeeded or wasn't needed
hint::assert_unchecked(!self.needs_to_grow(len, additional));
}
Ok(())
self.inner.try_reserve(len, additional, T::LAYOUT)
}
/// Ensures that the buffer contains at least enough space to hold `len +
@ -396,9 +373,7 @@ impl<T, A: Allocator> RawVec<T, A> {
/// Aborts on OOM.
#[cfg(not(no_global_oom_handling))]
pub fn reserve_exact(&mut self, len: usize, additional: usize) {
if let Err(err) = self.try_reserve_exact(len, additional) {
handle_error(err);
}
self.inner.reserve_exact(len, additional, T::LAYOUT)
}
/// The same as `reserve_exact`, but returns on errors instead of panicking or aborting.
@ -407,14 +382,7 @@ impl<T, A: Allocator> RawVec<T, A> {
len: usize,
additional: usize,
) -> Result<(), TryReserveError> {
if self.needs_to_grow(len, additional) {
self.grow_exact(len, additional)?;
}
unsafe {
// Inform the optimizer that the reservation has succeeded or wasn't needed
hint::assert_unchecked(!self.needs_to_grow(len, additional));
}
Ok(())
self.inner.try_reserve_exact(len, additional, T::LAYOUT)
}
/// Shrinks the buffer down to the specified capacity. If the given amount
@ -430,22 +398,230 @@ impl<T, A: Allocator> RawVec<T, A> {
#[cfg(not(no_global_oom_handling))]
#[inline]
pub fn shrink_to_fit(&mut self, cap: usize) {
if let Err(err) = self.shrink(cap) {
handle_error(err);
}
self.inner.shrink_to_fit(cap, T::LAYOUT)
}
}
impl<T, A: Allocator> RawVec<T, A> {
/// Returns if the buffer needs to grow to fulfill the needed extra capacity.
/// Mainly used to make inlining reserve-calls possible without inlining `grow`.
fn needs_to_grow(&self, len: usize, additional: usize) -> bool {
additional > self.capacity().wrapping_sub(len)
unsafe impl<#[may_dangle] T, A: Allocator> Drop for RawVec<T, A> {
/// Frees the memory owned by the `RawVec` *without* trying to drop its contents.
fn drop(&mut self) {
// SAFETY: We are in a Drop impl, self.inner will not be used again.
unsafe { self.inner.deallocate(T::LAYOUT) }
}
}
impl<A: Allocator> RawVecInner<A> {
#[inline]
#[rustc_const_stable(feature = "raw_vec_internals_const", since = "1.81")]
const fn new_in(alloc: A, align: usize) -> Self {
let ptr = unsafe { core::mem::transmute(align) };
// `cap: 0` means "unallocated". zero-sized types are ignored.
Self { ptr, cap: Cap::ZERO, alloc }
}
/// # Safety
///
/// `cap` must not exceed `isize::MAX`.
#[cfg(not(no_global_oom_handling))]
#[inline]
fn with_capacity_in(capacity: usize, alloc: A, elem_layout: Layout) -> Self {
match Self::try_allocate_in(capacity, AllocInit::Uninitialized, alloc, elem_layout) {
Ok(this) => {
unsafe {
// Make it more obvious that a subsequent Vec::reserve(capacity) will not allocate.
hint::assert_unchecked(!this.needs_to_grow(0, capacity, elem_layout));
}
this
}
Err(err) => handle_error(err),
}
}
#[inline]
fn try_with_capacity_in(
capacity: usize,
alloc: A,
elem_layout: Layout,
) -> Result<Self, TryReserveError> {
Self::try_allocate_in(capacity, AllocInit::Uninitialized, alloc, elem_layout)
}
#[cfg(not(no_global_oom_handling))]
#[inline]
fn with_capacity_zeroed_in(capacity: usize, alloc: A, elem_layout: Layout) -> Self {
match Self::try_allocate_in(capacity, AllocInit::Zeroed, alloc, elem_layout) {
Ok(res) => res,
Err(err) => handle_error(err),
}
}
fn try_allocate_in(
capacity: usize,
init: AllocInit,
alloc: A,
elem_layout: Layout,
) -> Result<Self, TryReserveError> {
// We avoid `unwrap_or_else` here because it bloats the amount of
// LLVM IR generated.
let layout = match layout_array(capacity, elem_layout) {
Ok(layout) => layout,
Err(_) => return Err(CapacityOverflow.into()),
};
// Don't allocate here because `Drop` will not deallocate when `capacity` is 0.
if layout.size() == 0 {
return Ok(Self::new_in(alloc, elem_layout.align()));
}
if let Err(err) = alloc_guard(layout.size()) {
return Err(err);
}
let result = match init {
AllocInit::Uninitialized => alloc.allocate(layout),
#[cfg(not(no_global_oom_handling))]
AllocInit::Zeroed => alloc.allocate_zeroed(layout),
};
let ptr = match result {
Ok(ptr) => ptr,
Err(_) => return Err(AllocError { layout, non_exhaustive: () }.into()),
};
// Allocators currently return a `NonNull<[u8]>` whose length
// matches the size requested. If that ever changes, the capacity
// here should change to `ptr.len() / mem::size_of::<T>()`.
Ok(Self { ptr: Unique::from(ptr.cast()), cap: unsafe { Cap(capacity) }, alloc })
}
#[inline]
unsafe fn from_raw_parts_in(ptr: *mut u8, cap: Cap, alloc: A) -> Self {
Self { ptr: unsafe { Unique::new_unchecked(ptr) }, cap, alloc }
}
#[inline]
unsafe fn from_nonnull_in(ptr: NonNull<u8>, cap: Cap, alloc: A) -> Self {
Self { ptr: Unique::from(ptr), cap, alloc }
}
#[inline]
fn ptr<T>(&self) -> *mut T {
self.non_null::<T>().as_ptr()
}
#[inline]
fn non_null<T>(&self) -> NonNull<T> {
self.ptr.cast().into()
}
#[inline]
fn capacity(&self, elem_size: usize) -> usize {
if elem_size == 0 { usize::MAX } else { self.cap.0 }
}
#[inline]
fn allocator(&self) -> &A {
&self.alloc
}
#[inline]
fn current_memory(&self, elem_layout: Layout) -> Option<(NonNull<u8>, Layout)> {
if elem_layout.size() == 0 || self.cap.0 == 0 {
None
} else {
// We could use Layout::array here which ensures the absence of isize and usize overflows
// and could hypothetically handle differences between stride and size, but this memory
// has already been allocated so we know it can't overflow and currently Rust does not
// support such types. So we can do better by skipping some checks and avoid an unwrap.
unsafe {
let alloc_size = elem_layout.size().unchecked_mul(self.cap.0);
let layout = Layout::from_size_align_unchecked(alloc_size, elem_layout.align());
Some((self.ptr.into(), layout))
}
}
}
#[cfg(not(no_global_oom_handling))]
#[inline]
fn reserve(&mut self, len: usize, additional: usize, elem_layout: Layout) {
// Callers expect this function to be very cheap when there is already sufficient capacity.
// Therefore, we move all the resizing and error-handling logic from grow_amortized and
// handle_reserve behind a call, while making sure that this function is likely to be
// inlined as just a comparison and a call if the comparison fails.
#[cold]
fn do_reserve_and_handle<A: Allocator>(
slf: &mut RawVecInner<A>,
len: usize,
additional: usize,
elem_layout: Layout,
) {
if let Err(err) = slf.grow_amortized(len, additional, elem_layout) {
handle_error(err);
}
}
if self.needs_to_grow(len, additional, elem_layout) {
do_reserve_and_handle(self, len, additional, elem_layout);
}
}
#[cfg(not(no_global_oom_handling))]
#[inline]
fn grow_one(&mut self, elem_layout: Layout) {
if let Err(err) = self.grow_amortized(self.cap.0, 1, elem_layout) {
handle_error(err);
}
}
fn try_reserve(
&mut self,
len: usize,
additional: usize,
elem_layout: Layout,
) -> Result<(), TryReserveError> {
if self.needs_to_grow(len, additional, elem_layout) {
self.grow_amortized(len, additional, elem_layout)?;
}
unsafe {
// Inform the optimizer that the reservation has succeeded or wasn't needed
hint::assert_unchecked(!self.needs_to_grow(len, additional, elem_layout));
}
Ok(())
}
#[cfg(not(no_global_oom_handling))]
fn reserve_exact(&mut self, len: usize, additional: usize, elem_layout: Layout) {
if let Err(err) = self.try_reserve_exact(len, additional, elem_layout) {
handle_error(err);
}
}
fn try_reserve_exact(
&mut self,
len: usize,
additional: usize,
elem_layout: Layout,
) -> Result<(), TryReserveError> {
if self.needs_to_grow(len, additional, elem_layout) {
self.grow_exact(len, additional, elem_layout)?;
}
unsafe {
// Inform the optimizer that the reservation has succeeded or wasn't needed
hint::assert_unchecked(!self.needs_to_grow(len, additional, elem_layout));
}
Ok(())
}
#[cfg(not(no_global_oom_handling))]
#[inline]
fn shrink_to_fit(&mut self, cap: usize, elem_layout: Layout) {
if let Err(err) = self.shrink(cap, elem_layout) {
handle_error(err);
}
}
#[inline]
fn needs_to_grow(&self, len: usize, additional: usize, elem_layout: Layout) -> bool {
additional > self.capacity(elem_layout.size()).wrapping_sub(len)
}
#[inline]
unsafe fn set_ptr_and_cap(&mut self, ptr: NonNull<[u8]>, cap: usize) {
// Allocators currently return a `NonNull<[u8]>` whose length matches
// the size requested. If that ever changes, the capacity here should
@ -454,18 +630,16 @@ impl<T, A: Allocator> RawVec<T, A> {
self.cap = unsafe { Cap(cap) };
}
// This method is usually instantiated many times. So we want it to be as
// small as possible, to improve compile times. But we also want as much of
// its contents to be statically computable as possible, to make the
// generated code run faster. Therefore, this method is carefully written
// so that all of the code that depends on `T` is within it, while as much
// of the code that doesn't depend on `T` as possible is in functions that
// are non-generic over `T`.
fn grow_amortized(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> {
fn grow_amortized(
&mut self,
len: usize,
additional: usize,
elem_layout: Layout,
) -> Result<(), TryReserveError> {
// This is ensured by the calling contexts.
debug_assert!(additional > 0);
if T::IS_ZST {
if elem_layout.size() == 0 {
// Since we return a capacity of `usize::MAX` when `elem_size` is
// 0, getting to here necessarily means the `RawVec` is overfull.
return Err(CapacityOverflow.into());
@ -477,33 +651,34 @@ impl<T, A: Allocator> RawVec<T, A> {
// This guarantees exponential growth. The doubling cannot overflow
// because `cap <= isize::MAX` and the type of `cap` is `usize`.
let cap = cmp::max(self.cap.0 * 2, required_cap);
let cap = cmp::max(Self::MIN_NON_ZERO_CAP, cap);
let cap = cmp::max(min_non_zero_cap(elem_layout.size()), cap);
let new_layout = Layout::array::<T>(cap);
let new_layout = layout_array(cap, elem_layout)?;
// `finish_grow` is non-generic over `T`.
let ptr = finish_grow(new_layout, self.current_memory(), &mut self.alloc)?;
let ptr = finish_grow(new_layout, self.current_memory(elem_layout), &mut self.alloc)?;
// SAFETY: finish_grow would have resulted in a capacity overflow if we tried to allocate more than `isize::MAX` items
unsafe { self.set_ptr_and_cap(ptr, cap) };
Ok(())
}
// The constraints on this method are much the same as those on
// `grow_amortized`, but this method is usually instantiated less often so
// it's less critical.
fn grow_exact(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> {
if T::IS_ZST {
fn grow_exact(
&mut self,
len: usize,
additional: usize,
elem_layout: Layout,
) -> Result<(), TryReserveError> {
if elem_layout.size() == 0 {
// Since we return a capacity of `usize::MAX` when the type size is
// 0, getting to here necessarily means the `RawVec` is overfull.
return Err(CapacityOverflow.into());
}
let cap = len.checked_add(additional).ok_or(CapacityOverflow)?;
let new_layout = Layout::array::<T>(cap);
let new_layout = layout_array(cap, elem_layout)?;
// `finish_grow` is non-generic over `T`.
let ptr = finish_grow(new_layout, self.current_memory(), &mut self.alloc)?;
// SAFETY: `finish_grow` would have resulted in a capacity overflow if we tried to allocate more than `isize::MAX` items
let ptr = finish_grow(new_layout, self.current_memory(elem_layout), &mut self.alloc)?;
// SAFETY: finish_grow would have resulted in a capacity overflow if we tried to allocate more than `isize::MAX` items
unsafe {
self.set_ptr_and_cap(ptr, cap);
}
@ -512,10 +687,10 @@ impl<T, A: Allocator> RawVec<T, A> {
#[cfg(not(no_global_oom_handling))]
#[inline]
fn shrink(&mut self, cap: usize) -> Result<(), TryReserveError> {
assert!(cap <= self.capacity(), "Tried to shrink to a larger capacity");
fn shrink(&mut self, cap: usize, elem_layout: Layout) -> Result<(), TryReserveError> {
assert!(cap <= self.capacity(elem_layout.size()), "Tried to shrink to a larger capacity");
// SAFETY: Just checked this isn't trying to grow
unsafe { self.shrink_unchecked(cap) }
unsafe { self.shrink_unchecked(cap, elem_layout) }
}
/// `shrink`, but without the capacity check.
@ -529,23 +704,27 @@ impl<T, A: Allocator> RawVec<T, A> {
/// # Safety
/// `cap <= self.capacity()`
#[cfg(not(no_global_oom_handling))]
unsafe fn shrink_unchecked(&mut self, cap: usize) -> Result<(), TryReserveError> {
let (ptr, layout) = if let Some(mem) = self.current_memory() { mem } else { return Ok(()) };
// See current_memory() why this assert is here
const { assert!(mem::size_of::<T>() % mem::align_of::<T>() == 0) };
unsafe fn shrink_unchecked(
&mut self,
cap: usize,
elem_layout: Layout,
) -> Result<(), TryReserveError> {
let (ptr, layout) =
if let Some(mem) = self.current_memory(elem_layout) { mem } else { return Ok(()) };
// If shrinking to 0, deallocate the buffer. We don't reach this point
// for the T::IS_ZST case since current_memory() will have returned
// None.
if cap == 0 {
unsafe { self.alloc.deallocate(ptr, layout) };
self.ptr = Unique::dangling();
self.ptr =
unsafe { Unique::new_unchecked(ptr::without_provenance_mut(elem_layout.align())) };
self.cap = Cap::ZERO;
} else {
let ptr = unsafe {
// `Layout::array` cannot overflow here because it would have
// Layout cannot overflow here because it would have
// overflowed earlier when capacity was larger.
let new_size = mem::size_of::<T>().unchecked_mul(cap);
let new_size = elem_layout.size().unchecked_mul(cap);
let new_layout = Layout::from_size_align_unchecked(new_size, layout.align());
self.alloc
.shrink(ptr, layout, new_layout)
@ -558,24 +737,32 @@ impl<T, A: Allocator> RawVec<T, A> {
}
Ok(())
}
/// # Safety
///
/// This function deallocates the owned allocation, but does not update `ptr` or `cap` to
/// prevent double-free or use-after-free. Essentially, do not do anything with the caller
/// after this function returns.
/// Ideally this function would take `self` by move, but it cannot because it exists to be
/// called from a `Drop` impl.
unsafe fn deallocate(&mut self, elem_layout: Layout) {
if let Some((ptr, layout)) = self.current_memory(elem_layout) {
unsafe {
self.alloc.deallocate(ptr, layout);
}
}
}
}
// This function is outside `RawVec` to minimize compile times. See the comment
// above `RawVec::grow_amortized` for details. (The `A` parameter isn't
// significant, because the number of different `A` types seen in practice is
// much smaller than the number of `T` types.)
#[inline(never)]
fn finish_grow<A>(
new_layout: Result<Layout, LayoutError>,
new_layout: Layout,
current_memory: Option<(NonNull<u8>, Layout)>,
alloc: &mut A,
) -> Result<NonNull<[u8]>, TryReserveError>
where
A: Allocator,
{
// Check for the error here to minimize the size of `RawVec::grow_*`.
let new_layout = new_layout.map_err(|_| CapacityOverflow)?;
alloc_guard(new_layout.size())?;
let memory = if let Some((ptr, old_layout)) = current_memory {
@ -592,15 +779,6 @@ where
memory.map_err(|_| AllocError { layout: new_layout, non_exhaustive: () }.into())
}
unsafe impl<#[may_dangle] T, A: Allocator> Drop for RawVec<T, A> {
/// Frees the memory owned by the `RawVec` *without* trying to drop its contents.
fn drop(&mut self) {
if let Some((ptr, layout)) = self.current_memory() {
unsafe { self.alloc.deallocate(ptr, layout) }
}
}
}
// Central function for reserve error handling.
#[cfg(not(no_global_oom_handling))]
#[cold]
@ -627,3 +805,8 @@ fn alloc_guard(alloc_size: usize) -> Result<(), TryReserveError> {
Ok(())
}
}
#[inline]
fn layout_array(cap: usize, elem_layout: Layout) -> Result<Layout, TryReserveError> {
elem_layout.repeat(cap).map(|(layout, _pad)| layout).map_err(|_| CapacityOverflow.into())
}
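
For reference, a minimal stable-Rust sketch of what `layout_array` computes (the helper itself uses `Layout::repeat`, currently an unstable API; the function and error type below are hypothetical stand-ins, not the std implementation): the array layout is the element layout with its size multiplied by the capacity, failing on overflow.

```rust
use std::alloc::Layout;

// Hypothetical stand-in for illustration only.
fn layout_array_sketch(cap: usize, elem_layout: Layout) -> Result<Layout, &'static str> {
    // For Rust types the stride equals the size, so the total size is size * cap.
    let size = elem_layout.size().checked_mul(cap).ok_or("capacity overflow")?;
    Layout::from_size_align(size, elem_layout.align()).map_err(|_| "capacity overflow")
}

fn main() {
    let l = layout_array_sketch(4, Layout::new::<u64>()).unwrap();
    assert_eq!((l.size(), l.align()), (32, 8));
    assert_eq!(l, Layout::array::<u64>(4).unwrap()); // agrees with the type-generic path
    assert!(layout_array_sketch(usize::MAX, Layout::new::<u64>()).is_err());
}
```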


@ -43,9 +43,9 @@ fn allocator_param() {
let a = BoundedAlloc { fuel: Cell::new(500) };
let mut v: RawVec<u8, _> = RawVec::with_capacity_in(50, a);
assert_eq!(v.alloc.fuel.get(), 450);
assert_eq!(v.inner.alloc.fuel.get(), 450);
v.reserve(50, 150); // (causes a realloc, thus using 50 + 150 = 200 units of fuel)
assert_eq!(v.alloc.fuel.get(), 250);
assert_eq!(v.inner.alloc.fuel.get(), 250);
}
#[test]
@ -86,7 +86,7 @@ struct ZST;
fn zst_sanity<T>(v: &RawVec<T>) {
assert_eq!(v.capacity(), usize::MAX);
assert_eq!(v.ptr(), core::ptr::Unique::<T>::dangling().as_ptr());
assert_eq!(v.current_memory(), None);
assert_eq!(v.inner.current_memory(T::LAYOUT), None);
}
#[test]
@ -106,22 +106,11 @@ fn zst() {
let v: RawVec<ZST> = RawVec::with_capacity_in(100, Global);
zst_sanity(&v);
let v: RawVec<ZST> = RawVec::try_allocate_in(0, AllocInit::Uninitialized, Global).unwrap();
zst_sanity(&v);
let v: RawVec<ZST> = RawVec::try_allocate_in(100, AllocInit::Uninitialized, Global).unwrap();
zst_sanity(&v);
let mut v: RawVec<ZST> =
RawVec::try_allocate_in(usize::MAX, AllocInit::Uninitialized, Global).unwrap();
let mut v: RawVec<ZST> = RawVec::with_capacity_in(usize::MAX, Global);
zst_sanity(&v);
// Check all these operations work as expected with zero-sized elements.
assert!(!v.needs_to_grow(100, usize::MAX - 100));
assert!(v.needs_to_grow(101, usize::MAX - 100));
zst_sanity(&v);
v.reserve(100, usize::MAX - 100);
//v.reserve(101, usize::MAX - 100); // panics, in `zst_reserve_panic` below
zst_sanity(&v);
@ -138,12 +127,12 @@ fn zst() {
assert_eq!(v.try_reserve_exact(101, usize::MAX - 100), cap_err);
zst_sanity(&v);
assert_eq!(v.grow_amortized(100, usize::MAX - 100), cap_err);
assert_eq!(v.grow_amortized(101, usize::MAX - 100), cap_err);
assert_eq!(v.inner.grow_amortized(100, usize::MAX - 100, ZST::LAYOUT), cap_err);
assert_eq!(v.inner.grow_amortized(101, usize::MAX - 100, ZST::LAYOUT), cap_err);
zst_sanity(&v);
assert_eq!(v.grow_exact(100, usize::MAX - 100), cap_err);
assert_eq!(v.grow_exact(101, usize::MAX - 100), cap_err);
assert_eq!(v.inner.grow_exact(100, usize::MAX - 100, ZST::LAYOUT), cap_err);
assert_eq!(v.inner.grow_exact(101, usize::MAX - 100, ZST::LAYOUT), cap_err);
zst_sanity(&v);
}


@ -5,6 +5,7 @@
#![stable(feature = "rust1", since = "1.0.0")]
use crate::alloc::Layout;
use crate::marker::DiscriminantKind;
use crate::{clone, cmp, fmt, hash, intrinsics, ptr};
@ -1238,6 +1239,10 @@ pub trait SizedTypeProperties: Sized {
#[doc(hidden)]
#[unstable(feature = "sized_type_properties", issue = "none")]
const IS_ZST: bool = size_of::<Self>() == 0;
#[doc(hidden)]
#[unstable(feature = "sized_type_properties", issue = "none")]
const LAYOUT: Layout = Layout::new::<Self>();
}
#[doc(hidden)]
#[unstable(feature = "sized_type_properties", issue = "none")]


@ -56,7 +56,7 @@ class StdStringProvider(printer_base):
self._valobj = valobj
vec = valobj["vec"]
self._length = int(vec["len"])
self._data_ptr = unwrap_unique_or_non_null(vec["buf"]["ptr"])
self._data_ptr = unwrap_unique_or_non_null(vec["buf"]["inner"]["ptr"])
def to_string(self):
return self._data_ptr.lazy_string(encoding="utf-8", length=self._length)
@ -74,7 +74,7 @@ class StdOsStringProvider(printer_base):
vec = buf[ZERO_FIELD] if is_windows else buf
self._length = int(vec["len"])
self._data_ptr = unwrap_unique_or_non_null(vec["buf"]["ptr"])
self._data_ptr = unwrap_unique_or_non_null(vec["buf"]["inner"]["ptr"])
def to_string(self):
return self._data_ptr.lazy_string(encoding="utf-8", length=self._length)
@ -96,6 +96,7 @@ class StdStrProvider(printer_base):
def display_hint():
return "string"
def _enumerate_array_elements(element_ptrs):
for (i, element_ptr) in enumerate(element_ptrs):
key = "[{}]".format(i)
@ -112,6 +113,7 @@ def _enumerate_array_elements(element_ptrs):
yield key, element
class StdSliceProvider(printer_base):
def __init__(self, valobj):
self._valobj = valobj
@ -130,11 +132,14 @@ class StdSliceProvider(printer_base):
def display_hint():
return "array"
class StdVecProvider(printer_base):
def __init__(self, valobj):
self._valobj = valobj
self._length = int(valobj["len"])
self._data_ptr = unwrap_unique_or_non_null(valobj["buf"]["ptr"])
self._data_ptr = unwrap_unique_or_non_null(valobj["buf"]["inner"]["ptr"])
ptr_ty = gdb.Type.pointer(valobj.type.template_argument(0))
self._data_ptr = self._data_ptr.reinterpret_cast(ptr_ty)
def to_string(self):
return "Vec(size={})".format(self._length)
@ -155,11 +160,13 @@ class StdVecDequeProvider(printer_base):
self._head = int(valobj["head"])
self._size = int(valobj["len"])
# BACKCOMPAT: rust 1.75
cap = valobj["buf"]["cap"]
cap = valobj["buf"]["inner"]["cap"]
if cap.type.code != gdb.TYPE_CODE_INT:
cap = cap[ZERO_FIELD]
self._cap = int(cap)
self._data_ptr = unwrap_unique_or_non_null(valobj["buf"]["ptr"])
self._data_ptr = unwrap_unique_or_non_null(valobj["buf"]["inner"]["ptr"])
ptr_ty = gdb.Type.pointer(valobj.type.template_argument(0))
self._data_ptr = self._data_ptr.reinterpret_cast(ptr_ty)
def to_string(self):
return "VecDeque(size={})".format(self._size)


@ -389,11 +389,11 @@ class StdVecSyntheticProvider:
def update(self):
# type: () -> None
self.length = self.valobj.GetChildMemberWithName("len").GetValueAsUnsigned()
self.buf = self.valobj.GetChildMemberWithName("buf")
self.buf = self.valobj.GetChildMemberWithName("buf").GetChildMemberWithName("inner")
self.data_ptr = unwrap_unique_or_non_null(self.buf.GetChildMemberWithName("ptr"))
self.element_type = self.data_ptr.GetType().GetPointeeType()
self.element_type = self.valobj.GetType().GetTemplateArgumentType(0)
self.element_type_size = self.element_type.GetByteSize()
def has_children(self):
@ -474,7 +474,7 @@ class StdVecDequeSyntheticProvider:
# type: () -> None
self.head = self.valobj.GetChildMemberWithName("head").GetValueAsUnsigned()
self.size = self.valobj.GetChildMemberWithName("len").GetValueAsUnsigned()
self.buf = self.valobj.GetChildMemberWithName("buf")
self.buf = self.valobj.GetChildMemberWithName("buf").GetChildMemberWithName("inner")
cap = self.buf.GetChildMemberWithName("cap")
if cap.GetType().num_fields == 1:
cap = cap.GetChildAtIndex(0)
@ -482,7 +482,7 @@ class StdVecDequeSyntheticProvider:
self.data_ptr = unwrap_unique_or_non_null(self.buf.GetChildMemberWithName("ptr"))
self.element_type = self.data_ptr.GetType().GetPointeeType()
self.element_type = self.valobj.GetType().GetTemplateArgumentType(0)
self.element_type_size = self.element_type.GetByteSize()
def has_children(self):


@ -4,10 +4,10 @@
<DisplayString>{{ len={len} }}</DisplayString>
<Expand>
<Item Name="[len]" ExcludeView="simple">len</Item>
<Item Name="[capacity]" ExcludeView="simple">buf.cap.__0</Item>
<Item Name="[capacity]" ExcludeView="simple">buf.inner.cap.__0</Item>
<ArrayItems>
<Size>len</Size>
<ValuePointer>buf.ptr.pointer.pointer</ValuePointer>
<ValuePointer>($T1*)buf.inner.ptr.pointer.pointer</ValuePointer>
</ArrayItems>
</Expand>
</Type>
@ -15,7 +15,7 @@
<DisplayString>{{ len={len} }}</DisplayString>
<Expand>
<Item Name="[len]" ExcludeView="simple">len</Item>
<Item Name="[capacity]" ExcludeView="simple">buf.cap.__0</Item>
<Item Name="[capacity]" ExcludeView="simple">buf.inner.cap.__0</Item>
<CustomListItems>
<Variable Name="i" InitialValue="0" />
<Size>len</Size>
@ -23,7 +23,7 @@
<If Condition="i == len">
<Break/>
</If>
<Item>buf.ptr.pointer.pointer[(i + head) % buf.cap.__0]</Item>
<Item>(($T1*)buf.inner.ptr.pointer.pointer)[(i + head) % buf.inner.cap.__0]</Item>
<Exec>i = i + 1</Exec>
</Loop>
</CustomListItems>
@ -41,17 +41,17 @@
</Expand>
</Type>
<Type Name="alloc::string::String">
<DisplayString>{(char*)vec.buf.ptr.pointer.pointer,[vec.len]s8}</DisplayString>
<StringView>(char*)vec.buf.ptr.pointer.pointer,[vec.len]s8</StringView>
<DisplayString>{(char*)vec.buf.inner.ptr.pointer.pointer,[vec.len]s8}</DisplayString>
<StringView>(char*)vec.buf.inner.ptr.pointer.pointer,[vec.len]s8</StringView>
<Expand>
<Item Name="[len]" ExcludeView="simple">vec.len</Item>
<Item Name="[capacity]" ExcludeView="simple">vec.buf.cap.__0</Item>
<Item Name="[capacity]" ExcludeView="simple">vec.buf.inner.cap.__0</Item>
<Synthetic Name="[chars]">
<DisplayString>{(char*)vec.buf.ptr.pointer.pointer,[vec.len]s8}</DisplayString>
<DisplayString>{(char*)vec.buf.inner.ptr.pointer.pointer,[vec.len]s8}</DisplayString>
<Expand>
<ArrayItems>
<Size>vec.len</Size>
<ValuePointer>(char*)vec.buf.ptr.pointer.pointer</ValuePointer>
<ValuePointer>(char*)vec.buf.inner.ptr.pointer.pointer</ValuePointer>
</ArrayItems>
</Expand>
</Synthetic>


@ -104,14 +104,14 @@
</Type>
<Type Name="std::ffi::os_str::OsString">
<DisplayString>{(char*)inner.inner.bytes.buf.ptr.pointer.pointer,[inner.inner.bytes.len]}</DisplayString>
<DisplayString>{(char*)inner.inner.bytes.buf.inner.ptr.pointer.pointer,[inner.inner.bytes.len]}</DisplayString>
<Expand>
<Synthetic Name="[chars]">
<DisplayString>{(char*)inner.inner.bytes.buf.ptr.pointer.pointer,[inner.inner.bytes.len]}</DisplayString>
<DisplayString>{(char*)inner.inner.bytes.buf.inner.ptr.pointer.pointer,[inner.inner.bytes.len]}</DisplayString>
<Expand>
<ArrayItems>
<Size>inner.inner.bytes.len</Size>
<ValuePointer>(char*)inner.inner.bytes.buf.ptr.pointer.pointer</ValuePointer>
<ValuePointer>(char*)inner.inner.bytes.buf.inner.ptr.pointer.pointer</ValuePointer>
</ArrayItems>
</Expand>
</Synthetic>


@ -10,7 +10,7 @@ use std::sync::Once;
const ATOMIC: AtomicUsize = AtomicUsize::new(5);
const CELL: Cell<usize> = Cell::new(6);
const ATOMIC_TUPLE: ([AtomicUsize; 1], Vec<AtomicUsize>, u8) = ([ATOMIC], Vec::new(), 7);
const ATOMIC_TUPLE: ([AtomicUsize; 1], Option<Box<AtomicUsize>>, u8) = ([ATOMIC], None, 7);
const INTEGER: u8 = 8;
const STRING: String = String::new();
const STR: &str = "012345";
@ -74,7 +74,6 @@ fn main() {
let _ = &(&&&&ATOMIC_TUPLE).0; //~ ERROR: interior mutability
let _ = &ATOMIC_TUPLE.0[0]; //~ ERROR: interior mutability
let _ = ATOMIC_TUPLE.0[0].load(Ordering::SeqCst); //~ ERROR: interior mutability
let _ = &*ATOMIC_TUPLE.1;
let _ = &ATOMIC_TUPLE.2;
let _ = (&&&&ATOMIC_TUPLE).0;
let _ = (&&&&ATOMIC_TUPLE).2;


@ -92,7 +92,7 @@ LL | let _ = ATOMIC_TUPLE.0[0].load(Ordering::SeqCst);
= help: assign this const to a local or static variable, and use the variable here
error: a `const` item with interior mutability should not be borrowed
--> tests/ui/borrow_interior_mutable_const/others.rs:82:13
--> tests/ui/borrow_interior_mutable_const/others.rs:81:13
|
LL | let _ = ATOMIC_TUPLE.0[0];
| ^^^^^^^^^^^^
@ -100,7 +100,7 @@ LL | let _ = ATOMIC_TUPLE.0[0];
= help: assign this const to a local or static variable, and use the variable here
error: a `const` item with interior mutability should not be borrowed
--> tests/ui/borrow_interior_mutable_const/others.rs:87:5
--> tests/ui/borrow_interior_mutable_const/others.rs:86:5
|
LL | CELL.set(2);
| ^^^^
@ -108,7 +108,7 @@ LL | CELL.set(2);
= help: assign this const to a local or static variable, and use the variable here
error: a `const` item with interior mutability should not be borrowed
--> tests/ui/borrow_interior_mutable_const/others.rs:88:16
--> tests/ui/borrow_interior_mutable_const/others.rs:87:16
|
LL | assert_eq!(CELL.get(), 6);
| ^^^^


@ -1,3 +1,5 @@
// FIXME: This test is broken since https://github.com/rust-lang/rust/pull/126793,
// possibly related to the additional struct between Vec and Unique.
//@revisions: default uniq
// We disable the GC for this test because it would change what is printed.
//@compile-flags: -Zmiri-tree-borrows -Zmiri-provenance-gc=0


@ -2,7 +2,9 @@
Warning: this tree is indicative only. Some tags may have been hidden.
0.. 2
| Act | └─┬──<TAG=root of the allocation>
|-----| └─┬──<TAG=base.as_ptr(), base.as_ptr()>
|-----| └─┬──<TAG=raw_parts.0>
|-----| └────<TAG=reconstructed.as_ptr(), reconstructed.as_ptr()>
|-----| ├────<TAG=base.as_ptr()>
|-----| ├────<TAG=base.as_ptr()>
|-----| └─┬──<TAG=raw_parts.0>
|-----| ├────<TAG=reconstructed.as_ptr()>
|-----| └────<TAG=reconstructed.as_ptr()>
──────────────────────────────────────────────────


@ -81,10 +81,10 @@
// cdb-check:vec,d [...] : { len=4 } [Type: [...]::Vec<u64,alloc::alloc::Global>]
// cdb-check: [len] : 4 [Type: [...]]
// cdb-check: [capacity] : [...] [Type: [...]]
// cdb-check: [0] : 4 [Type: unsigned __int64]
// cdb-check: [1] : 5 [Type: unsigned __int64]
// cdb-check: [2] : 6 [Type: unsigned __int64]
// cdb-check: [3] : 7 [Type: unsigned __int64]
// cdb-check: [0] : 4 [Type: u64]
// cdb-check: [1] : 5 [Type: u64]
// cdb-check: [2] : 6 [Type: u64]
// cdb-check: [3] : 7 [Type: u64]
// cdb-command: dx str_slice
// cdb-check:str_slice : "IAMA string slice!" [Type: ref$<str$>]
@ -141,8 +141,8 @@
// cdb-check: [<Raw View>] [Type: alloc::collections::vec_deque::VecDeque<i32,alloc::alloc::Global>]
// cdb-check: [len] : 0x2 [Type: unsigned [...]]
// cdb-check: [capacity] : 0x8 [Type: unsigned [...]]
// cdb-check: [0x0] : 90 [Type: int]
// cdb-check: [0x1] : 20 [Type: int]
// cdb-check: [0x0] : 90 [Type: i32]
// cdb-check: [0x1] : 20 [Type: i32]
#![allow(unused_variables)]
use std::collections::{LinkedList, VecDeque};


@ -7,7 +7,7 @@
// gdb-command:run
// gdb-command:print plain_string
// gdbr-check:$1 = alloc::string::String {vec: alloc::vec::Vec<u8, alloc::alloc::Global> {buf: alloc::raw_vec::RawVec<u8, alloc::alloc::Global> {ptr: core::ptr::unique::Unique<u8> {pointer: core::ptr::non_null::NonNull<u8> {pointer: 0x[...]}, _marker: core::marker::PhantomData<u8>}, cap: alloc::raw_vec::Cap (5), alloc: alloc::alloc::Global}, len: 5}}
// gdbr-check:$1 = alloc::string::String {vec: alloc::vec::Vec<u8, alloc::alloc::Global> {buf: alloc::raw_vec::RawVec<u8, alloc::alloc::Global> {inner: alloc::raw_vec::RawVecInner<alloc::alloc::Global> {ptr: core::ptr::unique::Unique<u8> {pointer: core::ptr::non_null::NonNull<u8> {pointer: 0x[...]}, _marker: core::marker::PhantomData<u8>}, cap: alloc::raw_vec::Cap (5), alloc: alloc::alloc::Global}, _marker: core::marker::PhantomData<u8>}, len: 5}}
// gdb-command:print plain_str
// gdbr-check:$2 = "Hello"


@ -5,63 +5,93 @@ fn vec_deref_to_slice(_1: &Vec<u8>) -> &[u8] {
let mut _0: &[u8];
scope 1 (inlined <Vec<u8> as Deref>::deref) {
debug self => _1;
let mut _4: *const u8;
let mut _5: usize;
let mut _7: usize;
scope 2 (inlined Vec::<u8>::as_ptr) {
debug self => _1;
let mut _2: &alloc::raw_vec::RawVec<u8>;
scope 3 (inlined alloc::raw_vec::RawVec::<u8>::ptr) {
debug self => _2;
let mut _3: std::ptr::NonNull<u8>;
scope 4 (inlined Unique::<u8>::as_ptr) {
debug ((self: Unique<u8>).0: std::ptr::NonNull<u8>) => _3;
debug ((self: Unique<u8>).1: std::marker::PhantomData<u8>) => const PhantomData::<u8>;
scope 5 (inlined NonNull::<u8>::as_ptr) {
let mut _3: &alloc::raw_vec::RawVecInner;
scope 4 (inlined alloc::raw_vec::RawVecInner::ptr::<u8>) {
debug self => _3;
let mut _6: std::ptr::NonNull<u8>;
scope 5 (inlined alloc::raw_vec::RawVecInner::non_null::<u8>) {
debug self => _3;
let mut _4: std::ptr::NonNull<u8>;
scope 6 (inlined Unique::<u8>::cast::<u8>) {
debug ((self: Unique<u8>).0: std::ptr::NonNull<u8>) => _4;
debug ((self: Unique<u8>).1: std::marker::PhantomData<u8>) => const PhantomData::<u8>;
scope 7 (inlined NonNull::<u8>::cast::<u8>) {
debug self => _4;
scope 8 (inlined NonNull::<u8>::as_ptr) {
debug self => _4;
let mut _5: *const u8;
}
}
}
scope 9 (inlined #[track_caller] <Unique<u8> as Into<NonNull<u8>>>::into) {
debug ((self: Unique<u8>).0: std::ptr::NonNull<u8>) => _6;
debug ((self: Unique<u8>).1: std::marker::PhantomData<u8>) => const PhantomData::<u8>;
scope 10 (inlined <NonNull<u8> as From<Unique<u8>>>::from) {
debug ((unique: Unique<u8>).0: std::ptr::NonNull<u8>) => _6;
debug ((unique: Unique<u8>).1: std::marker::PhantomData<u8>) => const PhantomData::<u8>;
scope 11 (inlined Unique::<u8>::as_non_null_ptr) {
debug ((self: Unique<u8>).0: std::ptr::NonNull<u8>) => _6;
debug ((self: Unique<u8>).1: std::marker::PhantomData<u8>) => const PhantomData::<u8>;
}
}
}
}
scope 12 (inlined NonNull::<u8>::as_ptr) {
debug self => _6;
}
}
}
}
scope 6 (inlined std::slice::from_raw_parts::<'_, u8>) {
debug data => _4;
debug len => _5;
let _6: *const [u8];
scope 7 (inlined core::ub_checks::check_language_ub) {
scope 8 (inlined core::ub_checks::check_language_ub::runtime) {
scope 13 (inlined std::slice::from_raw_parts::<'_, u8>) {
debug data => _5;
debug len => _7;
let _8: *const [u8];
scope 14 (inlined core::ub_checks::check_language_ub) {
scope 15 (inlined core::ub_checks::check_language_ub::runtime) {
}
}
scope 9 (inlined std::mem::size_of::<u8>) {
scope 16 (inlined std::mem::size_of::<u8>) {
}
scope 10 (inlined align_of::<u8>) {
scope 17 (inlined align_of::<u8>) {
}
scope 11 (inlined slice_from_raw_parts::<u8>) {
debug data => _4;
debug len => _5;
scope 12 (inlined std::ptr::from_raw_parts::<[u8], u8>) {
debug data_pointer => _4;
debug metadata => _5;
scope 18 (inlined slice_from_raw_parts::<u8>) {
debug data => _5;
debug len => _7;
scope 19 (inlined std::ptr::from_raw_parts::<[u8], u8>) {
debug data_pointer => _5;
debug metadata => _7;
}
}
}
}
bb0: {
StorageLive(_4);
StorageLive(_2);
_2 = &((*_1).0: alloc::raw_vec::RawVec<u8>);
StorageLive(_3);
_3 = ((((*_1).0: alloc::raw_vec::RawVec<u8>).0: std::ptr::Unique<u8>).0: std::ptr::NonNull<u8>);
_4 = (_3.0: *const u8);
_3 = &(((*_1).0: alloc::raw_vec::RawVec<u8>).0: alloc::raw_vec::RawVecInner);
StorageLive(_6);
StorageLive(_4);
_4 = (((((*_1).0: alloc::raw_vec::RawVec<u8>).0: alloc::raw_vec::RawVecInner).0: std::ptr::Unique<u8>).0: std::ptr::NonNull<u8>);
_5 = (_4.0: *const u8);
_6 = NonNull::<u8> { pointer: _5 };
StorageDead(_4);
StorageDead(_6);
StorageDead(_3);
StorageDead(_2);
StorageLive(_5);
_5 = ((*_1).1: usize);
StorageLive(_6);
_6 = *const [u8] from (_4, _5);
_0 = &(*_6);
StorageDead(_6);
StorageDead(_5);
StorageDead(_4);
StorageLive(_7);
_7 = ((*_1).1: usize);
StorageLive(_8);
_8 = *const [u8] from (_5, _7);
_0 = &(*_8);
StorageDead(_8);
StorageDead(_7);
return;
}
}


@ -5,63 +5,93 @@ fn vec_deref_to_slice(_1: &Vec<u8>) -> &[u8] {
let mut _0: &[u8];
scope 1 (inlined <Vec<u8> as Deref>::deref) {
debug self => _1;
let mut _4: *const u8;
let mut _5: usize;
let mut _7: usize;
scope 2 (inlined Vec::<u8>::as_ptr) {
debug self => _1;
let mut _2: &alloc::raw_vec::RawVec<u8>;
scope 3 (inlined alloc::raw_vec::RawVec::<u8>::ptr) {
debug self => _2;
let mut _3: std::ptr::NonNull<u8>;
scope 4 (inlined Unique::<u8>::as_ptr) {
debug ((self: Unique<u8>).0: std::ptr::NonNull<u8>) => _3;
debug ((self: Unique<u8>).1: std::marker::PhantomData<u8>) => const PhantomData::<u8>;
scope 5 (inlined NonNull::<u8>::as_ptr) {
let mut _3: &alloc::raw_vec::RawVecInner;
scope 4 (inlined alloc::raw_vec::RawVecInner::ptr::<u8>) {
debug self => _3;
let mut _6: std::ptr::NonNull<u8>;
scope 5 (inlined alloc::raw_vec::RawVecInner::non_null::<u8>) {
debug self => _3;
let mut _4: std::ptr::NonNull<u8>;
scope 6 (inlined Unique::<u8>::cast::<u8>) {
debug ((self: Unique<u8>).0: std::ptr::NonNull<u8>) => _4;
debug ((self: Unique<u8>).1: std::marker::PhantomData<u8>) => const PhantomData::<u8>;
scope 7 (inlined NonNull::<u8>::cast::<u8>) {
debug self => _4;
scope 8 (inlined NonNull::<u8>::as_ptr) {
debug self => _4;
let mut _5: *const u8;
}
}
}
scope 9 (inlined #[track_caller] <Unique<u8> as Into<NonNull<u8>>>::into) {
debug ((self: Unique<u8>).0: std::ptr::NonNull<u8>) => _6;
debug ((self: Unique<u8>).1: std::marker::PhantomData<u8>) => const PhantomData::<u8>;
scope 10 (inlined <NonNull<u8> as From<Unique<u8>>>::from) {
debug ((unique: Unique<u8>).0: std::ptr::NonNull<u8>) => _6;
debug ((unique: Unique<u8>).1: std::marker::PhantomData<u8>) => const PhantomData::<u8>;
scope 11 (inlined Unique::<u8>::as_non_null_ptr) {
debug ((self: Unique<u8>).0: std::ptr::NonNull<u8>) => _6;
debug ((self: Unique<u8>).1: std::marker::PhantomData<u8>) => const PhantomData::<u8>;
}
}
}
}
scope 12 (inlined NonNull::<u8>::as_ptr) {
debug self => _6;
}
}
}
}
scope 6 (inlined std::slice::from_raw_parts::<'_, u8>) {
debug data => _4;
debug len => _5;
let _6: *const [u8];
scope 7 (inlined core::ub_checks::check_language_ub) {
scope 8 (inlined core::ub_checks::check_language_ub::runtime) {
scope 13 (inlined std::slice::from_raw_parts::<'_, u8>) {
debug data => _5;
debug len => _7;
let _8: *const [u8];
scope 14 (inlined core::ub_checks::check_language_ub) {
scope 15 (inlined core::ub_checks::check_language_ub::runtime) {
}
}
scope 9 (inlined std::mem::size_of::<u8>) {
scope 16 (inlined std::mem::size_of::<u8>) {
}
scope 10 (inlined align_of::<u8>) {
scope 17 (inlined align_of::<u8>) {
}
scope 11 (inlined slice_from_raw_parts::<u8>) {
debug data => _4;
debug len => _5;
scope 12 (inlined std::ptr::from_raw_parts::<[u8], u8>) {
debug data_pointer => _4;
debug metadata => _5;
scope 18 (inlined slice_from_raw_parts::<u8>) {
debug data => _5;
debug len => _7;
scope 19 (inlined std::ptr::from_raw_parts::<[u8], u8>) {
debug data_pointer => _5;
debug metadata => _7;
}
}
}
}
bb0: {
StorageLive(_4);
StorageLive(_2);
_2 = &((*_1).0: alloc::raw_vec::RawVec<u8>);
StorageLive(_3);
_3 = ((((*_1).0: alloc::raw_vec::RawVec<u8>).0: std::ptr::Unique<u8>).0: std::ptr::NonNull<u8>);
_4 = (_3.0: *const u8);
_3 = &(((*_1).0: alloc::raw_vec::RawVec<u8>).0: alloc::raw_vec::RawVecInner);
StorageLive(_6);
StorageLive(_4);
_4 = (((((*_1).0: alloc::raw_vec::RawVec<u8>).0: alloc::raw_vec::RawVecInner).0: std::ptr::Unique<u8>).0: std::ptr::NonNull<u8>);
_5 = (_4.0: *const u8);
_6 = NonNull::<u8> { pointer: _5 };
StorageDead(_4);
StorageDead(_6);
StorageDead(_3);
StorageDead(_2);
StorageLive(_5);
_5 = ((*_1).1: usize);
StorageLive(_6);
_6 = *const [u8] from (_4, _5);
_0 = &(*_6);
StorageDead(_6);
StorageDead(_5);
StorageDead(_4);
StorageLive(_7);
_7 = ((*_1).1: usize);
StorageLive(_8);
_8 = *const [u8] from (_5, _7);
_0 = &(*_8);
StorageDead(_8);
StorageDead(_7);
return;
}
}