Auto merge of #108106 - the8472:layout-opt, r=wesleywiser

Improve niche placement by trying two strategies and picking the better result

Fixes #104807
Fixes #105371

Determining which sort order is better requires calculating the struct size (so we can calculate the niche offset). But that in turn depends on the field order, so it can only happen after sorting. The simple way to resolve this circularity is to run the whole thing twice and pick the better result.
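
A minimal sketch of the "compute both, keep the better one" rule, assuming illustrative names (`LayoutInfo`, `pick_layout` are not the actual compiler API); the decision condition mirrors the `prefer_alt_layout` check in the diff below:

struct LayoutInfo {
    size: u64,
    niche_offset: u64,
    niche_len: u64,
}

// Lay the struct out twice, once biasing the largest niche towards the start and once
// towards the end, then keep whichever layout leaves the niche closer to an edge.
fn pick_layout(start_biased: LayoutInfo, end_biased: LayoutInfo) -> LayoutInfo {
    let head_space = start_biased.niche_offset;
    let tail_space = start_biased.size - head_space - start_biased.niche_len;
    let alt_head_space = end_biased.niche_offset;
    // The end-biased layout wins only if it pushes the niche further towards the back
    // than the default layout leaves it near either edge.
    if alt_head_space > head_space && alt_head_space > tail_space {
        end_biased
    } else {
        start_biased
    }
}

fn main() {
    // Illustrative numbers: an 8-byte struct with a 1-byte niche at offset 2 by default,
    // or at offset 7 when biased towards the end. head_space = 2, tail_space = 5,
    // alt_head_space = 7 is larger than both, so the end-biased layout is preferred.
    let chosen = pick_layout(
        LayoutInfo { size: 8, niche_offset: 2, niche_len: 1 },
        LayoutInfo { size: 8, niche_offset: 7, niche_len: 1 },
    );
    assert_eq!(chosen.niche_offset, 7);
}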

The first commit is just code motion; the meat is in the later ones.

Committed by bors on 2023-04-29 08:55:04 +00:00 as commit f2299490c1.
12 changed files with 514 additions and 301 deletions.

View File

@ -1,4 +1,5 @@
use super::*;
use std::fmt::Write;
use std::{borrow::Borrow, cmp, iter, ops::Bound};
#[cfg(feature = "randomize")]
@ -49,220 +50,60 @@ pub trait LayoutCalculator {
repr: &ReprOptions,
kind: StructKind,
) -> Option<LayoutS> {
let layout = univariant(self, dl, fields, repr, kind, NicheBias::Start);
// Enums prefer niches close to the beginning or the end of the variants so that other (smaller)
// data-carrying variants can be packed into the space after/before the niche.
// If the default field ordering does not give us a niche at the front then we do a second
// run and bias niches to the right and then check which one is closer to one of the struct's
// edges.
if let Some(layout) = &layout {
    if let Some(niche) = layout.largest_niche {
        let head_space = niche.offset.bytes();
        let niche_length = niche.value.size(dl).bytes();
        let tail_space = layout.size.bytes() - head_space - niche_length;
        // This may end up doing redundant work if the niche is already in the last field
        // (e.g. a trailing bool) and there is tail padding. But it's non-trivial to get
        // the unpadded size so we try anyway.
        if fields.len() > 1 && head_space != 0 && tail_space > 0 {
            let alt_layout = univariant(self, dl, fields, repr, kind, NicheBias::End)
                .expect("alt layout should always work");
            let niche = alt_layout
                .largest_niche
                .expect("alt layout should have a niche like the regular one");
            let alt_head_space = niche.offset.bytes();
            let alt_niche_len = niche.value.size(dl).bytes();
            let alt_tail_space = alt_layout.size.bytes() - alt_head_space - alt_niche_len;

            debug_assert_eq!(layout.size.bytes(), alt_layout.size.bytes());

            let prefer_alt_layout =
                alt_head_space > head_space && alt_head_space > tail_space;

            debug!(
                "sz: {}, default_niche_at: {}+{}, default_tail_space: {}, alt_niche_at/head_space: {}+{}, alt_tail: {}, num_fields: {}, better: {}\n\
                layout: {}\n\
                alt_layout: {}\n",
                layout.size.bytes(),
                head_space,
                niche_length,
                tail_space,
                alt_head_space,
                alt_niche_len,
                alt_tail_space,
                layout.fields.count(),
                prefer_alt_layout,
                format_field_niches(&layout, &fields, &dl),
                format_field_niches(&alt_layout, &fields, &dl),
            );

            if prefer_alt_layout {
                return Some(alt_layout);
            }
        }
    }
}

layout
}
fn layout_of_never_type(&self) -> LayoutS {
@ -934,3 +775,323 @@ pub trait LayoutCalculator {
})
}
}
/// Determines towards which end of a struct layout optimizations will try to place the best niches.
enum NicheBias {
Start,
End,
}
fn univariant(
this: &(impl LayoutCalculator + ?Sized),
dl: &TargetDataLayout,
fields: &IndexSlice<FieldIdx, Layout<'_>>,
repr: &ReprOptions,
kind: StructKind,
niche_bias: NicheBias,
) -> Option<LayoutS> {
let pack = repr.pack;
let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };
let mut inverse_memory_index: IndexVec<u32, FieldIdx> = fields.indices().collect();
let optimize = !repr.inhibit_struct_field_reordering_opt();
if optimize && fields.len() > 1 {
let end = if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
let optimizing = &mut inverse_memory_index.raw[..end];
// If `-Z randomize-layout` was enabled for the type definition we can shuffle
// the field ordering to try and catch some code making assumptions about layouts
// we don't guarantee
if repr.can_randomize_type_layout() && cfg!(feature = "randomize") {
#[cfg(feature = "randomize")]
{
// `ReprOptions.layout_seed` is a deterministic seed that we can use to
// randomize field ordering with
let mut rng = Xoshiro128StarStar::seed_from_u64(repr.field_shuffle_seed.as_u64());
// Shuffle the ordering of the fields
optimizing.shuffle(&mut rng);
}
// Otherwise we just leave things alone and actually optimize the type's fields
} else {
let max_field_align = fields.iter().map(|f| f.align().abi.bytes()).max().unwrap_or(1);
let largest_niche_size = fields
.iter()
.filter_map(|f| f.largest_niche())
.map(|n| n.available(dl))
.max()
.unwrap_or(0);
// Calculates a sort key to group fields by their alignment or possibly some size-derived
// pseudo-alignment.
let alignment_group_key = |layout: Layout<'_>| {
if let Some(pack) = pack {
// return the packed alignment in bytes
layout.align().abi.min(pack).bytes()
} else {
// returns log2(effective-align).
// This is ok since `pack` applies to all fields equally.
// The calculation assumes that size is an integer multiple of align, except for ZSTs.
//
let align = layout.align().abi.bytes();
let size = layout.size().bytes();
let niche_size = layout.largest_niche().map(|n| n.available(dl)).unwrap_or(0);
// group [u8; 4] with align-4 or [u8; 6] with align-2 fields
let size_as_align = align.max(size).trailing_zeros();
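// Worked example of the grouping above (illustrative values, not part of the diff):
//   [u8; 4] -> align 1, size 4, max(1, 4) = 4, trailing_zeros = 2, groups with align-4 fields
//   [u8; 6] -> align 1, size 6, max(1, 6) = 6, trailing_zeros = 1, groups with align-2 fields
//   u32     -> align 4, size 4, max(4, 4) = 4, trailing_zeros = 2
// These raw values may still be capped or overridden by the niche-bias adjustment below.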
let size_as_align = if largest_niche_size > 0 {
match niche_bias {
// Given `A(u8, [u8; 16])` and `B(bool, [u8; 16])` we want to bump the array
// to the front in the first case (for aligned loads) but keep the bool in front
// in the second case for its niches.
NicheBias::Start => max_field_align.trailing_zeros().min(size_as_align),
// When moving niches towards the end of the struct, for
// A((u8, u8, u8, bool), (u8, bool, u8)) we want to keep the first tuple
// in the align-1 group because its bool can be moved closer to the end.
NicheBias::End if niche_size == largest_niche_size => {
align.trailing_zeros()
}
NicheBias::End => size_as_align,
}
} else {
size_as_align
};
size_as_align as u64
}
};
match kind {
StructKind::AlwaysSized | StructKind::MaybeUnsized => {
// Currently `LayoutS` only exposes a single niche so sorting is usually sufficient
// to get one niche into the preferred position. If it ever supported multiple niches
// then a more advanced pick-and-pack approach could provide better results.
// But even for the single-niche case it's not optimal. E.g. for
// A(u32, (bool, u8), u16) it would be possible to move the bool to the front
// but it would require packing the tuple together with the u16 to build a 4-byte
// group so that the u32 can be placed after it without padding. This kind
// of packing can't be achieved by sorting.
optimizing.sort_by_key(|&x| {
let f = fields[x];
let field_size = f.size().bytes();
let niche_size = f.largest_niche().map_or(0, |n| n.available(dl));
let niche_size_key = match niche_bias {
// large niche first
NicheBias::Start => !niche_size,
// large niche last
NicheBias::End => niche_size,
};
let inner_niche_offset_key = match niche_bias {
NicheBias::Start => f.largest_niche().map_or(0, |n| n.offset.bytes()),
NicheBias::End => f.largest_niche().map_or(0, |n| {
!(field_size - n.value.size(dl).bytes() - n.offset.bytes())
}),
};
(
// Place ZSTs first to avoid "interesting offsets", especially with only one
// or two non-ZST fields. This helps Scalar/ScalarPair layouts.
!f.0.is_zst(),
// Then place largest alignments first.
cmp::Reverse(alignment_group_key(f)),
// Then prioritize niche placement within the alignment group according to
// `niche_bias`.
niche_size_key,
// Then among fields with equally-sized niches prefer the ones
// closer to the start/end of the field.
inner_niche_offset_key,
)
});
}
StructKind::Prefixed(..) => {
// Sort in ascending alignment so that the layout stays optimal
// regardless of the prefix.
// And put the largest niche in an alignment group at the end
// so it can be used as discriminant in jagged enums
optimizing.sort_by_key(|&x| {
let f = fields[x];
let niche_size = f.largest_niche().map_or(0, |n| n.available(dl));
(alignment_group_key(f), niche_size)
});
}
}
// FIXME(Kixiron): We can always shuffle fields within a given alignment class
// regardless of the status of `-Z randomize-layout`
}
}
// inverse_memory_index holds field indices by increasing memory offset.
// That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
// We now write field offsets to the corresponding offset slot;
// field 5 with offset 0 puts 0 in offsets[5].
// At the bottom of this function, we invert `inverse_memory_index` to
// produce `memory_index` (see `invert_mapping`).
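// Illustrative example (not part of the diff): with three fields where the sort places
// field 2 first, then field 0, then field 1:
//   inverse_memory_index = [2, 0, 1]   // memory position -> field index
//   memory_index         = [1, 2, 0]   // field index -> memory position (the inverse mapping)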
let mut sized = true;
let mut offsets = IndexVec::from_elem(Size::ZERO, &fields);
let mut offset = Size::ZERO;
let mut largest_niche = None;
let mut largest_niche_available = 0;
if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
let prefix_align =
if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
align = align.max(AbiAndPrefAlign::new(prefix_align));
offset = prefix_size.align_to(prefix_align);
}
for &i in &inverse_memory_index {
let field = &fields[i];
if !sized {
this.delay_bug(&format!(
"univariant: field #{} comes after unsized field",
offsets.len(),
));
}
if field.0.is_unsized() {
sized = false;
}
// Invariant: offset < dl.obj_size_bound() <= 1<<61
let field_align = if let Some(pack) = pack {
field.align().min(AbiAndPrefAlign::new(pack))
} else {
field.align()
};
offset = offset.align_to(field_align.abi);
align = align.max(field_align);
debug!("univariant offset: {:?} field: {:#?}", offset, field);
offsets[i] = offset;
if let Some(mut niche) = field.largest_niche() {
let available = niche.available(dl);
// Pick up larger niches.
let prefer_new_niche = match niche_bias {
NicheBias::Start => available > largest_niche_available,
// if there are several niches of the same size then pick the last one
NicheBias::End => available >= largest_niche_available,
};
if prefer_new_niche {
largest_niche_available = available;
niche.offset += offset;
largest_niche = Some(niche);
}
}
offset = offset.checked_add(field.size(), dl)?;
}
if let Some(repr_align) = repr.align {
align = align.max(AbiAndPrefAlign::new(repr_align));
}
debug!("univariant min_size: {:?}", offset);
let min_size = offset;
// As stated above, inverse_memory_index holds field indices by increasing offset.
// This makes it an already-sorted view of the offsets vec.
// To invert it, consider:
// If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
// Field 5 would be the first element, so memory_index is i:
// Note: if we didn't optimize, it's already right.
let memory_index = if optimize {
inverse_memory_index.invert_bijective_mapping()
} else {
debug_assert!(inverse_memory_index.iter().copied().eq(fields.indices()));
inverse_memory_index.into_iter().map(FieldIdx::as_u32).collect()
};
let size = min_size.align_to(align.abi);
let mut abi = Abi::Aggregate { sized };
// Unpack newtype ABIs and find scalar pairs.
if sized && size.bytes() > 0 {
// All other fields must be ZSTs.
let mut non_zst_fields = fields.iter_enumerated().filter(|&(_, f)| !f.0.is_zst());
match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
// We have exactly one non-ZST field.
(Some((i, field)), None, None) => {
// Field fills the struct and it has a scalar or scalar pair ABI.
if offsets[i].bytes() == 0 && align.abi == field.align().abi && size == field.size()
{
match field.abi() {
// For plain scalars, or vectors of them, we can't unpack
// newtypes for `#[repr(C)]`, as that affects C ABIs.
Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
abi = field.abi();
}
// But scalar pairs are Rust-specific and get
// treated as aggregates by C ABIs anyway.
Abi::ScalarPair(..) => {
abi = field.abi();
}
_ => {}
}
}
}
// Two non-ZST fields, and they're both scalars.
(Some((i, a)), Some((j, b)), None) => {
match (a.abi(), b.abi()) {
(Abi::Scalar(a), Abi::Scalar(b)) => {
// Order by the memory placement, not source order.
let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
((i, a), (j, b))
} else {
((j, b), (i, a))
};
let pair = this.scalar_pair(a, b);
let pair_offsets = match pair.fields {
FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
assert_eq!(memory_index.raw, [0, 1]);
offsets
}
_ => panic!(),
};
if offsets[i] == pair_offsets[FieldIdx::from_usize(0)]
&& offsets[j] == pair_offsets[FieldIdx::from_usize(1)]
&& align == pair.align
&& size == pair.size
{
// We can use `ScalarPair` only when it matches our
// already computed layout (including `#[repr(C)]`).
abi = pair.abi;
}
}
_ => {}
}
}
_ => {}
}
}
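// Illustrative examples (not from this diff): a newtype like `struct Wrapper(u64);` hits the
// single-non-ZST arm above and gets `Abi::Scalar`, while `struct Pair(u32, u32);` can get
// `Abi::ScalarPair` as long as the offsets computed earlier match the canonical pair layout.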
if fields.iter().any(|f| f.abi().is_uninhabited()) {
abi = Abi::Uninhabited;
}
Some(LayoutS {
variants: Variants::Single { index: FIRST_VARIANT },
fields: FieldsShape::Arbitrary { offsets, memory_index },
abi,
largest_niche,
align,
size,
})
}
fn format_field_niches(
layout: &LayoutS,
fields: &IndexSlice<FieldIdx, Layout<'_>>,
dl: &TargetDataLayout,
) -> String {
let mut s = String::new();
for i in layout.fields.index_by_increasing_offset() {
let offset = layout.fields.offset(i);
let f = fields[i.into()];
write!(s, "[o{}a{}s{}", offset.bytes(), f.align().abi.bytes(), f.size().bytes()).unwrap();
if let Some(n) = f.largest_niche() {
write!(
s,
" n{}b{}s{}",
n.offset.bytes(),
n.available(dl).ilog2(),
n.value.size(dl).bytes()
)
.unwrap();
}
write!(s, "] ").unwrap();
}
s
}
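// Example of the resulting string for a single `bool` field at offset 0 (illustrative):
// "[o0a1s1 n0b7s1] " i.e. offset 0, align 1, size 1, with a niche at offset 0,
// ilog2(254) = 7 bits' worth of available values (bool has 254 invalid bit patterns),
// and a niche value size of 1 byte.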

View File

@ -1,4 +1,5 @@
// compile-flags: -O
// min-llvm-version: 16.0
#![crate_type = "lib"]
pub fn foo(t: &mut Vec<usize>) {

View File

@ -19,4 +19,5 @@ pub fn outer_function(x: S, y: S) -> usize {
// CHECK-NOT: [[ptr_tmp:%.*]] = getelementptr inbounds %"[closure@{{.*.rs}}:9:23: 9:25]", ptr [[spill]]
// CHECK-NOT: [[load:%.*]] = load ptr, ptr
// CHECK: call void @llvm.lifetime.start{{.*}}({{.*}}, ptr [[spill]])
// CHECK: call void @llvm.memcpy{{.*}}(ptr {{align .*}} [[spill]], ptr {{align .*}} %x
// CHECK: [[inner:%.*]] = getelementptr inbounds %"{{.*}}", ptr [[spill]]
// CHECK: call void @llvm.memcpy{{.*}}(ptr {{align .*}} [[inner]], ptr {{align .*}} %x

View File

@ -1,4 +1,5 @@
// min-llvm-version: 15.0
// only-64bit llvm appears to use stores instead of memset on 32bit
// compile-flags: -C opt-level=3 -Z merge-functions=disabled
// The below two functions ensure that both `String::new()` and `"".to_string()`
@ -9,12 +10,9 @@
// CHECK-LABEL: define void @string_new
#[no_mangle]
pub fn string_new() -> String {
// CHECK-NOT: load i8
// CHECK: store i{{32|64}}
// CHECK: store ptr inttoptr
// CHECK-NEXT: getelementptr
// CHECK-NEXT: store ptr
// CHECK-NEXT: getelementptr
// CHECK-NEXT: store i{{32|64}}
// CHECK-NEXT: call void @llvm.memset
// CHECK-NEXT: ret void
String::new()
}
@ -22,12 +20,9 @@ pub fn string_new() -> String {
// CHECK-LABEL: define void @empty_to_string
#[no_mangle]
pub fn empty_to_string() -> String {
// CHECK-NOT: load i8
// CHECK: store i{{32|64}}
// CHECK: store ptr inttoptr
// CHECK-NEXT: getelementptr
// CHECK-NEXT: store ptr
// CHECK-NEXT: getelementptr
// CHECK-NEXT: store i{{32|64}}
// CHECK-NEXT: call void @llvm.memset
// CHECK-NEXT: ret void
"".to_string()
}
@ -38,12 +33,9 @@ pub fn empty_to_string() -> String {
// CHECK-LABEL: @empty_vec
#[no_mangle]
pub fn empty_vec() -> Vec<u8> {
// CHECK: store i{{32|64}}
// CHECK-NOT: load i8
// CHECK: store ptr inttoptr
// CHECK-NEXT: getelementptr
// CHECK-NEXT: store ptr
// CHECK-NEXT: getelementptr
// CHECK-NEXT: store i{{32|64}}
// CHECK-NEXT: call void @llvm.memset
// CHECK-NEXT: ret void
vec![]
}
@ -51,12 +43,9 @@ pub fn empty_vec() -> Vec<u8> {
// CHECK-LABEL: @empty_vec_clone
#[no_mangle]
pub fn empty_vec_clone() -> Vec<u8> {
// CHECK: store i{{32|64}}
// CHECK-NOT: load i8
// CHECK: store ptr inttoptr
// CHECK-NEXT: getelementptr
// CHECK-NEXT: store ptr
// CHECK-NEXT: getelementptr
// CHECK-NEXT: store i{{32|64}}
// CHECK-NEXT: call void @llvm.memset
// CHECK-NEXT: ret void
vec![].clone()
}

View File

@ -2,38 +2,34 @@ print-type-size type: `[async fn body@$DIR/async-awaiting-fut.rs:21:21: 24:2]`:
print-type-size discriminant: 1 bytes
print-type-size variant `Unresumed`: 0 bytes
print-type-size variant `Suspend0`: 3077 bytes
print-type-size local `.__awaitee`: 3077 bytes, offset: 0 bytes, alignment: 1 bytes
print-type-size local `.__awaitee`: 3077 bytes
print-type-size variant `Returned`: 0 bytes
print-type-size variant `Panicked`: 0 bytes
print-type-size type: `[async fn body@$DIR/async-awaiting-fut.rs:10:64: 19:2]`: 3077 bytes, alignment: 1 bytes
print-type-size discriminant: 1 bytes
print-type-size variant `Unresumed`: 2051 bytes
print-type-size padding: 1026 bytes
print-type-size upvar `.fut`: 1025 bytes, alignment: 1 bytes
print-type-size variant `Unresumed`: 1025 bytes
print-type-size upvar `.fut`: 1025 bytes, offset: 0 bytes, alignment: 1 bytes
print-type-size variant `Suspend0`: 2052 bytes
print-type-size local `.fut`: 1025 bytes, offset: 0 bytes, alignment: 1 bytes
print-type-size local `..generator_field4`: 1 bytes
print-type-size upvar `.fut`: 1025 bytes, offset: 0 bytes, alignment: 1 bytes
print-type-size padding: 1 bytes
print-type-size upvar `.fut`: 1025 bytes, alignment: 1 bytes
print-type-size local `.fut`: 1025 bytes, alignment: 1 bytes
print-type-size local `..generator_field4`: 1 bytes
print-type-size local `.__awaitee`: 1 bytes
print-type-size variant `Suspend1`: 3076 bytes
print-type-size padding: 1024 bytes
print-type-size upvar `.fut`: 1025 bytes, offset: 0 bytes, alignment: 1 bytes
print-type-size padding: 1026 bytes
print-type-size local `..generator_field4`: 1 bytes, alignment: 1 bytes
print-type-size padding: 1 bytes
print-type-size upvar `.fut`: 1025 bytes, alignment: 1 bytes
print-type-size local `.__awaitee`: 1025 bytes
print-type-size variant `Suspend2`: 2052 bytes
print-type-size local `.fut`: 1025 bytes, offset: 0 bytes, alignment: 1 bytes
print-type-size local `..generator_field4`: 1 bytes
print-type-size upvar `.fut`: 1025 bytes, offset: 0 bytes, alignment: 1 bytes
print-type-size padding: 1 bytes
print-type-size upvar `.fut`: 1025 bytes, alignment: 1 bytes
print-type-size local `.fut`: 1025 bytes, alignment: 1 bytes
print-type-size local `..generator_field4`: 1 bytes
print-type-size local `.__awaitee`: 1 bytes
print-type-size variant `Returned`: 2051 bytes
print-type-size padding: 1026 bytes
print-type-size upvar `.fut`: 1025 bytes, alignment: 1 bytes
print-type-size variant `Panicked`: 2051 bytes
print-type-size padding: 1026 bytes
print-type-size upvar `.fut`: 1025 bytes, alignment: 1 bytes
print-type-size variant `Returned`: 1025 bytes
print-type-size upvar `.fut`: 1025 bytes, offset: 0 bytes, alignment: 1 bytes
print-type-size variant `Panicked`: 1025 bytes
print-type-size upvar `.fut`: 1025 bytes, offset: 0 bytes, alignment: 1 bytes
print-type-size type: `std::mem::ManuallyDrop<[async fn body@$DIR/async-awaiting-fut.rs:10:64: 19:2]>`: 3077 bytes, alignment: 1 bytes
print-type-size field `.value`: 3077 bytes
print-type-size type: `std::mem::MaybeUninit<[async fn body@$DIR/async-awaiting-fut.rs:10:64: 19:2]>`: 3077 bytes, alignment: 1 bytes
@ -43,11 +39,11 @@ print-type-size field `.value`: 3077 bytes
print-type-size type: `[async fn body@$DIR/async-awaiting-fut.rs:8:35: 8:37]`: 1025 bytes, alignment: 1 bytes
print-type-size discriminant: 1 bytes
print-type-size variant `Unresumed`: 1024 bytes
print-type-size upvar `.arg`: 1024 bytes, offset: 0 bytes, alignment: 1 bytes
print-type-size upvar `.arg`: 1024 bytes
print-type-size variant `Returned`: 1024 bytes
print-type-size upvar `.arg`: 1024 bytes, offset: 0 bytes, alignment: 1 bytes
print-type-size upvar `.arg`: 1024 bytes
print-type-size variant `Panicked`: 1024 bytes
print-type-size upvar `.arg`: 1024 bytes, offset: 0 bytes, alignment: 1 bytes
print-type-size upvar `.arg`: 1024 bytes
print-type-size type: `std::mem::ManuallyDrop<[async fn body@$DIR/async-awaiting-fut.rs:8:35: 8:37]>`: 1025 bytes, alignment: 1 bytes
print-type-size field `.value`: 1025 bytes
print-type-size type: `std::mem::MaybeUninit<[async fn body@$DIR/async-awaiting-fut.rs:8:35: 8:37]>`: 1025 bytes, alignment: 1 bytes

View File

@ -2,20 +2,20 @@ print-type-size type: `[async fn body@$DIR/large-arg.rs:6:21: 8:2]`: 3076 bytes,
print-type-size discriminant: 1 bytes
print-type-size variant `Unresumed`: 0 bytes
print-type-size variant `Suspend0`: 3075 bytes
print-type-size local `.__awaitee`: 3075 bytes, offset: 0 bytes, alignment: 1 bytes
print-type-size local `.__awaitee`: 3075 bytes
print-type-size variant `Returned`: 0 bytes
print-type-size variant `Panicked`: 0 bytes
print-type-size type: `[async fn body@$DIR/large-arg.rs:10:30: 12:2]`: 3075 bytes, alignment: 1 bytes
print-type-size discriminant: 1 bytes
print-type-size variant `Unresumed`: 1024 bytes
print-type-size upvar `.t`: 1024 bytes, offset: 0 bytes, alignment: 1 bytes
print-type-size upvar `.t`: 1024 bytes
print-type-size variant `Suspend0`: 3074 bytes
print-type-size upvar `.t`: 1024 bytes, offset: 0 bytes, alignment: 1 bytes
print-type-size upvar `.t`: 1024 bytes
print-type-size local `.__awaitee`: 2050 bytes
print-type-size variant `Returned`: 1024 bytes
print-type-size upvar `.t`: 1024 bytes, offset: 0 bytes, alignment: 1 bytes
print-type-size upvar `.t`: 1024 bytes
print-type-size variant `Panicked`: 1024 bytes
print-type-size upvar `.t`: 1024 bytes, offset: 0 bytes, alignment: 1 bytes
print-type-size upvar `.t`: 1024 bytes
print-type-size type: `std::mem::ManuallyDrop<[async fn body@$DIR/large-arg.rs:10:30: 12:2]>`: 3075 bytes, alignment: 1 bytes
print-type-size field `.value`: 3075 bytes
print-type-size type: `std::mem::MaybeUninit<[async fn body@$DIR/large-arg.rs:10:30: 12:2]>`: 3075 bytes, alignment: 1 bytes
@ -25,14 +25,14 @@ print-type-size field `.value`: 3075 bytes
print-type-size type: `[async fn body@$DIR/large-arg.rs:13:26: 15:2]`: 2050 bytes, alignment: 1 bytes
print-type-size discriminant: 1 bytes
print-type-size variant `Unresumed`: 1024 bytes
print-type-size upvar `.t`: 1024 bytes, offset: 0 bytes, alignment: 1 bytes
print-type-size upvar `.t`: 1024 bytes
print-type-size variant `Suspend0`: 2049 bytes
print-type-size upvar `.t`: 1024 bytes, offset: 0 bytes, alignment: 1 bytes
print-type-size upvar `.t`: 1024 bytes
print-type-size local `.__awaitee`: 1025 bytes
print-type-size variant `Returned`: 1024 bytes
print-type-size upvar `.t`: 1024 bytes, offset: 0 bytes, alignment: 1 bytes
print-type-size upvar `.t`: 1024 bytes
print-type-size variant `Panicked`: 1024 bytes
print-type-size upvar `.t`: 1024 bytes, offset: 0 bytes, alignment: 1 bytes
print-type-size upvar `.t`: 1024 bytes
print-type-size type: `std::mem::ManuallyDrop<[async fn body@$DIR/large-arg.rs:13:26: 15:2]>`: 2050 bytes, alignment: 1 bytes
print-type-size field `.value`: 2050 bytes
print-type-size type: `std::mem::MaybeUninit<[async fn body@$DIR/large-arg.rs:13:26: 15:2]>`: 2050 bytes, alignment: 1 bytes
@ -42,11 +42,11 @@ print-type-size field `.value`: 2050 bytes
print-type-size type: `[async fn body@$DIR/large-arg.rs:16:26: 18:2]`: 1025 bytes, alignment: 1 bytes
print-type-size discriminant: 1 bytes
print-type-size variant `Unresumed`: 1024 bytes
print-type-size upvar `.t`: 1024 bytes, offset: 0 bytes, alignment: 1 bytes
print-type-size upvar `.t`: 1024 bytes
print-type-size variant `Returned`: 1024 bytes
print-type-size upvar `.t`: 1024 bytes, offset: 0 bytes, alignment: 1 bytes
print-type-size upvar `.t`: 1024 bytes
print-type-size variant `Panicked`: 1024 bytes
print-type-size upvar `.t`: 1024 bytes, offset: 0 bytes, alignment: 1 bytes
print-type-size upvar `.t`: 1024 bytes
print-type-size type: `std::mem::ManuallyDrop<[async fn body@$DIR/large-arg.rs:16:26: 18:2]>`: 1025 bytes, alignment: 1 bytes
print-type-size field `.value`: 1025 bytes
print-type-size type: `std::mem::MaybeUninit<[async fn body@$DIR/large-arg.rs:16:26: 18:2]>`: 1025 bytes, alignment: 1 bytes

View File

@ -465,7 +465,7 @@ LL | const LAYOUT_INVALID_ZERO: Layout = unsafe { Layout::from_size_align_unchec
|
= note: The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rustc repository if you believe it should not be considered undefined behavior.
= note: the raw bytes of the constant (size: 8, align: 4) {
00 10 00 00 00 00 00 00 │ ........
00 00 00 00 00 10 00 00 │ ........
}
error[E0080]: it is undefined behavior to use this value
@ -476,7 +476,7 @@ LL | const LAYOUT_INVALID_THREE: Layout = unsafe { Layout::from_size_align_unche
|
= note: The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rustc repository if you believe it should not be considered undefined behavior.
= note: the raw bytes of the constant (size: 8, align: 4) {
09 00 00 00 03 00 00 00 │ ........
03 00 00 00 09 00 00 00 │ ........
}
error[E0080]: it is undefined behavior to use this value

View File

@ -465,7 +465,7 @@ LL | const LAYOUT_INVALID_ZERO: Layout = unsafe { Layout::from_size_align_unchec
|
= note: The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rustc repository if you believe it should not be considered undefined behavior.
= note: the raw bytes of the constant (size: 16, align: 8) {
00 10 00 00 00 00 00 00 00 00 00 00 00 00 00 00 │ ................
00 00 00 00 00 00 00 00 00 10 00 00 00 00 00 00 │ ................
}
error[E0080]: it is undefined behavior to use this value
@ -476,7 +476,7 @@ LL | const LAYOUT_INVALID_THREE: Layout = unsafe { Layout::from_size_align_unche
|
= note: The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rustc repository if you believe it should not be considered undefined behavior.
= note: the raw bytes of the constant (size: 16, align: 8) {
09 00 00 00 00 00 00 00 03 00 00 00 00 00 00 00 │ ................
03 00 00 00 00 00 00 00 09 00 00 00 00 00 00 00 │ ................
}
error[E0080]: it is undefined behavior to use this value

View File

@ -370,12 +370,6 @@ error: layout_of(NicheFirst) = Layout {
pref: $PREF_ALIGN,
},
abi: ScalarPair(
Union {
value: Int(
I8,
false,
),
},
Initialized {
value: Int(
I8,
@ -383,10 +377,16 @@ error: layout_of(NicheFirst) = Layout {
),
valid_range: 0..=4,
},
Union {
value: Int(
I8,
false,
),
},
),
fields: Arbitrary {
offsets: [
Size(1 bytes),
Size(0 bytes),
],
memory_index: [
0,
@ -394,7 +394,7 @@ error: layout_of(NicheFirst) = Layout {
},
largest_niche: Some(
Niche {
offset: Size(1 bytes),
offset: Size(0 bytes),
value: Int(
I8,
false,
@ -429,29 +429,29 @@ error: layout_of(NicheFirst) = Layout {
I8,
false,
),
valid_range: 0..=255,
valid_range: 0..=2,
},
Initialized {
value: Int(
I8,
false,
),
valid_range: 0..=2,
valid_range: 0..=255,
},
),
fields: Arbitrary {
offsets: [
Size(1 bytes),
Size(0 bytes),
Size(1 bytes),
],
memory_index: [
1,
0,
1,
],
},
largest_niche: Some(
Niche {
offset: Size(1 bytes),
offset: Size(0 bytes),
value: Int(
I8,
false,
@ -514,12 +514,6 @@ error: layout_of(NicheSecond) = Layout {
pref: $PREF_ALIGN,
},
abi: ScalarPair(
Union {
value: Int(
I8,
false,
),
},
Initialized {
value: Int(
I8,
@ -527,10 +521,16 @@ error: layout_of(NicheSecond) = Layout {
),
valid_range: 0..=4,
},
Union {
value: Int(
I8,
false,
),
},
),
fields: Arbitrary {
offsets: [
Size(1 bytes),
Size(0 bytes),
],
memory_index: [
0,
@ -538,7 +538,7 @@ error: layout_of(NicheSecond) = Layout {
},
largest_niche: Some(
Niche {
offset: Size(1 bytes),
offset: Size(0 bytes),
value: Int(
I8,
false,
@ -573,29 +573,29 @@ error: layout_of(NicheSecond) = Layout {
I8,
false,
),
valid_range: 0..=255,
valid_range: 0..=2,
},
Initialized {
value: Int(
I8,
false,
),
valid_range: 0..=2,
valid_range: 0..=255,
},
),
fields: Arbitrary {
offsets: [
Size(0 bytes),
Size(1 bytes),
Size(0 bytes),
],
memory_index: [
0,
1,
0,
],
},
largest_niche: Some(
Niche {
offset: Size(1 bytes),
offset: Size(0 bytes),
value: Int(
I8,
false,

View File

@ -1,15 +1,15 @@
print-type-size type: `[async fn body@$DIR/async.rs:8:36: 11:2]`: 16386 bytes, alignment: 1 bytes
print-type-size discriminant: 1 bytes
print-type-size variant `Unresumed`: 8192 bytes
print-type-size upvar `.arg`: 8192 bytes, offset: 0 bytes, alignment: 1 bytes
print-type-size upvar `.arg`: 8192 bytes
print-type-size variant `Suspend0`: 16385 bytes
print-type-size upvar `.arg`: 8192 bytes, offset: 0 bytes, alignment: 1 bytes
print-type-size upvar `.arg`: 8192 bytes
print-type-size local `.arg`: 8192 bytes
print-type-size local `.__awaitee`: 1 bytes
print-type-size variant `Returned`: 8192 bytes
print-type-size upvar `.arg`: 8192 bytes, offset: 0 bytes, alignment: 1 bytes
print-type-size upvar `.arg`: 8192 bytes
print-type-size variant `Panicked`: 8192 bytes
print-type-size upvar `.arg`: 8192 bytes, offset: 0 bytes, alignment: 1 bytes
print-type-size upvar `.arg`: 8192 bytes
print-type-size type: `std::mem::ManuallyDrop<[u8; 8192]>`: 8192 bytes, alignment: 1 bytes
print-type-size field `.value`: 8192 bytes
print-type-size type: `std::mem::MaybeUninit<[u8; 8192]>`: 8192 bytes, alignment: 1 bytes

View File

@ -1,10 +1,10 @@
print-type-size type: `[generator@$DIR/generator.rs:10:5: 10:14]`: 8193 bytes, alignment: 1 bytes
print-type-size discriminant: 1 bytes
print-type-size variant `Unresumed`: 8192 bytes
print-type-size upvar `.array`: 8192 bytes, offset: 0 bytes, alignment: 1 bytes
print-type-size upvar `.array`: 8192 bytes
print-type-size variant `Suspend0`: 8192 bytes
print-type-size upvar `.array`: 8192 bytes, offset: 0 bytes, alignment: 1 bytes
print-type-size upvar `.array`: 8192 bytes
print-type-size variant `Returned`: 8192 bytes
print-type-size upvar `.array`: 8192 bytes, offset: 0 bytes, alignment: 1 bytes
print-type-size upvar `.array`: 8192 bytes
print-type-size variant `Panicked`: 8192 bytes
print-type-size upvar `.array`: 8192 bytes, offset: 0 bytes, alignment: 1 bytes
print-type-size upvar `.array`: 8192 bytes

View File

@ -4,9 +4,14 @@
#![allow(dead_code)]
#![feature(never_type)]
#![feature(pointer_is_aligned)]
#![feature(ptr_from_ref)]
#![feature(strict_provenance)]
use std::mem::size_of;
use std::num::NonZeroU8;
use std::num::{NonZeroU8, NonZeroU16};
use std::ptr;
use std::ptr::NonNull;
use std::borrow::Cow;
struct t {a: u8, b: i8}
struct u {a: u8, b: i8, c: u8}
@ -181,6 +186,41 @@ struct Reorder2 {
ary: [u8; 6],
}
// We want the niche in the front, which means we can't treat the array as quasi-aligned more than
// 4 bytes even though we also want to place it at an 8-aligned offset where possible.
// So the ideal layout would look like: (char, u32, [u8; 8], u8)
// The current layout algorithm does (char, [u8; 8], u32, u8)
#[repr(align(8))]
struct ReorderWithNiche {
a: u32,
b: char,
c: u8,
ary: [u8; 8]
}
#[repr(C)]
struct EndNiche8([u8; 7], bool);
#[repr(C)]
struct MiddleNiche4(u8, u8, bool, u8);
struct ReorderEndNiche {
a: EndNiche8,
b: MiddleNiche4,
}
// stand-ins for std types which we want to be laid out in a reasonable way
struct RawVecDummy {
ptr: NonNull<u8>,
cap: usize,
}
struct VecDummy {
r: RawVecDummy,
len: usize,
}
pub fn main() {
assert_eq!(size_of::<u8>(), 1 as usize);
assert_eq!(size_of::<u32>(), 4 as usize);
@ -270,4 +310,29 @@ pub fn main() {
let v = Reorder2 {a: 0, b: 0, ary: [0; 6]};
assert_eq!(size_of::<Reorder2>(), 10);
assert!((&v.ary).as_ptr().is_aligned_to(2), "[u8; 6] should group with align-2 fields");
let v = VecDummy { r: RawVecDummy { ptr: NonNull::dangling(), cap: 0 }, len: 1 };
assert_eq!(ptr::from_ref(&v), ptr::from_ref(&v.r.ptr).cast(),
"sort niches to the front where possible");
// Ideal layouts: (bool, u8, NonZeroU16) or (NonZeroU16, u8, bool)
// Currently the layout algorithm will choose the latter because it doesn't attempt
// to aggregate multiple smaller fields to move a niche before a higher-alignment one.
let b = BoolInTheMiddle( NonZeroU16::new(1).unwrap(), true, 0);
assert!(ptr::from_ref(&b.1).addr() > ptr::from_ref(&b.2).addr());
assert_eq!(size_of::<Cow<'static, str>>(), size_of::<String>());
let v = ReorderWithNiche {a: 0, b: ' ', c: 0, ary: [0; 8]};
assert!((&v.ary).as_ptr().is_aligned_to(4),
"here [u8; 8] should group with _at least_ align-4 fields");
assert_eq!(ptr::from_ref(&v), ptr::from_ref(&v.b).cast(),
"sort niches to the front where possible");
// Neither field has a niche at the beginning, so the layout algorithm should try to move niches
// to the end, which means the 8-sized field shouldn't be alignment-promoted before the 4-sized one.
let v = ReorderEndNiche { a: EndNiche8([0; 7], false), b: MiddleNiche4(0, 0, false, 0) };
assert!(ptr::from_ref(&v.a).addr() > ptr::from_ref(&v.b).addr());
}