rustc_trans: use more of the trans::mir and ty::layout APIs throughout.

Eduard-Mihai Burtescu 2017-06-25 12:41:24 +03:00
parent 260c41b4b8
commit 5b1fdaeb80
22 changed files with 711 additions and 890 deletions

View File

@ -1700,6 +1700,10 @@ impl<'a, 'tcx> Layout {
}
}
pub fn size_and_align<C: HasDataLayout>(&self, cx: C) -> (Size, Align) {
(self.size(cx), self.align(cx))
}
/// Returns the alignment before repr alignment is applied
pub fn primitive_align<C: HasDataLayout>(&self, cx: C) -> Align {
match *self {
@ -2201,6 +2205,11 @@ impl<'a, 'tcx> LayoutTyper<'tcx> for LayoutCx<'a, 'tcx> {
impl<'a, 'tcx> TyLayout<'tcx> {
pub fn for_variant(&self, variant_index: usize) -> Self {
let is_enum = match self.ty.sty {
ty::TyAdt(def, _) => def.is_enum(),
_ => false
};
assert!(is_enum);
TyLayout {
variant_index: Some(variant_index),
..*self
@ -2214,13 +2223,26 @@ impl<'a, 'tcx> TyLayout<'tcx> {
pub fn field_count(&self) -> usize {
// Handle enum/union through the type rather than Layout.
if let ty::TyAdt(def, _) = self.ty.sty {
let v = self.variant_index.unwrap_or(0);
if def.variants.is_empty() {
assert_eq!(v, 0);
return 0;
let v = if def.is_enum() {
if def.variants.is_empty() {
return 0;
}
match self.variant_index {
None => match *self.layout {
// Discriminant field for enums (where applicable).
General { .. } => return 1,
_ if def.variants.len() > 1 => return 0,
// Enums with one variant behave like structs.
_ => 0
},
Some(v) => v
}
} else {
return def.variants[v].fields.len();
}
0
};
return def.variants[v].fields.len();
}
match *self.layout {
@ -2248,7 +2270,7 @@ impl<'a, 'tcx> TyLayout<'tcx> {
}
}
pub fn field_type<C: LayoutTyper<'tcx>>(&self, cx: C, i: usize) -> Ty<'tcx> {
fn field_type_unnormalized<C: LayoutTyper<'tcx>>(&self, cx: C, i: usize) -> Ty<'tcx> {
let tcx = cx.tcx();
let ptr_field_type = |pointee: Ty<'tcx>| {
@ -2314,7 +2336,25 @@ impl<'a, 'tcx> TyLayout<'tcx> {
// ADTs.
ty::TyAdt(def, substs) => {
def.variants[self.variant_index.unwrap_or(0)].fields[i].ty(tcx, substs)
let v = if def.is_enum() {
match self.variant_index {
None => match *self.layout {
// Discriminant field for enums (where applicable).
General { discr, .. } => {
return [discr.to_ty(tcx, false)][i];
}
_ if def.variants.len() > 1 => return [][i],
// Enums with one variant behave like structs.
_ => 0
},
Some(v) => v
}
} else {
0
};
def.variants[v].fields[i].ty(tcx, substs)
}
ty::TyProjection(_) | ty::TyAnon(..) | ty::TyParam(_) |
@ -2324,11 +2364,15 @@ impl<'a, 'tcx> TyLayout<'tcx> {
}
}
pub fn field_type<C: LayoutTyper<'tcx>>(&self, cx: C, i: usize) -> Ty<'tcx> {
cx.normalize_projections(self.field_type_unnormalized(cx, i))
}
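
The unnormalized/normalized split matters because a field's declared type can still be an associated-type projection until substitutions are applied. A minimal standalone sketch of that situation (toy types, nothing from rustc):

// A field whose declared type is the projection `<I as Iterator>::Item`;
// only after substituting a concrete `I` does it normalize to a real
// type, which is what layout code needs to see.
struct Wrapper<I: Iterator> {
    first: Option<I::Item>, // declared as a projection
    iter: I,
}

fn main() {
    // With I = Range<u32>, `<I as Iterator>::Item` normalizes to `u32`.
    let w = Wrapper { first: None::<u32>, iter: 0u32..10 };
    let _ = (w.first, w.iter);
}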
pub fn field<C: LayoutTyper<'tcx>>(&self,
cx: C,
i: usize)
-> C::TyLayout {
cx.layout_of(cx.normalize_projections(self.field_type(cx, i)))
cx.layout_of(self.field_type(cx, i))
}
}
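
The enum handling above can be summarized: with no variant selected, a multi-variant General layout exposes exactly one field (the discriminant), other multi-variant layouts expose none, and a selected variant exposes its own fields. A toy model of that dispatch (simplified stand-ins, not the rustc types):

#[derive(Clone, Copy)]
enum ToyLayout {
    General { variant_field_counts: &'static [usize] },
    Univariant { field_count: usize },
}

fn field_count(layout: ToyLayout, variant_index: Option<usize>) -> usize {
    match (layout, variant_index) {
        // Discriminant field for multi-variant enums.
        (ToyLayout::General { .. }, None) => 1,
        (ToyLayout::General { variant_field_counts }, Some(v)) => variant_field_counts[v],
        (ToyLayout::Univariant { field_count }, _) => field_count,
    }
}

fn main() {
    let l = ToyLayout::General { variant_field_counts: &[0, 2] };
    assert_eq!(field_count(l, None), 1);    // just the discriminant
    assert_eq!(field_count(l, Some(1)), 2); // fields of variant 1
}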

View File

@ -30,6 +30,7 @@ use cabi_sparc64;
use cabi_nvptx;
use cabi_nvptx64;
use cabi_hexagon;
use mir::lvalue::LvalueRef;
use type_::Type;
use type_of;
@ -570,20 +571,20 @@ impl<'a, 'tcx> ArgType<'tcx> {
/// lvalue for the original Rust type of this argument/return.
/// Can be used for both storing formal arguments into Rust variables
/// or results of call/invoke instructions into their destinations.
pub fn store(&self, bcx: &Builder<'a, 'tcx>, mut val: ValueRef, dst: ValueRef) {
pub fn store(&self, bcx: &Builder<'a, 'tcx>, mut val: ValueRef, dst: LvalueRef<'tcx>) {
if self.is_ignore() {
return;
}
let ccx = bcx.ccx;
if self.is_indirect() {
let llsz = C_usize(ccx, self.layout.size(ccx).bytes());
base::call_memcpy(bcx, dst, val, llsz, self.layout.align(ccx));
base::call_memcpy(bcx, dst.llval, val, llsz, self.layout.align(ccx));
} else if let Some(ty) = self.cast {
// FIXME(eddyb): Figure out when the simpler Store is safe, clang
// uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}.
let can_store_through_cast_ptr = false;
if can_store_through_cast_ptr {
let cast_dst = bcx.pointercast(dst, ty.llvm_type(ccx).ptr_to());
let cast_dst = bcx.pointercast(dst.llval, ty.llvm_type(ccx).ptr_to());
bcx.store(val, cast_dst, Some(self.layout.align(ccx)));
} else {
// The actual return type is a struct, but the ABI
@ -610,7 +611,7 @@ impl<'a, 'tcx> ArgType<'tcx> {
// ...and then memcpy it to the intended destination.
base::call_memcpy(bcx,
bcx.pointercast(dst, Type::i8p(ccx)),
bcx.pointercast(dst.llval, Type::i8p(ccx)),
bcx.pointercast(llscratch, Type::i8p(ccx)),
C_usize(ccx, self.layout.size(ccx).bytes()),
self.layout.align(ccx).min(ty.align(ccx)));
@ -618,14 +619,12 @@ impl<'a, 'tcx> ArgType<'tcx> {
bcx.lifetime_end(llscratch, scratch_size);
}
} else {
if self.layout.ty == ccx.tcx().types.bool {
val = bcx.zext(val, Type::i8(ccx));
}
bcx.store(val, dst, None);
val = base::from_immediate(bcx, val);
bcx.store(val, dst.llval, None);
}
}
pub fn store_fn_arg(&self, bcx: &Builder<'a, 'tcx>, idx: &mut usize, dst: ValueRef) {
pub fn store_fn_arg(&self, bcx: &Builder<'a, 'tcx>, idx: &mut usize, dst: LvalueRef<'tcx>) {
if self.pad.is_some() {
*idx += 1;
}
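
When `cast` is set, the ABI type and the Rust type disagree, so the value is written to a scratch slot of the ABI type and then memcpy'd into the destination. A userland sketch of that store-through-scratch idea (the types and sizes here are assumptions for illustration, not the LLVM codegen):

use std::mem::{size_of, MaybeUninit};

// An ABI value of one type is written to a scratch slot, then only the
// bytes that belong to the Rust type are memcpy'd into the destination.
fn store_through_scratch<Abi: Copy, Rust: Copy>(abi_val: Abi, dst: &mut Rust) {
    assert!(size_of::<Rust>() <= size_of::<Abi>());
    let mut scratch = MaybeUninit::<Abi>::uninit();
    unsafe {
        scratch.as_mut_ptr().write(abi_val);
        // ...and then memcpy it to the intended destination.
        std::ptr::copy_nonoverlapping(
            scratch.as_ptr() as *const u8,
            dst as *mut Rust as *mut u8,
            size_of::<Rust>(),
        );
    }
}

fn main() {
    // e.g. an i24-like [u8; 3] received in an i32-sized ABI slot.
    let mut dst = [0u8; 3];
    store_through_scratch(0x00aa_bbccu32, &mut dst);
    if cfg!(target_endian = "little") {
        assert_eq!(dst, [0xcc, 0xbb, 0xaa]);
    }
}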

View File

@ -42,10 +42,9 @@
//! taken to it, implementing them for Rust seems difficult.
use rustc::ty::{self, Ty};
use rustc::ty::layout::{self, Align, HasDataLayout, LayoutTyper, Size};
use rustc::ty::layout::{self, Align, HasDataLayout, LayoutTyper, Size, TyLayout};
use context::CrateContext;
use monomorphize;
use type_::Type;
use type_of;
@ -75,15 +74,25 @@ pub fn finish_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
| layout::UntaggedUnion { .. } | layout::RawNullablePointer { .. } => { }
layout::Univariant { ..}
| layout::StructWrappedNullablePointer { .. } => {
let (nonnull_variant_index, nonnull_variant, packed) = match *l {
layout::Univariant { ref variant, .. } => (0, variant, variant.packed),
let (variant_layout, variant) = match *l {
layout::Univariant { ref variant, .. } => {
let is_enum = if let ty::TyAdt(def, _) = t.sty {
def.is_enum()
} else {
false
};
if is_enum {
(l.for_variant(0), variant)
} else {
(l, variant)
}
}
layout::StructWrappedNullablePointer { nndiscr, ref nonnull, .. } =>
(nndiscr, nonnull, nonnull.packed),
(l.for_variant(nndiscr as usize), nonnull),
_ => unreachable!()
};
llty.set_struct_body(&struct_llfields(cx, t, nonnull_variant_index as usize,
nonnull_variant, None),
packed)
llty.set_struct_body(&struct_llfields(cx, variant_layout, variant, None),
variant.packed)
},
_ => bug!("This function cannot handle {} with layout {:#?}", t, l)
}
@ -97,22 +106,18 @@ fn generic_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
match *l {
layout::CEnum { discr, .. } => Type::from_integer(cx, discr),
layout::RawNullablePointer { nndiscr, .. } => {
let (def, substs) = match t.sty {
ty::TyAdt(d, s) => (d, s),
_ => bug!("{} is not an ADT", t)
};
let nnty = monomorphize::field_ty(cx.tcx(), substs,
&def.variants[nndiscr as usize].fields[0]);
if let layout::Scalar { value: layout::Pointer, .. } = *cx.layout_of(nnty) {
let nnfield = l.for_variant(nndiscr as usize).field(cx, 0);
if let layout::Scalar { value: layout::Pointer, .. } = *nnfield {
Type::i8p(cx)
} else {
type_of::type_of(cx, nnty)
type_of::type_of(cx, nnfield.ty)
}
}
layout::StructWrappedNullablePointer { nndiscr, ref nonnull, .. } => {
match name {
None => {
Type::struct_(cx, &struct_llfields(cx, t, nndiscr as usize, nonnull, None),
Type::struct_(cx, &struct_llfields(cx, l.for_variant(nndiscr as usize),
nonnull, None),
nonnull.packed)
}
Some(name) => {
@ -123,7 +128,7 @@ fn generic_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
layout::Univariant { ref variant, .. } => {
match name {
None => {
Type::struct_(cx, &struct_llfields(cx, t, 0, &variant, None),
Type::struct_(cx, &struct_llfields(cx, l, &variant, None),
variant.packed)
}
Some(name) => {
@ -199,61 +204,30 @@ fn union_fill(cx: &CrateContext, size: Size, align: Align) -> Type {
}
/// Double an index to account for padding.
pub fn memory_index_to_gep(index: usize) -> usize {
pub fn memory_index_to_gep(index: u64) -> u64 {
index * 2
}
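
The doubling works because `struct_llfields` below emits one `[N x i8]` padding slot before every field except the first, so the field with memory index i always lands at GEP index i * 2. A runnable toy version of that interleaving (offsets and sizes assumed):

fn llfields(offsets_and_sizes: &[(u64, u64)]) -> Vec<String> {
    let mut out = Vec::new();
    let mut offset = 0;
    for (i, &(target, size)) in offsets_and_sizes.iter().enumerate() {
        let padding = target - offset;
        if i == 0 {
            assert_eq!(padding, 0); // no padding slot before field 0
        } else {
            out.push(format!("[{} x i8]", padding)); // padding slot
        }
        out.push(format!("field{}", i)); // lands at GEP index i * 2
        offset = target + size;
    }
    out
}

fn main() {
    // e.g. { u32 at offset 0, u64 at offset 8 }: field 1 sits at GEP index 2.
    let fields = llfields(&[(0, 4), (8, 8)]);
    assert_eq!(fields[1 * 2], "field1");
}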
/// Lookup `Struct::memory_index`, double it to account for padding.
pub fn struct_llfields_index(variant: &layout::Struct, index: usize) -> usize {
memory_index_to_gep(variant.memory_index[index] as usize)
}
pub fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
t: Ty<'tcx>,
variant_index: usize,
layout: TyLayout<'tcx>,
variant: &layout::Struct,
discr: Option<Ty<'tcx>>) -> Vec<Type> {
let field_count = match t.sty {
ty::TyAdt(ref def, _) if def.variants.len() == 0 => return vec![],
ty::TyAdt(ref def, _) => {
discr.is_some() as usize + def.variants[variant_index].fields.len()
},
ty::TyTuple(fields, _) => fields.len(),
ty::TyClosure(def_id, substs) => {
if variant_index > 0 { bug!("{} is a closure, which only has one variant", t);}
substs.upvar_tys(def_id, cx.tcx()).count()
},
ty::TyGenerator(def_id, substs, _) => {
if variant_index > 0 { bug!("{} is a generator, which only has one variant", t);}
substs.field_tys(def_id, cx.tcx()).count()
},
_ => bug!("{} is not a type that can have fields.", t)
};
let field_count = (discr.is_some() as usize) + layout.field_count();
debug!("struct_llfields: variant: {:?}", variant);
let mut first_field = true;
let mut offset = Size::from_bytes(0);
let mut result: Vec<Type> = Vec::with_capacity(field_count * 2);
let field_iter = variant.field_index_by_increasing_offset().map(|i| {
(i, match t.sty {
ty::TyAdt(..) if i == 0 && discr.is_some() => discr.unwrap(),
ty::TyAdt(ref def, ref substs) => {
monomorphize::field_ty(cx.tcx(), substs,
&def.variants[variant_index].fields[i as usize - discr.is_some() as usize])
},
ty::TyTuple(fields, _) => fields[i as usize],
ty::TyClosure(def_id, substs) => {
substs.upvar_tys(def_id, cx.tcx()).nth(i).unwrap()
},
ty::TyGenerator(def_id, substs, _) => {
let ty = substs.field_tys(def_id, cx.tcx()).nth(i).unwrap();
cx.tcx().normalize_associated_type(&ty)
},
_ => bug!()
}, variant.offsets[i as usize])
let ty = if i == 0 && discr.is_some() {
cx.layout_of(discr.unwrap())
} else {
layout.field(cx, i - discr.is_some() as usize)
};
(i, ty, variant.offsets[i as usize])
});
for (index, ty, target_offset) in field_iter {
debug!("struct_llfields: {} ty: {} offset: {:?} target_offset: {:?}",
index, ty, offset, target_offset);
for (index, field, target_offset) in field_iter {
debug!("struct_llfields: {}: {:?} offset: {:?} target_offset: {:?}",
index, field, offset, target_offset);
assert!(target_offset >= offset);
let padding = target_offset - offset;
if first_field {
@ -263,19 +237,19 @@ pub fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
result.push(Type::array(&Type::i8(cx), padding.bytes()));
debug!(" padding before: {:?}", padding);
}
let llty = type_of::in_memory_type_of(cx, ty);
let llty = type_of::in_memory_type_of(cx, field.ty);
result.push(llty);
let layout = cx.layout_of(ty);
if variant.packed {
assert_eq!(padding.bytes(), 0);
} else {
let field_align = layout.align(cx);
let field_align = field.align(cx);
assert!(field_align.abi() <= variant.align.abi(),
"non-packed type has field with larger align ({}): {:#?}",
field_align.abi(), variant);
}
let target_size = layout.size(&cx.tcx().data_layout);
offset = target_offset + target_size;
offset = target_offset + field.size(cx);
}
if variant.sized && field_count > 0 {
if offset > variant.stride() {

View File

@ -11,7 +11,6 @@
//! # Translation of inline assembly.
use llvm::{self, ValueRef};
use base;
use common::*;
use type_of;
use type_::Type;
@ -19,8 +18,9 @@ use builder::Builder;
use rustc::hir;
use rustc::ty::Ty;
use rustc::ty::layout::Align;
use mir::lvalue::Alignment;
use mir::lvalue::{LvalueRef, Alignment};
use std::ffi::CString;
use syntax::ast::AsmDialect;
@ -40,16 +40,17 @@ pub fn trans_inline_asm<'a, 'tcx>(
let mut indirect_outputs = vec![];
for (i, (out, &(val, ty))) in ia.outputs.iter().zip(&outputs).enumerate() {
let val = if out.is_rw || out.is_indirect {
Some(base::load_ty(bcx, val, Alignment::Packed, ty))
Some(LvalueRef::new_sized(val, ty,
Alignment::Packed(Align::from_bytes(1, 1).unwrap())).load(bcx))
} else {
None
};
if out.is_rw {
inputs.push(val.unwrap());
inputs.push(val.unwrap().immediate());
ext_constraints.push(i.to_string());
}
if out.is_indirect {
indirect_outputs.push(val.unwrap());
indirect_outputs.push(val.unwrap().immediate());
} else {
output_types.push(type_of::type_of(bcx.ccx, ty));
}
@ -107,7 +108,7 @@ pub fn trans_inline_asm<'a, 'tcx>(
// Again, based on how many outputs we have
let outputs = ia.outputs.iter().zip(&outputs).filter(|&(ref o, _)| !o.is_indirect);
for (i, (_, &(val, _))) in outputs.enumerate() {
let v = if num_outputs == 1 { r } else { bcx.extract_value(r, i) };
let v = if num_outputs == 1 { r } else { bcx.extract_value(r, i as u64) };
bcx.store(v, val, None);
}

View File

@ -48,7 +48,6 @@ use rustc::util::common::{time, print_time_passes_entry};
use rustc::session::config::{self, NoDebugInfo};
use rustc::session::Session;
use rustc_incremental;
use abi;
use allocator;
use mir::lvalue::LvalueRef;
use attributes;
@ -56,7 +55,7 @@ use builder::Builder;
use callee;
use common::{C_bool, C_bytes_in_context, C_i32, C_usize};
use collector::{self, TransItemCollectionMode};
use common::{C_struct_in_context, C_undef, C_array};
use common::{C_struct_in_context, C_array};
use common::CrateContext;
use common::{type_is_zero_size, val_ty};
use common;
@ -66,14 +65,13 @@ use debuginfo;
use declare;
use meth;
use mir;
use monomorphize::{self, Instance};
use monomorphize::Instance;
use partitioning::{self, PartitioningStrategy, CodegenUnit, CodegenUnitExt};
use symbol_names_test;
use time_graph;
use trans_item::{TransItem, BaseTransItemExt, TransItemExt, DefPathBasedNames};
use type_::Type;
use type_of;
use value::Value;
use rustc::util::nodemap::{NodeSet, FxHashMap, FxHashSet, DefIdSet};
use CrateInfo;
@ -90,7 +88,7 @@ use syntax::attr;
use rustc::hir;
use syntax::ast;
use mir::lvalue::Alignment;
use mir::operand::{OperandRef, OperandValue};
pub use rustc_trans_utils::{find_exported_symbols, check_for_rustc_errors_attr};
pub use rustc_trans_utils::trans_item::linkage_by_name;
@ -125,14 +123,6 @@ impl<'a, 'tcx> Drop for StatRecorder<'a, 'tcx> {
}
}
pub fn get_meta(bcx: &Builder, fat_ptr: ValueRef) -> ValueRef {
bcx.struct_gep(fat_ptr, abi::FAT_PTR_EXTRA)
}
pub fn get_dataptr(bcx: &Builder, fat_ptr: ValueRef) -> ValueRef {
bcx.struct_gep(fat_ptr, abi::FAT_PTR_ADDR)
}
pub fn bin_op_to_icmp_predicate(op: hir::BinOp_,
signed: bool)
-> llvm::IntPredicate {
@ -257,25 +247,29 @@ pub fn unsize_thin_ptr<'a, 'tcx>(
/// Coerce `src`, which is a reference to a value of type `src_ty`,
/// to a value of type `dst_ty` and store the result in `dst`
pub fn coerce_unsized_into<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
src: &LvalueRef<'tcx>,
dst: &LvalueRef<'tcx>) {
src: LvalueRef<'tcx>,
dst: LvalueRef<'tcx>) {
let src_ty = src.ty.to_ty(bcx.tcx());
let dst_ty = dst.ty.to_ty(bcx.tcx());
let coerce_ptr = || {
let (base, info) = if common::type_is_fat_ptr(bcx.ccx, src_ty) {
// fat-ptr to fat-ptr unsize preserves the vtable
// i.e. &'a fmt::Debug+Send => &'a fmt::Debug
// So we need to pointercast the base to ensure
// the types match up.
let (base, info) = load_fat_ptr(bcx, src.llval, src.alignment, src_ty);
let llcast_ty = type_of::fat_ptr_base_ty(bcx.ccx, dst_ty);
let base = bcx.pointercast(base, llcast_ty);
(base, info)
} else {
let base = load_ty(bcx, src.llval, src.alignment, src_ty);
unsize_thin_ptr(bcx, base, src_ty, dst_ty)
let (base, info) = match src.load(bcx).val {
OperandValue::Pair(base, info) => {
// fat-ptr to fat-ptr unsize preserves the vtable
// i.e. &'a fmt::Debug+Send => &'a fmt::Debug
// So we need to pointercast the base to ensure
// the types match up.
let llcast_ty = type_of::fat_ptr_base_ty(bcx.ccx, dst_ty);
(bcx.pointercast(base, llcast_ty), info)
}
OperandValue::Immediate(base) => {
unsize_thin_ptr(bcx, base, src_ty, dst_ty)
}
OperandValue::Ref(..) => bug!()
};
store_fat_ptr(bcx, base, info, dst.llval, dst.alignment, dst_ty);
OperandRef {
val: OperandValue::Pair(base, info),
ty: dst_ty
}.store(bcx, dst);
};
match (&src_ty.sty, &dst_ty.sty) {
(&ty::TyRef(..), &ty::TyRef(..)) |
@ -287,32 +281,25 @@ pub fn coerce_unsized_into<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
coerce_ptr()
}
(&ty::TyAdt(def_a, substs_a), &ty::TyAdt(def_b, substs_b)) => {
(&ty::TyAdt(def_a, _), &ty::TyAdt(def_b, _)) => {
assert_eq!(def_a, def_b);
let src_fields = def_a.variants[0].fields.iter().map(|f| {
monomorphize::field_ty(bcx.tcx(), substs_a, f)
});
let dst_fields = def_b.variants[0].fields.iter().map(|f| {
monomorphize::field_ty(bcx.tcx(), substs_b, f)
});
for i in 0..def_a.variants[0].fields.len() {
let src_f = src.project_field(bcx, i);
let dst_f = dst.project_field(bcx, i);
let iter = src_fields.zip(dst_fields).enumerate();
for (i, (src_fty, dst_fty)) in iter {
if type_is_zero_size(bcx.ccx, dst_fty) {
let src_f_ty = src_f.ty.to_ty(bcx.tcx());
let dst_f_ty = dst_f.ty.to_ty(bcx.tcx());
if type_is_zero_size(bcx.ccx, dst_f_ty) {
continue;
}
let (src_f, src_f_align) = src.trans_field_ptr(bcx, i);
let (dst_f, dst_f_align) = dst.trans_field_ptr(bcx, i);
if src_fty == dst_fty {
memcpy_ty(bcx, dst_f, src_f, src_fty, None);
if src_f_ty == dst_f_ty {
memcpy_ty(bcx, dst_f.llval, src_f.llval, src_f_ty,
(src_f.alignment | dst_f.alignment).non_abi());
} else {
coerce_unsized_into(
bcx,
&LvalueRef::new_sized_ty(src_f, src_fty, src_f_align),
&LvalueRef::new_sized_ty(dst_f, dst_fty, dst_f_align)
);
coerce_unsized_into(bcx, src_f, dst_f);
}
}
}
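
The loop copies every field unchanged and recurses only into the field whose type actually unsizes; the thin-to-fat step itself just reuses the data pointer and attaches metadata. A safe surface-Rust sketch of that step:

// The data pointer is reused, and the metadata (a length here, a
// vtable for trait objects) is attached next to it.
fn main() {
    let array: &[u32; 4] = &[1, 2, 3, 4];

    // The built-in unsizing coercion: same data pointer, plus metadata.
    let slice: &[u32] = array;

    assert_eq!(array.as_ptr(), slice.as_ptr()); // data pointer preserved
    assert_eq!(slice.len(), 4);                 // metadata: the length
}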
@ -385,94 +372,6 @@ pub fn call_assume<'a, 'tcx>(b: &Builder<'a, 'tcx>, val: ValueRef) {
b.call(assume_intrinsic, &[val], None);
}
/// Helper for loading values from memory. Does the necessary conversion if the in-memory type
/// differs from the type used for SSA values. Also handles various special cases where the type
/// gives us better information about what we are loading.
pub fn load_ty<'a, 'tcx>(b: &Builder<'a, 'tcx>, ptr: ValueRef,
alignment: Alignment, t: Ty<'tcx>) -> ValueRef {
let ccx = b.ccx;
if type_is_zero_size(ccx, t) {
return C_undef(type_of::type_of(ccx, t));
}
unsafe {
let global = llvm::LLVMIsAGlobalVariable(ptr);
if !global.is_null() && llvm::LLVMIsGlobalConstant(global) == llvm::True {
let val = llvm::LLVMGetInitializer(global);
if !val.is_null() {
if t.is_bool() {
return llvm::LLVMConstTrunc(val, Type::i1(ccx).to_ref());
}
return val;
}
}
}
if t.is_bool() {
b.trunc(b.load_range_assert(ptr, 0, 2, llvm::False, alignment.to_align()),
Type::i1(ccx))
} else if t.is_char() {
// a char is a Unicode codepoint, and so takes values from 0
// to 0x10FFFF inclusive only.
b.load_range_assert(ptr, 0, 0x10FFFF + 1, llvm::False, alignment.to_align())
} else if (t.is_region_ptr() || t.is_box() || t.is_fn())
&& !common::type_is_fat_ptr(ccx, t)
{
b.load_nonnull(ptr, alignment.to_align())
} else {
b.load(ptr, alignment.to_align())
}
}
/// Helper for storing values in memory. Does the necessary conversion if the in-memory type
/// differs from the type used for SSA values.
pub fn store_ty<'a, 'tcx>(cx: &Builder<'a, 'tcx>, v: ValueRef, dst: ValueRef,
dst_align: Alignment, t: Ty<'tcx>) {
debug!("store_ty: {:?} : {:?} <- {:?}", Value(dst), t, Value(v));
if common::type_is_fat_ptr(cx.ccx, t) {
let lladdr = cx.extract_value(v, abi::FAT_PTR_ADDR);
let llextra = cx.extract_value(v, abi::FAT_PTR_EXTRA);
store_fat_ptr(cx, lladdr, llextra, dst, dst_align, t);
} else {
cx.store(from_immediate(cx, v), dst, dst_align.to_align());
}
}
pub fn store_fat_ptr<'a, 'tcx>(cx: &Builder<'a, 'tcx>,
data: ValueRef,
extra: ValueRef,
dst: ValueRef,
dst_align: Alignment,
_ty: Ty<'tcx>) {
// FIXME: emit metadata
cx.store(data, get_dataptr(cx, dst), dst_align.to_align());
cx.store(extra, get_meta(cx, dst), dst_align.to_align());
}
pub fn load_fat_ptr<'a, 'tcx>(
b: &Builder<'a, 'tcx>, src: ValueRef, alignment: Alignment, t: Ty<'tcx>
) -> (ValueRef, ValueRef) {
let ptr = get_dataptr(b, src);
let ptr = if t.is_region_ptr() || t.is_box() {
b.load_nonnull(ptr, alignment.to_align())
} else {
b.load(ptr, alignment.to_align())
};
let meta = get_meta(b, src);
let meta_ty = val_ty(meta);
// If the 'meta' field is a pointer, it's a vtable, so use load_nonnull
// instead
let meta = if meta_ty.element_type().kind() == llvm::TypeKind::Pointer {
b.load_nonnull(meta, None)
} else {
b.load(meta, None)
};
(ptr, meta)
}
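
These removed helpers are subsumed by `OperandValue::Pair` in `LvalueRef::load` and `OperandRef::store`: a fat pointer is two machine words loaded and stored separately. A quick sketch of that invariant:

use std::mem::size_of;

fn main() {
    assert_eq!(size_of::<&[u8]>(), 2 * size_of::<usize>()); // (ptr, len)
    assert_eq!(size_of::<&dyn std::fmt::Debug>(),
               2 * size_of::<usize>());                     // (ptr, vtable)
    assert_eq!(size_of::<&u8>(), size_of::<usize>());       // thin: one word
}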
pub fn from_immediate(bcx: &Builder, val: ValueRef) -> ValueRef {
if val_ty(val) == Type::i1(bcx.ccx) {
bcx.zext(val, Type::i8(bcx.ccx))

View File

@ -625,25 +625,6 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
}
}
// Simple wrapper around GEP that takes an array of ints and wraps them
// in C_i32()
#[inline]
pub fn gepi(&self, base: ValueRef, ixs: &[usize]) -> ValueRef {
// Small vector optimization. This should catch 100% of the cases that
// we care about.
if ixs.len() < 16 {
let mut small_vec = [ C_i32(self.ccx, 0); 16 ];
for (small_vec_e, &ix) in small_vec.iter_mut().zip(ixs) {
*small_vec_e = C_i32(self.ccx, ix as i32);
}
self.inbounds_gep(base, &small_vec[..ixs.len()])
} else {
let v = ixs.iter().map(|i| C_i32(self.ccx, *i as i32)).collect::<Vec<ValueRef>>();
self.count_insn("gepi");
self.inbounds_gep(base, &v)
}
}
pub fn inbounds_gep(&self, ptr: ValueRef, indices: &[ValueRef]) -> ValueRef {
self.count_insn("inboundsgep");
unsafe {
@ -652,8 +633,9 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
}
}
pub fn struct_gep(&self, ptr: ValueRef, idx: usize) -> ValueRef {
pub fn struct_gep(&self, ptr: ValueRef, idx: u64) -> ValueRef {
self.count_insn("structgep");
assert_eq!(idx as c_uint as u64, idx);
unsafe {
llvm::LLVMBuildStructGEP(self.llbuilder, ptr, idx as c_uint, noname())
}
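
The indices are widened to u64 on the Rust side while LLVM's C API still takes a c_uint, hence the lossless-narrowing assert before each call. The idiom in isolation (a hypothetical standalone helper):

// Widen-compare round trip: an out-of-range index panics loudly
// instead of being silently truncated at the FFI boundary.
fn to_c_uint(idx: u64) -> u32 {
    assert_eq!(idx as u32 as u64, idx); // lossless, or panic
    idx as u32
}

fn main() {
    assert_eq!(to_c_uint(7), 7);
    // to_c_uint(1 << 40) would panic rather than wrap.
}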
@ -959,16 +941,18 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
}
}
pub fn extract_value(&self, agg_val: ValueRef, idx: usize) -> ValueRef {
pub fn extract_value(&self, agg_val: ValueRef, idx: u64) -> ValueRef {
self.count_insn("extractvalue");
assert_eq!(idx as c_uint as u64, idx);
unsafe {
llvm::LLVMBuildExtractValue(self.llbuilder, agg_val, idx as c_uint, noname())
}
}
pub fn insert_value(&self, agg_val: ValueRef, elt: ValueRef,
idx: usize) -> ValueRef {
idx: u64) -> ValueRef {
self.count_insn("insertvalue");
assert_eq!(idx as c_uint as u64, idx);
unsafe {
llvm::LLVMBuildInsertValue(self.llbuilder, agg_val, elt, idx as c_uint,
noname())

View File

@ -22,7 +22,6 @@ use base;
use builder::Builder;
use consts;
use declare;
use monomorphize;
use type_::Type;
use value::Value;
use rustc::traits;
@ -68,53 +67,11 @@ pub fn type_is_immediate<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -
}
}
/// Returns Some([a, b]) if the type has a pair of fields with types a and b.
pub fn type_pair_fields<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>)
-> Option<[Ty<'tcx>; 2]> {
match ty.sty {
ty::TyAdt(adt, substs) => {
assert_eq!(adt.variants.len(), 1);
let fields = &adt.variants[0].fields;
if fields.len() != 2 {
return None;
}
Some([monomorphize::field_ty(ccx.tcx(), substs, &fields[0]),
monomorphize::field_ty(ccx.tcx(), substs, &fields[1])])
}
ty::TyClosure(def_id, substs) => {
let mut tys = substs.upvar_tys(def_id, ccx.tcx());
tys.next().and_then(|first_ty| tys.next().and_then(|second_ty| {
if tys.next().is_some() {
None
} else {
Some([first_ty, second_ty])
}
}))
}
ty::TyGenerator(def_id, substs, _) => {
let mut tys = substs.field_tys(def_id, ccx.tcx());
tys.next().and_then(|first_ty| tys.next().and_then(|second_ty| {
if tys.next().is_some() {
None
} else {
Some([first_ty, second_ty])
}
}))
}
ty::TyTuple(tys, _) => {
if tys.len() != 2 {
return None;
}
Some([tys[0], tys[1]])
}
_ => None
}
}
/// Returns true if the type is represented as a pair of immediates.
pub fn type_is_imm_pair<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>)
-> bool {
match *ccx.layout_of(ty) {
let layout = ccx.layout_of(ty);
match *layout {
Layout::FatPointer { .. } => true,
Layout::Univariant { ref variant, .. } => {
// There must be only 2 fields.
@ -122,12 +79,9 @@ pub fn type_is_imm_pair<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>)
return false;
}
match type_pair_fields(ccx, ty) {
Some([a, b]) => {
type_is_immediate(ccx, a) && type_is_immediate(ccx, b)
}
None => false
}
// The two fields must be both immediates.
type_is_immediate(ccx, layout.field_type(ccx, 0)) &&
type_is_immediate(ccx, layout.field_type(ccx, 1))
}
_ => false
}
@ -356,13 +310,14 @@ pub fn C_bytes_in_context(llcx: ContextRef, bytes: &[u8]) -> ValueRef {
}
}
pub fn const_get_elt(v: ValueRef, i: usize) -> ValueRef {
pub fn const_get_elt(v: ValueRef, idx: u64) -> ValueRef {
unsafe {
let us = &[i as c_uint];
assert_eq!(idx as c_uint as u64, idx);
let us = &[idx as c_uint];
let r = llvm::LLVMConstExtractValue(v, us.as_ptr(), us.len() as c_uint);
debug!("const_get_elt(v={:?}, i={}, r={:?})",
Value(v), i, Value(r));
debug!("const_get_elt(v={:?}, idx={}, r={:?})",
Value(v), idx, Value(r));
r
}

View File

@ -30,10 +30,9 @@ use rustc::ty::fold::TypeVisitor;
use rustc::ty::subst::Substs;
use rustc::ty::util::TypeIdHasher;
use rustc::ich::Fingerprint;
use monomorphize;
use common::{self, CrateContext};
use rustc::ty::{self, AdtKind, Ty};
use rustc::ty::layout::{self, Align, LayoutTyper, Size};
use rustc::ty::layout::{self, Align, LayoutTyper, Size, TyLayout};
use rustc::session::{Session, config};
use rustc::util::nodemap::FxHashMap;
use rustc::util::common::path2cstr;
@ -932,7 +931,6 @@ impl<'tcx> MemberDescriptionFactory<'tcx> {
struct StructMemberDescriptionFactory<'tcx> {
ty: Ty<'tcx>,
variant: &'tcx ty::VariantDef,
substs: &'tcx Substs<'tcx>,
span: Span,
}
@ -960,12 +958,11 @@ impl<'tcx> StructMemberDescriptionFactory<'tcx> {
} else {
f.name.to_string()
};
let fty = monomorphize::field_ty(cx.tcx(), self.substs, f);
let (size, align) = cx.size_and_align_of(fty);
let field = layout.field(cx, i);
let (size, align) = field.size_and_align(cx);
MemberDescription {
name,
type_metadata: type_metadata(cx, fty, self.span),
type_metadata: type_metadata(cx, field.ty, self.span),
offset: offsets[i],
size,
align,
@ -983,8 +980,8 @@ fn prepare_struct_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
-> RecursiveTypeDescription<'tcx> {
let struct_name = compute_debuginfo_type_name(cx, struct_type, false);
let (struct_def_id, variant, substs) = match struct_type.sty {
ty::TyAdt(def, substs) => (def.did, def.struct_variant(), substs),
let (struct_def_id, variant) = match struct_type.sty {
ty::TyAdt(def, _) => (def.did, def.struct_variant()),
_ => bug!("prepare_struct_metadata on a non-ADT")
};
@ -1004,7 +1001,6 @@ fn prepare_struct_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
StructMDF(StructMemberDescriptionFactory {
ty: struct_type,
variant,
substs,
span,
})
)
@ -1075,20 +1071,20 @@ fn prepare_tuple_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
//=-----------------------------------------------------------------------------
struct UnionMemberDescriptionFactory<'tcx> {
layout: TyLayout<'tcx>,
variant: &'tcx ty::VariantDef,
substs: &'tcx Substs<'tcx>,
span: Span,
}
impl<'tcx> UnionMemberDescriptionFactory<'tcx> {
fn create_member_descriptions<'a>(&self, cx: &CrateContext<'a, 'tcx>)
-> Vec<MemberDescription> {
self.variant.fields.iter().map(|field| {
let fty = monomorphize::field_ty(cx.tcx(), self.substs, field);
let (size, align) = cx.size_and_align_of(fty);
self.variant.fields.iter().enumerate().map(|(i, f)| {
let field = self.layout.field(cx, i);
let (size, align) = field.size_and_align(cx);
MemberDescription {
name: field.name.to_string(),
type_metadata: type_metadata(cx, fty, self.span),
name: f.name.to_string(),
type_metadata: type_metadata(cx, field.ty, self.span),
offset: Size::from_bytes(0),
size,
align,
@ -1105,8 +1101,8 @@ fn prepare_union_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
-> RecursiveTypeDescription<'tcx> {
let union_name = compute_debuginfo_type_name(cx, union_type, false);
let (union_def_id, variant, substs) = match union_type.sty {
ty::TyAdt(def, substs) => (def.did, def.struct_variant(), substs),
let (union_def_id, variant) = match union_type.sty {
ty::TyAdt(def, _) => (def.did, def.struct_variant()),
_ => bug!("prepare_union_metadata on a non-ADT")
};
@ -1124,8 +1120,8 @@ fn prepare_union_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
unique_type_id,
union_metadata_stub,
UnionMDF(UnionMemberDescriptionFactory {
layout: cx.layout_of(union_type),
variant,
substs,
span,
})
)
@ -1142,7 +1138,7 @@ fn prepare_union_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
// offset of zero bytes).
struct EnumMemberDescriptionFactory<'tcx> {
enum_type: Ty<'tcx>,
type_rep: &'tcx layout::Layout,
type_rep: TyLayout<'tcx>,
discriminant_type_metadata: Option<DIType>,
containing_scope: DIScope,
file_metadata: DIFile,
@ -1153,11 +1149,7 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> {
fn create_member_descriptions<'a>(&self, cx: &CrateContext<'a, 'tcx>)
-> Vec<MemberDescription> {
let adt = &self.enum_type.ty_adt_def().unwrap();
let substs = match self.enum_type.sty {
ty::TyAdt(def, ref s) if def.adt_kind() == AdtKind::Enum => s,
_ => bug!("{} is not an enum", self.enum_type)
};
match *self.type_rep {
match *self.type_rep.layout {
layout::General { ref variants, .. } => {
let discriminant_info = RegularDiscriminant(self.discriminant_type_metadata
.expect(""));
@ -1169,6 +1161,7 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> {
describe_enum_variant(cx,
self.enum_type,
struct_def,
i,
&adt.variants[i],
discriminant_info,
self.containing_scope,
@ -1200,6 +1193,7 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> {
describe_enum_variant(cx,
self.enum_type,
variant,
0,
&adt.variants[0],
NoDiscriminant,
self.containing_scope,
@ -1223,19 +1217,19 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> {
]
}
}
layout::RawNullablePointer { nndiscr: non_null_variant_index, .. } => {
layout::RawNullablePointer { nndiscr, .. } => {
// As far as debuginfo is concerned, the pointer this enum
// represents is still wrapped in a struct. This is to make the
// DWARF representation of enums uniform.
// First create a description of the artificial wrapper struct:
let non_null_variant = &adt.variants[non_null_variant_index as usize];
let non_null_variant = &adt.variants[nndiscr as usize];
let non_null_variant_name = non_null_variant.name.as_str();
// The llvm type and metadata of the pointer
let nnty = monomorphize::field_ty(cx.tcx(), &substs, &non_null_variant.fields[0]);
let (size, align) = cx.size_and_align_of(nnty);
let non_null_type_metadata = type_metadata(cx, nnty, self.span);
let nnfield = self.type_rep.for_variant(nndiscr as usize).field(cx, 0);
let (size, align) = nnfield.size_and_align(cx);
let non_null_type_metadata = type_metadata(cx, nnfield.ty, self.span);
// For the metadata of the wrapper struct, we need to create a
// MemberDescription of the struct's single field.
@ -1264,7 +1258,7 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> {
// Now we can create the metadata of the artificial struct
let artificial_struct_metadata =
composite_type_metadata(cx,
nnty,
nnfield.ty,
&non_null_variant_name,
unique_type_id,
&[sole_struct_member_description],
@ -1274,8 +1268,7 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> {
// Encode the information about the null variant in the union
// member's name.
let null_variant_index = (1 - non_null_variant_index) as usize;
let null_variant_name = adt.variants[null_variant_index].name;
let null_variant_name = adt.variants[(1 - nndiscr) as usize].name;
let union_member_name = format!("RUST$ENCODED$ENUM${}${}",
0,
null_variant_name);
@ -1301,6 +1294,7 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> {
describe_enum_variant(cx,
self.enum_type,
struct_def,
nndiscr as usize,
&adt.variants[nndiscr as usize],
OptimizedDiscriminant,
self.containing_scope,
@ -1386,31 +1380,25 @@ enum EnumDiscriminantInfo {
fn describe_enum_variant<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
enum_type: Ty<'tcx>,
struct_def: &'tcx layout::Struct,
variant_index: usize,
variant: &'tcx ty::VariantDef,
discriminant_info: EnumDiscriminantInfo,
containing_scope: DIScope,
span: Span)
-> (DICompositeType, MemberDescriptionFactory<'tcx>) {
let substs = match enum_type.sty {
ty::TyAdt(def, s) if def.adt_kind() == AdtKind::Enum => s,
ref t @ _ => bug!("{:#?} is not an enum", t)
let layout = cx.layout_of(enum_type);
let maybe_discr = match *layout {
layout::General { .. } => Some(layout.field_type(cx, 0)),
_ => None,
};
let maybe_discr_and_signed: Option<(layout::Integer, bool)> = match *cx.layout_of(enum_type) {
layout::CEnum {discr, ..} => Some((discr, true)),
layout::General{discr, ..} => Some((discr, false)),
layout::Univariant { .. }
| layout::RawNullablePointer { .. }
| layout::StructWrappedNullablePointer { .. } => None,
ref l @ _ => bug!("This should be unreachable. Type is {:#?} layout is {:#?}", enum_type, l)
};
let mut field_tys = variant.fields.iter().map(|f| {
monomorphize::field_ty(cx.tcx(), &substs, f)
let layout = layout.for_variant(variant_index);
let mut field_tys = (0..layout.field_count()).map(|i| {
layout.field_type(cx, i)
}).collect::<Vec<_>>();
if let Some((discr, signed)) = maybe_discr_and_signed {
field_tys.insert(0, discr.to_ty(cx.tcx(), signed));
if let Some(discr) = maybe_discr {
field_tys.insert(0, discr);
}
// Could do some consistency checks here: size, align, field count, discr type
@ -1560,7 +1548,7 @@ fn prepare_enum_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
ref l @ _ => bug!("Not an enum layout: {:#?}", l)
};
let (enum_type_size, enum_type_align) = cx.size_and_align_of(enum_type);
let (enum_type_size, enum_type_align) = type_rep.size_and_align(cx);
let enum_name = CString::new(enum_name).unwrap();
let unique_type_id_str = CString::new(
@ -1588,7 +1576,7 @@ fn prepare_enum_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
enum_metadata,
EnumMDF(EnumMemberDescriptionFactory {
enum_type,
type_rep: type_rep.layout,
type_rep,
discriminant_type_metadata,
containing_scope,
file_metadata,

View File

@ -19,7 +19,6 @@ use common::*;
use llvm::{ValueRef};
use llvm;
use meth;
use monomorphize;
use rustc::ty::layout::LayoutTyper;
use rustc::ty::{self, Ty};
use value::Value;
@ -38,7 +37,19 @@ pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, inf
}
assert!(!info.is_null());
match t.sty {
ty::TyAdt(..) | ty::TyTuple(..) => {
ty::TyDynamic(..) => {
// load size/align from vtable
(meth::SIZE.get_usize(bcx, info), meth::ALIGN.get_usize(bcx, info))
}
ty::TySlice(_) | ty::TyStr => {
let unit = t.sequence_element_type(bcx.tcx());
// The info in this case is the length of the str, so the size is that
// times the unit size.
let (size, align) = bcx.ccx.size_and_align_of(unit);
(bcx.mul(info, C_usize(bcx.ccx, size.bytes())),
C_usize(bcx.ccx, align.abi()))
}
_ => {
let ccx = bcx.ccx;
// First get the size of all statically known fields.
// Don't use size_of because it also rounds up to alignment, which we
@ -63,14 +74,7 @@ pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, inf
// Recurse to get the size of the dynamically sized field (must be
// the last field).
let field_ty = match t.sty {
ty::TyAdt(def, substs) => {
let last_field = def.struct_variant().fields.last().unwrap();
monomorphize::field_ty(bcx.tcx(), substs, last_field)
},
ty::TyTuple(tys, _) => tys.last().unwrap(),
_ => unreachable!(),
};
let field_ty = layout.field_type(ccx, layout.field_count() - 1);
let (unsized_size, unsized_align) = size_and_align_of_dst(bcx, field_ty, info);
// FIXME (#26403, #27023): We should be adding padding
@ -113,18 +117,5 @@ pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, inf
(size, align)
}
ty::TyDynamic(..) => {
// load size/align from vtable
(meth::SIZE.get_usize(bcx, info), meth::ALIGN.get_usize(bcx, info))
}
ty::TySlice(_) | ty::TyStr => {
let unit = t.sequence_element_type(bcx.tcx());
// The info in this case is the length of the str, so the size is that
// times the unit size.
let (size, align) = bcx.ccx.size_and_align_of(unit);
(bcx.mul(info, C_usize(bcx.ccx, size.bytes())),
C_usize(bcx.ccx, align.abi()))
}
_ => bug!("Unexpected unsized type, found {}", t)
}
}
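
The arithmetic being generated here can be checked in plain Rust with `size_of_val`/`align_of_val`: the alignment is the max of the sized prefix's and the tail's alignments, and the size is the prefix plus the dynamic tail, rounded up to that alignment. A runnable sketch (the `Packet` type is an assumed example):

use std::mem::{align_of, align_of_val, size_of, size_of_val};

// A struct whose last field is unsized, so its size and alignment are
// only known at runtime.
struct Packet<T: ?Sized> {
    header: u64,
    payload: T,
}

fn main() {
    let sized: Box<Packet<[u16; 3]>> =
        Box::new(Packet { header: 7, payload: [1, 2, 3] });
    let dst: Box<Packet<[u16]>> = sized; // unsizing coercion

    // The same arithmetic the codegen above emits.
    let align = align_of::<u64>().max(align_of::<u16>());
    let unrounded = size_of::<u64>() + 3 * size_of::<u16>();
    assert_eq!(align_of_val(&*dst), align);
    assert_eq!(size_of_val(&*dst), (unrounded + align - 1) / align * align);
}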

View File

@ -13,8 +13,9 @@
use intrinsics::{self, Intrinsic};
use llvm;
use llvm::{ValueRef};
use abi::{Abi, FnType};
use abi::{self, Abi, FnType};
use mir::lvalue::{LvalueRef, Alignment};
use mir::operand::{OperandRef, OperandValue};
use base::*;
use common::*;
use declare;
@ -105,6 +106,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
let name = &*tcx.item_name(def_id);
let llret_ty = type_of::type_of(ccx, ret_ty);
let result = LvalueRef::new_sized(llresult, ret_ty, Alignment::AbiAligned);
let simple = get_simple_intrinsic(ccx, name);
let llval = match name {
@ -238,9 +240,10 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
},
"volatile_store" => {
let tp_ty = substs.type_at(0);
let dst = LvalueRef::new_sized(llargs[0], tp_ty, Alignment::AbiAligned);
if type_is_fat_ptr(bcx.ccx, tp_ty) {
bcx.volatile_store(llargs[1], get_dataptr(bcx, llargs[0]));
bcx.volatile_store(llargs[2], get_meta(bcx, llargs[0]));
bcx.volatile_store(llargs[1], dst.project_field(bcx, abi::FAT_PTR_ADDR).llval);
bcx.volatile_store(llargs[2], dst.project_field(bcx, abi::FAT_PTR_EXTRA).llval);
} else {
let val = if fn_ty.args[1].is_indirect() {
bcx.load(llargs[1], None)
@ -250,7 +253,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
}
from_immediate(bcx, llargs[1])
};
let ptr = bcx.pointercast(llargs[0], val_ty(val).ptr_to());
let ptr = bcx.pointercast(dst.llval, val_ty(val).ptr_to());
let store = bcx.volatile_store(val, ptr);
unsafe {
llvm::LLVMSetAlignment(store, ccx.align_of(tp_ty).abi() as u32);
@ -306,11 +309,14 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
let llfn = bcx.ccx.get_intrinsic(&intrinsic);
// Convert `i1` to a `bool`, and write it to the out parameter
let val = bcx.call(llfn, &[llargs[0], llargs[1]], None);
let result = bcx.extract_value(val, 0);
let overflow = bcx.zext(bcx.extract_value(val, 1), Type::bool(ccx));
bcx.store(result, bcx.struct_gep(llresult, 0), None);
bcx.store(overflow, bcx.struct_gep(llresult, 1), None);
let pair = bcx.call(llfn, &[llargs[0], llargs[1]], None);
let val = bcx.extract_value(pair, 0);
let overflow = bcx.zext(bcx.extract_value(pair, 1), Type::bool(ccx));
let dest = result.project_field(bcx, 0);
bcx.store(val, dest.llval, dest.alignment.non_abi());
let dest = result.project_field(bcx, 1);
bcx.store(overflow, dest.llval, dest.alignment.non_abi());
return;
},
@ -373,7 +379,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
"discriminant_value" => {
let val_ty = substs.type_at(0);
let adt_val = LvalueRef::new_sized_ty(llargs[0], val_ty, Alignment::AbiAligned);
let adt_val = LvalueRef::new_sized(llargs[0], val_ty, Alignment::AbiAligned);
match val_ty.sty {
ty::TyAdt(adt, ..) if adt.is_enum() => {
adt_val.trans_get_discr(bcx, ret_ty)
@ -446,12 +452,15 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
let ty = substs.type_at(0);
if int_type_width_signed(ty, ccx).is_some() {
let weak = if split[1] == "cxchgweak" { llvm::True } else { llvm::False };
let val = bcx.atomic_cmpxchg(llargs[0], llargs[1], llargs[2], order,
let pair = bcx.atomic_cmpxchg(llargs[0], llargs[1], llargs[2], order,
failorder, weak);
let result = bcx.extract_value(val, 0);
let success = bcx.zext(bcx.extract_value(val, 1), Type::bool(bcx.ccx));
bcx.store(result, bcx.struct_gep(llresult, 0), None);
bcx.store(success, bcx.struct_gep(llresult, 1), None);
let val = bcx.extract_value(pair, 0);
let success = bcx.zext(bcx.extract_value(pair, 1), Type::bool(bcx.ccx));
let dest = result.project_field(bcx, 0);
bcx.store(val, dest.llval, dest.alignment.non_abi());
let dest = result.project_field(bcx, 1);
bcx.store(success, dest.llval, dest.alignment.non_abi());
return;
} else {
return invalid_monomorphization(ty);
@ -589,10 +598,9 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
// destructors, and the contents are SIMD
// etc.
assert!(!bcx.ccx.shared().type_needs_drop(arg_type));
let arg = LvalueRef::new_sized_ty(llarg, arg_type, Alignment::AbiAligned);
let arg = LvalueRef::new_sized(llarg, arg_type, Alignment::AbiAligned);
(0..contents.len()).map(|i| {
let (ptr, align) = arg.trans_field_ptr(bcx, i);
bcx.load(ptr, align.to_align())
arg.project_field(bcx, i).load(bcx).immediate()
}).collect()
}
intrinsics::Type::Pointer(_, Some(ref llvm_elem), _) => {
@ -654,11 +662,9 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
assert!(!flatten);
for i in 0..elems.len() {
let val = bcx.extract_value(val, i);
let lval = LvalueRef::new_sized_ty(llresult, ret_ty,
Alignment::AbiAligned);
let (dest, align) = lval.trans_field_ptr(bcx, i);
bcx.store(val, dest, align.to_align());
let dest = result.project_field(bcx, i);
let val = bcx.extract_value(val, i as u64);
bcx.store(val, dest.llval, dest.alignment.non_abi());
}
return;
}
@ -672,7 +678,10 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
let ptr = bcx.pointercast(llresult, ty.llvm_type(ccx).ptr_to());
bcx.store(llval, ptr, Some(ccx.align_of(ret_ty)));
} else {
store_ty(bcx, llval, llresult, Alignment::AbiAligned, ret_ty);
OperandRef {
val: OperandValue::Immediate(llval),
ty: ret_ty
}.unpack_if_pair(bcx).store(bcx, result);
}
}
}
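
Both the `*_with_overflow` and `cxchg` paths follow the same shape: the intrinsic yields a (value, flag) pair, `extract_value` splits it, and each half is stored through `project_field`. The surface-Rust equivalent of those pairs:

fn main() {
    // Like extract_value(pair, 0) / extract_value(pair, 1) on the
    // result of add_with_overflow.
    let (val, overflow) = 255u8.overflowing_add(1);
    assert_eq!((val, overflow), (0, true));

    use std::sync::atomic::{AtomicU32, Ordering};
    // compare_exchange's Result plays the role of the (old value,
    // success flag) pair produced by the cxchg intrinsics.
    let a = AtomicU32::new(5);
    let res = a.compare_exchange(5, 9, Ordering::SeqCst, Ordering::SeqCst);
    assert_eq!(res, Ok(5));
    assert_eq!(a.load(Ordering::SeqCst), 9);
}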
@ -1071,7 +1080,7 @@ fn generic_simd_intrinsic<'a, 'tcx>(
let indices: Option<Vec<_>> = (0..n)
.map(|i| {
let arg_idx = i;
let val = const_get_elt(vector, i);
let val = const_get_elt(vector, i as u64);
match const_to_opt_u128(val, true) {
None => {
emit_error!("shuffle index #{} is not a constant", arg_idx);

View File

@ -143,7 +143,6 @@ mod partitioning;
mod symbol_names_test;
mod time_graph;
mod trans_item;
mod tvec;
mod type_;
mod type_of;
mod value;

View File

@ -21,7 +21,7 @@ use rustc::ty::layout::HasDataLayout;
use debuginfo;
#[derive(Copy, Clone, Debug)]
pub struct VirtualIndex(usize);
pub struct VirtualIndex(u64);
pub const DESTRUCTOR: VirtualIndex = VirtualIndex(0);
pub const SIZE: VirtualIndex = VirtualIndex(1);
@ -29,14 +29,14 @@ pub const ALIGN: VirtualIndex = VirtualIndex(2);
impl<'a, 'tcx> VirtualIndex {
pub fn from_index(index: usize) -> Self {
VirtualIndex(index + 3)
VirtualIndex(index as u64 + 3)
}
pub fn get_fn(self, bcx: &Builder<'a, 'tcx>, llvtable: ValueRef) -> ValueRef {
// Load the data pointer from the object.
debug!("get_fn({:?}, {:?})", Value(llvtable), self);
let ptr = bcx.load_nonnull(bcx.gepi(llvtable, &[self.0]), None);
let ptr = bcx.load_nonnull(bcx.inbounds_gep(llvtable, &[C_usize(bcx.ccx, self.0)]), None);
// Vtable loads are invariant
bcx.set_invariant_load(ptr);
ptr
@ -47,7 +47,7 @@ impl<'a, 'tcx> VirtualIndex {
debug!("get_int({:?}, {:?})", Value(llvtable), self);
let llvtable = bcx.pointercast(llvtable, Type::isize(bcx.ccx).ptr_to());
let ptr = bcx.load(bcx.gepi(llvtable, &[self.0]), None);
let ptr = bcx.load(bcx.inbounds_gep(llvtable, &[C_usize(bcx.ccx, self.0)]), None);
// Vtable loads are invariant
bcx.set_invariant_load(ptr);
ptr
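
The constants encode the vtable layout this module assumes: slot 0 is the drop glue, slots 1 and 2 are size and align, and trait methods start at slot 3 (hence `from_index` adding 3). A toy table showing the indexing (all values made up):

const DESTRUCTOR: usize = 0;
const SIZE: usize = 1;
const ALIGN: usize = 2;

fn from_index(method: usize) -> usize {
    method + 3
}

fn main() {
    // A fake vtable for a type of size 24, align 8, with two methods.
    let vtable: [usize; 5] = [0xdead, 24, 8, 0x1000, 0x2000];
    assert_eq!(vtable[DESTRUCTOR], 0xdead);
    assert_eq!(vtable[SIZE], 24);
    assert_eq!(vtable[ALIGN], 8);
    assert_eq!(vtable[from_index(1)], 0x2000); // second method
}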

View File

@ -12,11 +12,10 @@ use llvm::{self, ValueRef, BasicBlockRef};
use rustc::middle::lang_items;
use rustc::middle::const_val::{ConstEvalErr, ConstInt, ErrKind};
use rustc::ty::{self, Ty, TypeFoldable};
use rustc::ty::layout::{self, LayoutTyper};
use rustc::ty::layout::LayoutTyper;
use rustc::traits;
use rustc::mir;
use abi::{Abi, FnType, ArgType};
use adt;
use base;
use callee;
use builder::Builder;
@ -24,7 +23,7 @@ use common::{self, C_bool, C_str_slice, C_struct, C_u32, C_undef};
use consts;
use meth;
use monomorphize;
use type_of;
use type_of::{self, LayoutLlvmExt};
use type_::Type;
use syntax::symbol::Symbol;
@ -173,13 +172,8 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
bcx.cleanup_ret(cleanup_pad, None);
} else {
let slot = self.get_personality_slot(&bcx);
let (lp0ptr, align) = slot.trans_field_ptr(&bcx, 0);
let lp0 = bcx.load(lp0ptr, align.to_align());
let (lp1ptr, align) = slot.trans_field_ptr(&bcx, 1);
let lp1 = bcx.load(lp1ptr, align.to_align());
let lp0 = slot.project_field(&bcx, 0).load(&bcx).immediate();
let lp1 = slot.project_field(&bcx, 1).load(&bcx).immediate();
slot.storage_dead(&bcx);
if !bcx.sess().target.target.options.custom_unwind_resume {
@ -240,9 +234,9 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
};
let llslot = match op.val {
Immediate(_) | Pair(..) => {
let llscratch = bcx.alloca(ret.memory_ty(bcx.ccx), "ret", None);
self.store_operand(&bcx, llscratch, None, op);
llscratch
let scratch = LvalueRef::alloca(&bcx, ret.layout.ty, "ret");
op.store(&bcx, scratch);
scratch.llval
}
Ref(llval, align) => {
assert_eq!(align, Alignment::AbiAligned,
@ -257,7 +251,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
} else {
let op = self.trans_consume(&bcx, &mir::Lvalue::Local(mir::RETURN_POINTER));
if let Ref(llval, align) = op.val {
base::load_ty(&bcx, llval, align, op.ty)
bcx.load(llval, align.non_abi())
} else {
op.pack_if_pair(&bcx).immediate()
}
@ -558,8 +552,8 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
ReturnDest::Nothing => {
(C_undef(fn_ty.ret.memory_ty(bcx.ccx).ptr_to()), &llargs[..])
}
ReturnDest::IndirectOperand(dst, _) => (dst.llval, &llargs[..]),
ReturnDest::Store(dst) => (dst, &llargs[..]),
ReturnDest::IndirectOperand(dst, _) |
ReturnDest::Store(dst) => (dst.llval, &llargs[..]),
ReturnDest::DirectOperand(_) =>
bug!("Cannot use direct operand with an intrinsic call")
};
@ -650,21 +644,21 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
let (mut llval, align, by_ref) = match op.val {
Immediate(_) | Pair(..) => {
if arg.is_indirect() || arg.cast.is_some() {
let llscratch = bcx.alloca(arg.memory_ty(bcx.ccx), "arg", None);
self.store_operand(bcx, llscratch, None, op);
(llscratch, Alignment::AbiAligned, true)
let scratch = LvalueRef::alloca(bcx, arg.layout.ty, "arg");
op.store(bcx, scratch);
(scratch.llval, Alignment::AbiAligned, true)
} else {
(op.pack_if_pair(bcx).immediate(), Alignment::AbiAligned, false)
}
}
Ref(llval, align @ Alignment::Packed) if arg.is_indirect() => {
Ref(llval, align @ Alignment::Packed(_)) if arg.is_indirect() => {
// `foo(packed.large_field)`. We can't pass the (unaligned) field directly. I
// think that ATM (Rust 1.16) we only pass temporaries, but we shouldn't
// have scary latent bugs around.
let llscratch = bcx.alloca(arg.memory_ty(bcx.ccx), "arg", None);
base::memcpy_ty(bcx, llscratch, llval, op.ty, align.to_align());
(llscratch, Alignment::AbiAligned, true)
let scratch = LvalueRef::alloca(bcx, arg.layout.ty, "arg");
base::memcpy_ty(bcx, scratch.llval, llval, op.ty, align.non_abi());
(scratch.llval, Alignment::AbiAligned, true)
}
Ref(llval, align) => (llval, align, true)
};
@ -672,14 +666,15 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
if by_ref && !arg.is_indirect() {
// Have to load the argument, maybe while casting it.
if arg.layout.ty == bcx.tcx().types.bool {
// We store bools as i8 so we need to truncate to i1.
llval = bcx.load_range_assert(llval, 0, 2, llvm::False, None);
llval = bcx.trunc(llval, Type::i1(bcx.ccx));
// We store bools as i8 so we need to truncate to i1.
llval = base::to_immediate(bcx, llval, arg.layout.ty);
} else if let Some(ty) = arg.cast {
llval = bcx.load(bcx.pointercast(llval, ty.llvm_type(bcx.ccx).ptr_to()),
align.min_with(Some(arg.layout.align(bcx.ccx))));
(align | Alignment::Packed(arg.layout.align(bcx.ccx)))
.non_abi());
} else {
llval = bcx.load(llval, align.to_align());
llval = bcx.load(llval, align.non_abi());
}
}
@ -705,38 +700,28 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
// Handle both by-ref and immediate tuples.
match tuple.val {
Ref(llval, align) => {
let tuple_ptr = LvalueRef::new_sized(llval, tuple.ty, align);
for (n, &ty) in arg_types.iter().enumerate() {
let ptr = LvalueRef::new_sized_ty(llval, tuple.ty, align);
let (ptr, align) = ptr.trans_field_ptr(bcx, n);
let val = if common::type_is_fat_ptr(bcx.ccx, ty) {
let (lldata, llextra) = base::load_fat_ptr(bcx, ptr, align, ty);
Pair(lldata, llextra)
let field_ptr = tuple_ptr.project_field(bcx, n);
let op = if common::type_is_fat_ptr(bcx.ccx, ty) {
field_ptr.load(bcx)
} else {
// trans_argument will load this if it needs to
Ref(ptr, align)
};
let op = OperandRef {
val,
ty,
OperandRef {
val: Ref(field_ptr.llval, field_ptr.alignment),
ty
}
};
self.trans_argument(bcx, op, llargs, fn_ty, next_idx, llfn, def);
}
}
Immediate(llval) => {
let l = bcx.ccx.layout_of(tuple.ty);
let v = if let layout::Univariant { ref variant, .. } = *l {
variant
} else {
bug!("Not a tuple.");
};
let layout = bcx.ccx.layout_of(tuple.ty);
for (n, &ty) in arg_types.iter().enumerate() {
let mut elem = bcx.extract_value(
llval, adt::struct_llfields_index(v, n));
let mut elem = bcx.extract_value(llval, layout.llvm_field_index(n));
// Truncate bools to i1, if needed
if ty.is_bool() && common::val_ty(elem) != Type::i1(bcx.ccx) {
elem = bcx.trunc(elem, Type::i1(bcx.ccx));
}
elem = base::to_immediate(bcx, elem, ty);
// If the tuple is immediate, the elements are as well
let op = OperandRef {
val: Immediate(elem),
@ -748,11 +733,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
Pair(a, b) => {
let elems = [a, b];
for (n, &ty) in arg_types.iter().enumerate() {
let mut elem = elems[n];
// Truncate bools to i1, if needed
if ty.is_bool() && common::val_ty(elem) != Type::i1(bcx.ccx) {
elem = bcx.trunc(elem, Type::i1(bcx.ccx));
}
let elem = base::to_immediate(bcx, elems[n], ty);
// Pair is always made up of immediates
let op = OperandRef {
val: Immediate(elem),
@ -809,10 +790,10 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
let slot = self.get_personality_slot(&bcx);
slot.storage_live(&bcx);
self.store_operand(&bcx, slot.llval, None, OperandRef {
OperandRef {
val: Pair(bcx.extract_value(lp, 0), bcx.extract_value(lp, 1)),
ty: slot.ty.to_ty(ccx.tcx())
});
}.store(&bcx, slot);
bcx.br(target_bb);
bcx.llbb()
@ -888,7 +869,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
llargs.push(dest.llval);
ReturnDest::Nothing
},
Alignment::Packed => {
Alignment::Packed(_) => {
// Currently, MIR code generation does not create calls
// that store directly to fields of packed structs (in
// fact, the calls it creates write only to temps),
@ -899,7 +880,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
}
}
} else {
ReturnDest::Store(dest.llval)
ReturnDest::Store(dest)
}
}
@ -908,14 +889,14 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
dst: &mir::Lvalue<'tcx>) {
if let mir::Lvalue::Local(index) = *dst {
match self.locals[index] {
LocalRef::Lvalue(lvalue) => self.trans_transmute_into(bcx, src, &lvalue),
LocalRef::Lvalue(lvalue) => self.trans_transmute_into(bcx, src, lvalue),
LocalRef::Operand(None) => {
let lvalue_ty = self.monomorphized_lvalue_ty(dst);
assert!(!lvalue_ty.has_erasable_regions());
let lvalue = LvalueRef::alloca(bcx, lvalue_ty, "transmute_temp");
lvalue.storage_live(bcx);
self.trans_transmute_into(bcx, src, &lvalue);
let op = self.trans_load(bcx, lvalue.llval, lvalue.alignment, lvalue_ty);
self.trans_transmute_into(bcx, src, lvalue);
let op = lvalue.load(bcx);
lvalue.storage_dead(bcx);
self.locals[index] = LocalRef::Operand(Some(op));
}
@ -927,20 +908,21 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
}
} else {
let dst = self.trans_lvalue(bcx, dst);
self.trans_transmute_into(bcx, src, &dst);
self.trans_transmute_into(bcx, src, dst);
}
}
fn trans_transmute_into(&mut self, bcx: &Builder<'a, 'tcx>,
src: &mir::Operand<'tcx>,
dst: &LvalueRef<'tcx>) {
dst: LvalueRef<'tcx>) {
let val = self.trans_operand(bcx, src);
let llty = type_of::type_of(bcx.ccx, val.ty);
let cast_ptr = bcx.pointercast(dst.llval, llty.ptr_to());
let in_type = val.ty;
let out_type = dst.ty.to_ty(bcx.tcx());
let align = bcx.ccx.align_of(in_type).min(bcx.ccx.align_of(out_type));
self.store_operand(bcx, cast_ptr, Some(align), val);
val.store(bcx,
LvalueRef::new_sized(cast_ptr, val.ty, Alignment::Packed(align)));
}
@ -956,7 +938,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
Nothing => (),
Store(dst) => ret_ty.store(bcx, op.immediate(), dst),
IndirectOperand(tmp, index) => {
let op = self.trans_load(bcx, tmp.llval, Alignment::AbiAligned, op.ty);
let op = tmp.load(bcx);
tmp.storage_dead(bcx);
self.locals[index] = LocalRef::Operand(Some(op));
}
@ -965,8 +947,8 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
let op = if ret_ty.cast.is_some() {
let tmp = LvalueRef::alloca(bcx, op.ty, "tmp_ret");
tmp.storage_live(bcx);
ret_ty.store(bcx, op.immediate(), tmp.llval);
let op = self.trans_load(bcx, tmp.llval, tmp.alignment, op.ty);
ret_ty.store(bcx, op.immediate(), tmp);
let op = tmp.load(bcx);
tmp.storage_dead(bcx);
op
} else {
@ -982,7 +964,7 @@ enum ReturnDest<'tcx> {
// Do nothing, the return value is indirect or ignored
Nothing,
// Store the return value to the pointer
Store(ValueRef),
Store(LvalueRef<'tcx>),
// Stores an indirect return value to an operand local lvalue
IndirectOperand(LvalueRef<'tcx>, mir::Local),
// Stores a direct return value to an operand local lvalue

View File

@ -32,7 +32,7 @@ use common::{C_array, C_bool, C_bytes, C_int, C_uint, C_big_integral, C_u32, C_u
use common::{C_null, C_struct, C_str_slice, C_undef, C_usize, C_vector};
use common::const_to_opt_u128;
use consts;
use type_of;
use type_of::{self, LayoutLlvmExt};
use type_::Type;
use value::Value;
@ -117,14 +117,7 @@ impl<'a, 'tcx> Const<'tcx> {
}
fn get_field(&self, ccx: &CrateContext<'a, 'tcx>, i: usize) -> ValueRef {
let layout = ccx.layout_of(self.ty);
let ix = if let layout::Univariant { ref variant, .. } = *layout {
adt::struct_llfields_index(variant, i)
} else {
i
};
const_get_elt(self.llval, ix)
const_get_elt(self.llval, ccx.layout_of(self.ty).llvm_field_index(i))
}
fn get_pair(&self, ccx: &CrateContext<'a, 'tcx>) -> (ValueRef, ValueRef) {
@ -494,7 +487,7 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> {
// Produce an undef instead of a LLVM assertion on OOB.
let len = common::const_to_uint(tr_base.len(self.ccx));
let llelem = if iv < len as u128 {
const_get_elt(base.llval, iv as usize)
const_get_elt(base.llval, iv as u64)
} else {
C_undef(type_of::type_of(self.ccx, projected_ty))
};

View File

@ -10,28 +10,31 @@
use llvm::{self, ValueRef};
use rustc::ty::{self, Ty, TypeFoldable};
use rustc::ty::layout::{self, Align, LayoutTyper};
use rustc::ty::layout::{self, Align, Layout, LayoutTyper};
use rustc::mir;
use rustc::mir::tcx::LvalueTy;
use rustc_data_structures::indexed_vec::Idx;
use abi;
use adt;
use base;
use builder::Builder;
use common::{self, CrateContext, C_usize, C_u8, C_i32, C_int, C_null, val_ty};
use common::{self, CrateContext, C_usize, C_u8, C_u32, C_int, C_null, val_ty};
use consts;
use type_of;
use type_of::{self, LayoutLlvmExt};
use type_::Type;
use value::Value;
use glue;
use std::iter;
use std::ptr;
use std::ops;
use super::{MirContext, LocalRef};
use super::operand::{OperandRef, OperandValue};
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum Alignment {
Packed,
Packed(Align),
AbiAligned,
}
@ -40,31 +43,41 @@ impl ops::BitOr for Alignment {
fn bitor(self, rhs: Self) -> Self {
match (self, rhs) {
(Alignment::Packed, _) => Alignment::Packed,
(Alignment::AbiAligned, a) => a,
(Alignment::Packed(a), Alignment::Packed(b)) => {
Alignment::Packed(a.min(b))
}
(Alignment::Packed(x), _) | (_, Alignment::Packed(x)) => {
Alignment::Packed(x)
}
(Alignment::AbiAligned, Alignment::AbiAligned) => {
Alignment::AbiAligned
}
}
}
}
impl<'a> From<&'a Layout> for Alignment {
fn from(layout: &Layout) -> Self {
let (packed, align) = match *layout {
Layout::UntaggedUnion { ref variants } => (variants.packed, variants.align),
Layout::Univariant { ref variant, .. } => (variant.packed, variant.align),
_ => return Alignment::AbiAligned
};
if packed {
Alignment::Packed(align)
} else {
Alignment::AbiAligned
}
}
}
impl Alignment {
pub fn from_packed(packed: bool) -> Self {
if packed {
Alignment::Packed
} else {
Alignment::AbiAligned
}
}
pub fn to_align(self) -> Option<Align> {
pub fn non_abi(self) -> Option<Align> {
match self {
Alignment::Packed => Some(Align::from_bytes(1, 1).unwrap()),
Alignment::Packed(x) => Some(x),
Alignment::AbiAligned => None,
}
}
pub fn min_with(self, align: Option<Align>) -> Option<Align> {
self.to_align().or(align)
}
}
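
`Packed` now carries the actual alignment instead of hard-coding one byte, and combining two constraints keeps the weaker (smaller) guarantee, with `AbiAligned` as the identity. A standalone sketch of that lattice (plain u64 standing in for `Align`):

use std::ops::BitOr;

#[derive(Copy, Clone, Debug, PartialEq, Eq)]
enum Alignment {
    Packed(u64), // alignment in bytes
    AbiAligned,
}

impl BitOr for Alignment {
    type Output = Self;
    fn bitor(self, rhs: Self) -> Self {
        use Alignment::*;
        match (self, rhs) {
            (Packed(a), Packed(b)) => Packed(a.min(b)),
            (Packed(x), _) | (_, Packed(x)) => Packed(x),
            (AbiAligned, AbiAligned) => AbiAligned,
        }
    }
}

fn main() {
    use Alignment::*;
    assert_eq!(Packed(4) | Packed(1), Packed(1)); // weaker wins
    assert_eq!(AbiAligned | Packed(2), Packed(2));
    assert_eq!(AbiAligned | AbiAligned, AbiAligned);
}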
fn target_sets_discr_via_memset<'a, 'tcx>(bcx: &Builder<'a, 'tcx>) -> bool {
@ -87,13 +100,8 @@ pub struct LvalueRef<'tcx> {
}
impl<'a, 'tcx> LvalueRef<'tcx> {
pub fn new_sized(llval: ValueRef, lvalue_ty: LvalueTy<'tcx>,
alignment: Alignment) -> LvalueRef<'tcx> {
LvalueRef { llval: llval, llextra: ptr::null_mut(), ty: lvalue_ty, alignment: alignment }
}
pub fn new_sized_ty(llval: ValueRef, ty: Ty<'tcx>, alignment: Alignment) -> LvalueRef<'tcx> {
LvalueRef::new_sized(llval, LvalueTy::from_ty(ty), alignment)
pub fn new_sized(llval: ValueRef, ty: Ty<'tcx>, alignment: Alignment) -> LvalueRef<'tcx> {
LvalueRef { llval, llextra: ptr::null_mut(), ty: LvalueTy::from_ty(ty), alignment }
}
pub fn alloca(bcx: &Builder<'a, 'tcx>, ty: Ty<'tcx>, name: &str) -> LvalueRef<'tcx> {
@ -101,7 +109,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> {
let tmp = bcx.alloca(
type_of::type_of(bcx.ccx, ty), name, bcx.ccx.over_align_of(ty));
assert!(!ty.has_param_types());
Self::new_sized_ty(tmp, ty, Alignment::AbiAligned)
Self::new_sized(tmp, ty, Alignment::AbiAligned)
}
pub fn len(&self, ccx: &CrateContext<'a, 'tcx>) -> ValueRef {
@ -122,8 +130,74 @@ impl<'a, 'tcx> LvalueRef<'tcx> {
!self.llextra.is_null()
}
pub fn load(&self, bcx: &Builder<'a, 'tcx>) -> OperandRef<'tcx> {
debug!("LvalueRef::load: {:?}", self);
assert!(!self.has_extra());
let ty = self.ty.to_ty(bcx.tcx());
if common::type_is_zero_size(bcx.ccx, ty) {
return OperandRef::new_zst(bcx.ccx, ty);
}
let val = if common::type_is_fat_ptr(bcx.ccx, ty) {
let data = self.project_field(bcx, abi::FAT_PTR_ADDR);
let lldata = if ty.is_region_ptr() || ty.is_box() {
bcx.load_nonnull(data.llval, data.alignment.non_abi())
} else {
bcx.load(data.llval, data.alignment.non_abi())
};
let extra = self.project_field(bcx, abi::FAT_PTR_EXTRA);
let meta_ty = val_ty(extra.llval);
// If the 'extra' field is a pointer, it's a vtable, so use load_nonnull
// instead
let llextra = if meta_ty.element_type().kind() == llvm::TypeKind::Pointer {
bcx.load_nonnull(extra.llval, extra.alignment.non_abi())
} else {
bcx.load(extra.llval, extra.alignment.non_abi())
};
OperandValue::Pair(lldata, llextra)
} else if common::type_is_imm_pair(bcx.ccx, ty) {
OperandValue::Pair(
self.project_field(bcx, 0).load(bcx).pack_if_pair(bcx).immediate(),
self.project_field(bcx, 1).load(bcx).pack_if_pair(bcx).immediate())
} else if common::type_is_immediate(bcx.ccx, ty) {
let mut const_llval = ptr::null_mut();
unsafe {
let global = llvm::LLVMIsAGlobalVariable(self.llval);
if !global.is_null() && llvm::LLVMIsGlobalConstant(global) == llvm::True {
const_llval = llvm::LLVMGetInitializer(global);
}
}
let llval = if !const_llval.is_null() {
const_llval
} else if ty.is_bool() {
bcx.load_range_assert(self.llval, 0, 2, llvm::False,
self.alignment.non_abi())
} else if ty.is_char() {
// a char is a Unicode codepoint, and so takes values from 0
// to 0x10FFFF inclusive only.
bcx.load_range_assert(self.llval, 0, 0x10FFFF + 1, llvm::False,
self.alignment.non_abi())
} else if ty.is_region_ptr() || ty.is_box() || ty.is_fn() {
bcx.load_nonnull(self.llval, self.alignment.non_abi())
} else {
bcx.load(self.llval, self.alignment.non_abi())
};
OperandValue::Immediate(base::to_immediate(bcx, llval, ty))
} else {
OperandValue::Ref(self.llval, self.alignment)
};
OperandRef { val, ty }
}
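`load` reconstructs a fat pointer as an `OperandValue::Pair` of the data pointer and its metadata; what the metadata is depends on the pointee. A hedged illustration of the two shapes (field order mirrors `FAT_PTR_ADDR`/`FAT_PTR_EXTRA`; the struct names are invented for the example):

#[allow(dead_code)]
#[repr(C)]
struct TraitObjFatPtr {
    data: *const u8,      // FAT_PTR_ADDR
    vtable: *const usize, // FAT_PTR_EXTRA: a vtable pointer, never null,
                          // which is why the code above uses load_nonnull
}

#[repr(C)]
struct SliceFatPtr {
    data: *const u8, // FAT_PTR_ADDR
    len: usize,      // FAT_PTR_EXTRA: the element count
}

fn main() {
    let xs = [1u8, 2, 3];
    let fat = SliceFatPtr { data: xs.as_ptr(), len: xs.len() };
    // Reassembling the pair yields the original slice.
    let back = unsafe { std::slice::from_raw_parts(fat.data, fat.len) };
    assert_eq!(back, &xs[..]);
}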
/// Access a field, at a point when the value's case is known.
pub fn trans_field_ptr(self, bcx: &Builder<'a, 'tcx>, ix: usize) -> (ValueRef, Alignment) {
pub fn project_field(self, bcx: &Builder<'a, 'tcx>, ix: usize) -> LvalueRef<'tcx> {
let ccx = bcx.ccx;
let mut l = ccx.layout_of(self.ty.to_ty(bcx.tcx()));
match self.ty {
@ -132,16 +206,16 @@ impl<'a, 'tcx> LvalueRef<'tcx> {
l = l.for_variant(variant_index)
}
}
let fty = l.field(ccx, ix).ty;
let mut ix = ix;
let st = match *l {
layout::Vector { .. } => {
return (bcx.struct_gep(self.llval, ix), self.alignment);
}
layout::UntaggedUnion { ref variants } => {
let fty = l.field_type(ccx, ix);
let alignment = self.alignment | Alignment::from(&*l);
// Handle all the non-aggregate cases first.
match *l {
layout::UntaggedUnion { .. } => {
let ty = type_of::in_memory_type_of(ccx, fty);
return (bcx.pointercast(self.llval, ty.ptr_to()),
self.alignment | Alignment::from_packed(variants.packed));
return LvalueRef::new_sized(
bcx.pointercast(self.llval, ty.ptr_to()), fty, alignment);
}
layout::RawNullablePointer { nndiscr, .. } |
layout::StructWrappedNullablePointer { nndiscr, .. }
@ -150,58 +224,66 @@ impl<'a, 'tcx> LvalueRef<'tcx> {
// (e.g., Result or Either with () as one side.)
let ty = type_of::type_of(ccx, fty);
assert_eq!(ccx.size_of(fty).bytes(), 0);
return (bcx.pointercast(self.llval, ty.ptr_to()), Alignment::Packed);
return LvalueRef::new_sized(
bcx.pointercast(self.llval, ty.ptr_to()), fty,
Alignment::Packed(Align::from_bytes(1, 1).unwrap()));
}
layout::RawNullablePointer { .. } => {
let ty = type_of::type_of(ccx, fty);
return (bcx.pointercast(self.llval, ty.ptr_to()), self.alignment);
return LvalueRef::new_sized(
bcx.pointercast(self.llval, ty.ptr_to()), fty, alignment);
}
layout::Univariant { ref variant, .. } => variant,
layout::StructWrappedNullablePointer { ref nonnull, .. } => nonnull,
layout::General { ref variants, .. } => {
_ => {}
}
// Adjust the index to account for enum discriminants in variants.
let mut ix = ix;
if let layout::General { .. } = *l {
if l.variant_index.is_some() {
ix += 1;
&variants[l.variant_index.unwrap()]
}
_ => bug!("element access in type without elements: {} represented as {:#?}", l.ty, l)
}
let simple = || {
LvalueRef {
llval: bcx.struct_gep(self.llval, l.llvm_field_index(ix)),
llextra: if !ccx.shared().type_has_metadata(fty) {
ptr::null_mut()
} else {
self.llextra
},
ty: LvalueTy::from_ty(fty),
alignment,
}
};
let alignment = self.alignment | Alignment::from_packed(st.packed);
let ptr_val = if let layout::General { discr, .. } = *l {
let variant_ty = Type::struct_(ccx,
&adt::struct_llfields(ccx, l.ty, l.variant_index.unwrap(), st,
Some(discr.to_ty(bcx.tcx(), false))), st.packed);
bcx.pointercast(self.llval, variant_ty.ptr_to())
} else {
self.llval
// Check whether the variant being used is packed, if applicable.
let is_packed = match (&*l, l.variant_index) {
(&layout::Univariant { ref variant, .. }, _) => variant.packed,
(&layout::StructWrappedNullablePointer { ref nonnull, .. }, _) => nonnull.packed,
(&layout::General { ref variants, .. }, Some(v)) => variants[v].packed,
_ => return simple()
};
// Simple case - we can just GEP the field
// * First field - Always aligned properly
// * Packed struct - There is no alignment padding
// * Field is sized - pointer is properly aligned already
if st.offsets[ix] == layout::Size::from_bytes(0) || st.packed ||
ccx.shared().type_is_sized(fty)
{
return (bcx.struct_gep(
ptr_val, adt::struct_llfields_index(st, ix)), alignment);
if is_packed || ccx.shared().type_is_sized(fty) {
return simple();
}
// If the type of the last field is [T], str or a foreign type, then we don't need to do
// any adjustments
match fty.sty {
ty::TySlice(..) | ty::TyStr | ty::TyForeign(..) => {
return (bcx.struct_gep(
ptr_val, adt::struct_llfields_index(st, ix)), alignment);
}
ty::TySlice(..) | ty::TyStr | ty::TyForeign(..) => return simple(),
_ => ()
}
// There's no metadata available, log the case and just do the GEP.
if !self.has_extra() {
debug!("Unsized field `{}`, of `{:?}` has no metadata for adjustment",
ix, Value(ptr_val));
return (bcx.struct_gep(ptr_val, adt::struct_llfields_index(st, ix)), alignment);
ix, Value(self.llval));
return simple();
}
// We need to get the pointer manually now.
@ -222,7 +304,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> {
let meta = self.llextra;
let offset = st.offsets[ix].bytes();
let offset = l.field_offset(ccx, ix).bytes();
let unaligned_offset = C_usize(ccx, offset);
// Get the alignment of the field
@ -241,21 +323,29 @@ impl<'a, 'tcx> LvalueRef<'tcx> {
debug!("struct_field_ptr: DST field offset: {:?}", Value(offset));
// Cast and adjust pointer
let byte_ptr = bcx.pointercast(ptr_val, Type::i8p(ccx));
let byte_ptr = bcx.pointercast(self.llval, Type::i8p(ccx));
let byte_ptr = bcx.gep(byte_ptr, &[offset]);
// Finally, cast back to the type expected
let ll_fty = type_of::in_memory_type_of(ccx, fty);
debug!("struct_field_ptr: Field type is {:?}", ll_fty);
(bcx.pointercast(byte_ptr, ll_fty.ptr_to()), alignment)
LvalueRef {
llval: bcx.pointercast(byte_ptr, ll_fty.ptr_to()),
llextra: self.llextra,
ty: LvalueTy::from_ty(fty),
alignment,
}
}
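For an unsized tail field the static offset alone is not enough: the code above loads the field's run-time alignment from the metadata and rounds the offset up to it before the byte-wise GEP. A sketch of that rounding, with plain integers standing in for LLVM values:

// Round `offset` up to the next multiple of `align` (a power of two):
// (offset + align - 1) & !(align - 1)
fn round_up_to_align(offset: u64, align: u64) -> u64 {
    debug_assert!(align.is_power_of_two());
    (offset + align - 1) & !(align - 1)
}

fn main() {
    assert_eq!(round_up_to_align(5, 4), 8); // padding inserted
    assert_eq!(round_up_to_align(8, 4), 8); // already aligned
}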
// Double index to account for padding (FieldPath already uses `Struct::memory_index`)
fn gepi_struct_llfields_path(self, bcx: &Builder, discrfield: &layout::FieldPath) -> ValueRef {
let path = discrfield.iter().map(|&i| {
adt::memory_index_to_gep(i as usize)
}).collect::<Vec<_>>();
bcx.gepi(self.llval, &path)
let path = iter::once(C_u32(bcx.ccx, 0)).chain(discrfield[1..].iter().map(|&i| {
let i = adt::memory_index_to_gep(i as u64);
assert_eq!(i as u32 as u64, i);
C_u32(bcx.ccx, i as u32)
})).collect::<Vec<_>>();
bcx.inbounds_gep(self.llval, &path)
}
/// Helper for cases where the discriminant is simply loaded.
@ -274,12 +364,12 @@ impl<'a, 'tcx> LvalueRef<'tcx> {
// rejected by the LLVM verifier (it would mean either an
// empty set, which is impossible, or the entire range of the
// type, which is pointless).
bcx.load(ptr, self.alignment.to_align())
bcx.load(ptr, self.alignment.non_abi())
} else {
// llvm::ConstantRange can deal with ranges that wrap around,
// so an overflow on (max + 1) is fine.
bcx.load_range_assert(ptr, min, max.wrapping_add(1), /* signed: */ llvm::True,
self.alignment.to_align())
self.alignment.non_abi())
}
}
@ -292,18 +382,18 @@ impl<'a, 'tcx> LvalueRef<'tcx> {
self.load_discr(bcx, discr, self.llval, min, max)
}
layout::General { discr, ref variants, .. } => {
let ptr = bcx.struct_gep(self.llval, 0);
self.load_discr(bcx, discr, ptr, 0, variants.len() as u64 - 1)
let ptr = self.project_field(bcx, 0);
self.load_discr(bcx, discr, ptr.llval, 0, variants.len() as u64 - 1)
}
layout::Univariant { .. } | layout::UntaggedUnion { .. } => C_u8(bcx.ccx, 0),
layout::RawNullablePointer { nndiscr, .. } => {
let cmp = if nndiscr == 0 { llvm::IntEQ } else { llvm::IntNE };
let discr = bcx.load(self.llval, self.alignment.to_align());
let discr = bcx.load(self.llval, self.alignment.non_abi());
bcx.icmp(cmp, discr, C_null(val_ty(discr)))
}
layout::StructWrappedNullablePointer { nndiscr, ref discrfield, .. } => {
let llptrptr = self.gepi_struct_llfields_path(bcx, discrfield);
let llptr = bcx.load(llptrptr, self.alignment.to_align());
let llptr = bcx.load(llptrptr, self.alignment.non_abi());
let cmp = if nndiscr == 0 { llvm::IntEQ } else { llvm::IntNE };
bcx.icmp(cmp, llptr, C_null(val_ty(llptr)))
},
@ -324,11 +414,12 @@ impl<'a, 'tcx> LvalueRef<'tcx> {
layout::CEnum { discr, min, max, .. } => {
adt::assert_discr_in_range(min, max, to);
bcx.store(C_int(Type::from_integer(bcx.ccx, discr), to as i64),
self.llval, self.alignment.to_align());
self.llval, self.alignment.non_abi());
}
layout::General { discr, .. } => {
let ptr = self.project_field(bcx, 0);
bcx.store(C_int(Type::from_integer(bcx.ccx, discr), to as i64),
bcx.struct_gep(self.llval, 0), self.alignment.to_align());
ptr.llval, ptr.alignment.non_abi());
}
layout::Univariant { .. }
| layout::UntaggedUnion { .. }
@ -338,7 +429,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> {
layout::RawNullablePointer { nndiscr, .. } => {
if to != nndiscr {
let llptrty = val_ty(self.llval).element_type();
bcx.store(C_null(llptrty), self.llval, self.alignment.to_align());
bcx.store(C_null(llptrty), self.llval, self.alignment.non_abi());
}
}
layout::StructWrappedNullablePointer { nndiscr, ref discrfield, ref nonnull, .. } => {
@ -350,12 +441,12 @@ impl<'a, 'tcx> LvalueRef<'tcx> {
let llptr = bcx.pointercast(self.llval, Type::i8(bcx.ccx).ptr_to());
let fill_byte = C_u8(bcx.ccx, 0);
let size = C_usize(bcx.ccx, nonnull.stride().bytes());
let align = C_i32(bcx.ccx, nonnull.align.abi() as i32);
let align = C_u32(bcx.ccx, nonnull.align.abi() as u32);
base::call_memset(bcx, llptr, fill_byte, size, align, false);
} else {
let llptrptr = self.gepi_struct_llfields_path(bcx, discrfield);
let llptrty = val_ty(llptrptr).element_type();
bcx.store(C_null(llptrty), llptrptr, self.alignment.to_align());
bcx.store(C_null(llptrty), llptrptr, self.alignment.non_abi());
}
}
}
@ -363,13 +454,47 @@ impl<'a, 'tcx> LvalueRef<'tcx> {
}
}
pub fn project_index(&self, bcx: &Builder<'a, 'tcx>, llindex: ValueRef) -> ValueRef {
if let ty::TySlice(_) = self.ty.to_ty(bcx.tcx()).sty {
// Slices already point to the array element type.
bcx.inbounds_gep(self.llval, &[llindex])
pub fn project_index(&self, bcx: &Builder<'a, 'tcx>, llindex: ValueRef)
-> LvalueRef<'tcx> {
let ty = self.ty.to_ty(bcx.tcx());
let (ptr, elem_ty) = match ty.sty {
ty::TySlice(ty) => {
// Slices already point to the array element type.
(bcx.inbounds_gep(self.llval, &[llindex]), ty)
}
ty::TyArray(ty, _) => {
let zero = common::C_usize(bcx.ccx, 0);
(bcx.inbounds_gep(self.llval, &[zero, llindex]), ty)
}
_ => bug!("unexpected type `{}` in LvalueRef::project_index", ty)
};
LvalueRef::new_sized(ptr, elem_ty, self.alignment)
}
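`project_index` needs one GEP index for slices but two for arrays: an array lvalue points at the whole aggregate, so LLVM first steps over zero aggregates and then selects the element. The same distinction in plain Rust pointer arithmetic (a sketch, not the emitted IR):

fn main() {
    let arr: [u32; 4] = [10, 20, 30, 40];

    // An array lvalue is a pointer to the aggregate; gep [0, 2] amounts
    // to reinterpreting it as an element pointer and offsetting by 2.
    let p: *const [u32; 4] = &arr;
    let elem = unsafe { (p as *const u32).add(2) };

    // A slice's data pointer already has element type, so gep [2] suffices.
    let slice: &[u32] = &arr;
    let elem2 = unsafe { slice.as_ptr().add(2) };

    unsafe { assert_eq!(*elem, *elem2) };
}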
pub fn project_downcast(&self, bcx: &Builder<'a, 'tcx>, variant_index: usize)
-> LvalueRef<'tcx> {
let ty = self.ty.to_ty(bcx.tcx());
if let ty::TyAdt(adt_def, substs) = ty.sty {
let mut downcast = *self;
downcast.ty = LvalueTy::Downcast {
adt_def,
substs,
variant_index,
};
// If this is an enum, cast to the appropriate variant struct type.
let layout = bcx.ccx.layout_of(ty).for_variant(variant_index);
if let layout::General { discr, ref variants, .. } = *layout {
let st = &variants[variant_index];
let variant_ty = Type::struct_(bcx.ccx,
&adt::struct_llfields(bcx.ccx, layout, st,
Some(discr.to_ty(bcx.tcx(), false))), st.packed);
downcast.llval = bcx.pointercast(downcast.llval, variant_ty.ptr_to());
}
downcast
} else {
let zero = common::C_usize(bcx.ccx, 0);
bcx.inbounds_gep(self.llval, &[zero, llindex])
bug!("unexpected type `{}` in LvalueRef::project_downcast", ty)
}
}
@ -407,7 +532,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
mir::Lvalue::Local(_) => bug!(), // handled above
mir::Lvalue::Static(box mir::Static { def_id, ty }) => {
LvalueRef::new_sized(consts::get_static(ccx, def_id),
LvalueTy::from_ty(self.monomorphize(&ty)),
self.monomorphize(&ty),
Alignment::AbiAligned)
},
mir::Lvalue::Projection(box mir::Projection {
@ -419,33 +544,23 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
}
mir::Lvalue::Projection(ref projection) => {
let tr_base = self.trans_lvalue(bcx, &projection.base);
let projected_ty = tr_base.ty.projection_ty(tcx, &projection.elem);
let projected_ty = self.monomorphize(&projected_ty);
let align = tr_base.alignment;
let ((llprojected, align), llextra) = match projection.elem {
match projection.elem {
mir::ProjectionElem::Deref => bug!(),
mir::ProjectionElem::Field(ref field, _) => {
let has_metadata = self.ccx.shared()
.type_has_metadata(projected_ty.to_ty(tcx));
let llextra = if !has_metadata {
ptr::null_mut()
} else {
tr_base.llextra
};
(tr_base.trans_field_ptr(bcx, field.index()), llextra)
tr_base.project_field(bcx, field.index())
}
mir::ProjectionElem::Index(index) => {
let index = &mir::Operand::Consume(mir::Lvalue::Local(index));
let index = self.trans_operand(bcx, index);
let llindex = index.immediate();
((tr_base.project_index(bcx, llindex), align), ptr::null_mut())
tr_base.project_index(bcx, llindex)
}
mir::ProjectionElem::ConstantIndex { offset,
from_end: false,
min_length: _ } => {
let lloffset = C_usize(bcx.ccx, offset as u64);
((tr_base.project_index(bcx, lloffset), align), ptr::null_mut())
tr_base.project_index(bcx, lloffset)
}
mir::ProjectionElem::ConstantIndex { offset,
from_end: true,
@ -453,39 +568,34 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
let lloffset = C_usize(bcx.ccx, offset as u64);
let lllen = tr_base.len(bcx.ccx);
let llindex = bcx.sub(lllen, lloffset);
((tr_base.project_index(bcx, llindex), align), ptr::null_mut())
tr_base.project_index(bcx, llindex)
}
mir::ProjectionElem::Subslice { from, to } => {
let llbase = tr_base.project_index(bcx, C_usize(bcx.ccx, from as u64));
let mut subslice = tr_base.project_index(bcx,
C_usize(bcx.ccx, from as u64));
subslice.ty = tr_base.ty.projection_ty(tcx, &projection.elem);
subslice.ty = self.monomorphize(&subslice.ty);
let base_ty = tr_base.ty.to_ty(bcx.tcx());
match base_ty.sty {
match subslice.ty.to_ty(tcx).sty {
ty::TyArray(..) => {
// must cast the lvalue pointer type to the new
// array type (*[%_; new_len]).
let base_ty = self.monomorphized_lvalue_ty(lvalue);
let llbasety = type_of::type_of(bcx.ccx, base_ty).ptr_to();
let llbase = bcx.pointercast(llbase, llbasety);
((llbase, align), ptr::null_mut())
subslice.llval = bcx.pointercast(subslice.llval,
type_of::type_of(bcx.ccx, subslice.ty.to_ty(tcx)).ptr_to())
}
ty::TySlice(..) => {
assert!(tr_base.llextra != ptr::null_mut());
let lllen = bcx.sub(tr_base.llextra,
C_usize(bcx.ccx, (from as u64)+(to as u64)));
((llbase, align), lllen)
assert!(tr_base.has_extra());
subslice.llextra = bcx.sub(tr_base.llextra,
C_usize(bcx.ccx, (from as u64) + (to as u64)));
}
_ => bug!("unexpected type {:?} in Subslice", base_ty)
_ => bug!("unexpected type {:?} in Subslice", subslice.ty)
}
subslice
}
mir::ProjectionElem::Downcast(..) => {
((tr_base.llval, align), tr_base.llextra)
mir::ProjectionElem::Downcast(_, v) => {
tr_base.project_downcast(bcx, v)
}
};
LvalueRef {
llval: llprojected,
llextra,
ty: projected_ty,
alignment: align,
}
}
};


@ -14,7 +14,6 @@ use llvm::debuginfo::DIScope;
use rustc::ty::{self, Ty, TypeFoldable};
use rustc::ty::layout::{self, LayoutTyper};
use rustc::mir::{self, Mir};
use rustc::mir::tcx::LvalueTy;
use rustc::ty::subst::Substs;
use rustc::infer::TransNormalize;
use rustc::session::config::FullDebugInfo;
@ -23,7 +22,7 @@ use builder::Builder;
use common::{self, CrateContext, Funclet};
use debuginfo::{self, declare_local, VariableAccess, VariableKind, FunctionDebugContext};
use monomorphize::Instance;
use abi::{ArgAttribute, FnType};
use abi::{self, ArgAttribute, FnType};
use type_of;
use syntax_pos::{DUMMY_SP, NO_EXPANSION, BytePos, Span};
@ -281,8 +280,7 @@ pub fn trans_mir<'a, 'tcx: 'a>(
if local == mir::RETURN_POINTER && mircx.fn_ty.ret.is_indirect() {
debug!("alloc: {:?} (return pointer) -> lvalue", local);
let llretptr = llvm::get_param(llfn, 0);
LocalRef::Lvalue(LvalueRef::new_sized(llretptr, LvalueTy::from_ty(ty),
Alignment::AbiAligned))
LocalRef::Lvalue(LvalueRef::new_sized(llretptr, ty, Alignment::AbiAligned))
} else if lvalue_locals.contains(local.index()) {
debug!("alloc: {:?} -> lvalue", local);
assert!(!ty.has_erasable_regions());
@ -404,7 +402,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
let lvalue = LvalueRef::alloca(bcx, arg_ty, &name);
for (i, &tupled_arg_ty) in tupled_arg_tys.iter().enumerate() {
let (dst, _) = lvalue.trans_field_ptr(bcx, i);
let dst = lvalue.project_field(bcx, i);
let arg = &mircx.fn_ty.args[idx];
idx += 1;
if common::type_is_fat_ptr(bcx.ccx, tupled_arg_ty) {
@ -412,8 +410,10 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
// they are the two sub-fields of a single aggregate field.
let meta = &mircx.fn_ty.args[idx];
idx += 1;
arg.store_fn_arg(bcx, &mut llarg_idx, base::get_dataptr(bcx, dst));
meta.store_fn_arg(bcx, &mut llarg_idx, base::get_meta(bcx, dst));
arg.store_fn_arg(bcx, &mut llarg_idx,
dst.project_field(bcx, abi::FAT_PTR_ADDR));
meta.store_fn_arg(bcx, &mut llarg_idx,
dst.project_field(bcx, abi::FAT_PTR_EXTRA));
} else {
arg.store_fn_arg(bcx, &mut llarg_idx, dst);
}
@ -441,7 +441,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
let arg = &mircx.fn_ty.args[idx];
idx += 1;
let llval = if arg.is_indirect() {
let lvalue = if arg.is_indirect() {
// Don't copy an indirect argument to an alloca, the caller
// already put it in a temporary alloca and gave it up
// FIXME: lifetimes
@ -451,7 +451,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
let llarg = llvm::get_param(bcx.llfn(), llarg_idx as c_uint);
bcx.set_value_name(llarg, &name);
llarg_idx += 1;
llarg
LvalueRef::new_sized(llarg, arg_ty, Alignment::AbiAligned)
} else if !lvalue_locals.contains(local.index()) &&
arg.cast.is_none() && arg_scope.is_none() {
if arg.is_ignore() {
@ -502,21 +502,21 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
};
return LocalRef::Operand(Some(operand.unpack_if_pair(bcx)));
} else {
let lltemp = LvalueRef::alloca(bcx, arg_ty, &name);
let tmp = LvalueRef::alloca(bcx, arg_ty, &name);
if common::type_is_fat_ptr(bcx.ccx, arg_ty) {
// we pass fat pointers as two words, but we want to
// represent them internally as a pointer to two words,
// so make an alloca to store them in.
let meta = &mircx.fn_ty.args[idx];
idx += 1;
arg.store_fn_arg(bcx, &mut llarg_idx, base::get_dataptr(bcx, lltemp.llval));
meta.store_fn_arg(bcx, &mut llarg_idx, base::get_meta(bcx, lltemp.llval));
arg.store_fn_arg(bcx, &mut llarg_idx, tmp.project_field(bcx, abi::FAT_PTR_ADDR));
meta.store_fn_arg(bcx, &mut llarg_idx, tmp.project_field(bcx, abi::FAT_PTR_EXTRA));
} else {
// otherwise, arg is passed by value, so make a
// temporary and store it there
arg.store_fn_arg(bcx, &mut llarg_idx, lltemp.llval);
arg.store_fn_arg(bcx, &mut llarg_idx, tmp);
}
lltemp.llval
tmp
};
arg_scope.map(|scope| {
// Is this a regular argument?
@ -527,11 +527,11 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
let variable_access = if arg.is_indirect() &&
!arg.attrs.contains(ArgAttribute::ByVal) {
VariableAccess::IndirectVariable {
alloca: llval,
alloca: lvalue.llval,
address_operations: &deref_op,
}
} else {
VariableAccess::DirectVariable { alloca: llval }
VariableAccess::DirectVariable { alloca: lvalue.llval }
};
declare_local(
@ -567,11 +567,12 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
// doesn't actually strip the offset when splitting the closure
// environment into its components so it ends up out of bounds.
let env_ptr = if !env_ref {
let alloc = bcx.alloca(common::val_ty(llval), "__debuginfo_env_ptr", None);
bcx.store(llval, alloc, None);
alloc
let alloc_ty = tcx.mk_mut_ptr(arg_ty);
let alloc = LvalueRef::alloca(bcx, alloc_ty, "__debuginfo_env_ptr");
bcx.store(lvalue.llval, alloc.llval, None);
alloc.llval
} else {
llval
lvalue.llval
};
let layout = bcx.ccx.layout_of(closure_ty);
@ -619,8 +620,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
);
}
});
LocalRef::Lvalue(LvalueRef::new_sized(llval, LvalueTy::from_ty(arg_ty),
Alignment::AbiAligned))
LocalRef::Lvalue(lvalue)
}).collect()
}
@ -628,6 +628,6 @@ mod analyze;
mod block;
mod constant;
pub mod lvalue;
mod operand;
pub mod operand;
mod rvalue;
mod statement;


@ -10,18 +10,16 @@
use llvm::ValueRef;
use rustc::ty::{self, Ty};
use rustc::ty::layout::{Align, Layout, LayoutTyper};
use rustc::ty::layout::LayoutTyper;
use rustc::mir;
use rustc::mir::tcx::LvalueTy;
use rustc_data_structures::indexed_vec::Idx;
use adt;
use base;
use common::{self, CrateContext, C_undef};
use builder::Builder;
use value::Value;
use type_of;
use type_::Type;
use type_of::{self, LayoutLlvmExt};
use std::fmt;
use std::ptr;
@ -49,8 +47,8 @@ pub enum OperandValue {
///
/// NOTE: unless you know a value's type exactly, you should not
/// generate LLVM opcodes acting on it and instead act via methods,
/// to avoid nasty edge cases. In particular, using `Builder.store`
/// directly is sure to cause problems -- use `MirContext.store_operand`
/// to avoid nasty edge cases. In particular, using `Builder::store`
/// directly is sure to cause problems -- use `OperandRef::store`
/// instead.
#[derive(Copy, Clone)]
pub struct OperandRef<'tcx> {
@ -121,15 +119,10 @@ impl<'a, 'tcx> OperandRef<'tcx> {
let llty = type_of::type_of(bcx.ccx, self.ty);
let mut llpair = C_undef(llty);
let elems = [a, b];
let layout = bcx.ccx.layout_of(self.ty);
for i in 0..2 {
let elem = base::from_immediate(bcx, elems[i]);
let layout = bcx.ccx.layout_of(self.ty);
let i = if let Layout::Univariant { ref variant, .. } = *layout {
adt::struct_llfields_index(variant, i)
} else {
i
};
llpair = bcx.insert_value(llpair, elem, i);
llpair = bcx.insert_value(llpair, elem, layout.llvm_field_index(i));
}
self.val = OperandValue::Immediate(llpair);
}
@ -145,72 +138,51 @@ impl<'a, 'tcx> OperandRef<'tcx> {
debug!("Operand::unpack_if_pair: unpacking {:?}", self);
let layout = bcx.ccx.layout_of(self.ty);
let (ix0, ix1) = if let Layout::Univariant { ref variant, .. } = *layout {
(adt::struct_llfields_index(variant, 0),
adt::struct_llfields_index(variant, 1))
} else {
(0, 1)
};
let mut a = bcx.extract_value(llval, ix0);
let mut b = bcx.extract_value(llval, ix1);
let a = bcx.extract_value(llval, layout.llvm_field_index(0));
let a = base::to_immediate(bcx, a, layout.field_type(bcx.ccx, 0));
let pair_fields = common::type_pair_fields(bcx.ccx, self.ty);
if let Some([a_ty, b_ty]) = pair_fields {
if a_ty.is_bool() {
a = bcx.trunc(a, Type::i1(bcx.ccx));
}
if b_ty.is_bool() {
b = bcx.trunc(b, Type::i1(bcx.ccx));
}
}
let b = bcx.extract_value(llval, layout.llvm_field_index(1));
let b = base::to_immediate(bcx, b, layout.field_type(bcx.ccx, 1));
self.val = OperandValue::Pair(a, b);
}
}
self
}
pub fn store(self, bcx: &Builder<'a, 'tcx>, dest: LvalueRef<'tcx>) {
debug!("OperandRef::store: operand={:?}, dest={:?}", self, dest);
// Avoid generating stores of zero-sized values, because the only way to have a zero-sized
// value is through `undef`, and store itself is useless.
if common::type_is_zero_size(bcx.ccx, self.ty) {
return;
}
match self.val {
OperandValue::Ref(r, source_align) =>
base::memcpy_ty(bcx, dest.llval, r, self.ty,
(source_align | dest.alignment).non_abi()),
OperandValue::Immediate(s) => {
bcx.store(base::from_immediate(bcx, s), dest.llval, dest.alignment.non_abi());
}
OperandValue::Pair(a, b) => {
// See comment above about zero-sized values.
let dest_a = dest.project_field(bcx, 0);
if !common::type_is_zero_size(bcx.ccx, dest_a.ty.to_ty(bcx.tcx())) {
let a = base::from_immediate(bcx, a);
bcx.store(a, dest_a.llval, dest_a.alignment.non_abi());
}
let dest_b = dest.project_field(bcx, 1);
if !common::type_is_zero_size(bcx.ccx, dest_b.ty.to_ty(bcx.tcx())) {
let b = base::from_immediate(bcx, b);
bcx.store(b, dest_b.llval, dest_b.alignment.non_abi());
}
}
}
}
}
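`OperandRef::store` thus has exactly three paths: by-ref operands are memcpy'd, immediates take a single store, and pairs store each half into its own projected field (skipping zero-sized halves). A schematic model with raw pointers standing in for the builder; the `store` helper is invented for illustration:

#[derive(Copy, Clone)]
enum OperandValue {
    Ref(*const u8, usize), // source pointer + size in bytes
    Immediate(u64),
    Pair(u64, u64),
}

// Assumes both pair halves are u64-sized; the real code projects the
// destination's actual field types and alignments.
unsafe fn store(val: OperandValue, dest: *mut u64) {
    match val {
        // memcpy_ty in the code above
        OperandValue::Ref(src, size) => {
            std::ptr::copy_nonoverlapping(src, dest as *mut u8, size)
        }
        // a single scalar store
        OperandValue::Immediate(s) => *dest = s,
        // project_field(0) and project_field(1) in the code above
        OperandValue::Pair(a, b) => {
            *dest = a;
            *dest.add(1) = b;
        }
    }
}

fn main() {
    let mut dst = [0u64; 2];
    unsafe { store(OperandValue::Pair(1, 2), dst.as_mut_ptr()) };
    assert_eq!(dst, [1, 2]);
}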
impl<'a, 'tcx> MirContext<'a, 'tcx> {
pub fn trans_load(&mut self,
bcx: &Builder<'a, 'tcx>,
llval: ValueRef,
align: Alignment,
ty: Ty<'tcx>)
-> OperandRef<'tcx>
{
debug!("trans_load: {:?} @ {:?}", Value(llval), ty);
let val = if common::type_is_fat_ptr(bcx.ccx, ty) {
let (lldata, llextra) = base::load_fat_ptr(bcx, llval, align, ty);
OperandValue::Pair(lldata, llextra)
} else if common::type_is_imm_pair(bcx.ccx, ty) {
let (ix0, ix1, f_align) = match *bcx.ccx.layout_of(ty) {
Layout::Univariant { ref variant, .. } => {
(adt::struct_llfields_index(variant, 0),
adt::struct_llfields_index(variant, 1),
Alignment::from_packed(variant.packed) | align)
},
_ => (0, 1, align)
};
let [a_ty, b_ty] = common::type_pair_fields(bcx.ccx, ty).unwrap();
let a_ptr = bcx.struct_gep(llval, ix0);
let b_ptr = bcx.struct_gep(llval, ix1);
OperandValue::Pair(
base::load_ty(bcx, a_ptr, f_align, a_ty),
base::load_ty(bcx, b_ptr, f_align, b_ty)
)
} else if common::type_is_immediate(bcx.ccx, ty) {
OperandValue::Immediate(base::load_ty(bcx, llval, align, ty))
} else {
OperandValue::Ref(llval, align)
};
OperandRef { val: val, ty: ty }
}
pub fn trans_consume(&mut self,
bcx: &Builder<'a, 'tcx>,
lvalue: &mir::Lvalue<'tcx>)
@ -258,9 +230,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
// for most lvalues, to consume them we just load them
// out from their home
let tr_lvalue = self.trans_lvalue(bcx, lvalue);
let ty = tr_lvalue.ty.to_ty(bcx.tcx());
self.trans_load(bcx, tr_lvalue.llval, tr_lvalue.alignment, ty)
self.trans_lvalue(bcx, lvalue).load(bcx)
}
pub fn trans_operand(&mut self,
@ -280,59 +250,11 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
let operand = val.to_operand(bcx.ccx);
if let OperandValue::Ref(ptr, align) = operand.val {
// If this is a OperandValue::Ref to an immediate constant, load it.
self.trans_load(bcx, ptr, align, operand.ty)
LvalueRef::new_sized(ptr, operand.ty, align).load(bcx)
} else {
operand
}
}
}
}
pub fn store_operand(&mut self,
bcx: &Builder<'a, 'tcx>,
lldest: ValueRef,
align: Option<Align>,
operand: OperandRef<'tcx>) {
debug!("store_operand: operand={:?}, align={:?}", operand, align);
// Avoid generating stores of zero-sized values, because the only way to have a zero-sized
// value is through `undef`, and store itself is useless.
if common::type_is_zero_size(bcx.ccx, operand.ty) {
return;
}
match operand.val {
OperandValue::Ref(r, source_align) =>
base::memcpy_ty(bcx, lldest, r, operand.ty,
source_align.min_with(align)),
OperandValue::Immediate(s) => {
bcx.store(base::from_immediate(bcx, s), lldest, align);
}
OperandValue::Pair(a, b) => {
let (ix0, ix1, f_align) = match *bcx.ccx.layout_of(operand.ty) {
Layout::Univariant { ref variant, .. } => {
(adt::struct_llfields_index(variant, 0),
adt::struct_llfields_index(variant, 1),
if variant.packed { Some(variant.align) } else { None })
}
_ => (0, 1, align)
};
let a = base::from_immediate(bcx, a);
let b = base::from_immediate(bcx, b);
// See comment above about zero-sized values.
let (a_zst, b_zst) = common::type_pair_fields(bcx.ccx, operand.ty)
.map_or((false, false), |[a_ty, b_ty]| {
(common::type_is_zero_size(bcx.ccx, a_ty),
common::type_is_zero_size(bcx.ccx, b_ty))
});
if !a_zst {
bcx.store(a, bcx.struct_gep(lldest, ix0), f_align);
}
if !b_zst {
bcx.store(b, bcx.struct_gep(lldest, ix1), f_align);
}
}
}
}
}


@ -12,7 +12,6 @@ use llvm::{self, ValueRef};
use rustc::ty::{self, Ty};
use rustc::ty::cast::{CastTy, IntTy};
use rustc::ty::layout::{Layout, LayoutTyper};
use rustc::mir::tcx::LvalueTy;
use rustc::mir;
use rustc::middle::lang_items::ExchangeMallocFnLangItem;
use rustc_apfloat::{ieee, Float, Status, Round};
@ -25,11 +24,9 @@ use callee;
use common::{self, val_ty};
use common::{C_bool, C_u8, C_i32, C_u32, C_u64, C_null, C_usize, C_uint, C_big_integral};
use consts;
use adt;
use monomorphize;
use type_::Type;
use type_of;
use tvec;
use value::Value;
use super::{MirContext, LocalRef};
@ -52,7 +49,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
let tr_operand = self.trans_operand(&bcx, operand);
// FIXME: consider not copying constants through stack. (fixable by translating
// constants into OperandValue::Ref, why don't we do that yet if we don't?)
self.store_operand(&bcx, dest.llval, dest.alignment.to_align(), tr_operand);
tr_operand.store(&bcx, dest);
bcx
}
@ -63,7 +60,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
// into-coerce of a thin pointer to a fat pointer - just
// use the operand path.
let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue);
self.store_operand(&bcx, dest.llval, dest.alignment.to_align(), temp);
temp.store(&bcx, dest);
return bcx;
}
@ -73,9 +70,9 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
// so the (generic) MIR may not be able to expand it.
let operand = self.trans_operand(&bcx, source);
let operand = operand.pack_if_pair(&bcx);
let llref = match operand.val {
match operand.val {
OperandValue::Pair(..) => bug!(),
OperandValue::Immediate(llval) => {
OperandValue::Immediate(_) => {
// unsize from an immediate structure. We don't
// really need a temporary alloca here, but
// avoiding it would require us to have
@ -84,101 +81,93 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
// important enough for it.
debug!("trans_rvalue: creating ugly alloca");
let scratch = LvalueRef::alloca(&bcx, operand.ty, "__unsize_temp");
base::store_ty(&bcx, llval, scratch.llval, scratch.alignment, operand.ty);
scratch
scratch.storage_live(&bcx);
operand.store(&bcx, scratch);
base::coerce_unsized_into(&bcx, scratch, dest);
scratch.storage_dead(&bcx);
}
OperandValue::Ref(llref, align) => {
LvalueRef::new_sized_ty(llref, operand.ty, align)
let source = LvalueRef::new_sized(llref, operand.ty, align);
base::coerce_unsized_into(&bcx, source, dest);
}
};
base::coerce_unsized_into(&bcx, &llref, &dest);
}
bcx
}
mir::Rvalue::Repeat(ref elem, count) => {
let dest_ty = dest.ty.to_ty(bcx.tcx());
let tr_elem = self.trans_operand(&bcx, elem);
// No need to initialize memory of a zero-sized slice
// Do not generate the loop for zero-sized elements or empty arrays.
let dest_ty = dest.ty.to_ty(bcx.tcx());
if common::type_is_zero_size(bcx.ccx, dest_ty) {
return bcx;
}
let tr_elem = self.trans_operand(&bcx, elem);
let count = count.as_u64();
let count = C_usize(bcx.ccx, count);
let base = base::get_dataptr(&bcx, dest.llval);
let align = dest.alignment.to_align();
let start = dest.project_index(&bcx, C_usize(bcx.ccx, 0)).llval;
if let OperandValue::Immediate(v) = tr_elem.val {
let align = align.unwrap_or_else(|| bcx.ccx.align_of(tr_elem.ty));
let align = dest.alignment.non_abi()
.unwrap_or_else(|| bcx.ccx.align_of(tr_elem.ty));
let align = C_i32(bcx.ccx, align.abi() as i32);
let size = C_usize(bcx.ccx, bcx.ccx.size_of(dest_ty).bytes());
// Use llvm.memset.p0i8.* to initialize all zero arrays
if common::is_const_integral(v) && common::const_to_uint(v) == 0 {
let fill = C_u8(bcx.ccx, 0);
base::call_memset(&bcx, base, fill, size, align, false);
base::call_memset(&bcx, start, fill, size, align, false);
return bcx;
}
// Use llvm.memset.p0i8.* to initialize byte arrays
if common::val_ty(v) == Type::i8(bcx.ccx) {
base::call_memset(&bcx, base, v, size, align, false);
base::call_memset(&bcx, start, v, size, align, false);
return bcx;
}
}
tvec::slice_for_each(&bcx, base, tr_elem.ty, count, |bcx, llslot, loop_bb| {
self.store_operand(bcx, llslot, align, tr_elem);
bcx.br(loop_bb);
})
let count = count.as_u64();
let count = C_usize(bcx.ccx, count);
let end = dest.project_index(&bcx, count).llval;
let header_bcx = bcx.build_sibling_block("repeat_loop_header");
let body_bcx = bcx.build_sibling_block("repeat_loop_body");
let next_bcx = bcx.build_sibling_block("repeat_loop_next");
bcx.br(header_bcx.llbb());
let current = header_bcx.phi(common::val_ty(start), &[start], &[bcx.llbb()]);
let keep_going = header_bcx.icmp(llvm::IntNE, current, end);
header_bcx.cond_br(keep_going, body_bcx.llbb(), next_bcx.llbb());
tr_elem.store(&body_bcx,
LvalueRef::new_sized(current, tr_elem.ty, dest.alignment));
let next = body_bcx.inbounds_gep(current, &[C_usize(bcx.ccx, 1)]);
body_bcx.br(header_bcx.llbb());
header_bcx.add_incoming_to_phi(current, next, body_bcx.llbb());
next_bcx
}
mir::Rvalue::Aggregate(ref kind, ref operands) => {
match **kind {
mir::AggregateKind::Adt(adt_def, variant_index, substs, active_field_index) => {
let (dest, active_field_index) = match **kind {
mir::AggregateKind::Adt(adt_def, variant_index, _, active_field_index) => {
dest.trans_set_discr(&bcx, variant_index);
for (i, operand) in operands.iter().enumerate() {
let op = self.trans_operand(&bcx, operand);
// Do not generate stores and GEPis for zero-sized fields.
if !common::type_is_zero_size(bcx.ccx, op.ty) {
let mut val = LvalueRef::new_sized(
dest.llval, dest.ty, dest.alignment);
let field_index = active_field_index.unwrap_or(i);
val.ty = LvalueTy::Downcast {
adt_def,
substs: self.monomorphize(&substs),
variant_index,
};
let (lldest_i, align) = val.trans_field_ptr(&bcx, field_index);
self.store_operand(&bcx, lldest_i, align.to_align(), op);
}
}
},
_ => {
// If this is a tuple or closure, we need to translate GEP indices.
let layout = bcx.ccx.layout_of(dest.ty.to_ty(bcx.tcx()));
let get_memory_index = |i| {
if let Layout::Univariant { ref variant, .. } = *layout {
adt::struct_llfields_index(variant, i)
} else {
i
}
};
let alignment = dest.alignment;
for (i, operand) in operands.iter().enumerate() {
let op = self.trans_operand(&bcx, operand);
// Do not generate stores and GEPis for zero-sized fields.
if !common::type_is_zero_size(bcx.ccx, op.ty) {
// Note: perhaps this should be StructGep, but
// note that in some cases the values here will
// not be structs but arrays.
let i = get_memory_index(i);
let dest = bcx.gepi(dest.llval, &[0, i]);
self.store_operand(&bcx, dest, alignment.to_align(), op);
}
if adt_def.is_enum() {
(dest.project_downcast(&bcx, variant_index), active_field_index)
} else {
(dest, active_field_index)
}
}
_ => (dest, None)
};
for (i, operand) in operands.iter().enumerate() {
let op = self.trans_operand(&bcx, operand);
// Do not generate stores and GEPis for zero-sized fields.
if !common::type_is_zero_size(bcx.ccx, op.ty) {
let field_index = active_field_index.unwrap_or(i);
op.store(&bcx, dest.project_field(&bcx, field_index));
}
}
bcx
}
@ -186,7 +175,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
_ => {
assert!(self.rvalue_creates_operand(rvalue));
let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue);
self.store_operand(&bcx, dest.llval, dest.alignment.to_align(), temp);
temp.store(&bcx, dest);
bcx
}
}
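The `Rvalue::Repeat` arm above now emits its own `repeat_loop_header`/`repeat_loop_body`/`repeat_loop_next` blocks instead of going through the generic `slice_for_each` helper (deleted below). What those blocks compute, written as ordinary Rust over a mutable slice (a sketch, not the actual IR):

fn repeat<T: Copy>(dest: &mut [T], elem: T) {
    let mut current = dest.as_mut_ptr();
    let end = unsafe { current.add(dest.len()) }; // one past the last slot
    while current != end {                        // repeat_loop_header
        unsafe {
            *current = elem;                      // repeat_loop_body: store...
            current = current.add(1);             // ...then bump the pointer
        }
    }                                             // repeat_loop_next
}

fn main() {
    let mut xs = [0u32; 4];
    repeat(&mut xs, 7);
    assert_eq!(xs, [7; 4]);
}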


@ -1,53 +0,0 @@
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use llvm;
use builder::Builder;
use llvm::{BasicBlockRef, ValueRef};
use common::*;
use rustc::ty::Ty;
pub fn slice_for_each<'a, 'tcx, F>(
bcx: &Builder<'a, 'tcx>,
data_ptr: ValueRef,
unit_ty: Ty<'tcx>,
len: ValueRef,
f: F
) -> Builder<'a, 'tcx> where F: FnOnce(&Builder<'a, 'tcx>, ValueRef, BasicBlockRef) {
// Special-case vectors with elements of size 0 so they don't go out of bounds (#9890)
let zst = type_is_zero_size(bcx.ccx, unit_ty);
let add = |bcx: &Builder, a, b| if zst {
bcx.add(a, b)
} else {
bcx.inbounds_gep(a, &[b])
};
let body_bcx = bcx.build_sibling_block("slice_loop_body");
let header_bcx = bcx.build_sibling_block("slice_loop_header");
let next_bcx = bcx.build_sibling_block("slice_loop_next");
let start = if zst {
C_usize(bcx.ccx, 1)
} else {
data_ptr
};
let end = add(&bcx, start, len);
bcx.br(header_bcx.llbb());
let current = header_bcx.phi(val_ty(start), &[start], &[bcx.llbb()]);
let keep_going = header_bcx.icmp(llvm::IntNE, current, end);
header_bcx.cond_br(keep_going, body_bcx.llbb(), next_bcx.llbb());
let next = add(&body_bcx, current, C_usize(bcx.ccx, 1));
f(&body_bcx, if zst { data_ptr } else { current }, header_bcx.llbb());
header_bcx.add_incoming_to_phi(current, next, body_bcx.llbb());
next_bcx
}


@ -12,7 +12,7 @@ use abi::FnType;
use adt;
use common::*;
use rustc::ty::{self, Ty, TypeFoldable};
use rustc::ty::layout::{Align, LayoutTyper, Size};
use rustc::ty::layout::{Align, Layout, LayoutTyper, Size, TyLayout};
use trans_item::DefPathBasedNames;
use type_::Type;
@ -237,6 +237,50 @@ impl<'a, 'tcx> CrateContext<'a, 'tcx> {
}
}
pub trait LayoutLlvmExt {
fn llvm_field_index(&self, index: usize) -> u64;
}
impl<'tcx> LayoutLlvmExt for TyLayout<'tcx> {
fn llvm_field_index(&self, index: usize) -> u64 {
match **self {
Layout::Scalar { .. } |
Layout::CEnum { .. } |
Layout::UntaggedUnion { .. } |
Layout::RawNullablePointer { .. } => {
bug!("TyLayout::llvm_field_index({:?}): not applicable", self)
}
Layout::Vector { .. } |
Layout::Array { .. } |
Layout::FatPointer { .. } => {
index as u64
}
Layout::Univariant { ref variant, .. } => {
adt::memory_index_to_gep(variant.memory_index[index] as u64)
}
Layout::General { ref variants, .. } => {
if let Some(v) = self.variant_index {
adt::memory_index_to_gep(variants[v].memory_index[index] as u64)
} else {
assert_eq!(index, 0);
index as u64
}
}
Layout::StructWrappedNullablePointer { nndiscr, ref nonnull, .. } => {
if self.variant_index == Some(nndiscr as usize) {
adt::memory_index_to_gep(nonnull.memory_index[index] as u64)
} else {
bug!("TyLayout::llvm_field_index({:?}): not applicable", self)
}
}
}
}
}
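`llvm_field_index` exists because rustc lays struct fields out in memory independently of their source order, so a source-order field index must be translated through `memory_index` (and, via `adt::memory_index_to_gep`, possibly adjusted further for interleaved padding fields) before it can serve as a GEP index. A toy model of the source-to-memory translation, with an invented `memory_index` table:

// memory_index[i] gives the memory-order position of source field i.
fn llvm_field_index(memory_index: &[u32], source_index: usize) -> u64 {
    memory_index[source_index] as u64
}

fn main() {
    // A layout where the second source field was placed first in memory.
    let memory_index = [1u32, 0, 2];
    assert_eq!(llvm_field_index(&memory_index, 0), 1);
    assert_eq!(llvm_field_index(&memory_index, 1), 0);
    assert_eq!(llvm_field_index(&memory_index, 2), 2);
}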
fn llvm_type_name<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> String {
let mut name = String::with_capacity(32);
let printer = DefPathBasedNames::new(cx.tcx(), true, true);


@ -12,7 +12,7 @@ use rustc::hir::def_id::DefId;
use rustc::middle::lang_items::DropInPlaceFnLangItem;
use rustc::traits;
use rustc::ty::adjustment::CustomCoerceUnsized;
use rustc::ty::subst::{Kind, Subst, Substs};
use rustc::ty::subst::{Kind, Subst};
use rustc::ty::{self, Ty, TyCtxt};
pub use rustc::ty::Instance;
@ -125,12 +125,3 @@ pub fn custom_coerce_unsize_info<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
}
}
/// Returns the normalized type of a struct field
pub fn field_ty<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
param_substs: &Substs<'tcx>,
f: &'tcx ty::FieldDef)
-> Ty<'tcx>
{
tcx.fully_normalize_associated_types_in(&f.ty(tcx, param_substs))
}


@ -15,7 +15,7 @@
// CHECK-LABEL: @zero_sized_elem
#[no_mangle]
pub fn zero_sized_elem() {
// CHECK-NOT: br label %slice_loop_header{{.*}}
// CHECK-NOT: br label %repeat_loop_header{{.*}}
// CHECK-NOT: call void @llvm.memset.p0i8
let x = [(); 4];
drop(&x);
@ -24,7 +24,7 @@ pub fn zero_sized_elem() {
// CHECK-LABEL: @zero_len_array
#[no_mangle]
pub fn zero_len_array() {
// CHECK-NOT: br label %slice_loop_header{{.*}}
// CHECK-NOT: br label %repeat_loop_header{{.*}}
// CHECK-NOT: call void @llvm.memset.p0i8
let x = [4; 0];
drop(&x);
@ -34,7 +34,7 @@ pub fn zero_len_array() {
#[no_mangle]
pub fn byte_array() {
// CHECK: call void @llvm.memset.p0i8.i[[WIDTH:[0-9]+]](i8* {{.*}}, i8 7, i[[WIDTH]] 4
// CHECK-NOT: br label %slice_loop_header{{.*}}
// CHECK-NOT: br label %repeat_loop_header{{.*}}
let x = [7u8; 4];
drop(&x);
}
@ -50,7 +50,7 @@ enum Init {
#[no_mangle]
pub fn byte_enum_array() {
// CHECK: call void @llvm.memset.p0i8.i[[WIDTH:[0-9]+]](i8* {{.*}}, i8 {{.*}}, i[[WIDTH]] 4
// CHECK-NOT: br label %slice_loop_header{{.*}}
// CHECK-NOT: br label %repeat_loop_header{{.*}}
let x = [Init::Memset; 4];
drop(&x);
}
@ -59,7 +59,7 @@ pub fn byte_enum_array() {
#[no_mangle]
pub fn zeroed_integer_array() {
// CHECK: call void @llvm.memset.p0i8.i[[WIDTH:[0-9]+]](i8* {{.*}}, i8 0, i[[WIDTH]] 16
// CHECK-NOT: br label %slice_loop_header{{.*}}
// CHECK-NOT: br label %repeat_loop_header{{.*}}
let x = [0u32; 4];
drop(&x);
}
@ -67,7 +67,7 @@ pub fn zeroed_integer_array() {
// CHECK-LABEL: @nonzero_integer_array
#[no_mangle]
pub fn nonzero_integer_array() {
// CHECK: br label %slice_loop_header{{.*}}
// CHECK: br label %repeat_loop_header{{.*}}
// CHECK-NOT: call void @llvm.memset.p0i8
let x = [0x1a_2b_3c_4d_u32; 4];
drop(&x);