[ARM] Follow AAPCS standard for volatile bit-fields access width

This patch resumes the work of D16586.
According to the AAPCS, volatile bit-fields should
be accessed using containers of the width of their
declared type. For example, given:
```
struct S1 {
  short a : 1;
}
```
the field `a` should be accessed using loads and stores of
short width (sizeof(short)), whereas the compiler currently
emits only the minimum required width (char in this case).
However, as discussed in D16586,
widening the access this way could overwrite adjacent
non-bit-field members, which conflicts with the C and C++
memory models by creating data races on memory locations
that are not part of the bit-field, e.g.
```
struct S2 {
  short a;
  int  b : 16;
}
```
Accessing `S2.b` would also access `S2.a`.
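To make the overlap concrete, a sketch of the layout (assuming a 2-byte `short` and 4-byte `int`, with `b` volatile so the rule applies):
```
struct S2 {
  short a;             // bytes 0-1
  volatile int b : 16; // stored in bytes 2-3 of the int-sized unit at offset 0
};
// An int-width (4-byte) access to the container of `b` covers bytes 0-3,
// so widening a volatile store to `b` would also write `a`.
```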

The AAPCS Release 2020Q2
(https://documentation-service.arm.com/static/5efb7fbedbdee951c1ccf186?token=),
section 8.1 Data Types, page 36, "Volatile bit-fields -
preserving number and width of container accesses", has been
updated to avoid conflicts with the C++ memory model.
The note now reads:
```
This ABI does not place any restrictions on the access widths of bit-fields where the container
overlaps with a non-bit-field member or where the container overlaps with any zero length bit-field
placed between two other bit-fields. This is because the C/C++ memory model defines these as being
separate memory locations, which can be accessed by two threads simultaneously. For this reason,
compilers must be permitted to use a narrower memory access width (including splitting the access into
multiple instructions) to avoid writing to a different memory location. For example, in
struct S { int a:24; char b; }; a write to a must not also write to the location occupied by b, this requires at least two
memory accesses in all current Arm architectures. In the same way, in struct S { int a:24; int:0; int b:8; };,
writes to a or b must not overwrite each other.
```
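Written out as compilable declarations (the names `S3`/`S4` are mine; the note calls both `struct S`):
```
struct S3 { int a : 24; char b; };
// The container of `a` must not touch `b`; on current Arm architectures
// writing `a` therefore takes at least two memory accesses.

struct S4 { int a : 24; int : 0; int b : 8; };
// The zero-length bit-field is a barrier: writes to `a` and `b` must not
// overwrite each other.
```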

I've updated the patch D16586 to follow this behavior by verifying that we
only widen a volatile bit-field access when:
 - the wider container does not overlap any non-bit-field member,
 - the access stays within the bounds of the record, and
 - the container does not overlap any zero-length bit-field.
A sketch of qualifying and non-qualifying layouts follows below.
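```
// Qualifies: the short-width container covers only bit-field storage.
struct Widened {
  volatile short f : 3;
  short g : 4;
};

// Does not qualify: an int-width container for `f` would overlap the
// non-bit-field member `c`, so the narrower access is kept.
struct NotWidened {
  volatile int f : 8;
  char c;
};
```
(Illustrative examples, not taken from the patch's tests.)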

The AAPCS requirement that the number of memory accesses be preserved
will be implemented separately by D67399.

Reviewed By: ostannard

Differential Revision: https://reviews.llvm.org/D72932
Ties Stuij 2020-09-30 14:44:27 +01:00
parent aaafe350bb
commit 208987844f
9 changed files with 3524 additions and 106 deletions


@@ -401,12 +401,15 @@ CODEGENOPT(Addrsig, 1, 0)
/// Whether to emit unused static constants.
CODEGENOPT(KeepStaticConsts, 1, 0)
/// Whether to not follow the AAPCS that enforce at least one read before storing to a volatile bitfield
/// Whether to follow the AAPCS enforcing at least one read before storing to a volatile bitfield
CODEGENOPT(ForceAAPCSBitfieldLoad, 1, 0)
/// Assume that by-value parameters do not alias any other values.
CODEGENOPT(PassByValueIsNoAlias, 1, 0)
/// Whether to follow the AAPCS requirement that volatile bit-field accesses
/// use the width of the field's declared type.
CODEGENOPT(AAPCSBitfieldWidth, 1, 1)
#undef CODEGENOPT
#undef ENUM_CODEGENOPT


@@ -2391,9 +2391,15 @@ def mno_neg_immediates: Flag<["-"], "mno-neg-immediates">, Group<m_arm_Features_
def mcmse : Flag<["-"], "mcmse">, Group<m_arm_Features_Group>,
Flags<[DriverOption,CC1Option]>,
HelpText<"Allow use of CMSE (Armv8-M Security Extensions)">;
def ForceAAPCSBitfieldLoad : Flag<["-"], "fAAPCSBitfieldLoad">, Group<m_arm_Features_Group>,
def ForceAAPCSBitfieldLoad : Flag<["-"], "faapcs-bitfield-load">, Group<m_arm_Features_Group>,
Flags<[DriverOption,CC1Option]>,
HelpText<"Follows the AAPCS standard that all volatile bit-field write generates at least one load. (ARM only).">;
def ForceNoAAPCSBitfieldWidth : Flag<["-"], "fno-aapcs-bitfield-width">, Group<m_arm_Features_Group>,
Flags<[DriverOption,CC1Option]>,
HelpText<"Do not follow the AAPCS standard requirement that volatile bit-field width is dictated by the field container type. (ARM only).">;
def AAPCSBitfieldWidth : Flag<["-"], "faapcs-bitfield-width">, Group<m_arm_Features_Group>,
Flags<[DriverOption,CC1Option]>,
HelpText<"Follow the AAPCS standard requirement stating that volatile bit-field width is dictated by the field container type. (ARM only).">;
def mgeneral_regs_only : Flag<["-"], "mgeneral-regs-only">, Group<m_aarch64_Features_Group>,
HelpText<"Generate code which only uses the general purpose registers (AArch64 only)">;


@@ -1934,22 +1934,27 @@ RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV,
llvm::Type *ResLTy = ConvertType(LV.getType());
Address Ptr = LV.getBitFieldAddress();
llvm::Value *Val = Builder.CreateLoad(Ptr, LV.isVolatileQualified(), "bf.load");
llvm::Value *Val =
Builder.CreateLoad(Ptr, LV.isVolatileQualified(), "bf.load");
bool UseVolatile = LV.isVolatileQualified() &&
Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget());
const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset;
const unsigned StorageSize =
UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
if (Info.IsSigned) {
assert(static_cast<unsigned>(Info.Offset + Info.Size) <= Info.StorageSize);
unsigned HighBits = Info.StorageSize - Info.Offset - Info.Size;
assert(static_cast<unsigned>(Offset + Info.Size) <= StorageSize);
unsigned HighBits = StorageSize - Offset - Info.Size;
if (HighBits)
Val = Builder.CreateShl(Val, HighBits, "bf.shl");
if (Info.Offset + HighBits)
Val = Builder.CreateAShr(Val, Info.Offset + HighBits, "bf.ashr");
if (Offset + HighBits)
Val = Builder.CreateAShr(Val, Offset + HighBits, "bf.ashr");
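// Illustrative example (not from the patch): for a signed 5-bit field at
// Offset 3 in a 16-bit container, HighBits = 16 - 3 - 5 = 8; the shl by 8
// moves the field's sign bit to bit 15, and the ashr by 3 + 8 = 11
// sign-extends the value while bringing it down to bit 0.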
} else {
if (Info.Offset)
Val = Builder.CreateLShr(Val, Info.Offset, "bf.lshr");
if (static_cast<unsigned>(Info.Offset) + Info.Size < Info.StorageSize)
Val = Builder.CreateAnd(Val, llvm::APInt::getLowBitsSet(Info.StorageSize,
Info.Size),
"bf.clear");
if (Offset)
Val = Builder.CreateLShr(Val, Offset, "bf.lshr");
if (static_cast<unsigned>(Offset) + Info.Size < StorageSize)
Val = Builder.CreateAnd(
Val, llvm::APInt::getLowBitsSet(StorageSize, Info.Size), "bf.clear");
}
Val = Builder.CreateIntCast(Val, ResLTy, Info.IsSigned, "bf.cast");
EmitScalarRangeCheck(Val, LV.getType(), Loc);
@@ -2151,39 +2156,42 @@ void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
/*isSigned=*/false);
llvm::Value *MaskedVal = SrcVal;
const bool UseVolatile =
CGM.getCodeGenOpts().AAPCSBitfieldWidth && Dst.isVolatileQualified() &&
Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget());
const unsigned StorageSize =
UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset;
// See if there are other bits in the bitfield's storage we'll need to load
// and mask together with source before storing.
if (Info.StorageSize != Info.Size) {
assert(Info.StorageSize > Info.Size && "Invalid bitfield size.");
if (StorageSize != Info.Size) {
assert(StorageSize > Info.Size && "Invalid bitfield size.");
llvm::Value *Val =
Builder.CreateLoad(Ptr, Dst.isVolatileQualified(), "bf.load");
Builder.CreateLoad(Ptr, Dst.isVolatileQualified(), "bf.load");
// Mask the source value as needed.
if (!hasBooleanRepresentation(Dst.getType()))
SrcVal = Builder.CreateAnd(SrcVal,
llvm::APInt::getLowBitsSet(Info.StorageSize,
Info.Size),
"bf.value");
SrcVal = Builder.CreateAnd(
SrcVal, llvm::APInt::getLowBitsSet(StorageSize, Info.Size),
"bf.value");
MaskedVal = SrcVal;
if (Info.Offset)
SrcVal = Builder.CreateShl(SrcVal, Info.Offset, "bf.shl");
if (Offset)
SrcVal = Builder.CreateShl(SrcVal, Offset, "bf.shl");
// Mask out the original value.
Val = Builder.CreateAnd(Val,
~llvm::APInt::getBitsSet(Info.StorageSize,
Info.Offset,
Info.Offset + Info.Size),
"bf.clear");
Val = Builder.CreateAnd(
Val, ~llvm::APInt::getBitsSet(StorageSize, Offset, Offset + Info.Size),
"bf.clear");
// Or together the unchanged values and the source value.
SrcVal = Builder.CreateOr(Val, SrcVal, "bf.set");
} else {
assert(Info.Offset == 0);
assert(Offset == 0);
// According to the AAPCS:
// When a volatile bit-field is written, and its container does not overlap
// with any non-bit-field member, its container must be read exactly once and
// written exactly once using the access width appropriate to the type of the
// container. The two accesses are not atomic.
// with any non-bit-field member, its container must be read exactly once
// and written exactly once using the access width appropriate to the type
// of the container. The two accesses are not atomic.
if (Dst.isVolatileQualified() && isAAPCS(CGM.getTarget()) &&
CGM.getCodeGenOpts().ForceAAPCSBitfieldLoad)
Builder.CreateLoad(Ptr, true, "bf.load");
@@ -2198,8 +2206,8 @@ void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
// Sign extend the value if needed.
if (Info.IsSigned) {
assert(Info.Size <= Info.StorageSize);
unsigned HighBits = Info.StorageSize - Info.Size;
assert(Info.Size <= StorageSize);
unsigned HighBits = StorageSize - Info.Size;
if (HighBits) {
ResultVal = Builder.CreateShl(ResultVal, HighBits, "bf.result.shl");
ResultVal = Builder.CreateAShr(ResultVal, HighBits, "bf.result.ashr");
@@ -4211,32 +4219,45 @@ LValue CodeGenFunction::EmitLValueForField(LValue base,
if (field->isBitField()) {
const CGRecordLayout &RL =
CGM.getTypes().getCGRecordLayout(field->getParent());
CGM.getTypes().getCGRecordLayout(field->getParent());
const CGBitFieldInfo &Info = RL.getBitFieldInfo(field);
const bool UseVolatile = isAAPCS(CGM.getTarget()) &&
CGM.getCodeGenOpts().AAPCSBitfieldWidth &&
Info.VolatileStorageSize != 0 &&
field->getType()
.withCVRQualifiers(base.getVRQualifiers())
.isVolatileQualified();
Address Addr = base.getAddress(*this);
unsigned Idx = RL.getLLVMFieldNo(field);
const RecordDecl *rec = field->getParent();
if (!IsInPreservedAIRegion &&
(!getDebugInfo() || !rec->hasAttr<BPFPreserveAccessIndexAttr>())) {
if (Idx != 0)
// For structs, we GEP to the field that the record layout suggests.
Addr = Builder.CreateStructGEP(Addr, Idx, field->getName());
} else {
llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateRecordType(
getContext().getRecordType(rec), rec->getLocation());
Addr = Builder.CreatePreserveStructAccessIndex(Addr, Idx,
getDebugInfoFIndex(rec, field->getFieldIndex()),
DbgInfo);
if (!UseVolatile) {
if (!IsInPreservedAIRegion &&
(!getDebugInfo() || !rec->hasAttr<BPFPreserveAccessIndexAttr>())) {
if (Idx != 0)
// For structs, we GEP to the field that the record layout suggests.
Addr = Builder.CreateStructGEP(Addr, Idx, field->getName());
} else {
llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateRecordType(
getContext().getRecordType(rec), rec->getLocation());
Addr = Builder.CreatePreserveStructAccessIndex(
Addr, Idx, getDebugInfoFIndex(rec, field->getFieldIndex()),
DbgInfo);
}
}
const unsigned SS =
UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
// Get the access type.
llvm::Type *FieldIntTy =
llvm::Type::getIntNTy(getLLVMContext(), Info.StorageSize);
llvm::Type *FieldIntTy = llvm::Type::getIntNTy(getLLVMContext(), SS);
if (Addr.getElementType() != FieldIntTy)
Addr = Builder.CreateElementBitCast(Addr, FieldIntTy);
if (UseVolatile) {
const unsigned VolatileOffset = Info.VolatileStorageOffset.getQuantity();
if (VolatileOffset)
Addr = Builder.CreateConstInBoundsGEP(Addr, VolatileOffset);
}
QualType fieldType =
field->getType().withCVRQualifiers(base.getVRQualifiers());
field->getType().withCVRQualifiers(base.getVRQualifiers());
// TODO: Support TBAA for bit fields.
LValueBaseInfo FieldBaseInfo(BaseInfo.getAlignmentSource());
return LValue::MakeBitfield(Addr, Info, fieldType, FieldBaseInfo,


@@ -46,7 +46,7 @@ namespace CodeGen {
/// };
///
/// This will end up as the following LLVM type. The first array is the
/// bitfield, and the second is the padding out to a 4-byte alignmnet.
/// bitfield, and the second is the padding out to a 4-byte alignment.
///
/// %t = type { i8, i8, i8, i8, i8, [3 x i8] }
///
@@ -80,8 +80,21 @@ struct CGBitFieldInfo {
/// The offset of the bitfield storage from the start of the struct.
CharUnits StorageOffset;
/// The offset within a contiguous run of bitfields that are represented as a
/// single "field" within the LLVM struct type, taking into account the AAPCS
/// rules for volatile bitfields. This offset is in bits.
unsigned VolatileOffset : 16;
/// The storage size in bits which should be used when accessing this
/// bitfield.
unsigned VolatileStorageSize;
/// The offset of the bitfield storage from the start of the struct.
CharUnits VolatileStorageOffset;
CGBitFieldInfo()
: Offset(), Size(), IsSigned(), StorageSize(), StorageOffset() {}
: Offset(), Size(), IsSigned(), StorageSize(), StorageOffset(),
VolatileOffset(), VolatileStorageSize(), VolatileStorageOffset() {}
CGBitFieldInfo(unsigned Offset, unsigned Size, bool IsSigned,
unsigned StorageSize, CharUnits StorageOffset)


@@ -109,6 +109,14 @@ struct CGRecordLowering {
D->isMsStruct(Context);
}
/// Helper function to check if we are targeting AAPCS.
bool isAAPCS() const {
return Context.getTargetInfo().getABI().startswith("aapcs");
}
/// Helper function to check if the target machine is BigEndian.
bool isBE() const { return Context.getTargetInfo().isBigEndian(); }
/// The Itanium base layout rule allows virtual bases to overlap
/// other bases, which complicates layout in specific ways.
///
@@ -172,7 +180,8 @@ struct CGRecordLowering {
void lowerUnion();
void accumulateFields();
void accumulateBitFields(RecordDecl::field_iterator Field,
RecordDecl::field_iterator FieldEnd);
RecordDecl::field_iterator FieldEnd);
void computeVolatileBitfields();
void accumulateBases();
void accumulateVPtrs();
void accumulateVBases();
@@ -237,6 +246,10 @@ void CGRecordLowering::setBitFieldInfo(
// least-significant-bit.
if (DataLayout.isBigEndian())
Info.Offset = Info.StorageSize - (Info.Offset + Info.Size);
Info.VolatileStorageSize = 0;
Info.VolatileOffset = 0;
Info.VolatileStorageOffset = CharUnits::Zero();
}
void CGRecordLowering::lower(bool NVBaseType) {
@@ -261,15 +274,21 @@ void CGRecordLowering::lower(bool NVBaseType) {
// 8) Format the complete list of members in a way that can be consumed by
// CodeGenTypes::ComputeRecordLayout.
CharUnits Size = NVBaseType ? Layout.getNonVirtualSize() : Layout.getSize();
if (D->isUnion())
return lowerUnion();
if (D->isUnion()) {
lowerUnion();
computeVolatileBitfields();
return;
}
accumulateFields();
// RD implies C++.
if (RD) {
accumulateVPtrs();
accumulateBases();
if (Members.empty())
return appendPaddingBytes(Size);
if (Members.empty()) {
appendPaddingBytes(Size);
computeVolatileBitfields();
return;
}
if (!NVBaseType)
accumulateVBases();
}
@@ -281,6 +300,7 @@ void CGRecordLowering::lower(bool NVBaseType) {
Members.pop_back();
calculateZeroInit();
fillOutputFields();
computeVolatileBitfields();
}
void CGRecordLowering::lowerUnion() {
@@ -418,9 +438,9 @@ CGRecordLowering::accumulateBitFields(RecordDecl::field_iterator Field,
if (OffsetInRecord < 8 || !llvm::isPowerOf2_64(OffsetInRecord) ||
!DataLayout.fitsInLegalInteger(OffsetInRecord))
return false;
// Make sure StartBitOffset is natually aligned if it is treated as an
// Make sure StartBitOffset is naturally aligned if it is treated as an
// IType integer.
if (StartBitOffset %
if (StartBitOffset %
Context.toBits(getAlignment(getIntNType(OffsetInRecord))) !=
0)
return false;
@@ -503,6 +523,123 @@ void CGRecordLowering::accumulateBases() {
}
}
/// The AAPCS specifies that, when possible, volatile bit-fields should
/// be accessed using containers of the width of their declared type:
/// When a volatile bit-field is read, and its container does not overlap with
/// any non-bit-field member or any zero length bit-field member, its container
/// must be read exactly once using the access width appropriate to the type of
/// the container. When a volatile bit-field is written, and its container does
/// not overlap with any non-bit-field member or any zero-length bit-field
/// member, its container must be read exactly once and written exactly once
/// using the access width appropriate to the type of the container. The two
/// accesses are not atomic.
///
/// Enforcing the width restriction can be disabled using
/// -fno-aapcs-bitfield-width.
void CGRecordLowering::computeVolatileBitfields() {
if (!isAAPCS() || !Types.getCodeGenOpts().AAPCSBitfieldWidth)
return;
for (auto &I : BitFields) {
const FieldDecl *Field = I.first;
CGBitFieldInfo &Info = I.second;
llvm::Type *ResLTy = Types.ConvertTypeForMem(Field->getType());
// If the record alignment is less than the type width, we can't enforce an
// aligned load; bail out.
if ((uint64_t)(Context.toBits(Layout.getAlignment())) <
ResLTy->getPrimitiveSizeInBits())
continue;
// CGRecordLowering::setBitFieldInfo() pre-adjusts the bit-field offsets
// for big-endian targets, but it assumes a container of width
// Info.StorageSize. Since AAPCS uses a different container size (width
// of the type), we first undo that calculation here and redo it once
// the bit-field offset within the new container is calculated.
const unsigned OldOffset =
isBE() ? Info.StorageSize - (Info.Offset + Info.Size) : Info.Offset;
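// Illustrative example (not from the patch): with Size = 4, StorageSize = 16
// and a little-endian offset of 0, setBitFieldInfo stored
// Offset = 16 - (0 + 4) = 12 on a big-endian target; the expression above
// recovers OldOffset = 16 - (12 + 4) = 0.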
// Offset to the bit-field from the beginning of the struct.
const unsigned AbsoluteOffset =
Context.toBits(Info.StorageOffset) + OldOffset;
// Container size is the width of the bit-field type.
const unsigned StorageSize = ResLTy->getPrimitiveSizeInBits();
// Nothing to do if the access uses the desired
// container width and is naturally aligned.
if (Info.StorageSize == StorageSize && (OldOffset % StorageSize == 0))
continue;
// Offset within the container.
unsigned Offset = AbsoluteOffset & (StorageSize - 1);
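// Illustrative example (not from the patch): with AbsoluteOffset = 40 and a
// 32-bit container, Offset = 40 & 31 = 8; the mask is correct because the
// container width is a power of two.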
// Bail out if an aligned load of the container cannot cover the entire
// bit-field. This can happen, for example, if the bit-field is part of a
// packed struct. The AAPCS does not define access rules for such cases, so
// we let clang follow its own rules.
if (Offset + Info.Size > StorageSize)
continue;
// Re-adjust offsets for big-endian targets.
if (isBE())
Offset = StorageSize - (Offset + Info.Size);
const CharUnits StorageOffset =
Context.toCharUnitsFromBits(AbsoluteOffset & ~(StorageSize - 1));
const CharUnits End = StorageOffset +
Context.toCharUnitsFromBits(StorageSize) -
CharUnits::One();
const ASTRecordLayout &Layout =
Context.getASTRecordLayout(Field->getParent());
// If the widened access would run past the end of the record, bail out.
const CharUnits RecordSize = Layout.getSize();
if (End >= RecordSize)
continue;
// Bail out if performing this load would access non-bit-field members.
bool Conflict = false;
for (const auto *F : D->fields()) {
// Allow overlap with non-zero-length bit-fields.
if (F->isBitField() && !F->isZeroLengthBitField(Context))
continue;
const CharUnits FOffset = Context.toCharUnitsFromBits(
Layout.getFieldOffset(F->getFieldIndex()));
// As C11 defines, a zero-sized bit-field acts as a barrier, so the
// fields before and after it are free of data races.
// The AAPCS acknowledges this and imposes no restrictions when the
// natural container overlaps a zero-length bit-field.
if (F->isZeroLengthBitField(Context)) {
if (End > FOffset && StorageOffset < FOffset) {
Conflict = true;
break;
}
}
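// Illustrative example (not from the patch): a container spanning bytes
// 0..3 with a zero-length bit-field at byte offset 2 gives End (3) >
// FOffset (2) and StorageOffset (0) < FOffset (2), so the widened access
// would straddle the barrier and is rejected.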
const CharUnits FEnd =
FOffset +
Context.toCharUnitsFromBits(
Types.ConvertTypeForMem(F->getType())->getPrimitiveSizeInBits()) -
CharUnits::One();
// If no overlap, continue.
if (End < FOffset || FEnd < StorageOffset)
continue;
// The desired load overlaps a non-bit-field member, bail out.
Conflict = true;
break;
}
if (Conflict)
continue;
// Write the new bit-field access parameters.
// Since the storage offset is now expressed as a number of elements from
// the start of the structure, divide the byte offset by the element size.
Info.VolatileStorageOffset =
StorageOffset / Context.toCharUnitsFromBits(StorageSize).getQuantity();
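// Illustrative example (not from the patch): a StorageOffset of 8 bytes
// with a 32-bit container yields element index 8 / 4 = 2, matching the
// element-typed GEP later emitted in EmitLValueForField.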
Info.VolatileStorageSize = StorageSize;
Info.VolatileOffset = Offset;
}
}
void CGRecordLowering::accumulateVPtrs() {
if (Layout.hasOwnVFPtr())
Members.push_back(MemberInfo(CharUnits::Zero(), MemberInfo::VFPtr,
@@ -848,8 +985,10 @@ CodeGenTypes::ComputeRecordLayout(const RecordDecl *D, llvm::StructType *Ty) {
assert(Info.StorageSize <= SL->getSizeInBits() &&
"Union not large enough for bitfield storage");
} else {
assert(Info.StorageSize ==
getDataLayout().getTypeAllocSizeInBits(ElementTy) &&
assert((Info.StorageSize ==
getDataLayout().getTypeAllocSizeInBits(ElementTy) ||
Info.VolatileStorageSize ==
getDataLayout().getTypeAllocSizeInBits(ElementTy)) &&
"Storage size does not match the element type size");
}
assert(Info.Size > 0 && "Empty bitfield!");
@@ -897,11 +1036,12 @@ LLVM_DUMP_METHOD void CGRecordLayout::dump() const {
void CGBitFieldInfo::print(raw_ostream &OS) const {
OS << "<CGBitFieldInfo"
<< " Offset:" << Offset
<< " Size:" << Size
<< " IsSigned:" << IsSigned
<< " Offset:" << Offset << " Size:" << Size << " IsSigned:" << IsSigned
<< " StorageSize:" << StorageSize
<< " StorageOffset:" << StorageOffset.getQuantity() << ">";
<< " StorageOffset:" << StorageOffset.getQuantity()
<< " VolatileOffset:" << VolatileOffset
<< " VolatileStorageSize:" << VolatileStorageSize
<< " VolatileStorageOffset:" << VolatileStorageOffset.getQuantity() << ">";
}
LLVM_DUMP_METHOD void CGBitFieldInfo::dump() const {


@@ -1464,8 +1464,11 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
std::string(Args.getLastArgValue(OPT_fsymbol_partition_EQ));
Opts.ForceAAPCSBitfieldLoad = Args.hasArg(OPT_ForceAAPCSBitfieldLoad);
Opts.AAPCSBitfieldWidth =
Args.hasFlag(OPT_AAPCSBitfieldWidth, OPT_ForceNoAAPCSBitfieldWidth, true);
Opts.PassByValueIsNoAlias = Args.hasArg(OPT_fpass_by_value_is_noalias);
return Success;
}

File diff suppressed because it is too large.


@@ -14,7 +14,7 @@
// CHECK-RECORD: LLVMType:%struct.s0 = type { [3 x i8] }
// CHECK-RECORD: IsZeroInitializable:1
// CHECK-RECORD: BitFields:[
// CHECK-RECORD: <CGBitFieldInfo Offset:0 Size:24 IsSigned:1 StorageSize:24 StorageOffset:0>
// CHECK-RECORD: <CGBitFieldInfo Offset:0 Size:24 IsSigned:1 StorageSize:24 StorageOffset:0
struct __attribute((packed)) s0 {
int f0 : 24;
};
@@ -54,8 +54,8 @@ unsigned long long test_0() {
// CHECK-RECORD: LLVMType:%struct.s1 = type { [3 x i8] }
// CHECK-RECORD: IsZeroInitializable:1
// CHECK-RECORD: BitFields:[
// CHECK-RECORD: <CGBitFieldInfo Offset:0 Size:10 IsSigned:1 StorageSize:24 StorageOffset:0>
// CHECK-RECORD: <CGBitFieldInfo Offset:10 Size:10 IsSigned:1 StorageSize:24 StorageOffset:0>
// CHECK-RECORD: <CGBitFieldInfo Offset:0 Size:10 IsSigned:1 StorageSize:24 StorageOffset:0
// CHECK-RECORD: <CGBitFieldInfo Offset:10 Size:10 IsSigned:1 StorageSize:24 StorageOffset:0
#pragma pack(push)
#pragma pack(1)
@@ -102,7 +102,7 @@ unsigned long long test_1() {
// CHECK-RECORD: LLVMType:%union.u2 = type { i8 }
// CHECK-RECORD: IsZeroInitializable:1
// CHECK-RECORD: BitFields:[
// CHECK-RECORD: <CGBitFieldInfo Offset:0 Size:3 IsSigned:0 StorageSize:8 StorageOffset:0>
// CHECK-RECORD: <CGBitFieldInfo Offset:0 Size:3 IsSigned:0 StorageSize:8 StorageOffset:0
union __attribute__((packed)) u2 {
unsigned long long f0 : 3;
@@ -274,8 +274,8 @@ _Bool test_6() {
// CHECK-RECORD: LLVMType:%struct.s7 = type { i32, i32, i32, i8, i32, [12 x i8] }
// CHECK-RECORD: IsZeroInitializable:1
// CHECK-RECORD: BitFields:[
// CHECK-RECORD: <CGBitFieldInfo Offset:0 Size:5 IsSigned:1 StorageSize:8 StorageOffset:12>
// CHECK-RECORD: <CGBitFieldInfo Offset:0 Size:29 IsSigned:1 StorageSize:32 StorageOffset:16>
// CHECK-RECORD: <CGBitFieldInfo Offset:0 Size:5 IsSigned:1 StorageSize:8 StorageOffset:12
// CHECK-RECORD: <CGBitFieldInfo Offset:0 Size:29 IsSigned:1 StorageSize:32 StorageOffset:16
struct __attribute__((aligned(16))) s7 {
int a, b, c;


@@ -1,4 +1,5 @@
// RUN: %clang_cc1 -triple=%itanium_abi_triple -emit-llvm < %s | FileCheck %s -check-prefix CHECK -check-prefix CHECK-IT
// RUN: %clang_cc1 -triple=aarch64-unknown-linux-gnu -emit-llvm < %s | FileCheck %s -check-prefix CHECK -check-prefixes CHECK-IT,CHECK-IT-ARM
// RUN: %clang_cc1 -triple=x86_64-unknown-linux-gnu -emit-llvm < %s | FileCheck %s -check-prefix CHECK -check-prefixes CHECK-IT,CHECK-IT-OTHER
// RUN: %clang_cc1 -triple=%ms_abi_triple -emit-llvm < %s | FileCheck %s -check-prefix CHECK -check-prefix CHECK-MS
int S;
@@ -88,7 +89,8 @@ int main() {
// CHECK-MS: load i32, i32* getelementptr {{.*}} @BF
// CHECK: store i32 {{.*}}, i32* [[I]]
i=vBF.x;
// CHECK-IT: load volatile i8, i8* getelementptr {{.*}} @vBF
// CHECK-IT-OTHER: load volatile i8, i8* getelementptr {{.*}} @vBF
// CHECK-IT-ARM: load volatile i32, i32* bitcast {{.*}} @vBF
// CHECK-MS: load volatile i32, i32* getelementptr {{.*}} @vBF
// CHECK: store i32 {{.*}}, i32* [[I]]
i=V[3];
@@ -163,9 +165,11 @@ int main() {
// CHECK-MS: store i32 {{.*}}, i32* getelementptr {{.*}} @BF
vBF.x=i;
// CHECK: load i32, i32* [[I]]
// CHECK-IT: load volatile i8, i8* getelementptr {{.*}} @vBF
// CHECK-IT-OTHER: load volatile i8, i8* getelementptr {{.*}} @vBF
// CHECK-IT-ARM: load volatile i32, i32* bitcast {{.*}} @vBF
// CHECK-MS: load volatile i32, i32* getelementptr {{.*}} @vBF
// CHECK-IT: store volatile i8 {{.*}}, i8* getelementptr {{.*}} @vBF
// CHECK-IT-OTHER: store volatile i8 {{.*}}, i8* getelementptr {{.*}} @vBF
// CHECK-IT-ARM: store volatile i32 {{.*}}, i32* bitcast {{.*}} @vBF
// CHECK-MS: store volatile i32 {{.*}}, i32* getelementptr {{.*}} @vBF
V[3]=i;
// CHECK: load i32, i32* [[I]]