hanchenye-llvm-project/lld/ELF/InputSection.h

//===- InputSection.h -------------------------------------------*- C++ -*-===//
//
// The LLVM Linker
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLD_ELF_INPUT_SECTION_H
#define LLD_ELF_INPUT_SECTION_H
#include "Config.h"
#include "Relocations.h"
#include "Thunks.h"
#include "lld/Core/LLVM.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/Object/ELF.h"
namespace lld {
namespace elf {
class DefinedCommon;
class SymbolBody;
struct SectionPiece;
template <class ELFT> class ICF;
template <class ELFT> class DefinedRegular;
template <class ELFT> class ObjectFile;
template <class ELFT> class OutputSection;
class OutputSectionBase;
// We need a non-templated input section class so that the linker script
// parser can store symbol layout information without the ELFT template
// parameter. For each scripted output section symbol we store a pointer to
// the preceding InputSectionData object, or nullptr if the symbol should be
// placed at the very beginning of the output section.
class InputSectionData {
public:
enum Kind { Regular, EHFrame, Merge };
// The garbage collector sets sections' Live bits.
// If GC is disabled, all sections are considered live by default.
InputSectionData(Kind SectionKind, StringRef Name, ArrayRef<uint8_t> Data,
bool Compressed, bool Live)
: SectionKind(SectionKind), Live(Live), Compressed(Compressed),
Name(Name), Data(Data) {}
private:
unsigned SectionKind : 3;
public:
Kind kind() const { return (Kind)SectionKind; }
unsigned Live : 1; // for garbage collection
unsigned Compressed : 1;
uint32_t Alignment;
StringRef Name;
ArrayRef<uint8_t> Data;
template <typename T> llvm::ArrayRef<T> getDataAs() const {
size_t S = Data.size();
assert(S % sizeof(T) == 0);
return llvm::makeArrayRef<T>((const T *)Data.data(), S / sizeof(T));
}
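// A minimal usage sketch of getDataAs (illustrative, not part of the original
// header); Sec is a hypothetical pointer to a section whose contents form a
// table of 32-bit words:
//
//   ArrayRef<uint32_t> Words = Sec->getDataAs<uint32_t>();
//   for (uint32_t W : Words) {
//     // process W
//   }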
// If a section is compressed, this has the uncompressed section data.
std::unique_ptr<uint8_t[]> UncompressedData;
std::vector<Relocation> Relocations;
};
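// Since kind() identifies the concrete section type, section objects can be
// dispatched with LLVM-style RTTI through the classof() functions declared on
// the subclasses below. A minimal sketch (illustrative):
//
//   if (auto *MS = llvm::dyn_cast<MergeInputSection<ELFT>>(Sec))
//     MS->splitIntoPieces();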
// This corresponds to a section of an input file.
template <class ELFT> class InputSectionBase : public InputSectionData {
protected:
typedef typename ELFT::Chdr Elf_Chdr;
typedef typename ELFT::Rel Elf_Rel;
typedef typename ELFT::Rela Elf_Rela;
typedef typename ELFT::Shdr Elf_Shdr;
typedef typename ELFT::Sym Elf_Sym;
typedef typename ELFT::uint uintX_t;
// The file this section is from.
ObjectFile<ELFT> *File;
public:
// These correspond to the fields in Elf_Shdr.
uintX_t Flags;
uintX_t Offset = 0;
uintX_t Entsize;
uint32_t Type;
uint32_t Link;
uint32_t Info;
InputSectionBase()
: InputSectionData(Regular, "", ArrayRef<uint8_t>(), false, false),
Repl(this) {}
InputSectionBase(ObjectFile<ELFT> *File, const Elf_Shdr *Header,
StringRef Name, Kind SectionKind);
InputSectionBase(ObjectFile<ELFT> *File, uintX_t Flags, uint32_t Type,
uintX_t Entsize, uint32_t Link, uint32_t Info,
uintX_t Addralign, ArrayRef<uint8_t> Data, StringRef Name,
Kind SectionKind);
OutputSectionBase *OutSec = nullptr;
// This pointer points to the "real" instance of this instance.
// Usually Repl == this. However, if ICF merges two sections,
// the Repl pointer of one section points to the other section.
// So, if you need a pointer to this instance, do not use
// this but this->Repl instead.
InputSectionBase<ELFT> *Repl;
// Returns the size of this section (even if this is a common or BSS section).
size_t getSize() const;
ObjectFile<ELFT> *getFile() const { return File; }
llvm::object::ELFFile<ELFT> getObj() const { return File->getObj(); }
uintX_t getOffset(const DefinedRegular<ELFT> &Sym) const;
InputSectionBase *getLinkOrderDep() const;
// Translate an offset in the input section to an offset in the output
// section.
uintX_t getOffset(uintX_t Offset) const;
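// A minimal usage sketch of the offset translation above (illustrative, with
// hypothetical names; OutSecBase stands for the output section's base
// address, however it is obtained):
//
//   uintX_t OutOff = IS->getOffset(OffInInputSection);
//   uintX_t VA = OutSecBase + OutOff;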
void uncompress();
void relocate(uint8_t *Buf, uint8_t *BufEnd);
private:
std::pair<ArrayRef<uint8_t>, uint64_t>
getElfCompressedData(ArrayRef<uint8_t> Data);
std::pair<ArrayRef<uint8_t>, uint64_t>
getRawCompressedData(ArrayRef<uint8_t> Data);
};
// SectionPiece represents a piece of splittable section contents.
// We allocate a lot of these and binary search on them. This means that they
// have to be as compact as possible, which is why we don't store the size (can
// be found by looking at the next one) and put the hash in a side table.
struct SectionPiece {
SectionPiece(size_t Off, bool Live = false)
: InputOff(Off), OutputOff(-1), Live(Live || !Config->GcSections) {}
size_t InputOff;
ssize_t OutputOff : 8 * sizeof(ssize_t) - 1;
size_t Live : 1;
};
static_assert(sizeof(SectionPiece) == 2 * sizeof(size_t),
"SectionPiece is too big");
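// Because SectionPiece does not store a size, the size of piece I is derived
// from the start of the next piece (or from the end of the section data for
// the last piece). A sketch of that computation, with hypothetical names:
//
//   size_t pieceSize(ArrayRef<SectionPiece> Pieces, ArrayRef<uint8_t> Data,
//                    size_t I) {
//     size_t End =
//         (I + 1 == Pieces.size()) ? Data.size() : Pieces[I + 1].InputOff;
//     return End - Pieces[I].InputOff;
//   }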
// This corresponds to a SHF_MERGE section of an input file.
template <class ELFT> class MergeInputSection : public InputSectionBase<ELFT> {
typedef typename ELFT::uint uintX_t;
typedef typename ELFT::Sym Elf_Sym;
typedef typename ELFT::Shdr Elf_Shdr;
public:
MergeInputSection(ObjectFile<ELFT> *F, const Elf_Shdr *Header,
StringRef Name);
static bool classof(const InputSectionData *S);
void splitIntoPieces();
// Mark the piece at a given offset live. Used by GC.
void markLiveAt(uintX_t Offset) {
assert(this->Flags & llvm::ELF::SHF_ALLOC);
LiveOffsets.insert(Offset);
}
// Translate an offset in the input section to an offset
// in the output section.
uintX_t getOffset(uintX_t Offset) const;
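// A sketch of how this translation works for merged sections (illustrative):
// the input offset is decomposed into the piece containing it plus a delta
// within that piece, and the result is the piece's output offset plus that
// delta. Names are hypothetical.
//
//   const SectionPiece *P = Sec->getSectionPiece(Off);
//   uintX_t OutOff = P->OutputOff + (Off - P->InputOff);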
void finalizePieces();
// Splittable sections are handled as a sequence of data
// rather than a single large blob of data.
std::vector<SectionPiece> Pieces;
ArrayRef<uint8_t> getData(std::vector<SectionPiece>::const_iterator I) const;
std::vector<uint32_t> Hashes;
// Returns the SectionPiece at a given input section offset.
SectionPiece *getSectionPiece(uintX_t Offset);
const SectionPiece *getSectionPiece(uintX_t Offset) const;
private:
std::vector<SectionPiece> splitStrings(ArrayRef<uint8_t> A, size_t Size);
std::vector<SectionPiece> splitNonStrings(ArrayRef<uint8_t> A, size_t Size);
llvm::DenseMap<uintX_t, uintX_t> OffsetMap;
llvm::DenseSet<uintX_t> LiveOffsets;
};
struct EhSectionPiece : public SectionPiece {
EhSectionPiece(size_t Off, ArrayRef<uint8_t> Data, unsigned FirstRelocation)
: SectionPiece(Off, false), Data(Data.data()), Size(Data.size()),
FirstRelocation(FirstRelocation) {}
const uint8_t *Data;
uint32_t Size;
uint32_t size() const { return Size; }
ArrayRef<uint8_t> data() { return {Data, Size}; }
unsigned FirstRelocation;
};
// This corresponds to a .eh_frame section of an input file.
template <class ELFT> class EhInputSection : public InputSectionBase<ELFT> {
public:
typedef typename ELFT::Shdr Elf_Shdr;
typedef typename ELFT::uint uintX_t;
EhInputSection(ObjectFile<ELFT> *F, const Elf_Shdr *Header, StringRef Name);
static bool classof(const InputSectionData *S);
void split();
template <class RelTy> void split(ArrayRef<RelTy> Rels);
// Splittable sections are handled as a sequence of data
// rather than a single large blob of data.
std::vector<EhSectionPiece> Pieces;
// Relocation section that refers to this one.
const Elf_Shdr *RelocSection = nullptr;
};
// This corresponds to a non SHF_MERGE section of an input file.
template <class ELFT> class InputSection : public InputSectionBase<ELFT> {
friend ICF<ELFT>;
typedef InputSectionBase<ELFT> Base;
typedef typename ELFT::Shdr Elf_Shdr;
typedef typename ELFT::Rela Elf_Rela;
typedef typename ELFT::Rel Elf_Rel;
typedef typename ELFT::Sym Elf_Sym;
typedef typename ELFT::uint uintX_t;
public:
InputSection();
InputSection(uintX_t Flags, uint32_t Type, uintX_t Addralign,
ArrayRef<uint8_t> Data, StringRef Name);
InputSection(ObjectFile<ELFT> *F, const Elf_Shdr *Header, StringRef Name);
static InputSection<ELFT> Discarded;
// Write this section to an mmap'ed file, assuming Buf points to the
// beginning of the output section.
void writeTo(uint8_t *Buf);
// Relocation sections that refer to this one.
llvm::TinyPtrVector<const Elf_Shdr *> RelocSections;
// The offset from the beginning of the output section this section was
// assigned to. The writer sets the value.
uint64_t OutSecOff = 0;
// InputSection that is dependent on us (reverse dependency for GC)
InputSectionBase<ELFT> *DependentSection = nullptr;
static bool classof(const InputSectionData *S);
InputSectionBase<ELFT> *getRelocatedSection();
// Register a thunk related to the symbol. When the section is written to an
// mmap'ed file, the target is asked to write the actual thunk code.
// Thunks are currently supported only for the MIPS and ARM targets.
void addThunk(const Thunk<ELFT> *T);
// The offset of the synthetic thunk code from the beginning of this section.
uint64_t getThunkOff() const;
// Size of the chunk containing thunk code.
uint64_t getThunksSize() const;
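// A layout sketch (illustrative): thunks are emitted right after the
// section's own contents, so the space this section occupies in the output
// is its data size plus the thunk chunk. Buf points to the output section's
// start, as in writeTo(); the names are otherwise hypothetical.
//
//   uint64_t Total = IS->getSize() + IS->getThunksSize();
//   uint8_t *ThunkBuf = Buf + IS->OutSecOff + IS->getThunkOff();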
template <class RelTy>
void relocateNonAlloc(uint8_t *Buf, llvm::ArrayRef<RelTy> Rels);
private:
template <class RelTy>
void copyRelocations(uint8_t *Buf, llvm::ArrayRef<RelTy> Rels);
// Called by ICF to merge two input sections.
void replace(InputSection<ELFT> *Other);
// Used by ICF.
uint64_t GroupId = 0;
llvm::TinyPtrVector<const Thunk<ELFT> *> Thunks;
};
template <class ELFT> InputSection<ELFT> InputSection<ELFT>::Discarded;
} // namespace elf
} // namespace lld
#endif