author | Alon Zakai <azakai@google.com> | 2019-12-19 09:04:08 -0800 |
---|---|---|
committer | GitHub <noreply@github.com> | 2019-12-19 09:04:08 -0800 |
commit | 4d28d3f32e7f213e300b24bc61c3f0ac9d6e1ab6 (patch) | |
tree | 91bffc2d47b1fe4bba01e7ada77006ef340bd138 /third_party/llvm-project/DWARFUnitIndex.cpp | |
parent | 0048f5b004ddf50e750aa335d0be314a73852058 (diff) | |
DWARF parsing and writing support using LLVM (#2520)
This imports LLVM code for DWARF handling. That code has the
same Apache 2 license as us. It's also the same code used to
emit DWARF in the common toolchain, so it seems like a safe choice.
This adds two passes: --dwarfdump, which runs the same code LLVM
runs for llvm-dwarfdump (this shows we can parse the DWARF
correctly, and will be useful for debugging), and --dwarfupdate,
which writes out the DWARF sections (unchanged from what we read,
so it just roundtrips - for actual updating we need #2515).
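For example, the passes should be runnable through wasm-opt like any
other Binaryen pass (these flag spellings are my assumption based on
the usual pass plumbing, not something stated in this commit):

  wasm-opt input.wasm --dwarfdump
  wasm-opt input.wasm --dwarfupdate -o output.wasm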
This puts LLVM in third_party, which is added here.
All the LLVM code is behind USE_LLVM_DWARF, which is on
by default, but off in JS for now, as it increases code size by 20%.
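Concretely, that should make it a configure-time switch like other
Binaryen CMake options (the invocation below is an assumption; only
the USE_LLVM_DWARF name comes from this commit):

  cmake -DUSE_LLVM_DWARF=OFF ..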
The current approach imports the LLVM files directly. This is not
how they are intended to be used, so it required a bunch of
local changes - more than I expected, actually, for the platform-specific
stuff. For now this seems to work, so it may be good enough, but
in the long term we may want to switch to linking against libllvm.
A downside to doing that is that binaryen users would need to
have an LLVM build, and even in the waterfall builds we'd have a
problem: while we ship LLVM there anyhow, we constantly update
it, which means that binaryen would need to be on the latest LLVM
all the time too (whereas otherwise, given that DWARF is quite
stable, we might not need to update constantly).
An even larger issue is that as I did this work I learned how
DWARF handling works in LLVM, and while the reading code is easy to
reuse, the writing code is trickier. The main code path is heavily
integrated with the MC layer, which we don't have - we might want
to create a "fake MC layer" for that, but it sounds hard. Instead,
there is the YAML path, which is used mostly for testing, and which
can convert DWARF between YAML and binary. Using
the non-YAML parts there, we can convert binary DWARF to
the YAML layer's nice Info data, then convert that back to binary. This
works; however, it is not the path LLVM normally uses, and it
supports only some basic DWARF sections - in fact, I had to add ranges
support. So if we need more complex things, we may end
up needing the MC layer approach, or considering some other
DWARF library. Hopefully that should not affect the core
binaryen code, which just calls into a library for the DWARF stuff.
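To make the shape of that binary -> Info -> binary path concrete, here
is a minimal self-contained toy in the same spirit: parse bytes into a
plain intermediate struct, then re-emit them. Everything in it
(ToySection, parseToy, emitToy) is a made-up illustration, not the
LLVM YAML-layer API:

  #include <cstdint>
  #include <cstdio>
  #include <vector>

  // Toy stand-in for the YAML layer's intermediate Info data: a parsed,
  // easy-to-inspect representation sitting between the two binary forms.
  struct ToySection {
    uint32_t Version;
    std::vector<uint8_t> Payload;
  };

  // Binary -> intermediate data (the "reading" half of the roundtrip).
  ToySection parseToy(const std::vector<uint8_t> &Bytes) {
    ToySection S;
    S.Version = Bytes[0] | (Bytes[1] << 8) | (Bytes[2] << 16) |
                (uint32_t(Bytes[3]) << 24);
    S.Payload.assign(Bytes.begin() + 4, Bytes.end());
    return S;
  }

  // Intermediate data -> binary (the "writing" half); unchanged data
  // roundtrips byte-for-byte, which is all --dwarfupdate does so far.
  std::vector<uint8_t> emitToy(const ToySection &S) {
    std::vector<uint8_t> Bytes = {uint8_t(S.Version), uint8_t(S.Version >> 8),
                                  uint8_t(S.Version >> 16),
                                  uint8_t(S.Version >> 24)};
    Bytes.insert(Bytes.end(), S.Payload.begin(), S.Payload.end());
    return Bytes;
  }

  int main() {
    std::vector<uint8_t> In = {4, 0, 0, 0, 'a', 'b', 'c'};
    std::printf("roundtrip %s\n", emitToy(parseToy(In)) == In ? "ok" : "broken");
  }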
Helps #2400
Diffstat (limited to 'third_party/llvm-project/DWARFUnitIndex.cpp')
-rw-r--r-- | third_party/llvm-project/DWARFUnitIndex.cpp | 200 |
1 file changed, 200 insertions, 0 deletions
diff --git a/third_party/llvm-project/DWARFUnitIndex.cpp b/third_party/llvm-project/DWARFUnitIndex.cpp
new file mode 100644
index 000000000..f29c1e6cc
--- /dev/null
+++ b/third_party/llvm-project/DWARFUnitIndex.cpp
@@ -0,0 +1,200 @@
+//===- DWARFUnitIndex.cpp -------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/DebugInfo/DWARF/DWARFUnitIndex.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cinttypes>
+#include <cstdint>
+
+using namespace llvm;
+
+bool DWARFUnitIndex::Header::parse(DataExtractor IndexData,
+                                   uint64_t *OffsetPtr) {
+  if (!IndexData.isValidOffsetForDataOfSize(*OffsetPtr, 16))
+    return false;
+  Version = IndexData.getU32(OffsetPtr);
+  NumColumns = IndexData.getU32(OffsetPtr);
+  NumUnits = IndexData.getU32(OffsetPtr);
+  NumBuckets = IndexData.getU32(OffsetPtr);
+  return Version <= 2;
+}
+
+void DWARFUnitIndex::Header::dump(raw_ostream &OS) const {
+  OS << format("version = %u slots = %u\n\n", Version, NumBuckets);
+}
+
+bool DWARFUnitIndex::parse(DataExtractor IndexData) {
+  bool b = parseImpl(IndexData);
+  if (!b) {
+    // Make sure we don't try to dump anything
+    Header.NumBuckets = 0;
+    // Release any partially initialized data.
+    ColumnKinds.reset();
+    Rows.reset();
+  }
+  return b;
+}
+
+bool DWARFUnitIndex::parseImpl(DataExtractor IndexData) {
+  uint64_t Offset = 0;
+  if (!Header.parse(IndexData, &Offset))
+    return false;
+
+  if (!IndexData.isValidOffsetForDataOfSize(
+          Offset, Header.NumBuckets * (8 + 4) +
+                      (2 * Header.NumUnits + 1) * 4 * Header.NumColumns))
+    return false;
+
+  Rows = std::make_unique<Entry[]>(Header.NumBuckets);
+  auto Contribs =
+      std::make_unique<Entry::SectionContribution *[]>(Header.NumUnits);
+  ColumnKinds = std::make_unique<DWARFSectionKind[]>(Header.NumColumns);
+
+  // Read Hash Table of Signatures
+  for (unsigned i = 0; i != Header.NumBuckets; ++i)
+    Rows[i].Signature = IndexData.getU64(&Offset);
+
+  // Read Parallel Table of Indexes
+  for (unsigned i = 0; i != Header.NumBuckets; ++i) {
+    auto Index = IndexData.getU32(&Offset);
+    if (!Index)
+      continue;
+    Rows[i].Index = this;
+    Rows[i].Contributions =
+        std::make_unique<Entry::SectionContribution[]>(Header.NumColumns);
+    Contribs[Index - 1] = Rows[i].Contributions.get();
+  }
+
+  // Read the Column Headers
+  for (unsigned i = 0; i != Header.NumColumns; ++i) {
+    ColumnKinds[i] = static_cast<DWARFSectionKind>(IndexData.getU32(&Offset));
+    if (ColumnKinds[i] == InfoColumnKind) {
+      if (InfoColumn != -1)
+        return false;
+      InfoColumn = i;
+    }
+  }
+
+  if (InfoColumn == -1)
+    return false;
+
+  // Read Table of Section Offsets
+  for (unsigned i = 0; i != Header.NumUnits; ++i) {
+    auto *Contrib = Contribs[i];
+    for (unsigned i = 0; i != Header.NumColumns; ++i)
+      Contrib[i].Offset = IndexData.getU32(&Offset);
+  }
+
+  // Read Table of Section Sizes
+  for (unsigned i = 0; i != Header.NumUnits; ++i) {
+    auto *Contrib = Contribs[i];
+    for (unsigned i = 0; i != Header.NumColumns; ++i)
+      Contrib[i].Length = IndexData.getU32(&Offset);
+  }
+
+  return true;
+}
+
+StringRef DWARFUnitIndex::getColumnHeader(DWARFSectionKind DS) {
+#define CASE(DS)                                                               \
+  case DW_SECT_##DS:                                                           \
+    return #DS;
+  switch (DS) {
+    CASE(INFO);
+    CASE(TYPES);
+    CASE(ABBREV);
+    CASE(LINE);
+    CASE(LOC);
+    CASE(STR_OFFSETS);
+    CASE(MACINFO);
+    CASE(MACRO);
+  }
+  llvm_unreachable("unknown DWARFSectionKind");
+}
+
+void DWARFUnitIndex::dump(raw_ostream &OS) const {
+  if (!*this)
+    return;
+
+  Header.dump(OS);
+  OS << "Index Signature         ";
+  for (unsigned i = 0; i != Header.NumColumns; ++i)
+    OS << ' ' << left_justify(getColumnHeader(ColumnKinds[i]), 24);
+  OS << "\n----- ------------------";
+  for (unsigned i = 0; i != Header.NumColumns; ++i)
+    OS << " ------------------------";
+  OS << '\n';
+  for (unsigned i = 0; i != Header.NumBuckets; ++i) {
+    auto &Row = Rows[i];
+    if (auto *Contribs = Row.Contributions.get()) {
+      OS << format("%5u 0x%016" PRIx64 " ", i + 1, Row.Signature);
+      for (unsigned i = 0; i != Header.NumColumns; ++i) {
+        auto &Contrib = Contribs[i];
+        OS << format("[0x%08x, 0x%08x) ", Contrib.Offset,
+                     Contrib.Offset + Contrib.Length);
+      }
+      OS << '\n';
+    }
+  }
+}
+
+const DWARFUnitIndex::Entry::SectionContribution *
+DWARFUnitIndex::Entry::getOffset(DWARFSectionKind Sec) const {
+  uint32_t i = 0;
+  for (; i != Index->Header.NumColumns; ++i)
+    if (Index->ColumnKinds[i] == Sec)
+      return &Contributions[i];
+  return nullptr;
+}
+
+const DWARFUnitIndex::Entry::SectionContribution *
+DWARFUnitIndex::Entry::getOffset() const {
+  return &Contributions[Index->InfoColumn];
+}
+
+const DWARFUnitIndex::Entry *
+DWARFUnitIndex::getFromOffset(uint32_t Offset) const {
+  if (OffsetLookup.empty()) {
+    for (uint32_t i = 0; i != Header.NumBuckets; ++i)
+      if (Rows[i].Contributions)
+        OffsetLookup.push_back(&Rows[i]);
+    llvm::sort(OffsetLookup, [&](Entry *E1, Entry *E2) {
+      return E1->Contributions[InfoColumn].Offset <
+             E2->Contributions[InfoColumn].Offset;
+    });
+  }
+  auto I = partition_point(OffsetLookup, [&](Entry *E2) {
+    return E2->Contributions[InfoColumn].Offset <= Offset;
+  });
+  if (I == OffsetLookup.begin())
+    return nullptr;
+  --I;
+  const auto *E = *I;
+  const auto &InfoContrib = E->Contributions[InfoColumn];
+  if ((InfoContrib.Offset + InfoContrib.Length) <= Offset)
+    return nullptr;
+  return E;
+}
+
+const DWARFUnitIndex::Entry *DWARFUnitIndex::getFromHash(uint64_t S) const {
+  uint64_t Mask = Header.NumBuckets - 1;
+
+  auto H = S & Mask;
+  auto HP = ((S >> 32) & Mask) | 1;
+  while (Rows[H].getSignature() != S && Rows[H].getSignature() != 0)
+    H = (H + HP) & Mask;
+
+  if (Rows[H].getSignature() != S)
+    return nullptr;
+
+  return &Rows[H];
+}
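One detail worth calling out in the file above: getFromHash is the
unit-index hash lookup for DWARF package files - open addressing over a
power-of-two number of buckets, starting at the bucket named by the
signature's low bits and probing with a stride taken from the high
bits. The `| 1` forces the stride odd, so it is coprime with the table
size and the probe sequence eventually visits every bucket. A minimal
standalone sketch of the same scheme (findSignature and insertSignature
are illustrative names, not LLVM API):

  #include <cstdint>
  #include <cstdio>
  #include <vector>

  // Same probe scheme as DWARFUnitIndex::getFromHash: signature 0 marks
  // an empty bucket and terminates the search. As in the real index,
  // the table size is a power of two and the table is never full.
  const uint64_t *findSignature(const std::vector<uint64_t> &Buckets,
                                uint64_t S) {
    uint64_t Mask = Buckets.size() - 1;
    uint64_t H = S & Mask;                // start bucket: low bits
    uint64_t HP = ((S >> 32) & Mask) | 1; // stride: high bits, forced odd
    while (Buckets[H] != S && Buckets[H] != 0)
      H = (H + HP) & Mask;
    return Buckets[H] == S ? &Buckets[H] : nullptr;
  }

  // Insertion walks the identical probe sequence to the first empty slot.
  void insertSignature(std::vector<uint64_t> &Buckets, uint64_t S) {
    uint64_t Mask = Buckets.size() - 1;
    uint64_t H = S & Mask;
    uint64_t HP = ((S >> 32) & Mask) | 1;
    while (Buckets[H] != 0)
      H = (H + HP) & Mask;
    Buckets[H] = S;
  }

  int main() {
    std::vector<uint64_t> Buckets(8, 0);
    insertSignature(Buckets, 0x123456789abcdef0ull);
    insertSignature(Buckets, 0x0fedcba987654321ull);
    std::printf("hit:  %d\n", findSignature(Buckets, 0x123456789abcdef0ull) != nullptr);
    std::printf("miss: %d\n", findSignature(Buckets, 0xdeadbeefull) != nullptr);
  }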