//===-- SourceManager.cpp ---------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "lldb/Core/SourceManager.h"
// C Includes
// C++ Includes
// Other libraries and framework includes
// Project includes
#include "lldb/Core/DataBuffer.h"
#include "lldb/Core/Stream.h"
using namespace lldb_private;
static inline bool is_newline_char(char ch)
{
return ch == '\n' || ch == '\r';
}
//----------------------------------------------------------------------
// SourceManager constructor
//----------------------------------------------------------------------
SourceManager::SourceManager() :
m_file_cache (),
m_last_file_sp (),
m_last_file_line (0),
m_last_file_context_before (0),
m_last_file_context_after (0)
{
}
//----------------------------------------------------------------------
// Destructor
//----------------------------------------------------------------------
SourceManager::~SourceManager()
{
}
size_t
SourceManager::DisplaySourceLines
(
const FileSpec &file_spec,
uint32_t line,
uint32_t context_before,
uint32_t context_after,
Stream *s
)
{
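    // Remember the file and the line at which a later "show more" request
    // should resume: one line past the end of the requested context.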
m_last_file_sp = GetFile (file_spec);
m_last_file_line = line + context_after + 1;
m_last_file_context_before = context_before;
m_last_file_context_after = context_after;
if (m_last_file_sp.get())
return m_last_file_sp->DisplaySourceLines (line, context_before, context_after, s);
return 0;
}
SourceManager::FileSP
SourceManager::GetFile (const FileSpec &file_spec)
{
FileSP file_sp;
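    // Reuse a previously cached File for this FileSpec if we have one;
    // otherwise read the file now and add it to the cache.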
FileCache::iterator pos = m_file_cache.find(file_spec);
if (pos != m_file_cache.end())
file_sp = pos->second;
else
{
file_sp.reset (new File (file_spec));
m_file_cache[file_spec] = file_sp;
}
return file_sp;
}
size_t
SourceManager::DisplaySourceLinesWithLineNumbersUsingLastFile
(
uint32_t line,
uint32_t context_before,
uint32_t context_after,
const char* current_line_cstr,
Stream *s
)
{
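    // A line number of zero means "continue from where the last display left
    // off". m_last_file_line is set to UINT32_MAX once we run off the end of
    // the file; DisplayMoreWithLineNumbers() uses that sentinel to stop paging.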
if (line == 0)
{
if (m_last_file_line != 0
&& m_last_file_line != UINT32_MAX)
line = m_last_file_line + context_before;
else
line = 1;
}
m_last_file_line = line + context_after + 1;
m_last_file_context_before = context_before;
m_last_file_context_after = context_after;
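    // UINT32_MAX context values mean "use the defaults": no lines of context
    // before the current line and ten lines after it.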
if (context_before == UINT32_MAX)
context_before = 0;
if (context_after == UINT32_MAX)
context_after = 10;
if (m_last_file_sp.get())
{
const uint32_t start_line = line <= context_before ? 1 : line - context_before;
const uint32_t end_line = line + context_after;
uint32_t curr_line;
for (curr_line = start_line; curr_line <= end_line; ++curr_line)
{
if (!m_last_file_sp->LineIsValid (curr_line))
{
m_last_file_line = UINT32_MAX;
break;
}
s->Printf("%2.2s %-4u\t", curr_line == line ? current_line_cstr : "", curr_line);
if (m_last_file_sp->DisplaySourceLines (curr_line, 0, 0, s) == 0)
{
m_last_file_line = UINT32_MAX;
break;
}
}
}
return 0;
}
size_t
SourceManager::DisplaySourceLinesWithLineNumbers
(
const FileSpec &file_spec,
uint32_t line,
uint32_t context_before,
uint32_t context_after,
const char* current_line_cstr,
Stream *s
)
{
bool same_as_previous = m_last_file_sp && m_last_file_sp->FileSpecMatches (file_spec);
if (!same_as_previous)
m_last_file_sp = GetFile (file_spec);
if (line == 0)
{
if (!same_as_previous)
m_last_file_line = 0;
}
return DisplaySourceLinesWithLineNumbersUsingLastFile (line, context_before, context_after, current_line_cstr, s);
}
size_t
SourceManager::DisplayMoreWithLineNumbers (Stream *s)
{
if (m_last_file_sp)
{
if (m_last_file_line == UINT32_MAX)
return 0;
DisplaySourceLinesWithLineNumbersUsingLastFile (0, m_last_file_context_before, m_last_file_context_after, "", s);
}
return 0;
}
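// Example usage (a hypothetical caller, for illustration only; "file_spec"
// and "strm" are assumed to already exist): list lines 17..23 of a file with
// line 20 marked as current, then page forward with lines 24..30:
//
//   SourceManager sm;
//   sm.DisplaySourceLinesWithLineNumbers (file_spec, 20, 3, 3, "->", &strm);
//   sm.DisplayMoreWithLineNumbers (&strm);

// A SourceManager::File caches a source file's contents and modification time;
// line start offsets are computed lazily by CalculateLineOffsets().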
SourceManager::File::File(const FileSpec &file_spec) :
m_file_spec(file_spec),
m_mod_time (m_file_spec.GetModificationTime()),
m_data_sp(file_spec.ReadFileContents ()),
m_offsets()
{
}
SourceManager::File::~File()
{
}
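// Return the byte offset of the first character of "line" (1-based), or
// UINT32_MAX if the line cannot be found. m_offsets[0] holds a sentinel value,
// so the offset for line N lives at index N - 1.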
uint32_t
SourceManager::File::GetLineOffset (uint32_t line)
{
if (line == 0)
return UINT32_MAX;
if (line == 1)
return 0;
if (CalculateLineOffsets (line))
{
if (line < m_offsets.size())
return m_offsets[line - 1]; // yes we want "line - 1" in the index
}
return UINT32_MAX;
}
bool
SourceManager::File::LineIsValid (uint32_t line)
{
if (line == 0)
return false;
if (CalculateLineOffsets (line))
return line < m_offsets.size();
return false;
}
size_t
SourceManager::File::DisplaySourceLines (uint32_t line, uint32_t context_before, uint32_t context_after, Stream *s)
{
// TODO: use host API to sign up for file modifications to anything in our
// source cache and only update when we determine a file has been updated.
// For now we check each time we want to display info for the file.
TimeValue curr_mod_time (m_file_spec.GetModificationTime());
if (m_mod_time != curr_mod_time)
{
m_mod_time = curr_mod_time;
m_data_sp = m_file_spec.ReadFileContents ();
m_offsets.clear();
}
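    // Print everything from the start of "start_line" up to (but not
    // including) the start of the line that follows "end_line"; if no such
    // line exists, print through the end of the buffer.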
const uint32_t start_line = line <= context_before ? 1 : line - context_before;
const uint32_t start_line_offset = GetLineOffset (start_line);
if (start_line_offset != UINT32_MAX)
{
const uint32_t end_line = line + context_after;
uint32_t end_line_offset = GetLineOffset (end_line + 1);
if (end_line_offset == UINT32_MAX)
end_line_offset = m_data_sp->GetByteSize();
assert (start_line_offset <= end_line_offset);
size_t bytes_written = 0;
if (start_line_offset < end_line_offset)
{
size_t count = end_line_offset - start_line_offset;
const uint8_t *cstr = m_data_sp->GetBytes() + start_line_offset;
bytes_written = s->Write(cstr, count);
if (!is_newline_char(cstr[count-1]))
bytes_written += s->EOL();
}
return bytes_written;
}
return 0;
}
bool
SourceManager::File::FileSpecMatches (const FileSpec &file_spec)
{
return FileSpec::Compare (m_file_spec, file_spec, false) == 0;
}
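// Build the table of line start offsets. A "line" argument of UINT32_MAX means
// "index the entire file"; partial indexing is not implemented yet, so every
// call currently indexes the whole file.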
bool
SourceManager::File::CalculateLineOffsets (uint32_t line)
{
line = UINT32_MAX; // TODO: take this line out when we support partial indexing
if (line == UINT32_MAX)
{
// Already done?
if (!m_offsets.empty() && m_offsets[0] == UINT32_MAX)
return true;
if (m_offsets.empty())
{
if (m_data_sp.get() == NULL)
return false;
const char *start = (char *)m_data_sp->GetBytes();
if (start)
{
const char *end = start + m_data_sp->GetByteSize();
                // Calculate all line offsets from scratch.
                // Push UINT32_MAX at index zero as a sentinel that marks the
                // file as completely indexed.
                m_offsets.push_back(UINT32_MAX);
                const char *s;
                for (s = start; s < end; ++s)
                {
                    const char curr_ch = *s;
                    if (is_newline_char (curr_ch))
                    {
                        // Treat a "\r\n" or "\n\r" pair as a single line
                        // terminator. Guard the peek at s[1] so we never read
                        // past the end of the buffer.
                        if (s + 1 < end)
                        {
                            const char next_ch = s[1];
                            if (is_newline_char (next_ch) && curr_ch != next_ch)
                                ++s;
                        }
                        m_offsets.push_back(s + 1 - start);
                    }
                }
if (!m_offsets.empty())
{
if (m_offsets.back() < end - start)
m_offsets.push_back(end - start);
}
return true;
}
}
else
{
// Some lines have been populated, start where we last left off
assert(!"Not implemented yet");
}
}
else
{
// Calculate all line offsets up to "line"
assert(!"Not implemented yet");
}
return false;
}