gdb-remote testing: new test, cleaned up socket reading.

Added a new SocketPacketPump class to decouple gdb remote packet
reading from the packet expectation code.  This allows a cleaner
separation of the $O output stream (non-deterministic packaging of
inferior stdout/stderr) from all the rest of the packets.
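
For reference, the consumption pattern now looks roughly like this (a
minimal sketch of the new expect_lldb_gdbserver_replay loop; sock,
logger, and timeout_seconds are that function's own arguments):

    import Queue
    import socket_packet_pump

    # The pump's reader thread partitions incoming traffic into two
    # queues: decoded $O (inferior stdout/stderr) content on one side,
    # all other packets on the other.
    with socket_packet_pump.SocketPacketPump(sock, logger) as pump:
        try:
            # Non-$O packets: '+' acks, stop replies, query responses.
            packet = pump.packet_queue().get(True, timeout_seconds)
            # All $O content accumulated so far, hex-decoded to text.
            output = pump.output_queue().get(True, timeout_seconds)
        except Queue.Empty:
            raise Exception("timed out; receive buffer: {}".format(
                pump.get_receive_buffer()))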

Added a packet expectation matcher that can match expected accumulated
output with a timeout.  Use a dictionary with "type":"output_match".
See lldbgdbserverutils.MatchRemoteOutputEntry for details.
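
For example, an entry like the following (the regex and capture key
here are illustrative) waits for the accumulated inferior output to
match, storing the capture group in the test context:

    self.test_sequence.add_log_lines(
        [ { "type":"output_match",
            "regex":r"^thread id: ([0-9a-fA-F]+)\r\n$",
            "capture":{ 1:"print_thread_id" } } ],
        True)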

Added a gdb remote test to verify that $Hc (continue thread selection)
plus signal delivery ($C{signo}) works.  I'm having trouble getting this
to pass with debugserver on MacOSX 10.9, and tried several variants,
including $vCont;C{signo}:{thread-id};c.  In some cases the test exe's
signal handler runs ($vCont variant, first time); in others it doesn't
($vCont, second and subsequent times).  $C{signo} doesn't hit the signal
handler code in the test exe at all but does deliver a stop.  Further
$Hc and $C{signo} sequences deliver a stop but mark the wrong thread.
For now I'm marking the test as XFAIL on dsym/debugserver.  Will revisit
this on lldb-dev.
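
The core per-thread sequence the test drives is essentially:

    self.test_sequence.add_log_lines(
        ["read packet: $Hc{0:x}#00".format(thread_id),      # set continue thread
         "send packet: $OK#00",
         "read packet: $C{0:x}#00".format(signal.SIGUSR1),  # deliver the signal
         # Expect a stop reply marking the signaled thread.
         {"direction":"send",
          "regex":r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);",
          "capture":{1:"stop_signo", 2:"stop_thread_id"}}],
        True)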

Updated the test exe for these tests to support thread:print-ids (each
thread announces its thread id) and to provide a SIGUSR1 handler that
prints the id of the thread on which the signal was received.
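
For instance, launched the way the new test does (a.out stands in for
the built test exe; the tids shown are made up, printed in hex):

    $ ./a.out thread:print-ids thread:new thread:new sleep:20
    thread 0 id: 343d2a
    thread 1 id: 343d2b
    thread 2 id: 343d2c

and, when SIGUSR1 is delivered to a thread:

    received SIGUSR1 on thread id: 343d2b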

llvm-svn: 209845
Todd Fiala 2014-05-29 20:44:45 +00:00
parent 9ede702bc4
commit 0428c97837
5 changed files with 524 additions and 132 deletions

View File

@@ -1,6 +1,6 @@
LEVEL = ../../make
CFLAGS_EXTRAS := -D__STDC_LIMIT_MACROS
CFLAGS_EXTRAS := -D__STDC_LIMIT_MACROS -D__STDC_FORMAT_MACROS
LD_EXTRAS := -lpthread
CXX_SOURCES := main.cpp
MAKE_DSYM :=NO

View File

@@ -4,6 +4,8 @@ Test lldb-gdbserver operation
import unittest2
import pexpect
import platform
import signal
import socket
import subprocess
import sys
@@ -38,6 +40,11 @@ class LldbGdbServerTestCase(TestBase):
self.test_sequence = GdbRemoteTestSequence(self.logger)
self.set_inferior_startup_launch()
# Uncomment this code to force only a single test to run (by name).
# if self._testMethodName != "test_Hc_then_Csignal_signals_correct_thread_launch_debugserver_dsym":
# # print "skipping test {}".format(self._testMethodName)
# self.skipTest("focusing on one test")
def reset_test_sequence(self):
self.test_sequence = GdbRemoteTestSequence(self.logger)
@@ -45,11 +52,13 @@ class LldbGdbServerTestCase(TestBase):
self.debug_monitor_exe = get_lldb_gdbserver_exe()
if not self.debug_monitor_exe:
self.skipTest("lldb_gdbserver exe not found")
self.debug_monitor_extra_args = ""
def init_debugserver_test(self):
self.debug_monitor_exe = get_debugserver_exe()
if not self.debug_monitor_exe:
self.skipTest("debugserver exe not found")
self.debug_monitor_extra_args = " --log-file=/tmp/packets-{}.log --log-flags=0x800000".format(self._testMethodName)
def create_socket(self):
sock = socket.socket()
@@ -81,7 +90,7 @@ class LldbGdbServerTestCase(TestBase):
def start_server(self, attach_pid=None):
# Create the command line
commandline = "{} localhost:{}".format(self.debug_monitor_exe, self.port)
commandline = "{}{} localhost:{}".format(self.debug_monitor_exe, self.debug_monitor_extra_args, self.port)
if attach_pid:
commandline += " --attach=%d" % attach_pid
@@ -239,7 +248,6 @@ class LldbGdbServerTestCase(TestBase):
self.assertTrue("encoding" in reg_info)
self.assertTrue("format" in reg_info)
def add_threadinfo_collection_packets(self):
self.test_sequence.add_log_lines(
[ { "type":"multi_response", "first_query":"qfThreadInfo", "next_query":"qsThreadInfo",
@@ -247,7 +255,6 @@ class LldbGdbServerTestCase(TestBase):
"save_key":"threadinfo_responses" } ],
True)
def parse_threadinfo_packets(self, context):
"""Return an array of thread ids (decimal ints), one per thread."""
threadinfo_responses = context.get("threadinfo_responses")
@@ -259,7 +266,6 @@ class LldbGdbServerTestCase(TestBase):
thread_ids.extend(new_thread_infos)
return thread_ids
def wait_for_thread_count(self, thread_count, timeout_seconds=3):
start_time = time.time()
timeout_time = start_time + timeout_seconds
@@ -284,7 +290,6 @@ class LldbGdbServerTestCase(TestBase):
return threads
def run_process_then_stop(self, run_seconds=1):
# Tell the stub to continue.
self.test_sequence.add_log_lines(
@@ -304,7 +309,8 @@ class LldbGdbServerTestCase(TestBase):
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
self.assertIsNotNone(context.get("stop_result"))
return context
@debugserver_test
def test_exe_starts_debugserver(self):
@@ -806,7 +812,6 @@ class LldbGdbServerTestCase(TestBase):
# Ensure we have a flags register.
self.assertTrue('flags' in generic_regs)
@debugserver_test
@dsym_test
def test_qRegisterInfo_contains_required_generics_debugserver_dsym(self):
@@ -822,7 +827,6 @@ class LldbGdbServerTestCase(TestBase):
self.buildDwarf()
self.qRegisterInfo_contains_required_generics()
def qRegisterInfo_contains_at_least_one_register_set(self):
server = self.start_server()
self.assertIsNotNone(server)
@@ -846,7 +850,6 @@ class LldbGdbServerTestCase(TestBase):
register_sets = { reg_info['set']:1 for reg_info in reg_infos if 'set' in reg_info }
self.assertTrue(len(register_sets) >= 1)
@debugserver_test
@dsym_test
def test_qRegisterInfo_contains_at_least_one_register_set_debugserver_dsym(self):
@@ -854,7 +857,6 @@ class LldbGdbServerTestCase(TestBase):
self.buildDsym()
self.qRegisterInfo_contains_at_least_one_register_set()
@llgs_test
@dwarf_test
@unittest2.expectedFailure()
@@ -863,7 +865,6 @@ class LldbGdbServerTestCase(TestBase):
self.buildDwarf()
self.qRegisterInfo_contains_at_least_one_register_set()
def qThreadInfo_contains_thread(self):
procs = self.prep_debug_monitor_and_inferior()
self.add_threadinfo_collection_packets()
@@ -879,7 +880,6 @@ class LldbGdbServerTestCase(TestBase):
# We should have exactly one thread.
self.assertEqual(len(threads), 1)
@debugserver_test
@dsym_test
def test_qThreadInfo_contains_thread_launch_debugserver_dsym(self):
@@ -888,7 +888,6 @@ class LldbGdbServerTestCase(TestBase):
self.set_inferior_startup_launch()
self.qThreadInfo_contains_thread()
@llgs_test
@dwarf_test
@unittest2.expectedFailure()
@@ -898,7 +897,6 @@ class LldbGdbServerTestCase(TestBase):
self.set_inferior_startup_launch()
self.qThreadInfo_contains_thread()
@debugserver_test
@dsym_test
def test_qThreadInfo_contains_thread_attach_debugserver_dsym(self):
@@ -907,7 +905,6 @@ class LldbGdbServerTestCase(TestBase):
self.set_inferior_startup_attach()
self.qThreadInfo_contains_thread()
@llgs_test
@dwarf_test
@unittest2.expectedFailure()
@@ -917,7 +914,6 @@ class LldbGdbServerTestCase(TestBase):
self.set_inferior_startup_attach()
self.qThreadInfo_contains_thread()
def qThreadInfo_matches_qC(self):
procs = self.prep_debug_monitor_and_inferior()
@@ -946,7 +942,6 @@ class LldbGdbServerTestCase(TestBase):
# Those two should be the same.
self.assertEquals(threads[0], QC_thread_id)
@debugserver_test
@dsym_test
def test_qThreadInfo_matches_qC_launch_debugserver_dsym(self):
@@ -955,7 +950,6 @@ class LldbGdbServerTestCase(TestBase):
self.set_inferior_startup_launch()
self.qThreadInfo_matches_qC()
@llgs_test
@dwarf_test
@unittest2.expectedFailure()
@@ -965,7 +959,6 @@ class LldbGdbServerTestCase(TestBase):
self.set_inferior_startup_launch()
self.qThreadInfo_matches_qC()
@debugserver_test
@dsym_test
def test_qThreadInfo_matches_qC_attach_debugserver_dsym(self):
@@ -974,7 +967,6 @@ class LldbGdbServerTestCase(TestBase):
self.set_inferior_startup_attach()
self.qThreadInfo_matches_qC()
@llgs_test
@dwarf_test
@unittest2.expectedFailure()
@@ -984,7 +976,6 @@ class LldbGdbServerTestCase(TestBase):
self.set_inferior_startup_attach()
self.qThreadInfo_matches_qC()
def p_returns_correct_data_size_for_each_qRegisterInfo(self):
procs = self.prep_debug_monitor_and_inferior()
self.add_register_info_collection_packets()
@@ -1021,7 +1012,6 @@ class LldbGdbServerTestCase(TestBase):
# Increment loop
reg_index += 1
@debugserver_test
@dsym_test
def test_p_returns_correct_data_size_for_each_qRegisterInfo_launch_debugserver_dsym(self):
@@ -1030,7 +1020,6 @@ class LldbGdbServerTestCase(TestBase):
self.set_inferior_startup_launch()
self.p_returns_correct_data_size_for_each_qRegisterInfo()
@llgs_test
@dwarf_test
@unittest2.expectedFailure()
@@ -1040,7 +1029,6 @@ class LldbGdbServerTestCase(TestBase):
self.set_inferior_startup_launch()
self.p_returns_correct_data_size_for_each_qRegisterInfo()
@debugserver_test
@dsym_test
def test_p_returns_correct_data_size_for_each_qRegisterInfo_attach_debugserver_dsym(self):
@@ -1049,7 +1037,6 @@ class LldbGdbServerTestCase(TestBase):
self.set_inferior_startup_attach()
self.p_returns_correct_data_size_for_each_qRegisterInfo()
@llgs_test
@dwarf_test
@unittest2.expectedFailure()
@@ -1059,7 +1046,6 @@ class LldbGdbServerTestCase(TestBase):
self.set_inferior_startup_attach()
self.p_returns_correct_data_size_for_each_qRegisterInfo()
def Hg_switches_to_3_threads(self):
# Startup the inferior with three threads (main + 2 new ones).
procs = self.prep_debug_monitor_and_inferior(inferior_args=["thread:new", "thread:new"])
@@ -1076,7 +1062,7 @@ class LldbGdbServerTestCase(TestBase):
# Change to each thread, verify current thread id.
self.reset_test_sequence()
self.test_sequence.add_log_lines(
["read packet: $Hg{}#00".format(hex(thread)), # Set current thread.
["read packet: $Hg{0:x}#00".format(thread), # Set current thread.
"send packet: $OK#00",
"read packet: $qC#00",
{ "direction":"send", "regex":r"^\$QC([0-9a-fA-F]+)#", "capture":{1:"thread_id"} }],
@@ -1097,7 +1083,6 @@ class LldbGdbServerTestCase(TestBase):
self.set_inferior_startup_launch()
self.Hg_switches_to_3_threads()
@llgs_test
@dwarf_test
@unittest2.expectedFailure()
@@ -1107,7 +1092,6 @@ class LldbGdbServerTestCase(TestBase):
self.set_inferior_startup_launch()
self.Hg_switches_to_3_threads()
@debugserver_test
@dsym_test
def test_Hg_switches_to_3_threads_attach_debugserver_dsym(self):
@@ -1116,7 +1100,6 @@ class LldbGdbServerTestCase(TestBase):
self.set_inferior_startup_attach()
self.Hg_switches_to_3_threads()
@llgs_test
@dwarf_test
@unittest2.expectedFailure()
@@ -1126,5 +1109,87 @@ class LldbGdbServerTestCase(TestBase):
self.set_inferior_startup_attach()
self.Hg_switches_to_3_threads()
def Hc_then_Csignal_signals_correct_thread(self):
# NOTE only run this one in inferior-launched mode: we can't grab inferior stdout when running attached,
# and the test requires getting stdout from the exe.
NUM_THREADS = 3
# Startup the inferior with three threads (main + NUM_THREADS-1 worker threads).
inferior_args=["thread:print-ids"]
for i in range(NUM_THREADS - 1):
inferior_args.append("thread:new")
inferior_args.append("sleep:20")
procs = self.prep_debug_monitor_and_inferior(inferior_args=inferior_args)
# Let the inferior process have a few moments to start up the thread when launched.
context = self.run_process_then_stop(run_seconds=1)
# Wait at most x seconds for all threads to be present.
threads = self.wait_for_thread_count(NUM_THREADS, timeout_seconds=5)
self.assertEquals(len(threads), NUM_THREADS)
# print_thread_ids = {}
# Switch to each thread, deliver a signal, and verify signal delivery
for thread_id in threads:
# Change to each thread, verify current thread id.
self.reset_test_sequence()
self.test_sequence.add_log_lines(
["read packet: $Hc{0:x}#00".format(thread_id), # Set current thread.
"send packet: $OK#00",
"read packet: $C{0:x}#00".format(signal.SIGUSR1),
{"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture":{1:"stop_signo", 2:"stop_thread_id"} },
# "read packet: $vCont;C{0:x}:{1:x};c#00".format(signal.SIGUSR1, thread_id),
# "read packet: $vCont;C{0:x};c#00".format(signal.SIGUSR1, thread_id),
# { "type":"output_match", "regex":r"^received SIGUSR1 on thread id: ([0-9a-fA-F]+)\r\n$", "capture":{ 1:"print_thread_id"} },
"read packet: $c#00",
"read packet: {}".format(chr(03)),
{"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture":{1:"intr_signo", 2:"intr_thread_id"} }
],
True)
# Run the sequence.
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
# Ensure the stop signal is the signal we delivered.
stop_signo = context.get("stop_signo")
self.assertIsNotNone(stop_signo)
self.assertEquals(int(stop_signo,16), signal.SIGUSR1)
# Ensure the stop thread is the thread to which we delivered the signal.
stop_thread_id = context.get("stop_thread_id")
self.assertIsNotNone(stop_thread_id)
self.assertEquals(int(stop_thread_id,16), thread_id)
# Ensure we haven't seen this thread id yet. The inferior's self-obtained thread ids are not guaranteed to match the stub tids (at least on MacOSX).
# print_thread_id = context.get("print_thread_id")
# self.assertIsNotNone(print_thread_id)
# self.assertFalse(print_thread_id in print_thread_ids)
# Now remember this print (i.e. inferior-reflected) thread id and ensure we don't hit it again.
# print_thread_ids[print_thread_id] = 1
@debugserver_test
@dsym_test
@unittest2.expectedFailure() # this test is failing on MacOSX 10.9
def test_Hc_then_Csignal_signals_correct_thread_launch_debugserver_dsym(self):
self.init_debugserver_test()
self.buildDsym()
self.set_inferior_startup_launch()
self.Hc_then_Csignal_signals_correct_thread()
@llgs_test
@dwarf_test
@unittest2.expectedFailure()
def test_Hc_then_Csignal_signals_correct_thread_launch_llgs_dwarf(self):
self.init_llgs_test()
self.buildDwarf()
self.set_inferior_startup_launch()
self.Hc_then_Csignal_signals_correct_thread()
if __name__ == '__main__':
unittest2.main()

View File

@@ -4,12 +4,13 @@
import os
import os.path
import platform
import Queue
import re
import select
import socket_packet_pump
import subprocess
import time
def _get_debug_monitor_from_lldb(lldb_exe, debug_monitor_basename):
"""Return the debug monitor exe path given the lldb exe path.
@@ -108,7 +109,7 @@ def _is_packet_lldb_gdbserver_input(packet_type, llgs_input_is_read):
raise "Unknown packet type: {}".format(packet_type)
def handle_O_packet(context, packet_contents):
def handle_O_packet(context, packet_contents, logger):
"""Handle O packets."""
if (not packet_contents) or (len(packet_contents) < 1):
return False
@@ -117,8 +118,13 @@ def handle_O_packet(context, packet_contents):
elif packet_contents == "OK":
return False
context["O_content"] += gdbremote_hex_decode_string(packet_contents[1:])
new_text = gdbremote_hex_decode_string(packet_contents[1:])
context["O_content"] += new_text
context["O_count"] += 1
if logger:
logger.debug("text: new \"{}\", cumulative: \"{}\"".format(new_text, context["O_content"]))
return True
_STRIP_CHECKSUM_REGEX = re.compile(r'#[0-9a-fA-F]{2}$')
@@ -134,9 +140,6 @@ def assert_packets_equal(asserter, actual_packet, expected_packet):
expected_stripped = _STRIP_CHECKSUM_REGEX.sub('', expected_packet)
asserter.assertEqual(actual_stripped, expected_stripped)
_GDB_REMOTE_PACKET_REGEX = re.compile(r'^\$([^\#]*)#[0-9a-fA-F]{2}')
def expect_lldb_gdbserver_replay(
asserter,
sock,
@@ -178,83 +181,63 @@ def expect_lldb_gdbserver_replay(
# Ensure we have some work to do.
if len(test_sequence.entries) < 1:
return {}
received_lines = []
receive_buffer = ''
context = {"O_count":0, "O_content":""}
sequence_entry = test_sequence.entries.pop(0)
while sequence_entry:
if sequence_entry.is_send_to_remote():
# This is an entry to send to the remote debug monitor.
send_packet = sequence_entry.get_send_packet()
if logger:
logger.info("sending packet to remote: %s" % send_packet)
sock.sendall(send_packet)
else:
# This is an entry to expect to receive from the remote debug monitor.
if logger:
logger.info("receiving packet from remote")
start_time = time.time()
timeout_time = start_time + timeout_seconds
# while we don't have a complete line of input, wait
# for it from socket.
while len(received_lines) < 1:
# check for timeout
if time.time() > timeout_time:
raise Exception(
'timed out after {} seconds while waiting for llgs to respond, currently received: {}'.format(
timeout_seconds, receive_buffer))
can_read, _, _ = select.select([sock], [], [], 0)
if can_read and sock in can_read:
try:
new_bytes = sock.recv(4096)
except:
new_bytes = None
if new_bytes and len(new_bytes) > 0:
# read the next bits from the socket
if logger:
logger.debug("llgs responded with bytes: {}".format(new_bytes))
receive_buffer += new_bytes
# parse fully-formed packets into individual packets
has_more = len(receive_buffer) > 0
while has_more:
if len(receive_buffer) <= 0:
has_more = False
# handle '+' ack
elif receive_buffer[0] == '+':
received_lines.append('+')
receive_buffer = receive_buffer[1:]
if logger:
logger.debug('parsed packet from llgs: +, new receive_buffer: {}'.format(receive_buffer))
else:
packet_match = _GDB_REMOTE_PACKET_REGEX.match(receive_buffer)
if packet_match:
if not handle_O_packet(context, packet_match.group(1)):
# Normal packet to match.
received_lines.append(packet_match.group(0))
receive_buffer = receive_buffer[len(packet_match.group(0)):]
if logger:
logger.debug('parsed packet from llgs: {}, new receive_buffer: {}'.format(packet_match.group(0), receive_buffer))
else:
has_more = False
# got a line - now try to match it against expected line
if len(received_lines) > 0:
received_packet = received_lines.pop(0)
context = sequence_entry.assert_match(asserter, received_packet, context=context)
# Move on to next sequence entry as needed. Some sequence entries support executing multiple
# times in different states (for looping over query/response packets).
if sequence_entry.is_consumed():
if len(test_sequence.entries) > 0:
sequence_entry = test_sequence.entries.pop(0)
with socket_packet_pump.SocketPacketPump(sock, logger) as pump:
# Grab the first sequence entry.
sequence_entry = test_sequence.entries.pop(0)
# While we have an active sequence entry, send messages
# destined for the stub and collect/match/process responses
# expected from the stub.
while sequence_entry:
if sequence_entry.is_send_to_remote():
# This is an entry to send to the remote debug monitor.
send_packet = sequence_entry.get_send_packet()
if logger:
logger.info("sending packet to remote: %s" % send_packet)
sock.sendall(send_packet)
else:
sequence_entry = None
return context
# This is an entry expecting to receive content from the remote debug monitor.
# We'll pull from (and wait on) the queue appropriate for the type of matcher.
# We keep separate queues for process output (coming from non-deterministic
# $O packet division) and for all other packets.
if sequence_entry.is_output_matcher():
try:
# Grab next entry from the output queue.
content = pump.output_queue().get(True, timeout_seconds)
except Queue.Empty:
if logger:
logger.warning("timeout waiting for stub output (accumulated output:{})".format(pump.get_accumulated_output()))
raise Exception("timed out while waiting for output match (accumulated output: {})".format(pump.get_accumulated_output()))
else:
try:
content = pump.packet_queue().get(True, timeout_seconds)
except Queue.Empty:
if logger:
logger.warning("timeout waiting for packet match (receive buffer: {})".format(pump.get_receive_buffer()))
raise Exception("timed out while waiting for packet match (receive buffer: {})".format(pump.get_receive_buffer()))
# Give the sequence entry the opportunity to match the content.
# Output matchers might match or pass after more output accumulates.
# Other packet types generally must match.
asserter.assertIsNotNone(content)
context = sequence_entry.assert_match(asserter, content, context=context)
# Move on to next sequence entry as needed. Some sequence entries support executing multiple
# times in different states (for looping over query/response packets).
if sequence_entry.is_consumed():
if len(test_sequence.entries) > 0:
sequence_entry = test_sequence.entries.pop(0)
else:
sequence_entry = None
# Fill in the O_content entries.
context["O_count"] = 1
context["O_content"] = pump.get_accumulated_output()
return context
def gdbremote_hex_encode_string(str):
output = ''
@@ -271,7 +254,6 @@ def gdbremote_packet_encode_string(str):
checksum += ord(c)
return '$' + str + '#{0:02x}'.format(checksum % 256)
def build_gdbremote_A_packet(args_list):
"""Given a list of args, create a properly-formed $A packet containing each arg.
"""
@@ -327,8 +309,11 @@ def parse_threadinfo_response(response_packet):
# Return list of thread ids
return [int(thread_id_hex,16) for thread_id_hex in response_packet.split(",") if len(thread_id_hex) > 0]
class GdbRemoteEntryBase(object):
def is_output_matcher(self):
return False
class GdbRemoteEntry(object):
class GdbRemoteEntry(GdbRemoteEntryBase):
def __init__(self, is_send_to_remote=True, exact_payload=None, regex=None, capture=None, expect_captures=None):
"""Create an entry representing one piece of the I/O to/from a gdb remote debug monitor.
@@ -446,7 +431,7 @@ class GdbRemoteEntry(object):
else:
raise Exception("Don't know how to match a remote-sent packet when exact_payload isn't specified.")
class MultiResponseGdbRemoteEntry(object):
class MultiResponseGdbRemoteEntry(GdbRemoteEntryBase):
"""Represents a query/response style packet.
Assumes the first item is sent to the gdb remote.
@@ -557,6 +542,99 @@ class MultiResponseGdbRemoteEntry(object):
self._is_send_to_remote = True
return context
class MatchRemoteOutputEntry(GdbRemoteEntryBase):
"""Waits for output from the debug monitor to match a regex or time out.
This entry type tries to match the accumulated output against a provided
regex each time new gdb remote output arrives. If the output does not
match the regex within the given timeframe, the entry fails the playback
session. If the regex does match, any capture fields are recorded in the
context.
Settings accepted from params:
regex: required. Specifies a compiled regex object that must either succeed
with re.match or re.search (see regex_mode below) within the given timeout
(see timeout_seconds below) or cause the playback to fail.
regex_mode: optional. Available values: "match" or "search". If "match", the entire
stub output as collected so far must match the regex. If "search", then the regex
must match starting somewhere within the output text accumulated thus far.
Default: "match" (i.e. the regex must match the entirety of the accumulated output
buffer, so unexpected text will generally fail the match).
capture: optional. If specified, is a dictionary of regex match group indices (should start
with 1) to variable names that will store the capture group indicated by the
index. For example, {1:"thread_id"} will store capture group 1's content in the
context dictionary where "thread_id" is the key and the match group value is
the value. The value stored off can be used later in an expect_captures expression.
This arg only makes sense when regex is specified.
"""
def __init__(self, regex=None, regex_mode="match", capture=None):
self._regex = regex
self._regex_mode = regex_mode
self._capture = capture
self._matched = False
if not self._regex:
raise Exception("regex cannot be None")
if not self._regex_mode in ["match", "search"]:
raise Exception("unsupported regex mode \"{}\": must be \"match\" or \"search\"".format(self._regex_mode))
def is_output_matcher(self):
return True
def is_send_to_remote(self):
# This is always a "wait for remote" command.
return False
def is_consumed(self):
return self._matched
def assert_match(self, asserter, accumulated_output, context):
# Validate args.
if not accumulated_output:
raise Exception("accumulated_output cannot be none")
if not context:
raise Exception("context cannot be none")
# Validate that we haven't already matched.
if self._matched:
raise Exception("invalid state - already matched, attempting to match again")
# If we don't have any content yet, we don't match.
if len(accumulated_output) < 1:
return context
# Check if we match
if self._regex_mode == "match":
match = self._regex.match(accumulated_output)
elif self._regex_mode == "search":
match = self._regex.search(accumulated_output)
else:
raise Exception("Unexpected regex mode: {}".format(self._regex_mode))
# If we don't match, wait to try again after next $O content, or time out.
if not match:
# print "re pattern \"{}\" did not match against \"{}\"".format(self._regex.pattern, accumulated_output)
return context
# We do match.
self._matched = True
# print "re pattern \"{}\" matched against \"{}\"".format(self._regex.pattern, accumulated_output)
# Collect up any captures into the context.
if self._capture:
# Handle captures.
for group_index, var_name in self._capture.items():
capture_text = match.group(group_index)
if not capture_text:
raise Exception("No content for group index {}".format(group_index))
context[var_name] = capture_text
return context
class GdbRemoteTestSequence(object):
_LOG_LINE_REGEX = re.compile(r'^.*(read|send)\s+packet:\s+(.+)$')
@@ -569,21 +647,21 @@ class GdbRemoteTestSequence(object):
for line in log_lines:
if type(line) == str:
# Handle log line import
if self.logger:
self.logger.debug("processing log line: {}".format(line))
# if self.logger:
# self.logger.debug("processing log line: {}".format(line))
match = self._LOG_LINE_REGEX.match(line)
if match:
playback_packet = match.group(2)
direction = match.group(1)
if _is_packet_lldb_gdbserver_input(direction, remote_input_is_read):
# Handle as something to send to the remote debug monitor.
if self.logger:
self.logger.info("processed packet to send to remote: {}".format(playback_packet))
# if self.logger:
# self.logger.info("processed packet to send to remote: {}".format(playback_packet))
self.entries.append(GdbRemoteEntry(is_send_to_remote=True, exact_payload=playback_packet))
else:
# Log line represents content to be expected from the remote debug monitor.
if self.logger:
self.logger.info("receiving packet from llgs, should match: {}".format(playback_packet))
# if self.logger:
# self.logger.info("receiving packet from llgs, should match: {}".format(playback_packet))
self.entries.append(GdbRemoteEntry(is_send_to_remote=False,exact_payload=playback_packet))
else:
raise Exception("failed to interpret log line: {}".format(line))
@@ -602,16 +680,26 @@ class GdbRemoteTestSequence(object):
if _is_packet_lldb_gdbserver_input(direction, remote_input_is_read):
# Handle as something to send to the remote debug monitor.
if self.logger:
self.logger.info("processed dict sequence to send to remote")
# if self.logger:
# self.logger.info("processed dict sequence to send to remote")
self.entries.append(GdbRemoteEntry(is_send_to_remote=True, regex=regex, capture=capture, expect_captures=expect_captures))
else:
# Log line represents content to be expected from the remote debug monitor.
if self.logger:
self.logger.info("processed dict sequence to match receiving from remote")
# if self.logger:
# self.logger.info("processed dict sequence to match receiving from remote")
self.entries.append(GdbRemoteEntry(is_send_to_remote=False, regex=regex, capture=capture, expect_captures=expect_captures))
elif entry_type == "multi_response":
self.entries.append(MultiResponseGdbRemoteEntry(line))
elif entry_type == "output_match":
regex = line.get("regex", None)
# Compile the regex.
if regex and (type(regex) == str):
regex = re.compile(regex)
regex_mode = line.get("regex_mode", "match")
capture = line.get("capture", None)
self.entries.append(MatchRemoteOutputEntry(regex=regex, regex_mode=regex_mode, capture=capture))
else:
raise Exception("unknown entry type \"%s\"" % entry_type)

View File

@@ -1,24 +1,88 @@
#include <cstdlib>
#include <cstring>
#include <iostream>
#include <errno.h>
#include <inttypes.h>
#include <pthread.h>
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <vector>
#if defined(__linux__)
#include <sys/syscall.h>
#endif
static const char *const RETVAL_PREFIX = "retval:";
static const char *const SLEEP_PREFIX = "sleep:";
static const char *const STDERR_PREFIX = "stderr:";
static const char *const THREAD_PREFIX = "thread:";
static const char *const THREAD_COMMAND_NEW = "new";
static const char *const THREAD_COMMAND_PRINT_IDS = "print-ids";
static bool g_print_thread_ids = false;
static pthread_mutex_t g_print_mutex = PTHREAD_MUTEX_INITIALIZER;
static void
print_thread_id ()
{
// Put in the right magic here for your platform to spit out the thread id (tid) that debugserver/lldb-gdbserver would see as a TID.
// Otherwise, let the else clause print out the unsupported text so that the unit test knows to skip verifying thread ids.
#if defined(__APPLE__)
printf ("%" PRIx64, static_cast<uint64_t> (pthread_mach_thread_np(pthread_self())));
#elif defined (__linux__)
// This is a call to gettid() via syscall.
printf ("%" PRIx64, static_cast<uint64_t> (syscall (__NR_gettid)));
#else
printf("{no-tid-support}");
#endif
}
static void
signal_handler (int signo)
{
switch (signo)
{
case SIGUSR1:
// Print notice that we received the signal on a given thread.
pthread_mutex_lock (&g_print_mutex);
printf ("received SIGUSR1 on thread id: ");
print_thread_id ();
printf ("\n");
pthread_mutex_unlock (&g_print_mutex);
// Reset the signal handler.
sig_t sig_result = signal (SIGUSR1, signal_handler);
if (sig_result == SIG_ERR)
{
fprintf(stderr, "failed to set signal handler: errno=%d\n", errno);
exit (1);
}
break;
}
}
static void*
thread_func (void *arg)
{
static int s_thread_index = 1;
// For now, just sleep for a few seconds.
// std::cout << "thread " << pthread_self() << ": created" << std::endl;
int sleep_seconds_remaining = 5;
const int this_thread_index = s_thread_index++;
if (g_print_thread_ids)
{
pthread_mutex_lock (&g_print_mutex);
printf ("thread %d id: ", this_thread_index);
print_thread_id ();
printf ("\n");
pthread_mutex_unlock (&g_print_mutex);
}
int sleep_seconds_remaining = 20;
while (sleep_seconds_remaining > 0)
{
sleep_seconds_remaining = sleep (sleep_seconds_remaining);
@@ -33,12 +97,21 @@ int main (int argc, char **argv)
std::vector<pthread_t> threads;
int return_value = 0;
// Set the signal handler.
sig_t sig_result = signal (SIGUSR1, signal_handler);
if (sig_result == SIG_ERR)
{
fprintf(stderr, "failed to set signal handler: errno=%d\n", errno);
exit (1);
}
// Process command line args.
for (int i = 1; i < argc; ++i)
{
if (std::strstr (argv[i], STDERR_PREFIX))
{
// Treat remainder as text to go to stderr.
std::cerr << (argv[i] + strlen (STDERR_PREFIX)) << std::endl;
fprintf (stderr, "%s\n", (argv[i] + strlen (STDERR_PREFIX)));
}
else if (std::strstr (argv[i], RETVAL_PREFIX))
{
@@ -68,11 +141,23 @@ int main (int argc, char **argv)
const int err = ::pthread_create (&new_thread, NULL, thread_func, NULL);
if (err)
{
std::cerr << "pthread_create() failed with error code " << err << std::endl;
fprintf (stderr, "pthread_create() failed with error code %d\n", err);
exit (err);
}
threads.push_back (new_thread);
}
else if (std::strstr (argv[i] + strlen(THREAD_PREFIX), THREAD_COMMAND_PRINT_IDS))
{
// Turn on thread id announcing.
g_print_thread_ids = true;
// And announce us.
pthread_mutex_lock (&g_print_mutex);
printf ("thread 0 id: ");
print_thread_id ();
printf ("\n");
pthread_mutex_unlock (&g_print_mutex);
}
else
{
// At this point we don't do anything else with threads.
@@ -82,7 +167,7 @@ int main (int argc, char **argv)
else
{
// Treat the argument as text for stdout.
std::cout << argv[i] << std::endl;
printf("%s\n", argv[i]);
}
}
@@ -92,9 +177,7 @@ int main (int argc, char **argv)
void *thread_retval = NULL;
const int err = ::pthread_join (*it, &thread_retval);
if (err != 0)
{
std::cerr << "pthread_join() failed with error code " << err << std::endl;
}
fprintf (stderr, "pthread_join() failed with error code %d\n", err);
}
return return_value;

View File

@@ -0,0 +1,156 @@
import Queue
import re
import select
import threading
def _handle_output_packet_string(packet_contents):
if (not packet_contents) or (len(packet_contents) < 1):
return None
elif packet_contents[0] != "O":
return None
elif packet_contents == "OK":
return None
else:
return packet_contents[1:].decode("hex")
class SocketPacketPump(object):
"""A threaded packet reader that partitions packets into two streams.
All incoming $O packet content is accumulated with the current accumulation
state put into the OutputQueue.
All other incoming packets are placed in the packet queue.
A select thread can be started and stopped, and runs to place packet
content into the two queues.
"""
_GDB_REMOTE_PACKET_REGEX = re.compile(r'^\$([^\#]*)#[0-9a-fA-F]{2}')
def __init__(self, pump_socket, logger=None):
if not pump_socket:
raise Exception("pump_socket cannot be None")
self._output_queue = Queue.Queue()
self._packet_queue = Queue.Queue()
self._thread = None
self._stop_thread = False
self._socket = pump_socket
self._logger = logger
self._receive_buffer = ""
self._accumulated_output = ""
def __enter__(self):
"""Support the python 'with' statement.
Start the pump thread."""
self.start_pump_thread()
return self
def __exit__(self, exit_type, value, traceback):
"""Support the python 'with' statement.
Shut down the pump thread."""
self.stop_pump_thread()
def start_pump_thread(self):
if self._thread:
raise Exception("pump thread is already running")
self._stop_thread = False
self._thread = threading.Thread(target=self._run_method)
self._thread.start()
def stop_pump_thread(self):
self._stop_thread = True
if self._thread:
self._thread.join()
def output_queue(self):
return self._output_queue
def packet_queue(self):
return self._packet_queue
def _process_new_bytes(self, new_bytes):
if not new_bytes:
return
if len(new_bytes) < 1:
return
# Add new bytes to our accumulated unprocessed packet bytes.
self._receive_buffer += new_bytes
# Parse fully-formed packets into individual packets.
has_more = len(self._receive_buffer) > 0
while has_more:
if len(self._receive_buffer) <= 0:
has_more = False
# handle '+' ack
elif self._receive_buffer[0] == "+":
self._packet_queue.put("+")
self._receive_buffer = self._receive_buffer[1:]
if self._logger:
self._logger.debug(
"parsed packet from stub: +\n" +
"new receive_buffer: {}".format(
self._receive_buffer))
else:
packet_match = self._GDB_REMOTE_PACKET_REGEX.match(
self._receive_buffer)
if packet_match:
# Our receive buffer matches a packet at the
# start of the receive buffer.
new_output_content = _handle_output_packet_string(
packet_match.group(1))
if new_output_content:
# This was an $O packet with new content.
self._accumulated_output += new_output_content
self._output_queue.put(self._accumulated_output)
else:
# Any packet other than $O.
self._packet_queue.put(packet_match.group(0))
# Remove the parsed packet from the receive
# buffer.
self._receive_buffer = self._receive_buffer[
len(packet_match.group(0)):]
if self._logger:
self._logger.debug("parsed packet from stub: " +
packet_match.group(0))
self._logger.debug("new receive_buffer: " +
self._receive_buffer)
else:
# We don't have enough in the receive buffer to make a full
# packet. Stop trying until we read more.
has_more = False
def _run_method(self):
self._receive_buffer = ""
self._accumulated_output = ""
if self._logger:
self._logger.info("socket pump starting")
# Keep looping around until we're asked to stop the thread.
while not self._stop_thread:
can_read, _, _ = select.select([self._socket], [], [], 0)
if can_read and self._socket in can_read:
try:
new_bytes = self._socket.recv(4096)
if self._logger and new_bytes and len(new_bytes) > 0:
self._logger.debug("pump received bytes: {}".format(new_bytes))
except:
# Likely a closed socket. Done with the pump thread.
if self._logger:
self._logger.debug("socket read failed, stopping pump read thread")
break
self._process_new_bytes(new_bytes)
if self._logger:
self._logger.info("socket pump exiting")
def get_accumulated_output(self):
return self._accumulated_output
def get_receive_buffer(self):
return self._receive_buffer
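
A quick sanity sketch of the partitioning logic, driving
_process_new_bytes directly rather than through the reader thread (the
placeholder socket is never read; the checksums are real):

    import socket
    from socket_packet_pump import SocketPacketPump

    pump = SocketPacketPump(socket.socket())
    # "hi\n" hex-encodes to 68690a; $O packets carry hex-encoded output.
    pump._process_new_bytes("+$O68690a#bd$OK#9a")
    print pump.output_queue().get(False)  # "hi\n" -- accumulated $O text
    print pump.packet_queue().get(False)  # "+"    -- the ack
    print pump.packet_queue().get(False)  # "$OK#9a"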