def test_controller(self): """Build a simple C program, then run it with GdbController and verify the output is parsed as expected""" # Initialize object that manages gdb subprocess gdbmi = GdbController() c_hello_world_binary = self._get_c_program("hello", "pygdbmiapp.a") if USING_WINDOWS: c_hello_world_binary = c_hello_world_binary.replace("\\", "/") # Load the binary and its symbols in the gdb subprocess responses = gdbmi.write("-file-exec-and-symbols %s" % c_hello_world_binary, timeout_sec=1) # Verify output was parsed into a list of responses assert len(responses) != 0 response = responses[0] assert set(response.keys()) == { "message", "type", "payload", "stream", "token" } assert response["message"] == "thread-group-added" assert response["type"] == "notify" assert response["payload"] == {"id": "i1"} assert response["stream"] == "stdout" assert response["token"] is None responses = gdbmi.write( ["-file-list-exec-source-files", "-break-insert main"], timeout_sec=3) assert len(responses) != 0 responses = gdbmi.write(["-exec-run", "-exec-continue"], timeout_sec=3) # Test GdbTimeoutError exception got_timeout_exception = False try: gdbmi.get_gdb_response(timeout_sec=0) except GdbTimeoutError: got_timeout_exception = True assert got_timeout_exception is True # Close gdb subprocess responses = gdbmi.exit() assert responses is None assert gdbmi.gdb_process is None # Test ValueError exception self.assertRaises(ValueError, gdbmi.write, "-file-exec-and-symbols %s" % c_hello_world_binary) # Respawn and test signal handling gdbmi.spawn_new_gdb_subprocess() responses = gdbmi.write("-file-exec-and-symbols %s" % c_hello_world_binary, timeout_sec=1) responses = gdbmi.write(["-break-insert main", "-exec-run"])
def __init__(self, gdb_exe, target_img, symbol_list):
    """Launch gdb in batch mode to resolve the address and type of each
    symbol in ``symbol_list`` inside ``target_img``.

    Populates ``self.symbols``: a dict mapping symbol name ->
    ``SymbolLookup.SymbolAddressType(address, type)``.

    Parameters
    ----------
    gdb_exe : str
        Path to the gdb executable.
    target_img : str
        Path to the target executable whose symbols are looked up.
    symbol_list : list of str
        Names of the symbols to resolve.

    Raises
    ------
    Exception
        If gdb's response list does not have the expected shape.
    """
    # Initial args for gdb: the target executable, plus '-batch' which
    # causes gdb to exit when done processing the initial args.
    gdb_args = [target_img, '-batch']
    # One '-ex' arg per symbol: print the address of the symbol.
    for sym in symbol_list:
        gdb_args.append('-ex=p(&{0})'.format(sym))
    # For more robust error checking, print "done" at the end, from gdb to the response list.
    gdb_args.append('--ex=p("done")')
    # Create a gdb object, launch gdb and get the response(s).
    gdbmi = GdbController(
        gdb_path=gdb_exe,
        gdb_args=gdb_args,
        verbose=False)
    responses = gdbmi.get_gdb_response(timeout_sec=2)
    # Validate: one response per symbol plus the trailing '$N = "done"',
    # where N is len(responses).
    if (len(responses) != (len(symbol_list) + 1)
            or responses[-1]['payload'] != '${0} = "done"'.format(
                len(responses))):
        raise Exception('Unexpected response from gdb.')
    del responses[-1]  # drop the trailing "done" marker
    # Compose a dictionary of results, indexed by the symbol name.
    self.symbols = {}
    # For a payload like
    # '$1 = (float *) 0x20000a64 <HeaterControl::htrCurrentTotal_amps>',
    # capture the 1-based response index, the type ('float *') and the
    # hex address ('20000a64').  Raw string: '\$' and '\d' are regex
    # escapes, not string escapes.
    payload_re = re.compile(r'\$(\d*?) = \((.*?)\) 0x([0-9A-Fa-f]*) ')
    for each_response in responses:
        response_payload = each_response.get('payload', '')
        match = payload_re.search(response_payload or '')
        if match is None:
            # Skip unparsable entries instead of falling through with
            # stale/undefined locals (the previous bare 'except' did not
            # stop the dict assignment below from running).
            print('Can\'t parse {0}'.format(response_payload))
            continue
        index = int(match.group(1)) - 1
        address = int(match.group(3), 16)
        sym_type = match.group(2)  # renamed: don't shadow builtin 'type'
        # Add this response to the dictionary.
        self.symbols[symbol_list[index]] = SymbolLookup.SymbolAddressType(
            address, sym_type)
    # self.symbols is now complete.
class RRController:
    """Drive an rr replay session through GDB/MI for a recorded
    instruction trace, supporting forward and reverse navigation."""

    def __init__(self, binary_path: str, trace: List[Instruction]) -> None:
        self.binary_path = binary_path
        self.trace = trace
        self.rr = GdbController(
            gdb_path=DEFAULT_RR_PATH,
            gdb_args=[binary_path],
            rr=True)
        self.current_index = 0

    def eval_expression(self, expr: str) -> None:
        """Evaluate *expr* in the debuggee and print the raw MI result."""
        result = self.rr.write("-data-evaluate-expression %s" % expr,
                               timeout_sec=99999)
        print(result)

    def write_request(self, req: str, get_resp: bool = True,
                      **kwargs: Any) -> List[Dict[str, Any]]:
        """Send *req* to rr; optionally drain all pending responses."""
        timeout_sec = kwargs.pop("timeout_sec", 10)
        kwargs["read_response"] = False
        self.rr.write(req, timeout_sec=timeout_sec, **kwargs)
        collected = []  # type: List[Dict[str, Any]]
        if get_resp:
            # Keep pulling until the controller reports nothing left.
            while True:
                try:
                    collected += self.rr.get_gdb_response()
                except Exception:
                    break
        return collected

    def count_occurence(self, idx: int) -> int:
        """Count # of addr -> target in trace"""
        target_ip = self.trace[idx].ip
        direction = 1 if idx > self.current_index else -1
        return sum(
            1 for pos in range(self.current_index, idx, direction)
            if self.trace[pos].ip == target_ip)

    def run_until(self, idx: int) -> None:
        """Continue (or reverse-continue) until trace entry *idx* is hit."""
        target_ip = self.trace[idx].ip
        hits = self.count_occurence(idx)
        cont_ins = "c" if idx > self.current_index else "reverse-cont"
        # Temporary breakpoint at the target address, continue past the
        # required number of hits, then remove the breakpoint.
        self.write_request("b *{}".format(hex(target_ip)),
                           get_resp=False,
                           timeout_sec=100)
        self.write_request("{} {}".format(cont_ins, hits),
                           get_resp=False,
                           timeout_sec=10000)
        self.write_request("clear *{}".format(hex(target_ip)),
                           get_resp=False,
                           timeout_sec=100)
def gdb_resp_reader(out_queue: Queue, gdbmi: GdbController):
    """Pump parsed GDB/MI records from *gdbmi* into *out_queue* until the
    gdb subprocess disappears (reader-thread entry point)."""
    try:
        while True:
            # Short poll so the loop notices subprocess death quickly.
            for record in gdbmi.get_gdb_response(timeout_sec=0.01,
                                                 raise_error_on_timeout=False):
                out_queue.put(record)
    except (NoGdbProcessError, OSError):
        # Normal shutdown path: gdb exited underneath us.
        debug(f'gdb_resp_reader Terminated\n')
    except:
        # Anything else is unexpected — log the full traceback.
        debug_exception()
class RRController(object): def __init__(self, binary_path, trace): self.binary_path = binary_path self.trace = trace self.rr = GdbController( gdb_path=DEFAULT_RR_PATH, gdb_args=[binary_path], rr=True, ) self.current_index = 0 def eval_expression(self, expr): # type: (str) -> None res = self.rr.write( "-data-evaluate-expression %s" % expr, timeout_sec=99999) print(res) def write_request(self, req, get_resp=True, **kwargs): timeout_sec = kwargs.pop('timeout_sec', 10) kwargs['read_response'] = False self.rr.write(req, timeout_sec=timeout_sec, **kwargs) resp = [] if get_resp: while True: try: resp += self.rr.get_gdb_response() except: break return resp def count_occurence(self, idx): """Count # of addr -> target in trace""" event = self.trace[idx] addr = event.addr cnt = 0 step = 1 if idx > self.current_index else -1 for i in range(self.current_index, idx, step): e = self.trace[i] if e.addr == addr: cnt += 1 def run_until(self, idx): event = self.trace[idx] addr = event.addr n = self.count_occurence(idx) cont_ins = 'c' if idx > self.current_index else 'reverse-cont' self.write_request('b *{}'.format(hex(addr)), get_resp=False, timeout_sec=100) self.write_request('{} {}'.format(cont_ins, n), get_resp=False, timeout_sec=10000) self.write_request('clear *{}'.format(hex(addr)), get_resp=False, timeout_sec=100)
class CoredumpGDB:
    """Wrap a GDB/MI session opened on a core dump to extract frames,
    registers, symbols and function ranges by parsing gdb console output."""

    def __init__(self, elf: ELF, coredump: Coredump,
                 lib_text_addrs: Dict[str, int]) -> None:
        # lib_text_addrs: path -> .text load address for extra symbol files.
        self.coredump = coredump
        self.elf = elf
        self.corefile = self.coredump.file.name
        self.execfile = self.elf.file.name
        self.gdb = GdbController(gdb_args=["--quiet", "--interpreter=mi2"])
        self.lib_text_addrs = lib_text_addrs
        # Drain gdb's startup chatter before issuing real commands.
        self.get_response()
        self.setup_gdb()

    def setup_gdb(self) -> None:
        # Point gdb at the executable and the core file, then register
        # each extra symbol file at its text address.
        self.write_request("file {}".format(self.execfile))
        self.write_request("core-file {}".format(self.corefile))
        for path, value in self.lib_text_addrs.items():
            self.write_request("add-symbol-file {} {}".format(path, value))
            # Answer gdb's add-symbol-file confirmation prompt.
            self.write_request("y")

    def get_response(self) -> List[Dict[str, Any]]:
        """Drain every pending MI record; stops when the controller raises."""
        resp = []  # type: List[Dict[str, Any]]
        while True:
            try:
                resp += self.gdb.get_gdb_response()
            except Exception:
                break
        return resp

    def write_request(self, req: str, **kwargs: Any) -> List[Dict[str, Any]]:
        """Send *req* and return all responses produced for it."""
        self.gdb.write(req, timeout_sec=1, read_response=False, **kwargs)
        resp = self.get_response()
        return resp

    def parse_frame(self, r: str) -> Dict[str, Any]:
        """Parse one 'where' backtrace line into index/addr/func/args/file/line."""
        attrs = {}  # type: Dict[str, Any]
        # NOTE: #n addr in func (args=args[ <name>][@entry=v]) at source_code[:line]\n
        r = r.replace("\\n", "")
        # '#n' -> frame index (strip the leading '#').
        attrs["index"] = r.partition(" ")[0][1:]
        r = r.partition(" ")[2][1:]
        attrs["addr"] = r.partition(" ")[0]
        r = r.partition(" ")[2]
        # Skip the literal 'in' token.
        r = r.partition(" ")[2]
        attrs["func"] = r.partition(" ")[0]
        r = r.partition(" ")[2]
        # Everything inside '( ... )' is the comma-separated argument list.
        args = r.partition(")")[0][1:].split(", ")
        args_list = []

        # NOTE: remove <xxx>
        def remove_comment(arg: str) -> str:
            if arg.find("<") != -1:
                arg = arg.partition("<")[0]
                arg = arg.replace(" ", "")
            return arg

        for arg in args:
            # 'name=value@entry=v' carries the value at function entry too.
            if arg.find("@") != -1:
                name, _, entry_ = arg.partition("@")
            else:
                name = arg
                entry_ = ""
            name, _, value = name.partition("=")
            value = remove_comment(value)
            if entry_:
                _, _, entry = entry_.partition("=")
                entry = remove_comment(entry)
                args_list.append([name, value, entry])
            else:
                args_list.append([name, value, ""])
        attrs["args"] = args_list
        r = r.partition(")")[2]
        # Skip ' at ' to reach 'source[:line]'.
        r = r.partition(" ")[2]
        r = r.partition(" ")[2]
        if r.find(":") != -1:
            source, _, line = r.partition(":")
        else:
            source = r
            line = "?"
        attrs["file"] = source
        attrs["line"] = line
        return attrs

    def parse_addr(self, r: str) -> int:
        """Extract the hex address from a '$n = (...) 0xaddr <name>' payload; 0 if absent."""
        # $n = (...) 0xaddr <name>
        l = r.split(" ")
        for blk in l:
            if blk.startswith("0x"):
                blk = blk.replace("\\t", "").replace("\\n", "")
                return int(blk, 16)
        return 0

    def parse_offset(self, r: str) -> int:
        """Extract N from a disassembly line 'addr <+N>: inst'; 0 if absent."""
        # addr <+offset>: inst
        l = r.split(" ")
        for blk in l:
            if blk.startswith("<+"):
                idx = blk.find(">")
                return int(blk[2:idx])
        return 0

    def backtrace(self) -> List[Dict[str, Any]]:
        """Run 'where' and return one parsed dict per '#'-prefixed frame line."""
        resp = self.write_request("where")
        bt = []
        for r in resp:
            payload = r["payload"]
            if payload and payload[0] == "#":
                print(payload.replace("\\n", ""))
                bt.append(self.parse_frame(payload))
        return bt

    def get_symbol(self, addr: int) -> str:
        """Return gdb's 'info symbol' payload for *addr*."""
        resp = self.write_request("info symbol {}".format(addr))
        # NOTE(review): assumes the answer is always the second record — confirm.
        return resp[1]["payload"]

    def get_reg(self, reg_name: str) -> int:
        """Return the value of register *reg_name* via 'info reg'; 0 if not found."""
        resp = self.write_request("info reg {}".format(reg_name))
        for r in resp:
            if "payload" in r.keys():
                if r["payload"].startswith(reg_name):
                    vs = r["payload"].split(" ")
                    for v in vs:
                        if v.startswith("0x"):
                            v = v.replace("\\n", "").replace("\\t", "")
                            return int(v, 16)
        return 0

    def get_stack_base(self, n: int) -> Tuple[int, int]:
        """Select frame *n* and return its (rsp, rbp) register values."""
        self.write_request("select-frame {}".format(n))
        rsp_value = self.get_reg("rsp")
        rbp_value = self.get_reg("rbp")
        return rsp_value, rbp_value

    def get_func_range(self, name: str) -> List[int]:
        """Return [start_address, size] of function *name* using print/disass."""
        # FIXME: Not a good idea. Maybe some gdb extension?
        r1 = self.write_request("print &{}".format(name))
        addr = 0
        for r in r1:
            if r.get("payload") is not None:
                payload = r["payload"]
                if isinstance(payload, str) and payload.startswith("$"):
                    addr = self.parse_addr(payload)
                    break
        r2 = self.write_request("disass {}".format(name))
        size = 0
        # Walk the disassembly backwards: last '<+N>' is the largest offset.
        for r in r2[::-1]:
            if r.get("payload") is not None:
                payload = r["payload"]
                if isinstance(payload, str) and "<+" in payload:
                    size = self.parse_offset(payload)
                    break
        return [addr, size + 1]
class Gdb:
    """Wrapper around a GDB/MI subprocess (pygdbmi) that tracks the target's
    run state and stop reason while exposing high-level debug operations."""

    # Target states
    TARGET_STATE_UNKNOWN = 0
    TARGET_STATE_STOPPED = 1
    TARGET_STATE_RUNNING = 2
    # Target stop reasons
    TARGET_STOP_REASON_UNKNOWN = 0
    TARGET_STOP_REASON_SIGINT = 1
    TARGET_STOP_REASON_SIGTRAP = 2
    TARGET_STOP_REASON_BP = 3
    TARGET_STOP_REASON_WP = 4
    TARGET_STOP_REASON_STEPPED = 5

    @staticmethod
    def get_logger():
        return logging.getLogger('Gdb')

    def __init__(self, gdb=None):
        # Start gdb process
        self._logger = self.get_logger()
        self._gdbmi = GdbController(gdb_path=gdb)
        # Records received but not yet consumed by _parse_mi_resp.
        self._resp_cache = []
        self._target_state = self.TARGET_STATE_UNKNOWN
        self._target_stop_reason = self.TARGET_STOP_REASON_UNKNOWN
        self._curr_frame = None

    def _on_notify(self, rec):
        # Update cached target state / stop reason from one MI notify record.
        if rec['message'] == 'stopped':
            self._target_state = self.TARGET_STATE_STOPPED
            self._curr_frame = rec['payload']['frame']
            if 'reason' in rec['payload']:
                if rec['payload']['reason'] == 'breakpoint-hit':
                    self._target_stop_reason = self.TARGET_STOP_REASON_BP
                elif rec['payload']['reason'] == 'watchpoint-trigger':
                    self._target_stop_reason = self.TARGET_STOP_REASON_WP
                elif rec['payload']['reason'] == 'end-stepping-range':
                    self._target_stop_reason = self.TARGET_STOP_REASON_STEPPED
                elif rec['payload']['reason'] == 'signal-received':
                    if rec['payload']['signal-name'] == 'SIGINT':
                        self._target_stop_reason = self.TARGET_STOP_REASON_SIGINT
                    elif rec['payload']['signal-name'] == 'SIGTRAP':
                        self._target_stop_reason = self.TARGET_STOP_REASON_SIGTRAP
                    else:
                        self._logger.warning('Unknown signal received "%s"!',
                                             rec['payload']['signal-name'])
                        self._target_stop_reason = self.TARGET_STOP_REASON_UNKNOWN
                else:
                    self._logger.warning('Unknown target stop reason "%s"!',
                                         rec['payload']['reason'])
                    self._target_stop_reason = self.TARGET_STOP_REASON_UNKNOWN
            else:
                self._target_stop_reason = self.TARGET_STOP_REASON_UNKNOWN
        elif rec['message'] == 'running':
            self._target_state = self.TARGET_STATE_RUNNING

    def _parse_mi_resp(self, new_resp, new_tgt_state):
        """Consume MI records (cached first, then *new_resp*); returns the
        (result message, result payload) pair, caching unprocessed records."""
        result = None
        result_body = None
        old_state = self._target_state
        # if any cached records go first
        resp = self._resp_cache + new_resp
        processed_recs = 0
        for rec in resp:
            processed_recs += 1
            if rec['type'] == 'log':
                self._logger.debug('LOG: %s', pformat(rec['payload']))
            elif rec['type'] == 'console':
                self._logger.info('CONS: %s', pformat(rec['payload']))
            elif rec['type'] == 'notify':
                self._logger.info('NOTIFY: %s %s', rec['message'],
                                  pformat(rec['payload']))
                self._on_notify(rec)
                # stop upon result receiption if we do not expect target state change
                if self._target_state != old_state and self._target_state == new_tgt_state:
                    # self._logger.debug('new target state %d', self._target_state)
                    break
            elif rec['type'] == 'result':
                self._logger.debug('RESULT: %s %s', rec['message'],
                                   pformat(rec['payload']))
                result = rec['message']
                result_body = rec['payload']
                # stop upon result receiption if we do not expect target state change
                if not new_tgt_state:
                    break
        # cache unprocessed records
        self._resp_cache = resp[processed_recs:]
        # self._logger.debug('cached recs: %s', pformat(self._resp_cache))
        return result, result_body

    def _mi_cmd_run(self, cmd, new_tgt_state=None, tmo=5):
        """Send an MI command and wait for its result record.

        new_tgt_state: target state the command is expected to cause (or None).
        tmo: seconds before DebuggerTargetStateTimeoutError; 0/None retries forever.
        """
        self._logger.debug('MI->: %s', cmd)
        response = []
        if tmo:
            end = time.time() + tmo
            try:
                response = self._gdbmi.write(cmd, timeout_sec=tmo)
            except:
                # Write failed — make sure gdb itself is still alive.
                self._gdbmi.verify_valid_gdb_subprocess()
        else:
            # No timeout: keep re-issuing until something comes back.
            while len(response) == 0:
                response = self._gdbmi.write(cmd,
                                             raise_error_on_timeout=False)
        self._logger.debug('MI<-:\n%s', pformat(response))
        res, res_body = self._parse_mi_resp(response, new_tgt_state)
        while not res:
            # check for result report from GDB
            response = self._gdbmi.get_gdb_response(
                1, raise_error_on_timeout=False)
            if len(response) == 0:
                if tmo and (time.time() >= end):
                    raise DebuggerTargetStateTimeoutError(
                        'Failed to wait for completion of command "%s" / %s!'
                        % (cmd, tmo))
            else:
                self._logger.debug('MI<-:\n%s', pformat(response))
                res, res_body = self._parse_mi_resp(response, new_tgt_state)
        return res, res_body

    def target_select(self, tgt_type, tgt_params):
        # -target-select type parameters
        res, _ = self._mi_cmd_run('-target-select %s %s' %
                                  (tgt_type, tgt_params))
        if res != 'connected':
            raise DebuggerError('Failed to connect to "%s %s"!' %
                                (tgt_type, tgt_params))

    def target_reset(self, action='halt'):
        # Reset via the monitor; with 'halt', wait until the target stops.
        self.monitor_run('reset %s' % action)
        if action == 'halt':
            self.wait_target_state(self.TARGET_STATE_STOPPED, 5)

    def target_download(self):
        raise NotImplementedError('target_download')

    def target_program(self, file_name, off, actions='verify'):
        # actions can be any or both of 'verify reset'
        self.monitor_run(
            'program_esp32 %s %s 0x%x' % (file_name, actions, off), 30)

    def exec_file_set(self, file_path):
        # -file-exec-and-symbols file
        res, _ = self._mi_cmd_run('-file-exec-and-symbols %s' % file_path)
        if res != 'done':
            raise DebuggerError('Failed to set program file!')

    def exec_interrupt(self):
        # Hack, unfortunately GDB does not react on -exec-interrupt,
        # so send CTRL+C to it
        self._gdbmi.gdb_process.send_signal(signal.SIGINT)
        # # -exec-interrupt [--all|--thread-group N]
        # res,_ = self._mi_cmd_run('-exec-interrupt --all')
        # if res != 'done':
        #     raise DebuggerError('Failed to stop program!')

    def exec_continue(self):
        # -exec-continue [--reverse] [--all|--thread-group N]
        res, _ = self._mi_cmd_run('-exec-continue --all')
        if res != 'running':
            raise DebuggerError('Failed to continue program!')

    def exec_jump(self, loc):
        # -exec-jump location
        res, _ = self._mi_cmd_run('-exec-jump %s' % loc)
        if res != 'running':
            raise DebuggerError('Failed to make jump in program!')

    def exec_next(self):
        # -exec-next [--reverse]
        res, _ = self._mi_cmd_run('-exec-next')
        if res != 'running':
            raise DebuggerError('Failed to step program!')

    def data_eval_expr(self, expr):
        # -data-evaluate-expression expr
        res, res_body = self._mi_cmd_run('-data-evaluate-expression %s' % expr)
        if res != 'done' or not res_body:
            raise DebuggerError('Failed to eval expression!')
        return res_body['value']

    def get_backtrace(self):
        # -stack-list-frames [ --no-frame-filters low-frame high-frame ]
        res, res_body = self._mi_cmd_run('-stack-list-frames')
        if res != 'done' or not res_body:
            raise DebuggerError('Failed to get backtrace!')
        return res_body['stack']

    def add_bp(self, loc):
        # -break-insert [ -t ] [ -h ] [ -f ] [ -d ] [ -a ] [ -c condition ] [ -i ignore-count ] [ -p thread-id ] [ location ]
        res, res_body = self._mi_cmd_run('-break-insert %s' % loc)
        if res != 'done' or not res_body or 'bkpt' not in res_body or 'number' not in res_body[
                'bkpt']:
            raise DebuggerError('Failed to insert BP!')
        # Return the breakpoint number so the caller can delete it later.
        return res_body['bkpt']['number']

    def delete_bp(self, bp):
        # -break-delete ( breakpoint )+
        res, _ = self._mi_cmd_run('-break-delete %s' % bp)
        if res != 'done':
            raise DebuggerError('Failed to delete BP!')

    def monitor_run(self, cmd, tmo=None):
        # Forward *cmd* to the remote monitor (e.g. OpenOCD).
        res, _ = self._mi_cmd_run('mon %s' % cmd, tmo=tmo)
        if res != 'done':
            raise DebuggerError('Failed to run monitor cmd "%s"!' % cmd)

    def wait_target_state(self, state, tmo=None):
        """Block until the target reaches *state*; returns the stop reason.

        Raises DebuggerTargetStateTimeoutError after *tmo* seconds (if set).
        """
        if tmo:
            end = time.time() + tmo
        while self._target_state != state:
            recs = []
            if len(self._resp_cache):
                # Consume cached records before polling gdb again.
                recs = self._resp_cache
            else:
                # check for target state change report from GDB
                recs = self._gdbmi.get_gdb_response(
                    1, raise_error_on_timeout=False)
                if tmo and len(recs) == 0 and time.time() >= end:
                    raise DebuggerTargetStateTimeoutError(
                        "Failed to wait for target state %d!" % state)
            self._parse_mi_resp(recs, state)
        return self._target_stop_reason

    def get_target_state(self):
        return self._target_state, self._target_stop_reason

    def get_current_frame(self):
        return self._curr_frame
class EspGDB(object):
    """Run GDB over MI2 against an ESP core dump to query tasks, threads
    and expressions (used by the esp-coredump tooling)."""

    def __init__(self,
                 gdb_path,
                 gdb_cmds,
                 core_filename,
                 prog_filename,
                 timeout_sec=DEFAULT_GDB_TIMEOUT_SEC):
        """ Start GDB and initialize a GdbController instance """
        gdb_args = [
            '--quiet',  # inhibit dumping info at start-up
            '--nx',  # inhibit window interface
            '--nw',  # ignore .gdbinit
            '--interpreter=mi2',  # use GDB/MI v2
            '--core=%s' % core_filename  # core file
        ]
        # Extra '-ex' commands to run at startup (empty entries skipped).
        for c in gdb_cmds:
            if c:
                gdb_args += ['-ex', c]
        gdb_args.append(prog_filename)
        self.p = GdbController(gdb_path=gdb_path, gdb_args=gdb_args)
        self.timeout = timeout_sec
        # Consume initial output by issuing a dummy command
        self._gdbmi_run_cmd_get_responses(
            cmd='-data-list-register-values x pc',
            resp_message=None,
            resp_type='console',
            multiple=True,
            done_message='done',
            done_type='result')

    def __del__(self):
        try:
            self.p.exit()
        except IndexError:
            # pygdbmi raises IndexError when the process is already gone.
            logging.warning(
                'Attempt to terminate the GDB process failed, because it is already terminated. Skip'
            )

    def _gdbmi_run_cmd_get_responses(self,
                                     cmd,
                                     resp_message,
                                     resp_type,
                                     multiple=True,
                                     done_message=None,
                                     done_type=None):
        """Send *cmd* and poll until timeout, collecting responses whose
        message/type match; optionally stop early on a 'done' record."""
        self.p.write(cmd, read_response=False)
        t_end = time.time() + self.timeout
        filtered_response_list = []
        all_responses = []
        while time.time() < t_end:
            more_responses = self.p.get_gdb_response(
                timeout_sec=0, raise_error_on_timeout=False)
            filtered_response_list += filter(
                lambda rsp: rsp['message'] == resp_message and rsp['type'] ==
                resp_type, more_responses)
            all_responses += more_responses
            if filtered_response_list and not multiple:
                break
            if done_message and done_type and self._gdbmi_filter_responses(
                    more_responses, done_message, done_type):
                break
        if not filtered_response_list and not multiple:
            raise ESPCoreDumpError(
                "Couldn't find response with message '{}', type '{}' in responses '{}'"
                .format(resp_message, resp_type, str(all_responses)))
        return filtered_response_list

    def _gdbmi_run_cmd_get_one_response(self, cmd, resp_message, resp_type):
        # Convenience: exactly one matching response (raises if none).
        return self._gdbmi_run_cmd_get_responses(cmd,
                                                 resp_message,
                                                 resp_type,
                                                 multiple=False)[0]

    def _gdbmi_data_evaluate_expression(self, expr):
        """ Get the value of an expression, similar to the 'print' command """
        return self._gdbmi_run_cmd_get_one_response(
            "-data-evaluate-expression \"%s\"" % expr, 'done',
            'result')['payload']['value']

    def get_freertos_task_name(self, tcb_addr):
        """ Get FreeRTOS task name given the TCB address """
        try:
            val = self._gdbmi_data_evaluate_expression(
                '(char*)((TCB_t *)0x%x)->pcTaskName' % tcb_addr)
        except (ESPCoreDumpError, KeyError):
            # KeyError is raised when "value" is not in "payload"
            return ''
        # Value is of form '0x12345678 "task_name"', extract the actual name
        result = re.search(r"\"([^']*)\"$", val)
        if result:
            return result.group(1)
        return ''

    def run_cmd(self, gdb_cmd):
        """ Execute a generic GDB console command via MI2 """
        filtered_responses = self._gdbmi_run_cmd_get_responses(
            cmd="-interpreter-exec console \"%s\"" % gdb_cmd,
            resp_message=None,
            resp_type='console',
            multiple=True,
            done_message='done',
            done_type='result')
        # Join console chunks and unescape the MI string encoding.
        return ''.join([x['payload'] for x in filtered_responses]) \
            .replace('\\n', '\n') \
            .replace('\\t', '\t') \
            .rstrip('\n')

    def get_thread_info(self):
        """ Get information about all threads known to GDB, and the current thread ID """
        result = self._gdbmi_run_cmd_get_one_response('-thread-info', 'done',
                                                      'result')['payload']
        current_thread_id = result['current-thread-id']
        threads = result['threads']
        return threads, current_thread_id

    def switch_thread(self, thr_id):
        """ Tell GDB to switch to a specific thread, given its ID """
        self._gdbmi_run_cmd_get_one_response('-thread-select %s' % thr_id,
                                             'done', 'result')

    @staticmethod
    def _gdbmi_filter_responses(responses, resp_message, resp_type):
        # Materialize so truthiness works as an 'any matched?' check.
        return list(
            filter(
                lambda rsp: rsp['message'] == resp_message and rsp['type'] ==
                resp_type, responses))

    @staticmethod
    def gdb2freertos_thread_id(gdb_target_id):
        """ Convert GDB 'target ID' to the FreeRTOS TCB address """
        return int(gdb_target_id.replace('process ', ''), 0)
class Debugger:
    """Small GDB/MI front-end around gdb-multiarch for driving a remote
    target (e.g. a QEMU gdbstub on port 1234)."""

    def __init__(self, arch='riscv:rv32'):
        # logging.basicConfig(level=logging.DEBUG)
        self.gdbmi = None
        try:
            command = ['gdb-multiarch', '--interpreter=mi3', '--quiet']
            self.gdbmi = GdbController(command)
        except ValueError as msg:
            # gdb-multiarch missing or bad command line — stay unusable
            # (self.gdbmi is None) instead of crashing the caller.
            logging.critical(msg)
        if self.gdbmi is not None:
            self.gdbmi.write('set confirm off')
            self.gdbmi.write('set architecture ' + arch)

    def __del__(self):
        self.close()

    def connect(self):
        """Attach to the remote gdb stub listening on localhost:1234."""
        if self.gdbmi is not None:
            self.gdbmi.write('target remote :1234')

    def suspend(self):
        """Interrupt the running target by signalling the gdb process."""
        # self.gdbmi.send_signal_to_gdb('SIGINT')
        os.kill(self.gdbmi.gdb_process.pid, 2)  # 2 == SIGINT
        self.gdbmi.get_gdb_response()

    def run(self):
        """Continue execution ('c'); returns the raw MI response list."""
        response = self.gdbmi.write('c')
        return response

    def step(self):
        """Single-step ('s'); if gdb reports 'running', interrupt and
        return the refreshed register/line status."""
        response = self.gdbmi.write('s')
        for res in response:
            if res['type'] == 'result':
                if res['message'] == 'running':
                    self.suspend()
        status = self.getStatus()
        return status

    def breakpoint(self, number):
        """Set a breakpoint at *number* (line/location, passed to 'b')."""
        gdb_cmd = f'b {number}'
        response = self.gdbmi.write(gdb_cmd)
        return response

    def loadCode(self, filename):
        """Load symbols from *filename* and program it onto the target."""
        # FIX: the filename argument was previously unused — the commands
        # contained a fixed placeholder path instead of interpolating it.
        # NOTE(review): presumably the original interpolated filename here;
        # confirm against callers.
        gdb_cmd = f'add-symbol-file {filename}'
        response = self.gdbmi.write(gdb_cmd)
        gdb_cmd = f'load {filename}'
        response = self.gdbmi.write(gdb_cmd)
        return response

    def readMemory(self, dir, size):
        """Examine *size* units at address *dir* ('x/<size> <dir>').

        Returns (message, payload) where payload is the encoded 'msg'
        field of the result record, or None when absent.
        """
        gdb_cmd = f'x/{size} {dir}'
        response = self.gdbmi.write(gdb_cmd)
        msg = None
        payload = None
        for res in response:
            if res['type'] == 'result':
                msg = res['message']
                if res['payload'] is not None:
                    payload = res['payload']['msg'].encode()
                else:
                    payload = None
        return msg, payload

    def getStatus(self):
        """Return the register values plus current source line as a list
        (registers in 'info registers' order, line number appended)."""
        response = self.gdbmi.write('frame')
        line = 0
        for res in response:
            if res['type'] == 'console':
                # Console frame output looks like 'file.c:123\n...' — take
                # the number between ':' and the escaped newline.
                if ':' in res['payload']:
                    line = res['payload'].split(':')[1]
                    line = line.split('\\')[0]
                    line = int(line)
        response = self.gdbmi.write('info registers')
        regs = []
        for res in response:
            if res['type'] == 'console':
                # Second column is the hex value of the register.
                reg = res['payload'].split()[1]
                reg = reg.split('\\')[0]
                regs.append(int(reg, 16))
        status = regs
        status.append(line)
        return status

    def close(self):
        """Terminate the gdb subprocess if it was started."""
        if self.gdbmi is not None:
            self.gdbmi.exit()
class Gdb(object): """ Class to communicate to GDB """ chip_name = '' def __init__(self, gdb_path='gdb', remote_target=None, extended_remote_mode=False, gdb_log_file=None, log_level=None, log_stream_handler=None, log_file_handler=None): """ Constructor. Parameters ---------- gdb_path : string path to GDB executable. remote_target : string remote target address, for possible values see https://www.sourceware.org/gdb/onlinedocs/gdb/Connecting.html. Use "" or None value to skip the connection stage. extended_remote_mode : bool If True extended remote mode should be used. gdb_log_file : string path to GDB log file. log_level : int logging level for this object. See logging.CRITICAL etc log_stream_handler : logging.Handler Logging stream handler for this object. log_file_handler : logging.Handler Logging file handler for this object. """ self.tmo_scale_factor = 1 self._remote_target = remote_target self._extended_remote_mode = extended_remote_mode self._logger = log.logger_init("Gdb", log_level, log_stream_handler, log_file_handler) self._gdbmi = GdbController(gdb_path=gdb_path) self._gdbmi_lock = threading.Lock() self._resp_cache = [] self._target_state = TARGET_STATE_UNKNOWN self._target_stop_reason = TARGET_STOP_REASON_UNKNOWN self.stream_handlers = {'console': [], 'target': [], 'log': []} self._curr_frame = None self._curr_wp_val = None # gdb config try: self.prog_startup_cmdfile = None self.gdb_set("mi-async", "on") if gdb_log_file is not None: pardirs = os.path.dirname(gdb_log_file) if pardirs: os.makedirs(pardirs, exist_ok=True) # create non-existing folders self.gdb_set("logging", "file %s" % gdb_log_file) self.gdb_set("logging", "on") except Exception as e: self._logger.error('Failed to config GDB (%s)!', e) if self._gdbmi.gdb_process.stdout: out = self._gdbmi.gdb_process.stdout.read() self._logger.debug( '================== GDB OUTPUT START =================\n' '%s================== GDB OUTPUT END =================\n', out) if self._gdbmi.gdb_process.stderr: 
out = self._gdbmi.gdb_process.stderr.read() self._logger.debug( '================== GDB ERR OUTPUT START =================\n' '%s================== GDB ERR OUTPUT END =================\n', out) raise e def _on_notify(self, rec): if rec['message'] == 'stopped': self._target_state = TARGET_STATE_STOPPED self._curr_frame = rec['payload']['frame'] if 'reason' in rec['payload']: if rec['payload']['reason'] == 'breakpoint-hit': self._target_stop_reason = TARGET_STOP_REASON_BP elif rec['payload']['reason'] == 'watchpoint-trigger' or \ rec['payload']['reason'] == 'access-watchpoint-trigger': self._target_stop_reason = TARGET_STOP_REASON_WP self._curr_wp_val = rec['payload']['value'] elif rec['payload']['reason'] == 'watchpoint-scope': self._target_stop_reason = TARGET_STOP_REASON_WP_SCOPE elif rec['payload']['reason'] == 'end-stepping-range': self._target_stop_reason = TARGET_STOP_REASON_STEPPED elif rec['payload']['reason'] == 'function-finished': self._target_stop_reason = TARGET_STOP_REASON_FN_FINISHED elif rec['payload']['reason'] == 'signal-received': if rec['payload']['signal-name'] == 'SIGINT': self._target_stop_reason = TARGET_STOP_REASON_SIGINT elif rec['payload']['signal-name'] == 'SIGTRAP': self._target_stop_reason = TARGET_STOP_REASON_SIGTRAP else: self._logger.warning('Unknown signal received "%s"!', rec['payload']['signal-name']) self._target_stop_reason = TARGET_STOP_REASON_UNKNOWN else: self._logger.warning('Unknown target stop reason "%s"!', rec['payload']['reason']) self._target_stop_reason = TARGET_STOP_REASON_UNKNOWN else: self._target_stop_reason = TARGET_STOP_REASON_UNKNOWN elif rec['message'] == 'running': self._target_state = TARGET_STATE_RUNNING def _parse_mi_resp(self, new_resp, new_tgt_state): result = None result_body = None old_state = self._target_state # if any cached records go first resp = self._resp_cache + new_resp processed_recs = 0 for rec in resp: processed_recs += 1 if rec['type'] == 'log': self._logger.debug('LOG: %s', 
pformat(rec['payload'])) for hnd in self.stream_handlers['log']: hnd(rec['type'], rec['stream'], rec['payload']) elif rec['type'] == 'console': self._logger.info('CONS: %s', pformat(rec['payload'])) for hnd in self.stream_handlers['console']: hnd(rec['type'], rec['stream'], rec['payload']) elif rec['type'] == 'target': self._logger.debug('TGT: %s', pformat(rec['payload'])) for hnd in self.stream_handlers['target']: hnd(rec['type'], rec['stream'], rec['payload']) elif rec['type'] == 'notify': self._logger.info('NOTIFY: %s %s', rec['message'], pformat(rec['payload'])) self._on_notify(rec) # stop upon result receiption if we do not expect target state change if self._target_state != old_state and self._target_state == new_tgt_state: self._logger.debug('new target state %d', self._target_state) break elif rec['type'] == 'result': self._logger.debug('RESULT: %s %s', rec['message'], pformat(rec['payload'])) result = rec['message'] result_body = rec['payload'] # stop upon result reception if we do not expect target state change if not new_tgt_state: break # cache unprocessed records self._resp_cache = resp[processed_recs:] # self._logger.debug('cached recs: %s', pformat(self._resp_cache)) return result, result_body def _mi_cmd_run(self, cmd, new_target_state=None, response_on_success=["done"], tmo=5): def is_sublist(what, where): for i in range(len(where)): if what == where[i:i + len(what)]: return True return False def _mi_cmd_isdone(response, response_on_success): if not len(response_on_success): return True if len(response) < len(response_on_success): return False r_list = [str(i.get('message')) for i in response] return is_sublist(response_on_success, r_list) with self._gdbmi_lock: self._logger.debug('MI->: %s', cmd) response = [] end = time.time() if tmo: end += tmo * self.tmo_scale_factor done = False try: self._gdbmi.write(cmd, read_response=False) while time.time( ) <= end and not done: # while time is not up r = self._gdbmi.get_gdb_response( timeout_sec=0, 
raise_error_on_timeout=False) response += r done = _mi_cmd_isdone(response, response_on_success) except Exception as e: self._gdbmi.verify_valid_gdb_subprocess() else: while len(response) == 0: response = self._gdbmi.write(cmd, raise_error_on_timeout=False) self._logger.debug('MI<-:\n%s', pformat(response)) res, res_body = self._parse_mi_resp( response, new_target_state) # None, None if empty while not res: # check for result report from GDB response = self._gdbmi.get_gdb_response( 0, raise_error_on_timeout=False) if not len(response): if tmo and (time.time() >= end): raise DebuggerTargetStateTimeoutError( 'Failed to wait for completion of command "%s" / %s!' % (cmd, tmo * self.tmo_scale_factor)) else: self._logger.debug('MI<-:\n%s', pformat(response)) res, res_body = self._parse_mi_resp( response, new_target_state) # None, None if empty return res, res_body def stream_handler_add(self, stream_type, handler): if stream_type not in self.stream_handlers: raise DebuggerError('Unsupported stream type "%s"' % stream_type) if handler in self.stream_handlers[stream_type]: return self.stream_handlers[stream_type].append(handler) def stream_handler_remove(self, stream_type, handler): if stream_type not in self.stream_handlers: raise DebuggerError('Unsupported stream type "%s"' % stream_type) if handler not in self.stream_handlers[stream_type]: return self.stream_handlers[stream_type].remove(handler) def gdb_exit(self, tmo=5): """ -gdb-exit ~= quit """ self._mi_cmd_run("-gdb-exit", response_on_success=["exit"], tmo=tmo) def console_cmd_run(self, cmd, response_on_success=["done"], tmo=5): """ Execute a command in the console mode Parameters ---------- cmd : str response_on_success : list list of expected responses on success tmo : int time after that command will be considered as failed Returns ------- res, res_body """ return self._mi_cmd_run("-interpreter-exec console \"%s\"" % cmd, response_on_success=response_on_success, tmo=tmo) def target_select(self, tgt_type, 
tgt_params, tmo=5): # -target-select type parameters res, _ = self._mi_cmd_run('-target-select %s %s' % (tgt_type, tgt_params), response_on_success=["connected"], tmo=tmo) if res != 'connected': raise DebuggerError('Failed to connect to "%s %s"!' % (tgt_type, tgt_params)) def target_disconnect(self): # -target-disconnect self._mi_cmd_run('-target-disconnect') def target_reset(self, action='halt', tmo=5): self.monitor_run('reset %s' % action) if action == 'halt': self.wait_target_state(TARGET_STATE_STOPPED, tmo=tmo) self.console_cmd_run('flushregs') def exec_file_set(self, file_path): # -file-exec-and-symbols file local_file_path = file_path if os.name == 'nt': # Convert filepath from Windows format if needed local_file_path = local_file_path.replace("\\", "/") res, _ = self._mi_cmd_run('-file-exec-and-symbols %s' % local_file_path) if res != 'done': raise DebuggerError('Failed to set program file!') def exec_file_core_set(self, file_path): local_file_path = file_path if os.name == 'nt': # Convert filepath from Windows format if needed local_file_path = local_file_path.replace("\\", "/") res, _ = self.console_cmd_run( "core %s" % local_file_path) # TODO find/add mi-command for this if res != 'done': raise DebuggerError('Failed to set the core file!') def exec_interrupt(self): # -exec-interrupt [--all|--thread-group N] res, _ = self._mi_cmd_run('-exec-interrupt --all') if res != 'done': raise DebuggerError('Failed to stop program!') def exec_continue(self): # -exec-continue [--reverse] [--all|--thread-group N] res, _ = self._mi_cmd_run('-exec-continue --all', response_on_success=["running"]) if res != 'running': raise DebuggerError('Failed to continue program!') def file_cmd_run(self, path, tmo=5): """ Parameters ---------- path : str tmo : int """ if os.name == 'nt': path = path.replace("\\", "/") # BUG: using commands changing prompt type like 'commands' is not supported self.console_cmd_run('source %s' % path, tmo=tmo) def exec_run(self, start_func='main', 
startup_tmo=5, only_startup=False): """ Executes a startup command file in the beginning if it specified and then executes `-exec-run [ --start ]` mi-command Parameters ---------- start_func : str if not empty `exec_run` works like `start` stopping on the main function, otherwise - as `run` startup_tmo : int timeout for startup command file's execution only_startup :bool execute only a startup command file omitting `run`/`start` logic """ if self.prog_startup_cmdfile: self.file_cmd_run(self.prog_startup_cmdfile, tmo=startup_tmo) if only_startup: return if start_func: # if the start function specified execute `start` res, _ = self._mi_cmd_run('-exec-run --all --start', response_on_success=["running" ]) # stop on main() if start_func != 'main': # if we are want to use another function as a start function self.wait_target_state(TARGET_STATE_STOPPED, 5) # check if we are really stopped self.add_bp(start_func, tmp=True) # add a bp at the custom start function self.exec_continue() # and continue else: # if the start function is not specified execute `run` res, _ = self._mi_cmd_run('-exec-run --all', response_on_success=["running"]) if res != 'running': raise DebuggerError('Failed to run program!') def exec_jump(self, loc): # -exec-jump location res, _ = self._mi_cmd_run('-exec-jump %s' % loc, response_on_success=["running"]) if res != 'running': raise DebuggerError('Failed to make jump in program!') def exec_next(self): # -exec-next [--reverse] res, _ = self._mi_cmd_run('-exec-next', response_on_success=["running"]) if res != 'running': raise DebuggerError('Failed to step over!') def exec_step(self): # -exec-step [--reverse] res, _ = self._mi_cmd_run('-exec-step', response_on_success=["running"]) if res != 'running': raise DebuggerError('Failed to step in!') def exec_finish(self): # -exec-finish [--reverse] res, _ = self._mi_cmd_run('-exec-finish', response_on_success=["running"]) if res != 'running': raise DebuggerError('Failed to step out!') def exec_next_insn(self): # 
-exec-next-instruction [--reverse] res, _ = self._mi_cmd_run('-exec-next-instruction', response_on_success=["running"]) if res != 'running': raise DebuggerError('Failed to step insn!') def data_eval_expr(self, expr): # -data-evaluate-expression expr res, res_body = self._mi_cmd_run('-data-evaluate-expression "%s"' % expr, tmo=1) if res == "done" and 'value' in res_body: return res_body['value'] elif res == "error" and 'msg' in res_body: return res_body['msg'] else: raise DebuggerError('Failed to eval expression!') @staticmethod def extract_exec_addr(addr_val): sval_re = re.search('(.*)[<](.*)[>]', addr_val) if sval_re: return int(sval_re.group(1), 0) return int(addr_val, 0) def get_reg(self, nm): sval = self.data_eval_expr('$%s' % nm) # for PC we'll get something like '0x400e0db8 <gpio_set_direction>' return self.extract_exec_addr(sval) def set_reg(self, nm, val): return self.data_eval_expr('$%s=%s' % (nm, str(val))) def get_reg_names(self, reg_no=[]): # -data-list-register-names [ ( regno )+ ] res, res_body = self._mi_cmd_run('-data-list-register-names %s' % ' '.join(str(x) for x in reg_no)) if res == "done" and 'register-names' in res_body: return res_body['register-names'] else: raise DebuggerError('Failed to get registers names!') def get_reg_values(self, fmt, skip_unavailable=False, reg_no=[]): # -data-list-register-values [ --skip-unavailable ] fmt [ ( regno )*] res, res_body = self._mi_cmd_run('-data-list-register-values %s %s %s' % \ ('--skip-unavailable' if skip_unavailable else '', fmt, ' '.join(str(x) for x in reg_no))) if res == "done" and 'register-values' in res_body: return res_body['register-values'] else: raise DebuggerError('Failed to get registers values!') def gdb_set(self, var, val): res, _ = self._mi_cmd_run("-gdb-set %s %s" % (var, val)) if res != "done": raise DebuggerError('Failed to set variable!') def get_variables(self, thread_num=None, frame_num=0): # -stack-list-variables [ --no-frame-filters ] [ --skip-unavailable ] print-values if 
thread_num is not None: cmd = '-stack-list-variables --thread %d --frame %d --all-values' % ( thread_num, frame_num) else: cmd = '-stack-list-variables --all-values' res, res_body = self._mi_cmd_run(cmd) if res != 'done' or not res_body or 'variables' not in res_body: raise DebuggerError( 'Failed to get variables @ frame %d of thread %d!' % (frame_num, thread_num)) return res_body['variables'] def get_local_variables(self, no_values=False): # -stack-list-variables [ --no-frame-filters ] [ --skip-unavailable ] print-values # noinspection PyTypeChecker cmd = '-stack-list-locals %i' % int(not no_values) res, res_body = self._mi_cmd_run(cmd) if res != 'done' or not res_body or 'locals' not in res_body: raise DebuggerError('Failed to get variables @ frame') return res_body['locals'] def get_backtrace(self): # -stack-list-frames [ --no-frame-filters low-frame high-frame ] res, res_body = self._mi_cmd_run('-stack-list-frames') if res != 'done' or not res_body or 'stack' not in res_body: raise DebuggerError('Failed to get backtrace! 
(%s / %s)' % (res, res_body)) return res_body['stack'] def select_frame(self, frame): # -stack-select-frame framenum res, _ = self._mi_cmd_run('-stack-select-frame %d' % frame) if res != 'done': raise DebuggerError('Failed to get backtrace!') def add_bp(self, loc, ignore_count=0, cond='', hw=False, tmp=False): # -break-insert [ -t ] [ -h ] [ -f ] [ -d ] [ -a ] [ -c condition ] [ -i ignore-count ] # [ -p thread-id ] [ location ] cmd_args = '-i %d %s' % (ignore_count, loc) if len(cond): cmd_args = '-c "%s" %s' % (cond, cmd_args) if hw: cmd_args = "-h " + cmd_args if tmp: cmd_args = "-t " + cmd_args res, res_body = self._mi_cmd_run('-break-insert %s' % cmd_args) if res != 'done' or not res_body or 'bkpt' not in res_body or 'number' not in res_body[ 'bkpt']: raise DebuggerError('Failed to insert BP!') return res_body['bkpt']['number'] def add_wp(self, exp, tp='w'): # -break-watch [ -a | -r ] expr cmd_args = '"%s"' % exp if tp == 'r': cmd_args = '-r %s' % cmd_args elif tp == 'rw': cmd_args = '-a %s' % cmd_args res, res_body = self._mi_cmd_run('-break-watch %s' % cmd_args) if res != 'done' or not res_body: raise DebuggerError('Failed to insert WP!') if tp == 'w': if 'wpt' not in res_body or 'number' not in res_body['wpt']: raise DebuggerError('Failed to insert WP!') return res_body['wpt']['number'] elif tp == 'r': if 'hw-rwpt' not in res_body or 'number' not in res_body['hw-rwpt']: raise DebuggerError('Failed to insert RWP!') return res_body['hw-rwpt']['number'] elif tp == 'rw': if 'hw-awpt' not in res_body or 'number' not in res_body['hw-awpt']: raise DebuggerError('Failed to insert AWP!') return res_body['hw-awpt']['number'] return None def delete_bp(self, bp): # -break-delete ( breakpoint )+ res, _ = self._mi_cmd_run('-break-delete %s' % bp) if res != 'done': raise DebuggerError('Failed to delete BP!') def monitor_run(self, cmd, tmo=None, output_type=None): target_output = '' def _target_stream_handler(type, stream, payload): nonlocal target_output if output_type == 
'any' or stream == output_type: target_output += payload self.stream_handler_add('target', _target_stream_handler) try: res, resp = self._mi_cmd_run('mon %s' % cmd, tmo=tmo) finally: self.stream_handler_remove('target', _target_stream_handler) if res != 'done': raise DebuggerError('Failed to run monitor cmd "%s"!' % cmd) return resp, target_output def wait_target_state(self, state, tmo=None): """ Parameters ---------- state : int tmo : int Returns ------- stop_reason : int """ with self._gdbmi_lock: end = time.time() if tmo is not None: end += tmo * self.tmo_scale_factor while self._target_state != state: if len(self._resp_cache): recs = [] # self._resp_cache else: # check for target state change report from GDB recs = self._gdbmi.get_gdb_response( 0.5, raise_error_on_timeout=False) if tmo and len(recs) == 0 and time.time() >= end: raise DebuggerTargetStateTimeoutError( "Failed to wait for target state %d!" % state) self._parse_mi_resp(recs, state) return self._target_stop_reason def get_target_state(self): return self._target_state, self._target_stop_reason def get_current_frame(self): return self._curr_frame def get_current_wp_val(self): return self._curr_wp_val def connect(self, tmo=10): if not self._remote_target: self._logger.debug('Skipped connection to remote target') return self._logger.debug('Connecting to %s', self._remote_target) remote_mode = 'extended_remote' if self._extended_remote_mode else 'remote' self.target_select(remote_mode, self._remote_target, tmo=tmo) def disconnect(self): self.target_disconnect() def resume(self): self.exec_continue() self.wait_target_state(TARGET_STATE_RUNNING, 5) def halt(self): if self._target_state == TARGET_STATE_STOPPED: return self.exec_interrupt() self.wait_target_state(TARGET_STATE_STOPPED, 5) def get_thread_info(self, thread_id=None): """ Parameters ---------- thread_id : int or None thread to info if exists Returns ------- current-thread-id : str threads : dict """ # -thread-info [ thread-id ] if thread_id: cmd 
= '-thread-info %d' % thread_id else: cmd = '-thread-info' # streaming of info for all threads over gdbmi can take some time, so use large timeout value res, res_body = self._mi_cmd_run(cmd, tmo=20) # if res != 'done' or not res_body or 'threads' not in res_body or 'current-thread-id' not in res_body: if res != 'done' or not res_body or 'threads' not in res_body: # TODO verify removing current-thread-id raise DebuggerError('Failed to get thread info!') return res_body.get('current-thread-id', None), res_body['threads'] def select_thread(self, num): res, _ = self._mi_cmd_run('-thread-select %d' % num) if res != 'done': raise DebuggerError('Failed to set thread!') return res def set_thread(self, num): """Old-named method. For backward compatibility""" return self.select_thread(num) def get_thread_ids(self): # -thread-list-ids expr res, thread_ids = self._mi_cmd_run('-thread-list-ids') if res != 'done': raise DebuggerError('Failed to eval expression!') return thread_ids def get_selected_thread(self): # sel_id, ths = self.get_thread_info() for th in ths: if th['id'] == sel_id: return th return None def target_program(self, **kwargs): return None def set_prog_startup_script(self, path): """ Set up a startup command file which will be executed in the beginning of `exec_run` method. See : https://sourceware.org/gdb/current/onlinedocs/gdb/Command-Files.html#Command-Files Parameters ---------- path : str or None path to the command file. If None the script will not be executed """ if os.path.isfile(path): self.prog_startup_cmdfile = os.path.normpath(path) else: raise FileNotFoundError
class Gdb:
    """Thin wrapper around a GDB/MI session (pygdbmi) talking to a remote target.

    Tracks target run-state and the last stop reason by observing MI *notify*
    records, and exposes one method per MI command.  All methods raise
    ``DebuggerError`` on an unexpected MI result and
    ``DebuggerTargetStateTimeoutError`` on timeouts.
    """

    # Target states
    TARGET_STATE_UNKNOWN = 0
    TARGET_STATE_STOPPED = 1
    TARGET_STATE_RUNNING = 2
    # Target stop reasons
    TARGET_STOP_REASON_UNKNOWN = 0
    TARGET_STOP_REASON_SIGINT = 1
    TARGET_STOP_REASON_SIGTRAP = 2
    TARGET_STOP_REASON_BP = 3
    TARGET_STOP_REASON_WP = 4
    TARGET_STOP_REASON_WP_SCOPE = 5
    TARGET_STOP_REASON_STEPPED = 6
    TARGET_STOP_REASON_FN_FINISHED = 7

    @staticmethod
    def get_logger():
        """Return the shared 'Gdb' logger."""
        return logging.getLogger('Gdb')

    def __init__(self, gdb=None):
        """Start the gdb subprocess.

        Parameters
        ----------
        gdb : str or None
            path to the gdb executable; None lets pygdbmi pick the default.
        """
        self._logger = self.get_logger()
        # NOTE: both branches of the former `os.name == 'nt'` check created the
        # same controller (a GdbControllerWin variant was commented out), so the
        # duplicated branch was removed.
        self._gdbmi = GdbController(gdb_path=gdb)
        self._resp_cache = []  # MI records received but not yet processed
        self._target_state = self.TARGET_STATE_UNKNOWN
        self._target_stop_reason = self.TARGET_STOP_REASON_UNKNOWN
        self._curr_frame = None   # frame dict from the last 'stopped' notify
        self._curr_wp_val = None  # value from the last watchpoint-trigger notify

    def _on_notify(self, rec):
        """Update cached target state/stop-reason from one MI *notify* record."""
        if rec['message'] == 'stopped':
            self._target_state = self.TARGET_STATE_STOPPED
            self._curr_frame = rec['payload']['frame']
            if 'reason' in rec['payload']:
                if rec['payload']['reason'] == 'breakpoint-hit':
                    self._target_stop_reason = self.TARGET_STOP_REASON_BP
                elif rec['payload']['reason'] == 'watchpoint-trigger':
                    self._target_stop_reason = self.TARGET_STOP_REASON_WP
                    self._curr_wp_val = rec['payload']['value']
                elif rec['payload']['reason'] == 'watchpoint-scope':
                    self._target_stop_reason = self.TARGET_STOP_REASON_WP_SCOPE
                elif rec['payload']['reason'] == 'end-stepping-range':
                    self._target_stop_reason = self.TARGET_STOP_REASON_STEPPED
                elif rec['payload']['reason'] == 'function-finished':
                    self._target_stop_reason = self.TARGET_STOP_REASON_FN_FINISHED
                elif rec['payload']['reason'] == 'signal-received':
                    if rec['payload']['signal-name'] == 'SIGINT':
                        self._target_stop_reason = self.TARGET_STOP_REASON_SIGINT
                    elif rec['payload']['signal-name'] == 'SIGTRAP':
                        self._target_stop_reason = self.TARGET_STOP_REASON_SIGTRAP
                    else:
                        self._logger.warning('Unknown signal received "%s"!',
                                             rec['payload']['signal-name'])
                        self._target_stop_reason = self.TARGET_STOP_REASON_UNKNOWN
                else:
                    self._logger.warning('Unknown target stop reason "%s"!',
                                         rec['payload']['reason'])
                    self._target_stop_reason = self.TARGET_STOP_REASON_UNKNOWN
            else:
                self._target_stop_reason = self.TARGET_STOP_REASON_UNKNOWN
        elif rec['message'] == 'running':
            self._target_state = self.TARGET_STATE_RUNNING

    def _parse_mi_resp(self, new_resp, new_tgt_state):
        """Process MI records (cached ones first), returning (result, result_body).

        Stops early once a *result* record arrives (when no target state change
        is expected) or once the expected target state is reached; unprocessed
        records are stashed in ``self._resp_cache`` for the next call.
        """
        result = None
        result_body = None
        old_state = self._target_state
        # if any, cached records go first
        resp = self._resp_cache + new_resp
        processed_recs = 0
        for rec in resp:
            processed_recs += 1
            if rec['type'] == 'log':
                self._logger.debug('LOG: %s', pformat(rec['payload']))
            elif rec['type'] == 'console':
                self._logger.info('CONS: %s', pformat(rec['payload']))
            elif rec['type'] == 'notify':
                self._logger.info('NOTIFY: %s %s', rec['message'],
                                  pformat(rec['payload']))
                self._on_notify(rec)
                # stop once the expected target state has been reached
                if self._target_state != old_state and self._target_state == new_tgt_state:
                    self._logger.debug('new target state %d', self._target_state)
                    break
            elif rec['type'] == 'result':
                self._logger.debug('RESULT: %s %s', rec['message'],
                                   pformat(rec['payload']))
                result = rec['message']
                result_body = rec['payload']
                # stop upon result reception if we do not expect target state change
                if not new_tgt_state:
                    break
        # cache unprocessed records
        self._resp_cache = resp[processed_recs:]
        return result, result_body

    def _mi_cmd_run(self, cmd, new_tgt_state=None, tmo=5):
        """Send one MI command and wait for its result record.

        Parameters
        ----------
        cmd : str
            MI command line to send.
        new_tgt_state : int or None
            target state expected as a consequence of the command, if any.
        tmo : int or None
            overall timeout in seconds; falsy means wait forever.

        Raises
        ------
        DebuggerTargetStateTimeoutError if no result arrives within `tmo`.
        """
        self._logger.debug('MI->: %s', cmd)
        response = []
        if tmo:
            end = time.time() + tmo
            try:
                response = self._gdbmi.write(cmd, timeout_sec=tmo)
            except Exception:
                # was a bare `except:`; narrowed so KeyboardInterrupt/SystemExit
                # propagate.  On any write failure, check the subprocess is alive.
                self._gdbmi.verify_valid_gdb_subprocess()
        else:
            while len(response) == 0:
                response = self._gdbmi.write(cmd, raise_error_on_timeout=False)
        self._logger.debug('MI<-:\n%s', pformat(response))
        res, res_body = self._parse_mi_resp(response, new_tgt_state)
        while not res:
            # check for result report from GDB
            response = self._gdbmi.get_gdb_response(1, raise_error_on_timeout=False)
            if len(response) == 0:
                if tmo and (time.time() >= end):
                    raise DebuggerTargetStateTimeoutError(
                        'Failed to wait for completion of command "%s" / %s!' %
                        (cmd, tmo))
            else:
                self._logger.debug('MI<-:\n%s', pformat(response))
                res, res_body = self._parse_mi_resp(response, new_tgt_state)
        return res, res_body

    def console_cmd_run(self, cmd):
        """Execute `cmd` via -interpreter-exec console.

        NOTE(review): `cmd` is not quoted here; commands containing spaces rely
        on gdb's argument handling — confirm against the MI grammar if changed.
        """
        self._mi_cmd_run('-interpreter-exec console %s' % cmd)

    def target_select(self, tgt_type, tgt_params):
        # -target-select type parameters
        res, _ = self._mi_cmd_run('-target-select %s %s' % (tgt_type, tgt_params))
        if res != 'connected':
            raise DebuggerError('Failed to connect to "%s %s"!' %
                                (tgt_type, tgt_params))

    def target_disconnect(self):
        # -target-disconnect
        self._mi_cmd_run('-target-disconnect')

    def target_reset(self, action='halt'):
        """Reset the target via the monitor; when halting, wait for the stop
        and refresh gdb's register cache."""
        self.monitor_run('reset %s' % action)
        if action == 'halt':
            self.wait_target_state(self.TARGET_STATE_STOPPED, 5)
            self.console_cmd_run('flushregs')

    def target_download(self):
        raise NotImplementedError('target_download')

    def target_program(self, file_name, off, actions='verify', tmo=30):
        """Flash `file_name` at offset `off`; `actions` can be any or both of
        'verify reset'."""
        self.monitor_run(
            'program_esp %s %s 0x%x' % (fixup_path(file_name), actions, off), tmo)

    def exec_file_set(self, file_path):
        # -file-exec-and-symbols file
        self._logger.debug('exec_file_set %s' % file_path)
        res, _ = self._mi_cmd_run('-file-exec-and-symbols %s' %
                                  fixup_path(file_path))
        if res != 'done':
            raise DebuggerError('Failed to set program file!')

    def exec_interrupt(self):
        """Interrupt the running target.

        Hack: gdb does not react to -exec-interrupt here, so we deliver the
        platform interrupt signal (CTRL+C equivalent) to the subprocess instead.
        """
        global OS_INT_SIG
        self._logger.debug('MI->: send SIGINT')
        self._gdbmi.gdb_process.send_signal(OS_INT_SIG)
        # # -exec-interrupt [--all|--thread-group N]
        # res,_ = self._mi_cmd_run('-exec-interrupt --all')
        # if res != 'done':
        #     raise DebuggerError('Failed to stop program!')

    def exec_continue(self):
        # -exec-continue [--reverse] [--all|--thread-group N]
        res, _ = self._mi_cmd_run('-exec-continue --all')
        if res != 'running':
            raise DebuggerError('Failed to continue program!')

    def exec_jump(self, loc):
        # -exec-jump location
        res, _ = self._mi_cmd_run('-exec-jump %s' % loc)
        if res != 'running':
            raise DebuggerError('Failed to make jump in program!')

    def exec_next(self):
        # -exec-next [--reverse]
        res, _ = self._mi_cmd_run('-exec-next')
        if res != 'running':
            raise DebuggerError('Failed to step over!')

    def exec_step(self):
        # -exec-step [--reverse]
        res, _ = self._mi_cmd_run('-exec-step')
        if res != 'running':
            raise DebuggerError('Failed to step in!')

    def exec_finish(self):
        # -exec-finish [--reverse]
        res, _ = self._mi_cmd_run('-exec-finish')
        if res != 'running':
            raise DebuggerError('Failed to step out!')

    def exec_next_insn(self):
        # -exec-next-instruction [--reverse]
        res, _ = self._mi_cmd_run('-exec-next-instruction')
        if res != 'running':
            raise DebuggerError('Failed to step insn!')

    def data_eval_expr(self, expr):
        # -data-evaluate-expression expr
        res, res_body = self._mi_cmd_run('-data-evaluate-expression %s' % expr)
        if res != 'done' or not res_body or 'value' not in res_body:
            raise DebuggerError('Failed to eval expression!')
        return res_body['value']

    def get_reg(self, nm):
        """Return register `nm` as an int, stripping any '<symbol>' annotation
        (for PC gdb returns e.g. '0x400e0db8 <gpio_set_direction>')."""
        sval = self.data_eval_expr('$%s' % nm)
        # TODO: use regexp to extract value
        fn_start = sval.find(' <')
        if fn_start != -1:
            sval = sval[:fn_start]
        return int(sval, 0)

    def get_variables_at_frame(self, thread_num=None, frame_num=0):
        # -stack-list-variables [ --no-frame-filters ] [ --skip-unavailable ] print-values
        if thread_num:
            cmd = '-stack-list-variables --thread %d --frame %d --all-values' % (
                thread_num, frame_num)
        else:
            cmd = '-stack-list-variables --all-values'
        res, res_body = self._mi_cmd_run(cmd)
        if res != 'done' or not res_body or 'variables' not in res_body:
            # %s for thread: `%d % None` used to raise TypeError on this path
            raise DebuggerError(
                'Failed to get variables @ frame %d of thread %s!' %
                (frame_num, thread_num))
        return res_body['variables']

    def get_backtrace(self):
        # -stack-list-frames [ --no-frame-filters low-frame high-frame ]
        res, res_body = self._mi_cmd_run('-stack-list-frames')
        if res != 'done' or not res_body or 'stack' not in res_body:
            raise DebuggerError('Failed to get backtrace!')
        return res_body['stack']

    def select_frame(self, frame):
        # -stack-select-frame framenum
        res, _ = self._mi_cmd_run('-stack-select-frame %d' % frame)
        if res != 'done':
            raise DebuggerError('Failed to get backtrace!')

    def add_bp(self, loc, ignore_count=0, cond=''):
        """Insert a breakpoint at `loc`; returns the breakpoint number.

        -break-insert [ -t ] [ -h ] [ -f ] [ -d ] [ -a ] [ -c condition ]
                      [ -i ignore-count ] [ -p thread-id ] [ location ]
        """
        cmd_args = '-i %d %s' % (ignore_count, loc)
        if len(cond):
            cmd_args = '-c "%s" %s' % (cond, cmd_args)
        res, res_body = self._mi_cmd_run('-break-insert %s' % cmd_args)
        if res != 'done' or not res_body or 'bkpt' not in res_body or 'number' not in res_body[
                'bkpt']:
            raise DebuggerError('Failed to insert BP!')
        return res_body['bkpt']['number']

    def add_wp(self, exp, tp='w'):
        """Insert a watchpoint on `exp`; `tp` is 'w' (write), 'r' (read) or
        'rw' (access). Returns the watchpoint number, or None for unknown `tp`.
        """
        # -break-watch [ -a | -r ] expr
        cmd_args = '"%s"' % exp
        if tp == 'r':
            cmd_args = '-r %s' % cmd_args
        elif tp == 'rw':
            cmd_args = '-a %s' % cmd_args
        res, res_body = self._mi_cmd_run('-break-watch %s' % cmd_args)
        if res != 'done' or not res_body:
            raise DebuggerError('Failed to insert WP!')
        if tp == 'w':
            if 'wpt' not in res_body or 'number' not in res_body['wpt']:
                raise DebuggerError('Failed to insert WP!')
            return res_body['wpt']['number']
        elif tp == 'r':
            if 'hw-rwpt' not in res_body or 'number' not in res_body['hw-rwpt']:
                raise DebuggerError('Failed to insert RWP!')
            return res_body['hw-rwpt']['number']
        elif tp == 'rw':
            if 'hw-awpt' not in res_body or 'number' not in res_body['hw-awpt']:
                raise DebuggerError('Failed to insert AWP!')
            return res_body['hw-awpt']['number']
        return None

    def delete_bp(self, bp):
        # -break-delete ( breakpoint )+
        res, _ = self._mi_cmd_run('-break-delete %s' % bp)
        if res != 'done':
            raise DebuggerError('Failed to delete BP!')

    def monitor_run(self, cmd, tmo=None):
        """Run an OpenOCD monitor command and return the MI result body."""
        res, resp = self._mi_cmd_run('mon %s' % cmd, tmo=tmo)
        if res != 'done':
            raise DebuggerError('Failed to run monitor cmd "%s"!' % cmd)
        return resp

    def wait_target_state(self, state, tmo=None):
        """Block until the target reaches `state`; returns the stop reason.

        Raises DebuggerTargetStateTimeoutError after `tmo` seconds (if given).
        """
        if tmo:
            end = time.time() + tmo
        while self._target_state != state:
            if len(self._resp_cache):
                # BUGFIX: pass no new records here — _parse_mi_resp() already
                # prepends the cache itself; passing the cache as new records
                # made every cached record be processed twice.
                recs = []
            else:
                # check for target state change report from GDB
                recs = self._gdbmi.get_gdb_response(1,
                                                    raise_error_on_timeout=False)
                if tmo and len(recs) == 0 and time.time() >= end:
                    raise DebuggerTargetStateTimeoutError(
                        "Failed to wait for target state %d!" % state)
            self._parse_mi_resp(recs, state)
        return self._target_stop_reason

    def get_target_state(self):
        """Return (state, stop_reason)."""
        return self._target_state, self._target_stop_reason

    def get_current_frame(self):
        return self._curr_frame

    def get_current_wp_val(self):
        return self._curr_wp_val

    def connect(self):
        """Connect to the OpenOCD gdb server on localhost."""
        global OOCD_PORT
        self.target_select('remote', ':%d' % OOCD_PORT)

    def disconnect(self):
        self.target_disconnect()

    def get_thread_info(self, thread_id=None):
        """Return (current-thread-id, threads) from -thread-info [thread-id]."""
        if thread_id:
            cmd = '-thread-info %d' % thread_id
        else:
            cmd = '-thread-info'
        res, res_body = self._mi_cmd_run(cmd)
        if res != 'done' or not res_body or 'threads' not in res_body or 'current-thread-id' not in res_body:
            raise DebuggerError('Failed to get thread info!')
        return (res_body['current-thread-id'], res_body['threads'])

    def set_thread(self, num):
        """Select thread `num` as current."""
        res, _ = self._mi_cmd_run('-thread-select %d' % num)
        if res != 'done':
            raise DebuggerError('Failed to set thread!')
        return res

    def get_thread_ids(self):
        # -thread-list-ids expr
        res, thread_ids = self._mi_cmd_run('-thread-list-ids')
        if res != 'done':
            raise DebuggerError('Failed to eval expression!')
        return thread_ids

    def gcov_dump(self, on_the_fly=True):
        """Trigger a gcov data dump on the target via OpenOCD."""
        if on_the_fly:
            cmd = '%s gcov' % Oocd.current_target_name_get()
        else:
            cmd = '%s gcov dump' % Oocd.current_target_name_get()
        self.monitor_run(cmd, tmo=20)

    def sysview_start(self, file1, file2=''):
        """Start SystemView tracing to the given destination file(s)."""
        self.monitor_run('%s sysview start %s %s' %
                         (Oocd.current_target_name_get(), file1, file2))

    def sysview_stop(self):
        """Stop SystemView tracing."""
        self.monitor_run('%s sysview stop' % Oocd.current_target_name_get())
class CoredumpGDB():
    """GDB/MI session (via pygdbmi) opened on a core dump.

    Loads the executable and core file into gdb, then provides helpers that
    issue console commands ("where", "info reg", ...) and parse the textual
    payloads back into Python values.
    """

    def __init__(self, elf, coredump):
        self.coredump = coredump
        self.elf = elf
        self.corefile = self.coredump.file.name
        self.execfile = self.elf.file.name
        # XXX: use --nx will let manually set debug-file-directory
        # and unknown cause for not showing libc_start_main and argv
        # FIXME: get all response and retry if failed
        self.gdb = GdbController(gdb_args=['--quiet', '--interpreter=mi2'])
        # drain gdb's startup output before issuing commands
        self.get_response()
        self.setup_gdb()

    def setup_gdb(self):
        """Point gdb at the executable and the core file."""
        self.write_request("file {}".format(self.execfile))
        self.write_request("core {}".format(self.corefile))

    def get_response(self):
        """Collect all pending gdb responses; the first read timeout ends the loop."""
        resp = []
        while True:
            try:
                resp += self.gdb.get_gdb_response()
            except Exception:
                # get_gdb_response() raising (e.g. a timeout) is the intended
                # loop-exit condition.  Was a bare `except:`; narrowed so
                # KeyboardInterrupt/SystemExit still propagate.
                break
        return resp

    def write_request(self, req, **kwargs):
        """Send one gdb command and return every response it produced.

        `timeout_sec` may be passed through kwargs (default 1).
        """
        timeout_sec = kwargs.pop('timeout_sec', 1)
        kwargs['read_response'] = False
        self.gdb.write(req, timeout_sec=timeout_sec, **kwargs)
        resp = self.get_response()
        return resp

    def parse_frame(self, r):
        # type: (str) -> Dict[str, Any]
        """Parse one backtrace line from `where` output into a dict with keys
        'index', 'addr', 'func', 'args' ([name, value, entry] triples),
        'file' and 'line'.

        Expected shape:
        #n addr in func (args=args[ <name>][@entry=v]) at source_code[:line]\\n
        """
        attrs = {}  # type: Dict[str, Any]
        # payload carries a literal backslash-n sequence; strip it
        r = r.replace('\\n', '')
        attrs['index'] = r.partition(' ')[0][1:]
        r = r.partition(' ')[2][1:]
        attrs['addr'] = r.partition(' ')[0]
        r = r.partition(' ')[2]
        r = r.partition(' ')[2]  # skip the 'in' keyword
        attrs['func'] = r.partition(' ')[0]
        r = r.partition(' ')[2]
        args = r.partition(')')[0][1:].split(', ')
        args_list = []

        def remove_comment(arg):
            # drop trailing '<xxx>' symbol annotations
            if arg.find('<') != -1:
                arg = arg.partition('<')[0]
                arg = arg.replace(' ', '')
            return arg

        for arg in args:
            if arg.find('@') != -1:
                name, _, entry_ = arg.partition('@')
            else:
                name = arg
                entry_ = ''
            name, _, value = name.partition('=')
            value = remove_comment(value)
            if entry_:
                _, _, entry = entry_.partition('=')
                entry = remove_comment(entry)
                args_list.append([name, value, entry])
            else:
                args_list.append([name, value, ''])
        attrs['args'] = args_list  # type: ignore
        r = r.partition(')')[2]
        r = r.partition(' ')[2]
        r = r.partition(' ')[2]  # skip the 'at' keyword
        if r.find(':') != -1:
            source, _, line = r.partition(':')
        else:
            source = r
            line = '?'
        attrs['file'] = source
        attrs['line'] = line
        return attrs

    def parse_addr(self, r):
        """Extract the hex address from text like '$n = (...) 0xaddr <name>';
        returns 0 when no '0x...' token is present."""
        for token in r.split(' '):
            if token.startswith('0x'):
                return int(token, 16)
        return 0

    def parse_offset(self, r):
        """Extract N from a disassembly line 'addr <+N>: inst';
        returns 0 when no '<+N>' token is present."""
        for token in r.split(' '):
            if token.startswith('<+'):
                idx = token.find('>')
                return int(token[2:idx])
        return 0

    def backtrace(self):
        """Run `where` and return the list of parsed frame dicts."""
        resp = self.write_request("where")
        bt = []
        for r in resp:
            payload = r['payload']
            if payload and payload[0] == '#':
                print(payload)  # debug aid: echo the raw frame line
                bt.append(self.parse_frame(payload))
        return bt

    def get_symbol(self, addr):
        # type: (int) -> str
        """Return gdb's `info symbol` text for *addr*."""
        resp = self.write_request("info symbol {}".format(addr))
        return resp[1]['payload']

    def get_reg(self, reg_name):
        """Return the value of register *reg_name*, or 0 if unreadable.

        pygdbmi escapes control characters, so a value line starts with a
        literal backslash-t.
        """
        resp = self.write_request("info reg {}".format(reg_name))
        if len(resp) < 5 or not resp[2]['payload'].startswith('\\t'):
            return 0
        return int(resp[2]['payload'][2:].split(' ')[0], 16)

    def get_stack_base(self, n):
        # type: (int) -> Tuple[int, int]
        """Select frame *n* and return its (rsp, rbp) register values."""
        self.write_request("select-frame {}".format(n))
        rsp_value = self.get_reg('rsp')
        rbp_value = self.get_reg('rbp')
        return rsp_value, rbp_value

    def get_func_range(self, name):
        # type: (str) -> List[int]
        """Return [start_address, size] for function *name*.

        FIXME: Not a good idea. Maybe some gdb extension?
        """
        r1 = self.write_request("print &{}".format(name))
        addr = self.parse_addr(r1[1]['payload'])
        r2 = self.write_request("disass {}".format(name))
        size = self.parse_offset(r2[-3]['payload'])
        return [addr, size + 1]
class PanicTestDut(IdfDut):
    """DUT wrapper for panic-handler tests.

    Extends the pytest-embedded ``IdfDut`` with expect-helpers for panic
    output (backtraces, guru meditation errors, register dumps), core-dump
    extraction via ``espcoredump.py``, and a GDB/MI session (pygdbmi) attached
    to the chip's GDB stub over the serial port.

    NOTE(review): ``IdfDut``, ``IdfApp``, ``IdfSerial`` and ``sha256`` come
    from the enclosing project — behavior notes below that depend on them are
    assumptions to confirm against those definitions.
    """

    BOOT_CMD_ADDR = 0x9000   # flash offset of the boot command partition
    BOOT_CMD_SIZE = 0x1000   # size of that partition
    DEFAULT_EXPECT_TIMEOUT = 10
    COREDUMP_UART_START = '================= CORE DUMP START ================='
    COREDUMP_UART_END = '================= CORE DUMP END ================='

    app: IdfApp
    serial: IdfSerial

    def __init__(self, *args, **kwargs) -> None:  # type: ignore
        super().__init__(*args, **kwargs)
        self.gdb: GdbController = None  # type: ignore
        # Record the current root log level so revert_log_level() can restore
        # it: pygdbmi uses logging.debug to emit single-character noise, so we
        # raise the root level to INFO while the DUT is in use.
        self.log_level = logging.getLogger().level
        if self.log_level <= logging.DEBUG:
            logging.getLogger().setLevel(logging.INFO)
        self.coredump_output: TextIO = None  # type: ignore

    def close(self) -> None:
        # Shut down the gdb subprocess (if one was started) before the base close.
        if self.gdb:
            self.gdb.exit()
        super().close()

    def revert_log_level(self) -> None:
        """Restore the root log level recorded in __init__."""
        logging.getLogger().setLevel(self.log_level)

    def expect_test_func_name(self, test_func_name: str) -> None:
        """Drive the test-selection prompt: send the test name and confirm echo."""
        self.expect_exact('Enter test name:')
        self.write(test_func_name)
        self.expect_exact('Got test name: ' + test_func_name)

    def expect_none(self, pattern, **kwargs) -> None:  # type: ignore
        """like dut.expect_all, but with an inverse logic:
        fail if `pattern` appears within the (default 1 s) timeout."""
        if 'timeout' not in kwargs:
            kwargs['timeout'] = 1
        try:
            res = self.expect(pattern, **kwargs)
            raise AssertionError(f'Unexpected: {res.group().decode("utf8")}')
        except pexpect.TIMEOUT:
            # timing out means the pattern did not appear — the success case
            pass

    def expect_backtrace(self) -> None:
        """Expect a backtrace line that is not marked CORRUPTED."""
        self.expect_exact('Backtrace:')
        self.expect_none('CORRUPTED')

    def expect_gme(self, reason: str) -> None:
        """Expect method for Guru Meditation Errors"""
        self.expect_exact(
            f"Guru Meditation Error: Core 0 panic'ed ({reason})")

    def expect_reg_dump(self, core: int = 0) -> None:
        """Expect method for the register dump"""
        self.expect(r'Core\s+%d register dump:' % core)

    def expect_elf_sha256(self) -> None:
        """Expect method for ELF SHA256 line"""
        elf_sha256 = sha256(self.app.elf_file)
        # the app prints only the first CONFIG_APP_RETRIEVE_LEN_ELF_SHA chars
        elf_sha256_len = int(
            self.app.sdkconfig.get('CONFIG_APP_RETRIEVE_LEN_ELF_SHA', '16'))
        self.expect_exact('ELF file SHA256: ' + elf_sha256[0:elf_sha256_len])

    def _call_espcoredump(self, extra_args: List[str], coredump_file_name: str,
                          output_file_name: str) -> None:
        """Run espcoredump.py `info_corefile` on a dump, teeing output to a file.

        The output file handle is kept open (and cached on the instance) so the
        test case can inspect it afterwards.
        """
        # no "with" here, since we need the file to be open for later inspection by the test case
        if not self.coredump_output:
            self.coredump_output = open(output_file_name, 'w')
        espcoredump_script = os.path.join(os.environ['IDF_PATH'], 'components',
                                          'espcoredump', 'espcoredump.py')
        espcoredump_args = [
            sys.executable,
            espcoredump_script,
            'info_corefile',
            '--core',
            coredump_file_name,
        ]
        espcoredump_args += extra_args
        espcoredump_args.append(self.app.elf_file)
        logging.info('Running %s', ' '.join(espcoredump_args))
        logging.info('espcoredump output is written to %s',
                     self.coredump_output.name)
        subprocess.check_call(espcoredump_args, stdout=self.coredump_output)
        # rewind so the caller can read the freshly written output
        self.coredump_output.flush()
        self.coredump_output.seek(0)

    def process_coredump_uart(self) -> None:
        """Extract the core dump from UART output of the test, run espcoredump on it"""
        self.expect(self.COREDUMP_UART_START)
        res = self.expect('(.+)' + self.COREDUMP_UART_END)
        coredump_base64 = res.group(1).decode('utf8')
        with open(os.path.join(self.logdir, 'coredump_data.b64'),
                  'w') as coredump_file:
            logging.info('Writing UART base64 core dump to %s',
                         coredump_file.name)
            coredump_file.write(coredump_base64)
        output_file_name = os.path.join(self.logdir, 'coredump_uart_result.txt')
        self._call_espcoredump(['--core-format', 'b64'], coredump_file.name,
                               output_file_name)

    def process_coredump_flash(self) -> None:
        """Extract the core dump from flash, run espcoredump on it"""
        coredump_file_name = os.path.join(self.logdir, 'coredump_data.bin')
        logging.info('Writing flash binary core dump to %s', coredump_file_name)
        self.serial.dump_flash(coredump_file_name, partition='coredump')
        output_file_name = os.path.join(self.logdir,
                                        'coredump_flash_result.txt')
        self._call_espcoredump(['--core-format', 'raw'], coredump_file_name,
                               output_file_name)

    def gdb_write(self, command: str) -> Any:
        """
        Wrapper to write to gdb with a longer timeout, as test runner host can
        be slow sometimes
        """
        return self.gdb.write(command, timeout_sec=10)

    def start_gdb(self) -> None:
        """
        Runs GDB and connects it to the "serial" port of the DUT.
        After this, the DUT expect methods can no longer be used to capture output.
        """
        self.gdb = GdbController(gdb_path=self.toolchain_prefix + 'gdb')

        # pygdbmi logs to console by default, make it log to a file instead
        pygdbmi_log_file_name = os.path.join(self.logdir, 'pygdbmi_log.txt')
        pygdbmi_logger = self.gdb.logger
        pygdbmi_logger.setLevel(logging.DEBUG)
        while pygdbmi_logger.hasHandlers():
            pygdbmi_logger.removeHandler(pygdbmi_logger.handlers[0])
        log_handler = logging.FileHandler(pygdbmi_log_file_name)
        log_handler.setFormatter(
            logging.Formatter('%(asctime)s %(levelname)s: %(message)s'))
        pygdbmi_logger.addHandler(log_handler)

        logging.info('Running command: %s', self.gdb.get_subprocess_cmd())
        for _ in range(10):
            try:
                # GdbController creates a process with subprocess.Popen(). Is it really running? It is probable that
                # an RPI under high load will get non-responsive during creating a lot of processes.
                resp = self.gdb.get_gdb_response(
                    timeout_sec=10
                )  # calls verify_valid_gdb_subprocess() internally
                # it will be interesting to look up this response if the next GDB command fails (times out)
                logging.info('GDB response: %s', resp)
                break  # success
            except GdbTimeoutError:
                logging.warning(
                    'GDB internal error: cannot get response from the subprocess'
                )
            except NoGdbProcessError:
                logging.error('GDB internal error: process is not running')
                break  # failure - TODO: create another GdbController
            except ValueError:
                logging.error(
                    'GDB internal error: select() returned an unexpected file number'
                )

        # Set up logging for GDB remote protocol
        gdb_remotelog_file_name = os.path.join(self.logdir,
                                               'gdb_remote_log.txt')
        self.gdb_write('-gdb-set remotelogfile ' + gdb_remotelog_file_name)

        # Load the ELF file
        self.gdb_write('-file-exec-and-symbols {}'.format(self.app.elf_file))

        # Connect GDB to UART; the serial process must release the port first
        self.serial.proc.close()
        logging.info('Connecting to GDB Stub...')
        self.gdb_write('-gdb-set serial baud 115200')
        responses = self.gdb_write('-target-select remote ' + self.serial.port)

        # Make sure we get the 'stopped' notification; interrupt once if not
        stop_response = self.find_gdb_response('stopped', 'notify', responses)
        if not stop_response:
            responses = self.gdb_write('-exec-interrupt')
            stop_response = self.find_gdb_response('stopped', 'notify',
                                                   responses)
        assert stop_response
        frame = stop_response['payload']['frame']
        # 'file'/'line' are absent when gdb has no source info for the frame
        if 'file' not in frame:
            frame['file'] = '?'
        if 'line' not in frame:
            frame['line'] = '?'
        logging.info(
            'Stopped in {func} at {addr} ({file}:{line})'.format(**frame))

        # Drain remaining responses
        self.gdb.get_gdb_response(raise_error_on_timeout=False)

    def gdb_backtrace(self) -> Any:
        """
        Returns the list of stack frames for the current thread.
        Each frame is a dictionary, refer to pygdbmi docs for the format.
        """
        assert self.gdb
        responses = self.gdb_write('-stack-list-frames')
        return self.find_gdb_response('done', 'result',
                                      responses)['payload']['stack']

    @staticmethod
    def match_backtrace(gdb_backtrace: List[Any],
                        expected_functions_list: List[Any]) -> bool:
        """
        Returns True if the function names listed in expected_functions_list
        match the backtrace given by gdb_backtrace argument. The latter is in
        the same format as returned by gdb_backtrace() function.

        NOTE(review): indexes expected_functions_list by backtrace position —
        if the actual backtrace is longer than the expected list this raises
        IndexError rather than returning False; confirm callers always pass
        lists of at least equal length.
        """
        return all([
            frame['func'] == expected_functions_list[i]
            for i, frame in enumerate(gdb_backtrace)
        ])

    @staticmethod
    def find_gdb_response(message: str, response_type: str,
                          responses: List[Any]) -> Any:
        """
        Helper function which extracts one response from an array of GDB
        responses, filtering by message and type. Returns the first match, or
        None. Returned message is a dictionary, refer to pygdbmi docs for the
        format.
        """

        def match_response(response: Dict[str, Any]) -> bool:
            return response['message'] == message and response[
                'type'] == response_type  # type: ignore

        filtered_responses = [r for r in responses if match_response(r)]
        if not filtered_responses:
            return None
        return filtered_responses[0]
class Controller():  # pylint: disable=too-many-instance-attributes
    """ Thread object that handles GDB events and commands. """

    def __init__(self, vimx):
        """ Creates the GDB SBDebugger object and more!

        @param vimx Vim interface object used for buffer/sign manipulation.
        """
        import logging
        self.logger = logging.getLogger(__name__)
        self.dbg = None                 # GdbController instance (None until dbg_start)
        self._proc_cur_line_len = 0
        self._proc_lines_count = 0
        self.result_queue = []          # 'result'-type responses queued by poke()
        self.vimx = vimx
        self.busy_stack = 0             # when > 0, buffers are not updated
        self.buffers = VimBuffers(self, vimx)
        self.session = Session(self, vimx)

    def dbg_start(self):
        """ Spawn the gdb subprocess if it is not already running. """
        if self.dbg is None:
            self.dbg = GdbController()

    def dbg_interrupt(self):
        """ Send SIGINT to the gdb subprocess. """
        self.dbg.gdb_process.send_signal(SIGINT)  # what if remote process?

    def dbg_stop(self):
        """ Terminate the gdb subprocess and drop the reference to it. """
        self.dbg.exit()
        self.dbg = None
        self.logger.info('Terminated!')

    def is_busy(self):
        return self.busy_stack > 0

    def busy_more(self):
        self.busy_stack += 1

    def busy_less(self):
        self.busy_stack -= 1
        if self.busy_stack < 0:
            self.logger.critical("busy_stack < 0")
            self.busy_stack = 0

    def get_program_counters(self):
        return []

    def get_breakpoints(self):
        return []

    def serialize_mijson(self, result):
        """ Format one pygdbmi response dict and append it to the logs buffer.

        String payloads are unescaped (gdb emits escaped text); any other
        payload is rendered with str().
        """
        out = "message: {}, stream: {}, token: {}, type: {}\n".format(
            result.get('message'), result.get('stream'),
            result.get('token'), result.get('type'))
        payload = result.get('payload')
        if isinstance(payload, str):
            payload = payload.encode('utf8').decode('unicode_escape')
        else:
            payload = str(payload)
        out += "{}\n".format(payload)
        self.buffers.logs_append(out, u'\u2713')

    def execute(self, command):
        """ Run command in the interpreter, refresh all buffers,
            and display the result in the logs buffer. Returns True if succeeded.
        """
        # NOTE(review): docstring promises a True return but no value is
        # returned; callers appear to ignore the result — confirm before changing.
        self.buffers.logs_append(u'\u2192(gdb) {}\n'.format(command))
        result = self.get_command_result(command)
        if result is not None:
            self.serialize_mijson(result)
        else:
            # BUGFIX: was self.logs_append(...) — Controller has no such
            # method (AttributeError); logs live on self.buffers.
            self.buffers.logs_append("error\n", u'\u2717')
        self.update_buffers()

    def complete_command(self, arg, line, pos):
        """ Returns a list of viable completions for line, and cursor at pos. """
        # TODO complete the first word?
        return []

    def update_buffers(self, buf=None):
        """ Update gdb buffers and signs placed in source files.

        @param buf If None, all buffers and signs would be updated.
                   Otherwise, update only the specified buffer.
        """
        if self.is_busy():
            return
        if buf is None:
            self.buffers.update()
        else:
            self.buffers.update_buffer(buf)

    def bp_set_line(self, spath, line):
        """ Log a breakpoint request at spath:line (actual insert is TODO). """
        filepath = path.abspath(spath)
        self.buffers.logs_append(u'\u2192(gdb-bp) {}:{}\n'.format(spath, line))
        #self.execute("b {}:{}".format(filepath, line)) #TODO
        #self.update_buffers(buf='breakpoints')

    def do_breakswitch(self, bufnr, line):
        """ Switch breakpoint at the specified line in the buffer. """
        key = (bufnr, line)
        if key in self.buffers.bp_list:
            bp = self.buffers.bp_list[key]
            #self.execute("delete breakpoints {}".format(bp.id)) #TODO
        else:
            self.bp_set_line(self.vimx.get_buffer_name(bufnr), line)

    def do_breakdelete(self, bp_id):
        """ Delete a breakpoint by id """
        #self.execute("breakpoint delete {}".format(bp_id)) #TODO
        pass

    def put_stdin(self, instr):
        """ Forward raw input to the gdb subprocess without waiting. """
        #if process is running:
        self.dbg.write(instr, 0, read_response=False)

    def get_command_result(self, command):
        """ Runs command in the interpreter and returns (success, output)
            Not to be called directly for commands which changes debugger state;
            use execute instead.
        """
        #FIXME run only if process is not running?
        if len(self.result_queue) > 0:  # garbage?
            self.logger.warning('result cleaned: %s', self.result_queue.pop())
            self.result_queue = []  # clean
        self.logger.info('(gdb) %s', command)
        self.dbg.write(command, 0, read_response=False)
        # Poll for a result; give up after 8 pokes (~4s at 0.5s timeout each).
        count = 0
        while len(self.result_queue) == 0:
            self.poke()
            count += 1
            if count > 8:
                self.logger.warning('(gdb-no-result) %s', command)
                return None
        result = self.result_queue.pop()
        return result

    def poke(self):
        """ Pokes the gdb process for responses.

        'result'-type responses are queued for get_command_result();
        everything else is rendered into the logs buffer.
        Raises ValueError if the gdb subprocess was never started.
        """
        if self.dbg is None:
            raise ValueError('Poked a non-existent dbg!')
        try:
            responses = self.dbg.get_gdb_response(timeout_sec=0.5)
        except ValueError as e:
            # pygdbmi raises ValueError on select() oddities; just retry later
            self.logger.warning('Gdb poke error: %s', e)
            return
        except Exception as e:
            # anything else means the subprocess is in a bad state — stop it
            self.logger.critical('Unexpected error: %s', e)
            self.dbg_stop()
            return
        for resp in responses:
            if resp['type'] == 'result':
                self.result_queue.append(resp)
            else:
                self.serialize_mijson(resp)
class Gdb(object):
    """Neovim-facing GDB front end: drives a GdbController subprocess and
    mirrors its state (PC sign, breakpoints, watches, backtrace) into vim."""

    def __init__(self, vim):
        self.vim = vim
        self.ctrl = None            # GdbController, created in start()
        self.thread = None          # response-parsing thread
        self.running = False        # loop flag for parse_response()
        self.pc = None              # current program-counter sign info
        self.next_watch_no = 1
        # --- watches
        self.watches = {}           # watch_no -> {"name", "expr", "line", "value", "type"}
        self.watch_buf = self.vim.api.create_buf(True, False)
        self.watch_buf.name = "dbug-watch-expressions"
        self.watch_buf.api.set_option("bt", "nofile")
        #self.watch_buf.api.set_option("readonly", True)
        self.vim.api.buf_set_keymap(self.watch_buf, 'n', 'd',
                                    ':DbgWatchDelete<cr>', {'nowait': True})
        self.bt = Backtrace(vim)
        self.bpl = BreakpointList(vim)

    def start(self):
        """Spawn gdb (restarting if already running) and the listener thread."""
        if self.running:
            self.stop()
        gdb_path = self.vim.vars.get('dbug_gdb_path')
        if not gdb_path or not os.path.isfile(gdb_path):
            self.vim.command("echom \"Dbg: Using the default gdb installation\"")
            # TODO: check if gdb is installed: `which gdb` on Linux (and try on
            # windows as well)
            gdb_path = "gdb"
        self.ctrl = GdbController([gdb_path, "--interpreter=mi3"])
        # Start the thread that listens for responses
        self.thread = threading.Thread(target=self.parse_response)
        self.running = True
        self.thread.start()
        self.ctrl.write("-enable-pretty-printing", read_response=False)
        info("Started GDB debugger %s" % (gdb_path))

    def stop(self):
        """Tear down: clear signs/breakpoints, join the thread, exit gdb."""
        # clear the PC sign (if any)
        if self.pc:
            self.vim.command("sign unplace %d" % self.pc["number"])
            self.pc = None
        self.bpl.purge()
        # Stop the listening thread
        self.running = False
        self.thread.join()
        # Gracefully disconnect and exit
        self.target_disconnect()
        self.ctrl.exit()
        self.ctrl = None
        info("GDB debugger has stopped")

    def target_connect_remote(self, remote):
        info("Connecting remotely to %s" % remote)
        self.ctrl.write("-target-select remote %s" % remote, read_response=False)

    def target_disconnect(self):
        info("Disconnecting from target")
        self.ctrl.write("-target-disconnect", read_response=False)

    def load_exec_and_symbol_file(self, fname):
        if not os.path.isfile(fname):
            error("File '%s' doesn't exist" % (fname))
        info("Using '%s' as both executable and symbols file" % fname)
        self.ctrl.write("-file-exec-and-symbols %s" % (fname), read_response=False)

    def download(self):
        self.ctrl.write("-target-download", read_response=False)

    def run(self):
        self.ctrl.write("-exec-run", read_response=False)

    def cont(self):
        self.ctrl.write("-exec-continue", read_response=False)

    def step(self):
        self.ctrl.write("-exec-step", read_response=False)

    def next(self):
        self.ctrl.write("-exec-next", read_response=False)

    ### BREAKPOINTS {{{1
    def bp_toggle(self, fname, line):
        """Insert a breakpoint at fname:line, or delete it if already set."""
        bp = Breakpoint(fname, line)
        if bp in self.bpl:
            bp = self.bpl.remove(bp)
            self.ctrl.write(f"-break-delete {bp.number}", read_response=False)
        else:
            self.ctrl.write(f"-break-insert {str(bp)}", read_response=False)

    def bp_list(self):
        self.ctrl.write("-break-list", read_response=False)

    ### PROGRAM-COUNTER (PC) {{{1
    def _update_pc(self, pc):
        """Move the PC sign to pc = {'line', 'file'}, opening the file if needed.

        Two sign numbers (1 and 2) alternate so the new sign is placed before
        the old one is removed, avoiding gutter flicker.
        """
        old_pc = None
        if self.pc:
            old_pc = self.pc
            pc["number"] = (old_pc["number"] % 2) + 1
        else:
            pc["number"] = 1
        self.pc = pc
        buf_is_open = False
        for buf in self.vim.api.list_bufs():
            if buf.name == pc['file']:
                self.vim.api.win_set_buf(0, buf)
                self.vim.api.win_set_cursor(0, (pc['line'], 0))
                buf_is_open = True
                break
        if not buf_is_open:
            self.vim.command("e %s" % pc['file'])
            self.vim.api.win_set_cursor(0, (pc['line'], 0))
        #for no, bp in self.breakpoints.items():
        #    if bp["line"] == pc["line"] and bp["file"] == pc["file"]:
        #        self.vim.command("sign unplace %d" % (no + 2))
        #        break
        self.vim.command("sign place %d line=%d name=dbg_pc file=%s"
                         % (pc['number'], pc['line'], pc['file']))
        self.vim.command("normal! zz")
        debug("Update PC at '%s:%d'" % (pc["file"], pc["line"]))
        # Update the old_pc here because first removing the sign and then placing
        # when in the same file can cause flicker since the gutter is resized
        if old_pc:
            self.vim.command("sign unplace %s" % old_pc['number'])
        #for no, bp in self.breakpoints.items():
        #    if bp["line"] == old_pc["line"] and bp["file"] == old_pc["file"]:
        #        self.vim.command("sign place %d line=%d name=dbg_bp file=%s" % (no + 2, bp['line'], bp['file']))
        #        break

    ### STACK {{{1
    ### Commands {{{2
    def stack_info(self):
        self.ctrl.write("-stack-info-frame", read_response=False)

    def stack_list(self):
        self.ctrl.write("-stack-list-frames", read_response=False)

    ### WATCHES {{{1
    ### Commands {{{2
    def expr_watch(self, expr):
        """Register a new watch expression and create its gdb varobj."""
        expr_no = self.next_watch_no
        self.next_watch_no = self.next_watch_no + 1
        expr_name = "var%d" % (expr_no)
        self.watches[expr_no] = {"name": expr_name, "expr": expr}
        self.ctrl.write("-var-create %s @ %s" % (expr_name, expr),
                        read_response=False)

    def expr_update(self):
        self.ctrl.write("-var-update *", read_response=False)

    def watch_del(self, line):
        """Delete the watch shown on the given buffer line and re-pack lines."""
        watch = None
        for n, w in self.watches.items():
            if line == w["line"]:
                watch = w
                del self.watches[n]
                break
        if watch:
            # update line numbers for each watch
            for n, w in self.watches.items():
                if w['line'] > watch['line']:
                    self.watches[n]['line'] = w['line'] - 1
            self._watch_refresh()
            debug("Watch '{:s}' deleted".format(watch["expr"]))

    ### Utilities {{{2
    def _pr_watch(self, watch):
        """Render one watch entry into the watch buffer at its line."""
        line = watch['line']
        text = "{:<30s} {:<30s}[{:s}]".format(watch['expr'], watch['value'],
                                              watch['type'])
        self.vim.api.buf_set_lines(self.watch_buf, line, line, True, [text])

    def _watch_refresh(self):
        """Clear the watch buffer and re-render every watch."""
        self.vim.api.buf_set_lines(self.watch_buf, 0, -1, False, [])
        for n, w in self.watches.items():
            self._pr_watch(w)

    ### Handles {{{2
    def _update_watches(self, n):
        """Render watch n, assigning it the next free buffer line if new."""
        watch = self.watches[n]
        if 'line' not in watch:
            last_line = 0
            # BUGFIX: the loop variable used to be named 'n', clobbering the
            # parameter, so the line number was written to the wrong watch.
            for key, w in self.watches.items():
                if 'line' in w and w['line'] >= last_line:
                    last_line = w['line'] + 1
            self.watches[n]['line'] = last_line
            watch = self.watches[n]
        self._pr_watch(watch)
        info("Updated watch '%s' on line %d" % (watch['expr'], watch['line']))

    def _watch_update(self, var):
        """Re-evaluate varobj 'var' synchronously and refresh its value."""
        response = self.ctrl.write("-var-evaluate-expression %s" % (var),
                                   read_response=True)
        for r in response:
            for k, v in r.items():
                if k in ['payload']:
                    n = int(var.split("var")[1])
                    self.watches[n]["value"] = v['value']
                    self.vim.async_call(self._watch_refresh)
                    debug("Watch's %s value changed to %s" % (var, v['value']))

    ### PRINT FUNCTIONS {{{1
    ### Used for logging messages from GDB; they exist because the string has
    ### to be modified (escaped) before printed to the screen
    def _info(self, hdr, msg):
        if msg:
            for m in msg.split('\\n'):
                m = m.replace('\\"', '"')
                info("%s: %s" % (hdr, m))

    def _debug(self, hdr, msg):
        if msg:
            for m in msg.split('\\n'):
                m = m.replace('\\"', '"')
                debug("%s: %s" % (hdr, m))

    ### PARSING THE RESPONSE {{{1
    ### This function is run by a thread, waits in a loop for messages from GDB
    ### and then calls the corresponding handling functions
    def parse_response(self):
        debug("Started response parser thread")
        while self.running:
            response = self.ctrl.get_gdb_response(timeout_sec=1,
                                                  raise_error_on_timeout=False)
            # The response is a list of dictionaries with each entry in the list
            # of the form:
            #   {'type': '', 'message': '', 'payload': ''}
            # where:
            #   type    := 'console' | 'log' | 'output' | 'result' | 'notify'
            #   message := None | 'Some message'
            #   payload := None | 'Some message' | a dictionary/list carrying more information
            # the other fields are ignored
            for r in response:
                # debug(r)
                # The information that is printed on the screen can be found in the
                # 'message' field (if not None) and in the 'payload' field if it is
                # of string type; additionally it can be found in r['payload']['msg']
                # if 'payload' is a dictionary
                self._info(f"gdb-{r['type']}[m]",
                           r['message'] if r['message'] else None)
                self._info(f"gdb-{r['type']}[p]",
                           r['payload'] if type(r['payload']) == type('') else None)
                self._info(f"gdb-{r['type']}",
                           r['payload']['msg'] if type(r['payload']) == type({})
                           and 'msg' in r['payload'] else None)
                if r['type'] in ['notify', 'result'] and type(r['payload']) == type({}):
                    # When the 'payload' field is a dictionary it is a response
                    # to a command and carries additional information.
                    # The contents of the 'payload' depend on the command sent.
                    for k, v in r['payload'].items():
                        # ---> Breakpoints
                        if k in ['bkpt']:
                            bp = Breakpoint(v['fullname'], int(v['line']),
                                            int(v['number']))
                            self.vim.async_call(self.bpl.add, bp)
                        elif k in ['BreakpointTable']:
                            debug('---BreakpointTable')
                            for b in v["body"]:
                                bp = Breakpoint(b['fullname'], int(b['line']),
                                                int(b['number']))
                                if bp not in self.bpl:
                                    self.vim.async_call(self.bpl.add, bp)
                        # ---> Program Counter (PC)
                        elif k in ['frame'] and 'line' in v and 'fullname' in v:
                            pc = {'line': int(v['line']), 'file': v['fullname']}
                            self.vim.async_call(self._update_pc, pc)
                            # Update any watch that may be used
                            self.ctrl.write("-var-update *", read_response=False)
                        # ---> Watches
                        elif k in ['name'] and 'var' in r['payload']['name']:
                            n = int(r['payload']['name'].split('var')[1])
                            self.watches[n]['value'] = r['payload']['value']
                            self.watches[n]['type'] = r['payload']['type']
                            self.vim.async_call(self._update_watches, n)
                        elif k in ['changelist']:
                            for w in v:
                                self._watch_update(w['name'])
                        # ---> Backtrace
                        elif k in ['stack']:
                            self.vim.async_call(self.bt.update, v)
        debug("Response parser thread stopped")
class Gdb(object):
    """GDB/MI debugger wrapper built on pygdbmi's GdbController.

    Serializes all MI traffic through a lock, caches unprocessed MI records,
    and tracks target run-state / stop-reason from 'notify' records.
    """

    def get_config(self, param_name, in_val=None):
        """Return in_val if it is not None, otherwise the configured value
        for param_name (None if absent)."""
        if in_val is not None:
            return in_val
        return self.config.get(param_name)

    def __init__(self, gdb_path=None, log_level=None, log_stream_handler=None,
                 log_file_handler=None, log_gdb_proc_file=None, remote_target=None,
                 remote_address=None, remote_port=None, top_defaults=None, **kwargs):
        defaults = {
            "gdb_path": "gdb",
            "remote_target": True,
            "remote_address": "localhost",
            "remote_port": 3333,
        }
        self.config = defaults  # type: dict
        if top_defaults:
            self.config.update(top_defaults)
        # Explicit arguments override configured defaults.
        gdb_path = self.get_config("gdb_path", gdb_path)
        log_level = self.get_config("log_level", log_level)
        log_stream_handler = self.get_config("log_stream_handler", log_stream_handler)
        log_file_handler = self.get_config("log_file_handler", log_file_handler)
        log_gdb_proc_file = self.get_config("log_gdb_proc_file", log_gdb_proc_file)
        remote_target = self.get_config("remote_target", remote_target)
        remote_address = self.get_config("remote_address", remote_address)
        remote_port = self.get_config("remote_port", remote_port)
        # Start gdb process
        self.remote_target = {
            "use_remote": remote_target,
            "address": remote_address,
            "port": remote_port
        }
        self._logger = log.logger_init("Gdb", log_level, log_stream_handler,
                                       log_file_handler)
        self._gdbmi = GdbController(gdb_path=gdb_path)
        self._gdbmi_lock = threading.Lock()
        self._resp_cache = []
        self._target_state = TARGET_STATE_UNKNOWN
        self._target_stop_reason = TARGET_STOP_REASON_UNKNOWN
        self._curr_frame = None
        self._curr_wp_val = None
        # gdb config
        self.gdb_set("mi-async", "on")
        if log_gdb_proc_file is not None:
            pardirs = os.path.dirname(log_gdb_proc_file)
            if pardirs:
                os.makedirs(pardirs, exist_ok=True)  # create non-existing folders
            self.gdb_set("logging", "file %s" % log_gdb_proc_file)
            self.gdb_set("logging", "on")

    def _on_notify(self, rec):
        """Update cached target state / stop reason from a 'notify' record."""
        if rec['message'] == 'stopped':
            self._target_state = TARGET_STATE_STOPPED
            self._curr_frame = rec['payload']['frame']
            if 'reason' in rec['payload']:
                if rec['payload']['reason'] == 'breakpoint-hit':
                    self._target_stop_reason = TARGET_STOP_REASON_BP
                elif rec['payload']['reason'] == 'watchpoint-trigger':
                    self._target_stop_reason = TARGET_STOP_REASON_WP
                    self._curr_wp_val = rec['payload']['value']
                elif rec['payload']['reason'] == 'watchpoint-scope':
                    self._target_stop_reason = TARGET_STOP_REASON_WP_SCOPE
                elif rec['payload']['reason'] == 'end-stepping-range':
                    self._target_stop_reason = TARGET_STOP_REASON_STEPPED
                elif rec['payload']['reason'] == 'function-finished':
                    self._target_stop_reason = TARGET_STOP_REASON_FN_FINISHED
                elif rec['payload']['reason'] == 'signal-received':
                    if rec['payload']['signal-name'] == 'SIGINT':
                        self._target_stop_reason = TARGET_STOP_REASON_SIGINT
                    elif rec['payload']['signal-name'] == 'SIGTRAP':
                        self._target_stop_reason = TARGET_STOP_REASON_SIGTRAP
                    else:
                        self._logger.warning('Unknown signal received "%s"!',
                                             rec['payload']['signal-name'])
                        self._target_stop_reason = TARGET_STOP_REASON_UNKNOWN
                else:
                    self._logger.warning('Unknown target stop reason "%s"!',
                                         rec['payload']['reason'])
                    self._target_stop_reason = TARGET_STOP_REASON_UNKNOWN
            else:
                self._target_stop_reason = TARGET_STOP_REASON_UNKNOWN
        elif rec['message'] == 'running':
            self._target_state = TARGET_STATE_RUNNING

    def _parse_mi_resp(self, new_resp, new_tgt_state):
        """Process cached + new MI records; return (result, result_body).

        Stops early on the condition appropriate to whether a target state
        change is expected; unprocessed records are kept in _resp_cache.
        """
        result = None
        result_body = None
        old_state = self._target_state
        # if any, cached records go first
        resp = self._resp_cache + new_resp
        processed_recs = 0
        for rec in resp:
            processed_recs += 1
            if rec['type'] == 'log':
                self._logger.debug('LOG: %s', pformat(rec['payload']))
            elif rec['type'] == 'console':
                self._logger.info('CONS: %s', pformat(rec['payload']))
            elif rec['type'] == 'notify':
                self._logger.info('NOTIFY: %s %s', rec['message'],
                                  pformat(rec['payload']))
                self._on_notify(rec)
                # stop upon state transition to the expected target state
                if self._target_state != old_state and self._target_state == new_tgt_state:
                    self._logger.debug('new target state %d', self._target_state)
                    break
            elif rec['type'] == 'result':
                self._logger.debug('RESULT: %s %s', rec['message'],
                                   pformat(rec['payload']))
                result = rec['message']
                result_body = rec['payload']
                # stop upon result reception if we do not expect target state change
                if not new_tgt_state:
                    break
        # cache unprocessed records
        self._resp_cache = resp[processed_recs:]
        # self._logger.debug('cached recs: %s', pformat(self._resp_cache))
        return result, result_body

    def _mi_cmd_run(self, cmd, new_tgt_state=None, tmo=5):
        """Send one MI command and wait for its result.

        @param new_tgt_state expected target state after the command, or None.
        @param tmo timeout in seconds; 0/None means poll-forever mode.
        Raises DebuggerTargetStateTimeoutError on timeout.
        """
        def _mi_cmd_isdone(cmd, response):
            # Heuristic per-command completion check on the accumulated records.
            if len(response):
                # if cmd == '-exec-next':
                #     if response[-1].get('message') == 'stopped' and response[-2].get('message') == 'running':
                #         return True
                # TODO: less hardcode
                if cmd in [
                        '-exec-step', '-exec-next', '-exec-continue',
                        '-exec-continue --all', '-exec-finish'
                ]:
                    if response[-1].get('message') == 'stopped':
                        return True
                elif cmd == '-thread-info':
                    if (response[-1].get('message') == 'done') or \
                       (len(response) > 1 and response[-2].get('message') == 'done'
                            and response[-1].get('message') == 'thread-selected'):
                        return True
                else:
                    if response[-1].get('message') == 'done':
                        return True
            return False

        with self._gdbmi_lock:
            self._logger.debug('MI->: %s', cmd)
            response = []
            end = time.time()
            if tmo:
                end += tmo
                done = False
                try:
                    self._gdbmi.write(cmd, read_response=False)
                    while time.time() <= end and not done:  # while time is not up
                        r = self._gdbmi.get_gdb_response(
                            timeout_sec=0, raise_error_on_timeout=False)
                        response += r
                        done = _mi_cmd_isdone(cmd, response)
                except Exception:
                    # best-effort: confirm gdb is still alive before continuing
                    self._gdbmi.verify_valid_gdb_subprocess()
            else:
                while len(response) == 0:
                    response = self._gdbmi.write(cmd, raise_error_on_timeout=False)
            self._logger.debug('MI<-:\n%s', pformat(response))
            res, res_body = self._parse_mi_resp(response, new_tgt_state)  # None, None if empty
            while not res:
                # check for result report from GDB
                response = self._gdbmi.get_gdb_response(
                    0, raise_error_on_timeout=False)
                if not len(response):
                    if tmo and (time.time() >= end):
                        raise DebuggerTargetStateTimeoutError(
                            'Failed to wait for completion of command "%s" / %s!'
                            % (cmd, tmo))
                else:
                    self._logger.debug('MI<-:\n%s', pformat(response))
                    res, res_body = self._parse_mi_resp(response, new_tgt_state)  # None, None if empty
            return res, res_body

    def gdb_exit(self):
        """ -gdb-exit ~= quit """
        self._mi_cmd_run("-gdb-exit")

    def console_cmd_run(self, cmd):
        self._mi_cmd_run("-interpreter-exec console \"%s\"" % cmd)

    def target_select(self, tgt_type, tgt_params):
        # -target-select type parameters
        res, _ = self._mi_cmd_run('-target-select %s %s' % (tgt_type, tgt_params))
        if res != 'connected':
            raise DebuggerError('Failed to connect to "%s %s"!' % (tgt_type, tgt_params))

    def target_disconnect(self):
        # -target-disconnect
        self._mi_cmd_run('-target-disconnect')

    def target_reset(self, action='halt'):
        self.monitor_run('reset %s' % action)
        if action == 'halt':
            self.wait_target_state(TARGET_STATE_STOPPED, 5)
            self.console_cmd_run('flushregs')

    def exec_file_set(self, file_path):
        # -file-exec-and-symbols file
        local_file_path = file_path
        if os.name == 'nt':
            # Convert filepath from Windows format if needed
            local_file_path = local_file_path.replace("\\", "/")
        res, _ = self._mi_cmd_run('-file-exec-and-symbols %s' % local_file_path)
        if res != 'done':
            raise DebuggerError('Failed to set program file!')

    def exec_interrupt(self):
        # -exec-interrupt [--all|--thread-group N]
        res, _ = self._mi_cmd_run('-exec-interrupt --all')
        if res != 'done':
            raise DebuggerError('Failed to stop program!')

    def exec_continue(self):
        # -exec-continue [--reverse] [--all|--thread-group N]
        res, _ = self._mi_cmd_run('-exec-continue --all')
        if res != 'running':
            raise DebuggerError('Failed to continue program!')

    def exec_run(self, start=True):
        # -exec-run [ --all | --thread-group N ] [ --start ]
        if start:
            cmd = '-exec-run --all --start'
        else:
            cmd = '-exec-run --all'
        res, _ = self._mi_cmd_run(cmd)
        if res != 'running':
            raise DebuggerError('Failed to run program!')

    def exec_jump(self, loc):
        # -exec-jump location
        res, _ = self._mi_cmd_run('-exec-jump %s' % loc)
        if res != 'running':
            raise DebuggerError('Failed to make jump in program!')

    def exec_next(self):
        # -exec-next [--reverse]
        res, _ = self._mi_cmd_run('-exec-next')
        if res != 'running':
            raise DebuggerError('Failed to step over!')

    def exec_step(self):
        # -exec-step [--reverse]
        res, _ = self._mi_cmd_run('-exec-step')
        if res != 'running':
            raise DebuggerError('Failed to step in!')

    def exec_finish(self):
        # -exec-finish [--reverse]
        res, _ = self._mi_cmd_run('-exec-finish')
        if res != 'running':
            raise DebuggerError('Failed to step out!')

    def exec_next_insn(self):
        # -exec-next-instruction [--reverse]
        res, _ = self._mi_cmd_run('-exec-next-instruction')
        if res != 'running':
            raise DebuggerError('Failed to step insn!')

    def data_eval_expr(self, expr):
        # -data-evaluate-expression expr
        res, res_body = self._mi_cmd_run('-data-evaluate-expression "%s"' % expr,
                                         tmo=1)
        if res == "done" and 'value' in res_body:
            return res_body['value']
        elif res == "error" and 'msg' in res_body:
            return res_body['msg']
        else:
            raise DebuggerError('Failed to eval expression!')

    def get_reg(self, nm):
        """Read register nm via expression evaluation; returns an int."""
        sval = self.data_eval_expr('$%s' % nm)
        # for PC we'll get something like '0x400e0db8 <gpio_set_direction>'
        sval_re = re.search('(.*)[<](.*)[>]', sval)
        if sval_re:
            sval = sval_re.group(1)
        return int(sval, 0)

    def gdb_set(self, var, val):
        self._mi_cmd_run("-gdb-set %s %s" % (var, val))

    def get_variables(self, thread_num=None, frame_num=0):
        # -stack-list-variables [ --no-frame-filters ] [ --skip-unavailable ] print-values
        if thread_num is not None:
            cmd = '-stack-list-variables --thread %d --frame %d --all-values' % (
                thread_num, frame_num)
        else:
            cmd = '-stack-list-variables --all-values'
        res, res_body = self._mi_cmd_run(cmd)
        if res != 'done' or not res_body or 'variables' not in res_body:
            # BUGFIX: formatting used '%d' for thread_num, which raised
            # TypeError when thread_num is None (its default) instead of
            # raising the intended DebuggerError.
            raise DebuggerError(
                'Failed to get variables @ frame %s of thread %s!'
                % (frame_num, thread_num))
        return res_body['variables']

    def get_local_variables(self, no_values=False):
        # -stack-list-variables [ --no-frame-filters ] [ --skip-unavailable ] print-values
        # noinspection PyTypeChecker
        cmd = '-stack-list-locals %i' % int(not no_values)
        res, res_body = self._mi_cmd_run(cmd)
        if res != 'done' or not res_body or 'locals' not in res_body:
            raise DebuggerError('Failed to get variables @ frame')
        return res_body['locals']

    def get_backtrace(self):
        # -stack-list-frames [ --no-frame-filters low-frame high-frame ]
        res, res_body = self._mi_cmd_run('-stack-list-frames')
        if res != 'done' or not res_body or 'stack' not in res_body:
            raise DebuggerError('Failed to get backtrace! (%s / %s)' % (res, res_body))
        return res_body['stack']

    def select_frame(self, frame):
        # -stack-select-frame framenum
        res, _ = self._mi_cmd_run('-stack-select-frame %d' % frame)
        if res != 'done':
            raise DebuggerError('Failed to get backtrace!')

    def add_bp(self, loc, ignore_count=0, cond='', hw=False, tmp=False):
        # -break-insert [ -t ] [ -h ] [ -f ] [ -d ] [ -a ] [ -c condition ] [ -i ignore-count ]
        # [ -p thread-id ] [ location ]
        cmd_args = '-i %d %s' % (ignore_count, loc)
        if len(cond):
            cmd_args = '-c "%s" %s' % (cond, cmd_args)
        if hw:
            cmd_args = "-h " + cmd_args
        if tmp:
            cmd_args = "-t " + cmd_args
        res, res_body = self._mi_cmd_run('-break-insert %s' % cmd_args)
        if res != 'done' or not res_body or 'bkpt' not in res_body \
                or 'number' not in res_body['bkpt']:
            raise DebuggerError('Failed to insert BP!')
        return res_body['bkpt']['number']

    def add_wp(self, exp, tp='w'):
        # -break-watch [ -a | -r ] expr
        cmd_args = '"%s"' % exp
        if tp == 'r':
            cmd_args = '-r %s' % cmd_args
        elif tp == 'rw':
            cmd_args = '-a %s' % cmd_args
        res, res_body = self._mi_cmd_run('-break-watch %s' % cmd_args)
        if res != 'done' or not res_body:
            raise DebuggerError('Failed to insert WP!')
        if tp == 'w':
            if 'wpt' not in res_body or 'number' not in res_body['wpt']:
                raise DebuggerError('Failed to insert WP!')
            return res_body['wpt']['number']
        elif tp == 'r':
            if 'hw-rwpt' not in res_body or 'number' not in res_body['hw-rwpt']:
                raise DebuggerError('Failed to insert RWP!')
            return res_body['hw-rwpt']['number']
        elif tp == 'rw':
            if 'hw-awpt' not in res_body or 'number' not in res_body['hw-awpt']:
                raise DebuggerError('Failed to insert AWP!')
            return res_body['hw-awpt']['number']
        return None

    def delete_bp(self, bp):
        # -break-delete ( breakpoint )+
        res, _ = self._mi_cmd_run('-break-delete %s' % bp)
        if res != 'done':
            raise DebuggerError('Failed to delete BP!')

    def monitor_run(self, cmd, tmo=None):
        res, resp = self._mi_cmd_run('mon %s' % cmd, tmo=tmo)
        if res != 'done':
            raise DebuggerError('Failed to run monitor cmd "%s"!' % cmd)
        return resp

    def wait_target_state(self, state, tmo=None):
        """
        Parameters
        ----------
        state : int
        tmo : int

        Returns
        -------
        stop_reason : int
        """
        with self._gdbmi_lock:
            end = time.time()
            if tmo is not None:
                end += tmo
            while self._target_state != state:
                if len(self._resp_cache):
                    # cached records are prepended inside _parse_mi_resp
                    recs = []  # self._resp_cache
                else:
                    # check for target state change report from GDB
                    recs = self._gdbmi.get_gdb_response(
                        0.5, raise_error_on_timeout=False)
                    if tmo and len(recs) == 0 and time.time() >= end:
                        raise DebuggerTargetStateTimeoutError(
                            "Failed to wait for target state %d!" % state)
                self._parse_mi_resp(recs, state)
        return self._target_stop_reason

    def get_target_state(self):
        return self._target_state, self._target_stop_reason

    def get_current_frame(self):
        return self._curr_frame

    def get_current_wp_val(self):
        return self._curr_wp_val

    def connect(self):
        if self.remote_target["use_remote"]:
            self.target_select(
                'remote', '%s:%s' % (self.remote_target["address"],
                                     self.remote_target["port"]))

    def disconnect(self):
        self.target_disconnect()

    def get_thread_info(self, thread_id=None):
        """
        Parameters
        ----------
        thread_id : int or None
            thread to info if exists

        Returns
        -------
        current-thread-id : str
        threads : dict
        """
        # -thread-info [ thread-id ]
        if thread_id:
            cmd = '-thread-info %d' % thread_id
        else:
            cmd = '-thread-info'
        # streaming of info for all threads over gdbmi can take some time, so use large timeout value
        res, res_body = self._mi_cmd_run(cmd, tmo=20)
        # if res != 'done' or not res_body or 'threads' not in res_body or 'current-thread-id' not in res_body:
        if res != 'done' or not res_body or 'threads' not in res_body:  # TODO verify removing current-thread-id
            raise DebuggerError('Failed to get thread info!')
        return res_body.get('current-thread-id', None), res_body['threads']

    def select_thread(self, num):
        res, _ = self._mi_cmd_run('-thread-select %d' % num)
        if res != 'done':
            raise DebuggerError('Failed to set thread!')
        return res

    def set_thread(self, num):
        """Old-named method. For backward compatibility"""
        return self.select_thread(num)

    def get_thread_ids(self):
        # -thread-list-ids expr
        res, thread_ids = self._mi_cmd_run('-thread-list-ids')
        if res != 'done':
            raise DebuggerError('Failed to eval expression!')
        return thread_ids

    def get_selected_thread(self):
        #
        sel_id, ths = self.get_thread_info()
        for th in ths:
            if th['id'] == sel_id:
                return th
        return None

    def set_app_offset(self, **kwargs):
        return None

    def target_program(self, **kwargs):
        return None
class GdbServer:
    """Bridges a real gdb process to recorded program states.

    A pty pair is created: gdb is told to '-target-select remote <pts>' and a
    background thread services the master side, speaking the GDB Remote
    Serial Protocol and answering from GdbRegSpace/GdbMemSpace snapshots.
    """

    def __init__(
            self,
            states: StateManager,
            binary: str,
            cda: CoredumpAnalyzer,
            active_state: Optional[State] = None,
    ) -> None:
        # FIXME: this binary is original path
        master, ptsname = create_pty()
        self.master = master
        # Dispatch table: first packet character -> handler method.
        self.COMMANDS = {
            "q": self.handle_query,
            "g": self.read_register_all,
            "G": self.write_register_all,
            "H": self.set_thread,
            "m": self.read_memory,
            "M": self.write_memory,
            "p": self.read_register,
            "P": self.write_register,
            "v": self.handle_long_commands,
            "X": self.write_memory_bin,
            "Z": self.insert_breakpoint,
            "z": self.remove_breakpoint,
            "?": self.stop_reason,
            "!": self.extend_mode,
        }
        self.states = states
        # Default to the most recent major state when none is given.
        self.active_state = active_state if active_state else states.get_major(
            -1)
        self.regs = GdbRegSpace(self.active_state)
        self.mem = GdbMemSpace(self.active_state, cda)
        self.packet_size = PAGESIZE
        self.libs = GdbSharedLibrary(self.active_state, self.packet_size)
        self.gdb = GdbController(
            gdb_args=["--quiet", "--nx", "--interpreter=mi2"])
        # Point gdb at our pty; the server thread below answers its packets.
        self.gdb.write("-target-select remote %s" % ptsname, timeout_sec=10)
        self.thread = threading.Thread(target=self.run)
        self.thread.start()
        self.gdb.write("-file-exec-and-symbols %s" % binary, timeout_sec=100)
        self.gdb.write("set stack-cache off", timeout_sec=100)

    def update_active(self) -> None:
        """Propagate a new active state to all sub-spaces and resume gdb."""
        self.regs.active_state = self.active_state
        self.mem.active_state = self.active_state
        self.libs.active_state = self.active_state
        self.write_request("c")

    def read_variables(self) -> List[Dict[str, Any]]:
        """Run a helper gdb python script and parse its 'ARGS: ...' lines.

        Each parsed entry describes one local variable:
        name, type string, location kind, address/expression, size, comment.
        """
        py_file = APP_ROOT.joinpath("gdb/gdb_get_locals.py")
        resp = self.write_request('python execfile ("{}")'.format(py_file))
        res = []
        for r in resp:
            if ("payload" in r.keys() and isinstance(r["payload"], str)
                    and r["payload"].startswith("ARGS:")):
                # Payload format (space separated):
                # ARGS: <name> <type-with-%-for-space> <loc> <addr[&comment]> <size>
                l = r["payload"].split(" ")
                name = l[1]
                tystr = l[2].replace("%", " ")
                idr = int(l[3])
                addr_comment = l[4].strip().replace("\\n", "")
                if "&" in addr_comment:
                    if idr == 1:
                        ll = addr_comment.partition("&")
                        addr = int(ll[0], 16)  # type: Union[str, int]
                        comment = ll[2]
                    # NOTE(review): no else-branch here — if idr != 1 and the
                    # field contains '&', `addr`/`comment` stay unbound and the
                    # append below raises NameError. Confirm whether that
                    # combination can occur.
                else:
                    if idr == 1:
                        addr = int(addr_comment, 16)
                        comment = ""
                    else:
                        # Non-address location (e.g. a register expression).
                        addr = addr_comment
                        comment = ""
                size = int(l[5].strip().replace("\\n", ""))
                res.append({
                    "name": name,
                    "type": tystr,
                    "loc": idr,
                    "addr": addr,
                    "size": size,
                    "comment": comment,
                })
        return res

    def eval_expression(self, expr: str) -> None:
        """Evaluate an expression in gdb and print the MI response."""
        res = self.gdb.write("-data-evaluate-expression %s" % expr,
                             timeout_sec=99999)
        print(res)

    def write_request(self, req: str, **kwargs: Any) -> List[Dict[str, Any]]:
        """Send a gdb command and drain all responses until gdb goes quiet.

        `timeout_sec` may be passed through kwargs (default 10).
        """
        timeout_sec = kwargs.pop("timeout_sec", 10)
        kwargs["read_response"] = False
        self.gdb.write(req, timeout_sec=timeout_sec, **kwargs)
        resp = []  # type: List[Dict[str, Any]]
        while True:
            try:
                resp += self.gdb.get_gdb_response()
            except Exception:
                # Timeout / closed pipe terminates the drain loop.
                break
        return resp

    def run(self) -> None:
        """Server-thread main loop: read RSP bytes from the pty and process."""
        l.info("start server gdb server")
        buf = ""
        while True:
            try:
                data = os.read(self.master.fileno(), PAGESIZE)
            except OSError as e:
                l.info("gdb connection was closed: %s", e)
                return
            if len(data) == 0:
                # NOTE(review): EOF is only logged — the loop keeps spinning on
                # a closed descriptor. A `return` here looks intended; confirm.
                l.debug("gdb connection was closed")
            buf += data.decode("utf-8")
            buf = self.process_data(buf)

    def process_data(self, buf: str) -> str:
        """Consume complete '$<payload>#<checksum>' packets from `buf`.

        Returns the unconsumed remainder (partial packet, if any).
        """
        while len(buf):
            # Swallow leading ack ('+') / nak ('-') characters.
            if buf[0] == "+" or buf[0] == "-":
                buf = buf[1:]
            if len(buf) == 0:
                return buf
            if "$" not in buf:
                return buf
            begin = buf.index("$") + 1
            end = buf.index("#")
            if begin >= 0 and end < len(buf):
                packet = buf[begin:end]
                # Two hex digits after '#' are the mod-256 checksum.
                checksum = int(buf[end + 2], 16)
                checksum += int(buf[end + 1], 16) << 4
                assert checksum == compute_checksum(packet)
                self.process_packet(packet)
                buf = buf[end + 3:]
        return buf

    def write_ack(self) -> None:
        """Send a bare acknowledgement to gdb."""
        # NOTE(review): writes a str while write_response() writes encoded
        # bytes — confirm the master stream accepts both.
        self.master.write("+")
        self.master.flush()

    def process_packet(self, packet: str) -> None:
        """Dispatch one decoded packet to its handler and send the reply."""
        handler = self.COMMANDS.get(packet[0], None)
        request = "".join(packet[1:])
        l.warning("<-- %s%s" % (packet[0], request))
        if handler is None:
            l.warning("unknown command %s%s received" % (packet[0], request))
            response = ""
        else:
            response = handler(request)
        self.write_response(response)

    def write_response(self, response: str) -> None:
        # Each packet should be acknowledged with a single character.
        # '+' to indicate satisfactory receipt
        l.warning("--> %s" % response)
        s = "+$%s#%.2x" % (response, compute_checksum(response))
        self.master.write(s.encode("utf-8"))
        self.master.flush()

    def extend_mode(self, packet: str) -> str:
        """ ! """
        return "OK"

    def read_register_all(self, packet: str) -> str:
        """ g """
        return self.regs.read_all()

    def write_register_all(self, packet: str) -> str:
        """ G XX... """
        self.regs.write_all(packet)
        return "OK"

    def read_register(self, packet: str) -> str:
        """ p n """
        n = int(packet, 16)
        # FIXME: gdb request out of range while gdb info frame
        if n < len(self.regs.names):
            return self.regs[self.regs.names[n]]
        # Out-of-range register: report an all-ones placeholder.
        return "ffffffff"

    def write_register(self, packet: str) -> str:
        """ P n...=r... """
        n_, r_ = packet.split("=")
        n = int(n_, 16)
        r = int(r_, 16)
        if n < len(self.regs.names):
            self.regs[self.regs.names[n]] = r
        return "OK"

    def set_thread(self, packet: str) -> str:
        """ H op thread-id """
        # Single-threaded replay: accept any thread selection.
        return "OK"

    def read_memory(self, packet: str) -> str:
        """ m addr,length """
        addr_, length_ = packet.split(",")
        addr = int(addr_, 16)
        length = int(length_, 16)
        return self.mem.read(addr, length)

    def write_memory(self, packet: str) -> str:
        """ M addr,length:XX """
        # Note: `l` here shadows the module-level logger within this method.
        l = packet.split(",")
        addr_ = l[0]
        length_, value = l[1].split(":")
        addr = int(addr_, 16)
        length = int(length_, 16)
        self.mem.write(addr, length, value)
        return "OK"

    def write_memory_bin(self, packet: str) -> str:
        """ X addr,length:XX(bin) """
        # NOTE(review): unimplemented — returns None despite the `-> str`
        # annotation; write_response() will then receive None.
        pass

    def insert_breakpoint(self, packet: str) -> str:
        """ Z type,addr,kind
        type: 0 software (0xcc)
              1 hardware (drx)
              2 write watchpoint
              3 read watchpoint
        """
        # Breakpoints are a no-op against a recorded state.
        return "OK"

    def remove_breakpoint(self, packet: str) -> str:
        """ z type,addr,kind """
        return "OK"

    def stop_reason(self, packet: str) -> str:
        """ ? — report why the target stopped (always SIGTRAP here). """
        GDB_SIGNAL_TRAP = 5
        return "S%.2x" % GDB_SIGNAL_TRAP

    def handle_long_commands(self, packet: str) -> str:
        """ v... multi-letter packets (vCont, vCtrlC, vMustReplyEmpty). """

        def handle_cont(action: str, tid: Optional[int] = None) -> str:
            # TODO: for a continue/step/stop operation
            self.write_response("T05library:r;")
            return "S05"

        if packet.startswith("Cont"):
            supported_action = ["", "c", "s", "t"]
            # TODO: C sig/S sig/r start,end
            packet = packet[4:]
            if packet == "?":
                # Feature probe: advertise the supported actions.
                return ";".join(supported_action)
            action = packet.split(";")[1]
            action = action.split(":")[0]
            if action in supported_action:
                return handle_cont(action)
            l.warning("unknown command: v%s", "Cont" + packet)
            return ""
        if packet.startswith("CtrlC"):
            return "OK"
        if packet.startswith("MustReplyEmpty"):
            return ""
        else:
            l.warning("unknown command: v%s", packet)
            return ""

    def handle_query(self, packet: str) -> str:
        """
        qSupported|qAttached|qC
        qXfer:...:read:annex:offset,size
        """
        if packet.startswith("Supported"):
            features = [
                "qXfer:libraries-svr4:read+",
                # 'qXfer:memory-map:read+'
            ]
            features.append("PacketSize=%x" % self.packet_size)
            return ";".join(features)
        elif packet.startswith("Xfer"):
            reqs = packet.split(":")
            # FIXME: not working now
            if reqs[1] == "libraries-svr4" and reqs[2] == "read":
                data = reqs[4].split(",")
                return self.libs.read_xml(int(data[0], 16), int(data[1], 16))
            if reqs[1] == "memory-map" and reqs[2] == "read":
                # TODO: add memory-map, (do we really need it now?)
                return ""
            return ""
        elif packet.startswith("Attached"):
            return "1"
        elif packet.startswith("C"):
            # FIXME real thread id
            return ""  # empty means no threads
        elif packet.startswith("fThreadInfo"):
            return "m0"
        elif packet.startswith("sThreadInfo"):
            return "l"
        elif packet.startswith("TStatus"):
            # catch all for all commands we know and don't want to implement
            return ""
        elif packet.startswith("Symbol"):
            if packet == "Symbol::":
                return "OK"
            _, sym_value, sym_name = packet.split(":")
            return "OK"
        else:
            l.warning("unknown query: %s", packet)
            return ""
class DuzzleListener(threading.Thread): """ docstring for DuzzleListener """ def __init__(self, out_q, in_q, verbose): """ Constructor for DuzzleListener. Args: out_q: Queue of GDMI commands to be dispatched. in_q: Queue of responses from GDB. """ # Super constructor super(DuzzleListener, self).__init__() # Initialise self._out_q = out_q self._in_q = in_q self._verbose = verbose self._communicator = GdbController() def run(self): """ Main thread. """ # Main listener loop while True: # Read from gdb resp = self._communicator.get_gdb_response( timeout_sec=0, raise_error_on_timeout=False) # Write to out queue if len(resp) > 0: for msg in resp: # Debug utils.dprint('[+] Read response', self._verbose) utils.dprint(msg, self._verbose) self._in_q.put(msg) # Out message to process if not self._out_q.empty(): # Get out message out_msg = self._out_q.get() # Check for kill command if out_msg != 'die': # Debug utils.dprint('[+] Dispatched command "{}"'.format(out_msg), self._verbose) # Write to gdb self._communicator.write(out_msg, read_response=False) self._out_q.task_done() # Kill thread else: # Kill gdb self._communicator.write('-gdb-exit', read_response=False) # Debug utils.dprint('[+] Killing thread', self._verbose) return
class CrackMe():
    """Drives a gdb/MI session against a local './crackme' binary.

    Each public method issues one gdb command and returns a dict built from
    the MI responses (typically containing at least a 'result' key).
    """

    def __init__(self, args=None):
        """Spawn gdb attached to ./crackme.

        Args:
            args: Optional list of argv strings for the crackme binary.
                  (Fixed: was a mutable default `args=[]`, which is shared
                  between calls; behavior for all existing callers is
                  unchanged.)
        """
        if args is None:
            args = []
        self.uid = str(uuid.uuid4())
        # Start gdb process
        gdb_args = (['--nx', '--quiet', '--interpreter=mi2'] +
                    ['--args', './crackme'] + args)
        self.gdbmi = GdbController(gdb_args=gdb_args)
        logging.info('Starting gdb with ' +
                     repr(self.gdbmi.get_subprocess_cmd()))

    def wait_for_resp(self):
        """Drain gdb responses until a 'result' record arrives.

        Returns:
            (msgs, out): all raw response dicts, and a dict whose 'result'
            key holds the result record's message.
        """
        msgs = []
        out = {}
        while True:
            resp = self.gdbmi.get_gdb_response(timeout_sec=4,
                                               raise_error_on_timeout=False)
            msgs += resp
            for m in resp:
                m = to_namespace(m)
                if m.type != 'result':
                    continue
                out['result'] = m.message
                return msgs, out

    def run(self):
        """Start the inferior ('run') and wait until it stops."""
        self.gdbmi.write('run', read_response=False)
        return self.process_execution()

    def cont(self):
        """Resume the inferior ('continue') and wait until it stops."""
        self.gdbmi.write('continue', read_response=False)
        return self.process_execution()

    def si(self):
        """Single step one instruction ('si')."""
        self.gdbmi.write('si', read_response=False)
        return self.process_execution()

    def ni(self):
        """Step over one instruction ('ni')."""
        self.gdbmi.write('ni', read_response=False)
        return self.process_execution()

    def breakpoint(self, addr):
        """Set a breakpoint at an address expression ('break *<addr>')."""
        addr = filter_str(addr)
        self.gdbmi.write('break *' + addr, read_response=False)
        msgs, out = self.wait_for_resp()
        return out

    def set(self, arg):
        """Run a gdb 'set <arg>' command."""
        arg = filter_str(arg)
        self.gdbmi.write('set ' + arg, read_response=False)
        msgs, out = self.wait_for_resp()
        return out

    def _console_text(self, msgs):
        """Concatenate 'console' stream payloads and undo gdb's escaping.

        Shared by disassemble()/memory()/registers(), which previously
        duplicated this decode logic inline.
        """
        data = ''
        for m in msgs:
            m = to_namespace(m)
            if m.type == 'console':
                data += m.payload
        # gdb escapes console text; round-trip restores real characters.
        return data.encode('latin-1').decode('unicode_escape')

    def disassemble(self, arg):
        """Disassemble per gdb's 'disassemble <arg>'; text under 'data'."""
        arg = filter_str(arg)
        self.gdbmi.write('disassemble ' + arg, read_response=False)
        msgs, out = self.wait_for_resp()
        out['data'] = self._console_text(msgs)
        return out

    def memory(self, arg):
        """Examine memory via gdb's 'x/<arg>'; text under 'data'."""
        arg = filter_str(arg)
        self.gdbmi.write('x/' + arg, read_response=False)
        msgs, out = self.wait_for_resp()
        out['data'] = self._console_text(msgs)
        return out

    def registers(self):
        """Read registers ('i r'); dict of name->value under 'registers'."""
        self.gdbmi.write('i r', read_response=False)
        msgs, out = self.wait_for_resp()
        data = self._console_text(msgs)
        data = data.strip().split('\n')
        regs = {x[0]: x[1] for x in (y.split() for y in data) if len(x) >= 2}
        out['registers'] = regs
        return out

    def process_execution(self):
        """Pump gdb responses until the inferior stops or errors.

        Returns a dict with 'output' (collected program output),
        'stop_reason', and 'bp_addr' when a breakpoint was hit.
        """
        run_output = ''
        running = True
        out = {}
        # Loop until execution stops
        while running:
            resp = self.gdbmi.get_gdb_response(timeout_sec=4,
                                               raise_error_on_timeout=False)
            for m in resp:
                m = to_namespace(m)
                # Console output from the inferior
                if m.type == 'output':
                    run_output += m.payload
                if m.type == 'result' and m.message == 'error':
                    running = False
                    out['stop_reason'] = m.payload.msg
                # Program stopped
                if m.type == 'notify':
                    if m.message == 'stopped':
                        running = False
                        reason = m.payload.reason
                        out['stop_reason'] = reason
                        if reason == 'breakpoint-hit':
                            out['bp_addr'] = m.payload.frame.addr
        out['output'] = run_output
        return out
class PanicTestMixin(object):
    """ Provides custom functionality for the panic test DUT """
    BOOT_CMD_ADDR = 0x9000
    BOOT_CMD_SIZE = 0x1000
    DEFAULT_EXPECT_TIMEOUT = 10
    # Markers framing the base64 core dump in the UART log.
    COREDUMP_UART_START = '================= CORE DUMP START ================='
    COREDUMP_UART_END = '================= CORE DUMP END ================='

    def start_test(self, test_name):
        """ Starts the app and sends it the test name """
        self.test_name = test_name
        # Start the app and verify that it has started up correctly
        self.start_capture_raw_data()
        self.start_app()
        self.expect('Enter test name: ')
        Utility.console_log('Setting boot command: ' + test_name)
        self.write(test_name)
        self.expect('Got test name: ' + test_name)

    def expect_none(self, *patterns, **timeout_args):
        """ like dut.expect_all, but with an inverse logic:
        succeed (return True) only if NONE of the patterns appear
        before the timeout. """
        found_data = []
        if 'timeout' not in timeout_args:
            timeout_args['timeout'] = 1

        def found(data):
            raise AssertionError('Unexpected: {}'.format(data))
            # NOTE(review): unreachable — the raise above means found_data
            # is never populated, so the second AssertionError below always
            # formats an empty list. Confirm intended order.
            found_data.append(data)

        try:
            expect_items = [(pattern, found) for pattern in patterns]
            self.expect_any(*expect_items, **timeout_args)
            raise AssertionError('Unexpected: {}'.format(found_data))
        except DUT.ExpectTimeout:
            # Timing out without a match is the success case here.
            return True

    def expect_gme(self, reason):
        """ Expect method for Guru Meditation Errors """
        self.expect(r"Guru Meditation Error: Core 0 panic'ed (%s)" % reason)

    def expect_reg_dump(self, core=0):
        """ Expect method for the register dump """
        self.expect(re.compile(r'Core\s+%d register dump:' % core))

    def expect_elf_sha256(self):
        """ Expect method for ELF SHA256 line """
        elf_sha256 = self.app.get_elf_sha256()
        sdkconfig = self.app.get_sdkconfig()
        # The app may truncate the printed SHA; match only that prefix.
        elf_sha256_len = int(sdkconfig.get('CONFIG_APP_RETRIEVE_LEN_ELF_SHA', '16'))
        self.expect('ELF file SHA256: ' + elf_sha256[0:elf_sha256_len])

    def expect_backtrace(self):
        # A valid backtrace must be printed and must not be corrupted.
        self.expect('Backtrace:')
        self.expect_none('CORRUPTED')

    def __enter__(self):
        self._raw_data = None
        self.gdb = None
        return self

    def __exit__(self, type, value, traceback):
        # Persist the captured UART log, shut gdb down, close the DUT.
        log_folder = self.app.get_log_folder(TEST_SUITE)
        with open(os.path.join(log_folder, 'log_' + self.test_name + '.txt'), 'w') as log_file:
            Utility.console_log('Writing output of {} to {}'.format(self.test_name, log_file.name))
            log_file.write(self.get_raw_data())
        if self.gdb:
            self.gdb.exit()
        self.close()

    def get_raw_data(self):
        # Lazily stop capture the first time the log is requested.
        if not self._raw_data:
            self._raw_data = self.stop_capture_raw_data()
        return self._raw_data

    def _call_espcoredump(self, extra_args, coredump_file_name, output_file_name):
        """ Run espcoredump.py 'info_corefile' on a core dump file,
        leaving its output file open in self.coredump_output. """
        # no "with" here, since we need the file to be open for later inspection by the test case
        self.coredump_output = open(output_file_name, 'w')
        espcoredump_script = os.path.join(os.environ['IDF_PATH'], 'components', 'espcoredump', 'espcoredump.py')
        espcoredump_args = [
            sys.executable,
            espcoredump_script,
            'info_corefile',
            '--core', coredump_file_name,
        ]
        espcoredump_args += extra_args
        espcoredump_args.append(self.app.elf_file)
        Utility.console_log('Running ' + ' '.join(espcoredump_args))
        Utility.console_log('espcoredump output is written to ' + self.coredump_output.name)
        subprocess.check_call(espcoredump_args, stdout=self.coredump_output)
        # Rewind so the test case can read the output from the start.
        self.coredump_output.flush()
        self.coredump_output.seek(0)

    def process_coredump_uart(self):
        """ Extract the core dump from UART output of the test, run espcoredump on it """
        log_folder = self.app.get_log_folder(TEST_SUITE)
        data = self.get_raw_data()
        coredump_start = data.find(self.COREDUMP_UART_START)
        coredump_end = data.find(self.COREDUMP_UART_END)
        # Slice out the base64 blob between the start/end markers.
        coredump_base64 = data[coredump_start + len(self.COREDUMP_UART_START):coredump_end]
        with open(os.path.join(log_folder, 'coredump_data_' + self.test_name + '.b64'), 'w') as coredump_file:
            Utility.console_log('Writing UART base64 core dump to ' + coredump_file.name)
            coredump_file.write(coredump_base64)
        output_file_name = os.path.join(log_folder, 'coredump_uart_result_' + self.test_name + '.txt')
        self._call_espcoredump(['--core-format', 'b64'], coredump_file.name, output_file_name)

    def process_coredump_flash(self):
        """ Extract the core dump from flash, run espcoredump on it """
        log_folder = self.app.get_log_folder(TEST_SUITE)
        coredump_file_name = os.path.join(log_folder, 'coredump_data_' + self.test_name + '.bin')
        Utility.console_log('Writing flash binary core dump to ' + coredump_file_name)
        self.dump_flash(coredump_file_name, partition='coredump')
        output_file_name = os.path.join(log_folder, 'coredump_flash_result_' + self.test_name + '.txt')
        self._call_espcoredump(['--core-format', 'raw'], coredump_file_name, output_file_name)

    def _gdb_write(self, command):
        """ Wrapper to write to gdb with a longer timeout, as test runner host can be slow sometimes """
        return self.gdb.write(command, timeout_sec=10)

    def start_gdb(self):
        """
        Runs GDB and connects it to the "serial" port of the DUT.
        After this, the DUT expect methods can no longer be used to capture output.
        """
        self.stop_receive()
        self._port_close()

        Utility.console_log('Starting GDB...', 'orange')
        self.gdb = GdbController(gdb_path=self.TOOLCHAIN_PREFIX + 'gdb')
        Utility.console_log('Running command: {}'.format(self.gdb.get_subprocess_cmd()), 'orange')

        # Retry the first read: a loaded runner host can be slow to fork gdb.
        for _ in range(10):
            try:
                # GdbController creates a process with subprocess.Popen(). Is it really running? It is probable that
                # an RPI under high load will get non-responsive during creating a lot of processes.
                resp = self.gdb.get_gdb_response(timeout_sec=10)  # calls verify_valid_gdb_subprocess() internally
                # it will be interesting to look up this response if the next GDB command fails (times out)
                Utility.console_log('GDB response: {}'.format(resp), 'orange')
                break  # success
            except GdbTimeoutError:
                Utility.console_log('GDB internal error: cannot get response from the subprocess', 'orange')
            except NoGdbProcessError:
                Utility.console_log('GDB internal error: process is not running', 'red')
                break  # failure - TODO: create another GdbController
            except ValueError:
                Utility.console_log('GDB internal error: select() returned an unexpected file number', 'red')

        # pygdbmi logs to console by default, make it log to a file instead
        log_folder = self.app.get_log_folder(TEST_SUITE)
        pygdbmi_log_file_name = os.path.join(log_folder, 'pygdbmi_log_' + self.test_name + '.txt')
        pygdbmi_logger = self.gdb.logger
        pygdbmi_logger.setLevel(logging.DEBUG)
        while pygdbmi_logger.hasHandlers():
            pygdbmi_logger.removeHandler(pygdbmi_logger.handlers[0])
        log_handler = logging.FileHandler(pygdbmi_log_file_name)
        log_handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s: %(message)s'))
        pygdbmi_logger.addHandler(log_handler)

        # Set up logging for GDB remote protocol
        gdb_remotelog_file_name = os.path.join(log_folder, 'gdb_remote_log_' + self.test_name + '.txt')
        self._gdb_write('-gdb-set remotelogfile ' + gdb_remotelog_file_name)

        # Load the ELF file
        self._gdb_write('-file-exec-and-symbols {}'.format(self.app.elf_file))

        # Connect GDB to UART
        Utility.console_log('Connecting to GDB Stub...', 'orange')
        self._gdb_write('-gdb-set serial baud 115200')
        responses = self._gdb_write('-target-select remote ' + self.get_gdb_remote())

        # Make sure we get the 'stopped' notification
        stop_response = self.find_gdb_response('stopped', 'notify', responses)
        if not stop_response:
            # Target may still be running: interrupt it and look again.
            responses = self._gdb_write('-exec-interrupt')
            stop_response = self.find_gdb_response('stopped', 'notify', responses)
        assert stop_response
        frame = stop_response['payload']['frame']
        # Frame info may omit file/line; substitute placeholders for the log.
        if 'file' not in frame:
            frame['file'] = '?'
        if 'line' not in frame:
            frame['line'] = '?'
        Utility.console_log('Stopped in {func} at {addr} ({file}:{line})'.format(**frame), 'orange')

        # Drain remaining responses
        self.gdb.get_gdb_response(raise_error_on_timeout=False)

    def gdb_backtrace(self):
        """
        Returns the list of stack frames for the current thread.
        Each frame is a dictionary, refer to pygdbmi docs for the format.
        """
        assert self.gdb
        responses = self._gdb_write('-stack-list-frames')
        return self.find_gdb_response('done', 'result', responses)['payload']['stack']

    @staticmethod
    def match_backtrace(gdb_backtrace, expected_functions_list):
        """
        Returns True if the function names listed in expected_functions_list match the backtrace
        given by gdb_backtrace argument. The latter is in the same format as returned by gdb_backtrace()
        function.
        """
        return all([frame['func'] == expected_functions_list[i] for i, frame in enumerate(gdb_backtrace)])

    @staticmethod
    def find_gdb_response(message, response_type, responses):
        """
        Helper function which extracts one response from an array of GDB responses, filtering
        by message and type. Returned message is a dictionary, refer to pygdbmi docs for the format.
        """
        def match_response(response):
            return (response['message'] == message and response['type'] == response_type)

        filtered_responses = [r for r in responses if match_response(r)]
        if not filtered_responses:
            return None
        return filtered_responses[0]
class PanicTestMixin(object):
    """ Provides custom functionality for the panic test DUT """
    BOOT_CMD_ADDR = 0x9000
    BOOT_CMD_SIZE = 0x1000
    DEFAULT_EXPECT_TIMEOUT = 10
    # Markers framing the base64 core dump in the UART log.
    COREDUMP_UART_START = "================= CORE DUMP START ================="
    COREDUMP_UART_END = "================= CORE DUMP END ================="

    def start_test(self, test_name):
        """ Starts the app and sends it the test name """
        self.test_name = test_name
        # Start the app and verify that it has started up correctly
        self.start_capture_raw_data()
        self.start_app()
        self.expect("Enter test name: ")
        Utility.console_log("Setting boot command: " + test_name)
        self.write(test_name)
        self.expect("Got test name: " + test_name)

    def expect_none(self, *patterns, **timeout_args):
        """ like dut.expect_all, but with an inverse logic:
        succeed (return True) only if NONE of the patterns appear
        before the timeout. """
        found_data = []
        if "timeout" not in timeout_args:
            timeout_args["timeout"] = 1

        def found(data):
            raise AssertionError("Unexpected: {}".format(data))
            # NOTE(review): unreachable after the raise above; kept as-is
            # to preserve behavior. found_data therefore stays empty.
            found_data.append(data)

        try:
            expect_items = [(pattern, found) for pattern in patterns]
            self.expect_any(*expect_items, **timeout_args)
            raise AssertionError("Unexpected: {}".format(found_data))
        except DUT.ExpectTimeout:
            # Timing out without a match is the success case here.
            return True

    def expect_gme(self, reason):
        """ Expect method for Guru Meditation Errors """
        self.expect(r"Guru Meditation Error: Core 0 panic'ed (%s)" % reason)

    def expect_reg_dump(self, core=0):
        """ Expect method for the register dump """
        self.expect(re.compile(r"Core\s+%d register dump:" % core))

    def expect_elf_sha256(self):
        """ Expect method for ELF SHA256 line """
        elf_sha256 = self.app.get_elf_sha256()
        sdkconfig = self.app.get_sdkconfig()
        # The app may truncate the printed SHA; match only that prefix.
        elf_sha256_len = int(
            sdkconfig.get("CONFIG_APP_RETRIEVE_LEN_ELF_SHA", "16"))
        self.expect("ELF file SHA256: " + elf_sha256[0:elf_sha256_len])

    def expect_backtrace(self):
        # A valid backtrace must be printed and must not be corrupted.
        self.expect("Backtrace:")
        self.expect_none("CORRUPTED")

    def __enter__(self):
        self._raw_data = None
        self.gdb = None
        return self

    def __exit__(self, type, value, traceback):
        # Persist the captured UART log, shut gdb down, close the DUT.
        log_folder = self.app.get_log_folder(TEST_SUITE)
        with open(os.path.join(log_folder,
                               "log_" + self.test_name + ".txt"),
                  "w") as log_file:
            Utility.console_log("Writing output of {} to {}".format(
                self.test_name, log_file.name))
            log_file.write(self.get_raw_data())
        if self.gdb:
            self.gdb.exit()
        self.close()

    def get_raw_data(self):
        # Lazily stop capture the first time the log is requested.
        if not self._raw_data:
            self._raw_data = self.stop_capture_raw_data()
        return self._raw_data

    def _call_espcoredump(self, extra_args, coredump_file_name,
                          output_file_name):
        """ Run espcoredump.py 'info_corefile' on a core dump file,
        leaving its output file open in self.coredump_output. """
        # no "with" here, since we need the file to be open for later inspection by the test case
        self.coredump_output = open(output_file_name, "w")
        espcoredump_script = os.path.join(os.environ["IDF_PATH"], "components",
                                          "espcoredump", "espcoredump.py")
        espcoredump_args = [
            sys.executable,
            espcoredump_script,
            "info_corefile",
            "--core", coredump_file_name,
        ]
        espcoredump_args += extra_args
        espcoredump_args.append(self.app.elf_file)
        Utility.console_log("Running " + " ".join(espcoredump_args))
        Utility.console_log("espcoredump output is written to " +
                            self.coredump_output.name)
        subprocess.check_call(espcoredump_args, stdout=self.coredump_output)
        # Rewind so the test case can read the output from the start.
        self.coredump_output.flush()
        self.coredump_output.seek(0)

    def process_coredump_uart(self):
        """ Extract the core dump from UART output of the test, run espcoredump on it """
        log_folder = self.app.get_log_folder(TEST_SUITE)
        data = self.get_raw_data()
        coredump_start = data.find(self.COREDUMP_UART_START)
        coredump_end = data.find(self.COREDUMP_UART_END)
        # Slice out the base64 blob between the start/end markers.
        coredump_base64 = data[coredump_start +
                               len(self.COREDUMP_UART_START):coredump_end]
        with open(
                os.path.join(log_folder,
                             "coredump_data_" + self.test_name + ".b64"),
                "w") as coredump_file:
            Utility.console_log("Writing UART base64 core dump to " +
                                coredump_file.name)
            coredump_file.write(coredump_base64)
        output_file_name = os.path.join(
            log_folder, "coredump_uart_result_" + self.test_name + ".txt")
        self._call_espcoredump(["--core-format", "b64"], coredump_file.name,
                               output_file_name)

    def process_coredump_flash(self):
        """ Extract the core dump from flash, run espcoredump on it """
        log_folder = self.app.get_log_folder(TEST_SUITE)
        coredump_file_name = os.path.join(
            log_folder, "coredump_data_" + self.test_name + ".bin")
        Utility.console_log("Writing flash binary core dump to " +
                            coredump_file_name)
        # Fixed: was `self.dump_flush(...)` — a typo that would raise
        # AttributeError; the DUT API (and the sibling copy of this mixin)
        # uses dump_flash().
        self.dump_flash(coredump_file_name, partition="coredump")
        output_file_name = os.path.join(
            log_folder, "coredump_flash_result_" + self.test_name + ".txt")
        self._call_espcoredump(["--core-format", "raw"], coredump_file_name,
                               output_file_name)

    def start_gdb(self):
        """
        Runs GDB and connects it to the "serial" port of the DUT.
        After this, the DUT expect methods can no longer be used to capture output.
        """
        self.stop_receive()
        self._port_close()

        Utility.console_log("Starting GDB...", "orange")
        self.gdb = GdbController(gdb_path=self.TOOLCHAIN_PREFIX + "gdb")

        # pygdbmi logs to console by default, make it log to a file instead
        log_folder = self.app.get_log_folder(TEST_SUITE)
        pygdbmi_log_file_name = os.path.join(
            log_folder, "pygdbmi_log_" + self.test_name + ".txt")
        pygdbmi_logger = self.gdb.logger
        pygdbmi_logger.setLevel(logging.DEBUG)
        while pygdbmi_logger.hasHandlers():
            pygdbmi_logger.removeHandler(pygdbmi_logger.handlers[0])
        log_handler = logging.FileHandler(pygdbmi_log_file_name)
        log_handler.setFormatter(
            logging.Formatter("%(asctime)s %(levelname)s: %(message)s"))
        pygdbmi_logger.addHandler(log_handler)

        # Set up logging for GDB remote protocol
        gdb_remotelog_file_name = os.path.join(
            log_folder, "gdb_remote_log_" + self.test_name + ".txt")
        self.gdb.write("-gdb-set remotelogfile " + gdb_remotelog_file_name)

        # Load the ELF file
        self.gdb.write("-file-exec-and-symbols {}".format(self.app.elf_file))

        # Connect GDB to UART
        Utility.console_log("Connecting to GDB Stub...", "orange")
        self.gdb.write("-gdb-set serial baud 115200")
        responses = self.gdb.write("-target-select remote " +
                                   self.get_gdb_remote(), timeout_sec=3)

        # Make sure we get the 'stopped' notification
        stop_response = self.find_gdb_response('stopped', 'notify', responses)
        if not stop_response:
            # Target may still be running: interrupt it and look again.
            responses = self.gdb.write("-exec-interrupt", timeout_sec=3)
            stop_response = self.find_gdb_response('stopped', 'notify',
                                                   responses)
        assert stop_response
        frame = stop_response["payload"]["frame"]
        # Frame info may omit file/line; substitute placeholders for the log.
        if "file" not in frame:
            frame["file"] = "?"
        if "line" not in frame:
            frame["line"] = "?"
        Utility.console_log(
            "Stopped in {func} at {addr} ({file}:{line})".format(**frame),
            "orange")

        # Drain remaining responses
        self.gdb.get_gdb_response(raise_error_on_timeout=False)

    def gdb_backtrace(self):
        """
        Returns the list of stack frames for the current thread.
        Each frame is a dictionary, refer to pygdbmi docs for the format.
        """
        assert self.gdb
        responses = self.gdb.write("-stack-list-frames", timeout_sec=3)
        return self.find_gdb_response("done", "result",
                                      responses)["payload"]["stack"]

    @staticmethod
    def match_backtrace(gdb_backtrace, expected_functions_list):
        """
        Returns True if the function names listed in expected_functions_list match the backtrace
        given by gdb_backtrace argument. The latter is in the same format as returned by gdb_backtrace()
        function.
        """
        return all([
            frame["func"] == expected_functions_list[i]
            for i, frame in enumerate(gdb_backtrace)
        ])

    @staticmethod
    def find_gdb_response(message, response_type, responses):
        """
        Helper function which extracts one response from an array of GDB responses, filtering
        by message and type. Returned message is a dictionary, refer to pygdbmi docs for the format.
        """
        def match_response(response):
            return (response["message"] == message
                    and response["type"] == response_type)

        filtered_responses = [r for r in responses if match_response(r)]
        if not filtered_responses:
            return None
        return filtered_responses[0]
print("Our quick sort function is located at: %s" % hex(quicksort_addy)) # # Hook the binary sort function # 48 b8 35 08 40 00 00 00 00 00 mov rax, 0x0000000000400835 # ff e0 jmp rax # print("Let's do some magic things") gdbmi.write('b *%s' % hex(sortarr_addy)) gdbmi.write('run') gdbmi.write('set *(short*)%d = 0xb848' % sortarr_addy) # mov rax gdbmi.write('set *(long long*)%d = %d' % (sortarr_addy + 2, quicksort_addy)) # our new sort gdbmi.write('set *(short*)%d = 0xe0ff' % (sortarr_addy + 10)) # jmp rax gdbmi.write('continue') # # Just get your awesome flag # => ECSC{5d12758be6f2a971153c5599339f77b0} # res = gdbmi.get_gdb_response(timeout_sec=15) flag = [x for x in res if x['type'] == 'output'][0]['payload'] print("\nHere is your flag: %s" % flag) # Close everything gdbmi.exit()
class GdbServer(object):
    """Minimal GDB Remote-Serial-Protocol (RSP) server.

    Creates a pseudo-terminal, attaches a real gdb (via pygdbmi's
    GdbController and "-target-select remote <pts>"), and answers the RSP
    packets gdb sends using the register/memory view of the active State.

    NOTE(review): Python-2-era code (`unicode`, str-based pty I/O,
    `execfile`) — confirm the target interpreter before modernizing.
    """

    def __init__(self, states, binary, cda, active_state=None):
        # type: (StateManager, str, Any, Optional[State]) -> None
        # FIXME: this binary is original path
        master, ptsname = create_pty()
        self.master = master
        # Dispatch table: first character of an RSP packet -> handler method.
        self.COMMANDS = {
            'q': self.handle_query,
            'g': self.read_register_all,
            'G': self.write_register_all,
            'H': self.set_thread,
            'm': self.read_memory,
            'M': self.write_memory,
            'p': self.read_register,
            'P': self.write_register,
            'v': self.handle_long_commands,
            'X': self.write_memory_bin,
            'Z': self.insert_breakpoint,
            'z': self.remove_breakpoint,
            '?': self.stop_reason,
            '!': self.extend_mode,
        }
        self.states = states
        # Default to the most recent major state when none is supplied.
        self.active_state = active_state if active_state else states.get_major(-1)
        self.regs = GdbRegSpace(self.active_state)
        self.mem = GdbMemSpace(self.active_state, cda)
        self.packet_size = PAGESIZE
        self.libs = GdbSharedLibrary(self.active_state, self.packet_size)
        self.gdb = GdbController(
            gdb_args=['--quiet', '--nx', '--interpreter=mi2'])
        self.gdb.write("-target-select remote %s" % ptsname, timeout_sec=10)
        # The RSP server loop must be running before gdb starts talking to
        # the "remote" target, so start the thread before loading symbols.
        self.thread = threading.Thread(target=self.run)
        self.thread.start()
        self.gdb.write("-file-exec-and-symbols %s" % binary, timeout_sec=100)
        self.gdb.write('set stack-cache off', timeout_sec=100)

    def update_active(self):
        # Re-point the register/memory/library views at the (possibly
        # changed) active state, then resume the target.
        self.regs.active_state = self.active_state
        self.mem.active_state = self.active_state
        self.libs.active_state = self.active_state
        self.write_request('c')

    def read_variables(self):
        """Run a helper script inside gdb and parse the locals it prints.

        The script emits lines of the form
        "ARGS: <name> <type(%-for-space)> <loc-id> <addr[&comment]> <size>"
        which are collected into a list of dicts.
        """
        py_file = APP_ROOT.join("gdb/gdb_get_locals.py")
        resp = self.write_request('python execfile (\"{}\")'.format(py_file))
        res = []
        for r in resp:
            if 'payload' in r.keys() and \
                    isinstance(r['payload'], unicode) and \
                    r['payload'].startswith('ARGS:'):
                l = r['payload'].split(' ')
                name = l[1]
                # '%' was used as a space placeholder in the type string.
                tystr = l[2].replace('%', ' ')
                idr = int(l[3])
                addr_comment = l[4].strip().replace('\\n', '')
                if '&' in addr_comment:
                    if idr == 1:
                        # "<hexaddr>&<comment>"
                        ll = addr_comment.partition('&')
                        addr = int(ll[0], 16)
                        comment = ll[2]
                else:
                    if idr == 1:
                        addr = int(addr_comment, 16)
                        comment = ''
                    else:
                        # NOTE(review): non-address locations keep the raw
                        # string, so 'addr' may be str or int downstream.
                        addr = addr_comment
                        comment = ''
                # NOTE(review): when '&' is present but idr != 1, addr and
                # comment are left unbound (NameError / stale value) —
                # looks like a latent bug; confirm intended loc-ids.
                size = int(l[5].strip().replace('\\n', ''))
                res.append({
                    'name': name,
                    'type': tystr,
                    'loc': idr,
                    'addr': addr,
                    'size': size,
                    'comment': comment,
                })
        return res

    def eval_expression(self, expr):
        # type: (str) -> None
        # Evaluate an expression through GDB/MI and dump the raw responses.
        res = self.gdb.write(
            "-data-evaluate-expression %s" % expr, timeout_sec=99999)
        print(res)

    def write_request(self, req, **kwargs):
        """Send one command to gdb, then drain every pending response until
        a read fails/times out; returns the accumulated response list."""
        timeout_sec = kwargs.pop('timeout_sec', 10)
        kwargs['read_response'] = False
        self.gdb.write(req, timeout_sec=timeout_sec, **kwargs)
        resp = []
        while True:
            try:
                resp += self.gdb.get_gdb_response()
            except:
                # NOTE(review): bare except — relies on the timeout error to
                # end the loop, but also swallows any real failure.
                break
        return resp

    def run(self):
        # () -> None
        """Background thread: read raw bytes from the pty master and feed
        them to the RSP packet parser."""
        l.info("start server gdb server")
        buf = []
        while True:
            try:
                data = os.read(self.master.fileno(), PAGESIZE)
            except OSError as e:
                l.info("gdb connection was closed: %s", e)
                return
            if len(data) == 0:
                # NOTE(review): EOF is only logged; the loop keeps spinning
                # on an empty descriptor instead of returning.
                l.debug("gdb connection was closed")
            buf += data
            buf = self.process_data(buf)

    def process_data(self, buf):
        # type: (str) -> str
        """Consume complete "$<payload>#<xx>" RSP packets from buf; returns
        the unconsumed remainder."""
        while len(buf):
            # Discard gdb's '+'/'-' acknowledgement characters.
            if buf[0] == "+" or buf[0] == "-":
                buf = buf[1:]
            if len(buf) == 0:
                return buf
            if '$' not in buf:
                return buf
            begin = buf.index("$") + 1
            end = buf.index("#")
            if begin >= 0 and end < len(buf):
                packet = buf[begin:end]
                # Two hex digits after '#' encode the payload checksum
                # (high nibble first).
                checksum = int(buf[end + 2], 16)
                checksum += int(buf[end + 1], 16) << 4
                assert checksum == compute_checksum(packet)
                self.process_packet(packet)
                buf = buf[end + 3:]
        return buf

    def write_ack(self):
        # type: () -> None
        # Acknowledge receipt of a packet with a single '+'.
        self.master.write("+")
        self.master.flush()

    def process_packet(self, packet):
        # type: (str) -> None
        # Dispatch on the packet's first character; unknown commands get an
        # empty response (the RSP convention for "unsupported").
        handler = self.COMMANDS.get(packet[0], None)
        request = "".join(packet[1:])
        l.warning("<-- %s%s" % (packet[0], request))
        if handler is None:
            l.warning("unknown command %s%s received" % (packet[0], request))
            response = ""
        else:
            response = handler(request)
        self.write_response(response)

    def write_response(self, response):
        # type: (str) -> None
        # Each packet should be acknowledged with a single character.
        # '+' to indicate satisfactory receipt
        l.warning("--> %s" % response)
        self.master.write("+$%s#%.2x" % (response, compute_checksum(response)))
        self.master.flush()

    def extend_mode(self, packet):
        # type: (str) -> str
        """ ! """
        return "OK"

    def read_register_all(self, packet):
        # type: (str) -> str
        """ g """
        return self.regs.read_all()

    def write_register_all(self, packet):
        # type: (str) -> str
        """ G XX... """
        self.regs.write_all(packet)
        return "OK"

    def read_register(self, packet):
        # type: (str) -> str
        """ p n """
        n = int(packet, 16)
        # FIXME: gdb request out of range while gdb info frame
        if n < len(self.regs.names):
            return self.regs[self.regs.names[n]]
        # Out-of-range register numbers are reported as all-ones.
        return "ffffffff"

    def write_register(self, packet):
        # type: (str) -> str
        """ P n...=r... """
        n_, r_ = packet.split('=')
        n = int(n_, 16)
        r = int(r_, 16)
        if n < len(self.regs.names):
            self.regs[self.regs.names[n]] = r
        return "OK"

    def set_thread(self, packet):
        # type: (str) -> str
        """ H op thread-id """
        # Thread selection is accepted but ignored (single-thread model).
        return 'OK'

    def read_memory(self, packet):
        # type: (str) -> str
        """ m addr,length """
        addr_, length_ = packet.split(',')
        addr = int(addr_, 16)
        length = int(length_, 16)
        return self.mem.read(addr, length)

    def write_memory(self, packet):
        # type: (str) -> str
        """ M addr,length:XX """
        l = packet.split(',')
        addr_ = l[0]
        length_, value = l[1].split(':')
        addr = int(addr_, 16)
        length = int(length_, 16)
        self.mem.write(addr, length, value)
        return "OK"

    def write_memory_bin(self, packet):
        # type: (str) -> str
        """ X addr,length:XX(bin) """
        # NOTE(review): unimplemented — returns None, which write_response()
        # will then interpolate as the literal string "None".
        pass

    def insert_breakpoint(self, packet):
        # type: (str) -> str
        """ Z type,addr,kind
        type: 0 software (0xcc)
              1 hardware (drx)
              2 write watchpoint
              3 read watchpoint
        """
        return "OK"

    def remove_breakpoint(self, packet):
        # type: (str) -> str
        """ z type,addr,kind """
        return "OK"

    def stop_reason(self, packet):
        # type: (str) -> str
        # Always report "stopped by SIGTRAP".
        GDB_SIGNAL_TRAP = 5
        return "S%.2x" % GDB_SIGNAL_TRAP

    def handle_long_commands(self, packet):
        # type: (str) -> str
        """ v-prefixed multi-letter commands (vCont, vCtrlC, ...). """

        def handle_cont(action, tid=None):
            # type: (str, Optional[int]) -> str
            # TODO: for a continue/step/stop operation
            self.write_response("T05library:r;")
            return "S05"

        if packet.startswith('Cont'):
            supported_action = ['', 'c', 's', 't']
            # TODO: C sig/S sig/r start,end
            packet = packet[4:]
            if packet == '?':
                return ';'.join(supported_action)
            action = packet.split(';')[1]
            action = action.split(':')[0]
            if action in supported_action:
                return handle_cont(action)
            l.warning("unknown command: v%s", 'Cont' + packet)
            return ""
        if packet.startswith('CtrlC'):
            return "OK"
        if packet.startswith('MustReplyEmpty'):
            return ""
        else:
            l.warning("unknown command: v%s", packet)
            return ""

    def handle_query(self, packet):
        # type: (str) -> str
        """
        qSupported|qAttached|qC
        qXfer:...:read:annex:offset,size
        """
        if packet.startswith('Supported'):
            features = [
                'qXfer:libraries-svr4:read+',
                # 'qXfer:memory-map:read+'
            ]
            features.append('PacketSize=%x' % self.packet_size)
            return ';'.join(features)
        elif packet.startswith('Xfer'):
            reqs = packet.split(':')
            # FIXME: not working now
            if reqs[1] == 'libraries-svr4' and reqs[2] == 'read':
                data = reqs[4].split(',')
                return self.libs.read_xml(int(data[0], 16), int(data[1], 16))
            if reqs[1] == 'memory-map' and reqs[2] == 'read':
                # TODO: add memory-map, (do we really need it now?)
                return ""
            return ''
        elif packet.startswith('Attached'):
            return '1'
        elif packet.startswith("C"):
            # FIXME real thread id
            return ""  # empty means no threads
        elif packet.startswith("fThreadInfo"):
            return "m0"
        elif packet.startswith("sThreadInfo"):
            return "l"
        elif packet.startswith("TStatus"):
            # catch all for all commands we know and don't want to implement
            return ""
        elif packet.startswith('Symbol'):
            if packet == 'Symbol::':
                return "OK"
            # NOTE(review): sym_value/sym_name are parsed but unused.
            _, sym_value, sym_name = packet.split(':')
            return "OK"
        else:
            l.warning("unknown query: %s", packet)
            return ""
class Gdb:
    """Thin wrapper around pygdbmi's GdbController that drives gdb through
    MI commands and reflects debugger state (current PC line, breakpoints)
    into a vim session."""

    def __init__(self, gdb_path=None):
        # NOTE(review): passing a list as the first positional argument
        # matches pygdbmi versions whose constructor takes a full command
        # line; older versions expect gdb_path as a plain string — confirm
        # the pinned pygdbmi version.
        if gdb_path:
            self.gdbmi = GdbController([gdb_path, "--interpreter=mi3"])
        else:
            self.gdbmi = GdbController()
        # Sign used to mark the current program-counter line in vim.
        self.pc = VimSign("", "", 1000, "dbg_pc")
        self.bp_number = None  # number of the last breakpoint reported by gdb
        self.bp_line = None  # line of the last breakpoint reported by gdb
        self.result = None  # message of the last MI "result" record
        self.timeout = 3  # seconds to wait for each gdb response

    def __write(self, cmd):
        # Send one MI command and return the parsed response list.
        return self.gdbmi.write(cmd, timeout_sec=self.timeout)

    def file_and_exec_symbols(self, filepath):
        """Load the executable and its debug symbols into gdb."""
        response = self.__write("-file-exec-and-symbols %s" % (filepath))
        logger.info("Response: " + str(response))
        self.__parse_response(response)
        if not self.result or self.result == "error":
            logger.error("GDB unable to load exec and symbols file: %s" %
                         filepath)
            return
        logger.debug("GDB loaded exec and symbols file: %s" % filepath)

    def remote(self, address):
        """Connect gdb to a remote target (e.g. "host:port")."""
        response = self.__write("-target-select remote %s" % (address))
        self.__parse_response(response)
        if not self.result or self.result == "error":
            logger.error("GDB unable to target remote to %s" % (address))
            return
        logger.debug("GDB connect to remote %s" % (address))

    def load(self):
        """Download the executable to the connected remote target."""
        response = self.__write("-target-download")
        self.__parse_response(response)
        if self.result and self.result == "error":
            logger.error("GDB unable to do target download")
            return

    def insert_bp(self, location):
        """Insert a breakpoint at `location`; returns True on success."""
        logger.info("Inserting breakpoint @location: " + location)
        response = self.__write("-break-insert %s" % (location))
        self.__parse_response(response)
        if not self.result or self.result == "error":
            return False
        return True

    def delete_bp(self, number):
        """Delete a breakpoint by its number (string)."""
        logger.info("Deleting breakpoint: " + number)
        response = self.__write("-break-delete %s" % (number))
        self.__parse_response(response)

    def go(self):
        """Resume execution of the inferior."""
        response = self.__write("-exec-continue")
        self.__parse_response(response)
        logger.info("Continue")

    def pause(self):
        """Interrupt all running threads of the inferior."""
        # self.gdbmi.interrupt_gdb()
        self.__write("-exec-interrupt --all")
        response = self.gdbmi.get_gdb_response(timeout_sec=self.timeout)
        self.__parse_response(response)
        logger.info("Pause")

    def step(self):
        """Step one source line, descending into calls."""
        response = self.__write("-exec-step")
        self.__parse_response(response)
        logger.info("Step")

    def __parse_response(self, response):
        """Walk the MI records in `response`, updating vim (PC sign, cursor
        position), self.result and breakpoint info; unhandled records are
        collected and logged at debug level."""
        global vim
        unused = []
        self.result = None
        self.bp_number = None
        self.bp_line = None
        for r in response:
            if r["type"] == "notify":
                if r["message"] == "stopped":
                    p = r["payload"]
                    if 'frame' in p:
                        self.pc.filepath = p["frame"].get("fullname", None)
                        self.pc.line = p["frame"].get("line", None)
                        if self.pc.filepath and self.pc.line:
                            # open file and move cursor on line
                            vim.execute(":e %s" % (self.pc.filepath))
                            vim.execute(":%s" % (self.pc.line))
                            self.pc.place()
                    # NOTE(review): any signal stop is reported as a
                    # segfault, not just SIGSEGV — confirm intent.
                    if "reason" in p and p["reason"] == "signal-received":
                        vim.echo("GDB: Segmentation fault")
                    pass
                elif r["message"] == "library-loaded":
                    libinfo = r["payload"]
                    logger.debug("Gdb: library loaded: %s" %
                                 (libinfo["target-name"]))
                elif r["message"] == "library-unloaded":
                    libinfo = r["payload"]
                    logger.debug("Gdb: library unloaded: %s" %
                                 (libinfo["target-name"]))
                elif r["message"] in [
                        "thread-group-exited", "thread-group-started",
                        "thread-group-added", "thread-created",
                        "thread-exited", "running", "breakpoint-modified"
                ]:
                    # TODO: treat this?
                    pass
                else:
                    unused.append(r)
            elif r["type"] == "log":
                logger.debug("GDB: %s" % (r["payload"]))
            elif r["type"] == "result":
                self.result = r["message"]
                # A successful -break-insert carries a "bkpt" payload.
                if r["payload"] and "bkpt" in r["payload"]:
                    self.bp_number = r["payload"]["bkpt"].get("number", None)
                    self.bp_line = r["payload"]["bkpt"].get("line", None)
            elif r["type"] == "console":
                # ignore console output for now
                pass
            elif r["type"] == "output":
                if r["stream"] == "stdout":
                    logger.info("%s" % (r["payload"]))
            else:
                unused.append(r)
        if unused:
            logger.debug("From GDB - not treated:\n" + pprint.pformat(unused))
class ProgramSimulation:
    """Runs a target binary under gdb (via pygdbmi), single-stepping it and
    sampling register values to build a simulated power-consumption trace."""

    def __init__(self, binary, prog_args, method_name, registers, args):
        self.gdbmi = None  # GdbController, created lazily in init()
        self.binary = binary  # path of the executable to simulate
        self.prog_args = prog_args  # 2-tuple interpolated into -exec-arguments
        self.done = None  # set True when the inferior's thread exits
        self.signal = None  # accumulated power-consumption samples
        self.prev_register_values = None  # last sampled register snapshot
        self.method_name = method_name  # function to break at
        self.args = args  # CLI namespace (granularity, intervals, ...)
        self.registers = registers  # register names to sample

    def init(self):
        """Spawn gdb, load the binary, break at the target method and run.

        Commands are fired without reading responses; they are drained
        later by parse_responses().
        """
        self.gdbmi = GdbController()
        self.gdbmi.write('-exec-arguments %s %s' % self.prog_args,
                         read_response=False)
        self.gdbmi.write('-file-exec-and-symbols %s' % self.binary,
                         read_response=False)
        self.gdbmi.write('-break-insert %s' % self.method_name,
                         read_response=False)
        self.gdbmi.write('-exec-run', read_response=False)
        self.gdbmi.write('-data-list-register-names', read_response=False)

    def run(self):
        """Step the program to completion, periodically sampling registers;
        returns the power-consumption signal as a numpy array."""
        self.init()
        self.prev_register_values = {}
        self.signal = []
        self.done = False
        step_count = 0
        check_interval = 100  # drain gdb responses every N steps
        register_value_interval = self.args.register_check_interval
        while not self.done:
            # print("\rStep: %d " % step_count, end='')
            # Parse responses from issued commands
            if step_count % check_interval == 0:
                self.parse_responses(
                    register_values_cb=self.update_power_consumption)
            # Send command to get register values
            if step_count % register_value_interval == 0:
                self.get_register_values(self.registers)
            # Send command to get next step
            self.program_step()
            step_count += 1
        self.gdbmi.exit()
        return np.array(self.signal)

    def run_find_varying_registers(self, nruns=3):
        """Run the program nruns times, recording every register value at
        every step; returns the registers whose per-run value sums are
        identical across runs (i.e. the deterministic ones)."""
        self.register_value_sum = defaultdict(lambda: [])
        # Sum each register value during steps. Repeat nruns times.
        for n in range(0, nruns):
            print("Run %d..." % n)
            self.init()
            self.done = False
            self.register_value_history = defaultdict(lambda: [])
            while not self.done:
                self.get_register_values(self.registers)
                self.parse_responses(
                    register_values_cb=self.compare_register_values)
                self.program_step()
            # NOTE(review): the gdb subprocess is dropped instead of
            # exit()ed — this can leak gdb processes across runs.
            del self.gdbmi
            for key, values in self.register_value_history.items():
                self.register_value_sum[key].append(
                    sum([int(x) for x in values]))
        # Check if there were runs with a different outcome
        normal_keys = []
        for key, values in self.register_value_sum.items():
            if len(set(values)) > 1:
                print("Found weird key %s: %s" % (key, str(values)))
            else:
                normal_keys.append(key)
        return normal_keys

    def compare_register_values(self, register_values):
        # Record every sampled value per register for later comparison.
        for key, value in register_values.items():
            self.register_value_history[key].append(value)

    def update_power_consumption(self, current_register_values):
        # Power model: consumption is derived from the delta between the
        # previous and current register snapshots.
        power_consumption = get_registers_power_consumption(
            self.prev_register_values, current_register_values)
        self.prev_register_values = current_register_values
        # print("Power consumption: %d" % power_consumption)
        self.signal.append(power_consumption)

    def parse_responses(self, register_values_cb=None):
        """Drain pending gdb responses, forwarding register-value payloads
        to register_values_cb and setting self.done when the inferior's
        thread exits (or when gdb times out)."""
        try:
            responses = self.gdbmi.get_gdb_response(timeout_sec=2)
        except GdbTimeoutError:
            print("ERROR: Got timeout from GDB. Exiting prematurely.")
            self.done = True
            return
        for response in responses:
            # print(response)
            # Check for register values
            payload = response['payload']
            if payload is not None:
                if 'register-values' in payload:
                    register_tuples = payload['register-values']
                    register_values = _parse_register_tuples(register_tuples)
                    register_values_cb(register_values)
            # Check for end packet
            if 'type' in response and response['type'] == 'notify':
                if response['message'] == 'thread-exited':
                    self.done = True

    def program_step(self):
        """
        Step program
        :return:
        """
        # Step granularity chosen on the command line: one machine
        # instruction, one source line into calls, or one source line
        # over calls.
        if self.args.granularity == 'instruction':
            self.gdbmi.write('-exec-step-instruction', read_response=False,
                             timeout_sec=0)
        elif self.args.granularity == 'step':
            self.gdbmi.write('-exec-step', read_response=False, timeout_sec=0)
        elif self.args.granularity == 'next':
            self.gdbmi.write('-exec-next', read_response=False, timeout_sec=0)

    def get_register_values(self, target_registers=None):
        """Asynchronously request raw values of target_registers (all
        registers when None)."""
        # Filter?
        if target_registers is not None:
            register_list = ' '.join(target_registers)
        else:
            register_list = ''
        self.gdbmi.write('-data-list-register-values r %s' % register_list,
                         read_response=False, timeout_sec=0)

    def get_changed_registers(self):
        """
        DEPRECATED
        Get list of changed registers. Not used anymore because just batching
        requests for all register values is faster than checking which ones
        changed, waiting, and then querying for them.
        :return:
        """
        self.gdbmi.write('-data-list-changed-registers', read_response=False)
def test_controller(self):
    """Build a simple C program, then run it with GdbController and verify
    the output is parsed as expected"""
    # Initialize object that manages gdb subprocess
    gdbmi = GdbController()

    c_hello_world_binary = self._get_c_program("hello", "pygdbmiapp.a")
    if USING_WINDOWS:
        # gdb expects forward slashes even for Windows paths
        c_hello_world_binary = c_hello_world_binary.replace("\\", "/")

    # Load the binary and its symbols in the gdb subprocess
    responses = gdbmi.write("-file-exec-and-symbols %s" %
                            c_hello_world_binary,
                            timeout_sec=1)

    # Verify output was parsed into a list of responses
    assert len(responses) != 0
    response = responses[0]
    # Every parsed MI record carries exactly these five keys.
    assert set(response.keys()) == {
        "message", "type", "payload", "stream", "token"
    }

    assert response["message"] == "thread-group-added"
    assert response["type"] == "notify"
    assert response["payload"] == {"id": "i1"}
    assert response["stream"] == "stdout"
    assert response["token"] is None

    # write() also accepts a list of commands.
    responses = gdbmi.write(
        ["-file-list-exec-source-files", "-break-insert main"])
    assert len(responses) != 0

    responses = gdbmi.write(["-exec-run", "-exec-continue"], timeout_sec=3)

    # The test binary prints a line with significant leading and trailing
    # whitespace; verify the parser preserved it.
    found_match = False
    print(responses)
    for r in responses:
        if (r.get(
                "payload", ""
        ) == " leading spaces should be preserved. So should trailing spaces. "
            ):
            found_match = True
    assert found_match is True

    # Test GdbTimeoutError exception
    got_timeout_exception = False
    try:
        gdbmi.get_gdb_response(timeout_sec=0)
    except GdbTimeoutError:
        got_timeout_exception = True
    assert got_timeout_exception is True

    # Close gdb subprocess
    if not USING_WINDOWS:
        # access denied on windows
        gdbmi.send_signal_to_gdb("SIGINT")
        gdbmi.send_signal_to_gdb(2)
        gdbmi.interrupt_gdb()
    responses = gdbmi.exit()
    assert responses is None
    assert gdbmi.gdb_process is None

    # Test NoGdbProcessError exception
    got_no_process_exception = False
    try:
        responses = gdbmi.write("-file-exec-and-symbols %s" %
                                c_hello_world_binary)
    except NoGdbProcessError:
        got_no_process_exception = True
    assert got_no_process_exception is True

    # Respawn and test signal handling
    gdbmi.spawn_new_gdb_subprocess()
    responses = gdbmi.write("-file-exec-and-symbols %s" %
                            c_hello_world_binary,
                            timeout_sec=1)
    responses = gdbmi.write(["-break-insert main", "-exec-run"])
    if not USING_WINDOWS:
        gdbmi.interrupt_gdb()
        gdbmi.send_signal_to_gdb(2)
        # Signal names are case-insensitive...
        gdbmi.send_signal_to_gdb("sigTeRm")
        try:
            # ...but a bogus name must raise ValueError.
            gdbmi.send_signal_to_gdb("sigterms")  # exception must be raised
            assert False
        except ValueError:
            assert True
    responses = gdbmi.write("-exec-run")
    if not USING_WINDOWS:
        gdbmi.send_signal_to_gdb("sigstop")
def test_controller(self):
    """Build a simple C program, then run it with GdbController and verify
    the output is parsed as expected"""
    # Initialize object that manages gdb subprocess
    gdbmi = GdbController()

    c_hello_world_binary = self._get_c_program('hello', 'pygdbmiapp.a')
    if USING_WINDOWS:
        # gdb expects forward slashes even for Windows paths
        c_hello_world_binary = c_hello_world_binary.replace('\\', '/')

    # Load the binary and its symbols in the gdb subprocess
    responses = gdbmi.write('-file-exec-and-symbols %s' %
                            c_hello_world_binary,
                            timeout_sec=1)

    # Verify output was parsed into a list of responses
    assert(len(responses) != 0)
    response = responses[0]
    # Every parsed MI record carries exactly these five keys.
    assert(set(response.keys()) ==
           set(['message', 'type', 'payload', 'stream', 'token']))

    assert(response['message'] == 'thread-group-added')
    assert(response['type'] == 'notify')
    assert(response['payload'] == {'id': 'i1'})
    assert(response['stream'] == 'stdout')
    assert(response['token'] is None)

    # write() also accepts a list of commands.
    responses = gdbmi.write(['-file-list-exec-source-files',
                             '-break-insert main'])
    assert(len(responses) != 0)

    responses = gdbmi.write(['-exec-run', '-exec-continue'], timeout_sec=3)

    # The test binary prints a line with significant leading and trailing
    # whitespace; verify the parser preserved it.
    found_match = False
    for r in responses:
        if r.get('payload', '') == ' leading spaces should be preserved. So should trailing spaces. ':
            found_match = True
    assert(found_match is True)

    # Test GdbTimeoutError exception
    got_timeout_exception = False
    try:
        gdbmi.get_gdb_response(timeout_sec=0)
    except GdbTimeoutError:
        got_timeout_exception = True
    assert(got_timeout_exception is True)

    # Close gdb subprocess
    if not USING_WINDOWS:
        # access denied on windows
        gdbmi.send_signal_to_gdb('SIGINT')
        gdbmi.send_signal_to_gdb(2)
        gdbmi.interrupt_gdb()
    responses = gdbmi.exit()
    assert(responses is None)
    assert(gdbmi.gdb_process is None)

    # Test NoGdbProcessError exception
    got_no_process_exception = False
    try:
        responses = gdbmi.write('-file-exec-and-symbols %s' %
                                c_hello_world_binary)
    except NoGdbProcessError:
        got_no_process_exception = True
    assert(got_no_process_exception is True)

    # Respawn and test signal handling
    gdbmi.spawn_new_gdb_subprocess()
    responses = gdbmi.write('-file-exec-and-symbols %s' %
                            c_hello_world_binary,
                            timeout_sec=1)
    responses = gdbmi.write(['-break-insert main', '-exec-run'])
    if not USING_WINDOWS:
        gdbmi.interrupt_gdb()
        gdbmi.send_signal_to_gdb(2)
        # Signal names are case-insensitive...
        gdbmi.send_signal_to_gdb('sigTeRm')
        try:
            # ...but a bogus name must raise ValueError.
            gdbmi.send_signal_to_gdb('sigterms')  # exception must be raised
            assert(False)
        except ValueError:
            assert(True)
    responses = gdbmi.write('-exec-run')
    if not USING_WINDOWS:
        gdbmi.send_signal_to_gdb('sigstop')