Example #1
    def connect_client(self, client_id, desired_gdbpid):
        message = ""
        pid = 0
        error = False
        using_existing = False

        if desired_gdbpid > 0:
            controller = self.get_controller_from_pid(desired_gdbpid)

            if controller:
                self.controller_to_client_ids[controller].append(client_id)
                message = "gdbgui is using existing subprocess with pid %s, "
                "originally opened with command %s" % (str(
                    desired_gdbpid
                ), controller.get_subprocess_cmd())
                using_existing = True
                pid = desired_gdbpid
            else:
                print("error! could not find that pid")
                message = "Could not find a gdb subprocess with pid %s. " % str(
                    desired_gdbpid
                )
                error = True

        if self.get_controller_from_client_id(client_id) is None:
            logger.info("new sid", client_id)

            gdb_args = (
                deepcopy(self.config["initial_binary_and_args"])
                + deepcopy(self.config["gdb_args"])
                + REQUIRED_GDB_FLAGS
            )

            controller = GdbController(
                gdb_path=self.config["gdb_path"],
                gdb_args=gdb_args,
                rr=self.config["rr"],
            )
            self.controller_to_client_ids[controller].append(client_id)

            pid = self.get_pid_from_controller(controller)
            message += "gdbgui spawned subprocess with pid %s from command %s." % (
                str(pid),
                controller.get_subprocess_cmd(),
            )

        return {
            "pid": pid,
            "message": message,
            "error": error,
            "using_existing": using_existing,
        }
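
The helpers get_controller_from_pid() and get_pid_from_controller() are not shown in this snippet. A minimal sketch of the first, assuming controllers are simply looked up by the pid of their gdb subprocess, could be:

    def get_controller_from_pid(self, desired_gdbpid):
        # Hypothetical reconstruction: scan the known controllers for one
        # whose gdb subprocess has the requested pid.
        for controller in self.controller_to_client_ids:
            if self.get_pid_from_controller(controller) == desired_gdbpid:
                return controller
        return None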
Example #2
    def connect_client(self, client_id: str, desired_gdbpid: int) -> Dict[str, Any]:
        message = ""
        pid: Optional[int] = 0
        error = False
        using_existing = False

        if desired_gdbpid > 0:
            controller = self.get_controller_from_pid(desired_gdbpid)

            if controller:
                self.controller_to_client_ids[controller].append(client_id)
                message = (
                    "gdbgui is using existing subprocess with pid %s, "
                    "originally opened with command %s"
                ) % (str(desired_gdbpid), controller.get_subprocess_cmd())
                using_existing = True
                pid = desired_gdbpid
            else:
                print("error! could not find that pid")
                message = "Could not find a gdb subprocess with pid %s. " % str(
                    desired_gdbpid
                )
                error = True

        if self.get_controller_from_client_id(client_id) is None:
            logger.info("new sid", client_id)

            gdb_args = self.get_gdb_args()

            controller = GdbController(
                gdb_path=self.config["gdb_path"],
                gdb_args=gdb_args,
                rr=self.config["rr"],
            )
            self.controller_to_client_ids[controller].append(client_id)

            pid = self.get_pid_from_controller(controller)
            if pid is None:
                error = True
                message = "Developer error"
            else:
                message += "gdbgui spawned subprocess with pid %s from command %s." % (
                    str(pid),
                    controller.get_subprocess_cmd(),
                )

        return {
            "pid": pid,
            "message": message,
            "error": error,
            "using_existing": using_existing,
        }
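
Example #2 is a typed revision of Example #1 that moves the argument construction into a get_gdb_args() helper whose body is not shown. Judging from the inline version in Example #1, a sketch of it might be:

    def get_gdb_args(self):
        # Hypothetical reconstruction mirroring Example #1: deep copies keep
        # the config lists unmodified when REQUIRED_GDB_FLAGS is appended.
        return (
            deepcopy(self.config["initial_binary_and_args"])
            + deepcopy(self.config["gdb_args"])
            + REQUIRED_GDB_FLAGS
        )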
Example #3
                elif start:
                    class_type.content += resp["payload"]
            class_type.content = class_type.content.replace("\\n", "\n")
    return structured_includes


def saveEnrichedIncludes(dirName, enriched_includes):
    if not os.path.exists(dirName):
        os.mkdir(dirName)
    for filename, contents in enriched_includes.items():
        file_path = os.path.join(dirName, filename)
        with open(file_path, 'w') as f:
            content = '\n'.join((elem.content for elem in contents))
            f.write(content)


PID = input("Enter the process PID: ")

gdbmi = GdbController(
    gdb_args=["--nx", "--quiet", "--interpreter=mi2", "--pid=" + PID])
print(gdbmi.get_subprocess_cmd())
writeCommand(gdbmi, '')

includes = getIncludesFiles(gdbmi)

structured_includes = getTypesOfIncludes(gdbmi, includes)

enriched_includes = enrichIncludes(gdbmi, structured_includes)

saveEnrichedIncludes("headers", enriched_includes)
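
This script is a fragment: writeCommand, getIncludesFiles, getTypesOfIncludes and enrichIncludes are defined elsewhere. As a rough sketch, assuming writeCommand simply forwards a command to gdb and returns the parsed MI records, it could look like:

def writeCommand(gdbmi, command, timeout=2):
    # Hypothetical helper: send a command and return the parsed responses
    # (a list of dicts with 'type', 'message' and 'payload' keys).
    return gdbmi.write(command, timeout_sec=timeout,
                       raise_error_on_timeout=False)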
Example #4
class CrackMe:
    def __init__(self, args=None):
        self.uid = str(uuid.uuid4())
        # Start gdb process; use None instead of a mutable default argument
        args = args or []
        gdb_args = (['--nx', '--quiet', '--interpreter=mi2'] +
                    ['--args', './crackme'] + args)

        self.gdbmi = GdbController(gdb_args=gdb_args)
        logging.info('Starting gdb with ' +
                     repr(self.gdbmi.get_subprocess_cmd()))

    def wait_for_resp(self):
        msgs = []
        out = {}

        while True:
            resp = self.gdbmi.get_gdb_response(timeout_sec=4,
                                               raise_error_on_timeout=False)
            msgs += resp

            for m in resp:
                m = to_namespace(m)
                if m.type != 'result':
                    continue
                out['result'] = m.message
                return msgs, out

    def run(self):
        self.gdbmi.write('run', read_response=False)
        return self.process_execution()

    def cont(self):
        self.gdbmi.write('continue', read_response=False)
        return self.process_execution()

    def si(self):
        self.gdbmi.write('si', read_response=False)
        return self.process_execution()

    def ni(self):
        self.gdbmi.write('ni', read_response=False)
        return self.process_execution()

    def breakpoint(self, addr):
        addr = filter_str(addr)
        self.gdbmi.write('break *' + addr, read_response=False)
        msgs, out = self.wait_for_resp()
        return out

    def set(self, arg):
        arg = filter_str(arg)
        self.gdbmi.write('set ' + arg, read_response=False)
        msgs, out = self.wait_for_resp()
        return out

    def disassemble(self, arg):
        arg = filter_str(arg)
        self.gdbmi.write('disassemble ' + arg, read_response=False)
        msgs, out = self.wait_for_resp()
        data = ''
        for m in msgs:
            m = to_namespace(m)
            if m.type == 'console':
                data += m.payload

        data = data.encode('latin-1').decode('unicode_escape')
        out['data'] = data
        return out

    def memory(self, arg):
        arg = filter_str(arg)
        self.gdbmi.write('x/' + arg, read_response=False)
        msgs, out = self.wait_for_resp()
        data = ''
        for m in msgs:
            m = to_namespace(m)
            if m.type == 'console':
                data += m.payload

        data = data.encode('latin-1').decode('unicode_escape')
        out['data'] = data
        return out

    def registers(self):
        self.gdbmi.write('i r', read_response=False)
        msgs, out = self.wait_for_resp()
        data = ''
        for m in msgs:
            m = to_namespace(m)
            if m.type == 'console':
                data += m.payload

        data = data.encode('latin-1').decode('unicode_escape')
        data = data.strip().split('\n')
        regs = {x[0]: x[1] for x in (y.split() for y in data) if len(x) >= 2}
        out['registers'] = regs
        return out

    def process_execution(self):

        run_output = ''
        running = True

        out = {}

        # Loop until execution stops
        while running:
            resp = self.gdbmi.get_gdb_response(timeout_sec=4,
                                               raise_error_on_timeout=False)

            for m in resp:
                m = to_namespace(m)

                # Console output
                if m.type == 'output':
                    run_output += m.payload

                if m.type == 'result' and m.message == 'error':
                    running = False
                    out['stop_reason'] = m.payload.msg

                # Program stopped
                if m.type == 'notify':
                    if m.message == 'stopped':
                        running = False
                        reason = m.payload.reason
                        out['stop_reason'] = reason
                        if reason == 'breakpoint-hit':
                            out['bp_addr'] = m.payload.frame.addr

        out['output'] = run_output

        return out
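
A hypothetical driver for this class, using only the methods defined above (the breakpoint address is a placeholder):

crackme = CrackMe()
crackme.breakpoint('0x401000')           # placeholder address
out = crackme.run()                      # run until the breakpoint is hit
print(out['stop_reason'], out.get('bp_addr'))
regs = crackme.registers()['registers']  # name -> value mapping from 'i r'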
Example #5
class PanicTestMixin(object):
    """ Provides custom functionality for the panic test DUT """
    BOOT_CMD_ADDR = 0x9000
    BOOT_CMD_SIZE = 0x1000
    DEFAULT_EXPECT_TIMEOUT = 10
    COREDUMP_UART_START = '================= CORE DUMP START ================='
    COREDUMP_UART_END = '================= CORE DUMP END ================='

    def start_test(self, test_name):
        """ Starts the app and sends it the test name """
        self.test_name = test_name
        # Start the app and verify that it has started up correctly
        self.start_capture_raw_data()
        self.start_app()
        self.expect('Enter test name: ')
        Utility.console_log('Setting boot command: ' + test_name)
        self.write(test_name)
        self.expect('Got test name: ' + test_name)

    def expect_none(self, *patterns, **timeout_args):
        """ like dut.expect_all, but with an inverse logic """
        found_data = []
        if 'timeout' not in timeout_args:
            timeout_args['timeout'] = 1

        def found(data):
            found_data.append(data)
            raise AssertionError('Unexpected: {}'.format(data))
        try:
            expect_items = [(pattern, found) for pattern in patterns]
            self.expect_any(*expect_items, **timeout_args)
            raise AssertionError('Unexpected: {}'.format(found_data))
        except DUT.ExpectTimeout:
            return True

    def expect_gme(self, reason):
        """ Expect method for Guru Meditation Errors """
        self.expect(r"Guru Meditation Error: Core  0 panic'ed (%s)" % reason)

    def expect_reg_dump(self, core=0):
        """ Expect method for the register dump """
        self.expect(re.compile(r'Core\s+%d register dump:' % core))

    def expect_elf_sha256(self):
        """ Expect method for ELF SHA256 line """
        elf_sha256 = self.app.get_elf_sha256()
        sdkconfig = self.app.get_sdkconfig()
        elf_sha256_len = int(sdkconfig.get('CONFIG_APP_RETRIEVE_LEN_ELF_SHA', '16'))
        self.expect('ELF file SHA256: ' + elf_sha256[0:elf_sha256_len])

    def expect_backtrace(self):
        self.expect('Backtrace:')
        self.expect_none('CORRUPTED')

    def __enter__(self):
        self._raw_data = None
        self.gdb = None
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        log_folder = self.app.get_log_folder(TEST_SUITE)
        with open(os.path.join(log_folder, 'log_' + self.test_name + '.txt'), 'w') as log_file:
            Utility.console_log('Writing output of {} to {}'.format(self.test_name, log_file.name))
            log_file.write(self.get_raw_data())
        if self.gdb:
            self.gdb.exit()
        self.close()

    def get_raw_data(self):
        if not self._raw_data:
            self._raw_data = self.stop_capture_raw_data()
        return self._raw_data

    def _call_espcoredump(self, extra_args, coredump_file_name, output_file_name):
        # no "with" here, since we need the file to be open for later inspection by the test case
        self.coredump_output = open(output_file_name, 'w')
        espcoredump_script = os.path.join(os.environ['IDF_PATH'], 'components', 'espcoredump', 'espcoredump.py')
        espcoredump_args = [
            sys.executable,
            espcoredump_script,
            'info_corefile',
            '--core', coredump_file_name,
        ]
        espcoredump_args += extra_args
        espcoredump_args.append(self.app.elf_file)
        Utility.console_log('Running ' + ' '.join(espcoredump_args))
        Utility.console_log('espcoredump output is written to ' + self.coredump_output.name)

        subprocess.check_call(espcoredump_args, stdout=self.coredump_output)
        self.coredump_output.flush()
        self.coredump_output.seek(0)

    def process_coredump_uart(self):
        """ Extract the core dump from UART output of the test, run espcoredump on it """
        log_folder = self.app.get_log_folder(TEST_SUITE)
        data = self.get_raw_data()
        coredump_start = data.find(self.COREDUMP_UART_START)
        coredump_end = data.find(self.COREDUMP_UART_END)
        coredump_base64 = data[coredump_start + len(self.COREDUMP_UART_START):coredump_end]
        with open(os.path.join(log_folder, 'coredump_data_' + self.test_name + '.b64'), 'w') as coredump_file:
            Utility.console_log('Writing UART base64 core dump to ' + coredump_file.name)
            coredump_file.write(coredump_base64)

        output_file_name = os.path.join(log_folder, 'coredump_uart_result_' + self.test_name + '.txt')
        self._call_espcoredump(['--core-format', 'b64'], coredump_file.name, output_file_name)

    def process_coredump_flash(self):
        """ Extract the core dump from flash, run espcoredump on it """
        log_folder = self.app.get_log_folder(TEST_SUITE)
        coredump_file_name = os.path.join(log_folder, 'coredump_data_' + self.test_name + '.bin')
        Utility.console_log('Writing flash binary core dump to ' + coredump_file_name)
        self.dump_flash(coredump_file_name, partition='coredump')

        output_file_name = os.path.join(log_folder, 'coredump_flash_result_' + self.test_name + '.txt')
        self._call_espcoredump(['--core-format', 'raw'], coredump_file_name, output_file_name)

    def _gdb_write(self, command):
        """
        Wrapper to write to gdb with a longer timeout, as test runner
        host can be slow sometimes
        """
        return self.gdb.write(command, timeout_sec=10)

    def start_gdb(self):
        """
        Runs GDB and connects it to the "serial" port of the DUT.
        After this, the DUT expect methods can no longer be used to capture output.
        """
        self.stop_receive()
        self._port_close()

        Utility.console_log('Starting GDB...', 'orange')
        self.gdb = GdbController(gdb_path=self.TOOLCHAIN_PREFIX + 'gdb')
        Utility.console_log('Running command: {}'.format(self.gdb.get_subprocess_cmd()), 'orange')

        for _ in range(10):
            try:
                # GdbController creates a process with subprocess.Popen(). Is it really running? It is probable that
                # an RPI under high load will get non-responsive during creating a lot of processes.
                resp = self.gdb.get_gdb_response(timeout_sec=10)  # calls verify_valid_gdb_subprocess() internally
                # it will be interesting to look up this response if the next GDB command fails (times out)
                Utility.console_log('GDB response: {}'.format(resp), 'orange')
                break  # success
            except GdbTimeoutError:
                Utility.console_log('GDB internal error: cannot get response from the subprocess', 'orange')
            except NoGdbProcessError:
                Utility.console_log('GDB internal error: process is not running', 'red')
                break  # failure - TODO: create another GdbController
            except ValueError:
                Utility.console_log('GDB internal error: select() returned an unexpected file number', 'red')

        # pygdbmi logs to console by default, make it log to a file instead
        log_folder = self.app.get_log_folder(TEST_SUITE)
        pygdbmi_log_file_name = os.path.join(log_folder, 'pygdbmi_log_' + self.test_name + '.txt')
        pygdbmi_logger = self.gdb.logger
        pygdbmi_logger.setLevel(logging.DEBUG)
        while pygdbmi_logger.hasHandlers():
            pygdbmi_logger.removeHandler(pygdbmi_logger.handlers[0])
        log_handler = logging.FileHandler(pygdbmi_log_file_name)
        log_handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s: %(message)s'))
        pygdbmi_logger.addHandler(log_handler)

        # Set up logging for GDB remote protocol
        gdb_remotelog_file_name = os.path.join(log_folder, 'gdb_remote_log_' + self.test_name + '.txt')
        self._gdb_write('-gdb-set remotelogfile ' + gdb_remotelog_file_name)

        # Load the ELF file
        self._gdb_write('-file-exec-and-symbols {}'.format(self.app.elf_file))

        # Connect GDB to UART
        Utility.console_log('Connecting to GDB Stub...', 'orange')
        self._gdb_write('-gdb-set serial baud 115200')
        responses = self._gdb_write('-target-select remote ' + self.get_gdb_remote())

        # Make sure we get the 'stopped' notification
        stop_response = self.find_gdb_response('stopped', 'notify', responses)
        if not stop_response:
            responses = self._gdb_write('-exec-interrupt')
            stop_response = self.find_gdb_response('stopped', 'notify', responses)
            assert stop_response
        frame = stop_response['payload']['frame']
        if 'file' not in frame:
            frame['file'] = '?'
        if 'line' not in frame:
            frame['line'] = '?'
        Utility.console_log('Stopped in {func} at {addr} ({file}:{line})'.format(**frame), 'orange')

        # Drain remaining responses
        self.gdb.get_gdb_response(raise_error_on_timeout=False)

    def gdb_backtrace(self):
        """
        Returns the list of stack frames for the current thread.
        Each frame is a dictionary, refer to pygdbmi docs for the format.
        """
        assert self.gdb

        responses = self._gdb_write('-stack-list-frames')
        return self.find_gdb_response('done', 'result', responses)['payload']['stack']

    @staticmethod
    def match_backtrace(gdb_backtrace, expected_functions_list):
        """
        Returns True if the function names listed in expected_functions_list match the backtrace
        given by gdb_backtrace argument. The latter is in the same format as returned by gdb_backtrace()
        function.
        """
        return all([frame['func'] == expected_functions_list[i] for i, frame in enumerate(gdb_backtrace)])

    @staticmethod
    def find_gdb_response(message, response_type, responses):
        """
        Helper function which extracts one response from an array of GDB responses, filtering
        by message and type. Returned message is a dictionary, refer to pygdbmi docs for the format.
        """
        def match_response(response):
            return (response['message'] == message and
                    response['type'] == response_type)

        filtered_responses = [r for r in responses if match_response(r)]
        if not filtered_responses:
            return None
        return filtered_responses[0]
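
Put together, a test built on this mixin might verify where the target stopped roughly like this (the function names are illustrative, not taken from a real test):

    def check_panic_backtrace(self):
        self.start_gdb()
        frames = self.gdb_backtrace()
        # Compare only the top two frames, since match_backtrace() walks the
        # whole list it is given.
        assert self.match_backtrace(frames[:2], ['panic_handler', 'app_main'])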
Example #6
from pygdbmi.gdbcontroller import GdbController
from pprint import pprint

gdbmi = GdbController()
print(gdbmi.get_subprocess_cmd())  # print actual command run as subprocess

# Load the binary a.exe and get a structured response
pprint(gdbmi.write('-file-exec-file a.exe'))
pprint(gdbmi.write('set new-console on'))
pprint(gdbmi.write('run'))
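
If the program keeps running in its own console, one might drain any remaining MI output and terminate gdb with the same API used in the other examples (a sketch):

pprint(gdbmi.get_gdb_response(timeout_sec=1, raise_error_on_timeout=False))
gdbmi.exit()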

Example #7
class PanicTestDut(IdfDut):
    BOOT_CMD_ADDR = 0x9000
    BOOT_CMD_SIZE = 0x1000
    DEFAULT_EXPECT_TIMEOUT = 10
    COREDUMP_UART_START = '================= CORE DUMP START ================='
    COREDUMP_UART_END = '================= CORE DUMP END ================='

    app: IdfApp
    serial: IdfSerial

    def __init__(self, *args, **kwargs) -> None:  # type: ignore
        super().__init__(*args, **kwargs)

        self.gdb: GdbController = None  # type: ignore
        # pygdbmi uses logging.debug to generate single-character noise, so
        # record the current root log level and raise it while gdb is in use
        self.log_level = logging.getLogger().level
        if self.log_level <= logging.DEBUG:
            logging.getLogger().setLevel(logging.INFO)

        self.coredump_output: TextIO = None  # type: ignore

    def close(self) -> None:
        if self.gdb:
            self.gdb.exit()

        super().close()

    def revert_log_level(self) -> None:
        logging.getLogger().setLevel(self.log_level)

    def expect_test_func_name(self, test_func_name: str) -> None:
        self.expect_exact('Enter test name:')
        self.write(test_func_name)
        self.expect_exact('Got test name: ' + test_func_name)

    def expect_none(self, pattern, **kwargs) -> None:  # type: ignore
        """like dut.expect_all, but with an inverse logic"""
        if 'timeout' not in kwargs:
            kwargs['timeout'] = 1

        try:
            res = self.expect(pattern, **kwargs)
            raise AssertionError(f'Unexpected: {res.group().decode("utf8")}')
        except pexpect.TIMEOUT:
            pass

    def expect_backtrace(self) -> None:
        self.expect_exact('Backtrace:')
        self.expect_none('CORRUPTED')

    def expect_gme(self, reason: str) -> None:
        """Expect method for Guru Meditation Errors"""
        self.expect_exact(
            f"Guru Meditation Error: Core  0 panic'ed ({reason})")

    def expect_reg_dump(self, core: int = 0) -> None:
        """Expect method for the register dump"""
        self.expect(r'Core\s+%d register dump:' % core)

    def expect_elf_sha256(self) -> None:
        """Expect method for ELF SHA256 line"""
        elf_sha256 = sha256(self.app.elf_file)
        elf_sha256_len = int(
            self.app.sdkconfig.get('CONFIG_APP_RETRIEVE_LEN_ELF_SHA', '16'))
        self.expect_exact('ELF file SHA256: ' + elf_sha256[0:elf_sha256_len])

    def _call_espcoredump(self, extra_args: List[str], coredump_file_name: str,
                          output_file_name: str) -> None:
        # no "with" here, since we need the file to be open for later inspection by the test case
        if not self.coredump_output:
            self.coredump_output = open(output_file_name, 'w')

        espcoredump_script = os.path.join(os.environ['IDF_PATH'], 'components',
                                          'espcoredump', 'espcoredump.py')
        espcoredump_args = [
            sys.executable,
            espcoredump_script,
            'info_corefile',
            '--core',
            coredump_file_name,
        ]
        espcoredump_args += extra_args
        espcoredump_args.append(self.app.elf_file)
        logging.info('Running %s', ' '.join(espcoredump_args))
        logging.info('espcoredump output is written to %s',
                     self.coredump_output.name)

        subprocess.check_call(espcoredump_args, stdout=self.coredump_output)
        self.coredump_output.flush()
        self.coredump_output.seek(0)

    def process_coredump_uart(self) -> None:
        """Extract the core dump from UART output of the test, run espcoredump on it"""
        self.expect(self.COREDUMP_UART_START)
        res = self.expect('(.+)' + self.COREDUMP_UART_END)
        coredump_base64 = res.group(1).decode('utf8')
        with open(os.path.join(self.logdir, 'coredump_data.b64'),
                  'w') as coredump_file:
            logging.info('Writing UART base64 core dump to %s',
                         coredump_file.name)
            coredump_file.write(coredump_base64)

        output_file_name = os.path.join(self.logdir,
                                        'coredump_uart_result.txt')
        self._call_espcoredump(['--core-format', 'b64'], coredump_file.name,
                               output_file_name)

    def process_coredump_flash(self) -> None:
        """Extract the core dump from flash, run espcoredump on it"""
        coredump_file_name = os.path.join(self.logdir, 'coredump_data.bin')
        logging.info('Writing flash binary core dump to %s',
                     coredump_file_name)
        self.serial.dump_flash(coredump_file_name, partition='coredump')

        output_file_name = os.path.join(self.logdir,
                                        'coredump_flash_result.txt')
        self._call_espcoredump(['--core-format', 'raw'], coredump_file_name,
                               output_file_name)

    def gdb_write(self, command: str) -> Any:
        """
        Wrapper to write to gdb with a longer timeout, as test runner
        host can be slow sometimes
        """
        return self.gdb.write(command, timeout_sec=10)

    def start_gdb(self) -> None:
        """
        Runs GDB and connects it to the "serial" port of the DUT.
        After this, the DUT expect methods can no longer be used to capture output.
        """
        self.gdb = GdbController(gdb_path=self.toolchain_prefix + 'gdb')

        # pygdbmi logs to console by default, make it log to a file instead
        pygdbmi_log_file_name = os.path.join(self.logdir, 'pygdbmi_log.txt')
        pygdbmi_logger = self.gdb.logger
        pygdbmi_logger.setLevel(logging.DEBUG)
        while pygdbmi_logger.hasHandlers():
            pygdbmi_logger.removeHandler(pygdbmi_logger.handlers[0])
        log_handler = logging.FileHandler(pygdbmi_log_file_name)
        log_handler.setFormatter(
            logging.Formatter('%(asctime)s %(levelname)s: %(message)s'))
        pygdbmi_logger.addHandler(log_handler)

        logging.info('Running command: %s', self.gdb.get_subprocess_cmd())
        for _ in range(10):
            try:
                # GdbController creates a process with subprocess.Popen(). Is it really running? It is probable that
                # an RPI under high load will get non-responsive during creating a lot of processes.
                resp = self.gdb.get_gdb_response(
                    timeout_sec=10
                )  # calls verify_valid_gdb_subprocess() internally
                # it will be interesting to look up this response if the next GDB command fails (times out)
                logging.info('GDB response: %s', resp)
                break  # success
            except GdbTimeoutError:
                logging.warning(
                    'GDB internal error: cannot get response from the subprocess'
                )
            except NoGdbProcessError:
                logging.error('GDB internal error: process is not running')
                break  # failure - TODO: create another GdbController
            except ValueError:
                logging.error(
                    'GDB internal error: select() returned an unexpected file number'
                )

        # Set up logging for GDB remote protocol
        gdb_remotelog_file_name = os.path.join(self.logdir,
                                               'gdb_remote_log.txt')
        self.gdb_write('-gdb-set remotelogfile ' + gdb_remotelog_file_name)

        # Load the ELF file
        self.gdb_write('-file-exec-and-symbols {}'.format(self.app.elf_file))

        # Connect GDB to UART
        self.serial.proc.close()
        logging.info('Connecting to GDB Stub...')
        self.gdb_write('-gdb-set serial baud 115200')
        responses = self.gdb_write('-target-select remote ' + self.serial.port)

        # Make sure we get the 'stopped' notification
        stop_response = self.find_gdb_response('stopped', 'notify', responses)
        if not stop_response:
            responses = self.gdb_write('-exec-interrupt')
            stop_response = self.find_gdb_response('stopped', 'notify',
                                                   responses)
            assert stop_response
        frame = stop_response['payload']['frame']
        if 'file' not in frame:
            frame['file'] = '?'
        if 'line' not in frame:
            frame['line'] = '?'
        logging.info(
            'Stopped in {func} at {addr} ({file}:{line})'.format(**frame))

        # Drain remaining responses
        self.gdb.get_gdb_response(raise_error_on_timeout=False)

    def gdb_backtrace(self) -> Any:
        """
        Returns the list of stack frames for the current thread.
        Each frame is a dictionary, refer to pygdbmi docs for the format.
        """
        assert self.gdb

        responses = self.gdb_write('-stack-list-frames')
        return self.find_gdb_response('done', 'result',
                                      responses)['payload']['stack']

    @staticmethod
    def match_backtrace(gdb_backtrace: List[Any],
                        expected_functions_list: List[Any]) -> bool:
        """
        Returns True if the function names listed in expected_functions_list match the backtrace
        given by gdb_backtrace argument. The latter is in the same format as returned by gdb_backtrace()
        function.
        """
        return all([
            frame['func'] == expected_functions_list[i]
            for i, frame in enumerate(gdb_backtrace)
        ])

    @staticmethod
    def find_gdb_response(message: str, response_type: str,
                          responses: List[Any]) -> Any:
        """
        Helper function which extracts one response from an array of GDB responses, filtering
        by message and type. Returned message is a dictionary, refer to pygdbmi docs for the format.
        """
        def match_response(response: Dict[str, Any]) -> bool:
            return response['message'] == message and response[
                'type'] == response_type  # type: ignore

        filtered_responses = [r for r in responses if match_response(r)]
        if not filtered_responses:
            return None
        return filtered_responses[0]