Example 1
    def invoke(self, arg, from_tty):
        argv = gdb.string_to_argv(arg)
        if argv and len(argv) > 0:
            btdepth = argv[0]
        else:
            btdepth = ""

        ltq = gdb.execute('p/x __active_lthreads',
                          to_string=True).split('=')[1].strip()

        no = 1
        while (int(ltq, 16) != 0):
            lt = gdb.execute('p/x ((struct lthread_queue*)%s)->lt' % ltq,
                             to_string=True).split('=')[1].strip()
            lt_tid = gdb.execute('p/d ((struct lthread_queue*)%s)->lt->tid' %
                                 ltq,
                                 to_string=True).split('=')[1].strip()
            lt_name = gdb.execute(
                'p/s ((struct lthread_queue*)%s)->lt->funcname' % ltq,
                to_string=True).split('=')[1].strip().split(',')[0]
            lt_cpu = gdb.execute('p/d ((struct lthread_queue*)%s)->lt->cpu' %
                                 ltq,
                                 to_string=True).split('=')[1].strip()
            gdb.write('#%3d Lthread: TID: %3s, Addr: %s, Name: %s, CPU: %s\n' %
                      (no, lt_tid, lt, lt_name, lt_cpu))
            gdb.execute('lthread-bt %s %s' % (lt, btdepth))
            gdb.write('\n')
            gdb.flush()

            ltq = gdb.execute('p/x ((struct lthread_queue*)%s)->next' % ltq,
                              to_string=True).split('=')[1].strip()
            no = no + 1

        return False
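The invoke() methods throughout these examples belong to gdb.Command subclasses. A minimal registration sketch (the class and command names below are hypothetical, not taken from the original source):

import gdb

class LthreadsBtAll(gdb.Command):
    """Hypothetical command that prints a backtrace for every lthread."""

    def __init__(self):
        # gdb.COMMAND_USER groups the command under "help user-defined".
        super(LthreadsBtAll, self).__init__("lthreads-bt-all", gdb.COMMAND_USER)

    def invoke(self, arg, from_tty):
        pass  # body as in Example 1 above

# Instantiating the class registers the command with GDB.
LthreadsBtAll()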
Example 2
    def invoke(self, arg, from_tty):
        argv = gdb.string_to_argv(arg)
        if argv and len(argv) > 0:
            btdepth = argv[0]
        else:
            btdepth = ""

        schedq_lts = 0
        syscall_req_lts = 0
        syscall_ret_lts = 0
        fxq_lts = 0

        schedq_lts = self.count_queue_elements('__scheduler_queue')
        syscall_req_lts = self.count_queue_elements('__syscall_queue')
        syscall_ret_lts = self.count_queue_elements('__return_queue')

        fxq = gdb.execute('p/x futex_queues->slh_first',
                          to_string=True).split('=')[1].strip()
        while (int(fxq, 16) != 0):
            fxq_lts = fxq_lts + 1
            fxq = gdb.execute('p/x ((struct futex_q*)%s)->entries.sle_next' %
                              fxq,
                              to_string=True).split('=')[1].strip()

        waiting_total = schedq_lts + syscall_req_lts + syscall_ret_lts + fxq_lts

        gdb.write('Waiting lthreads:\n')
        gdb.write('  scheduler queue:       %s\n' % schedq_lts)
        gdb.write('  syscall request queue: %s\n' % syscall_req_lts)
        gdb.write('  syscall return queue:  %s\n' % syscall_ret_lts)
        gdb.write('  waiting for futex:     %s\n' % fxq_lts)
        gdb.write('  Total:                 %s\n' % waiting_total)
        gdb.flush()

        return False
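Example 2 calls a count_queue_elements() helper that is not shown here. Judging from Examples 3 and 4 below, each queue exposes enqueue_pos and dequeue_pos cursors, so a plausible sketch of the helper (an assumption, not the original code) is:

    def count_queue_elements(self, queue):
        # Assumed helper: pending entries are the distance between the
        # enqueue and dequeue cursors of the named lock-free queue.
        enqueue_pos = int(gdb.execute('p %s->enqueue_pos' % queue,
                                      to_string=True).split('=')[1].strip())
        dequeue_pos = int(gdb.execute('p %s->dequeue_pos' % queue,
                                      to_string=True).split('=')[1].strip())
        return enqueue_pos - dequeue_pos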
Example 3
    def print_bts_for_queue(self, queue, btdepth):
        enqueue_pos = int(
            gdb.execute('p %s->enqueue_pos' % queue,
                        to_string=True).split('=')[1].strip())
        dequeue_pos = int(
            gdb.execute('p %s->dequeue_pos' % queue,
                        to_string=True).split('=')[1].strip())
        if (enqueue_pos < dequeue_pos):
            raise Exception("Logic error: %d < %d" %
                            (enqueue_pos, dequeue_pos))

        buffer_mask = int(
            gdb.execute('p %s->buffer_mask' % queue,
                        to_string=True).split('=')[1].strip())

        for i in range(dequeue_pos, enqueue_pos):
            lt = gdb.execute('p/x slotlthreads[%s->buffer[%d & %d].data]' %
                             (queue, i, buffer_mask),
                             to_string=True).split('=')[1].strip()
            if (lt != '0x0'):
                tid = int(
                    gdb.execute('p ((struct lthread*)%s)->tid' % lt,
                                to_string=True).split('=')[1].strip())
                gdb.write('Lthread [tid=%d]\n' % tid)
                gdb.execute('lthread-bt %s %s' % (lt, btdepth))
                gdb.write('\n')
            else:
                gdb.write('Queue entry without associated lthread...\n')

        gdb.flush()
Example 4
    def invoke(self, arg, from_tty):

        enqueue_pos = int(
            gdb.execute('p __scheduler_queue->enqueue_pos',
                        to_string=True).split('=')[1].strip())
        dequeue_pos = int(
            gdb.execute('p __scheduler_queue->dequeue_pos',
                        to_string=True).split('=')[1].strip())
        if (enqueue_pos < dequeue_pos):
            raise Exception("Logic error: %d < %d" %
                            (enqueue_pos, dequeue_pos))

        buffer_mask = int(
            gdb.execute('p __scheduler_queue->buffer_mask',
                        to_string=True).split('=')[1].strip())

        tids = []
        for i in range(dequeue_pos, enqueue_pos):
            gdb.write(
                'p ((struct lthread*)__scheduler_queue->buffer[%d & %d].data)->tid\n'
                % (i, buffer_mask))
            tid = int(
                gdb.execute(
                    'p ((struct lthread*)__scheduler_queue->buffer[%d & %d].data)->tid'
                    % (i, buffer_mask),
                    to_string=True).split('=')[1].strip())
            tids.append(tid)

        gdb.write('\nScheduler queue lthreads:\n' + tw.fill(str(tids)) + '\n')
        gdb.flush()
Example 5
 def log(self, args):
     gdb.flush()
     if args.logfile:
         gdb.execute("set logging on")
         gdb.execute("set logging file %s" % args.logfile)
     else:
         gdb.execute("set logging off")
Example 6
    def invoke(self, argument, from_tty):
        parser = self.NoexitArgumentParser(prog=self._command,
                description=self.__doc__)
        parser.add_argument('limit', metavar='limit', type=int, nargs='?',
                default=sys.maxsize, help='Only consider [limit] stack frames')
        parser.add_argument('--skip', metavar='N', nargs='?', type=int,
                default=0, help='Skip first [N] stack frames')
        parser.add_argument('--ignore-pc', action='store_true', default=False,
                help='Ignore program counter for frame equivalence')
        parser.add_argument('--show-source', action='store_true', default=False,
                help='Show source file and line info, if available')

        args = parser.parse_args(gdb.string_to_argv(argument))

        traces = []
        for thread in gdb.inferiors()[0].threads():
            traces.append(stacks.StackTrace(thread, args.skip, args.limit,
                args.ignore_pc, args.show_source))

        uniq = {}
        for stack in traces:
            uniq.setdefault(stack,[]).append(stack.gdb_thread_id)

        sorter = lambda d: sorted(d.items(), key=lambda item: len(item[1]),
                reverse=True)

        gdb.write("\n== Printing {} unique stacks from {} threads\n\n".format(
            len(uniq), len(traces)))

        for k, v in sorter(uniq):
            gdb.write("Stack for thread ids {}\n".format(sorted(v)))
            gdb.write(str(k))
            gdb.write("\n\n")

        gdb.flush()
Example 7
 def stage_finish(self, now=False):
     fd = FlushDatabase(self.controller.current_stage)
     gdb.flush()
     if now:
         fd()
     else:
         gdb.post_event(fd)
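gdb.post_event() accepts any callable and runs it later on GDB's main thread. FlushDatabase is not shown here; a sketch of its likely shape, modeled on the db_info flush logic of Example 13 below (hypothetical, not the original class):

class FlushDatabase(object):
    def __init__(self, stage):
        self.stage = stage

    def __call__(self):
        # Runs on GDB's event loop; db_info comes from the surrounding
        # tracing script (see Example 13).
        db_info.get(self.stage).flush_tracedb()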
Example 8
 def do(self):
     gdb.flush()
     global start
     stop = time.time()
     gdb.write(
         "watchpoint trace finished in %f minutes\n" %
         ((stop - start) / 60), gdb.STDOUT)
Example 9
 def invoke(self, arg, from_tty):
     gdb.write('\nSlot tids:\n' + tw.fill(str(self.slot_tids())))
     gdb.write('\nSlot syscallnos:\n' + tw.fill(str(self.syscall_nos())))
     gdb.write('\nSyscall tids:\n' +
               tw.fill(str(self.queue_tids('syscall'))))
     gdb.write('\nReturn tids:\n' + tw.fill(str(self.queue_tids('return'))))
     gdb.flush()
Example 10
    def invoke(self, argstr, from_tty):
        '''
        Called when this Command is invoked from GDB. Prints classification of
        Inferior to GDB's STDOUT.

        Note that sys.stdout is automatically redirected to GDB's STDOUT.
        See GDB Python API documentation for details
        '''
        check_version()

        op = NiceArgParser(prog=self._cmdstr, description=self.__doc__)
        op.add_argument("-v", "--verbose", action="store_true",
                        help="print analysis info from the Inferior")
        op.add_argument("-m", "--machine", action="store_true",
                        help="Print output in a machine parsable format")
        op.add_argument("-p", "--pkl-file", type=argparse.FileType("wb"),
                        help="pickle exploitability classification object and store to PKL_FILE")
        op.add_argument("-a", "--asan-log", type=argparse.FileType(),
                        help="Symbolize and analyze AddressSanitizer output (assumes "
                        "executable is loaded) (WARNING: untested).")
        op.add_argument("-b", "--backtrace-limit", type=int,
                        help="Limit number of stack frames in backtrace to supplied value. "
                        "0 means no limit.", default=1000)

        try:
            args = op.parse_args(gdb.string_to_argv(argstr))
        except NiceArgParserExit:
            return

        import logging
        try:
            target = arch.getTarget(args.asan_log, args.backtrace_limit)
            c = classifier.Classifier().getClassification(target)
        except Exception as e:
            logging.exception(e)
            raise e

        if args.pkl_file:
            import pickle as pickle
            pickle.dump(c, args.pkl_file, 2)
            return

        if args.verbose:
            print("'exploitable' version {}".format(version))
            print(" ".join([str(i) for i in os.uname()]))
            print("Signal si_signo: {} Signal si_addr: {}".format(
                target.si_signo(), target.si_addr()))
            print("Nearby code:")
            self.print_disassembly()
            print("Stack trace:")
            print(str(target.backtrace()))
            print("Faulting frame: {}".format(target.faulting_frame()))

        if args.machine:
            print_machine_string(c, target)
        else:
            gdb.write(str(c))
        gdb.flush()
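NiceArgParser and NiceArgParserExit are not defined in this snippet. The usual pattern is an argparse.ArgumentParser subclass whose exit() raises instead of terminating the GDB process; a minimal sketch under that assumption:

import argparse
import gdb

class NiceArgParserExit(Exception):
    """Raised instead of letting argparse call sys.exit() inside GDB."""

class NiceArgParser(argparse.ArgumentParser):
    def exit(self, status=0, message=None):
        # argparse calls exit() on --help and on parse errors; raise so the
        # enclosing invoke() can simply return instead of killing GDB.
        if message:
            gdb.write(message)
            gdb.flush()
        raise NiceArgParserExit()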
Example 11
    def print_msg(self, level, str):
        try:
            current_level = pydslice_debugger.debug_print_level
        except NameError:
            current_level = pydslice.pydslice_debugger.debug_print_level

        if level <= current_level:
            gdb.write(str + '\n')
            gdb.flush()
Example 12
    def _runner():
        i = gdb.selected_inferior()
        if i.progspace.filename is not None:
            res = gdb.execute('add-inferior -no-connection', False, True)
            n = res.split()[-1]
            gdb.execute('inferior %s' % n)
        gdb.execute('set remote exec-file %s' % filename)
        gdb.execute('file %s' % filename)
        gdb.flush()

        cb_notify_event({'pk': pk, 'inferior': i.num, 'filename': filename})
Example 13
 def do(self):
     self.called = True
     global db_written
     if db_written:
         return
     global start
     gdb.flush()
     db_info.get(self.stage).flush_tracedb()
     stop = time.time()
     gdb.write(".. finished in %f minutes\n" % ((stop - start) / 60),
               gdb.STDOUT)
     db_written = True
Example 14
    def _runner():
        gdb.execute('kill inferiors %s' % num)
        gdb.execute('detach inferior %s' % num)
        if num > 1:
            if gdb.selected_inferior().num == num:
                gdb.execute('inferior 1')
            gdb.execute('remove-inferiors %s' % num)
        gdb.flush()

        cb_notify_event({
            'pk': pk,
        })
Example 15
	def reset(self):
		if hasattr(self, "output"):
			self.output.flush()
			if self.output != gdb:
				self.output.close()
		gdb.flush()
		self.connected = False
		self.threads = {}
		self.output_desc = "gdb console"
		self.output = gdb
		gdb.events.inferior_call_pre.disconnect(self.call_pre)
		gdb.events.inferior_call_post.disconnect(self.call_post)
Example 17
    def invoke(self, argument, from_tty):
        self.dont_repeat()

        period = 0.5

        args = gdb.string_to_argv(argument)

        if len(args) > 0:
            try:

                period = int(args[0])
            except ValueError:
                print("Invalid number \"%s\"." % args[0])
                return

        def breaking_continue_handler(event):
            sleep(period)
            os.kill(gdb.selected_inferior().pid, signal.SIGINT)

        call_chain_frequencies = defaultdict(int)
        sleeps = 0

        try:
            while True:
                gdb.events.cont.connect(breaking_continue_handler)
                gdb.execute("continue", to_string=True)
                gdb.events.cont.disconnect(breaking_continue_handler)

                call_chain_frequencies[get_call_chain()] += 1
                sleeps += 1
                gdb.write(".")
                gdb.flush(gdb.STDOUT)
        except KeyboardInterrupt:
            pass
        finally:
            gdb.events.cont.disconnect(breaking_continue_handler)
            print("\nProfiling complete with %d samples." % sleeps)
            for call_chain, frequency in sorted(
                    call_chain_frequencies.iteritems(),
                    key=lambda x: x[1],
                    reverse=True):
                print("%d\t%s" %
                      (frequency, '->'.join(str(i) for i in call_chain)))

        pid = gdb.selected_inferior().pid
        os.kill(pid,
                signal.SIGSTOP)  # Make sure the process does nothing until
        # it's reattached.
        gdb.execute("detach", to_string=True)
        gdb.execute("attach %d" % pid, to_string=True)
        os.kill(pid, signal.SIGCONT)
        gdb.execute("continue", to_string=True)
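The sampling profiler above relies on a get_call_chain() helper that is not included. A minimal sketch, assuming it returns a hashable tuple of frame names from the innermost frame outwards:

def get_call_chain():
    # Walk the selected thread's frames and collect their function names.
    names = []
    frame = gdb.newest_frame()
    while frame is not None:
        names.append(frame.name() or '??')
        frame = frame.older()
    return tuple(names)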
Example 18
    def invoke(self, arg, _):
        if not arg:
            arg = DEFAULT_ARG

        addr = gdb.parse_and_eval(arg)

        if addr.type != VOID_P_TYPE:
            gdb.write('Invalid argument: not an address\n', gdb.STDERR)
            return

        infs = gdb.inferiors()

        assert len(infs) == 1

        mem = infs[0].read_memory(addr, STACK_SAMPLE_LENGTH)

        procs = []
        funs = {}

        addr = struct.Struct(ADDR_UNPACK)

        for i in xrange(STACK_SAMPLE_LENGTH / ADDR_SIZE):
            p = addr.unpack(mem[i * ADDR_SIZE:(i + 1) * ADDR_SIZE])[0]

            if p not in funs:
                s = gdb.execute('info symbol %d' % p, to_string=True)
                s = s.strip()

                m = re.match(r'(\w*) \+ \d+ in section', s)
                if m:
                    procs.append(p)
                    funs[p] = m.groups()[0]
            else:
                procs.append(p)

        idx, seq = find_shortest(procs)

        if not seq:
            gdb.write('Unable to find recurring call pattern', gdb.STDERR)
            gdb.flush(gdb.STDERR)
            return

        title = 'Recurring call pattern starting at frame %d' % idx
        gdb.write(title)
        gdb.write('\n')
        gdb.write('=' * len(title))
        gdb.write('\n')
        for c in seq:
            gdb.write('%s @ 0x%x\n' % (funs[c], c))
        gdb.flush(gdb.STDOUT)
Example 20
    def invoke(self, arg, from_tty):
        argv = gdb.string_to_argv(arg)
        if argv and len(argv) > 0:
            btdepth = argv[0]
        else:
            btdepth = ""
        waiters = get_fx_waiters(btdepth)
        for w in waiters:
            gdb.write('FX entry: key: %s, lt: %s, deadline: %s\n' %
                      (w.key, w.lt, w.deadline))
            gdb.write(w.backtrace)
            gdb.write("\n")
        gdb.flush()

        return False
Example 21
    def invoke(self, argument, from_tty):
        self.dont_repeat()

        period = 0.5

        args = gdb.string_to_argv(argument)

        if len(args) > 0:
            try:

                period = int(args[0])
            except ValueError:
                print("Invalid number \"%s\"." % args[0])
                return

        def breaking_continue_handler(event):
            sleep(period)
            os.kill(gdb.selected_inferior().pid, signal.SIGINT)

        call_chain_frequencies = defaultdict(int)
        sleeps = 0

        try:
            while True:
                gdb.events.cont.connect(breaking_continue_handler)
                gdb.execute("continue", to_string=True)
                gdb.events.cont.disconnect(breaking_continue_handler)

                call_chain_frequencies[get_call_chain()] += 1
                sleeps += 1
                gdb.write(".")
                gdb.flush(gdb.STDOUT)
        except KeyboardInterrupt:
            pass
        finally:
            gdb.events.cont.disconnect(breaking_continue_handler)
            print("\nProfiling complete with %d samples." % sleeps)
            for call_chain, frequency in sorted(call_chain_frequencies.iteritems(), key=lambda x: x[1], reverse=True):
                print("%d\t%s" % (frequency, '->'.join(str(i) for i in call_chain)))

        pid = gdb.selected_inferior().pid
        os.kill(pid, signal.SIGSTOP)  # Make sure the process does nothing until
                                      # it's reattached.
        gdb.execute("detach", to_string=True)
        gdb.execute("attach %d" % pid, to_string=True)
        os.kill(pid, signal.SIGCONT)
        gdb.execute("continue", to_string=True)
Example 22
    def invoke(self, arg, from_tty):
        """Show the elements in the provided list."""
        args = gdb.string_to_argv(arg)
        if len(args) != 1:
            raise gdb.GdbError(self._usage)

        val = gdb.parse_and_eval(args[0])
        if val is None:
            raise gdb.GdbError('%s is not a valid expression' % (args[0]))
        i = 0
        while not isNullptr(val):
            gdb.write("%s : %s\n" % (val, val.dereference()))
            gdb.flush()
            i += 1
            val = val['next']
        gdb.write("%s contains %d elements\n" % (args[0], i))
        gdb.flush()
Example 23
    def invoke(self, arg, from_tty):
        """Show the argv."""
        args = gdb.string_to_argv(arg)
        if len(args) != 1:
            raise gdb.GdbError(self._usage)

        val = gdb.parse_and_eval(args[0])
        if val is None:
            raise gdb.GdbError('%s is not a valid expression' % (args[0]))

        strs = []
        while not isNullptr(val.dereference()):
            strs.append('"' + val.dereference().string() + '"')
            val += 1

        gdb.write("[%d] = [%s]\n" % (len(strs), ', '.join(strs)))
        gdb.flush()
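Examples 22 and 23 rely on an isNullptr() helper that is not shown; a plausible definition simply tests the pointer value for zero:

def isNullptr(val):
    # A gdb.Value holding a pointer converts to its integer address.
    return int(val) == 0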
Example 24
    def invoke(self, arg, from_tty):
        argv = gdb.string_to_argv(arg)
        if not argv:
            gdb.write(
                'No lthread address provided. Usage: lthread-bt <addr> [<btdepth>]\n'
            )
            gdb.flush()
            return False
        lt_addr = argv[0]
        if len(argv) > 1:
            btdepth = argv[1]
        else:
            btdepth = ""

        get_lthread_backtrace(lt_addr, btdepth)

        return False
Example 25
 def invoke(self, arg, from_tty):
     if os.environ.get("SGXLKL_VERBOSE", None) != "1":
         gdb.write("Environment variable SGXLKL_VERBOSE=1 is not set!\n")
         return
     gdb.execute('call dump_stack()')
     frames = self.parse_stack_trace()
     for i, frame in enumerate(frames):
         line = gdb.execute('info line *0x%x' % frame, to_string=True)
         match = info_line.match(line)
         if match:
             symbol_offset = match.group(3)
             filename = match.group(2)
             line = match.group(1)
             gdb.write("[%3d] %50s in %s:%s\n" %
                       (i, symbol_offset, filename, line))
         else:
             # better safe than sorry
             gdb.write("[%3d] %s\n" % (i, line))
     gdb.flush()
Example 26
    def execute_redirect(self, gdb_command, silent=False):
        """
        Execute a gdb command and capture its output

        Args:
            - gdb_command (String)
            - silent: discard command's output, redirect to /dev/null (Bool)

        Returns:
            - output of command (String)
        """
        result = None
        #init redirection
        if silent:
            logfd = open(os.path.devnull, "w")  # only the file name is used below
        else:
            logfd = tmpfile()
        logname = logfd.name
        gdb.execute('set logging off') # prevent nested call
        gdb.execute('set height 0') # disable paging
        gdb.execute('set logging file %s' % logname)
        gdb.execute('set logging overwrite on')
        gdb.execute('set logging redirect on')
        gdb.execute('set logging on')
        try:
            gdb.execute(gdb_command)
            gdb.flush()
            gdb.execute('set logging off')
            if not silent:
                logfd.flush()
                result = logfd.read()
            logfd.close()
        except Exception as e:
            gdb.execute('set logging off') #to be sure
            if config.Option.get("debug") == "on":
                msg('Exception (%s): %s' % (gdb_command, e), "red")
                traceback.print_exc()
            logfd.close()
        if config.Option.get("verbose") == "on":
            msg(result)
        return result
Example 27
    def execute_redirect(self, gdb_command, silent=False):
        """
        Execute a gdb command and capture its output

        Args:
            - gdb_command (String)
            - silent: discard command's output, redirect to /dev/null (Bool)

        Returns:
            - output of command (String)
        """
        result = None
        #init redirection
        if silent:
            logfd = open(os.path.devnull, "w")  # only the file name is used below
        else:
            logfd = tmpfile()
        logname = logfd.name
        gdb.execute('set logging off') # prevent nested call
        gdb.execute('set height 0') # disable paging
        gdb.execute('set logging file %s' % logname)
        gdb.execute('set logging overwrite on')
        gdb.execute('set logging redirect on')
        gdb.execute('set logging on')
        try:
            gdb.execute(gdb_command)
            gdb.flush()
            gdb.execute('set logging off')
            if not silent:
                logfd.flush()
                result = logfd.read()
            logfd.close()
        except Exception as e:
            gdb.execute('set logging off') #to be sure
            logfd.close()
            raise e
        return result.decode('ascii')
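A usage sketch for execute_redirect(); the helper object name below is hypothetical, standing in for an instance of whatever class Examples 26 and 27 belong to:

# Hypothetical usage: capture a command's output as a string and post-process it.
helper = DebuggerUtils()  # assumed instance of the class shown above
mappings = helper.execute_redirect('info proc mappings')
if mappings:
    for line in mappings.splitlines():
        if '[stack]' in line:
            gdb.write(line + '\n')
    gdb.flush()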
Example 28
def show_leak():
    tracker = gdb.parse_and_eval("memory::tracker")
    size_allocations = tracker["size_allocations"]
    allocations = tracker["allocations"]
    # Build a list of allocations to be sorted lexicographically by call chain
    # and summarize allocations with the same call chain:
    percent = "   "
    gdb.write("Fetching data from qemu/osv: %s" % percent)
    gdb.flush()
    allocs = []
    for i in range(size_allocations):
        newpercent = "%2d%%" % round(100.0 * i / (size_allocations - 1))
        if newpercent != percent:
            percent = newpercent
            gdb.write("\b\b\b%s" % newpercent)
            gdb.flush()
        a = allocations[i]
        addr = ulong(a["addr"])
        if addr == 0:
            continue
        nbacktrace = a["nbacktrace"]
        backtrace = a["backtrace"]
        callchain = []
        for j in range(nbacktrace):
            callchain.append(ulong(backtrace[nbacktrace - 1 - j]))
        allocs.append((i, callchain))
    gdb.write("\n")

    gdb.write("Merging %d allocations by identical call chain... " % len(allocs))
    gdb.flush()
    allocs.sort(key=lambda entry: entry[1])

    import collections

    Record = collections.namedtuple(
        "Record",
        ["bytes", "allocations", "minsize", "maxsize", "avgsize", "minbirth", "maxbirth", "avgbirth", "callchain"],
    )
    records = []

    total_size = 0
    cur_n = 0
    cur_total_size = 0
    cur_total_seq = 0
    cur_first_seq = -1
    cur_last_seq = -1
    cur_max_size = -1
    cur_min_size = -1
    for k, alloc in enumerate(allocs):
        i = alloc[0]
        callchain = alloc[1]
        seq = ulong(allocations[i]["seq"])
        size = ulong(allocations[i]["size"])
        total_size += size
        cur_n += 1
        cur_total_size += size
        cur_total_seq += seq
        if cur_first_seq < 0 or seq < cur_first_seq:
            cur_first_seq = seq
        if cur_last_seq < 0 or seq > cur_last_seq:
            cur_last_seq = seq
        if cur_min_size < 0 or size < cur_min_size:
            cur_min_size = size
        if cur_max_size < 0 or size > cur_max_size:
            cur_max_size = size
        # If the next entry has the same call chain, just continue summing
        if k != len(allocs) - 1 and callchain == allocs[k + 1][1]:
            continue
        # We're done with a bunch of allocations with same call chain:
        r = Record(
            bytes=cur_total_size,
            allocations=cur_n,
            minsize=cur_min_size,
            maxsize=cur_max_size,
            avgsize=cur_total_size / cur_n,
            minbirth=cur_first_seq,
            maxbirth=cur_last_seq,
            avgbirth=cur_total_seq / cur_n,
            callchain=callchain,
        )
        records.append(r)
        cur_n = 0
        cur_total_size = 0
        cur_total_seq = 0
        cur_first_seq = -1
        cur_last_seq = -1
        cur_max_size = -1
        cur_min_size = -1
    gdb.write("generated %d records.\n" % len(records))

    # Now sort the records by total number of bytes
    records.sort(key=lambda r: r.bytes, reverse=True)

    gdb.write("\nAllocations still in memory at this time (seq=%d):\n\n" % tracker["current_seq"])
    for r in records:
        gdb.write("Found %d bytes in %d allocations [size " % (r.bytes, r.allocations))
        if r.minsize != r.maxsize:
            gdb.write("%d/%.1f/%d" % (r.minsize, r.avgsize, r.maxsize))
        else:
            gdb.write("%d" % r.minsize)
        gdb.write(", birth ")
        if r.minbirth != r.maxbirth:
            gdb.write("%d/%.1f/%d" % (r.minbirth, r.avgbirth, r.maxbirth))
        else:
            gdb.write("%d" % r.minbirth)
        gdb.write("]\nfrom:\n")
        for f in reversed(r.callchain):
            si = syminfo(f)
            gdb.write("\t%s\n" % (si,))
        gdb.write("\n")
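show_leak() depends on ulong() and syminfo() helpers from the surrounding script, which are not shown. A sketch of ulong() under the assumption that it merely normalizes a gdb.Value to an unsigned 64-bit Python integer:

def ulong(x):
    # Assumed helper: coerce a (possibly negative) gdb.Value to unsigned.
    return int(x) & 0xffffffffffffffff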
Example 29
    '{meOne, meThree}',
    'MyOtherEnum(1)',
    '5',
    'array = {1, 2, 3, 4, 5}',
    'seq(0, 0)',
    'seq(0, 10)',
    'array = {"one", "two"}',
    'seq(3, 3) = {1, 2, 3}',
    'seq(3, 3) = {"one", "two", "three"}',
    'Table(3, 64) = {[4] = "four", [5] = "five", [6] = "six"}',
    'Table(3, 8) = {["two"] = 2, ["three"] = 3, ["one"] = 1}',
]

for i, expected in enumerate(outputs):
    gdb.write(f"{i+1}) expecting: {expected}: ", gdb.STDLOG)
    gdb.flush()

    functionSymbol = gdb.selected_frame().block().function
    assert functionSymbol.line == 21

    if i == 6:
        # myArray is passed as a pointer to int to myDebug, so look myArray up one frame up the stack
        gdb.execute("up")
        raw = gdb.parse_and_eval("myArray")
    elif i == 9:
        # myOtherArray is passed as a pointer to int to myDebug, so look myOtherArray up one frame up the stack
        gdb.execute("up")
        raw = gdb.parse_and_eval("myOtherArray")
    else:
        raw = gdb.parse_and_eval("arg")
Example 30
def show_leak():
    tracker = gdb.parse_and_eval('memory::tracker')
    size_allocations = tracker['size_allocations']
    allocations = tracker['allocations']
    # Build a list of allocations to be sorted lexicographically by call chain
    # and summarize allocations with the same call chain:
    percent='   ';
    gdb.write('Fetching data from qemu/osv: %s' % percent);
    gdb.flush();
    allocs = [];
    for i in range(size_allocations) :
        newpercent = '%2d%%' % round(100.0*i/(size_allocations-1));
        if newpercent != percent :
            percent = newpercent;
            gdb.write('\b\b\b%s' % newpercent);
            gdb.flush();
        a = allocations[i]
        addr = ulong(a['addr'])
        if addr == 0 :
            continue
        nbacktrace = a['nbacktrace']
        backtrace = a['backtrace']
        callchain = []
        for j in range(nbacktrace) :
            callchain.append(ulong(backtrace[nbacktrace-1-j]))
        allocs.append((i, callchain))
    gdb.write('\n');

    gdb.write('Merging %d allocations by identical call chain... ' %
              len(allocs))
    gdb.flush();
    allocs.sort(key=lambda entry: entry[1])
    
    import collections
    Record = collections.namedtuple('Record',
                                    ['bytes', 'allocations', 'minsize',
                                     'maxsize', 'avgsize', 'minbirth',
                                     'maxbirth', 'avgbirth', 'callchain'])
    records = [];
    
    total_size = 0
    cur_n = 0
    cur_total_size = 0
    cur_total_seq = 0
    cur_first_seq = -1
    cur_last_seq = -1
    cur_max_size = -1
    cur_min_size = -1
    for k, alloc in enumerate(allocs) :
        i = alloc[0]
        callchain = alloc[1]
        seq = ulong(allocations[i]['seq'])
        size = ulong(allocations[i]['size'])
        total_size += size
        cur_n += 1
        cur_total_size += size
        cur_total_seq += seq
        if cur_first_seq<0 or seq<cur_first_seq :
            cur_first_seq = seq
        if cur_last_seq<0 or seq>cur_last_seq :
            cur_last_seq = seq
        if cur_min_size<0 or size<cur_min_size :
            cur_min_size = size
        if cur_max_size<0 or size>cur_max_size :
            cur_max_size = size
        # If the next entry has the same call chain, just continue summing
        if k!=len(allocs)-1 and callchain==allocs[k+1][1] :
            continue;
        # We're done with a bunch of allocations with same call chain:
        r = Record(bytes = cur_total_size,
                   allocations = cur_n,
                   minsize = cur_min_size,
                   maxsize = cur_max_size,
                   avgsize = cur_total_size/cur_n,
                   minbirth = cur_first_seq,
                   maxbirth = cur_last_seq,
                   avgbirth = cur_total_seq/cur_n,
                   callchain = callchain)
        records.append(r)
        cur_n = 0
        cur_total_size = 0
        cur_total_seq = 0
        cur_first_seq = -1
        cur_last_seq = -1
        cur_max_size = -1
        cur_min_size = -1
    gdb.write('generated %d records.\n' % len(records))
        
    # Now sort the records by total number of bytes
    records.sort(key=lambda r: r.bytes, reverse=True)

    gdb.write('\nAllocations still in memory at this time (seq=%d):\n\n' %
              tracker['current_seq'])
    for r in records :
        gdb.write('Found %d bytes in %d allocations [size ' % (r.bytes, r.allocations))
        if r.minsize != r.maxsize :
            gdb.write('%d/%.1f/%d' % (r.minsize, r.avgsize, r.maxsize))
        else :
            gdb.write('%d' % r.minsize)
        gdb.write(', birth ')
        if r.minbirth != r.maxbirth :
            gdb.write('%d/%.1f/%d' % (r.minbirth, r.avgbirth, r.maxbirth))
        else :
            gdb.write('%d' % r.minbirth)
        gdb.write(']\nfrom:\n')
        for f in reversed(r.callchain):
            si = syminfo(f)
            gdb.write('\t%s\n' % (si,))
        gdb.write('\n')
Example 31
def progress(fmt, *args):
    sys.stdout.write(fmt % args + '\n')
    gdb.flush(gdb.STDOUT)
Example 32
    def __init__(self, asan_output):
        self.__memo__ = {"isPossibleStackCorruption()": False,
                         "isStackCorruption()": False,
                         "isStackOverflow()": False,
                         "si_signo()": 11}
        if not asan_output:
            raise GdbWrapperError("no ASan data to analyze")

        # symbolize asan_message
        self.asan_stack = []
        out = []
        last = 0
        all_frames = []
        maps = self.procmaps()
        for m in self._re_asan_bt.finditer(asan_output):
            frame, addr, img, offset = m.group("frame", "addr", "img", "offset")
            frame = int(frame)
            addr = int(addr, 16) #+ 1
            if img:
                maps.add_file(img, addr - offset)
            out.append(asan_output[last:m.end("all")])
            all_frames.append((frame, addr, offset, img, len(out)))
            out.append(None)
            last = m.end()
        if not all_frames:
            raise GdbWrapperError("No frames found in address sanitizer log")

        out.append(asan_output[last:])
        frame = -1
        for num, addr, offset, img, outpos in all_frames:
            region = maps.findByAddr(addr)
            symbol = gdb.execute("info symbol {:#x}".format(addr), False, True)
            symline = gdb.execute("info line *{:#x}".format(addr), False, True)
            if symline and symline.startswith("Line"):
                symline = "\n\t{}".format(self._re_symline_trim.sub("", symline))
            else:
                symline = ""
            symbol_m = self._re_gdb_info_sym.search(symbol)
            if img:
                lib = img
            elif region:
                lib = region.name
            else:
                lib = None
            if symbol_m is None:
                sym = None
                off = offset
            else:
                sym = symbol_m.group("sym")
                off = int(symbol_m.group("off"))
            if frame == -1:
                self.asan_pc_img = lib, offset
            if frame is not None and num > frame:
                frame = num
                if lib:
                    lib = os.path.basename(lib)
                self.asan_stack.append(AttrDict(addr=addr,
                                                lib=lib,
                                                off=off,
                                                name=sym))
            else:
                frame = None
            out[outpos] = "{}){}".format(ASanFrame.create(self, addr, sym, off).terse(), symline)
        asan_output = "".join(out)
        gdb.write(asan_output)
        gdb.flush()
        # parse ASAN's analysis
        m = self._re_asan_fault.search(asan_output)
        self.__memo__["si_addr()"] = int(m.group("fault"), 16)
        self.asan_reason = m.group("desc")
        if self.asan_reason == "double-free":
            self.__memo__["pc()"] = self.asan_stack[1].addr
            self.__memo__["stack_pointer()"] = None # what to do? ....
        else:
            self.__memo__["pc()"] = int(m.group("pc"), 16)
            if m.group("bspid1") == "sp":
                self.__memo__["stack_pointer()"] = int(m.group("bsp1"), 16)
            else:
                self.__memo__["stack_pointer()"] = int(m.group("bsp2"), 16)
            if self.asan_reason != "SEGV":
                self.asan_operation = m.group("operation")
Example 33
def show_leak():
    tracker = gdb.parse_and_eval('memory::tracker')
    size_allocations = tracker['size_allocations']
    allocations = tracker['allocations']
    # Build a list of allocations to be sorted lexicographically by call chain
    # and summarize allocations with the same call chain:
    percent = '   '
    gdb.write('Fetching data from qemu/osv: %s' % percent)
    gdb.flush()
    allocs = []
    for i in range(size_allocations):
        newpercent = '%2d%%' % round(100.0 * i / (size_allocations - 1))
        if newpercent != percent:
            percent = newpercent
            gdb.write('\b\b\b%s' % newpercent)
            gdb.flush()
        a = allocations[i]
        addr = ulong(a['addr'])
        if addr == 0:
            continue
        nbacktrace = a['nbacktrace']
        backtrace = a['backtrace']
        callchain = []
        for j in range(nbacktrace):
            callchain.append(ulong(backtrace[nbacktrace - 1 - j]))
        allocs.append((i, callchain))
    gdb.write('\n')

    gdb.write('Merging %d allocations by identical call chain... ' %
              len(allocs))
    gdb.flush()
    allocs.sort(key=lambda entry: entry[1])

    import collections
    Record = collections.namedtuple('Record', [
        'bytes', 'allocations', 'minsize', 'maxsize', 'avgsize', 'minbirth',
        'maxbirth', 'avgbirth', 'callchain'
    ])
    records = []

    total_size = 0
    cur_n = 0
    cur_total_size = 0
    cur_total_seq = 0
    cur_first_seq = -1
    cur_last_seq = -1
    cur_max_size = -1
    cur_min_size = -1
    for k, alloc in enumerate(allocs):
        i = alloc[0]
        callchain = alloc[1]
        seq = ulong(allocations[i]['seq'])
        size = ulong(allocations[i]['size'])
        total_size += size
        cur_n += 1
        cur_total_size += size
        cur_total_seq += seq
        if cur_first_seq < 0 or seq < cur_first_seq:
            cur_first_seq = seq
        if cur_last_seq < 0 or seq > cur_last_seq:
            cur_last_seq = seq
        if cur_min_size < 0 or size < cur_min_size:
            cur_min_size = size
        if cur_max_size < 0 or size > cur_max_size:
            cur_max_size = size
        # If the next entry has the same call chain, just continue summing
        if k != len(allocs) - 1 and callchain == allocs[k + 1][1]:
            continue
        # We're done with a bunch of allocations with same call chain:
        r = Record(bytes=cur_total_size,
                   allocations=cur_n,
                   minsize=cur_min_size,
                   maxsize=cur_max_size,
                   avgsize=cur_total_size / cur_n,
                   minbirth=cur_first_seq,
                   maxbirth=cur_last_seq,
                   avgbirth=cur_total_seq / cur_n,
                   callchain=callchain)
        records.append(r)
        cur_n = 0
        cur_total_size = 0
        cur_total_seq = 0
        cur_first_seq = -1
        cur_last_seq = -1
        cur_max_size = -1
        cur_min_size = -1
    gdb.write('generated %d records.\n' % len(records))

    # Now sort the records by total number of bytes
    records.sort(key=lambda r: r.bytes, reverse=True)

    gdb.write('\nAllocations still in memory at this time (seq=%d):\n\n' %
              tracker['current_seq'])
    for r in records:
        gdb.write('Found %d bytes in %d allocations [size ' %
                  (r.bytes, r.allocations))
        if r.minsize != r.maxsize:
            gdb.write('%d/%.1f/%d' % (r.minsize, r.avgsize, r.maxsize))
        else:
            gdb.write('%d' % r.minsize)
        gdb.write(', birth ')
        if r.minbirth != r.maxbirth:
            gdb.write('%d/%.1f/%d' % (r.minbirth, r.avgbirth, r.maxbirth))
        else:
            gdb.write('%d' % r.minbirth)
        gdb.write(']\nfrom:\n')
        for f in reversed(r.callchain):
            si = syminfo(f)
            gdb.write('\t%s\n' % (si, ))
        gdb.write('\n')
Example 34
    def invoke(self, argstr, from_tty):
        '''
        Called when this Command is invoked from GDB. Prints classification of
        Inferior to GDB's STDOUT.

        Note that sys.stdout is automatically redirected to GDB's STDOUT.
        See GDB Python API documentation for details
        '''
        check_version()

        op = NiceArgParser(prog=self._cmdstr, description=self.__doc__)
        op.add_argument("-v",
                        "--verbose",
                        action="store_true",
                        help="print analysis info from the Inferior")
        op.add_argument("-m",
                        "--machine",
                        action="store_true",
                        help="Print output in a machine parsable format")
        op.add_argument(
            "-p",
            "--pkl-file",
            type=argparse.FileType("wb"),
            help=
            "pickle exploitability classification object and store to PKL_FILE"
        )
        op.add_argument(
            "-a",
            "--asan-log",
            type=argparse.FileType(),
            help="Symbolize and analyze AddressSanitizer output (assumes "
            "executable is loaded) (WARNING: untested).")
        op.add_argument(
            "-b",
            "--backtrace-limit",
            type=int,
            help="Limit number of stack frames in backtrace to supplied value. "
            "0 means no limit.",
            default=1000)

        try:
            args = op.parse_args(gdb.string_to_argv(argstr))
        except NiceArgParserExit:
            return

        import logging
        import lib.gdb_wrapper.x86 as gdb_wrapper
        try:
            target = arch.getTarget(args.asan_log, args.backtrace_limit)
            c = classifier.Classifier().getClassification(target)
        except gdb_wrapper.NoThreadRunningError:
            # Prevent exploitable.py from raising an exception if no threads
            # are running (our target exited gracefully). These exceptions
            # would interrupt the automatic crash classification process in gdb
            # scripts that analyze many crash samples.
            c = classifier.Classification(arch.x86Target)
            dummy_tag = dict(ranking=(0, 0),
                             category="NOT_EXPLOITABLE",
                             desc="The target process exited normally.",
                             short_desc="GracefulExit",
                             explanation="The target process exited normally.",
                             hash=classifier.AttrDict(major=0, minor=0))
            c.__add__(classifier.Tag(dummy_tag))
        except gdb_wrapper.MultipleInferiorsError:
            # This is another hack to prevent exploitable from crashing
            # when the target has more than one inferior (this occurs f.e.
            # when the target spawns a shell and crashes).
            # Again we'll create a dummy classification that will hint
            # the user what happened.
            c = classifier.Classification(arch.x86Target)
            dummy_tag = dict(ranking=(0, 1),
                             category="UNKNOWN",
                             desc="The target has more than one inferior!",
                             short_desc="MultipleInferiors",
                             explanation="The target has multiple inferiors.",
                             hash=classifier.AttrDict(major=0, minor=0))
            c.__add__(classifier.Tag(dummy_tag))
        except Exception as e:
            logging.exception(e)
            raise e

        if args.pkl_file:
            import pickle as pickle
            pickle.dump(c, args.pkl_file, 2)
            return

        if args.verbose:
            print("'exploitable' version {}".format(version))
            print(" ".join([str(i) for i in os.uname()]))
            print("Signal si_signo: {} Signal si_addr: {}".format(
                target.si_signo(), target.si_addr()))
            print("Nearby code:")
            self.print_disassembly()
            print("Stack trace:")
            print(str(target.backtrace()))
            print("Faulting frame: {}".format(target.faulting_frame()))

        if args.machine:
            print_machine_string(c, target)
        else:
            gdb.write(str(c))
        gdb.flush()
Example 35
    def invoke(self, argstr, from_tty):
        '''
        Called when this Command is invoked from GDB. Prints classification of
        Inferior to GDB's STDOUT.

        Note that sys.stdout is automatically redirected to GDB's STDOUT.
        See GDB Python API documentation for details
        '''
        check_version()

        op = NiceArgParser(prog=self._cmdstr, description=self.__doc__)
        op.add_argument("-v", "--verbose", action="store_true",
            help="print analysis info from the Inferior")
        op.add_argument("-m", "--machine", action="store_true",
            help="Print output in a machine parsable format")
        op.add_argument("-p", "--pkl-file", type=argparse.FileType("wb"),
            help="pickle exploitability classification object and store to PKL_FILE")
        op.add_argument("-a", "--asan-log", type=argparse.FileType(),
            help="Symbolize and analyze AddressSanitizer output (assumes "
            "executable is loaded) (WARNING: untested).")
        op.add_argument("-b", "--backtrace-limit", type=int,
            help="Limit number of stack frames in backtrace to supplied value. "
            "0 means no limit.", default=1000)

        try:
            args = op.parse_args(gdb.string_to_argv(argstr))
        except NiceArgParserExit:
            return

        import logging
        import lib.gdb_wrapper.x86 as gdb_wrapper
        try:
            target = arch.getTarget(args.asan_log, args.backtrace_limit)
            c = classifier.Classifier().getClassification(target)
        except gdb_wrapper.NoThreadRunningError:
            # Prevent exploitable.py from raising an exception if no threads
            # are running (our target exited gracefully). These exceptions
            # would interrupt the automatic crash classification process in gdb
            # scripts that analyze many crash samples.
            c = classifier.Classification(arch.x86Target)
            dummy_tag = dict(ranking=(0, 0),
                    category="NOT_EXPLOITABLE",
                    desc="The target process exited normally.",
                    short_desc="GracefulExit",
                    explanation="The target process exited normally.",
                    hash=classifier.AttrDict(major=0, minor=0))
            c.__add__(classifier.Tag(dummy_tag))
        except gdb_wrapper.MultipleInferiorsError:
            # This is another hack to prevent exploitable from crashing
            # when the target has more than one inferior (this occurs f.e.
            # when the target spawns a shell and crashes).
            # Again we'll create a dummy classification that will hint
            # the user what happened.
            c = classifier.Classification(arch.x86Target)
            dummy_tag = dict(ranking=(0, 1),
                    category="UNKNOWN",
                    desc="The target has more than one inferior!",
                    short_desc="MultipleInferiors",
                    explanation="The target has multiple inferiors.",
                    hash=classifier.AttrDict(major=0, minor=0))
            c.__add__(classifier.Tag(dummy_tag))
        except Exception as e:
            logging.exception(e)
            raise e

        if args.pkl_file:
            import pickle as pickle
            pickle.dump(c, args.pkl_file, 2)
            return

        if args.verbose:
            print("'exploitable' version {}".format(version))
            print(" ".join([str(i) for i in os.uname()]))
            print("Signal si_signo: {} Signal si_addr: {}".format(target.si_signo(), target.si_addr()))
            print("Nearby code:")
            self.print_disassembly()
            print("Stack trace:")
            print(str(target.backtrace()))
            print("Faulting frame: {}".format(target.faulting_frame()))

        if args.machine:
            print_machine_string(c, target)
        else:
            gdb.write(str(c))
        gdb.flush()
Example 36
 def _runner():
     for cmd in cmdlist:
         gdb.execute(cmd)
     gdb.flush()
Example 37
 def flushlog(self, args):
     gdb.flush()
Example 38
def post_write(s, *args):
    if not '\n' in s: s += '\n'
    gdb.write(s % args, gdb.STDOUT)
    gdb.flush(gdb.STDOUT)
Example 39
 def gdb_exit(self, event):
     gdb.flush()
     for e in self.exit_hooks:
         e(event)
Example 40
    def invoke(self, argument, from_tty):
        self.dont_repeat()

        period = 0.1

        args = gdb.string_to_argv(argument)

        if len(args) > 0:
            try:

                period = int(args[0])
            except ValueError:
                print("Invalid number \"%s\"." % args[0])
                return

        def breaking_continue_handler(event):
            sleep(period)
            os.kill(gdb.selected_inferior().pid, signal.SIGINT)

#        call_chain_frequencies = defaultdict(lambda: defaultdict(lambda: defaultdict(int)))

        top = Function("Top", 2)
        sleeps = 0

        threads = {}
        for i in range(0, 200):
            gdb.events.cont.connect(breaking_continue_handler)
            gdb.execute("continue", to_string=True)
            gdb.events.cont.disconnect(breaking_continue_handler)

            for inf in gdb.inferiors():
                inum = inf.num
                for th in inf.threads():
                    thn = th.num
                    th.switch()
                    #              call_chain_frequencies[inum][thn][get_call_chain()] += 1
                    frame = gdb.newest_frame()
                    while (frame.older() != None):
                        frame = frame.older()
#              top.inverse_add_frame(frame);
#              top.add_frame(gdb.newest_frame())
                    if thn not in threads:
                        threads[thn] = Function(str(thn), 2)
                    threads[thn].inverse_add_frame(frame)

            sleeps += 1
            gdb.write(".")
            gdb.flush(gdb.STDOUT)

        print("")
        for thn, function in sorted(threads.items()):
            print("")
            print("Thread: %s" % thn)
            print("")
            function.print_percent("", function.get_samples())


#        top.print_percent("", top.get_samples())

#        print("\nProfiling complete with %d samples." % sleeps)
#        for inum, i_chain_frequencies in sorted(call_chain_frequencies.iteritems()):
#            print ""
#            print "INFERIOR NUM: %s" % inum
#            print ""
#            for thn, t_chain_frequencies in sorted (i_chain_frequencies.iteritems()):
#                print ""
#                print "THREAD NUM: %s" % thn
#                print ""
#
#                for call_chain, frequency in sorted(t_chain_frequencies.iteritems(), key=lambda x: x[1], reverse=True):
#                    print("%d\t%s" % (frequency, '->'.join(str(i) for i in call_chain)))
#
#        for call_chain, frequency in sorted(call_chain_frequencies.iteritems(), key=lambda x: x[1], reverse=True):
#            print("%d\t%s" % (frequency, '->'.join(str(i) for i in call_chain)))

        pid = gdb.selected_inferior().pid
        os.kill(pid,
                signal.SIGSTOP)  # Make sure the process does nothing until
        # it's reattached.
        gdb.execute("detach", to_string=True)
        gdb.execute("attach %d" % pid, to_string=True)
        os.kill(pid, signal.SIGCONT)
        gdb.execute("continue", to_string=True)
Example 41
    def invoke(self, argstr, from_tty):
        '''
        Called when this Command is invoked from GDB. Prints classification of
        Inferior to GDB's STDOUT.

        Note that sys.stdout is automatically redirected to GDB's STDOUT.
        See GDB Python API documentation for details
        '''
        check_version()

        op = NiceArgParser(prog=self._cmdstr, description=self.__doc__)
        op.add_argument("-v", "--verbose", action="store_true",
            help="print analysis info from the Inferior")
        op.add_argument("-m", "--machine", action="store_true",
            help="Print output in a machine parsable format")
        op.add_argument("-p", "--pkl-file", type=argparse.FileType("wb"),
            help="pickle exploitability classification object and store to PKL_FILE")
        op.add_argument("-a", "--asan-log", type=argparse.FileType(),
            help="Symbolize and analyze AddressSanitizer output (assumes "
            "executable is loaded) (WARNING: untested).")
        op.add_argument("-b", "--backtrace-limit", type=int,
            help="Limit number of stack frames in backtrace to supplied value. "
            "0 means no limit.", default=1000)

        try:
            args = op.parse_args(gdb.string_to_argv(argstr))
        except NiceArgParserExit:
            return

        import logging
        try:
            target = arch.getTarget(args.asan_log, args.backtrace_limit)
            c = classifier.Classifier().getClassification(target)
        except Exception as e:
            logging.exception(e)
            raise e

        if args.pkl_file:
            import pickle as pickle
            pickle.dump(c, args.pkl_file, 2)
            return

        if args.verbose:
            print("'exploitable' version {}".format(version))
            print(" ".join([str(i) for i in os.uname()]))
            print("Signal si_signo: {} Signal si_addr: {}".format(target.si_signo(), target.si_addr()))
            print("Nearby code:")
            self.print_disassembly()
            print("Stack trace:")
            print(str(target.backtrace()))
            print("Faulting frame: {}".format(target.faulting_frame()))

        if args.machine:
            '''
            Returns a machine-parsable string representation of this
            Classification.
            NOTE: This code was designed by a specific user and hasn't
            been fully tested since it was contributed.
            '''
            if not c.tags:
                print("No matches")
            else:
                print("EXCEPTION_FAULTING_ADDRESS:{:#016x}".format(target.si_addr()))
                print("EXCEPTION_CODE:{}".format(target.si_signo()))
                try:
                    print("FAULTING_INSTRUCTION:{}".format(str(target.current_instruction()).split(":\t")[1].strip()))
                except IndexError:
                    print("FAULTING_INSTRUCTION:?")
                print("MAJOR_HASH:{}".format(c.hash.major))
                print("MINOR_HASH:{}".format(c.hash.minor))
                bt_result = ["STACK_FRAME:{}".format(i.terse()) for i in target.backtrace() if i.type() != 2]
                print("STACK_DEPTH:{}".format(len(bt_result)))
                for bt in bt_result:
                    print(bt)
                print("INSTRUCTION_ADDRESS:{:#016x}".format(target.pc()))
                try:
                    print("INVOKING_STACK_FRAME:{}".format(target.faulting_frame().position))
                except AttributeError:
                    print("INVOKING_STACK_FRAME:?")
                print("DESCRIPTION:{}".format(c.desc))
                print("SHORT_DESCRIPTION:{}".format(c.tags[0]))
                if len(c.tags) > 1:
                    print("OTHER_RULES:{}".format(", ".join(str(t) for t in c.tags[1:])))
                print("CLASSIFICATION:{}".format(c.category))
                print("EXPLANATION:{}".format(c.explanation))
        else:
            gdb.write(str(c))
        gdb.flush()