Example #1
    def _run_daemon(self, use_daemon=None):

        if use_daemon is None:
            # Load it from the configuration file
            use_daemon = Conf.use_daemon

        if not use_daemon:
            return

        # connect to daemon (if there is one)
        if not daemon_exists():
            print("[+] Starting a new daemon.")
            run_daemon_process()
            time.sleep(0.2)
        else:
            print("[+] Connecting to an existing angr management daemon.")

        while True:
            try:
                GlobalInfo.daemon_conn = daemon_conn(service=ClientService)
            except ConnectionRefusedError:
                print("[-] Connection failed... try again.")
                time.sleep(0.4)
                continue
            print("[+] Connected to daemon.")
            break

        from rpyc import BgServingThread  # pylint:disable=import-outside-toplevel
        _ = BgServingThread(GlobalInfo.daemon_conn)
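
The snippets in this file all follow the same rpyc pattern: open a connection, then wrap it in a BgServingThread so that requests initiated by the peer (typically callbacks into the client) keep being served while the client's main thread does other work. Below is a minimal, self-contained sketch of that pattern; the service, port, and callback are hypothetical and not taken from any of the examples.

import threading
import time

import rpyc
from rpyc import BgServingThread
from rpyc.utils.server import ThreadedServer


class NotifierService(rpyc.Service):
    """Hypothetical service: fires a client-supplied callback shortly after subscribing."""

    def exposed_subscribe(self, callback):
        # The callback fires after this method has returned, so only a client that
        # keeps serving the connection (e.g. via BgServingThread) will receive it.
        threading.Timer(0.5, callback, args=("hello from the server",)).start()


server = ThreadedServer(NotifierService, port=18861)
threading.Thread(target=server.start, daemon=True).start()
time.sleep(0.2)  # give the listener a moment to start

conn = rpyc.connect("localhost", 18861)
bg = BgServingThread(conn)  # serve server-initiated requests in the background
conn.root.subscribe(lambda msg: print(msg))
time.sleep(1)  # keep the connection alive until the callback arrives

bg.stop()
conn.close()
server.close()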
Example #2
    def _try_agent_connection(self):
        """ Attempts to connect to the agents that are not connected yet """
        if self._closed:
            return

        for entry, info in enumerate(self._agents_info):
            if self._agents[entry] is None:
                try:
                    conn = rpyc.connect(info['host'],
                                        info['port'],
                                        service=self._get_rpyc_server(entry),
                                        config={
                                            "allow_public_attrs": True,
                                            'allow_pickle': True
                                        })
                    # Try to access conn.root. This raises an exception when the remote RPyC is not yet fully initialized
                    if not conn.root:
                        raise Exception("Cannot get remote service")
                except:
                    self._agents[entry] = None
                    self._agents_thread[entry] = None
                    self._logger.warning(
                        "Cannot connect to agent {}-{}".format(
                            info['host'], info['port']))
                else:
                    self._agents[entry] = conn
                    self._agents_thread[entry] = BgServingThread(conn)
                    self._synchronize_image_aliases(self._agents[entry])
                    self._synchronize_task_dir(self._agents[entry])

        if not self._is_testing:
            # if we are not in a test, retry in 10 secs
            retry_in = 10
        elif self._connection_attempts < 3 and any(
                elem is None for elem in self._agents):
            # if we are testing, do maximum three attempts to connect to all agents
            self._connection_attempts += 1
            retry_in = 10
        else:
            # do not retry
            retry_in = None

        if retry_in is not None:
            self._timers[self._try_agent_connection] = threading.Timer(
                retry_in, self._try_agent_connection)
            self._timers[self._try_agent_connection].start()
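
Each slot filled in above pairs an rpyc connection with its BgServingThread, so the matching teardown should stop the thread before closing the connection. A minimal sketch of that cleanup, reusing the attribute names from the snippet (the method itself is illustrative and not part of the original code):

    def _close_agent(self, entry):
        """Illustrative teardown for a single agent slot."""
        thread = self._agents_thread[entry]
        conn = self._agents[entry]
        if thread is not None:
            thread.stop()  # BgServingThread.stop() terminates and joins the serving thread
        if conn is not None:
            conn.close()
        self._agents_thread[entry] = None
        self._agents[entry] = None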
Example #3
def gdb_api_client(address, machine, *, gdb_args=None, expose_extra=None):
    raise Exception("This is currently broken")

    if gdb_args is None:
        gdb_args = []
    if expose_extra is None:
        expose_extra = []

    socket_dir_path = Path(mkdtemp())
    socket_path = socket_dir_path / "socket"
    gdb_api_bridge_path = Path(__file__).parent / "gdb_api_bridge.py"
    host, port = address

    args = [
        "gdb",
        *("-ex", f"python socket_path = {repr(str(socket_path))}"),
        *("-ex", f"python expose_extra = {str(expose_extra)}"),
        *("-ex", f"source {gdb_api_bridge_path}"),
        *("-ex", f"target remote {host}:{port}"),
        *gdb_args,
        machine.argv[0],
    ]
    process = Popen(args, stdin=PIPE, stdout=DEVNULL, stderr=DEVNULL)

    for _ in range(100):
        if socket_path.exists():
            break
        sleep(0.1)

    conn = unix_connect(str(socket_path))
    socket_path.unlink()
    socket_dir_path.rmdir()

    BgServingThread(conn, callback=lambda: None)

    return GDB(conn, extra=expose_extra)
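
For context, unix_connect() above expects an rpyc server listening on the UNIX socket created by the bridge script that GDB sources. The sketch below is an illustrative, heavily simplified idea of such a bridge; the real gdb_api_bridge.py does considerably more (event forwarding, thread-safe dispatch into GDB), and socket_path is assumed to have been injected by the -ex "python socket_path = ..." command above.

import threading

import gdb  # only importable inside GDB's embedded Python interpreter
import rpyc
from rpyc.utils.server import OneShotServer


class GdbBridgeService(rpyc.Service):
    # Expose the gdb module so the client can drive the GDB Python API remotely.
    exposed_gdb = gdb


server = OneShotServer(GdbBridgeService, socket_path=socket_path,
                       protocol_config={"allow_all_attrs": True})
# Serve in a background thread so GDB keeps processing the remaining -ex commands.
threading.Thread(target=server.start, daemon=True).start()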
Example #4
def attach(target,
           gdbscript='',
           exe=None,
           gdb_args=None,
           ssh=None,
           sysroot=None,
           api=False):
    r"""
    Start GDB in a new terminal and attach to `target`.

    Arguments:
        target: The target to attach to.
        gdbscript(:obj:`str` or :obj:`file`): GDB script to run after attaching.
        exe(str): The path of the target binary.
        arch(str): Architecture of the target binary.  If `exe` is known, GDB will
          detect the architecture automatically (if it is supported).
        gdb_args(list): List of additional arguments to pass to GDB.
        sysroot(str): Foreign-architecture sysroot, used for QEMU-emulated binaries
            and Android targets.
        api(bool): Enable access to GDB Python API.

    Returns:
        PID of the GDB process (or the window which it is running in).
        When ``api=True``, a (PID, :class:`Gdb`) tuple.

    Notes:

        The ``target`` argument is very robust, and can be any of the following:

        :obj:`int`
            PID of a process
        :obj:`str`
            Process name.  The youngest process is selected.
        :obj:`tuple`
            Host, port pair of a listening ``gdbserver``
        :class:`.process`
            Process to connect to
        :class:`.sock`
            Connected socket. The executable on the other end of the connection is attached to.
            Can be any socket type, including :class:`.listen` or :class:`.remote`.
        :class:`.ssh_channel`
            Remote process spawned via :meth:`.ssh.process`.
            This will use the GDB installed on the remote machine.
            If a password is required to connect, the ``sshpass`` program must be installed.

    Examples:

        Attach to a process by PID

        >>> pid = gdb.attach(1234) # doctest: +SKIP

        Attach to the youngest process by name

        >>> pid = gdb.attach('bash') # doctest: +SKIP

        Attach a debugger to a :class:`.process` tube and automate interaction

        >>> io = process('bash')
        >>> pid = gdb.attach(io, gdbscript='''
        ... call puts("Hello from process debugger!")
        ... detach
        ... quit
        ... ''')
        >>> io.recvline()
        b'Hello from process debugger!\n'
        >>> io.sendline(b'echo Hello from bash && exit')
        >>> io.recvall()
        b'Hello from bash\n'

        Using GDB Python API:

        .. doctest::
           :skipif: six.PY2

            >>> io = process('bash')

            Attach a debugger

            >>> pid, io_gdb = gdb.attach(io, api=True)

            Force the program to write something it normally wouldn't

            >>> io_gdb.execute('call puts("Hello from process debugger!")')

            Resume the program

            >>> io_gdb.continue_nowait()

            Observe the forced line

            >>> io.recvline()
            b'Hello from process debugger!\n'

            Interact with the program in a regular way

            >>> io.sendline(b'echo Hello from bash && exit')

            Observe the results

            >>> io.recvall()
            b'Hello from bash\n'

        Attach to the remote process from a :class:`.remote` or :class:`.listen` tube,
        as long as it is running on the same machine.

        >>> server = process(['socat', 'tcp-listen:12345,reuseaddr,fork', 'exec:/bin/bash,nofork'])
        >>> sleep(1) # Wait for socat to start
        >>> io = remote('127.0.0.1', 12345)
        >>> sleep(1) # Wait for process to fork
        >>> pid = gdb.attach(io, gdbscript='''
        ... call puts("Hello from remote debugger!")
        ... detach
        ... quit
        ... ''')
        >>> io.recvline()
        b'Hello from remote debugger!\n'
        >>> io.sendline(b'echo Hello from bash && exit')
        >>> io.recvall()
        b'Hello from bash\n'

        Attach to processes running on a remote machine via an SSH :class:`.ssh` process

        >>> shell = ssh('travis', 'example.pwnme', password='******')
        >>> io = shell.process(['cat'])
        >>> pid = gdb.attach(io, gdbscript='''
        ... call sleep(5)
        ... call puts("Hello from ssh debugger!")
        ... detach
        ... quit
        ... ''')
        >>> io.recvline(timeout=5)  # doctest: +SKIP
        b'Hello from ssh debugger!\n'
        >>> io.sendline(b'This will be echoed back')
        >>> io.recvline()
        b'This will be echoed back\n'
        >>> io.close()
    """
    if context.noptrace:
        log.warn_once("Skipping debug attach since context.noptrace==True")
        return

    # if gdbscript is a file object, then read it; we probably need to run some
    # more gdb script anyway
    if hasattr(gdbscript, 'read'):
        with gdbscript:
            gdbscript = gdbscript.read()

    # enable gdb.attach(p, 'continue')
    if gdbscript and not gdbscript.endswith('\n'):
        gdbscript += '\n'

    # Use a sane default sysroot for Android
    if not sysroot and context.os == 'android':
        sysroot = 'remote:/'

    # gdb script to run before `gdbscript`
    pre = ''
    if not context.native:
        pre += 'set endian %s\n' % context.endian
        pre += 'set architecture %s\n' % get_gdb_arch()
        if sysroot:
            pre += 'set sysroot %s\n' % sysroot

        if context.os == 'android':
            pre += 'set gnutarget ' + _bfdname() + '\n'

        if exe and context.os != 'baremetal':
            pre += 'file "%s"\n' % exe

    # let's see if we can find a pid to attach to
    pid = None
    if isinstance(target, six.integer_types):
        # target is a pid, easy peasy
        pid = target
    elif isinstance(target, str):
        # pidof picks the youngest process
        pidof = proc.pidof

        if context.os == 'android':
            pidof = adb.pidof

        pids = list(pidof(target))
        if not pids:
            log.error('No such process: %s', target)
        pid = pids[0]
        log.info('Attaching to youngest process "%s" (PID = %d)' %
                 (target, pid))
    elif isinstance(target, tubes.ssh.ssh_channel):
        if not target.pid:
            log.error("PID unknown for channel")

        shell = target.parent

        tmpfile = shell.mktemp()
        gdbscript = b'shell rm %s\n%s' % (
            tmpfile, packing._need_bytes(gdbscript, 2, 0x80))
        shell.upload_data(gdbscript or b'', tmpfile)

        cmd = [
            'ssh', '-C', '-t', '-p',
            str(shell.port), '-l', shell.user, shell.host
        ]
        if shell.password:
            if not misc.which('sshpass'):
                log.error("sshpass must be installed to debug ssh processes")
            cmd = ['sshpass', '-p', shell.password] + cmd
        if shell.keyfile:
            cmd += ['-i', shell.keyfile]
        cmd += ['gdb', '-q', target.executable, str(target.pid), '-x', tmpfile]

        misc.run_in_new_terminal(cmd)
        return

    elif isinstance(target, tubes.sock.sock):
        pids = proc.pidof(target)
        if not pids:
            log.error('Could not find remote process (%s:%d) on this machine' %
                      target.sock.getpeername())
        pid = pids[0]

        # Specifically check for socat, since it has an intermediary process
        # if you do not specify "nofork" to the EXEC: argument
        # python(2640)───socat(2642)───socat(2643)───bash(2644)
        if proc.exe(pid).endswith('/socat'):
            time.sleep(0.1)  # give socat a moment to spawn its child
            if proc.children(pid):
                pid = proc.children(pid)[0]

        # We may attach to the remote process after the fork but before it performs an exec.
        # If an exe is provided, wait until the process is actually running the expected exe
        # before we attach the debugger.
        t = Timeout()
        with t.countdown(2):
            while exe and os.path.realpath(
                    proc.exe(pid)) != os.path.realpath(exe) and t.timeout:
                time.sleep(0.1)

    elif isinstance(target, tubes.process.process):
        pid = proc.pidof(target)[0]
        exe = exe or target.executable
    elif isinstance(target, tuple) and len(target) == 2:
        host, port = target

        if context.os != 'android':
            pre += 'target remote %s:%d\n' % (host, port)
        else:
            # Android debugging is done over gdbserver, which can't follow
            # new inferiors (tldr; follow-fork-mode child) unless it is run
            # in extended-remote mode.
            pre += 'target extended-remote %s:%d\n' % (host, port)
            pre += 'set detach-on-fork off\n'

        def findexe():
            for spid in proc.pidof(target):
                sexe = proc.exe(spid)
                name = os.path.basename(sexe)
                # XXX: parse cmdline
                if name.startswith('qemu-') or name.startswith('gdbserver'):
                    exe = proc.cmdline(spid)[-1]
                    return os.path.join(proc.cwd(spid), exe)

        exe = exe or findexe()
    elif isinstance(target, elf.corefile.Corefile):
        pre += 'target core "%s"\n' % target.path
    else:
        log.error("don't know how to attach to target: %r", target)

    # if we have a pid but no exe, just look it up in /proc/
    if pid and not exe:
        exe_fn = proc.exe
        if context.os == 'android':
            exe_fn = adb.proc_exe
        exe = exe_fn(pid)

    if not pid and not exe and not ssh:
        log.error('could not find target process')

    gdb_binary = binary()
    cmd = [gdb_binary]

    if gdb_args:
        cmd += gdb_args

    if context.gdbinit:
        cmd += ['-nh']  # ignore ~/.gdbinit
        cmd += ['-x', context.gdbinit]  # load custom gdbinit

    cmd += ['-q']

    if exe and context.native:
        if not ssh and not os.path.isfile(exe):
            log.error('No such file: %s', exe)
        cmd += [exe]

    if pid and not context.os == 'android':
        cmd += [str(pid)]

    if context.os == 'android' and pid:
        runner = _get_runner()
        which = _get_which()
        gdb_cmd = _gdbserver_args(pid=pid, which=which)
        gdbserver = runner(gdb_cmd)
        port = _gdbserver_port(gdbserver, None)
        host = context.adb_host
        pre += 'target extended-remote %s:%i\n' % (context.adb_host, port)

        # gdbserver on Android sets 'detach-on-fork on' which breaks things
        # when you're trying to debug anything that forks.
        pre += 'set detach-on-fork off\n'

    if api:
        # create a UNIX socket for talking to GDB
        socket_dir = tempfile.mkdtemp()
        socket_path = os.path.join(socket_dir, 'socket')
        bridge = os.path.join(os.path.dirname(__file__), 'gdb_api_bridge.py')

        # inject the socket path and the GDB Python API bridge
        pre = 'python socket_path = ' + repr(socket_path) + '\n' + \
              'source ' + bridge + '\n' + \
              pre

    gdbscript = pre + (gdbscript or '')

    if gdbscript:
        tmp = tempfile.NamedTemporaryFile(prefix='pwn',
                                          suffix='.gdb',
                                          delete=False,
                                          mode='w+')
        log.debug('Wrote gdb script to %r\n%s', tmp.name, gdbscript)
        gdbscript = 'shell rm %s\n%s' % (tmp.name, gdbscript)

        tmp.write(gdbscript)
        tmp.close()
        cmd += ['-x', tmp.name]

    log.info('running in new terminal: %s', cmd)

    if api:
        # prevent gdb_faketerminal.py from messing up api doctests
        def preexec_fn():
            os.environ['GDB_FAKETERMINAL'] = '0'
    else:
        preexec_fn = None
    gdb_pid = misc.run_in_new_terminal(cmd, preexec_fn=preexec_fn)

    if pid and context.native:
        proc.wait_for_debugger(pid, gdb_pid)

    if not api:
        return gdb_pid

    # connect to the GDB Python API bridge
    from rpyc import BgServingThread
    from rpyc.utils.factory import unix_connect
    if six.PY2:
        retriable = socket.error
    else:
        retriable = ConnectionRefusedError, FileNotFoundError

    t = Timeout()
    with t.countdown(10):
        while t.timeout:
            try:
                conn = unix_connect(socket_path)
                break
            except retriable:
                time.sleep(0.1)
        else:
            # Check to see if RPyC is installed at all in GDB
            rpyc_check = [
                gdb_binary, '--nx', '-batch', '-ex',
                'python import rpyc; import sys; sys.exit(123)'
            ]

            if 123 != tubes.process.process(rpyc_check).poll(block=True):
                log.error('Failed to connect to GDB: rpyc is not installed')

            # Check to see if the socket ever got created
            if not os.path.exists(socket_path):
                log.error(
                    'Failed to connect to GDB: Unix socket %s was never created',
                    socket_path)

            # Check to see if the remote RPyC client is a compatible version
            version_check = [
                gdb_binary, '--nx', '-batch', '-ex',
                'python import platform; print(platform.python_version())'
            ]
            gdb_python_version = tubes.process.process(
                version_check).recvall().strip()
            python_version = str(platform.python_version())

            if gdb_python_version != python_version:
                log.error(
                    'Failed to connect to GDB: Version mismatch (%s vs %s)',
                    gdb_python_version, python_version)

            # Don't know what happened
            log.error('Failed to connect to GDB: Unknown error')

    # now that connection is up, remove the socket from the filesystem
    os.unlink(socket_path)
    os.rmdir(socket_dir)

    # create a thread for receiving breakpoint notifications
    BgServingThread(conn, callback=lambda: None)

    return gdb_pid, Gdb(conn)
Example #5
def start_management(filepath=None, use_daemon=False):

    if sys.platform == "darwin":
        macos_bigsur_wants_layer()

    if not check_dependencies():
        sys.exit(1)

    set_app_user_model_id()
    set_windows_event_loop_policy()

    from PySide2.QtWidgets import QApplication, QSplashScreen, QMessageBox
    from PySide2.QtGui import QFontDatabase, QPixmap, QIcon
    from PySide2.QtCore import Qt

    from .config import FONT_LOCATION, IMG_LOCATION, Conf

    app = QApplication(sys.argv)
    app.setApplicationDisplayName("angr management")
    app.setApplicationName("angr management")
    icon_location = os.path.join(IMG_LOCATION, 'angr.png')
    QApplication.setWindowIcon(QIcon(icon_location))

    # URL scheme
    from .logic.url_scheme import AngrUrlScheme
    scheme = AngrUrlScheme()
    registered, _ = scheme.is_url_scheme_registered()
    supported = scheme.is_url_scheme_supported()
    if not registered and supported:
        btn = QMessageBox.question(None, "Setting up angr URL scheme",
                "angr URL scheme allows \"deep linking\" from browsers and other applications by registering the "
                "angr:// protocol to the current user. Do you want to register it? You may unregister at any "
                "time in Preferences.",
                defaultButton=QMessageBox.Yes)
        if btn == QMessageBox.Yes:
            try:
                AngrUrlScheme().register_url_scheme()
            except (ValueError, FileNotFoundError) as ex:
                QMessageBox.warning(None, "Error in registering angr URL scheme",
                        "Failed to register the angr URL scheme.\n"
                        "The following exception occurred:\n"
                        + str(ex))

    # Make + display splash screen
    splashscreen_location = os.path.join(IMG_LOCATION, 'angr-splash.png')
    splash_pixmap = QPixmap(splashscreen_location)
    splash = QSplashScreen(splash_pixmap, Qt.WindowStaysOnTopHint)
    icon_location = os.path.join(IMG_LOCATION, 'angr.png')
    splash.setWindowIcon(QIcon(icon_location))
    splash.setWindowFlags(Qt.WindowStaysOnTopHint | Qt.FramelessWindowHint)
    splash.setEnabled(False)
    splash.show()
    for _ in range(5):
        time.sleep(0.01)
        app.processEvents()

    from .logic import GlobalInfo
    from .ui.css import CSS
    from .ui.main_window import MainWindow
    from .daemon import daemon_exists, run_daemon_process, daemon_conn
    from .daemon.client import ClientService

    # Load fonts
    QFontDatabase.addApplicationFont(os.path.join(FONT_LOCATION, "SourceCodePro-Regular.ttf"))
    QFontDatabase.addApplicationFont(os.path.join(FONT_LOCATION, "DejaVuSansMono.ttf"))

    # Initialize font-related configuration
    Conf.init_font_config()
    # Set global font
    app.setFont(Conf.ui_default_font)

    GlobalInfo.gui_thread = threading.get_ident()

    # apply the CSS
    app.setStyleSheet(CSS.global_css())

    if use_daemon:
        # connect to daemon (if there is one)
        if not daemon_exists():
            print("[+] Starting a new daemon.")
            run_daemon_process()
            time.sleep(0.2)
        else:
            print("[+] Connecting to an existing angr management daemon.")

        while True:
            try:
                GlobalInfo.daemon_conn = daemon_conn(service=ClientService)
            except ConnectionRefusedError:
                print("[-] Connection failed... try again.")
                time.sleep(0.4)
                continue
            print("[+] Connected to daemon.")
            break

        from rpyc import BgServingThread
        th = BgServingThread(GlobalInfo.daemon_conn)

    file_to_open = filepath if filepath else None
    main_window = MainWindow()
    splash.finish(main_window)

    if file_to_open is not None:
        main_window.load_file(file_to_open)

    app.exec_()
Example #6
    def _ce_proxy(cls):
        """
        Dynamically connect to the Central Engine.
        This is a class method.
        """
        stack = inspect.stack()
        # The upper stack is either the EP, or the library that derives this
        stack_fpath = stack[1][1]
        stack_fname = os.path.split(stack_fpath)[1]
        proxy = None

        # If the upper stack is not ExecutionProcess, the library is derived
        if stack_fname != 'ExecutionProcess.py':
            # The EP stack is always the last
            ep_code = stack[-1][0]
            # It's impossible to access the globals from the EP any other way
            p = ep_code.f_globals.get('ceProxy')
            if p:
                return p.root
        del stack, stack_fpath

        # Try to reuse the old connection
        try:
            cls.__ce_proxy.echo('ping')
            return cls.__ce_proxy
        except Exception:
            pass

        # RPyc config
        config = {
            'allow_pickle': True,
            'allow_getattr': True,
            'allow_setattr': True,
            'allow_delattr': True,
            'allow_all_attrs': True,
        }

        ce_ip, ce_port = cls.proxy_path.split(':')

        # If the old connection is broken, connect to the RPyC server
        try:
            # Translate the XML-RPC port into the RPyC port: RPyC port = XML-RPC port + 10
            ce_port = int(ce_port) + 10
            proxy = rpyc.connect(ce_ip, ce_port, config=config)
            proxy.root.hello('lib::{}'.format(cls.epName))
        except Exception:
            print('*ERROR* Cannot connect to CE path `{}`! Exiting!'.format(
                cls.proxy_path))
            raise Exception('Cannot connect to CE')

        # Authenticate on RPyc server
        try:
            proxy.root.login(cls.userName, 'EP')
        except Exception:
            print(
                '*ERROR* Cannot authenticate on CE path `{}`! Exiting!'.format(
                    cls.proxy_path))
            raise Exception('Cannot authenticate on CE')

        # Launch bg server
        try:
            BgServingThread(proxy)
            cls.__ce_proxy = proxy.root
            return cls.__ce_proxy
        except Exception:
            print('*ERROR* Cannot launch Bg serving thread! Exiting!')
            raise Exception('Cannot launch Bg thread')
Example #7
    def _create_conn(self, ce_ip, ce_port, ep_names, debug=False):
        """
        Helper for creating a Central Engine connection, the most basic func.
        """
        proxy = None
        config = {
            'allow_pickle': True,
            'allow_getattr': True,
            'allow_setattr': True,
            'allow_delattr': True,
            'allow_all_attrs': True,
        }

        def close_conn():
            # `proxy` comes from the enclosing scope; rebinding it here would make
            # it a local name and silently break the close() call below.
            try:
                proxy.close()
            except Exception:
                pass

        # Connect to RPyc server
        try:
            r_stream = rpyc.SocketStream.connect(ce_ip, ce_port, timeout=3.0)
            proxy = rpyc.connect_stream(r_stream,
                                        service=TwisterClientService,
                                        config=config)
            logPrint('Client Debug: Connected to CE at `{}:{}`...'.format(
                ce_ip, ce_port))
        except Exception as e:
            if debug:
                logPrint('*ERROR* Cannot connect to CE path `{}:{}`! '\
                    'Exception: `{}`!'.format(ce_ip, ce_port, e))
            close_conn()
            return None

        # Authenticate on RPyc server
        try:
            check = proxy.root.login(self.user_name, 'EP')
            if check:
                logPrint('Client Debug: Authentication successful!')
        except Exception as e:
            check = False

        if not check:
            if debug:
                logPrint(
                    '*ERROR* Cannot authenticate on CE path `{}:{}`! Invalid login!'
                    .format(ce_ip, ce_port))
            close_conn()
            return None

        # Say Hello and Register all EPs on the current Central Engine
        if ep_names:
            try:
                proxy.ping(data='Hello', timeout=3)
                # Call the user status to create the User Project
                s = proxy.root.get_user_variable('user_roles')
                if not s:
                    logPrint(
                        '*ERROR* Cannot register! Cannot get roles for user `{}`!'
                        .format(self.user_name))
                    close_conn()
                    return None
                # Fire up the User Service
                proxy.root.read_file('~/twister/config/fwmconfig.xml')
            except Exception as e:
                logPrint('Exception: `{}`'.format(e))
                check = False

            try:
                proxy.root.hello('client', {'eps': ep_names})
                logPrint('Client Debug: Register EPs successful!')
            except Exception as e:
                logPrint('Exception: `{}`'.format(e))
                check = False

        if not check:
            if debug:
                logPrint(
                    '*ERROR* Cannot register! Cannot send hello on CE path `{}:{}`!'
                    .format(ce_ip, ce_port))
            close_conn()
            return None

        BgServingThread(proxy)
        return proxy
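
An illustrative call site for the helper above; the address, EP names, and the echo() call are placeholders (echo() is assumed to exist on the Central Engine root, as it is used in Example #6).

    def _example_connect(self):
        """Illustrative wrapper, not part of the original code."""
        proxy = self._create_conn('127.0.0.1', 8010, ['EP-1', 'EP-2'], debug=True)
        if proxy is None:
            logPrint('Client Debug: Could not create the CE connection!')
            return None
        # The connection already has a BgServingThread attached by _create_conn.
        proxy.root.echo('ping')  # assumed CE keep-alive call, as in Example #6
        return proxy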