Esempio n. 1
0
    def __do_deferred_initialization(self):
        if self._initialized:
            return

        with context.local(device=self.serial):
            abi = getprop('ro.product.cpu.abi')
            context.clear()
            context.arch = str(abi)
            self._arch = context.arch
            self._bits = context.bits
            self._endian = context.endian

        if self.port == 'emulator':
            emulator, port = self.serial.split('-')
            port = int(port)
            try:
                with remote('localhost', port, level='error') as r:
                    r.recvuntil('OK')
                    r.recvline()  # Rest of the line
                    r.sendline('avd name')
                    self.avd = r.recvline().strip()
            except:
                pass

        self._initialized = True
Esempio n. 2
0
    def __do_deferred_initialization(self):
        if self._initialized:
            return

        with context.local(device=self.serial):
            abi = str(properties.ro.product.cpu.abi)
            context.clear()
            context.arch = str(abi)
            self._arch = context.arch
            self._bits = context.bits
            self._endian = context.endian

        if self.port == 'emulator':
            emulator, port = self.serial.split('-')
            port = int(port)
            try:
                with remote('localhost', port, level='error') as r:
                    r.recvuntil('OK')
                    r.recvline() # Rest of the line
                    r.sendline('avd name')
                    self.avd = r.recvline().strip()
            except:
                pass

        self._initialized = True
Esempio n. 3
0
    def clean_and_log(self, timeout=0.05):
        r"""clean_and_log(timeout = 0.05)

        Works exactly as :meth:`pwnlib.tubes.tube.tube.clean`, but logs received
        data with :meth:`pwnlib.self.info`.

        Returns:

            All data received

        Examples:

            >>> def recv(n, data=['', 'hooray_data']):
            ...     while data: return data.pop()
            >>> t = tube()
            >>> t.recv_raw      = recv
            >>> t.connected_raw = lambda d: True
            >>> t.fileno        = lambda: 1234
            >>> with context.local(log_level='info'):
            ...     data = t.clean_and_log() #doctest: +ELLIPSIS
            [DEBUG] Received 0xb bytes:
                'hooray_data'
            >>> data
            'hooray_data'
            >>> context.clear()
        """
        with context.local(log_level='debug'):
            return self.clean(timeout)
Esempio n. 4
0
def main():
    if len(sys.argv) < 2:
        parser.print_usage()
        sys.exit()
    args = parser.parse_args()
    with context.local(log_console=sys.stderr):
        commands[args.command](args)
Esempio n. 5
0
    def setLevel(self, level):
        """setLevel(level)

        Set the logging level for the underlying logger.
        """
        with context.local(log_level=level):
            self._logger.setLevel(context.log_level)
Esempio n. 6
0
    def setLevel(self, level):
        """setLevel(level)

        Set the logging level for the underlying logger.
        """
        with context.local(log_level=level):
            self._logger.setLevel(context.log_level)
Esempio n. 7
0
    def __getattr__(self, name):
        """Provides scoped access to ``adb`` module propertise, in the context
        of this device.

        .. doctest::
           :skipif: skip_android

            >>> property = 'ro.build.fingerprint'
            >>> device = adb.wait_for_device()
            >>> adb.getprop(property) == device.getprop(property)
            True
        """
        if name.startswith('_'):
            raise AttributeError(name)

        with context.local(device=self):
            g = globals()

            if name not in g:
                raise AttributeError('%r object has no attribute %r' %
                                     (type(self).__name__, name))
            value = g[name]

        if not hasattr(value, '__call__'):
            return value

        return self.__wrapped(value)
Esempio n. 8
0
 def __str__(self):
     frame = ""
     with context.local(arch=self.arch):
         for register_offset in sorted(self.register_offsets):
             if len(frame) < register_offset:
                 frame += "\x00"*(register_offset - len(frame))
             frame += pack(self[self.registers[register_offset]])
     return frame
Esempio n. 9
0
 def __bytes__(self):
     frame = b""
     with context.local(arch=self.arch):
         for register_offset in sorted(self.register_offsets):
             if len(frame) < register_offset:
                 frame += b"\x00" * (register_offset - len(frame))
             frame += pack(self[self.registers[register_offset]])
     return frame
Esempio n. 10
0
def corefile(process):
    r"""Drops a core file for a running local process.

    Note:
        You should use :meth:`.process.corefile` instead of using this method directly.

    Arguments:
        process: Process to dump

    Returns:
        :class:`.Core`: The generated core file

    Example:

        >>> io = process('bash')
        >>> core = gdb.corefile(io)
        >>> core.exe.name # doctest: +ELLIPSIS
        '.../bin/bash'
    """

    if context.noptrace:
        log.warn_once("Skipping corefile since context.noptrace==True")
        return

    corefile_path = './core.%s.%i' % (os.path.basename(process.executable),
                                    process.pid)

    # Due to https://sourceware.org/bugzilla/show_bug.cgi?id=16092
    # will disregard coredump_filter, and will not dump private mappings.
    if version() < (7,11):
        log.warn_once('The installed GDB (%s) does not emit core-dumps which '
                      'contain all of the data in the process.\n'
                      'Upgrade to GDB >= 7.11 for better core-dumps.' % binary())

    # This is effectively the same as what the 'gcore' binary does
    gdb_args = ['-batch',
                '-q',
                '-nx',
                '-ex', '"set pagination off"',
                '-ex', '"set height 0"',
                '-ex', '"set width 0"',
                '-ex', '"set use-coredump-filter on"',
                '-ex', '"generate-core-file %s"' % corefile_path,
                '-ex', 'detach']

    with context.local(terminal = ['sh', '-c']):
        with context.quiet:
            pid = attach(process, gdb_args=gdb_args)
            log.debug("Got GDB pid %d", pid)
            try:
                os.waitpid(pid, 0)
            except Exception:
                pass

    if not os.path.exists(corefile_path):
        log.error("Could not generate a corefile for process %d", process.pid)

    return elf.corefile.Core(corefile_path)
Esempio n. 11
0
    def _srop_call(self, resolvable, arguments):
        # Check that the call is a valid syscall
        resolvable    = 'SYS_' + resolvable.lower()
        syscall_number = getattr(constants, resolvable, None)
        if syscall_number is None:
            return False

        log.info_once("Using sigreturn for %r" % resolvable)

        # Find an int 0x80 or similar instruction we can use
        syscall_gadget       = None
        syscall_instructions = srop.syscall_instructions[context.arch]

        for instruction in syscall_instructions:
            syscall_gadget = self.find_gadget([instruction])
            if syscall_gadget:
                break
        else:
            log.error("Could not find any instructions in %r" % syscall_instructions)

        # Generate the SROP frame which would invoke the syscall
        with context.local(arch=self.elfs[0].arch):
            frame         = srop.SigreturnFrame()
            frame.pc      = syscall_gadget
            frame.syscall = syscall_number

            try:
                SYS_sigreturn  = constants.SYS_sigreturn
            except AttributeError:
                SYS_sigreturn  = constants.SYS_rt_sigreturn

            for register, value in zip(frame.arguments, arguments):
                if not isinstance(value, six.integer_types + (Unresolved,)):
                    frame[register] = AppendedArgument(value)
                else:
                    frame[register] = value

        # Set up a call frame which will set EAX and invoke the syscall
        call = Call('SYS_sigreturn',
                    syscall_gadget,
                    [SYS_sigreturn],
                    abi.ABI.sigreturn())

        self.raw(call)
        self.raw(frame)


        # We do not expect to ever recover after the syscall, as it would
        # require something like 'int 0x80; ret' which does not ever occur
        # in the wild.
        self.migrated = True

        return True
Esempio n. 12
0
    def _srop_call(self, resolvable, arguments):
        # Check that the call is a valid syscall
        resolvable    = 'SYS_' + resolvable.lower()
        syscall_number = getattr(constants, resolvable, None)
        if syscall_number is None:
            return False

        log.info_once("Using sigreturn for %r" % resolvable)

        # Find an int 0x80 or similar instruction we can use
        syscall_gadget       = None
        syscall_instructions = srop.syscall_instructions[context.arch]

        for instruction in syscall_instructions:
            syscall_gadget = self.find_gadget([instruction])
            if syscall_gadget:
                break
        else:
            log.error("Could not find any instructions in %r" % syscall_instructions)

        # Generate the SROP frame which would invoke the syscall
        with context.local(arch=self.elfs[0].arch):
            frame         = srop.SigreturnFrame()
            frame.pc      = syscall_gadget
            frame.syscall = syscall_number

            try:
                SYS_sigreturn  = constants.SYS_sigreturn
            except AttributeError:
                SYS_sigreturn  = constants.SYS_rt_sigreturn

            for register, value in zip(frame.arguments, arguments):
                frame[register] = value

        # Set up a call frame which will set EAX and invoke the syscall
        call = Call('SYS_sigreturn',
                    syscall_gadget,
                    [SYS_sigreturn],
                    abi.ABI.sigreturn())

        self.raw(call)
        self.raw(frame)


        # We do not expect to ever recover after the syscall, as it would
        # require something like 'int 0x80; ret' which does not ever occur
        # in the wild.
        self.migrated = True

        return True
Esempio n. 13
0
    def get_fill_size(self, size=None):
        """
        Retrieves the default fill size for this buffer class.

        Arguments:
            size (int): (Optional) If set and not None, returns the size variable back.

        Returns:
            Fill size as integer if size == None, else size.
        """
        if size is None:
            size = self.buffer_fill_size

        with context.local(buffer_size=size):
            return context.buffer_size
Esempio n. 14
0
    def get_fill_size(self, size=None):
        """
        Retrieves the default fill size for this buffer class.

        Arguments:
            size (int): (Optional) If set and not None, returns the size variable back.

        Returns:
            Fill size as integer if size == None, else size.
        """
        if size is None:
            size = self.buffer_fill_size

        with context.local(buffer_size=size):
            return context.buffer_size
Esempio n. 15
0
def wait_for_device(kick=False):
    """Waits for a device to be connected.

    By default, waits for the currently-selected device (via ``context.device``).
    To wait for a specific device, set ``context.device``.
    To wait for *any* device, clear ``context.device``.

    Return:
        An ``AdbDevice`` instance for the device.

    Examples:

    .. doctest::
       :skipif: skip_android

        >>> device = adb.wait_for_device()
    """
    with log.waitfor("Waiting for device to come online") as w:
        with AdbClient() as c:
            if kick:
                try:
                    c.reconnect()
                except Exception:
                    pass

            serial = ''
            if context.device:
                serial = str(context.device)

        with AdbClient() as c:
            c.wait_for_device(serial)

        for device in devices():
            if context.device == device:
                return device

            if not serial:
                break
        else:
            log.error("Could not find any devices")

        with context.local(device=device):
            # There may be multiple devices, so context.device is
            # insufficient.  Pick the first device reported.
            w.success('%s (%s %s %s)' %
                      (device, product(), build(), _build_date()))

            return context.device
Esempio n. 16
0
def wait_for_device(kick=False):
    """Waits for a device to be connected.

    By default, waits for the currently-selected device (via ``context.device``).
    To wait for a specific device, set ``context.device``.
    To wait for *any* device, clear ``context.device``.

    Return:
        An ``AdbDevice`` instance for the device.

    Examples:

        >>> device = adb.wait_for_device()
    """
    with log.waitfor("Waiting for device to come online") as w:
        with AdbClient() as c:
            if kick:
                try:
                    c.reconnect()
                except Exception:
                    pass

            serial = ''
            if context.device:
                serial = str(context.device)

        with AdbClient() as c:
            c.wait_for_device(serial)

        for device in devices():
            if context.device == device:
                return device

            if not serial:
                break
        else:
            log.error("Could not find any devices")

        with context.local(device=device):
            # There may be multiple devices, so context.device is
            # insufficient.  Pick the first device reported.
            w.success('%s (%s %s %s)' % (device,
                                         product(),
                                         build(),
                                         _build_date()))

            return context.device
Esempio n. 17
0
    def enable_uart(self):
        """Reboots the device with kernel logging to the UART enabled."""
        model = getprop('ro.product.model')

        known_commands = {
            'Nexus 4': None,
            'Nexus 5': None,
            'Nexus 6': 'oem config console enable',
            'Nexus 5X': None,
            'Nexus 6P': 'oem uart enable',
            'Nexus 7': 'oem uart-on',
        }

        with log.waitfor('Enabling kernel UART'):

            if model not in known_commands:
                log.error("Device UART is unsupported.")

            command = known_commands[model]

            if command is None:
                w.success('Always enabled')
                return

            # Check the current commandline, it may already be enabled.
            if any(s.startswith('console=tty') for s in self.cmdline.split()):
                w.success("Already enabled")
                return

            # Need to be root
            with context.local(device=context.device):
                # Save off the command line before rebooting to the bootloader
                cmdline = kernel.cmdline

                reboot_bootloader()

                # Wait for device to come online
                while context.device not in fastboot(['devices', ' -l']):
                    time.sleep(0.5)

                # Try the 'new' way
                fastboot(command.split())
                fastboot(['continue'])
                wait_for_device()
Esempio n. 18
0
    def enable_uart(self):
        """Reboots the device with kernel logging to the UART enabled."""
        model = str(properties.ro.product.model)

        known_commands = {
            'Nexus 4': None,
            'Nexus 5': None,
            'Nexus 6': 'oem config console enable',
            'Nexus 5X': None,
            'Nexus 6P': 'oem uart enable',
            'Nexus 7': 'oem uart-on',
        }

        with log.waitfor('Enabling kernel UART') as w:

            if model not in known_commands:
                log.error("Device UART is unsupported.")

            command = known_commands[model]

            if command is None:
                w.success('Always enabled')
                return

            # Check the current commandline, it may already be enabled.
            if any(s.startswith('console=tty') for s in self.cmdline.split()):
                w.success("Already enabled")
                return

            # Need to be root
            with context.local(device=context.device):
                # Save off the command line before rebooting to the bootloader
                cmdline = kernel.cmdline

                reboot_bootloader()

                # Wait for device to come online
                while context.device not in fastboot(['devices',' -l']):
                    time.sleep(0.5)

                # Try the 'new' way
                fastboot(command.split())
                fastboot(['continue'])
                wait_for_device()
Esempio n. 19
0
    def _find_linkmap_assisted(self, path):
        """Uses an ELF file to assist in finding the link_map.
        """
        if isinstance(path, ELF):
            path = path.path

        # Load a fresh copy of the ELF
        with context.local(log_level='error'):
            elf = ELF(path)
        elf.address = self.libbase

        w = self.waitfor("Loading from %r" % elf.path)

        # Save our real leaker
        real_leak = self.leak

        # Create a fake leaker which just leaks out of the 'loaded' ELF
        # However, we may load things which are outside of the ELF (e.g.
        # the linkmap or GOT) so we need to fall back on the real leak.
        @MemLeak
        def fake_leak(address):
            try:
                return elf.read(address, 4)
            except ValueError:
                return real_leak.b(address)

        # Save off our real leaker, use the fake leaker
        self.leak = fake_leak

        # Get useful pointers for resolving the linkmap faster
        w.status("Searching for DT_PLTGOT")
        pltgot = self._find_dt(constants.DT_PLTGOT)

        w.status("Searching for DT_DEBUG")
        debug  = self._find_dt(constants.DT_DEBUG)

        # Restore the real leaker
        self.leak = real_leak

        # Find the linkmap using the helper pointers
        self._find_linkmap(pltgot, debug)
        self.success('Done')
Esempio n. 20
0
    def _find_linkmap_assisted(self, path):
        """Uses an ELF file to assist in finding the link_map.
        """
        if isinstance(path, ELF):
            path = path.path

        # Load a fresh copy of the ELF
        with context.local(log_level='error'):
            elf = ELF(path)
        elf.address = self.libbase

        w = self.waitfor("Loading from %r" % elf.path)

        # Save our real leaker
        real_leak = self.leak

        # Create a fake leaker which just leaks out of the 'loaded' ELF
        # However, we may load things which are outside of the ELF (e.g.
        # the linkmap or GOT) so we need to fall back on the real leak.
        @MemLeak
        def fake_leak(address):
            try:
                return elf.read(address, 4)
            except ValueError:
                return real_leak.b(address)

        # Save off our real leaker, use the fake leaker
        self.leak = fake_leak

        # Get useful pointers for resolving the linkmap faster
        w.status("Searching for DT_PLTGOT")
        pltgot = self._find_dt(constants.DT_PLTGOT)

        w.status("Searching for DT_DEBUG")
        debug = self._find_dt(constants.DT_DEBUG)

        # Restore the real leaker
        self.leak = real_leak

        # Find the linkmap using the helper pointers
        self._find_linkmap(pltgot, debug)
        self.success('Done')
Esempio n. 21
0
def corefile(process):
    r"""Drops a core file for the process.

    Arguments:
        process: Process to dump

    Returns:
        :class:`.Core`: The generated core file
    """

    if context.noptrace:
        log.warn_once("Skipping corefile since context.noptrace==True")
        return

    corefile_path = './core.%s.%i' % (os.path.basename(process.executable),
                                    process.pid)

    # Due to https://sourceware.org/bugzilla/show_bug.cgi?id=16092
    # will disregard coredump_filter, and will not dump private mappings.
    if version() < (7,11):
        log.warn_once('The installed GDB (%s) does not emit core-dumps which '
                      'contain all of the data in the process.\n'
                      'Upgrade to GDB >= 7.11 for better core-dumps.' % binary())

    # This is effectively the same as what the 'gcore' binary does
    gdb_args = ['-batch',
                '-q',
                '--nx',
                '-ex', '"set pagination off"',
                '-ex', '"set height 0"',
                '-ex', '"set width 0"',
                '-ex', '"set use-coredump-filter on"',
                '-ex', '"generate-core-file %s"' % corefile_path,
                '-ex', 'detach']

    with context.local(terminal = ['sh', '-c']):
        with context.quiet:
            pid = attach(process, gdb_args=gdb_args)
            os.waitpid(pid, 0)

    return elf.corefile.Core(corefile_path)
Esempio n. 22
0
def corefile(process):
    r"""Drops a core file for the process.

    Arguments:
        process: Process to dump

    Returns:
        :class:`.Core`: The generated core file
    """

    if context.noptrace:
        log.warn_once("Skipping corefile since context.noptrace==True")
        return

    corefile_path = './core.%s.%i' % (os.path.basename(process.executable),
                                    process.pid)

    # Due to https://sourceware.org/bugzilla/show_bug.cgi?id=16092
    # will disregard coredump_filter, and will not dump private mappings.
    if version() < (7,11):
        log.warn_once('The installed GDB (%s) does not emit core-dumps which '
                      'contain all of the data in the process.\n'
                      'Upgrade to GDB >= 7.11 for better core-dumps.' % binary())

    # This is effectively the same as what the 'gcore' binary does
    gdb_args = ['-batch',
                '-q',
                '--nx',
                '-ex', '"set pagination off"',
                '-ex', '"set height 0"',
                '-ex', '"set width 0"',
                '-ex', '"set use-coredump-filter on"',
                '-ex', '"generate-core-file %s"' % corefile_path,
                '-ex', 'detach']

    with context.local(terminal = ['sh', '-c']):
        with context.quiet:
            pid = attach(process, gdb_args=gdb_args)
            os.waitpid(pid, 0)

    return elf.corefile.Core(corefile_path)
Esempio n. 23
0
    def __getattr__(self, name):
        """Provides scoped access to ``adb`` module propertise, in the context
        of this device.

        >>> property = 'ro.build.fingerprint'
        >>> device = adb.wait_for_device()
        >>> adb.getprop(property) == device.getprop(property)
        True
        """
        with context.local(device=self):
            g = globals()

            if name not in g:
                raise AttributeError('%r object has no attribute %r' % (type(self).__name__,name))

            value = g[name]

        if not hasattr(value, '__call__'):
            return value

        return self.__wrapped(value)
Esempio n. 24
0
    def libs(self):
        """libs() -> dict

        Return a dictionary mapping the path of each shared library loaded
        by the process to the address it is loaded at in the process' address
        space.

        If ``/proc/$PID/maps`` for the process cannot be accessed, the output
        of ``ldd`` alone is used.  This may give inaccurate results if ASLR
        is enabled.
        """
        with context.local(log_level='error'):
            ldd = process(['ldd', self.executable]).recvall()

        maps = parse_ldd_output(ldd)

        try:
            maps_raw = open('/proc/%d/maps' % self.pid).read()
        except IOError:
            return maps

        # Enumerate all of the libraries actually loaded right now.
        for line in maps_raw.splitlines():
            if '/' not in line: continue
            path = line[line.index('/'):]
            path = os.path.realpath(path)
            if path not in maps:
                maps[path]=0

        for lib in maps:
            path = os.path.realpath(lib)
            for line in maps_raw.splitlines():
                if line.endswith(path):
                    address = line.split('-')[0]
                    maps[lib] = int(address, 16)
                    break

        return maps
Esempio n. 25
0
    def libs(self):
        """libs() -> dict

        Return a dictionary mapping the path of each shared library loaded
        by the process to the address it is loaded at in the process' address
        space.

        If ``/proc/$PID/maps`` for the process cannot be accessed, the output
        of ``ldd`` alone is used.  This may give inaccurate results if ASLR
        is enabled.
        """
        with context.local(log_level='error'):
            ldd = process(['ldd', self.executable]).recvall()

        maps = parse_ldd_output(ldd)

        try:
            maps_raw = open('/proc/%d/maps' % self.pid).read()
        except IOError:
            return maps

        # Enumerate all of the libraries actually loaded right now.
        for line in maps_raw.splitlines():
            if '/' not in line: continue
            path = line[line.index('/'):]
            path = os.path.realpath(path)
            if path not in maps:
                maps[path]=0

        for lib in maps:
            path = os.path.realpath(lib)
            for line in maps_raw.splitlines():
                if line.endswith(path):
                    address = line.split('-')[0]
                    maps[lib] = int(address, 16)
                    break

        return maps
Esempio n. 26
0
def _run_handlers():
    """_run_handlers()

    Run registered handlers.  They run in the reverse order of which they were
    registered.

    If a handler raises an exception, it will be printed but nothing else
    happens, i.e. other handlers will be run.
    """
    for _ident, (func, args, kwargs, ctx) in \
        sorted(_handlers.items(), reverse = True):
        try:
            with context.local():
                context.clear()
                context.update(**ctx)
                func(*args, **kwargs)
        except SystemExit:
            pass
        except Exception:
            # extract the current exception and rewind the traceback to where it
            # originated
            typ, val, tb = sys.exc_info()
            traceback.print_exception(typ, val, tb.tb_next)
Esempio n. 27
0
def _run_handlers():
    """_run_handlers()

    Run registered exit-handlers.  They run in the reverse order of which they
    were registered.

    If a handler raises an exception, it will be printed but nothing else
    happens, i.e. other handlers will be run and `sys.excepthook` will not be
    called for that reason.
    """
    context.clear()
    for _ident, (func, args, kwargs, ctx) in \
        sorted(_handlers.items(), reverse = True):
        try:
            with context.local(**ctx):
                func(*args, **kwargs)
        except SystemExit:
            pass
        except Exception:
            # extract the current exception and rewind the traceback to where it
            # originated
            typ, val, tb = sys.exc_info()
            traceback.print_exception(typ, val, tb.tb_next)
Esempio n. 28
0
def main():
    args = parser.parse_args()
    with context.local(log_console = sys.stderr):
        commands[args.command](args)
Esempio n. 29
0
    def __init__(self, *a, **kw):
        #: The PT_NOTE segments
        #: A coredump file can have multiple note segments for multithreaded program
        #: crashes. Each PT_NOTE stands for a single thread context.
        self.core_segments = []

        #: The NT_PRSTATUS object
        #: A coredump file can have single PT_NOTE segment but multiple NT_PRSTATUS object
        #: due to multithreaded program crashes.
        self.prstatus = []

        #: The NT_PRPSINFO object
        self.prpsinfo = None

        #: The NT_SIGINFO object
        #: Same as NT_PRSTATUS
        self.siginfo = []

        #: :class:`dict`: Dictionary of memory mappings from ``address`` to ``name``
        self.mappings = []
        self.mapping_cnt = 0

        #: :class:`int`: Address of the stack base
        self.stack = None

        #: :class`PTHeap`: Ptmalloc heap information
        self.ptheap = None

        #: :class`JEHeap`: Jemalloc heap information
        self.jeheap = None

        #: :class:`dict`: Environment variables read from the stack.  Keys are
        #: the environment variable name, values are the memory address of the
        #: variable.
        #:
        #: Note: Use with the :meth:`.ELF.string` method to extract them.
        #:
        #: Note: If FOO=BAR is in the environment, self.env['FOO'] is the
        #:       address of the string "BAR\x00".
        self.env = {}

        #: :class:`int`: Pointer to envp on the stack
        self.envp_address = 0

        #: :class:`list`: List of addresses of arguments on the stack.
        self.argv = []

        #: :class:`int`: Pointer to argv on the stack
        self.argv_address = 0

        #: :class:`int`: Number of arguments passed
        self.argc = 0

        #: :class:`int`: Pointer to argc on the stack
        self.argc_address = 0

        # Pointer to the executable filename on the stack
        self.at_execfn = 0

        # Pointer to the entry point
        self.at_entry = 0

        #: :class:`list[int]`: Sorted mapping intervals that have permission
        #: NOTE every two numbers decides start and end of the interval
        self.intvls = []

        #: :class:`dict{(vaddr, sz): cstr}`: Suspicious patterns in the memory
        self.payloads = {}

        try:
            super(Corefile, self).__init__(*a, **kw)
        except IOError:
            log.warning(
                "No corefile.  Have you set /proc/sys/kernel/core_pattern?")
            raise

        self.load_addr = 0
        self._address = 0

        if not self.elftype == 'CORE':
            log.error("%s is not a valid corefile" % self.file.name)

        if not self.arch in prstatus_types.keys():
            log.warn_once(
                "%s does not use a supported corefile architecture, registers are unavailable"
                % self.file.name)

        prstatus_type = prstatus_types.get(self.arch, None)
        prpsinfo_type = prpsinfo_types.get(self.bits, None)
        siginfo_type = siginfo_types.get(self.bits, None)
        fpregset_type = fpregset_types.get(self.bits, None)

        with log.waitfor("Parsing corefile...") as w:
            self._load_mappings()

            for segment in self.segments:
                if not isinstance(segment, elftools.elf.segments.NoteSegment):
                    continue

                # store all PT_NOTE segments
                self.core_segments.append(segment)

                for note in iter_notes(segment):
                    # Try to find NT_PRSTATUS.
                    if prstatus_type and \
                       note.n_descsz == ctypes.sizeof(prstatus_type) and \
                       note.n_type == 'NT_PRSTATUS':
                        self.prstatus.append(
                            prstatus_type.from_buffer_copy(note.n_desc))

                    # Try to find NT_PRPSINFO
                    elif prpsinfo_type and \
                       note.n_descsz == ctypes.sizeof(prpsinfo_type) and \
                       note.n_type == 'NT_PRPSINFO':
                        self.prpsinfo = prpsinfo_type.from_buffer_copy(
                            note.n_desc)

                    # Try to find NT_SIGINFO so we can see the fault
                    elif note.n_type == 'NT_SIGINFO':
                        self.siginfo.append(
                            siginfo_type.from_buffer_copy(note.n_desc))

                    # Try to find the list of mapped files
                    elif note.n_type == 'NT_FILE':
                        with context.local(bytes=self.bytes):
                            self._parse_nt_file(note)

                    # Try to find the auxiliary vector, which will tell us
                    # where the top of the stack is.
                    elif note.n_type == 'NT_AUXV':
                        with context.local(bytes=self.bytes):
                            self._parse_auxv(note)
                    ''' RD: no need for now
                    if note.n_type in (constants.NT_PRFPREG, "NT_PRFPREG"):
                        self.pregset = fpregset_type.from_buffer_copy(note.n_desc)

                    if note.n_type in (constants.NT_PRXFPREG, "NT_PRXFPREG"):
                        assert(self.bits == 32)
                        self.prxfpreg = elf_fxsr_32.from_buffer_copy(note.n_desc)

                    # RD: fix parsing in pwnlib/elf/datatypes.py
                    if note.n_type in (constants.NT_X86_XSTATE, "NT_X86_XSTATE"):
                        self.x86_xstate = elf_xstateregs.from_buffer_copy(note.n_desc)

                    if note.n_type in (constants.NT_386_TLS, "NT_386_TLS"):
                        self._parse_x86_386_tls(note.n_desc)
                    '''

            if not self.stack and self.mappings:
                self.stack = self.mappings[-1]

            if self.stack and self.mappings:
                for mapping in self.mappings:
                    if self.stack in mapping or self.stack == mapping.stop:
                        mapping.name = '[stack]'
                        self.stack = mapping
                        break
                else:
                    log.warn('Could not find the stack!')
                    self.stack = None

            with context.local(bytes=self.bytes, log_level='warn'):
                try:
                    self._parse_stack()
                except ValueError:
                    # If there are no environment variables, we die by running
                    # off the end of the stack.
                    pass

            # parse heap information
            self.ptheap = PTHeap(self)
            self.jeheap = JEHeap(self)

            # extract format string
            for (addr, sz, cstr) in self.search_cstrings(re.compile(
                    config.fmtstr_regex_n),
                                                         lookup=50,
                                                         limit=50,
                                                         writable=True):
                if sz > self.capsz:
                    log.debug(repr("[fmtstr] 0x%x(%d): %s" % (addr, sz, cstr)))
                    self.payloads[(addr, sz)] = cstr

            # extract shellcode payload
            for shcode_regex in config.shcode_db:
                for (addr, sz,
                     cstr) in self.search_cstrings(re.compile(shcode_regex),
                                                   lookup=100,
                                                   limit=100,
                                                   writable=True):
                    if sz > self.capsz:
                        log.debug(
                            repr("[shcode] 0x%x(%d): %s" % (addr, sz, cstr)))
                        self.payloads[(addr, sz)] = cstr
Esempio n. 30
0
def LOG_LEVEL(x):
    """Set the default logging verbosity from *x*, as used via
    ``context.log_level`` (e.g. ``LOG_LEVEL=debug``).
    """
    # Round-trip the argument through a scoped context so it is validated
    # and normalized before being stored as the global default.
    with context.local(log_level=x):
        normalized = context.log_level
        context.defaults['log_level'] = normalized
Esempio n. 31
0
def pack(number, word_size = None, endianness = None, sign = None, **kwargs):
    """pack(number, word_size = None, endianness = None, sign = None, **kwargs) -> str

    Packs arbitrary-sized integer.

    Word-size, endianness and signedness is done according to context.

    `word_size` can be any positive number or the string "all". Choosing the
    string "all" will output a string long enough to contain all the significant
    bits and thus be decodable by :func:`unpack`.

    `word_size` can be any positive number. The output will contain word_size/8
    rounded up number of bytes. If word_size is not a multiple of 8, it will be
    padded with zeroes up to a byte boundary.

    Arguments:
        number (int): Number to convert
        word_size (int): Word size of the converted integer or the string 'all'.
        endianness (str): Endianness of the converted integer ("little"/"big")
        sign (str): Signedness of the converted integer (False/True)
        kwargs: Anything that can be passed to context.local

    Returns:
        The packed number as a string.

    Examples:
        >>> pack(0x414243, 24, 'big', True)
        'ABC'
        >>> pack(0x414243, 24, 'little', True)
        'CBA'
        >>> pack(0x814243, 24, 'big', False)
        '\\x81BC'
        >>> pack(0x814243, 24, 'big', True)
        Traceback (most recent call last):
           ...
        ValueError: pack(): number does not fit within word_size
        >>> pack(0x814243, 25, 'big', True)
        '\\x00\\x81BC'
        >>> pack(-1, 'all', 'little', True)
        '\\xff'
        >>> pack(-256, 'all', 'big', True)
        '\\xff\\x00'
        >>> pack(0x0102030405, 'all', 'little', True)
        '\\x05\\x04\\x03\\x02\\x01'
        >>> pack(-1)
        '\\xff\\xff\\xff\\xff'
        >>> pack(0x80000000, 'all', 'big', True)
        '\\x00\\x80\\x00\\x00\\x00'
"""
    # NOTE: this is Python-2 code (`long`, `chr`, integer `/` division).

    # A negative number implies two's-complement ("signed") packing unless
    # the caller explicitly said otherwise.
    if sign is None and number < 0:
        sign = True

    # 'all' is resolved locally below; any concrete width is pushed into the
    # scoped context so it participates in normal context resolution.
    if word_size != 'all':
        kwargs.setdefault('word_size', word_size)

    kwargs.setdefault('endianness', endianness)
    kwargs.setdefault('sign', sign)

    with context.local(**kwargs):
        # Lookup in context if not found
        word_size  = 'all' if word_size == 'all' else context.word_size
        endianness = context.endianness
        sign       = context.sign

        if not isinstance(number, (int,long)):
            raise ValueError("pack(): number must be of type (int,long) (got %r)" % type(number))

        if sign not in [True, False]:
            raise ValueError("pack(): sign must be either True or False (got %r)" % sign)

        if endianness not in ['little', 'big']:
            raise ValueError("pack(): endianness must be either 'little' or 'big' (got %r)" % endianness)

        # Verify that word_size make sense
        if word_size == 'all':
            if number == 0:
                word_size = 8
            elif number > 0:
                if sign == False:
                    # Unsigned: smallest byte-aligned width holding the value.
                    word_size = ((number.bit_length() - 1) | 7) + 1
                else:
                    # Signed: one extra bit is needed for the sign.
                    word_size = (number.bit_length() | 7) + 1
            else:
                if sign == False:
                    raise ValueError("pack(): number does not fit within word_size")
                # Negative: (n + 1).bit_length() counts the magnitude bits of
                # the two's-complement representation.
                word_size = ((number + 1).bit_length() | 7) + 1
        elif not isinstance(word_size, (int, long)) or word_size <= 0:
            raise ValueError("pack(): word_size must be a positive integer or the string 'all'")

        if sign == True:
            limit = 1 << (word_size-1)
            if not -limit <= number < limit:
                raise ValueError("pack(): number does not fit within word_size")
        else:
            limit = 1 << word_size
            if not 0 <= number < limit:
                raise ValueError("pack(): number does not fit within word_size [%i, %r, %r]" % (0, number, limit))

        # Normalize number and size now that we have verified them
        # From now on we can treat positive and negative numbers the same
        number = number & ((1 << word_size) - 1)
        byte_size = (word_size + 7) / 8  # Python-2 integer division (floor)

        out = []

        # Emit least-significant byte first; big-endian is fixed up below.
        for _ in range(byte_size):
            out.append(chr(number & 0xff))
            number = number >> 8

        if endianness == 'little':
            return ''.join(out)
        else:
            return ''.join(reversed(out))
Esempio n. 32
0
 def level(self, value):
     """Setter: resolve *value* via ``context.log_level`` and apply the
     resolved level to the underlying logger."""
     with context.local(log_level=value):
         resolved = context.log_level
         self._logger.level = resolved
Esempio n. 33
0
def main(target):
    """Invoke ``generate_one(target)`` once for every architecture in
    ``ARCHITECTURES``, with ``context.arch`` set accordingly."""
    for current_arch in ARCHITECTURES:
        with context.local(arch=current_arch):
            generate_one(target)
Esempio n. 34
0
 def level(self, value):
     """Setter: resolve *value* via ``context.log_level`` and apply the
     resolved level to the underlying logger."""
     with context.local(log_level=value):
         resolved = context.log_level
         self._logger.level = resolved
def main(target):
    """Invoke ``generate_one(target)`` once for every architecture in
    ``ARCHITECTURES``, with ``context.arch`` set accordingly."""
    for current_arch in ARCHITECTURES:
        with context.local(arch=current_arch):
            generate_one(target)
Esempio n. 36
0
def LOG_LEVEL(x):
    """Set the default logging verbosity from *x*, as used via
    ``context.log_level`` (e.g. ``LOG_LEVEL=debug``).
    """
    # Round-trip the argument through a scoped context so it is validated
    # and normalized before being stored as the global default.
    with context.local(log_level=x):
        normalized = context.log_level
        context.defaults['log_level'] = normalized
Esempio n. 37
0
    def __init__(self, *a, **kw):
        """Parse a corefile: load memory mappings, the NT_PRSTATUS register
        set, the NT_FILE mapped-file list and the auxiliary vector, then
        walk the stack for environment variables.

        Raises:
            IOError: If the corefile could not be opened (re-raised after a
                warning about ``/proc/sys/kernel/core_pattern``).
        """
        #: The NT_PRSTATUS object.
        self.prstatus = None

        #: Dictionary of memory mappings from {address:name}
        self.mappings = []

        #: Address of the stack base
        self.stack = None

        #: Environment variables read from the stack {name:address}.
        #: N.B. Use with the ``string`` method to extract them.
        self.env = {}

        try:
            super(Core, self).__init__(*a, **kw)
        except IOError:
            log.warning(
                "No corefile.  Have you set /proc/sys/kernel/core_pattern?")
            raise

        self.load_addr = 0
        self._address = 0

        # BUG FIX: these error messages previously referenced an undefined
        # name `e` (`e.file.name`), raising NameError on the error path.
        # Use self.file.name, consistent with the rest of the class.
        if not self.elftype == 'CORE':
            log.error("%s is not a valid corefile" % self.file.name)

        if not self.arch in ('i386', 'amd64'):
            log.error("%s does not use a supported corefile architecture" %
                      self.file.name)

        prstatus_type = types[self.arch]

        with log.waitfor("Parsing corefile...") as w:
            self._load_mappings()

            for segment in self.segments:
                if not isinstance(segment, elftools.elf.segments.NoteSegment):
                    continue
                for note in iter_notes(segment):
                    # Try to find NT_PRSTATUS.  Note that pyelftools currently
                    # mis-identifies the enum name as 'NT_GNU_ABI_TAG'.
                    if note.n_descsz == ctypes.sizeof(prstatus_type) and \
                       note.n_type == 'NT_GNU_ABI_TAG':
                        self.NT_PRSTATUS = note
                        self.prstatus = prstatus_type.from_buffer_copy(
                            note.n_desc)

                    # Try to find the list of mapped files
                    if note.n_type == constants.NT_FILE:
                        with context.local(bytes=self.bytes):
                            self._parse_nt_file(note)

                    # Try to find the auxiliary vector, which will tell us
                    # where the top of the stack is.
                    if note.n_type == constants.NT_AUXV:
                        with context.local(bytes=self.bytes):
                            self._parse_auxv(note)

            # Label the mapping whose end coincides with the stack pointer
            # found in the auxiliary vector.
            if self.stack and self.mappings:
                for mapping in self.mappings:
                    if mapping.stop == self.stack:
                        mapping.name = '[stack]'
                        self.stack = mapping

            with context.local(bytes=self.bytes, log_level='error'):
                try:
                    self._parse_stack()
                except ValueError:
                    # If there are no environment variables, we die by running
                    # off the end of the stack.
                    pass
Esempio n. 38
0
    def lookup(self, symb=None, lib=None):
        """lookup(symb = None, lib = None) -> int

        Find the address of ``symbol``, which is found in ``lib``.

        Arguments:
            symb(str): Named routine to look up.
              If omitted, the base address of the library will be returned.
            lib(str): Substring to match for the library name.
              If omitted, the current library is searched.
              If set to ``'libc'``, ``'libc.so'`` is assumed.

        Returns:
            Address of the named symbol, or :const:`None`.
        """
        result = None

        if lib == 'libc':
            lib = 'libc.so'

        #
        # Get a pretty name for the symbol to show the user
        #
        if symb and lib:
            pretty = '%r in %r' % (symb, lib)
        else:
            pretty = repr(symb or lib)

        if not pretty:
            self.failure("Must specify a library or symbol")

        self.waitfor('Resolving %s' % pretty)

        #
        # If we are loading from a different library, create
        # a DynELF instance for it.
        #
        if lib is not None:
            dynlib = self._dynamic_load_dynelf(lib)
        else:
            dynlib = self

        if dynlib is None:
            log.failure("Could not find %r" % lib)
            return None

        #
        # If we are resolving a symbol, try a fast lookup by Build ID first
        # (only when libcdb support is enabled).
        #
        if symb and self.libcdb:
            self.status("Trying lookup based on Build ID")
            build_id = dynlib._lookup_build_id(lib=lib)
            if build_id:
                log.info("Trying lookup based on Build ID: %s" % build_id)
                path = libcdb.search_by_build_id(build_id)
                if path:
                    with context.local(log_level='error'):
                        e = ELF(path)
                        e.address = dynlib.libbase
                        result = e.symbols[symb]

        # BUG FIX: previously the remote lookup only ran inside the
        # `self.libcdb` branch, so with libcdb disabled a requested symbol
        # silently resolved to the library base address instead.
        if symb and not result:
            self.status("Trying remote lookup")
            result = dynlib._lookup(symb)

        if not symb:
            result = dynlib.libbase

        #
        # Did we win?
        #
        if result: self.success("%#x" % result)
        else:      self.failure("Could not find %s" % pretty)

        return result
Esempio n. 39
0
    def __init__(self, *a, **kw):
        """Parse a corefile: load memory mappings, the register set
        (NT_PRSTATUS), process info (NT_PRPSINFO), fault information
        (NT_SIGINFO), the mapped-file list (NT_FILE) and the auxiliary
        vector (NT_AUXV), then walk the stack for argv/envp.

        Raises:
            IOError: If the corefile could not be opened (re-raised after a
                warning about ``/proc/sys/kernel/core_pattern``).
        """
        #: The NT_PRSTATUS object.
        self.prstatus = None

        #: The NT_PRPSINFO object
        self.prpsinfo = None

        #: The NT_SIGINFO object
        self.siginfo = None

        #: :class:`dict`: Dictionary of memory mappings from ``address`` to ``name``
        self.mappings = []

        #: :class:`int`: Address of the stack base
        self.stack = None

        #: :class:`dict`: Environment variables read from the stack.  Keys are
        #: the environment variable name, values are the memory address of the
        #: variable.
        #:
        #: Note: Use with the :meth:`.ELF.string` method to extract them.
        #:
        #: Note: If FOO=BAR is in the environment, self.env['FOO'] is the
        #:       address of the string "BAR\x00".
        self.env = {}

        #: :class:`int`: Pointer to envp on the stack
        self.envp_address = 0

        #: :class:`list`: List of addresses of arguments on the stack.
        self.argv = []

        #: :class:`int`: Pointer to argv on the stack
        self.argv_address = 0

        #: :class:`int`: Number of arguments passed
        self.argc = 0

        #: :class:`int`: Pointer to argc on the stack
        self.argc_address = 0

        # Pointer to the executable filename on the stack
        self.at_execfn = 0

        # Pointer to the entry point
        self.at_entry = 0

        try:
            super(Corefile, self).__init__(*a, **kw)
        except IOError:
            log.warning(
                "No corefile.  Have you set /proc/sys/kernel/core_pattern?")
            raise

        self.load_addr = 0
        self._address = 0

        if not self.elftype == 'CORE':
            log.error("%s is not a valid corefile" % self.file.name)

        if not self.arch in prstatus_types.keys():
            log.warn_once(
                "%s does not use a supported corefile architecture, registers are unavailable"
                % self.file.name)

        # Each of these may be None if the arch/bits is unsupported; every
        # use below must therefore be guarded.
        prstatus_type = prstatus_types.get(self.arch, None)
        prpsinfo_type = prpsinfo_types.get(self.bits, None)
        siginfo_type = siginfo_types.get(self.bits, None)

        with log.waitfor("Parsing corefile...") as w:
            self._load_mappings()

            for segment in self.segments:
                if not isinstance(segment, elftools.elf.segments.NoteSegment):
                    continue

                # Note that older versions of pyelftools (<=0.24) are missing enum values
                # for NT_PRSTATUS, NT_PRPSINFO, NT_AUXV, etc.
                # For this reason, we have to check if note.n_type is any of several values.
                for note in iter_notes(segment):
                    # Try to find NT_PRSTATUS.
                    if prstatus_type and \
                       note.n_descsz == ctypes.sizeof(prstatus_type) and \
                       note.n_type in ('NT_GNU_ABI_TAG', 'NT_PRSTATUS'):
                        self.NT_PRSTATUS = note
                        self.prstatus = prstatus_type.from_buffer_copy(
                            note.n_desc.encode("latin-1"))

                    # Try to find NT_PRPSINFO
                    if prpsinfo_type and \
                       note.n_descsz == ctypes.sizeof(prpsinfo_type) and \
                       note.n_type in ('NT_GNU_ABI_TAG', 'NT_PRPSINFO'):
                        self.NT_PRPSINFO = note
                        self.prpsinfo = prpsinfo_type.from_buffer_copy(
                            note.n_desc.encode("latin-1"))

                    # Try to find NT_SIGINFO so we can see the fault.
                    # FIX: guard on siginfo_type, which is None for
                    # unsupported word sizes (mirrors the NT_PRSTATUS and
                    # NT_PRPSINFO branches above); previously this raised
                    # AttributeError on None.
                    if siginfo_type and \
                       note.n_type in (0x53494749, 'NT_SIGINFO'):
                        self.NT_SIGINFO = note
                        self.siginfo = siginfo_type.from_buffer_copy(
                            note.n_desc.encode("latin-1"))

                    # Try to find the list of mapped files
                    if note.n_type in (constants.NT_FILE, 'NT_FILE'):
                        with context.local(bytes=self.bytes):
                            self._parse_nt_file(note)

                    # Try to find the auxiliary vector, which will tell us
                    # where the top of the stack is.
                    if note.n_type in (constants.NT_AUXV, 'NT_AUXV'):
                        self.NT_AUXV = note
                        with context.local(bytes=self.bytes):
                            self._parse_auxv(note)

            # Fall back to the highest mapping if the auxv gave us no stack.
            if not self.stack and self.mappings:
                self.stack = self.mappings[-1]

            if self.stack and self.mappings:
                for mapping in self.mappings:
                    if self.stack in mapping or self.stack == mapping.stop:
                        mapping.name = '[stack]'
                        self.stack = mapping
                        break
                else:
                    log.warn('Could not find the stack!')
                    self.stack = None

            with context.local(bytes=self.bytes, log_level='warn'):
                try:
                    self._parse_stack()
                except ValueError:
                    # If there are no environment variables, we die by running
                    # off the end of the stack.
                    pass

            self._describe_core()
Esempio n. 40
0
def pack(number, word_size=None, endianness=None, sign=None, **kwargs):
    r"""pack(number, word_size = None, endianness = None, sign = None, **kwargs) -> str

    Pack an arbitrary-sized integer into a byte string.

    Word size, endianness and signedness default to the values taken from
    the global context.  ``word_size`` may be any positive number of bits,
    or the string ``'all'``, which emits exactly enough bytes to hold every
    significant bit (so the result round-trips through :func:`unpack`).
    A ``word_size`` that is not a multiple of 8 is zero-padded up to a byte
    boundary.

    Arguments:
        number (int): Number to convert
        word_size (int): Word size of the converted integer or the string 'all' (in bits).
        endianness (str): Endianness of the converted integer ("little"/"big")
        sign (str): Signedness of the converted integer (False/True)
        kwargs: Anything that can be passed to context.local

    Returns:
        The packed number as a byte string.

    Examples:
        >>> pack(0x414243, 24, 'big', True)
        b'ABC'
        >>> pack(0x414243, 24, 'little', True)
        b'CBA'
        >>> pack(0x814243, 24, 'big', False)
        b'\x81BC'
        >>> pack(0x814243, 24, 'big', True)
        Traceback (most recent call last):
           ...
        ValueError: pack(): number does not fit within word_size
        >>> pack(0x814243, 25, 'big', True)
        b'\x00\x81BC'
        >>> pack(-1, 'all', 'little', True)
        b'\xff'
        >>> pack(-256, 'all', 'big', True)
        b'\xff\x00'
        >>> pack(0x0102030405, 'all', 'little', True)
        b'\x05\x04\x03\x02\x01'
        >>> pack(-1)
        b'\xff\xff\xff\xff'
        >>> pack(0x80000000, 'all', 'big', True)
        b'\x00\x80\x00\x00\x00'
"""
    # A negative number forces signed packing unless explicitly overridden.
    if sign is None and number < 0:
        sign = True

    # 'all' is resolved locally below; any concrete width participates in
    # normal context resolution instead.
    if word_size != 'all':
        kwargs.setdefault('word_size', word_size)
    kwargs.setdefault('endianness', endianness)
    kwargs.setdefault('sign', sign)

    with context.local(**kwargs):
        word_size = 'all' if word_size == 'all' else context.word_size
        endianness = context.endianness
        sign = context.sign

        if not isinstance(number, six.integer_types):
            raise ValueError(
                "pack(): number must be of type (int,long) (got %r)" %
                type(number))

        if not isinstance(sign, bool):
            raise ValueError(
                "pack(): sign must be either True or False (got %r)" % sign)

        if endianness not in ['little', 'big']:
            raise ValueError(
                "pack(): endianness must be either 'little' or 'big' (got %r)"
                % endianness)

        # Resolve 'all' to the minimal byte-aligned width for this value.
        if word_size == 'all':
            if number < 0 and not sign:
                raise ValueError(
                    "pack(): number does not fit within word_size")
            if number == 0:
                word_size = 8
            elif number > 0:
                # Signed values need one extra bit for the sign.
                bits = number.bit_length() if sign else number.bit_length() - 1
                word_size = (bits | 7) + 1
            else:
                # Two's complement: (n + 1).bit_length() counts the
                # magnitude bits of a negative value.
                word_size = ((number + 1).bit_length() | 7) + 1
        elif not isinstance(word_size, six.integer_types) or word_size <= 0:
            raise ValueError(
                "pack(): word_size must be a positive integer or the string 'all'"
            )

        # Range-check the value against the chosen width.
        if sign:
            bound = 1 << (word_size - 1)
            if not -bound <= number < bound:
                raise ValueError(
                    "pack(): number does not fit within word_size")
        else:
            bound = 1 << word_size
            if not 0 <= number < bound:
                raise ValueError(
                    "pack(): number does not fit within word_size [%i, %r, %r]"
                    % (0, number, bound))

        # Two's-complement normalization lets positive and negative values
        # share a single serialization path from here on.
        remaining = number & ((1 << word_size) - 1)
        acc = bytearray()
        for _ in range((word_size + 7) // 8):
            acc.append(remaining & 0xff)
            remaining >>= 8

        # Bytes were emitted least-significant first.
        if endianness == 'big':
            acc.reverse()
        return bytes(acc)
Esempio n. 41
0
def which_binutils(util, **kwargs):
    """
    Finds a binutils in the PATH somewhere.
    Expects that the utility is prefixed with the architecture name.

    Arguments:
        util(str): Name of the binutils tool to locate (e.g. ``'as'``,
            ``'objdump'``).
        kwargs: Anything that can be passed to ``context.local``
            (e.g. ``arch='arm'``), so a lookup can target an architecture
            other than the current one.

    Examples:

        >>> import platform
        >>> which_binutils = pwnlib.asm.which_binutils
        >>> which_binutils('as', arch=platform.machine())
        '.../bin/...as'
        >>> which_binutils('as', arch='arm') #doctest: +ELLIPSIS
        '.../bin/arm-...-as'
        >>> which_binutils('as', arch='powerpc') #doctest: +ELLIPSIS
        '.../bin/powerpc...-as'
        >>> which_binutils('as', arch='msp430') #doctest: +SKIP
        ...
        Traceback (most recent call last):
        ...
        Exception: Could not find 'as' installed for ContextType(arch = 'msp430')
    """
    # FIX: the doctests above pass keyword arguments such as arch=..., but
    # the previous signature accepted none.  Route **kwargs through
    # context.local so callers can select the target per-call; calling with
    # no kwargs behaves exactly as before.
    with context.local(**kwargs):
        arch = context.arch

        # Fix up pwntools vs Debian triplet naming, and account
        # for 'thumb' being its own pwntools architecture.
        arches = [arch] + {
            'thumb':  ['arm',    'aarch64'],
            'i386':   ['x86_64', 'amd64'],
            'i686':   ['x86_64', 'amd64'],
            'amd64':  ['x86_64', 'i386'],
        }.get(arch, [])

        # If one of the candidate architectures matches the native
        # architecture, use that as a last resort (None == unprefixed tool).
        machine = platform.machine()
        machine = 'i386' if machine == 'i686' else machine
        try:
            with context.local(arch = machine):
                if context.arch in arches:
                    arches.append(None)
        except AttributeError:
            log.warn_once("Your local binutils won't be used because architecture %r is not supported." % machine)

        utils = [util]

        # hack for homebrew-installed binutils on mac
        if platform.system() == 'Darwin':
            utils = ['g'+util, util]

        for arch in arches:
            for gutil in utils:
                # e.g. objdump
                if arch is None:
                    pattern = gutil

                # e.g. aarch64-linux-gnu-objdump
                else:
                    pattern = '%s*linux*-%s' % (arch,gutil)

                for dir in environ['PATH'].split(':'):
                    res = sorted(glob(path.join(dir, pattern)))
                    if res:
                        return res[0]

        # No dice!
        print_binutils_instructions(util, context)
Esempio n. 42
0
def main():
    """Parse command-line arguments and dispatch to the selected
    subcommand, with console logging directed to stderr."""
    parsed = parser.parse_args()
    with context.local(log_console=sys.stderr):
        commands[parsed.command](parsed)
Esempio n. 43
0
    def lookup(self, symb=None, lib=None):
        """lookup(symb = None, lib = None) -> int

        Find the address of ``symbol``, which is found in ``lib``.

        Arguments:
            symb(str): Named routine to look up
              If omitted, the base address of the library will be returned.
            lib(str): Substring to match for the library name.
              If omitted, the current library is searched.
              If set to ``'libc'``, ``'libc.so'`` is assumed.

        Returns:
            Address of the named symbol, or :const:`None`.
        """
        result = None

        # Normalize the common shorthand for the C library.
        if lib == 'libc':
            lib = 'libc.so'

        #
        # Get a pretty name for the symbol to show the user
        #
        if symb and lib:
            pretty = '%r in %r' % (symb, lib)
        else:
            pretty = repr(symb or lib)

        if not pretty:
            self.failure("Must specify a library or symbol")

        self.waitfor('Resolving %s' % pretty)

        #
        # If we are loading from a different library, create
        # a DynELF instance for it.
        #
        if lib is not None: dynlib = self._dynamic_load_dynelf(lib)
        else: dynlib = self

        if dynlib is None:
            log.failure("Could not find %r" % lib)
            return None

        #
        # If we are resolving a symbol in the library, find it.
        #
        # Fast path first: resolve via the libc database keyed on the
        # library's Build ID, avoiding a remote symbol-table walk.
        if symb and self.libcdb:
            # Try a quick lookup by build ID
            self.status("Trying lookup based on Build ID")
            build_id = dynlib._lookup_build_id(lib=lib)
            if build_id:
                log.info("Trying lookup based on Build ID: %s" % build_id)
                path = libcdb.search_by_build_id(build_id)
                if path:
                    with context.local(log_level='error'):
                        e = ELF(path)
                        e.address = dynlib.libbase
                        result = e.symbols[symb]
        # Slow path: remote lookup whenever the fast path did not resolve
        # the symbol (including when libcdb support is disabled).
        if symb and not result:
            self.status("Trying remote lookup")
            result = dynlib._lookup(symb)
        # No symbol requested: return the library's base address.
        if not symb:
            result = dynlib.libbase

        #
        # Did we win?
        #
        if result: self.success("%#x" % result)
        else: self.failure("Could not find %s" % pretty)

        return result
Esempio n. 44
0
 def wrapper(*a, **kw):
     # Forward all arguments to the wrapped callable (`function`, captured
     # from the enclosing scope) with context.device pinned to this device
     # object for the duration of the call.
     with context.local(device=self):
         return function(*a,**kw)
Esempio n. 45
0
def make_packer(word_size=None, sign=None, **kwargs):
    """make_packer(word_size = None, endianness = None, sign = None) -> number → str

    "Freezes" the given arguments into a packing function.

    Calling ``make_packer(w, e, s)(data)`` is semantically the same as calling
    ``pack(data, w, e, s)``; however, when ``word_size`` is 8, 16, 32 or 64 a
    pre-built specialized packer is returned, which is faster.

    Arguments:
        word_size (int): The word size to be baked into the returned packer or the string all (in bits).
        endianness (str): The endianness to be baked into the returned packer. ("little"/"big")
        sign (str): The signness to be baked into the returned packer. ("unsigned"/"signed")
        kwargs: Additional context flags, for setting by alias (e.g. ``endian=`` rather than index)

    Returns:
        A function, which takes a single argument in the form of a number and returns a string
        of that number in a packed form.

    Examples:
        >>> p = make_packer(32, endian='little', sign='unsigned')
        >>> p
        <function _p32lu at 0x...>
        >>> p(42)
        b'*\\x00\\x00\\x00'
        >>> p(-1)
        Traceback (most recent call last):
            ...
        error: integer out of range for 'I' format code
        >>> make_packer(33, endian='little', sign='unsigned')
        <function ...<lambda> at 0x...>
    """
    with context.local(sign=sign, **kwargs):
        # Resolve unspecified values from the (temporarily updated) context.
        word_size = word_size or context.word_size
        endianness = context.endianness
        # An explicit sign is normalized through the context; ``None`` is kept
        # as-is so the fallback ``pack`` call resolves it dynamically.
        if sign is not None:
            sign = context.sign

        if word_size in (8, 16, 32, 64):
            endian_bit = {'little': 0, 'big': 1}[endianness]
            # (word_size, endian_bit, sign) -> specialized packer.
            specialized = {
                (8, 0, 0): _p8lu, (8, 0, 1): _p8ls,
                (8, 1, 0): _p8bu, (8, 1, 1): _p8bs,
                (16, 0, 0): _p16lu, (16, 0, 1): _p16ls,
                (16, 1, 0): _p16bu, (16, 1, 1): _p16bs,
                (32, 0, 0): _p32lu, (32, 0, 1): _p32ls,
                (32, 1, 0): _p32bu, (32, 1, 1): _p32bs,
                (64, 0, 0): _p64lu, (64, 0, 1): _p64ls,
                (64, 1, 0): _p64bu, (64, 1, 1): _p64bs,
            }
            fast = specialized.get((word_size, endian_bit, sign))
            if fast is not None:
                return fast

        # Generic fallback for unusual word sizes or a dynamic sign.
        return lambda number: pack(number, word_size, endianness, sign)
Esempio n. 46
0
    def __init__(self, *a, **kw):
        """Load a core dump and extract the process state from its notes.

        Walks every PT_NOTE segment of the ELF looking for NT_PRSTATUS,
        NT_PRPSINFO, NT_SIGINFO, NT_FILE and NT_AUXV notes, then parses
        the stack to recover argv and the environment.

        Raises:
            IOError: if the underlying corefile could not be opened.
        """
        #: The NT_PRSTATUS object.
        self.prstatus = None

        #: The NT_PRPSINFO object
        self.prpsinfo = None

        #: The NT_SIGINFO object
        self.siginfo = None

        #: :class:`dict`: Dictionary of memory mappings from ``address`` to ``name``
        self.mappings = []

        #: :class:`int`: Address of the stack base
        self.stack    = None

        #: :class:`dict`: Environment variables read from the stack.  Keys are
        #: the environment variable name, values are the memory address of the
        #: variable.
        #:
        #: Note: Use with the :meth:`.ELF.string` method to extract them.
        self.env = {}

        #: :class:`list`: List of addresses of arguments on the stack.
        self.argv = []

        #: :class:`int`: Number of arguments passed
        self.argc = 0

        # Pointer to the executable filename on the stack
        self.at_execfn = 0

        # Pointer to the entry point
        self.at_entry = 0

        try:
            super(Corefile, self).__init__(*a, **kw)
        except IOError:
            log.warning("No corefile.  Have you set /proc/sys/kernel/core_pattern?")
            raise

        self.load_addr = 0
        self._address  = 0

        if not self.elftype == 'CORE':
            log.error("%s is not a valid corefile" % self.file.name)

        if not self.arch in prstatus_types.keys():
            log.warn_once("%s does not use a supported corefile architecture, registers are unavailable" % self.file.name)

        # Any of these may be None for an unsupported architecture or word
        # size; every use below must be None-guarded.
        prstatus_type = prstatus_types.get(self.arch, None)
        prpsinfo_type = prpsinfo_types.get(self.bits, None)
        siginfo_type = siginfo_types.get(self.bits, None)

        with log.waitfor("Parsing corefile...") as w:
            self._load_mappings()

            for segment in self.segments:
                if not isinstance(segment, elftools.elf.segments.NoteSegment):
                    continue
                for note in iter_notes(segment):
                    # Try to find NT_PRSTATUS.  Note that pyelftools currently
                    # mis-identifies the enum name as 'NT_GNU_ABI_TAG'.
                    if prstatus_type and \
                       note.n_descsz == ctypes.sizeof(prstatus_type) and \
                       note.n_type == 'NT_GNU_ABI_TAG':
                        self.NT_PRSTATUS = note
                        self.prstatus = prstatus_type.from_buffer_copy(note.n_desc)

                    # Try to find NT_PRPSINFO
                    # Note that pyelftools currently mis-identifies the enum name
                    # as 'NT_GNU_BUILD_ID'
                    # Guard against prpsinfo_type being None (unsupported word
                    # size) before calling ctypes.sizeof on it, mirroring the
                    # prstatus_type guard above.
                    if prpsinfo_type and \
                       note.n_descsz == ctypes.sizeof(prpsinfo_type) and \
                       note.n_type == 'NT_GNU_BUILD_ID':
                        self.NT_PRPSINFO = note
                        self.prpsinfo = prpsinfo_type.from_buffer_copy(note.n_desc)

                    # Try to find NT_SIGINFO so we can see the fault
                    # (same None-guard as above for siginfo_type).
                    if siginfo_type and note.n_type == 0x53494749:
                        self.NT_SIGINFO = note
                        self.siginfo = siginfo_type.from_buffer_copy(note.n_desc)

                    # Try to find the list of mapped files
                    if note.n_type == constants.NT_FILE:
                        with context.local(bytes=self.bytes):
                            self._parse_nt_file(note)

                    # Try to find the auxiliary vector, which will tell us
                    # where the top of the stack is.
                    if note.n_type == constants.NT_AUXV:
                        with context.local(bytes=self.bytes):
                            self._parse_auxv(note)

            # Fall back to the highest mapping if NT_AUXV did not identify
            # the stack.
            if not self.stack and self.mappings:
                self.stack = self.mappings[-1]

            if self.stack and self.mappings:
                for mapping in self.mappings:
                    if mapping.stop == self.stack:
                        mapping.name = '[stack]'
                        self.stack   = mapping
                        break
                else:
                    for mapping in self.mappings:
                        if self.stack in mapping:
                            mapping.name = '[stack]'
                            self.stack   = mapping
                            break
                    else:
                        log.warn('Could not find the stack!')
                        self.stack = None

            with context.local(bytes=self.bytes, log_level='error'):
                try:
                    self._parse_stack()
                except ValueError:
                    # If there are no environment variables, we die by running
                    # off the end of the stack.
                    pass

            self._describe_core()
Esempio n. 47
0
def which_binutils(util):
    """
    Finds a binutils in the PATH somewhere.
    Expects that the utility is prefixed with the architecture name.

    Examples:

        >>> import platform
        >>> which_binutils = pwnlib.asm.which_binutils
        >>> which_binutils('as', arch=platform.machine())
        '.../bin/...as'
        >>> which_binutils('as', arch='arm') #doctest: +ELLIPSIS
        '.../bin/arm-...-as'
        >>> which_binutils('as', arch='powerpc') #doctest: +ELLIPSIS
        '.../bin/powerpc...-as'
        >>> which_binutils('as', arch='msp430') #doctest: +SKIP
        ...
        Traceback (most recent call last):
        ...
        Exception: Could not find 'as' installed for ContextType(arch = 'msp430')
    """
    arch = context.arch
    bits = context.bits

    # Fix up pwntools vs Debian triplet naming, and account
    # for 'thumb' being its own pwntools architecture.
    arches = [arch] + {
        'thumb': ['arm', 'aarch64'],
        'i386': ['x86_64', 'amd64'],
        'i686': ['x86_64', 'amd64'],
        'amd64': ['x86_64', 'i386'],
    }.get(arch, [])

    # If one of the candidate architectures matches the native
    # architecture, use that as a last resort.  A trailing ``None``
    # entry means "try the unprefixed utility".
    machine = platform.machine()
    machine = 'i386' if machine == 'i686' else machine
    try:
        with context.local(arch=machine):
            if context.arch in arches:
                arches.append(None)
    except AttributeError:
        log.warn_once(
            "Your local binutils won't be used because architecture %r is not supported."
            % machine)

    utils = [util]

    # hack for homebrew-installed binutils on mac
    if platform.system() == 'Darwin':
        utils = ['g' + util, util]

    # Fixed: do not reuse ``arch`` (clobbers the value read from context)
    # and do not shadow the ``dir`` builtin.
    for candidate in arches:
        for gutil in utils:
            # e.g. objdump
            if candidate is None:
                pattern = gutil

            # e.g. aarch64-linux-gnu-objdump
            else:
                pattern = '%s*linux*-%s' % (candidate, gutil)

            for dirname in environ['PATH'].split(':'):
                res = sorted(glob(path.join(dirname, pattern)))
                if res:
                    return res[0]

    # No dice!  Tell the user how to install the right binutils.
    print_binutils_instructions(util, context)
Esempio n. 48
0
    def __init__(self, *a, **kw):
        """Load a core dump and extract the process state from its notes.

        Walks every PT_NOTE segment of the ELF looking for NT_PRSTATUS,
        NT_PRSPINFO, NT_SIGINFO, NT_FILE and NT_AUXV notes, then parses
        the stack to recover argv and the environment.

        Raises:
            IOError: if the underlying corefile could not be opened.
        """
        #: The NT_PRSTATUS object.
        self.prstatus = None

        #: The NT_PRSPINFO object
        self.prspinfo = None

        #: The NT_SIGINFO object
        self.siginfo = None

        #: :class:`dict`: Dictionary of memory mappings from ``address`` to ``name``
        self.mappings = []

        #: :class:`int`: Address of the stack base
        self.stack    = None

        #: :class:`dict`: Environment variables read from the stack.  Keys are
        #: the environment variable name, values are the memory address of the
        #: variable.
        #:
        #: Note: Use with the :meth:`.ELF.string` method to extract them.
        self.env = {}

        #: :class:`list`: List of addresses of arguments on the stack.
        self.argv = []

        #: :class:`int`: Number of arguments passed
        self.argc = 0

        # Pointer to the executable filename on the stack
        self.at_execfn = 0

        # Pointer to the entry point
        self.at_entry = 0

        try:
            super(Corefile, self).__init__(*a, **kw)
        except IOError:
            log.warning("No corefile.  Have you set /proc/sys/kernel/core_pattern?")
            raise

        self.load_addr = 0
        self._address  = 0

        if not self.elftype == 'CORE':
            log.error("%s is not a valid corefile" % self.file.name)

        if not self.arch in prstatus_types.keys():
            log.warn_once("%s does not use a supported corefile architecture, registers are unavailable" % self.file.name)

        # Any of these may be None for an unsupported architecture or word
        # size; every use below must be None-guarded.
        prstatus_type = prstatus_types.get(self.arch, None)
        prspinfo_type = prspinfo_types.get(self.bits, None)
        siginfo_type = siginfo_types.get(self.bits, None)

        with log.waitfor("Parsing corefile...") as w:
            self._load_mappings()

            for segment in self.segments:
                if not isinstance(segment, elftools.elf.segments.NoteSegment):
                    continue
                for note in iter_notes(segment):
                    # Try to find NT_PRSTATUS.  Note that pyelftools currently
                    # mis-identifies the enum name as 'NT_GNU_ABI_TAG'.
                    if prstatus_type and \
                       note.n_descsz == ctypes.sizeof(prstatus_type) and \
                       note.n_type == 'NT_GNU_ABI_TAG':
                        self.NT_PRSTATUS = note
                        self.prstatus = prstatus_type.from_buffer_copy(note.n_desc)

                    # Try to find NT_PRPSINFO
                    # Note that pyelftools currently mis-identifies the enum name
                    # as 'NT_GNU_BUILD_ID'
                    # Guard against prspinfo_type being None (unsupported word
                    # size) before calling ctypes.sizeof on it, mirroring the
                    # prstatus_type guard above.
                    if prspinfo_type and \
                       note.n_descsz == ctypes.sizeof(prspinfo_type) and \
                       note.n_type == 'NT_GNU_BUILD_ID':
                        self.NT_PRSPINFO = note
                        self.prspinfo = prspinfo_type.from_buffer_copy(note.n_desc)

                    # Try to find NT_SIGINFO so we can see the fault
                    # (same None-guard as above for siginfo_type).
                    if siginfo_type and note.n_type == 0x53494749:
                        self.NT_SIGINFO = note
                        self.siginfo = siginfo_type.from_buffer_copy(note.n_desc)

                    # Try to find the list of mapped files
                    if note.n_type == constants.NT_FILE:
                        with context.local(bytes=self.bytes):
                            self._parse_nt_file(note)

                    # Try to find the auxiliary vector, which will tell us
                    # where the top of the stack is.
                    if note.n_type == constants.NT_AUXV:
                        with context.local(bytes=self.bytes):
                            self._parse_auxv(note)

            # Fall back to the highest mapping if NT_AUXV did not identify
            # the stack.
            if not self.stack and self.mappings:
                self.stack = self.mappings[-1]

            if self.stack and self.mappings:
                for mapping in self.mappings:
                    if mapping.stop == self.stack:
                        mapping.name = '[stack]'
                        self.stack   = mapping
                        break
                else:
                    for mapping in self.mappings:
                        if self.stack in mapping:
                            mapping.name = '[stack]'
                            self.stack   = mapping
                            break
                    else:
                        log.warn('Could not find the stack!')
                        self.stack = None

            with context.local(bytes=self.bytes, log_level='error'):
                try:
                    self._parse_stack()
                except ValueError:
                    # If there are no environment variables, we die by running
                    # off the end of the stack.
                    pass

            self._describe_core()
Esempio n. 49
0
 def wrapper(*call_args, **call_kwargs):
     # Execute ``function`` inside a context scoped to this device;
     # the prior context is restored when the call returns.
     with context.local(device=self):
         return function(*call_args, **call_kwargs)
Esempio n. 50
0
def make_packer(word_size = None, sign = None, **kwargs):
    """make_packer(word_size = None, endianness = None, sign = None) -> number → str

    "Freezes" the given arguments into a packing function.

    Calling ``make_packer(w, e, s)(data)`` is semantically the same as calling
    ``pack(data, w, e, s)``; however, when ``word_size`` is 8, 16, 32 or 64 a
    pre-built specialized packer is returned, which is faster.

    Arguments:
        word_size (int): The word size to be baked into the returned packer or the string all.
        endianness (str): The endianness to be baked into the returned packer. ("little"/"big")
        sign (str): The signness to be baked into the returned packer. ("unsigned"/"signed")
        kwargs: Additional context flags, for setting by alias (e.g. ``endian=`` rather than index)

    Returns:
        A function, which takes a single argument in the form of a number and returns a string
        of that number in a packed form.

    Examples:
        >>> p = make_packer(32, endian='little', sign='unsigned')
        >>> p
        <function _p32lu at 0x...>
        >>> p(42)
        '*\\x00\\x00\\x00'
        >>> p(-1)
        Traceback (most recent call last):
            ...
        error: integer out of range for 'I' format code
        >>> make_packer(33, endian='little', sign='unsigned')
        <function <lambda> at 0x...>
    """
    with context.local(sign=sign, **kwargs):
        # Resolve unspecified values from the (temporarily updated) context.
        word_size = word_size or context.word_size
        endianness = context.endianness
        # An explicit sign is normalized through the context; ``None`` is kept
        # as-is so the fallback ``pack`` call resolves it dynamically.
        if sign is not None:
            sign = context.sign

        if word_size in (8, 16, 32, 64):
            endian_bit = {'little': 0, 'big': 1}[endianness]
            # (word_size, endian_bit, sign) -> specialized packer.
            specialized = {
                (8, 0, 0): _p8lu, (8, 0, 1): _p8ls,
                (8, 1, 0): _p8bu, (8, 1, 1): _p8bs,
                (16, 0, 0): _p16lu, (16, 0, 1): _p16ls,
                (16, 1, 0): _p16bu, (16, 1, 1): _p16bs,
                (32, 0, 0): _p32lu, (32, 0, 1): _p32ls,
                (32, 1, 0): _p32bu, (32, 1, 1): _p32bs,
                (64, 0, 0): _p64lu, (64, 0, 1): _p64ls,
                (64, 1, 0): _p64bu, (64, 1, 1): _p64bs,
            }
            fast = specialized.get((word_size, endian_bit, sign))
            if fast is not None:
                return fast

        # Generic fallback for unusual word sizes or a dynamic sign.
        return lambda number: pack(number, word_size, endianness, sign)