Example #1
def search_by_hash(hex_encoded_id, hash_type='build_id', unstrip=True):
    assert hash_type in HASHES, hash_type

    # Ensure that the libcdb cache directory exists
    cache, cache_valid = _check_elf_cache('libcdb', hex_encoded_id, hash_type)
    if cache_valid:
        return cache

    # Run through all available libc database providers to see if we have a match.
    data = None
    for provider in PROVIDERS:
        data = provider(hex_encoded_id, hash_type)
        if data and data.startswith(b'\x7FELF'):
            break

    if not data:
        log.warn_once("Could not find libc for %s %s anywhere", hash_type, hex_encoded_id)

    # Save whatever we got to the cache
    write(cache, data or b'')

    # Return ``None`` if we did not get a valid ELF file
    if not data or not data.startswith(b'\x7FELF'):
        return None

    # Try to find debug info for this libc.
    if unstrip:
        unstrip_libc(cache)

    return cache
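The loop above only assumes that each entry of PROVIDERS is a callable taking (hex_encoded_id, hash_type) and returning raw ELF bytes, or a falsy value when it has no match. Below is a minimal sketch of such a provider under that assumption, reusing the module's wget and log helpers; the mirror URL is purely illustrative.

# Hypothetical provider sketch: any callable with this signature can be
# appended to PROVIDERS. The mirror URL below is made up.
def provider_example_mirror(hex_encoded_id, hash_type):
    url = "https://libc-mirror.example.com/hashes/%s/%s" % (hash_type, hex_encoded_id)
    try:
        return wget(url, timeout=20) or None
    except Exception as e:
        log.debug("Example mirror lookup failed: %s", e)
        return None

# PROVIDERS.append(provider_example_mirror)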
Example #2
def _search_debuginfo_by_hash(base_url, hex_encoded_id):
    # Deferred import because it's slow
    import requests
    from six.moves import urllib

    # Check if we tried this buildid before.
    cache, cache_valid = _check_elf_cache('libcdb_dbg', hex_encoded_id, 'build_id')
    if cache_valid:
        return cache

    # Try to find separate debuginfo.
    url  = '/buildid/{}/debuginfo'.format(hex_encoded_id)
    url  = urllib.parse.urljoin(base_url, url)
    data = b""
    log.debug("Downloading data from debuginfod: %s", url)
    try:
        data = wget(url, timeout=20)
    except requests.RequestException as e:
        log.warn_once("Failed to fetch libc debuginfo for build_id %s from %s: %s", hex_encoded_id, base_url, e)
    
    # Save whatever we got to the cache
    write(cache, data or b'')

    # Return ``None`` if we did not get a valid ELF file
    if not data or not data.startswith(b'\x7FELF'):
        log.warn_once("Could not fetch libc debuginfo for build_id %s from %s", hex_encoded_id, base_url)
        return None

    return cache
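A usage sketch for the helper above, assuming ELF.buildid yields the raw GNU build ID of a local libc and that the surrounding module's names are in scope; the debuginfod server address is only an example.

# Hypothetical usage: fetch separate debug info for a local libc by build ID.
libc = ELF('./libc.so.6')
debug_path = _search_debuginfo_by_hash('https://debuginfod.elfutils.org/',
                                       libc.buildid.hex())
if debug_path:
    log.info("Separate debug info cached at %s", debug_path)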
Example #3
def read(path, target=None, callback=None):
    """Download a file from the device, and extract its contents.

    Arguments:
        path(str): Path to the file on the device.
        target(str): Optional, location to store the file.
            Uses a temporary file by default.
        callback(callable): See the documentation for
            ``adb.protocol.AdbClient.read``.

    Examples:

        >>> print(adb.read('/proc/version').decode('utf-8')) # doctest: +ELLIPSIS
        Linux version ...
        >>> adb.read('/does/not/exist')
        Traceback (most recent call last):
        ...
        PwnlibException: Could not stat '/does/not/exist'
    """
    with AdbClient() as c:
        stat = c.stat(path)
        if not stat:
            log.error('Could not stat %r' % path)
        data = c.read(path, stat['size'], callback=callback)

    if target:
        misc.write(target, data)

    return data
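Because read() both returns the contents and, when target is given, writes them to disk, a device binary can be mirrored locally and analyzed in one step. A short sketch with placeholder paths:

# Hypothetical usage: mirror a binary off the device and load it for analysis.
# Both paths are placeholders.
from pwn import *

data = adb.read('/system/bin/toybox', target='./toybox')
elf  = ELF('./toybox')
log.info("Pulled %d bytes, arch=%s", len(data), elf.arch)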
Example #4
def pull(remote_path, local_path=None):
    """Download a file from the device.

    Arguments:
        remote_path(str): Path or directory of the file on the device.
        local_path(str): Path to save the file to.
            Uses the file's name by default.

    Return:
        The contents of the file.

    Example:

        >>> _=adb.pull('/proc/version', './proc-version')
        >>> print(read('./proc-version').decode('utf-8')) # doctest: +ELLIPSIS
        Linux version ...
    """
    if local_path is None:
        local_path = os.path.basename(remote_path)

    msg = "Pulling %r to %r" % (remote_path, local_path)

    if log.isEnabledFor(logging.DEBUG):
        msg += ' (%s)' % context.device

    with log.waitfor(msg) as w:
        data = read(remote_path, callback=_create_adb_push_pull_callback(w))
        misc.write(local_path, data)

    return data
Example #5
def pull(remote_path, local_path=None):
    """Download a file from the device.

    Arguments:
        remote_path(str): Path or directory of the file on the device.
        local_path(str): Path to save the file to.
            Uses the file's name by default.

    Return:
        The contents of the file.

    Example:

        >>> _=adb.pull('/proc/version', './proc-version')
        >>> print(read('./proc-version').decode('utf-8')) # doctest: +ELLIPSIS
        Linux version ...
    """
    if local_path is None:
        local_path = os.path.basename(remote_path)

    msg = "Pulling %r to %r" % (remote_path, local_path)

    if log.isEnabledFor(logging.DEBUG):
        msg += ' (%s)' % context.device

    with log.waitfor(msg) as w:
        data = read(remote_path, callback=_create_adb_push_pull_callback(w))
        misc.write(local_path, data)

    return data
Example #6
def read(path, target=None, callback=None):
    """Download a file from the device, and extract its contents.

    Arguments:
        path(str): Path to the file on the device.
        target(str): Optional, location to store the file.
            Uses a temporary file by default.
        callback(callable): See the documentation for
            ``adb.protocol.AdbClient.read``.

    Examples:

        >>> print(adb.read('/proc/version').decode('utf-8')) # doctest: +ELLIPSIS
        Linux version ...
        >>> adb.read('/does/not/exist')
        Traceback (most recent call last):
        ...
        PwnlibException: Could not stat '/does/not/exist'
    """
    with AdbClient() as c:
        stat = c.stat(path)
        if not stat:
            log.error('Could not stat %r' % path)
        data = c.read(path, stat['size'], callback=callback)

    if target:
        misc.write(target, data)

    return data
Example #7
    def save(self, path=None):
        """Save the ELF to a file

        >>> bash = ELF(which('bash'))
        >>> bash.save('/tmp/bash_copy')
        >>> copy = open('/tmp/bash_copy', 'rb')
        >>> bash = open(which('bash'), 'rb')
        >>> bash.read() == copy.read()
        True
        """
        if path is None:
            path = self.path
        misc.write(path, self.data)
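Since save() just writes the in-memory image (self.data) back to disk, it pairs naturally with in-place patching. A sketch under the assumption that ELF.write patches the memory-mapped copy, as it does in pwntools; the path and patch byte are purely illustrative.

# Hypothetical usage: patch an ELF in memory and save the result to a new path.
from pwn import *

elf = ELF('/bin/true')
elf.write(elf.entry, b'\xcc')    # plant an int3 byte at the entry point
elf.save('/tmp/true_patched')    # the original file on disk is left untouched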
Example #8
def search_by_hash(hex_encoded_id, hash_type='build_id'):
    assert hash_type in HASHES, hash_type

    # Ensure that the libcdb cache directory exists
    cache_dir = os.path.join(context.cache_dir, 'libcdb', hash_type)

    if not os.path.isdir(cache_dir):
        os.makedirs(cache_dir)

    # If we already downloaded the file, and it looks even passingly like
    # a valid ELF file, return it.
    cache = os.path.join(cache_dir, hex_encoded_id)

    if os.path.exists(cache):
        log.debug("Found existing cached libc at %r", cache)

        data = read(cache)
        if data.startswith(b'\x7FELF'):
            log.info_once("Using cached data from %r", cache)
            return cache
        else:
            log.info_once("Skipping unavialable libc %s", hex_encoded_id)
            return None

    # Build the URL using the requested hash type
    url_base = "https://gitlab.com/libcdb/libcdb/raw/master/hashes/%s/" % hash_type
    url = urllib.parse.urljoin(url_base, hex_encoded_id)

    data = b""
    while not data.startswith(b'\x7fELF'):
        log.debug("Downloading data from LibcDB: %s", url)
        data = wget(url)

        if not data:
            log.warn_once("Could not fetch libc for build_id %s",
                          hex_encoded_id)
            break

        # GitLab serves up symlinks as plain text containing the relative target path; follow it manually.
        if data.startswith(b'..'):
            url = os.path.dirname(url) + '/'
            url = urllib.parse.urljoin(url, data.decode('utf-8'))

    # Save whatever we got to the cache
    write(cache, data or b'')

    # Return ``None`` if we did not get a valid ELF file
    if not data or not data.startswith(b'\x7FELF'):
        return None

    return cache
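A usage sketch for resolving a local libc through libcdb by more than one hash type, assuming the surrounding module's namespace, that HASHES includes 'build_id' and 'sha256', and that non-build-id hashes are taken over the raw file contents:

# Hypothetical usage: look up the same libc by two different hash types.
import hashlib

libc = ELF('./libc.so.6')
path_by_id  = search_by_hash(libc.buildid.hex(), 'build_id')
path_by_sha = search_by_hash(hashlib.sha256(libc.data).hexdigest(), 'sha256')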
Example #9
def write(path, data=''):
    """Create a file on the device with the provided contents.

    Arguments:
        path(str): Path to the file on the device
        data(str): Contents to store in the file

    Examples:

        >>> adb.write('/dev/null', 'data')
        >>> adb.write('/data/local/tmp/')
    """
    with tempfile.NamedTemporaryFile() as temp:
        misc.write(temp.name, data)
        push(temp.name, path)
Example #10
def write(path, data=b''):
    """Create a file on the device with the provided contents.

    Arguments:
        path(str): Path to the file on the device
        data(bytes): Contents to store in the file

    Examples:

        >>> adb.write('/dev/null', b'data')
        >>> adb.write('/data/local/tmp/')
    """
    with tempfile.NamedTemporaryFile() as temp:
        misc.write(temp.name, data)
        push(temp.name, path)
Example #11
def cache_file():
    """Returns the path of the file used to cache update data, and ensures that it exists."""
    cache_dir = context.cache_dir

    if not cache_dir:
        return None

    cache_file = os.path.join(cache_dir, 'update')

    if not os.path.isdir(cache_dir):
        os.makedirs(cache_dir)

    if not os.path.exists(cache_file):
        write(cache_file, '')

    return cache_file
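A caller might use the returned path to remember when it last checked for updates. A sketch, assuming the module-level read() returns bytes (as pwnlib's misc.read does); the one-week threshold is purely illustrative.

# Hypothetical usage: track the time of the last update check in the cache file.
import time

path = cache_file()
if path:
    raw = read(path).strip()
    last_check = float(raw.decode()) if raw else 0.0
    if time.time() - last_check > 7 * 24 * 3600:   # stale after one week
        write(path, str(time.time()))
        # ... perform the actual update check here ...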
Example #12
def cache_file():
    """Returns the path of the file used to cache update data, and ensures that it exists."""
    cache_dir = context.cache_dir

    if not cache_dir:
        return None

    cache_file = os.path.join(cache_dir, 'update')

    if not os.path.isdir(cache_dir):
        os.makedirs(cache_dir)

    if not os.path.exists(cache_file):
        write(cache_file, '')

    return cache_file
Example #13
def search_by_hash(hex_encoded_id, hash_type='build_id'):
    assert hash_type in HASHES, hash_type

    # Ensure that the libcdb cache directory exists
    cache_dir = os.path.join(context.cache_dir, 'libcdb', hash_type)

    if not os.path.isdir(cache_dir):
        os.makedirs(cache_dir)

    # If we already downloaded the file, and it looks even passingly like
    # a valid ELF file, return it.
    cache = os.path.join(cache_dir, hex_encoded_id)

    if os.path.exists(cache):
        log.debug("Found existing cached libc at %r", cache)

        data = read(cache)
        if data.startswith(b'\x7FELF'):
            log.info_once("Using cached data from %r", cache)
            return cache
        else:
            log.info_once("Skipping unavailable libc %s", hex_encoded_id)
            return None

    # Run through all available libc database providers to see if we have a match.
    data = None
    for provider in PROVIDERS:
        data = provider(hex_encoded_id, hash_type)
        if data and data.startswith(b'\x7FELF'):
            break

    if not data:
        log.warn_once("Could not find libc for %s %s anywhere", hash_type,
                      hex_encoded_id)

    # Save whatever we got to the cache
    write(cache, data or b'')

    # Return ``None`` if we did not get a valid ELF file
    if not data or not data.startswith(b'\x7FELF'):
        return None

    return cache
Example #14
def search_by_build_id(hex_encoded_id):
    """
    Given a hex-encoded Build ID, return the path to an ELF with that Build ID,
    downloading and caching it on the local system if necessary.

    If it can't be found, return None.

    Arguments:
        hex_encoded_id(str):
            Hex-encoded Build ID (e.g. 'ABCDEF...') of the library

    Returns:
        Path to the downloaded library on disk, or :const:`None`.
    """
    cache = cache_dir + '-libc.so.' + hex_encoded_id

    if os.path.exists(cache) and read(cache).startswith(b'\x7FELF'):
        log.info_once("Using cached data from %r" % cache)
        return cache

    log.info("Downloading data from GitHub")

    url_base = "https://gitlab.com/libcdb/libcdb/raw/master/hashes/build_id/"
    url = urlparse.urljoin(url_base, hex_encoded_id)

    data = ""
    while not data.startswith('\x7fELF'):
        data = wget(url)

        if not data:
            return None

        if data.startswith(b'..'):
            url = os.path.dirname(url) + '/'
            url = urllib.parse.urljoin(url, data.decode('utf-8'))

    write(cache, data)
    return cache
Example #15
    def __init__(self, proc):
        if proc.poll() is None:
            log.error("Process %i has not exited" % (process.pid))

        self.process = proc
        self.pid = proc.pid
        self.uid = proc.suid
        self.gid = proc.sgid
        self.exe = proc.executable
        self.basename = os.path.basename(self.exe)
        self.cwd = proc.cwd

        # XXX: Should probably break out all of this logic into
        #      its own class, so that we can support "file ops"
        #      locally, via SSH, and over ADB, in a transparent way.
        if isinstance(proc, process):
            self.read = read
            self.unlink = os.unlink
        elif isinstance(proc, ssh_channel):
            self.read = proc.parent.read
            self.unlink = proc.parent.unlink

        self.kernel_core_pattern = self.read('/proc/sys/kernel/core_pattern').strip()
        self.kernel_core_uses_pid = bool(int(self.read('/proc/sys/kernel/core_uses_pid')))

        log.debug("core_pattern: %r" % self.kernel_core_pattern)
        log.debug("core_uses_pid: %r" % self.kernel_core_uses_pid)

        self.interpreter = self.binfmt_lookup()

        log.debug("interpreter: %r" % self.interpreter)

        # If we have already located the corefile, we will
        # have renamed it to 'core.<pid>'
        core_path = 'core.%i' % (proc.pid)
        self.core_path = None

        if os.path.isfile(core_path):
            log.debug("Found core immediately: %r" % core_path)
            self.core_path = core_path

        # Try QEMU first, since it's unlikely to be a false-positive unless
        # there is a PID *and* filename collision.
        if not self.core_path:
            log.debug("Looking for QEMU corefile")
            self.core_path = self.qemu_corefile()

        # Check for native coredumps as a last resort
        if not self.core_path:
            log.debug("Looking for native corefile")
            self.core_path = self.native_corefile()

        if not self.core_path:
            return

        core_pid = self.load_core_check_pid()

        # Move the corefile if we're configured that way
        if context.rename_corefiles:
            new_path = 'core.%i' % core_pid
            if core_pid > 0 and new_path != self.core_path:
                write(new_path, self.read(self.core_path))
                self.core_path = new_path

        # Check the PID
        if core_pid != self.pid:
            log.warn("Corefile PID does not match! (got %i)" % core_pid)

        # Register the corefile for removal only if it's an exact match
        elif context.delete_corefiles:
            atexit.register(lambda: os.unlink(self.core_path))
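In practice this finder is rarely constructed by hand; the usual entry point is the corefile attribute of a crashed process, which wraps the same lookup. A usage sketch with a placeholder binary:

# Hypothetical usage: crash a process, then locate and inspect its corefile.
from pwn import *

io = process('./vulnerable')     # placeholder binary
io.sendline(cyclic(512))         # overflow some buffer with a De Bruijn pattern
io.wait()                        # the finder requires the process to have exited
core = io.corefile
log.info("Corefile found at %s", core.path)
log.info("Faulting address: %#x", core.fault_addr)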
Example #16
    def __init__(self, proc):
        if proc.poll() is None:
            log.error("Process %i has not exited" % (process.pid))

        self.process = proc
        self.pid = proc.pid
        self.uid = proc.suid
        self.gid = proc.sgid
        self.exe = proc.executable
        self.basename = os.path.basename(self.exe)
        self.cwd = proc.cwd

        # XXX: Should probably break out all of this logic into
        #      its own class, so that we can support "file ops"
        #      locally, via SSH, and over ADB, in a transparent way.
        if isinstance(proc, process):
            self.read = read
            self.unlink = os.unlink
        elif isinstance(proc, ssh_channel):
            self.read = proc.parent.read
            self.unlink = proc.parent.unlink

        self.kernel_core_pattern = self.read(
            '/proc/sys/kernel/core_pattern').strip()
        self.kernel_core_uses_pid = bool(
            int(self.read('/proc/sys/kernel/core_uses_pid')))

        log.debug("core_pattern: %r" % self.kernel_core_pattern)
        log.debug("core_uses_pid: %r" % self.kernel_core_uses_pid)

        self.interpreter = self.binfmt_lookup()

        log.debug("interpreter: %r" % self.interpreter)

        # If we have already located the corefile, we will
        # have renamed it to 'core.<pid>'
        core_path = 'core.%i' % (proc.pid)
        self.core_path = None

        if os.path.isfile(core_path):
            log.debug("Found core immediately: %r" % core_path)
            self.core_path = core_path

        # Check for native coredumps if we don't 100% know the target
        # is running under qemu-user emulation.
        if not self.core_path:
            log.debug("Looking for native corefile")
            self.core_path = self.native_corefile()

        # If we still have not found the corefile, the process may have
        # been running under qemu-user emulation and we just can't tell
        # (e.g. can't enumerate binfmt_misc in a Docker container).
        if not self.core_path:
            log.debug("Looking for QEMU corefile")
            self.core_path = self.qemu_corefile()

        if not self.core_path:
            return

        core_pid = self.load_core_check_pid()

        # Move the corefile if we're configured that way
        if context.rename_corefiles:
            new_path = 'core.%i' % core_pid
            if core_pid > 0 and new_path != self.core_path:
                write(new_path, self.read(self.core_path))
                self.core_path = new_path

        # Check the PID
        if core_pid != self.pid:
            log.warn("Corefile PID does not match! (got %i)" % core_pid)

        # Register the corefile for removal only if it's an exact match
        elif context.delete_corefiles:
            atexit.register(lambda: os.unlink(self.core_path))
Example #17
    def __init__(self, proc):
        if proc.poll() is None:
            log.error("Process %i has not exited" % (process.pid))

        self.process = proc
        self.pid = proc.pid
        self.uid = proc.suid
        self.gid = proc.sgid
        self.exe = proc.executable
        self.basename = os.path.basename(self.exe)
        self.cwd = proc.cwd

        # XXX: Should probably break out all of this logic into
        #      its own class, so that we can support "file ops"
        #      locally, via SSH, and over ADB, in a transparent way.
        if isinstance(proc, process):
            self.read = read
            self.unlink = os.unlink
        elif isinstance(proc, ssh_channel):
            self.read = proc.parent.read
            self.unlink = proc.parent.unlink

        self.kernel_core_pattern = self.read('/proc/sys/kernel/core_pattern').strip()
        self.kernel_core_uses_pid = bool(int(self.read('/proc/sys/kernel/core_uses_pid')))

        log.debug("core_pattern: %r" % self.kernel_core_pattern)
        log.debug("core_uses_pid: %r" % self.kernel_core_uses_pid)

        self.interpreter = self.binfmt_lookup()

        log.debug("interpreter: %r" % self.interpreter)

        # If we have already located the corefile, we will
        # have renamed it to 'core.<pid>'
        core_path = 'core.%i' % (proc.pid)
        self.core_path = None

        if os.path.isfile(core_path):
            log.debug("Found core immediately: %r" % core_path)
            self.core_path = core_path

        # Try QEMU first, since it's unlikely to be a false-positive unless
        # there is a PID *and* filename collision.
        if not self.core_path:
            log.debug("Looking for QEMU corefile")
            self.core_path = self.qemu_corefile()

        # Check for native coredumps as a last resort
        if not self.core_path:
            log.debug("Looking for native corefile")
            self.core_path = self.native_corefile()

        if not self.core_path:
            return

        core_pid = self.load_core_check_pid()

        # Move the corefile if we're configured that way
        if context.rename_corefiles:
            new_path = 'core.%i' % core_pid
            if core_pid > 0 and new_path != self.core_path:
                write(new_path, self.read(self.core_path))
                self.core_path = new_path

        # Check the PID
        if core_pid != self.pid:
            log.warn("Corefile PID does not match! (got %i)" % core_pid)

        # Register the corefile for removal only if it's an exact match
        elif context.delete_corefiles:
            atexit.register(lambda: os.unlink(self.core_path))