Example #1
File: rpm_metadata.py Project: marusak/faf
    def _get_primary_file_path(self, reponame, repourl):
        self._setup_dirs(reponame)

        repomdfilename = self._get_repo_file_path(reponame,
                                                  repourl,
                                                  "repodata/repomd.xml")
        rplh = RepomdPrimaryLocationHandler()
        repomdparser = xml.sax.make_parser()
        repomdparser.setContentHandler(rplh)

        try:
            mdfp = open(repomdfilename, "r")
        except Exception as ex:
            raise FafError("Reading: {0}".format(str(ex)))
        else:
            with mdfp:
                try:
                    repomdparser.parse(mdfp)
                except SAXException as ex:
                    raise FafError("Failed to parse repomd.xml: {0}"
                                   .format(str(ex)))

        return self._get_repo_file_path(reponame,
                                        repourl,
                                        rplh.primary_location)
Example #2
File: faf_rpm.py Project: michalfabik/faf
def store_rpm_provides(db: Database, package: Package, nogpgcheck: bool = False) -> None:
    """
    Save RPM provides of `package` to storage.

    Expects pyfaf.storage.opsys.Package object.
    """

    pkg_id = package.id
    ts = rpm.ts()
    rpm_file = package.get_lob_fd("package")
    if not rpm_file:
        raise FafError("Package {0} has no lob stored".format(package.name))

    if nogpgcheck:
        ts.setVSFlags(rpm._RPMVSF_NOSIGNATURES) #pylint: disable=protected-access

    try:
        header = ts.hdrFromFdno(rpm_file.fileno())
    except rpm.error as exc:
        rpm_file.close()
        raise FafError("rpm error: {0}".format(exc)) from exc

    files = header.fiFromHeader()
    log.debug("%s contains %d files", package.nvra(), len(files))

    # Invalid name for type variable
    # pylint: disable-msg=C0103
    for f in files:
        new = PackageDependency()
        new.package_id = pkg_id
        new.type = "PROVIDES"
        new.name = f[0]
        new.flags = 0
        db.session.add(new)

    provides = header.dsFromHeader('providename')
    for p in provides:
        if len(p.N()) > 1024:
            log.warning("Provides item in RPM header of %s longer than 1024 "
                        "characters. Skipping", package.name)
            continue

        new = PackageDependency()
        new.package_id = pkg_id
        new.type = "PROVIDES"
        new.name = p.N()
        new.flags = p.Flags()
        evr = p.EVR()
        if evr:
            try:
                new.epoch, new.version, new.release = parse_evr(evr)
            except ValueError as ex:
                log.warning("Unparsable EVR ‘%s’ of %s in Provides of %s: %s. "
                            "Skipping",
                            evr, p.N(), package.name, ex)
                continue
        db.session.add(new)

    rpm_file.close()
    db.session.flush()
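
The function above couples header inspection with faf's storage layer. As a point of reference, here is a minimal standalone sketch of the same header inspection using only the rpm bindings; the package path is a placeholder and no database is involved:

import rpm

def list_provides(rpm_path, nogpgcheck=False):
    # Read the package header, optionally skipping signature verification.
    ts = rpm.ts()
    if nogpgcheck:
        ts.setVSFlags(rpm._RPMVSF_NOSIGNATURES)  # pylint: disable=protected-access
    with open(rpm_path, "rb") as rpm_file:
        header = ts.hdrFromFdno(rpm_file.fileno())

    # Each Provides entry carries a name, flags and an optional EVR string.
    return [(p.N(), p.Flags(), p.EVR())
            for p in header.dsFromHeader("providename")]

# list_provides("/tmp/example.rpm", nogpgcheck=True)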
Example #3
    def _parse_kernel_build_id(self, build_id, archs):
        """
        Parses the kernel build string such as
        3.10.0-3.fc19.x86_64
        3.10.0-3.fc19.armv7hl.tegra
        2.6.32-358.14.1.el6.i686.PAE
        3.15.6-200.fc20.i686+PAE
        """

        arch = None
        flavour = None

        splitby = "+" if "+" in build_id else "."

        head, tail = build_id.rsplit(splitby, 1)
        if tail in archs:
            arch = tail
        else:
            flavour = tail
            head, tail = head.rsplit(".", 1)
            if not tail in archs:
                raise FafError("Unable to determine architecture from '{0}'"
                               .format(build_id))

            arch = tail

        try:
            version, release = head.rsplit("-", 1)
        except ValueError:
            raise FafError("Unable to determine release from '{0}'"
                           .format(head))

        return version, release, arch, flavour
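
The same splitting logic can be exercised on the sample strings from the docstring. A self-contained sketch, where the set of known architecture names is an assumption made only for the demonstration:

KNOWN_ARCHS = {"x86_64", "i686", "armv7hl"}

def parse_kernel_build_id(build_id, archs=KNOWN_ARCHS):
    arch = None
    flavour = None

    # A "+" separates the flavour in builds such as 3.15.6-200.fc20.i686+PAE.
    splitby = "+" if "+" in build_id else "."

    head, tail = build_id.rsplit(splitby, 1)
    if tail in archs:
        arch = tail
    else:
        flavour = tail
        head, arch = head.rsplit(".", 1)
        if arch not in archs:
            raise ValueError("Unable to determine architecture "
                             "from '{0}'".format(build_id))

    version, release = head.rsplit("-", 1)
    return version, release, arch, flavour

# parse_kernel_build_id("2.6.32-358.14.1.el6.i686.PAE")
# -> ('2.6.32', '358.14.1.el6', 'i686', 'PAE')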
Example #4
File: __init__.py Project: vikasmulaje/faf
    def save_lob(self,
                 name,
                 data,
                 binary=True,
                 overwrite=False,
                 truncate=False):
        lobpath = self.get_lob_path(name)

        if not overwrite and os.path.isfile(lobpath):
            raise FafError("Lob '{0}' already exists".format(name))

        maxlen = self.__lobs__[name]
        mode = "w"
        if binary:
            mode += "b"

        with open(lobpath, mode) as lob:
            if type(data) in [str, unicode]:
                self._save_lob_string(lob, data, maxlen, truncate)
            elif hasattr(data, "read"):
                if not truncate:
                    raise FafError(
                        "When saving from file, truncate must be enabled")

                self._save_lob_file(lob, data, maxlen)
            else:
                raise FafError(
                    "Data must be either str, unicode or file-like object")
Example #5
    def save_lob(self,
                 name: str,
                 data: Union[bytes, BinaryIO],
                 overwrite: bool = False,
                 truncate: bool = False):
        lobpath = self.get_lob_path(name)

        if not isinstance(data, bytes) and not hasattr(data, "read"):
            raise FafError("Cannot save LOB '{0}'. Data must be either "
                           "a bytestring or a file-like object".format(name))

        if not overwrite and os.path.isfile(lobpath):
            raise FafError("Lob '{0}' already exists".format(name))

        maxlen = self.__lobs__[name]

        with open(lobpath, "wb") as lob:
            if hasattr(data, "read"):
                if not truncate:
                    raise FafError(
                        "When saving from file, truncate must be enabled")

                data = cast(BinaryIO, data)
                self._write_lob_file(lob, data, maxlen)
            else:
                data = cast(bytes, data)
                self._write_lob_bytes(lob, data, maxlen, truncate)
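
The _write_lob_file helper called above is not shown in these examples. A hypothetical sketch of what it might look like, written here as a plain function rather than a method; the chunk size and exact semantics are assumptions, not faf's actual implementation. The companion _write_lob_bytes helper appears in Example #22 below.

from typing import BinaryIO

def _write_lob_file(dest: BinaryIO, src: BinaryIO, maxlen: int = 0,
                    bufsize: int = 1 << 16) -> None:
    # Copy at most maxlen bytes (everything when maxlen is 0) in fixed chunks.
    remaining = maxlen if maxlen > 0 else None
    while remaining is None or remaining > 0:
        chunk = src.read(bufsize if remaining is None else min(bufsize, remaining))
        if not chunk:
            break
        dest.write(chunk)
        if remaining is not None:
            remaining -= len(chunk)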
Example #6
    def _get_tasks(self, cmdline, db):
        result = set()

        # no arguments - pull everything for non-EOL releases
        if not cmdline.opsys:
            for osplugin in systems.values():
                db_opsys = get_opsys_by_name(db, osplugin.nice_name)
                if db_opsys is None:
                    raise FafError("Operating system '{0}' is not defined in "
                                   "storage".format(osplugin.nice_name))

                for db_release in db_opsys.releases:
                    if db_release.status != "EOL":
                        result.add((osplugin, db_release))

        # a single opsys - respect opsysrelease
        elif len(cmdline.opsys) == 1:
            if cmdline.opsys[0] not in systems:
                raise FafError("Operating system '{0}' is not supported"
                               .format(cmdline.opsys[0]))

            osplugin = systems[cmdline.opsys[0]]
            db_opsys = get_opsys_by_name(db, osplugin.nice_name)
            if db_opsys is None:
                raise FafError("Operating system '{0}' is not defined in "
                               "storage".format(osplugin.nice_name))

            if len(cmdline.opsys_release) < 1:
                for db_release in db_opsys.releases:
                    result.add((osplugin, db_release))
            else:
                for release in cmdline.opsys_release:
                    db_release = get_osrelease(db, osplugin.nice_name, release)
                    if db_release is None:
                        self.log_warn("Operating system '{0} {1}' is not "
                                      "supported".format(osplugin.nice_name,
                                                         release))
                        continue

                    result.add((osplugin, db_release))

        # multiple opsys - pull all of their releases
        else:
            for opsys_name in cmdline.opsys:
                if not opsys_name in systems:
                    self.log_warn("Operating system '{0}' is not supported"
                                  .format(opsys_name))
                    continue

                osplugin = systems[opsys_name]
                db_opsys = get_opsys_by_name(db, osplugin.nice_name)
                if db_opsys is None:
                    self.log_warn("Operating system '{0}' is not defined in "
                                  "storage".format(osplugin.nice_name))
                    continue

                for db_release in db_opsys.releases:
                    result.add((osplugin, db_release))

        return sorted(result, key=lambda p_r: (p_r[1].opsys.name, p_r[1].version))
Example #7
File: __init__.py Project: Riju19/faf
    def save_lob(self,
                 name,
                 data,
                 binary=True,
                 overwrite=False,
                 truncate=False):
        lobpath = self.get_lob_path(name)

        if not overwrite and os.path.isfile(lobpath):
            raise FafError("Lob '{0}' already exists".format(name))

        maxlen = self.__lobs__[name]
        mode = "w"
        if binary:
            mode += "b"

        with open(lobpath, mode) as lob:
            if sys.version_info.major == 3 and isinstance(data, bytes):
                data = data.decode("utf-8")
            if isinstance(data, six.string_types):
                self._save_lob_string(lob, data, maxlen, truncate)
            elif hasattr(data, "read"):
                if not truncate:
                    raise FafError(
                        "When saving from file, truncate must be enabled")

                self._save_lob_file(lob, data, maxlen)
            else:
                raise FafError(
                    "Data must be either str, unicode or file-like object")
Example #8
    def _db_backtrace_find_crash_thread(self, db_backtrace):
        if len(db_backtrace.threads) == 1:
            return db_backtrace.threads[0]

        db_threads = [t for t in db_backtrace.threads if t.crashthread]
        if not db_threads:
            raise FafError("No crash thread could be found for backtrace #{0}"
                           .format(db_backtrace.id))

        if len(db_threads) > 1:
            raise FafError("Multiple crash threads found for backtrace #{0}"
                           .format(db_backtrace.id))

        return db_threads[0]
Example #9
    def _hash_backtrace(self,
                        db_backtrace,
                        hashbase=None,
                        offset=False) -> str:
        if hashbase is None:
            hashbase = []

        crashthreads = [t for t in db_backtrace.threads if t.crashthread]
        if not crashthreads:
            raise FafError("No crash thread found")

        if len(crashthreads) > 1:
            raise FafError("Multiple crash threads found")

        frames = [f for f in crashthreads[0].frames if not f.inlined][:16]

        hasnames = all([
            f.symbolsource.symbol is not None
            and f.symbolsource.symbol.name is not None
            and f.symbolsource.symbol.name != "??" for f in frames
        ])
        hashashes = all([f.symbolsource.hash is not None for f in frames])

        # use function names if available
        if hasnames:
            # also hash offset for reports that use it as line numbers
            # these reports always have function names
            if offset:
                hashbase.extend([
                    "{0} @ {1} + {2}".format(f.symbolsource.symbol.name,
                                             f.symbolsource.path,
                                             f.symbolsource.offset)
                    for f in frames
                ])
            else:
                hashbase.extend([
                    "{0} @ {1}".format(f.symbolsource.symbol.name,
                                       f.symbolsource.path) for f in frames
                ])
        # fallback to hashes
        elif hashashes:
            hashbase.extend([
                "{0} @ {1}".format(f.symbolsource.hash, f.symbolsource.path)
                for f in frames
            ])
        else:
            raise FafError("either function names or hashes are required")

        return hash_list(hashbase)
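
hash_list is imported from elsewhere in faf and is not shown here. A plausible minimal stand-in that hashes the assembled lines; the choice of SHA-1 and the newline join are assumptions for illustration only:

import hashlib

def hash_list(strings):
    # Digest of the joined entries; keeping a stable order is what makes
    # duplicate backtraces hash to the same value.
    return hashlib.sha1("\n".join(strings).encode("utf-8")).hexdigest()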
Example #10
    def _get_crash_thread(self, stacktrace):
        """
        Searches for a single crash thread and returns it. Raises FafError if
        there is no crash thread or if there are multiple crash threads.
        """

        crashthreads = [t for t in stacktrace if ("crash_thread" in t and
                                                  t["crash_thread"])]
        if not crashthreads:
            raise FafError("No crash thread found")

        if len(crashthreads) > 1:
            raise FafError("Multiple crash threads found")

        return crashthreads[0]["frames"]
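
A minimal illustration of the input structure this helper expects; the thread and frame contents below are made up for the example:

stacktrace = [
    {"crash_thread": False, "frames": [{"function_name": "poll"}]},
    {"crash_thread": True, "frames": [{"function_name": "raise"},
                                      {"function_name": "abort"}]},
]

crashthreads = [t for t in stacktrace if t.get("crash_thread")]
assert len(crashthreads) == 1
frames = crashthreads[0]["frames"]  # frames of the single crash thread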
Example #11
File: centos.py Project: mkutlak/faf
    def validate_packages(self, packages) -> bool:
        CentOS.packages_checker.check(packages)
        affected = False
        for package in packages:
            if "package_role" in package:
                if package["package_role"] not in CentOS.pkg_roles:
                    raise FafError("Only the following package roles are allowed: "
                                   "{0}".format(", ".join(CentOS.pkg_roles)))
                if package["package_role"] == "affected":
                    affected = True

        if not(affected or self.allow_unpackaged):
            raise FafError("uReport must contain affected package")

        return True
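
The role check can be illustrated in isolation. A sketch with a made-up packages list; the set of allowed role names is only an assumption standing in for CentOS.pkg_roles:

ALLOWED_ROLES = {"affected", "related", "selinux_policy"}  # assumed roles

packages = [
    {"name": "will-crash", "epoch": 0, "version": "0.5",
     "release": "1.el7", "architecture": "x86_64",
     "package_role": "affected"},
]

bad_roles = [p["package_role"] for p in packages
             if "package_role" in p and p["package_role"] not in ALLOWED_ROLES]
affected = any(p.get("package_role") == "affected" for p in packages)
assert not bad_roles and affected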
Example #12
    def _tar_xz(self,
                archive_name,
                archive_dir,
                filepaths,
                unlink=True) -> None:
        archive_path = os.path.join(archive_dir, archive_name)
        archive_path_tmp = os.path.join(archive_dir,
                                        "{0}.tmp".format(archive_name))
        tmpdir = get_temp_dir()
        tmpsubdir = os.path.join(tmpdir, "archive")
        unlink_paths = list(filepaths)

        try:
            os.makedirs(tmpsubdir)
        except OSError as ex:
            if ex.errno == errno.EEXIST:
                raise FafError("The directory '{0}' already exists".format(
                    tmpsubdir)) from ex
            raise

        untar = None
        if os.path.isfile(archive_path):
            self.log_info("An existing archive found, will merge the contents")
            untar = self._untar_xz(archive_path)
            with os.scandir(untar) as iterator:
                for entry in iterator:
                    if entry.is_dir() and entry.name.endswith("archive"):
                        filepaths = [
                            os.path.join(entry.path, f)
                            for f in os.listdir(entry.path)
                        ] + filepaths
                        break

        self.log_info("Creating symlinks")
        for filepath in filepaths:
            linkpath = os.path.join(tmpsubdir, os.path.basename(filepath))
            # merge - do not overwrite already archived data
            try:
                self.log_debug("%s ~> %s", filepath, linkpath)
                os.symlink(filepath, linkpath)
            except OSError as ex:
                if ex.errno != errno.EEXIST:
                    raise

                self.log_debug("Already exists")

        self.log_info("Running tar")
        safe_popen("tar", "chJf", archive_path_tmp, "-C", tmpdir, "archive")
        os.rename(archive_path_tmp, archive_path)

        self.log_info("Cleaning up")

        if untar is not None:
            shutil.rmtree(untar, ignore_errors=True)

        if unlink:
            for path in unlink_paths:
                os.unlink(path)

        shutil.rmtree(tmpsubdir)
Example #13
def unpack_rpm_to_tmp(path, prefix="faf"):
    """
    Unpack an RPM package to a temp directory. The directory is created under
    the path given by the storage.tmpdir config option, or under the system
    default temp directory if that option is unset.
    """

    tmpdir = config.get("storage.tmpdir", None)

    result = tempfile.mkdtemp(prefix=prefix, dir=tmpdir)
    for dirname in ["bin", "lib", "lib64", "sbin"]:
        os.makedirs(os.path.join(result, "usr", dirname))
        os.symlink(os.path.join("usr", dirname), os.path.join(result, dirname))

    rpm2cpio = Popen(["rpm2cpio", path], stdout=PIPE, stderr=PIPE)
    cpio = Popen(["cpio", "-id", "--quiet"],
                 stdin=rpm2cpio.stdout,
                 stderr=PIPE,
                 cwd=result)

    #FIXME: false positive by pylint # pylint: disable=fixme
    rpm2cpio.stdout.close()  # pylint: disable=no-member
    try:
        # generous timeout of 15 minutes (kernel unpacking)
        cpio.communicate(timeout=900)
    except TimeoutExpired:
        cpio.kill()
        cpio.communicate()
    finally:
        if cpio.returncode != 0:
            shutil.rmtree(result)
            raise FafError("Failed to unpack RPM '{0}'".format(path))

    return result
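
A hypothetical usage sketch, assuming the function above is importable and the package path exists; the temporary tree should always be removed afterwards:

import shutil

topdir = unpack_rpm_to_tmp("/tmp/example-1.0-1.x86_64.rpm")
try:
    pass  # inspect the unpacked files under topdir here
finally:
    shutil.rmtree(topdir)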
Example #14
def save(db, ureport, create_component=False, timestamp=None, count=1):
    """
    Save uReport based on ureport_version element, assuming the given uReport
    is valid. Flush the database at the end.
    """

    if timestamp is None:
        timestamp = datetime.datetime.utcnow()

    ver = get_version(ureport)

    if ver == 1:
        save_ureport1(db,
                      ureport,
                      create_component=create_component,
                      timestamp=timestamp,
                      count=count)
    elif ver == 2:
        save_ureport2(db,
                      ureport,
                      create_component=create_component,
                      timestamp=timestamp,
                      count=count)
    else:
        raise FafError("uReport version {0} is not supported".format(ver))

    db.session.flush()
Example #15
File: rpm.py Project: mhabrnal/faf
def unpack_rpm_to_tmp(path, prefix="faf"):
    """
    Unpack an RPM package to a temp directory. The directory is created under
    the path given by the storage.tmpdir config option, or under the system
    default temp directory if that option is unset.
    """

    tmpdir = None
    if "storage.tmpdir" in config:
        tmpdir = config["storage.tmpdir"]

    result = tempfile.mkdtemp(prefix=prefix, dir=tmpdir)
    for dirname in ["bin", "lib", "lib64", "sbin"]:
        os.makedirs(os.path.join(result, "usr", dirname))
        os.symlink(os.path.join("usr", dirname), os.path.join(result, dirname))

    rpm2cpio = Popen(["rpm2cpio", path], stdout=PIPE, stderr=PIPE)
    cpio = Popen(["cpio", "-id", "--quiet"],
                 stdin=rpm2cpio.stdout,
                 stderr=PIPE,
                 cwd=result)

    # do not check rpm2cpio exitcode as there may be a bug for large files
    # https://bugzilla.redhat.com/show_bug.cgi?id=790396
    rpm2cpio.wait()
    if cpio.wait() != 0:
        shutil.rmtree(result)
        raise FafError("Failed to unpack RPM '{0}'".format(path))

    return result
Example #16
File: __init__.py Project: michalfabik/faf
    def __init__(self,
                 debug=False,
                 dry=False,
                 session_kwargs=None,
                 create_schema=False) -> None:
        if Database.__instance__ and Database.__instancecheck__(
                Database.__instance__):
            raise FafError(
                "Database is a singleton and has already been instantiated. "
                "If you have lost the reference, you can access the object "
                "from Database.__instance__ .")
        if not session_kwargs:
            session_kwargs = {"autoflush": False, "autocommit": True}
        self._db = create_engine(get_connect_string())
        self._db.echo = self._debug = debug
        self._dry = dry
        GenericTable.metadata.bind = self._db
        self.session = Session(self._db, **session_kwargs)
        self.session._flush_orig = self.session.flush  #pylint: disable=protected-access
        self.session.flush = self._flush_session

        if create_schema:
            GenericTable.metadata.create_all()

        Database.__instance__ = self
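
A hedged usage sketch of the singleton behaviour, assuming a configured faf storage and the usual pyfaf import paths; the second constructor call is expected to raise FafError:

from pyfaf.common import FafError
from pyfaf.storage import Database

db = Database()
try:
    Database()
except FafError:
    # Reuse the existing instance instead of creating a second one.
    db = Database.__instance__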
Example #17
File: __init__.py Project: vikasmulaje/faf
    def del_lob(self, name):
        lobpath = self.get_lob_path(name)

        if not os.path.isfile(lobpath):
            raise FafError("Lob '{0}' does not exist".format(name))

        os.unlink(lobpath)
Example #18
File: __init__.py Project: juliusmilan/faf
    def _save_lob_string(self, dest, data, maxlen=0, truncate=False):
        if maxlen > 0 and len(data) > maxlen:
            if truncate:
                data = data[:maxlen]
            else:
                raise FafError("Data is too long, '{0}' only allows length of {1}".format(dest.name, maxlen))

        dest.write(data)
Example #19
File: retrace.py Project: marusak/faf
def get_base_address(binary_path):
    """
    Runs eu-unstrip on a binary to get the address used
    as base for calculating relative offsets.
    """

    child = safe_popen("eu-unstrip", "-n", "-e", binary_path)

    if child is None:
        raise FafError("eu-unstrip failed")

    match = RE_UNSTRIP_BASE_OFFSET.match(child.stdout)
    if match is None:
        raise FafError("Unexpected output from eu-unstrip: '{0}'".format(
            child.stdout))

    return int(match.group(1), 16)
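
For context, a standalone sketch that extracts the base address without the precompiled regular expression; it assumes the usual eu-unstrip -n output, where the first field of each line has the form BASE+SIZE (e.g. 0x400000+0x209000):

import subprocess

def base_address(binary_path):
    # eu-unstrip -n -e prints one line per module; take the load address
    # from the first BASE+SIZE field.
    out = subprocess.run(["eu-unstrip", "-n", "-e", binary_path],
                         check=True, capture_output=True, text=True).stdout
    first_field = out.split()[0]  # e.g. "0x400000+0x209000"
    return int(first_field.split("+")[0], 16)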
Example #20
    def _get_repo_file_path(self,
                            reponame,
                            repourl,
                            remote,
                            local=None) -> str:
        url = os.path.join(repourl, remote)
        if url.startswith("file://"):
            return url[len("file://"):]
        if url.startswith("/"):
            return url

        if local is None:
            local = os.path.basename(remote)

        cachename = os.path.join(self.cachedir, self.name, reponame, local)

        mtime = 0
        try:
            mtime = os.path.getmtime(cachename)
        except OSError as ex:
            if errno.ENOENT != ex.errno:
                raise FafError("Cannot access cache: {0}".format(str(ex)))

        if (mtime + self.cacheperiod) <= time.time():
            curl = pycurl.Curl()
            curl.setopt(pycurl.URL, url.encode("ascii", "ignore"))

            try:
                fp = open(cachename, "wb")
            except Exception as ex:
                raise FafError(
                    "Creating cache file {0} filed with: {1}".format(
                        cachename, str(ex)))
            else:
                with fp:
                    curl.setopt(pycurl.WRITEDATA, fp)
                    try:
                        curl.perform()
                    except pycurl.error as ex:
                        raise FafError("Downloading failed: {0}".format(
                            str(ex)))
                    finally:
                        curl.close()

        return cachename
Example #21
    def _setup_dirs(self, reponame):
        dirname = os.path.join(self.cachedir, self.name, reponame)
        try:
            os.makedirs(dirname)
        except OSError as ex:
            if ex.errno != 17:
                raise FafError("Cache directories error '{0}': {1}".format(
                    self.cachedir, str(ex)))
        return dirname
Example #22
    def _write_lob_bytes(self, dest: BinaryIO, data: bytes, maxlen: int = 0,
                         truncate: bool = False):
        if len(data) > maxlen > 0:
            if truncate:
                data = data[:maxlen]
            else:
                raise FafError("Data is too long, '{0}' only allows length of {1}".format(dest.name, maxlen))

        dest.write(data)
Example #23
File: fedora.py Project: xsuchy/faf
    def validate_packages(self, packages):
        Fedora.packages_checker.check(packages)
        for package in packages:
            if ("package_role" in package
                    and package["package_role"] not in Fedora.pkg_roles):
                raise FafError("Only the following package roles are allowed: "
                               "{0}".format(", ".join(Fedora.pkg_roles)))

        return True
Example #24
File: __init__.py Project: juliusmilan/faf
    def pkstr(self):
        parts = []
        for column in self.__table__._columns:
            if column.primary_key:
                parts.append(str(self.__getattribute__(column.name)))

        if not parts:
            raise FafError("No primary key found for object '{0}'".format(self.__class__.__name__))

        return "-".join(parts)
Example #25
    def pkstr(self) -> str:
        parts = []
        for col in self.__table__._columns: #pylint: disable=no-member, protected-access
            if col.primary_key:
                parts.append(str(self.__getattribute__(col.name)))

        if not parts:
            raise FafError("No primary key found for object '{0}'".format(self.__class__.__name__))

        return "-".join(parts)
Example #26
    def _get_repo_file_path(self,
                            reponame: str,
                            repourl: str,
                            remote: str,
                            local: Optional[str] = None) -> str:
        url = os.path.join(repourl, remote)
        if url.startswith("file://"):
            return url[len("file://"):]
        if url.startswith("/"):
            return url

        if local is None:
            local = os.path.basename(remote)

        cachename = os.path.join(self.cachedir, self.name, reponame, local)

        last_modified: float = 0
        try:
            last_modified = os.path.getmtime(cachename)
        except OSError as ex:
            if errno.ENOENT != ex.errno:
                raise FafError("Cannot access cache: {0}".format(
                    str(ex))) from ex

        # Check for cache expiration.
        if (last_modified + self.cacheperiod) <= time.time():
            try:
                cache_file = open(cachename, "wb")
            except Exception as ex:
                raise FafError(
                    "Creating cache file {0} filed with: {1}".format(
                        cachename, str(ex))) from ex

            with cache_file:
                try:
                    with urlopen(url) as response:
                        cache_file.write(response.read())
                except HTTPError as ex:
                    raise FafError("Downloading failed: {0}".format(
                        str(ex))) from ex

        return cachename
Example #27
File: problems.py Project: mkutlak/faf
def precompute_history(report_ids, period, count=20):
    today = datetime.date.today()

    if period == 'day':
        table = ReportHistoryDaily
        first_day = today - datetime.timedelta(days=count - 1)
    elif period == 'week':
        table = ReportHistoryWeekly
        # Last Monday or today if it's Monday.
        last_day = today - datetime.timedelta(days=today.weekday())
        first_day = last_day - datetime.timedelta(days=(count - 1) * 7)
    elif period == 'month':
        table = ReportHistoryMonthly
        # First day of this month.
        last_day = datetime.date(today.year, today.month, 1)
        first_day = last_day - relativedelta(months=count - 1)
    else:
        raise FafError(f'Invalid time period "{period}"')

    date_column = getattr(table, period)
    q = (db.session.query(table.opsysrelease_id,
                          date_column.label('date'),
                          func.sum(table.count).label('total_count'),
                          func.sum(table.unique).label('total_unique'))
         .filter(table.report_id.in_(report_ids))
         .filter(date_column >= first_day)
         .group_by(date_column, table.opsysrelease_id)
         .subquery())

    histories = (db.session.query(OpSysRelease,
                                  q.c.date,
                                  q.c.total_count,
                                  q.c.total_unique)
                 .join(q)
                 .order_by(OpSysRelease.id, q.c.date)
                 .all())

    # Preprocessing to unify output format for all periods and for easier plotting.
    by_opsys = {}
    for osr, entries in groupby(histories, itemgetter(0)):
        counts = [{'date': e.date,
                   'count': e.total_count,
                   'unique': e.total_unique}
                  for e in entries]

        by_opsys[str(osr)] = counts

    result = {
        'by_opsys': by_opsys,
        'from_date': first_day,
        'period_count': count
    }

    return result
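
The window computation can be isolated from the database query. A self-contained sketch of the calendar arithmetic used to pick first_day for each period:

import datetime
from dateutil.relativedelta import relativedelta

def history_window(period, count=20, today=None):
    # Return the first day covered by the last `count` periods.
    today = today or datetime.date.today()
    if period == 'day':
        return today - datetime.timedelta(days=count - 1)
    if period == 'week':
        last_day = today - datetime.timedelta(days=today.weekday())  # Monday
        return last_day - datetime.timedelta(days=(count - 1) * 7)
    if period == 'month':
        last_day = datetime.date(today.year, today.month, 1)
        return last_day - relativedelta(months=count - 1)
    raise ValueError(f'Invalid time period "{period}"')

# history_window('month', count=3, today=datetime.date(2024, 5, 17))
# -> datetime.date(2024, 3, 1)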
Example #28
File: retrace.py Project: pypingou/faf
def addr2line(binary_path, address, debuginfo_dir):
    """
    Calls eu-addr2line on a binary, address and directory with debuginfo.
    Returns an ordered list of triplets (function name, source file, line no).
    The last element is always the symbol given to retrace. The elements
    before are inlined symbols that should be placed above the given symbol
    (assuming that entry point is on the bottom of the stacktrace).
    """

    result = []
    child = safe_popen("eu-addr2line", "--executable", binary_path,
                       "--debuginfo-path", debuginfo_dir, "--functions",
                       str(address))

    if child is None:
        raise FafError("eu-add2line failed")

    line1, line2 = child.stdout.splitlines()
    line2_parts = line2.split(":", 1)
    line2_srcfile = line2_parts[0]
    line2_srcline = int(line2_parts[1])

    match = RE_ADDR2LINE_LINE1.match(line1)
    if match is None:
        raise FafError(
            "Unexpected output from eu-addr2line: '{0}'".format(line1))

    if match.group(3) is None:
        funcname = match.group(1)
        srcfile = line2_srcfile
        srcline = line2_srcline
    else:
        funcname = match.group(6)
        srcfile = match.group(4)
        srcline = int(match.group(5))

        result.append((match.group(1), line2_srcfile, line2_srcline))

    result.append((funcname, srcfile, srcline))

    return result
Example #29
    def get_opsys_name(self, cmdline_opsys):
        """
        Get the correct opsys name from a user-supplied opsys name,
        or raise FafError if it is not available.
        """

        cmdline_opsys = cmdline_opsys.lower()
        if not cmdline_opsys in systems:
            raise FafError(
                "Operating system '{0}' does not exist".format(cmdline_opsys))

        return systems[cmdline_opsys].nice_name
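
A hedged usage sketch, assuming the usual pyfaf import paths and a populated plugin registry; the mapping of 'fedora' to the nice name 'Fedora' is an assumption for illustration:

from pyfaf.common import FafError
from pyfaf.opsys import systems

try:
    nice_name = systems["fedora"].nice_name  # e.g. "Fedora"
except KeyError:
    raise FafError("Operating system 'fedora' does not exist")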
Example #30
    def save_lob(self, name, data, overwrite=False, truncate=False):
        lobpath = self.get_lob_path(name)

        if not isinstance(data, bytes) and not hasattr(data, "read"):
            raise FafError(
                "Data must be either a bytestring or a file-like object")

        if not overwrite and os.path.isfile(lobpath):
            raise FafError("Lob '{0}' already exists".format(name))

        maxlen = self.__lobs__[name]

        with open(lobpath, "wb") as lob:
            if hasattr(data, "read"):
                if not truncate:
                    raise FafError(
                        "When saving from file, truncate must be enabled")

                self._write_lob_file(lob, data, maxlen)
            else:
                self._write_lob_bytes(lob, data, maxlen, truncate)