Example #1
    def sync(cls):
        """
        Execute Portage and Overlays sync
        """
        portdir = os.getenv("PORTDIR", "/usr/portage")
        portdir_lock_file = os.path.join(portdir, ".matter_sync.lock")

        print_info("synchronizing the repositories...")
        print_info("About to acquire %s..." % (portdir_lock_file, ))
        with open(portdir_lock_file, "a+") as lock_f:
            while True:
                try:
                    fcntl.flock(lock_f.fileno(), fcntl.LOCK_EX)
                    break
                except IOError as err:
                    if err.errno == errno.EINTR:
                        continue
                    raise

            sync_cmd = cls.PORTAGE_SYNC_CMD
            std_env = cls._build_standard_environment()
            exit_st = subprocess.call(sync_cmd, env=std_env)
            if exit_st != 0:
                return exit_st

            # overlays update
            overlay_cmd = cls.OVERLAYS_SYNC_CMD
            return subprocess.call(overlay_cmd, env=std_env)
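The locking idiom above, retrying flock() when interrupted by a signal, is worth isolating. A minimal standalone sketch of the same pattern follows (the lock path is made up); note that on Python 3.5+ PEP 475 retries EINTR automatically, so the loop mainly matters on older interpreters:

    import errno
    import fcntl

    def acquire_exclusive_lock(lock_f):
        # block until the exclusive lock is held, retrying on EINTR
        while True:
            try:
                fcntl.flock(lock_f.fileno(), fcntl.LOCK_EX)
                return
            except IOError as err:
                if err.errno != errno.EINTR:
                    raise

    with open("/tmp/example.lock", "a+") as lock_f:  # hypothetical lock path
        acquire_exclusive_lock(lock_f)
        # critical section; the lock is released when lock_f is closed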
Example #2
    def post_build(cls, spec, emerge_config):
        """
        Execute Portage post-build tasks.
        """
        print_info("executing post-build operations, please wait...")

        emerge_settings, emerge_trees, mtimedb = emerge_config
        if "yes" == emerge_settings.get("AUTOCLEAN"):
            build_args = list(cls._setup_build_args(spec))
            _action, opts, _files = parse_opts(build_args)
            unmerge(emerge_trees[emerge_settings["ROOT"]]["root_config"],
                    opts, "clean", [], mtimedb["ldpath"], autoclean=1)
Example #3
    def teardown(cls, executable_hook_f, cwd, exit_st):
        """
        Execute the post hook, exporting the build exit status
        through the MATTER_EXIT_STATUS environment variable.
        """
        hook_name = executable_hook_f.name
        if not hook_name.startswith("/"):
            # complete with current directory
            hook_name = os.path.join(cwd, hook_name)

        print_info("spawning post hook: %s, passing exit status: %d" % (
            hook_name,
            exit_st,
        ))
        env = cls._build_standard_environment()
        env["MATTER_EXIT_STATUS"] = str(exit_st)
        return subprocess.call([hook_name], env=env)
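A hedged usage sketch: teardown() only needs an object exposing a .name attribute, typically the open hook file, and matter_main() in Example #7 below calls it exactly this way (the path here is invented):

    import os

    with open("hooks/post.sh") as hook_f:  # hypothetical hook script
        rc = PackageBuilder.teardown(hook_f, os.getcwd(), 0)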
Example #4
    def setup(cls, executable_hook_f, cwd):
        """
        Refresh the environment via env-update, then execute
        the pre hook.
        """
        # ignore exit status
        subprocess.call(["env-update"])

        hook_name = executable_hook_f.name
        if not hook_name.startswith("/"):
            # complete with current directory
            hook_name = os.path.join(cwd, hook_name)

        print_info("spawning pre hook: %s" % (hook_name, ))
        return subprocess.call([hook_name],
                               env=cls._build_standard_environment())
Example #5
    def _commit_build_only(self, spec, packages):
        """
        Commit packages that have been built with -B.
        Just move the Portage-generated tbz2s to our PKGDIR.
        """
        repository = spec["repository"]
        matter_pkgdir = self._build_pkgdir(repository)

        print_info("committing build-only packages: %s, to PKGDIR: %s" % (
            ", ".join(sorted(packages)),
            matter_pkgdir,
        ))

        settings, _trees, _db = self.load_emerge_config()
        pkgdir = settings["PKGDIR"]

        exit_st = 0
        for package in packages:
            tbz2_atom = package + ".tbz2"
            source_path = os.path.join(pkgdir, tbz2_atom)
            if not os.path.isfile(source_path):
                print_warning("cannot find package tarball: %s" %
                              (source_path, ))
                exit_st = 1
                continue

            dest_path = os.path.join(matter_pkgdir, tbz2_atom)
            dest_dir = os.path.dirname(dest_path)
            try:
                os.makedirs(dest_dir)
            except OSError as err:
                if err.errno != errno.EEXIST:
                    raise

            try:
                shutil.move(source_path, dest_path)
            except shutil.Error as err:
                raise BaseBinaryPMS.RepositoryCommitError(
                    "cannot commit packages, generic error: %s" %
                    (repr(err), ))
            except (OSError, IOError) as err:
                raise BaseBinaryPMS.RepositoryCommitError(
                    "cannot commit packages, system error: %s" % (repr(err), ))

        return exit_st
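The try/except around os.makedirs() is the pre-Python-3.2 idiom for tolerating an already-existing directory; on modern interpreters the same intent is a single call:

    os.makedirs(dest_dir, exist_ok=True)  # equivalent to the EEXIST handling above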
Example #6
    def _commit(self, spec, packages):
        """
        Commit packages that have been merged into the system.
        """
        repository = spec["repository"]
        pkgdir = self._build_pkgdir(repository)
        print_info("committing packages: %s, to PKGDIR: %s" % (
            ", ".join(sorted(packages)), pkgdir,))

        env = os.environ.copy()
        env["PKGDIR"] = pkgdir
        exit_st = subprocess.call(
            ["quickpkg", "--include-config=y"] + [
                "=" + x for x in packages], env=env)
        if exit_st != 0:
            raise BaseBinaryPMS.RepositoryCommitError(
                "cannot commit packages, exit status: %d" % (
                exit_st,))
        return exit_st
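To make the command line concrete, here is how the quickpkg call above expands for two hypothetical packages, with PKGDIR redirected through the environment:

    import os
    import subprocess

    env = os.environ.copy()
    env["PKGDIR"] = "/var/lib/matter/packages"  # hypothetical PKGDIR
    packages = ["app-misc/foo-1.0", "app-misc/bar-2.1"]
    subprocess.call(
        ["quickpkg", "--include-config=y"] + ["=" + x for x in packages],
        env=env)
    # spawns: quickpkg --include-config=y =app-misc/foo-1.0 =app-misc/bar-2.1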
Example #7
def matter_main(binary_pms, nsargs, cwd, specs):
    """
    Main application code run after all the resources setup.
    """

    try:
        binary_pms.validate_system()
    except BaseBinaryPMS.SystemValidationError as err:
        print_error("%s" % (err,))
        return 1

    print_info("matter loaded, starting to scan particles, pid: %s" % (
        os.getpid(),))

    def _teardown(_exit_st):
        if nsargs.post:
            _rc = PackageBuilder.teardown(
                nsargs.post, cwd, _exit_st)
            if _exit_st == 0 and _rc != 0:
                _exit_st = _rc
        return _exit_st

    # setup
    if nsargs.pre:
        _rc = PackageBuilder.setup(nsargs.pre, cwd)
        if _rc != 0:
            return _teardown(_rc)

    # sync portage
    if nsargs.sync:
        _rc = PackageBuilder.sync()
        if _rc != 0 and not nsargs.sync_best_effort:
            return _teardown(_rc)

    exit_st = 0
    completed = collections.deque()
    not_found = collections.deque()
    not_installed = collections.deque()
    not_merged = collections.deque()
    uninstalled = collections.deque()
    missing_use = {}
    unstable_keywords = set()
    pmask_changes = set()
    license_changes = {}
    tainted_repositories = set()
    spec_count = 0
    tot_spec = len(specs)
    preserved_libs = False
    emerge_config = binary_pms.load_emerge_config()

    for spec in specs:

        spec_count += 1
        keep_going = spec["keep-going"] == "yes"
        local_completed = []
        local_uninstalled = []

        tot_pkgs = len(spec["packages"])
        for pkg_count, packages in enumerate(spec["packages"], 1):

            builder = PackageBuilder(
                binary_pms, emerge_config, packages,
                spec, spec_count, tot_spec, pkg_count, tot_pkgs,
                nsargs.pretend)
            _rc = builder.run()

            not_found.extend(builder.get_not_found_packages())
            not_installed.extend(
                builder.get_not_installed_packages())
            not_merged.extend(
                builder.get_not_merged_packages())
            pkg_uninstalled = builder.get_uninstalled_packages()
            uninstalled.extend(pkg_uninstalled)
            local_uninstalled.extend(pkg_uninstalled)

            # Merge at least the first layer of dicts.
            for k, v in builder.get_missing_use_packages().items():
                obj = missing_use.setdefault(k, {})
                obj.update(v)

            unstable_keywords.update(
                builder.get_needed_unstable_keywords())
            pmask_changes.update(
                builder.get_needed_package_mask_changes())

            # We need to merge the two dicts, not just update()
            # or we can lose the full set of licenses associated
            # to a single cpv.
            for k, v in builder.get_needed_license_changes().items():
                obj = license_changes.setdefault(k, set())
                obj.update(v)

            preserved_libs = binary_pms.check_preserved_libraries(
                emerge_config)

            if preserved_libs and not nsargs.disable_preserved_libs:
                # abort, library breakages detected
                exit_st = 1
                print_error(
                    "preserved libraries detected, aborting")
                break

            # ignore _rc, we may have built pkgs even if _rc != 0
            built_packages = builder.get_built_packages()
            if built_packages:
                print_info("built packages, in queue: %s" % (
                        " ".join(built_packages),))
                local_completed.extend(
                    [x for x in built_packages \
                         if x not in local_completed])
                tainted_repositories.add(spec["repository"])

            # make some room
            print_info("")
            if _rc < 0:
                # ignore warning and go ahead
                continue
            else:
                exit_st = _rc
                if not keep_going:
                    break

        # call post-build cleanup operations
        if local_completed or local_uninstalled:
            PackageBuilder.post_build(spec, emerge_config)

        completed.extend(
            [x for x in local_completed if x not in completed])
        # portage calls setcwd()
        os.chdir(cwd)

        if preserved_libs and not nsargs.disable_preserved_libs:
            # completely abort
            break

        if local_completed and nsargs.commit:
            _rc = binary_pms.commit(
                spec,
                local_completed)
            if exit_st == 0 and _rc != 0:
                exit_st = _rc
                if not keep_going:
                    break

        PackageBuilder.clear_caches(emerge_config)

    if tainted_repositories and nsargs.push and nsargs.commit:
        if preserved_libs and nsargs.disable_preserved_libs:
            # cannot push anyway
            print_warning("Preserved libraries detected, cannot push !")
        elif not preserved_libs:
            for repository in tainted_repositories:
                _rc = binary_pms.push(repository)
                if exit_st == 0 and _rc != 0:
                    exit_st = _rc

    # print summary
    print_generic("")
    print_generic("Summary")
    print_generic("Packages built:\n  %s" % (
        "\n  ".join(sorted(completed)),))
    print_generic("Packages not built:\n  %s" % (
        "\n  ".join(sorted(not_merged)),))
    print_generic("Packages not found:\n  %s" % (
        "\n  ".join(sorted(not_found)),))
    print_generic("Packages not installed:\n  %s" % (
        "\n  ".join(sorted(not_installed)),))
    print_generic("Packages uninstalled:\n  %s" % (
        "\n  ".join(sorted(uninstalled)),))

    if missing_use:
        print_generic("Packages not built due to missing USE flags:")
        for atom in sorted(missing_use.keys()):
            use_data = missing_use[atom]
            use_l = []
            for use in sorted(use_data["changes"]):
                if use_data["changes"][use]:
                    use_l.append(use)
                else:
                    use_l.append("-" + use)
            print_generic("%s %s" % (
                    use_data["cp:slot"], " ".join(use_l)))
        print_generic("")

    if unstable_keywords:
        print_generic("Packages not built due to missing unstable keywords:")
        for atom in sorted(unstable_keywords):
            print_generic("%s" % (atom,))
        print_generic("")

    if pmask_changes:
        print_generic("Packages not built due to needed package.mask changes:")
        for atom in sorted(pmask_changes):
            print_generic("%s" % (atom,))
        print_generic("")

    print_generic("Preserved libs: %s" % (
        preserved_libs,))
    print_generic("")

    return _teardown(exit_st)
Example #8
    def _pre_graph_filters(self, package, portdb, vardb):
        """
        Execute basic, pre-graph generation (dependencies calculation)
        filters against the package dependency to see if it's eligible
        for the graph.
        """
        allow_rebuild = self._params["rebuild"] == "yes"
        allow_not_installed = self._params["not-installed"] == "yes"
        allow_downgrade = self._params["downgrade"] == "yes"
        accepted = []

        # now determine the installed version.
        best_installed = portage.best(vardb.match(package, use_cache=0))
        if (not best_installed) and (not allow_not_installed):
            # package not installed
            print_error("package not installed: %s, ignoring this one" %
                        (package, ))
            self._not_installed_packages.append(package)
            return accepted

        if (not best_installed) and allow_not_installed:
            print_warning(
                "%s not installed, but 'not-installed: yes' provided" %
                (package, ))

        best_visibles = []
        try:
            best_visibles += portdb.xmatch("match-visible", package)
        except portage.exception.InvalidAtom:
            print_error("cannot match: %s, invalid atom" % (package, ))

        # map all the cpvs to their slots
        cpv_slot_map = {}
        for pkg in best_visibles:
            obj = cpv_slot_map.setdefault(pkg.slot, [])
            obj.append(pkg)

        # then pick the best for each slot
        del best_visibles[:]
        for slot, pkgs in cpv_slot_map.items():
            pkg = portage.best(pkgs)
            best_visibles.append(pkg)
        best_visibles.sort()  # deterministic is better

        if not best_visibles:
            # package not found, return error
            print_error("cannot match: %s, ignoring this one" % (package, ))
            self._not_found_packages.append(package)
            return accepted

        print_info("matched: %s for %s" % (
            ", ".join(best_visibles),
            package,
        ))

        for best_visible in best_visibles:

            cp = best_visible.cp
            slot = best_visible.slot
            cp_slot = "%s:%s" % (cp, slot)

            # determine the installed version for this cp:slot.
            # we know that among all the best_visibles there may be one
            # that is installed; the question is whether this is it.
            best_installed = portage.best(vardb.match(cp_slot, use_cache=0))
            if (not best_installed) and (not allow_not_installed):
                # package not installed
                print_warning("%s not installed, skipping" % (cp_slot, ))
                continue

            build_only = self._params["build-only"] == "yes"
            cmp_res = -1
            if best_installed:
                print_info("found installed: %s for %s" % (
                    best_installed,
                    package,
                ))
                # now compare
                # -1 if best_installed is older than best_visible
                # 1 if best_installed is newer than best_visible
                # 0 if they are equal
                cmp_res = portage.versions.pkgcmp(
                    portage.versions.pkgsplit(best_installed),
                    portage.versions.pkgsplit(best_visible))
            elif (not best_installed) and build_only:
                # package is not installed, and build-only
                # is provided. We assume that the package
                # is being built and added to repositories directly.
                # This means that we need to query binpms to know
                # about the current version.
                print_info("package is not installed, and 'build-only: yes'. "
                           "Asking the binpms about the package state.")
                best_available = self._binpms.best_available(cp_slot)
                print_info("found available: %s for %s" %
                           (best_available, cp_slot))
                if best_available:
                    cmp_res = portage.versions.pkgcmp(
                        portage.versions.pkgsplit(best_available),
                        portage.versions.pkgsplit(best_visible))

            is_rebuild = cmp_res == 0

            if (cmp_res == 1) and (not allow_downgrade):
                # downgrade in action and downgrade not allowed, aborting!
                print_warning("%s would be downgraded, %s to %s, ignoring" % (
                    cp_slot,
                    best_installed,
                    best_visible,
                ))
                continue

            if is_rebuild and (not allow_rebuild):
                # rebuild in action and rebuild not allowed, aborting!
                print_warning("%s would be rebuilt to %s, ignoring" % (
                    cp_slot,
                    best_visible,
                ))
                continue

            # at this point we can go ahead accepting package in queue
            print_info("package: %s [%s], accepted in queue" % (
                best_visible,
                cp_slot,
            ))
            accepted.append(best_visible)

        return accepted
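The comparison logic hinges on portage.versions.pkgcmp(), which takes two pkgsplit() results and compares them cmp()-style. A small illustration (requires a Portage environment; the version strings are made up):

    import portage.versions

    best_installed = "app-misc/foo-1.0"
    best_visible = "app-misc/foo-1.2"
    cmp_res = portage.versions.pkgcmp(
        portage.versions.pkgsplit(best_installed),
        portage.versions.pkgsplit(best_visible))
    # cmp_res < 0: installed is older (upgrade)
    # cmp_res == 0: same version (rebuild)
    # cmp_res > 0: installed is newer (downgrade)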
Example #9
    def run(self):
        """
        Execute Package building action.
        """
        header = self._build_execution_header_output()
        print_info(header + "spawning package build: %s" %
                   (" ".join(self._packages), ))

        std_env = self._build_standard_environment(
            repository=self._params["repository"])

        matter_package_names = " ".join(self._packages)
        std_env["MATTER_PACKAGE_NAMES"] = matter_package_names

        # run pkgpre, if any
        pkgpre = self._params["pkgpre"]
        if pkgpre is not None:
            print_info("spawning --pkgpre: %s" % (pkgpre, ))
            tmp_fd, tmp_path = mkstemp()
            with os.fdopen(tmp_fd, "wb") as tmp_f:
                with open(pkgpre, "rb") as pkgpre_f:
                    tmp_f.write(pkgpre_f.read())
            try:
                # now execute
                os.chmod(tmp_path, 0o700)
                exit_st = subprocess.call([tmp_path], env=std_env)
                if exit_st != 0:
                    return exit_st
            finally:
                os.remove(tmp_path)
                # data might have become stale
                self._binpms.clear_cache()

        dirs_cleanup = []
        exit_st = self._run_builder(dirs_cleanup)

        std_env["MATTER_BUILT_PACKAGES"] = " ".join(self._built_packages)
        std_env["MATTER_FAILED_PACKAGES"] = " ".join(self._not_merged_packages)
        std_env["MATTER_NOT_INSTALLED_PACKAGES"] = " ".join(
            self._not_installed_packages)
        std_env["MATTER_NOT_FOUND_PACKAGES"] = " ".join(
            self._not_found_packages)
        std_env["MATTER_UNINSTALLED_PACKAGES"] = " ".join(
            self._uninstalled_packages)

        print_info("builder terminated, exit status: %d" % (exit_st, ))

        # cleanup temporary directories registered on the queue
        for tmp_dir in dirs_cleanup:
            self.__cleanup_dir(tmp_dir)

        # run pkgpost, if any
        pkgpost = self._params["pkgpost"]
        if pkgpost is not None:
            print_info("spawning --pkgpost: %s" % (pkgpost, ))
            tmp_fd, tmp_path = mkstemp()
            with os.fdopen(tmp_fd, "wb") as tmp_f:
                with open(pkgpost, "rb") as pkgpost_f:
                    tmp_f.write(pkgpost_f.read())
            try:
                # now execute
                os.chmod(tmp_path, 0o700)
                post_exit_st = subprocess.call(
                    [tmp_path, str(exit_st)], env=std_env)
                if post_exit_st != 0:
                    return post_exit_st
            finally:
                os.remove(tmp_path)
                # data might have become stale
                self._binpms.clear_cache()

        return exit_st
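The pkgpre and pkgpost branches share one pattern: copy the hook to a private temporary file, make it executable, run it, and always remove it. A standalone sketch of that pattern (the helper name is ours):

    import os
    import shutil
    import subprocess
    from tempfile import mkstemp

    def run_hook(hook_path, env, extra_args=()):
        tmp_fd, tmp_path = mkstemp()
        try:
            with os.fdopen(tmp_fd, "wb") as tmp_f:
                with open(hook_path, "rb") as hook_f:
                    shutil.copyfileobj(hook_f, tmp_f)
            os.chmod(tmp_path, 0o700)
            return subprocess.call([tmp_path] + list(extra_args), env=env)
        finally:
            os.remove(tmp_path)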
Example #10
    def _run_builder(self, dirs_cleanup_queue):
        """
        This method is called by _run and executes the whole package build
        logic, including validation of the constraints passed via argv.
        NOTE: negative errors indicate warnings that can be skipped.
        """
        if self._packages:
            first_package = self._packages[0]
        else:
            first_package = "_empty_"

        log_dir = mkdtemp(
            prefix="matter_build.",
            suffix="." + first_package.replace("/", "_").lstrip("<>=~"))
        dirs_cleanup_queue.append(log_dir)

        emerge_settings, emerge_trees, mtimedb = self._emerge_config

        # reset settings to their original state; some variables will be
        # reconfigured while others may remain saved due to backup_changes().
        emerge_settings.unlock()
        emerge_settings.reset()
        emerge_settings.lock()

        # Set up stable/unstable keywords. This must be done on
        # emerge_settings because the reference is spread everywhere
        # in emerge_trees.
        # This is not thread-safe, but Portage isn't either, so
        # who cares!
        # ACCEPT_KEYWORDS is not saved, and is reset every time by the
        # reset() call above.
        portdb = emerge_trees[emerge_settings["ROOT"]]["porttree"].dbapi

        self._setup_keywords(portdb, emerge_settings)

        portdb.freeze()
        vardb = emerge_trees[emerge_settings["ROOT"]]["vartree"].dbapi
        vardb.settings.unlock()
        vardb.settings["PORT_LOGDIR"] = log_dir
        vardb.settings.backup_changes("PORT_LOGDIR")
        vardb.settings.lock()

        # Load the most current variables from /etc/profile.env, which
        # has been re-generated by the env-update call in _run()
        emerge_settings.unlock()
        emerge_settings.reload()
        emerge_settings.regenerate()
        emerge_settings.lock()

        sets = self._get_sets_mod()  # can be None
        sets_conf = None
        if sets is not None:
            sets_conf = sets.load_default_config(
                emerge_settings,
                emerge_trees[emerge_settings["ROOT"]])

        packages = []
        # execute basic, pre-graph generation filters against each
        # package dependency in self._packages.
        # This is just fast pruning of the obviously ineligible.
        for package in self._packages:
            expanded_pkgs = []

            # package sets support
            if package.startswith("@") and sets_conf:
                try:
                    set_pkgs = sets_conf.getSetAtoms(package[1:])
                    expanded_pkgs.extend(sorted(set_pkgs))
                except sets.PackageSetNotFound:
                    # make it fail, add set directly
                    expanded_pkgs.append(package)
            else:
                expanded_pkgs.append(package)

            for exp_pkg in expanded_pkgs:
                accepted = self._pre_graph_filters(
                    exp_pkg, portdb, vardb)
                for best_visible in accepted:
                    packages.append((exp_pkg, best_visible))

        if not packages:
            print_warning("No remaining packages in queue, aborting.")
            return 0

        # at this point we can go ahead building packages
        print_info("starting to build:")
        for package, best_visible in packages:
            print_info(": %s -> %s" % (
                    package, best_visible,))

        if not getcolor():
            portage.output.nocolor()

        # non interactive properties, this is not really required
        # accept-properties just sets os.environ...
        build_args = list(self._setup_build_args(self._params))
        build_args += ["=" + best_v for _x, best_v in packages]

        myaction, myopts, myfiles = parse_opts(build_args)
        adjust_configs(myopts, emerge_trees)
        apply_priorities(emerge_settings)

        spinner = stdout_spinner()
        if "--quiet" in myopts:
            spinner.update = spinner.update_basic
        elif "--nospinner" in myopts:
            spinner.update = spinner.update_basic
        if emerge_settings.get("TERM") == "dumb" or not is_stdout_a_tty():
            spinner.update = spinner.update_basic

        print_info("emerge args: %s" % (" ".join(build_args),))

        params = create_depgraph_params(myopts, myaction)
        success, graph, favorites = backtrack_depgraph(emerge_settings,
            emerge_trees, myopts, params, myaction, myfiles, spinner)

        if not success:
            # print issues to stdout and give up
            print_warning("dependencies calculation failed, aborting")
            graph.display_problems()

            # try to collect some info about the failure
            bt_config = (graph.get_backtrack_infos() or {}).get("config", {})
            for k, v in bt_config.items():
                if k == "needed_use_config_changes":
                    for tup in v:
                        try:
                            pkg, (new_use, new_changes) = tup
                        except (ValueError, TypeError):
                            print_error(
                                "unsupported needed_use_config_changes: %s" % (
                                    tup,))
                            continue
                        obj = self._missing_use_packages.setdefault(
                            "%s" % (pkg.cpv,), {})
                        obj["cp:slot"] = "%s" % (pkg.slot_atom,)
                        changes = obj.setdefault("changes", {})
                        changes.update(copy.deepcopy(new_changes))
                elif k == "needed_unstable_keywords":
                    for pkg in v:
                        self._needed_unstable_keywords.add("%s" % (pkg.cpv,))
                elif k == "needed_p_mask_changes":
                    for pkg in v:
                        self._needed_package_mask_changes.add(
                            "%s" % (pkg.cpv,))
                elif k == "needed_license_changes":
                    for pkg, lics in v:
                        obj = self._needed_license_changes.setdefault(
                            "%s" % (pkg.cpv,), set())
                        obj.update(lics)
                else:
                    print_warning("unsupported backtrack info: %s -> %s" % (
                            k, v,))

            return 0
        print_info("dependency graph generated successfully")

        real_queue = self._post_graph_filters(graph, vardb, portdb)
        if real_queue is None:
            # post-graph filters not passed, giving up
            return 0

        merge_queue = [x for x in real_queue if x.operation == "merge"]
        unmerge_queue = [x for x in real_queue if x.operation == "uninstall"]
        if merge_queue:
            print_info("about to build the following packages:")
            for pkg in merge_queue:
                print_info("  %s" % (pkg.cpv,))
        if unmerge_queue:
            print_info("about to uninstall the following packages:")
            for pkg in unmerge_queue:
                print_info("  %s" % (pkg.cpv,))

        if self._pretend:
            print_info("portage spawned with --pretend, done!")
            return 0

        # re-calling action_build(), deps are re-calculated though
        validate_ebuild_environment(emerge_trees)
        mergetask = Scheduler(emerge_settings, emerge_trees, mtimedb,
            myopts, spinner, favorites=favorites,
            graph_config=graph.schedulerGraph())
        del graph
        self.clear_caches(self._emerge_config)
        retval = mergetask.merge()

        not_merged = []
        real_queue_map = dict((pkg.cpv, pkg) for pkg in real_queue)
        failed_package = None
        if retval != 0:
            merge_list = mtimedb.get("resume", {}).get("mergelist", [])
            for _merge_type, _merge_root, merge_atom, _merge_act in merge_list:
                merge_atom = "%s" % (merge_atom,)
                if failed_package is None:
                    # we consider the first encountered package the one
                    # that failed. It makes sense since packages are built
                    # serially as of today.
                    # Also, the package object must be available in our
                    # package queue, so grab it from there.
                    failed_package = real_queue_map.get(merge_atom)
                not_merged.append(merge_atom)
                self._not_merged_packages.append(merge_atom)

        for pkg in real_queue:
            cpv = pkg.cpv
            if not cpv:
                print_warning("package: %s, has broken cpv: '%s', ignoring" % (
                        pkg, cpv,))
            elif cpv not in not_merged:
                if pkg.operation == "merge":
                    # add to build queue
                    print_info("package: %s, successfully built" % (cpv,))
                    self._built_packages.append("%s" % (cpv,))
                else:
                    # add to uninstall queue
                    print_info("package: %s, successfully uninstalled" % (cpv,))
                    self._uninstalled_packages.append("%s" % (cpv,))

        post_emerge(myaction, myopts, myfiles, emerge_settings["ROOT"],
            emerge_trees, mtimedb, retval)

        subprocess.call(["env-update"])

        if failed_package is not None:
            print_warning("failed package: %s::%s" % (failed_package.cpv,
                failed_package.repo,))

        if self._params["buildfail"] and (failed_package is not None):

            std_env = self._build_standard_environment(
                repository=self._params["repository"])
            std_env["MATTER_PACKAGE_NAMES"] = " ".join(self._packages)
            std_env["MATTER_PORTAGE_FAILED_PACKAGE_NAME"] = failed_package.cpv
            std_env["MATTER_PORTAGE_REPOSITORY"] = failed_package.repo
            # call pkgfail hook if defined
            std_env["MATTER_PORTAGE_BUILD_LOG_DIR"] = os.path.join(log_dir,
                "build")

            buildfail = self._params["buildfail"]
            print_info("spawning buildfail: %s" % (buildfail,))
            tmp_fd, tmp_path = mkstemp()
            with os.fdopen(tmp_fd, "wb") as tmp_f:
                with open(buildfail, "rb") as buildfail_f:
                    tmp_f.write(buildfail_f.read())
            try:
                # now execute
                os.chmod(tmp_path, 0o700)
                exit_st = subprocess.call([tmp_path], env=std_env)
                if exit_st != 0:
                    return exit_st
            finally:
                os.remove(tmp_path)

        print_info("portage spawned, return value: %d" % (retval,))
        return retval
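The resume handling above assumes each entry in Portage's mtimedb "resume" mergelist is a 4-item sequence; illustratively (values invented):

    merge_list = [["ebuild", "/", "app-misc/foo-1.0", "merge"]]
    for _merge_type, _merge_root, merge_atom, _merge_act in merge_list:
        print(merge_atom)  # "app-misc/foo-1.0"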
Example #11
    def _post_graph_filters(self, graph, vardb, portdb):
        """
        Execute post-graph generation (dependencies calculation)
        filters against the package dependencies to see if they're
        eligible for building.
        """
        # list of _emerge.Package.Package objects
        package_queue = graph.altlist()

        allow_soft_blocker = self._params["soft-blocker"] == "yes"
        if not allow_soft_blocker:
            blockers = [x for x in package_queue if isinstance(x, Blocker)]
            if blockers:
                # sorry, we're not allowed to have soft-blockers
                print_warning("the following soft-blockers were found:")
                print_warning("\n  ".join([x.atom for x in blockers]))
                print_warning("but 'soft-blocker: no' in config, aborting")
                return None

        # filter out blockers
        real_queue = [x for x in package_queue if not isinstance(x, Blocker)]
        # filter out broken or corrupted objects
        real_queue = [x for x in real_queue if x.cpv]

        # package_queue can also contain _emerge.Blocker.Blocker objects
        # not exposing .cpv field (but just .cp).
        dep_list = []
        for pobj in package_queue:
            if isinstance(pobj, Blocker):
                # blocker, list full atom
                dep_list.append(pobj.atom)
                continue
            cpv = pobj.cpv
            repo = pobj.repo
            if repo:
                repo = "::" + repo
            if cpv:
                dep_list.append(cpv + repo)
            else:
                print_warning("attention, %s has broken cpv: '%s', ignoring" %
                              (
                                  pobj,
                                  cpv,
                              ))

        # if 'dependencies: no' is set, we must validate that the graph
        # did not pull in anything beyond the requested packages
        if (self._params["dependencies"] == "no") \
                and (len(package_queue) > 1):
            deps = "\n  ".join(dep_list)
            print_warning("dependencies pulled in:")
            print_warning(deps)
            print_warning("but 'dependencies: no' in config, aborting")
            return None

        # protect against unwanted package unmerges
        if self._params["unmerge"] == "no":
            unmerges = [x for x in real_queue if x.operation == "uninstall"]
            if unmerges:
                deps = "\n  ".join([x.cpv for x in unmerges])
                print_warning("found package unmerges:")
                print_warning(deps)
                print_warning("but 'unmerge: no' in config, aborting")
                return None

        # inspect use flags changes
        allow_new_useflags = self._params["new-useflags"] == "yes"
        allow_removed_useflags = \
            self._params["removed-useflags"] == "yes"

        use_flags_give_up = False
        if (not allow_new_useflags) or (not allow_removed_useflags):
            # checking for use flag changes
            for pkg in real_queue:
                # frozenset
                enabled_flags = pkg.use.enabled
                inst_atom = portage.best(
                    vardb.match(pkg.slot_atom, use_cache=0))
                if not inst_atom:
                    # new package, ignore check
                    continue
                installed_flags = frozenset(
                    vardb.aux_get(inst_atom, ["USE"])[0].split())

                new_flags = enabled_flags - installed_flags
                removed_flags = installed_flags - enabled_flags

                if (not allow_new_useflags) and new_flags:
                    print_warning("ouch: %s wants these new USE flags: %s" % (
                        pkg.cpv + "::" + pkg.repo,
                        " ".join(sorted(new_flags)),
                    ))
                    use_flags_give_up = True
                if (not allow_removed_useflags) and removed_flags:
                    print_warning("ouch: %s has these USE flags removed: %s" %
                                  (
                                      pkg.cpv + "::" + pkg.repo,
                                      " ".join(sorted(removed_flags)),
                                  ))
                    use_flags_give_up = True

        if use_flags_give_up:
            print_warning("cannot continue due to unmet "
                          "USE flags constraint")
            return None

        allow_downgrade = self._params["downgrade"] == "yes"
        # check the whole queue against downgrade directive
        if not allow_downgrade:
            allow_downgrade_give_ups = []
            for pkg in real_queue:
                inst_atom = portage.best(
                    vardb.match(pkg.slot_atom, use_cache=0))
                cmp_res = -1
                if inst_atom:
                    # -1 if inst_atom is older than pkg.cpv
                    # 1 if inst_atom is newer than pkg.cpv
                    # 0 if they are equal
                    cmp_res = portage.versions.pkgcmp(
                        portage.versions.pkgsplit(inst_atom),
                        portage.versions.pkgsplit(pkg.cpv))
                if cmp_res > 0:
                    allow_downgrade_give_ups.append((inst_atom, pkg.cpv))

            if allow_downgrade_give_ups:
                print_warning("cannot continue due to package "
                              "downgrade not allowed for:")
                for inst_atom, avail_atom in allow_downgrade_give_ups:
                    print_warning("  installed: %s | wanted: %s" % (
                        inst_atom,
                        avail_atom,
                    ))
                return None

        changing_repo_pkgs = []
        for pkg in real_queue:
            wanted_repo = pkg.repo
            inst_atom = portage.best(vardb.match(pkg.slot_atom, use_cache=0))
            if not inst_atom:
                # not installed, nothing to compare against
                continue
            current_repo = vardb.aux_get(inst_atom, ["repository"])[0]
            if current_repo:
                if current_repo != wanted_repo:
                    changing_repo_pkgs.append(
                        (pkg.cpv, pkg.slot, current_repo, wanted_repo))

        if changing_repo_pkgs:
            print_warning("")
            print_warning(
                "Attention, packages are moving across SPM repositories:")
            for pkg_atom, pkg_slot, c_repo, w_repo in changing_repo_pkgs:
                print_warning("  %s:%s [%s->%s]" % (
                    pkg_atom,
                    pkg_slot,
                    c_repo,
                    w_repo,
                ))
            print_warning("")

        allow_spm_repo_change = self._params["spm-repository-change"] \
            == "yes"
        allow_spm_repo_change_if_ups = \
            self._params["spm-repository-change-if-upstreamed"] == "yes"

        if (not allow_spm_repo_change) and allow_spm_repo_change_if_ups:
            print_info("SPM repository change allowed if the original "
                       "repository does no longer contain "
                       "current packages.")

            # check if source repository still contains the package
            # in this case, set allow_spm_repo_change to True
            _allow = True
            for pkg_atom, pkg_slot, c_repo, w_repo in changing_repo_pkgs:
                pkg_key = portage.dep.dep_getkey("=%s" % (pkg_atom, ))
                pkg_target = "%s:%s::%s" % (pkg_key, pkg_slot, c_repo)
                pkg_match = portdb.xmatch("bestmatch-visible", pkg_target)
                if pkg_match:
                    # package still available in source repo
                    _allow = False
                    print_warning("  %s:%s, still in repo: %s" % (
                        pkg_atom,
                        pkg_slot,
                        c_repo,
                    ))
                    # do not break, print all the list
                    # break

            if _allow and changing_repo_pkgs:
                print_info(
                    "current packages are no longer in their "
                    "original repository, SPM repository change allowed.")
                allow_spm_repo_change = True

        if changing_repo_pkgs and (not allow_spm_repo_change):
            print_warning("cannot continue due to unmet SPM repository "
                          "change constraint")
            return None

        print_info("USE flags constraints are met for all "
                   "the queued packages")
        return real_queue
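The USE-flag inspection above reduces to set differences between frozensets; a tiny illustration with invented flags:

    enabled_flags = frozenset(["ssl", "ipv6", "caps"])  # what the build wants
    installed_flags = frozenset(["ssl", "pam"])         # what is installed
    new_flags = enabled_flags - installed_flags         # {"ipv6", "caps"}
    removed_flags = installed_flags - enabled_flags     # {"pam"}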
Ejemplo n.º 18
0
    def _pre_graph_filters(self, package, portdb, vardb):
        """
        Execute basic, pre-graph generation (dependencies calculation)
        filters against the package dependency to see if it's eligible
        for the graph.
        """
        allow_rebuild = self._params["rebuild"] == "yes"
        allow_not_installed = self._params["not-installed"] == "yes"
        allow_downgrade = self._params["downgrade"] == "yes"
        accepted = []

        # now determine what's the installed version.
        best_installed = portage.best(vardb.match(package, use_cache=0))
        if (not best_installed) and (not allow_not_installed):
            # package not installed
            print_error("package not installed: %s, ignoring this one" % (
                    package,))
            self._not_installed_packages.append(package)
            return accepted

        if (not best_installed) and allow_not_installed:
            print_warning(
                "%s not installed, but 'not-installed: yes' provided" % (
                    package,))

        best_visibles = []
        try:
            best_visibles += portdb.xmatch("match-visible", package)
        except portage.exception.InvalidAtom:
            print_error("cannot match: %s, invalid atom" % (package,))

        # map all the cpvs to their slots
        cpv_slot_map = {}
        for pkg in best_visibles:
            obj = cpv_slot_map.setdefault(pkg.slot, [])
            obj.append(pkg)

        # then pick the best for each slot
        del best_visibles[:]
        for slot, pkgs in cpv_slot_map.items():
            pkg = portage.best(pkgs)
            best_visibles.append(pkg)
        best_visibles.sort()  # deterministic is better

        if not best_visibles:
            # package not found, return error
            print_error("cannot match: %s, ignoring this one" % (package,))
            self._not_found_packages.append(package)
            return accepted

        print_info("matched: %s for %s" % (", ".join(best_visibles), package,))

        for best_visible in best_visibles:

            cp = best_visible.cp
            slot = best_visible.slot
            cp_slot = "%s:%s" % (cp, slot)

            # determine what's the installed version.
            # we know that among all the best_visibles, there is one that
            # is installed. The question is whether we got it now.
            best_installed = portage.best(vardb.match(cp_slot, use_cache=0))
            if (not best_installed) and (not allow_not_installed):
                # package not installed
                print_warning("%s not installed, skipping" % (cp_slot,))
                continue

            build_only = self._params["build-only"] == "yes"
            cmp_res = -1
            if best_installed:
                print_info("found installed: %s for %s" % (
                        best_installed, package,))
                # now compare
                # -1 if best_installed is older than best_visible
                # 1 if best_installed is newer than best_visible
                # 0 if they are equal
                cmp_res = portage.versions.pkgcmp(
                    portage.versions.pkgsplit(best_installed),
                    portage.versions.pkgsplit(best_visible))
            elif (not best_installed) and build_only:
                # package is not installed, and build-only
                # is provided. We assume that the package
                # is being built and added to repositories directly.
                # This means that we need to query binpms to know
                # about the current version.
                print_info("package is not installed, and 'build-only: yes'. "
                           "Asking the binpms about the package state.")
                best_available = self._binpms.best_available(cp_slot)
                print_info("found available: %s for %s" % (
                        best_available, cp_slot))
                if best_available:
                    cmp_res = portage.versions.pkgcmp(
                        portage.versions.pkgsplit(best_available),
                        portage.versions.pkgsplit(best_visible))

            is_rebuild = cmp_res == 0

            if (cmp_res == 1) and (not allow_downgrade):
                # downgrade in action and downgrade not allowed, aborting!
                print_warning(
                    "%s would be downgraded, %s to %s, ignoring" % (
                        cp_slot, best_installed, best_visible,))
                continue

            if is_rebuild and (not allow_rebuild):
                # rebuild in action and rebuild not allowed, aborting!
                print_warning(
                    "%s would be rebuilt to %s, ignoring" % (
                        cp_slot, best_visible,))
                continue

            # at this point we can go ahead accepting package in queue
            print_info("package: %s [%s], accepted in queue" % (
                    best_visible, cp_slot,))
            accepted.append(best_visible)

        return accepted
Ejemplo n.º 19
0
    def _post_graph_filters(self, graph, vardb, portdb):
        """
        Execute post-graph generation (dependencies calculation)
        filters against the package dependencies to see if they're
        eligible for building.
        """
        # list of _emerge.Package.Package objects
        package_queue = graph.altlist()

        allow_soft_blocker = self._params["soft-blocker"] == "yes"
        if not allow_soft_blocker:
            blockers = [x for x in package_queue if isinstance(x, Blocker)]
            if blockers:
                # sorry, we're not allowed to have soft-blockers
                print_warning("the following soft-blockers were found:")
                print_warning("\n  ".join([x.atom for x in blockers]))
                print_warning("but 'soft-blocker: no' in config, aborting")
                return None

        # filter out blockers
        real_queue = [x for x in package_queue if not isinstance(
                x, Blocker)]
        # filter out broken or corrupted objects
        real_queue = [x for x in real_queue if x.cpv]

        # package_queue can also contain _emerge.Blocker.Blocker objects
        # not exposing .cpv field (but just .cp).
        dep_list = []
        for pobj in package_queue:
            if isinstance(pobj, Blocker):
                # blocker, list full atom
                dep_list.append(pobj.atom)
                continue
            cpv = pobj.cpv
            repo = pobj.repo
            if repo:
                repo = "::" + repo
            if cpv:
                dep_list.append(cpv+repo)
            else:
                print_warning(
                    "attention, %s has broken cpv: '%s', ignoring" % (
                        pobj, cpv,))

        # calculate dependencies, if --dependencies is not enabled
        # because we have to validate it
        if (self._params["dependencies"] == "no") \
                and (len(package_queue) > 1):
            deps = "\n  ".join(dep_list)
            print_warning("dependencies pulled in:")
            print_warning(deps)
            print_warning("but 'dependencies: no' in config, aborting")
            return None

        # protect against unwanted package unmerges
        if self._params["unmerge"] == "no":
            unmerges = [x for x in real_queue if x.operation == "uninstall"]
            if unmerges:
                deps = "\n  ".join([x.cpv for x in unmerges])
                print_warning("found package unmerges:")
                print_warning(deps)
                print_warning("but 'unmerge: no' in config, aborting")
                return None

        # inspect use flags changes
        allow_new_useflags = self._params["new-useflags"] == "yes"
        allow_removed_useflags = \
            self._params["removed-useflags"] == "yes"

        use_flags_give_up = False
        if (not allow_new_useflags) or (not allow_removed_useflags):
            # checking for use flag changes
            for pkg in real_queue:
                # frozenset
                enabled_flags = pkg.use.enabled
                inst_atom = portage.best(
                    vardb.match(pkg.slot_atom, use_cache=0))
                if not inst_atom:
                    # new package, ignore check
                    continue
                installed_flags = frozenset(
                    vardb.aux_get(inst_atom, ["USE"])[0].split())

                new_flags = enabled_flags - installed_flags
                removed_flags = installed_flags - enabled_flags

                if (not allow_new_useflags) and new_flags:
                    print_warning(
                        "ouch: %s wants these new USE flags: %s" % (
                            pkg.cpv+"::"+pkg.repo,
                            " ".join(sorted(new_flags)),))
                    use_flags_give_up = True
                if (not allow_removed_useflags) and removed_flags:
                    print_warning(
                        "ouch: %s has these USE flags removed: %s" % (
                            pkg.cpv+"::"+pkg.repo,
                        " ".join(sorted(removed_flags)),))
                    use_flags_give_up = True

        if use_flags_give_up:
            print_warning("cannot continue due to unmet "
                          "USE flags constraint")
            return None

        allow_downgrade = self._params["downgrade"] == "yes"
        # check the whole queue against downgrade directive
        if not allow_downgrade:
            allow_downgrade_give_ups = []
            for pkg in real_queue:
                inst_atom = portage.best(
                    vardb.match(pkg.slot_atom, use_cache=0))
                cmp_res = -1
                if inst_atom:
                    # -1 if inst_atom is older than pkg.cpv
                    # 1 if inst_atom is newer than pkg.cpv
                    # 0 if they are equal
                    cmp_res = portage.versions.pkgcmp(
                        portage.versions.pkgsplit(inst_atom),
                        portage.versions.pkgsplit(pkg.cpv))
                if cmp_res > 0:
                    allow_downgrade_give_ups.append((inst_atom, pkg.cpv))

            if allow_downgrade_give_ups:
                print_warning(
                    "cannot continue due to package "
                    "downgrade not allowed for:")
                for inst_atom, avail_atom in allow_downgrade_give_ups:
                    print_warning("  installed: %s | wanted: %s" % (
                        inst_atom, avail_atom,))
                return None

        changing_repo_pkgs = []
        for pkg in real_queue:
            wanted_repo = pkg.repo
            inst_atom = portage.best(
                vardb.match(pkg.slot_atom, use_cache=0))
            current_repo = vardb.aux_get(inst_atom, ["repository"])[0]
            if current_repo:
                if current_repo != wanted_repo:
                    changing_repo_pkgs.append(
                        (pkg.cpv, pkg.slot, current_repo, wanted_repo))

        if changing_repo_pkgs:
            print_warning("")
            print_warning(
                "Attention, packages are moving across SPM repositories:")
            for pkg_atom, pkg_slot, c_repo, w_repo in changing_repo_pkgs:
                print_warning("  %s:%s [%s->%s]" % (pkg_atom, pkg_slot,
                    c_repo, w_repo,))
            print_warning("")

        allow_spm_repo_change = self._params["spm-repository-change"] \
            == "yes"
        allow_spm_repo_change_if_ups = \
            self._params["spm-repository-change-if-upstreamed"] == "yes"

        if (not allow_spm_repo_change) and allow_spm_repo_change_if_ups:
            print_info("SPM repository change allowed if the original "
                       "repository does no longer contain "
                       "current packages.")

            # check if source repository still contains the package
            # in this case, set allow_spm_repo_change to True
            _allow = True
            for pkg_atom, pkg_slot, c_repo, w_repo in changing_repo_pkgs:
                pkg_key = portage.dep.dep_getkey("=%s" % (pkg_atom,))
                pkg_target = "%s:%s::%s" % (
                    pkg_key, pkg_slot, c_repo)
                pkg_match = portdb.xmatch("bestmatch-visible", pkg_target)
                if pkg_match:
                    # package still available in source repo
                    _allow = False
                    print_warning("  %s:%s, still in repo: %s" % (
                        pkg_atom, pkg_slot, c_repo,))
                    # do not break, print all the list
                    # break

            if _allow and changing_repo_pkgs:
                print_info(
                    "current packages are no longer in their "
                    "original repository, SPM repository change allowed.")
                allow_spm_repo_change = True

        if changing_repo_pkgs and (not allow_spm_repo_change):
            print_warning(
                "cannot continue due to unmet SPM repository "
                "change constraint")
            return None

        print_info("USE flags constraints are met for all "
                   "the queued packages")
        return real_queue
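
A quick sketch of the version comparison driving the downgrade check above, assuming a machine with Portage installed; the cpv strings are made up for illustration:

# Sketch: pkgcmp() semantics used by the downgrade filter (hypothetical cpvs).
from portage.versions import pkgcmp, pkgsplit

installed = "app-misc/foo-2.0"   # hypothetical installed cpv
available = "app-misc/foo-1.5"   # hypothetical cpv coming from the build queue

# pkgcmp() returns 1 if the first argument is newer, -1 if older, 0 if equal.
cmp_res = pkgcmp(pkgsplit(installed), pkgsplit(available))
if cmp_res > 0:
    print("%s -> %s would be a downgrade" % (installed, available))
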
Ejemplo n.º 20
0
    def _run_builder(self, dirs_cleanup_queue):
        """
        This method is called by _run and executes the whole package build
        logic, including constraints validation given by argv parameters.
        NOTE: negative errors indicate warnings that can be skipped.
        """
        if self._packages:
            first_package = self._packages[0]
        else:
            first_package = "_empty_"

        log_dir = mkdtemp(prefix="matter_build.",
                          suffix="." +
                          first_package.replace("/", "_").lstrip("<>=~"))
        dirs_cleanup_queue.append(log_dir)

        emerge_settings, emerge_trees, mtimedb = self._emerge_config

        # Reset settings to their original state: some variables will be
        # reconfigured below, while others may remain saved due to
        # backup_changes().
        emerge_settings.unlock()
        emerge_settings.reset()
        emerge_settings.lock()

        # Setup stable/unstable keywords, must be done on
        # emerge_settings because the reference is spread everywhere
        # in emerge_trees.
        # This is not thread-safe, but Portage isn't either, so
        # who cares!
        # ACCEPT_KEYWORDS is not saved and reset every time by the
        # reset() call above.
        portdb = emerge_trees[emerge_settings["ROOT"]]["porttree"].dbapi

        self._setup_keywords(portdb, emerge_settings)

        portdb.freeze()
        vardb = emerge_trees[emerge_settings["ROOT"]]["vartree"].dbapi
        vardb.settings.unlock()
        vardb.settings["PORT_LOGDIR"] = log_dir
        vardb.settings.backup_changes("PORT_LOGDIR")
        vardb.settings.lock()

        # Load the most current variables from /etc/profile.env, which
        # has been re-generated by the env-update call in _run()
        emerge_settings.unlock()
        emerge_settings.reload()
        emerge_settings.regenerate()
        emerge_settings.lock()

        sets = self._get_sets_mod()  # can be None
        sets_conf = None
        if sets is not None:
            sets_conf = sets.load_default_config(
                emerge_settings, emerge_trees[emerge_settings["ROOT"]])

        packages = []
        # execute basic, pre-graph generation filters against each
        # package dependency in self._packages.
        # This is just fast pruning of the obviously invalid entries.
        for package in self._packages:
            expanded_pkgs = []

            # package sets support
            if package.startswith("@") and sets_conf:
                try:
                    set_pkgs = sets_conf.getSetAtoms(package[1:])
                    expanded_pkgs.extend(sorted(set_pkgs))
                except sets.PackageSetNotFound:
                    # make it fail, add set directly
                    expanded_pkgs.append(package)
            else:
                expanded_pkgs.append(package)

            for exp_pkg in expanded_pkgs:
                accepted = self._pre_graph_filters(exp_pkg, portdb, vardb)
                for best_visible in accepted:
                    packages.append((exp_pkg, best_visible))

        if not packages:
            print_warning("No remaining packages in queue, aborting.")
            return 0

        # at this point we can go ahead building packages
        print_info("starting to build:")
        for package, best_visible in packages:
            print_info(": %s -> %s" % (
                package,
                best_visible,
            ))

        if not getcolor():
            portage.output.nocolor()

        # Non-interactive properties; this is not strictly required,
        # accept-properties just sets os.environ...
        build_args = list(self._setup_build_args(self._params))
        build_args += ["=" + best_v for _x, best_v in packages]

        myaction, myopts, myfiles = parse_opts(build_args)
        adjust_configs(myopts, emerge_trees)
        apply_priorities(emerge_settings)

        spinner = stdout_spinner()
        if "--quiet" in myopts:
            spinner.update = spinner.update_basic
        elif "--nospinner" in myopts:
            spinner.update = spinner.update_basic
        if emerge_settings.get("TERM") == "dumb" or not is_stdout_a_tty():
            spinner.update = spinner.update_basic

        print_info("emerge args: %s" % (" ".join(build_args), ))

        params = create_depgraph_params(myopts, myaction)
        success, graph, favorites = backtrack_depgraph(emerge_settings,
                                                       emerge_trees, myopts,
                                                       params, myaction,
                                                       myfiles, spinner)

        if not success:
            # print issues to stdout and give up
            print_warning("dependencies calculation failed, aborting")
            graph.display_problems()

            # try to collect some info about the failure
            bt_config = (graph.get_backtrack_infos() or {}).get("config", {})
            for k, v in bt_config.items():
                if k == "needed_use_config_changes":
                    for tup in v:
                        try:
                            pkg, (new_use, new_changes) = tup
                        except (ValueError, TypeError):
                            print_error(
                                "unsupported needed_use_config_changes: %s" %
                                (tup, ))
                            continue
                        obj = self._missing_use_packages.setdefault(
                            "%s" % (pkg.cpv, ), {})
                        obj["cp:slot"] = "%s" % (pkg.slot_atom, )
                        changes = obj.setdefault("changes", {})
                        changes.update(copy.deepcopy(new_changes))
                elif k == "needed_unstable_keywords":
                    for pkg in v:
                        self._needed_unstable_keywords.add("%s" % (pkg.cpv, ))
                elif k == "needed_p_mask_changes":
                    for pkg in v:
                        self._needed_package_mask_changes.add("%s" %
                                                              (pkg.cpv, ))
                elif k == "needed_license_changes":
                    for pkg, lics in v:
                        obj = self._needed_license_changes.setdefault(
                            "%s" % (pkg.cpv, ), set())
                        obj.update(lics)
                else:
                    print_warning("unsupported backtrack info: %s -> %s" % (
                        k,
                        v,
                    ))

            return 0
        print_info("dependency graph generated successfully")

        real_queue = self._post_graph_filters(graph, vardb, portdb)
        if real_queue is None:
            # post-graph filters not passed, giving up
            return 0

        merge_queue = [x for x in real_queue if x.operation == "merge"]
        unmerge_queue = [x for x in real_queue if x.operation == "uninstall"]
        if merge_queue:
            print_info("about to build the following packages:")
            for pkg in merge_queue:
                print_info("  %s" % (pkg.cpv, ))
        if unmerge_queue:
            print_info("about to uninstall the following packages:")
            for pkg in unmerge_queue:
                print_info("  %s" % (pkg.cpv, ))

        if self._pretend:
            print_info("portage spawned with --pretend, done!")
            return 0

        # equivalent to re-calling action_build(), though deps are re-calculated
        validate_ebuild_environment(emerge_trees)
        mergetask = Scheduler(emerge_settings,
                              emerge_trees,
                              mtimedb,
                              myopts,
                              spinner,
                              favorites=favorites,
                              graph_config=graph.schedulerGraph())
        del graph
        self.clear_caches(self._emerge_config)
        retval = mergetask.merge()

        not_merged = []
        real_queue_map = dict((pkg.cpv, pkg) for pkg in real_queue)
        failed_package = None
        if retval != 0:
            merge_list = mtimedb.get("resume", {}).get("mergelist", [])
            for _merge_type, _merge_root, merge_atom, _merge_act in merge_list:
                merge_atom = "%s" % (merge_atom, )
                if failed_package is None:
                    # we consider the first encountered package the one
                    # that failed. It makes sense since packages are built
                    # serially as of today.
                    # Also, the package object must be available in our
                    # package queue, so grab it from there.
                    failed_package = real_queue_map.get(merge_atom)
                not_merged.append(merge_atom)
                self._not_merged_packages.append(merge_atom)

        for pkg in real_queue:
            cpv = pkg.cpv
            if not cpv:
                print_warning("package: %s, has broken cpv: '%s', ignoring" % (
                    pkg,
                    cpv,
                ))
            elif cpv not in not_merged:
                if pkg.operation == "merge":
                    # add to build queue
                    print_info("package: %s, successfully built" % (cpv, ))
                    self._built_packages.append("%s" % (cpv, ))
                else:
                    # add to uninstall queue
                    print_info("package: %s, successfully uninstalled" %
                               (cpv, ))
                    self._uninstalled_packages.append("%s" % (cpv, ))

        post_emerge(myaction, myopts, myfiles, emerge_settings["ROOT"],
                    emerge_trees, mtimedb, retval)

        subprocess.call(["env-update"])

        if failed_package is not None:
            print_warning("failed package: %s::%s" % (
                failed_package.cpv,
                failed_package.repo,
            ))

        if self._params["buildfail"] and (failed_package is not None):

            std_env = self._build_standard_environment(
                repository=self._params["repository"])
            std_env["MATTER_PACKAGE_NAMES"] = " ".join(self._packages)
            std_env["MATTER_PORTAGE_FAILED_PACKAGE_NAME"] = failed_package.cpv
            std_env["MATTER_PORTAGE_REPOSITORY"] = failed_package.repo
            # call pkgfail hook if defined
            std_env["MATTER_PORTAGE_BUILD_LOG_DIR"] = os.path.join(
                log_dir, "build")

            buildfail = self._params["buildfail"]
            print_info("spawning buildfail: %s" % (buildfail, ))
            tmp_fd, tmp_path = mkstemp()
            with os.fdopen(tmp_fd, "wb") as tmp_f:
                with open(buildfail, "rb") as buildfail_f:
                    tmp_f.write(buildfail_f.read())
            try:
                # now execute
                os.chmod(tmp_path, 0o700)
                exit_st = subprocess.call([tmp_path], env=std_env)
                if exit_st != 0:
                    return exit_st
            finally:
                os.remove(tmp_path)

        print_info("portage spawned, return value: %d" % (retval, ))
        return retval
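
The buildfail hook above is executed by first copying it to a private temporary file, which avoids running a script that might change on disk mid-build. A minimal standalone sketch of that pattern, with a hypothetical hook path:

# Sketch: copy a hook script to a temp file, make it executable, run it.
import os
import subprocess
from tempfile import mkstemp

hook_path = "/etc/matter/hooks/buildfail.sh"  # hypothetical path
env = dict(os.environ, MATTER_EXIT_STATUS="1")

tmp_fd, tmp_path = mkstemp()
with os.fdopen(tmp_fd, "wb") as tmp_f:
    with open(hook_path, "rb") as hook_f:
        tmp_f.write(hook_f.read())
try:
    os.chmod(tmp_path, 0o700)  # owner-only read/write/execute
    exit_st = subprocess.call([tmp_path], env=env)
finally:
    os.remove(tmp_path)
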
Ejemplo n.º 21
0
    def run(self):
        """
        Execute Package building action.
        """
        header = self._build_execution_header_output()
        print_info(
            header + "spawning package build: %s" % (
                " ".join(self._packages),))

        std_env = self._build_standard_environment(
            repository=self._params["repository"])

        matter_package_names = " ".join(self._packages)
        std_env["MATTER_PACKAGE_NAMES"] = matter_package_names

        # run pkgpre, if any
        pkgpre = self._params["pkgpre"]
        if pkgpre is not None:
            print_info("spawning --pkgpre: %s" % (pkgpre,))
            tmp_fd, tmp_path = mkstemp()
            with os.fdopen(tmp_fd, "wb") as tmp_f:
                with open(pkgpre, "rb") as pkgpre_f:
                    tmp_f.write(pkgpre_f.read())
            try:
                # now execute
                os.chmod(tmp_path, 0o700)
                exit_st = subprocess.call([tmp_path], env=std_env)
                if exit_st != 0:
                    return exit_st
            finally:
                os.remove(tmp_path)
                # data might have become stale
                self._binpms.clear_cache()

        dirs_cleanup = []
        exit_st = self._run_builder(dirs_cleanup)

        std_env["MATTER_BUILT_PACKAGES"] = " ".join(self._built_packages)
        std_env["MATTER_FAILED_PACKAGES"] = " ".join(self._not_merged_packages)
        std_env["MATTER_NOT_INSTALLED_PACKAGES"] = " ".join(
            self._not_installed_packages)
        std_env["MATTER_NOT_FOUND_PACKAGES"] = " ".join(
            self._not_found_packages)
        std_env["MATTER_UNINSTALLED_PACKAGES"] = " ".join(
            self._uninstalled_packages)

        print_info("builder terminated, exit status: %d" % (exit_st,))

        # cleanup temporary directories registered on the queue
        for tmp_dir in dirs_cleanup:
            self.__cleanup_dir(tmp_dir)

        # run pkgpost, if any
        pkgpost = self._params["pkgpost"]
        if pkgpost is not None:
            print_info("spawning --pkgpost: %s" % (pkgpost,))
            tmp_fd, tmp_path = mkstemp()
            with os.fdopen(tmp_fd, "wb") as tmp_f:
                with open(pkgpost, "rb") as pkgpost_f:
                    tmp_f.write(pkgpost_f.read())
            try:
                # now execute
                os.chmod(tmp_path, 0o700)
                post_exit_st = subprocess.call([tmp_path, str(exit_st)],
                    env=std_env)
                if post_exit_st != 0:
                    return post_exit_st
            finally:
                os.remove(tmp_path)
                # data might have become stale
                self._binpms.clear_cache()

        return exit_st
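
run() hands the build outcome to the pkgpost hook through MATTER_* environment variables, plus the builder exit status as the first argument. A minimal sketch of a hook consuming that contract (the hook itself is hypothetical; the variable names come from the code above):

#!/usr/bin/env python
# Sketch: a pkgpost hook reading the environment exported by run().
import os
import sys

built = os.environ.get("MATTER_BUILT_PACKAGES", "").split()
failed = os.environ.get("MATTER_FAILED_PACKAGES", "").split()
builder_exit = int(sys.argv[1]) if len(sys.argv) > 1 else 0

print("built: %d, failed: %d, builder exit: %d" % (
    len(built), len(failed), builder_exit))
sys.exit(0)
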
Ejemplo n.º 22
0
def main():
    """
    Main App.
    """
    install_exception_handler()

    # disable color if standard output is not a TTY
    if not is_stdout_a_tty():
        nocolor()

    # Load Binary PMS modules
    import _entropy.matter.binpms as _pms
    pms_dir = os.path.dirname(_pms.__file__)
    for thing in os.listdir(pms_dir):
        if thing.startswith("__init__.py"):
            continue

        thing = os.path.join(pms_dir, thing)
        if not os.path.isfile(thing):
            continue
        if not thing.endswith(".py"):
            continue

        name = os.path.basename(thing)
        name = name[:-len(".py")]  # rstrip(".py") would strip chars, not the suffix
        package = "_entropy.matter.binpms.%s" % (name, )

        try:
            importlib.import_module(package)  # they will then register
        except ImportError:
            # non-fatal: skip Binary PMS modules that fail to import
            pass
    avail_binpms = BaseBinaryPMS.available_pms

    matter_spec = MatterSpec()
    parser_data = matter_spec.data()
    matter_spec_params = ""
    for spec_key in sorted(parser_data.keys()):
        par = parser_data[spec_key]
        matter_spec_params += "%s: %s\n" % (
            purple(spec_key),
            darkgreen(par.get("desc", "N/A")),
        )

    _env_vars_help = """\

Environment variables for Package Builder module:
%s       =  repository identifier
%s    =  alternative command used to sync Portage
                              default: %s
%s   =  alternative command used to sync Portage overlays
                              default: %s

Environment variables passed to --post executables:
%s        = exit status from previous execution phases, useful for detecting
                             execution errors.

Matter Resources Lock file you can use to detect if matter is running:
%s (--blocking switch makes it acquire in blocking mode)

Matter .spec file supported parameters:
%s

Available Binary PMSs:
%s
""" % (
        purple("MATTER_REPOSITORY_ID"),
        purple("MATTER_PORTAGE_SYNC_CMD"),
        darkgreen(PackageBuilder.DEFAULT_PORTAGE_SYNC_CMD),
        purple("MATTER_OVERLAYS_SYNC_CMD"),
        darkgreen(PackageBuilder.DEFAULT_OVERLAYS_SYNC_CMD),
        purple("MATTER_EXIT_STATUS"),
        darkgreen(MatterResourceLock.LOCK_FILE_PATH),
        matter_spec_params,
        "\n".join(
        ["%s: %s" % (purple(k.NAME), darkgreen(k.__name__)) \
             for k in avail_binpms]),)

    parser = argparse.ArgumentParser(
        description="Automated Packages Builder",
        epilog=_env_vars_help,
        formatter_class=argparse.RawDescriptionHelpFormatter)

    # * instead of + in order to support --sync only tasks
    parser.add_argument("spec",
                        nargs="+",
                        metavar="<spec>",
                        type=open,
                        help="matter spec file")

    default_pms = avail_binpms[0]
    for k in avail_binpms:
        if k.DEFAULT:
            default_pms = k
            break
    parser.add_argument(
        "--pms",
        default=default_pms.NAME,
        help="specify an alternative Binary PMS (see --help for a list), "
        "current default: %s" % (default_pms.NAME, ))

    parser.add_argument("--blocking",
                        help="when trying to acquire Binary PMS locks, "
                        "block until success.",
                        action="store_true")

    parser.add_argument("--commit",
                        help="commit built packages to repository.",
                        action="store_true")

    parser.add_argument(
        "--gentle",
        help="increase the system validation checks, be extremely "
        "careful wrt the current system status.",
        action="store_true")

    parser.add_argument(
        "--pre",
        metavar="<exec>",
        type=open,
        help="executable to be called once for setup purposes.",
        default=None)

    parser.add_argument(
        "--post",
        metavar="<exec>",
        type=open,
        help="executable to be called once for teardown purposes.",
        default=None)

    parser.add_argument("--push",
                        help="push Binary PMS package updates to online "
                        "repository (only if --commit).",
                        action="store_true")

    parser.add_argument(
        "--sync",
        help="sync Portage tree, and attached overlays, before starting.",
        action="store_true")

    parser.add_argument(
        "--sync-best-effort",
        default=False,
        help="sync Portage tree and attached overlays, as --sync, but do "
        "not exit if sync fails.",
        action="store_true")

    parser.add_argument("--disable-preserved-libs",
                        dest="disable_preserved_libs",
                        default=False,
                        help="disable prerserved libraries check.",
                        action="store_true")

    parser.add_argument(
        "--pretend",
        dest="pretend",
        default=False,
        help="show what would be done without alterint the current system.",
        action="store_true")

    # extend parser arguments
    for k in avail_binpms:
        k.extend_parser(parser)

    try:
        nsargs = parser.parse_args(sys.argv[1:])
    except IOError as err:
        if err.errno == errno.ENOENT:
            print_error(err.strerror + ": " + err.filename)
            return 1
        raise

    if os.getuid() != 0:
        # root access required
        print_error("superuser access required")
        return 1

    # parse spec files
    specs = []
    for spec_f in nsargs.spec:
        spec = SpecParser(spec_f)
        data = spec.parse()
        if data:
            specs.append(data)

    if not specs:
        print_error("invalid spec files provided")
        return 1

    # O(n) determine what is the BinaryPMS to use
    klass = None
    for k in avail_binpms:
        if k.NAME == nsargs.pms:
            klass = k
            break
    if klass is None:
        print_error("invalid Binary PMS specified: %s" % (nsargs.pms, ))
        return 1

    binary_pms = None
    exit_st = 0
    cwd = os.getcwd()
    try:
        try:
            binary_pms = klass(cwd, nsargs)
        except BaseBinaryPMS.BinaryPMSLoadError as err:
            # repository not available or not configured
            print_error("Cannot load Binary Package Manager: %s" % (err, ))
            return 3

        print_info("Loaded Binary PMS: %s" % (klass.NAME, ))

        # validate repository entries of spec metadata
        for spec in specs:
            try:
                binary_pms.validate_spec(spec)
            except BaseBinaryPMS.SpecParserError as err:
                print_error("%s" % (err, ))
                return 1

        if nsargs.blocking:
            print_info("--blocking enabled, please wait for locks...")

        resource_lock = binary_pms.get_resource_lock(nsargs.blocking)
        with resource_lock:
            with MatterResourceLock(nsargs.blocking):
                exit_st = matter_main(binary_pms, nsargs, cwd, specs)

    except BaseBinaryResourceLock.NotAcquired:
        print_error("unable to acquire PMS Resources lock")
        return 42
    except MatterResourceLock.NotAcquired:
        print_error("unable to acquire Matter Resources lock")
        return 42
    except KeyboardInterrupt:
        print_error("Keyboard Interrupt, pid: %s" % (os.getpid(), ))
        return 42
    finally:
        if binary_pms is not None:
            binary_pms.shutdown()

    print_warning("")
    print_warning("")
    print_warning("Tasks complete, exit status: %d" % (exit_st, ))
    return exit_st
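
One detail worth calling out in the module discovery loop above: str.rstrip() strips a set of characters, not a suffix, which is why the ".py" removal is done with slicing. A quick illustration:

# Sketch: why rstrip() must not be used to drop a ".py" suffix.
name = "happy.py"
print(name.rstrip(".py"))   # "ha" -- rstrip removes any trailing '.', 'p', 'y'
print(name[:-len(".py")])   # "happy" -- slicing drops exactly the suffix
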
Ejemplo n.º 23
0
def matter_main(binary_pms, nsargs, cwd, specs):
    """
    Main application code run after all the resources setup.
    """

    try:
        binary_pms.validate_system()
    except BaseBinaryPMS.SystemValidationError as err:
        print_error("%s" % (err, ))
        return 1

    print_info("matter loaded, starting to scan particles, pid: %s" %
               (os.getpid(), ))

    def _teardown(_exit_st):
        if nsargs.post:
            _rc = PackageBuilder.teardown(nsargs.post, cwd, _exit_st)
            if _exit_st == 0 and _rc != 0:
                _exit_st = _rc
        return _exit_st

    # setup
    if nsargs.pre:
        _rc = PackageBuilder.setup(nsargs.pre, cwd)
        if _rc != 0:
            return _teardown(_rc)

    # sync portage
    if nsargs.sync:
        _rc = PackageBuilder.sync()
        if _rc != 0 and not nsargs.sync_best_effort:
            return _teardown(_rc)

    exit_st = 0
    completed = collections.deque()
    not_found = collections.deque()
    not_installed = collections.deque()
    not_merged = collections.deque()
    uninstalled = collections.deque()
    missing_use = {}
    unstable_keywords = set()
    pmask_changes = set()
    license_changes = {}
    tainted_repositories = set()
    spec_count = 0
    tot_spec = len(specs)
    preserved_libs = False
    emerge_config = binary_pms.load_emerge_config()

    for spec in specs:

        spec_count += 1
        keep_going = spec["keep-going"] == "yes"
        local_completed = []
        local_uninstalled = []

        tot_pkgs = len(spec["packages"])
        for pkg_count, packages in enumerate(spec["packages"], 1):

            builder = PackageBuilder(binary_pms, emerge_config, packages, spec,
                                     spec_count, tot_spec, pkg_count, tot_pkgs,
                                     nsargs.pretend)
            _rc = builder.run()

            not_found.extend(builder.get_not_found_packages())
            not_installed.extend(builder.get_not_installed_packages())
            not_merged.extend(builder.get_not_merged_packages())
            uninstalled_pkgs = builder.get_uninstalled_packages()
            uninstalled.extend(uninstalled_pkgs)
            local_uninstalled.extend(uninstalled_pkgs)

            # Merge at least the first layer of dicts.
            for k, v in builder.get_missing_use_packages().items():
                obj = missing_use.setdefault(k, {})
                obj.update(v)

            unstable_keywords.update(builder.get_needed_unstable_keywords())
            pmask_changes.update(builder.get_needed_package_mask_changes())

            # We need to merge the two dicts, not just update()
            # or we can lose the full set of licenses associated
            # to a single cpv.
            for k, v in builder.get_needed_license_changes().items():
                obj = license_changes.setdefault(k, set())
                obj.update(v)

            preserved_libs = binary_pms.check_preserved_libraries(
                emerge_config)

            if preserved_libs and not nsargs.disable_preserved_libs:
                # abort, library breakages detected
                exit_st = 1
                print_error("preserved libraries detected, aborting")
                break

            # ignore _rc, we may have built pkgs even if _rc != 0
            built_packages = builder.get_built_packages()
            if built_packages:
                print_info("built packages, in queue: %s" %
                           (" ".join(built_packages), ))
                local_completed.extend(
                    [x for x in built_packages \
                         if x not in local_completed])
                tainted_repositories.add(spec["repository"])

            # make some room
            print_info("")
            if _rc < 0:
                # negative statuses are warnings, ignore and go ahead
                continue
            if _rc != 0:
                exit_st = _rc
                if not keep_going:
                    break

        # call post-build cleanup operations
        if local_completed or local_uninstalled:
            PackageBuilder.post_build(spec, emerge_config)

        completed.extend([x for x in local_completed \
            if x not in completed])
        # portage calls setcwd()
        os.chdir(cwd)

        if preserved_libs and not nsargs.disable_preserved_libs:
            # completely abort
            break

        if local_completed and nsargs.commit:
            _rc = binary_pms.commit(spec, local_completed)
            if exit_st == 0 and _rc != 0:
                exit_st = _rc
                if not keep_going:
                    break

        PackageBuilder.clear_caches(emerge_config)

    if tainted_repositories and nsargs.push and nsargs.commit:
        if preserved_libs and nsargs.disable_preserved_libs:
            # cannot push anyway
            print_warning("Preserved libraries detected, cannot push !")
        elif not preserved_libs:
            for repository in tainted_repositories:
                _rc = binary_pms.push(repository)
                if exit_st == 0 and _rc != 0:
                    exit_st = _rc

    # print summary
    print_generic("")
    print_generic("Summary")
    print_generic("Packages built:\n  %s" % ("\n  ".join(sorted(completed)), ))
    print_generic("Packages not built:\n  %s" %
                  ("\n  ".join(sorted(not_merged)), ))
    print_generic("Packages not found:\n  %s" %
                  ("\n  ".join(sorted(not_found)), ))
    print_generic("Packages not installed:\n  %s" %
                  ("\n  ".join(sorted(not_installed)), ))
    print_generic("Packages uninstalled:\n  %s" %
                  ("\n  ".join(sorted(uninstalled)), ))

    if missing_use:
        print_generic("Packages not built due to missing USE flags:")
        for atom in sorted(missing_use.keys()):
            use_data = missing_use[atom]
            use_l = []
            for use in sorted(use_data["changes"]):
                if use_data["changes"][use]:
                    use_l.append(use)
                else:
                    use_l.append("-" + use)
            print_generic("%s %s" % (use_data["cp:slot"], " ".join(use_l)))
        print_generic("")

    if unstable_keywords:
        print_generic("Packages not built due to missing unstable keywords:")
        for atom in sorted(unstable_keywords):
            print_generic("%s" % (atom, ))
        print_generic("")

    if pmask_changes:
        print_generic("Packages not built due to needed package.mask changes:")
        for atom in sorted(pmask_changes):
            print_generic("%s" % (atom, ))
        print_generic("")

    print_generic("Preserved libs: %s" % (preserved_libs, ))
    print_generic("")

    return _teardown(exit_st)
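
matter_main() routes every early return through the _teardown() closure so the --post hook always runs while the first nonzero exit status wins. A reduced sketch of the pattern, with the hook call stubbed out:

# Sketch: always run teardown, but let the first nonzero exit status win.
def make_teardown(run_post_hook):
    def _teardown(exit_st):
        rc = run_post_hook(exit_st)
        if exit_st == 0 and rc != 0:
            exit_st = rc
        return exit_st
    return _teardown

teardown = make_teardown(lambda st: 0)  # stub hook that always succeeds
print(teardown(0))  # 0
print(teardown(2))  # 2: the earlier failure is preserved
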
Ejemplo n.º 24
0
    def _commit_build_only(self, spec, packages):
        """
        Commit packages that have been built with -B.
        Overridden from BaseBinaryPMS.
        """
        settings, _trees, _db = self.load_emerge_config()
        pkgdir = settings["PKGDIR"]
        repository = spec["repository"]
        drop_old_injected = spec["drop-old-injected"] == "yes"

        print_info("committing build-only packages: %s, to repository: %s" % (
            ", ".join(sorted(packages)), repository,))

        exit_st = 0
        package_files = []
        for package in packages:
            tbz2_atom = package + ".tbz2"
            source_path = os.path.join(pkgdir, tbz2_atom)
            if not os.path.isfile(source_path):
                print_warning(
                    "cannot find package tarball: %s" % (source_path,))
                exit_st = 1
                continue
            package_files.append(source_path)

        pkg_files = [([x], True) for x in package_files]
        package_ids = self._entropy.add_packages_to_repository(
            repository, pkg_files, ask=False)
        self._entropy.commit_repositories()

        if package_ids:

            # drop old injected packages if they are in the
            # same key + slot of the newly added ones.
            # This is not atomic, but we don't actually care.
            if drop_old_injected:
                repo = self._entropy.open_repository(repository)

                key_slots = set()
                for package_id in package_ids:
                    key, slot = repo.retrieveKeySlot(package_id)
                    key_slots.add((key, slot))

                key_slot_package_ids = set()
                for key, slot in key_slots:
                    ks_package_ids = [x for x in repo.searchKeySlot(key, slot) \
                                          if repo.isInjected(x)]
                    key_slot_package_ids.update(ks_package_ids)
                # remove the newly added packages, of course
                key_slot_package_ids -= package_ids
                key_slot_package_ids = sorted(key_slot_package_ids)
                if key_slot_package_ids:
                    print_info("removing old injected packages, "
                               "as per drop-old-injected:")
                    for package_id in key_slot_package_ids:
                        atom = repo.retrieveAtom(package_id)
                        print_info("  %s" % (atom,))
                    self._entropy.remove_packages(
                        repository, key_slot_package_ids)

            self._entropy.dependencies_test(repository)

        return exit_st
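
The drop-old-injected step above reduces to set arithmetic over package ids: gather every injected package sharing a key/slot with the new additions, then subtract the new ids themselves. A condensed sketch with made-up ids:

# Sketch: keep only the stale injected packages, never the newly added ones.
new_package_ids = {101, 102}            # hypothetical ids just committed
same_key_slot_ids = {17, 42, 101, 102}  # injected pkgs sharing key:slot

stale_ids = sorted(same_key_slot_ids - new_package_ids)
print(stale_ids)  # [17, 42] -- these get removed from the repository
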
Ejemplo n.º 25
0
    def _commit(self, spec, packages):
        """
        Commit packages that have been merged into the system.
        Overridden from BaseBinaryPMS.
        """
        repository = spec["repository"]
        spm = self._entropy.Spm()
        spm_atoms = set()
        exit_st = 0

        print_info("committing packages: %s, to repository: %s" % (
            ", ".join(sorted(packages)), repository,))

        # if we get here, something has been compiled
        # successfully
        for package in packages:
            try:
                spm_atom = spm.match_installed_package(package)
                spm_atoms.add(spm_atom)
            except KeyError:
                exit_st = 1
                print_warning(
                    "cannot find installed package: %s" % (
                        package,))
                continue

        if not spm_atoms:
            return exit_st

        print_info("about to commit:")
        spm_packages = sorted(spm_atoms)

        for atom in spm_packages:
            item_txt = atom

            # this is a spm atom
            spm_key = portage.dep.dep_getkey("=%s" % (atom,))
            try:
                spm_slot = spm.get_installed_package_metadata(
                    atom, "SLOT")
                spm_repo = spm.get_installed_package_metadata(
                    atom, "repository")
            except KeyError:
                spm_slot = None
                spm_repo = None

            etp_repo = None
            if spm_repo is not None:
                pkg_id, repo_id = self._entropy.atom_match(spm_key,
                    match_slot=spm_slot)
                if repo_id != 1:
                    repo_db = self._entropy.open_repository(repo_id)
                    etp_repo = repo_db.retrieveSpmRepository(pkg_id)

                    if (etp_repo is not None) and (etp_repo != spm_repo):
                        item_txt += ' [%s {%s=>%s}]' % ("warning",
                            etp_repo, spm_repo,)

            print_info(item_txt)

        # always stuff new configuration files here
        # if --gentle was specified, the uncommitted stuff here belongs
        # to our packages.
        # if --gentle was NOT specified, we just don't give a shit
        # Due to bug #2480 -- sometimes (app-misc/tracker)
        # _check_config_file_updates() doesn't return all the files
        subprocess.call("echo -5 | etc-update", shell = True)
        uncommitted = self._entropy._check_config_file_updates()
        if uncommitted:
            # ouch, wtf? better aborting
            print_error("tried to commit configuration file changes and failed")
            return 1

        print_info("about to compress:")

        store_dir = self._entropy._get_local_store_directory(repository)
        package_paths = []
        for atom in spm_packages:
            print_info(atom)
            try:
                pkg_list = spm.generate_package(atom, store_dir)
            except OSError:
                print_traceback()
                print_error("problem during package generation, aborting")
                return 1
            except Exception:
                print_traceback()
                print_error("problem during package generation (2), aborting")
                return 1
            package_paths.append(pkg_list)

        etp_pkg_files = [(pkg_list, False) for pkg_list in package_paths]
        # NOTE: any missing runtime dependency will be added
        # (beside those blacklisted), since this execution is not interactive
        try:
            package_ids = self._entropy.add_packages_to_repository(
                repository, etp_pkg_files, ask=False)
        except OnlineMirrorError as err:
            print_traceback()
            print_error("problem during package commit: %s" % (err,))
            return 1

        self._entropy.commit_repositories()

        if package_ids:
            self._entropy.dependencies_test(repository)

        return exit_st
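
_commit() answers etc-update's interactive menu by piping "-5" into it, which (per the comment above) stuffs all pending configuration file updates without prompting. A hedged equivalent using subprocess, assuming etc-update is on PATH:

# Sketch: feed "-5" to etc-update's menu to handle all updates non-interactively.
import subprocess

proc = subprocess.Popen(["etc-update"], stdin=subprocess.PIPE)
proc.communicate(b"-5\n")
print("etc-update exit status: %d" % (proc.returncode,))
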
Ejemplo n.º 26
0
def main():
    """
    Main App.
    """
    install_exception_handler()

    # disable color if standard output is not a TTY
    if not is_stdout_a_tty():
        nocolor()

    # Load Binary PMS modules
    import _entropy.matter.binpms as _pms
    pms_dir = os.path.dirname(_pms.__file__)
    for thing in os.listdir(pms_dir):
        if thing.startswith("__init__.py"):
            continue

        thing = os.path.join(pms_dir, thing)
        if not os.path.isfile(thing):
            continue
        if not thing.endswith(".py"):
            continue

        name = os.path.basename(thing)
        name = name[:-len(".py")]  # rstrip(".py") would strip chars, not the suffix
        package = "_entropy.matter.binpms.%s" % (name,)

        try:
            importlib.import_module(package)  # they will then register
        except ImportError:
            # non-fatal: skip Binary PMS modules that fail to import
            pass
    avail_binpms = BaseBinaryPMS.available_pms

    matter_spec = MatterSpec()
    parser_data = matter_spec.data()
    matter_spec_params = ""
    for spec_key in sorted(parser_data.keys()):
        par = parser_data[spec_key]
        matter_spec_params += "%s: %s\n" % (
            purple(spec_key),
            darkgreen(par.get("desc", "N/A")),)

    _env_vars_help = """\

Environment variables for Package Builder module:
%s       =  repository identifier
%s    =  alternative command used to sync Portage
                              default: %s
%s   =  alternative command used to sync Portage overlays
                              default: %s

Environment variables passed to --post executables:
%s        = exit status from previous execution phases, useful for detecting
                             execution errors.

Matter Resources Lock file you can use to detect if matter is running:
%s (--blocking switch makes it acquire in blocking mode)

Matter .spec file supported parameters:
%s

Available Binary PMSs:
%s
""" % (
        purple("MATTER_REPOSITORY_ID"),
        purple("MATTER_PORTAGE_SYNC_CMD"),
        darkgreen(PackageBuilder.DEFAULT_PORTAGE_SYNC_CMD),
        purple("MATTER_OVERLAYS_SYNC_CMD"),
        darkgreen(PackageBuilder.DEFAULT_OVERLAYS_SYNC_CMD),
        purple("MATTER_EXIT_STATUS"),
        darkgreen(MatterResourceLock.LOCK_FILE_PATH),
        matter_spec_params,
        "\n".join(
        ["%s: %s" % (purple(k.NAME), darkgreen(k.__name__)) \
             for k in avail_binpms]),)

    parser = argparse.ArgumentParser(
        description="Automated Packages Builder",
        epilog=_env_vars_help,
        formatter_class=argparse.RawDescriptionHelpFormatter)

    # * instead of + in order to support --sync only tasks
    parser.add_argument(
        "spec", nargs="+", metavar="<spec>", type=file,
        help="matter spec file")

    default_pms = avail_binpms[0]
    for k in avail_binpms:
        if k.DEFAULT:
            default_pms = k
            break
    parser.add_argument(
        "--pms", default=default_pms.NAME,
        help="specify an alternative Binary PMS (see --help for a list), "
        "current default: %s" % (default_pms.NAME,))

    parser.add_argument(
        "--blocking",
        help="when trying to acquire Binary PMS locks, "
        "block until success.",
        action="store_true")

    parser.add_argument("--commit",
        help="commit built packages to repository.",
        action="store_true")

    parser.add_argument(
        "--gentle",
        help="increase the system validation checks, be extremely "
        "careful wrt the current system status.",
        action="store_true")

    parser.add_argument("--pre", metavar="<exec>", type=file,
        help="executable to be called once for setup purposes.",
        default=None)

    parser.add_argument("--post", metavar="<exec>", type=file,
        help="executable to be called once for teardown purposes.",
        default=None)

    parser.add_argument(
        "--push",
        help="push Binary PMS package updates to online "
        "repository (only if --commit).",
        action="store_true")

    parser.add_argument(
        "--sync",
        help="sync Portage tree, and attached overlays, before starting.",
        action="store_true")

    parser.add_argument(
        "--sync-best-effort", default=False,
        help="sync Portage tree and attached overlays, as --sync, but do "
        "not exit if sync fails.",
        action="store_true")

    parser.add_argument(
        "--disable-preserved-libs",
        dest="disable_preserved_libs", default=False,
        help="disable prerserved libraries check.",
        action="store_true")

    parser.add_argument(
        "--pretend",
        dest="pretend", default=False,
        help="show what would be done without alterint the current system.",
        action="store_true")

    # extend parser arguments
    for k in avail_binpms:
        k.extend_parser(parser)

    try:
        nsargs = parser.parse_args(sys.argv[1:])
    except IOError as err:
        if err.errno == errno.ENOENT:
            print_error(err.strerror + ": " + err.filename)
            return 1
        raise

    if os.getuid() != 0:
        # root access required
        print_error("superuser access required")
        return 1

    # parse spec files
    specs = []
    for spec_f in nsargs.spec:
        spec = SpecParser(spec_f)
        data = spec.parse()
        if data:
            specs.append(data)

    if not specs:
        print_error("invalid spec files provided")
        return 1

    # O(n) determine what is the BinaryPMS to use
    klass = None
    for k in avail_binpms:
        if k.NAME == nsargs.pms:
            klass = k
            break
    if klass is None:
        print_error("invalid Binary PMS specified: %s" % (nsargs.pms,))
        return 1

    binary_pms = None
    exit_st = 0
    cwd = os.getcwd()
    try:
        try:
            binary_pms = klass(cwd, nsargs)
        except BaseBinaryPMS.BinaryPMSLoadError as err:
            # repository not available or not configured
            print_error("Cannot load Binary Package Manager: %s" % (err,))
            return 3

        print_info("Loaded Binary PMS: %s" % (klass.NAME,))

        # validate repository entries of spec metadata
        for spec in specs:
            try:
                binary_pms.validate_spec(spec)
            except BaseBinaryPMS.SpecParserError as err:
                print_error("%s" % (err,))
                return 1

        if nsargs.blocking:
            print_info("--blocking enabled, please wait for locks...")

        resource_lock = binary_pms.get_resource_lock(nsargs.blocking)
        with resource_lock:
            with MatterResourceLock(nsargs.blocking):
                exit_st = matter_main(binary_pms, nsargs, cwd, specs)

    except BaseBinaryResourceLock.NotAcquired:
        print_error("unable to acquire PMS Resources lock")
        return 42
    except MatterResourceLock.NotAcquired:
        print_error("unable to acquire Matter Resources lock")
        return 42
    except KeyboardInterrupt:
        print_error("Keyboard Interrupt, pid: %s" % (os.getpid(),))
        return 42
    finally:
        if binary_pms is not None:
            binary_pms.shutdown()

    print_warning("")
    print_warning("")
    print_warning("Tasks complete, exit status: %d" % (exit_st,))
    return exit_st