	def _load_config(self):
		portdir_overlay = []
		for repo_name in sorted(self.repo_dirs):
			path = self.repo_dirs[repo_name]
			if path != self.portdir:
				portdir_overlay.append(path)

		env = {
			"ACCEPT_KEYWORDS": "x86",
			"DISTDIR": self.distdir,
			"PKGDIR": os.path.join(self.eroot, "usr/portage/packages"),
			"PORTDIR": self.portdir,
			"PORTDIR_OVERLAY": " ".join(portdir_overlay),
			"PORTAGE_TMPDIR": os.path.join(self.eroot, "var/tmp"),
		}

		if os.environ.get("SANDBOX_ON") == "1":
			# avoid problems from nested sandbox instances
			env["FEATURES"] = "-sandbox"

		# Pass along PORTAGE_USERNAME and PORTAGE_GRPNAME since they
		# need to be inherited by ebuild subprocesses.
		if 'PORTAGE_USERNAME' in os.environ:
			env['PORTAGE_USERNAME'] = os.environ['PORTAGE_USERNAME']
		if 'PORTAGE_GRPNAME' in os.environ:
			env['PORTAGE_GRPNAME'] = os.environ['PORTAGE_GRPNAME']

		trees = portage.create_trees(env=env, eprefix=self.eprefix)
		for root, root_trees in trees.items():
			settings = root_trees["vartree"].settings
			settings._init_dirs()
			setconfig = load_default_config(settings, root_trees)
			root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)
		
		return settings, trees
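As a rough illustration of the environment this method passes to portage.create_trees(), the sketch below derives the same PORTDIR_OVERLAY value from a hypothetical repo_dirs mapping; the repository names and paths are invented.

# Illustrative only: hypothetical repo_dirs mapping and the overlay list derived from it.
repo_dirs = {
    "gentoo": "/tmp/testenv/var/repositories/gentoo",
    "test_repo": "/tmp/testenv/var/repositories/test_repo",
}
portdir = repo_dirs["gentoo"]

portdir_overlay = [repo_dirs[name] for name in sorted(repo_dirs)
                   if repo_dirs[name] != portdir]
print(" ".join(portdir_overlay))
# -> /tmp/testenv/var/repositories/test_repo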
Example #2
    def _load_config(self):

        create_trees_kwargs = {}
        if self.target_root != os.sep:
            create_trees_kwargs["target_root"] = self.target_root

        env = {
            "PORTAGE_REPOSITORIES":
            "\n".join("[%s]\n%s" % (
                repo_name,
                "\n".join("%s = %s" % (k, v) for k, v in repo_config.items()),
            ) for repo_name, repo_config in self._repositories.items())
        }

        if self.debug:
            env["PORTAGE_DEBUG"] = "1"

        trees = portage.create_trees(env=env,
                                     eprefix=self.eprefix,
                                     **create_trees_kwargs)

        for root, root_trees in trees.items():
            settings = root_trees["vartree"].settings
            settings._init_dirs()
            setconfig = load_default_config(settings, root_trees)
            root_trees["root_config"] = RootConfig(settings, root_trees,
                                                   setconfig)

        return trees[trees._target_eroot]["vartree"].settings, trees
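For reference, the generator expression above serializes the repository mapping into the repos.conf-style text that Portage reads from PORTAGE_REPOSITORIES. A minimal sketch with a hypothetical _repositories mapping (section names and paths are invented):

# Hypothetical repository configuration; keys and values are for illustration only.
_repositories = {
    "DEFAULT": {"main-repo": "test_repo"},
    "test_repo": {"location": "/tmp/testenv/var/repositories/test_repo"},
}

portage_repositories = "\n".join("[%s]\n%s" % (
    repo_name,
    "\n".join("%s = %s" % (k, v) for k, v in repo_config.items()),
) for repo_name, repo_config in _repositories.items())

print(portage_repositories)
# [DEFAULT]
# main-repo = test_repo
# [test_repo]
# location = /tmp/testenv/var/repositories/test_repo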
Example #3
	def _load_config(self):
		env = {
			"ACCEPT_KEYWORDS": "x86",
			"PORTDIR": self.portdir,
			"PORTAGE_TMPDIR": os.path.join(self.eroot, "var/tmp"),
		}

		# Pass along PORTAGE_USERNAME and PORTAGE_GRPNAME since they
		# need to be inherited by ebuild subprocesses.
		if 'PORTAGE_USERNAME' in os.environ:
			env['PORTAGE_USERNAME'] = os.environ['PORTAGE_USERNAME']
		if 'PORTAGE_GRPNAME' in os.environ:
			env['PORTAGE_GRPNAME'] = os.environ['PORTAGE_GRPNAME']

		settings = config(_eprefix=self.eprefix, env=env)
		settings.lock()

		trees = {
			self.root: {
					"vartree": vartree(settings=settings),
					"porttree": portagetree(self.root, settings=settings),
					"bintree": binarytree(self.root,
						os.path.join(self.eroot, "usr/portage/packages"),
						settings=settings)
				}
			}

		for root, root_trees in trees.items():
			settings = root_trees["vartree"].settings
			settings._init_dirs()
			setconfig = load_default_config(settings, root_trees)
			root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)
		
		return settings, trees
Example #4
    def __init__(self):
        self.invalid = []
        self.not_installed = []
        self.okay = []
        from portage._sets import load_default_config
        setconfig = load_default_config(portage.settings,
                                        portage.db[portage.settings['EROOT']])
        self._sets = setconfig.getSets()
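As a rough usage sketch, the mapping returned by setconfig.getSets() keys set names to package-set objects, so the stored sets can be inspected like any other dict; getAtoms() is the same accessor used in Example #11 further down. Nothing here is part of the original snippet.

# Sketch: list the configured sets and how many atoms each one currently resolves to
# (assumes a setconfig built via load_default_config() as above).
for set_name, pkg_set in sorted(setconfig.getSets().items()):
    print("%s: %d atoms" % (set_name, len(pkg_set.getAtoms())))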
Example #5
	def __init__(self):
		self.invalid = []
		self.not_installed = []
		self.okay = []
		from portage._sets import load_default_config
		setconfig = load_default_config(portage.settings,
			portage.db[portage.settings['EROOT']])
		self._sets = setconfig.getSets()
Example #6
	def _load_config(self):

		create_trees_kwargs = {}
		if self.target_root != os.sep:
			create_trees_kwargs["target_root"] = self.target_root

		trees = portage.create_trees(env={}, eprefix=self.eprefix,
			**create_trees_kwargs)

		for root, root_trees in trees.items():
			settings = root_trees["vartree"].settings
			settings._init_dirs()
			setconfig = load_default_config(settings, root_trees)
			root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)

		return trees[trees._target_eroot]["vartree"].settings, trees
Example #7
	def _load_config(self):

		create_trees_kwargs = {}
		if self.target_root != os.sep:
			create_trees_kwargs["target_root"] = self.target_root

		env = {
			"PORTAGE_REPOSITORIES": "\n".join(
				"[%s]\n%s" % (
					repo_name,
					"\n".join("%s = %s" % (k, v)
						for k, v in repo_config.items()),
				)
				for repo_name, repo_config in self._repositories.items()
			)
		}

		trees = portage.create_trees(env=env, eprefix=self.eprefix,
			**create_trees_kwargs)

		for root, root_trees in trees.items():
			settings = root_trees["vartree"].settings
			settings._init_dirs()
			setconfig = load_default_config(settings, root_trees)
			root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)

		return trees[trees._target_eroot]["vartree"].settings, trees
Example #8
    def _load_config(self):
        portdir_overlay = []
        for repo_name in sorted(self.repo_dirs):
            path = self.repo_dirs[repo_name]
            if path != self.portdir:
                portdir_overlay.append(path)

        env = {
            "ACCEPT_KEYWORDS": "x86",
            "DISTDIR": self.distdir,
            "PKGDIR": self.pkgdir,
            "PORTDIR": self.portdir,
            "PORTDIR_OVERLAY": " ".join(portdir_overlay),
            'PORTAGE_TMPDIR': os.path.join(self.eroot, 'var/tmp'),
        }

        if os.environ.get("NOCOLOR"):
            env["NOCOLOR"] = os.environ["NOCOLOR"]

        if os.environ.get("SANDBOX_ON") == "1":
            # avoid problems from nested sandbox instances
            env["FEATURES"] = "-sandbox"

        # Pass along PORTAGE_USERNAME and PORTAGE_GRPNAME since they
        # need to be inherited by ebuild subprocesses.
        if 'PORTAGE_USERNAME' in os.environ:
            env['PORTAGE_USERNAME'] = os.environ['PORTAGE_USERNAME']
        if 'PORTAGE_GRPNAME' in os.environ:
            env['PORTAGE_GRPNAME'] = os.environ['PORTAGE_GRPNAME']

        trees = portage.create_trees(env=env, eprefix=self.eprefix)
        for root, root_trees in trees.items():
            settings = root_trees["vartree"].settings
            settings._init_dirs()
            setconfig = load_default_config(settings, root_trees)
            root_trees["root_config"] = RootConfig(settings, root_trees,
                                                   setconfig)

        return settings, trees
Example #9
    def world(self) -> typing.List[str]:
        """
        Packages currently enabled via @world set

        Get the list of packages listed in the @world set.  The atoms
        present in the result are returned as plain package names.
        Return an empty list if there is no @world set.
        """

        setconf = load_default_config(self.dbapi.settings, self.tree)
        ret = set()
        for x in setconf.getSetAtoms('world'):
            m = self.vdb.dep_bestmatch(x)
            if not m:
                # skip uninstalled packages
                continue
            repo, = self.vdb.dbapi.aux_get(m, ['repository'])
            if repo and repo != 'gentoo':
                # skip packages from other repositories
                continue
            ret.add(x.cp)
        return sorted(ret)
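The "plain package names" mentioned in the docstring are the category/package part of each atom, exposed as its .cp attribute. A tiny self-contained sketch using portage.dep.Atom directly; the version string is made up.

# The cp attribute strips the operator and version from a dependency atom.
from portage.dep import Atom

atom = Atom("=dev-lang/python-3.11.4")
print(atom.cp)  # -> dev-lang/python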
Example #10
    def _load_config(self):
        env = {
            "ACCEPT_KEYWORDS": "x86",
            "PORTDIR": self.portdir,
            'PORTAGE_TMPDIR': os.path.join(self.eroot, 'var/tmp'),
        }

        # Pass along PORTAGE_USERNAME and PORTAGE_GRPNAME since they
        # need to be inherited by ebuild subprocesses.
        if 'PORTAGE_USERNAME' in os.environ:
            env['PORTAGE_USERNAME'] = os.environ['PORTAGE_USERNAME']
        if 'PORTAGE_GRPNAME' in os.environ:
            env['PORTAGE_GRPNAME'] = os.environ['PORTAGE_GRPNAME']

        settings = config(_eprefix=self.eprefix, env=env)
        settings.lock()

        trees = {
            self.root: {
                "vartree":
                vartree(settings=settings),
                "porttree":
                portagetree(self.root, settings=settings),
                "bintree":
                binarytree(self.root,
                           os.path.join(self.eroot, "usr/portage/packages"),
                           settings=settings)
            }
        }

        for root, root_trees in trees.items():
            settings = root_trees["vartree"].settings
            settings._init_dirs()
            setconfig = load_default_config(settings, root_trees)
            root_trees["root_config"] = RootConfig(settings, root_trees,
                                                   setconfig)

        return settings, trees
Example #11
	def get_set(set_name='world', recursive=True):
		"""
		Returns a dictionary containing the given set and all of its
		atoms/subsets. If recursive is True, this is done recursively.
		"""

		eroot    = portage.settings["EROOT"]
		trees    = portage.db[eroot]
		vartree  = trees["vartree"]
		settings = vartree.settings

		setconfig = load_default_config(settings=settings, trees=trees)
		setconfig._parse()

		# selected sets (includes at least the 'selected' set):
		selected_sets = dict()

		def _include_set(s, recursive=True):
			if s in selected_sets:
				return

			if s not in setconfig.psets:
				raise Exception("Non existent set: " + s)

			atoms    = setconfig.psets[s].getAtoms()
			nonatoms = setconfig.psets[s].getNonAtoms()

			# atoms and nonatoms for each set:
			selected_sets[s] = list(atoms.union(nonatoms))
			# (use a list so that it's JSON serializable by default)

			# recursively add any sets included by the current set:
			if recursive:
				subsets = [x[len(SETPREFIX):] for x in nonatoms if x.startswith(SETPREFIX)]
				for subset in subsets:
					# a bare map() call here would be lazy (a no-op) on Python 3
					_include_set(subset)

		_include_set(set_name, recursive=recursive)
		return selected_sets
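Because the values are plain lists (see the JSON-serializability comment above), the result can be dumped directly. A hypothetical invocation, assuming a configured Portage installation:

# Illustrative usage of get_set() above.
import json

selected = get_set("world", recursive=True)
print(json.dumps(selected, indent=2, sort_keys=True))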
Example #12
    def _run_builder(self, dirs_cleanup_queue):
        """
        This method is called by _run and executes the whole package build
        logic, including constraint validation driven by the argv parameters.
        NOTE: negative errors indicate warnings that can be skipped.
        """
        if self._packages:
            first_package = self._packages[0]
        else:
            first_package = "_empty_"

        log_dir = mkdtemp(prefix="matter_build.",
            suffix="." + first_package.replace("/", "_").lstrip("<>=~"))
        dirs_cleanup_queue.append(log_dir)

        emerge_settings, emerge_trees, mtimedb = self._emerge_config

        # reset settings to their original state; some variables will be
        # reconfigured, while others may remain saved due to backup_changes().
        emerge_settings.unlock()
        emerge_settings.reset()
        emerge_settings.lock()

        # Setup stable/unstable keywords, must be done on
        # emerge_settings because the reference is spread everywhere
        # in emerge_trees.
        # This is not thread-safe, but Portage isn't either, so
        # who cares!
        # ACCEPT_KEYWORDS is not saved and reset every time by the
        # reset() call above.
        portdb = emerge_trees[emerge_settings["ROOT"]]["porttree"].dbapi

        self._setup_keywords(portdb, emerge_settings)

        portdb.freeze()
        vardb = emerge_trees[emerge_settings["ROOT"]]["vartree"].dbapi
        vardb.settings.unlock()
        vardb.settings["PORT_LOGDIR"] = log_dir
        vardb.settings.backup_changes("PORT_LOGDIR")
        vardb.settings.lock()

        # Load the most current variables from /etc/profile.env, which
        # has been re-generated by the env-update call in _run()
        emerge_settings.unlock()
        emerge_settings.reload()
        emerge_settings.regenerate()
        emerge_settings.lock()

        sets = self._get_sets_mod()  # can be None
        sets_conf = None
        if sets is not None:
            sets_conf = sets.load_default_config(
                emerge_settings,
                emerge_trees[emerge_settings["ROOT"]])

        packages = []
        # execute basic, pre-graph generation filters against each
        # package dependency in self._packages.
        # This is just fast pruning of the obvious cases.
        for package in self._packages:
            expanded_pkgs = []

            # package sets support
            if package.startswith("@") and sets_conf:
                try:
                    set_pkgs = sets_conf.getSetAtoms(package[1:])
                    expanded_pkgs.extend(sorted(set_pkgs))
                except sets.PackageSetNotFound:
                    # make it fail, add set directly
                    expanded_pkgs.append(package)
            else:
                expanded_pkgs.append(package)

            for exp_pkg in expanded_pkgs:
                accepted = self._pre_graph_filters(
                    exp_pkg, portdb, vardb)
                for best_visible in accepted:
                    packages.append((exp_pkg, best_visible))

        if not packages:
            print_warning("No remaining packages in queue, aborting.")
            return 0

        # at this point we can go ahead building packages
        print_info("starting to build:")
        for package, best_visible in packages:
            print_info(": %s -> %s" % (
                    package, best_visible,))

        if not getcolor():
            portage.output.nocolor()

        # non-interactive properties; this is not really required since
        # accept-properties just sets os.environ...
        build_args = list(self._setup_build_args(self._params))
        build_args += ["=" + best_v for _x, best_v in packages]

        myaction, myopts, myfiles = parse_opts(build_args)
        adjust_configs(myopts, emerge_trees)
        apply_priorities(emerge_settings)

        spinner = stdout_spinner()
        if "--quiet" in myopts:
            spinner.update = spinner.update_basic
        elif "--nospinner" in myopts:
            spinner.update = spinner.update_basic
        if emerge_settings.get("TERM") == "dumb" or not is_stdout_a_tty():
            spinner.update = spinner.update_basic

        print_info("emerge args: %s" % (" ".join(build_args),))

        params = create_depgraph_params(myopts, myaction)
        success, graph, favorites = backtrack_depgraph(emerge_settings,
            emerge_trees, myopts, params, myaction, myfiles, spinner)

        if not success:
            # print issues to stdout and give up
            print_warning("dependencies calculation failed, aborting")
            graph.display_problems()

            # try to collect some info about the failure
            bt_config = (graph.get_backtrack_infos() or {}).get("config", {})
            for k, v in bt_config.items():
                if k == "needed_use_config_changes":
                    for tup in v:
                        try:
                            pkg, (new_use, new_changes) = tup
                        except (ValueError, TypeError):
                            print_error(
                                "unsupported needed_use_config_changes: %s" % (
                                    tup,))
                            continue
                        obj = self._missing_use_packages.setdefault(
                            "%s" % (pkg.cpv,), {})
                        obj["cp:slot"] = "%s" % (pkg.slot_atom,)
                        changes = obj.setdefault("changes", {})
                        changes.update(copy.deepcopy(new_changes))
                elif k == "needed_unstable_keywords":
                    for pkg in v:
                        self._needed_unstable_keywords.add("%s" % (pkg.cpv,))
                elif k == "needed_p_mask_changes":
                    for pkg in v:
                        self._needed_package_mask_changes.add(
                            "%s" % (pkg.cpv,))
                elif k == "needed_license_changes":
                    for pkg, lics in v:
                        obj = self._needed_license_changes.setdefault(
                            "%s" % (pkg.cpv,), set())
                        obj.update(lics)
                else:
                    print_warning("unsupported backtrack info: %s -> %s" % (
                            k, v,))

            return 0
        print_info("dependency graph generated successfully")

        real_queue = self._post_graph_filters(graph, vardb, portdb)
        if real_queue is None:
            # post-graph filters not passed, giving up
            return 0

        merge_queue = [x for x in real_queue if x.operation == "merge"]
        unmerge_queue = [x for x in real_queue if x.operation == "uninstall"]
        if merge_queue:
            print_info("about to build the following packages:")
            for pkg in merge_queue:
                print_info("  %s" % (pkg.cpv,))
        if unmerge_queue:
            print_info("about to uninstall the following packages:")
            for pkg in unmerge_queue:
                print_info("  %s" % (pkg.cpv,))

        if self._pretend:
            print_info("portage spawned with --pretend, done!")
            return 0

        # re-calling action_build(), deps are re-calculated though
        validate_ebuild_environment(emerge_trees)
        mergetask = Scheduler(emerge_settings, emerge_trees, mtimedb,
            myopts, spinner, favorites=favorites,
            graph_config=graph.schedulerGraph())
        del graph
        self.clear_caches(self._emerge_config)
        retval = mergetask.merge()

        not_merged = []
        real_queue_map = dict((pkg.cpv, pkg) for pkg in real_queue)
        failed_package = None
        if retval != 0:
            merge_list = mtimedb.get("resume", {}).get("mergelist", [])
            for _merge_type, _merge_root, merge_atom, _merge_act in merge_list:
                merge_atom = "%s" % (merge_atom,)
                if failed_package is None:
                    # we consider the first encountered package the one
                    # that failed. It makes sense since packages are built
                    # serially as of today.
                    # Also, the package object must be available in our
                    # package queue, so grab it from there.
                    failed_package = real_queue_map.get(merge_atom)
                not_merged.append(merge_atom)
                self._not_merged_packages.append(merge_atom)

        for pkg in real_queue:
            cpv = pkg.cpv
            if not cpv:
                print_warning("package: %s, has broken cpv: '%s', ignoring" % (
                        pkg, cpv,))
            elif cpv not in not_merged:
                if pkg.operation == "merge":
                    # add to build queue
                    print_info("package: %s, successfully built" % (cpv,))
                    self._built_packages.append("%s" % (cpv,))
                else:
                    # add to uninstall queue
                    print_info("package: %s, successfully uninstalled" % (cpv,))
                    self._uninstalled_packages.append("%s" % (cpv,))

        post_emerge(myaction, myopts, myfiles, emerge_settings["ROOT"],
            emerge_trees, mtimedb, retval)

        subprocess.call(["env-update"])

        if failed_package is not None:
            print_warning("failed package: %s::%s" % (failed_package.cpv,
                failed_package.repo,))

        if self._params["buildfail"] and (failed_package is not None):

            std_env = self._build_standard_environment(
                repository=self._params["repository"])
            std_env["MATTER_PACKAGE_NAMES"] = " ".join(self._packages)
            std_env["MATTER_PORTAGE_FAILED_PACKAGE_NAME"] = failed_package.cpv
            std_env["MATTER_PORTAGE_REPOSITORY"] = failed_package.repo
            # call pkgfail hook if defined
            std_env["MATTER_PORTAGE_BUILD_LOG_DIR"] = os.path.join(log_dir,
                "build")

            buildfail = self._params["buildfail"]
            print_info("spawning buildfail: %s" % (buildfail,))
            tmp_fd, tmp_path = mkstemp()
            with os.fdopen(tmp_fd, "wb") as tmp_f:
                with open(buildfail, "rb") as buildfail_f:
                    tmp_f.write(buildfail_f.read())
            try:
                # now execute
                os.chmod(tmp_path, 0o700)
                exit_st = subprocess.call([tmp_path], env = std_env)
                if exit_st != 0:
                    return exit_st
            finally:
                os.remove(tmp_path)

        print_info("portage spawned, return value: %d" % (retval,))
        return retval
Example #13
    def _run_builder(self, dirs_cleanup_queue):
        """
        This method is called by _run and executes the whole package build
        logic, including constraint validation driven by the argv parameters.
        NOTE: negative errors indicate warnings that can be skipped.
        """
        if self._packages:
            first_package = self._packages[0]
        else:
            first_package = "_empty_"

        log_dir = mkdtemp(prefix="matter_build.",
                          suffix="." +
                          first_package.replace("/", "_").lstrip("<>=~"))
        dirs_cleanup_queue.append(log_dir)

        emerge_settings, emerge_trees, mtimedb = self._emerge_config

        # reset settings to their original state; some variables will be
        # reconfigured, while others may remain saved due to backup_changes().
        emerge_settings.unlock()
        emerge_settings.reset()
        emerge_settings.lock()

        # Setup stable/unstable keywords, must be done on
        # emerge_settings because the reference is spread everywhere
        # in emerge_trees.
        # This is not thread-safe, but Portage isn't either, so
        # who cares!
        # ACCEPT_KEYWORDS is not saved and reset every time by the
        # reset() call above.
        portdb = emerge_trees[emerge_settings["ROOT"]]["porttree"].dbapi

        self._setup_keywords(portdb, emerge_settings)

        portdb.freeze()
        vardb = emerge_trees[emerge_settings["ROOT"]]["vartree"].dbapi
        vardb.settings.unlock()
        vardb.settings["PORT_LOGDIR"] = log_dir
        vardb.settings.backup_changes("PORT_LOGDIR")
        vardb.settings.lock()

        # Load the most current variables from /etc/profile.env, which
        # has been re-generated by the env-update call in _run()
        emerge_settings.unlock()
        emerge_settings.reload()
        emerge_settings.regenerate()
        emerge_settings.lock()

        sets = self._get_sets_mod()  # can be None
        sets_conf = None
        if sets is not None:
            sets_conf = sets.load_default_config(
                emerge_settings, emerge_trees[emerge_settings["ROOT"]])

        packages = []
        # execute basic, pre-graph generation filters against each
        # package dependency in self._packages.
        # This is just fast pruning of the obvious cases.
        for package in self._packages:
            expanded_pkgs = []

            # package sets support
            if package.startswith("@") and sets_conf:
                try:
                    set_pkgs = sets_conf.getSetAtoms(package[1:])
                    expanded_pkgs.extend(sorted(set_pkgs))
                except sets.PackageSetNotFound:
                    # make it fail, add set directly
                    expanded_pkgs.append(package)
            else:
                expanded_pkgs.append(package)

            for exp_pkg in expanded_pkgs:
                accepted = self._pre_graph_filters(exp_pkg, portdb, vardb)
                for best_visible in accepted:
                    packages.append((exp_pkg, best_visible))

        if not packages:
            print_warning("No remaining packages in queue, aborting.")
            return 0

        # at this point we can go ahead building packages
        print_info("starting to build:")
        for package, best_visible in packages:
            print_info(": %s -> %s" % (
                package,
                best_visible,
            ))

        if not getcolor():
            portage.output.nocolor()

        # non-interactive properties; this is not really required since
        # accept-properties just sets os.environ...
        build_args = list(self._setup_build_args(self._params))
        build_args += ["=" + best_v for _x, best_v in packages]

        myaction, myopts, myfiles = parse_opts(build_args)
        adjust_configs(myopts, emerge_trees)
        apply_priorities(emerge_settings)

        spinner = stdout_spinner()
        if "--quiet" in myopts:
            spinner.update = spinner.update_basic
        elif "--nospinner" in myopts:
            spinner.update = spinner.update_basic
        if emerge_settings.get("TERM") == "dumb" or not is_stdout_a_tty():
            spinner.update = spinner.update_basic

        print_info("emerge args: %s" % (" ".join(build_args), ))

        params = create_depgraph_params(myopts, myaction)
        success, graph, favorites = backtrack_depgraph(emerge_settings,
                                                       emerge_trees, myopts,
                                                       params, myaction,
                                                       myfiles, spinner)

        if not success:
            # print issues to stdout and give up
            print_warning("dependencies calculation failed, aborting")
            graph.display_problems()

            # try to collect some info about the failure
            bt_config = (graph.get_backtrack_infos() or {}).get("config", {})
            for k, v in bt_config.items():
                if k == "needed_use_config_changes":
                    for tup in v:
                        try:
                            pkg, (new_use, new_changes) = tup
                        except (ValueError, TypeError):
                            print_error(
                                "unsupported needed_use_config_changes: %s" %
                                (tup, ))
                            continue
                        obj = self._missing_use_packages.setdefault(
                            "%s" % (pkg.cpv, ), {})
                        obj["cp:slot"] = "%s" % (pkg.slot_atom, )
                        changes = obj.setdefault("changes", {})
                        changes.update(copy.deepcopy(new_changes))
                elif k == "needed_unstable_keywords":
                    for pkg in v:
                        self._needed_unstable_keywords.add("%s" % (pkg.cpv, ))
                elif k == "needed_p_mask_changes":
                    for pkg in v:
                        self._needed_package_mask_changes.add("%s" %
                                                              (pkg.cpv, ))
                elif k == "needed_license_changes":
                    for pkg, lics in v:
                        obj = self._needed_license_changes.setdefault(
                            "%s" % (pkg.cpv, ), set())
                        obj.update(lics)
                else:
                    print_warning("unsupported backtrack info: %s -> %s" % (
                        k,
                        v,
                    ))

            return 0
        print_info("dependency graph generated successfully")

        real_queue = self._post_graph_filters(graph, vardb, portdb)
        if real_queue is None:
            # post-graph filters not passed, giving up
            return 0

        merge_queue = [x for x in real_queue if x.operation == "merge"]
        unmerge_queue = [x for x in real_queue if x.operation == "uninstall"]
        if merge_queue:
            print_info("about to build the following packages:")
            for pkg in merge_queue:
                print_info("  %s" % (pkg.cpv, ))
        if unmerge_queue:
            print_info("about to uninstall the following packages:")
            for pkg in unmerge_queue:
                print_info("  %s" % (pkg.cpv, ))

        if self._pretend:
            print_info("portage spawned with --pretend, done!")
            return 0

        # re-calling action_build(), deps are re-calculated though
        validate_ebuild_environment(emerge_trees)
        mergetask = Scheduler(emerge_settings,
                              emerge_trees,
                              mtimedb,
                              myopts,
                              spinner,
                              favorites=favorites,
                              graph_config=graph.schedulerGraph())
        del graph
        self.clear_caches(self._emerge_config)
        retval = mergetask.merge()

        not_merged = []
        real_queue_map = dict((pkg.cpv, pkg) for pkg in real_queue)
        failed_package = None
        if retval != 0:
            merge_list = mtimedb.get("resume", {}).get("mergelist", [])
            for _merge_type, _merge_root, merge_atom, _merge_act in merge_list:
                merge_atom = "%s" % (merge_atom, )
                if failed_package is None:
                    # we consider the first encountered package the one
                    # that failed. It makes sense since packages are built
                    # serially as of today.
                    # Also, the package object must be available in our
                    # package queue, so grab it from there.
                    failed_package = real_queue_map.get(merge_atom)
                not_merged.append(merge_atom)
                self._not_merged_packages.append(merge_atom)

        for pkg in real_queue:
            cpv = pkg.cpv
            if not cpv:
                print_warning("package: %s, has broken cpv: '%s', ignoring" % (
                    pkg,
                    cpv,
                ))
            elif cpv not in not_merged:
                if pkg.operation == "merge":
                    # add to build queue
                    print_info("package: %s, successfully built" % (cpv, ))
                    self._built_packages.append("%s" % (cpv, ))
                else:
                    # add to uninstall queue
                    print_info("package: %s, successfully uninstalled" %
                               (cpv, ))
                    self._uninstalled_packages.append("%s" % (cpv, ))

        post_emerge(myaction, myopts, myfiles, emerge_settings["ROOT"],
                    emerge_trees, mtimedb, retval)

        subprocess.call(["env-update"])

        if failed_package is not None:
            print_warning("failed package: %s::%s" % (
                failed_package.cpv,
                failed_package.repo,
            ))

        if self._params["buildfail"] and (failed_package is not None):

            std_env = self._build_standard_environment(
                repository=self._params["repository"])
            std_env["MATTER_PACKAGE_NAMES"] = " ".join(self._packages)
            std_env["MATTER_PORTAGE_FAILED_PACKAGE_NAME"] = failed_package.cpv
            std_env["MATTER_PORTAGE_REPOSITORY"] = failed_package.repo
            # call pkgfail hook if defined
            std_env["MATTER_PORTAGE_BUILD_LOG_DIR"] = os.path.join(
                log_dir, "build")

            buildfail = self._params["buildfail"]
            print_info("spawning buildfail: %s" % (buildfail, ))
            tmp_fd, tmp_path = mkstemp()
            with os.fdopen(tmp_fd, "wb") as tmp_f:
                with open(buildfail, "rb") as buildfail_f:
                    tmp_f.write(buildfail_f.read())
            try:
                # now execute
                os.chmod(tmp_path, 0o700)
                exit_st = subprocess.call([tmp_path], env=std_env)
                if exit_st != 0:
                    return exit_st
            finally:
                os.remove(tmp_path)

        print_info("portage spawned, return value: %d" % (retval, ))
        return retval
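Since the buildfail hook is executed with env=std_env, the MATTER_* variables set above reach it as ordinary environment variables. A minimal sketch of a Python hook that reports the failed package; the variable names are the ones exported above, everything else is illustrative.

#!/usr/bin/env python
# Sketch of a buildfail hook: read the variables exported by _run_builder() above.
import os
import sys

failed = os.environ.get("MATTER_PORTAGE_FAILED_PACKAGE_NAME", "<unknown>")
repo = os.environ.get("MATTER_PORTAGE_REPOSITORY", "<unknown>")
log_dir = os.environ.get("MATTER_PORTAGE_BUILD_LOG_DIR", "")

sys.stderr.write("build failed: %s::%s (logs in %s)\n" % (failed, repo, log_dir))
sys.exit(0)  # a non-zero exit status would be propagated by _run_builder()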