Example #1
0
def main():
    """Repeatedly attempt a pretend build of the command-line targets.

    Appends ``--tree --pretend`` so nothing is actually merged, then loops:
    build the dependency graph, and on failure ask ``fix_conflict`` to
    adjust the emerge configuration until the graph resolves or no further
    fix is possible.  On success, prints the leaf nodes of the dependency
    graph; on failure, prints the depgraph's recorded problems.
    """
    args = sys.argv[1:]
    # Never merge anything from this driver; we only want the graph.
    args.extend(["--tree", "--pretend"])
    myaction, myopts, myfiles = parse_opts(args, silent=True)

    emerge_config = load_emerge_config(action=myaction, args=myfiles,
                                       opts=myopts)
    success, depgraph, favorites = False, None, None
    while not success:
        print("Building %s with %s" % (myfiles, myopts))
        success, depgraph, favorites = build(emerge_config)
        if not success:
            # Ask the resolver for an adjusted config; stop when it
            # reports that it could not change anything.
            emerge_config, fixed = fix_conflict(emerge_config, depgraph)
            if not fixed:
                break
    if success:
        dynamic_config = depgraph._dynamic_config
        # draw_graph(dynamic_config.digraph.copy())
        # depgraph.display(depgraph.altlist(), favorites=favorites)
        for x in dynamic_config.digraph.leaf_nodes():
            print("%s %r" % (x, x))
        # emerge_config = load_emerge_config(emerge_config=emerge_config)
        # run_action(emerge_config)
    else:
        depgraph.display_problems()
def autoresolve(args):
    """Iteratively grow *args* until the dependency calculation succeeds.

    Each round re-parses *args*, recomputes the dependency graph, and on
    failure asks ``fix_conflict`` for candidate packages.  New candidates
    are appended either as plain targets (when ``--oneshot`` is active) or
    as ``--reinstall-atoms=`` options.  Returns ``(success, depgraph, args)``;
    success is False when a round produced no new candidates.
    """
    success, depgraph = False, None
    while not success:
        action, opts, files = parse_opts(args, silent=True)
        config = load_emerge_config(action=action, args=files, opts=opts)
        print(("Targets: %s\nOptions: %s\nCalculating dependency  "
               % (files, opts)), end="")
        success, depgraph, _ = calcdep(config)
        print()
        if success:
            break
        depgraph.display_problems()
        candidates = list(fix_conflict(depgraph))
        reinstall_atoms = opts.get("--reinstall-atoms", [])
        use_oneshot = opts.get("--oneshot", False)
        grew = False
        for atom in candidates:
            # Skip anything already requested or already queued for
            # reinstall; only genuinely new atoms extend the arg list.
            if atom in files or atom in reinstall_atoms:
                continue
            if use_oneshot:
                args.append(atom)
            else:
                args.append("--reinstall-atoms=" + atom)
            grew = True
        if not grew:
            return False, depgraph, args
    return True, depgraph, args
Example #3
0
def get_emerge_info_id(settings, trees, session, config_id):
	"""Return the output of ``emerge --info`` as a single newline-joined string.

	``session`` and ``config_id`` are accepted for interface compatibility
	with callers but are currently unused here.
	"""
	myaction, myopts, myfiles = parse_opts(["--info"], silent=True)
	# action_info returns (status, list-of-lines); only the lines matter.
	_status, emerge_info_list = action_info(settings, trees, myopts, myfiles)
	return "\n".join(emerge_info_list)
Example #4
0
 def post_build(cls, spec, emerge_config):
     """
     Execute Portage post-build tasks.
     """
     settings, trees, mtimedb = emerge_config
     # Only clean when Portage itself is configured to autoclean.
     if settings.get("AUTOCLEAN") != "yes":
         return
     print_info("executing post-build operations, please wait...")
     _action, opts, _files = parse_opts(list(cls._setup_build_args(spec)))
     root_config = trees[settings["ROOT"]]["root_config"]
     unmerge(root_config, opts, "clean", [], mtimedb["ldpath"], autoclean=1)
Example #5
0
    def post_build(cls, spec, emerge_config):
        """
        Execute Portage post-build tasks.
        """
        print_info("executing post-build operations, please wait...")

        settings, trees, mtimedb = emerge_config
        # Nothing to do unless Portage is configured to autoclean.
        if settings.get("AUTOCLEAN") != "yes":
            return
        _action, opts, _files = parse_opts(list(cls._setup_build_args(spec)))
        unmerge(trees[settings["ROOT"]]["root_config"], opts, "clean", [],
                mtimedb["ldpath"], autoclean=1)
Example #6
0
    def _run_builder(self, dirs_cleanup_queue):
        """
        This method is called by _run and executes the whole package build
        logic, including constraints validation given by argv parameters.
        NOTE: negative errors indicate warnings that can be skipped.

        Returns 0 on every handled abort path (empty queue, failed
        dependency calculation, --pretend, filtered-out queue); otherwise
        returns the Scheduler merge() return value (or the buildfail
        hook's non-zero exit status).
        """
        if self._packages:
            first_package = self._packages[0]
        else:
            first_package = "_empty_"

        log_dir = mkdtemp(prefix="matter_build.",
            suffix="." + first_package.replace("/", "_").lstrip("<>=~"))
        dirs_cleanup_queue.append(log_dir)

        emerge_settings, emerge_trees, mtimedb = self._emerge_config

        # reset settings to original state, variables will be reconfigured
        # while others may remain saved due to backup_changes().
        emerge_settings.unlock()
        emerge_settings.reset()
        emerge_settings.lock()

        # Setup stable/unstable keywords, must be done on
        # emerge_settings because the reference is spread everywhere
        # in emerge_trees.
        # This is not thread-safe, but Portage isn't either, so
        # who cares!
        # ACCEPT_KEYWORDS is not saved and reset every time by the
        # reset() call above.
        portdb = emerge_trees[emerge_settings["ROOT"]]["porttree"].dbapi

        self._setup_keywords(portdb, emerge_settings)

        portdb.freeze()
        vardb = emerge_trees[emerge_settings["ROOT"]]["vartree"].dbapi
        vardb.settings.unlock()
        vardb.settings["PORT_LOGDIR"] = log_dir
        vardb.settings.backup_changes("PORT_LOGDIR")
        vardb.settings.lock()

        # Load the most current variables from /etc/profile.env, which
        # has been re-generated by the env-update call in _run()
        emerge_settings.unlock()
        emerge_settings.reload()
        emerge_settings.regenerate()
        emerge_settings.lock()

        sets = self._get_sets_mod()  # can be None
        sets_conf = None
        if sets is not None:
            sets_conf = sets.load_default_config(
                emerge_settings,
                emerge_trees[emerge_settings["ROOT"]])

        packages = []
        # execute basic, pre-graph generation filters against each
        # package dependency in self._packages.
        # This is just fast pruning of obvious obviousness.
        for package in self._packages:
            expanded_pkgs = []

            # package sets support
            if package.startswith("@") and sets_conf:
                try:
                    set_pkgs = sets_conf.getSetAtoms(package[1:])
                    expanded_pkgs.extend(sorted(set_pkgs))
                except sets.PackageSetNotFound:
                    # make it fail, add set directly
                    expanded_pkgs.append(package)
            else:
                expanded_pkgs.append(package)

            for exp_pkg in expanded_pkgs:
                accepted = self._pre_graph_filters(
                    exp_pkg, portdb, vardb)
                for best_visible in accepted:
                    packages.append((exp_pkg, best_visible))

        if not packages:
            print_warning("No remaining packages in queue, aborting.")
            return 0

        # at this point we can go ahead building packages
        print_info("starting to build:")
        for package, best_visible in packages:
            print_info(": %s -> %s" % (
                    package, best_visible,))

        if not getcolor():
            portage.output.nocolor()

        # non interactive properties, this is not really required
        # accept-properties just sets os.environ...
        build_args = list(self._setup_build_args(self._params))
        build_args += ["=" + best_v for _x, best_v in packages]

        myaction, myopts, myfiles = parse_opts(build_args)
        adjust_configs(myopts, emerge_trees)
        apply_priorities(emerge_settings)

        spinner = stdout_spinner()
        if "--quiet" in myopts:
            spinner.update = spinner.update_basic
        elif "--nospinner" in myopts:
            spinner.update = spinner.update_basic
        if emerge_settings.get("TERM") == "dumb" or not is_stdout_a_tty():
            spinner.update = spinner.update_basic

        print_info("emerge args: %s" % (" ".join(build_args),))

        params = create_depgraph_params(myopts, myaction)
        success, graph, favorites = backtrack_depgraph(emerge_settings,
            emerge_trees, myopts, params, myaction, myfiles, spinner)

        if not success:
            # print issues to stdout and give up
            print_warning("dependencies calculation failed, aborting")
            graph.display_problems()

            # try to collect some info about the failure
            bt_config = (graph.get_backtrack_infos() or {}).get("config", {})
            for k, v in bt_config.items():
                if k == "needed_use_config_changes":
                    for tup in v:
                        try:
                            pkg, (new_use, new_changes) = tup
                        except (ValueError, TypeError):
                            print_error(
                                "unsupported needed_use_config_changes: %s" % (
                                    tup,))
                            continue
                        obj = self._missing_use_packages.setdefault(
                            "%s" % (pkg.cpv,), {})
                        obj["cp:slot"] = "%s" % (pkg.slot_atom,)
                        changes = obj.setdefault("changes", {})
                        changes.update(copy.deepcopy(new_changes))
                elif k == "needed_unstable_keywords":
                    for pkg in v:
                        self._needed_unstable_keywords.add("%s" % (pkg.cpv,))
                elif k == "needed_p_mask_changes":
                    for pkg in v:
                        self._needed_package_mask_changes.add(
                            "%s" % (pkg.cpv,))
                elif k == "needed_license_changes":
                    for pkg, lics in v:
                        obj = self._needed_license_changes.setdefault(
                            "%s" % (pkg.cpv,), set())
                        obj.update(lics)
                else:
                    print_warning("unsupported backtrack info: %s -> %s" % (
                            k, v,))

            return 0
        print_info("dependency graph generated successfully")

        real_queue = self._post_graph_filters(graph, vardb, portdb)
        if real_queue is None:
            # post-graph filters not passed, giving up
            return 0

        merge_queue = [x for x in real_queue if x.operation == "merge"]
        unmerge_queue = [x for x in real_queue if x.operation == "uninstall"]
        if merge_queue:
            print_info("about to build the following packages:")
            for pkg in merge_queue:
                print_info("  %s" % (pkg.cpv,))
        if unmerge_queue:
            print_info("about to uninstall the following packages:")
            for pkg in unmerge_queue:
                print_info("  %s" % (pkg.cpv,))

        if self._pretend:
            print_info("portage spawned with --pretend, done!")
            return 0

        # re-calling action_build(), deps are re-calculated though
        validate_ebuild_environment(emerge_trees)
        mergetask = Scheduler(emerge_settings, emerge_trees, mtimedb,
            myopts, spinner, favorites=favorites,
            graph_config=graph.schedulerGraph())
        del graph
        self.clear_caches(self._emerge_config)
        retval = mergetask.merge()

        not_merged = []
        real_queue_map = dict((pkg.cpv, pkg) for pkg in real_queue)
        failed_package = None
        if retval != 0:
            # "mergelist" may be absent from the resume info; default to
            # an empty list so the loop below is a no-op rather than
            # raising TypeError when iterating None.
            merge_list = mtimedb.get("resume", {}).get("mergelist", [])
            for _merge_type, _merge_root, merge_atom, _merge_act in merge_list:
                merge_atom = "%s" % (merge_atom,)
                if failed_package is None:
                    # we consider the first encountered package the one
                    # that failed. It makes sense since packages are built
                    # serially as of today.
                    # Also, the package object must be available in our
                    # package queue, so grab it from there.
                    failed_package = real_queue_map.get(merge_atom)
                not_merged.append(merge_atom)
                self._not_merged_packages.append(merge_atom)

        for pkg in real_queue:
            cpv = pkg.cpv
            if not cpv:
                print_warning("package: %s, has broken cpv: '%s', ignoring" % (
                        pkg, cpv,))
            elif cpv not in not_merged:
                if pkg.operation == "merge":
                    # add to build queue
                    print_info("package: %s, successfully built" % (cpv,))
                    self._built_packages.append("%s" % (cpv,))
                else:
                    # add to uninstall queue
                    print_info("package: %s, successfully uninstalled" % (cpv,))
                    self._uninstalled_packages.append("%s" % (cpv,))

        post_emerge(myaction, myopts, myfiles, emerge_settings["ROOT"],
            emerge_trees, mtimedb, retval)

        subprocess.call(["env-update"])

        if failed_package is not None:
            print_warning("failed package: %s::%s" % (failed_package.cpv,
                failed_package.repo,))

        if self._params["buildfail"] and (failed_package is not None):

            std_env = self._build_standard_environment(
                repository=self._params["repository"])
            std_env["MATTER_PACKAGE_NAMES"] = " ".join(self._packages)
            std_env["MATTER_PORTAGE_FAILED_PACKAGE_NAME"] = failed_package.cpv
            std_env["MATTER_PORTAGE_REPOSITORY"] = failed_package.repo
            # call pkgfail hook if defined
            std_env["MATTER_PORTAGE_BUILD_LOG_DIR"] = os.path.join(log_dir,
                "build")

            buildfail = self._params["buildfail"]
            print_info("spawning buildfail: %s" % (buildfail,))
            tmp_fd, tmp_path = mkstemp()
            with os.fdopen(tmp_fd, "wb") as tmp_f:
                with open(buildfail, "rb") as buildfail_f:
                    tmp_f.write(buildfail_f.read())
            try:
                # now execute
                os.chmod(tmp_path, 0o700)
                exit_st = subprocess.call([tmp_path], env = std_env)
                if exit_st != 0:
                    return exit_st
            finally:
                os.remove(tmp_path)

        print_info("portage spawned, return value: %d" % (retval,))
        return retval
    def Initialize(self, args):
        """Initializer. Parses arguments and sets up portage state.

        Mutates os.environ extensively (SYSROOT, PORTAGE_CONFIGROOT, ROOT,
        delay/lock knobs) BEFORE load_emerge_config() is called, since the
        config load reads those variables.  Populates self.emerge with the
        parsed action/opts/packages and loaded settings/trees/mtimedb.
        Calls sys.exit(1) when an unsupported emerge option is passed.
        """

        # Parse and strip out args that are just intended for parallel_emerge.
        emerge_args = self.ParseParallelEmergeArgs(args)

        if self.sysroot and self.board:
            cros_build_lib.Die('--sysroot and --board are incompatible.')

        # Setup various environment variables based on our current board. These
        # variables are normally setup inside emerge-${BOARD}, but since we don't
        # call that script, we have to set it up here. These variables serve to
        # point our tools at /build/BOARD and to setup cross compiles to the
        # appropriate board as configured in toolchain.conf.
        if self.board:
            self.sysroot = os.environ.get(
                'SYSROOT', cros_build_lib.GetSysroot(self.board))

        if self.sysroot:
            os.environ['PORTAGE_CONFIGROOT'] = self.sysroot
            os.environ['SYSROOT'] = self.sysroot

        # Turn off interactive delays
        os.environ['EBEEP_IGNORE'] = '1'
        os.environ['EPAUSE_IGNORE'] = '1'
        os.environ['CLEAN_DELAY'] = '0'

        # Parse the emerge options.
        action, opts, cmdline_packages = parse_opts(emerge_args, silent=True)

        # Set environment variables based on options. Portage normally sets these
        # environment variables in emerge_main, but we can't use that function,
        # because it also does a bunch of other stuff that we don't want.
        # TODO(davidjames): Patch portage to move this logic into a function we can
        # reuse here.
        if '--debug' in opts:
            os.environ['PORTAGE_DEBUG'] = '1'
        if '--config-root' in opts:
            os.environ['PORTAGE_CONFIGROOT'] = opts['--config-root']
        if '--root' in opts:
            os.environ['ROOT'] = opts['--root']
        elif self.board and 'ROOT' not in os.environ:
            os.environ['ROOT'] = self.sysroot
        if '--accept-properties' in opts:
            os.environ['ACCEPT_PROPERTIES'] = opts['--accept-properties']

        # If we're installing packages to the board, we can disable vardb locks.
        # This is safe because we only run up to one instance of parallel_emerge in
        # parallel.
        # TODO(davidjames): Enable this for the host too.
        if self.sysroot:
            os.environ.setdefault('PORTAGE_LOCKS', 'false')

        # Now that we've setup the necessary environment variables, we can load the
        # emerge config from disk.
        # pylint: disable=unpacking-non-sequence
        settings, trees, mtimedb = load_emerge_config()

        # Add in EMERGE_DEFAULT_OPTS, if specified.
        # NOTE: this re-parses with the defaults prepended so explicit
        # command-line args win over EMERGE_DEFAULT_OPTS.
        tmpcmdline = []
        if '--ignore-default-opts' not in opts:
            tmpcmdline.extend(settings['EMERGE_DEFAULT_OPTS'].split())
        tmpcmdline.extend(emerge_args)
        action, opts, cmdline_packages = parse_opts(tmpcmdline)

        # If we're installing to the board, we want the --root-deps option so that
        # portage will install the build dependencies to that location as well.
        if self.sysroot:
            opts.setdefault('--root-deps', True)

        # Check whether our portage tree is out of date. Typically, this happens
        # when you're setting up a new portage tree, such as in setup_board and
        # make_chroot. In that case, portage applies a bunch of global updates
        # here. Once the updates are finished, we need to commit any changes
        # that the global update made to our mtimedb, and reload the config.
        #
        # Portage normally handles this logic in emerge_main, but again, we can't
        # use that function here.
        if _global_updates(trees, mtimedb['updates']):
            mtimedb.commit()
            # pylint: disable=unpacking-non-sequence
            settings, trees, mtimedb = load_emerge_config(trees=trees)

        # Setup implied options. Portage normally handles this logic in
        # emerge_main.
        if '--buildpkgonly' in opts or 'buildpkg' in settings.features:
            opts.setdefault('--buildpkg', True)
        if '--getbinpkgonly' in opts:
            opts.setdefault('--usepkgonly', True)
            opts.setdefault('--getbinpkg', True)
        if 'getbinpkg' in settings.features:
            # Per emerge_main, FEATURES=getbinpkg overrides --getbinpkg=n
            opts['--getbinpkg'] = True
        if '--getbinpkg' in opts or '--usepkgonly' in opts:
            opts.setdefault('--usepkg', True)
        if '--fetch-all-uri' in opts:
            opts.setdefault('--fetchonly', True)
        if '--skipfirst' in opts:
            opts.setdefault('--resume', True)
        if '--buildpkgonly' in opts:
            # --buildpkgonly will not merge anything, so it overrides all binary
            # package options.
            for opt in ('--getbinpkg', '--getbinpkgonly', '--usepkg',
                        '--usepkgonly'):
                opts.pop(opt, None)
        if (settings.get('PORTAGE_DEBUG', '') == '1'
                and 'python-trace' in settings.features):
            portage.debug.set_trace(True)

        # Complain about unsupported options
        for opt in ('--ask', '--ask-enter-invalid', '--resume', '--skipfirst'):
            if opt in opts:
                print('%s is not supported by parallel_emerge' % opt)
                sys.exit(1)

        # Make emerge specific adjustments to the config (e.g. colors!)
        adjust_configs(opts, trees)

        # Save our configuration so far in the emerge object
        emerge = self.emerge
        emerge.action, emerge.opts = action, opts
        emerge.settings, emerge.trees, emerge.mtimedb = settings, trees, mtimedb
        emerge.cmdline_packages = cmdline_packages
        root = settings['ROOT']
        emerge.root_config = trees[root]['root_config']

        if '--usepkg' in opts:
            emerge.trees[root]['bintree'].populate('--getbinpkg' in opts)
Example #8
0
    def _run_builder(self, dirs_cleanup_queue):
        """
        This method is called by _run and executes the whole package build
        logic, including constraints validation given by argv parameters.
        NOTE: negative errors indicate warnings that can be skipped.

        Returns 0 on every handled abort path (empty queue, failed
        dependency calculation, --pretend, filtered-out queue); otherwise
        returns the Scheduler merge() return value (or the buildfail
        hook's non-zero exit status).
        """
        if self._packages:
            first_package = self._packages[0]
        else:
            first_package = "_empty_"

        log_dir = mkdtemp(prefix="matter_build.",
                          suffix="." +
                          first_package.replace("/", "_").lstrip("<>=~"))
        dirs_cleanup_queue.append(log_dir)

        emerge_settings, emerge_trees, mtimedb = self._emerge_config

        # reset settings to original state, variables will be reconfigured
        # while others may remain saved due to backup_changes().
        emerge_settings.unlock()
        emerge_settings.reset()
        emerge_settings.lock()

        # Setup stable/unstable keywords, must be done on
        # emerge_settings because the reference is spread everywhere
        # in emerge_trees.
        # This is not thread-safe, but Portage isn't either, so
        # who cares!
        # ACCEPT_KEYWORDS is not saved and reset every time by the
        # reset() call above.
        portdb = emerge_trees[emerge_settings["ROOT"]]["porttree"].dbapi

        self._setup_keywords(portdb, emerge_settings)

        portdb.freeze()
        vardb = emerge_trees[emerge_settings["ROOT"]]["vartree"].dbapi
        vardb.settings.unlock()
        vardb.settings["PORT_LOGDIR"] = log_dir
        vardb.settings.backup_changes("PORT_LOGDIR")
        vardb.settings.lock()

        # Load the most current variables from /etc/profile.env, which
        # has been re-generated by the env-update call in _run()
        emerge_settings.unlock()
        emerge_settings.reload()
        emerge_settings.regenerate()
        emerge_settings.lock()

        sets = self._get_sets_mod()  # can be None
        sets_conf = None
        if sets is not None:
            sets_conf = sets.load_default_config(
                emerge_settings, emerge_trees[emerge_settings["ROOT"]])

        packages = []
        # execute basic, pre-graph generation filters against each
        # package dependency in self._packages.
        # This is just fast pruning of obvious obviousness.
        for package in self._packages:
            expanded_pkgs = []

            # package sets support
            if package.startswith("@") and sets_conf:
                try:
                    set_pkgs = sets_conf.getSetAtoms(package[1:])
                    expanded_pkgs.extend(sorted(set_pkgs))
                except sets.PackageSetNotFound:
                    # make it fail, add set directly
                    expanded_pkgs.append(package)
            else:
                expanded_pkgs.append(package)

            for exp_pkg in expanded_pkgs:
                accepted = self._pre_graph_filters(exp_pkg, portdb, vardb)
                for best_visible in accepted:
                    packages.append((exp_pkg, best_visible))

        if not packages:
            print_warning("No remaining packages in queue, aborting.")
            return 0

        # at this point we can go ahead building packages
        print_info("starting to build:")
        for package, best_visible in packages:
            print_info(": %s -> %s" % (
                package,
                best_visible,
            ))

        if not getcolor():
            portage.output.nocolor()

        # non interactive properties, this is not really required
        # accept-properties just sets os.environ...
        build_args = list(self._setup_build_args(self._params))
        build_args += ["=" + best_v for _x, best_v in packages]

        myaction, myopts, myfiles = parse_opts(build_args)
        adjust_configs(myopts, emerge_trees)
        apply_priorities(emerge_settings)

        spinner = stdout_spinner()
        if "--quiet" in myopts:
            spinner.update = spinner.update_basic
        elif "--nospinner" in myopts:
            spinner.update = spinner.update_basic
        if emerge_settings.get("TERM") == "dumb" or not is_stdout_a_tty():
            spinner.update = spinner.update_basic

        print_info("emerge args: %s" % (" ".join(build_args), ))

        params = create_depgraph_params(myopts, myaction)
        success, graph, favorites = backtrack_depgraph(emerge_settings,
                                                       emerge_trees, myopts,
                                                       params, myaction,
                                                       myfiles, spinner)

        if not success:
            # print issues to stdout and give up
            print_warning("dependencies calculation failed, aborting")
            graph.display_problems()

            # try to collect some info about the failure
            bt_config = (graph.get_backtrack_infos() or {}).get("config", {})
            for k, v in bt_config.items():
                if k == "needed_use_config_changes":
                    for tup in v:
                        try:
                            pkg, (new_use, new_changes) = tup
                        except (ValueError, TypeError):
                            print_error(
                                "unsupported needed_use_config_changes: %s" %
                                (tup, ))
                            continue
                        obj = self._missing_use_packages.setdefault(
                            "%s" % (pkg.cpv, ), {})
                        obj["cp:slot"] = "%s" % (pkg.slot_atom, )
                        changes = obj.setdefault("changes", {})
                        changes.update(copy.deepcopy(new_changes))
                elif k == "needed_unstable_keywords":
                    for pkg in v:
                        self._needed_unstable_keywords.add("%s" % (pkg.cpv, ))
                elif k == "needed_p_mask_changes":
                    for pkg in v:
                        self._needed_package_mask_changes.add("%s" %
                                                              (pkg.cpv, ))
                elif k == "needed_license_changes":
                    for pkg, lics in v:
                        obj = self._needed_license_changes.setdefault(
                            "%s" % (pkg.cpv, ), set())
                        obj.update(lics)
                else:
                    print_warning("unsupported backtrack info: %s -> %s" % (
                        k,
                        v,
                    ))

            return 0
        print_info("dependency graph generated successfully")

        real_queue = self._post_graph_filters(graph, vardb, portdb)
        if real_queue is None:
            # post-graph filters not passed, giving up
            return 0

        merge_queue = [x for x in real_queue if x.operation == "merge"]
        unmerge_queue = [x for x in real_queue if x.operation == "uninstall"]
        if merge_queue:
            print_info("about to build the following packages:")
            for pkg in merge_queue:
                print_info("  %s" % (pkg.cpv, ))
        if unmerge_queue:
            print_info("about to uninstall the following packages:")
            for pkg in unmerge_queue:
                print_info("  %s" % (pkg.cpv, ))

        if self._pretend:
            print_info("portage spawned with --pretend, done!")
            return 0

        # re-calling action_build(), deps are re-calculated though
        validate_ebuild_environment(emerge_trees)
        mergetask = Scheduler(emerge_settings,
                              emerge_trees,
                              mtimedb,
                              myopts,
                              spinner,
                              favorites=favorites,
                              graph_config=graph.schedulerGraph())
        del graph
        self.clear_caches(self._emerge_config)
        retval = mergetask.merge()

        not_merged = []
        real_queue_map = dict((pkg.cpv, pkg) for pkg in real_queue)
        failed_package = None
        if retval != 0:
            # default to [] so a resume entry without "mergelist" does not
            # make the iteration below raise TypeError
            merge_list = mtimedb.get("resume", {}).get("mergelist", [])
            for _merge_type, _merge_root, merge_atom, _merge_act in merge_list:
                merge_atom = "%s" % (merge_atom, )
                if failed_package is None:
                    # we consider the first encountered package the one
                    # that failed. It makes sense since packages are built
                    # serially as of today.
                    # Also, the package object must be available in our
                    # package queue, so grab it from there.
                    failed_package = real_queue_map.get(merge_atom)
                not_merged.append(merge_atom)
                self._not_merged_packages.append(merge_atom)

        for pkg in real_queue:
            cpv = pkg.cpv
            if not cpv:
                print_warning("package: %s, has broken cpv: '%s', ignoring" % (
                    pkg,
                    cpv,
                ))
            elif cpv not in not_merged:
                if pkg.operation == "merge":
                    # add to build queue
                    print_info("package: %s, successfully built" % (cpv, ))
                    self._built_packages.append("%s" % (cpv, ))
                else:
                    # add to uninstall queue
                    print_info("package: %s, successfully uninstalled" %
                               (cpv, ))
                    self._uninstalled_packages.append("%s" % (cpv, ))

        post_emerge(myaction, myopts, myfiles, emerge_settings["ROOT"],
                    emerge_trees, mtimedb, retval)

        subprocess.call(["env-update"])

        if failed_package is not None:
            print_warning("failed package: %s::%s" % (
                failed_package.cpv,
                failed_package.repo,
            ))

        if self._params["buildfail"] and (failed_package is not None):

            std_env = self._build_standard_environment(
                repository=self._params["repository"])
            std_env["MATTER_PACKAGE_NAMES"] = " ".join(self._packages)
            std_env["MATTER_PORTAGE_FAILED_PACKAGE_NAME"] = failed_package.cpv
            std_env["MATTER_PORTAGE_REPOSITORY"] = failed_package.repo
            # call pkgfail hook if defined
            std_env["MATTER_PORTAGE_BUILD_LOG_DIR"] = os.path.join(
                log_dir, "build")

            buildfail = self._params["buildfail"]
            print_info("spawning buildfail: %s" % (buildfail, ))
            tmp_fd, tmp_path = mkstemp()
            # copy the hook to a private temp file before executing it, so
            # the original cannot change under us between chmod and call
            with os.fdopen(tmp_fd, "wb") as tmp_f:
                with open(buildfail, "rb") as buildfail_f:
                    tmp_f.write(buildfail_f.read())
            try:
                # now execute
                os.chmod(tmp_path, 0o700)
                exit_st = subprocess.call([tmp_path], env=std_env)
                if exit_st != 0:
                    return exit_st
            finally:
                os.remove(tmp_path)

        print_info("portage spawned, return value: %d" % (retval, ))
        return retval
  def parse_emerge_args(self, args):
      """Parse emerge-style *args*, returning (action, opts, files)."""
      return parse_opts(args, silent=True)