def _send_jobs(reqs):
    # presumably a closure inside a client method: self, kw, _parse, and
    # ident come from the enclosing scope
    jobs = []
    for req in iflatten_instance(reqs, Request):
        parse = getattr(req, 'parse', ident)
        iterate = getattr(req, '_iterate', ExtractData)
        req_parse = getattr(req, 'parse_response', None)
        raw = getattr(req, '_raw', None)
        generator = bool(getattr(req, '_reqs', ()))
        if isinstance(req, Request) and generator:
            # force subreqs to be sent and parsed in parallel
            data = _send_jobs(iter(req))
            jobs.append(self.executor.submit(
                _parse, parse, iterate, data, generator))
        else:
            http_reqs = []
            if not hasattr(req, '__iter__'):
                req = [req]
            for r in iflatten_instance(req, requests.Request):
                if isinstance(r, requests.Request):
                    func = partial(
                        self._http_send, raw=raw, req_parse=req_parse, **kw)
                else:
                    func = ident
                http_reqs.append(self.executor.submit(func, r))
            if http_reqs:
                jobs.append(self.executor.submit(
                    _parse, parse, iterate, http_reqs, generator))
    return jobs
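# A minimal runnable sketch of the fan-out pattern above, assuming a
# ThreadPoolExecutor and hypothetical stand-ins for _http_send/_parse/ident:
# each HTTP send is submitted individually, then a parse job consumes the
# resulting futures.
from concurrent.futures import ThreadPoolExecutor

def _http_send_sketch(req):
    # stand-in for self._http_send
    return f"response-for-{req}"

def _parse_sketch(parse, responses):
    # stand-in for _parse: resolve each future, then apply the parser
    return [parse(r.result()) for r in responses]

with ThreadPoolExecutor() as executor:
    http_jobs = [executor.submit(_http_send_sketch, r) for r in ('a', 'b')]
    parse_job = executor.submit(_parse_sketch, str.upper, http_jobs)
    print(parse_job.result())  # ['RESPONSE-FOR-A', 'RESPONSE-FOR-B']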
def _identify_candidates(self, restrict, sorter):
    # full expansion
    if not isinstance(restrict, boolean.base) or isinstance(restrict, atom):
        return self._fast_identify_candidates(restrict, sorter)
    dsolutions = [
        ([c.restriction for c in collect_package_restrictions(x, ("category",))],
         [p.restriction for p in collect_package_restrictions(x, ("package",))])
        for x in restrict.iter_dnf_solutions(True)]

    # see if any solution state isn't dependent on cat/pkg in any way;
    # if so, search the whole search space.
    for x in dsolutions:
        if not x[0] and not x[1]:
            if sorter is iter:
                return self.versions
            return (
                (c, p)
                for c in sorter(self.categories)
                for p in sorter(self.packages.get(c, ())))

    # simple cases first.
    # if one specifies categories, and one doesn't
    cat_specified = bool(dsolutions[0][0])
    pkg_specified = bool(dsolutions[0][1])
    pgetter = self.packages.get
    if any(True for x in dsolutions[1:] if bool(x[0]) != cat_specified):
        if any(True for x in dsolutions[1:] if bool(x[1]) != pkg_specified):
            # damn. so we've got a mix: some specify cats, some
            # don't, some specify pkgs, some don't.
            # this may be optimizable
            return self.versions
        # ok. so... one doesn't specify a category, but they all
        # specify packages (or don't)
        pr = values.OrRestriction(
            *tuple(iflatten_instance(
                (x[1] for x in dsolutions if x[1]), values.base)))
        return (
            (c, p)
            for c in sorter(self.categories)
            for p in sorter(pgetter(c, [])) if pr.match(p))
    elif any(True for x in dsolutions[1:] if bool(x[1]) != pkg_specified):
        # one (or more) don't specify pkgs, but they all specify cats.
        cr = values.OrRestriction(
            *tuple(iflatten_instance(
                (x[0] for x in dsolutions), values.base)))
        cats_iter = (c for c in sorter(self.categories) if cr.match(c))
        return (
            (c, p)
            for c in cats_iter
            for p in sorter(pgetter(c, [])))
    return self._fast_identify_candidates(restrict, sorter)
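# A hypothetical trace of the per-solution category/package split that
# _identify_candidates keys off (real restriction objects replaced by plain
# strings for illustration):
dsolutions = [(['dev-util'], ['foo']),  # constrains both cat and pkg
              (['dev-util'], [])]       # constrains cat only
cat_specified = bool(dsolutions[0][0])
pkg_specified = bool(dsolutions[0][1])
# pkg specification is mixed across solutions while cat is consistent, so
# the method falls back to scanning every package within matching categories
print(any(bool(x[1]) != pkg_specified for x in dsolutions[1:]))  # True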
def collapse_envd(base):
    collapsed_d = {}
    try:
        env_d_files = sorted(listdir_files(base))
    except FileNotFoundError:
        pass
    else:
        for x in env_d_files:
            if x.endswith(".bak") or x.endswith("~") or x.startswith("._cfg") \
                    or len(x) <= 2 or not x[0:2].isdigit():
                continue
            d = read_bash_dict(pjoin(base, x))
            # inefficient, but works.
            for k, v in d.items():
                collapsed_d.setdefault(k, []).append(v)
            del d

    loc_incrementals = set(incrementals)
    loc_colon_parsed = set(colon_parsed)

    # split out env.d defined incrementals..
    # update incrementals *and* colon parsed for colon_separated;
    # incrementals on its own is space separated.
    for x in collapsed_d.pop("COLON_SEPARATED", []):
        v = x.split()
        if v:
            loc_colon_parsed.update(v)
    loc_incrementals.update(loc_colon_parsed)

    # now space.
    for x in collapsed_d.pop("SPACE_SEPARATED", []):
        v = x.split()
        if v:
            loc_incrementals.update(v)

    # now reinterpret.
    for k, v in collapsed_d.items():
        if k not in loc_incrementals:
            collapsed_d[k] = v[-1]
            continue
        if k in loc_colon_parsed:
            collapsed_d[k] = [
                _f for _f in iflatten_instance(x.split(':') for x in v) if _f]
        else:
            collapsed_d[k] = [
                _f for _f in iflatten_instance(x.split() for x in v) if _f]

    return collapsed_d, loc_incrementals, loc_colon_parsed
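# A hypothetical walk-through of the reinterpretation rules above, using
# plain dicts in place of read_bash_dict output (names and values made up):
collapsed = {'PATH': ['/usr/bin', '/usr/local/bin:/opt/bin'],
             'EDITOR': ['nano', 'vim']}
incrementals = {'PATH'}
colon_parsed = {'PATH'}
for k, v in collapsed.items():
    if k not in incrementals:
        collapsed[k] = v[-1]  # non-incremental: last env.d file wins
    elif k in colon_parsed:
        collapsed[k] = [p for x in v for p in x.split(':') if p]
print(collapsed)
# {'PATH': ['/usr/bin', '/usr/local/bin', '/opt/bin'], 'EDITOR': 'vim'}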
def collapse_envd(base):
    collapsed_d = {}
    try:
        env_d_files = sorted(listdir_files(base))
    except OSError as oe:
        if oe.errno != errno.ENOENT:
            raise
    else:
        for x in env_d_files:
            if x.endswith(".bak") or x.endswith("~") or x.startswith("._cfg") \
                    or len(x) <= 2 or not x[0:2].isdigit():
                continue
            d = read_bash_dict(pjoin(base, x))
            # inefficient, but works.
            for k, v in d.iteritems():
                collapsed_d.setdefault(k, []).append(v)
            del d

    loc_incrementals = set(incrementals)
    loc_colon_parsed = set(colon_parsed)

    # split out env.d defined incrementals..
    # update incrementals *and* colon parsed for colon_separated;
    # incrementals on its own is space separated.
    for x in collapsed_d.pop("COLON_SEPARATED", []):
        v = x.split()
        if v:
            loc_colon_parsed.update(v)
    loc_incrementals.update(loc_colon_parsed)

    # now space.
    for x in collapsed_d.pop("SPACE_SEPARATED", []):
        v = x.split()
        if v:
            loc_incrementals.update(v)

    # now reinterpret.
    for k, v in collapsed_d.iteritems():
        if k not in loc_incrementals:
            collapsed_d[k] = v[-1]
            continue
        if k in loc_colon_parsed:
            collapsed_d[k] = filter(None, iflatten_instance(
                x.split(':') for x in v))
        else:
            collapsed_d[k] = filter(None, iflatten_instance(
                x.split() for x in v))

    return collapsed_d, loc_incrementals, loc_colon_parsed
def _flatten_or_restrictions(i):
    for x in i:
        if isinstance(x, OrRestriction):
            for y in iflatten_instance(x, (atom,)):
                yield (y, True)
        else:
            yield (x, False)
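# Nearly every snippet here leans on iflatten_instance; this is a minimal
# sketch of its assumed semantics (intended to match
# snakeoil.sequences.iflatten_instance): flatten nested iterables
# depth-first, treating instances of the given types -- and strings -- as
# leaves rather than recursing into them.
def iflatten_instance_sketch(iterable, skip_types=(str,)):
    for x in iterable:
        if isinstance(x, skip_types) or isinstance(x, str):
            yield x
        elif hasattr(x, '__iter__'):
            yield from iflatten_instance_sketch(x, skip_types)
        else:
            yield x

print(list(iflatten_instance_sketch([1, [2, [3, 'ab']], 'cd'])))
# [1, 2, 3, 'ab', 'cd']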
def invokable(self, namespace, attr):
    l = []
    for x in self.attrs:
        val = getattr(namespace, x, None)
        if val is None:
            continue
        if isinstance(val, bool):
            # Skip converter call for disabled boolean actions
            if not val:
                self.converter = False
        elif isinstance(val, restriction.base):
            l.append(val)
        else:
            l.extend(val)
    if self.converter:
        l = self.converter(l, namespace)
    l = list(iflatten_instance(l, (restriction.base,)))
    if len(l) > 1:
        val = self.klass(*l)
    elif l:
        val = l[0]
    else:
        val = None
    setattr(namespace, attr, val)
def read_updates(path):
    def f():
        d = deque()
        return [d, d]
    # mods tracks the start point [0], and the tail, [1].
    # via this, pkg moves into a specific pkg can pick up
    # changes past that point, while ignoring changes prior
    # to that point.
    # Afterwards, we flatten it to get a per cp chain of commands.
    # no need to do lookups basically, although we do need to
    # watch for cycles.
    mods = defaultdict(f)
    moved = {}
    for fp in _scan_directory(path):
        fp = pjoin(path, fp)
        _process_update(readlines(fp), fp, mods, moved)
    # force a walk of the tree, flattening it
    commands = {k: list(iflatten_instance(v[0], tuple))
                for k, v in mods.iteritems()}
    # filter out empty nodes.
    commands = {k: v for k, v in commands.iteritems() if v}
    return commands
def assertUri(self, obj, uri):
    uri = list(uri)
    self.assertEqual(list(iflatten_instance(obj)), uri)
    if uri:
        self.assertTrue(obj)
    else:
        self.assertFalse(obj)
def start(self):
    self.unused_master_licenses = set()
    self.unused_master_mirrors = set()
    self.unused_master_eclasses = set()
    self.unused_master_flags = set()

    # combine licenses/mirrors/eclasses/flags from all master repos
    for repo in self.options.target_repo.masters:
        self.unused_master_licenses.update(repo.licenses)
        self.unused_master_mirrors.update(repo.mirrors.keys())
        self.unused_master_eclasses.update(repo.eclass_cache.eclasses.keys())
        self.unused_master_flags.update(
            flag for matcher, (flag, desc) in repo.config.use_desc)

    # determine unused licenses/mirrors/eclasses/flags across all master repos
    for repo in self.options.target_repo.masters:
        for pkg in repo:
            self.unused_master_licenses.difference_update(
                iflatten_instance(pkg.license))
            self.unused_master_mirrors.difference_update(
                self._get_mirrors(pkg))
            self.unused_master_eclasses.difference_update(pkg.inherited)
            self.unused_master_flags.difference_update(
                pkg.iuse_stripped.difference(pkg.local_use.keys()))
def feed(self, pkg):
    # report licenses used in the pkg but not in any pkg from the master repo(s)
    if self.unused_master_licenses:
        pkg_licenses = set(iflatten_instance(pkg.license))
        licenses = self.unused_master_licenses & pkg_licenses
        if licenses:
            yield UnusedInMastersLicenses(sorted(licenses), pkg=pkg)

    # report mirrors used in the pkg but not in any pkg from the master repo(s)
    if self.unused_master_mirrors:
        pkg_mirrors = self._get_mirrors(pkg)
        mirrors = self.unused_master_mirrors & pkg_mirrors
        if mirrors:
            yield UnusedInMastersMirrors(sorted(mirrors), pkg=pkg)

    # report eclasses used in the pkg but not in any pkg from the master repo(s)
    if self.unused_master_eclasses:
        pkg_eclasses = set(pkg.inherited)
        eclasses = self.unused_master_eclasses & pkg_eclasses
        if eclasses:
            yield UnusedInMastersEclasses(sorted(eclasses), pkg=pkg)

    # report global USE flags used in the pkg but not in any pkg from the master repo(s)
    if self.unused_master_flags:
        non_local_use = pkg.iuse_stripped.difference(pkg.local_use.keys())
        flags = self.unused_master_flags.intersection(non_local_use)
        if flags:
            yield UnusedInMastersGlobalUse(sorted(flags), pkg=pkg)
def flatten_restricts(self, v):
    i = expandable_chain(v)
    depth = 0
    conditionals = []
    for x in i:
        for t, s in ((boolean.OrRestriction, "||"),
                     (boolean.AndRestriction, "&&")):
            if isinstance(x, t):
                yield s
                yield "("
                i.appendleft(")")
                i.appendleft(x.restrictions)
                depth += 1
                break
        else:
            if isinstance(x, packages.Conditional):
                self.assertTrue(x.attr == "use")
                conditionals.insert(
                    depth, list(self.mangle_cond_payload(x.restriction)))
                yield set(iflatten_instance(conditionals[:depth + 1]))
                yield "("
                i.appendleft(")")
                i.appendleft(x.payload)
                depth += 1
            else:
                if x == ")":
                    self.assertTrue(depth)
                    depth -= 1
                yield x
    self.assertFalse(depth)
def _generate_fetchables(self, mirroring=False):
    pkg = self.pkg
    if not mirroring:
        return pkg.fetchables
    pkg = getattr(pkg, '_raw_pkg', pkg)
    return tuple(iflatten_instance(pkg.fetchables, _fetch_module.fetchable))
def use_validate(self, klasses, pkg, seq, attr=None):
    skip_filter = (packages.Conditional,) + klasses
    nodes = iflatten_instance(seq, skip_filter)
    unstated = set()
    vals = dict(self._flatten_restricts(
        nodes, skip_filter, stated=pkg.iuse_stripped,
        unstated=unstated, attr=attr))
    return vals, self._unstated_iuse(pkg, attr, unstated)
def read_updates(path, eapi):
    def f():
        d = deque()
        return [d, d]
    # mods tracks the start point [0], and the tail, [1].
    # via this, pkg moves into a specific pkg can pick up
    # changes past that point, while ignoring changes prior
    # to that point.
    # Afterwards, we flatten it to get a per cp chain of commands.
    # no need to do lookups basically, although we do need to
    # watch for cycles.
    mods = defaultdict(f)
    moved = {}
    try:
        for fp in _scan_directory(path, eapi):
            with open(pjoin(path, fp)) as f:
                data = (line.rstrip('\n') for line in f)
                _process_updates(data, fp, mods, moved)
    except FileNotFoundError:
        pass
    # force a walk of the tree, flattening it
    commands = {k: list(iflatten_instance(v[0], tuple))
                for k, v in mods.items()}
    # filter out empty nodes.
    commands = {k: v for k, v in commands.items() if v}
    return commands
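# A small demonstration of the shared start/tail trick behind f() above, as
# I read it: both list slots initially alias one deque, so the start keeps
# seeing the full chain while a move can splice a fresh deque onto the tail
# and re-point slot [1] at it; later commands then land past the move point.
# Flattening the start recovers the per-cp command chain (command tuples
# here are made up).
from collections import deque

d = deque()
node = [d, d]  # [start, tail]
node[1].append(('move', 'cat/old', 'cat/new'))
new_tail = deque()
node[1].append(new_tail)  # splice a nested deque in at the move point
node[1] = new_tail
node[1].append(('slotmove', 'cat/new', '0', '1'))

def flatten(x):
    # stand-in for iflatten_instance(v[0], tuple)
    for item in x:
        if isinstance(item, tuple):
            yield item
        else:
            yield from flatten(item)

print(list(flatten(node[0])))
# [('move', 'cat/old', 'cat/new'), ('slotmove', 'cat/new', '0', '1')]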
def get_data(self, repo, options):
    data = {}
    pos = 0
    for pos, pkg in enumerate(repo):
        for license in unstable_unique(iflatten_instance(pkg.license)):
            data.setdefault(license, 0)
            data[license] += 1
    return data, pos + 1
def use_validate(self, klasses, pkg, seq, reporter, attr=None):
    skip_filter = (packages.Conditional,) + klasses
    unstated = set()
    stated = pkg.iuse_stripped

    i = expandable_chain(iflatten_instance(seq, skip_filter))
    for node in i:
        if isinstance(node, packages.Conditional):
            # invert it; get only what's not in pkg.iuse
            unstated.update(ifilterfalse(stated.__contains__,
                                         node.restriction.vals))
            i.append(iflatten_instance(node.payload, skip_filter))
            continue
        yield node

    # implicit IUSE flags
    unstated.difference_update(self.unstated_iuse)
    if unstated:
        reporter.add_report(UnstatedIUSE(pkg, attr, unstated))
def iter_pull_data(self, pkg):
    l = [self.defaults]
    for specific in self.freeform:
        l.extend(data for restrict, data in specific if restrict.match(pkg))
    for atom, data in self.atoms.get(pkg.key, ()):
        if atom.match(pkg):
            l.append(data)
    if len(l) == 1:
        return iter(self.defaults)
    return iflatten_instance(l)
def __unwrap_stage_dependencies__(cls):
    stage_depends = cls.stage_depends
    for x in set(x for x in iflatten_instance(stage_depends.iteritems()) if x):
        try:
            f = getattr(cls, x)
        except AttributeError:
            raise TypeError(
                "class %r stage_depends specifies %r, which doesn't exist"
                % (cls, x))
        setattr(cls, x, getattr(f, 'sd_raw_func', f))
def get_data(self, repo, options):
    data = {}
    pos = 0  # guard against empty repos (mirrors the licenses variant above)
    for pos, pkg in enumerate(repo):
        for fetchable in iflatten_instance(pkg.fetchables, fetch.fetchable):
            for mirror in fetchable.uri.visit_mirrors(
                    treat_default_as_mirror=False):
                if isinstance(mirror, tuple):
                    mirror = mirror[0]
                data.setdefault(mirror.mirror_name, 0)
                data[mirror.mirror_name] += 1
    return data, pos + 1
def launch_browser(urls, browser=None):
    """Launch URLs in a browser."""
    browser = browser if browser is not None else const.BROWSER
    urls = list(iflatten_instance(urls))
    try:
        subprocess.Popen(
            [browser] + urls,
            stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
    except (PermissionError, FileNotFoundError) as e:
        raise BiteError(f'failed running browser: {browser}: {e.strerror}')
def parse_target(restriction, repo, livefs_repos, return_none=False):
    """Use :obj:`parserestrict.parse_match` to produce a list of matches.

    This matches the restriction against a repo. If multiple pkgs match and a
    simple package name was provided, then the restriction is applied against
    installed repos. If multiple matches still exist then pkgs from the
    'virtual' category are skipped. If multiple pkgs still match the
    restriction, AmbiguousQuery is raised otherwise the matched atom is
    returned. On the other hand, if a globbed match was specified, all repo
    matches are returned.

    :param restriction: string to convert.
    :param repo: :obj:`pkgcore.repository.prototype.tree` instance to search in.
    :param livefs_repos: :obj:`pkgcore.config.domain.all_livefs_repos` instance
        to search in.
    :param return_none: indicates if no matches raises or returns C{None}
    :return: a list of matches or C{None}.
    """
    key_matches = {x.key for x in repo.itermatch(restriction)}
    if not key_matches:
        if return_none:
            return None
        raise NoMatches(restriction)
    elif len(key_matches) > 1:
        if any(isinstance(r, restricts.PackageDep)
               for r in iflatten_instance([restriction])):
            if len(restriction) > 1:
                # drop repo specific restrictions, ebuild repos will not
                # match installed (vdb) repo
                restriction = restriction.remove_restriction(
                    restriction_types=(restricts.RepositoryDep,))
            # find installed package matches
            matches = {x.key for x in livefs_repos.itermatch(restriction)}
            # try removing virtuals if there are multiple installed matches
            # or none at all
            if not matches:
                matches = {x for x in key_matches
                           if not x.startswith('virtual/')}
            elif len(matches) > 1:
                matches = {x for x in matches if not x.startswith('virtual/')}
            if len(matches) == 1:
                return [atom(matches.pop())]
            raise AmbiguousQuery(restriction, sorted(key_matches))
        else:
            # if a glob was specified then just return every match
            return [atom(x) for x in key_matches]
    if isinstance(restriction, atom):
        # atom is guaranteed to be fine, since it's cat/pkg
        return [restriction]
    return [packages.KeyedAndRestriction(restriction, key=key_matches.pop())]
def known_conditionals(self):
    if self._node_conds is False:
        return frozenset()
    if self._known_conditionals is None:
        kc = set()
        for payload, restrictions in self.find_cond_nodes(self.restrictions):
            kc.update(iflatten_instance(x.vals for x in restrictions))
        kc = frozenset(kc)
        object.__setattr__(self, "_known_conditionals", kc)
        return kc
    return self._known_conditionals
def use_validate(self, klasses, pkg, seq, reporter=None, attr=None):
    skip_filter = (packages.Conditional,) + klasses
    unstated = set()
    stated = pkg.iuse_stripped

    i = expandable_chain(iflatten_instance(seq, skip_filter))
    for node in i:
        if isinstance(node, packages.Conditional):
            # invert it; get only what's not in pkg.iuse
            unstated.update(
                filterfalse(stated.__contains__, node.restriction.vals))
            i.append(iflatten_instance(node.payload, skip_filter))
            continue
        elif attr == 'required_use':
            unstated.update(filterfalse(stated.__contains__, node.vals))
        yield node

    # implicit IUSE flags
    if reporter is not None and attr is not None:
        unstated.difference_update(self.unstated_iuse)
        if unstated:
            reporter.add_report(UnstatedIUSE(pkg, attr, sorted(unstated)))
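# A hypothetical stand-in for packages.Conditional showing how use_validate
# collects flags referenced by conditionals but missing from IUSE (class
# names and data below are made up for illustration):
class FakeRestriction:
    def __init__(self, vals):
        self.vals = vals

class FakeConditional:
    def __init__(self, vals, payload):
        self.restriction = FakeRestriction(vals)
        self.payload = payload

iuse = {'ssl'}
node = FakeConditional({'ssl', 'gtk'}, ['dev-libs/foo'])
unstated = {v for v in node.restriction.vals if v not in iuse}
print(unstated)  # {'gtk'} -- would be reported as UnstatedIUSE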
def _yield_deps(inst, d, k):
    # While at first glance this looks like it should use expandable_chain,
    # it shouldn't. --charlie
    if k not in d:
        yield k
        return
    s = [k, iflatten_instance(d.get(k, ()))]
    while s:
        if isinstance(s[-1], basestring):
            yield s.pop(-1)
            continue
        exhausted = True
        for x in s[-1]:
            v = d.get(x)
            if v:
                s.append(x)
                s.append(iflatten_instance(v))
                exhausted = False
                break
            yield x
        if exhausted:
            s.pop(-1)
def __wrap_stage_dependencies__(cls):
    stage_depends = cls.stage_depends
    # we use id instead of the cls itself to prevent strong ref issues.
    cls_id = id(cls)
    for x in set(x for x in iflatten_instance(stage_depends.iteritems()) if x):
        try:
            f = getattr(cls, x)
        except AttributeError:
            raise TypeError(
                "class %r stage_depends specifies %r, which doesn't exist"
                % (cls, x))
        f2 = pre_curry(_ensure_deps, cls_id, x, f)
        f2.sd_raw_func = f
        setattr(cls, x, f2)
def _flatten_restricts(self, nodes, skip_filter, stated, unstated, attr,
                       restricts=None):
    for node in nodes:
        k = node
        v = restricts if restricts is not None else []
        if isinstance(node, packages.Conditional):
            # invert it; get only what's not in pkg.iuse
            unstated.update(
                filterfalse(stated.__contains__, node.restriction.vals))
            v.append(node.restriction)
            yield from self._flatten_restricts(
                iflatten_instance(node.payload, skip_filter),
                skip_filter, stated, unstated, attr, v)
            continue
        elif attr == 'required_use':
            unstated.update(filterfalse(stated.__contains__, node.vals))
        yield k, tuple(v)
def pull_data(self, pkg, force_copy=False):
    l = []
    for specific in self.freeform:
        for restrict, data in specific:
            if restrict.match(pkg):
                l.append(data)
    for atom, data in self.atoms.get(pkg.key, ()):
        if atom.match(pkg):
            l.append(data)
    if not l:
        if force_copy:
            return set(self.defaults)
        return self.defaults
    s = set(self.defaults)
    s.update(iflatten_instance(l))
    return s
def parse_target(restriction, repo, livefs_repos, return_none=False):
    """Use :obj:`parserestrict.parse_match` to produce a list of matches.

    This matches the restriction against a repo. If multiple pkgs match and a
    simple package name was provided, then the restriction is applied against
    installed repos. If multiple matches still exist then pkgs from the
    'virtual' category are skipped. If multiple pkgs still match the
    restriction, AmbiguousQuery is raised otherwise the matched atom is
    returned. On the other hand, if a globbed match was specified, all repo
    matches are returned.

    :param restriction: string to convert.
    :param repo: :obj:`pkgcore.repository.prototype.tree` instance to search in.
    :param livefs_repos: :obj:`pkgcore.config.domain.all_livefs_repos` instance
        to search in.
    :param return_none: indicates if no matches raises or returns C{None}
    :return: a list of matches or C{None}.
    """
    key_matches = {x.key for x in repo.itermatch(restriction)}
    if not key_matches:
        if return_none:
            return None
        raise NoMatches(restriction)
    elif len(key_matches) > 1:
        if any(isinstance(r, restricts.PackageDep)
               for r in iflatten_instance([restriction])):
            if len(restriction) > 1:
                # drop repo specific restrictions, ebuild repos will not
                # match installed (vdb) repo
                restriction = restriction.remove_restriction(
                    restriction_types=(restricts.RepositoryDep,))
            # check for installed package matches
            installed_matches = {
                x.key for x in livefs_repos.itermatch(restriction)}
            if len(installed_matches) > 1:
                # try removing virtuals if there are multiple matches
                installed_matches = {
                    x for x in installed_matches
                    if not x.startswith('virtual/')}
            if len(installed_matches) == 1:
                return [atom(installed_matches.pop())]
            raise AmbiguousQuery(restriction, sorted(key_matches))
        else:
            # if a glob was specified then just return every match
            return [atom(x) for x in key_matches]
    if isinstance(restriction, atom):
        # atom is guaranteed to be fine, since it's cat/pkg
        return [restriction]
    return [packages.KeyedAndRestriction(restriction, key=key_matches.pop())]
def pull_data(self, pkg, force_copy=False, pre_defaults=()):
    l = []
    for specific in self.freeform:
        for restrict, data in specific:
            if restrict.match(pkg):
                l.append(data)
    for atom, data in self.atoms.get(pkg.key, ()):
        if atom.match(pkg):
            l.append(data)

    if pre_defaults:
        s = set(pre_defaults)
        incremental_expansion(s, self.defaults)
    else:
        s = set(self.defaults_finalized)

    if l:
        incremental_expansion(s, iflatten_instance(l))
    return s
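# A minimal sketch of the incremental stacking pull_data relies on, assuming
# incremental_expansion semantics of '-*' clearing the set and '-flag'
# discarding a single entry (hypothetical standalone reimplementation, not
# pkgcore's actual function):
def incremental_expansion_sketch(s, tokens):
    for t in tokens:
        if t == '-*':
            s.clear()          # assumed: wipe everything accumulated so far
        elif t.startswith('-'):
            s.discard(t[1:])   # assumed: negation removes a single flag
        else:
            s.add(t)

s = {'x11', 'ssl'}
incremental_expansion_sketch(s, ['-ssl', 'gtk'])
print(sorted(s))  # ['gtk', 'x11']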
def get_data(self, repo, options):
    owners = defaultdict(set)
    iterable = repo.itermatch(packages.AlwaysTrue, sorter=sorted)
    items = {}
    for key, subiter in groupby(iterable, attrgetter("key")):
        for pkg in subiter:
            if not options.include_restricted and 'fetch' in pkg.restrict:
                continue
            if not options.include_nonmirrored and 'mirror' in pkg.restrict:
                continue
            for fetchable in iflatten_instance(pkg.fetchables,
                                               fetch.fetchable):
                owners[fetchable.filename].add(key)
                items[fetchable.filename] = fetchable.chksums.get("size", 0)

    data = defaultdict(lambda: 0)
    for filename, keys in owners.items():
        for key in keys:
            data[key] += items[filename]
    unique = sum(items.values())
    shared = sum(items[k] for (k, v) in owners.items() if len(v) > 1)
    return (data, {"total": unique, "shared": shared}), unique
def __init__(self, domain, pkg, verified_files, eclass_cache,
             observer=None, force_test=False, **kwargs):
    """
    :param pkg: :obj:`pkgcore.ebuild.ebuild_src.package` instance we'll be
        building
    :param eclass_cache: the :class:`pkgcore.ebuild.eclass_cache`
        we'll be using
    :param verified_files: mapping of fetchables mapped to their disk location
    """
    self._built_class = ebuild_built.fresh_built_package
    format.build.__init__(self, domain, pkg, verified_files, observer)
    domain_settings = self.domain.settings
    ebd.__init__(self, pkg, initial_env=domain_settings, **kwargs)

    self.env["FILESDIR"] = pjoin(os.path.dirname(pkg.ebuild.path), "files")
    self.eclass_cache = eclass_cache

    self.run_test = force_test or self.feat_or_bool("test", domain_settings)
    self.allow_failed_test = self.feat_or_bool(
        "test-fail-continue", domain_settings)
    if "test" in self.restrict:
        self.run_test = False
    elif not force_test and "test" not in pkg.use:
        if self.run_test:
            logger.warning(
                f"disabling test for {pkg} due to test use flag being disabled")
        self.run_test = False

    # XXX minor hack
    path = self.env["PATH"].split(os.pathsep)

    for s, default in (("DISTCC", ".distcc"), ("CCACHE", "ccache")):
        b = (self.feat_or_bool(s, domain_settings)
             and s not in self.restrict)
        setattr(self, s.lower(), b)
        if b:
            # looks weird I realize, but
            # pjoin("/foo/bar", "/barr/foo") == "/barr/foo"
            # and pjoin("/foo/bar", ".asdf") == "/foo/bar/.asdf"
            self.env.setdefault(s + "_DIR", pjoin(self.domain.tmpdir, default))
            # gentoo bug 355283
            libdir = self.env.get("ABI")
            if libdir is not None:
                libdir = self.env.get(f"LIBDIR_{libdir}")
                if libdir is not None:
                    libdir = self.env.get(libdir)
            if libdir is None:
                libdir = "lib"
            path.insert(0, f"/usr/{libdir}/{s.lower()}/bin")
        else:
            for y in ("_PATH", "_DIR"):
                if s + y in self.env:
                    del self.env[s + y]
    self.env["PATH"] = os.pathsep.join(path)

    # ordering must match appearance order in SRC_URI per PMS
    self.env["A"] = ' '.join(iter_stable_unique(pkg.distfiles))

    if self.eapi.options.has_AA:
        pkg = self.pkg
        while hasattr(pkg, '_raw_pkg'):
            pkg = getattr(pkg, '_raw_pkg')
        self.env["AA"] = ' '.join(set(iflatten_instance(pkg.distfiles)))

    if self.eapi.options.has_KV:
        self.env["KV"] = domain.KV

    if self.eapi.options.has_merge_type:
        self.env["MERGE_TYPE"] = "source"

    if self.eapi.options.has_portdir:
        self.env["PORTDIR"] = pkg.repo.location
        self.env["ECLASSDIR"] = eclass_cache.eclassdir

    if self.setup_is_for_src:
        self._init_distfiles_env()
def visit_atoms(pkg, stream):
    if not pkg.eapi.options.transitive_use_atoms:
        return iflatten_instance(stream, atom)
    return iflatten_func(stream, _eapi2_flatten)
def format(self, op):
    # <type> - ebuild, block or nomerge (for --tree)
    # N - new package
    # R - rebuild package
    # F - fetch restricted
    # f - fetch restricted already downloaded
    # D - downgrade
    # U - updating to another version
    # # - masked
    # * - missing keyword
    # ~ - unstable keyword
    # Caveats:
    # - U and D are both displayed to show a downgrade - this is kept
    #   in order to be consistent with existing portage behaviour

    out = self.out
    origautoline = out.autoline
    out.autoline = False

    self.pkg_disabled_use = self.pkg_forced_use = set()
    if hasattr(self, 'pkg_get_use'):
        self.pkg_forced_use, _, self.pkg_disabled_use = self.pkg_get_use(op.pkg)

    # This is for the summary at the end
    if self.quiet_repo_display:
        self.repos.setdefault(op.pkg.repo, len(self.repos) + 1)

    pkg_is_bold = any(x.match(op.pkg) for x in getattr(self, 'world_list', ()))

    # We don't do blockers or --tree stuff yet
    data = ['[']
    pkg_coloring = []
    if pkg_is_bold:
        pkg_coloring.append(out.bold)
    if op.desc == 'remove':
        pkg_coloring.insert(0, out.fg('red'))
        data += pkg_coloring + ['uninstall']
    elif getattr(op.pkg, 'built', False):
        pkg_coloring.insert(0, out.fg('magenta'))
        data += pkg_coloring + ['binary']
    else:
        pkg_coloring.insert(0, out.fg('green'))
        data += pkg_coloring + ['ebuild']
    data += [out.reset, ' ']
    out.write(*data)

    # Order is important here - look at the above diagram
    op_type = op.desc
    op_chars = [[' '] for x in range(7)]
    if 'fetch' in op.pkg.restrict:
        if all(os.path.isfile(pjoin(self.distdir, f))
               for f in op.pkg.distfiles):
            fetched = [out.fg('green'), out.bold, 'f', out.reset]
        else:
            fetched = [out.fg('red'), out.bold, 'F', out.reset]
        op_chars[3] = fetched

    if op.desc == "add":
        op_chars[1] = [out.fg('green'), out.bold, 'N', out.reset]
        if op.pkg.slot != '0' and self.installed_repos.match(
                op.pkg.unversioned_atom):
            op_chars[2] = [out.fg('green'), out.bold, 'S', out.reset]
            op_type = 'slotted_add'
    elif op.desc == "replace":
        if op.pkg == op.old_pkg:
            op_chars[2] = [out.fg('yellow'), out.bold, 'R', out.reset]
        else:
            op_chars[4] = [out.fg('cyan'), out.bold, 'U', out.reset]
            if op.pkg > op.old_pkg:
                op_type = 'upgrade'
            else:
                op_chars[5] = [out.fg('blue'), out.bold, 'D', out.reset]
                op_type = 'downgrade'
    elif op.desc == 'remove':
        pass
    else:
        logger.warning("unformattable op type: desc(%r), %r", op.desc, op)

    if self.verbosity > 0:
        if (self.unstable_arch in op.pkg.keywords
                and self.unstable_arch not in
                op.pkg.repo.domain_settings['ACCEPT_KEYWORDS']):
            op_chars[6] = [out.fg('yellow'), out.bold, '~', out.reset]
        elif not op.pkg.keywords:
            op_chars[6] = [out.fg('red'), out.bold, '*', out.reset]
        else:
            for masked_atom in op.pkg.repo.default_visibility_limiters:
                if masked_atom.match(op.pkg.versioned_atom):
                    op_chars[6] = [out.fg('red'), out.bold, '#', out.reset]
                    break

    out.write(*(iflatten_instance(op_chars)))
    out.write('] ')

    self.visit_op(op_type)

    pkg = [op.pkg.cpvstr]
    if self.verbosity > 0:
        if op.pkg.subslot != op.pkg.slot:
            pkg.append(f":{op.pkg.slot}/{op.pkg.subslot}")
        elif op.pkg.slot != '0':
            pkg.append(f":{op.pkg.slot}")
        if not self.quiet_repo_display and op.pkg.source_repository:
            pkg.append(f"::{op.pkg.source_repository}")
    out.write(*(pkg_coloring + pkg + [out.reset]))

    installed = []
    if op.desc == 'replace':
        old_pkg = [op.old_pkg.fullver]
        if self.verbosity > 0:
            if op.old_pkg.subslot != op.old_pkg.slot:
                old_pkg.append(f":{op.old_pkg.slot}/{op.old_pkg.subslot}")
            elif op.old_pkg.slot != '0':
                old_pkg.append(f":{op.old_pkg.slot}")
            if not self.quiet_repo_display and op.old_pkg.source_repository:
                old_pkg.append(f"::{op.old_pkg.source_repository}")
        if (op_type != 'replace'
                or op.pkg.source_repository != op.old_pkg.source_repository):
            installed = ''.join(old_pkg)
    elif op_type == 'slotted_add':
        if self.verbosity > 0:
            pkgs = sorted(
                f"{x.fullver}:{x.slot}"
                for x in self.installed_repos.match(op.pkg.unversioned_atom))
        else:
            pkgs = sorted(
                x.fullver
                for x in self.installed_repos.match(op.pkg.unversioned_atom))
        installed = ', '.join(pkgs)

    # output currently installed versions
    if installed:
        out.write(' ', out.fg('blue'), out.bold, f'[{installed}]', out.reset)

    # Build a list of (useflags, use_expand_dicts) tuples.
    # HACK: if we are in "replace" mode we build a list of length
    # 4, else this is a list of length 2. We then pass this to
    # format_use which can take either 2 or 4 arguments.
    uses = ((), ())
    if op.desc == 'replace':
        uses = (op.pkg.iuse_stripped, op.pkg.use,
                op.old_pkg.iuse_stripped, op.old_pkg.use)
    elif op.desc == 'add':
        uses = (op.pkg.iuse_stripped, op.pkg.use)
    stuff = list(map(self.use_splitter, uses))

    # Convert the list of tuples to a list of lists and a list of
    # dicts (both length 2 or 4).
    uselists, usedicts = list(zip(*stuff))

    # output USE flags
    self.format_use('use', *uselists)

    # output USE_EXPAND flags
    for expand in sorted(self.use_expand - self.use_expand_hidden):
        flaglists = [d.get(expand, ()) for d in usedicts]
        self.format_use(expand, *flaglists)

    # output download size
    if self.verbosity > 0:
        if not op.pkg.built:
            downloads = set(
                f for f in op.pkg.distfiles
                if not os.path.isfile(pjoin(self.distdir, f)))
            if downloads.difference(self.downloads):
                self.downloads.update(downloads)
                size = sum(
                    v.size for dist, v in op.pkg.manifest.distfiles.items()
                    if dist in downloads)
                if size:
                    self.download_size += size
                    out.write(' ', sizeof_fmt(size))

    if self.quiet_repo_display:
        out.write(out.fg('cyan'), f" [{self.repos[op.pkg.repo]}]")

    out.write('\n')
    out.autoline = origautoline
def fake_use_validate(klasses, pkg, seq, attr=None):
    return {k: () for k in iflatten_instance(seq, klasses)}, ()
def visit_atoms(pkg, stream):
    if not pkg.eapi.options.transitive_use_atoms:
        return iflatten_instance(stream, atom)
    return iflatten_func(
        stream,
        lambda x: isinstance(x, atom)
        and not isinstance(x, atom._transitive_use_atom))
def _has_transitive_use_atoms(iterable):
    kls = transitive_use_atom
    ifunc = isinstance
    return any(ifunc(x, kls) for x in iflatten_instance(iterable, atom))
def fake_use_validate(klasses, pkg, seq, reporter, attr=None):
    return iflatten_instance(seq, klasses)
def _dist_validate_args(parser, namespace):
    distdir = namespace.domain.distdir
    repo = namespace.repo
    if repo is None:
        repo = multiplex.tree(
            *get_virtual_repos(namespace.domain.source_repos, False))

    all_dist_files = set(os.path.basename(f) for f in listdir_files(distdir))
    target_files = set()
    installed_dist = set()
    exists_dist = set()
    excludes_dist = set()
    restricted_dist = set()

    # exclude distfiles used by installed packages -- note that this uses the
    # distfiles attr with USE settings bound to it
    if namespace.exclude_installed:
        for pkg in namespace.domain.all_installed_repos:
            installed_dist.update(iflatten_instance(pkg.distfiles))

    # exclude distfiles for existing ebuilds or fetch restrictions
    if namespace.exclude_fetch_restricted or (
            namespace.exclude_exists and not namespace.restrict):
        for pkg in repo:
            exists_dist.update(
                iflatten_instance(getattr(pkg, '_raw_pkg', pkg).distfiles))
            if 'fetch' in pkg.restrict:
                restricted_dist.update(
                    iflatten_instance(getattr(pkg, '_raw_pkg', pkg).distfiles))

    # exclude distfiles from specified restrictions
    if namespace.exclude_restrict:
        for pkg in repo.itermatch(namespace.exclude_restrict, sorter=sorted):
            excludes_dist.update(
                iflatten_instance(getattr(pkg, '_raw_pkg', pkg).distfiles))

    # determine dist files for custom restrict targets
    if namespace.restrict:
        target_dist = defaultdict(lambda: defaultdict(set))
        for pkg in repo.itermatch(namespace.restrict, sorter=sorted):
            s = set(iflatten_instance(getattr(pkg, '_raw_pkg', pkg).distfiles))
            target_dist[pkg.unversioned_atom][pkg].update(s)
            if namespace.exclude_exists:
                exists_dist.update(s)

        extra_regex_prefixes = defaultdict(set)
        pkg_regex_prefixes = set()
        for catpn, pkgs in target_dist.items():
            pn_regex = r'\W'.join(re.split(r'\W', catpn.package))
            pkg_regex = re.compile(
                r'(%s)(\W\w+)+([\W?(0-9)+])*(\W\w+)*(\.\w+)*' % pn_regex,
                re.IGNORECASE)
            pkg_regex_prefixes.add(pn_regex)
            for pkg, files in pkgs.items():
                files = sorted(files)
                for f in files:
                    if (pkg_regex.match(f) or (
                            extra_regex_prefixes and re.match(
                                r'(%s)([\W?(0-9)+])+(\W\w+)*(\.\w+)+'
                                % '|'.join(extra_regex_prefixes[catpn]), f))):
                        continue
                    else:
                        pieces = re.split(
                            r'([\W?(0-9)+])+(\W\w+)*(\.\w+)+', f)
                        if pieces[-1] == '':
                            pieces.pop()
                        if len(pieces) > 1:
                            extra_regex_prefixes[catpn].add(pieces[0])

        if target_dist:
            regexes = []
            # build regexes to match distfiles for older ebuilds no longer
            # in the tree
            if pkg_regex_prefixes:
                pkg_regex_prefixes_str = '|'.join(sorted(pkg_regex_prefixes))
                regexes.append(re.compile(
                    r'(%s)(\W\w+)+([\W?(0-9)+])*(\W\w+)*(\.\w+)*'
                    % (pkg_regex_prefixes_str,)))
            if extra_regex_prefixes:
                extra_regex_prefixes_str = '|'.join(sorted(
                    chain.from_iterable(
                        v for k, v in extra_regex_prefixes.items())))
                regexes.append(re.compile(
                    r'(%s)([\W?(0-9)+])+(\W\w+)*(\.\w+)+'
                    % (extra_regex_prefixes_str,)))

            if regexes:
                for f in all_dist_files:
                    if any(r.match(f) for r in regexes):
                        target_files.add(f)
    else:
        target_files = all_dist_files

    # exclude files tagged for saving
    saving_files = (
        installed_dist | exists_dist | excludes_dist | restricted_dist)
    target_files.difference_update(saving_files)

    targets = (pjoin(distdir, f)
               for f in sorted(all_dist_files.intersection(target_files)))
    removal_func = partial(os.remove)
    namespace.remove = (
        (removal_func, f)
        for f in filter(namespace.file_filters.run, targets))
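# A quick demonstration of the distfile-matching regex built above, using a
# hypothetical package named foo-bar with conventionally named tarballs:
import re

pn_regex = r'\W'.join(re.split(r'\W', 'foo-bar'))  # 'foo\\Wbar'
pkg_regex = re.compile(
    r'(%s)(\W\w+)+([\W?(0-9)+])*(\W\w+)*(\.\w+)*' % pn_regex, re.IGNORECASE)
print(bool(pkg_regex.match('foo-bar-1.2.3.tar.gz')))  # True
print(bool(pkg_regex.match('unrelated-1.0.tar.gz')))  # False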