def _identify_candidates(self, restrict, sorter):
    """Expand a restriction into an iterable of candidate (category, package) pairs.

    Falls back to :meth:`_fast_identify_candidates` for non-boolean (or plain
    atom) restrictions; otherwise expands the restriction's DNF solutions to
    prune the category/package search space where possible.

    :param restrict: restriction object to match against.
    :param sorter: ordering callable (``iter`` for unordered, ``sorted`` for
        ordered traversal).
    """
    # full expansion
    if not isinstance(restrict, boolean.base) or isinstance(restrict, atom):
        return self._fast_identify_candidates(restrict, sorter)
    # For each DNF solution, collect its category restrictions and its
    # package restrictions separately.
    dsolutions = [
        ([c.restriction for c in collect_package_restrictions(x, ("category",))],
         [p.restriction for p in collect_package_restrictions(x, ("package",))])
        for x in restrict.iter_dnf_solutions(True)]
    # see if any solution state isn't dependent on cat/pkg in anyway.
    # if so, search whole search space.
    for x in dsolutions:
        if not x[0] and not x[1]:
            if sorter is iter:
                return self.versions
            return (
                (c, p)
                for c in sorter(self.categories)
                for p in sorter(self.packages.get(c, ())))
    # simple cases first.
    # if one specifies categories, and one doesn't
    cat_specified = bool(dsolutions[0][0])
    pkg_specified = bool(dsolutions[0][1])
    pgetter = self.packages.get
    if any(True for x in dsolutions[1:] if bool(x[0]) != cat_specified):
        if any(True for x in dsolutions[1:] if bool(x[1]) != pkg_specified):
            # merde. so we've got a mix- some specify cats, some
            # don't, some specify pkgs, some don't.
            # this may be optimizable
            return self.versions
        # ok. so... one doesn't specify a category, but they all
        # specify packages (or don't)
        pr = values.OrRestriction(
            *tuple(iflatten_instance(
                (x[1] for x in dsolutions if x[1]), values.base)))
        return (
            (c, p)
            for c in sorter(self.categories)
            for p in sorter(pgetter(c, [])) if pr.match(p))
    elif any(True for x in dsolutions[1:] if bool(x[1]) != pkg_specified):
        # one (or more) don't specify pkgs, but they all specify cats.
        cr = values.OrRestriction(
            *tuple(iflatten_instance(
                (x[0] for x in dsolutions), values.base)))
        cats_iter = (c for c in sorter(self.categories) if cr.match(c))
        return (
            (c, p)
            for c in cats_iter
            for p in sorter(pgetter(c, [])))
    # All solutions agree on whether cats/pkgs are specified; the fast
    # path can handle the remaining uniform cases.
    return self._fast_identify_candidates(restrict, sorter)
def collapse_envd(base):
    """Collapse an env.d directory into a single dict of variables.

    Files are read in sorted (numeric-prefix) order; later files' values are
    appended, then incremental variables are merged while non-incrementals
    keep only the last value seen.

    :param base: path to the env.d directory.
    :return: tuple of (collapsed dict, incremental var names,
        colon-separated var names).
    """
    collapsed_d = {}
    try:
        env_d_files = sorted(listdir_files(base))
    except OSError as oe:
        # a missing env.d dir is fine; anything else propagates.
        if oe.errno != errno.ENOENT:
            raise
    else:
        for x in env_d_files:
            # skip backups, cfg-protect temp files, and files without the
            # conventional two-digit ordering prefix.
            if x.endswith(".bak") or x.endswith("~") or x.startswith("._cfg") \
                    or len(x) <= 2 or not x[0:2].isdigit():
                continue
            d = read_bash_dict(pjoin(base, x))
            # inefficient, but works.
            for k, v in d.iteritems():
                collapsed_d.setdefault(k, []).append(v)
            del d
    loc_incrementals = set(incrementals)
    loc_colon_parsed = set(colon_parsed)
    # split out env.d defined incrementals..
    # update incrementals *and* colon parsed for colon_separated;
    # incrementals on its own is space separated.
    for x in collapsed_d.pop("COLON_SEPARATED", []):
        v = x.split()
        if v:
            loc_colon_parsed.update(v)
    loc_incrementals.update(loc_colon_parsed)
    # now space.
    for x in collapsed_d.pop("SPACE_SEPARATED", []):
        v = x.split()
        if v:
            loc_incrementals.update(v)
    # now reinterpret.
    for k, v in collapsed_d.iteritems():
        if k not in loc_incrementals:
            # non-incremental: last definition wins.
            collapsed_d[k] = v[-1]
            continue
        if k in loc_colon_parsed:
            collapsed_d[k] = filter(None, iflatten_instance(
                x.split(':') for x in v))
        else:
            collapsed_d[k] = filter(None, iflatten_instance(
                x.split() for x in v))
    return collapsed_d, loc_incrementals, loc_colon_parsed
def _identify_candidates(self, restrict, sorter):
    """Yield (category, package) candidate pairs for *restrict*.

    Non-boolean restrictions (and bare atoms) go through the fast path;
    boolean restrictions are expanded to DNF so the category/package search
    space can be narrowed when every solution constrains cats and/or pkgs.
    """
    # full expansion
    if not isinstance(restrict, boolean.base) or isinstance(
            restrict, atom):
        return self._fast_identify_candidates(restrict, sorter)
    # per DNF solution: ([category restrictions], [package restrictions])
    dsolutions = [([
        c.restriction
        for c in collect_package_restrictions(x, ("category", ))
    ], [
        p.restriction
        for p in collect_package_restrictions(x, ("package", ))
    ]) for x in restrict.iter_dnf_solutions(True)]
    # see if any solution state isn't dependent on cat/pkg in anyway.
    # if so, search whole search space.
    for x in dsolutions:
        if not x[0] and not x[1]:
            if sorter is iter:
                return self.versions
            return ((c, p) for c in sorter(self.categories)
                    for p in sorter(self.packages.get(c, ())))
    # simple cases first.
    # if one specifies categories, and one doesn't
    cat_specified = bool(dsolutions[0][0])
    pkg_specified = bool(dsolutions[0][1])
    pgetter = self.packages.get
    if any(True for x in dsolutions[1:] if bool(x[0]) != cat_specified):
        if any(True for x in dsolutions[1:]
               if bool(x[1]) != pkg_specified):
            # merde. so we've got a mix- some specify cats, some
            # don't, some specify pkgs, some don't.
            # this may be optimizable
            return self.versions
        # ok. so... one doesn't specify a category, but they all
        # specify packages (or don't)
        pr = values.OrRestriction(*tuple(
            iflatten_instance((x[1] for x in dsolutions if x[1]),
                              values.base)))
        return ((c, p) for c in sorter(self.categories)
                for p in sorter(pgetter(c, [])) if pr.match(p))
    elif any(True for x in dsolutions[1:]
             if bool(x[1]) != pkg_specified):
        # one (or more) don't specify pkgs, but they all specify cats.
        cr = values.OrRestriction(*tuple(
            iflatten_instance((x[0] for x in dsolutions), values.base)))
        cats_iter = (c for c in sorter(self.categories) if cr.match(c))
        return ((c, p) for c in cats_iter
                for p in sorter(pgetter(c, [])))
    # uniform solutions; fast path covers the rest.
    return self._fast_identify_candidates(restrict, sorter)
def collapse_envd(base):
    """Read all env.d files under *base* and collapse them into one dict.

    :param base: env.d directory path; silently treated as empty when absent.
    :return: ``(collapsed_dict, incremental_names, colon_separated_names)``.
    """
    collapsed_d = {}
    try:
        env_d_files = sorted(listdir_files(base))
    except OSError as oe:
        # ENOENT (missing dir) is tolerated; other errors propagate.
        if oe.errno != errno.ENOENT:
            raise
    else:
        for x in env_d_files:
            # ignore editor backups, cfg-protect files, and names lacking
            # the two-digit ordering prefix.
            if x.endswith(".bak") or x.endswith("~") or x.startswith("._cfg") \
                    or len(x) <= 2 or not x[0:2].isdigit():
                continue
            d = read_bash_dict(pjoin(base, x))
            # inefficient, but works.
            for k, v in d.iteritems():
                collapsed_d.setdefault(k, []).append(v)
            del d
    loc_incrementals = set(incrementals)
    loc_colon_parsed = set(colon_parsed)
    # split out env.d defined incrementals..
    # update incrementals *and* colon parsed for colon_separated;
    # incrementals on its own is space separated.
    for x in collapsed_d.pop("COLON_SEPARATED", []):
        v = x.split()
        if v:
            loc_colon_parsed.update(v)
    loc_incrementals.update(loc_colon_parsed)
    # now space.
    for x in collapsed_d.pop("SPACE_SEPARATED", []):
        v = x.split()
        if v:
            loc_incrementals.update(v)
    # now reinterpret.
    for k, v in collapsed_d.iteritems():
        if k not in loc_incrementals:
            # non-incremental variable: last file's value wins.
            collapsed_d[k] = v[-1]
            continue
        if k in loc_colon_parsed:
            collapsed_d[k] = filter(None, iflatten_instance(
                x.split(':') for x in v))
        else:
            collapsed_d[k] = filter(None, iflatten_instance(
                x.split() for x in v))
    return collapsed_d, loc_incrementals, loc_colon_parsed
def main(options, out, err):
    """Report distdir files not referenced by any matched package.

    Scans ``options.distdir`` for files, collects every fetchable filename
    from packages in ``options.repo`` matching ``options.restrict``, and
    writes the full path of each unreferenced file to *out*.

    :param options: parsed options (distdir, repo, restrict, debug,
        ignore_failures).
    :param out: output formatter.
    :param err: error formatter.
    :return: 1 on aborted checksum-corruption error, otherwise None.
    """
    if options.debug:
        out.write('starting scanning distdir %s...' % options.distdir)
    # NOTE: renamed from `file` to avoid shadowing the builtin.
    distfiles = set(basename(fname) for fname in listdir_files(options.distdir))
    if options.debug:
        out.write('scanning repo...')
    pfiles = set()
    for pkg in options.repo.itermatch(options.restrict, sorter=sorted):
        try:
            pfiles.update(
                fetchable.filename
                for fetchable in iflatten_instance(
                    pkg.fetchables, fetchable_kls))
        except ParseChksumError as e:
            err.write("got corruption error '%s', with package %s " %
                      (e, pkg.cpvstr))
            if options.ignore_failures:
                err.write("skipping...")
                err.write()
            else:
                err.write("aborting...")
                return 1
        except Exception as e:
            err.write("got error '%s', parsing package %s in repo '%s'" %
                      (e, pkg.cpvstr, pkg.repo))
            raise
    d = options.distdir
    for fname in (distfiles - pfiles):
        out.write(pjoin(d, fname))
def read_updates(path):
    """Parse all update files under *path* into per-cp command chains."""
    def fresh_node():
        # start and tail initially share one deque; see comment below.
        chain = deque()
        return [chain, chain]
    # mods tracks the start point [0], and the tail, [1].
    # via this, pkg moves into a specific pkg can pick up
    # changes past that point, while ignoring changes prior
    # to that point.
    # Afterwards, we flatten it to get a per cp chain of commands.
    # no need to do lookups basically, although we do need to
    # watch for cycles.
    mods = defaultdict(fresh_node)
    moved = {}
    for entry in _scan_directory(path):
        full_path = pjoin(path, entry)
        _process_update(readlines(full_path), full_path, mods, moved)
    # force a walk of the tree, flattening it
    flattened = dict(
        (cp, list(iflatten_instance(node[0], tuple)))
        for cp, node in mods.iteritems())
    # filter out empty nodes.
    return dict((cp, cmds) for cp, cmds in flattened.iteritems() if cmds)
def assertUri(self, obj, uri):
    """Assert *obj* flattens to *uri*, and that its truthiness matches."""
    expected = list(uri)
    self.assertEqual(list(iflatten_instance(obj)), expected)
    if not expected:
        self.assertFalse(obj)
    else:
        self.assertTrue(obj)
def _collect_virtuals(virtuals, iterable):
    """Accumulate provided virtuals from *iterable* packages into *virtuals*.

    Builds a nested mapping: virtual package name -> provider fullver ->
    list of versioned atoms.
    """
    for pkg in iterable:
        provided = pkg.provides.evaluate_depset(pkg.use)
        for vpkg in iflatten_instance(provided):
            by_version = virtuals.setdefault(vpkg.package, {})
            by_version.setdefault(pkg.fullver, []).append(pkg.versioned_atom)
def flatten_restricts(self, v):
    """Flatten a restriction tree into a token stream for comparison.

    Yields "||"/"&&" markers, "("/")" delimiters, conditional payload sets,
    and leaf restrictions, asserting balanced depth on exit.
    """
    i = expandable_chain(v)
    depth = 0
    conditionals = []
    for x in i:
        # boolean nodes: emit the operator, open a paren, and push the
        # node's children (plus a closing paren) back onto the chain.
        for t, s in ((boolean.OrRestriction, "||"),
                     (boolean.AndRestriction, "&&")):
            if isinstance(x, t):
                yield s
                yield "("
                i.appendleft(")")
                i.appendleft(x.restrictions)
                depth += 1
                break
        else:
            if isinstance(x, packages.Conditional):
                self.assertTrue(x.attr == "use")
                # record this conditional at the current depth; the yielded
                # set is the union of all conditionals active at this depth.
                conditionals.insert(
                    depth, list(self.mangle_cond_payload(x.restriction)))
                yield set(iflatten_instance(conditionals[:depth + 1]))
                yield "("
                i.appendleft(")")
                i.appendleft(x.payload)
                depth += 1
            else:
                if x == ")":
                    # closing a previously opened group.
                    self.assertTrue(depth)
                    depth -= 1
                yield x
    self.assertFalse(depth)
def main(options, out, err):
    """Write the path of every distdir file unreferenced by matched packages."""
    if options.debug:
        out.write('starting scanning distdir %s...' % options.distdir)
    present = set(basename(path) for path in listdir_files(options.distdir))
    if options.debug:
        out.write('scanning repo...')
    referenced = set()
    for pkg in options.repo.itermatch(options.restrict, sorter=sorted):
        try:
            for fetchable in iflatten_instance(pkg.fetchables, fetchable_kls):
                referenced.add(fetchable.filename)
        except ParseChksumError as e:
            err.write("got corruption error '%s', with package %s " %
                      (e, pkg.cpvstr))
            if not options.ignore_failures:
                err.write("aborting...")
                return 1
            err.write("skipping...")
            err.write()
        except Exception as e:
            err.write("got error '%s', parsing package %s in repo '%s'" %
                      (e, pkg.cpvstr, pkg.repo))
            raise
    distdir = options.distdir
    for orphan in present - referenced:
        out.write(pjoin(distdir, orphan))
def read_updates(path):
    """Parse update files under *path*, returning per-cp command chains.

    :param path: directory containing update files.
    :return: dict mapping cp -> non-empty list of update commands.
    """
    def f():
        # both slots initially reference the same deque.
        d = deque()
        return [d, d]
    # mods tracks the start point [0], and the tail, [1].
    # via this, pkg moves into a specific pkg can pick up
    # changes past that point, while ignoring changes prior
    # to that point.
    # Afterwards, we flatten it to get a per cp chain of commands.
    # no need to do lookups basically, although we do need to
    # watch for cycles.
    mods = defaultdict(f)
    moved = {}
    for fp in _scan_directory(path):
        fp = pjoin(path, fp)
        _process_update(readlines(fp), fp, mods, moved)
    # force a walk of the tree, flattening it
    commands = {
        k: list(iflatten_instance(v[0], tuple))
        for k, v in mods.iteritems()
    }
    # filter out empty nodes.
    commands = {k: v for k, v in commands.iteritems() if v}
    return commands
def __unwrap_stage_dependencies__(cls):
    """Restore the raw (unwrapped) functions for all stage_depends entries."""
    names = {
        name for name in iflatten_instance(cls.stage_depends.iteritems())
        if name}
    for name in names:
        try:
            wrapped = getattr(cls, name)
        except AttributeError:
            raise TypeError("class %r stage_depends specifies "
                            "%r, which doesn't exist" % (cls, name))
        # fall back to the attribute itself if it was never wrapped.
        setattr(cls, name, getattr(wrapped, 'sd_raw_func', wrapped))
def iter_pull_data(self, pkg):
    """Iterate data applicable to *pkg*: defaults plus matching overrides."""
    matched = [self.defaults]
    for specific in self.freeform:
        for restrict, data in specific:
            if restrict.match(pkg):
                matched.append(data)
    for atom, data in self.atoms.get(pkg.key, ()):
        if atom.match(pkg):
            matched.append(data)
    if len(matched) == 1:
        # nothing beyond the defaults matched.
        return iter(self.defaults)
    return iflatten_instance(matched)
def known_conditionals(self):
    """Return the frozenset of conditional flag values used in this depset.

    Computed lazily and cached on the instance; ``object.__setattr__`` is
    used, presumably because the instance is immutable — TODO confirm.
    """
    if self._node_conds is False:
        # no conditional nodes at all.
        return frozenset()
    if self._known_conditionals is None:
        kc = set()
        for payload, restrictions in self.find_cond_nodes(
                self.restrictions):
            kc.update(iflatten_instance(x.vals for x in restrictions))
        kc = frozenset(kc)
        object.__setattr__(self, "_known_conditionals", kc)
        return kc
    return self._known_conditionals
def _yield_deps(inst, d, k):
    """Depth-first yield of *k* and its transitive dependencies from dict *d*.

    NOTE(review): *inst* is unused in this body — possibly kept for a
    callback signature; verify against callers.
    """
    # While at first glance this looks like should use expandable_chain,
    # it shouldn't. --charlie
    if k not in d:
        yield k
        return
    # stack alternates: name, then an iterator over that name's deps.
    s = [k, iflatten_instance(d.get(k, ()))]
    while s:
        if isinstance(s[-1], basestring):
            # a name whose deps are exhausted; emit it.
            yield s.pop(-1)
            continue
        exhausted = True
        for x in s[-1]:
            v = d.get(x)
            if v:
                # x has deps of its own: defer yielding x until its
                # subtree is done (it is pushed, then popped later).
                s.append(x)
                s.append(iflatten_instance(v))
                exhausted = False
                break
            # leaf: yield immediately.
            yield x
        if exhausted:
            s.pop(-1)
def __wrap_stage_dependencies__(cls):
    """Wrap each stage_depends target so its dependencies run first."""
    stage_depends = cls.stage_depends
    # we use id instead of the cls itself to prevent strong ref issues.
    cls_id = id(cls)
    names = {x for x in iflatten_instance(stage_depends.iteritems()) if x}
    for name in names:
        try:
            func = getattr(cls, name)
        except AttributeError:
            raise TypeError("class %r stage_depends specifies "
                            "%r, which doesn't exist" % (cls, name))
        wrapper = pre_curry(_ensure_deps, cls_id, name, func)
        # stash the original so __unwrap_stage_dependencies__ can restore it.
        wrapper.sd_raw_func = func
        setattr(cls, name, wrapper)
def pull_data(self, pkg, force_copy=False):
    """Collect the set of data applicable to *pkg* (defaults + overrides).

    :param force_copy: when True, return a fresh set even if only the
        defaults apply.
    """
    collected = []
    for specific in self.freeform:
        collected.extend(
            data for restrict, data in specific if restrict.match(pkg))
    for atom, data in self.atoms.get(pkg.key, ()):
        if atom.match(pkg):
            collected.append(data)
    if not collected:
        # nothing matched; hand back the defaults (copied only on request).
        if force_copy:
            return set(self.defaults)
        return self.defaults
    result = set(self.defaults)
    result.update(iflatten_instance(collected))
    return result
def pull_data(self, pkg, force_copy=False, pre_defaults=()):
    """Collect data for *pkg* with incremental expansion over defaults.

    :param pre_defaults: optional base values expanded with the raw
        defaults; when empty, the finalized defaults are used directly.
    """
    collected = []
    for specific in self.freeform:
        collected.extend(
            data for restrict, data in specific if restrict.match(pkg))
    for atom, data in self.atoms.get(pkg.key, ()):
        if atom.match(pkg):
            collected.append(data)
    if pre_defaults:
        s = set(pre_defaults)
        incremental_expansion(s, self.defaults)
    else:
        s = set(self.defaults_finalized)
    if collected:
        incremental_expansion(s, iflatten_instance(collected))
    return s
def _collect_virtuals(virtuals, iterable):
    """Fold provided virtuals from packages in *iterable* into *virtuals*.

    Mutates *virtuals* in place: virtual name -> provider fullver ->
    list of versioned atoms.
    """
    for pkg in iterable:
        # evaluate PROVIDE against the pkg's enabled USE flags.
        for virtualpkg in iflatten_instance(
                pkg.provides.evaluate_depset(pkg.use)):
            virtuals.setdefault(virtualpkg.package, {}).setdefault(
                pkg.fullver, []).append(pkg.versioned_atom)
def _has_transitive_use_atoms(iterable):
    """Return True if any flattened element is a transitive_use_atom."""
    for node in iflatten_instance(iterable, atom):
        if isinstance(node, transitive_use_atom):
            return True
    return False
def format(self, op):
    """Write a portage-style one-line summary of merge operation *op*."""
    # <type> - ebuild, block or nomerge (for --tree)
    # N - new package
    # R - rebuild package
    # F - fetch restricted
    # f - fetch restricted already downloaded
    # D - downgrade
    # U - updating to another version
    # # - masked
    # * - missing keyword
    # ~ - unstable keyword
    # Caveats:
    # - U and D are both displayed to show a downgrade - this is kept
    # in order to be consistent with existing portage behaviour
    out = self.out
    origautoline = out.autoline
    out.autoline = False
    self.pkg_disabled_use = self.pkg_forced_use = set()
    if hasattr(self, 'disabled_use'):
        self.pkg_disabled_use = self.disabled_use.pull_data(op.pkg)
    if hasattr(self, 'forced_use'):
        self.pkg_forced_use = self.forced_use.pull_data(op.pkg)
    # This is for the summary at the end
    if self.quiet_repo_display:
        self.repos.setdefault(op.pkg.repo, len(self.repos)+1)
    pkg_is_bold = any(x.match(op.pkg) for x in getattr(self, 'world_list', ()))
    # We don't do blockers or --tree stuff yet
    data = ['[']
    pkg_coloring = []
    if pkg_is_bold:
        pkg_coloring.append(out.bold)
    if op.desc == 'remove':
        pkg_coloring.insert(0, out.fg('red'))
        data += pkg_coloring + ['uninstall']
    elif getattr(op.pkg, 'built', False):
        pkg_coloring.insert(0, out.fg('magenta'))
        data += pkg_coloring + ['binary']
    else:
        pkg_coloring.insert(0, out.fg('green'))
        data += pkg_coloring + ['ebuild']
    data += [out.reset, ' ']
    out.write(*data)
    # Order is important here - look at the above diagram
    op_type = op.desc
    op_chars = [[' '] for x in range(7)]
    if 'fetch' in op.pkg.restrict:
        # 'F' unless every distfile is already present, then 'f'.
        fetched = [out.fg('red'), out.bold, 'F', out.reset]
        for fetchable in op.pkg.fetchables:
            if not os.path.isfile(pjoin(self.distdir, fetchable.filename)):
                break
            fetched = [out.fg('green'), out.bold, 'f', out.reset]
        op_chars[3] = fetched
    if op.desc == "add":
        op_chars[1] = [out.fg('green'), out.bold, 'N', out.reset]
        if op.pkg.slot != '0' and self.livefs_repos.match(op.pkg.unversioned_atom):
            op_chars[2] = [out.fg('green'), out.bold, 'S', out.reset]
            op_type = 'slotted_add'
    elif op.desc == "replace":
        if op.pkg == op.old_pkg:
            op_chars[2] = [out.fg('yellow'), out.bold, 'R', out.reset]
        else:
            op_chars[4] = [out.fg('cyan'), out.bold, 'U', out.reset]
            if op.pkg > op.old_pkg:
                op_type = 'upgrade'
            else:
                op_chars[5] = [out.fg('blue'), out.bold, 'D', out.reset]
                op_type = 'downgrade'
    elif op.desc == 'remove':
        pass
    else:
        # NOTE(review): args are passed as one tuple against two %r
        # placeholders; this %-formats incorrectly if it ever fires — verify.
        logger.warning("unformattable op type: desc(%r), %r", (op.desc, op))
    if self.verbose:
        if self.unstable_arch in op.pkg.keywords and \
                self.unstable_arch not in self.domain_settings['ACCEPT_KEYWORDS']:
            op_chars[6] = [out.fg('yellow'), out.bold, '~', out.reset]
        elif not op.pkg.keywords:
            op_chars[6] = [out.fg('red'), out.bold, '*', out.reset]
        else:
            for masked_atom in op.pkg.repo.default_visibility_limiters:
                if masked_atom.match(op.pkg.versioned_atom):
                    op_chars[6] = [out.fg('red'), out.bold, '#', out.reset]
                    break
    out.write(*(iflatten_instance(op_chars)))
    out.write('] ')
    self.visit_op(op_type)
    pkg = [op.pkg.cpvstr]
    if self.verbose:
        if op.pkg.subslot != op.pkg.slot:
            pkg.append(':%s/%s' % (op.pkg.slot, op.pkg.subslot))
        elif op.pkg.slot != '0':
            pkg.append(':%s' % op.pkg.slot)
        if not self.quiet_repo_display and op.pkg.source_repository and \
                op.pkg.source_repository != 'gentoo' or \
                (op.desc == 'replace' and op_type != 'replace' and \
                op.pkg.source_repository != op.old_pkg.source_repository):
            pkg.append("::%s" % op.pkg.source_repository)
    out.write(*(pkg_coloring + pkg + [out.reset]))
    installed = []
    if op.desc == 'replace':
        old_pkg = [op.old_pkg.fullver]
        if self.verbose:
            if op.old_pkg.subslot != op.old_pkg.slot:
                old_pkg.append(':%s/%s' % (op.old_pkg.slot, op.old_pkg.subslot))
            elif op.old_pkg.slot != '0':
                old_pkg.append(':%s' % op.old_pkg.slot)
            if not self.quiet_repo_display and op.old_pkg.source_repository and \
                    op.old_pkg.source_repository != 'gentoo' or \
                    op.pkg.source_repository != op.old_pkg.source_repository:
                old_pkg.append("::%s" % op.old_pkg.source_repository)
        if op_type != 'replace' or op.pkg.source_repository != op.old_pkg.source_repository:
            installed = ''.join(old_pkg)
    elif op_type == 'slotted_add':
        if self.verbose:
            pkgs = sorted(['%s:%s' % (x.fullver, x.slot) for x in \
                self.livefs_repos.match(op.pkg.unversioned_atom)])
        else:
            pkgs = sorted([x.fullver for x in \
                self.livefs_repos.match(op.pkg.unversioned_atom)])
        installed = ', '.join(pkgs)
    if installed:
        out.write(' ', out.fg('blue'), out.bold, '[%s]' % installed, out.reset)
    # Build a list of (useflags, use_expand_dicts) tuples.
    # HACK: if we are in "replace" mode we build a list of length
    # 4, else this is a list of length 2. We then pass this to
    # format_use which can take either 2 or 4 arguments.
    uses = ((), ())
    if op.desc == 'replace':
        uses = (self.iuse_strip(op.pkg.iuse), op.pkg.use,
                self.iuse_strip(op.old_pkg.iuse), op.old_pkg.use)
    elif op.desc == 'add':
        uses = (self.iuse_strip(op.pkg.iuse), op.pkg.use)
    stuff = map(self.use_splitter, uses)
    # Convert the list of tuples to a list of lists and a list of
    # dicts (both length 2 or 4).
    uselists, usedicts = zip(*stuff)
    self.format_use('use', *uselists)
    for expand in sorted(self.use_expand-self.use_expand_hidden):
        flaglists = [d.get(expand, ()) for d in usedicts]
        self.format_use(expand, *flaglists)
    if self.verbose:
        if not op.pkg.built:
            downloads = set(f.filename for f in op.pkg.fetchables
                            if not os.path.isfile(pjoin(self.distdir, f.filename)))
            if downloads.difference(self.downloads):
                self.downloads.update(downloads)
                size = sum(v.size for dist, v in
                           op.pkg.manifest.distfiles.iteritems()
                           if dist in downloads)
                if size:
                    self.download_size += size
                    out.write(' ', sizeof_fmt(size))
        if self.quiet_repo_display:
            out.write(out.fg('cyan'), " [%d]" % (self.repos[op.pkg.repo]))
    out.write('\n')
    out.autoline = origautoline
def format(self, op):
    """Write a portage-style one-line summary of merge operation *op*.

    Variant using ``self.pkg_get_use`` to obtain forced/disabled USE.
    """
    # <type> - ebuild, block or nomerge (for --tree)
    # N - new package
    # R - rebuild package
    # F - fetch restricted
    # f - fetch restricted already downloaded
    # D - downgrade
    # U - updating to another version
    # # - masked
    # * - missing keyword
    # ~ - unstable keyword
    # Caveats:
    # - U and D are both displayed to show a downgrade - this is kept
    # in order to be consistent with existing portage behaviour
    out = self.out
    origautoline = out.autoline
    out.autoline = False
    self.pkg_disabled_use = self.pkg_forced_use = set()
    if hasattr(self, 'pkg_get_use'):
        self.pkg_forced_use, _, self.pkg_disabled_use = self.pkg_get_use(op.pkg)
    # This is for the summary at the end
    if self.quiet_repo_display:
        self.repos.setdefault(op.pkg.repo, len(self.repos)+1)
    pkg_is_bold = any(x.match(op.pkg) for x in getattr(self, 'world_list', ()))
    # We don't do blockers or --tree stuff yet
    data = ['[']
    pkg_coloring = []
    if pkg_is_bold:
        pkg_coloring.append(out.bold)
    if op.desc == 'remove':
        pkg_coloring.insert(0, out.fg('red'))
        data += pkg_coloring + ['uninstall']
    elif getattr(op.pkg, 'built', False):
        pkg_coloring.insert(0, out.fg('magenta'))
        data += pkg_coloring + ['binary']
    else:
        pkg_coloring.insert(0, out.fg('green'))
        data += pkg_coloring + ['ebuild']
    data += [out.reset, ' ']
    out.write(*data)
    # Order is important here - look at the above diagram
    op_type = op.desc
    op_chars = [[' '] for x in range(7)]
    if 'fetch' in op.pkg.restrict:
        # 'F' unless every distfile is already present, then 'f'.
        fetched = [out.fg('red'), out.bold, 'F', out.reset]
        for fetchable in op.pkg.fetchables:
            if not os.path.isfile(pjoin(self.distdir, fetchable.filename)):
                break
            fetched = [out.fg('green'), out.bold, 'f', out.reset]
        op_chars[3] = fetched
    if op.desc == "add":
        op_chars[1] = [out.fg('green'), out.bold, 'N', out.reset]
        if op.pkg.slot != '0' and self.livefs_repos.match(op.pkg.unversioned_atom):
            op_chars[2] = [out.fg('green'), out.bold, 'S', out.reset]
            op_type = 'slotted_add'
    elif op.desc == "replace":
        if op.pkg == op.old_pkg:
            op_chars[2] = [out.fg('yellow'), out.bold, 'R', out.reset]
        else:
            op_chars[4] = [out.fg('cyan'), out.bold, 'U', out.reset]
            if op.pkg > op.old_pkg:
                op_type = 'upgrade'
            else:
                op_chars[5] = [out.fg('blue'), out.bold, 'D', out.reset]
                op_type = 'downgrade'
    elif op.desc == 'remove':
        pass
    else:
        # NOTE(review): args are passed as one tuple against two %r
        # placeholders; this %-formats incorrectly if it ever fires — verify.
        logger.warning("unformattable op type: desc(%r), %r", (op.desc, op))
    if self.verbose:
        if self.unstable_arch in op.pkg.keywords and \
                self.unstable_arch not in self.domain_settings['ACCEPT_KEYWORDS']:
            op_chars[6] = [out.fg('yellow'), out.bold, '~', out.reset]
        elif not op.pkg.keywords:
            op_chars[6] = [out.fg('red'), out.bold, '*', out.reset]
        else:
            for masked_atom in op.pkg.repo.default_visibility_limiters:
                if masked_atom.match(op.pkg.versioned_atom):
                    op_chars[6] = [out.fg('red'), out.bold, '#', out.reset]
                    break
    out.write(*(iflatten_instance(op_chars)))
    out.write('] ')
    self.visit_op(op_type)
    pkg = [op.pkg.cpvstr]
    if self.verbose:
        if op.pkg.subslot != op.pkg.slot:
            pkg.append(':%s/%s' % (op.pkg.slot, op.pkg.subslot))
        elif op.pkg.slot != '0':
            pkg.append(':%s' % op.pkg.slot)
        if not self.quiet_repo_display and op.pkg.source_repository and \
                op.pkg.source_repository != 'gentoo' or \
                (op.desc == 'replace' and op_type != 'replace' and \
                op.pkg.source_repository != op.old_pkg.source_repository):
            pkg.append("::%s" % op.pkg.source_repository)
    out.write(*(pkg_coloring + pkg + [out.reset]))
    installed = []
    if op.desc == 'replace':
        old_pkg = [op.old_pkg.fullver]
        if self.verbose:
            if op.old_pkg.subslot != op.old_pkg.slot:
                old_pkg.append(':%s/%s' % (op.old_pkg.slot, op.old_pkg.subslot))
            elif op.old_pkg.slot != '0':
                old_pkg.append(':%s' % op.old_pkg.slot)
            if not self.quiet_repo_display and op.old_pkg.source_repository and \
                    op.old_pkg.source_repository != 'gentoo' or \
                    op.pkg.source_repository != op.old_pkg.source_repository:
                old_pkg.append("::%s" % op.old_pkg.source_repository)
        if op_type != 'replace' or op.pkg.source_repository != op.old_pkg.source_repository:
            installed = ''.join(old_pkg)
    elif op_type == 'slotted_add':
        if self.verbose:
            pkgs = sorted([
                '%s:%s' % (x.fullver, x.slot)
                for x in self.livefs_repos.match(op.pkg.unversioned_atom)])
        else:
            pkgs = sorted([
                x.fullver
                for x in self.livefs_repos.match(op.pkg.unversioned_atom)])
        installed = ', '.join(pkgs)
    if installed:
        out.write(' ', out.fg('blue'), out.bold, '[%s]' % installed, out.reset)
    # Build a list of (useflags, use_expand_dicts) tuples.
    # HACK: if we are in "replace" mode we build a list of length
    # 4, else this is a list of length 2. We then pass this to
    # format_use which can take either 2 or 4 arguments.
    uses = ((), ())
    if op.desc == 'replace':
        uses = (
            self.iuse_strip(op.pkg.iuse), op.pkg.use,
            self.iuse_strip(op.old_pkg.iuse), op.old_pkg.use)
    elif op.desc == 'add':
        uses = (self.iuse_strip(op.pkg.iuse), op.pkg.use)
    stuff = map(self.use_splitter, uses)
    # Convert the list of tuples to a list of lists and a list of
    # dicts (both length 2 or 4).
    uselists, usedicts = zip(*stuff)
    self.format_use('use', *uselists)
    for expand in sorted(self.use_expand-self.use_expand_hidden):
        flaglists = [d.get(expand, ()) for d in usedicts]
        self.format_use(expand, *flaglists)
    if self.verbose:
        if not op.pkg.built:
            downloads = set(
                f.filename for f in op.pkg.fetchables
                if not os.path.isfile(pjoin(self.distdir, f.filename)))
            if downloads.difference(self.downloads):
                self.downloads.update(downloads)
                size = sum(
                    v.size for dist, v in
                    op.pkg.manifest.distfiles.iteritems()
                    if dist in downloads)
                if size:
                    self.download_size += size
                    out.write(' ', sizeof_fmt(size))
        if self.quiet_repo_display:
            out.write(out.fg('cyan'), " [%d]" % (self.repos[op.pkg.repo]))
    out.write('\n')
    out.autoline = origautoline
def visit_atoms(pkg, stream):
    """Flatten *stream* to atoms, expanding EAPI-2 use-deps when supported."""
    if pkg.eapi_obj.options.transitive_use_atoms:
        return iflatten_func(stream, _eapi2_flatten)
    return iflatten_instance(stream, atom)
loc_incrementals.update(loc_colon_parsed) # now space. for x in collapsed_d.pop("SPACE_SEPARATED", []): v = x.split() if v: loc_incrementals.update(v) # now reinterpret. for k, v in collapsed_d.iteritems(): if k not in loc_incrementals: collapsed_d[k] = v[-1] continue if k in loc_colon_parsed: collapsed_d[k] = filter(None, iflatten_instance( x.split(':') for x in v)) else: collapsed_d[k] = filter(None, iflatten_instance( x.split() for x in v)) return collapsed_d, loc_incrementals, loc_colon_parsed def string_collapse_envd(envd_dict, incrementals, colon_incrementals): """transform a passed in dict to strictly strings""" for k, v in envd_dict.iteritems(): if k not in incrementals: continue if k in colon_incrementals: envd_dict[k] = ':'.join(v) else: