def get_index(channel_urls=(), prepend=True, platform=None,
              use_cache=False, unknown=False, offline=False,
              prefix=None):
    """
    Return the index of packages available on the channels

    If prepend=False, only the channels passed in as arguments are used.
    If platform=None, then the current platform is used.
    If prefix is supplied, then the packages installed in that prefix are added.
    """
    channel_urls = config.normalize_urls(channel_urls, platform, offline)
    if prepend:
        pri0 = max(itervalues(channel_urls)) if channel_urls else 0
        for url, rec in iteritems(config.get_channel_urls(platform, offline)):
            channel_urls[url] = (rec[0], rec[1] + pri0)
    index = fetch_index(channel_urls, use_cache=use_cache, unknown=unknown)
    if prefix:
        for dist, info in iteritems(install.linked_data(prefix)):
            fn = dist + '.tar.bz2'
            channel = info.get('channel', '')
            if channel not in channel_urls:
                channel_urls[channel] = (config.canonical_channel_name(channel, True, True), 0)
            url_s, priority = channel_urls[channel]
            key = url_s + '::' + fn if url_s else fn
            if key not in index:
                # only if the package is not in the repodata, use local
                # conda-meta (with 'depends' defaulting to [])
                info.setdefault('depends', [])
                info['fn'] = fn
                info['schannel'] = url_s
                info['channel'] = channel
                info['url'] = channel + fn
                info['priority'] = priority
                index[key] = info
    return index
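# Illustrative usage sketch (not part of the original module; the channel URL
# and prefix below are hypothetical). With prepend=True (the default), the
# configured channels are merged in behind those passed explicitly, and
# packages already linked into the prefix are folded into the result.
example_index = get_index(channel_urls=('https://conda.example.com/pkgs',),
                          prefix='/opt/conda/envs/example')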
def __init__(self, index, sort=False, processed=False):
    self.index = index
    if not processed:
        for fkey, info in iteritems(index.copy()):
            if fkey.endswith(']'):
                continue
            for fstr in chain(info.get('features', '').split(),
                              info.get('track_features', '').split()):
                self.add_feature(fstr, group=False)
            for fstr in iterkeys(info.get('with_features_depends', {})):
                index['%s[%s]' % (fkey, fstr)] = info
                self.add_feature(fstr, group=False)
    groups = {}
    trackers = {}
    installed = set()
    for fkey, info in iteritems(index):
        groups.setdefault(info['name'], []).append(fkey)
        for feat in info.get('track_features', '').split():
            trackers.setdefault(feat, []).append(fkey)
        if 'link' in info and not fkey.endswith(']'):
            installed.add(fkey)
    self.groups = groups
    self.installed = installed
    self.trackers = trackers
    self.find_matches_ = {}
    self.ms_depends_ = {}
    if sort:
        for name, group in iteritems(groups):
            groups[name] = sorted(group, key=self.version_key, reverse=True)
def __init__(self, index, sort=False, processed=False):
    if not processed:
        for fkey, info in iteritems(index.copy()):
            for fstr in chain(info.get('features', '').split(),
                              info.get('track_features', '').split()):
                fpkg = fstr + '@'
                if fpkg not in index:
                    index[fpkg] = {
                        'name': fpkg, 'channel': '@', 'priority': 0,
                        'version': '0', 'build_number': 0, 'build': '',
                        'depends': [], 'track_features': fstr}
            for fstr in iterkeys(info.get('with_features_depends', {})):
                index['%s[%s]' % (fkey, fstr)] = info
    groups = {}
    trackers = {}
    installed = set()
    for fkey, info in iteritems(index):
        groups.setdefault(info['name'], []).append(fkey)
        for feat in info.get('track_features', '').split():
            trackers.setdefault(feat, []).append(fkey)
        if 'link' in info:
            installed.add(fkey)
    self.index = index
    self.groups = groups
    self.installed = installed
    self.trackers = trackers
    self.find_matches_ = {}
    self.ms_depends_ = {}
    if sort:
        for name, group in iteritems(groups):
            groups[name] = sorted(group, key=self.version_key, reverse=True)
def gen_clauses(self, specs):
    C = Clauses()

    # Creates a variable that represents the proposition:
    # Does the package set include package "fn"?
    for name, group in iteritems(self.groups):
        for fkey in group:
            C.new_var(fkey)
        # Install no more than one version of each package
        C.Require(C.AtMostOne, group)
        # Create an on/off variable for the entire group
        name = self.ms_to_v(name)
        C.name_var(C.Any(group, polarity=None, name=name), name + '?')

    # Creates a variable that represents the proposition:
    # Does the package set include track_feature "feat"?
    for name, group in iteritems(self.trackers):
        name = self.ms_to_v('@' + name)
        C.name_var(C.Any(group, polarity=None, name=name), name + '?')

    # Create propositions that assert:
    # If package "fn" is installed, its dependencies must be satisfied
    for group in itervalues(self.groups):
        for fkey in group:
            nkey = C.Not(fkey)
            for ms in self.ms_depends(fkey):
                if not ms.optional:
                    C.Require(C.Or, nkey, self.push_MatchSpec(C, ms))
    return C
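# Minimal sketch of the clause-generation pattern above (not from the original
# source; the package filenames are hypothetical, and the Clauses API is
# assumed to accept variable names directly, as gen_clauses does).
C = Clauses()
C.new_var('foo-1.0-0.tar.bz2')
C.new_var('foo-2.0-0.tar.bz2')
C.new_var('bar-1.0-0.tar.bz2')
# at most one version of "foo" may be installed
C.Require(C.AtMostOne, ['foo-1.0-0.tar.bz2', 'foo-2.0-0.tar.bz2'])
# installing foo-2.0 implies installing its (hypothetical) dependency bar;
# the implication is encoded as the clause (NOT foo-2.0) OR bar
C.Require(C.Or, C.Not('foo-2.0-0.tar.bz2'), 'bar-1.0-0.tar.bz2')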
def __init__(self, index, sort=False, processed=False):
    if not processed:
        for fkey, info in iteritems(index.copy()):
            for fstr in iterkeys(info.get('with_features_depends', {})):
                index['%s[%s]' % (fkey, fstr)] = info
    groups = {}
    trackers = {}
    installed = set()
    for fkey, info in iteritems(index):
        groups.setdefault(info['name'], []).append(fkey)
        for feat in info.get('track_features', '').split():
            trackers.setdefault(feat, []).append(fkey)
        if 'link' in info:
            installed.add(fkey)
    self.index = index
    self.groups = groups
    self.installed = installed
    self.trackers = trackers
    self.find_matches_ = {}
    self.ms_depends_ = {}
    if sort:
        for name, group in iteritems(groups):
            groups[name] = sorted(group, key=self.version_key, reverse=True)
def app_get_index(all_version=False):
    """
    return the index of available applications on the channels

    By default only the latest version of each app is included in the
    result, unless all_version is set to True.
    """
    import sys
    pyxx = 'py%d%d' % sys.version_info[:2]

    def filter_build(build):
        return bool(pyxx in build) if 'py' in build else True

    index = {fn: info for fn, info in iteritems(get_index())
             if info.get('type') == 'app' and filter_build(info['build'])}
    if all_version:
        return index

    d = defaultdict(list)  # name -> list of Package objects
    for fn, info in iteritems(index):
        d[_name_fn(fn)].append(Package(fn, info))

    res = {}
    for pkgs in itervalues(d):
        pkg = max(pkgs)
        res[pkg.fn] = index[pkg.fn]
    return res
def build_groups(index):
    groups = {}
    feats = {}
    for fn, info in iteritems(index):
        if fn[-1] == '@':
            assert info['name'] == fn and info.get('track_features', '') == fn[:-1]
            feats[fn] = info
        else:
            groups.setdefault(info['name'], []).append(fn)
            for feat in info.get('track_features', '').split():
                groups.setdefault(feat + '@', []).append(fn)
    for fn, info in iteritems(feats):
        groups.setdefault(fn, []).append(fn)
    return groups
def filter_group(matches):
    # If we are here, then this dependency is mandatory, so add it to the
    # master list. That way it still participates in the pruning even if
    # one of its parents is pruned away
    match1 = next(ms for ms in matches)
    isopt = all(ms.optional for ms in matches)
    name = match1.name
    isfeat = name[0] == '@'
    first = name not in snames

    if isfeat:
        assert len(matches) == 1 and match1.strictness == 1
        group = self.trackers.get(name[1:], [])
    else:
        group = self.groups.get(name, [])

    # Prune packages that don't match any of the patterns
    # or which have unsatisfiable dependencies
    nold = nnew = 0
    for fkey in group:
        if filter.setdefault(fkey, True):
            nold += 1
            sat = isfeat or self.match_any(matches, fkey)
            sat = sat and all(any(filter.get(f2, True) for f2 in self.find_matches(ms))
                              for ms in self.ms_depends(fkey))
            filter[fkey] = sat
            nnew += sat

    # Quick exit if we detect unsatisfiability
    reduced = nnew < nold
    if reduced:
        log.debug('%s: pruned from %d -> %d' % (name, nold, nnew))
    if nnew == 0:
        if name in snames:
            snames.remove(name)
        if not isopt:
            raise BadPrune(name)
        return nnew != 0
    if not reduced and not first or isopt or isfeat:
        return reduced

    # Perform the same filtering steps on any dependencies shared across
    # *all* packages in the group. Even if just one of the packages does
    # not have a particular dependency, it must be ignored in this pass.
    if first:
        snames.add(name)
    cdeps = defaultdict(list)
    for fkey in group:
        if filter[fkey]:
            for m2 in self.ms_depends(fkey):
                if m2.name[0] != '@' and not m2.optional:
                    cdeps[m2.name].append(m2)
    cdeps = {mname: set(deps) for mname, deps in iteritems(cdeps)
             if len(deps) >= nnew}
    if cdeps:
        matches = [(ms,) for ms in matches]
        if sum(filter_group(deps) for deps in itervalues(cdeps)):
            reduced = True
    return reduced
def compress_(chains, pfx):
    if not chains:
        return ()
    chain0 = set()
    sname = next(_[0] for _ in chains).split(' ', 1)[0]
    cdict = defaultdict(set)
    for c in chains:
        if len(c) == 1:
            chain0.add(c[0].partition(' ')[-1])
        else:
            cdict[c[-1]].add(c[1:-1])
    res = []
    for csuff, cmid in iteritems(cdict):
        cname, _, cver = csuff.partition(' ')
        if cmid is not None and (cname != sname or cver not in chain0):
            cmid = sorted(cmid, key=len)
            if len(cmid[0]) == 0:
                c = (pfx, csuff)
            elif len(cmid[0]) == 1:
                mids = set(c[0] for c in cmid if len(c) == 1)
                c = (pfx, ','.join(sorted(mids)), csuff)
            else:
                mids = set(c[0] for c in cmid)
                c = (pfx, ','.join(sorted(mids)), '...', csuff)
            res.append(c)
    if chain0:
        if '' in chain0:
            res.append((sname,))
        else:
            res.append((sname + ' ' + '|'.join(sorted(chain0)),))
    return sorted(res, key=lambda x: (len(x), x))
def launch(fn, prefix=config.root_dir, additional_args=None):
    info = install.is_linked(prefix, fn[:-8])
    if info is None:
        return None

    if not info.get("type") == "app":
        raise Exception("Not an application: %s" % fn)

    # prepend the bin directory to the path
    fmt = r"%s\Scripts;%s" if sys.platform == "win32" else "%s/bin:%s"
    env = {"PATH": fmt % (abspath(prefix), os.getenv("PATH"))}

    # copy existing environment variables, but not anything with PATH in it
    for k, v in iteritems(os.environ):
        if "PATH" not in k:
            env[k] = v

    # allow updating environment variables from metadata
    if "app_env" in info:
        env.update(info["app_env"])

    # call the entry command
    args = info["app_entry"].split()
    args = [a.replace("${PREFIX}", prefix) for a in args]
    arg0 = find_executable(args[0], env["PATH"])
    if arg0 is None:
        raise Exception("Executable not found: %s" % args[0])
    args[0] = arg0

    cwd = abspath(expanduser("~"))
    if additional_args:
        args.extend(additional_args)
    return subprocess.Popen(args, cwd=cwd, env=env)
def app_get_index():
    """
    return the index of available applications on the channels
    """
    index = get_index()
    return {fn: info for fn, info in iteritems(index)
            if info.get('type') == 'app'}
def get_index(channel_urls=(), prepend=True, platform=None,
              use_cache=False, unknown=False, offline=False,
              prefix=None):
    """
    Return the index of packages available on the channels

    If prepend=False, only the channels passed in as arguments are used.
    If platform=None, then the current platform is used.
    If prefix is supplied, then the packages installed in that prefix are added.
    """
    channel_urls = config.normalize_urls(channel_urls, platform=platform)
    if prepend:
        channel_urls += config.get_channel_urls(platform=platform)
    if offline:
        channel_urls = [url for url in channel_urls if url.startswith('file:')]
    index = fetch_index(tuple(channel_urls), use_cache=use_cache, unknown=unknown)
    if prefix:
        for dist, info in iteritems(install.linked_data(prefix)):
            fn = dist + '.tar.bz2'
            if fn not in index:
                # only if the package is not in the repodata, use local
                # conda-meta (with 'depends' defaulting to [])
                info.setdefault('depends', [])
                index[fn] = info
    return index
def launch(fn, prefix=config.root_dir, additional_args=None):
    info = install.is_linked(prefix, fn[:-8])
    if info is None:
        return None

    if not info.get('type') == 'app':
        raise Exception('Not an application: %s' % fn)

    # prepend the bin directory to the path
    fmt = r'%s\Scripts;%s' if sys.platform == 'win32' else '%s/bin:%s'
    env = {'PATH': fmt % (abspath(prefix), os.getenv('PATH'))}

    # copy existing environment variables, but not anything with PATH in it
    for k, v in iteritems(os.environ):
        if 'PATH' not in k:
            env[k] = v

    # allow updating environment variables from metadata
    if 'app_env' in info:
        env.update(info['app_env'])

    # call the entry command
    args = info['app_entry'].split()
    args = [a.replace('${PREFIX}', prefix) for a in args]
    arg0 = find_executable(args[0], env['PATH'])
    if arg0 is None:
        raise Exception('Executable not found: %s' % args[0])
    args[0] = arg0

    cwd = abspath(expanduser('~'))
    if additional_args:
        args.extend(additional_args)
    return subprocess.Popen(args, cwd=cwd, env=env)
def test_LinearBound():
    L = [
        ([], [0, 1], 10),
        ([], [1, 2], 10),
        ({'x1': 2, 'x2': 2}, [3, 3], 10),
        ({'x1': 2, 'x2': 2}, [0, 1], 1000),
        ({'x1': 1, 'x2': 2}, [0, 2], 1000),
        ({'x1': 2, '!x2': 2}, [0, 2], 1000),
        ([(1, 1), (2, 2), (3, 3)], [3, 3], 1000),
        ([(0, 1), (1, 2), (2, 3), (0, 4), (1, 5), (0, 6), (1, 7)],
         [0, 2], 1000),
        ([(0, 1), (1, 2), (2, 3), (0, 4), (1, 5), (0, 6), (1, 7),
          (3, False), (2, True)],
         [2, 4], 1000),
        ([(1, 15), (2, 16), (3, 17), (4, 18), (5, 6), (5, 19), (6, 7),
          (6, 20), (7, 8), (7, 21), (7, 28), (8, 9), (8, 22), (8, 29),
          (8, 41), (9, 10), (9, 23), (9, 30), (9, 42), (10, 1), (10, 11),
          (10, 24), (10, 31), (10, 34), (10, 37), (10, 43), (10, 46),
          (10, 50), (11, 2), (11, 12), (11, 25), (11, 32), (11, 35),
          (11, 38), (11, 44), (11, 47), (11, 51), (12, 3), (12, 4),
          (12, 5), (12, 13), (12, 14), (12, 26), (12, 27), (12, 33),
          (12, 36), (12, 39), (12, 40), (12, 45), (12, 48), (12, 49),
          (12, 52), (12, 53), (12, 54)],
         [192, 204], 100),
    ]
    for eq, rhs, max_iter in L:
        if isinstance(eq, dict):
            N = len(eq)
        else:
            N = max([0] + [a for c, a in eq if a is not True and a is not False])
        C = Clauses(N)
        C2 = Clauses(N)
        Cpos = Clauses(N)
        Cneg = Clauses(N)
        if isinstance(eq, dict):
            for k in range(1, N + 1):
                nm = 'x%d' % k
                C.name_var(k, nm)
                C2.name_var(k, nm)
                Cpos.name_var(k, nm)
                Cneg.name_var(k, nm)
            eq2 = [(v, C.from_name(c)) for c, v in iteritems(eq)]
        else:
            eq2 = eq
        x = C.LinearBound(eq, rhs[0], rhs[1])
        x2 = C2.LinearBound(eq, rhs[0], rhs[1], 'sorter')
        Cpos.Require(Cpos.LinearBound, eq, rhs[0], rhs[1])
        Cneg.Prevent(Cneg.LinearBound, eq, rhs[0], rhs[1])
        if x is not False:
            for _, sol in zip(range(max_iter),
                              C.itersolve([] if x is True else [(x,)], N)):
                assert rhs[0] <= my_EVAL(eq2, sol) <= rhs[1], C.clauses
        if x2 is not False:
            for _, sol in zip(range(max_iter),
                              C2.itersolve([] if x2 is True else [(x2,)], N)):
                assert rhs[0] <= my_EVAL(eq2, sol) <= rhs[1], C2.clauses
        if x is not True:
            for _, sol in zip(range(max_iter),
                              C.itersolve([] if x is False else [(C.Not(x),)], N)):
                assert not (rhs[0] <= my_EVAL(eq2, sol) <= rhs[1]), C.clauses
        if x2 is not True:
            for _, sol in zip(range(max_iter),
                              C2.itersolve([] if x2 is False else [(C2.Not(x2),)], N)):
                assert not (rhs[0] <= my_EVAL(eq2, sol) <= rhs[1]), C2.clauses
        for _, sol in zip(range(max_iter), Cpos.itersolve([], N)):
            assert rhs[0] <= my_EVAL(eq2, sol) <= rhs[1], ('Cpos', Cpos.clauses)
        for _, sol in zip(range(max_iter), Cneg.itersolve([], N)):
            assert not (rhs[0] <= my_EVAL(eq2, sol) <= rhs[1]), ('Cneg', Cneg.clauses)
def get_index(channel_urls=(), prepend=True, platform=None,
              use_local=False, use_cache=False, unknown=False,
              offline=False, prefix=None):
    """
    Return the index of packages available on the channels

    If prepend=False, only the channels passed in as arguments are used.
    If platform=None, then the current platform is used.
    If prefix is supplied, then the packages installed in that prefix are added.
    """
    if use_local:
        channel_urls = ['local'] + list(channel_urls)
    channel_urls = normalize_urls(channel_urls, platform, offline)
    if prepend:
        channel_urls.extend(get_channel_urls(platform, offline))
    channel_urls = prioritize_channels(channel_urls)
    index = fetch_index(channel_urls, use_cache=use_cache, unknown=unknown)
    if prefix:
        priorities = {c: p for c, p in itervalues(channel_urls)}
        for dist, info in iteritems(install.linked_data(prefix)):
            fn = info['fn']
            schannel = info['schannel']
            url_prefix = '' if schannel == 'defaults' else schannel + '::'
            priority = priorities.get(schannel, 0)
            key = url_prefix + fn
            if key in index:
                # Copy the link information so the resolver knows this is installed
                index[key]['link'] = info.get('link')
            else:
                # only if the package is not in the repodata, use local
                # conda-meta (with 'depends' defaulting to [])
                info.setdefault('depends', [])
                info['priority'] = priority
                index[key] = info
    return index
def prune_features():
    feats = set()
    for ms in specs:
        for fn in self.groups.get(ms.name, []):
            if valid.get(fn, True):
                feats.update(self.track_features(fn))
    pruned = False
    for feat in active - feats:
        active.remove(feat)
        for fn in self.groups[feat + '@']:
            if valid.get(fn, True):
                valid[fn] = False
                pruned = True
    for name, group in iteritems(self.groups):
        nold = npruned = 0
        for fn in group:
            if valid.get(fn, True):
                nold += 1
                if self.features(fn) - feats:
                    valid[fn] = False
                    npruned += 1
        if npruned:
            pruned = True
            log.debug('%s: pruned from %d -> %d for missing features'
                      % (name, nold, nold - npruned))
            if npruned == nold:
                for ms in specs:
                    if ms.name == name and not ms.optional:
                        bad_deps.append((ms, name + '@'))
    return pruned
def check_fields(self):
    for section, submeta in iteritems(self.meta):
        if section not in FIELDS:
            sys.exit("Error: unknown section: %s" % section)
        for key in submeta:
            if key not in FIELDS[section]:
                sys.exit("Error: in section %r: unknown key %r" %
                         (section, key))
def build(m):
    env = dict(os.environ)
    env.update(environ.get_dict(m))

    for name in 'BIN', 'INC', 'LIB':
        path = env['LIBRARY_' + name]
        if not isdir(path):
            os.makedirs(path)

    src_dir = source.get_dir()
    bld_bat = join(m.path, 'bld.bat')
    with open(bld_bat) as fi:
        data = fi.read()
    with open(join(src_dir, 'bld.bat'), 'w') as fo:
        fo.write(msvc_env_cmd())
        # more debuggable with echo on
        fo.write('@echo on\n')
        for kv in iteritems(env):
            fo.write('set %s=%s\n' % kv)
        fo.write("REM ===== end generated header =====\n")
        fo.write(data)

    cmd = [os.environ['COMSPEC'], '/c', 'bld.bat']
    _check_call(cmd, cwd=src_dir)
    kill_processes()
    fix_staged_scripts()
def groupby(key, seq):
    """ Group a collection by a key function

    >>> names = ['Alice', 'Bob', 'Charlie', 'Dan', 'Edith', 'Frank']
    >>> groupby(len, names)  # doctest: +SKIP
    {3: ['Bob', 'Dan'], 5: ['Alice', 'Edith', 'Frank'], 7: ['Charlie']}

    >>> iseven = lambda x: x % 2 == 0
    >>> groupby(iseven, [1, 2, 3, 4, 5, 6, 7, 8])  # doctest: +SKIP
    {False: [1, 3, 5, 7], True: [2, 4, 6, 8]}

    Non-callable keys imply grouping on a member.

    >>> groupby('gender', [{'name': 'Alice', 'gender': 'F'},
    ...                    {'name': 'Bob', 'gender': 'M'},
    ...                    {'name': 'Charlie', 'gender': 'M'}])  # doctest:+SKIP
    {'F': [{'gender': 'F', 'name': 'Alice'}],
     'M': [{'gender': 'M', 'name': 'Bob'},
           {'gender': 'M', 'name': 'Charlie'}]}

    See Also:
        countby
    """
    if not callable(key):
        key = getter(key)
    d = defaultdict(lambda: [].append)
    for item in seq:
        d[key(item)](item)
    rv = {}
    for k, v in iteritems(d):
        rv[k] = v.__self__
    return rv
def generate_version_metrics(self, C, specs):
    eqv = {}
    eqb = {}
    sdict = {}
    for s in specs:
        s = MatchSpec(s)  # needed for testing
        sdict.setdefault(s.name, []).append(s)
    for name, mss in iteritems(sdict):
        pkgs = [(self.version_key(p), p) for p in self.groups.get(name, [])]
        # If the "target" field in the MatchSpec is supplied, that means we want
        # to minimize the changes to the currently installed package. We prefer
        # any upgrade over any downgrade, but beyond that we want minimal change.
        targets = [ms.target for ms in mss if ms.target and ms.target in self.index]
        if targets:
            v1 = [(self.version_key(p), p) for p in targets]
            tver = max(v1)
            v2 = [p for p in pkgs if p > tver]
            v3 = list(reversed([p for p in pkgs if p <= tver and p not in v1]))
            pkgs = v1 + v2 + v3
        pkey = None
        for nkey, npkg in pkgs:
            if pkey is None:
                iv = ib = 0
            elif pkey[0] != nkey[0] or pkey[1] != nkey[1]:
                iv += 1
                ib = 0
            elif pkey[2] != nkey[2]:
                ib += 1
            if iv:
                eqv[npkg] = iv
            if ib:
                eqb[npkg] = ib
            pkey = nkey
    return eqv, eqb
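# Standalone sketch of the counting scheme above (hypothetical dists; version
# keys assumed to be (channel priority, version, build number), newest first):
# iv bumps on any version change, ib on a build-only change, so the preferred
# dist carries no penalty at all.
pkgs = [((0, '2.0', 1), 'foo-2.0-1'),
        ((0, '2.0', 0), 'foo-2.0-0'),
        ((0, '1.0', 0), 'foo-1.0-0')]
eqv, eqb, pkey = {}, {}, None
for nkey, npkg in pkgs:
    if pkey is None:
        iv = ib = 0
    elif pkey[0] != nkey[0] or pkey[1] != nkey[1]:
        iv += 1
        ib = 0
    elif pkey[2] != nkey[2]:
        ib += 1
    if iv:
        eqv[npkg] = iv
    if ib:
        eqb[npkg] = ib
    pkey = nkey
assert eqv == {'foo-1.0-0': 1} and eqb == {'foo-2.0-0': 1}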
def get_index(channel_urls=(), prepend=True, platform=None,
              use_cache=False, unknown=False, offline=False,
              prefix=None):
    """
    Return the index of packages available on the channels

    If prepend=False, only the channels passed in as arguments are used.
    If platform=None, then the current platform is used.
    If prefix is supplied, then the packages installed in that prefix are added.
    """
    channel_urls = config.normalize_urls(channel_urls, platform=platform)
    if prepend:
        channel_urls += config.get_channel_urls(platform=platform)
    if offline:
        channel_urls = [url for url in channel_urls if url.startswith('file:')]
    index = fetch_index(tuple(channel_urls), use_cache=use_cache, unknown=unknown)
    if prefix:
        for fn, info in iteritems(install.linked_data(prefix)):
            fn = fn + '.tar.bz2'
            orec = index.get(fn)
            if orec is not None:
                if orec.get('md5', None) == info.get('md5', None):
                    continue
                info.setdefault('depends', orec.get('depends', []))
            index[fn] = info
    return index
def __init__(self, index):
    self.index = index
    self.groups = defaultdict(list)  # map name to list of filenames
    for fn, info in iteritems(index):
        self.groups[info['name']].append(fn)
    self.msd_cache = {}
    self.clear_filter()
def generate_version_eq(self, v, groups, specs, majoronly=False,
                        include0=False):
    eq = []
    sdict = {}
    for s in specs:
        s = MatchSpec(s)  # needed for testing
        sdict.setdefault(s.name, []).append(s)
    key = lambda x: self.version_key(x, majoronly)
    for name, mss in iteritems(sdict):
        if name[-1] == '@' or all(ms.optional for ms in mss):
            continue
        pkgs = [(key(p), p) for p in groups[name]]
        # If the "target" field in the MatchSpec is supplied, that means we want
        # to minimize the changes to the currently installed package. We prefer
        # any upgrade over any downgrade, but beyond that we want minimal change.
        targets = [ms.target for ms in mss if ms.target]
        if targets:
            v1 = sorted(((key(t), t) for t in targets), reverse=True)
            v2 = sorted((p for p in pkgs if p > v1[0]))
            v3 = sorted((p for p in pkgs if p < v1[0]), reverse=True)
            pkgs = v1 + v2 + v3
        else:
            pkgs = sorted(pkgs, reverse=True)
        i = 0
        prev = None
        for nkey, pkg in pkgs:
            if prev and prev != nkey:
                i += 1
            if i or include0:
                eq += [(i, v[pkg])]
            prev = nkey
    return eq
def build(m):
    env = dict(os.environ)
    env.update(environ.get_dict(m))

    for name in 'BIN', 'INC', 'LIB':
        path = env['LIBRARY_' + name]
        if not isdir(path):
            os.makedirs(path)

    src_dir = source.get_dir()
    bld_bat = join(m.path, 'bld.bat')
    if exists(bld_bat):
        with open(bld_bat) as fi:
            data = fi.read()
        with open(join(src_dir, 'bld.bat'), 'w') as fo:
            fo.write(msvc_env_cmd())
            # more debuggable with echo on
            fo.write('@echo on\n')
            for kv in iteritems(env):
                fo.write('set %s=%s\n' % kv)
            fo.write("REM ===== end generated header =====\n")
            fo.write(data)

        cmd = [os.environ['COMSPEC'], '/c', 'call', 'bld.bat']
        _check_call(cmd, cwd=src_dir)
        kill_processes()
        fix_staged_scripts()
def generate_version_metric(self, C, groups, specs, majoronly=False):
    eq = {}
    sdict = {}
    for s in specs:
        s = MatchSpec(s)  # needed for testing
        sdict.setdefault(s.name, []).append(s)
    key = lambda x: self.version_key(x, majoronly)
    for name, mss in iteritems(sdict):
        pkgs = groups.get(name, [])
        pkgs = [(key(p), p) for p in pkgs]
        # If the "target" field in the MatchSpec is supplied, that means we want
        # to minimize the changes to the currently installed package. We prefer
        # any upgrade over any downgrade, but beyond that we want minimal change.
        targets = [ms.target for ms in mss if ms.target]
        if targets:
            v1 = sorted(((key(t), t) for t in targets), reverse=True)
            v2 = sorted((p for p in pkgs if p > v1[0]))
            v3 = sorted((p for p in pkgs if p < v1[0]), reverse=True)
            pkgs = v1 + v2 + v3
        else:
            pkgs = sorted(pkgs, reverse=True)
        i = 0
        prev = None
        for nkey, pkg in pkgs:
            if prev and prev != nkey:
                i += 1
            if i:
                eq[pkg] = i
            prev = nkey
    return eq
def fetch_index(channel_urls, use_cache=False, unknown=False):
    log.debug('channel_urls=' + repr(channel_urls))
    # pool = ThreadPool(5)
    index = {}
    stdoutlog.info("Fetching package metadata ...")
    if not isinstance(channel_urls, dict):
        channel_urls = {url: pri + 1 for pri, url in enumerate(channel_urls)}
    for url in iterkeys(channel_urls):
        if config.allowed_channels and url not in config.allowed_channels:
            sys.exit("""
Error: URL '%s' not in allowed channels.

Allowed channels are:
  - %s
""" % (url, '\n  - '.join(config.allowed_channels)))

    try:
        import concurrent.futures
        executor = concurrent.futures.ThreadPoolExecutor(10)
    except (ImportError, RuntimeError):
        # concurrent.futures is only available in Python >= 3.2 or if futures is installed
        # RuntimeError is thrown if number of threads are limited by OS
        session = CondaSession()
        repodatas = [(url, fetch_repodata(url, use_cache=use_cache, session=session))
                     for url in iterkeys(channel_urls)]
    else:
        try:
            urls = tuple(channel_urls)
            futures = tuple(executor.submit(fetch_repodata, url, use_cache=use_cache,
                                            session=CondaSession()) for url in urls)
            repodatas = [(u, f.result()) for u, f in zip(urls, futures)]
        finally:
            executor.shutdown(wait=True)

    for channel, repodata in repodatas:
        if repodata is None:
            continue
        new_index = repodata['packages']
        url_s, priority = channel_urls[channel]
        for fn, info in iteritems(new_index):
            info['fn'] = fn
            info['schannel'] = url_s
            info['channel'] = channel
            info['priority'] = priority
            info['url'] = channel + fn
            key = url_s + '::' + fn if url_s else fn
            index[key] = info

    stdoutlog.info('\n')
    if unknown:
        add_unknown(index, channel_urls)
    if config.add_pip_as_python_dependency:
        add_pip_dependency(index)
    return index
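# Illustrative input shape for the dict form above (hypothetical channel;
# a real call needs network access): each channel URL maps to a
# (canonical_name, priority) tuple, and entries land in the index keyed
# 'name::fn' for channels with a non-empty canonical name.
channels = {'https://conda.example.com/pkgs/free/linux-64/': ('example', 1)}
example_index = fetch_index(channels, use_cache=False, unknown=False)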
def build_groups(index):
    groups = {}
    trackers = {}
    for fn, info in iteritems(index):
        groups.setdefault(info['name'], []).append(fn)
        for feat in info.get('track_features', '').split():
            trackers.setdefault(feat, []).append(fn)
    return groups, trackers
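# Illustrative sketch (hypothetical index entries): build_groups maps each
# package name, and each tracked feature, to the filenames that provide it.
example_index = {
    'numpy-1.9.2-py27_0.tar.bz2': {'name': 'numpy', 'track_features': ''},
    'mkl-11.3.1-0.tar.bz2': {'name': 'mkl', 'track_features': 'mkl'},
}
groups, trackers = build_groups(example_index)
assert groups == {'numpy': ['numpy-1.9.2-py27_0.tar.bz2'],
                  'mkl': ['mkl-11.3.1-0.tar.bz2']}
assert trackers == {'mkl': ['mkl-11.3.1-0.tar.bz2']}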
def update_index(dir_path, verbose=False, force=False):
    if verbose:
        print("updating index in:", dir_path)
    index_path = join(dir_path, '.index.json')
    if force:
        index = {}
    else:
        try:
            mode_dict = {'mode': 'r', 'encoding': 'utf-8'} if PY3 else {'mode': 'rb'}
            with open(index_path, **mode_dict) as fi:
                index = json.load(fi)
        except (IOError, ValueError):
            index = {}

    files = set(fn for fn in os.listdir(dir_path) if fn.endswith('.tar.bz2'))
    for fn in files:
        path = join(dir_path, fn)
        if fn in index and index[fn]['mtime'] == getmtime(path):
            continue
        if verbose:
            print('updating:', fn)
        d = read_index_tar(path)
        d.update(file_info(path))
        index[fn] = d

    # remove files from the index which are not on disk
    for fn in set(index) - files:
        if verbose:
            print("removing:", fn)
        del index[fn]

    # Deal with Python 2 and 3's different json module type reqs
    mode_dict = {'mode': 'w', 'encoding': 'utf-8'} if PY3 else {'mode': 'wb'}
    with open(index_path, **mode_dict) as fo:
        json.dump(index, fo, indent=2, sort_keys=True)

    # --- new repodata
    icons = {}
    for fn in index:
        info = index[fn]
        if '_icondata' in info:
            icons[info['_iconmd5']] = base64.b64decode(info['_icondata'])
            assert '%(_iconmd5)s.png' % info == info['icon']
        for varname in ('arch', 'platform', 'mtime', 'ucs', '_icondata', '_iconmd5'):
            try:
                del info[varname]
            except KeyError:
                pass

    if icons:
        icons_dir = join(dir_path, 'icons')
        if not isdir(icons_dir):
            os.mkdir(icons_dir)
        for md5, raw in iteritems(icons):
            with open(join(icons_dir, '%s.png' % md5), 'wb') as fo:
                fo.write(raw)

    repodata = {'packages': index, 'info': {}}
    write_repodata(repodata, dir_path)
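# Illustrative call (the directory is hypothetical): scan a channel
# subdirectory for .tar.bz2 files, refresh .index.json, and regenerate
# the repodata alongside any extracted icons.
update_index('/srv/channel/linux-64', verbose=True, force=False)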
def generate_feature_metric(self, C):
    eq = {}
    total = 0
    for name, group in iteritems(self.groups):
        nf = [len(self.features(fkey)) for fkey in group]
        maxf = max(nf)
        eq.update({fn: maxf - fc for fn, fc in zip(group, nf) if fc < maxf})
        total += maxf
    return eq, total
def check_duplicates():
    map_name = defaultdict(list)  # map package name to list of filenames
    for fn in dists:
        map_name[name_dist(fn)].append(fn)

    for name, files in iteritems(map_name):
        if len(files) > 1:
            sys.exit("Error: '%s' listed multiple times: %s" %
                     (name, ', '.join(files)))
def __init__(self, bad_deps, chains=True):
    bad_deps = [list(map(lambda x: x.spec, dep)) for dep in bad_deps]
    if chains:
        chains = {}
        for dep in sorted(bad_deps, key=len, reverse=True):
            dep1 = [str(MatchSpec(s)).partition(' ') for s in dep[1:]]
            key = (dep[0],) + tuple(v[0] for v in dep1)
            vals = ('',) + tuple(v[2] for v in dep1)
            found = False
            for key2, csets in iteritems(chains):
                if key2[:len(key)] == key:
                    for cset, val in zip(csets, vals):
                        cset.add(val)
                    found = True
            if not found:
                chains[key] = [{val} for val in vals]
        bad_deps = []
        for key, csets in iteritems(chains):
            deps = []
            for name, cset in zip(key, csets):
                if '' not in cset:
                    pass
                elif len(cset) == 1:
                    cset.clear()
                else:
                    cset.remove('')
                    cset.add('*')
                if name[0] == '@':
                    name = 'feature:' + name[1:]
                deps.append('%s %s' % (name, '|'.join(sorted(cset))) if cset else name)
            chains[key] = ' -> '.join(deps)
        bad_deps = [chains[key] for key in sorted(iterkeys(chains))]
        msg = '''The following specifications were found to be in conflict:%s
Use "conda info <package>" to see the dependencies for each package.'''
    else:
        bad_deps = [sorted(dep) for dep in bad_deps]
        bad_deps = [', '.join(dep) for dep in sorted(bad_deps)]
        msg = '''The following specifications were found to be incompatible with the
others, or with the existing package set:%s
Use "conda info <package>" to see the dependencies for each package.'''
    msg = msg % dashlist(bad_deps)
    super(Unsatisfiable, self).__init__(msg)
def fetch_index(channel_urls, use_cache=False, unknown=False):
    log.debug('channel_urls=' + repr(channel_urls))
    # pool = ThreadPool(5)
    index = {}
    stdoutlog.info("Fetching package metadata ...")
    session = CondaSession()
    if not isinstance(channel_urls, dict):
        channel_urls = {url: pri + 1 for pri, url in enumerate(channel_urls)}
    for url in iterkeys(channel_urls):
        if config.allowed_channels and url not in config.allowed_channels:
            sys.exit("""
Error: URL '%s' not in allowed channels.

Allowed channels are:
  - %s
""" % (url, '\n  - '.join(config.allowed_channels)))

    try:
        import concurrent.futures
        from collections import OrderedDict

        repodatas = []
        with concurrent.futures.ThreadPoolExecutor(10) as executor:
            future_to_url = OrderedDict([(executor.submit(
                fetch_repodata, url, use_cache=use_cache, session=session), url)
                for url in iterkeys(channel_urls)])
            for future in future_to_url:
                url = future_to_url[future]
                repodatas.append((url, future.result()))
    except ImportError:
        # concurrent.futures is only available in Python 3
        repodatas = map(lambda url: (url, fetch_repodata(url, use_cache=use_cache,
                                                         session=session)),
                        iterkeys(channel_urls))

    for channel, repodata in repodatas:
        if repodata is None:
            continue
        new_index = repodata['packages']
        url_s, priority = channel_urls[channel]
        for fn, info in iteritems(new_index):
            info['fn'] = fn
            info['schannel'] = url_s
            info['channel'] = channel
            info['priority'] = priority
            info['url'] = channel + fn
            key = url_s + '::' + fn if url_s else fn
            index[key] = info

    stdoutlog.info('\n')
    if unknown:
        add_unknown(index, channel_urls)
    if config.add_pip_as_python_dependency:
        add_pip_dependency(index)
    return index
def fetch_index(channel_urls, use_cache=False, unknown=False, index=None):
    log.debug('channel_urls=' + repr(channel_urls))
    # pool = ThreadPool(5)
    if index is None:
        index = {}
    stdoutlog.info("Fetching package metadata ...")
    if not isinstance(channel_urls, dict):
        channel_urls = {url: pri + 1 for pri, url in enumerate(channel_urls)}
    for url in iterkeys(channel_urls):
        if allowed_channels and url not in allowed_channels:
            sys.exit("""
Error: URL '%s' not in allowed channels.

Allowed channels are:
  - %s
""" % (url, '\n  - '.join(allowed_channels)))

    try:
        import concurrent.futures
        executor = concurrent.futures.ThreadPoolExecutor(10)
    except (ImportError, RuntimeError):
        # concurrent.futures is only available in Python >= 3.2 or if futures is installed
        # RuntimeError is thrown if number of threads are limited by OS
        session = CondaSession()
        repodatas = [(url, fetch_repodata(url, use_cache=use_cache, session=session))
                     for url in iterkeys(channel_urls)]
    else:
        try:
            urls = tuple(channel_urls)
            futures = tuple(executor.submit(fetch_repodata, url, use_cache=use_cache,
                                            session=CondaSession()) for url in urls)
            repodatas = [(u, f.result()) for u, f in zip(urls, futures)]
        finally:
            executor.shutdown(wait=True)

    for channel, repodata in repodatas:
        if repodata is None:
            continue
        new_index = repodata['packages']
        url_s, priority = channel_urls[channel]
        channel = channel.rstrip('/')
        for fn, info in iteritems(new_index):
            info['fn'] = fn
            info['schannel'] = url_s
            info['channel'] = channel
            info['priority'] = priority
            info['url'] = channel + '/' + fn
            key = url_s + '::' + fn if url_s != 'defaults' else fn
            index[key] = info

    stdoutlog.info('\n')
    if unknown:
        add_unknown(index, channel_urls)
    if add_pip_as_python_dependency:
        add_pip_dependency(index)
    return index
def test_installable(channel='defaults', verbose=True):
    if not verbose:
        sys.stdout = open(os.devnull, 'w')
    success = False
    has_py = re.compile(r'py(\d)(\d)')
    for platform in ['osx-64', 'linux-32', 'linux-64', 'win-32', 'win-64']:
        print("######## Testing platform %s ########" % platform)
        channels = [channel] + get_default_urls()
        index = get_index(channel_urls=channels, prepend=False, platform=platform)
        for package, rec in iteritems(index):
            # If we give channels at the command line, only look at
            # packages from those channels (not defaults).
            if channel != 'defaults' and rec.get('schannel', 'defaults') == 'defaults':
                continue
            name = rec['name']
            if name in {'conda', 'conda-build'}:
                # conda can only be installed in the root environment
                continue
            # Don't fail just because the package is a different version of Python
            # than the default. We should probably check depends rather than the
            # build string.
            build = rec['build']
            match = has_py.search(build)
            assert match if 'py' in build else True, build
            if match:
                additional_packages = ['python=%s.%s' % (match.group(1),
                                                         match.group(2))]
            else:
                additional_packages = []

            version = rec['version']
            print('Testing %s=%s' % (name, version))
            # if additional_packages:
            #     print("Including %s" % additional_packages[0])

            try:
                check_install([name + '=' + version] + additional_packages,
                              channel_urls=channels, prepend=False,
                              platform=platform)
            except KeyboardInterrupt:
                raise
            # sys.exit raises an exception that doesn't subclass from Exception
            except BaseException as e:
                success = True
                print("FAIL: %s %s on %s with %s (%s)" %
                      (name, version, platform, additional_packages, e),
                      file=sys.stderr)
    return success
def LB_Preprocess_(self, equation):
    if type(equation) is dict:
        equation = [(c, self.varnum(a)) for a, c in iteritems(equation)]
    if any(c <= 0 or type(a) is bool for c, a in equation):
        offset = sum(c for c, a in equation
                     if a is True or a is not False and c <= 0)
        equation = [(c, a) if c > 0 else (-c, -a) for c, a in equation
                    if type(a) is not bool and c]
    else:
        offset = 0
    equation = sorted(equation)
    return equation, offset
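# Worked sketch of the preprocessing above (hypothetical variables 1 and 2):
#   input:  [(2, 1), (-3, 2), (4, True)]
#   offset: 4 + (-3) == 1   (a True atom contributes its coefficient, and each
#           non-positive coefficient is folded into the offset before its
#           term is flipped to (-c, -a))
#   output: ([(2, 1), (3, -2)], 1)
# so the returned equation has only positive coefficients and no boolean atoms.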
def environment_for_conda_environment(prefix=config.root_dir):
    # prepend the bin directory to the path
    fmt = r'%s\Scripts' if sys.platform == 'win32' else '%s/bin'
    binpath = fmt % abspath(prefix)
    path = os.path.pathsep.join([binpath, os.getenv('PATH')])
    env = {'PATH': path}
    # copy existing environment variables, but not PATH itself
    for k, v in iteritems(os.environ):
        if k != 'PATH':
            env[k] = v
    return binpath, env
def app_get_index(all_version=False):
    """
    return the index of available applications on the channels

    By default only the latest version of each app is included in the
    result, unless all_version is set to True.
    """
    index = {fn: info for fn, info in iteritems(get_index())
             if info.get('type') == 'app'}
    if all_version:
        return index

    d = defaultdict(list)  # name -> list of Package objects
    for fn, info in iteritems(index):
        d[_name_fn(fn)].append(Package(fn, info))

    res = {}
    for pkgs in itervalues(d):
        pkg = max(pkgs)
        res[pkg.fn] = index[pkg.fn]
    return res
def __init__(self, index):
    self.index = index.copy()
    self.feats = set()
    for fn, info in iteritems(index):
        for fstr in info.get('track_features', '').split():
            self.add_feature(fstr, False)
        for fstr in iterkeys(info.get('with_features_depends', {})):
            fn2 = fn + '[' + fstr + ']'
            self.index[fn2] = info
    self.groups = build_groups(self.index)
    self.find_matches_ = {}
    self.ms_depends_ = {}
def generate_feature_metric(self, C, groups, specs):
    eq = {}
    for name, group in iteritems(groups):
        nf = [len(self.features(fn)) for fn in group]
        maxf = max(nf)
        if min(nf) == maxf:
            continue
        if not any(ms.name == name for ms in specs if not ms.optional):
            maxf += 1
        eq.update({fn: maxf - fc for fn, fc in zip(group, nf) if fc < maxf})
    return eq
def full_prune(specs, removes, optional, features):
    self.default_filter(features, filter)
    for ms in removes:
        for fn in self.groups.get(ms.name, []):
            filter[fn] = False
    feats = set(self.trackers.keys())
    snames.clear()
    specs = slist = list(specs)
    onames = set(s.name for s in specs)
    for iter in range(10):
        first = True
        while sum(filter_group([s]) for s in slist) and not unsat:
            slist = specs + [MatchSpec(n) for n in snames - onames]
            first = False
        if unsat:
            return False
        if first and iter:
            return True
        touched.clear()
        for fstr in features:
            touched[fstr + '@'] = True
        for spec in chain(specs, optional):
            self.touch(spec, touched, filter)
        nfeats = set()
        for fn, val in iteritems(touched):
            if val:
                nfeats.update(self.track_features(fn))
        if len(nfeats) >= len(feats):
            return True
        pruned = False
        feats &= nfeats
        for fn, val in iteritems(touched):
            if val and self.features(fn) - feats:
                touched[fn] = filter[fn] = False
                pruned = True
        if not pruned:
            return True
def generate_package_count(self, v, groups, specs):
    eq = []
    snames = {s.name for s in map(MatchSpec, specs)}
    for name, pkgs in iteritems(groups):
        if name[-1] != '@' and name not in snames:
            pkg_ver = sorted([(self.version_key(p), p) for p in groups[name]],
                             reverse=True)
            i = 1
            prev = None
            for nkey, pkg in pkg_ver:
                if prev and prev != nkey:
                    i += 1
                eq += [(i, v[pkg])]
                prev = nkey
    return eq
def dependency_sort(self, must_have):
    def lookup(value):
        return set(ms.name for ms in self.ms_depends(value + '.tar.bz2'))

    digraph = {}
    for key, value in iteritems(must_have):
        depends = lookup(value)
        digraph[key] = depends
    sorted_keys = toposort(digraph)
    must_have = must_have.copy()
    # Take all of the items in the sorted keys
    # Don't fail if the key does not exist
    result = [must_have.pop(key) for key in sorted_keys if key in must_have]
    # Take any keys that were not sorted
    result.extend(must_have.values())
    return result
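# Illustrative sketch (hypothetical dists): with
#   must_have = {'numpy': 'numpy-1.9.2-py27_0', 'python': 'python-2.7.9-1'}
# lookup() would yield a digraph like {'numpy': {'python', ...}, 'python': {...}},
# and toposort orders dependencies first, so 'python-2.7.9-1' precedes
# 'numpy-1.9.2-py27_0' in the result.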