Example 1
def build_groups(index):
    groups = {}
    trackers = {}
    for fn, info in iteritems(index):
        groups.setdefault(info['name'], []).append(fn)
        for feat in info.get('track_features', '').split():
            trackers.setdefault(feat, []).append(fn)
    return groups, trackers
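
A minimal usage sketch (hypothetical filenames and metadata, assuming build_groups and its iteritems import are in scope): only the 'name' and 'track_features' fields are read.

index = {
    'numpy-1.8.1-py27_0.tar.bz2':  {'name': 'numpy', 'track_features': ''},
    'numpy-1.8.1-py27_p0.tar.bz2': {'name': 'numpy', 'track_features': 'mkl'},
    'mkl-rt-11.1-p0.tar.bz2':      {'name': 'mkl-rt', 'track_features': 'mkl'},
}
groups, trackers = build_groups(index)
# groups maps package name -> all filenames of that package
assert sorted(groups['numpy']) == ['numpy-1.8.1-py27_0.tar.bz2',
                                   'numpy-1.8.1-py27_p0.tar.bz2']
# trackers maps feature name -> filenames that track that feature
assert sorted(trackers['mkl']) == ['mkl-rt-11.1-p0.tar.bz2',
                                   'numpy-1.8.1-py27_p0.tar.bz2']
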
Example 2
 def generate_feature_metric(self, C, groups):
     eq = {}
     total = 0
     for name, group in iteritems(groups):
         nf = [len(self.features(fn)) for fn in group]
         maxf = max(nf)
         eq.update({fn: maxf-fc for fn, fc in zip(group, nf) if fc < maxf})
         total += maxf
     return eq, total
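
Within each name group the metric assigns a penalty of maxf - fc to every build that carries fewer features than the most-featured build, and adds maxf to the running total. A standalone re-derivation of that arithmetic on made-up data (no solver object involved):

features = {                                  # hypothetical feature sets per build
    'numpy-1.8-py27_0.tar.bz2':  set(),
    'numpy-1.8-py27_p0.tar.bz2': {'mkl'},
}
group = list(features)
nf = [len(features[fn]) for fn in group]
maxf = max(nf)                                # 1
eq = {fn: maxf - fc for fn, fc in zip(group, nf) if fc < maxf}
total = maxf

assert eq == {'numpy-1.8-py27_0.tar.bz2': 1}  # the feature-less build is penalized
assert total == 1
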
Example 3
 def LB_Preprocess_(self, equation):
     if type(equation) is dict:
         equation = [(c, self.varnum(a)) for a, c in iteritems(equation)]
     if any(c <= 0 or type(a) is bool for c, a in equation):
         offset = sum(c for c, a in equation if a is True or (a is not False and c <= 0))
         equation = [(c, a) if c > 0 else (-c, -a) for c, a in equation
                     if type(a) is not bool and c]
     else:
         offset = 0
     equation = sorted(equation)
     return equation, offset
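
A quick standalone check of what this preprocessing does (an illustrative copy of the list branch, not a call into the class): boolean atoms are folded into a constant offset, negative coefficients are flipped onto the negated literal, and the result is sorted.

def lb_normalize(equation):  # hypothetical helper mirroring the list branch above
    offset = sum(c for c, a in equation
                 if a is True or (a is not False and c <= 0))
    equation = [(c, a) if c > 0 else (-c, -a) for c, a in equation
                if type(a) is not bool and c]
    return sorted(equation), offset

# c*x with c < 0 is rewritten as c + (-c)*(1 - x), hence the -2 in the offset;
# the (1, True) term contributes its coefficient to the offset and disappears.
assert lb_normalize([(-2, 3), (1, True), (4, 5)]) == ([(2, -3), (4, 5)], -1)
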
Example 4
 def LB_Preprocess_(self, equation):
     if type(equation) is dict:
         equation = [(c, self.varnum(a)) for a, c in iteritems(equation)]
     if any(c <= 0 or type(a) is bool for c, a in equation):
         offset = sum(c for c, a in equation
                      if a is True or (a is not False and c <= 0))
         equation = [(c, a) if c > 0 else (-c, -a) for c, a in equation
                     if type(a) is not bool and c]
     else:
         offset = 0
     equation = sorted(equation)
     return equation, offset
Example 5
    def __init__(self, bad_deps, chains=True):
        bad_deps = [list(map(str, dep)) for dep in bad_deps]
        if chains:
            chains = {}
            for dep in sorted(bad_deps, key=len, reverse=True):
                dep1 = [str(MatchSpec(s)).partition(' ') for s in dep[1:]]
                key = (dep[0],) + tuple(v[0] for v in dep1)
                vals = ('',) + tuple(v[2] for v in dep1)
                found = False
                for key2, csets in iteritems(chains):
                    if key2[:len(key)] == key:
                        for cset, val in zip(csets, vals):
                            cset.add(val)
                        found = True
                if not found:
                    chains[key] = [{val} for val in vals]
            for key, csets in iteritems(chains):
                deps = []
                for name, cset in zip(key, csets):
                    if '' not in cset:
                        pass
                    elif len(cset) == 1:
                        cset.clear()
                    else:
                        cset.remove('')
                        cset.add('*')
                    deps.append('%s %s' % (name, '|'.join(sorted(cset))) if cset else name)
                chains[key] = ' -> '.join(deps)
            bad_deps = [chains[key] for key in sorted(iterkeys(chains))]
            msg = '''The following specifications were found to be in conflict:%s
Use "conda info <package>" to see the dependencies for each package.'''
        else:
            bad_deps = [sorted(dep) for dep in bad_deps]
            bad_deps = [', '.join(dep) for dep in sorted(bad_deps)]
            msg = '''The following specifications were found to be incompatible with the
others, or with the existing package set:%s
Use "conda info <package>" to see the dependencies for each package.'''
        msg = msg % dashlist(bad_deps)
        super(Unsatisfiable, self).__init__(msg)
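
The chains branch collapses conflict chains that share the same sequence of package names and joins their differing version constraints with '|'. A standalone sketch of that merging step on plain spec strings (hypothetical input, no MatchSpec involved):

chains = {}
for dep in [('a', 'b', 'c 1.0'), ('a', 'b', 'c 2.0')]:
    parts = [s.partition(' ') for s in dep]
    key = tuple(p[0] for p in parts)
    vals = tuple(p[2] for p in parts)
    if key in chains:
        for cset, val in zip(chains[key], vals):
            cset.add(val)
    else:
        chains[key] = [{val} for val in vals]

merged = []
for key, csets in chains.items():
    deps = []
    for name, cset in zip(key, csets):
        cset.discard('')
        deps.append('%s %s' % (name, '|'.join(sorted(cset))) if cset else name)
    merged.append(' -> '.join(deps))

assert merged == ['a -> b -> c 1.0|2.0']

The constructor above additionally folds shorter chains into longer ones that share a prefix and substitutes '*' when a name appears both with and without a constraint.
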
Example 6
def test_LinearBound():
    L = [
        ([], [0, 1], 10),
        ([], [1, 2], 10),
        ({'x1':2, 'x2':2}, [3, 3], 10),
        ({'x1':2, 'x2':2}, [0, 1], 1000),
        ({'x1':1, 'x2':2}, [0, 2], 1000),
        ({'x1':2, '!x2':2}, [0, 2], 1000),
        ([(1, 1), (2, 2), (3, 3)], [3, 3], 1000),
        ([(0, 1), (1, 2), (2, 3), (0, 4), (1, 5), (0, 6), (1, 7)], [0, 2], 1000),
        ([(0, 1), (1, 2), (2, 3), (0, 4), (1, 5), (0, 6), (1, 7),
          (3, False), (2, True)], [2, 4], 1000),
        ([(1, 15), (2, 16), (3, 17), (4, 18), (5, 6), (5, 19), (6, 7),
          (6, 20), (7, 8), (7, 21), (7, 28), (8, 9), (8, 22), (8, 29), (8, 41), (9,
          10), (9, 23), (9, 30), (9, 42), (10, 1), (10, 11), (10, 24), (10, 31),
          (10, 34), (10, 37), (10, 43), (10, 46), (10, 50), (11, 2), (11, 12), (11,
          25), (11, 32), (11, 35), (11, 38), (11, 44), (11, 47), (11, 51), (12, 3),
          (12, 4), (12, 5), (12, 13), (12, 14), (12, 26), (12, 27), (12, 33), (12,
          36), (12, 39), (12, 40), (12, 45), (12, 48), (12, 49), (12, 52), (12, 53),
          (12, 54)], [192, 204], 100),
        ]
    for eq, rhs, max_iter in L:
        if isinstance(eq, dict):
            N = len(eq)
        else:
            N = max([0]+[a for c,a in eq if a is not True and a is not False])
        C = Clauses(N)
        Cpos = Clauses(N)
        Cneg = Clauses(N)
        if isinstance(eq, dict):
            for k in range(1,N+1):
                nm = 'x%d'%k
                C.name_var(k, nm)
                Cpos.name_var(k, nm)
                Cneg.name_var(k, nm)
            eq2 = [(v,C.from_name(c)) for c,v in iteritems(eq)]
        else:
            eq2 = eq
        x = C.LinearBound(eq, rhs[0], rhs[1])
        Cpos.Require(Cpos.LinearBound, eq, rhs[0], rhs[1])
        Cneg.Prevent(Cneg.LinearBound, eq, rhs[0], rhs[1])
        if x is not False:
            for _, sol in zip(range(max_iter), C.itersolve([] if x is True else [(x,)],N)):
                assert rhs[0] <= my_EVAL(eq2,sol) <= rhs[1], C.clauses
        if x is not True:
            for _, sol in zip(range(max_iter), C.itersolve([(C.Not(x),)],N)):
                assert not(rhs[0] <= my_EVAL(eq2,sol) <= rhs[1]), C.clauses
        for _, sol in zip(range(max_iter), Cpos.itersolve([],N)):
            assert rhs[0] <= my_EVAL(eq2,sol) <= rhs[1], ('Cpos',Cpos.clauses)
        for _, sol in zip(range(max_iter), Cneg.itersolve([],N)):
            assert not(rhs[0] <= my_EVAL(eq2,sol) <= rhs[1]), ('Cneg',Cneg.clauses)
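
The test relies on a my_EVAL helper defined elsewhere in the test module. As an assumption (not the module's actual code), its semantics can be sketched as evaluating the pseudo-boolean sum against the literals that are true in a returned solution:

def my_EVAL(eq, sol):  # sketch only; the real helper lives in the test module
    true_lits = set(sol)
    def lit_is_true(a):
        if a is True or a is False:
            return a
        return (a in true_lits) if a > 0 else (-a not in true_lits)
    return sum(c for c, a in eq if lit_is_true(a))
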
Example 7
 def full_prune(specs, removes, optional, features):
     self.default_filter(features, filter)
     for ms in removes:
         for fn in self.find_matches(ms):
             filter[fn] = False
     feats = set(self.trackers.keys())
     snames.clear()
     specs = slist = list(specs)
     onames = set(s.name for s in specs)
     for iter in range(10):
         first = True
         while sum(filter_group([s]) for s in slist):
             slist = specs + [MatchSpec(n) for n in snames - onames]
             first = False
         if unsat:
             return False
         if first and iter:
             return True
         touched.clear()
         for fstr in features:
             touched[fstr+'@'] = True
         for spec in chain(specs, optional):
             self.touch(spec, touched, filter)
         nfeats = set()
         for fn, val in iteritems(touched):
             if val:
                 nfeats.update(self.track_features(fn))
         if len(nfeats) >= len(feats):
             return True
         pruned = False
         feats &= nfeats
         for fn, val in iteritems(touched):
             if val and self.features(fn) - feats:
                 touched[fn] = filter[fn] = False
                 pruned = True
         if not pruned:
             return True
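
The feature pass intersects the set of viable features with the features actually reachable from touched packages, then drops any touched package that needs a feature outside that set. A standalone sketch on made-up feature sets:

feats = {'mkl', 'debug'}                      # features still considered viable
nfeats = {'mkl'}                              # features reachable this round
features = {'a.tar.bz2': {'mkl'}, 'b.tar.bz2': {'debug'}}
touched = {'a.tar.bz2': True, 'b.tar.bz2': True}

feats &= nfeats
for fn, val in list(touched.items()):
    if val and features[fn] - feats:          # needs a feature we no longer carry
        touched[fn] = False

assert touched == {'a.tar.bz2': True, 'b.tar.bz2': False}
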
Example 8
 def __init__(self, index):
     self.index = index.copy()
     for fn, info in iteritems(index):
         for fstr in chain(info.get('features', '').split(),
                           info.get('track_features', '').split()):
             fpkg = fstr + '@'
             if fpkg not in self.index:
                 self.index[fpkg] = {
                     'name': fpkg, 'version': '0', 'build_number': 0,
                     'build': '', 'depends': [], 'track_features': fstr}
         for fstr in iterkeys(info.get('with_features_depends', {})):
             fn2 = fn + '[' + fstr + ']'
             self.index[fn2] = info
     self.groups, self.trackers = build_groups(self.index)
     self.find_matches_ = {}
     self.ms_depends_ = {}
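
A standalone before/after sketch of the bookkeeping in this constructor (hypothetical index entry, not a call into the class): a synthetic 'mkl@' feature package is injected, and a '[mkl]' variant of the entry is added for its with_features_depends.

info = {'name': 'numpy', 'version': '1.8', 'build_number': 0, 'build': 'py27_p0',
        'depends': ['python 2.7*'], 'features': 'mkl',
        'with_features_depends': {'mkl': ['mkl-rt 11.*']}}
index = {'numpy-1.8-py27_p0.tar.bz2': info}

expanded = dict(index)
for fn, inf in index.items():
    for fstr in inf.get('features', '').split() + inf.get('track_features', '').split():
        expanded.setdefault(fstr + '@', {
            'name': fstr + '@', 'version': '0', 'build_number': 0,
            'build': '', 'depends': [], 'track_features': fstr})
    for fstr in inf.get('with_features_depends', {}):
        expanded[fn + '[' + fstr + ']'] = inf

assert 'mkl@' in expanded
assert 'numpy-1.8-py27_p0.tar.bz2[mkl]' in expanded
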
Example 9
 def dependency_sort(self, must_have):
     def lookup(value):
         return set(ms.name for ms in self.ms_depends(value + '.tar.bz2'))
     digraph = {}
     if not isinstance(must_have, dict):
         must_have = {self.package_name(dist): dist for dist in must_have}
     for key, value in iteritems(must_have):
         depends = lookup(value)
         digraph[key] = depends
     sorted_keys = toposort(digraph)
     must_have = must_have.copy()
     # Take all of the items in the sorted keys
     # Don't fail if the key does not exist
     result = [must_have.pop(key) for key in sorted_keys if key in must_have]
     # Take any keys that were not sorted
     result.extend(must_have.values())
     return result
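
conda's toposort helper is not shown here; the same ordering idea, sketched with the standard library's graphlib as a stand-in, puts every dependency before the packages that need it:

from graphlib import TopologicalSorter  # Python 3.9+; stand-in for conda's toposort

digraph = {                # package -> names it depends on (made up)
    'python': set(),
    'numpy': {'python'},
    'scipy': {'numpy', 'python'},
}
order = list(TopologicalSorter(digraph).static_order())
assert order.index('python') < order.index('numpy') < order.index('scipy')
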
Example 10
 def generate_version_metrics(self, C, groups, specs):
     eqv = {}
     eqb = {}
     sdict = {}
     for s in specs:
         s = MatchSpec(s)  # needed for testing
         sdict.setdefault(s.name, []).append(s)
     for name, mss in iteritems(sdict):
         pkgs = [(self.version_key(p), p) for p in groups.get(name, [])]
         # If the "target" field in the MatchSpec is supplied, that means we want
         # to minimize the changes to the currently installed package. We prefer
         # any upgrade over any downgrade, but beyond that we want minimal change.
         targets = [ms.target for ms in mss if ms.target]
         if targets:
             tver = max(self.version_key(p) for p in targets)
             v1 = [p for p in pkgs if p[1] in targets]
             v2 = sorted((p for p in pkgs if p[0] >= tver and p[1] not in targets))
             v3 = sorted((p for p in pkgs if p[0] < tver), reverse=True)
             pkgs = v1 + v2 + v3
         else:
             pkgs = sorted(pkgs, reverse=True)
         pkey = ppkg = None
         for nkey, npkg in pkgs:
             if pkey is None:
                 iv = ib = 0
             elif pkey[0] != nkey[0]:
                 iv += 1
                 ib = 0
             elif pkey[1] != nkey[1]:
                 ib += 1
             if iv:
                 eqv[npkg] = iv
             if ib:
                 eqb[npkg] = ib
             pkey, ppkg = nkey, npkg
     return eqv, eqb
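
Once the packages are ordered newest-first, each change in the version part of the key bumps the version penalty and resets the build penalty, and each change in the build part bumps the build penalty. A standalone trace on hypothetical (version, build_number) keys:

pkgs = [(('1.9', 1), 'pkg-1.9-1'), (('1.9', 0), 'pkg-1.9-0'),
        (('1.8', 2), 'pkg-1.8-2'), (('1.8', 0), 'pkg-1.8-0')]

eqv, eqb = {}, {}
pkey = None
for nkey, npkg in pkgs:
    if pkey is None:
        iv = ib = 0
    elif pkey[0] != nkey[0]:   # version changed
        iv += 1
        ib = 0
    elif pkey[1] != nkey[1]:   # same version, different build
        ib += 1
    if iv:
        eqv[npkg] = iv
    if ib:
        eqb[npkg] = ib
    pkey = nkey

assert eqv == {'pkg-1.8-2': 1, 'pkg-1.8-0': 1}
assert eqb == {'pkg-1.9-0': 1, 'pkg-1.8-0': 1}
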
Example 11
    def minimize(self, objective, bestsol, trymax=False):
        """
        Minimize the objective function, given either as (coeff, integer)
        tuple pairs or as a dictionary of varname: coeff values. The actual
        minimization is multiobjective: first, we minimize the largest
        active coefficient value, then we minimize the sum.
        """
        if not objective:
            log.debug('Empty objective, trivial solution')
            return bestsol, 0
        elif self.unsat:
            log.debug('Constraints are unsatisfiable')
            return bestsol, sum(abs(c) for c, a in objective) + 1

        if type(objective) is dict:
            objective = [(v, self.varnum(k)) for k, v in iteritems(objective)]

        objective, offset = self.LB_Preprocess_(objective)
        maxval = max(c for c, a in objective)

        def peak_val(sol, odict):
            return max(odict.get(s, 0) for s in sol)

        def sum_val(sol, odict):
            return sum(odict.get(s, 0) for s in sol)

        lo = 0
        try0 = 0
        for peak in ((True, False) if maxval > 1 else (False, )):
            if peak:
                log.debug('Beginning peak minimization')
                objval = peak_val
            else:
                log.debug('Beginning sum minimization')
                objval = sum_val

            odict = {a: c for c, a in objective}
            bestval = objval(bestsol, odict)

            # If we got lucky and the initial solution is optimal, we still
            # need to generate the constraints at least once
            hi = bestval
            m_orig = self.m
            nz = len(self.clauses)
            if trymax and not peak:
                try0 = hi - 1

            log.debug("Initial range (%d,%d)" % (lo, hi))
            while True:
                if try0 is None:
                    mid = (lo + hi) // 2
                else:
                    mid = try0
                if peak:
                    self.Prevent(self.Any,
                                 tuple(a for c, a in objective if c > mid))
                    temp = tuple(a for c, a in objective if lo <= c <= mid)
                    if temp:
                        self.Require(self.Any, temp)
                else:
                    self.Require(self.LinearBound, objective, lo, mid, False)
                log.debug('Bisection attempt: (%d,%d), (%d+%d) clauses' %
                          (lo, mid, nz, len(self.clauses) - nz))
                newsol = self.sat()
                if newsol is None:
                    lo = mid + 1
                    log.debug("Bisection failure, new range=(%d,%d)" %
                              (lo, hi))
                    # If this was a failure of the first test after peak minimization,
                    # then it means that the peak minimizer is "tight" and we don't need
                    # any further constraints.
                else:
                    done = lo == mid
                    bestsol = newsol
                    bestval = objval(newsol, odict)
                    hi = bestval
                    log.debug("Bisection success, new range=(%d,%d)" %
                              (lo, hi))
                    if done:
                        break
                self.m = m_orig
                if len(self.clauses) > nz:
                    self.clauses = self.clauses[:nz]
                self.unsat = False
                try0 = None

            log.debug('Final %s objective: %d' %
                      ('peak' if peak else 'sum', bestval))
            if bestval == 0:
                break
            elif peak:
                # Now that we've minimized the peak value, we can drop any terms
                # with coefficients larger than this. Furthermore, since we know
                # at least one peak will be active, our lower bound for the sum
                # equals the peak.
                objective = [(c, a) for c, a in objective if c <= bestval]
                try0 = sum_val(bestsol, odict)
                lo = bestval
            else:
                log.debug('New peak objective: %d' % peak_val(bestsol, odict))

        return bestsol, bestval
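
Stripped of the SAT machinery, the loop is an ordinary bisection over the objective bound: given a monotone "is there a solution with objective <= mid?" test, halve the (lo, hi) range until a success at lo == mid. A minimal sketch with a toy feasibility oracle standing in for self.sat():

def bisect_min(lo, hi, feasible):
    # smallest v in [lo, hi] with feasible(v) True; assumes feasible is
    # monotone and feasible(hi) holds
    while True:
        mid = (lo + hi) // 2
        if feasible(mid):
            if lo == mid:
                return lo
            hi = mid
        else:
            lo = mid + 1

assert bisect_min(0, 20, lambda v: v >= 7) == 7   # pretend the true optimum is 7

The method above additionally tightens hi to the objective value of the solution it just found (which can be below mid) and rolls the temporary clauses back after every probe.
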
Example 12
        def filter_group(matches, chains=None):
            # If we are here, then this dependency is mandatory,
            # so add it to the master list. That way it still
            # participates in the pruning even if one of its
            # parents is pruned away
            match1 = next(ms for ms in matches)
            name = match1.name
            first = name not in snames
            group = self.groups.get(name, [])

            # Prune packages that don't match any of the patterns
            # or which have unsatisfiable dependencies
            nold = 0
            bad_deps = []
            for fn in group:
                if filter.setdefault(fn, True):
                    nold += 1
                    sat = self.match_any(matches, fn)
                    sat = sat and all(any(filter.get(f2, True) for f2 in self.find_matches(ms))
                                      for ms in self.ms_depends(fn))
                    filter[fn] = sat
                    if not sat:
                        bad_deps.append(fn)

            # Build dependency chains if we detect unsatisfiability
            nnew = nold - len(bad_deps)
            reduced = nnew < nold
            if reduced:
                log.debug('%s: pruned from %d -> %d' % (name, nold, nnew))
            if nnew == 0:
                if name in snames:
                    snames.remove(name)
                bad_deps = [fn for fn in bad_deps if self.match_any(matches, fn)]
                matches = [(ms,) for ms in matches]
                chains = [a + b for a in chains for b in matches] if chains else matches
                if bad_deps:
                    dep2 = set()
                    for fn in bad_deps:
                        for ms in self.ms_depends(fn):
                            if not any(filter.get(f2, True) for f2 in self.find_matches(ms)):
                                dep2.add(ms)
                    chains = [a + (b,) for a in chains for b in dep2]
                unsat.extend(chains)
                return nnew
            if not reduced and not first:
                return False

            # Perform the same filtering steps on any dependencies shared across
            # *all* packages in the group. Even if just one of the packages does
            # not have a particular dependency, it must be ignored in this pass.
            if first:
                snames.add(name)
                if match1 not in specs:
                    nspecs.add(MatchSpec(name))
            cdeps = defaultdict(list)
            for fn in group:
                if filter[fn]:
                    for m2 in self.ms_depends(fn):
                        if m2.name[0] != '@' and not m2.optional:
                            cdeps[m2.name].append(m2)
            cdeps = {mname: set(deps) for mname, deps in iteritems(cdeps) if len(deps) == nnew}
            if cdeps:
                matches = [(ms,) for ms in matches]
                if chains:
                    matches = [a + b for a in chains for b in matches]
                if sum(filter_group(deps, chains) for deps in itervalues(cdeps)):
                    reduced = True

            return reduced
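
The cdeps step keeps only dependency names imposed by every surviving build in the group; a dependency missing from even one build is ignored in this pass. A standalone sketch of that selection on made-up spec strings:

from collections import defaultdict

deps_by_build = {                       # surviving builds of one package
    'foo-1.0-0.tar.bz2': ['python 2.7*', 'zlib 1.2*'],
    'foo-1.1-0.tar.bz2': ['python 2.7*'],
}
nnew = len(deps_by_build)

cdeps = defaultdict(list)
for fn, deps in deps_by_build.items():
    for spec in deps:
        cdeps[spec.split()[0]].append(spec)
cdeps = {name: set(specs) for name, specs in cdeps.items() if len(specs) == nnew}

assert cdeps == {'python': {'python 2.7*'}}   # only 'python' is common to all builds
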
Example 13
    def get_dists(self, specs):
        log.debug('Retrieving packages for: %s' % specs)

        specs, removes, optional, features = self.verify_specs(specs)
        filter = {}
        touched = {}
        snames = set()
        nspecs = set()
        unsat = []

        def filter_group(matches, chains=None):
            # If we are here, then this dependency is mandatory,
            # so add it to the master list. That way it still
            # participates in the pruning even if one of its
            # parents is pruned away
            match1 = next(ms for ms in matches)
            name = match1.name
            first = name not in snames
            group = self.groups.get(name, [])

            # Prune packages that don't match any of the patterns
            # or which have unsatisfiable dependencies
            nold = 0
            bad_deps = []
            for fn in group:
                if filter.setdefault(fn, True):
                    nold += 1
                    sat = self.match_any(matches, fn)
                    sat = sat and all(any(filter.get(f2, True) for f2 in self.find_matches(ms))
                                      for ms in self.ms_depends(fn))
                    filter[fn] = sat
                    if not sat:
                        bad_deps.append(fn)

            # Build dependency chains if we detect unsatisfiability
            nnew = nold - len(bad_deps)
            reduced = nnew < nold
            if reduced:
                log.debug('%s: pruned from %d -> %d' % (name, nold, nnew))
            if nnew == 0:
                if name in snames:
                    snames.remove(name)
                bad_deps = [fn for fn in bad_deps if self.match_any(matches, fn)]
                matches = [(ms,) for ms in matches]
                chains = [a + b for a in chains for b in matches] if chains else matches
                if bad_deps:
                    dep2 = set()
                    for fn in bad_deps:
                        for ms in self.ms_depends(fn):
                            if not any(filter.get(f2, True) for f2 in self.find_matches(ms)):
                                dep2.add(ms)
                    chains = [a + (b,) for a in chains for b in dep2]
                unsat.extend(chains)
                return nnew
            if not reduced and not first:
                return False

            # Perform the same filtering steps on any dependencies shared across
            # *all* packages in the group. Even if just one of the packages does
            # not have a particular dependency, it must be ignored in this pass.
            if first:
                snames.add(name)
                if match1 not in specs:
                    nspecs.add(MatchSpec(name))
            cdeps = defaultdict(list)
            for fn in group:
                if filter[fn]:
                    for m2 in self.ms_depends(fn):
                        if m2.name[0] != '@' and not m2.optional:
                            cdeps[m2.name].append(m2)
            cdeps = {mname: set(deps) for mname, deps in iteritems(cdeps) if len(deps) == nnew}
            if cdeps:
                matches = [(ms,) for ms in matches]
                if chains:
                    matches = [a + b for a in chains for b in matches]
                if sum(filter_group(deps, chains) for deps in itervalues(cdeps)):
                    reduced = True

            return reduced

        # Iterate the filtering process until no more progress is made
        def full_prune(specs, removes, optional, features):
            self.default_filter(features, filter)
            for ms in removes:
                for fn in self.find_matches(ms):
                    filter[fn] = False
            feats = set(self.trackers.keys())
            snames.clear()
            specs = slist = list(specs)
            onames = set(s.name for s in specs)
            for iter in range(10):
                first = True
                while sum(filter_group([s]) for s in slist):
                    slist = specs + [MatchSpec(n) for n in snames - onames]
                    first = False
                if unsat:
                    return False
                if first and iter:
                    return True
                touched.clear()
                for fstr in features:
                    touched[fstr+'@'] = True
                for spec in chain(specs, optional):
                    self.touch(spec, touched, filter)
                nfeats = set()
                for fn, val in iteritems(touched):
                    if val:
                        nfeats.update(self.track_features(fn))
                if len(nfeats) >= len(feats):
                    return True
                pruned = False
                feats &= nfeats
                for fn, val in iteritems(touched):
                    if val and self.features(fn) - feats:
                        touched[fn] = filter[fn] = False
                        pruned = True
                if not pruned:
                    return True

        #
        # In the case of a conflict, look for a minimal unsatisfiable subset
        #

        if not full_prune(specs, removes, optional, features):
            def minsat_prune(specs):
                return full_prune(specs, removes, [], features)

            save_unsat = set(s for s in unsat if s[0] in specs)
            stderrlog.info('...')
            hint = minimal_unsatisfiable_subset(specs, sat=minsat_prune, log=False)
            save_unsat.update((ms,) for ms in hint)
            raise Unsatisfiable(save_unsat)

        dists = {fn: self.index[fn] for fn, val in iteritems(touched) if val}
        return dists, list(map(MatchSpec, snames - {ms.name for ms in specs}))
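
When full_prune reports a conflict, minimal_unsatisfiable_subset (not shown here) shrinks the offending specs before Unsatisfiable is raised. A simple deletion-based sketch of that idea, with sat standing in for any predicate that says whether a set of specs is satisfiable (illustrative, not conda's implementation):

def minimal_unsat_subset(specs, sat):
    # drop any spec whose removal keeps the remaining set unsatisfiable
    core = list(specs)
    for s in list(core):
        trial = [t for t in core if t is not s]
        if not sat(trial):
            core = trial
    return core

sat = lambda ss: not ('a >1.0' in ss and 'a <1.0' in ss)   # toy conflict
assert sorted(minimal_unsat_subset(['a >1.0', 'a <1.0', 'b'], sat)) == ['a <1.0', 'a >1.0']
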
Example 14
    def minimize(self, objective, bestsol, trymax=False):
        """
        Minimize the objective function, given either as (coeff, integer)
        tuple pairs or as a dictionary of varname: coeff values. The actual
        minimization is multiobjective: first, we minimize the largest
        active coefficient value, then we minimize the sum.
        """
        if not objective:
            log.debug('Empty objective, trivial solution')
            return bestsol, 0
        elif self.unsat:
            log.debug('Constraints are unsatisfiable')
            return bestsol, sum(abs(c) for c, a in objective) + 1

        if type(objective) is dict:
            objective = [(v, self.varnum(k)) for k, v in iteritems(objective)]

        objective, offset = self.LB_Preprocess_(objective)
        maxval = max(c for c, a in objective)

        def peak_val(sol, odict):
            return max(odict.get(s, 0) for s in sol)

        def sum_val(sol, odict):
            return sum(odict.get(s, 0) for s in sol)

        lo = 0
        try0 = 0
        for peak in ((True, False) if maxval > 1 else (False,)):
            if peak:
                log.debug('Beginning peak minimization')
                objval = peak_val
            else:
                log.debug('Beginning sum minimization')
                objval = sum_val

            odict = {a: c for c, a in objective}
            bestval = objval(bestsol, odict)

            # If we got lucky and the initial solution is optimal, we still
            # need to generate the constraints at least once
            hi = bestval
            m_orig = self.m
            nz = len(self.clauses)
            if trymax and not peak:
                try0 = hi - 1

            log.debug("Initial range (%d,%d)" % (lo, hi))
            while True:
                if try0 is None:
                    mid = (lo+hi) // 2
                else:
                    mid = try0
                if peak:
                    self.Prevent(self.Any, tuple(a for c, a in objective if c > mid))
                    temp = tuple(a for c, a in objective if lo <= c <= mid)
                    if temp:
                        self.Require(self.Any, temp)
                else:
                    self.Require(self.LinearBound, objective, lo, mid, False)
                log.debug('Bisection attempt: (%d,%d), (%d+%d) clauses' %
                          (lo, mid, nz, len(self.clauses)-nz))
                newsol = self.sat()
                if newsol is None:
                    lo = mid + 1
                    log.debug("Bisection failure, new range=(%d,%d)" % (lo, hi))
                    # If this was a failure of the first test after peak minimization,
                    # then it means that the peak minimizer is "tight" and we don't need
                    # any further constraints.
                else:
                    done = lo == mid
                    bestsol = newsol
                    bestval = objval(newsol, odict)
                    hi = bestval
                    log.debug("Bisection success, new range=(%d,%d)" % (lo, hi))
                    if done:
                        break
                self.m = m_orig
                if len(self.clauses) > nz:
                    self.clauses = self.clauses[:nz]
                self.unsat = False
                try0 = None

            log.debug('Final %s objective: %d' % ('peak' if peak else 'sum', bestval))
            if bestval == 0:
                break
            elif peak:
                # Now that we've minimized the peak value, we can drop any terms
                # with coefficients larger than this. Furthermore, since we know
                # at least one peak will be active, our lower bound for the sum
                # equals the peak.
                objective = [(c, a) for c, a in objective if c <= bestval]
                try0 = sum_val(bestsol, odict)
                lo = bestval
            else:
                log.debug('New peak objective: %d' % peak_val(bestsol, odict))

        return bestsol, bestval