Code Example #1
File: exceptions.py  Project: njalerikson/conda
def maybe_raise(error, context):
    if isinstance(error, CondaMultiError):
        groups = groupby(lambda e: isinstance(e, ClobberError), error.errors)
        clobber_errors = groups.get(True, ())
        groups = groupby(lambda e: isinstance(e, SafetyError), groups.get(False, ()))
        safety_errors = groups.get(True, ())
        other_errors = groups.get(False, ())

        if ((safety_errors and context.safety_checks == SafetyChecks.enabled)
                or (clobber_errors and context.path_conflict == PathConflict.prevent
                    and not context.clobber)
                or other_errors):
            raise error
        elif ((safety_errors and context.safety_checks == SafetyChecks.warn)
              or (clobber_errors and context.path_conflict == PathConflict.warn
                  and not context.clobber)):
            print_conda_exception(error)

    elif isinstance(error, ClobberError):
        if context.path_conflict == PathConflict.prevent and not context.clobber:
            raise error
        elif context.path_conflict == PathConflict.warn and not context.clobber:
            print_conda_exception(error)

    elif isinstance(error, SafetyError):
        if context.safety_checks == SafetyChecks.enabled:
            raise error
        elif context.safety_checks == SafetyChecks.warn:
            print_conda_exception(error)

    else:
        raise error
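
Both versions of maybe_raise above lean on the same idiom: calling toolz.groupby with a boolean predicate splits a sequence into at most two buckets keyed True and False, and .get(True, ()) / .get(False, ()) read them back with a safe default when a bucket is absent. A minimal, self-contained sketch of that idiom (the exception classes below are stand-ins, not conda's):

from toolz import groupby

class ClobberError(Exception): pass
class SafetyError(Exception): pass

errors = [ClobberError(), SafetyError(), ValueError('other'), ClobberError()]

# First pass: split out the ClobberErrors.
groups = groupby(lambda e: isinstance(e, ClobberError), errors)
clobber_errors = groups.get(True, ())   # the two ClobberError instances
rest = groups.get(False, ())

# Second pass over the remainder: split out the SafetyErrors.
groups = groupby(lambda e: isinstance(e, SafetyError), rest)
safety_errors = groups.get(True, ())    # the SafetyError instance
other_errors = groups.get(False, ())    # the ValueError instance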
Code Example #2
def maybe_raise(error, context):
    if isinstance(error, CondaMultiError):
        groups = groupby(lambda e: isinstance(e, ClobberError), error.errors)
        clobber_errors = groups.get(True, ())
        groups = groupby(lambda e: isinstance(e, SafetyError),
                         groups.get(False, ()))
        safety_errors = groups.get(True, ())
        other_errors = groups.get(False, ())

        if ((safety_errors and context.safety_checks == SafetyChecks.enabled)
                or (clobber_errors and context.path_conflict == PathConflict.prevent
                    and not context.clobber)
                or other_errors):
            raise error
        elif ((safety_errors and context.safety_checks == SafetyChecks.warn)
              or (clobber_errors and context.path_conflict == PathConflict.warn
                  and not context.clobber)):
            print_conda_exception(error)

    elif isinstance(error, ClobberError):
        if context.path_conflict == PathConflict.prevent and not context.clobber:
            raise error
        elif context.path_conflict == PathConflict.warn and not context.clobber:
            print_conda_exception(error)

    elif isinstance(error, SafetyError):
        if context.safety_checks == SafetyChecks.enabled:
            raise error
        elif context.safety_checks == SafetyChecks.warn:
            print_conda_exception(error)

    else:
        raise error
Code Example #3
File: test_itertoolz.py  Project: artemru/cytoolz
def test_groupby_non_callable():
    assert groupby(0, [(1, 2), (1, 3), (2, 2), (2, 4)]) == \
        {1: [(1, 2), (1, 3)],
         2: [(2, 2), (2, 4)]}

    assert groupby([0], [(1, 2), (1, 3), (2, 2), (2, 4)]) == \
        {(1,): [(1, 2), (1, 3)],
         (2,): [(2, 2), (2, 4)]}

    assert groupby([0, 0], [(1, 2), (1, 3), (2, 2), (2, 4)]) == \
        {(1, 1): [(1, 2), (1, 3)],
         (2, 2): [(2, 2), (2, 4)]}
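
As this test shows, groupby in toolz/cytoolz also accepts non-callable keys: an integer index behaves like operator.itemgetter, and a list of indices keys each element by a tuple of the selected items. A short sketch of the equivalence (written against plain toolz; the inputs are illustrative):

from operator import itemgetter
from toolz import groupby

pairs = [(1, 2), (1, 3), (2, 2), (2, 4)]

assert groupby(0, pairs) == groupby(itemgetter(0), pairs)
# A list of indices groups by a tuple of the selected positions.
assert groupby([0], pairs) == groupby(lambda t: (t[0],), pairs)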
Code Example #4
File: plan.py  Project: zero-raspberry/conda
def inject_UNLINKLINKTRANSACTION(plan, index, prefix, axn, specs):
    # this is only used for conda-build at this point
    first_unlink_link_idx = next(
        (q for q, p in enumerate(plan) if p[0] in (UNLINK, LINK)), -1)
    if first_unlink_link_idx >= 0:
        grouped_instructions = groupby(lambda x: x[0], plan)
        unlink_dists = tuple(
            Dist(d[1]) for d in grouped_instructions.get(UNLINK, ()))
        link_dists = tuple(
            Dist(d[1]) for d in grouped_instructions.get(LINK, ()))
        unlink_dists, link_dists = handle_menuinst(unlink_dists, link_dists)

        if isdir(prefix):
            unlink_precs = tuple(index[d] for d in unlink_dists)
        else:
            # there's nothing to unlink in an environment that doesn't exist
            # this is a hack for what appears to be a logic error in conda-build
            # caught in tests/test_subpackages.py::test_subpackage_recipes[python_test_dep]
            unlink_precs = ()
        link_precs = tuple(index[d] for d in link_dists)

        pfe = ProgressiveFetchExtract(link_precs)
        pfe.prepare()

        stp = PrefixSetup(prefix, unlink_precs, link_precs, (), specs)
        plan.insert(first_unlink_link_idx,
                    (UNLINKLINKTRANSACTION, UnlinkLinkTransaction(stp)))
        plan.insert(first_unlink_link_idx, (PROGRESSIVEFETCHEXTRACT, pfe))
    elif axn in ('INSTALL', 'CREATE'):
        plan.insert(0, (UNLINKLINKTRANSACTION, (prefix, (), (), (), specs)))

    return plan
Code Example #5
    async def write_pending_messages(self) -> int:
        """Flushes the buffer, if there are items in it, to the message store.

        The return value is the number of records that were successfully synced.
        """
        if self._pending.empty():
            return 0

        total = 0
        split_n = math.ceil(self._pending.maxsize / 4.0)

        # ensure the queue is empty before returning
        while not self._pending.empty():
            pending: List[SerializedMessage] = []

            # flush the entire queue
            while not self._pending.empty():
                pending.append(await self._pending.get())

            # divide all the pending into like-typed instances
            partitioned = groupby(attrgetter('type'), pending)
            for _, items in partitioned.items():
                for idx, bundle in enumerate(partition_all(split_n, items)):
                    async with self.connection('write_pending_messages-%d' %
                                               idx) as c:
                        async with c.transaction():
                            for msg in bundle:
                                await c.fetchrow(Procs.write_message, *msg)
                                total += 1
        # ~~ no more in pending queue
        return total
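
The method above drains a queue, groups the drained messages by an attribute with attrgetter, and writes each group in fixed-size chunks via partition_all (note that partition_all yields the chunks themselves, so enumerate is needed to number them). A self-contained sketch of the grouping-plus-chunking step, with a made-up message type:

from collections import namedtuple
from operator import attrgetter
from toolz import groupby, partition_all

Msg = namedtuple('Msg', 'type payload')
pending = [Msg('a', 1), Msg('b', 2), Msg('a', 3), Msg('a', 4)]

partitioned = groupby(attrgetter('type'), pending)
for mtype, items in partitioned.items():
    # enumerate() numbers the chunks that partition_all yields
    for idx, bundle in enumerate(partition_all(2, items)):
        print(mtype, idx, bundle)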
Code Example #6
    def all_caches_writable_first(cls, pkgs_dirs=None):
        if pkgs_dirs is None:
            pkgs_dirs = context.pkgs_dirs
        pc_groups = groupby(lambda pc: pc.is_writable,
                            (cls(pd) for pd in pkgs_dirs))
        return tuple(concatv(pc_groups.get(True, ()), pc_groups.get(False, ())))
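
groupby preserves the relative order of items within each group, so concatv(pc_groups.get(True, ()), pc_groups.get(False, ())) returns every writable cache first while keeping the original pkgs_dirs priority inside each half. A sketch of this stable partition with stand-in (name, is_writable) records:

from toolz import groupby, concatv

caches = [('A', False), ('B', True), ('C', False), ('D', True)]
groups = groupby(lambda c: c[1], caches)
ordered = tuple(concatv(groups.get(True, ()), groups.get(False, ())))
assert ordered == (('B', True), ('D', True), ('A', False), ('C', False))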
Code Example #7
def inject_UNLINKLINKTRANSACTION(plan, index, prefix):
    # TODO: we really shouldn't be mutating the plan list here; turn plan into a tuple
    first_unlink_link_idx = next(
        (q for q, p in enumerate(plan) if p[0] in (UNLINK, LINK)), -1)
    if first_unlink_link_idx >= 0:
        grouped_instructions = groupby(lambda x: x[0], plan)
        unlink_dists = tuple(
            Dist(d[1]) for d in grouped_instructions.get(UNLINK, ()))
        link_dists = tuple(
            Dist(d[1]) for d in grouped_instructions.get(LINK, ()))
        unlink_dists, link_dists = handle_menuinst(unlink_dists, link_dists)

        # make sure prefix directory exists
        if link_dists:
            if not isdir(prefix):
                try:
                    mkdir_p(prefix)
                except (IOError, OSError) as e:
                    log.debug(repr(e))
                    raise CondaError(
                        "Unable to create prefix directory '%s'.\n"
                        "Check that you have sufficient permissions." % prefix)

        # TODO: ideally we'd move these two lines before both the y/n confirmation and the --dry-run exit  # NOQA
        pfe = ProgressiveFetchExtract(index, link_dists)
        pfe.prepare()

        plan.insert(first_unlink_link_idx,
                    (UNLINKLINKTRANSACTION, (unlink_dists, link_dists)))
        plan.insert(first_unlink_link_idx, (PROGRESSIVEFETCHEXTRACT, pfe))

        # plan = [p for p in plan if p[0] not in (UNLINK, LINK)]  # filter out unlink/link
        # don't filter LINK and UNLINK, just don't do anything with them
    return plan
Code Example #8
def inject_UNLINKLINKTRANSACTION(plan, index, prefix, axn, specs):
    # TODO: we really shouldn't be mutating the plan list here; turn plan into a tuple
    first_unlink_link_idx = next(
        (q for q, p in enumerate(plan) if p[0] in (UNLINK, LINK)), -1)
    if first_unlink_link_idx >= 0:
        grouped_instructions = groupby(lambda x: x[0], plan)
        unlink_dists = tuple(
            Dist(d[1]) for d in grouped_instructions.get(UNLINK, ()))
        link_dists = tuple(
            Dist(d[1]) for d in grouped_instructions.get(LINK, ()))
        unlink_dists, link_dists = handle_menuinst(unlink_dists, link_dists)

        unlink_precs = tuple(index[d] for d in unlink_dists)
        link_precs = tuple(index[d] for d in link_dists)

        # TODO: ideally we'd move these two lines before both the y/n confirmation and the --dry-run exit  # NOQA
        pfe = ProgressiveFetchExtract(link_precs)
        pfe.prepare()

        stp = PrefixSetup(prefix, unlink_precs, link_precs, (), specs)
        plan.insert(first_unlink_link_idx,
                    (UNLINKLINKTRANSACTION, UnlinkLinkTransaction(stp)))
        plan.insert(first_unlink_link_idx, (PROGRESSIVEFETCHEXTRACT, pfe))
    elif axn in ('INSTALL', 'CREATE'):
        plan.insert(0, (UNLINKLINKTRANSACTION, (prefix, (), (), (), specs)))

    return plan
Code Example #9
File: resolve.py  Project: ymd2015/conda
    def gen_clauses(self):
        C = Clauses()
        for name, group in iteritems(self.groups):
            group = [self.to_sat_name(dist) for dist in group]
            # Create one variable for each package
            for sat_name in group:
                C.new_var(sat_name)
            # Create one variable for the group
            m = C.new_var(self.to_sat_name(MatchSpec(name)))

            # Exactly one of the package variables, OR
            # the negation of the group variable, is true
            C.Require(C.ExactlyOne, group + [C.Not(m)])

        # If a package is installed, its dependencies must be as well
        for dist in iterkeys(self.index):
            nkey = C.Not(self.to_sat_name(dist))
            for ms in self.ms_depends(dist):
                C.Require(C.Or, nkey, self.push_MatchSpec(C, ms))

        tracker_groups = groupby(lambda x: x.split('=', 1)[0], self.trackers)
        for tracker_name, values in iteritems(tracker_groups):
            if len(values) > 1:
                C.Require(
                    C.AtMostOne,
                    tuple(
                        self.push_MatchSpec(C, MatchSpec(
                            provides_features=feat)) for feat in values))

        log.debug("gen_clauses returning with clause count: %s",
                  len(C.clauses))
        return C
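
Here groupby buckets tracker strings by the text before the first '=', and only groups with more than one member get an AtMostOne clause. The keying step in isolation, assuming simple name=value strings:

from toolz import groupby

trackers = ['mkl=2018', 'mkl=2019', 'openblas=0.3']
tracker_groups = groupby(lambda x: x.split('=', 1)[0], trackers)
multi = {k: v for k, v in tracker_groups.items() if len(v) > 1}
assert multi == {'mkl': ['mkl=2018', 'mkl=2019']}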
Code Example #10
def inject_UNLINKLINKTRANSACTION(plan, index, prefix):
    # TODO: we really shouldn't be mutating the plan list here; turn plan into a tuple
    first_unlink_link_idx = next((q for q, p in enumerate(plan) if p[0] in (UNLINK, LINK)), -1)
    if first_unlink_link_idx >= 0:
        grouped_instructions = groupby(lambda x: x[0], plan)
        unlink_dists = tuple(Dist(d[1]) for d in grouped_instructions.get(UNLINK, ()))
        link_dists = tuple(Dist(d[1]) for d in grouped_instructions.get(LINK, ()))
        unlink_dists, link_dists = handle_menuinst(unlink_dists, link_dists)

        # make sure prefix directory exists
        if link_dists:
            if not isdir(prefix):
                try:
                    mkdir_p(prefix)
                except (IOError, OSError) as e:
                    log.debug(repr(e))
                    raise CondaError("Unable to create prefix directory '%s'.\n"
                                     "Check that you have sufficient permissions." % prefix)

        # TODO: ideally we'd move these two lines before both the y/n confirmation and the --dry-run exit  # NOQA
        pfe = ProgressiveFetchExtract(index, link_dists)
        pfe.prepare()

        plan.insert(first_unlink_link_idx, (UNLINKLINKTRANSACTION, (unlink_dists, link_dists)))
        plan.insert(first_unlink_link_idx, (PROGRESSIVEFETCHEXTRACT, pfe))

        # plan = [p for p in plan if p[0] not in (UNLINK, LINK)]  # filter out unlink/link
        # don't filter LINK and UNLINK, just don't do anything with them
    return plan
Code Example #11
File: package_cache_data.py  Project: jhunkeler/conda
    def all_caches_writable_first(cls, pkgs_dirs=None):
        if pkgs_dirs is None:
            pkgs_dirs = context.pkgs_dirs
        pc_groups = groupby(
            lambda pc: pc.is_writable,
            (cls(pd) for pd in pkgs_dirs)
        )
        return tuple(concatv(pc_groups.get(True, ()), pc_groups.get(False, ())))
Code Example #12
File: match_spec.py  Project: DamirAinullin/PTVS
    def merge(cls, match_specs):
        match_specs = tuple(cls(s) for s in match_specs)
        grouped = groupby(lambda spec: spec.get_exact_value('name'), match_specs)
        dont_merge_these = grouped.pop('*', []) + grouped.pop(None, [])
        specs_map = {
            name: reduce(lambda x, y: x._merge(y), specs) if len(specs) > 1 else specs[0]
            for name, specs in iteritems(grouped)
        }
        return tuple(concatv(itervalues(specs_map), dont_merge_these))
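
merge groups specs by name, sets aside the buckets that cannot be merged (the '*' wildcard and unnamed specs keyed None), and folds each remaining group with reduce. The same shape with plain strings standing in for MatchSpec objects (the merge step here is a toy string concatenation):

from functools import reduce
from toolz import groupby, concatv

specs = ['numpy >=1.15', 'numpy <2', 'scipy 1.2']
grouped = groupby(lambda s: s.split()[0], specs)
dont_merge_these = grouped.pop('*', []) + grouped.pop(None, [])
specs_map = {
    name: reduce(lambda x, y: '%s,%s' % (x, y.split(None, 1)[1]), group)
    if len(group) > 1 else group[0]
    for name, group in grouped.items()
}
result = tuple(concatv(specs_map.values(), dont_merge_these))
assert specs_map['numpy'] == 'numpy >=1.15,<2'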
Code Example #13
    def get_user_requests(self):
        """
        return a list of user requested items.  Each item is a dict with the
        following keys:
        'date': the date and time running the command
        'cmd': a list of argv of the actual command which was run
        'action': install/remove/update
        'specs': the specs being used
        """
        res = []
        for dt, unused_cont, comments in self.parse():
            item = {'date': dt}
            for line in comments:
                comment_items = self._parse_comment_line(line)
                item.update(comment_items)

            if 'cmd' in item:
                res.append(item)

            dists = groupby(itemgetter(0), unused_cont)
            item['unlink_dists'] = dists.get('-', ())
            item['link_dists'] = dists.get('+', ())

        conda_versions_from_history = tuple(x['conda_version'] for x in res
                                            if 'conda_version' in x)
        if conda_versions_from_history:
            minimum_conda_version = sorted(conda_versions_from_history,
                                           key=VersionOrder)[-1]
            minimum_major_minor = '.'.join(
                take(2, minimum_conda_version.split('.')))
            current_major_minor = '.'.join(take(2, CONDA_VERSION.split('.')))
            if VersionOrder(current_major_minor) < VersionOrder(
                    minimum_major_minor):
                message = dals("""
                This environment has previously been operated on by a conda version that's newer
                than the conda currently being used. A newer version of conda is required.
                  target environment location: %(target_prefix)s
                  current conda version: %(conda_version)s
                  minimum conda version: %(minimum_version)s
                """) % {
                    "target_prefix": self.prefix,
                    "conda_version": CONDA_VERSION,
                    "minimum_version": minimum_major_minor,
                }
                if not paths_equal(self.prefix, context.root_prefix):
                    message += dedent("""
                    Update conda and try again.
                        $ conda install -p "%(base_prefix)s" "conda>=%(minimum_version)s"
                    """) % {
                        "base_prefix": context.root_prefix,
                        "minimum_version": minimum_major_minor,
                    }
                raise CondaUpgradeError(message)

        return res
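
groupby(itemgetter(0), unused_cont) buckets each history entry's package lines by their first character, so lines prefixed '-' become unlink dists and lines prefixed '+' become link dists. The same trick on plain strings:

from operator import itemgetter
from toolz import groupby

lines = ['+numpy-1.16-py37_0', '-numpy-1.15-py37_0', '+scipy-1.2-py37_0']
dists = groupby(itemgetter(0), lines)
assert dists.get('-', ()) == ['-numpy-1.15-py37_0']
assert dists.get('+', ()) == ['+numpy-1.16-py37_0', '+scipy-1.2-py37_0']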
Code Example #14
def mix_by_recording_id(cut_manifests: List[Pathlike],
                        output_cut_manifest: Pathlike):
    """
    Create a CutSet stored in OUTPUT_CUT_MANIFEST by matching the Cuts from CUT_MANIFESTS by their recording IDs
    and mixing them together.
    """
    all_cuts = combine(*[CutSet.from_json(path) for path in cut_manifests])
    recording_id_to_cuts = groupby(lambda cut: cut.recording_id, all_cuts)
    mixed_cut_set = CutSet.from_cuts(
        mix_cuts(cuts) for recording_id, cuts in recording_id_to_cuts.items())
    mixed_cut_set.to_json(output_cut_manifest)
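
Grouping cuts by recording_id and then mixing each group is the classic group-then-aggregate shape; with toolz it can also be expressed with valmap over the grouped dict. A sketch with summation standing in for mix_cuts (the tuples below are illustrative, not lhotse's API):

from toolz import groupby, valmap

cuts = [('rec1', 1.0), ('rec1', 2.0), ('rec2', 5.0)]
by_recording = groupby(lambda c: c[0], cuts)
mixed = valmap(lambda group: sum(d for _, d in group), by_recording)
assert mixed == {'rec1': 3.0, 'rec2': 5.0}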
Code Example #15
    def merge(cls, match_specs):
        match_specs = tuple(cls(s) for s in match_specs if s)
        name_groups = groupby(attrgetter('name'), match_specs)
        unmergeable = name_groups.pop('*', []) + name_groups.pop(None, [])

        merged_specs = []
        mergeable_groups = tuple(
            concat(
                itervalues(groupby(lambda s: s.optional, group))
                for group in itervalues(name_groups)))
        for group in mergeable_groups:
            target_groups = groupby(attrgetter('target'), group)
            target_groups.pop(None, None)
            if len(target_groups) > 1:
                raise ValueError("Incompatible MatchSpec merge:%s" %
                                 dashlist(group))
            merged_specs.append(
                reduce(lambda x, y: x._merge(y), group) if len(group) > 1
                else group[0])
        return tuple(concatv(merged_specs, unmergeable))
Code Example #16
File: history.py  Project: DamirAinullin/PTVS
    def get_user_requests(self):
        """
        return a list of user requested items.  Each item is a dict with the
        following keys:
        'date': the date and time running the command
        'cmd': a list of argv of the actual command which was run
        'action': install/remove/update
        'specs': the specs being used
        """
        res = []
        for dt, unused_cont, comments in self.parse():
            item = {'date': dt}
            for line in comments:
                comment_items = self._parse_comment_line(line)
                item.update(comment_items)

            if 'cmd' in item:
                res.append(item)

            dists = groupby(itemgetter(0), unused_cont)
            item['unlink_dists'] = dists.get('-', ())
            item['link_dists'] = dists.get('+', ())

        conda_versions_from_history = tuple(x['conda_version'] for x in res
                                            if 'conda_version' in x)
        if conda_versions_from_history:
            minimum_conda_version = sorted(conda_versions_from_history, key=VersionOrder)[-1]
            minimum_major_minor = '.'.join(take(2, minimum_conda_version.split('.')))
            current_major_minor = '.'.join(take(2, CONDA_VERSION.split('.')))
            if VersionOrder(current_major_minor) < VersionOrder(minimum_major_minor):
                message = dals("""
                This environment has previously been operated on by a conda version that's newer
                than the conda currently being used. A newer version of conda is required.
                  target environment location: %(target_prefix)s
                  current conda version: %(conda_version)s
                  minimum conda version: %(minimum_version)s
                """) % {
                    "target_prefix": self.prefix,
                    "conda_version": CONDA_VERSION,
                    "minimum_version": minimum_major_minor,
                }
                if not paths_equal(self.prefix, context.root_prefix):
                    message += dedent("""
                    Update conda and try again.
                        $ conda install -p "%(base_prefix)s" "conda>=%(minimum_version)s"
                    """) % {
                        "base_prefix": context.root_prefix,
                        "minimum_version": minimum_major_minor,
                    }
                raise CondaUpgradeError(message)

        return res
Code Example #17
def from_environment(name, prefix, no_builds=False, ignore_channels=False):
    """
        Get environment object from prefix
    Args:
        name: The name of environment
        prefix: The path of prefix
        no_builds: Whether has build requirement
        ignore_channels: whether ignore_channels

    Returns:     Environment object
    """
    # requested_specs_map = History(prefix).get_requested_specs_map()
    pd = PrefixData(prefix, pip_interop_enabled=True)

    precs = tuple(PrefixGraph(pd.iter_records()).graph)
    grouped_precs = groupby(lambda x: x.package_type, precs)
    conda_precs = sorted(concatv(
        grouped_precs.get(None, ()),
        grouped_precs.get(PackageType.NOARCH_GENERIC, ()),
        grouped_precs.get(PackageType.NOARCH_PYTHON, ()),
    ), key=lambda x: x.name)

    pip_precs = sorted(
        concatv(
            grouped_precs.get(PackageType.VIRTUAL_PYTHON_WHEEL, ()),
            grouped_precs.get(PackageType.VIRTUAL_PYTHON_EGG_MANAGEABLE, ()),
            grouped_precs.get(PackageType.VIRTUAL_PYTHON_EGG_UNMANAGEABLE, ()),
            # grouped_precs.get(PackageType.SHADOW_PYTHON_EGG_LINK, ()),
        ),
        key=lambda x: x.name)

    if no_builds:
        dependencies = ['='.join((a.name, a.version)) for a in conda_precs]
    else:
        dependencies = [
            '='.join((a.name, a.version, a.build)) for a in conda_precs
        ]
    if pip_precs:
        dependencies.append(
            {'pip': ["%s==%s" % (a.name, a.version) for a in pip_precs]})

    channels = list(context.channels)
    if not ignore_channels:
        for prec in conda_precs:
            canonical_name = prec.channel.canonical_name
            if canonical_name not in channels:
                channels.insert(0, canonical_name)
    return Environment(name=name,
                       dependencies=dependencies,
                       channels=channels,
                       prefix=prefix)
Code Example #18
File: plan.py  Project: ESSS/conda
def inject_UNLINKLINKTRANSACTION(plan, index, prefix):
    # TODO: we really shouldn't be mutating the plan list here; turn plan into a tuple
    first_unlink_link_idx = next((q for q, p in enumerate(plan) if p[0] in (UNLINK, LINK)), -1)
    if first_unlink_link_idx >= 0:
        grouped_instructions = groupby(lambda x: x[0], plan)
        unlink_dists = tuple(Dist(d[1]) for d in grouped_instructions.get(UNLINK, ()))
        link_dists = tuple(Dist(d[1]) for d in grouped_instructions.get(LINK, ()))
        unlink_dists, link_dists = handle_menuinst(unlink_dists, link_dists)

        # TODO: ideally we'd move these two lines before both the y/n confirmation and the --dry-run exit  # NOQA
        pfe = ProgressiveFetchExtract(index, link_dists)
        pfe.prepare()

        plan.insert(first_unlink_link_idx, (UNLINKLINKTRANSACTION, (unlink_dists, link_dists)))
        plan.insert(first_unlink_link_idx, (PROGRESSIVEFETCHEXTRACT, pfe))

    return plan
Code Example #19
File: shell.py  Project: towr/bndl
def main():
    signal.signal(signal.SIGUSR1, dump_threads)

    try:
        args = argparser.parse_args()
        config = bndl.conf

        if args.listen_addresses:
            config['bndl.net.listen_addresses'] = args.listen_addresses
        if args.seeds:
            config['bndl.net.seeds'] = args.seeds
            config['bndl.compute.worker_count'] = 0
        if args.worker_count is not None:
            config['bndl.compute.worker_count'] = args.worker_count

        config['bndl.run.numactl'] = args.numactl
        config['bndl.run.pincore'] = args.pincore
        config['bndl.run.jemalloc'] = args.jemalloc

        config.update(*args.conf)

        ctx = create_ctx(config)
        ns = dict(ctx=ctx)

        if config['bndl.net.seeds'] or config['bndl.compute.worker_count'] != 0:
            print('Connecting with workers ...', end='\r')
            worker_count = ctx.await_workers(args.worker_count)
            node_count = len(
                groupby(identity, [
                    tuple(sorted(worker.ip_addresses()))
                    for worker in ctx.workers
                ]))
            header = HEADER + '\nConnected with %r workers on %r nodes.' % (
                worker_count, node_count)
        else:
            header = HEADER

        try:
            import IPython
            IPython.embed(header=header, user_ns=ns)
        except ImportError:
            import code
            code.interact(header, local=ns)
    finally:
        with catch():
            ctx.stop()
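
len(groupby(identity, xs)) counts distinct values, here collapsing workers that share the same sorted tuple of IP addresses into one node. For hashable items this is equivalent to len(set(xs)); the sketch below uses made-up addresses:

from toolz import groupby, identity

worker_ips = [('10.0.0.1',), ('10.0.0.2',), ('10.0.0.1',)]
node_count = len(groupby(identity, worker_ips))
assert node_count == len(set(worker_ips)) == 2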
Code Example #20
    def get_user_requests(self):
        """
        return a list of user requested items.  Each item is a dict with the
        following keys:
        'date': the date and time running the command
        'cmd': a list of argv of the actual command which was run
        'action': install/remove/update
        'specs': the specs being used
        """
        res = []
        com_pat = re.compile(r'#\s*cmd:\s*(.+)')
        spec_pat = re.compile(r'#\s*(\w+)\s*specs:\s*(.+)?')
        for dt, unused_cont, comments in self.parse():
            item = {'date': dt}
            for line in comments:
                m = com_pat.match(line)
                if m:
                    argv = m.group(1).split()
                    if argv[0].endswith('conda'):
                        argv[0] = 'conda'
                    item['cmd'] = argv
                m = spec_pat.match(line)
                if m:
                    action, specs = m.groups()
                    item['action'] = action
                    specs = specs or ""
                    if specs.startswith('['):
                        specs = literal_eval(specs)
                    elif '[' not in specs:
                        specs = specs.split(',')
                    specs = [
                        spec for spec in specs
                        if spec and not spec.endswith('@')
                    ]
                    if specs and action in ('update', 'install', 'create'):
                        item['update_specs'] = item['specs'] = specs
                    elif specs and action in ('remove', 'uninstall'):
                        item['remove_specs'] = item['specs'] = specs

            if 'cmd' in item:
                res.append(item)
            dists = groupby(itemgetter(0), unused_cont)
            item['unlink_dists'] = dists.get('-', ())
            item['link_dists'] = dists.get('+', ())
        return res
Code Example #21
File: env.py  Project: conda/conda
def from_environment(name, prefix, no_builds=False, ignore_channels=False):
    """
        Get environment object from prefix
    Args:
        name: The name of environment
        prefix: The path of prefix
        no_builds: Whether has build requirement
        ignore_channels: whether ignore_channels

    Returns:     Environment object
    """
    # requested_specs_map = History(prefix).get_requested_specs_map()
    pd = PrefixData(prefix, pip_interop_enabled=True)

    precs = tuple(PrefixGraph(pd.iter_records()).graph)
    grouped_precs = groupby(lambda x: x.package_type, precs)
    conda_precs = sorted(concatv(
        grouped_precs.get(None, ()),
        grouped_precs.get(PackageType.NOARCH_GENERIC, ()),
        grouped_precs.get(PackageType.NOARCH_PYTHON, ()),
    ), key=lambda x: x.name)

    pip_precs = sorted(concatv(
        grouped_precs.get(PackageType.VIRTUAL_PYTHON_WHEEL, ()),
        grouped_precs.get(PackageType.VIRTUAL_PYTHON_EGG_MANAGEABLE, ()),
        grouped_precs.get(PackageType.VIRTUAL_PYTHON_EGG_UNMANAGEABLE, ()),
        # grouped_precs.get(PackageType.SHADOW_PYTHON_EGG_LINK, ()),
    ), key=lambda x: x.name)

    if no_builds:
        dependencies = ['='.join((a.name, a.version)) for a in conda_precs]
    else:
        dependencies = ['='.join((a.name, a.version, a.build)) for a in conda_precs]
    if pip_precs:
        dependencies.append({'pip': ["%s==%s" % (a.name, a.version) for a in pip_precs]})

    channels = list(context.channels)
    if not ignore_channels:
        for prec in conda_precs:
            canonical_name = prec.channel.canonical_name
            if canonical_name not in channels:
                channels.insert(0, canonical_name)
    return Environment(name=name, dependencies=dependencies, channels=channels, prefix=prefix)
Code Example #22
File: exceptions.py  Project: rlugojr/conda
def maybe_raise(error, context):
    if isinstance(error, CondaMultiError):
        groups = groupby(lambda e: isinstance(e, ClobberError), error.errors)
        clobber_errors = groups.get(True, ())
        non_clobber_errors = groups.get(False, ())
        if clobber_errors:
            if context.path_conflict == PathConflict.prevent and not context.clobber:
                raise error
            elif context.path_conflict == PathConflict.warn and not context.clobber:
                print_conda_exception(CondaMultiError(clobber_errors))
            if non_clobber_errors:
                raise CondaMultiError(non_clobber_errors)
    elif isinstance(error, ClobberError):
        if context.path_conflict == PathConflict.prevent and not context.clobber:
            raise error
        elif context.path_conflict == PathConflict.warn and not context.clobber:
            print_conda_exception(error)
    else:
        raise NotImplementedError()
Code Example #23
File: history.py  Project: jhunkeler/conda
    def get_user_requests(self):
        """
        return a list of user requested items.  Each item is a dict with the
        following keys:
        'date': the date and time running the command
        'cmd': a list of argv of the actual command which was run
        'action': install/remove/update
        'specs': the specs being used
        """
        res = []
        com_pat = re.compile(r'#\s*cmd:\s*(.+)')
        spec_pat = re.compile(r'#\s*(\w+)\s*specs:\s*(.+)?')
        for dt, unused_cont, comments in self.parse():
            item = {'date': dt}
            for line in comments:
                m = com_pat.match(line)
                if m:
                    argv = m.group(1).split()
                    if argv[0].endswith('conda'):
                        argv[0] = 'conda'
                    item['cmd'] = argv
                m = spec_pat.match(line)
                if m:
                    action, specs = m.groups()
                    item['action'] = action
                    specs = specs or ""
                    if specs.startswith('['):
                        specs = literal_eval(specs)
                    elif '[' not in specs:
                        specs = specs.split(',')
                    specs = [spec for spec in specs if spec and not spec.endswith('@')]
                    if specs and action in ('update', 'install', 'create'):
                        item['update_specs'] = item['specs'] = specs
                    elif specs and action in ('remove', 'uninstall'):
                        item['remove_specs'] = item['specs'] = specs

            if 'cmd' in item:
                res.append(item)
            dists = groupby(itemgetter(0), unused_cont)
            item['unlink_dists'] = dists.get('-', ())
            item['link_dists'] = dists.get('+', ())
        return res
Code Example #24
File: package_cache.py  Project: rlugojr/conda
    def get_entry_to_link(cls, dist):
        pc_entry = next((pc_entry
                         for pc_entry in cls.get_matching_entries(dist)
                         if pc_entry.is_extracted),
                        None)
        if pc_entry is not None:
            return pc_entry

        # this can happen with `conda install path/to/package.tar.bz2`
        #   because dist has channel '<unknown>'
        # if ProgressiveFetchExtract did its job correctly, what we're looking for
        #   should be the matching dist_name in the first writable package cache
        # we'll search all caches for a match, but search writable caches first
        grouped_caches = groupby(lambda x: x.is_writable,
                                 (PackageCache(pd) for pd in context.pkgs_dirs))
        caches = concatv(grouped_caches.get(True, ()), grouped_caches.get(False, ()))
        pc_entry = next((cache.scan_for_dist_no_channel(dist) for cache in caches if cache), None)
        if pc_entry is not None:
            return pc_entry
        raise CondaError("No package '%s' found in cache directories." % dist)
Code Example #25
File: cache.py  Project: ebethon/geotiler
async def caching_downloader(get, set, downloader, tiles, num_workers, **kw):
    """
    Download tiles from cache and missing tiles with the downloader.

    Asynchronous generator of map tiles is returned.

    The code flow is

    - caching downloader gets tile data from cache using URLs
    - the original downloader is used to download missing tile data
    - cache is updated with all existing tile data

    The cache getter function (`get` parameter) should return `None` if
    tile data is not in cache for given URL.

    A collection of tiles is returned.

    :param get: Function to get a tile data from cache.
    :param set: Function to put a tile data in cache.
    :param downloader: Original tiles downloader (asyncio coroutine).
    :param tiles: Collection tiles to fetch.
    :param num_workers: Number of workers used to connect to a map provider
        service.
    :param kw: Parameters passed to downloader coroutine.
    """
    tiles = fetch_from_cache(get, tiles)
    groups = partition_all(10, tiles)
    for tg in groups:
        missing = groupby(lambda t: t.img is None, tg)
        for t in missing.get(False, []):
            # reset cache for new and old tiles
            set(t.url, t.img)
            yield t

        result = downloader(missing.get(True, []), num_workers, **kw)
        async for t in result:
            # reset cache for new and old tiles
            set(t.url, t.img)
            yield t
Code Example #26
def get_random_sample(seq, n, stratify=True, random_state=None):
    """
    Args:
        seq (Sequence)
        n (int)
        stratify (bool)
        random_state (int)

    Returns:
        list
    """
    random.seed(a=random_state)
    if stratify is True:
        grped = itertoolz.groupby(operator.itemgetter(1), seq)
        n_per_grp = max(int(round(n / len(grped))), 1)
        sample = list(
            itertoolz.concat(
                random.sample(examples, min(len(examples), n_per_grp))
                for examples in grped.values()))
        random.shuffle(sample)
        return sample[:n]
    else:
        return random.sample(seq, min(len(seq), n))
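
The stratified branch groups examples by label with itemgetter(1), draws roughly n / num_groups items from each group, then shuffles the combined sample and trims it to n. A compact, runnable check of the idea with a fixed seed:

import random
from operator import itemgetter
from toolz import concat, groupby

seq = [('a', 0), ('b', 0), ('c', 1), ('d', 1), ('e', 1)]
n = 2
random.seed(42)
grped = groupby(itemgetter(1), seq)
n_per_grp = max(int(round(n / len(grped))), 1)
sample = list(concat(random.sample(g, min(len(g), n_per_grp))
                     for g in grped.values()))
random.shuffle(sample)
print(sample[:n])   # two examples, drawn across both labels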
Code Example #27
File: plan.py  Project: SylvainCorlay/conda
def inject_UNLINKLINKTRANSACTION(plan, index, prefix):
    # TODO: we really shouldn't be mutating the plan list here; turn plan into a tuple
    first_unlink_link_idx = next(
        (q for q, p in enumerate(plan) if p[0] in (UNLINK, LINK)), -1)
    if first_unlink_link_idx >= 0:
        grouped_instructions = groupby(lambda x: x[0], plan)
        unlink_dists = tuple(
            Dist(d[1]) for d in grouped_instructions.get(UNLINK, ()))
        link_dists = tuple(
            Dist(d[1]) for d in grouped_instructions.get(LINK, ()))
        unlink_dists, link_dists = handle_menuinst(unlink_dists, link_dists)

        # TODO: ideally we'd move these two lines before both the y/n confirmation and the --dry-run exit  # NOQA
        pfe = ProgressiveFetchExtract(index, link_dists)
        pfe.prepare()

        plan.insert(first_unlink_link_idx,
                    (UNLINKLINKTRANSACTION, (unlink_dists, link_dists)))
        plan.insert(first_unlink_link_idx, (PROGRESSIVEFETCHEXTRACT, pfe))

        # plan = [p for p in plan if p[0] not in (UNLINK, LINK)]  # filter out unlink/link
        # don't filter LINK and UNLINK, just don't do anything with them
    return plan
Code Example #28
def sentences_ents(doc, ents=None):
    """
    Group doc.ents by sentences.
    :param doc: spacy.Doc
    :param ents: iterable of objects with attributes 'start_char' and 'end_char' (if None (default), use doc.ents)
    :yield: Tuple[spacy.token.Span, List[spacy.token.Span]]
    """
    # Group entities by sentences
    sents_bound_tree = IntervalTree.from_tuples([
        (s.start_char, s.end_char, i) for i, s in enumerate(doc.sents)
    ])

    # Helper: map an entity to the index of the sentence that contains it
    def index(ent):
        sents = sents_bound_tree[ent.start_char:ent.end_char]
        if sents:
            return sents.pop().data

    if ents is None:
        ents = doc.ents
    ents_in_sents = groupby(index, ents)
    for i, sent in enumerate(doc.sents):
        sent_ents = ents_in_sents.get(i, list())
        yield sent, sent_ents
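
sentences_ents groups entities by the index of the sentence that contains them, then walks the sentences and pulls each one's group with a safe default. The lookup shape, independent of spaCy (plain (start, end) tuples stand in for spans):

from toolz import groupby

sent_bounds = [(0, 10), (11, 25)]          # (start_char, end_char) per sentence
ents = [(2, 5), (12, 15), (20, 24)]

def index(ent):
    return next(i for i, (s, e) in enumerate(sent_bounds) if s <= ent[0] < e)

ents_in_sents = groupby(index, ents)
for i, _ in enumerate(sent_bounds):
    print(i, ents_in_sents.get(i, []))     # 0 -> [(2, 5)], 1 -> the other two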
Code Example #29
    def get_user_requests(self):
        """
        return a list of user requested items.  Each item is a dict with the
        following keys:
        'date': the date and time running the command
        'cmd': a list of argv of the actual command which was run
        'action': install/remove/update
        'specs': the specs being used
        """
        res = []
        for dt, unused_cont, comments in self.parse():
            item = {'date': dt}
            for line in comments:
                comment_items = self._parse_comment_line(line)
                item.update(comment_items)

            if 'cmd' in item:
                res.append(item)

            dists = groupby(itemgetter(0), unused_cont)
            item['unlink_dists'] = dists.get('-', ())
            item['link_dists'] = dists.get('+', ())

        return res
Code Example #30
File: index.py  Project: randyamiel/conda
def get_reduced_index(prefix, channels, subdirs, specs):

    # # this block of code is a "combine" step intended to filter out redundant specs
    # # causes a problem with py.test tests/core/test_solve.py -k broken_install
    # specs_map = defaultdict(list)
    # for spec in specs:
    #     specs_map[spec.name].append(spec)
    # consolidated_specs = set()
    # for spec_name, specs_group in iteritems(specs_map):
    #     if len(specs_group) == 1:
    #         consolidated_specs.add(specs_group[0])
    #     elif spec_name == '*':
    #         consolidated_specs.update(specs_group)
    #     else:
    #         keep_specs = []
    #         for spec in specs_group:
    #             if len(spec._match_components) > 1 or spec.target or spec.optional:
    #                 keep_specs.append(spec)
    #         consolidated_specs.update(keep_specs)

    with ThreadLimitedThreadPoolExecutor() as executor:

        channel_urls = all_channel_urls(channels, subdirs=subdirs)
        check_whitelist(channel_urls)

        if context.offline:
            grouped_urls = groupby(lambda url: url.startswith('file://'), channel_urls)
            ignored_urls = grouped_urls.get(False, ())
            if ignored_urls:
                log.info("Ignoring the following channel urls because mode is offline.%s",
                         dashlist(ignored_urls))
            channel_urls = IndexedSet(grouped_urls.get(True, ()))
        subdir_datas = tuple(SubdirData(Channel(url)) for url in channel_urls)

        records = IndexedSet()
        collected_names = set()
        collected_track_features = set()
        pending_names = set()
        pending_track_features = set()

        def query_all(spec):
            futures = tuple(executor.submit(sd.query, spec) for sd in subdir_datas)
            return tuple(concat(future.result() for future in as_completed(futures)))

        def push_spec(spec):
            name = spec.get_raw_value('name')
            if name and name not in collected_names:
                pending_names.add(name)
            track_features = spec.get_raw_value('track_features')
            if track_features:
                for ftr_name in track_features:
                    if ftr_name not in collected_track_features:
                        pending_track_features.add(ftr_name)

        def push_record(record):
            for _spec in record.combined_depends:
                push_spec(_spec)
            if record.track_features:
                for ftr_name in record.track_features:
                    push_spec(MatchSpec(track_features=ftr_name))

        for spec in specs:
            push_spec(spec)

        while pending_names or pending_track_features:
            while pending_names:
                name = pending_names.pop()
                collected_names.add(name)
                spec = MatchSpec(name)
                new_records = query_all(spec)
                for record in new_records:
                    push_record(record)
                records.update(new_records)

            while pending_track_features:
                feature_name = pending_track_features.pop()
                collected_track_features.add(feature_name)
                spec = MatchSpec(track_features=feature_name)
                new_records = query_all(spec)
                for record in new_records:
                    push_record(record)
                records.update(new_records)

        reduced_index = {Dist(rec): rec for rec in records}

        if prefix is not None:
            _supplement_index_with_prefix(reduced_index, prefix)

        if context.offline or ('unknown' in context._argparse_args
                               and context._argparse_args.unknown):
            # This is really messed up right now.  Dates all the way back to
            # https://github.com/conda/conda/commit/f761f65a82b739562a0d997a2570e2b8a0bdc783
            # TODO: revisit this later
            _supplement_index_with_cache(reduced_index)

        # add feature records for the solver
        known_features = set()
        for rec in itervalues(reduced_index):
            known_features.update(concatv(rec.track_features, rec.features))
        known_features.update(context.track_features)
        for ftr_str in known_features:
            rec = make_feature_record(ftr_str)
            reduced_index[Dist(rec)] = rec

        return reduced_index
Code Example #31
File: solve.py  Project: jhunkeler/conda
    def solve_final_state(self, update_modifier=NULL, deps_modifier=NULL, prune=NULL,
                          ignore_pinned=NULL, force_remove=NULL):
        """Gives the final, solved state of the environment.

        Args:
            update_modifier (UpdateModifier):
                An optional flag directing how updates are handled regarding packages already
                existing in the environment.

            deps_modifier (DepsModifier):
                An optional flag indicating special solver handling for dependencies. The
                default solver behavior is to be as conservative as possible with dependency
                updates (in the case the dependency already exists in the environment), while
                still ensuring all dependencies are satisfied.  Options include
                    * NO_DEPS
                    * ONLY_DEPS
                    * UPDATE_DEPS
                    * UPDATE_DEPS_ONLY_DEPS
                    * FREEZE_INSTALLED
            prune (bool):
                If ``True``, the solution will not contain packages that were
                previously brought into the environment as dependencies but are no longer
                required as dependencies and are not user-requested.
            ignore_pinned (bool):
                If ``True``, the solution will ignore pinned package configuration
                for the prefix.
            force_remove (bool):
                Forces removal of a package without removing packages that depend on it.

        Returns:
            Tuple[PackageRef]:
                In sorted dependency order from roots to leaves, the package references for
                the solved state of the environment.

        """
        if update_modifier is NULL:
            update_modifier = context.update_modifier
        else:
            update_modifier = UpdateModifier(text_type(update_modifier).lower())
        if deps_modifier is NULL:
            deps_modifier = context.deps_modifier
        else:
            deps_modifier = DepsModifier(text_type(deps_modifier).lower())
        prune = context.prune if prune is NULL else prune
        ignore_pinned = context.ignore_pinned if ignore_pinned is NULL else ignore_pinned
        force_remove = context.force_remove if force_remove is NULL else force_remove
        specs_to_remove = self.specs_to_remove
        specs_to_add = self.specs_to_add

        # force_remove is a special case where we return early
        if specs_to_remove and force_remove:
            if specs_to_add:
                raise NotImplementedError()
            solution = tuple(prec for prec in PrefixData(self.prefix).iter_records()
                             if not any(spec.match(prec) for spec in specs_to_remove))
            return IndexedSet(PrefixGraph(solution).graph)

        log.debug("solving prefix %s\n"
                  "  specs_to_remove: %s\n"
                  "  specs_to_add: %s\n"
                  "  prune: %s", self.prefix, specs_to_remove, specs_to_add, prune)

        # declare starting point, the initial state of the environment
        # `solution` and `specs_map` are mutated throughout this method
        prefix_data = PrefixData(self.prefix)
        solution = tuple(prec for prec in prefix_data.iter_records())

        # Check if specs are satisfied by current environment. If they are, exit early.
        if (update_modifier == UpdateModifier.SPECS_SATISFIED_SKIP_SOLVE
                and not specs_to_remove and not prune):
            for spec in specs_to_add:
                if not next(prefix_data.query(spec), None):
                    break
            else:
                # All specs match a package in the current environment.
                # Return early, with a solution that should just be PrefixData().iter_records()
                return IndexedSet(PrefixGraph(solution).graph)

        specs_from_history_map = History(self.prefix).get_requested_specs_map()
        if prune:  # or update_modifier == UpdateModifier.UPDATE_ALL  # pending conda/constructor#138  # NOQA
            # Users are struggling with the prune functionality in --update-all, due to
            # https://github.com/conda/constructor/issues/138.  Until that issue is resolved,
            # and for the foreseeable future, it's best to be more conservative with --update-all.

            # Start with empty specs map for UPDATE_ALL because we're optimizing the update
            # only for specs the user has requested; it's ok to remove dependencies.
            specs_map = odict()

            # However, because of https://github.com/conda/constructor/issues/138, we need
            # to hard-code keeping conda, conda-build, and anaconda, if they're already in
            # the environment.
            solution_pkg_names = set(d.name for d in solution)
            ensure_these = (pkg_name for pkg_name in {
                'anaconda', 'conda', 'conda-build',
            } if pkg_name not in specs_from_history_map and pkg_name in solution_pkg_names)
            for pkg_name in ensure_these:
                specs_from_history_map[pkg_name] = MatchSpec(pkg_name)
        else:
            specs_map = odict((d.name, MatchSpec(d.name)) for d in solution)

        # add in historically-requested specs
        specs_map.update(specs_from_history_map)

        # let's pretend for now that this is the right place to build the index
        prepared_specs = set(concatv(
            specs_to_remove,
            specs_to_add,
            itervalues(specs_from_history_map),
        ))

        index, r = self._prepare(prepared_specs)

        if specs_to_remove:
            # In a previous implementation, we invoked SAT here via `r.remove()` to help with
            # spec removal, and then later invoking SAT again via `r.solve()`. Rather than invoking
            # SAT for spec removal determination, we can use the PrefixGraph and simple tree
            # traversal if we're careful about how we handle features. We still invoke sat via
            # `r.solve()` later.
            _track_fts_specs = (spec for spec in specs_to_remove if 'track_features' in spec)
            feature_names = set(concat(spec.get_raw_value('track_features')
                                       for spec in _track_fts_specs))
            graph = PrefixGraph(solution, itervalues(specs_map))

            all_removed_records = []
            no_removed_records_specs = []
            for spec in specs_to_remove:
                # If the spec was a track_features spec, then we need to also remove every
                # package with a feature that matches the track_feature. The
                # `graph.remove_spec()` method handles that for us.
                log.trace("using PrefixGraph to remove records for %s", spec)
                removed_records = graph.remove_spec(spec)
                if removed_records:
                    all_removed_records.extend(removed_records)
                else:
                    no_removed_records_specs.append(spec)

            # ensure that each spec in specs_to_remove is actually associated with removed records
            unmatched_specs_to_remove = tuple(
                spec for spec in no_removed_records_specs
                if not any(spec.match(rec) for rec in all_removed_records)
            )
            if unmatched_specs_to_remove:
                raise PackagesNotFoundError(
                    tuple(sorted(str(s) for s in unmatched_specs_to_remove))
                )

            for rec in all_removed_records:
                # We keep specs (minus the feature part) for the non provides_features packages
                # if they're in the history specs.  Otherwise, we pop them from the specs_map.
                rec_has_a_feature = set(rec.features or ()) & feature_names
                if rec_has_a_feature and rec.name in specs_from_history_map:
                    spec = specs_map.get(rec.name, MatchSpec(rec.name))
                    spec._match_components.pop('features', None)
                    specs_map[spec.name] = spec
                else:
                    specs_map.pop(rec.name, None)

            solution = tuple(graph.graph)

        # We handle as best as possible environments in inconsistent states. To do this,
        # we remove now from consideration the set of packages causing inconsistencies,
        # and then we add them back in following the main SAT call.
        _, inconsistent_precs = r.bad_installed(solution, ())
        add_back_map = {}  # name: (prec, spec)
        if log.isEnabledFor(DEBUG):
            log.debug("inconsistent precs: %s",
                      dashlist(inconsistent_precs) if inconsistent_precs else 'None')
        if inconsistent_precs:
            for prec in inconsistent_precs:
                # pop and save matching spec in specs_map
                add_back_map[prec.name] = (prec, specs_map.pop(prec.name, None))
            solution = tuple(prec for prec in solution if prec not in inconsistent_precs)

        # For the remaining specs in specs_map, add target to each spec. `target` is a reference
        # to the package currently existing in the environment. Setting target instructs the
        # solver to not disturb that package if it's not necessary.
        # If the spec.name is being modified by inclusion in specs_to_add, we don't set `target`,
        # since we *want* the solver to modify/update that package.
        #
        # TLDR: when working with MatchSpec objects,
        #  - to minimize the version change, set MatchSpec(name=name, target=prec.dist_str())
        #  - to freeze the package, set all the components of MatchSpec individually
        for pkg_name, spec in iteritems(specs_map):
            matches_for_spec = tuple(prec for prec in solution if spec.match(prec))
            if matches_for_spec:
                if len(matches_for_spec) != 1:
                    raise CondaError(dals("""
                    Conda encountered an error with your environment.  Please report an issue
                    at https://github.com/conda/conda/issues/new.  In your report, please include
                    the output of 'conda info' and 'conda list' for the active environment, along
                    with the command you invoked that resulted in this error.
                      pkg_name: %s
                      spec: %s
                      matches_for_spec: %s
                    """) % (pkg_name, spec,
                            dashlist((text_type(s) for s in matches_for_spec), indent=4)))
                target_prec = matches_for_spec[0]
                if update_modifier == UpdateModifier.FREEZE_INSTALLED:
                    new_spec = MatchSpec(target_prec)
                else:
                    target = target_prec.dist_str()
                    new_spec = MatchSpec(spec, target=target)
                specs_map[pkg_name] = new_spec
        if log.isEnabledFor(TRACE):
            log.trace("specs_map with targets: %s", specs_map)

        # If we're in UPDATE_ALL mode, we need to drop all the constraints attached to specs,
        # so they can all float and the solver can find the most up-to-date solution. In the case
        # of UPDATE_ALL, `specs_map` wasn't initialized with packages from the current environment,
        # but *only* historically-requested specs.  This lets UPDATE_ALL drop dependencies if
        # they're no longer needed, and their presence would otherwise prevent the updated solution
        # the user most likely wants.
        if update_modifier == UpdateModifier.UPDATE_ALL:
            specs_map = {pkg_name: MatchSpec(spec.name, optional=spec.optional)
                         for pkg_name, spec in iteritems(specs_map)}

        # As a business rule, we never want to update python beyond the current minor version,
        # unless that's requested explicitly by the user (which we actively discourage).
        if 'python' in specs_map:
            python_prefix_rec = prefix_data.get('python')
            if python_prefix_rec:
                python_spec = specs_map['python']
                if not python_spec.get('version'):
                    pinned_version = get_major_minor_version(python_prefix_rec.version) + '.*'
                    specs_map['python'] = MatchSpec(python_spec, version=pinned_version)

        # For the aggressive_update_packages configuration parameter, we strip any target
        # that's been set.
        if not context.offline:
            for spec in context.aggressive_update_packages:
                if spec.name in specs_map:
                    specs_map[spec.name] = spec
            if (context.auto_update_conda and paths_equal(self.prefix, context.root_prefix)
                    and any(prec.name == "conda" for prec in solution)):
                specs_map["conda"] = MatchSpec("conda")

        # add in explicitly requested specs from specs_to_add
        # this overrides any name-matching spec already in the spec map
        specs_map.update((s.name, s) for s in specs_to_add)

        # collect additional specs to add to the solution
        track_features_specs = pinned_specs = ()
        if context.track_features:
            track_features_specs = tuple(MatchSpec(x + '@') for x in context.track_features)
        if not ignore_pinned:
            pinned_specs = get_pinned_specs(self.prefix)

        final_environment_specs = IndexedSet(concatv(
            itervalues(specs_map),
            track_features_specs,
            pinned_specs,
        ))

        # We've previously checked `solution` for consistency (which at that point was the
        # pre-solve state of the environment). Now we check our compiled set of
        # `final_environment_specs` for the possibility of a solution.  If there are conflicts,
        # we can often avoid them by neutering specs that have a target (e.g. removing the
        # version constraint) and also making them optional. The result here will be fewer cases of
        # `UnsatisfiableError` handed to users, at the cost of more packages being modified
        # or removed from the environment.
        conflicting_specs = r.get_conflicting_specs(tuple(final_environment_specs))
        if log.isEnabledFor(DEBUG):
            log.debug("conflicting specs: %s", dashlist(conflicting_specs))
        for spec in conflicting_specs:
            if spec.target:
                final_environment_specs.remove(spec)
                neutered_spec = MatchSpec(spec.name, target=spec.target, optional=True)
                final_environment_specs.add(neutered_spec)

        # Finally! We get to call SAT.
        if log.isEnabledFor(DEBUG):
            log.debug("final specs to add: %s",
                      dashlist(sorted(text_type(s) for s in final_environment_specs)))
        solution = r.solve(tuple(final_environment_specs))  # return value is List[PackageRecord]

        # add back inconsistent packages to solution
        if add_back_map:
            for name, (prec, spec) in iteritems(add_back_map):
                if not any(d.name == name for d in solution):
                    solution.append(prec)
                    if spec:
                        final_environment_specs.add(spec)

        # Special case handling for various DepsModifier flags. Maybe this block could be pulled
        # out into its own non-public helper method?
        if deps_modifier == DepsModifier.NO_DEPS:
            # In the NO_DEPS case, we need to start with the original list of packages in the
            # environment, and then only modify packages that match specs_to_add or
            # specs_to_remove.
            _no_deps_solution = IndexedSet(prefix_data.iter_records())
            only_remove_these = set(prec
                                    for spec in specs_to_remove
                                    for prec in _no_deps_solution
                                    if spec.match(prec))
            _no_deps_solution -= only_remove_these

            only_add_these = set(prec
                                 for spec in specs_to_add
                                 for prec in solution
                                 if spec.match(prec))
            remove_before_adding_back = set(prec.name for prec in only_add_these)
            _no_deps_solution = IndexedSet(prec for prec in _no_deps_solution
                                           if prec.name not in remove_before_adding_back)
            _no_deps_solution |= only_add_these
            solution = _no_deps_solution
        elif (deps_modifier == DepsModifier.ONLY_DEPS
                and update_modifier != UpdateModifier.UPDATE_DEPS):
            # Using a special instance of PrefixGraph to remove youngest child nodes that match
            # the original specs_to_add.  It's important to remove only the *youngest* child nodes,
            # because a typical use might be `conda install --only-deps python=2 flask`, and in
            # that case we'd want to keep python.
            graph = PrefixGraph(solution, specs_to_add)
            graph.remove_youngest_descendant_nodes_with_specs()
            solution = tuple(graph.graph)

        elif update_modifier == UpdateModifier.UPDATE_DEPS:
            # Here we have to SAT solve again :(  It's only now that we know the dependency
            # chain of specs_to_add.
            specs_to_add_names = set(spec.name for spec in specs_to_add)
            update_names = set()
            graph = PrefixGraph(solution, final_environment_specs)
            for spec in specs_to_add:
                node = graph.get_node_by_name(spec.name)
                for ancestor_record in graph.all_ancestors(node):
                    ancestor_name = ancestor_record.name
                    if ancestor_name not in specs_to_add_names:
                        update_names.add(ancestor_name)
            grouped_specs = groupby(lambda s: s.name in update_names, final_environment_specs)
            new_final_environment_specs = set(grouped_specs.get(False, ()))
            update_specs = set(MatchSpec(spec.name, optional=spec.optional)
                               for spec in grouped_specs.get(True, ()))
            final_environment_specs = new_final_environment_specs | update_specs
            solution = r.solve(final_environment_specs)

            if deps_modifier == DepsModifier.ONLY_DEPS:
                # duplicated from DepsModifier.ONLY_DEPS
                graph = PrefixGraph(solution, specs_to_add)
                graph.remove_youngest_descendant_nodes_with_specs()
                solution = tuple(graph.graph)

        if prune:
            graph = PrefixGraph(solution, final_environment_specs)
            graph.prune()
            solution = tuple(graph.graph)

        self._check_solution(solution, pinned_specs)

        solution = IndexedSet(PrefixGraph(solution).graph)
        log.debug("solved prefix %s\n"
                  "  solved_linked_dists:\n"
                  "    %s\n",
                  self.prefix, "\n    ".join(prec.dist_str() for prec in solution))
        return solution
Code example #37
File: envs_manager.py  Project: timsnyder/conda
    def get_registered_packages_keyed_on_env_name(self):
        get_env_name = lambda x: x['preferred_env_name']
        return groupby(get_env_name, self._preferred_env_packages)
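
For reference, a minimal standalone sketch of the same pattern (the records below are
invented for illustration): groupby keyed on a field-extracting function buckets dicts
by that field's value, preserving input order within each group.

from toolz import groupby  # cytoolz exposes the same function

packages = [
    {'package_name': 'numpy', 'preferred_env_name': '_numpy_'},
    {'package_name': 'flask', 'preferred_env_name': '_web_'},
    {'package_name': 'jinja2', 'preferred_env_name': '_web_'},
]

# bucket package records by the environment each one prefers
by_env = groupby(lambda rec: rec['preferred_env_name'], packages)
assert [rec['package_name'] for rec in by_env['_web_']] == ['flask', 'jinja2']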
Code example #38
File: main_config.py  Project: groutr/conda
def execute_config(args, parser):

    json_warnings = []
    json_get = {}

    if args.show_sources:
        if context.json:
            print(json.dumps(context.collect_all(), sort_keys=True,
                             indent=2, separators=(',', ': ')))
        else:
            lines = []
            for source, reprs in iteritems(context.collect_all()):
                lines.append("==> %s <==" % source)
                lines.extend(format_dict(reprs))
                lines.append('')
            print('\n'.join(lines))
        return

    if args.show is not None:
        if args.show:
            paramater_names = args.show
            all_names = context.list_parameters()
            not_params = set(paramater_names) - set(all_names)
            if not_params:
                from ..exceptions import ArgumentError
                from ..resolve import dashlist
                raise ArgumentError("Invalid configuration parameters: %s" % dashlist(not_params))
        else:
            paramater_names = context.list_parameters()

        from collections import OrderedDict

        d = OrderedDict((key, getattr(context, key)) for key in paramater_names)
        if context.json:
            print(json.dumps(d, sort_keys=True, indent=2, separators=(',', ': '),
                  cls=EntityEncoder))
        else:
            # Add in custom formatting
            if 'custom_channels' in d:
                d['custom_channels'] = {
                    channel.name: "%s://%s" % (channel.scheme, channel.location)
                    for channel in itervalues(d['custom_channels'])
                }
            if 'custom_multichannels' in d:
                from ..resolve import dashlist
                d['custom_multichannels'] = {
                    multichannel_name: dashlist(channels, indent=4)
                    for multichannel_name, channels in iteritems(d['custom_multichannels'])
                }

            print('\n'.join(format_dict(d)))
        context.validate_configuration()
        return

    if args.describe is not None:
        if args.describe:
            paramater_names = args.describe
            all_names = context.list_parameters()
            not_params = set(paramater_names) - set(all_names)
            if not_params:
                from ..exceptions import ArgumentError
                from ..resolve import dashlist
                raise ArgumentError("Invalid configuration parameters: %s" % dashlist(not_params))
            if context.json:
                print(json.dumps([context.describe_parameter(name) for name in paramater_names],
                                 sort_keys=True, indent=2, separators=(',', ': '),
                                 cls=EntityEncoder))
            else:
                builder = []
                builder.extend(concat(parameter_description_builder(name)
                                      for name in paramater_names))
                print('\n'.join(builder))
        else:
            if context.json:
                skip_categories = ('CLI-only', 'Hidden and Undocumented')
                paramater_names = sorted(concat(
                    parameter_names for category, parameter_names in context.category_map.items()
                    if category not in skip_categories
                ))
                print(json.dumps([context.describe_parameter(name) for name in paramater_names],
                                 sort_keys=True, indent=2, separators=(',', ': '),
                                 cls=EntityEncoder))
            else:
                print(describe_all_parameters())
        return

    if args.validate:
        context.validate_all()
        return

    if args.system:
        rc_path = sys_rc_path
    elif args.env:
        if 'CONDA_PREFIX' in os.environ:
            rc_path = join(os.environ['CONDA_PREFIX'], '.condarc')
        else:
            rc_path = user_rc_path
    elif args.file:
        rc_path = args.file
    else:
        rc_path = user_rc_path

    if args.write_default:
        if isfile(rc_path):
            with open(rc_path) as fh:
                data = fh.read().strip()
            if data:
                raise CondaError("The file '%s' "
                                 "already contains configuration information.\n"
                                 "Remove the file to proceed.\n"
                                 "Use `conda config --describe` to display default configuration."
                                 % rc_path)

        with open(rc_path, 'w') as fh:
            fh.write(describe_all_parameters())
        return

    # read existing condarc
    if os.path.exists(rc_path):
        with open(rc_path, 'r') as fh:
            rc_config = yaml_load(fh) or {}
    else:
        rc_config = {}

    grouped_paramaters = groupby(lambda p: context.describe_parameter(p)['parameter_type'],
                                 context.list_parameters())
    primitive_parameters = grouped_paramaters['primitive']
    sequence_parameters = grouped_paramaters['sequence']
    map_parameters = grouped_paramaters['map']

    # Get
    if args.get is not None:
        context.validate_all()
        if args.get == []:
            args.get = sorted(rc_config.keys())
        for key in args.get:
            if key not in primitive_parameters + sequence_parameters:
                message = "unknown key %s" % key
                if not context.json:
                    print(message, file=sys.stderr)
                else:
                    json_warnings.append(message)
                continue
            if key not in rc_config:
                continue

            if context.json:
                json_get[key] = rc_config[key]
                continue

            if isinstance(rc_config[key], (bool, string_types)):
                print("--set", key, rc_config[key])
            else:  # assume the key is a list-type
                # Note, since conda config --add prepends, these are printed in
                # the reverse order so that entering them in this order will
                # recreate the same file
                items = rc_config.get(key, [])
                numitems = len(items)
                for q, item in enumerate(reversed(items)):
                    # Use repr so that it can be pasted back in to conda config --add
                    if key == "channels" and q in (0, numitems-1):
                        print("--add", key, repr(item),
                              "  # lowest priority" if q == 0 else "  # highest priority")
                    else:
                        print("--add", key, repr(item))

    if args.stdin:
        content = timeout(5, sys.stdin.read)
        if not content:
            return
        try:
            parsed = yaml_load(content)
            rc_config.update(parsed)
        except Exception:  # pragma: no cover
            from ..exceptions import ParseError
            raise ParseError("invalid yaml content:\n%s" % content)

    # prepend, append, add
    for arg, prepend in zip((args.prepend, args.append), (True, False)):
        for key, item in arg:
            if key == 'channels' and key not in rc_config:
                rc_config[key] = ['defaults']
            if key not in sequence_parameters:
                from ..exceptions import CondaValueError
                raise CondaValueError("Key '%s' is not a known sequence parameter." % key)
            if not isinstance(rc_config.get(key, []), list):
                from ..exceptions import CouldntParseError
                bad = rc_config[key].__class__.__name__
                raise CouldntParseError("key %r should be a list, not %s." % (key, bad))
            arglist = rc_config.setdefault(key, [])
            if item in arglist:
                # Right now, all list keys should not contain duplicates
                message = "Warning: '%s' already in '%s' list, moving to the %s" % (
                    item, key, "top" if prepend else "bottom")
                arglist = rc_config[key] = [p for p in arglist if p != item]
                if not context.json:
                    print(message, file=sys.stderr)
                else:
                    json_warnings.append(message)
            arglist.insert(0 if prepend else len(arglist), item)

    # Set
    for key, item in args.set:
        key, subkey = key.split('.', 1) if '.' in key else (key, None)
        if key in primitive_parameters:
            value = context.typify_parameter(key, item)
            rc_config[key] = value
        elif key in map_parameters:
            argmap = rc_config.setdefault(key, {})
            argmap[subkey] = item
        else:
            from ..exceptions import CondaValueError
            raise CondaValueError("Key '%s' is not a known primitive parameter." % key)

    # Remove
    for key, item in args.remove:
        key, subkey = key.split('.', 1) if '.' in key else (key, None)
        if key not in rc_config:
            if key != 'channels':
                from ..exceptions import CondaKeyError
                raise CondaKeyError(key, "key %r is not in the config file" % key)
            rc_config[key] = ['defaults']
        if item not in rc_config[key]:
            from ..exceptions import CondaKeyError
            raise CondaKeyError(key, "%r is not in the %r key of the config file" %
                                (item, key))
        rc_config[key] = [i for i in rc_config[key] if i != item]

    # Remove Key
    for key, in args.remove_key:
        key, subkey = key.split('.', 1) if '.' in key else (key, None)
        if key not in rc_config:
            from ..exceptions import CondaKeyError
            raise CondaKeyError(key, "key %r is not in the config file" %
                                key)
        del rc_config[key]

    # config.rc_keys
    if not args.get:

        # Add representers for enums.
        # Because a representer cannot be added for the base Enum class (it must be added for
        # each specific Enum subclass), and because of import rules, I don't know of a better
        # location to do this.
        def enum_representer(dumper, data):
            return dumper.represent_str(str(data))

        yaml.representer.RoundTripRepresenter.add_representer(SafetyChecks, enum_representer)
        yaml.representer.RoundTripRepresenter.add_representer(PathConflict, enum_representer)

        try:
            with open(rc_path, 'w') as rc:
                rc.write(yaml_dump(rc_config))
        except (IOError, OSError) as e:
            raise CondaError('Cannot write to condarc file at %s\n'
                             'Caused by %r' % (rc_path, e))

    if context.json:
        from .common import stdout_json_success
        stdout_json_success(
            rc_path=rc_path,
            warnings=json_warnings,
            get=json_get
        )
    return
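
The parameter grouping in this example keys on each parameter's declared type. Below is a
minimal sketch of that step with a stand-in description table; the real lookup goes through
context.describe_parameter(), and the parameter names and types here are illustrative.

from toolz import groupby

# stand-in for context.describe_parameter()
DESCRIPTIONS = {
    'always_yes': {'parameter_type': 'primitive'},
    'channels': {'parameter_type': 'sequence'},
    'proxy_servers': {'parameter_type': 'map'},
}

grouped = groupby(lambda p: DESCRIPTIONS[p]['parameter_type'], DESCRIPTIONS)
assert grouped['primitive'] == ['always_yes']
assert grouped['sequence'] == ['channels']
assert grouped['map'] == ['proxy_servers']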
Code example #39
File: solve.py  Project: hemantmshah26/conda
    def solve_final_state(self,
                          deps_modifier=NULL,
                          prune=NULL,
                          ignore_pinned=NULL,
                          force_remove=NULL):
        """Gives the final, solved state of the environment.

        Args:
            deps_modifier (DepsModifier):
                An optional flag indicating special solver handling for dependencies. The
                default solver behavior is to be as conservative as possible with dependency
                updates (in the case the dependency already exists in the environment), while
                still ensuring all dependencies are satisfied.  Options include
                    * NO_DEPS
                    * ONLY_DEPS
                    * UPDATE_DEPS
                    * UPDATE_DEPS_ONLY_DEPS
            prune (bool):
                If ``True``, the solution will not contain packages that were
                previously brought into the environment as dependencies but are no longer
                required as dependencies and are not user-requested.
            ignore_pinned (bool):
                If ``True``, the solution will ignore pinned package configuration
                for the prefix.
            force_remove (bool):
                Forces removal of a package without removing packages that depend on it.

        Returns:
            Tuple[PackageRef]:
                In sorted dependency order from roots to leaves, the package references for
                the solved state of the environment.

        """
        index, r = self._prepare()
        prune = context.prune if prune is NULL else prune
        ignore_pinned = context.ignore_pinned if ignore_pinned is NULL else ignore_pinned
        deps_modifier = context.deps_modifier if deps_modifier is NULL else deps_modifier
        if isinstance(deps_modifier, string_types):
            deps_modifier = DepsModifier(deps_modifier.lower())
        specs_to_remove = self.specs_to_remove
        specs_to_add = self.specs_to_add

        # force_remove is a special case where we return early
        if specs_to_remove and force_remove:
            solution = tuple(
                Dist(rec) for rec in PrefixData(self.prefix).iter_records()
                if not any(spec.match(rec) for spec in specs_to_remove))
            return IndexedSet(
                index[d]
                for d in r.dependency_sort({d.name: d
                                            for d in solution}))

        log.debug(
            "solving prefix %s\n"
            "  specs_to_remove: %s\n"
            "  specs_to_add: %s\n"
            "  prune: %s", self.prefix, specs_to_remove, specs_to_add, prune)

        # declare starting point, the initial state of the environment
        # `solution` and `specs_map` are mutated throughout this method
        prefix_data = PrefixData(self.prefix)
        solution = tuple(Dist(d) for d in prefix_data.iter_records())
        if prune or deps_modifier == DepsModifier.UPDATE_ALL:
            # start with empty specs map for UPDATE_ALL because we're optimizing the update
            # only for specs the user has requested; it's ok to remove dependencies
            specs_map = odict()
        else:
            specs_map = odict((d.name, MatchSpec(d.name)) for d in solution)

        # add in historically-requested specs
        specs_from_history_map = History(self.prefix).get_requested_specs_map()
        specs_map.update(specs_from_history_map)

        if specs_to_remove:
            # Rather than invoking SAT for removal, we can use the DAG and simple tree traversal
            # if we're careful about how we handle features.
            _track_features_specs = (spec for spec in specs_to_remove
                                     if 'track_features' in spec)
            feature_names = set(
                concat(
                    spec.get_raw_value('track_features')
                    for spec in _track_features_specs))
            dag = PrefixDag((index[dist] for dist in solution),
                            itervalues(specs_map))

            removed_records = []
            for spec in specs_to_remove:
                # If the spec is a track_features spec, then we also need to remove every package
                # with a feature that matches the track_feature.  The `dag.remove_spec()` method
                # handles that for us.
                removed_records.extend(dag.remove_spec(spec))

            for rec in removed_records:
                # We keep specs (minus the feature part) for the non-track_features packages
                # if they're in the history specs.  Otherwise, we pop them from the specs_map.
                rec_has_a_feature = set(rec.features or ()) & feature_names
                if rec_has_a_feature and rec.name in specs_from_history_map:
                    spec = specs_map.get(rec.name, MatchSpec(rec.name))
                    spec._match_components.pop('features', None)
                    specs_map[spec.name] = spec
                else:
                    specs_map.pop(rec.name, None)

            solution = tuple(Dist(rec) for rec in dag.records)

            if not removed_records and not prune:
                raise PackageNotFoundError(
                    "No packages found to remove from environment.")

        # We handle environments in inconsistent states as best we can. To do this,
        # we now remove from consideration the set of packages causing inconsistencies,
        # and then we add them back in following the main SAT call.
        _, inconsistent_dists = r.bad_installed(solution, ())
        add_back_map = {}  # name: (dist, spec)
        if inconsistent_dists:
            for dist in inconsistent_dists:
                # pop and save matching spec in specs_map
                add_back_map[dist.name] = (dist,
                                           specs_map.pop(dist.name, None))
            solution = tuple(dist for dist in solution
                             if dist not in inconsistent_dists)

        # For the remaining specs in specs_map, add target to each spec. `target` is a reference
        # to the package currently existing in the environment. Setting target instructs the
        # solver to not disturb that package if it's not necessary.
        # If the spec.name is being modified by inclusion in specs_to_add, we don't set `target`,
        # since we *want* the solver to modify/update that package.
        #
        # TLDR: when working with MatchSpec objects,
        #  - to minimize the version change, set MatchSpec(name=name, target=dist.full_name)
        #  - to freeze the package, set all the components of MatchSpec individually
        for pkg_name, spec in iteritems(specs_map):
            matches_for_spec = tuple(dist for dist in solution
                                     if spec.match(index[dist]))
            if matches_for_spec:
                assert len(matches_for_spec) == 1
                target_dist = matches_for_spec[0]
                if deps_modifier == DepsModifier.FREEZE_INSTALLED:
                    new_spec = MatchSpec(index[target_dist])
                else:
                    target = Dist(target_dist).full_name
                    new_spec = MatchSpec(spec, target=target)
                specs_map[pkg_name] = new_spec

        # If we're in UPDATE_ALL mode, we need to drop all the constraints attached to specs,
        # so they can all float and the solver can find the most up-to-date solution. In the case
        # of UPDATE_ALL, `specs_map` wasn't initialized with packages from the current environment,
        # but *only* historically-requested specs.  This lets UPDATE_ALL drop dependencies if
        # they're no longer needed, and their presence would otherwise prevent the updated solution
        # the user most likely wants.
        if deps_modifier == DepsModifier.UPDATE_ALL:
            specs_map = {
                pkg_name: MatchSpec(spec.name, optional=spec.optional)
                for pkg_name, spec in iteritems(specs_map)
            }

        # For the aggressive_update_packages configuration parameter, we strip any target
        # that's been set.
        if not context.offline:
            for spec in context.aggressive_update_packages:
                if spec.name in specs_map:
                    old_spec = specs_map[spec.name]
                    specs_map[spec.name] = MatchSpec(old_spec, target=None)
            if (context.auto_update_conda
                    and paths_equal(self.prefix, context.root_prefix)
                    and any(dist.name == "conda" for dist in solution)):
                specs_map["conda"] = MatchSpec("conda")

        # add in explicitly requested specs from specs_to_add
        # this overrides any name-matching spec already in the spec map
        specs_map.update((s.name, s) for s in specs_to_add)

        # collect additional specs to add to the solution
        track_features_specs = pinned_specs = ()
        if context.track_features:
            track_features_specs = tuple(
                MatchSpec(x + '@') for x in context.track_features)
        if not ignore_pinned:
            pinned_specs = get_pinned_specs(self.prefix)

        final_environment_specs = IndexedSet(
            concatv(
                itervalues(specs_map),
                track_features_specs,
                pinned_specs,
            ))

        # We've previously checked `solution` for consistency (which at that point was the
        # pre-solve state of the environment). Now we check our compiled set of
        # `final_environment_specs` for the possibility of a solution.  If there are conflicts,
        # we can often avoid them by neutering specs that have a target (e.g. removing the
        # version constraint) and also making them optional. The result here will be fewer cases of
        # `UnsatisfiableError` handed to users, at the cost of more packages being modified
        # or removed from the environment.
        conflicting_specs = r.get_conflicting_specs(final_environment_specs)
        for spec in conflicting_specs:
            if spec.target:
                final_environment_specs.remove(spec)
                neutered_spec = MatchSpec(spec.name,
                                          target=spec.target,
                                          optional=True)
                final_environment_specs.add(neutered_spec)

        # Finally! We get to call SAT.
        log.debug("final specs to add:\n    %s\n",
                  "\n    ".join(text_type(s) for s in final_environment_specs))
        pre_solution = solution
        solution = r.solve(
            final_environment_specs)  # return value is List[dist]

        # add back inconsistent packages to solution
        if add_back_map:
            for name, (dist, spec) in iteritems(add_back_map):
                if not any(d.name == name for d in solution):
                    solution.append(dist)
                    if spec:
                        final_environment_specs.add(spec)

        # Special case handling for various DepsModifier flags. Maybe this block could be pulled
        # out into its own non-public helper method?
        if deps_modifier == DepsModifier.NO_DEPS:
            # In the NO_DEPS case we're just filtering out packages from the solution.
            dont_add_packages = []
            new_packages = set(solution) - set(pre_solution)
            for dist in new_packages:
                if not any(spec.match(index[dist]) for spec in specs_to_add):
                    dont_add_packages.append(dist)
            solution = tuple(rec for rec in solution
                             if rec not in dont_add_packages)
        elif deps_modifier == DepsModifier.ONLY_DEPS:
            # Using a special instance of the DAG to remove leaf nodes that match the original
            # specs_to_add.  It's important to only remove leaf nodes, because a typical use
            # might be `conda install --only-deps python=2 flask`, and in that case we'd want
            # to keep python.
            dag = PrefixDag((index[d] for d in solution), specs_to_add)
            dag.remove_leaf_nodes_with_specs()
            solution = tuple(Dist(rec) for rec in dag.records)
        elif deps_modifier in (DepsModifier.UPDATE_DEPS,
                               DepsModifier.UPDATE_DEPS_ONLY_DEPS):
            # Here we have to SAT solve again :(  It's only now that we know the dependency
            # chain of specs_to_add.
            specs_to_add_names = set(spec.name for spec in specs_to_add)
            update_names = set()
            dag = PrefixDag((index[d] for d in solution),
                            final_environment_specs)
            for spec in specs_to_add:
                node = dag.get_node_by_name(spec.name)
                for ascendant in node.all_ascendants():
                    ascendant_name = ascendant.record.name
                    if ascendant_name not in specs_to_add_names:
                        update_names.add(ascendant_name)
            grouped_specs = groupby(lambda s: s.name in update_names,
                                    final_environment_specs)
            new_final_environment_specs = set(grouped_specs.get(False, ()))
            update_specs = set(
                MatchSpec(spec.name, optional=spec.optional)
                for spec in grouped_specs.get(True, ()))
            final_environment_specs = new_final_environment_specs | update_specs
            solution = r.solve(final_environment_specs)

            if deps_modifier == DepsModifier.UPDATE_DEPS_ONLY_DEPS:
                # duplicated from DepsModifier.ONLY_DEPS
                dag = PrefixDag((index[d] for d in solution), specs_to_add)
                dag.remove_leaf_nodes_with_specs()
                solution = tuple(Dist(rec) for rec in dag.records)

        if prune:
            dag = PrefixDag((index[d] for d in solution),
                            final_environment_specs)
            dag.prune()
            solution = tuple(Dist(rec) for rec in dag.records)

        self._check_solution(solution, pinned_specs)

        solution = IndexedSet(r.dependency_sort({d.name: d for d in solution}))
        log.debug("solved prefix %s\n"
                  "  solved_linked_dists:\n"
                  "    %s\n", self.prefix,
                  "\n    ".join(text_type(d) for d in solution))
        return IndexedSet(index[d] for d in solution)
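
The UPDATE_DEPS branches in both solve_final_state variants partition specs with a boolean
key function in a single pass. A small standalone sketch of that idiom (spec names invented);
note that an empty partition has no key at all, so .get with a default avoids a KeyError:

from toolz import groupby

specs = ['python', 'flask', 'jinja2', 'werkzeug']
update_names = {'jinja2', 'werkzeug'}

grouped = groupby(lambda name: name in update_names, specs)
keep = grouped.get(False, ())       # specs left untouched
to_update = grouped.get(True, ())   # specs relaxed to name-only
assert list(keep) == ['python', 'flask']
assert list(to_update) == ['jinja2', 'werkzeug']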
Code example #40
File: main_config.py  Project: zapnat/conda
def execute_config(args, parser):

    json_warnings = []
    json_get = {}

    if args.show_sources:
        if context.json:
            print(
                json.dumps(context.collect_all(),
                           sort_keys=True,
                           indent=2,
                           separators=(',', ': ')))
        else:
            lines = []
            for source, reprs in iteritems(context.collect_all()):
                lines.append("==> %s <==" % source)
                lines.extend(format_dict(reprs))
                lines.append('')
            print('\n'.join(lines))
        return

    if args.show is not None:
        if args.show:
            paramater_names = args.show
            all_names = context.list_parameters()
            not_params = set(paramater_names) - set(all_names)
            if not_params:
                from ..exceptions import ArgumentError
                from ..resolve import dashlist
                raise ArgumentError("Invalid configuration parameters: %s" %
                                    dashlist(not_params))
        else:
            paramater_names = context.list_parameters()

        from collections import OrderedDict

        d = OrderedDict(
            (key, getattr(context, key)) for key in paramater_names)
        if context.json:
            print(
                json.dumps(d,
                           sort_keys=True,
                           indent=2,
                           separators=(',', ': '),
                           cls=EntityEncoder))
        else:
            # coerce channels
            if 'custom_channels' in d:
                d['custom_channels'] = {
                    k: text_type(v).replace(
                        k,
                        '')  # TODO: the replace here isn't quite right  # NOQA
                    for k, v in iteritems(d['custom_channels'])
                }
            # TODO: custom_multichannels needs better formatting
            if 'custom_multichannels' in d:
                d['custom_multichannels'] = {
                    k: json.dumps([text_type(c) for c in chnls])
                    for k, chnls in iteritems(d['custom_multichannels'])
                }

            print('\n'.join(format_dict(d)))
        context.validate_configuration()
        return

    if args.describe is not None:
        if args.describe:
            paramater_names = args.describe
            all_names = context.list_parameters()
            not_params = set(paramater_names) - set(all_names)
            if not_params:
                from ..exceptions import ArgumentError
                from ..resolve import dashlist
                raise ArgumentError("Invalid configuration parameters: %s" %
                                    dashlist(not_params))
        else:
            paramater_names = context.list_parameters()
        if context.json:
            print(
                json.dumps([
                    context.describe_parameter(name)
                    for name in paramater_names
                ],
                           sort_keys=True,
                           indent=2,
                           separators=(',', ': '),
                           cls=EntityEncoder))
        else:
            print('\n'.join(
                concat(
                    parameter_description_builder(name)
                    for name in paramater_names)))
        return

    if args.validate:
        context.validate_all()
        return

    if args.system:
        rc_path = sys_rc_path
    elif args.env:
        if 'CONDA_PREFIX' in os.environ:
            rc_path = join(os.environ['CONDA_PREFIX'], '.condarc')
        else:
            rc_path = user_rc_path
    elif args.file:
        rc_path = args.file
    else:
        rc_path = user_rc_path

    if args.write_default:
        if isfile(rc_path):
            with open(rc_path) as fh:
                data = fh.read().strip()
            if data:
                raise CondaError(
                    "The file '%s' "
                    "already contains configuration information.\n"
                    "Remove the file to proceed.\n"
                    "Use `conda config --describe` to display default configuration."
                    % rc_path)

        with open(rc_path, 'w') as fh:
            paramater_names = context.list_parameters()
            fh.write('\n'.join(
                concat(
                    parameter_description_builder(name)
                    for name in paramater_names)))
        return

    # read existing condarc
    if os.path.exists(rc_path):
        with open(rc_path, 'r') as fh:
            rc_config = yaml_load(fh) or {}
    else:
        rc_config = {}

    grouped_paramaters = groupby(
        lambda p: context.describe_parameter(p)['parameter_type'],
        context.list_parameters())
    primitive_parameters = grouped_paramaters['primitive']
    sequence_parameters = grouped_paramaters['sequence']
    map_parameters = grouped_paramaters['map']

    # Get
    if args.get is not None:
        context.validate_all()
        if args.get == []:
            args.get = sorted(rc_config.keys())
        for key in args.get:
            if key not in primitive_parameters + sequence_parameters:
                message = "unknown key %s" % key
                if not context.json:
                    print(message, file=sys.stderr)
                else:
                    json_warnings.append(message)
                continue
            if key not in rc_config:
                continue

            if context.json:
                json_get[key] = rc_config[key]
                continue

            if isinstance(rc_config[key], (bool, string_types)):
                print("--set", key, rc_config[key])
            else:  # assume the key is a list-type
                # Note, since conda config --add prepends, these are printed in
                # the reverse order so that entering them in this order will
                # recreate the same file
                items = rc_config.get(key, [])
                numitems = len(items)
                for q, item in enumerate(reversed(items)):
                    # Use repr so that it can be pasted back in to conda config --add
                    if key == "channels" and q in (0, numitems - 1):
                        print("--add", key, repr(item),
                              "  # lowest priority" if q == 0 else "  # highest priority")
                    else:
                        print("--add", key, repr(item))

    if args.stdin:
        content = timeout(5, sys.stdin.read)
        if not content:
            return
        try:
            parsed = yaml_load(content)
            rc_config.update(parsed)
        except Exception:  # pragma: no cover
            from ..exceptions import ParseError
            raise ParseError("invalid yaml content:\n%s" % content)

    # prepend, append, add
    for arg, prepend in zip((args.prepend, args.append), (True, False)):
        for key, item in arg:
            if key == 'channels' and key not in rc_config:
                rc_config[key] = ['defaults']
            if key not in sequence_parameters:
                from ..exceptions import CondaValueError
                raise CondaValueError(
                    "Key '%s' is not a known sequence parameter." % key)
            if not isinstance(rc_config.get(key, []), list):
                from ..exceptions import CouldntParseError
                bad = rc_config[key].__class__.__name__
                raise CouldntParseError("key %r should be a list, not %s." %
                                        (key, bad))
            arglist = rc_config.setdefault(key, [])
            if item in arglist:
                # Right now, all list keys should not contain duplicates
                message = "Warning: '%s' already in '%s' list, moving to the %s" % (
                    item, key, "top" if prepend else "bottom")
                arglist = rc_config[key] = [p for p in arglist if p != item]
                if not context.json:
                    print(message, file=sys.stderr)
                else:
                    json_warnings.append(message)
            arglist.insert(0 if prepend else len(arglist), item)

    # Set
    for key, item in args.set:
        key, subkey = key.split('.', 1) if '.' in key else (key, None)
        if key in primitive_parameters:
            value = context.typify_parameter(key, item)
            rc_config[key] = value
        elif key in map_parameters:
            argmap = rc_config.setdefault(key, {})
            argmap[subkey] = item
        else:
            from ..exceptions import CondaValueError
            raise CondaValueError(
                "Key '%s' is not a known primitive parameter." % key)

    # Remove
    for key, item in args.remove:
        key, subkey = key.split('.', 1) if '.' in key else (key, None)
        if key not in rc_config:
            if key != 'channels':
                from ..exceptions import CondaKeyError
                raise CondaKeyError(key,
                                    "key %r is not in the config file" % key)
            rc_config[key] = ['defaults']
        if item not in rc_config[key]:
            from ..exceptions import CondaKeyError
            raise CondaKeyError(
                key,
                "%r is not in the %r key of the config file" % (item, key))
        rc_config[key] = [i for i in rc_config[key] if i != item]

    # Remove Key
    for key, in args.remove_key:
        key, subkey = key.split('.', 1) if '.' in key else (key, None)
        if key not in rc_config:
            from ..exceptions import CondaKeyError
            raise CondaKeyError(key, "key %r is not in the config file" % key)
        del rc_config[key]

    # config.rc_keys
    if not args.get:
        try:
            with open(rc_path, 'w') as rc:
                rc.write(yaml_dump(rc_config))
        except (IOError, OSError) as e:
            raise CondaError('Cannot write to condarc file at %s\n'
                             'Caused by %r' % (rc_path, e))

    if context.json:
        from .common import stdout_json_success
        stdout_json_success(rc_path=rc_path,
                            warnings=json_warnings,
                            get=json_get)
    return
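
The prepend/append branch above implements "insert at one end, moving any existing
occurrence there". A minimal sketch of just that list manipulation; the function name
is ours, not conda's:

def move_or_insert(arglist, item, prepend=True):
    # drop any existing occurrence, then insert at the requested end
    arglist = [p for p in arglist if p != item]
    arglist.insert(0 if prepend else len(arglist), item)
    return arglist

channels = ['defaults', 'conda-forge']
assert move_or_insert(channels, 'conda-forge') == ['conda-forge', 'defaults']
assert move_or_insert(channels, 'bioconda', prepend=False) == ['defaults', 'conda-forge', 'bioconda']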
Code example #41
File: test_itertoolz.py  Project: aitatanit/cytoolz
def test_groupby():
    assert groupby(iseven, [1, 2, 3, 4]) == {True: [2, 4], False: [1, 3]}
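
The iseven helper is defined elsewhere in the cytoolz test suite; a self-contained
equivalent, assuming the usual parity definition:

from cytoolz import groupby

def iseven(n):
    return n % 2 == 0

assert groupby(iseven, [1, 2, 3, 4]) == {True: [2, 4], False: [1, 3]}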
Code example #42
File: wikivote_lp.py  Project: vlukiyanov/pt-splitter
print(
    roc_auc_score(
        [1] * len(positive_samples) + [0] * len(negative_samples),
        positive_scores_non_persona + negative_scores_non_persona,
    )
)

print("Constructing persona graph.")
PG = persona_graph(G_original)

print("Constructing lookups.")
forward_persona, reverse_persona = lookup_tables(PG)
forward, reverse = lookup_tables(G)

groups = groupby(operator.attrgetter("node"), PG.nodes())

positive_scores_persona = [
    excepts(ValueError, max, lambda _: 0.0)(
        iter_get_scores_networkx(groups, node1, node2, PG, jaccard_coefficient)
    )
    for (node1, node2) in positive_samples
]
negative_scores_persona = [
    excepts(ValueError, max, lambda _: 0.0)(
        iter_get_scores_networkx(groups, node1, node2, PG, jaccard_coefficient)
    )
    for (node1, node2) in negative_samples
]
print(sum(positive_scores_persona))
print(sum(negative_scores_persona))
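
Here groupby(operator.attrgetter("node"), PG.nodes()) collects the persona copies of each
original node. A minimal sketch with a stand-in node type; the real nodes come from
persona_graph(), so the namedtuple below is only illustrative:

import operator
from collections import namedtuple
from toolz import groupby

PersonaNode = namedtuple('PersonaNode', ['node', 'index'])  # stand-in persona node

personas = [PersonaNode('a', 0), PersonaNode('a', 1), PersonaNode('b', 0)]
groups = groupby(operator.attrgetter('node'), personas)
assert len(groups['a']) == 2 and len(groups['b']) == 1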
Code example #43
File: index.py  Project: groutr/conda
def get_reduced_index(prefix, channels, subdirs, specs):

    # # this block of code is a "combine" step intended to filter out redundant specs
    # # causes a problem with py.test tests/core/test_solve.py -k broken_install
    # specs_map = defaultdict(list)
    # for spec in specs:
    #     specs_map[spec.name].append(spec)
    # consolidated_specs = set()
    # for spec_name, specs_group in iteritems(specs_map):
    #     if len(specs_group) == 1:
    #         consolidated_specs.add(specs_group[0])
    #     elif spec_name == '*':
    #         consolidated_specs.update(specs_group)
    #     else:
    #         keep_specs = []
    #         for spec in specs_group:
    #             if len(spec._match_components) > 1 or spec.target or spec.optional:
    #                 keep_specs.append(spec)
    #         consolidated_specs.update(keep_specs)

    with ThreadLimitedThreadPoolExecutor() as executor:

        channel_urls = all_channel_urls(channels, subdirs=subdirs)
        check_whitelist(channel_urls)

        if context.offline:
            grouped_urls = groupby(lambda url: url.startswith('file://'), channel_urls)
            ignored_urls = grouped_urls.get(False, ())
            if ignored_urls:
                log.info("Ignoring the following channel urls because mode is offline.%s",
                         dashlist(ignored_urls))
            channel_urls = IndexedSet(grouped_urls.get(True, ()))
        subdir_datas = tuple(SubdirData(Channel(url)) for url in channel_urls)

        records = IndexedSet()
        collected_names = set()
        collected_track_features = set()
        pending_names = set()
        pending_track_features = set()

        def query_all(spec):
            futures = tuple(executor.submit(sd.query, spec) for sd in subdir_datas)
            return tuple(concat(future.result() for future in as_completed(futures)))

        def push_spec(spec):
            name = spec.get_raw_value('name')
            if name and name not in collected_names:
                pending_names.add(name)
            track_features = spec.get_raw_value('track_features')
            if track_features:
                for ftr_name in track_features:
                    if ftr_name not in collected_track_features:
                        pending_track_features.add(ftr_name)

        def push_record(record):
            push_spec(MatchSpec(record.name))
            for _spec in record.combined_depends:
                push_spec(_spec)
            if record.track_features:
                for ftr_name in record.track_features:
                    push_spec(MatchSpec(track_features=ftr_name))

        if prefix:
            for prefix_rec in PrefixData(prefix).iter_records():
                push_record(prefix_rec)
        for spec in specs:
            push_spec(spec)

        while pending_names or pending_track_features:
            while pending_names:
                name = pending_names.pop()
                collected_names.add(name)
                spec = MatchSpec(name)
                new_records = query_all(spec)
                for record in new_records:
                    push_record(record)
                records.update(new_records)

            while pending_track_features:
                feature_name = pending_track_features.pop()
                collected_track_features.add(feature_name)
                spec = MatchSpec(track_features=feature_name)
                new_records = query_all(spec)
                for record in new_records:
                    push_record(record)
                records.update(new_records)

        reduced_index = {rec: rec for rec in records}

        if prefix is not None:
            _supplement_index_with_prefix(reduced_index, prefix)

        if context.offline or ('unknown' in context._argparse_args
                               and context._argparse_args.unknown):
            # This is really messed up right now.  Dates all the way back to
            # https://github.com/conda/conda/commit/f761f65a82b739562a0d997a2570e2b8a0bdc783
            # TODO: revisit this later
            _supplement_index_with_cache(reduced_index)

        # add feature records for the solver
        known_features = set()
        for rec in itervalues(reduced_index):
            known_features.update(concatv(rec.track_features, rec.features))
        known_features.update(context.track_features)
        for ftr_str in known_features:
            rec = make_feature_record(ftr_str)
            reduced_index[rec] = rec

        return reduced_index
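
The offline branch above partitions channel URLs by scheme, keeping only local file://
channels. A standalone sketch of that step with invented URLs:

from toolz import groupby

channel_urls = [
    'file:///opt/local-channel/noarch',
    'https://repo.anaconda.com/pkgs/main/noarch',
]

grouped = groupby(lambda url: url.startswith('file://'), channel_urls)
usable_offline = grouped.get(True, ())   # local file:// channels
ignored = grouped.get(False, ())         # remote channels, dropped when offline
assert list(usable_offline) == ['file:///opt/local-channel/noarch']
assert list(ignored) == ['https://repo.anaconda.com/pkgs/main/noarch']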
Code example #44
File: solve.py  Project: dkoppstein/conda
def get_install_transaction(prefix,
                            index,
                            spec_strs,
                            force=False,
                            only_names=None,
                            always_copy=False,
                            pinned=True,
                            minimal_hint=False,
                            update_deps=True,
                            prune=False,
                            channel_priority_map=None,
                            is_update=False):
    # type: (str, Dict[Dist, Record], List[str], bool, Optional[List[str]], bool, bool, bool,
    #        bool, bool, bool, Dict[str, Sequence[str, int]]) -> List[Dict[weird]]

    # split out specs into potentially multiple preferred envs if:
    #  1. the user default env (root_prefix) is the prefix being considered here
    #  2. the user has not specified the --name or --prefix command-line flags
    if (prefix == context.root_prefix and not context.prefix_specified
            and prefix_is_writable(prefix) and context.enable_private_envs):

        # a registered package CANNOT be installed in the root env
        # if ANY package requesting a private env is required in the root env, all packages for
        #   that requested env must instead be installed in the root env

        root_r = get_resolve_object(index.copy(), context.root_prefix)

        def get_env_for_spec(spec):
            # use resolve's get_dists_for_spec() to find the "best" matching record
            record_for_spec = root_r.index[root_r.get_dists_for_spec(
                spec, emptyok=False)[-1]]
            return ensure_pad(record_for_spec.preferred_env)

        # specs grouped by target env, the 'None' key holds the specs for the root env
        env_add_map = groupby(get_env_for_spec,
                              (MatchSpec(s) for s in spec_strs))
        requested_root_specs_to_add = {s for s in env_add_map.pop(None, ())}

        ed = EnvsDirectory(join(context.root_prefix, 'envs'))
        registered_packages = ed.get_registered_packages_keyed_on_env_name()

        if len(env_add_map) == len(registered_packages) == 0:
            # short-circuit the rest of this logic
            return get_install_transaction_single(prefix, index, spec_strs,
                                                  force, only_names,
                                                  always_copy, pinned,
                                                  minimal_hint, update_deps,
                                                  prune, channel_priority_map,
                                                  is_update)

        root_specs_to_remove = set(
            MatchSpec(s.name) for s in concat(itervalues(env_add_map)))
        required_root_dists, _ = solve_prefix(
            context.root_prefix,
            root_r,
            specs_to_remove=root_specs_to_remove,
            specs_to_add=requested_root_specs_to_add,
            prune=True)

        required_root_package_names = tuple(d.name
                                            for d in required_root_dists)

        # first handle pulling back requested specs to root
        forced_root_specs_to_add = set()
        pruned_env_add_map = defaultdict(list)
        for env_name, specs in iteritems(env_add_map):
            for spec in specs:
                spec_name = MatchSpec(spec).name
                if spec_name in required_root_package_names:
                    forced_root_specs_to_add.add(spec)
                else:
                    pruned_env_add_map[env_name].append(spec)
        env_add_map = pruned_env_add_map

        # second handle pulling back registered specs to root
        env_remove_map = defaultdict(list)
        for env_name, registered_package_entries in iteritems(
                registered_packages):
            for rpe in registered_package_entries:
                if rpe['package_name'] in required_root_package_names:
                    # ANY registered packages in this environment need to be pulled back
                    for pe in registered_package_entries:
                        # add an entry in env_remove_map
                        # add an entry in forced_root_specs_to_add
                        pname = pe['package_name']
                        env_remove_map[env_name].append(MatchSpec(pname))
                        forced_root_specs_to_add.add(
                            MatchSpec(pe['requested_spec']))
                break

        unlink_link_map = odict()

        # solve all needed preferred_env prefixes
        for env_name in set(concatv(env_add_map, env_remove_map)):
            specs_to_add = env_add_map[env_name]
            spec_to_remove = env_remove_map[env_name]
            pfx = ed.preferred_env_to_prefix(env_name)
            unlink, link = solve_for_actions(pfx,
                                             get_resolve_object(
                                                 index.copy(), pfx),
                                             specs_to_remove=spec_to_remove,
                                             specs_to_add=specs_to_add,
                                             prune=True)
            unlink_link_map[env_name] = unlink, link, specs_to_add

        # now solve root prefix
        # we have to solve root a second time in all cases, because this time we don't prune
        root_specs_to_add = set(
            concatv(requested_root_specs_to_add, forced_root_specs_to_add))
        root_unlink, root_link = solve_for_actions(
            context.root_prefix,
            root_r,
            specs_to_remove=root_specs_to_remove,
            specs_to_add=root_specs_to_add)
        if root_unlink or root_link:
            # this needs to be added to odict last; the private envs need to be updated first
            unlink_link_map[None] = root_unlink, root_link, root_specs_to_add

        def make_txn_setup(pfx, unlink, link, specs):
            # TODO: this index here is probably wrong; needs to be per-prefix
            return PrefixSetup(index, pfx, unlink, link, 'INSTALL',
                               tuple(s.spec for s in specs))

        txn_args = tuple(
            make_txn_setup(ed.to_prefix(ensure_pad(env_name)), *actions)
            for env_name, actions in iteritems(unlink_link_map))
        txn = UnlinkLinkTransaction(*txn_args)
        return txn

    else:
        # disregard any requested preferred env
        return get_install_transaction_single(prefix, index, spec_strs, force,
                                              only_names, always_copy, pinned,
                                              minimal_hint, update_deps, prune,
                                              channel_priority_map, is_update)
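
The "first handle pulling back requested specs to root" loop above is the heart of this multi-prefix planning step. Below is a minimal standalone sketch of the same partition, using plain package-name strings in place of MatchSpec objects; the function and variable names here are hypothetical, not part of conda:

from collections import defaultdict

def pull_back_to_root(env_add_map, required_root_names):
    # Promote any spec whose name the root solve requires; everything else
    # stays with its private environment.
    forced_root_specs = set()
    pruned_env_add_map = defaultdict(list)
    for env_name, names in env_add_map.items():
        for name in names:
            if name in required_root_names:
                forced_root_specs.add(name)
            else:
                pruned_env_add_map[env_name].append(name)
    return forced_root_specs, dict(pruned_env_add_map)

# pull_back_to_root({'_flask_': ['conda', 'flask']}, {'conda'})
# -> ({'conda'}, {'_flask_': ['flask']})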
Code example #45
File: test_itertoolz.py Project: adamchainz/cytoolz
from cytoolz import groupby

def iseven(x):
    # predicate used as the groupby key
    return x % 2 == 0

def test_groupby():
    assert groupby(iseven, [1, 2, 3, 4]) == {True: [2, 4], False: [1, 3]}
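
cytoolz's groupby builds the whole mapping in a single pass over the sequence. A pure-Python equivalent, shown only to illustrate the contract this test checks (the helper name is made up):

def groupby_equivalent(key, seq):
    # Append each item to the bucket selected by key(item).
    groups = {}
    for item in seq:
        groups.setdefault(key(item), []).append(item)
    return groups

assert groupby_equivalent(lambda x: x % 2 == 0, [1, 2, 3, 4]) == {True: [2, 4], False: [1, 3]}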
Code example #46
File: solve.py Project: starcruiseromega/conda
    def solve_final_state(self, update_modifier=NULL, deps_modifier=NULL, prune=NULL,
                          ignore_pinned=NULL, force_remove=NULL):
        """Gives the final, solved state of the environment.

        Args:
            update_modifier (UpdateModifier):
                An optional flag directing how updates are handled regarding packages already
                existing in the environment.

            deps_modifier (DepsModifier):
                An optional flag indicating special solver handling for dependencies. The
                default solver behavior is to be as conservative as possible with dependency
                updates (when the dependency already exists in the environment), while
                still ensuring all dependencies are satisfied.  Options include
                    * NO_DEPS
                    * ONLY_DEPS
                    * UPDATE_DEPS
                    * UPDATE_DEPS_ONLY_DEPS
                    * FREEZE_INSTALLED
            prune (bool):
                If ``True``, the solution will not contain packages that were
                previously brought into the environment as dependencies but are no longer
                required as dependencies and are not user-requested.
            ignore_pinned (bool):
                If ``True``, the solution will ignore pinned package configuration
                for the prefix.
            force_remove (bool):
                Forces removal of a package without removing packages that depend on it.

        Returns:
            Tuple[PackageRef]:
                In sorted dependency order from roots to leaves, the package references for
                the solved state of the environment.

        """
        if update_modifier is NULL:
            update_modifier = context.update_modifier
        else:
            update_modifier = UpdateModifier(text_type(update_modifier).lower())
        if deps_modifier is NULL:
            deps_modifier = context.deps_modifier
        else:
            deps_modifier = DepsModifier(text_type(deps_modifier).lower())
        prune = context.prune if prune is NULL else prune
        ignore_pinned = context.ignore_pinned if ignore_pinned is NULL else ignore_pinned
        force_remove = context.force_remove if force_remove is NULL else force_remove
        specs_to_remove = self.specs_to_remove
        specs_to_add = self.specs_to_add

        # force_remove is a special case where we return early
        if specs_to_remove and force_remove:
            if specs_to_add:
                raise NotImplementedError()
            solution = tuple(prec for prec in PrefixData(self.prefix).iter_records()
                             if not any(spec.match(prec) for spec in specs_to_remove))
            return IndexedSet(PrefixGraph(solution).graph)

        log.debug("solving prefix %s\n"
                  "  specs_to_remove: %s\n"
                  "  specs_to_add: %s\n"
                  "  prune: %s", self.prefix, specs_to_remove, specs_to_add, prune)

        # declare starting point, the initial state of the environment
        # `solution` and `specs_map` are mutated throughout this method
        prefix_data = PrefixData(self.prefix)
        solution = tuple(prec for prec in prefix_data.iter_records())

        # Check if specs are satisfied by current environment. If they are, exit early.
        if (update_modifier == UpdateModifier.SPECS_SATISFIED_SKIP_SOLVE
                and not specs_to_remove and not prune):
            for spec in specs_to_add:
                if not next(prefix_data.query(spec), None):
                    break
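            # (for/else: the else clause below runs only when the loop completes
            # without hitting `break`, i.e. when every requested spec matched)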
            else:
                # All specs match a package in the current environment.
                # Return early, with a solution that should just be PrefixData().iter_records()
                return IndexedSet(PrefixGraph(solution).graph)

        specs_from_history_map = History(self.prefix).get_requested_specs_map()
        if prune:  # or update_modifier == UpdateModifier.UPDATE_ALL  # pending conda/constructor#138  # NOQA
            # Users are struggling with the prune functionality in --update-all, due to
            # https://github.com/conda/constructor/issues/138.  Until that issue is resolved,
            # and for the foreseeable future, it's best to be more conservative with --update-all.

            # Start with empty specs map for UPDATE_ALL because we're optimizing the update
            # only for specs the user has requested; it's ok to remove dependencies.
            specs_map = odict()

            # However, because of https://github.com/conda/constructor/issues/138, we need
            # to hard-code keeping conda, conda-build, and anaconda, if they're already in
            # the environment.
            solution_pkg_names = set(d.name for d in solution)
            ensure_these = (pkg_name for pkg_name in {
                'anaconda', 'conda', 'conda-build',
            } if pkg_name not in specs_from_history_map and pkg_name in solution_pkg_names)
            for pkg_name in ensure_these:
                specs_from_history_map[pkg_name] = MatchSpec(pkg_name)
        else:
            specs_map = odict((d.name, MatchSpec(d.name)) for d in solution)

        # add in historically-requested specs
        specs_map.update(specs_from_history_map)

        # let's pretend for now that this is the right place to build the index
        prepared_specs = set(concatv(
            specs_to_remove,
            specs_to_add,
            itervalues(specs_from_history_map),
        ))

        index, r = self._prepare(prepared_specs)

        if specs_to_remove:
            # In a previous implementation, we invoked SAT here via `r.remove()` to help with
            # spec removal, and then later invoked SAT again via `r.solve()`. Rather than
            # invoking SAT to determine what to remove, we can use the PrefixGraph and a simple
            # tree traversal, as long as we're careful about how we handle features. We still
            # invoke SAT via `r.solve()` later.
            _track_fts_specs = (spec for spec in specs_to_remove if 'track_features' in spec)
            feature_names = set(concat(spec.get_raw_value('track_features')
                                       for spec in _track_fts_specs))
            graph = PrefixGraph(solution, itervalues(specs_map))

            all_removed_records = []
            no_removed_records_specs = []
            for spec in specs_to_remove:
                # If the spec was a track_features spec, then we need to also remove every
                # package with a feature that matches the track_feature. The
                # `graph.remove_spec()` method handles that for us.
                log.trace("using PrefixGraph to remove records for %s", spec)
                removed_records = graph.remove_spec(spec)
                if removed_records:
                    all_removed_records.extend(removed_records)
                else:
                    no_removed_records_specs.append(spec)

            # ensure that each spec in specs_to_remove is actually associated with removed records
            unmatched_specs_to_remove = tuple(
                spec for spec in no_removed_records_specs
                if not any(spec.match(rec) for rec in all_removed_records)
            )
            if unmatched_specs_to_remove:
                raise PackagesNotFoundError(
                    tuple(sorted(str(s) for s in unmatched_specs_to_remove))
                )

            for rec in all_removed_records:
                # For removed records that matched a tracked feature and appear in the history
                # specs, keep the spec but strip its 'features' component.  Otherwise, pop the
                # record's name from specs_map.
                rec_has_a_feature = set(rec.features or ()) & feature_names
                if rec_has_a_feature and rec.name in specs_from_history_map:
                    spec = specs_map.get(rec.name, MatchSpec(rec.name))
                    spec._match_components.pop('features', None)
                    specs_map[spec.name] = spec
                else:
                    specs_map.pop(rec.name, None)

            solution = tuple(graph.graph)

        # We handle as best as possible environments in inconsistent states. To do this,
        # we remove now from consideration the set of packages causing inconsistencies,
        # and then we add them back in following the main SAT call.
        _, inconsistent_precs = r.bad_installed(solution, ())
        add_back_map = {}  # name: (prec, spec)
        if log.isEnabledFor(DEBUG):
            log.debug("inconsistent precs: %s",
                      dashlist(inconsistent_precs) if inconsistent_precs else 'None')
        if inconsistent_precs:
            for prec in inconsistent_precs:
                # pop and save matching spec in specs_map
                add_back_map[prec.name] = (prec, specs_map.pop(prec.name, None))
            solution = tuple(prec for prec in solution if prec not in inconsistent_precs)

        # For the remaining specs in specs_map, add target to each spec. `target` is a reference
        # to the package currently existing in the environment. Setting target instructs the
        # solver to not disturb that package if it's not necessary.
        # If the spec.name is being modified by inclusion in specs_to_add, we don't set `target`,
        # since we *want* the solver to modify/update that package.
        #
        # TLDR: when working with MatchSpec objects,
        #  - to minimize the version change, set MatchSpec(name=name, target=prec.dist_str())
        #  - to freeze the package, set all the components of MatchSpec individually
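        #  Illustrative only (the dist string here is made up):
        #    MatchSpec('numpy', target='defaults::numpy-1.11.3-py36_0')  # minimize change
        #    MatchSpec(installed_numpy_prec)                             # freeze the package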
        for pkg_name, spec in iteritems(specs_map):
            matches_for_spec = tuple(prec for prec in solution if spec.match(prec))
            if matches_for_spec:
                if len(matches_for_spec) != 1:
                    raise CondaError(dals("""
                    Conda encountered an error with your environment.  Please report an issue
                    at https://github.com/conda/conda/issues/new.  In your report, please include
                    the output of 'conda info' and 'conda list' for the active environment, along
                    with the command you invoked that resulted in this error.
                      pkg_name: %s
                      spec: %s
                      matches_for_spec: %s
                    """) % (pkg_name, spec,
                            dashlist((text_type(s) for s in matches_for_spec), indent=4)))
                target_prec = matches_for_spec[0]
                if update_modifier == UpdateModifier.FREEZE_INSTALLED:
                    new_spec = MatchSpec(target_prec)
                else:
                    target = target_prec.dist_str()
                    new_spec = MatchSpec(spec, target=target)
                specs_map[pkg_name] = new_spec
        if log.isEnabledFor(TRACE):
            log.trace("specs_map with targets: %s", specs_map)

        # If we're in UPDATE_ALL mode, we need to drop all the constraints attached to specs,
        # so they can all float and the solver can find the most up-to-date solution. In the case
        # of UPDATE_ALL, `specs_map` wasn't initialized with packages from the current environment,
        # but *only* historically-requested specs.  This lets UPDATE_ALL drop dependencies if
        # they're no longer needed, and their presence would otherwise prevent the updated solution
        # the user most likely wants.
        if update_modifier == UpdateModifier.UPDATE_ALL:
            specs_map = {pkg_name: MatchSpec(spec.name, optional=spec.optional)
                         for pkg_name, spec in iteritems(specs_map)}

        # As a business rule, we never want to update python beyond the current minor version,
        # unless that's requested explicitly by the user (which we actively discourage).
        if 'python' in specs_map:
            python_prefix_rec = prefix_data.get('python')
            if python_prefix_rec:
                python_spec = specs_map['python']
                if not python_spec.get('version'):
                    pinned_version = get_major_minor_version(python_prefix_rec.version) + '.*'
                    specs_map['python'] = MatchSpec(python_spec, version=pinned_version)
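                    # e.g. an installed python 3.6.5 pins the spec to version '3.6.*'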

        # For the aggressive_update_packages configuration parameter, we strip any target
        # that's been set.
        if not context.offline:
            for spec in context.aggressive_update_packages:
                if spec.name in specs_map:
                    specs_map[spec.name] = spec

        # add in explicitly requested specs from specs_to_add
        # this overrides any name-matching spec already in the spec map
        specs_map.update((s.name, s) for s in specs_to_add)

        # collect additional specs to add to the solution
        track_features_specs = pinned_specs = ()
        if context.track_features:
            track_features_specs = tuple(MatchSpec(x + '@') for x in context.track_features)
        if not ignore_pinned:
            pinned_specs = get_pinned_specs(self.prefix)

        # As a business rule, we never want to downgrade conda below the current version,
        # unless that's requested explicitly by the user (which we actively discourage).
        if 'conda' in specs_map and paths_equal(self.prefix, context.conda_prefix):
            conda_prefix_rec = prefix_data.get('conda')
            if conda_prefix_rec:
                conda_spec = specs_map['conda']
                conda_in_specs_to_add_version = next(
                    (spec.get('version') for spec in specs_to_add if spec.name == "conda"), None
                )
                if not conda_in_specs_to_add_version:
                    conda_spec = MatchSpec(conda_spec, version=">=%s" % conda_prefix_rec.version)
                if context.auto_update_conda:
                    conda_spec = MatchSpec(conda_spec, target=None)
                specs_map['conda'] = conda_spec

        final_environment_specs = IndexedSet(concatv(
            itervalues(specs_map),
            track_features_specs,
            pinned_specs,
        ))

        # We've previously checked `solution` for consistency (which at that point was the
        # pre-solve state of the environment). Now we check our compiled set of
        # `final_environment_specs` for the possibility of a solution.  If there are conflicts,
        # we can often avoid them by neutering specs that have a target (e.g. removing the
        # version constraint) and also making them optional. The result is fewer cases of
        # `UnsatisfiableError` handed to users, at the cost of more packages being modified
        # or removed from the environment.
        conflicting_specs = r.get_conflicting_specs(tuple(final_environment_specs))
        if log.isEnabledFor(DEBUG):
            log.debug("conflicting specs: %s", dashlist(conflicting_specs))
        for spec in conflicting_specs:
            if spec.target:
                final_environment_specs.remove(spec)
                neutered_spec = MatchSpec(spec.name, target=spec.target, optional=True)
                final_environment_specs.add(neutered_spec)
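                # e.g. a conflicting 'scipy >=1.0' spec that carries a target is relaxed
                # to an optional, unversioned 'scipy' spec (illustrative values)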

        # Finally! We get to call SAT.
        if log.isEnabledFor(DEBUG):
            log.debug("final specs to add: %s",
                      dashlist(sorted(text_type(s) for s in final_environment_specs)))
        solution = r.solve(tuple(final_environment_specs))  # return value is List[PackageRecord]

        # add back inconsistent packages to solution
        if add_back_map:
            for name, (prec, spec) in iteritems(add_back_map):
                if not any(d.name == name for d in solution):
                    solution.append(prec)
                    if spec:
                        final_environment_specs.add(spec)

        # Special case handling for various DepsModifier flags. Maybe this block could be pulled
        # out into its own non-public helper method?
        if deps_modifier == DepsModifier.NO_DEPS:
            # In the NO_DEPS case, we need to start with the original list of packages in the
            # environment, and then only modify packages that match specs_to_add or
            # specs_to_remove.
            _no_deps_solution = IndexedSet(prefix_data.iter_records())
            only_remove_these = set(prec
                                    for spec in specs_to_remove
                                    for prec in _no_deps_solution
                                    if spec.match(prec))
            _no_deps_solution -= only_remove_these

            only_add_these = set(prec
                                 for spec in specs_to_add
                                 for prec in solution
                                 if spec.match(prec))
            remove_before_adding_back = set(prec.name for prec in only_add_these)
            _no_deps_solution = IndexedSet(prec for prec in _no_deps_solution
                                           if prec.name not in remove_before_adding_back)
            _no_deps_solution |= only_add_these
            solution = _no_deps_solution
        elif (deps_modifier == DepsModifier.ONLY_DEPS
                and update_modifier != UpdateModifier.UPDATE_DEPS):
            # Using a special instance of PrefixGraph to remove youngest child nodes that match
            # the original specs_to_add.  It's important to remove only the *youngest* child nodes,
            # because a typical use might be `conda install --only-deps python=2 flask`, and in
            # that case we'd want to keep python.
            graph = PrefixGraph(solution, specs_to_add)
            graph.remove_youngest_descendant_nodes_with_specs()
            solution = tuple(graph.graph)

        elif update_modifier == UpdateModifier.UPDATE_DEPS:
            # Here we have to SAT solve again :(  It's only now that we know the dependency
            # chain of specs_to_add.
            specs_to_add_names = set(spec.name for spec in specs_to_add)
            update_names = set()
            graph = PrefixGraph(solution, final_environment_specs)
            for spec in specs_to_add:
                node = graph.get_node_by_name(spec.name)
                for ancestor_record in graph.all_ancestors(node):
                    ancestor_name = ancestor_record.name
                    if ancestor_name not in specs_to_add_names:
                        update_names.add(ancestor_name)
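            # Partition on membership in update_names; groupby yields a dict like
            # {True: [specs to relax for update], False: [specs to keep as-is]}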
            grouped_specs = groupby(lambda s: s.name in update_names, final_environment_specs)
            new_final_environment_specs = set(grouped_specs.get(False, ()))
            update_specs = set(MatchSpec(spec.name, optional=spec.optional)
                               for spec in grouped_specs.get(True, ()))
            final_environment_specs = new_final_environment_specs | update_specs
            solution = r.solve(final_environment_specs)

            if deps_modifier == DepsModifier.ONLY_DEPS:
                # duplicated from DepsModifier.ONLY_DEPS
                graph = PrefixGraph(solution, specs_to_add)
                graph.remove_youngest_descendant_nodes_with_specs()
                solution = tuple(graph.graph)

        if prune:
            graph = PrefixGraph(solution, final_environment_specs)
            graph.prune()
            solution = tuple(graph.graph)

        self._check_solution(solution, pinned_specs)
        time_recorder.log_totals()

        solution = IndexedSet(PrefixGraph(solution).graph)
        log.debug("solved prefix %s\n"
                  "  solved_linked_dists:\n"
                  "    %s\n",
                  self.prefix, "\n    ".join(prec.dist_str() for prec in solution))
        return solution
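
For context, a minimal sketch of driving solve_final_state end to end, assuming the conda 4.4-era conda.core.solve.Solver constructor; the prefix path, channels, and spec below are placeholders, not values from this project:

from conda.core.solve import Solver

# Placeholders -- point these at a real prefix and channel set before running.
solver = Solver('/opt/envs/demo', channels=('defaults',),
                subdirs=('linux-64', 'noarch'), specs_to_add=('flask',))
final_state = solver.solve_final_state()  # package records, roots to leaves
for prec in final_state:
    print(prec.dist_str())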