Example #1
0
def add_intradependencies(graph):
    """Ensure that downstream packages wait for upstream build/test (not use existing
    available packages).

    For each node with recipe metadata, collect its build/host/run/test
    dependencies; whenever a dependency is produced by another node in
    *graph* (with compatible variant settings), add an edge so the upstream
    package is built first.
    """
    for node in graph.nodes():
        # Skip nodes that carry no recipe metadata — they have no deps to resolve.
        if 'meta' not in graph.node[node]:
            continue
        # get build dependencies
        m = graph.node[node]['meta']
        # this is pretty hard. Realistically, we would want to know
        # what the build and host platforms are on the build machine.
        # However, all we know right now is what machine we're actually
        # on (the one calculating the graph).
        deps = set(m.ms_depends('build') + m.ms_depends('host') + m.ms_depends('run') +
                   [conda_interface.MatchSpec(dep) for dep in
                    ensure_list((m.meta.get('test') or {}).get('requires'))])

        for dep in deps:
            # BUG FIX: guard nodes that lack 'meta' — the outer loop shows such
            # nodes can exist, and the unguarded lookup raised KeyError here.
            name_matches = (n for n in graph.nodes()
                            if 'meta' in graph.node[n] and
                            graph.node[n]['meta'].name() == dep.name)
            for matching_node in name_matches:
                # are any of these build dependencies also nodes in our graph?
                match_meta = graph.node[matching_node]['meta']
                if (match_peer_job(conda_interface.MatchSpec(dep), match_meta, m) and
                        (node, matching_node) not in graph.edges()):
                    # inside if statement because getting used vars is expensive
                    shared_vars = set(match_meta.get_used_vars()) & set(m.get_used_vars())
                    # all vars in variant that they both use must line up
                    if all(match_meta.config.variant[v] == m.config.variant[v]
                           for v in shared_vars):
                        # add edges if they don't already exist
                        graph.add_edge(node, matching_node)
Example #2
0
def add_intradependencies(graph):
    """Ensure that downstream packages wait for upstream build/test (not use existing
    available packages).

    For each node with recipe metadata, collect its build/host/run/test
    dependencies; whenever a dependency is produced by another node in
    *graph*, add an edge so the upstream package is built first.
    """
    for node in graph.nodes():
        # Skip nodes that carry no recipe metadata — nothing to resolve.
        if 'meta' not in graph.node[node]:
            continue
        # get build dependencies
        m = graph.node[node]['meta']
        # this is pretty hard. Realistically, we would want to know
        # what the build and host platforms are on the build machine.
        # However, all we know right now is what machine we're actually
        # on (the one calculating the graph).
        # BUG FIX: a recipe may contain an explicit `test: null` or
        # `requires: null`; `m.meta.get('test', {}).get('requires', [])`
        # raised AttributeError/TypeError in those cases because .get only
        # falls back when the key is absent, not when its value is None.
        test_requires = (m.meta.get('test') or {}).get('requires') or []
        deps = set(
            m.ms_depends('build') + m.ms_depends('host') +
            m.ms_depends('run') + [
                conda_interface.MatchSpec(dep) for dep in test_requires
            ])

        for dep in deps:
            # BUG FIX: guard nodes that lack 'meta' — the outer loop shows
            # such nodes can exist, and the unguarded lookup raised KeyError.
            name_matches = (n for n in graph.nodes()
                            if 'meta' in graph.node[n] and
                            graph.node[n]['meta'].name() == dep.name)
            for matching_node in name_matches:
                # are any of these build dependencies also nodes in our graph?
                if (match_peer_job(conda_interface.MatchSpec(dep),
                                   graph.node[matching_node]['meta'], m)
                        and (node, matching_node) not in graph.edges()):
                    # add edges if they don't already exist
                    graph.add_edge(node, matching_node)
def add_intradependencies(graph):
    """Ensure that downstream packages wait for upstream build/test (not use existing
    available packages).

    For each node with recipe metadata, collect its build/host/run/test
    dependencies; whenever a dependency is produced by another node in
    *graph* (with compatible variant settings), add an edge so the upstream
    package is built first.  Dependencies that are outputs of the same
    recipe are ignored.
    """
    for node in graph.nodes():
        # Skip nodes that carry no recipe metadata — nothing to resolve.
        if 'meta' not in graph.nodes[node]:
            continue
        # get build dependencies
        m = graph.nodes[node]['meta']

        # other_outputs (when present) lists (name, ...) tuples for sibling
        # outputs of this same recipe.
        if hasattr(m, 'other_outputs'):
            internal_deps = tuple(i[0] for i in m.other_outputs)
        else:
            internal_deps = ()
        # this is pretty hard. Realistically, we would want to know
        # what the build and host platforms are on the build machine.
        # However, all we know right now is what machine we're actually
        # on (the one calculating the graph).
        deps = set(
            m.ms_depends('build') + m.ms_depends('host') +
            m.ms_depends('run') + [
                conda_interface.MatchSpec(dep) for dep in ensure_list(
                    (m.meta.get('test') or {}).get('requires'))
            ])

        for dep in deps:
            # Ignore all dependencies that are outputs of the current recipe.
            # These may not always match because of version differences but
            # without this, recipes with outputs which depend on each other
            # cannot be submitted.
            if dep.name in internal_deps:
                continue
            # BUG FIX: guard nodes that lack 'meta' — the outer loop shows
            # such nodes can exist, and the unguarded lookup raised KeyError.
            name_matches = (n for n in graph.nodes()
                            if 'meta' in graph.nodes[n] and
                            graph.nodes[n]['meta'].name() == dep.name)
            for matching_node in name_matches:
                # are any of these build dependencies also nodes in our graph?
                match_meta = graph.nodes[matching_node]['meta']
                if (match_peer_job(conda_interface.MatchSpec(dep), match_meta,
                                   m)
                        and (node, matching_node) not in graph.edges()):
                    # inside if statement because getting used vars is expensive
                    shared_vars = set(match_meta.get_used_vars()) & set(
                        m.get_used_vars())
                    # all vars in variant that they both use must line up
                    if all(match_meta.config.variant[v] == m.config.variant[v]
                           for v in shared_vars):
                        # add edges if they don't already exist
                        graph.add_edge(node, matching_node)
Example #4
0
def _installable(name, version, build_string, config, conda_resolve):
    """Can Conda install the package we need?

    Builds a MatchSpec from name/version/build_string (with any "any"
    placeholders normalized via _fix_any) and asks the resolver for matches.
    Returns the (truthy) match result, or a falsy value when nothing matches.
    """
    ms = conda_interface.MatchSpec(" ".join([name, _fix_any(version, config),
                                             _fix_any(build_string, config)]))
    installable = conda_resolve.find_matches(ms)
    if not installable:
        # FIX: body was over-indented (misleading extra level); also use
        # log.warning — log.warn is the deprecated alias (assumes `log` is a
        # stdlib logger — confirm).
        log.warning("Dependency {name}, version {ver} is not installable from your "
                    "channels: {channels} with subdir {subdir}.  Seeing if we can build it..."
                    .format(name=name, ver=version, channels=config.channel_urls,
                            subdir=config.host_subdir))
    return installable
def _buildable(package, version=""):
    """Does the recipe that we have available produce the package we need?

    *package* is a recipe directory path; returns the MatchSpec match result
    for the rendered recipe, or False when *package* is not a directory.
    """
    # Guard clause: no recipe directory means nothing buildable.
    if not os.path.isdir(package):
        return False
    metadata, _, _ = api.render(package)
    # What the recipe actually produces, in MatchSpec-comparable form.
    rendered = {
        'name': metadata.name(),
        'version': metadata.version(),
        'build': metadata.build_number(),
    }
    spec = conda_interface.MatchSpec(" ".join([package, version]))
    return spec.match(rendered)
Example #6
0
def _buildable(name, version, recipes_dir, worker, config, finalize):
    """Does the recipe that we have available produce the package we need?

    Scans *recipes_dir* for directories whose name looks like the package
    (optionally suffixed with a version), renders each candidate recipe, and
    returns the meta path of the first one matching the target spec, or
    False when none matches.
    """
    pattern = re.compile(r'%s(?:\-[0-9]+[\.0-9\_\-a-zA-Z]*)?$' % name)
    candidate_paths = [os.path.join(recipes_dir, entry)
                       for entry in os.listdir(recipes_dir)
                       if os.path.isdir(os.path.join(recipes_dir, entry)) and
                       pattern.match(entry)]
    candidate_metadata = [meta
                          for path in candidate_paths
                          for (meta, _, _) in _get_or_render_metadata(
                              path, worker, finalize=finalize)]

    # this is our target match
    target = conda_interface.MatchSpec(" ".join([name, _fix_any(version, config)]))
    for meta in candidate_metadata:
        if match_peer_job(target, meta):
            return meta.meta_path
    return False
def _installable(package, version, conda_resolve):
    """Can Conda install the package we need?

    Returns the resolver's validity verdict for the "name version" spec.
    """
    spec_text = " ".join([package, version])
    spec = conda_interface.MatchSpec(spec_text)
    return conda_resolve.valid(spec, filter=conda_resolve.default_filter())