Code example #1
0
File: render.py  Project: sshuster/conda-build
def get_upstream_pins(m, actions, env):
    """Download packages from specs, then inspect each downloaded package for additional
    downstream dependency specs.  Return these additional specs."""
    requirements = m.meta.get('requirements', {}).get(env, [])
    explicit_names = [spec.split(' ')[0]
                      for spec in requirements] if requirements else []
    # Only packages the recipe asked for explicitly are inspected.
    candidates = [pkg for pkg in actions.get('LINK', [])
                  if pkg.name in explicit_names]

    ignore_list = utils.ensure_list(m.get_value('build/ignore_run_exports'))
    combined = {}
    for pkg in candidates:
        exports = None
        if m.config.use_channeldata:
            channeldata = utils.download_channeldata(pkg.channel)
            # Only trust channeldata when it actually carries a 'packages'
            # mapping; otherwise fall through to reading the package itself.
            if 'packages' in channeldata:
                exports = (channeldata['packages']
                           .get(pkg.name, {})
                           .get('run_exports', {})
                           .get(pkg.version, {}))
        if exports is None:
            # No channeldata answer: download the package and read its
            # run_exports directly.
            loc, dist = execute_download_actions(m,
                                                 actions,
                                                 env=env,
                                                 package_subset=pkg)[pkg]
            exports = _read_specs_from_package(loc, dist)
        filtered = _filter_run_exports(exports, ignore_list)
        if filtered:
            combined = utils.merge_dicts_of_lists(combined, filtered)
    return combined
Code example #2
0
File: render.py  Project: isuruf/conda-build
def get_upstream_pins(m, actions, env):
    """Download packages from specs, then inspect each downloaded package for additional
    downstream dependency specs.  Return these additional specs.

    For each explicitly requested linked package, try channeldata first; if
    channeldata is unavailable (and did not explicitly report "no
    run_exports"), fall back to downloading the package and reading its
    run_exports from the archive itself.
    """

    env_specs = m.meta.get('requirements', {}).get(env, [])
    explicit_specs = [req.split(' ')[0]
                      for req in env_specs] if env_specs else []
    linked_packages = actions.get('LINK', [])
    linked_packages = [
        pkg for pkg in linked_packages if pkg.name in explicit_specs
    ]

    ignore_list = utils.ensure_list(m.get_value('build/ignore_run_exports'))
    additional_specs = {}
    for pkg in linked_packages:
        # BUG FIX: these were initialized once before the loop, so state
        # leaked between iterations — a package whose channeldata lookup
        # failed would silently reuse the previous package's run_exports,
        # or wrongly skip the download fallback because empty_run_exports
        # was still True from an earlier package.  Reset per package.
        run_exports = {}
        empty_run_exports = False
        channeldata = utils.download_channeldata(pkg.channel)
        if channeldata:
            pkg_data = channeldata.get('packages', {}).get(pkg.name, {})
            run_exports = pkg_data.get('run_exports', {}).get(pkg.version, {})
            # channeldata explicitly saying {} means "this version exports
            # nothing" — remember that so we don't download needlessly.
            empty_run_exports = run_exports == {}
        if not run_exports and not empty_run_exports:
            locs_and_dists = execute_download_actions(
                m, actions, env=env, package_subset=linked_packages)
            locs_and_dists = [v for k, v in locs_and_dists.items() if k == pkg]
            run_exports = _read_specs_from_package(*next(iter(locs_and_dists)))
        specs = _filter_run_exports(run_exports, ignore_list)
        if specs:
            additional_specs = utils.merge_dicts_of_lists(
                additional_specs, specs)
    return additional_specs
Code example #3
0
def get_upstream_pins(m, actions, env):
    """Download packages from specs, then inspect each downloaded package for additional
    downstream dependency specs.  Return these additional specs."""
    requirements = m.meta.get('requirements', {}).get(env, [])
    wanted = [spec.split(' ')[0]
              for spec in requirements] if requirements else []
    # Restrict inspection to packages the recipe requested explicitly.
    selected = [pkg for pkg in actions.get('LINK', []) if pkg.name in wanted]

    ignore_list = utils.ensure_list(m.get_value('build/ignore_run_exports'))
    merged = {}
    for pkg in selected:
        # Channeldata is the only source consulted in this variant; a
        # missing package/version simply yields an empty mapping.
        channeldata = utils.download_channeldata(pkg.channel)
        exports = (channeldata.get('packages', {})
                   .get(pkg.name, {})
                   .get('run_exports', {})
                   .get(pkg.version, {}))
        filtered = _filter_run_exports(exports, ignore_list)
        if filtered:
            merged = utils.merge_dicts_of_lists(merged, filtered)
    return merged
Code example #4
0
File: mamba_solver.py  Project: bgruening/cf-scripts
def _get_run_export(link_tuple):
    """Resolve the run_exports for a solved package.

    ``link_tuple`` is ``(url, filename, repodata_record_json)``: the code
    below derives the channel URL from ``link_tuple[0]``, strips the
    ``.tar.bz2``/``.conda`` suffix from ``link_tuple[1]``, and reads the
    package ``name`` from the JSON in ``link_tuple[2]``.

    Lookup order: channeldata (does this package have run_exports at all?),
    then the regro/libcfgraph rendered-recipe mirror, then downloading the
    package itself as a last resort.  Returns a run_exports mapping shaped
    like ``DEFAULT_RUN_EXPORTS`` (the download fallback's shape comes from
    ``_get_run_export_download`` — presumably the same; verify there).
    """

    # Module-level cache of the libcfgraph file listing, fetched at most once.
    global LIBCFGRAPH_INDEX

    run_exports = None

    if "https://" in link_tuple[0]:
        # Drop any embedded anaconda.org token, then strip the subdir
        # component to get the bare channel URL.
        https = _strip_anaconda_tokens(link_tuple[0])
        channel_url = https.rsplit("/", maxsplit=1)[0]
        if "conda.anaconda.org" in channel_url:
            # Use the static CDN host for channeldata fetches.
            channel_url = channel_url.replace(
                "conda.anaconda.org",
                "conda-static.anaconda.org",
            )
    else:
        channel_url = link_tuple[0].rsplit("/", maxsplit=1)[0]

    cd = download_channeldata(channel_url)
    data = json.loads(link_tuple[2])
    name = data["name"]

    # Only dig further if channeldata says this package has run_exports at
    # all; otherwise return the defaults immediately (else branch below).
    if cd.get("packages", {}).get(name, {}).get("run_exports", {}):
        # libcfgraph location
        if link_tuple[1].endswith(".tar.bz2"):
            pkg_nm = link_tuple[1][:-len(".tar.bz2")]
        else:
            pkg_nm = link_tuple[1][:-len(".conda")]
        # Last two URL components, e.g. "<channel>/<subdir>".
        channel_subdir = "/".join(link_tuple[0].split("/")[-2:])
        libcfg_pth = f"artifacts/{name}/" f"{channel_subdir}/{pkg_nm}.json"
        if LIBCFGRAPH_INDEX is None:
            logger.warning("downloading libcfgraph file index")
            r = requests.get(
                "https://raw.githubusercontent.com/regro/libcfgraph"
                "/master/.file_listing.json", )
            LIBCFGRAPH_INDEX = r.json()

        if libcfg_pth in LIBCFGRAPH_INDEX:
            # Fetch the rendered recipe for this exact artifact and read
            # run_exports from its build section.
            data = requests.get(
                os.path.join(
                    "https://raw.githubusercontent.com",
                    "regro/libcfgraph/master",
                    libcfg_pth,
                ), ).json()

            rx = data.get("rendered_recipe",
                          {}).get("build", {}).get("run_exports", {})
            if rx:
                run_exports = copy.deepcopy(DEFAULT_RUN_EXPORTS, )
                if isinstance(rx, str):
                    # some packages have a single string
                    # eg pyqt
                    rx = [rx]

                # rx may be a dict keyed by export kind ("weak", "strong",
                # ...) or a plain list of specs; bare specs go into "weak".
                for k in rx:
                    if k in DEFAULT_RUN_EXPORTS:
                        logger.debug(
                            "RUN EXPORT: %s %s %s",
                            name,
                            k,
                            rx[k],
                        )
                        run_exports[k].update(rx[k])
                    else:
                        logger.debug(
                            "RUN EXPORT: %s %s %s",
                            name,
                            "weak",
                            [k],
                        )
                        run_exports["weak"].add(k)

        # fall back to getting repodata shard if needed
        if run_exports is None:
            logger.info(
                "RUN EXPORTS: downloading package %s/%s/%s" %
                (channel_url, link_tuple[0].split("/")[-1], link_tuple[1]), )
            run_exports = _get_run_export_download(link_tuple)[1]
    else:
        # Channeldata reports no run_exports for this package: use defaults.
        run_exports = copy.deepcopy(DEFAULT_RUN_EXPORTS)

    return run_exports