def get_index(
    channel_urls=(),
    prepend=True,
    platform=None,
    use_local=False,
    use_cache=False,
    unknown=None,
    prefix=None,
    repodata_fn="repodata.json",
):
    """Download and load repodata for the resolved channel URLs.

    Args:
        channel_urls: Channel names/URLs to include.
        prepend: Whether ``calculate_channel_urls`` prepends configured channels.
        platform: Platform/subdir selector passed through to URL resolution.
        use_local: Include the local channel when resolving URLs.
        use_cache, unknown, prefix: Accepted for interface compatibility with
            conda's ``get_index`` but not used by this implementation.
        repodata_fn: Repodata file name to fetch from each channel subdir.

    Returns:
        A list of ``(api.SubdirData, Channel)`` tuples, one per resolved URL.

    Raises:
        RuntimeError: If downloading any repodata target fails.
    """
    real_urls = calculate_channel_urls(channel_urls, prepend, platform, use_local)
    check_whitelist(real_urls)

    dlist = api.DownloadTargetList()
    index = []
    # NOTE(review): removed an unused `sddata = []` local and the unused
    # `idx` from `enumerate` — neither was referenced anywhere in the body.
    for url in real_urls:
        channel = Channel(url)
        full_url = channel.url(with_credentials=True) + "/" + repodata_fn
        full_path_cache = os.path.join(
            create_cache_dir(), cache_fn_url(full_url, repodata_fn)
        )
        sd = api.SubdirData(
            channel.name + "/" + channel.subdir, full_url, full_path_cache
        )
        sd.load()
        index.append((sd, channel))
        dlist.add(sd)

    is_downloaded = dlist.download(True)
    if not is_downloaded:
        raise RuntimeError("Error downloading repodata.")
    return index
def get_index(
    channel_urls=(),
    prepend=True,
    platform=None,
    use_local=False,
    use_cache=False,
    unknown=None,
    prefix=None,
):
    """Fetch channel data concurrently, one worker thread per channel URL.

    Each worker runs ``get_channel(url, result)``; workers append into the
    shared ``result`` list, so entry order depends on completion order.
    ``use_cache``, ``unknown`` and ``prefix`` are accepted only for interface
    compatibility with conda's ``get_index``.
    """
    channel_urls = calculate_channel_urls(channel_urls, prepend, platform, use_local)
    check_whitelist(channel_urls)

    result = []
    workers = [
        threading.Thread(target=get_channel, args=(url, result))
        for url in channel_urls
    ]
    for worker in workers:
        worker.start()
    # Wait for every fetch to finish before handing the list back.
    for worker in workers:
        worker.join()
    return result
def get_index(
    channel_urls=(),
    prepend=True,
    platform=None,
    use_local=False,
    use_cache=False,
    unknown=None,
    prefix=None,
    repodata_fn="repodata.json",
):
    """Load repodata for every resolved channel URL and return the index.

    Adapted from @wolfv:
    https://gist.github.com/wolfv/cd12bd4a448c77ff02368e97ffdf495a

    Returns a list of ``(api.SubdirData, Channel)`` pairs; raises
    ``RuntimeError`` when any repodata download fails. ``use_cache``,
    ``unknown`` and ``prefix`` exist only for signature compatibility.
    """
    urls = calculate_channel_urls(channel_urls, prepend, platform, use_local)
    check_whitelist(urls)

    targets = api.DownloadTargetList()
    index = []
    for entry in urls:
        chan = Channel(entry)
        repodata_url = "{}/{}".format(chan.url(with_credentials=True), repodata_fn)
        cache_path = os.path.join(
            create_cache_dir(), cache_fn_url(repodata_url, repodata_fn)
        )
        subdir = api.SubdirData(
            "{}/{}".format(chan.name, chan.subdir), repodata_url, cache_path
        )
        subdir.load()
        index.append((subdir, chan))
        targets.add(subdir)

    if not targets.download(True):
        raise RuntimeError("Error downloading repodata.")
    return index
def get_index(
    channel_urls=(),
    prepend=True,
    platform=None,
    use_local=False,
    use_cache=False,
    unknown=None,
    prefix=None,
    repodata_fn="repodata.json",
):
    """Load repodata for each resolved channel, authenticating channel URLs.

    Binstar tokens are attached to each repodata URL before download.
    Returns ``(api.SubdirData, Channel)`` pairs; raises ``RuntimeError`` if
    downloading fails. ``use_cache``, ``unknown`` and ``prefix`` are kept
    only for signature compatibility.
    """
    resolved = calculate_channel_urls(channel_urls, prepend, platform, use_local)
    check_whitelist(resolved)

    download_targets = api.DownloadTargetList()
    index = []
    for link in resolved:
        chan = Channel(link)
        token_url = CondaHttpAuth.add_binstar_token(
            chan.url(with_credentials=True) + "/" + repodata_fn
        )
        cache_file = os.path.join(api.create_cache_dir(), api.cache_fn_url(token_url))
        # Named channels display as "name/subdir"; ad-hoc URL channels fall
        # back to their credential-free URL.
        display = (
            chan.name + "/" + chan.subdir
            if chan.name
            else chan.url(with_credentials=False)
        )
        subdir_data = api.SubdirData(display, token_url, cache_file)
        subdir_data.load()
        index.append((subdir_data, chan))
        download_targets.add(subdir_data)

    if not download_targets.download(True):
        raise RuntimeError("Error downloading repodata.")
    return index
def get_index(
    channel_urls=(),
    prepend=True,
    platform=None,
    use_local=False,
    use_cache=False,
    unknown=None,
    prefix=None,
):
    """Load channel subdir data concurrently via ``FastSubdirData`` workers.

    Spawns one thread per channel URL running ``load_channel(sd, result)``;
    workers append into the shared ``result`` list, so ordering depends on
    completion order. ``use_cache``, ``unknown`` and ``prefix`` exist only
    for signature compatibility.
    """
    channel_urls = calculate_channel_urls(channel_urls, prepend, platform, use_local)
    check_whitelist(channel_urls)

    result = []
    workers = []
    for url in channel_urls:
        subdir = FastSubdirData(Channel(url))
        worker = threading.Thread(target=load_channel, args=(subdir, result))
        workers.append(worker)
        worker.start()
    for worker in workers:
        worker.join()
    return result
def test_check_whitelist():
    """Whitelisted channels pass ``check_whitelist``; others are rejected."""
    allowed = (
        'defaults',
        'conda-forge',
        'https://beta.conda.anaconda.org/conda-test',
    )
    with env_vars({'CONDA_WHITELIST_CHANNELS': ','.join(allowed)},
                  stack_callback=conda_tests_ctxt_mgmt_def_pol):
        # Channels outside the whitelist must raise.
        with pytest.raises(ChannelNotAllowed):
            get_index(("conda-canary",))
        with pytest.raises(ChannelNotAllowed):
            get_index(("https://repo.anaconda.com/pkgs/denied",))
        # Whitelisted spellings pass without raising.
        check_whitelist(("defaults",))
        check_whitelist((DEFAULT_CHANNELS[0], DEFAULT_CHANNELS[1]))
        check_whitelist(("https://conda.anaconda.org/conda-forge/linux-64",))
    # With the whitelist env var restored, any channel is allowed again.
    check_whitelist(("conda-canary",))
def test_check_whitelist():
    """Whitelisted channels pass ``check_whitelist``; others are rejected."""
    allowed = (
        'defaults',
        'conda-forge',
        'https://beta.conda.anaconda.org/conda-test',
    )
    with env_var('CONDA_WHITELIST_CHANNELS', ','.join(allowed), reset_context):
        # Channels outside the whitelist must raise.
        with pytest.raises(ChannelNotAllowed):
            get_index(("conda-canary",))
        with pytest.raises(ChannelNotAllowed):
            get_index(("https://repo.anaconda.com/pkgs/denied",))
        # Whitelisted spellings pass without raising.
        check_whitelist(("defaults",))
        check_whitelist((DEFAULT_CHANNELS[0], DEFAULT_CHANNELS[1]))
        check_whitelist(("https://conda.anaconda.org/conda-forge/linux-64",))
    # With the whitelist env var restored, any channel is allowed again.
    check_whitelist(("conda-canary",))
def test_check_whitelist():
    """Whitelisted channels pass ``check_whitelist``; others are rejected."""
    permitted = ('defaults', 'conda-forge',
                 'https://beta.conda.anaconda.org/conda-test')
    with env_vars({'CONDA_WHITELIST_CHANNELS': ','.join(permitted)},
                  stack_callback=conda_tests_ctxt_mgmt_def_pol):
        # Non-whitelisted channels are rejected during index retrieval.
        with pytest.raises(ChannelNotAllowed):
            get_index(("conda-canary",))
        with pytest.raises(ChannelNotAllowed):
            get_index(("https://repo.anaconda.com/pkgs/denied",))
        # Whitelisted spellings pass without raising.
        check_whitelist(("defaults",))
        check_whitelist((DEFAULT_CHANNELS[0], DEFAULT_CHANNELS[1]))
        check_whitelist(("https://conda.anaconda.org/conda-forge/linux-64",))
    # With the whitelist env var restored, any channel is allowed again.
    check_whitelist(("conda-canary",))
def test_check_whitelist():
    """Whitelisted channels pass ``check_whitelist``; others are rejected."""
    permitted = ('defaults', 'conda-forge',
                 'https://beta.conda.anaconda.org/conda-test')
    with env_var('CONDA_WHITELIST_CHANNELS', ','.join(permitted), reset_context):
        # Non-whitelisted channels are rejected during index retrieval.
        with pytest.raises(ChannelNotAllowed):
            get_index(("conda-canary",))
        with pytest.raises(ChannelNotAllowed):
            get_index(("https://repo.anaconda.com/pkgs/denied",))
        # Whitelisted spellings pass without raising.
        check_whitelist(("defaults",))
        check_whitelist((DEFAULT_CHANNELS[0], DEFAULT_CHANNELS[1]))
        check_whitelist(("https://conda.anaconda.org/conda-forge/linux-64",))
    # With the whitelist env var restored, any channel is allowed again.
    check_whitelist(("conda-canary",))
def get_index(
    channel_urls=(),
    prepend=True,
    platform=None,
    use_local=False,
    use_cache=False,
    unknown=None,
    prefix=None,
    repodata_fn="repodata.json",
):
    """Build the repodata download index across all configured channels.

    Collects local/explicit/configured channels (whitelist-checked and
    de-duplicated), normalizes channel specs, and registers one
    ``api.SubdirData`` download target per channel platform URL.

    Returns a list of ``(api.SubdirData, info_dict)`` pairs where
    ``info_dict`` has keys ``platform``, ``url`` and ``channel``. Raises
    ``RuntimeError`` when downloading fails. ``use_cache``, ``unknown`` and
    ``prefix`` exist only for signature compatibility.
    """
    if isinstance(platform, str):
        platform = [platform, "noarch"]

    channels = []
    if use_local:
        channels.append("local")
    channels.extend(channel_urls)
    if prepend:
        channels.extend(context.channels)
    check_whitelist(channels)

    # Drop duplicates while keeping first-seen order.
    channels = list(OrderedDict.fromkeys(channels))

    def _normalize_spec(spec):
        # Percent-encode the first "@" when more than one is present, so the
        # remaining "@" still separates credentials from the host part.
        if spec.count("@") > 1:
            cut = spec.find("@")
            spec = spec[:cut] + urllib.parse.quote(spec[cut]) + spec[cut + 1:]
        if platform:
            spec = "{}[{}]".format(spec, ",".join(platform))
        return spec

    channels = [_normalize_spec(spec) for spec in channels]

    package_caches = api.MultiPackageCache(context.pkgs_dirs)
    api.create_cache_dir(str(package_caches.first_writable_path))

    targets = api.DownloadTargetList()
    index = []
    for channel in api.get_channels(channels):
        for subdir_name, url in channel.platform_urls(with_credentials=True):
            authed_url = CondaHttpAuth.add_binstar_token(url)
            subdir = api.SubdirData(
                channel, subdir_name, authed_url, package_caches, repodata_fn
            )
            index.append(
                (subdir, {"platform": subdir_name, "url": url, "channel": channel})
            )
            targets.add(subdir)

    if not targets.download(api.MAMBA_DOWNLOAD_FAILFAST):
        raise RuntimeError("Error downloading repodata.")
    return index
def get_index(
    channel_urls=(),
    prepend=True,
    platform=None,
    use_local=False,
    use_cache=False,
    unknown=None,
    prefix=None,
    repodata_fn="repodata.json",
):
    """Load repodata for every platform URL of every configured channel.

    Collects local/explicit/configured channels (whitelist-checked and
    de-duplicated), normalizes channel specs, then creates and loads one
    ``api.SubdirData`` per channel platform URL.

    Returns a list of ``(api.SubdirData, info_dict)`` pairs where
    ``info_dict`` has keys ``platform``, ``url`` and ``channel``. Raises
    ``RuntimeError`` when downloading fails. ``use_cache``, ``unknown`` and
    ``prefix`` exist only for signature compatibility.
    """
    channels = []
    if use_local:
        channels.append("local")
    channels.extend(channel_urls)
    if prepend:
        channels.extend(context.channels)
    check_whitelist(channels)

    # Drop duplicates while keeping first-seen order.
    channels = list(OrderedDict.fromkeys(channels))

    def _normalize_spec(spec):
        # Percent-encode the first "@" when more than one is present, so the
        # remaining "@" still separates credentials from the host part.
        if spec.count("@") > 1:
            cut = spec.find("@")
            spec = spec[:cut] + urllib.parse.quote(spec[cut]) + spec[cut + 1:]
        if platform:
            spec = spec + "[" + platform + "]"
        return spec

    channels = [_normalize_spec(spec) for spec in channels]

    targets = api.DownloadTargetList()
    index = []
    for channel in api.get_channels(channels):
        for subdir_name, url in channel.platform_urls(with_credentials=True):
            authed_url = CondaHttpAuth.add_binstar_token(url + "/" + repodata_fn)
            cache_file = os.path.join(
                api.create_cache_dir(), api.cache_fn_url(authed_url)
            )
            # Named channels display as "name/platform"; ad-hoc URL channels
            # fall back to their credential-free platform URL.
            if channel.name:
                label = channel.name + "/" + subdir_name
            else:
                label = channel.platform_url(subdir_name, with_credentials=False)
            subdir = api.SubdirData(
                label, authed_url, cache_file, subdir_name == "noarch"
            )
            subdir.load()
            index.append(
                (subdir, {"platform": subdir_name, "url": url, "channel": channel})
            )
            targets.add(subdir)

    if not targets.download(True):
        raise RuntimeError("Error downloading repodata.")
    return index