def _is_url_like_archive(url):
    """Return whether the URL looks like an archive.

    Inspects only the filename component of the URL and reports whether
    it ends with one of the known archive extensions.
    """
    filename = Link(url).filename
    # str.endswith accepts a tuple of suffixes, so one call replaces the
    # explicit loop over ARCHIVE_EXTENSIONS.
    return filename.endswith(tuple(ARCHIVE_EXTENSIONS))
def get_location(self, dist, dependency_links):
    """Return the URL (fragment stripped) of the dependency link whose
    egg fragment names *dist*, or None when no link matches.
    """
    for link_url in dependency_links:
        fragment = Link(link_url).egg_fragment
        if not fragment:
            continue
        if '-' in fragment:
            # FIXME: will this work when a package has - in the name?
            candidate = fragment.rsplit('-', 1)[0].lower()
        else:
            candidate = fragment
        if candidate == dist.key:
            return link_url.split('#', 1)[0]
    return None
def test_from_link_vcs_without_source_dir(script, tmpdir):
    """A cached VCS link yields the bare repo URL plus the pinned commit."""
    link = Link("git+https://g.c/u/p.git@1")
    result = direct_url_from_link(link, link_is_in_wheel_cache=True)
    assert result.url == "https://g.c/u/p.git"
    assert result.info.commit_id == "1"
def test_from_link_dir(tmpdir):
    """A local-directory link round-trips to a DirInfo direct URL."""
    local_url = path_to_url(tmpdir)
    result = direct_url_from_link(Link(local_url))
    assert result.url == local_url
    assert isinstance(result.info, DirInfo)
vcs_url = EXECUTE_CODE vc_type, _ = vcs_url.split('+', 1) # Use tempfolder for cloning repository import tempfile, shutil, atexit code_path = tempfile.mkdtemp() # automatically remove temp directory if process exits def cleanup(): shutil.rmtree(code_path) atexit.register(cleanup) vcs.get_backend(vc_type).export(code_path, url=vcs_url) from pip._internal.models.link import Link subdir = Link(vcs_url).subdirectory_fragment if subdir: code_path = os.path.join(code_path, subdir.lstrip('/')) except Exception as ex: log.exception("Failed to clone repository via pip internal.") if not code_path or not os.path.exists(code_path): log.info("No code artifacts could be found for " + EXECUTE_CODE) sys.exit(1) # code script is the file that actually executed -> if directory it needs a main module main_script = code_path # Execute single script if os.path.isfile(code_path): # set code path to the root folder of the script so it can also resolve conda env and requirements code_path = os.path.dirname(os.path.realpath(code_path))
def _get_simple_response(url: str, session: PipSession) -> Response:
    """Access a Simple API response with GET, and return the response.

    This consists of three parts:

    1. If the URL looks suspiciously like an archive, send a HEAD first to
       check the Content-Type is HTML or Simple API, to avoid downloading a
       large file. Raise `_NotHTTP` if the content type cannot be determined,
       or `_NotAPIContent` if it is not HTML or a Simple API.
    2. Actually perform the request. Raise HTTP exceptions on network
       failures.
    3. Check the Content-Type header to make sure we got a Simple API
       response, and raise `_NotAPIContent` otherwise.
    """
    # Pre-flight HEAD only when the filename component looks like an archive;
    # other URLs go straight to the GET below.
    if is_archive_file(Link(url).filename):
        _ensure_api_response(url, session=session)

    logger.debug("Getting page %s", redact_auth_from_url(url))

    resp = session.get(
        url,
        headers={
            # Prefer the JSON form of the Simple API, falling back through
            # the v1 HTML form to plain HTML via quality values.
            "Accept": ", ".join(
                [
                    "application/vnd.pypi.simple.v1+json",
                    "application/vnd.pypi.simple.v1+html; q=0.1",
                    "text/html; q=0.01",
                ]
            ),
            # We don't want to blindly return cached data for
            # /simple/, because authors generally expect that
            # twine upload && pip install will function, but if
            # they've done a pip install in the last ~10 minutes
            # it won't. Thus by setting this to zero we will not
            # blindly use any cached data, however the benefit of
            # using max-age=0 instead of no-cache, is that we will
            # still support conditional requests, so we will still
            # minimize traffic sent in cases where the page hasn't
            # changed at all, we will just always incur the round
            # trip for the conditional GET now instead of only
            # once per 10 minutes.
            # For more information, please see pypa/pip#5670.
            "Cache-Control": "max-age=0",
        },
    )
    raise_for_status(resp)

    # The check for archives above only works if the url ends with
    # something that looks like an archive. However that is not a
    # requirement of an url. Unless we issue a HEAD request on every
    # url we cannot know ahead of time for sure if something is a
    # Simple API response or not. However we can check after we've
    # downloaded it.
    _ensure_api_header(resp)

    logger.debug(
        "Fetched page %s as %s",
        redact_auth_from_url(url),
        resp.headers.get("Content-Type", "Unknown"),
    )

    return resp
def test_is_wheel_false(self) -> None:
    """A link without the .whl extension is not reported as a wheel."""
    link = Link("http://yo/not_a_wheel")
    assert not link.is_wheel
def test_is_wheel(self) -> None:
    """A link ending in .whl is reported as a wheel."""
    link = Link("http://yo/wheel.whl")
    assert link.is_wheel
def test_ext_query(self) -> None:
    """The query string does not leak into the extension."""
    link = Link("http://yo/wheel.whl?a=b")
    assert link.ext == ".whl"
def test_ext_fragment(self) -> None:
    """The URL fragment does not leak into the extension."""
    link = Link("http://yo/wheel.whl#frag")
    assert link.ext == ".whl"
def test_ext(self) -> None:
    """A plain .whl URL reports the .whl extension."""
    link = Link("http://yo/wheel.whl")
    assert link.ext == ".whl"