def ensure_python_distribution(version):
    # type: (str) -> Tuple[str, str, Callable[[Iterable[str]], Text]]
    """Ensure a pyenv-built CPython of `version` is installed for testing.

    :param version: A version string that must be one of `_ALL_PY_VERSIONS`.
    :returns: A tuple of (python binary path, pip binary path, pyenv runner callable).
    :raises ValueError: If `version` is not one of `_ALL_PY_VERSIONS`.
    """
    if version not in _ALL_PY_VERSIONS:
        raise ValueError("Please constrain version to one of {}".format(_ALL_PY_VERSIONS))

    pyenv_root = os.path.abspath(
        os.path.join(
            os.environ.get("_PEX_TEST_PYENV_ROOT", "{}_dev".format(ENV.PEX_ROOT)),
            "pyenv",
        )
    )
    interpreter_location = os.path.join(pyenv_root, "versions", version)
    pyenv = os.path.join(pyenv_root, "bin", "pyenv")
    pip = os.path.join(interpreter_location, "bin", "pip")

    pyenv_env = os.environ.copy()
    pyenv_env["PYENV_ROOT"] = pyenv_root

    # First ensure pyenv itself is bootstrapped, then ensure the interpreter is built;
    # both steps are guarded by atomic directories so concurrent test runs cooperate.
    with atomic_directory(target_dir=os.path.join(pyenv_root), exclusive=True) as target_dir:
        if target_dir:
            bootstrap_python_installer(target_dir)

    with atomic_directory(
        target_dir=interpreter_location, exclusive=True
    ) as interpreter_target_dir:
        if interpreter_target_dir:
            # Refresh the pyenv checkout so it knows about recent CPython releases.
            subprocess.check_call(
                [
                    "git",
                    "--git-dir={}".format(os.path.join(pyenv_root, ".git")),
                    "--work-tree={}".format(pyenv_root),
                    "pull",
                    "--ff-only",
                    "https://github.com/pyenv/pyenv.git",
                ]
            )
            env = pyenv_env.copy()
            if sys.platform.lower().startswith("linux"):
                env["CONFIGURE_OPTS"] = "--enable-shared"
                # The pyenv builder detects `--enable-shared` and sets up `RPATH` via
                # `LDFLAGS=-Wl,-rpath=... $LDFLAGS` to ensure the built python binary links the
                # correct libpython shared lib. Some versions of compiler set the `RUNPATH`
                # instead though which is searched _after_ the `LD_LIBRARY_PATH` environment
                # variable. To ensure an inopportune `LD_LIBRARY_PATH` doesn't fool the pyenv
                # python binary into linking the wrong libpython, force `RPATH`, which is
                # searched 1st by the linker, with `--disable-new-dtags`.
                env["LDFLAGS"] = "-Wl,--disable-new-dtags"
            subprocess.check_call([pyenv, "install", "--keep", version], env=env)
            subprocess.check_call([pip, "install", "-U", "pip"])

    python = os.path.join(interpreter_location, "bin", "python" + version[0:3])

    def run_pyenv(args):
        # type: (Iterable[str]) -> Text
        return to_unicode(subprocess.check_output([pyenv] + list(args), env=pyenv_env))

    return python, pip, run_pyenv
def cache_distribution(cls, zf, source, target_dir):
    """Possibly cache a wheel from within a zipfile into `target_dir`.

    Given a zipfile handle and a source path prefix corresponding to a wheel install embedded
    within that zip, maybe extract the wheel install into the target cache and then return a
    distribution from the cache.

    :param zf: An open zip file (a zipped pex).
    :type zf: :class:`zipfile.ZipFile`
    :param str source: The path prefix of a wheel install embedded in the zip file.
    :param str target_dir: The directory to cache the distribution in if not already cached.
    :returns: The cached distribution.
    :rtype: :class:`pex.third_party.pkg_resources.Distribution`
    """
    with atomic_directory(target_dir, source=source) as target_dir_tmp:
        if target_dir_tmp is None:
            TRACER.log('Using cached {}'.format(target_dir))
        else:
            with TRACER.timed('Caching {}:{} in {}'.format(zf.filename, source, target_dir)):
                for name in zf.namelist():
                    if name.startswith(source) and not name.endswith('/'):
                        zf.extract(name, target_dir_tmp)

    dist = DistributionHelper.distribution_from_path(target_dir)
    # BUG FIX: the original message had no `{}` placeholder, so `.format(source)` silently
    # dropped the source path from the assertion message.
    assert dist is not None, 'Failed to cache distribution: {}'.format(source)
    return dist
def cache_distribution(cls, zf, source, target_dir):
    # type: (ZipFile, str, str) -> Distribution
    """Possibly cache a wheel from within a zipfile into `target_dir`.

    Given a zipfile handle and a source path prefix corresponding to a wheel install embedded
    within that zip, maybe extract the wheel install into the target cache and then return a
    distribution from the cache.

    :param zf: An open zip file (a zipped pex).
    :param source: The path prefix of a wheel install embedded in the zip file.
    :param target_dir: The directory to cache the distribution in if not already cached.
    :returns: The cached distribution.
    """
    with atomic_directory(target_dir, source=source, exclusive=True) as target_dir_tmp:
        if target_dir_tmp is None:
            TRACER.log("Using cached {}".format(target_dir))
        else:
            with TRACER.timed("Caching {}:{} in {}".format(zf.filename, source, target_dir)):
                # Only extract file members that live under the wheel install prefix.
                wheel_members = (
                    name
                    for name in zf.namelist()
                    if name.startswith(source) and not name.endswith("/")
                )
                for member in wheel_members:
                    zf.extract(member, target_dir_tmp)

    dist = DistributionHelper.distribution_from_path(target_dir)
    assert dist is not None, "Failed to cache distribution: {} ".format(source)
    return dist
def finalize_install(self, install_requests):
    """Seal the wheel install chroot and seed its runtime cache key.

    :param install_requests: The install requests this chroot satisfies.
    :returns: An iterator over the requirements requests for `install_requests`.
    """
    self.atomic_dir.finalize()

    # The install_chroot is keyed by the hash of the wheel file (zip) we installed. Here we
    # add a second key by the hash of the exploded wheel dir (the install_chroot). This
    # latter key is used by zipped PEXes at runtime to explode their wheel chroots to the
    # filesystem. By adding the key here we short-circuit the explode process for PEXes
    # created and run on the same machine: the runtime key under `installed_wheels/` is a
    # symlink pointing back at the already-exploded build-time chroot, so running the pex
    # never has to unpack the wheel again.
    wheel_dir_hash = CacheHelper.dir_hash(self.install_chroot)
    runtime_key_dir = os.path.join(self.installation_root, wheel_dir_hash)
    with atomic_directory(runtime_key_dir) as work_dir:
        if work_dir:
            # Link the runtime key to the exploded chroot under the wheel file's name.
            os.symlink(self.install_chroot, os.path.join(work_dir, self.request.wheel_file))

    return self._iter_requirements_requests(install_requests)
def create(
    cls,
    path,  # type: str
    interpreter=None,  # type: Optional[PythonInterpreter]
):
    # type: (...) -> Pip
    """Creates a pip tool with PEX isolation at path.

    :param path: The path to assemble the pip tool at.
    :param interpreter: The interpreter to run Pip with. The current interpreter by default.
    :return: The path of a PEX that can be used to execute Pip in isolation.
    """
    pip_interpreter = interpreter or PythonInterpreter.get()
    pip_pex_path = os.path.join(path, isolated().pex_hash)
    with atomic_directory(pip_pex_path, exclusive=True) as chroot:
        if not chroot.is_finalized:
            # Deferred import to avoid an import cycle at module load time.
            from pex.pex_builder import PEXBuilder

            builder = PEXBuilder(path=chroot.work_dir)
            builder.info.venv = True
            for dist_location in third_party.expose(["pip", "setuptools", "wheel"]):
                builder.add_dist_location(dist=dist_location)
            builder.set_script("pip")
            builder.freeze()

    pex_info = PexInfo.from_pex(pip_pex_path)
    # Pin the pip venv to the interpreter we were asked to run pip with.
    pex_info.add_interpreter_constraint(str(pip_interpreter.identity.requirement))
    return cls(ensure_venv(PEX(pip_pex_path, interpreter=pip_interpreter, pex_info=pex_info)))
def seed_cache(
    options,  # type: Namespace
    pex,  # type: PEX
):
    # type: (...) -> Iterable[str]
    """Seed local PEX caches for `pex`, returning the argv prefix needed to run it.

    :param options: Parsed CLI options (consults `unzip`, `venv` and `pex_name`).
    :param pex: The PEX to seed caches for.
    :returns: The command components that will run the seeded PEX directly.
    """
    pex_path = pex.path()
    with TRACER.timed("Seeding local caches for {}".format(pex_path)):
        if options.unzip:
            unzip_dir = pex.pex_info().unzip_dir
            if unzip_dir is None:
                raise AssertionError(
                    "Expected PEX-INFO for {} to have the components of an unzip "
                    "directory".format(pex_path)
                )
            with atomic_directory(unzip_dir, exclusive=True) as chroot:
                if chroot:
                    with TRACER.timed("Extracting {}".format(pex_path)):
                        with open_zip(options.pex_name) as pex_zip:
                            pex_zip.extractall(chroot)
            return [pex.interpreter.binary, unzip_dir]

        if options.venv:
            with TRACER.timed("Creating venv from {}".format(pex_path)):
                return [ensure_venv(pex)]

        with TRACER.timed("Extracting code and distributions for {}".format(pex_path)):
            pex.activate()
        return [os.path.abspath(options.pex_name)]
def test_atomic_directory_empty_workdir_finalized():
    # type: () -> None
    # A pre-existing target_dir means the atomic directory is already finalized.
    with temporary_dir() as existing_dir:
        with atomic_directory(existing_dir, exclusive=False) as atomic_dir:
            assert (
                atomic_dir.is_finalized
            ), "When the target_dir exists no work_dir should be created."
def isolated():
    """Returns a chroot for third_party isolated from the ``sys.path``.

    PEX will typically be installed in site-packages flat alongside many other distributions;
    as such, adding the location of the pex distribution to the ``sys.path`` will typically
    expose many other distributions. An isolated chroot can be used as a ``sys.path`` entry
    to effect only the exposure of pex.

    :return: An isolation result.
    :rtype: :class:`IsolationResult`
    """
    global _ISOLATED
    if _ISOLATED is None:
        from pex import vendor
        from pex.common import atomic_directory
        from pex.util import CacheHelper
        from pex.variables import ENV
        from pex.third_party.pkg_resources import resource_isdir, resource_listdir, resource_stream

        module = "pex"

        # TODO(John Sirois): Unify with `pex.util.DistributionHelper.access_zipped_assets`.
        def recursive_copy(srcdir, dstdir):
            os.mkdir(dstdir)
            for entry_name in resource_listdir(module, srcdir):
                if not entry_name:
                    # The `resource_listdir` function returns a '' entry name for the directory
                    # entry itself if it is either present on the filesystem or present as an
                    # explicit zip entry. Since we only care about files and subdirectories at
                    # this point, skip these entries.
                    continue
                # NB: Resource path components are always separated by /, on all systems.
                src_entry = "{}/{}".format(srcdir, entry_name) if srcdir else entry_name
                dst_entry = os.path.join(dstdir, entry_name)
                if resource_isdir(module, src_entry):
                    recursive_copy(src_entry, dst_entry)
                elif not entry_name.endswith(".pyc"):
                    with open(dst_entry, "wb") as fp:
                        with closing(resource_stream(module, src_entry)) as resource:
                            shutil.copyfileobj(resource, fp)

        pex_path = os.path.join(vendor.VendorSpec.ROOT, "pex")
        with _tracer().timed("Hashing pex"):
            dir_hash = CacheHelper.dir_hash(pex_path)
        isolated_dir = os.path.join(ENV.PEX_ROOT, "isolated", dir_hash)

        with _tracer().timed("Isolating pex"):
            with atomic_directory(isolated_dir, exclusive=True) as chroot:
                if chroot:
                    with _tracer().timed("Extracting pex to {}".format(isolated_dir)):
                        recursive_copy("", os.path.join(chroot, "pex"))

        _ISOLATED = IsolationResult(pex_hash=dir_hash, chroot_path=isolated_dir)
    return _ISOLATED
def _force_local(self):
    """Explode a zip-unsafe PEX to the filesystem, returning the path to run from."""
    if self._pex_info.code_hash is None:
        # Do not support force_local if code_hash is not set. (It should always be set.)
        return self._pex
    explode_dir = os.path.join(self._pex_info.zip_unsafe_cache, self._pex_info.code_hash)
    TRACER.log("PEX is not zip safe, exploding to %s" % explode_dir)
    with atomic_directory(explode_dir, exclusive=True) as atomic_chroot:
        # Another process may have already exploded this code hash; only do the work once.
        if not atomic_chroot.is_finalized:
            self.explode_code(atomic_chroot.work_dir)
    return explode_dir
def _force_local(cls, pex_file, pex_info):
    """Explode a zip-unsafe PEX to the filesystem, returning the path to run from."""
    if pex_info.code_hash is None:
        # Do not support force_local if code_hash is not set. (It should always be set.)
        return pex_file
    explode_dir = os.path.join(pex_info.zip_unsafe_cache, pex_info.code_hash)
    TRACER.log("PEX is not zip safe, exploding to %s" % explode_dir)
    with atomic_directory(explode_dir, exclusive=True) as explode_tmp:
        # Only the process that wins the atomic directory race does the extraction.
        if explode_tmp:
            cls.explode_code(pex_file, pex_info, explode_tmp)
    return explode_dir
def seed_cache(
    options,  # type: Namespace
    pex,  # type: PEX
    verbose=False,  # type: bool
):
    # type: (...) -> str
    """Seed local PEX caches for `pex`, returning how to run the seeded PEX.

    :param options: Parsed CLI options (consults `unzip`, `venv` and `pex_name`).
    :param pex: The PEX to seed caches for.
    :param verbose: If True, return a JSON object with pex_root/python/pex keys instead of
                    a plain command string.
    :returns: Either a JSON info blob (verbose) or the command to run the seeded PEX.
    """
    # FIX: the `verbose` type comment was malformed (`# type : bool`); mypy ignores type
    # comments with a space before the colon, leaving the parameter unchecked.
    pex_path = pex.path()
    with TRACER.timed("Seeding local caches for {}".format(pex_path)):
        pex_info = pex.pex_info()

        def create_verbose_info(final_pex_path):
            # type: (str) -> Dict[str, str]
            return dict(pex_root=pex_info.pex_root, python=pex.interpreter.binary, pex=final_pex_path)

        if options.unzip:
            unzip_dir = pex_info.unzip_dir
            if unzip_dir is None:
                raise AssertionError(
                    "Expected PEX-INFO for {} to have the components of an unzip "
                    "directory".format(pex_path)
                )
            with atomic_directory(unzip_dir, exclusive=True) as chroot:
                if not chroot.is_finalized:
                    with TRACER.timed("Extracting {}".format(pex_path)):
                        with open_zip(options.pex_name) as pex_zip:
                            pex_zip.extractall(chroot.work_dir)
            if verbose:
                return json.dumps(create_verbose_info(final_pex_path=unzip_dir))
            else:
                return "{} {}".format(pex.interpreter.binary, unzip_dir)
        elif options.venv:
            with TRACER.timed("Creating venv from {}".format(pex_path)):
                venv_pex = ensure_venv(pex)
                if verbose:
                    return json.dumps(create_verbose_info(final_pex_path=venv_pex))
                else:
                    return venv_pex
        else:
            with TRACER.timed("Extracting code and distributions for {}".format(pex_path)):
                pex.activate()
            pex_path = os.path.abspath(options.pex_name)
            if verbose:
                return json.dumps(create_verbose_info(final_pex_path=pex_path))
            else:
                return pex_path
def isolated():
    """Returns a chroot for third_party isolated from the ``sys.path``.

    PEX will typically be installed in site-packages flat alongside many other distributions;
    as such, adding the location of the pex distribution to the ``sys.path`` will typically
    expose many other distributions. An isolated chroot can be used as a ``sys.path`` entry
    to effect only the exposure of pex.

    :return: An isolation result.
    :rtype: :class:`IsolationResult`
    """
    global _ISOLATED
    if _ISOLATED is None:
        from contextlib import closing

        from pex import vendor
        from pex.common import atomic_directory
        from pex.util import CacheHelper
        from pex.variables import ENV
        from pex.third_party.pkg_resources import resource_isdir, resource_listdir, resource_stream

        module = 'pex'

        def recursive_copy(srcdir, dstdir):
            os.mkdir(dstdir)
            for entry_name in resource_listdir(module, srcdir):
                # NB: Resource path components are always separated by /, on all systems.
                src_entry = '{}/{}'.format(srcdir, entry_name) if srcdir else entry_name
                dst_entry = os.path.join(dstdir, entry_name)
                if resource_isdir(module, src_entry):
                    recursive_copy(src_entry, dst_entry)
                elif not entry_name.endswith('.pyc'):
                    with open(dst_entry, 'wb') as fp:
                        # FIX: close the resource stream deterministically instead of relying
                        # on GC; under zipimport each stream holds an open file handle.
                        with closing(resource_stream(module, src_entry)) as resource:
                            shutil.copyfileobj(resource, fp)

        pex_path = os.path.join(vendor.VendorSpec.ROOT, 'pex')
        with _tracer().timed('Hashing pex'):
            dir_hash = CacheHelper.dir_hash(pex_path)
        isolated_dir = os.path.join(ENV.PEX_ROOT, 'isolated', dir_hash)

        with _tracer().timed('Isolating pex'):
            with atomic_directory(isolated_dir) as chroot:
                if chroot:
                    with _tracer().timed('Extracting pex to {}'.format(isolated_dir)):
                        recursive_copy('', os.path.join(chroot, 'pex'))

        _ISOLATED = IsolationResult(pex_hash=dir_hash, chroot_path=isolated_dir)
    return _ISOLATED
def test_atomic_directory_empty_workdir_failure():
    # An exception raised inside the context must discard the work_dir entirely.
    class SimulatedRuntimeError(RuntimeError):
        pass

    with temporary_dir() as sandbox:
        target_dir = os.path.join(sandbox, 'target_dir')
        with pytest.raises(SimulatedRuntimeError):
            with atomic_directory(target_dir) as work_dir:
                touch(os.path.join(work_dir, 'created'))
                raise SimulatedRuntimeError()

        assert not os.path.exists(work_dir), 'The work_dir should always be cleaned up.'
        assert not os.path.exists(target_dir), (
            'When the context raises the work_dir it was given should not be moved to the target_dir.'
        )
def test_atomic_directory_empty_workdir_failure():
    # type: () -> None
    # An exception raised inside the context must discard the work_dir entirely.
    class SimulatedRuntimeError(RuntimeError):
        pass

    with temporary_dir() as sandbox:
        target_dir = os.path.join(sandbox, "target_dir")
        with pytest.raises(SimulatedRuntimeError):
            with atomic_directory(target_dir) as work_dir:
                touch(os.path.join(work_dir, "created"))
                raise SimulatedRuntimeError()

        assert not os.path.exists(work_dir), "The work_dir should always be cleaned up."  # type: ignore[unreachable]
        assert not os.path.exists(
            target_dir
        ), "When the context raises the work_dir it was given should not be moved to the target_dir."
def _force_local(cls, pex_file, pex_info):
    """Explode a zip-unsafe PEX to the filesystem, returning the path to run from."""
    if pex_info.code_hash is None:
        # Do not support force_local if code_hash is not set. (It should always be set.)
        return pex_file
    explode_dir = os.path.join(pex_info.zip_unsafe_cache, pex_info.code_hash)
    TRACER.log('PEX is not zip safe, exploding to %s' % explode_dir)
    with atomic_directory(explode_dir) as explode_tmp:
        if explode_tmp:
            with TRACER.timed('Unzipping %s' % pex_file):
                with open_zip(pex_file) as pex_zip:
                    # Skip the bootstrap and the internal dist cache; only user code is
                    # needed on the filesystem.
                    pex_files = (
                        x
                        for x in pex_zip.namelist()
                        if not x.startswith(pex_builder.BOOTSTRAP_DIR)
                        and not x.startswith(pex_info.internal_cache)
                    )
                    pex_zip.extractall(explode_tmp, pex_files)
    return explode_dir
def test_atomic_directory_empty_workdir_finalize():
    # A clean exit from the context moves the work_dir contents into target_dir.
    with temporary_dir() as sandbox:
        target_dir = os.path.join(sandbox, 'target_dir')
        assert not os.path.exists(target_dir)

        with atomic_directory(target_dir) as work_dir:
            assert work_dir is not None
            assert os.path.exists(work_dir)
            assert os.path.isdir(work_dir)
            assert os.listdir(work_dir) == []

            touch(os.path.join(work_dir, 'created'))

            # The target only materializes after the context exits cleanly.
            assert not os.path.exists(target_dir)

        assert not os.path.exists(work_dir), 'The work_dir should always be cleaned up.'
        assert os.path.exists(os.path.join(target_dir, 'created'))
def test_atomic_directory_empty_workdir_finalize():
    # type: () -> None
    # A clean exit from the context moves the work_dir contents into target_dir.
    with temporary_dir() as sandbox:
        target_dir = os.path.join(sandbox, "target_dir")
        assert not os.path.exists(target_dir)

        with atomic_directory(target_dir) as work_dir:
            assert work_dir is not None
            assert os.path.exists(work_dir)
            assert os.path.isdir(work_dir)
            assert os.listdir(work_dir) == []

            touch(os.path.join(work_dir, "created"))

            # The target only materializes after the context exits cleanly.
            assert not os.path.exists(target_dir)

        assert not os.path.exists(work_dir), "The work_dir should always be cleaned up."
        assert os.path.exists(os.path.join(target_dir, "created"))
def test_atomic_directory_empty_workdir_finalize():
    # type: () -> None
    # A clean exit from the context moves the work_dir contents into target_dir.
    with temporary_dir() as sandbox:
        target_dir = os.path.join(sandbox, "target_dir")
        assert not os.path.exists(target_dir)

        with atomic_directory(target_dir, exclusive=False) as atomic_dir:
            assert not atomic_dir.is_finalized
            assert target_dir == atomic_dir.target_dir

            work_dir = atomic_dir.work_dir
            assert os.path.exists(work_dir)
            assert os.path.isdir(work_dir)
            assert os.listdir(work_dir) == []

            touch(os.path.join(work_dir, "created"))

            # The target only materializes after the context exits cleanly.
            assert not os.path.exists(target_dir)

        assert not os.path.exists(work_dir), "The work_dir should always be cleaned up."
        assert os.path.exists(os.path.join(target_dir, "created"))
def isolated():
    """Returns a chroot for third_party isolated from the ``sys.path``.

    PEX will typically be installed in site-packages flat alongside many other distributions;
    as such, adding the location of the pex distribution to the ``sys.path`` will typically
    expose many other distributions. An isolated chroot can be used as a ``sys.path`` entry
    to effect only the exposure of pex.

    :return: The path of the chroot.
    :rtype: str
    """
    global _ISOLATED
    if _ISOLATED is None:
        from pex import vendor
        from pex.common import atomic_directory, safe_copy
        from pex.util import CacheHelper
        from pex.variables import ENV

        pex_path = os.path.join(vendor.VendorSpec.ROOT, 'pex')
        with _tracer().timed('Isolating pex'):
            isolated_dir = os.path.join(ENV.PEX_ROOT, 'isolated', CacheHelper.dir_hash(pex_path))
            with atomic_directory(isolated_dir) as chroot:
                if chroot:
                    with _tracer().timed('Extracting pex to {}'.format(isolated_dir)):
                        pex_path = os.path.join(vendor.VendorSpec.ROOT, 'pex')
                        for dirpath, dirnames, filenames in os.walk(pex_path):
                            relroot = os.path.relpath(dirpath, pex_path)
                            for dirname in dirnames:
                                os.makedirs(os.path.join(chroot, 'pex', relroot, dirname))
                            for filename in filenames:
                                # Skip stale bytecode; only sources are isolated.
                                if filename.endswith('.pyc'):
                                    continue
                                safe_copy(
                                    os.path.join(dirpath, filename),
                                    os.path.join(chroot, 'pex', relroot, filename),
                                )

        _ISOLATED = isolated_dir
    return _ISOLATED
def create(cls, path):
    # type: (str) -> Pip
    """Creates a pip tool with PEX isolation at path.

    :param path: The path to build the pip tool pex at.
    """
    pip_pex_path = os.path.join(path, isolated().pex_hash)
    with atomic_directory(pip_pex_path, exclusive=True) as chroot:
        if chroot is not None:
            # Deferred import to avoid an import cycle at module load time.
            from pex.pex_builder import PEXBuilder

            isolated_pip_builder = PEXBuilder(path=chroot)

            # Gather pip and its build-time dependencies from the vendored third party
            # environment and add them to the pex under construction.
            pythonpath = third_party.expose(["pip", "setuptools", "wheel"])
            isolated_pip_environment = third_party.pkg_resources.Environment(
                search_path=pythonpath
            )
            for dist_name in isolated_pip_environment:
                for dist in isolated_pip_environment[dist_name]:
                    isolated_pip_builder.add_dist_location(dist=dist.location)

            with open(os.path.join(isolated_pip_builder.path(), "run_pip.py"), "w") as fp:
                fp.write(
                    dedent(
                        """\
                        import os
                        import runpy
                        import sys


                        # Propagate un-vendored setuptools to pip for any legacy setup.py builds it needs to
                        # perform.
                        os.environ['__PEX_UNVENDORED__'] = '1'
                        os.environ['PYTHONPATH'] = os.pathsep.join(sys.path)

                        runpy.run_module('pip', run_name='__main__')
                        """
                    )
                )
            isolated_pip_builder.set_executable(fp.name)
            isolated_pip_builder.freeze()
    return cls(pip_pex_path)
def ensure_venv(pex):
    # type: (PEX) -> str
    """Ensure a venv materialization of `pex` exists, returning its `pex` script path."""
    pex_info = pex.pex_info()
    venv_dir = pex_info.venv_dir
    if venv_dir is None:
        raise AssertionError(
            "Expected PEX-INFO for {} to have the components of a venv "
            "directory".format(pex.path())
        )
    with atomic_directory(venv_dir, exclusive=True) as venv:
        if venv:
            # Deferred imports avoid cycles with the tools package.
            from .tools.commands.venv import populate_venv_with_pex
            from .tools.commands.virtualenv import Virtualenv

            virtualenv = Virtualenv.create(venv_dir=venv, interpreter=pex.interpreter)
            populate_venv_with_pex(
                virtualenv,
                pex,
                bin_path=pex_info.venv_bin_path,
                python=os.path.join(venv_dir, "bin", os.path.basename(pex.interpreter.binary)),
                collisions_ok=True,
            )
    return os.path.join(venv_dir, "pex")
def test_atomic_directory_empty_workdir_finalized():
    # A pre-existing target_dir means the atomic directory yields no work_dir at all.
    with temporary_dir() as existing_dir:
        with atomic_directory(existing_dir) as work_dir:
            assert work_dir is None, "When the target_dir exists no work_dir should be created."
def ensure_venv(pex):
    # type: (PEX) -> str
    """Ensure a venv materialization of `pex` exists, returning its `pex` script path.

    Also creates a short symlink to the venv so script shebangs stay under common OS
    shebang length limits.
    """
    pex_info = pex.pex_info()
    venv_dir = pex_info.venv_dir
    if venv_dir is None:
        raise AssertionError(
            "Expected PEX-INFO for {} to have the components of a venv "
            "directory".format(pex.path())
        )
    with atomic_directory(venv_dir, exclusive=True) as venv:
        if venv:
            from .tools.commands.venv import populate_venv_with_pex
            from .tools.commands.virtualenv import Virtualenv

            virtualenv = Virtualenv.create(
                venv_dir=venv,
                interpreter=pex.interpreter,
                copies=pex_info.venv_copies,
            )

            pex_path = os.path.abspath(pex.path())

            short_venv_dir = os.path.join(pex_info.pex_root, "venvs", "short")
            safe_mkdir(short_venv_dir)

            # A sha1 hash is 160 bits -> 20 bytes -> 40 hex characters. We start with 8
            # characters (32 bits) of entropy since that is short and _very_ unlikely to
            # collide with another PEX venv on this machine. If we still collide after using
            # the whole sha1 (for a total of 33 collisions), then the universe is broken and
            # we raise. It's the least we can do.
            venv_hash = hashlib.sha1(venv_dir.encode("utf-8")).hexdigest()
            collisions = []
            for chars in range(8, len(venv_hash) + 1):
                entropy = venv_hash[:chars]
                short_venv_path = os.path.join(short_venv_dir, entropy)
                try:
                    os.symlink(venv_dir, short_venv_path)
                    break
                except OSError as e:
                    if e.errno != errno.EEXIST:
                        raise e
                    collisions.append(short_venv_path)
                    if entropy == venv_hash:
                        raise RuntimeError(
                            "The venv for {pex} at {venv} has hash collisions with {count} other "
                            "{venvs}!\n{collisions}".format(
                                pex=pex_path,
                                venv=venv_dir,
                                count=len(collisions),
                                venvs=pluralize(collisions, "venv"),
                                collisions="\n".join(
                                    "{index}.) {venv_path}".format(
                                        index=index, venv_path=os.path.realpath(path)
                                    )
                                    for index, path in enumerate(collisions, start=1)
                                ),
                            )
                        )

            shebang = populate_venv_with_pex(
                virtualenv,
                pex,
                bin_path=pex_info.venv_bin_path,
                python=os.path.join(
                    short_venv_path, "bin", os.path.basename(pex.interpreter.binary)
                ),
                collisions_ok=True,
            )

            # There are popular Linux distributions with shebang length limits
            # (BINPRM_BUF_SIZE in /usr/include/linux/binfmts.h) set at 128 characters, so we
            # warn in the _very_ unlikely case that our shortened shebang is longer than this.
            if len(shebang) > 128:
                pex_warnings.warn(
                    "The venv for {pex} at {venv} has script shebangs of {shebang!r} with {count} "
                    "characters. On some systems this may be too long and cause problems running "
                    "the venv scripts. You may be able adjust PEX_ROOT from {pex_root} to a "
                    "shorter path as a work-around.".format(
                        pex=pex_path,
                        venv=venv_dir,
                        shebang=shebang,
                        count=len(shebang),
                        pex_root=pex_info.pex_root,
                    )
                )

    return os.path.join(venv_dir, "pex")
def supported_tags(self, manylinux=None):
    # type: (Optional[str]) -> Tuple[tags.Tag, ...]
    """Return the supported wheel tags for this platform.

    We use a 2 level cache, probing memory first and then a json file on disk in order to
    avoid calculating tags when possible since it's an O(500ms) operation that involves
    spawning Pip. Any unreadable or malformed disk cache entry is deleted and the lookup is
    retried from scratch via recursion.
    """
    # Read level 1: in-memory.
    memory_cache_key = (self, manylinux)
    supported_tags = self._SUPPORTED_TAGS_BY_PLATFORM.get(memory_cache_key)
    if supported_tags is not None:
        return supported_tags

    # Read level 2: json file on disk.
    components = list(attr.astuple(self))
    if manylinux:
        components.append(manylinux)
    disk_cache_key = os.path.join(ENV.PEX_ROOT, "platforms", self.SEP.join(components))

    with atomic_directory(target_dir=disk_cache_key, exclusive=False) as cache_dir:
        if not cache_dir.is_finalized:
            # Missed both caches - spawn calculation.
            plat_info = attr.asdict(self)
            plat_info.update(
                supported_tags=[
                    (tag.interpreter, tag.abi, tag.platform)
                    for tag in self._calculate_tags(manylinux=manylinux)
                ],
            )
            # Write level 2.
            with safe_open(os.path.join(cache_dir.work_dir, self.PLAT_INFO_FILE), "w") as fp:
                json.dump(plat_info, fp)

    with open(os.path.join(disk_cache_key, self.PLAT_INFO_FILE)) as fp:
        try:
            data = json.load(fp)
        except ValueError as e:
            TRACER.log(
                "Regenerating the platform info file at {} since it did not contain parsable "
                "JSON data: {}".format(fp.name, e)
            )
            safe_rmtree(disk_cache_key)
            return self.supported_tags(manylinux=manylinux)

    if not isinstance(data, dict):
        TRACER.log(
            "Regenerating the platform info file at {} since it did not contain a "
            "configuration object. Found: {!r}".format(fp.name, data)
        )
        safe_rmtree(disk_cache_key)
        return self.supported_tags(manylinux=manylinux)

    sup_tags = data.get("supported_tags")
    if not isinstance(sup_tags, list):
        TRACER.log(
            "Regenerating the platform info file at {} since it was missing a valid "
            "`supported_tags` list. Found: {!r}".format(fp.name, sup_tags)
        )
        safe_rmtree(disk_cache_key)
        return self.supported_tags(manylinux=manylinux)

    count = len(sup_tags)

    def parse_tag(
        index,  # type: int
        tag,  # type: List[Any]
    ):
        # type: (...) -> tags.Tag
        # Each serialized tag must be a [interpreter, abi, platform] triple of strings.
        if len(tag) != 3 or not all(
            isinstance(component, compatibility.string) for component in tag
        ):
            raise ValueError(
                "Serialized platform tags should be lists of three strings. Tag {index} of "
                "{count} was: {tag!r}.".format(index=index, count=count, tag=tag)
            )
        interpreter, abi, platform = tag
        return tags.Tag(interpreter=interpreter, abi=abi, platform=platform)

    try:
        supported_tags = tuple(parse_tag(index, tag) for index, tag in enumerate(sup_tags))
    except ValueError as e:
        TRACER.log(
            "Regenerating the platform info file at {} since it did not contain parsable "
            "tag data: {}".format(fp.name, e)
        )
        safe_rmtree(disk_cache_key)
        return self.supported_tags(manylinux=manylinux)

    # Write level 1.
    self._SUPPORTED_TAGS_BY_PLATFORM[memory_cache_key] = supported_tags
    return supported_tags