def get_credits_data() -> dict:
    """Return data used to generate the credits file.

    Returns:
        Data required to render the credits template.
    """
    root = Path(__file__).parent.parent
    poetry_meta = toml.load(root / "pyproject.toml")["tool"]["poetry"]
    lock = toml.load(root / "poetry.lock")

    # Direct dependencies are declared in pyproject.toml; "python" itself
    # is declared there but is not a package to credit.
    declared = set()
    for section in ("dependencies", "dev-dependencies"):
        declared.update(name.lower() for name in poetry_meta[section])
    declared.remove("python")

    # Everything else pinned in poetry.lock is an indirect dependency.
    locked = {entry["name"].lower() for entry in lock["package"]} - declared

    packages = {}
    for info in search_packages_info(declared | locked):
        entry = {"name": info["name"], "home-page": info["home-page"]}
        packages[entry["name"].lower()] = entry

    # all packages might not be credited,
    # like the ones that are now part of the standard library
    # or the ones that are only used on other operating systems,
    # and therefore are not installed,
    # but it's not that important
    return {
        "project_name": poetry_meta["name"],
        "direct_dependencies": sorted(declared),
        "indirect_dependencies": sorted(locked),
        "package_info": packages,
    }
def get_package_info(name=None, start=0, end=-1):
    """
    Calls ``pip show`` to retrieve information about packages.

    @param      name    name of the package, or None to get all of them in a list
    @param      start   start at package n (in list returned by @see fn get_packages_list)
    @param      end     end at package n, -1 for all
    @return             dictionary or list of dictionaries
    """
    from pip._internal.commands.show import search_packages_info
    if name is None:
        # Recurse over every installed package in the [start:end] window.
        res = []
        packs = get_packages_list()
        if end == -1:
            end = len(packs)
        subp = packs[start:end]
        if len(subp) == 0:
            raise PQPipError(  # pragma: no cover
                "No package, start={0}, end={1}, len(subp)={2}, len(packs)={3}".format(
                    start, end, len(subp), len(packs)))
        for cp in subp:
            pack = cp.project_name
            info = get_package_info(pack)
            res.append(info)
        if len(res) == 0 and len(subp) > 0:
            # BUG FIX: the format string referenced positional argument {3}
            # while only three arguments were supplied, so this raise path
            # crashed with an IndexError instead of the intended PQPipError.
            raise PQPipError(  # pragma: no cover
                "Empty list, unexpected, start={0}, end={1}, len(subp)={2}".format(
                    start, end, len(subp)))
        return res
    res = list(search_packages_info([name]))
    if len(res) != 1:
        raise PQPipError(  # pragma: no cover
            "Unexpected number of results {0} for {1}".format(len(res), name))
    return res[0]
def get_ydl_website(ydl_module_name):
    """Return the home page URL pip reports for the module, or "" if unknown."""
    import pip._internal.commands.show as pipshow

    results = list(pipshow.search_packages_info([ydl_module_name]))
    if results and "home-page" in results[0]:
        return results[0]["home-page"]
    return ""
def test_more_than_one_package() -> None:
    """
    Search for more than one package.
    """
    names = ["pIp", "pytest", "Virtualenv"]
    result = list(search_packages_info(names))
    assert len(result) == 3
def test_more_than_one_package():
    """
    Search for more than one package.
    """
    packages = search_packages_info(['Pip', 'pytest', 'Virtualenv'])
    assert len(list(packages)) == 3
def test_search_any_case() -> None:
    """
    Search for a package in any case.
    """
    result = [pkg for pkg in search_packages_info(["PIP"])]
    assert len(result) == 1
    assert result[0].name == "pip"
def test_search_any_case():
    """
    Search for a package in any case.
    """
    result = [info for info in search_packages_info(['PIP'])]
    assert len(result) == 1
    assert result[0]['name'] == 'pip'
def get_package_files(package_name: str) -> List[str]:
    """Get package files list."""
    matches = list(search_packages_info([package_name]))
    if not matches:
        raise Exception(f"package {package_name} not found")
    info = matches[0]
    base = Path(info["location"])
    return [str(base / relpath) for relpath in info["files"]]
def find_missing_reqs(options, requirements_filename):
    """Return packages the code imports but *requirements_filename* omits.

    Each element of the result is a ``(canonical_name, [import-info, ...])``
    pair for a used third-party package with no explicit requirement.
    """
    # 1. find files used by imports in the code (as best we can without
    # executing)
    used_modules = common.find_imported_modules(options)

    # 2. find which packages provide which files
    installed_files = {}
    all_pkgs = (pkg.project_name for pkg in get_installed_distributions())
    for package in search_packages_info(all_pkgs):
        log.debug('installed package: %s (at %s)', package['name'],
                  package['location'])
        for package_file in package.get('files', []) or []:
            path = os.path.realpath(
                os.path.join(package['location'], package_file),
            )
            installed_files[path] = package['name']
            package_path = common.is_package_file(path)
            if package_path:
                # we've seen a package file so add the bare package directory
                # to the installed list as well as we might want to look up
                # a package by its directory path later
                installed_files[package_path] = package['name']

    # 3. match imported modules against those packages
    used = collections.defaultdict(list)
    for modname, info in used_modules.items():
        # probably standard library if it's not in the files list
        if info.filename in installed_files:
            used_name = canonicalize_name(installed_files[info.filename])
            log.debug('used module: %s (from package %s)', modname,
                      installed_files[info.filename])
            used[used_name].append(info)
        else:
            log.debug(
                'used module: %s (from file %s, assuming stdlib or local)',
                modname, info.filename)

    # 4. compare with requirements.txt
    explicit = set()
    for requirement in parse_requirements(
            requirements_filename, session=PipSession(),
    ):
        try:
            requirement_name = requirement.name
        # The type of "requirement" changed between pip versions.
        # We exclude the "except" from coverage so that on any pip version we
        # can report 100% coverage.
        except AttributeError:  # pragma: no cover
            from pip._internal.req.constructors import install_req_from_line
            requirement_name = install_req_from_line(
                requirement.requirement,
            ).name

        log.debug('found requirement: %s', requirement_name)
        explicit.add(canonicalize_name(requirement_name))

    return [(name, used[name]) for name in used if name not in explicit]
def find_owners(path):
    """Return the package(s) that file belongs to."""
    target = os.path.abspath(path)
    names = [d.project_name
             for d in get_installed_distributions(user_only=ENABLE_USER_SITE)]
    names.sort(key=str.lower)
    owners = []
    for pkg in search_packages_info(names):
        if is_owner(pkg, target):
            owners.append(pkg['name'])
    return owners
def get_package_info(self, package_name: str):
    """Collect ``pip show``-style metadata for *package_name*.

    Returns:
        Dict with name/version/summary/home-page/author/location/requires/
        requires_by entries (empty defaults when a field is missing), or an
        empty dict when the package is not installed.
    """
    distributions = search_packages_info([package_name])
    messages = {}
    for dist in distributions:
        messages['name'] = dist.get('name', '')
        messages['version'] = dist.get('version', '')
        messages['summary'] = dist.get('summary', '')
        messages['home-page'] = dist.get('home-page', '')
        # BUG FIX: the output key was misspelled 'anthor', so callers looking
        # up the author under the intended key always got nothing.
        messages['author'] = dist.get('author', '')
        messages['location'] = dist.get('location', '')
        messages['requires'] = dist.get('requires', [])
        # NOTE(review): output key 'requires_by' (populated from pip's
        # 'required_by' field) kept as-is for backward compatibility.
        messages['requires_by'] = dist.get('required_by', [])
    return messages
def get_module_requirement(module: ModuleType) -> Optional[Requirement]:
    """Find the installed distribution that provides *module*'s source file.

    Args:
        module: An imported third-party module.

    Returns:
        The matching distribution's requirement, or None when no installed
        package contains the module's file.
    """
    module_relative_location = os.path.relpath(
        module.__file__,
        THIRD_PARTY_PACKAGES_ROOT_DIR,
    )
    for distribution_info in get_installed_distributions():
        key = distribution_info.project_name
        # BUG FIX: the original indexed [0] unconditionally (IndexError when
        # "pip show" finds nothing) and iterated a possibly-missing/None
        # 'files' entry (TypeError). Skip such distributions instead.
        matches = list(search_packages_info([key]))
        if not matches:
            continue
        package_files = matches[0].get('files') or []
        package_sources = {x for x in package_files if x.endswith('.py')}
        if module_relative_location in package_sources:
            return distribution_info.as_requirement()
    return None
def get_data() -> dict:
    """
    Return data used to generate the credits file.

    Returns:
        Data required to render the credits template.
    """
    project_dir = Path(__file__).parent.parent
    metadata = toml.load(project_dir / "pyproject.toml")["tool"]["poetry"]
    lock_data = toml.load(project_dir / "poetry.lock")
    project_name = metadata["name"]

    # Direct dependencies are those declared in pyproject.toml (runtime and
    # dev); "python" itself is declared there but is not a package to credit.
    poetry_dependencies = chain(metadata["dependencies"].keys(),
                                metadata["dev-dependencies"].keys())
    direct_dependencies = sorted(dep.lower() for dep in poetry_dependencies)
    direct_dependencies.remove("python")
    # Everything else pinned in poetry.lock is an indirect dependency.
    indirect_dependencies = sorted(pkg["name"] for pkg in lock_data["package"]
                                   if pkg["name"] not in direct_dependencies)
    dependencies = direct_dependencies + indirect_dependencies

    packages = {
        pkg["name"]: clean_info(pkg)
        for pkg in search_packages_info(dependencies)
    }
    # poetry.lock seems to always use lowercase for packages names
    packages.update({name.lower(): pkg
                     for name, pkg in packages.items()})  # noqa: WPS221 (not that complex)

    # Dependencies not installed locally (e.g. platform-specific packages)
    # are resolved through the PyPI JSON API instead.
    for dependency in dependencies:
        if dependency not in packages:
            pkg_data = httpx.get(
                f"https://pypi.python.org/pypi/{dependency}/json").json()["info"]
            home_page = pkg_data["home_page"] or pkg_data[
                "project_url"] or pkg_data["package_url"]
            pkg_name = pkg_data["name"]
            pkg = {"name": pkg_name, "home-page": home_page}
            # Index under both the canonical and lowercase names.
            packages.update({pkg_name: pkg, pkg_name.lower(): pkg})

    return {
        "project_name": project_name,
        "direct_dependencies": direct_dependencies,
        "indirect_dependencies": indirect_dependencies,
        "package_info": packages,
    }
def get_credits_data() -> dict:
    """
    Return data used to generate the credits file.

    Returns:
        Data required to render the credits template.
    """
    project_dir = Path(__file__).parent.parent
    metadata = toml.load(project_dir / "pyproject.toml")["tool"]["poetry"]
    lock_data = toml.load(project_dir / "poetry.lock")
    project_name = metadata["name"]

    # Dependencies declared in pyproject.toml; "python" is not a package.
    poetry_dependencies = chain(metadata["dependencies"].keys(),
                                metadata["dev-dependencies"].keys())
    direct_dependencies = {dep.lower() for dep in poetry_dependencies}
    direct_dependencies.remove("python")
    # Remaining entries pinned in poetry.lock are indirect dependencies.
    indirect_dependencies = {
        pkg["name"].lower()
        for pkg in lock_data["package"]
    }
    indirect_dependencies -= direct_dependencies
    dependencies = direct_dependencies | indirect_dependencies

    # Keep only the fields the template needs, keyed by lowercase name.
    packages = {}
    for pkg in search_packages_info(dependencies):
        pkg = {_: pkg[_] for _ in ("name", "home-page")}
        packages[pkg["name"].lower()] = pkg

    # Dependencies not installed locally (e.g. platform-specific packages)
    # are resolved through the PyPI JSON API instead.
    for dependency in dependencies:
        if dependency not in packages:
            pkg_data = httpx.get(
                f"https://pypi.python.org/pypi/{dependency}/json").json()["info"]
            home_page = pkg_data["home_page"] or pkg_data[
                "project_url"] or pkg_data["package_url"]
            pkg_name = pkg_data["name"]
            package = {"name": pkg_name, "home-page": home_page}
            packages.update({pkg_name.lower(): package})

    return {
        "project_name": project_name,
        "direct_dependencies": sorted(direct_dependencies),
        "indirect_dependencies": sorted(indirect_dependencies),
        "package_info": packages,
    }
def find_extra_reqs(options, requirements_filename):
    """Return requirement names that are declared but never imported."""
    # 1. find files used by imports in the code (as best we can without
    # executing)
    used_modules = common.find_imported_modules(options)

    # 2. find which packages provide which files
    installed_files = {}
    all_pkgs = (pkg.project_name for pkg in get_installed_distributions())
    for package in search_packages_info(all_pkgs):
        log.debug('installed package: %s (at %s)', package['name'],
                  package['location'])
        for package_file in package.get('files', []) or []:
            path = os.path.realpath(
                os.path.join(package['location'], package_file),
            )
            installed_files[path] = package['name']
            package_path = common.is_package_file(path)
            if package_path:
                # we've seen a package file so add the bare package directory
                # to the installed list as well as we might want to look up
                # a package by its directory path later
                installed_files[package_path] = package['name']

    # 3. match imported modules against those packages
    used = collections.defaultdict(list)
    for modname, info in used_modules.items():
        # probably standard library if it's not in the files list
        if info.filename in installed_files:
            used_name = canonicalize_name(installed_files[info.filename])
            log.debug('used module: %s (from package %s)', modname,
                      installed_files[info.filename])
            used[used_name].append(info)
        else:
            log.debug(
                'used module: %s (from file %s, assuming stdlib or local)',
                modname, info.filename)

    # 4. compare with requirements.txt
    explicit = common.find_required_modules(
        options=options,
        requirements_filename=requirements_filename,
    )

    return [name for name in explicit if name not in used]
def create_nodes(package_names, depth=0):
    """Show information about installed package.

    Builds one Node per package in *package_names*, recursing into each
    package's ``requires`` list to populate ``node.targets``.

    Returns:
        List of Node objects (possibly empty).
    """
    nodes = []
    results = search_packages_info(package_names)
    try:
        for dist in results:
            node = Node(dist.get('name'),
                        dist.get('version'),
                        url=dist.get('home-page'),
                        requires=dist.get('requires'),
                        depth=depth)
            if len(dist.get('requires')) > 0:
                # Recurse one level deeper for this package's dependencies.
                _nodes = create_nodes(dist.get('requires'), node.depth + 1)
                node.targets += _nodes
            nodes.append(node)
    except StopIteration:
        # search_packages_info is a generator; tolerate early exhaustion and
        # return whatever was collected so far.
        pass
    # BUG FIX: the original returned from a ``finally`` block, which silently
    # swallowed *every* exception (pylint: lost-exception) — including real
    # errors such as a broken Node constructor. Only StopIteration is
    # expected here; anything else now propagates to the caller.
    return nodes
def get_credits_data() -> dict:
    """
    Return data used to generate the credits file.

    Returns:
        Data required to render the credits template.
    """
    project_dir = Path(__file__).parent.parent
    metadata = toml.load(project_dir / "pyproject.toml")["project"]
    metadata_pdm = toml.load(project_dir / "pyproject.toml")["tool"]["pdm"]
    lock_data = toml.load(project_dir / "pdm.lock")
    project_name = metadata["name"]

    # Runtime, optional and PDM dev dependencies all count as "direct".
    all_dependencies = chain(
        metadata.get("dependencies", []),
        chain(*metadata.get("optional-dependencies", {}).values()),
        chain(*metadata_pdm.get("dev-dependencies", {}).values()),
    )
    # Strip extras/version specifiers: keep only the leading run of word
    # characters and hyphens of each requirement string.
    direct_dependencies = {re.sub(r"[^\w-].*$", "", dep) for dep in all_dependencies}
    direct_dependencies = {dep.lower() for dep in direct_dependencies}
    # Remaining entries pinned in pdm.lock are indirect dependencies.
    indirect_dependencies = {pkg["name"].lower() for pkg in lock_data["package"]}
    indirect_dependencies -= direct_dependencies

    # Keep only the fields the template needs, keyed by lowercase name.
    packages = {}
    for pkg in search_packages_info(direct_dependencies | indirect_dependencies):
        pkg = {_: pkg[_] for _ in ("name", "home-page")}
        packages[pkg["name"].lower()] = pkg

    # all packages might not be credited,
    # like the ones that are now part of the standard library
    # or the ones that are only used on other operating systems,
    # and therefore are not installed,
    # but it's not that important
    return {
        "project_name": project_name,
        "direct_dependencies": sorted(direct_dependencies),
        "indirect_dependencies": sorted(indirect_dependencies),
        "package_info": packages,
    }
def get_data():
    """Return data used to generate the credits file."""
    metadata = toml.load(Path(__file__).parent.parent / "pyproject.toml")["tool"]["poetry"]
    project_name = metadata["name"]

    # Direct dependencies come from pyproject.toml; "python" is not a package.
    direct_dependencies = sorted(
        _.lower() for _ in chain(metadata["dependencies"].keys(), metadata["dev-dependencies"].keys())
    )
    direct_dependencies.remove("python")
    lock_data = toml.load(Path(__file__).parent.parent / "poetry.lock")
    # Everything else pinned in poetry.lock is an indirect dependency.
    indirect_dependencies = sorted(p["name"] for p in lock_data["package"] if p["name"] not in direct_dependencies)

    package_info = {p["name"]: clean_info(p) for p in search_packages_info(direct_dependencies + indirect_dependencies)}
    for dependency in direct_dependencies + indirect_dependencies:
        # poetry.lock seems to always use lowercase for packages names
        if dependency not in [_.lower() for _ in package_info.keys()]:
            # Not installed locally: fall back to the PyPI JSON API.
            info = requests.get(f"https://pypi.python.org/pypi/{dependency}/json").json()["info"]
            package_info[info["name"]] = {
                "name": info["name"],
                "home-page": info["home_page"] or info["project_url"] or info["package_url"],
            }

    # Also index every entry by its lowercase name for lookups.
    lower_package_info = {}
    for package_name, package in package_info.items():
        lower = package_name.lower()
        if lower != package_name:
            lower_package_info[lower] = package
    package_info.update(lower_package_info)

    return {
        "project_name": project_name,
        "direct_dependencies": direct_dependencies,
        "indirect_dependencies": indirect_dependencies,
        "package_info": package_info,
    }
def translate_req_to_module_names(requirement_name):
    """Map an installed requirement name to the module names it provides."""
    provides = set()

    def is_module_folder(filepath):
        # A top-level site-packages entry that is a real package directory.
        return bool(filepath) and \
            '/' not in filepath and \
            '.egg-info' not in filepath and \
            '.dist-info' not in filepath and \
            '__pycache__' not in filepath

    def is_top_level_file(filepath):
        # A .py file sitting directly in site-packages.
        return bool(filepath) and \
            '/' not in filepath and \
            filepath.endswith('.py')

    for result in search_packages_info([requirement_name]):
        if 'files' not in result or not result['files']:
            # Assume that only one module is installed in this case
            continue
        # Handle modules that are installed as folders in site-packages
        for filepath in result['files']:
            folder = os.path.dirname(filepath)
            if is_module_folder(folder):
                provides.add(folder)
        # Handle modules that are installed as .py files in site-packages
        for filename in result['files']:
            if is_top_level_file(filename):
                provides.add(os.path.splitext(filename)[0])

    if provides:
        return provides

    module_name = requirement_name.split('.')[0]
    if module_name not in ALL_MODULES:
        LOGGER.warning(
            "Cannot find install location of '%s'; please \
install this package for more accurate name resolution",
            requirement_name)
    return set([requirement_name])
def get_url_for_platform(self, req):
    """Resolve a pip-installable source for the PyTorch requirement *req*.

    Tries, in order: a pre-installed system package satisfying the request,
    the official PyTorch wheel page for this CUDA version (then nightly,
    then progressively lower CUDA versions — never a CPU build), and
    finally a URL composed directly from the wheel naming scheme.

    Returns:
        Tuple of (requirement line or wheel URL, bool): True when a
        pre-installed package is reused, otherwise the result of an HTTP
        HEAD check on the resolved URL.

    Raises:
        ValueError: when no CUDA-compatible wheel page can be found.
    """
    # check if package is already installed with system packages
    # noinspection PyBroadException
    try:
        if self.config.get("agent.package_manager.system_site_packages", None):
            from pip._internal.commands.show import search_packages_info
            installed_torch = list(search_packages_info([req.name]))
            # notice the comparison order, the first part will make sure we have a valid installed package
            if installed_torch and installed_torch[0]['version'] and \
                    req.compare_version(installed_torch[0]['version']):
                print('PyTorch: requested "{}" version {}, using pre-installed version {}'.format(
                    req.name, req.specs[0] if req.specs else 'unspecified',
                    installed_torch[0]['version']))
                # package already installed, do nothing
                req.specs = [('==', str(installed_torch[0]['version']))]
                return '{} {} {}'.format(req.name, req.specs[0][0], req.specs[0][1]), True
    except Exception:
        pass

    # make sure we have a specific version to retrieve
    if not req.specs:
        req.specs = [('>', '0')]
    # strip any local build suffix (e.g. "+cu110") from the requested version
    # noinspection PyBroadException
    try:
        req.specs[0] = (req.specs[0][0], req.specs[0][1].split('+')[0])
    except Exception:
        pass
    op, version = req.specs[0]
    # assert op == "=="

    torch_url, torch_url_key = SimplePytorchRequirement.get_torch_page(
        self.cuda_version)
    url, closest_matched_version = self._get_link_from_torch_page(
        req, torch_url)
    if not url and self.config.get("agent.package_manager.torch_nightly", None):
        torch_url, torch_url_key = SimplePytorchRequirement.get_torch_page(
            self.cuda_version, nightly=True)
        url, closest_matched_version = self._get_link_from_torch_page(
            req, torch_url)

    # try one more time, with a lower cuda version (never fallback to CPU):
    while not url and torch_url_key > 0:
        previous_cuda_key = torch_url_key
        print(
            'Warning, could not locate PyTorch {} matching CUDA version {}, best candidate {}\n'
            .format(req, previous_cuda_key, closest_matched_version))
        url, closest_matched_version = self._get_link_from_torch_page(
            req, torch_url)
        if url:
            break
        torch_url, torch_url_key = SimplePytorchRequirement.get_torch_page(
            int(torch_url_key) - 1)
        # never fallback to CPU
        if torch_url_key < 1:
            print(
                'Error! Could not locate PyTorch version {} matching CUDA version {}'
                .format(req, previous_cuda_key))
            raise ValueError(
                'Could not locate PyTorch version {} matching CUDA version {}'
                .format(req, self.cuda_version))
        else:
            print('Trying PyTorch CUDA version {} support'.format(
                torch_url_key))

    if not url:
        # Fall back to composing the wheel URL from its naming convention.
        url = PytorchWheel(
            torch_version=fix_version(version),
            python=self.python_major_minor_str.replace('.', ''),
            os_name=self.os,
            cuda_version=self.cuda_version,
        ).make_url()
    if url:
        # normalize url (sometimes we will get ../ which we should not...
        url = '/'.join(url.split('/')[:3]) + urllib.parse.quote(
            str(furl(url).path.normalize()))

    # print found
    print('Found PyTorch version {} matching CUDA version {}'.format(
        req, torch_url_key))
    self.log.debug("checking url: %s", url)
    return url, requests.head(url, timeout=10).ok
def find_extra_reqs(options, requirements_filename):
    """Return requirement names that are declared but never imported."""
    # 1. find files used by imports in the code (as best we can without
    # executing)
    used_modules = common.find_imported_modules(options)

    # 2. find which packages provide which files
    installed_files = {}
    all_pkgs = (pkg.project_name for pkg in get_installed_distributions())
    for package in search_packages_info(all_pkgs):
        if isinstance(package, dict):  # pragma: no cover
            # Older pip: search_packages_info yields plain dicts.
            package_name = package['name']
            package_location = package['location']
            package_files = package.get('files', []) or []
        else:  # pragma: no cover
            # Newer pip: an object exposing name/location/files attributes;
            # file entries are resolved and made relative to the CWD when
            # possible so they match the dict-era relative paths.
            package_name = package.name
            package_location = package.location
            package_files = []
            for item in (package.files or []):
                here = pathlib.Path('.').resolve()
                item_location_rel = (pathlib.Path(package_location) / item)
                item_location = item_location_rel.resolve()
                try:
                    relative_item_location = item_location.relative_to(here)
                except ValueError:
                    # Ideally we would use Pathlib.is_relative_to rather than
                    # checking for a ValueError, but that is only available in
                    # Python 3.9+.
                    relative_item_location = item_location
                package_files.append(str(relative_item_location))

        log.debug('installed package: %s (at %s)', package_name,
                  package_location)
        for package_file in package_files:
            path = os.path.realpath(
                os.path.join(package_location, package_file),
            )
            installed_files[path] = package_name
            package_path = common.is_package_file(path)
            if package_path:
                # we've seen a package file so add the bare package directory
                # to the installed list as well as we might want to look up
                # a package by its directory path later
                installed_files[package_path] = package_name

    # 3. match imported modules against those packages
    used = collections.defaultdict(list)
    for modname, info in used_modules.items():
        # probably standard library if it's not in the files list
        if info.filename in installed_files:
            used_name = canonicalize_name(installed_files[info.filename])
            log.debug('used module: %s (from package %s)', modname,
                      installed_files[info.filename])
            used[used_name].append(info)
        else:
            log.debug(
                'used module: %s (from file %s, assuming stdlib or local)',
                modname, info.filename)

    # 4. compare with requirements
    explicit = common.find_required_modules(
        options=options,
        requirements_filename=requirements_filename,
    )

    return [name for name in explicit if name not in used]
def test_find_package_not_found():
    """
    Test trying to get info about a nonexistent package.
    """
    matches = list(search_packages_info(['abcd3']))
    assert len(matches) == 0
def get_requires(module_names: List[str], _seen=None) -> Iterator[List[str]]:
    """Yield the ``requires`` list of each package and of its dependencies.

    Args:
        module_names: Package names to look up via pip.
        _seen: Internal accumulator of already-visited (lowercased) package
            names; leave as None when calling from the outside.

    Yields:
        One list of requirement names per visited package.

    BUG FIX: the original recursed unconditionally, so any dependency cycle
    (A requires B, B requires A) caused unbounded recursion and a
    RecursionError. Each package is now visited at most once.
    """
    if _seen is None:
        _seen = set()
    for pkg_info in search_packages_info(module_names):
        name = pkg_info['name'].lower()
        if name in _seen:
            continue
        _seen.add(name)
        yield pkg_info['requires']
        yield from get_requires(pkg_info['requires'], _seen)
def get_licenses(module_names: List[str]) -> Iterator[str]:
    """Yield the license string reported by pip for each named package."""
    yield from (info['license'] for info in search_packages_info(module_names))
def get_url_for_platform(self, req):
    """Resolve a pip-installable source for the PyTorch requirement *req*.

    Tries a matching pre-installed system package first, then the official
    PyTorch wheel pages for this CUDA version (falling back to lower CUDA
    versions, never to CPU), and finally a URL composed from the wheel
    naming scheme.

    Returns:
        Tuple of (requirement line or wheel URL, bool): True when a
        pre-installed package is reused, otherwise the result of an HTTP
        HEAD check on the resolved URL.

    Raises:
        ValueError: when no CUDA-compatible wheel page can be found.
    """
    # check if package is already installed with system packages
    try:
        if self.config.get("agent.package_manager.system_site_packages"):
            from pip._internal.commands.show import search_packages_info
            installed_torch = list(search_packages_info([req.name]))
            op, version = req.specs[0] if req.specs else (None, None)
            # notice the comparison order, the first part will make sure we have a valid installed package
            # NOTE(review): installed_torch[0] raises IndexError when the
            # package is absent; the bare except below masks that (and any
            # other error), making this a best-effort check. Consider
            # guarding with `if installed_torch` and narrowing the except.
            if installed_torch[0]['version'] and (
                    installed_torch[0]['version'] == version or not version):
                # package already installed, do nothing
                return str(req), True
    except:
        pass

    # make sure we have a specific version to retrieve
    if not req.specs:
        req.specs = [('>', '0')]
    # strip any local build suffix (e.g. "+cu110") from the requested version
    try:
        req.specs[0] = (req.specs[0][0], req.specs[0][1].split('+')[0])
    except:
        pass
    op, version = req.specs[0]
    # assert op == "=="

    torch_url, torch_url_key = SimplePytorchRequirement.get_torch_page(
        self.cuda_version)
    url = self._get_link_from_torch_page(req, torch_url)

    # try one more time, with a lower cuda version (never fallback to CPU):
    while not url and torch_url_key > 0:
        previous_cuda_key = torch_url_key
        torch_url, torch_url_key = SimplePytorchRequirement.get_torch_page(
            int(torch_url_key) - 1)
        # never fallback to CPU
        if torch_url_key < 1:
            print(
                'Warning! Could not locate PyTorch version {} matching CUDA version {}'
                .format(req, previous_cuda_key))
            raise ValueError(
                'Could not locate PyTorch version {} matching CUDA version {}'
                .format(req, self.cuda_version))
        print(
            'Warning! Could not locate PyTorch version {} matching CUDA version {}, trying CUDA version {}'
            .format(req, previous_cuda_key, torch_url_key))
        url = self._get_link_from_torch_page(req, torch_url)

    if not url:
        # Fall back to composing the wheel URL from its naming convention.
        url = PytorchWheel(
            torch_version=fix_version(version),
            python=self.python_major_minor_str.replace('.', ''),
            os_name=self.os,
            cuda_version=self.cuda_version,
        ).make_url()
    if url:
        # normalize url (sometimes we will get ../ which we should not...
        url = '/'.join(url.split('/')[:3]) + urllib.parse.quote(
            str(furl(url).path.normalize()))

    self.log.debug("checking url: %s", url)
    return url, requests.head(url, timeout=10).ok
# NOTE(review): fragment of a larger credits-generation routine — the
# enclosing function, ``metadata``, ``clean_info`` and ``add_vlz_url`` are
# defined outside this excerpt.
direct_dependencies = sorted([
    _.lower() for _ in chain(metadata["dependencies"].keys(),
                             metadata["dev-dependencies"].keys())
])
direct_dependencies.remove("python")
lock_data = toml.load(Path(__file__).parent.parent / "poetry.lock")
# Everything else pinned in poetry.lock is an indirect dependency.
indirect_dependencies = sorted([
    p["name"] for p in lock_data["package"]
    if p["name"] not in direct_dependencies
])
# poetry.lock seems to always use lowercase for packages names
package_info = {
    p["name"]: clean_info(p)
    for p in search_packages_info(direct_dependencies + indirect_dependencies)
}
for dependency in direct_dependencies + indirect_dependencies:
    if dependency not in [_.lower() for _ in package_info.keys()]:
        # Not installed locally: fall back to the PyPI JSON API.
        info = requests.get(
            f"https://pypi.python.org/pypi/{dependency}/json").json()["info"]
        package_info[info["name"]] = add_vlz_url({
            "name": info["name"],
            "home-page": info["home_page"] or info["project_url"] or info["package_url"]
        })
lower_package_info = {}
for package_name, package in package_info.items():
    # NOTE(review): fragment — the enclosing function, the opening of this
    # keyword-argument list and the later use of ``lines`` are outside this
    # excerpt.
    user_only=options.user,
    skip_regex=options.skip_requirements_regex,
    isolated=options.isolated_mode,
    wheel_cache=wheel_cache,
    skip=skip,
    exclude_editable=options.exclude_editable,
)

# build a list of packages that are currently installed
packages = {}
for item in list(freeze(**freeze_kwargs)):
    # each freeze line has the form "name==version"
    k, v = item.split('==')
    packages[k] = v

# Get the package info for all installed packages
packages_info = search_packages_info(packages.keys())

# Build a set of dependencies
for package in list(packages.keys()):
    dependencies = list(search_packages_info([package]))[0]['requires']
    for dependency in dependencies:
        # don't remove the dependency if the package is prefixed with the dependency indicating
        # it's an extension.
        if dependency.lower() != package.lower()[0:len(dependency)]:
            try:
                del packages[dependency]
            except KeyError:
                pass

lines = []
for k, v in packages.items():