def get_repo_caches(
    paths: Iterable[str],
    config: FullRepoMetadataConfig,
) -> Mapping[str, Dict["ProviderT", object]]:
    """
    Generate type metadata by instantiating a
    :class:`~libcst.metadata.FullRepoManager` per batch of paths, forwarding
    ``config.providers`` to its ``providers`` parameter.

    :param paths: An iterable of paths to files to pass to the
        :class:`~libcst.metadata.FullRepoManager` constructor's ``paths``
        argument. These are consumed in batches of ``config.batch_size``.
    :param config: Bundles the providers, pyre-query timeout (seconds),
        repo root directory and batch size used for every
        :class:`~libcst.metadata.FullRepoManager` instantiation.
    """
    caches = {}
    remaining = iter(paths)
    while True:
        # islice() drains up to batch_size paths from the iterator; an empty
        # tuple means the iterator is exhausted and we are done.
        paths_batch = tuple(islice(remaining, config.batch_size))
        if not paths_batch:
            break
        manager = FullRepoManager(
            repo_root_dir=config.repo_root_dir,
            paths=paths_batch,
            providers=config.providers,
            timeout=config.timeout_seconds,
        )
        try:
            manager.resolve_cache()
        except Exception:
            # Fail silently: some metadata providers can be flaky. Surface
            # the failure through the caller-supplied logger when one exists.
            if config.logger is not None:
                config.logger.warning(
                    "Failed to retrieve metadata cache.",
                    exc_info=True,
                    extra={"paths": paths_batch},
                )
            # Placeholder caches avoid failures down the line, at the cost of
            # reduced functionality in cache-dependent lint rules.
            placeholder = {
                provider: PLACEHOLDER_CACHES[provider]
                for provider in config.providers
            }
            caches.update(dict.fromkeys(paths_batch, placeholder))
        else:
            # TODO: remove access of private variable when a public `cache`
            # property is available in the libcst.metadata.FullRepoManager API.
            per_path: Dict[str, Dict["ProviderT", object]] = defaultdict(dict)
            for provider, provider_files in manager._cache.items():
                for file_path, cache in provider_files.items():
                    per_path[file_path][provider] = cache
            caches.update(per_path)
    return caches
def parallel_exec_transform_with_prettyprint(  # noqa: C901
    transform: Codemod,
    files: Sequence[str],
    *,
    jobs: Optional[int] = None,
    unified_diff: Optional[int] = None,
    include_generated: bool = False,
    generated_code_marker: str = _DEFAULT_GENERATED_CODE_MARKER,
    format_code: bool = False,
    formatter_args: Sequence[str] = (),
    show_successes: bool = False,
    hide_generated: bool = False,
    hide_blacklisted: bool = False,
    hide_progress: bool = False,
    blacklist_patterns: Sequence[str] = (),
    python_version: Optional[str] = None,
    repo_root: Optional[str] = None,
) -> ParallelTransformResult:
    """
    Given a list of files and an instantiated codemod we should apply to them,
    fork and apply the codemod in parallel to all of the files, including any
    configured formatter. The ``jobs`` parameter controls the maximum number of
    in-flight transforms, and needs to be at least 1. If not included, the
    number of jobs will automatically be set to the number of CPU cores. If
    ``unified_diff`` is set to a number, changes to files will be printed to
    stdout with ``unified_diff`` lines of context. If it is set to ``None`` or
    left out, files themselves will be updated with changes and formatting. If
    a ``python_version`` is provided, then we will parse each source file using
    this version. Otherwise, we will use the version of the currently executing
    python binary.

    A progress indicator as well as any generated warnings will be printed to
    stderr. To suppress the interactive progress indicator, set
    ``hide_progress`` to ``True``. Files that include the generated code marker
    will be skipped unless the ``include_generated`` parameter is set to
    ``True``. Similarly, files that match a supplied blacklist of regex
    patterns will be skipped. Warnings for skipping both blacklisted and
    generated files will be printed to stderr along with warnings generated by
    the codemod unless ``hide_blacklisted`` and ``hide_generated`` are set to
    ``True``. Files that were successfully codemodded will not be printed to
    stderr unless ``show_successes`` is set to ``True``.

    To make this API possible, we take an instantiated transform. This is due
    to the fact that lambdas are not pickleable and pickling functions is
    undefined. This means we're implicitly relying on fork behavior on
    UNIX-like systems, and this function will not work on Windows systems. To
    create a command-line utility that runs on Windows, please instead see
    :func:`~libcst.codemod.exec_transform_with_prettyprint`.
    """
    # Ensure that we have no duplicates, otherwise we might get race conditions
    # on write. (sorted() already returns a list, so no extra list() is needed.)
    files = sorted({os.path.abspath(f) for f in files})
    total = len(files)
    progress = Progress(enabled=not hide_progress, total=total)

    # Grab number of cores if we need to
    jobs: int = jobs if jobs is not None else cpu_count()

    if jobs < 1:
        raise Exception("Must have at least one job to process!")

    if total == 0:
        return ParallelTransformResult(successes=0, failures=0, skips=0, warnings=0)

    if repo_root is not None:
        # Make sure if there is a root that we have the absolute path to it.
        repo_root = os.path.abspath(repo_root)
        # Spin up a full repo metadata manager so that we can provide metadata
        # like type inference to individual forked processes.
        print("Calculating full-repo metadata...", file=sys.stderr)
        metadata_manager = FullRepoManager(
            repo_root,
            files,
            transform.get_inherited_dependencies(),
        )
        metadata_manager.resolve_cache()
        transform.context = replace(
            transform.context,
            metadata_manager=metadata_manager,
        )
    print("Executing codemod...", file=sys.stderr)

    config = ExecutionConfig(
        repo_root=repo_root,
        unified_diff=unified_diff,
        include_generated=include_generated,
        generated_code_marker=generated_code_marker,
        format_code=format_code,
        formatter_args=formatter_args,
        blacklist_patterns=blacklist_patterns,
        python_version=python_version,
    )

    if total == 1:
        # Simple case, we should not pay for process overhead.
        # Let's just use a dummy synchronous pool.
        jobs = 1
        pool_impl = DummyPool
    else:
        pool_impl = Pool
        # Warm the parser, pre-fork.
        parse_module(
            "",
            config=(
                PartialParserConfig(python_version=python_version)
                if python_version is not None
                else PartialParserConfig()
            ),
        )

    successes: int = 0
    failures: int = 0
    warnings: int = 0
    skips: int = 0

    with pool_impl(processes=jobs) as p:  # type: ignore
        args = [
            {
                "transformer": transform,
                "filename": filename,
                "config": config,
            }
            for filename in files
        ]
        try:
            for result in p.imap_unordered(
                _execute_transform_wrap, args, chunksize=4
            ):
                # Print an execution result, keep track of failures
                _print_parallel_result(
                    result,
                    progress,
                    unified_diff=bool(unified_diff),
                    show_successes=show_successes,
                    hide_generated=hide_generated,
                    hide_blacklisted=hide_blacklisted,
                )
                progress.print(successes + failures + skips)

                if isinstance(result.transform_result, TransformFailure):
                    failures += 1
                elif isinstance(result.transform_result, TransformSuccess):
                    successes += 1
                elif isinstance(
                    result.transform_result, (TransformExit, TransformSkip)
                ):
                    skips += 1

                # Warnings accumulate regardless of the result category.
                warnings += len(result.transform_result.warning_messages)
        finally:
            # Always leave the terminal clean, even on KeyboardInterrupt.
            progress.clear()

    # Return whether there was one or more failure.
    return ParallelTransformResult(
        successes=successes, failures=failures, skips=skips, warnings=warnings
    )
def parallel_exec_transform_with_prettyprint(  # noqa: C901
    transform: Codemod,
    files: Sequence[str],
    *,
    jobs: Optional[int] = None,
    unified_diff: Optional[int] = None,
    include_generated: bool = False,
    generated_code_marker: str = _DEFAULT_GENERATED_CODE_MARKER,
    format_code: bool = False,
    formatter_args: Sequence[str] = (),
    show_successes: bool = False,
    hide_generated: bool = False,
    hide_blacklisted: bool = False,
    hide_progress: bool = False,
    blacklist_patterns: Sequence[str] = (),
    python_version: Optional[str] = None,
    repo_root: Optional[str] = None,
) -> ParallelTransformResult:
    """
    Given a list of files and an instantiated codemod we should apply to them,
    fork and apply the codemod in parallel to all of the files, including any
    configured formatter. The ``jobs`` parameter controls the maximum number of
    in-flight transforms, and needs to be at least 1. If not included, the
    number of jobs will automatically be set to the number of CPU cores. If
    ``unified_diff`` is set to a number, changes to files will be printed to
    stdout with ``unified_diff`` lines of context. If it is set to ``None`` or
    left out, files themselves will be updated with changes and formatting. If
    a ``python_version`` is provided, then we will parse each source file using
    this version. Otherwise, we will use the version of the currently executing
    python binary.

    A progress indicator as well as any generated warnings will be printed to
    stderr. To suppress the interactive progress indicator, set
    ``hide_progress`` to ``True``. Files that include the generated code marker
    will be skipped unless the ``include_generated`` parameter is set to
    ``True``. Similarly, files that match a supplied blacklist of regex
    patterns will be skipped. Warnings for skipping both blacklisted and
    generated files will be printed to stderr along with warnings generated by
    the codemod unless ``hide_blacklisted`` and ``hide_generated`` are set to
    ``True``. Files that were successfully codemodded will not be printed to
    stderr unless ``show_successes`` is set to ``True``.

    To make this API possible, we take an instantiated transform. This is due
    to the fact that lambdas are not pickleable and pickling functions is
    undefined. This means we're implicitly relying on fork behavior on
    UNIX-like systems, and this function will not work on Windows systems. To
    create a command-line utility that runs on Windows, please instead see
    :func:`~libcst.codemod.exec_transform_with_prettyprint`.
    """
    # Ensure that we have no duplicates, otherwise we might get race conditions
    # on write.
    files = sorted({os.path.abspath(f) for f in files})
    total = len(files)
    progress = Progress(enabled=not hide_progress, total=total)

    # Grab number of cores if we need to. Use an explicit None check so that
    # jobs=0 hits the validation below instead of silently using all cores
    # (the previous `jobs or cpu_count()` treated 0 as "not provided").
    jobs: int = jobs if jobs is not None else cpu_count()

    if jobs < 1:
        raise Exception("Must have at least one job to process!")

    if total == 0:
        return ParallelTransformResult(successes=0, failures=0, skips=0, warnings=0)

    if repo_root is not None:
        # Make sure if there is a root that we have the absolute path to it.
        repo_root = os.path.abspath(repo_root)
        # Spin up a full repo metadata manager so that we can provide metadata
        # like type inference to individual forked processes.
        print("Calculating full-repo metadata...", file=sys.stderr)
        metadata_manager = FullRepoManager(
            repo_root,
            files,
            transform.get_inherited_dependencies(),
        )
        metadata_manager.resolve_cache()
        transform.context = replace(
            transform.context,
            metadata_manager=metadata_manager,
        )
    print("Executing codemod...", file=sys.stderr)

    # We place results in this queue inside _parallel_exec_process_stub
    # so that we can control when things get printed to the console.
    queue = Queue()

    if total == 1:
        # Simple case, we should not pay for process overhead. Lets still
        # use the exec stub however, so we can share code.
        progress.print(0)
        _parallel_exec_process_stub(
            queue,
            transform,
            files[0],
            repo_root,
            unified_diff=unified_diff,
            include_generated=include_generated,
            generated_code_marker=generated_code_marker,
            format_code=format_code,
            formatter_args=formatter_args,
            blacklist_patterns=blacklist_patterns,
            python_version=python_version,
        )
        result = queue.get()
        _print_parallel_result(
            result,
            progress,
            unified_diff=bool(unified_diff),
            show_successes=show_successes,
            hide_generated=hide_generated,
            hide_blacklisted=hide_blacklisted,
        )
        if isinstance(result.transform_result, TransformFailure):
            return ParallelTransformResult(
                successes=0,
                failures=1,
                skips=0,
                warnings=len(result.transform_result.warning_messages),
            )
        elif isinstance(result.transform_result, (TransformSkip, TransformExit)):
            return ParallelTransformResult(
                successes=0,
                failures=0,
                skips=1,
                warnings=len(result.transform_result.warning_messages),
            )
        elif isinstance(result.transform_result, TransformSuccess):
            return ParallelTransformResult(
                successes=1,
                failures=0,
                skips=0,
                warnings=len(result.transform_result.warning_messages),
            )
        else:
            raise Exception("Logic error, unaccounted for result!")

    # Warm the parser, pre-fork.
    parse_module(
        "",
        config=(
            PartialParserConfig(python_version=python_version)
            if python_version is not None
            else PartialParserConfig()
        ),
    )

    # Complex case, more than one file
    successes: int = 0
    failures: int = 0
    warnings: int = 0
    skips: int = 0
    pending_processes: List[Process] = []

    # Start processes
    filename_to_process: Dict[str, Process] = {}
    for f in files:
        process = Process(
            target=_parallel_exec_process_stub,
            args=(
                queue,
                transform,
                f,
                repo_root,
                unified_diff,
                include_generated,
                generated_code_marker,
                format_code,
                formatter_args,
                blacklist_patterns,
                python_version,
            ),
        )
        pending_processes.append(process)
        filename_to_process[f] = process

    # Start the processes, allowing no more than num_processes to be running
    # at once.
    results_left = len(pending_processes)
    joinable_processes: Set[Process] = set()
    processes_started = 0
    interrupted = False
    while results_left > 0 and not interrupted:
        while processes_started < jobs and pending_processes:
            try:
                # Move this process to the joinables
                process = pending_processes.pop(0)
                joinable_processes.add(process)

                # Start it, bookkeep that we did
                process.start()
                processes_started += 1
            except KeyboardInterrupt:
                interrupted = True
                continue

        try:
            # Short timeout so we keep refreshing the progress display while
            # workers are still running.
            result = queue.get(block=True, timeout=0.005)
        except KeyboardInterrupt:
            interrupted = True
            continue
        except Empty:
            progress.print(successes + failures + skips)
            continue

        # Bookkeep the result, since we know the process that returned this is done.
        results_left -= 1
        processes_started -= 1

        # Print an execution result, keep track of failures
        _print_parallel_result(
            result,
            progress,
            unified_diff=bool(unified_diff),
            show_successes=show_successes,
            hide_generated=hide_generated,
            hide_blacklisted=hide_blacklisted,
        )
        progress.print(successes + failures + skips)

        if isinstance(result.transform_result, TransformFailure):
            failures += 1
        elif isinstance(result.transform_result, TransformSuccess):
            successes += 1
        elif isinstance(result.transform_result, (TransformExit, TransformSkip)):
            skips += 1

        # Warnings accumulate regardless of the result category.
        warnings += len(result.transform_result.warning_messages)

        # Join the process to free any related resources.
        # Remove all references to the process to allow the GC to
        # clean up any file handles.
        process = filename_to_process.pop(result.filename, None)
        if process:
            process.join()
            joinable_processes.discard(process)

    # Now, join on all of them so we don't leave zombies or hang
    for p in joinable_processes:
        p.join()

    # Return whether there was one or more failure.
    progress.clear()

    # If we caught an interrupt, raise that
    if interrupted:
        raise KeyboardInterrupt()
    return ParallelTransformResult(
        successes=successes, failures=failures, skips=skips, warnings=warnings
    )
def get_repo_caches(
    paths: Iterable[str],
    config: FullRepoMetadataConfig,
) -> Mapping[str, Dict[ProviderT, object]]:
    """
    Generate type metadata by instantiating a
    :class:`~libcst.metadata.FullRepoManager` with ``config.providers`` passed
    to its ``providers`` parameter.

    :param paths: An iterable of paths to files to pass to the
        :class:`~libcst.metadata.FullRepoManager` constructor's ``paths``
        argument. These will be consumed in batches of ``config.batch_size``.
    :param config: Carries the providers, the pyre-query timeout in seconds
        (applied to the subprocess run during cache resolving), the repo root
        directory of the paths in ``paths``, and the batch size used for each
        :class:`~libcst.metadata.FullRepoManager` instantiation.
    """
    caches = {}
    paths_iter = iter(paths)
    head: Optional[str] = next(paths_iter, None)

    while head is not None:
        # Take `head` plus up to batch_size - 1 more paths for this batch.
        paths_batch = tuple(chain([head], islice(paths_iter, config.batch_size - 1)))
        head = next(paths_iter, None)
        frm = FullRepoManager(
            repo_root_dir=config.repo_root_dir,
            paths=paths_batch,
            providers=config.providers,
            timeout=config.timeout_seconds,
        )
        try:
            frm.resolve_cache()
        except Exception:
            # We want to fail silently since some metadata providers can be
            # flaky. If a logger is provided by the caller, we'll add a log here.
            logger = config.logger
            if logger is not None:
                logger.warning(
                    "Failed to retrieve metadata cache.",
                    exc_info=True,
                    extra={"paths": paths_batch},
                )
            # Populate with placeholder caches to avoid failures down the line.
            # This will however result in reduced functionality in
            # cache-dependent lint rules.
            caches.update(
                dict.fromkeys(
                    paths_batch,
                    {
                        provider: PLACEHOLDER_CACHES[provider]
                        for provider in config.providers
                    },
                )
            )
        else:
            # TODO: remove access of private variable when public `cache`
            # property is available in libcst.metadata.FullRepoManager API.
            # frm._cache maps provider -> {path -> cache}; invert it here so
            # callers can look up all provider caches for a given path.
            batch_caches = defaultdict(dict)
            for provider, files in frm._cache.items():
                for _path, cache in files.items():
                    batch_caches[_path][provider] = cache
            caches.update(batch_caches)
    return caches