def run_context(self, staged: bool, run_step: RunStep) -> Iterator[List[Path]]:
    """
    Context manager yielding the list of absolute paths to analyze.

    The filesystem context depends on the arguments:
      - staged + baseline step: all files at the current branch HEAD
      - staged + check step: HEAD plus staged changes (untracked files hidden)
      - not staged: the filesystem exactly as it currently is (no-op)

    Yielded paths are absolute, exclude anything ignored by .bentoignore
    rules, and are limited to the configured path filters.

    :param staged: Whether to remove unstaged file diffs
    :param run_step: BASELINE when the tool is determining a baseline,
        CHECK when it is finding new results
    :return: A Python with-expression
    :raises subprocess.CalledProcessError: If git encounters an exception
    :raises NoGitHeadException: If git cannot detect a HEAD commit
    :raises UnsupportedGitStateException: If unmerged files are detected
    """
    if not staged:
        # Leave the working tree untouched.
        fs_context = noop_context()
    elif run_step == RunStep.BASELINE:
        fs_context = self._head_context()
    else:
        # Staged check: hide unstaged diffs and untracked files.
        fs_context = staged_files_only(PATCH_CACHE)

    with fs_context:
        yield self._target_paths
def run(runner, args, write=sys_stdout_write_wrapper, environ=os.environ):
    """Run the configured hooks, stashing unstaged changes unless disabled."""
    # Route log output through the colorized handler.
    logger.addHandler(LoggingHandler(args.color, write=write))
    logger.setLevel(logging.INFO)

    # Unresolved merge conflicts would make hook results meaningless.
    if _has_unmerged_paths(runner):
        logger.error('Unmerged files. Resolve before committing.')
        return 1

    # --origin and --source must be supplied together (or not at all).
    if bool(args.source) != bool(args.origin):
        logger.error('--origin and --source depend on each other.')
        return 1

    # Skip stashing when the user opted out or named files explicitly.
    skip_stash = args.no_stash or args.all_files or args.files
    ctx = noop_context() if skip_stash else staged_files_only(runner.cmd_runner)

    with ctx:
        if args.hook:
            return _run_hook(runner, args, write=write)
        return _run_hooks(runner, args, write=write, environ=environ)
def run(runner, args, environ=os.environ):
    """
    Run pre-commit hooks against the current repository.

    :param runner: project runner giving access to repo state and hooks
    :param args: parsed CLI argument namespace
    :param environ: environment mapping, mutated to expose origin/source
    :return: integer exit code (0 on success, 1 on error)
    """
    no_stash = args.no_stash or args.all_files or bool(args.files)

    # Check if we have unresolved merge conflict files and fail fast.
    if _has_unmerged_paths(runner):
        logger.error('Unmerged files. Resolve before committing.')
        return 1

    if bool(args.source) != bool(args.origin):
        logger.error('Specify both --origin and --source.')
        return 1

    if _has_unstaged_config(runner) and not no_stash:
        if args.allow_unstaged_config:
            # FIX: logger.warn is a deprecated alias; use warning() instead.
            logger.warning(
                'You have an unstaged config file and have specified the '
                '--allow-unstaged-config option.\n'
                'Note that your config will be stashed before the config is '
                'parsed unless --no-stash is specified.',
            )
        else:
            logger.error(
                'Your .pre-commit-config.yaml is unstaged.\n'
                '`git add .pre-commit-config.yaml` to fix this.\n'
                'Run pre-commit with --allow-unstaged-config to silence this.'
            )
            return 1

    # Expose origin / source as environment variables for hooks to consume
    if args.origin and args.source:
        environ['PRE_COMMIT_ORIGIN'] = args.origin
        environ['PRE_COMMIT_SOURCE'] = args.source

    if no_stash:
        ctx = noop_context()
    else:
        ctx = staged_files_only(runner.cmd_runner)

    with ctx:
        repo_hooks = list(get_repo_hooks(runner))

        if args.hook:
            repo_hooks = [
                (repo, hook) for repo, hook in repo_hooks
                if hook['id'] == args.hook
            ]
            if not repo_hooks:
                output.write_line('No hook with id `{}`'.format(args.hook))
                return 1

        # Filter hooks for stages
        repo_hooks = [
            (repo, hook) for repo, hook in repo_hooks
            if not hook['stages'] or args.hook_stage in hook['stages']
        ]

        return _run_hooks(repo_hooks, args, environ)
def run(runner, args, write=sys_stdout_write_wrapper, environ=os.environ):
    """
    Run pre-commit hooks against the current repository.

    :param runner: project runner giving access to repo state and hooks
    :param args: parsed CLI argument namespace
    :param write: callable used for direct user-facing output
    :param environ: environment mapping passed through to hooks
    :return: integer exit code (0 on success, 1 on error)
    """
    no_stash = args.no_stash or args.all_files or bool(args.files)

    # Set up our logging handler
    logger.addHandler(LoggingHandler(args.color, write=write))
    logger.setLevel(logging.INFO)

    # Check if we have unresolved merge conflict files and fail fast.
    if _has_unmerged_paths(runner):
        logger.error('Unmerged files. Resolve before committing.')
        return 1

    if bool(args.source) != bool(args.origin):
        logger.error('Specify both --origin and --source.')
        return 1

    if _has_unstaged_config(runner) and not no_stash:
        if args.allow_unstaged_config:
            # FIX: logger.warn is a deprecated alias; use warning() instead.
            logger.warning(
                'You have an unstaged config file and have specified the '
                '--allow-unstaged-config option.\n'
                'Note that your config will be stashed before the config is '
                'parsed unless --no-stash is specified.',
            )
        else:
            logger.error(
                'Your .pre-commit-config.yaml is unstaged.\n'
                '`git add .pre-commit-config.yaml` to fix this.\n'
                'Run pre-commit with --allow-unstaged-config to silence this.'
            )
            return 1

    if no_stash:
        ctx = noop_context()
    else:
        ctx = staged_files_only(runner.cmd_runner)

    with ctx:
        repo_hooks = list(get_repo_hooks(runner))

        if args.hook:
            repo_hooks = [
                (repo, hook) for repo, hook in repo_hooks
                if hook['id'] == args.hook
            ]
            if not repo_hooks:
                write('No hook with id `{}`\n'.format(args.hook))
                return 1

        # Filter hooks for stages
        repo_hooks = [
            (repo, hook) for repo, hook in repo_hooks
            if not hook['stages'] or args.hook_stage in hook['stages']
        ]

        return _run_hooks(repo_hooks, args, write, environ)
def run(runner, args, write=sys_stdout_write_wrapper, environ=os.environ):
    """
    Run pre-commit hooks against the current repository.

    :param runner: project runner giving access to repo state and hooks
    :param args: parsed CLI argument namespace
    :param write: callable used for direct user-facing output
    :param environ: environment mapping passed through to hooks
    :return: integer exit code (0 on success, 1 on error)
    """
    no_stash = args.no_stash or args.all_files or bool(args.files)

    # Set up our logging handler
    logger.addHandler(LoggingHandler(args.color, write=write))
    logger.setLevel(logging.INFO)

    # Check if we have unresolved merge conflict files and fail fast.
    if _has_unmerged_paths(runner):
        logger.error('Unmerged files. Resolve before committing.')
        return 1

    if bool(args.source) != bool(args.origin):
        logger.error('Specify both --origin and --source.')
        return 1

    if _has_unstaged_config(runner) and not no_stash:
        if args.allow_unstaged_config:
            # FIX: logger.warn is a deprecated alias; use warning() instead.
            logger.warning(
                'You have an unstaged config file and have specified the '
                '--allow-unstaged-config option.\n'
                'Note that your config will be stashed before the config is '
                'parsed unless --no-stash is specified.',
            )
        else:
            logger.error(
                'Your .pre-commit-config.yaml is unstaged.\n'
                '`git add .pre-commit-config.yaml` to fix this.\n'
                'Run pre-commit with --allow-unstaged-config to silence this.'
            )
            return 1

    if no_stash:
        ctx = noop_context()
    else:
        ctx = staged_files_only(runner.cmd_runner)

    with ctx:
        repo_hooks = list(get_repo_hooks(runner))

        if args.hook:
            repo_hooks = [
                (repo, hook) for repo, hook in repo_hooks
                if hook['id'] == args.hook
            ]
            if not repo_hooks:
                write('No hook with id `{0}`\n'.format(args.hook))
                return 1

        # Filter hooks for stages
        repo_hooks = [
            (repo, hook) for repo, hook in repo_hooks
            if not hook['stages'] or args.hook_stage in hook['stages']
        ]

        return _run_hooks(repo_hooks, args, write, environ)
def write_line(s=None, stream=stdout_byte_stream, logfile_name=None):
    """Write *s* followed by a newline to *stream*, and optionally tee the
    same bytes to *logfile_name* (appended in binary mode)."""
    if logfile_name:
        ctx = open(logfile_name, 'ab')
        sinks = [stream, ctx]
    else:
        ctx = noop_context()
        sinks = [stream]

    # The with-block guarantees the logfile handle (when open) is closed.
    with ctx:
        for sink in sinks:
            if s is not None:
                sink.write(five.to_bytes(s))
            sink.write(b'\n')
            sink.flush()
def run_context(
    context: Context,
    target_paths: List[Path],
    staged: bool,
    run_step: RunStep,
    show_bars: bool = True,
) -> Iterator[Runner]:
    """
    Context manager yielding a configured Runner.

    Filesystem behavior: when ``staged`` is true, unstaged file diffs are
    removed for the duration of the context; otherwise the tree is left
    untouched.

    Runner configuration: the cache is only used when computing a baseline
    over staged files (HEAD context); setup is only performed when running
    against the working tree as-is.

    :param context: The Bento command context
    :param target_paths: Paths the Runner should check
    :param staged: Whether to remove unstaged file diffs
    :param run_step: BASELINE when the tool is determining a baseline,
        CHECK when it is finding new results
    :param show_bars: If true, attempts to configure Runner to display
        progress bars (may not render in unsupported environments)
    :return: A Python with-expression, which is passed a Runner object
    :raises Exception: If comparison is not HEAD and run_step is not CHECK
    """
    if staged and run_step == RunStep.BASELINE:
        fs_context = head_context()
        use_cache, skip_setup = True, True
    elif staged:  # run_step == RunStep.CHECK
        fs_context = staged_files_only(PATCH_CACHE)
        use_cache, skip_setup = False, True
    else:  # not staged
        fs_context = noop_context()
        use_cache, skip_setup = False, False

    with fs_context:
        yield Runner(
            paths=target_paths,
            use_cache=use_cache,
            skip_setup=skip_setup,
            show_bars=show_bars,
        )
def run(config_file, store, args, environ=os.environ):
    """Load the config, select matching hooks, and run them.

    Returns an integer exit code (0 on success, 1 on error)."""
    no_stash = args.all_files or bool(args.files)

    # Unresolved merge conflicts would make hook results meaningless.
    if _has_unmerged_paths():
        logger.error('Unmerged files. Resolve before committing.')
        return 1

    # --origin and --source must be supplied together (or not at all).
    if bool(args.source) != bool(args.origin):
        logger.error('Specify both --origin and --source.')
        return 1

    if _has_unstaged_config(config_file) and not no_stash:
        logger.error(
            'Your pre-commit configuration is unstaged.\n'
            '`git add {}` to fix this.'.format(config_file),
        )
        return 1

    # Expose origin / source as environment variables for hooks to consume
    if args.origin and args.source:
        environ['PRE_COMMIT_ORIGIN'] = args.origin
        environ['PRE_COMMIT_SOURCE'] = args.source

    ctx = noop_context() if no_stash else staged_files_only(store.directory)

    with ctx:
        config = load_config(config_file)
        # Keep hooks matching the requested id/alias (if any) and stage.
        hooks = [
            hook
            for hook in all_hooks(config, store)
            if not args.hook or hook.id == args.hook or hook.alias == args.hook
            if args.hook_stage in hook.stages
        ]

        if args.hook and not hooks:
            output.write_line(
                'No hook with id `{}` in stage `{}`'.format(
                    args.hook, args.hook_stage,
                ),
            )
            return 1

        install_hook_envs(hooks, store)
        return _run_hooks(config, hooks, args, environ)
def run(runner, args, environ=os.environ):
    """Select this repo's hooks and run them; returns an exit code."""
    no_stash = args.all_files or bool(args.files)

    # Unresolved merge conflicts would make hook results meaningless.
    if _has_unmerged_paths():
        logger.error('Unmerged files. Resolve before committing.')
        return 1

    # --origin and --source must be supplied together (or not at all).
    if bool(args.source) != bool(args.origin):
        logger.error('Specify both --origin and --source.')
        return 1

    if _has_unstaged_config(runner) and not no_stash:
        logger.error(
            'Your .pre-commit-config.yaml is unstaged.\n'
            '`git add .pre-commit-config.yaml` to fix this.',
        )
        return 1

    # Expose origin / source as environment variables for hooks to consume
    if args.origin and args.source:
        environ['PRE_COMMIT_ORIGIN'] = args.origin
        environ['PRE_COMMIT_SOURCE'] = args.source

    ctx = noop_context() if no_stash else staged_files_only(runner.store.directory)

    with ctx:
        selected = list(get_repo_hooks(runner))

        if args.hook:
            selected = [
                pair for pair in selected if pair[1]['id'] == args.hook
            ]
            if not selected:
                output.write_line('No hook with id `{}`'.format(args.hook))
                return 1

        # Keep only hooks registered for the requested stage.
        selected = [
            (repo, hook)
            for repo, hook in selected
            if not hook['stages'] or args.hook_stage in hook['stages']
        ]

        return _run_hooks(runner.config, selected, args, environ)
def run(runner, args, environ=os.environ):
    """
    Run pre-commit hooks against the current repository.

    :param runner: project runner giving access to repo state and hooks
    :param args: parsed CLI argument namespace
    :param environ: environment mapping, mutated to expose origin/source
    :return: integer exit code (0 on success, 1 on error)
    """
    no_stash = args.all_files or bool(args.files)

    # Check if we have unresolved merge conflict files and fail fast.
    if _has_unmerged_paths():
        logger.error('Unmerged files. Resolve before committing.')
        return 1

    if bool(args.source) != bool(args.origin):
        logger.error('Specify both --origin and --source.')
        return 1

    if _has_unstaged_config(runner) and not no_stash:
        logger.error(
            'Your pre-commit configuration is unstaged.\n'
            '`git add {}` to fix this.'.format(runner.config_file),
        )
        return 1

    # Expose origin / source as environment variables for hooks to consume
    if args.origin and args.source:
        environ['PRE_COMMIT_ORIGIN'] = args.origin
        environ['PRE_COMMIT_SOURCE'] = args.source

    if no_stash:
        ctx = noop_context()
    else:
        ctx = staged_files_only(runner.store.directory)

    with ctx:
        repo_hooks = []
        for repo in runner.repositories:
            for _, hook in repo.hooks:
                # BUG FIX: `and` binds tighter than `or`, so the original
                # unparenthesized condition parsed as
                # `(id_matches and no_stages) or stage_matches`, admitting
                # any stage-matching hook even when --hook named a different
                # id. Parenthesize the stage test so both conditions apply.
                if (
                    (not args.hook or hook['id'] == args.hook) and
                    (not hook['stages'] or args.hook_stage in hook['stages'])
                ):
                    repo_hooks.append((repo, hook))

        if args.hook and not repo_hooks:
            output.write_line('No hook with id `{}`'.format(args.hook))
            return 1

        for repo in {repo for repo, _ in repo_hooks}:
            repo.require_installed()

        return _run_hooks(runner.config, repo_hooks, args, environ)
def run(runner, args, environ=os.environ):
    """Run the hooks configured for this repository and return an exit code."""
    no_stash = args.all_files or bool(args.files)

    # Bail immediately on unresolved merge conflicts.
    if _has_unmerged_paths():
        logger.error('Unmerged files. Resolve before committing.')
        return 1

    # Exactly both or neither of --origin / --source must be given.
    if bool(args.source) != bool(args.origin):
        logger.error('Specify both --origin and --source.')
        return 1

    if _has_unstaged_config(runner) and not no_stash:
        logger.error(
            'Your .pre-commit-config.yaml is unstaged.\n'
            '`git add .pre-commit-config.yaml` to fix this.',
        )
        return 1

    # Expose origin / source as environment variables for hooks to consume
    if args.origin and args.source:
        environ['PRE_COMMIT_ORIGIN'] = args.origin
        environ['PRE_COMMIT_SOURCE'] = args.source

    if no_stash:
        stash_ctx = noop_context()
    else:
        stash_ctx = staged_files_only(runner.store.directory)

    with stash_ctx:
        hook_pairs = list(get_repo_hooks(runner))

        if args.hook:
            hook_pairs = [
                (repo, hook)
                for repo, hook in hook_pairs
                if hook['id'] == args.hook
            ]
            if not hook_pairs:
                output.write_line('No hook with id `{}`'.format(args.hook))
                return 1

        # Drop hooks not registered for the requested stage.
        hook_pairs = [
            (repo, hook)
            for repo, hook in hook_pairs
            if not hook['stages'] or args.hook_stage in hook['stages']
        ]

        return _run_hooks(runner.config, hook_pairs, args, environ)
def run(config_file, store, args, environ=os.environ):
    """Parse *config_file*, pick hooks matching the CLI filters, run them."""
    no_stash = args.all_files or bool(args.files)

    # Bail immediately on unresolved merge conflicts.
    if _has_unmerged_paths():
        logger.error('Unmerged files. Resolve before committing.')
        return 1

    # Exactly both or neither of --origin / --source must be given.
    if bool(args.source) != bool(args.origin):
        logger.error('Specify both --origin and --source.')
        return 1

    if _has_unstaged_config(config_file) and not no_stash:
        logger.error(
            'Your pre-commit configuration is unstaged.\n'
            '`git add {}` to fix this.'.format(config_file),
        )
        return 1

    # Expose origin / source as environment variables for hooks to consume
    if args.origin and args.source:
        environ['PRE_COMMIT_ORIGIN'] = args.origin
        environ['PRE_COMMIT_SOURCE'] = args.source

    if no_stash:
        stash_ctx = noop_context()
    else:
        stash_ctx = staged_files_only(store.directory)

    with stash_ctx:
        config = load_config(config_file)
        # Filter by hook id/alias (when requested) and by stage.
        hooks = [
            hook
            for hook in all_hooks(config, store)
            if not args.hook or hook.id == args.hook or hook.alias == args.hook
            if args.hook_stage in hook.stages
        ]

        if args.hook and not hooks:
            output.write_line('No hook with id `{}`'.format(args.hook))
            return 1

        install_hook_envs(hooks, store)
        return _run_hooks(config, hooks, args, environ)
def run(runner, args, write=sys_stdout_write_wrapper, environ=os.environ):
    """Run hooks, stashing unstaged changes unless --no-stash/--all-files."""
    # Wire logging through the colorized handler.
    logger.addHandler(LoggingHandler(args.color, write=write))
    logger.setLevel(logging.INFO)

    # Unresolved merge conflicts would make hook results meaningless.
    if _has_unmerged_paths(runner):
        logger.error('Unmerged files. Resolve before committing.')
        return 1

    skip_stash = args.no_stash or args.all_files
    ctx = noop_context() if skip_stash else staged_files_only(runner.cmd_runner)

    with ctx:
        if args.hook:
            return _run_hook(runner, args, write=write)
        return _run_hooks(runner, args, write=write, environ=environ)
def run(runner, args, write=sys_stdout_write_wrapper, environ=os.environ):
    """
    Run pre-commit hooks against the current repository.

    :param runner: project runner giving access to repo state and hooks
    :param args: parsed CLI argument namespace
    :param write: callable used for direct user-facing output
    :param environ: environment mapping passed through to hooks
    :return: integer exit code (0 on success, 1 on error)
    """
    # Set up our logging handler
    logger.addHandler(LoggingHandler(args.color, write=write))
    logger.setLevel(logging.INFO)

    # Check if we have unresolved merge conflict files and fail fast.
    if _has_unmerged_paths(runner):
        logger.error('Unmerged files. Resolve before committing.')
        return 1

    if bool(args.source) != bool(args.origin):
        logger.error('Specify both --origin and --source.')
        return 1

    if _has_unstaged_config(runner) and not args.no_stash:
        if args.allow_unstaged_config:
            # FIX: logger.warn is a deprecated alias; use warning() instead.
            logger.warning('You have an unstaged config file and have '
                           'specified the --allow-unstaged-config option.\n'
                           'Note that your config will be stashed before the '
                           'config is parsed unless --no-stash is specified.')
        else:
            logger.error('You have an unstaged config file and have not '
                         'specified the --allow-unstaged-config option.\n')
            return 1

    # Don't stash if specified or files are specified
    if args.no_stash or args.all_files or args.files:
        ctx = noop_context()
    else:
        ctx = staged_files_only(runner.cmd_runner)

    with ctx:
        repo_hooks = list(get_repo_hooks(runner))

        if args.hook:
            repo_hooks = [
                (repo, hook) for repo, hook in repo_hooks
                if hook['id'] == args.hook
            ]
            if not repo_hooks:
                write('No hook with id `{0}`\n'.format(args.hook))
                return 1

        return _run_hooks(repo_hooks, args, write, environ)
def check(
    context: Context,
    formatter: Tuple[str, ...] = (),
    pager: bool = True,
    show_all: bool = False,
    staged_only: bool = False,
    tool: Optional[str] = None,
    paths: Optional[List[str]] = None,
) -> None:
    """
    Checks for new findings.

    Only findings not previously archived will be displayed (use --show-all
    to display archived findings).

    By default, 'bento check' will check the entire project. To run
    on one or more paths only, run:

        bento check path1 path2 ...
    """
    # A named but unconfigured tool gets a default config added on the fly.
    if tool and tool not in context.configured_tools:
        click.echo(
            f"{tool} has not been configured. Adding default configuration for tool to .bento.yml"
        )
        update_tool_run(context, tool, False)
        # Set configured_tools to None so that future calls will
        # update and include newly added tool
        context._configured_tools = None

    if not context.config_path.exists():
        echo_error("No Bento configuration found. Please run `bento init`.")
        sys.exit(3)

    # Load the archive of previously-seen findings unless --show-all.
    if not show_all and context.baseline_file_path.exists():
        with context.baseline_file_path.open() as json_file:
            baseline = bento.result.yml_to_violation_hashes(json_file)
    else:
        baseline = {}

    config = context.config

    # CLI --formatter overrides the formatter list from config.
    if formatter:
        config["formatter"] = [{f: {}} for f in formatter]
    fmts = context.formatters
    findings_to_log: List[Any] = []

    click.echo("Running Bento checks...\n", err=True)

    # Choose a filesystem context: explicit paths run as-is; --staged_only
    # stashes unstaged diffs and limits checking to staged files.
    ctx = noop_context()
    if paths and len(paths) > 0:
        if staged_only:
            raise Exception("--staged_only should not be used with explicit paths")
    elif staged_only:
        ctx = staged_files_only(
            os.path.join(os.path.expanduser("~"), ".cache", "bento", "patches")
        )
        paths = get_staged_files()
    else:
        # No explicit paths and not staged-only: check the whole project.
        paths = None

    with ctx:
        before = time.time()
        runner = bento.tool_runner.Runner()
        tools: Iterable[Tool[Any]] = context.tools.values()
        if tool:
            tools = [context.configured_tools[tool]]
        all_results = runner.parallel_results(tools, baseline, paths)
        elapsed = time.time() - before

    # Progress bars terminate on whitespace
    echo_newline()

    is_error = False
    n_all = 0            # total findings across all tools
    n_all_filtered = 0   # findings that passed the baseline/ignore filter
    filtered_findings: Dict[str, List[Violation]] = {}
    for tool_id, findings in all_results:
        # A tool may return either a findings list or the exception it raised.
        if isinstance(findings, Exception):
            logging.error(findings)
            echo_error(f"Error while running {tool_id}: {findings}")
            if isinstance(findings, subprocess.CalledProcessError):
                click.secho(findings.stderr, err=True)
                click.secho(findings.stdout, err=True)
            if isinstance(findings, NodeError):
                echo_warning(
                    f"Node.js not found or version is not compatible with ESLint v6."
                )
            click.secho(
                f"""-------------------------------------------------------------------------------------------------
This may be due to a corrupted tool installation. You might be able to fix this issue by running:

  bento init --clean

You can also view full details of this error in `{bento.constants.DEFAULT_LOG_PATH}`.
-------------------------------------------------------------------------------------------------
""",
                err=True,
            )
            is_error = True
        elif isinstance(findings, list) and findings:
            findings_to_log += bento.metrics.violations_to_metrics(
                tool_id,
                context.timestamp,
                findings,
                __get_ignores_for_tool(tool_id, config),
            )
            # Keep only findings not suppressed by the baseline/ignores.
            filtered = [f for f in findings if not f.filtered]
            filtered_findings[tool_id] = filtered
            n_all += len(findings)
            n_filtered = len(filtered)
            n_all_filtered += n_filtered
            logging.debug(f"{tool_id}: {n_filtered} findings passed filter")

    # Ship metrics in the background so reporting does not block the user.
    def post_metrics() -> None:
        bento.network.post_metrics(findings_to_log, is_finding=True)

    stats_thread = threading.Thread(name="stats", target=post_metrics)
    stats_thread.start()

    if n_all_filtered > 0:
        dumped = [f.dump(filtered_findings) for f in fmts]
        # User-timer brackets exclude time spent paging through results.
        context.start_user_timer()
        bento.util.less(dumped, pager=pager, overrun_pages=OVERRUN_PAGES)
        context.stop_user_timer()

        echo_warning(f"{n_all_filtered} finding(s) in {elapsed:.2f} s\n")
        if not context.is_init:
            echo_next_step("To suppress all findings", "bento archive")
    else:
        echo_success(f"0 findings in {elapsed:.2f} s\n")

    n_archived = n_all - n_all_filtered
    if n_archived > 0 and not show_all:
        echo_next_step(
            f"Not showing {n_archived} archived finding(s). To view",
            f"bento check {SHOW_ALL}",
        )

    # Exit codes: 3 = tool error, 2 = findings present, 0 = clean.
    if is_error:
        sys.exit(3)
    elif n_all_filtered > 0:
        sys.exit(2)