def add_hooks(pre_commit: str, pre_push: str):
    """ Add git hooks for commit and push to run linting and tests. """
    # Detect the virtualenv the hooks should use.
    virtual_env = conf.get_env('VIRTUAL_ENV')

    if virtual_env is None:
        log.err("You are not inside a virtualenv")
        confirm_msg = (
            "Are you sure you want to use global python installation "
            "to run your git hooks? [y/N] "
        )
        if not click.confirm(confirm_msg):
            log.info("Cancelling")
            return

        load_venv = ''
    else:
        load_venv = 'source "{}/bin/activate"'.format(virtual_env)

    commit_hook = conf.proj_path('.git/hooks/pre-commit')
    push_hook = conf.proj_path('.git/hooks/pre-push')

    # Write pre-commit hook
    log.info("Adding pre-commit hook <33>{}", commit_hook)
    fs.write_file(commit_hook, util.remove_indent('''
        #!/bin/bash
        PATH="/opt/local/libexec/gnubin:$PATH"

        {load_venv}

        {command}
    '''.format(load_venv=load_venv, command=pre_commit)))

    # Write pre-push hook
    log.info("Adding pre-push hook: <33>{}", push_hook)
    fs.write_file(push_hook, util.remove_indent('''
        #!/bin/bash
        PATH="/opt/local/libexec/gnubin:$PATH"

        {load_venv}

        peltak test --allow-empty
        {command}
    '''.format(load_venv=load_venv, command=pre_push)))

    log.info("Making hooks executable")
    if not context.get('pretend', False):
        os.chmod(conf.proj_path('.git/hooks/pre-commit'), 0o755)
        os.chmod(conf.proj_path('.git/hooks/pre-push'), 0o755)


def test_calls_filtered_walk_with_paths_configured(p_filtered_walk: Mock):
    files = types.FilesCollection.from_config({
        'paths': ['path1', 'path2'],
    })

    fs.collect_files(files)

    assert p_filtered_walk.call_count == 2

    args, _ = p_filtered_walk.call_args_list[0]
    expected = (conf.proj_path('path1'), files.whitelist(), files.blacklist())
    assert tuple(args) == expected

    args, _ = p_filtered_walk.call_args_list[1]
    expected = (conf.proj_path('path2'), files.whitelist(), files.blacklist())
    assert tuple(args) == expected


def run_script(script: Script, options: CliOptions) -> None:
    """ Run the script with the given (command line) options. """
    template_ctx = build_template_context(script, options)
    verbose = RunContext().get('verbose')
    pretend = RunContext().get('pretend')

    if verbose >= 3:
        log.info('Compiling script <35>{name}\n{script}'.format(
            name=script.name,
            script=shell.highlight(script.command, 'jinja')))
        yaml_str = yaml.dump(template_ctx, default_flow_style=False)
        log.info('with context:\n{}\n'.format(shell.highlight(
            yaml_str, 'yaml')))

    # Command is either specified directly in pelconf.yaml or lives in a
    # separate file.
    command = script.command
    if script.command_file:
        with open(conf.proj_path(script.command_file)) as fp:
            command = fp.read()

    if not command:
        raise ValueError(
            "Scripts must have 'command' or 'command_file' specified.")

    cmd = templates.Engine().render(command, template_ctx)
    retcode = exec_script_command(cmd, pretend)

    if verbose:
        log.info("Script exited with code: <33>{}", retcode)

    if retcode not in script.success_exit_codes:
        sys.exit(retcode)


def clean(exclude: List[str]):
    """ Remove all unnecessary files.

    Args:
        exclude (list[str]):
            A list of path patterns to exclude from deletion.
    """
    pretend = context.get('pretend', False)
    exclude = list(exclude) + conf.get('clean.exclude', [])
    clean_patterns = conf.get('clean.patterns', [
        '*__pycache__*',
        '*.py[cod]',
        '*.swp',
        '*.mypy_cache',
        '*.pytest_cache',
        '*.build',
    ])

    if context.get('verbose'):
        log.info('Clean patterns:')
        for pattern in clean_patterns:
            log.info(f' <90>{pattern}')

        log.info('Exclude:')
        for pattern in exclude:
            log.info(f' <90>{pattern}')

    num_files = 0
    with util.timed_block() as t:
        files = fs.filtered_walk(conf.proj_path(), clean_patterns, exclude)
        log.info('')
        log.info('Deleting:')

        for path in files:
            try:
                num_files += 1

                if not isdir(path):
                    log.info(' <91>[file] <90>{}', path)
                    if not pretend:
                        os.remove(path)
                else:
                    log.info(' <91>[dir] <90>{}', path)
                    if not pretend:
                        rmtree(path)
            except OSError:
                log.info("<33>Failed to remove <90>{}", path)

    if pretend:
        msg = "Would delete <33>{}<32> files. Took <33>{}<32>s"
    else:
        msg = "Deleted <33>{}<32> files in <33>{}<32>s"

    log.info(msg.format(num_files, t.elapsed_s))


def test_works_as_expected(p_getcwd, p_chdir, app_conf):
    # Better one test than none
    fake_cwd = 'fake_dir'
    path = '.'
    p_getcwd.return_value = fake_cwd

    with conf.within_proj_dir(path):
        p_getcwd.assert_called()
        p_chdir.assert_called_once_with(conf.proj_path(path))

    # last call was back to our fake working directory
    p_chdir.assert_called_with(fake_cwd)


def test_uses_command_file_if_given(
    p_open: Mock,
    command: str,
    command_file: str,
    app_conf: conf.Config,
):
    """
    GIVEN a command_file is defined for the script
    WHEN I run the script
    THEN the command_file is used, regardless of whether 'command' is also defined
    """
    options: Dict[str, Any] = {}
    script = Script(
        name='test',
        command=command,
        command_file=command_file,
    )

    run_script(script, options)

    p_open.assert_called_once_with(conf.proj_path('fake/file'))


def setup_ci():
    # type: () -> None
    """ Set up the AppEngine SDK on CircleCI. """
    gcloud_path = shell.run('which gcloud', capture=True).stdout.strip()
    sdk_path = normpath(join(gcloud_path, '../../platform/google_appengine'))
    gcloud_cmd = gcloud_path + ' --quiet'

    if not exists(sdk_path):
        log.info("Installing AppEngine SDK")
        shell.run(
            'sudo {} components install app-engine-python'.format(gcloud_cmd))
    else:
        # Only initialise once. To reinitialise, just build without cache.
        log.info("AppEngine SDK already initialised")

    log.info("Using service account authentication")
    shell.run('{} auth activate-service-account --key-file {}'.format(
        gcloud_cmd, conf.proj_path('ops/client_secret.json')))


def test_converts_config_value_to_absolute_path():
    assert conf.get_path('test') == conf.proj_path('hello')


def test_converts_default_to_abspath():
    assert conf.get_path('test', 'hello') == conf.proj_path('hello')