def _check_shell():
    """Warn when setup is running under bash on Windows and let the user abort.

    Bash-on-Windows environments are known to interact badly with the setup
    scripts, so the user is prompted before continuing; declining exits with
    status 0.
    """
    shell_value = os.environ.get(const.SHELL)
    if shell_value is None or not const.IS_WINDOWS:
        return
    if const.BASH_NAME_WIN not in shell_value:
        return
    heading("WARNING: You are running bash in Windows, the setup may not work correctly and "
            "command may have unexpected behavior")
    from knack.prompting import prompt_y_n
    if not prompt_y_n('Would you like to continue with the install?'):
        sys.exit(0)
def check_history(modules=None):
    """Verify that README and HISTORY files render for the selected modules.

    :param modules: optional list of module names to restrict the check to;
        defaults to every core and command module in the path table.

    Exits the process with code 1 when any module fails to render; otherwise
    displays OK.
    """
    # TODO: Does not work with extensions
    path_table = get_path_table(include_only=modules)
    selected_modules = list(path_table['core'].items()) + list(path_table['mod'].items())

    heading('Verify History')

    module_names = sorted([name for name, _ in selected_modules])
    display('Verifying README and HISTORY files for modules: {}'.format(' '.join(module_names)))

    failed_mods = []
    for name, path in selected_modules:
        errors = _check_readme_render(path)
        if errors:
            failed_mods.append(name)
            subheading('{} errors'.format(name))
            for error in errors:
                logger.error('%s\n', error)

    subheading('Results')
    if failed_mods:
        display('The following modules have invalid README/HISTORYs:')
        logger.error('\n'.join(failed_mods))
        logger.warning('See above for the full warning/errors')
        logger.warning('note: Line numbers in the errors map to the long_description of your setup.py.')
        sys.exit(1)
    display('OK')
def check_document_map():
    """Cross-check the doc source map against the help files on disk.

    Raises CLIError when the map references files that do not exist or when
    help files exist that the map does not list; otherwise reports success.
    """
    heading('Verify Document Map')

    cli_repo = get_cli_repo_path()
    map_path = os.path.join(cli_repo, DOC_SOURCE_MAP_PATH)

    mapped_help_files = _get_help_files_in_map(map_path)
    missing_files = _map_help_files_not_found(cli_repo, mapped_help_files)
    unmapped_files = _help_files_not_in_map(cli_repo, mapped_help_files)

    subheading('Results')
    if not missing_files and not unmapped_files:
        display('Verified {} OK.'.format(DOC_MAP_NAME))
        return

    error_lines = ['Errors whilst verifying {}!'.format(DOC_MAP_NAME)]
    if missing_files:
        error_lines.append('The following files are in {} but do not exist:'.format(DOC_MAP_NAME))
        error_lines.extend(missing_files)
    if unmapped_files:
        error_lines.append('The following files should be added to {}:'.format(DOC_MAP_NAME))
        error_lines.extend(unmapped_files)
    raise CLIError('\n'.join(error_lines))
def check_license_headers():
    """Scan every .py file in the CLI repo for the required license header.

    Skips the repo's 'env' virtual-environment folder and azure_bdist_wheel.py.
    Raises CLIError listing every offending file; otherwise reports success.
    """
    heading('Verify License Headers')

    cli_path = get_cli_repo_path()
    env_path = os.path.join(cli_path, 'env')

    files_without_header = []
    for current_dir, _, files in os.walk(cli_path):
        # skip the virtual environment created inside the repo
        if current_dir.startswith(env_path):
            continue

        file_itr = (os.path.join(current_dir, p) for p in files
                    if p.endswith('.py') and p != 'azure_bdist_wheel.py')
        for python_file in file_itr:
            # encoding pinned for deterministic reads across platforms
            # (consistent with the extension-aware variant of this check)
            with open(python_file, 'r', encoding='utf-8') as f:
                file_text = f.read()

            if file_text and LICENSE_HEADER not in file_text:
                # FIX: python_file is already joined with current_dir above;
                # joining it with current_dir a second time was redundant.
                files_without_header.append(python_file)

    subheading('Results')
    if files_without_header:
        raise CLIError(
            "{}\nError: {} files don't have the required license headers.".format(
                '\n'.join(files_without_header), len(files_without_header)))
    display('License headers verified OK.')
def publish_extensions(extensions, storage_subscription, storage_account, storage_container,
                       dist_dir='dist', update_index=False, yes=False):
    """Build extension WHLs and upload them to Azure blob storage.

    :param extensions: names of extensions to build and publish.
    :param storage_subscription: subscription that owns the storage account.
    :param storage_account: target storage account name.
    :param storage_container: target blob container name.
    :param dist_dir: local build output folder; cleared and rebuilt each run.
    :param update_index: when True, update the extension index with the
        uploaded URLs after publishing.
    :param yes: when True, overwrite existing blobs without prompting.
    """
    heading('Publish Extensions')

    require_azure_cli()

    # rebuild the extensions
    subheading('Building WHLs')
    try:
        shutil.rmtree(dist_dir)
    except Exception as ex:  # pylint: disable=broad-except
        # best-effort cleanup: a stale dist folder is not fatal
        logger.debug("Unable to clear folder '%s'. Error: %s", dist_dir, ex)
    build_extensions(extensions, dist_dir=dist_dir)

    whl_files = find_files(dist_dir, '*.whl')
    uploaded_urls = []

    subheading('Uploading WHLs')
    for whl_path in whl_files:
        whl_file = os.path.split(whl_path)[-1]
        # check if extension already exists unless user opted not to
        if not yes:
            command = 'az storage blob exists --subscription {} --account-name {} -c {} -n {}'.format(
                storage_subscription, storage_account, storage_container, whl_file)
            exists = json.loads(cmd(command).result)['exists']
            if exists:
                if not prompt_y_n(
                        "{} already exists. You may need to bump the extension version. Replace?"
                        .format(whl_file), default='n'):
                    logger.warning("Skipping '%s'...", whl_file)
                    continue
        # upload the WHL file
        command = 'az storage blob upload --subscription {} --account-name {} -c {} -n {} -f {}'.format(
            storage_subscription, storage_account, storage_container, whl_file, os.path.abspath(whl_path))
        cmd(command, "Uploading '{}'...".format(whl_file))
        # fetch the public URL of the uploaded blob for the index update
        command = 'az storage blob url --subscription {} --account-name {} -c {} -n {} -otsv'.format(
            storage_subscription, storage_account, storage_container, whl_file)
        url = cmd(command).result
        logger.info(url)
        uploaded_urls.append(url)

    if update_index:
        subheading('Updating Index')
        update_extension_index(uploaded_urls)
    subheading('Published')
    display(uploaded_urls)
    if not update_index:
        logger.warning('You still need to update the index for your changes with `az extension update-index`.')
def check_style(modules=None, pylint=False, pep8=False):
    """Run flake8 and/or pylint style checks over the selected modules.

    :param modules: optional list of module names; defaults to all.
    :param pylint: run pylint (requires Azure CLI to be installed).
    :param pep8: run flake8. If neither flag is given, both checks run.

    Exits the process with the sum of the checkers' exit codes (0 on success).
    """
    heading('Style Check')

    selected_modules = get_path_table(include_only=modules)
    pep8_result = None
    pylint_result = None

    if pylint:
        try:
            require_azure_cli()
        except CLIError:
            raise CLIError('usage error: --pylint requires Azure CLI to be installed.')

    if not selected_modules:
        raise CLIError('No modules selected.')

    mod_names = list(selected_modules['mod'].keys()) + list(selected_modules['core'].keys())
    ext_names = list(selected_modules['ext'].keys())

    if mod_names:
        display('Modules: {}\n'.format(', '.join(mod_names)))
    if ext_names:
        display('Extensions: {}\n'.format(', '.join(ext_names)))

    # if neither flag provided, same as if both were provided
    if not any([pylint, pep8]):
        pep8 = True
        pylint = True

    exit_code_sum = 0
    if pep8:
        pep8_result = _run_pep8(selected_modules)
        exit_code_sum += pep8_result.exit_code

    if pylint:
        pylint_result = _run_pylint(selected_modules)
        exit_code_sum += pylint_result.exit_code

    display('')
    subheading('Results')

    # print success messages first
    if pep8_result and not pep8_result.error:
        display('Flake8: PASSED')
    if pylint_result and not pylint_result.error:
        display('Pylint: PASSED')

    display('')

    # print error messages last
    if pep8_result and pep8_result.error:
        logger.error(pep8_result.error.output.decode('utf-8'))
        logger.error('Flake8: FAILED\n')
    if pylint_result and pylint_result.error:
        logger.error(pylint_result.error.output.decode('utf-8'))
        logger.error('Pylint: FAILED\n')

    sys.exit(exit_code_sum)
def generate_cli_ref_docs(output_dir=None, output_type=None):
    """Generate sphinx reference documentation for all CLI commands.

    :param output_dir: directory for the generated files (resolved/validated
        by _process_ref_doc_output_dir).
    :param output_type: sphinx output format passed to the build.
    """
    # require that azure cli installed
    require_azure_cli()
    output_dir = _process_ref_doc_output_dir(output_dir)
    heading('Generate CLI Reference Docs')
    display("Docs will be placed in {}.".format(output_dir))
    # Generate documentation for all commands
    _call_sphinx_build(output_type, output_dir)
    display("\nThe {} files are in {}".format(output_type, output_dir))
def verify_versions():
    """Compare each local CLI module's version against the published PyPI version.

    Builds every non-excluded core module in a temp dir, compares it against
    PyPI, and prints a module/local/public version table. Exits with code 1
    when any module needs its version bumped; otherwise displays OK.
    """
    import tempfile
    import shutil

    require_azure_cli()

    heading('Verify CLI Versions')

    path_table = get_path_table()
    modules = list(path_table['core'].items())
    modules = [x for x in modules if x[0] not in EXCLUDED_MODULES]

    if not modules:
        raise CLIError('No modules selected to test.')

    display('MODULES: {}'.format(', '.join([x[0] for x in modules])))

    results = {}

    original_cwd = os.getcwd()
    temp_dir = tempfile.mkdtemp()
    for mod, mod_path in modules:
        # normalize the module name to its published package name
        if not mod.startswith(COMMAND_MODULE_PREFIX) and mod != 'azure-cli':
            mod = '{}{}'.format(COMMAND_MODULE_PREFIX, mod)
        results[mod] = {}
        results.update(_compare_module_against_pypi(results, temp_dir, mod, mod_path))
    shutil.rmtree(temp_dir)
    os.chdir(original_cwd)

    logger.info('Module'.ljust(40) + 'Local Version'.rjust(20) + 'Public Version'.rjust(20))  # pylint: disable=logging-not-lazy
    for mod, data in results.items():
        logger.info(mod.ljust(40) + data['local_version'].rjust(20) + data['public_version'].rjust(20))

    bump_mods = {k: v for k, v in results.items() if v['status'] == 'BUMP'}
    subheading('RESULTS')
    if bump_mods:
        logger.error('The following modules need their versions bumped. '
                     'Scroll up for details: %s', ', '.join(bump_mods.keys()))
        # FIX: corrected typo "resuling" -> "resulting" in the warning text
        logger.warning('\nNote that before changing versions, you should consider '
                       'running `git clean` to remove untracked files from your repo. '
                       'Files that were once tracked but removed from the source may '
                       'still be on your machine, resulting in false positives.')
        sys.exit(1)
    else:
        display('OK!')
def _display_success_message(package_name, group_name):
    """Show the "getting started" banner after *package_name* is generated.

    :param package_name: name of the freshly created package.
    :param group_name: CLI command group name used in the suggested commands.
    """
    heading('Creation of {} successful!'.format(package_name))
    display('Getting started:')
    follow_ups = [
        '\n To see your new commands:',
        ' `az {} -h`'.format(group_name),
        '\n To discover and run your tests:',
        ' `azdev test {} --discover`'.format(group_name),
        '\n To identify code style issues (there will be some left over from code generation):',
        ' `azdev style {}`'.format(group_name),
        '\n To identify CLI-specific linter violations:',
        ' `azdev linter {}`'.format(group_name),
    ]
    for line in follow_ups:
        display(line)
def generate_extension_ref_docs(output_dir=None, output_type=None):
    """Generate reference documentation for public CLI extensions.

    :param output_dir: directory for the generated files (resolved/validated
        by _process_ref_doc_output_dir).
    :param output_type: sphinx output format passed to the build.
    """
    # require that azure cli installed
    require_azure_cli()
    output_dir = _process_ref_doc_output_dir(output_dir)
    heading('Generate CLI Extensions Reference Docs')
    display("Docs will be placed in {}.".format(output_dir))
    display("Generating Docs for public extensions. Installed extensions will not be affected...")
    _generate_ref_docs_for_public_exts(output_type, output_dir)
    display("\nThe {} files are in {}".format(output_type, output_dir))
def _generate_extension(ext_name, repo_path, swagger_readme_file_path, use): heading('Start generating extension {}.'.format(ext_name)) # check if npm is installed try: shell_cmd('npm --version', stdout=subprocess.DEVNULL, raise_ex=False) except CLIError as ex: raise CLIError('{}\nPlease install npm.'.format(ex)) display('Installing autorest...\n') if const.IS_WINDOWS: try: shell_cmd('npm install -g autorest', raise_ex=False) except CLIError as ex: raise CLIError("Failed to install autorest.\n{}".format(ex)) else: try: shell_cmd('npm install -g autorest', stderr=subprocess.DEVNULL, raise_ex=False) except CLIError as ex: path = os.environ['PATH'] # check if npm is installed through nvm if os.environ.get('NVM_DIR'): raise ex # check if user using specific node version and manually add it to the os env PATH node_version = shell_cmd('node --version', capture_output=True).result if 'node/' + node_version + '/bin' in path: raise ex # create a new directory for npm global installations, to avoid using sudo in installing autorest npm_path = os.path.join(os.environ['HOME'], '.npm-packages') if not os.path.isdir(npm_path): os.mkdir(npm_path) npm_prefix = shell_cmd('npm prefix -g', capture_output=True).result shell_cmd('npm config set prefix ' + npm_path) os.environ['PATH'] = path + ':' + os.path.join(npm_path, 'bin') os.environ['MANPATH'] = os.path.join(npm_path, 'share', 'man') shell_cmd('npm install -g autorest') shell_cmd('npm config set prefix ' + npm_prefix) # update autorest core shell_cmd('autorest --latest') if not use: cmd = 'autorest --az --azure-cli-extension-folder={} {}'.format( repo_path, swagger_readme_file_path) else: cmd = 'autorest --az --azure-cli-extension-folder={} {} --use={}'.format( repo_path, swagger_readme_file_path, use) shell_cmd(cmd, message=True)
def check_license_headers():
    """Scan the CLI repo and any configured extension repos for license headers.

    Accepts any of the three known header variants (plain, wrapped, codegen).
    Skips azure_bdist_wheel.py and any path containing an _IGNORE_SUBDIRS
    token. Raises CLIError listing every offending file; otherwise reports OK.
    """
    heading('Verify License Headers')

    cli_path = get_cli_repo_path()
    all_paths = [cli_path]
    try:
        ext_repo = get_ext_repo_paths()
        for path in ext_repo:
            all_paths.append(path)
    except CLIError:
        # extension repos are optional; fall back to CLI-only scan
        display("No CLI ext path, running check only on modules")

    files_without_header = []
    for path in all_paths:
        py_files = pathlib.Path(path).glob('**' + os.path.sep + '*.py')
        for py_file in py_files:
            py_file = str(py_file)
            if py_file.endswith('azure_bdist_wheel.py'):
                continue
            for ignore_token in _IGNORE_SUBDIRS:
                if ignore_token in py_file:
                    break
            else:
                # for/else: only reached when no ignore token matched
                with open(str(py_file), 'r', encoding='utf-8') as f:
                    file_text = f.read()
                if not file_text:
                    continue
                test_results = [
                    LICENSE_HEADER in file_text,
                    WRAPPED_LICENSE_HEADER in file_text,
                    CODEGEN_LICENSE_HEADER in file_text
                ]
                if not any(test_results):
                    files_without_header.append(py_file)

    subheading('Results')
    if files_without_header:
        raise CLIError(
            "{}\nError: {} files don't have the required license headers.".format(
                '\n'.join(files_without_header), len(files_without_header)))
    display('License headers verified OK.')
def update_setup_py(pin=False):
    """Regenerate azure-cli's setup.py dependency list from local modules.

    :param pin: when True, pin dependencies to the local module versions.
    """
    require_azure_cli()

    heading('Update azure-cli setup.py')

    path_table = get_path_table()
    azure_cli_path = path_table['core']['azure-cli']
    azure_cli_setup_path = find_files(azure_cli_path, SETUP_PY_NAME)[0]

    candidates = list(path_table['core'].items()) + list(path_table['mod'].items())
    modules = [entry for entry in candidates if entry[0] not in EXCLUDED_MODULES]

    results = {name: {} for name, _ in modules}
    results = _get_module_versions(results, modules)
    _update_setup_py(results, azure_cli_setup_path, pin)
    display('OK!')
def generate_cli_ref_docs(output_dir=None, output_type=None, all_profiles=None):
    """Generate sphinx reference docs for CLI commands.

    :param output_dir: directory for the generated files (resolved/validated
        by _process_ref_doc_output_dir).
    :param output_type: sphinx output format passed to the build.
    :param all_profiles: when truthy, generate docs for every CLI profile
        instead of just the current one.
    """
    # require that azure cli installed and warn the users if extensions are installed.
    require_azure_cli()
    output_dir = _process_ref_doc_output_dir(output_dir)
    _warn_if_exts_installed()
    heading('Generate CLI Reference Docs')
    display("Docs will be placed in {}.".format(output_dir))
    if all_profiles:
        # Generate documentation for all commands and for all CLI profiles
        _generate_ref_docs_for_all_profiles(output_type, output_dir)
    else:
        # Generate documentation for all commands
        _call_sphinx_build(output_type, output_dir)
    display("\nThe {} files are in {}".format(output_type, output_dir))
def check_load_time(runs=3):
    """Measure per-module `az` load time and flag modules over threshold.

    :param runs: number of timed runs; one extra warm-up run is performed
        first and discarded (its timings include *.pyc compilation).

    Raises CLIError when any module's average load time exceeds its threshold
    and no higher-threshold exception slot is available.
    """
    require_azure_cli()

    heading('Module Load Performance')

    # parses "DEBUG: Loaded module 'NAME' in X.XXX seconds" style lines
    regex = r"[^']*'(?P<mod>[^']*)'[\D]*(?P<val>[\d\.]*)"

    results = {TOTAL: []}
    # Time the module loading X times
    for i in range(0, runs + 1):
        lines = cmd('az -h --debug', show_stderr=True).result
        if i == 0:
            # Ignore the first run since it can be longer due to *.pyc file compilation
            continue
        try:
            lines = lines.decode().splitlines()
        except AttributeError:
            # already text, not bytes
            lines = lines.splitlines()
        total_time = 0
        for line in lines:
            if line.startswith('DEBUG: Loaded module'):
                matches = re.match(regex, line)
                mod = matches.group('mod')
                # convert seconds to milliseconds
                val = float(matches.group('val')) * 1000
                total_time = total_time + val
                if mod in results:
                    results[mod].append(val)
                else:
                    results[mod] = [val]
        results[TOTAL].append(total_time)

    passed_mods = {}
    failed_mods = {}

    def _claim_higher_threshold(val):
        # grant one of the limited higher-threshold exception slots, if any
        # slot with a threshold above `val` remains available
        avail_thresholds = {k: v for k, v in THRESHOLDS.items() if v}
        new_threshold = None
        for threshold in sorted(avail_thresholds):
            if val < threshold:
                THRESHOLDS[threshold] = THRESHOLDS[threshold] - 1
                new_threshold = threshold
                break
        return new_threshold

    mods = sorted(results.keys())
    for mod in mods:
        val = results[mod]
        mean_val = mean(val)
        stdev_val = pstdev(val)
        threshold = TOTAL_THRESHOLD if mod == TOTAL else DEFAULT_THRESHOLD
        statistics = {
            'average': mean_val,
            'stdev': stdev_val,
            'threshold': threshold,
            'values': val
        }
        if mean_val > threshold:
            # claim a threshold exception if available
            new_threshold = _claim_higher_threshold(mean_val)
            if new_threshold:
                statistics['threshold'] = new_threshold
                passed_mods[mod] = statistics
            else:
                failed_mods[mod] = statistics
        else:
            passed_mods[mod] = statistics

    subheading('Results')
    if failed_mods:
        display('== PASSED MODULES ==')
        display_table(passed_mods)
        display('\nFAILED MODULES')
        display_table(failed_mods)
        raise CLIError("""
FAILED: Some modules failed. If values are close to the threshold, rerun.
If values are large, check that you do not have top-level imports like azure.mgmt or msrestazure in any modified files.
""")

    display('== PASSED MODULES ==')
    display_table(passed_mods)
    display('\nPASSED: Average load time all modules: {} ms'.format(
        int(passed_mods[TOTAL]['average'])))
def run_tests(tests, xml_path=None, discover=False, in_series=False,
              run_live=False, profile=None, last_failed=False, pytest_args=None,
              no_exit_first=False, git_source=None, git_target=None, git_repo=None,
              cli_ci=False):
    """Resolve test names against the test index and run them with pytest.

    :param tests: test/module names to run; ['CLI'] or ['EXT'] select all CLI
        modules or all extensions respectively; empty selects everything.
    :param xml_path: file (or directory) for the JUnit XML results.
    :param discover: rebuild the test index before running.
    :param in_series: disable parallel execution.
    :param run_live: run tests live (sets the live-test env var).
    :param profile: CLI profile to run under.
    :param last_failed: rerun only previously failed tests.
    :param pytest_args: extra arguments forwarded to pytest.
    :param no_exit_first: do not stop on the first failure.
    :param git_source/git_target/git_repo: restrict to modules changed in the
        given git diff.
    :param cli_ci: use the Azure DevOps CI context to select modules.

    Exits 0 on success, 1 on test failure.
    """
    require_virtual_env()

    DEFAULT_RESULT_FILE = 'test_results.xml'
    DEFAULT_RESULT_PATH = os.path.join(get_azdev_config_dir(), DEFAULT_RESULT_FILE)

    heading('Run Tests')

    path_table = get_path_table()

    test_index = _get_test_index(profile or current_profile(), discover)

    if not tests:
        tests = list(path_table['mod'].keys()) + list(path_table['core'].keys()) + list(path_table['ext'].keys())
    if tests == ['CLI']:
        tests = list(path_table['mod'].keys()) + list(path_table['core'].keys())
    elif tests == ['EXT']:
        tests = list(path_table['ext'].keys())

    # filter out tests whose modules haven't changed
    modified_mods = _filter_by_git_diff(tests, test_index, git_source, git_target, git_repo)
    if modified_mods:
        display('\nTest on modules: {}\n'.format(', '.join(modified_mods)))

    if cli_ci is True:
        # CI mode: module selection is delegated entirely to the DevOps context
        ctx = CLIAzureDevOpsContext(git_repo, git_source, git_target)
        modified_mods = ctx.filter(test_index)

    # resolve the path at which to dump the XML results
    xml_path = xml_path or DEFAULT_RESULT_PATH
    if not xml_path.endswith('.xml'):
        xml_path = os.path.join(xml_path, DEFAULT_RESULT_FILE)

    # process environment variables
    if run_live:
        logger.warning('RUNNING TESTS LIVE')
        os.environ[ENV_VAR_TEST_LIVE] = 'True'

    def _find_test(index, name):
        # resolve a dotted test name against the index, trying progressively
        # longer suffixes of the name until one matches
        name_comps = name.split('.')
        num_comps = len(name_comps)
        key_error = KeyError()
        for i in range(num_comps):
            check_name = '.'.join(name_comps[(-1 - i):])
            try:
                match = index[check_name]
                if check_name != name:
                    logger.info("Test found using just '%s'. The rest of the name was ignored.\n", check_name)
                return match
            except KeyError as ex:
                key_error = ex
                continue
        raise key_error

    # lookup test paths from index
    test_paths = []
    for t in modified_mods:
        try:
            test_path = os.path.normpath(_find_test(test_index, t))
            test_paths.append(test_path)
        except KeyError:
            logger.warning("'%s' not found. If newly added, re-run with --discover", t)
            continue

    exit_code = 0

    # Tests have been collected. Now run them.
    if not test_paths:
        logger.warning('No tests selected to run.')
        sys.exit(exit_code)

    exit_code = 0
    with ProfileContext(profile):
        runner = get_test_runner(parallel=not in_series,
                                 log_path=xml_path,
                                 last_failed=last_failed,
                                 no_exit_first=no_exit_first)
        exit_code = runner(test_paths=test_paths, pytest_args=pytest_args)

    sys.exit(0 if not exit_code else 1)
def check_style(modules=None, pylint=False, pep8=False, git_source=None, git_target=None, git_repo=None):
    """Run flake8 and/or pylint style checks over the selected modules.

    :param modules: optional list of module names; ['CLI'] or ['EXT'] restrict
        the run to CLI modules or extensions respectively.
    :param pylint: run pylint (requires Azure CLI to be installed).
    :param pep8: run flake8. If neither flag is given, both checks run.
    :param git_source/git_target/git_repo: restrict to modules changed in the
        given git diff.

    Exits the process with the sum of the checkers' exit codes (0 on success).
    """
    heading('Style Check')

    # allow user to run only on CLI or extensions
    cli_only = modules == ['CLI']
    ext_only = modules == ['EXT']
    if cli_only or ext_only:
        modules = None

    selected_modules = get_path_table(include_only=modules)

    # remove these two non-modules
    selected_modules['core'].pop('azure-cli-nspkg', None)
    selected_modules['core'].pop('azure-cli-command_modules-nspkg', None)

    pep8_result = None
    pylint_result = None

    if pylint:
        try:
            require_azure_cli()
        except CLIError:
            raise CLIError('usage error: --pylint requires Azure CLI to be installed.')

    if cli_only:
        ext_names = None
        selected_modules['ext'] = {}
    if ext_only:
        mod_names = None
        selected_modules['mod'] = {}
        selected_modules['core'] = {}

    # filter down to only modules that have changed based on git diff
    selected_modules = filter_by_git_diff(selected_modules, git_source, git_target, git_repo)

    if not any((selected_modules[x] for x in selected_modules)):
        raise CLIError('No modules selected.')

    mod_names = list(selected_modules['mod'].keys()) + list(selected_modules['core'].keys())
    ext_names = list(selected_modules['ext'].keys())

    if mod_names:
        display('Modules: {}\n'.format(', '.join(mod_names)))
    if ext_names:
        display('Extensions: {}\n'.format(', '.join(ext_names)))

    # if neither flag provided, same as if both were provided
    if not any([pylint, pep8]):
        pep8 = True
        pylint = True

    exit_code_sum = 0
    if pep8:
        pep8_result = _run_pep8(selected_modules)
        exit_code_sum += pep8_result.exit_code

    if pylint:
        pylint_result = _run_pylint(selected_modules)
        exit_code_sum += pylint_result.exit_code

    display('')
    subheading('Results')

    # print success messages first
    if pep8_result and not pep8_result.error:
        display('Flake8: PASSED')
    if pylint_result and not pylint_result.error:
        display('Pylint: PASSED')

    display('')

    # print error messages last
    if pep8_result and pep8_result.error:
        logger.error(pep8_result.error.output.decode('utf-8'))
        logger.error('Flake8: FAILED\n')
    if pylint_result and pylint_result.error:
        logger.error(pylint_result.error.output.decode('utf-8'))
        logger.error('Pylint: FAILED\n')

    sys.exit(exit_code_sum)
def verify_versions(modules=None, update=False, pin=False):
    """Verify local module versions against PyPI and azure-cli's setup.py.

    :param modules: explicit module names to verify; mutually exclusive with
        --update/--pin.
    :param update: rewrite setup.py with the verified versions.
    :param pin: pin versions in setup.py (requires --update).

    Exits 1 when a module needs a version bump, or when a module's version
    mismatches setup.py and --update was not given; otherwise displays OK.
    """
    import tempfile
    import shutil

    require_azure_cli()

    heading('Verify CLI Module Versions')

    usage_err = CLIError('usage error: <MODULES> | --update [--pin]')
    if modules and (update or pin):
        raise usage_err
    if not modules and pin and not update:
        raise usage_err

    if modules:
        update = None
        pin = None

    path_table = get_path_table(include_only=modules)
    modules = list(path_table['core'].items()) + list(path_table['mod'].items())
    modules = [x for x in modules if x[0] not in EXCLUDED_MODULES]

    if not modules:
        raise CLIError('No modules selected to test.')

    display('MODULES: {}'.format(', '.join([x[0] for x in modules])))

    results = {mod[0]: {} for mod in modules}

    original_cwd = os.getcwd()
    temp_dir = tempfile.mkdtemp()
    for mod, mod_path in modules:
        # normalize the module name to its published package name
        if not mod.startswith(COMMAND_MODULE_PREFIX) and mod != 'azure-cli':
            mod = '{}{}'.format(COMMAND_MODULE_PREFIX, mod)
        results.update(_compare_module_against_pypi(results, temp_dir, mod, mod_path))
    shutil.rmtree(temp_dir)
    os.chdir(original_cwd)

    results = _check_setup_py(results, update, pin)

    logger.info('Module'.ljust(40) + 'Local Version'.rjust(20) + 'Public Version'.rjust(20))  # pylint: disable=logging-not-lazy
    for mod, data in results.items():
        logger.info(mod.ljust(40) + data['local_version'].rjust(20) + data['public_version'].rjust(20))

    bump_mods = {k: v for k, v in results.items() if v['status'] == 'BUMP'}
    mismatch_mods = {k: v for k, v in results.items() if v['status'] == 'MISMATCH'}
    subheading('RESULTS')
    if bump_mods:
        logger.error('The following modules need their versions bumped. '
                     'Scroll up for details: %s', ', '.join(bump_mods.keys()))
        # FIX: corrected typo "resuling" -> "resulting" in the warning text
        logger.warning('\nNote that before changing versions, you should consider '
                       'running `git clean` to remove untracked files from your repo. '
                       'Files that were once tracked but removed from the source may '
                       'still be on your machine, resulting in false positives.')
        sys.exit(1)
    elif mismatch_mods and not update:
        logger.error('The following modules have a mismatch between the module version '
                     'and the version in azure-cli\'s setup.py file. '
                     'Scroll up for details: %s', ', '.join(mismatch_mods.keys()))
        sys.exit(1)
    else:
        display('OK!')
def run_linter(modules=None, rule_types=None, rules=None, ci_exclusions=None,
               git_source=None, git_target=None, git_repo=None,
               include_whl_extensions=False, min_severity=None, save_global_exclusion=False):
    """Run the CLI linter over the selected modules and/or extensions.

    :param modules: module names; ['CLI'] or ['EXT'] restrict to CLI modules
        or extensions respectively; None selects everything.
    :param rule_types: subset of {'params', 'commands', 'command_groups',
        'help_entries'} to run; None runs all.
    :param rules: specific rule names to include.
    :param ci_exclusions: honor CI-specific exclusions.
    :param git_source/git_target/git_repo: restrict to modules changed in the
        given git diff.
    :param include_whl_extensions: also lint wheel-installed extensions.
    :param min_severity: minimum linter severity to report.
    :param save_global_exclusion: regenerate the global linter_exclusions.yml
        (only meaningful with CLI-only or EXT-only runs).

    Exits the process with the linter's exit code.
    """
    require_azure_cli()

    from azure.cli.core import get_default_cli  # pylint: disable=import-error
    from azure.cli.core.file_util import (  # pylint: disable=import-error
        get_all_help, create_invoker_and_load_cmds_and_args)

    heading('CLI Linter')

    # allow user to run only on CLI or extensions
    cli_only = modules == ['CLI']
    ext_only = modules == ['EXT']
    if cli_only or ext_only:
        modules = None

    # process severity option
    if min_severity:
        try:
            min_severity = LinterSeverity.get_linter_severity(min_severity)
        except ValueError:
            valid_choices = linter_severity_choices()
            raise CLIError("Please specify a valid linter severity. It should be one of: {}"
                           .format(", ".join(valid_choices)))

    # needed to remove helps from azdev
    azdev_helps = helps.copy()
    exclusions = {}
    selected_modules = get_path_table(include_only=modules,
                                      include_whl_extensions=include_whl_extensions)

    if cli_only:
        selected_modules['ext'] = {}
    if ext_only:
        selected_modules['mod'] = {}
        selected_modules['core'] = {}

    # used to upsert global exclusion
    update_global_exclusion = None
    if save_global_exclusion and (cli_only or ext_only):
        if cli_only:
            update_global_exclusion = 'CLI'
            if os.path.exists(os.path.join(get_cli_repo_path(), 'linter_exclusions.yml')):
                os.remove(os.path.join(get_cli_repo_path(), 'linter_exclusions.yml'))
        elif ext_only:
            update_global_exclusion = 'EXT'
            for ext_path in get_ext_repo_paths():
                if os.path.exists(os.path.join(ext_path, 'linter_exclusions.yml')):
                    os.remove(os.path.join(ext_path, 'linter_exclusions.yml'))

    # filter down to only modules that have changed based on git diff
    selected_modules = filter_by_git_diff(selected_modules, git_source, git_target, git_repo)

    if not any((selected_modules[x] for x in selected_modules)):
        raise CLIError('No modules selected.')

    selected_mod_names = list(selected_modules['mod'].keys()) + list(selected_modules['core'].keys()) + \
        list(selected_modules['ext'].keys())
    selected_mod_paths = list(selected_modules['mod'].values()) + list(selected_modules['core'].values()) + \
        list(selected_modules['ext'].values())

    if selected_mod_names:
        display('Modules: {}\n'.format(', '.join(selected_mod_names)))

    # collect all rule exclusions
    for path in selected_mod_paths:
        exclusion_path = os.path.join(path, 'linter_exclusions.yml')
        if os.path.isfile(exclusion_path):
            # FIX: close the YAML file deterministically instead of leaking
            # the handle from yaml.safe_load(open(...))
            with open(exclusion_path) as exclusion_file:
                mod_exclusions = yaml.safe_load(exclusion_file)
            merge_exclusion(exclusions, mod_exclusions or {})

    global_exclusion_paths = [os.path.join(get_cli_repo_path(), 'linter_exclusions.yml')]
    try:
        global_exclusion_paths.extend([os.path.join(path, 'linter_exclusions.yml')
                                       for path in (get_ext_repo_paths() or [])])
    except CLIError:
        # extension repos are optional
        pass
    for path in global_exclusion_paths:
        if os.path.isfile(path):
            # FIX: same handle-leak fix as above
            with open(path) as exclusion_file:
                mod_exclusions = yaml.safe_load(exclusion_file)
            merge_exclusion(exclusions, mod_exclusions or {})

    start = time.time()
    display('Initializing linter with command table and help files...')
    az_cli = get_default_cli()

    # load commands, args, and help
    create_invoker_and_load_cmds_and_args(az_cli)
    loaded_help = get_all_help(az_cli)

    stop = time.time()
    logger.info('Commands and help loaded in %i sec', stop - start)
    command_loader = az_cli.invocation.commands_loader

    # format loaded help
    loaded_help = {data.command: data for data in loaded_help if data.command}

    # load yaml help
    help_file_entries = {}
    for entry_name, help_yaml in helps.items():
        # ignore help entries from azdev itself, unless it also coincides
        # with a CLI or extension command name.
        if entry_name in azdev_helps and entry_name not in command_loader.command_table:
            continue
        help_entry = yaml.safe_load(help_yaml)
        help_file_entries[entry_name] = help_entry

    # trim command table and help to just selected_modules
    command_loader, help_file_entries = filter_modules(
        command_loader, help_file_entries, modules=selected_mod_names,
        include_whl_extensions=include_whl_extensions)

    if not command_loader.command_table:
        raise CLIError('No commands selected to check.')

    # Instantiate and run Linter
    linter_manager = LinterManager(command_loader=command_loader,
                                   help_file_entries=help_file_entries,
                                   loaded_help=loaded_help,
                                   exclusions=exclusions,
                                   rule_inclusions=rules,
                                   use_ci_exclusions=ci_exclusions,
                                   min_severity=min_severity,
                                   update_global_exclusion=update_global_exclusion)

    subheading('Results')
    logger.info('Running linter: %i commands, %i help entries',
                len(command_loader.command_table), len(help_file_entries))
    exit_code = linter_manager.run(
        run_params=not rule_types or 'params' in rule_types,
        run_commands=not rule_types or 'commands' in rule_types,
        run_command_groups=not rule_types or 'command_groups' in rule_types,
        run_help_files_entries=not rule_types or 'help_entries' in rule_types)
    sys.exit(exit_code)
def run_tests(tests, xml_path=None, discover=False, in_series=False,
              run_live=False, profile=None, last_failed=False, pytest_args=None,
              git_source=None, git_target=None, git_repo=None):
    """Resolve test names against the test index and run them with pytest.

    :param tests: test/module names to run; ['CLI'] or ['EXT'] select all CLI
        modules or all extensions respectively; empty selects everything.
    :param xml_path: file (or directory) for the JUnit XML results.
    :param discover: rebuild the test index before running.
    :param in_series: disable parallel execution.
    :param run_live: run tests live (sets the live-test env var).
    :param profile: CLI profile to run under; the original profile is
        restored afterwards if it was changed.
    :param last_failed: rerun only previously failed tests.
    :param pytest_args: extra arguments forwarded to pytest.
    :param git_source/git_target/git_repo: restrict to modules changed in the
        given git diff.

    Exits 0 on success, 1 on test failure.
    """
    require_virtual_env()

    DEFAULT_RESULT_FILE = 'test_results.xml'
    DEFAULT_RESULT_PATH = os.path.join(get_azdev_config_dir(), DEFAULT_RESULT_FILE)

    from .pytest_runner import get_test_runner

    heading('Run Tests')

    original_profile = _get_profile(profile)
    if not profile:
        profile = original_profile
    path_table = get_path_table()

    test_index = _get_test_index(profile, discover)

    if not tests:
        tests = list(path_table['mod'].keys()) + list(path_table['core'].keys()) + list(path_table['ext'].keys())
    if tests == ['CLI']:
        tests = list(path_table['mod'].keys()) + list(path_table['core'].keys())
    elif tests == ['EXT']:
        tests = list(path_table['ext'].keys())

    # filter out tests whose modules haven't changed
    tests = _filter_by_git_diff(tests, test_index, git_source, git_target, git_repo)

    if tests:
        display('\nTESTS: {}\n'.format(', '.join(tests)))

    # resolve the path at which to dump the XML results
    xml_path = xml_path or DEFAULT_RESULT_PATH
    if not xml_path.endswith('.xml'):
        xml_path = os.path.join(xml_path, DEFAULT_RESULT_FILE)

    # process environment variables
    if run_live:
        logger.warning('RUNNING TESTS LIVE')
        os.environ[ENV_VAR_TEST_LIVE] = 'True'

    def _find_test(index, name):
        # resolve a dotted test name against the index, trying progressively
        # longer suffixes of the name until one matches
        name_comps = name.split('.')
        num_comps = len(name_comps)
        key_error = KeyError()
        for i in range(num_comps):
            check_name = '.'.join(name_comps[(-1 - i):])
            try:
                match = index[check_name]
                if check_name != name:
                    logger.info("Test found using just '%s'. The rest of the name was ignored.\n", check_name)
                return match
            except KeyError as ex:
                key_error = ex
                continue
        raise key_error

    # lookup test paths from index
    test_paths = []
    for t in tests:
        try:
            test_path = os.path.normpath(_find_test(test_index, t))
            test_paths.append(test_path)
        except KeyError:
            logger.warning("'%s' not found. If newly added, re-run with --discover", t)
            continue

    # Tests have been collected. Now run them.
    if not test_paths:
        raise CLIError('No tests selected to run.')

    runner = get_test_runner(parallel=not in_series, log_path=xml_path, last_failed=last_failed)
    exit_code = runner(test_paths=test_paths, pytest_args=pytest_args)
    _summarize_test_results(xml_path)

    # attempt to restore the original profile
    if profile != original_profile:
        result = raw_cmd('az cloud update --profile {}'.format(original_profile),
                         "Restoring profile '{}'.".format(original_profile))
        if result.exit_code != 0:
            logger.warning("Failed to restore profile '%s'.", original_profile)

    sys.exit(0 if not exit_code else 1)
def setup(cli_path=None, ext_repo_path=None, ext=None, deps=None, set_env=None, copy=None, use_global=None):
    """Set up an azdev development environment (venv, configs, CLI/extensions).

    :param cli_path: path to the azure-cli repo, or "pypi" for legacy install.
    :param ext_repo_path: path to the azure-cli-extensions repo.
    :param ext: extension names to install (requires ext_repo_path).
    :param deps: dependency handling mode, forwarded to legacy setup.
    :param set_env: name of a virtual environment to create and use.
    :param copy: copy the global ~/.azure and ~/.azdev configs into the venv.
    :param use_global: use the global configs in place instead of copying.
    """
    _check_env(set_env)

    _check_shell()

    heading('Azure CLI Dev Setup')

    # cases for handling legacy install
    if not any([cli_path, ext_repo_path]) or cli_path == "pypi":
        # NOTE(review): message typos ("atleast", "wihout") preserved as-is;
        # they are runtime strings and should be fixed in a dedicated change.
        display("WARNING: Installing azdev in legacy mode. Run with atleast -c "
                "to install the latest azdev wihout \"pypi\"\n")
        return _handle_legacy(cli_path, ext_repo_path, ext, deps, time.time())
    if 'CONDA_PREFIX' in os.environ:
        raise CLIError('CONDA virutal enviroments are not supported outside'
                       ' of interactive mode or when -c and -r are provided')

    if not cli_path:
        cli_path = _handle_no_cli_path()
    _validate_input(cli_path, ext_repo_path, set_env, copy, use_global, ext)
    _check_paths(cli_path, ext_repo_path)
    if set_env:
        # create the requested virtual environment and anchor paths inside it
        shell_cmd((const.VENV_CMD if const.IS_WINDOWS else const.VENV_CMD3) + set_env, raise_ex=False)
        azure_path = os.path.join(os.path.abspath(os.getcwd()), set_env)
    else:
        azure_path = os.environ.get('VIRTUAL_ENV')

    dot_azure_config = os.path.join(azure_path, '.azure')
    dot_azdev_config = os.path.join(azure_path, '.azdev')

    # clean up venv dirs if they already existed
    # and this is a reinstall/new setup
    if os.path.isdir(dot_azure_config):
        shutil.rmtree(dot_azure_config)
    if os.path.isdir(dot_azdev_config):
        shutil.rmtree(dot_azdev_config)

    global_az_config = os.path.expanduser(os.path.join('~', '.azure'))
    global_azdev_config = os.path.expanduser(os.path.join('~', '.azdev'))
    azure_config_path = os.path.join(dot_azure_config, const.CONFIG_NAME)
    azdev_config_path = os.path.join(dot_azdev_config, const.CONFIG_NAME)

    if os.path.isdir(global_az_config) and copy:
        # copy existing global configs into the venv-local config dirs
        shutil.copytree(global_az_config, dot_azure_config)
        if os.path.isdir(global_azdev_config):
            shutil.copytree(global_azdev_config, dot_azdev_config)
        else:
            os.mkdir(dot_azdev_config)
            # touch an empty azdev config file
            file = open(azdev_config_path, "w")
            file.close()
    elif not use_global and not copy:
        # fresh, empty venv-local config dirs
        os.mkdir(dot_azure_config)
        os.mkdir(dot_azdev_config)
        file_az, file_dev = open(azure_config_path, "w"), open(azdev_config_path, "w")
        file_az.close()
        file_dev.close()
    elif os.path.isdir(global_az_config):
        # use the global configs in place
        dot_azure_config, dot_azdev_config = global_az_config, global_azdev_config
        azure_config_path = os.path.join(dot_azure_config, const.CONFIG_NAME)
    else:
        raise CLIError("Global AZ config is not set up, yet it was specified to be used.")

    # set env vars for get azure config and get azdev config
    os.environ['AZURE_CONFIG_DIR'], os.environ['AZDEV_CONFIG_DIR'] = dot_azure_config, dot_azdev_config

    config = get_azure_config()
    if not config.get('cloud', 'name', None):
        config.set_value('cloud', 'name', 'AzureCloud')
    if ext_repo_path:
        config.set_value(const.EXT_SECTION, const.AZ_DEV_SRC, os.path.abspath(ext_repo_path))
    venv.edit_activate(azure_path, dot_azure_config, dot_azdev_config)
    if cli_path:
        config.set_value('clipath', const.AZ_DEV_SRC, os.path.abspath(cli_path))
        venv.install_cli(os.path.abspath(cli_path), azure_path)
    config = get_azdev_config()
    config.set_value('ext', 'repo_paths',
                     os.path.abspath(ext_repo_path) if ext_repo_path else '_NONE_')
    config.set_value('cli', 'repo_path', os.path.abspath(cli_path))
    _copy_config_files()
    if ext and ext_repo_path:
        venv.install_extensions(azure_path, ext)

    if not set_env:
        heading("The setup was successful! Please run or re-run the virtual environment activation script.")
    else:
        heading("The setup was successful!")
    return None
def setup(cli_path=None, ext_repo_path=None, ext=None, deps=None):
    """Set up an azdev environment inside an existing virtual environment.

    With no arguments, runs the interactive setup flow. Otherwise resolves the
    CLI repo path (auto-detecting when the `Flag` sentinel is passed), registers
    extension repos/extensions, records the choices in azdev config, and
    installs the selected packages.

    :param cli_path: CLI repo path, the `Flag` sentinel for autodetect,
        "pypi" for a PyPI install, or 'EDGE'.
    :param ext_repo_path: list of extension repo paths to register.
    :param ext: list of extension names to install, or ['*'] for all.
    :param deps: dependency-resolution option forwarded to _install_cli.
    :raises CLIError: when the CLI repo cannot be located, or when --ext is
        given without --repo.
    """
    require_virtual_env()
    start = time.time()
    heading('Azure CLI Dev Setup')
    ext_to_install = []
    if not any([cli_path, ext_repo_path, ext]):
        # No arguments at all: fall into the interactive prompts.
        cli_path, ext_repo_path, ext_to_install = _interactive_setup()
    else:
        if cli_path == "pypi":
            # "pypi" means: do not use a local CLI repo.
            cli_path = None
        # otherwise assume programmatic setup
        if cli_path:
            # Sentinel file used to recognize the root of the azure-cli repo.
            CLI_SENTINEL = 'azure-cli.pyproj'
            if cli_path == Flag:
                # `-c` with no value: search upward/downward for the repo.
                cli_path = find_file(CLI_SENTINEL)
            if not cli_path:
                raise CLIError(
                    'Unable to locate your CLI repo. Things to check:'
                    '\n Ensure you have cloned the repo. '
                    '\n Specify the path explicitly with `-c PATH`. '
                    '\n If you run with `-c` to autodetect, ensure you are running '
                    'this command from a folder upstream of the repo.')
            if cli_path != 'EDGE':
                cli_path = _check_path(cli_path, CLI_SENTINEL)
            display('Azure CLI:\n {}\n'.format(cli_path))
        else:
            display('Azure CLI:\n PyPI\n')
        # must add the necessary repo to add an extension
        if ext and not ext_repo_path:
            raise CLIError(
                'usage error: --repo EXT_REPO [EXT_REPO ...] [--ext EXT_NAME ...]'
            )
        # Reset dev_sources before re-registering the requested repos.
        get_azure_config().set_value('extension', 'dev_sources', '')
        if ext_repo_path:
            # add extension repo(s)
            add_extension_repo(ext_repo_path)
            display('Azure CLI extension repos:\n {}'.format('\n '.join(
                [os.path.abspath(x) for x in ext_repo_path])))
        if ext == ['*']:
            # Wildcard: install every extension discoverable in the repos.
            ext_to_install = [x['path'] for x in list_extensions()]
        elif ext:
            # add extension(s)
            available_extensions = [x['name'] for x in list_extensions()]
            not_found = [x for x in ext if x not in available_extensions]
            if not_found:
                raise CLIError(
                    "The following extensions were not found. Ensure you have added "
                    "the repo using `--repo/-r PATH`.\n {}".format(
                        '\n '.join(not_found)))
            ext_to_install = [
                x['path'] for x in list_extensions() if x['name'] in ext
            ]
    if ext_to_install:
        display('\nAzure CLI extensions:\n {}'.format(
            '\n '.join(ext_to_install)))
    dev_sources = get_azure_config().get('extension', 'dev_sources', None)
    # save data to config files
    config = get_azdev_config()
    config.set_value('ext', 'repo_paths',
                     dev_sources if dev_sources else '_NONE_')
    config.set_value('cli', 'repo_path', cli_path if cli_path else '_NONE_')
    # install packages
    subheading('Installing packages')
    # upgrade to latest pip
    pip_cmd('install --upgrade pip -q', 'Upgrading pip...')
    _install_cli(cli_path, deps=deps)
    _install_extensions(ext_to_install)
    _copy_config_files()
    end = time.time()
    elapsed_min = int((end - start) / 60)
    elapsed_sec = int(end - start) % 60
    display('\nElapsed time: {} min {} sec'.format(elapsed_min, elapsed_sec))
    subheading('Finished dev setup!')
def publish_extensions(extensions, storage_account, storage_account_key, storage_container, dist_dir='dist', update_index=False, yes=False):
    """Build extension WHLs and upload them to Azure blob storage.

    Rebuilds the WHLs into ``dist_dir``, uploads each to the given storage
    container (prompting before overwriting an existing blob unless ``yes``),
    and optionally updates the extension index with the uploaded URLs.

    :param extensions: extensions to build and publish.
    :param storage_account: storage account name.
    :param storage_account_key: storage account key.
    :param storage_container: target blob container.
    :param dist_dir: output directory for built WHLs.
    :param update_index: also update the extension index.
    :param yes: skip the overwrite confirmation prompt.
    """
    from azure.multiapi.storage.v2018_11_09.blob import BlockBlobService

    heading('Publish Extensions')

    require_azure_cli()

    # rebuild the extensions
    subheading('Building WHLs')
    try:
        shutil.rmtree(dist_dir)
    except Exception as ex:  # pylint: disable=broad-except
        # Best effort: a stale/locked dist folder should not abort the publish.
        logger.debug("Unable to clear folder '%s'. Error: %s", dist_dir, ex)
    build_extensions(extensions, dist_dir=dist_dir)

    whl_files = find_files(dist_dir, '*.whl')
    uploaded_urls = []

    subheading('Uploading WHLs')
    # The client depends only on the account/key, so create it once
    # instead of once per WHL file.
    client = BlockBlobService(account_name=storage_account,
                              account_key=storage_account_key)
    for whl_path in whl_files:
        whl_file = os.path.split(whl_path)[-1]
        exists = client.exists(container_name=storage_container, blob_name=whl_file)

        # check if extension already exists unless user opted not to
        if not yes and exists:
            if not prompt_y_n(
                    "{} already exists. You may need to bump the extension version. Replace?"
                    .format(whl_file), default='n'):
                logger.warning("Skipping '%s'...", whl_file)
                continue
        # upload the WHL file
        client.create_blob_from_path(container_name=storage_container,
                                     blob_name=whl_file,
                                     file_path=os.path.abspath(whl_path))
        url = client.make_blob_url(container_name=storage_container,
                                   blob_name=whl_file)
        logger.info(url)
        uploaded_urls.append(url)

    if update_index:
        subheading('Updating Index')
        update_extension_index(uploaded_urls)
    subheading('Published WHLs')
    for url in uploaded_urls:
        display(url)
    if not update_index:
        logger.warning('You still need to update the index for your changes!')
        logger.warning(' az extension update-index <URL>')
def _discover_tests(profile):
    """ Builds an index of tests so that the user can simply supply the name they wish to test instead of the full path. """
    # 'latest-2019-03-01' style profile names become 'latest' / '2019_03_01_latest'
    # namespaces: the trailing component leads, the rest follow, joined by '_'.
    profile_split = profile.split('-')
    profile_namespace = '_'.join([profile_split[-1]] + profile_split[:-1])

    heading('Discovering Tests')

    path_table = get_path_table()
    core_modules = path_table['core'].items()
    command_modules = path_table['mod'].items()
    extensions = path_table['ext'].items()
    inverse_name_table = get_name_index(invert=True)

    # module name -> {file -> {class -> [test names]}}
    module_data = {}

    logger.info('\nCore Modules: %s', ', '.join([name for name, _ in core_modules]))
    for mod_name, mod_path in core_modules:
        # Core packages nest one folder per dash-separated name component
        # (e.g. azure-cli-core -> azure/cli/core).
        file_path = mod_path
        for comp in mod_name.split('-'):
            file_path = os.path.join(file_path, comp)

        mod_data = {
            # 'main' is the user-facing alias for the azure-cli package itself.
            'alt_name': 'main' if mod_name == 'azure-cli' else mod_name.replace(COMMAND_MODULE_PREFIX, ''),
            'filepath': os.path.join(file_path, 'tests'),
            'base_path': '{}.tests'.format(mod_name).replace('-', '.'),
            'files': {}
        }
        tests = _discover_module_tests(mod_name, mod_data)
        if tests:
            module_data[mod_name] = tests

    logger.info('\nCommand Modules: %s', ', '.join([name for name, _ in command_modules]))
    for mod_name, mod_path in command_modules:
        mod_data = {
            # Modules don't technically have azure-cli-foo moniker anymore, but preserving
            # for consistency.
            'alt_name': '{}{}'.format(COMMAND_MODULE_PREFIX, mod_name),
            'filepath': os.path.join(
                mod_path, 'tests', profile_namespace),
            'base_path': 'azure.cli.command_modules.{}.tests.{}'.format(mod_name, profile_namespace),
            'files': {}
        }
        tests = _discover_module_tests(mod_name, mod_data)
        if tests:
            module_data[mod_name] = tests

    logger.info('\nExtensions: %s', ', '.join([name for name, _ in extensions]))
    for mod_name, mod_path in extensions:
        # The importable package lives in an 'azext_*' folder inside the repo dir.
        glob_pattern = os.path.normcase(os.path.join('{}*'.format(EXTENSION_PREFIX)))
        try:
            file_path = glob.glob(os.path.join(mod_path, glob_pattern))[0]
        except IndexError:
            logger.debug("No extension found at: %s", os.path.join(mod_path, glob_pattern))
            continue
        import_name = os.path.basename(file_path)
        mod_data = {
            'alt_name': inverse_name_table[mod_name],
            'filepath': os.path.join(file_path, 'tests', profile_namespace),
            'base_path': '{}.tests.{}'.format(import_name, profile_namespace),
            'files': {}
        }
        tests = _discover_module_tests(import_name, mod_data)
        if tests:
            module_data[mod_name] = tests

    # short key (test/class/file/module name) -> runnable pytest path
    test_index = {}
    conflicted_keys = []

    def add_to_index(key, path):
        """Register `key` -> `path`; on a cross-module clash, re-register both
        entries under 'module.key' and remember the bare key for removal."""
        from azdev.utilities import extract_module_name

        # NOTE(review): falls back to the enclosing loop's `mod_name` when key
        # is falsy — relies on closure over the loop variable.
        key = key or mod_name
        if key in test_index:
            if key not in conflicted_keys:
                conflicted_keys.append(key)
            mod1 = extract_module_name(path)
            mod2 = extract_module_name(test_index[key])
            if mod1 != mod2:
                # resolve conflicted keys by prefixing with the module name and a dot (.)
                logger.warning("'%s' exists in both '%s' and '%s'. Resolve using `%s.%s` or `%s.%s`",
                               key, mod1, mod2, mod1, key, mod2, key)
                test_index['{}.{}'.format(mod1, key)] = path
                test_index['{}.{}'.format(mod2, key)] = test_index[key]
            else:
                # Same module twice: cannot disambiguate automatically.
                logger.error("'%s' exists twice in the '%s' module. "
                             "Please rename one or both and re-run --discover.", key, mod1)
        else:
            test_index[key] = path

    # build the index
    for mod_name, mod_data in module_data.items():
        # don't add empty mods to the index
        if not mod_data:
            continue
        mod_path = mod_data['filepath']
        for file_name, file_data in mod_data['files'].items():
            file_path = os.path.join(mod_path, file_name) + '.py'
            for class_name, test_list in file_data.items():
                for test_name in test_list:
                    test_path = '{}::{}::{}'.format(file_path, class_name, test_name)
                    add_to_index(test_name, test_path)
                class_path = '{}::{}'.format(file_path, class_name)
                add_to_index(class_name, class_path)
            add_to_index(file_name, file_path)
        add_to_index(mod_name, mod_path)
        add_to_index(mod_data['alt_name'], mod_path)

    # remove the conflicted keys since they would arbitrarily point to a random implementation
    for key in conflicted_keys:
        del test_index[key]

    return test_index
def run_linter(modules=None, rule_types=None, rules=None):
    """Run the CLI linter over the selected modules.

    Loads the full command table and help, trims both to the selected modules,
    applies per-module rule exclusions from ``linter_exclusions.yml``, runs the
    requested rule types, and exits the process with the linter's exit code.

    :param modules: module names to lint (None = all).
    :param rule_types: subset of {'params', 'commands', 'command_groups',
        'help_entries'} to run (None = all).
    :param rules: specific rule names to include.
    :raises CLIError: when no modules or no commands are selected.
    """
    require_azure_cli()

    from azure.cli.core import get_default_cli  # pylint: disable=import-error
    from azure.cli.core.file_util import (  # pylint: disable=import-error
        get_all_help, create_invoker_and_load_cmds_and_args)

    heading('CLI Linter')

    # needed to remove helps from azdev
    azdev_helps = helps.copy()
    exclusions = {}
    selected_modules = get_path_table(include_only=modules)

    if not selected_modules:
        raise CLIError('No modules selected.')

    selected_mod_names = list(selected_modules['mod'].keys()) + list(selected_modules['core'].keys()) + \
        list(selected_modules['ext'].keys())
    selected_mod_paths = list(selected_modules['mod'].values()) + list(selected_modules['core'].values()) + \
        list(selected_modules['ext'].values())

    if selected_mod_names:
        display('Modules: {}\n'.format(', '.join(selected_mod_names)))

    # collect all rule exclusions
    for path in selected_mod_paths:
        exclusion_path = os.path.join(path, 'linter_exclusions.yml')
        if os.path.isfile(exclusion_path):
            # Use a context manager so the handle is closed, and safe_load so a
            # repo-provided YAML file cannot construct arbitrary Python objects
            # (yaml.load without a Loader is deprecated in PyYAML >= 5.1).
            with open(exclusion_path) as exclusion_file:
                mod_exclusions = yaml.safe_load(exclusion_file)
            # An empty YAML file parses to None; guard the update.
            exclusions.update(mod_exclusions or {})

    start = time.time()
    display('Initializing linter with command table and help files...')
    az_cli = get_default_cli()

    # load commands, args, and help
    create_invoker_and_load_cmds_and_args(az_cli)
    loaded_help = get_all_help(az_cli)

    stop = time.time()
    logger.info('Commands and help loaded in %i sec', stop - start)
    command_loader = az_cli.invocation.commands_loader

    # format loaded help
    loaded_help = {data.command: data for data in loaded_help if data.command}

    # load yaml help
    help_file_entries = {}
    for entry_name, help_yaml in helps.items():
        # ignore help entries from azdev itself, unless it also coincides
        # with a CLI or extension command name.
        if entry_name in azdev_helps and entry_name not in command_loader.command_table:
            continue
        help_entry = yaml.safe_load(help_yaml)
        help_file_entries[entry_name] = help_entry

    # trim command table and help to just selected_modules
    command_loader, help_file_entries = filter_modules(
        command_loader, help_file_entries, modules=selected_mod_names)

    if not command_loader.command_table:
        raise CLIError('No commands selected to check.')

    # Instantiate and run Linter
    linter_manager = LinterManager(command_loader=command_loader,
                                   help_file_entries=help_file_entries,
                                   loaded_help=loaded_help,
                                   exclusions=exclusions,
                                   rule_inclusions=rules)

    subheading('Results')
    logger.info('Running linter: %i commands, %i help entries',
                len(command_loader.command_table), len(help_file_entries))
    exit_code = linter_manager.run(
        run_params=not rule_types or 'params' in rule_types,
        run_commands=not rule_types or 'commands' in rule_types,
        run_command_groups=not rule_types or 'command_groups' in rule_types,
        run_help_files_entries=not rule_types or 'help_entries' in rule_types)
    sys.exit(exit_code)
def check_load_time(runs=3):
    """Measure per-module `az` load time over several runs and fail on regressions.

    Runs ``az -h --debug`` ``runs`` times (plus one warm-up run that is
    discarded), parses the 'Loaded module' debug lines, and compares each
    module's mean load time against its threshold.

    :param runs: number of timed runs (a warm-up run is added and ignored).
    :raises CLIError: when one or more modules exceed their threshold.
    """
    require_azure_cli()

    heading('Module Load Performance')

    # Captures the quoted module name and the trailing load-time number from
    # lines like: DEBUG: Loaded module 'vm' in 0.012 seconds
    regex = r"[^']*'([^']*)'[\D]*([\d\.]*)"

    # module name -> list of per-run load times (ms); TOTAL accumulates per-run sums
    results = {TOTAL: []}

    # Time the module loading X times
    for i in range(0, runs + 1):
        lines = cmd('az -h --debug', show_stderr=True).result
        if i == 0:
            # Ignore the first run since it can be longer due to *.pyc file compilation
            continue
        try:
            lines = lines.decode().splitlines()
        except AttributeError:
            # Output was already str, not bytes.
            lines = lines.splitlines()
        total_time = 0
        for line in lines:
            if line.startswith('DEBUG: Loaded module'):
                matches = re.match(regex, line)
                mod = matches.group(1)
                # seconds -> milliseconds
                val = float(matches.group(2)) * 1000
                total_time = total_time + val
                if mod in results:
                    results[mod].append(val)
                else:
                    results[mod] = [val]
        results[TOTAL].append(total_time)

    passed_mods = {}
    failed_mods = {}

    mods = sorted(results.keys())
    bubble_found = False
    for mod in mods:
        val = results[mod]
        mean_val = mean(val)
        stdev_val = pstdev(val)
        threshold = THRESHOLDS.get(mod) or DEFAULT_THRESHOLD
        statistics = {
            'average': mean_val,
            'stdev': stdev_val,
            'threshold': threshold,
            'values': val
        }
        if mean_val > threshold:
            if not bubble_found and mean_val < 30:
                # This temporary measure allows one floating performance
                # failure up to 30 ms. See issue #6224 and #6218.
                bubble_found = True
                passed_mods[mod] = statistics
            else:
                failed_mods[mod] = statistics
        else:
            passed_mods[mod] = statistics

    subheading('Results')
    if failed_mods:
        display('== PASSED MODULES ==')
        display_table(passed_mods)
        display('\nFAILED MODULES')
        display_table(failed_mods)
        raise CLIError("""
FAILED: Some modules failed. If values are close to the threshold, rerun. If values are large, check that you do not have top-level imports like azure.mgmt or msrestazure in any modified files.
""")
    display('== PASSED MODULES ==')
    display_table(passed_mods)
    display('\nPASSED: Average load time all modules: {} ms'.format(
        int(passed_mods[TOTAL]['average'])))
def _create_package(prefix, repo_path, is_ext, name='test', display_name=None, display_name_plural=None, required_sdk=None, client_name=None, operation_name=None, sdk_property=None, not_preview=False, local_sdk=None):
    """Scaffold a new CLI module or extension package from Jinja templates.

    Creates the folder tree under ``repo_path``, optionally vendors or declares
    an SDK dependency, renders the module/test source files, and (for
    extensions) pip-installs the new package in editable mode.

    :param prefix: package-name prefix (e.g. 'azure-cli-' or 'azext_').
    :param repo_path: repo root in which to create the package.
    :param is_ext: True to scaffold an extension, False for a command module.
    :param name: short name of the new module/extension.
    :param display_name: human-readable name (defaults to name.capitalize()).
    :param display_name_plural: plural display name (defaults to display_name + 's').
    :param required_sdk: SDK requirement 'NAME==VER' (mutually exclusive with local_sdk).
    :param client_name: SDK client class name for the client factory template.
    :param operation_name: SDK operations class name for the templates.
    :param sdk_property: resource-name property (defaults to '<name>_name').
    :param not_preview: when True, the package is not marked as preview.
    :param local_sdk: path to a local SDK to vendor (extensions only).
    :raises CLIError: on conflicting SDK options, user abort, or pip failure.
    """
    from jinja2 import Environment, PackageLoader

    if local_sdk and required_sdk:
        raise CLIError(
            'usage error: --local-sdk PATH | --required-sdk NAME==VER')

    # Strip a redundant prefix if the caller already included it in `name`.
    if name.startswith(prefix):
        name = name[len(prefix):]

    heading('Create CLI {}: {}{}'.format('Extension' if is_ext else 'Module', prefix, name))

    # package_name is how the item should show up in `pip list`
    package_name = '{}{}'.format(prefix, name.replace(
        '_', '-')) if not is_ext else name
    display_name = display_name or name.capitalize()

    # Template context shared by all rendered files.
    kwargs = {
        'name': name,
        'mod_path': '{}{}'.format(prefix, name) if is_ext else 'azure.cli.command_modules.{}'.format(name),
        'display_name': display_name,
        'display_name_plural': display_name_plural or '{}s'.format(display_name),
        'loader_name': '{}CommandsLoader'.format(name.capitalize()),
        'pkg_name': package_name,
        'ext_long_name': '{}{}'.format(prefix, name) if is_ext else None,
        'is_ext': is_ext,
        'is_preview': not not_preview
    }

    new_package_path = os.path.join(repo_path, package_name)
    if os.path.isdir(new_package_path):
        # Confirm before clobbering an existing package directory.
        if not prompt_y_n(
                "{} '{}' already exists. Overwrite?".format(
                    'Extension' if is_ext else 'Module', package_name),
                default='n'):
            raise CLIError('aborted by user')

    ext_folder = '{}{}'.format(prefix, name) if is_ext else None

    # create folder tree
    if is_ext:
        _ensure_dir(
            os.path.join(new_package_path, ext_folder, 'tests', 'latest'))
        _ensure_dir(os.path.join(new_package_path, ext_folder, 'vendored_sdks'))
    else:
        _ensure_dir(os.path.join(new_package_path, 'tests', 'latest'))

    env = Environment(loader=PackageLoader('azdev', 'mod_templates'))

    # determine dependencies
    dependencies = []

    if is_ext:
        # Extensions vendor their SDK (downloaded or copied locally) instead of
        # declaring it as a pip dependency.
        if required_sdk:
            _download_vendored_sdk(required_sdk,
                                   path=os.path.join(new_package_path, ext_folder, 'vendored_sdks'))
        elif local_sdk:
            _copy_vendored_sdk(
                local_sdk,
                os.path.join(new_package_path, ext_folder, 'vendored_sdks'))
        sdk_path = None
        if any([local_sdk, required_sdk]):
            sdk_path = '{}{}.vendored_sdks'.format(prefix, package_name)
        kwargs.update({
            'sdk_path': sdk_path,
            'client_name': client_name,
            'operation_name': operation_name,
            'sdk_property': sdk_property or '{}_name'.format(name)
        })
    else:
        # Modules declare the SDK as a setup.py dependency instead of vendoring.
        if required_sdk:
            # Split 'NAME<op>VERSION' into its components for the templates.
            version_regex = r'(?P<name>[a-zA-Z-]+)(?P<op>[~<>=]*)(?P<version>[\d.]*)'
            version_comps = re.compile(version_regex).match(required_sdk)
            sdk_kwargs = version_comps.groupdict()
            kwargs.update({
                'sdk_path': sdk_kwargs['name'].replace('-', '.'),
                'client_name': client_name,
                'operation_name': operation_name,
            })
            dependencies.append("'{}'".format(required_sdk))
        else:
            dependencies.append('# TODO: azure-mgmt-<NAME>==<VERSION>')
        kwargs.update({'sdk_property': sdk_property or '{}_name'.format(name)})

    kwargs['dependencies'] = dependencies

    # generate code for root level
    dest_path = new_package_path
    if is_ext:
        root_files = ['HISTORY.rst', 'README.rst', 'setup.cfg', 'setup.py']
        _generate_files(env, kwargs, root_files, dest_path)

    dest_path = dest_path if not is_ext else os.path.join(
        dest_path, ext_folder)
    module_files = [{
        'name': '__init__.py',
        'template': 'module__init__.py'
    }, '_client_factory.py', '_help.py', '_params.py',
        '_validators.py', 'commands.py', 'custom.py']
    if is_ext:
        module_files.append('azext_metadata.json')
    _generate_files(env, kwargs, module_files, dest_path)

    dest_path = os.path.join(dest_path, 'tests')
    blank_init = {'name': '__init__.py', 'template': 'blank__init__.py'}
    _generate_files(env, kwargs, blank_init, dest_path)

    dest_path = os.path.join(dest_path, 'latest')
    test_files = [
        blank_init, {
            'name': 'test_{}_scenario.py'.format(name),
            'template': 'test_service_scenario.py'
        }
    ]
    _generate_files(env, kwargs, test_files, dest_path)

    if is_ext:
        result = pip_cmd('install -e {}'.format(new_package_path),
                         "Installing `{}{}`...".format(prefix, name))
        if result.error:
            raise result.error  # pylint: disable=raising-bad-type