def _summarize_test_results(xml_path):
    """Print a summary of a JUnit-style XML test report.

    :param xml_path: path to the XML results file produced by the test run.
    """
    import xml.etree.ElementTree as ElementTree
    subheading('Results')

    report_root = ElementTree.parse(xml_path).getroot()
    # the aggregate counters live as attributes on the root element
    summary = {key: report_root.get(key) for key in ('time', 'tests', 'skips', 'failures', 'errors')}
    display('Time: {time} sec\tTests: {tests}\tSkipped: {skips}\tFailures: {failures}\tErrors: {errors}'.format(
        **summary))

    failed_cases = []
    for case in report_root.findall('testcase'):
        if not case.findall('failure'):
            continue
        # keep only the trailing "module.class" portion of the dotted classname
        trimmed_classname = '.'.join(case.get('classname').split('.')[-2:])
        failed_cases.append('{}.{}'.format(trimmed_classname, case.get('name')))

    if failed_cases:
        subheading('FAILURES')
        for case_name in failed_cases:
            display(case_name)
    display('')
def check_license_headers():
    """Verify every Python file in the CLI repo carries the required license header.

    Walks the CLI repo, skipping the ``env`` virtual-environment folder and the
    generated ``azure_bdist_wheel.py`` files.

    :raises CLIError: listing every file missing LICENSE_HEADER.
    """
    heading('Verify License Headers')

    cli_path = get_cli_repo_path()
    env_path = os.path.join(cli_path, 'env')

    files_without_header = []
    for current_dir, _, files in os.walk(cli_path):
        # skip the virtual environment folder entirely
        if current_dir.startswith(env_path):
            continue

        file_itr = (os.path.join(current_dir, p) for p in files
                    if p.endswith('.py') and p != 'azure_bdist_wheel.py')
        for python_file in file_itr:
            # read as UTF-8 explicitly so the check does not depend on the
            # platform default encoding (matches the extension-aware variant)
            with open(python_file, 'r', encoding='utf-8') as f:
                file_text = f.read()

            if file_text and LICENSE_HEADER not in file_text:
                # FIX: python_file is already joined with current_dir above;
                # joining again could corrupt the recorded path.
                files_without_header.append(python_file)

    subheading('Results')
    if files_without_header:
        raise CLIError(
            "{}\nError: {} files don't have the required license headers.".format(
                '\n'.join(files_without_header), len(files_without_header)))
    display('License headers verified OK.')
def check_history(modules=None):
    """Verify that README and HISTORY files for the selected modules render.

    :param modules: optional list of module names to restrict the check to.

    Exits the process with status 1 when any module fails to render.
    """
    # TODO: Does not work with extensions
    path_table = get_path_table(include_only=modules)
    selected_modules = list(path_table['core'].items()) + list(path_table['mod'].items())

    heading('Verify History')

    sorted_names = sorted(name for name, _ in selected_modules)
    display('Verifying README and HISTORY files for modules: {}'.format(' '.join(sorted_names)))

    failed_mods = []
    for mod_name, mod_path in selected_modules:
        render_errors = _check_readme_render(mod_path)
        if not render_errors:
            continue
        failed_mods.append(mod_name)
        subheading('{} errors'.format(mod_name))
        for render_error in render_errors:
            logger.error('%s\n', render_error)

    subheading('Results')
    if failed_mods:
        display('The following modules have invalid README/HISTORYs:')
        logger.error('\n'.join(failed_mods))
        logger.warning('See above for the full warning/errors')
        logger.warning('note: Line numbers in the errors map to the long_description of your setup.py.')
        sys.exit(1)
    display('OK')
def check_document_map():
    """Verify the CLI documentation source map against the help files on disk.

    :raises CLIError: when the map references missing files or omits existing ones.
    """
    heading('Verify Document Map')

    cli_repo = get_cli_repo_path()
    map_path = os.path.join(cli_repo, DOC_SOURCE_MAP_PATH)
    help_files_in_map = _get_help_files_in_map(map_path)
    help_files_not_found = _map_help_files_not_found(cli_repo, help_files_in_map)
    help_files_to_add_to_map = _help_files_not_in_map(cli_repo, help_files_in_map)

    subheading('Results')
    if not help_files_not_found and not help_files_to_add_to_map:
        display('Verified {} OK.'.format(DOC_MAP_NAME))
        return

    # assemble one combined error report covering both failure modes
    error_lines = ['Errors whilst verifying {}!'.format(DOC_MAP_NAME)]
    if help_files_not_found:
        error_lines.append('The following files are in {} but do not exist:'.format(DOC_MAP_NAME))
        error_lines.extend(help_files_not_found)
    if help_files_to_add_to_map:
        error_lines.append('The following files should be added to {}:'.format(DOC_MAP_NAME))
        error_lines.extend(help_files_to_add_to_map)
    raise CLIError('\n'.join(error_lines))
def check_style(modules=None, pylint=False, pep8=False):
    """Run style checks (flake8 and/or pylint) over the selected modules.

    :param modules: optional list of module names to restrict the check to.
    :param pylint: run pylint (requires an installed Azure CLI).
    :param pep8: run flake8.

    If neither flag is given, both tools run. Exits the process with the
    sum of the tools' exit codes (0 only when everything passed).
    """
    heading('Style Check')

    selected_modules = get_path_table(include_only=modules)
    pep8_result = None
    pylint_result = None

    if pylint:
        # pylint resolves imports against the installed CLI, so it must be present
        try:
            require_azure_cli()
        except CLIError:
            raise CLIError('usage error: --pylint requires Azure CLI to be installed.')

    if not selected_modules:
        raise CLIError('No modules selected.')

    mod_names = list(selected_modules['mod'].keys()) + list(selected_modules['core'].keys())
    ext_names = list(selected_modules['ext'].keys())

    if mod_names:
        display('Modules: {}\n'.format(', '.join(mod_names)))
    if ext_names:
        display('Extensions: {}\n'.format(', '.join(ext_names)))

    # if neither flag provided, same as if both were provided
    if not any([pylint, pep8]):
        pep8 = True
        pylint = True

    exit_code_sum = 0
    if pep8:
        pep8_result = _run_pep8(selected_modules)
        exit_code_sum += pep8_result.exit_code

    if pylint:
        pylint_result = _run_pylint(selected_modules)
        exit_code_sum += pylint_result.exit_code

    display('')
    subheading('Results')

    # print success messages first
    if pep8_result and not pep8_result.error:
        display('Flake8: PASSED')
    if pylint_result and not pylint_result.error:
        display('Pylint: PASSED')

    display('')

    # print error messages last
    if pep8_result and pep8_result.error:
        logger.error(pep8_result.error.output.decode('utf-8'))
        logger.error('Flake8: FAILED\n')
    if pylint_result and pylint_result.error:
        logger.error(pylint_result.error.output.decode('utf-8'))
        logger.error('Pylint: FAILED\n')

    sys.exit(exit_code_sum)
def delete_groups(prefixes=None, older_than=6, product='azurecli', cause='automation', yes=False):
    """Delete Azure resource groups selected either by name prefix or by tags.

    :param prefixes: optional list of name prefixes; when given, tag filtering is skipped.
    :param older_than: hour threshold used against the group's 'date' tag.
    :param product: required value of the group's 'product' tag (tag mode only).
    :param cause: required value of the group's 'cause' tag (tag mode only).
    :param yes: delete without prompting for confirmation.

    :raises CLIError: if no groups match, or the user cancels at the prompt.
    """
    from datetime import datetime, timedelta
    require_azure_cli()

    groups = json.loads(run_cmd('az group list -ojson').result)
    groups_to_delete = []

    def _filter_by_tags():
        # select groups whose tags identify them as automation artifacts
        for group in groups:
            group = Data(**group)
            if not group.tags:  # pylint: disable=no-member
                continue
            tags = Data(**group.tags)  # pylint: disable=no-member
            try:
                date_tag = datetime.strptime(tags.date, '%Y-%m-%dT%H:%M:%SZ')
                curr_time = datetime.utcnow()
                # NOTE(review): `<=` keeps groups whose 'date' tag is NEWER than
                # older_than + 1 hours, which reads as the inverse of the
                # parameter name -- confirm the intended direction.
                if (tags.product == product and tags.cause == cause and
                        (curr_time - date_tag <= timedelta(hours=older_than + 1))):
                    groups_to_delete.append(group.name)
            except AttributeError:
                # missing one of the expected tags -- not ours to delete
                continue

    def _filter_by_prefix():
        # select groups whose name starts with any of the given prefixes
        for group in groups:
            group = Data(**group)
            for prefix in prefixes:
                if group.name.startswith(prefix):
                    groups_to_delete.append(group.name)

    def _delete():
        # --no-wait: fire off all deletions without blocking on each
        for group in groups_to_delete:
            run_cmd('az group delete -g {} -y --no-wait'.format(group), message=True)

    if prefixes:
        logger.info('Filter by prefix')
        _filter_by_prefix()
    else:
        logger.info('Filter by tags')
        _filter_by_tags()

    if not groups_to_delete:
        raise CLIError('No groups meet the criteria to delete.')

    if yes:
        _delete()
    else:
        subheading('Groups to Delete')
        for group in groups_to_delete:
            display('\t{}'.format(group))
        if prompt_y_n('Delete {} resource groups?'.format(len(groups_to_delete)), 'y'):
            _delete()
        else:
            raise CLIError('Command cancelled.')
def verify_versions():
    """Compare local CLI core module versions against their PyPI releases.

    Reports each module's local and public version and exits with status 1
    if any module needs its version bumped.

    :raises CLIError: if no modules are available to test.
    """
    import tempfile
    import shutil

    require_azure_cli()

    heading('Verify CLI Versions')

    path_table = get_path_table()
    modules = list(path_table['core'].items())
    modules = [x for x in modules if x[0] not in EXCLUDED_MODULES]

    if not modules:
        raise CLIError('No modules selected to test.')

    display('MODULES: {}'.format(', '.join([x[0] for x in modules])))

    results = {}

    original_cwd = os.getcwd()
    temp_dir = tempfile.mkdtemp()
    try:
        for mod, mod_path in modules:
            if not mod.startswith(COMMAND_MODULE_PREFIX) and mod != 'azure-cli':
                mod = '{}{}'.format(COMMAND_MODULE_PREFIX, mod)
            results[mod] = {}
            results.update(_compare_module_against_pypi(results, temp_dir, mod, mod_path))
    finally:
        # FIX: always remove the temp dir and restore the working directory,
        # even if a comparison raises mid-loop.
        shutil.rmtree(temp_dir)
        os.chdir(original_cwd)

    logger.info('Module'.ljust(40) + 'Local Version'.rjust(20) + 'Public Version'.rjust(20))  # pylint: disable=logging-not-lazy
    for mod, data in results.items():
        logger.info(mod.ljust(40) + data['local_version'].rjust(20) + data['public_version'].rjust(20))

    bump_mods = {k: v for k, v in results.items() if v['status'] == 'BUMP'}
    subheading('RESULTS')
    if bump_mods:
        logger.error('The following modules need their versions bumped. '
                     'Scroll up for details: %s', ', '.join(bump_mods.keys()))
        # FIX: corrected typo "resuling" -> "resulting" in the warning text
        logger.warning('\nNote that before changing versions, you should consider '
                       'running `git clean` to remove untracked files from your repo. '
                       'Files that were once tracked but removed from the source may '
                       'still be on your machine, resulting in false positives.')
        sys.exit(1)
    else:
        display('OK!')
def publish_extensions(extensions, storage_subscription, storage_account, storage_container, dist_dir='dist',
                       update_index=False, yes=False):
    """Build extension WHLs and upload them to blob storage via `az` commands.

    :param extensions: names of the extensions to build and publish.
    :param storage_subscription: subscription ID owning the storage account.
    :param storage_account: storage account name.
    :param storage_container: blob container name.
    :param dist_dir: folder the WHLs are built into (cleared first, best-effort).
    :param update_index: update the extension index with the uploaded URLs.
    :param yes: skip the "blob already exists" confirmation prompt.
    """
    heading('Publish Extensions')

    require_azure_cli()

    # rebuild the extensions
    subheading('Building WHLs')
    try:
        shutil.rmtree(dist_dir)
    except Exception as ex:  # pylint: disable=broad-except
        # best effort: a missing or locked dist dir is not fatal
        logger.debug("Unable to clear folder '%s'. Error: %s", dist_dir, ex)
    build_extensions(extensions, dist_dir=dist_dir)

    whl_files = find_files(dist_dir, '*.whl')
    uploaded_urls = []

    subheading('Uploading WHLs')
    for whl_path in whl_files:
        whl_file = os.path.split(whl_path)[-1]
        # check if extension already exists unless user opted not to
        if not yes:
            command = 'az storage blob exists --subscription {} --account-name {} -c {} -n {}'.format(
                storage_subscription, storage_account, storage_container, whl_file)
            exists = json.loads(cmd(command).result)['exists']
            if exists:
                if not prompt_y_n(
                        "{} already exists. You may need to bump the extension version. Replace?"
                        .format(whl_file), default='n'):
                    logger.warning("Skipping '%s'...", whl_file)
                    continue
        # upload the WHL file
        command = 'az storage blob upload --subscription {} --account-name {} -c {} -n {} -f {}'.format(
            storage_subscription, storage_account, storage_container, whl_file, os.path.abspath(whl_path))
        cmd(command, "Uploading '{}'...".format(whl_file))
        # retrieve the public URL of the uploaded blob
        command = 'az storage blob url --subscription {} --account-name {} -c {} -n {} -otsv'.format(
            storage_subscription, storage_account, storage_container, whl_file)
        url = cmd(command).result

        logger.info(url)
        uploaded_urls.append(url)

    if update_index:
        subheading('Updating Index')
        update_extension_index(uploaded_urls)
    subheading('Published')
    display(uploaded_urls)
    if not update_index:
        logger.warning('You still need to update the index for your changes with `az extension update-index`.')
def check_license_headers():
    """Verify license headers for all Python files in the CLI and extension repos.

    Checks the CLI repo plus any configured extension repos; files in the
    ignored subdirectories, and the generated ``azure_bdist_wheel.py`` files,
    are skipped. A file passes if it contains any one of the accepted header
    variants (plain, wrapped, or codegen).

    :raises CLIError: listing every file missing an accepted header.
    """
    heading('Verify License Headers')

    cli_path = get_cli_repo_path()
    all_paths = [cli_path]
    try:
        ext_repo = get_ext_repo_paths()
        for path in ext_repo:
            all_paths.append(path)
    except CLIError:
        # no extension repos configured -- check the CLI repo only
        display("No CLI ext path, running check only on modules")

    files_without_header = []
    for path in all_paths:
        py_files = pathlib.Path(path).glob('**' + os.path.sep + '*.py')
        for py_file in py_files:
            py_file = str(py_file)
            if py_file.endswith('azure_bdist_wheel.py'):
                continue
            for ignore_token in _IGNORE_SUBDIRS:
                if ignore_token in py_file:
                    break
            else:
                # for/else: runs only when no ignore token matched
                with open(str(py_file), 'r', encoding='utf-8') as f:
                    file_text = f.read()

                if not file_text:
                    continue

                # accept any one of the three header variants
                test_results = [LICENSE_HEADER in file_text,
                                WRAPPED_LICENSE_HEADER in file_text,
                                CODEGEN_LICENSE_HEADER in file_text]
                if not any(test_results):
                    files_without_header.append(py_file)

    subheading('Results')
    if files_without_header:
        raise CLIError(
            "{}\nError: {} files don't have the required license headers.".format(
                '\n'.join(files_without_header), len(files_without_header)))
    display('License headers verified OK.')
def run_linter(modules=None, rule_types=None, rules=None):
    """Run the CLI command/help linter over the selected modules.

    :param modules: optional list of module/extension names to check.
    :param rule_types: optional subset of {'params', 'commands',
        'command_groups', 'help_entries'}; all run when omitted.
    :param rules: optional list of specific rule names to include.

    Exits the process with the linter's exit code.
    """
    require_azure_cli()

    from azure.cli.core import get_default_cli  # pylint: disable=import-error
    from azure.cli.core.file_util import (  # pylint: disable=import-error
        get_all_help, create_invoker_and_load_cmds_and_args)

    heading('CLI Linter')

    # needed to remove helps from azdev
    azdev_helps = helps.copy()
    exclusions = {}
    selected_modules = get_path_table(include_only=modules)

    if not selected_modules:
        raise CLIError('No modules selected.')

    selected_mod_names = list(selected_modules['mod'].keys()) + list(selected_modules['core'].keys()) + \
        list(selected_modules['ext'].keys())
    selected_mod_paths = list(selected_modules['mod'].values()) + list(selected_modules['core'].values()) + \
        list(selected_modules['ext'].values())

    if selected_mod_names:
        display('Modules: {}\n'.format(', '.join(selected_mod_names)))

    # collect all rule exclusions
    for path in selected_mod_paths:
        exclusion_path = os.path.join(path, 'linter_exclusions.yml')
        if os.path.isfile(exclusion_path):
            # FIX: close the file handle (was leaked via yaml.load(open(...)))
            # and use safe_load so the YAML cannot construct arbitrary objects.
            # Guard against an empty file (safe_load returns None).
            with open(exclusion_path) as f:
                mod_exclusions = yaml.safe_load(f)
            exclusions.update(mod_exclusions or {})

    start = time.time()
    display('Initializing linter with command table and help files...')
    az_cli = get_default_cli()

    # load commands, args, and help
    create_invoker_and_load_cmds_and_args(az_cli)
    loaded_help = get_all_help(az_cli)

    stop = time.time()
    logger.info('Commands and help loaded in %i sec', stop - start)
    command_loader = az_cli.invocation.commands_loader

    # format loaded help
    loaded_help = {data.command: data for data in loaded_help if data.command}

    # load yaml help
    help_file_entries = {}
    for entry_name, help_yaml in helps.items():
        # ignore help entries from azdev itself, unless it also coincides
        # with a CLI or extension command name.
        if entry_name in azdev_helps and entry_name not in command_loader.command_table:
            continue
        # FIX: safe_load for consistency with the exclusion files above
        help_entry = yaml.safe_load(help_yaml)
        help_file_entries[entry_name] = help_entry

    # trim command table and help to just selected_modules
    command_loader, help_file_entries = filter_modules(
        command_loader, help_file_entries, modules=selected_mod_names)

    if not command_loader.command_table:
        raise CLIError('No commands selected to check.')

    # Instantiate and run Linter
    linter_manager = LinterManager(command_loader=command_loader,
                                   help_file_entries=help_file_entries,
                                   loaded_help=loaded_help,
                                   exclusions=exclusions,
                                   rule_inclusions=rules)

    subheading('Results')
    logger.info('Running linter: %i commands, %i help entries',
                len(command_loader.command_table), len(help_file_entries))
    exit_code = linter_manager.run(
        run_params=not rule_types or 'params' in rule_types,
        run_commands=not rule_types or 'commands' in rule_types,
        run_command_groups=not rule_types or 'command_groups' in rule_types,
        run_help_files_entries=not rule_types or 'help_entries' in rule_types)
    sys.exit(exit_code)
def setup(cli_path=None, ext_repo_path=None, ext=None, deps=None):
    """Set up an Azure CLI development environment inside a virtual env.

    :param cli_path: path to the azure-cli repo, 'EDGE' for the edge build,
        "pypi" for the released package, or a Flag sentinel to auto-detect.
    :param ext_repo_path: list of extension repo paths to register.
    :param ext: list of extension names to install ('*' installs all).
    :param deps: dependency-resolution option passed through to _install_cli.

    With no arguments, runs the interactive setup flow instead.
    :raises CLIError: on invalid combinations or when the repo cannot be found.
    """
    require_virtual_env()

    start = time.time()

    heading('Azure CLI Dev Setup')

    ext_to_install = []
    if not any([cli_path, ext_repo_path, ext]):
        # no arguments at all -> interactive mode
        cli_path, ext_repo_path, ext_to_install = _interactive_setup()
    else:
        if cli_path == "pypi":
            cli_path = None
        # otherwise assume programmatic setup
        if cli_path:
            CLI_SENTINEL = 'azure-cli.pyproj'
            if cli_path == Flag:
                # Flag sentinel means "auto-detect": search upward for the sentinel file
                cli_path = find_file(CLI_SENTINEL)
            if not cli_path:
                raise CLIError(
                    'Unable to locate your CLI repo. Things to check:'
                    '\n Ensure you have cloned the repo. '
                    '\n Specify the path explicitly with `-c PATH`. '
                    '\n If you run with `-c` to autodetect, ensure you are running '
                    'this command from a folder upstream of the repo.')
            if cli_path != 'EDGE':
                cli_path = _check_path(cli_path, CLI_SENTINEL)
            display('Azure CLI:\n {}\n'.format(cli_path))
        else:
            display('Azure CLI:\n PyPI\n')

        # must add the necessary repo to add an extension
        if ext and not ext_repo_path:
            raise CLIError(
                'usage error: --repo EXT_REPO [EXT_REPO ...] [--ext EXT_NAME ...]')

        # reset dev_sources before re-registering extension repos
        get_azure_config().set_value('extension', 'dev_sources', '')
        if ext_repo_path:
            # add extension repo(s)
            add_extension_repo(ext_repo_path)
            display('Azure CLI extension repos:\n {}'.format(
                '\n '.join([os.path.abspath(x) for x in ext_repo_path])))

        if ext == ['*']:
            ext_to_install = [x['path'] for x in list_extensions()]
        elif ext:
            # add extension(s)
            available_extensions = [x['name'] for x in list_extensions()]
            not_found = [x for x in ext if x not in available_extensions]
            if not_found:
                raise CLIError(
                    "The following extensions were not found. Ensure you have added "
                    "the repo using `--repo/-r PATH`.\n {}".format(
                        '\n '.join(not_found)))
            ext_to_install = [
                x['path'] for x in list_extensions() if x['name'] in ext
            ]

        if ext_to_install:
            display('\nAzure CLI extensions:\n {}'.format(
                '\n '.join(ext_to_install)))

    dev_sources = get_azure_config().get('extension', 'dev_sources', None)

    # save data to config files
    config = get_azdev_config()
    config.set_value('ext', 'repo_paths', dev_sources if dev_sources else '_NONE_')
    config.set_value('cli', 'repo_path', cli_path if cli_path else '_NONE_')

    # install packages
    subheading('Installing packages')

    # upgrade to latest pip
    pip_cmd('install --upgrade pip -q', 'Upgrading pip...')

    _install_cli(cli_path, deps=deps)
    _install_extensions(ext_to_install)
    _copy_config_files()

    end = time.time()
    elapsed_min = int((end - start) / 60)
    elapsed_sec = int(end - start) % 60
    display('\nElapsed time: {} min {} sec'.format(elapsed_min, elapsed_sec))

    subheading('Finished dev setup!')
def _interactive_setup():
    """Interactively gather the CLI repo path, extension repos and extensions.

    :returns: tuple ``(cli_path, ext_repos, exts)`` where ``cli_path`` is a
        repo path, 'EDGE', or None (PyPI); ``ext_repos`` is a list of repo
        paths; ``exts`` is a list of extension source paths to install.
    :raises CLIError: if the user declines the final confirmation.
    """
    from knack.prompting import prompt_y_n, prompt
    while True:
        cli_path = None
        ext_repos = []
        exts = []

        # CLI Installation
        if prompt_y_n('Do you plan to develop CLI modules?'):
            display(
                "\nGreat! Please enter the path to your azure-cli repo, 'EDGE' to install "
                "the latest developer edge build or simply press "
                "RETURN and we will attempt to find your repo for you.")
            while True:
                cli_path = prompt('\nPath (RETURN to auto-find): ', None)
                cli_path = os.path.abspath(
                    os.path.expanduser(cli_path)) if cli_path else None
                CLI_SENTINEL = 'azure-cli.pyproj'
                if not cli_path:
                    # blank input: search upward from cwd for the sentinel file
                    cli_path = find_file(CLI_SENTINEL)
                if not cli_path:
                    raise CLIError(
                        'Unable to locate your CLI repo. Things to check:'
                        '\n Ensure you have cloned the repo. '
                        '\n Specify the path explicitly with `-c PATH`. '
                        '\n If you run with `-c` to autodetect, ensure you are running '
                        'this command from a folder upstream of the repo.')
                try:
                    if cli_path != 'EDGE':
                        cli_path = _check_path(cli_path, CLI_SENTINEL)
                    display('Found: {}'.format(cli_path))
                    break
                except CLIError as ex:
                    # invalid path: report and re-prompt
                    logger.error(ex)
                    continue
        else:
            display(
                '\nOK. We will install the latest `azure-cli` from PyPI then.')

        def add_ext_repo(path):
            # validate the path before accepting it as an extension repo
            try:
                _check_repo(path)
            except CLIError as ex:
                logger.error(ex)
                return False
            ext_repos.append(path)
            display('Repo {} OK.'.format(path))
            return True

        # Determine extension repos
        # Allows the user to simply press RETURN to use their cwd, assuming they are in their desired extension
        # repo directory. To use multiple extension repos or identify a repo outside the cwd, they must specify
        # the path.
        if prompt_y_n('\nDo you plan to develop CLI extensions?'):
            display(
                '\nGreat! Input the paths for the extension repos you wish to develop for, one per '
                'line. You can add as many repos as you like. (TIP: to quickly get started, press RETURN to '
                'use your current working directory).')
            first_repo = True
            while True:
                msg = '\nPath ({}): '.format(
                    'RETURN to use current directory'
                    if first_repo else 'RETURN to continue')
                ext_repo_path = prompt(msg, None)
                if not ext_repo_path:
                    # blank on the first prompt means "use cwd"; if cwd fails
                    # validation, re-prompt; otherwise blank input ends the loop
                    if first_repo and not add_ext_repo(os.getcwd()):
                        first_repo = False
                        continue
                    break
                add_ext_repo(os.path.abspath(
                    os.path.expanduser(ext_repo_path)))
                first_repo = False
            display(
                '\nTIP: you can manage extension repos later with the `azdev extension repo` commands.'
            )

        # Determine extensions
        if ext_repos:
            if prompt_y_n(
                    '\nWould you like to install certain extensions by default? '
            ):
                display(
                    '\nGreat! Input the names of the extensions you wish to install, one per '
                    'line. You can add as many repos as you like. Use * to install all extensions. '
                    'Press RETURN to continue to the next step.')
                available_extensions = [x['name'] for x in list_extensions()]
                while True:
                    ext_name = prompt('\nName (RETURN to continue): ', None)
                    if not ext_name:
                        break
                    if ext_name == '*':
                        exts = [x['path'] for x in list_extensions()]
                        break
                    if ext_name not in available_extensions:
                        logger.error(
                            "Extension '%s' not found. Check the spelling, and make "
                            "sure you added the repo first!", ext_name)
                        continue
                    display('Extension {} OK.'.format(ext_name))
                    exts.append(
                        next(x['path'] for x in list_extensions()
                             if x['name'] == ext_name))

            display(
                '\nTIP: you can manage extensions later with the `azdev extension` commands.'
            )

        subheading('Summary')
        display('CLI: {}'.format(cli_path if cli_path else 'PyPI'))
        display('Extension repos: {}'.format(' '.join(ext_repos)))
        display('Extensions: \n {}'.format('\n '.join(exts)))
        if prompt_y_n('\nProceed with installation? '):
            return cli_path, ext_repos, exts
        raise CLIError('Installation aborted.')
def run_linter(modules=None, rule_types=None, rules=None, ci_exclusions=None,
               git_source=None, git_target=None, git_repo=None,
               include_whl_extensions=False, min_severity=None,
               save_global_exclusion=False):
    """Run the CLI command/help linter with git-diff filtering and exclusions.

    :param modules: module/extension names, or ['CLI'] / ['EXT'] to restrict scope.
    :param rule_types: optional subset of {'params', 'commands',
        'command_groups', 'help_entries'}; all run when omitted.
    :param rules: optional list of specific rule names to include.
    :param ci_exclusions: forwarded to LinterManager as use_ci_exclusions.
    :param git_source / git_target / git_repo: limit checks to modules changed
        in the given git diff.
    :param include_whl_extensions: also include wheel-installed extensions.
    :param min_severity: minimum LinterSeverity to report.
    :param save_global_exclusion: regenerate the global exclusion file
        (only meaningful with ['CLI'] or ['EXT']).

    Exits the process with the linter's exit code.
    """
    require_azure_cli()

    from azure.cli.core import get_default_cli  # pylint: disable=import-error
    from azure.cli.core.file_util import (  # pylint: disable=import-error
        get_all_help, create_invoker_and_load_cmds_and_args)

    heading('CLI Linter')

    # allow user to run only on CLI or extensions
    cli_only = modules == ['CLI']
    ext_only = modules == ['EXT']
    if cli_only or ext_only:
        modules = None

    # process severity option
    if min_severity:
        try:
            min_severity = LinterSeverity.get_linter_severity(min_severity)
        except ValueError:
            valid_choices = linter_severity_choices()
            raise CLIError(
                "Please specify a valid linter severity. It should be one of: {}"
                .format(", ".join(valid_choices)))

    # needed to remove helps from azdev
    azdev_helps = helps.copy()
    exclusions = {}
    selected_modules = get_path_table(
        include_only=modules, include_whl_extensions=include_whl_extensions)

    if cli_only:
        selected_modules['ext'] = {}
    if ext_only:
        selected_modules['mod'] = {}
        selected_modules['core'] = {}

    # used to upsert global exclusion
    update_global_exclusion = None
    if save_global_exclusion and (cli_only or ext_only):
        if cli_only:
            update_global_exclusion = 'CLI'
            if os.path.exists(
                    os.path.join(get_cli_repo_path(), 'linter_exclusions.yml')):
                os.remove(
                    os.path.join(get_cli_repo_path(), 'linter_exclusions.yml'))
        elif ext_only:
            update_global_exclusion = 'EXT'
            for ext_path in get_ext_repo_paths():
                if os.path.exists(
                        os.path.join(ext_path, 'linter_exclusions.yml')):
                    os.remove(os.path.join(ext_path, 'linter_exclusions.yml'))

    # filter down to only modules that have changed based on git diff
    selected_modules = filter_by_git_diff(selected_modules, git_source,
                                          git_target, git_repo)

    if not any((selected_modules[x] for x in selected_modules)):
        raise CLIError('No modules selected.')

    selected_mod_names = list(selected_modules['mod'].keys()) + list(selected_modules['core'].keys()) + \
        list(selected_modules['ext'].keys())
    selected_mod_paths = list(selected_modules['mod'].values()) + list(selected_modules['core'].values()) + \
        list(selected_modules['ext'].values())

    if selected_mod_names:
        display('Modules: {}\n'.format(', '.join(selected_mod_names)))

    # collect all rule exclusions
    for path in selected_mod_paths:
        exclusion_path = os.path.join(path, 'linter_exclusions.yml')
        if os.path.isfile(exclusion_path):
            # FIX: use a context manager so the file handle is closed
            # (yaml.safe_load(open(...)) leaked the descriptor)
            with open(exclusion_path) as f:
                mod_exclusions = yaml.safe_load(f)
            merge_exclusion(exclusions, mod_exclusions or {})

    global_exclusion_paths = [
        os.path.join(get_cli_repo_path(), 'linter_exclusions.yml')
    ]
    try:
        global_exclusion_paths.extend([
            os.path.join(path, 'linter_exclusions.yml')
            for path in (get_ext_repo_paths() or [])
        ])
    except CLIError:
        # no extension repos configured; only the CLI global exclusion applies
        pass
    for path in global_exclusion_paths:
        if os.path.isfile(path):
            # FIX: same file-handle leak as above
            with open(path) as f:
                mod_exclusions = yaml.safe_load(f)
            merge_exclusion(exclusions, mod_exclusions or {})

    start = time.time()
    display('Initializing linter with command table and help files...')
    az_cli = get_default_cli()

    # load commands, args, and help
    create_invoker_and_load_cmds_and_args(az_cli)
    loaded_help = get_all_help(az_cli)

    stop = time.time()
    logger.info('Commands and help loaded in %i sec', stop - start)
    command_loader = az_cli.invocation.commands_loader

    # format loaded help
    loaded_help = {data.command: data for data in loaded_help if data.command}

    # load yaml help
    help_file_entries = {}
    for entry_name, help_yaml in helps.items():
        # ignore help entries from azdev itself, unless it also coincides
        # with a CLI or extension command name.
        if entry_name in azdev_helps and entry_name not in command_loader.command_table:
            continue
        help_entry = yaml.safe_load(help_yaml)
        help_file_entries[entry_name] = help_entry

    # trim command table and help to just selected_modules
    command_loader, help_file_entries = filter_modules(
        command_loader, help_file_entries, modules=selected_mod_names,
        include_whl_extensions=include_whl_extensions)

    if not command_loader.command_table:
        raise CLIError('No commands selected to check.')

    # Instantiate and run Linter
    linter_manager = LinterManager(
        command_loader=command_loader,
        help_file_entries=help_file_entries,
        loaded_help=loaded_help,
        exclusions=exclusions,
        rule_inclusions=rules,
        use_ci_exclusions=ci_exclusions,
        min_severity=min_severity,
        update_global_exclusion=update_global_exclusion)

    subheading('Results')
    logger.info('Running linter: %i commands, %i help entries',
                len(command_loader.command_table), len(help_file_entries))
    exit_code = linter_manager.run(run_params=not rule_types or 'params' in rule_types,
                                   run_commands=not rule_types or 'commands' in rule_types,
                                   run_command_groups=not rule_types or 'command_groups' in rule_types,
                                   run_help_files_entries=not rule_types or 'help_entries' in rule_types)
    sys.exit(exit_code)
def check_load_time(runs=3):
    """Measure per-module CLI load time by timing `az -h --debug` repeatedly.

    :param runs: number of timed runs (an extra warm-up run is discarded).

    Modules whose mean load time exceeds their threshold may claim one of
    the limited higher-threshold exceptions in THRESHOLDS; any that still
    exceed fail the check.
    :raises CLIError: if any module (or the total) fails its threshold.
    """
    require_azure_cli()

    heading('Module Load Performance')

    # parses lines like: DEBUG: Loaded module 'xyz' in 0.01 seconds
    regex = r"[^']*'(?P<mod>[^']*)'[\D]*(?P<val>[\d\.]*)"

    results = {TOTAL: []}

    # Time the module loading X times
    for i in range(0, runs + 1):
        lines = cmd('az -h --debug', show_stderr=True).result
        if i == 0:
            # Ignore the first run since it can be longer due to *.pyc file compilation
            continue

        try:
            lines = lines.decode().splitlines()
        except AttributeError:
            # already a str on some platforms/Python versions
            lines = lines.splitlines()
        total_time = 0
        for line in lines:
            if line.startswith('DEBUG: Loaded module'):
                matches = re.match(regex, line)
                mod = matches.group('mod')
                val = float(matches.group('val')) * 1000  # seconds -> ms
                total_time = total_time + val
                if mod in results:
                    results[mod].append(val)
                else:
                    results[mod] = [val]
        results[TOTAL].append(total_time)

    passed_mods = {}
    failed_mods = {}

    def _claim_higher_threshold(val):
        # consume one of the limited higher-threshold exceptions, if val fits;
        # NOTE: mutates the module-level THRESHOLDS counters
        avail_thresholds = {k: v for k, v in THRESHOLDS.items() if v}
        new_threshold = None
        for threshold in sorted(avail_thresholds):
            if val < threshold:
                THRESHOLDS[threshold] = THRESHOLDS[threshold] - 1
                new_threshold = threshold
                break
        return new_threshold

    mods = sorted(results.keys())
    for mod in mods:
        val = results[mod]
        mean_val = mean(val)
        stdev_val = pstdev(val)
        threshold = TOTAL_THRESHOLD if mod == TOTAL else DEFAULT_THRESHOLD
        statistics = {
            'average': mean_val,
            'stdev': stdev_val,
            'threshold': threshold,
            'values': val
        }
        if mean_val > threshold:
            # claim a threshold exception if available
            new_threshold = _claim_higher_threshold(mean_val)
            if new_threshold:
                statistics['threshold'] = new_threshold
                passed_mods[mod] = statistics
            else:
                failed_mods[mod] = statistics
        else:
            passed_mods[mod] = statistics

    subheading('Results')
    if failed_mods:
        display('== PASSED MODULES ==')
        display_table(passed_mods)
        display('\nFAILED MODULES')
        display_table(failed_mods)
        raise CLIError("""
FAILED: Some modules failed. If values are close to the threshold, rerun. If values are large, check that you do not have top-level imports like azure.mgmt or msrestazure in any modified files.
""")

    display('== PASSED MODULES ==')
    display_table(passed_mods)
    display('\nPASSED: Average load time all modules: {} ms'.format(
        int(passed_mods[TOTAL]['average'])))
def check_style(modules=None, pylint=False, pep8=False, git_source=None, git_target=None, git_repo=None):
    """Run style checks (flake8 and/or pylint) over selected modules/extensions.

    :param modules: module names, or ['CLI'] / ['EXT'] to restrict scope.
    :param pylint: run pylint (requires an installed Azure CLI).
    :param pep8: run flake8.
    :param git_source / git_target / git_repo: limit checks to modules
        changed in the given git diff.

    If neither tool flag is given, both tools run. Exits the process with
    the sum of the tools' exit codes (0 only when everything passed).
    """
    heading('Style Check')

    # allow user to run only on CLI or extensions
    cli_only = modules == ['CLI']
    ext_only = modules == ['EXT']
    if cli_only or ext_only:
        modules = None

    selected_modules = get_path_table(include_only=modules)

    # remove these two non-modules
    selected_modules['core'].pop('azure-cli-nspkg', None)
    selected_modules['core'].pop('azure-cli-command_modules-nspkg', None)

    pep8_result = None
    pylint_result = None

    if pylint:
        # pylint resolves imports against the installed CLI, so it must be present
        try:
            require_azure_cli()
        except CLIError:
            raise CLIError('usage error: --pylint requires Azure CLI to be installed.')

    # FIX: removed dead `ext_names = None` / `mod_names = None` assignments
    # here -- both names are unconditionally reassigned below.
    if cli_only:
        selected_modules['ext'] = {}
    if ext_only:
        selected_modules['mod'] = {}
        selected_modules['core'] = {}

    # filter down to only modules that have changed based on git diff
    selected_modules = filter_by_git_diff(selected_modules, git_source, git_target, git_repo)

    if not any((selected_modules[x] for x in selected_modules)):
        raise CLIError('No modules selected.')

    mod_names = list(selected_modules['mod'].keys()) + list(selected_modules['core'].keys())
    ext_names = list(selected_modules['ext'].keys())

    if mod_names:
        display('Modules: {}\n'.format(', '.join(mod_names)))
    if ext_names:
        display('Extensions: {}\n'.format(', '.join(ext_names)))

    # if neither flag provided, same as if both were provided
    if not any([pylint, pep8]):
        pep8 = True
        pylint = True

    exit_code_sum = 0
    if pep8:
        pep8_result = _run_pep8(selected_modules)
        exit_code_sum += pep8_result.exit_code

    if pylint:
        pylint_result = _run_pylint(selected_modules)
        exit_code_sum += pylint_result.exit_code

    display('')
    subheading('Results')

    # print success messages first
    if pep8_result and not pep8_result.error:
        display('Flake8: PASSED')
    if pylint_result and not pylint_result.error:
        display('Pylint: PASSED')

    display('')

    # print error messages last
    if pep8_result and pep8_result.error:
        logger.error(pep8_result.error.output.decode('utf-8'))
        logger.error('Flake8: FAILED\n')
    if pylint_result and pylint_result.error:
        logger.error(pylint_result.error.output.decode('utf-8'))
        logger.error('Pylint: FAILED\n')

    sys.exit(exit_code_sum)
def publish_extensions(extensions, storage_account, storage_account_key, storage_container, dist_dir='dist',
                       update_index=False, yes=False):
    """Build extension WHLs and upload them to blob storage via the SDK client.

    :param extensions: names of the extensions to build and publish.
    :param storage_account: storage account name.
    :param storage_account_key: storage account access key.
    :param storage_container: blob container name.
    :param dist_dir: folder the WHLs are built into (cleared first, best-effort).
    :param update_index: update the extension index with the uploaded URLs.
    :param yes: skip the "blob already exists" confirmation prompt.
    """
    from azure.multiapi.storage.v2018_11_09.blob import BlockBlobService

    heading('Publish Extensions')

    require_azure_cli()

    # rebuild the extensions
    subheading('Building WHLs')
    try:
        shutil.rmtree(dist_dir)
    except Exception as ex:  # pylint: disable=broad-except
        # best effort: a missing or locked dist dir is not fatal
        logger.debug("Unable to clear folder '%s'. Error: %s", dist_dir, ex)
    build_extensions(extensions, dist_dir=dist_dir)

    whl_files = find_files(dist_dir, '*.whl')
    uploaded_urls = []

    # FIX: create the service client once, not per file in the loop
    client = BlockBlobService(account_name=storage_account,
                              account_key=storage_account_key)

    subheading('Uploading WHLs')
    for whl_path in whl_files:
        whl_file = os.path.split(whl_path)[-1]

        # check if extension already exists unless user opted not to
        # FIX: only issue the existence round-trip when its result is used
        # (previously it was checked even with --yes and the value discarded)
        if not yes:
            exists = client.exists(container_name=storage_container,
                                   blob_name=whl_file)
            if exists:
                if not prompt_y_n(
                        "{} already exists. You may need to bump the extension version. Replace?"
                        .format(whl_file), default='n'):
                    logger.warning("Skipping '%s'...", whl_file)
                    continue
        # upload the WHL file
        client.create_blob_from_path(container_name=storage_container,
                                     blob_name=whl_file,
                                     file_path=os.path.abspath(whl_path))
        url = client.make_blob_url(container_name=storage_container,
                                   blob_name=whl_file)

        logger.info(url)
        uploaded_urls.append(url)

    if update_index:
        subheading('Updating Index')
        update_extension_index(uploaded_urls)

    subheading('Published WHLs')
    for url in uploaded_urls:
        display(url)

    if not update_index:
        logger.warning('You still need to update the index for your changes!')
        logger.warning(' az extension update-index <URL>')
def verify_versions(modules=None, update=False, pin=False):
    """Verify that CLI module versions have been bumped relative to PyPI.

    :param modules: modules to check. Mutually exclusive with update/pin.
    :param update: update azure-cli's setup.py with the local versions.
    :param pin: pin exact versions in setup.py (requires --update).
    :raises CLIError: on usage errors or when no modules are selected.

    Exits 1 when version bumps or setup.py mismatches are required.
    """
    import tempfile
    import shutil

    require_azure_cli()

    heading('Verify CLI Module Versions')

    usage_err = CLIError('usage error: <MODULES> | --update [--pin]')
    if modules and (update or pin):
        raise usage_err
    if not modules and pin and not update:
        raise usage_err

    if modules:
        update = None
        pin = None

    path_table = get_path_table(include_only=modules)
    modules = list(path_table['core'].items()) + list(path_table['mod'].items())
    modules = [x for x in modules if x[0] not in EXCLUDED_MODULES]

    if not modules:
        raise CLIError('No modules selected to test.')

    display('MODULES: {}'.format(', '.join([x[0] for x in modules])))

    results = {mod[0]: {} for mod in modules}

    original_cwd = os.getcwd()
    temp_dir = tempfile.mkdtemp()
    try:
        for mod, mod_path in modules:
            if not mod.startswith(COMMAND_MODULE_PREFIX) and mod != 'azure-cli':
                mod = '{}{}'.format(COMMAND_MODULE_PREFIX, mod)
            results.update(_compare_module_against_pypi(results, temp_dir, mod, mod_path))
    finally:
        # _compare_module_against_pypi chdirs into each module folder and can
        # raise; always restore the cwd and clean up the scratch directory
        # (the original skipped both on error)
        os.chdir(original_cwd)
        shutil.rmtree(temp_dir, ignore_errors=True)

    results = _check_setup_py(results, update, pin)

    logger.info('Module'.ljust(40) + 'Local Version'.rjust(20) + 'Public Version'.rjust(20))  # pylint: disable=logging-not-lazy
    for mod, data in results.items():
        logger.info(mod.ljust(40) + data['local_version'].rjust(20) + data['public_version'].rjust(20))

    bump_mods = {k: v for k, v in results.items() if v['status'] == 'BUMP'}
    mismatch_mods = {k: v for k, v in results.items() if v['status'] == 'MISMATCH'}
    subheading('RESULTS')
    if bump_mods:
        logger.error('The following modules need their versions bumped. '
                     'Scroll up for details: %s', ', '.join(bump_mods.keys()))
        # typo fixed: "resuling" -> "resulting"
        logger.warning('\nNote that before changing versions, you should consider '
                       'running `git clean` to remove untracked files from your repo. '
                       'Files that were once tracked but removed from the source may '
                       'still be on your machine, resulting in false positives.')
        sys.exit(1)
    elif mismatch_mods and not update:
        logger.error('The following modules have a mismatch between the module version '
                     'and the version in azure-cli\'s setup.py file. '
                     'Scroll up for details: %s', ', '.join(mismatch_mods.keys()))
        sys.exit(1)
    else:
        display('OK!')
def _compare_module_against_pypi(results, root_dir, mod, mod_path):
    """Compare a locally built module wheel against the latest wheel on PyPI.

    Downloads the public wheel, builds the local one (chdirs into mod_path),
    and records 'local_version', 'public_version' and 'status' ('OK' or
    'BUMP') in ``results[mod]``. A package not yet on PyPI, or with a higher
    local version, is 'OK'; otherwise the extracted wheel contents are
    compared and any difference requires a version bump.

    :param results: dict of per-module result dicts; mutated and returned.
    :param root_dir: scratch directory for downloads and builds.
    :param mod: pip/PyPI name of the module.
    :param mod_path: path to the module source (contains setup.py).
    :raises CLIError: if the PyPI download result can't be parsed, or the
        build doesn't produce exactly one wheel.
    """
    import zipfile

    version_pattern = re.compile(r'.*azure_cli[^-]*-(\d*.\d*.\d*).*')

    downloaded_path = None
    downloaded_version = None
    build_path = None
    build_version = None

    build_dir = os.path.join(root_dir, mod, 'local')
    pypi_dir = os.path.join(root_dir, mod, 'public')

    # download the public PyPI package and extract the version
    logger.info('Checking %s...', mod)
    result = pip_cmd('download {} --no-deps -d {}'.format(mod, root_dir)).result
    try:
        result = result.decode('utf-8')
    except AttributeError:
        pass
    for line in result.splitlines():
        line = line.strip()
        if line.endswith('.whl') and line.startswith('Saved'):
            downloaded_path = line.replace('Saved ', '').strip()
            downloaded_version = version_pattern.match(downloaded_path).group(1)
            break
        if line.startswith('No matching distribution found'):
            downloaded_path = None
            downloaded_version = 'Unavailable'
            break
    if not downloaded_version:
        raise CLIError('Unexpected error trying to acquire {}: {}'.format(mod, result))

    # build from source and extract the version
    setup_path = os.path.normpath(mod_path.strip())
    os.chdir(setup_path)
    py_cmd('setup.py bdist_wheel -d {}'.format(build_dir))
    build_files = os.listdir(build_dir)
    if len(build_files) != 1:
        # was: "Unexpectedly found multiple build files found in {}." --
        # doubled "found" and misleading when zero files were produced
        raise CLIError('Expected exactly one build file in {}, but found {}.'.format(
            build_dir, len(build_files)))
    build_path = os.path.join(build_dir, build_files[0])
    build_version = version_pattern.match(build_path).group(1)

    results[mod].update({
        'local_version': build_version,
        'public_version': downloaded_version
    })

    # OK if package is new
    if downloaded_version == 'Unavailable':
        results[mod]['status'] = 'OK'
        return results

    # OK if local version is higher than what's on PyPI
    if LooseVersion(build_version) > LooseVersion(downloaded_version):
        results[mod]['status'] = 'OK'
        return results

    # slight difference in dist-info dirs, so we must extract the azure folders and compare them
    with zipfile.ZipFile(str(downloaded_path), 'r') as z:
        z.extractall(pypi_dir)
    with zipfile.ZipFile(str(build_path), 'r') as z:
        z.extractall(build_dir)

    errors = _compare_folders(os.path.join(pypi_dir), os.path.join(build_dir))
    # clean up empty strings
    errors = [e for e in errors if e]
    if errors:
        subheading('Differences found in {}'.format(mod))
        for error in errors:
            logger.warning(error)
    results[mod]['status'] = 'OK' if not errors else 'BUMP'

    # special case: to make a release, these MUST be bumped, even if it wouldn't otherwise be necessary
    if mod in ['azure-cli', 'azure-cli-core']:
        if results[mod]['status'] == 'OK':
            logger.warning('%s version must be bumped to support release!', mod)
            results[mod]['status'] = 'BUMP'

    return results
def check_load_time(runs=3): require_azure_cli() heading('Module Load Performance') regex = r"[^']*'([^']*)'[\D]*([\d\.]*)" results = {TOTAL: []} # Time the module loading X times for i in range(0, runs + 1): lines = cmd('az -h --debug', show_stderr=True).result if i == 0: # Ignore the first run since it can be longer due to *.pyc file compilation continue try: lines = lines.decode().splitlines() except AttributeError: lines = lines.splitlines() total_time = 0 for line in lines: if line.startswith('DEBUG: Loaded module'): matches = re.match(regex, line) mod = matches.group(1) val = float(matches.group(2)) * 1000 total_time = total_time + val if mod in results: results[mod].append(val) else: results[mod] = [val] results[TOTAL].append(total_time) passed_mods = {} failed_mods = {} mods = sorted(results.keys()) bubble_found = False for mod in mods: val = results[mod] mean_val = mean(val) stdev_val = pstdev(val) threshold = THRESHOLDS.get(mod) or DEFAULT_THRESHOLD statistics = { 'average': mean_val, 'stdev': stdev_val, 'threshold': threshold, 'values': val } if mean_val > threshold: if not bubble_found and mean_val < 30: # This temporary measure allows one floating performance # failure up to 30 ms. See issue #6224 and #6218. bubble_found = True passed_mods[mod] = statistics else: failed_mods[mod] = statistics else: passed_mods[mod] = statistics subheading('Results') if failed_mods: display('== PASSED MODULES ==') display_table(passed_mods) display('\nFAILED MODULES') display_table(failed_mods) raise CLIError(""" FAILED: Some modules failed. If values are close to the threshold, rerun. If values are large, check that you do not have top-level imports like azure.mgmt or msrestazure in any modified files. """) display('== PASSED MODULES ==') display_table(passed_mods) display('\nPASSED: Average load time all modules: {} ms'.format( int(passed_mods[TOTAL]['average'])))
def _interactive_setup():
    """Interactively gather the azdev setup configuration from the user.

    Prompts for the azure-cli repo path (or PyPI install), any extension
    repos, and extensions to install by default, looping until the user
    confirms the summary.

    :returns: tuple (cli_path, ext_repos, exts) where cli_path is the CLI
        repo path or None (install from PyPI), ext_repos is a list of
        extension repo paths and exts is a list of extension source paths.
    :raises CLIError: if the CLI repo cannot be located during auto-find.
    """
    from knack.prompting import prompt_y_n, prompt
    while True:
        cli_path = None
        ext_repos = []
        exts = []

        # CLI Installation
        if prompt_y_n('Do you plan to develop CLI modules?'):
            display('\nGreat! Please enter the path to your azure-cli repo or press '
                    'RETURN and we will attempt to find it for you.')
            while True:
                cli_path = prompt('\nPath (RETURN to auto-find): ', None)
                cli_path = os.path.abspath(cli_path) if cli_path else None
                CLI_SENTINEL = 'azure-cli.pyproj'
                if not cli_path:
                    cli_path = find_file(CLI_SENTINEL)
                if not cli_path:
                    raise CLIError('Unable to locate your CLI repo. Things to check:'
                                   '\n Ensure you have cloned the repo. '
                                   '\n Specify the path explicitly with `-c PATH`. '
                                   '\n If you run with `-c` to autodetect, ensure you are running '
                                   'this command from a folder upstream of the repo.')
                try:
                    cli_path = _check_path(cli_path, CLI_SENTINEL)
                    display('Found: {}'.format(cli_path))
                    break
                except CLIError as ex:
                    logger.error(ex)
                    continue
        else:
            display('\nOK. We will install the latest `azure-cli` from PyPI then.')

        # Determine extension repos
        if prompt_y_n('\nDo you plan to develop CLI extensions?'):
            # BUGFIX: the original concatenated 'one per' 'line.' with no
            # separating space, displaying "one perline."
            display('\nGreat! Input the paths for the extension repos you wish to develop for, one per '
                    'line. You can add as many repos as you like. Press RETURN to continue to the next step.')
            while True:
                ext_repo_path = prompt('\nPath (RETURN to continue): ', None)
                if not ext_repo_path:
                    break
                try:
                    _check_repo(os.path.abspath(ext_repo_path))
                except CLIError as ex:
                    logger.error(ex)
                    continue
                ext_repos.append(ext_repo_path)
                display('Repo {} OK.'.format(ext_repo_path))

        if not ext_repos:
            display('\nNo problem! You can always add extension repos later with `azdev extension repo add`.')

        # Determine extensions
        if ext_repos:
            if prompt_y_n('\nWould you like to install certain extensions by default? '):
                display('\nGreat! Input the names of the extensions you wish to install, one per '
                        'line. You can add as many repos as you like. Press RETURN to continue to the next step.')
                # query the repos once and reuse; the original re-ran
                # list_extensions() for every extension name entered
                extensions = list_extensions()
                available_extensions = [x['name'] for x in extensions]
                while True:
                    ext_name = prompt('\nName (RETURN to continue): ', None)
                    if not ext_name:
                        break
                    if ext_name not in available_extensions:
                        logger.error("Extension '%s' not found. Check the spelling, and make "
                                     "sure you added the repo first!", ext_name)
                        continue
                    display('Extension {} OK.'.format(ext_name))
                    exts.append(next(x['path'] for x in extensions if x['name'] == ext_name))
            else:
                display('\nNo problem! You can always add extensions later with `azdev extension add`.')

        subheading('Summary')
        display('CLI: {}'.format(cli_path if cli_path else 'PyPI'))
        display('Extension repos: {}'.format(' '.join(ext_repos)))
        display('Extensions: \n {}'.format('\n '.join(exts)))
        if prompt_y_n('\nProceed with installation? '):
            return cli_path, ext_repos, exts
        display("\nNo problem! Let's start again.\n")