def remove_extension(extensions):
    """Uninstall one or more installed CLI extensions.

    :param extensions: List of extension names to remove, or ['*'] for all
        installed extensions.
    :raises CLIError: If any named extension is not installed.
    """
    ext_paths = get_ext_repo_paths()
    installed_paths = find_files(ext_paths, '*.*-info')
    paths_to_remove = []
    names_to_remove = []
    if extensions == ['*']:
        paths_to_remove = [os.path.dirname(path) for path in installed_paths]
        names_to_remove = [
            os.path.basename(os.path.dirname(path)) for path in installed_paths
        ]
    else:
        # BUG FIX: operate on a copy so the caller's list is not mutated
        remaining = list(extensions)
        for path in installed_paths:
            folder = os.path.dirname(path)
            long_name = os.path.basename(folder)
            if long_name in remaining:
                paths_to_remove.append(folder)
                names_to_remove.append(long_name)
                remaining.remove(long_name)
        # raise error if any extension not installed
        if remaining:
            raise CLIError('extension(s) not installed: {}'.format(
                ' '.join(remaining)))

    # removes any links that may have been added to site-packages.
    for ext in names_to_remove:
        pip_cmd('uninstall {} -y'.format(ext))

    for path in paths_to_remove:
        for d in os.listdir(path):
            # delete the egg-info and dist-info folders to make the extension invisible to the CLI and azdev
            if d.endswith('egg-info') or d.endswith('dist-info'):
                path_to_remove = os.path.join(path, d)
                display("Removing '{}'...".format(path_to_remove))
                shutil.rmtree(path_to_remove)
def display_table(data):
    """Print a fixed-width table of performance stats keyed by module name."""
    row_header = '{:<20} {:>12} {:>12} {:>12} {:>25}'
    display(row_header.format('Module', 'Average', 'Threshold', 'Stdev',
                              'Values'))
    for module, stats in data.items():
        display('{:<20} {:>12.0f} {:>12.0f} {:>12.0f} {:>25}'.format(
            module, stats['average'], stats['threshold'], stats['stdev'],
            str(stats['values'])))
# Example 3
def cmd(command, message=False, show_stderr=True, **kwargs):
    """ Run an arbitrary command.

    :param command: The entire command line to run.
    :param message: A custom message to display, or True (bool) to use a default.
    :param show_stderr: On error, display the contents of STDERR.
    :param kwargs: Any kwargs supported by subprocess.Popen
    :returns: CommandResultItem object.
    """
    from azdev.utilities import IS_WINDOWS, display

    if message is True:
        # fall back to a generic progress message
        message = 'Running: {}\n'.format(command)
    if message:
        display(message)

    stderr_target = subprocess.STDOUT if show_stderr else None
    try:
        raw = subprocess.check_output(command.split(),
                                      stderr=stderr_target,
                                      shell=IS_WINDOWS,
                                      **kwargs)
        return CommandResultItem(raw.decode('utf-8').strip(),
                                 exit_code=0,
                                 error=None)
    except subprocess.CalledProcessError as err:
        # surface the failure details to the caller instead of raising
        return CommandResultItem(err.output,
                                 exit_code=err.returncode,
                                 error=err)
def _get_profile(profile):
    """Align the az cloud profile with *profile* and return the profile that
    was active when this function was called.

    :param profile: Desired profile name, or a falsy value to keep the
        current profile.
    :returns: The previously-current profile name.
    :raises CLIError: If the current profile cannot be read or switching fails.
    """
    import colorama
    colorama.init(autoreset=True)
    try:
        # NOTE(review): color codes are skipped on Windows — presumably to
        # avoid raw ANSI escapes in the console; confirm
        fore_red = colorama.Fore.RED if not IS_WINDOWS else ''
        fore_reset = colorama.Fore.RESET if not IS_WINDOWS else ''
        current_profile = raw_cmd('az cloud show --query profile -otsv',
                                  show_stderr=False).result
        if not profile or current_profile == profile:
            # nothing to switch; report the profile in use
            profile = current_profile
            display(
                'The tests are set to run against current profile {}.'.format(
                    fore_red + current_profile + fore_reset))
        elif current_profile != profile:
            # switch the CLI to the requested profile
            display(
                'The tests are set to run against profile {} but the current az cloud profile is {}.'
                .format(fore_red + profile + fore_reset,
                        fore_red + current_profile + fore_reset))
            result = raw_cmd(
                'az cloud update --profile {}'.format(profile),
                'SWITCHING TO PROFILE {}.'.format(fore_red + profile +
                                                  fore_reset))
            if result.exit_code != 0:
                raise CLIError(result.error.output)
        # returns the pre-switch profile, not the requested one
        return current_profile
    except CalledProcessError:
        raise CLIError('Failed to retrieve current az profile')
# Example 5
def install_extensions(venv_path, extensions):
    """Install azure-cli extensions into the given virtual environment.

    :param venv_path: Root directory of the target virtual environment.
    :param extensions: List of extension names, or ['*'] to install all.
    :raises CLIError: If a requested extension cannot be found.
    """
    if const.IS_WINDOWS:
        activate_path = os.path.join(venv_path, 'Scripts', 'activate')
        delimiter = ' && '
        executable = None
    else:
        activate_path = 'source ' + os.path.join(venv_path, const.UN_BIN,
                                                 const.UN_ACTIVATE)
        delimiter = '; '
        executable = '/bin/bash'

    all_ext = azdev.operations.extensions.list_extensions()
    if extensions == ['*']:
        display("\nInstalling all extensions")
        for ext in all_ext:
            shell_cmd(activate_path + delimiter + const.PIP_E_CMD + ext['path'],
                      executable=executable)
        # sentinel: skips the lookup loop and the not-found check below
        extensions = False
    else:
        display("\nInstalling the following extensions: " + str(extensions))
        extensions = set(extensions)

    for ext in all_ext:
        if not extensions:
            break
        if ext['name'] in extensions:
            shell_cmd(activate_path + delimiter + const.PIP_E_CMD + ext['path'],
                      executable=executable)
            extensions.remove(ext['name'])

    # anything left over was requested but never found
    if extensions:
        raise CLIError(
            "The following extensions were not found. Ensure you have added "
            "the repo using `--repo/-r PATH`.\n    {}".format(
                '\n    '.join(extensions)))
def check_license_headers():
    """Verify that every .py file in the CLI repo carries the required
    license header.

    :raises CLIError: Listing every file missing the header.
    """
    heading('Verify License Headers')

    cli_path = get_cli_repo_path()
    env_path = os.path.join(cli_path, 'env')

    files_without_header = []
    for current_dir, _, files in os.walk(cli_path):
        # skip the virtual environment folder
        if current_dir.startswith(env_path):
            continue

        file_itr = (os.path.join(current_dir, p) for p in files
                    if p.endswith('.py') and p != 'azure_bdist_wheel.py')
        for python_file in file_itr:
            # explicit encoding avoids locale-dependent decode failures
            with open(python_file, 'r', encoding='utf-8') as f:
                file_text = f.read()

                # empty files are exempt
                if file_text and LICENSE_HEADER not in file_text:
                    # BUG FIX: python_file is already the full path; the
                    # original re-joined it with current_dir, which would
                    # duplicate the prefix for relative repo paths
                    files_without_header.append(python_file)

    subheading('Results')
    if files_without_header:
        raise CLIError(
            "{}\nError: {} files don't have the required license headers.".
            format('\n'.join(files_without_header), len(files_without_header)))
    display('License headers verified OK.')
def publish_extensions(extensions,
                       storage_subscription,
                       storage_account,
                       storage_container,
                       dist_dir='dist',
                       update_index=False,
                       yes=False):
    """Build extension WHL files and upload them to Azure blob storage.

    :param extensions: Names of the extensions to publish.
    :param storage_subscription: Subscription hosting the storage account.
    :param storage_account: Target storage account name.
    :param storage_container: Target blob container name.
    :param dist_dir: Local folder where the WHLs are built.
    :param update_index: When True, also update the extension index with the
        uploaded URLs.
    :param yes: When True, overwrite existing blobs without prompting.
    """
    heading('Publish Extensions')

    require_azure_cli()

    # rebuild the extensions
    subheading('Building WHLs')
    try:
        shutil.rmtree(dist_dir)
    except Exception as ex:  # pylint: disable=broad-except
        # best-effort cleanup; a stale dist folder is not fatal
        logger.debug("Unable to clear folder '%s'. Error: %s", dist_dir, ex)
    build_extensions(extensions, dist_dir=dist_dir)

    whl_files = find_files(dist_dir, '*.whl')
    uploaded_urls = []

    subheading('Uploading WHLs')
    for whl_path in whl_files:
        whl_file = os.path.split(whl_path)[-1]
        # check if extension already exists unless user opted not to
        if not yes:
            command = 'az storage blob exists --subscription {} --account-name {} -c {} -n {}'.format(
                storage_subscription, storage_account, storage_container,
                whl_file)
            exists = json.loads(cmd(command).result)['exists']
            if exists:
                if not prompt_y_n(
                        "{} already exists. You may need to bump the extension version. Replace?"
                        .format(whl_file),
                        default='n'):
                    logger.warning("Skipping '%s'...", whl_file)
                    continue
        # upload the WHL file
        command = 'az storage blob upload --subscription {} --account-name {} -c {} -n {} -f {}'.format(
            storage_subscription, storage_account, storage_container, whl_file,
            os.path.abspath(whl_path))
        cmd(command, "Uploading '{}'...".format(whl_file))
        # retrieve the public URL of the uploaded blob
        command = 'az storage blob url --subscription {} --account-name {} -c {} -n {} -otsv'.format(
            storage_subscription, storage_account, storage_container, whl_file)
        url = cmd(command).result
        logger.info(url)
        uploaded_urls.append(url)

    if update_index:
        subheading('Updating Index')
        update_extension_index(uploaded_urls)

    subheading('Published')
    display(uploaded_urls)
    if not update_index:
        logger.warning(
            'You still need to update the index for your changes with `az extension update-index`.'
        )
def _generate_ref_docs_for_all_profiles(output_type, base_output_dir):
    """Generate sphinx ref docs for every available CLI profile.

    Switches the CLI through each profile, builds docs into a per-profile
    subdirectory, then restores the original profile — also on failure.

    :param output_type: Sphinx builder name.
    :param base_output_dir: Root output directory; one subdir per profile.
    """
    original_profile = None
    profile = ""
    try:
        # store original profile and get all profiles.
        original_profile = _get_current_profile()
        profiles = _get_profiles()
        _logger.info("Original Profile: %s", original_profile)

        for profile in profiles:
            # set profile and call sphinx build cmd
            profile_output_dir = os.path.join(base_output_dir, profile)
            _set_profile(profile)
            _call_sphinx_build(output_type, profile_output_dir)

            # BUG FIX: the original formatted output_type into the
            # "profile {}" slot, so the message reported e.g. "profile xml"
            display("\nFinished generating {} files for profile {} in dir {}\n".
                    format(output_type, profile, profile_output_dir))

        # always set the profile back to the original profile after generating all docs.
        _set_profile(original_profile)

    except (CLIError, KeyboardInterrupt, SystemExit) as e:
        _logger.error(
            "Error when attempting to generate docs for profile %s.\n\t%s",
            profile, e)
        if original_profile:
            _logger.error(
                "Will try to set the CLI's profile back to the original value: '%s'",
                original_profile)
            _set_profile(original_profile)
        # still re-raise the error, preserving the original traceback
        raise
def check_document_map():
    """Verify that the document map agrees with the help files on disk.

    :raises CLIError: Describing missing or unmapped help files.
    """
    heading('Verify Document Map')

    cli_repo = get_cli_repo_path()

    map_path = os.path.join(cli_repo, DOC_SOURCE_MAP_PATH)
    help_files_in_map = _get_help_files_in_map(map_path)
    missing = _map_help_files_not_found(cli_repo, help_files_in_map)
    unmapped = _help_files_not_in_map(cli_repo, help_files_in_map)

    subheading('Results')
    if not missing and not unmapped:
        display('Verified {} OK.'.format(DOC_MAP_NAME))
        return

    error_lines = ['Errors whilst verifying {}!'.format(DOC_MAP_NAME)]
    if missing:
        error_lines.append(
            'The following files are in {} but do not exist:'.format(
                DOC_MAP_NAME))
        error_lines.extend(missing)
    if unmapped:
        error_lines.append(
            'The following files should be added to {}:'.format(DOC_MAP_NAME))
        error_lines.extend(unmapped)
    raise CLIError('\n'.join(error_lines))
def _call_sphinx_build(builder_name,
                       output_dir,
                       for_extensions_alone=False,
                       call_env=None,
                       msg=""):
    """Invoke sphinx-build for the CLI or extension doc sources.

    :param builder_name: Sphinx builder (e.g. 'xml', 'html').
    :param output_dir: Directory for generated output.
    :param for_extensions_alone: Use the extension doc sources when True.
    :param call_env: Optional environment dict for the subprocess.
    :param msg: Optional message displayed before the build.
    :raises CLIError: If sphinx-build exits with a non-zero status.
    """
    conf_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                            'refdoc')

    sub_dir = 'extension_docs' if for_extensions_alone else 'cli_docs'
    source_dir = os.path.abspath(os.path.join(conf_dir, sub_dir))

    opts = ['-E', '-b', builder_name, '-c', conf_dir]
    if for_extensions_alone:
        # the extension and core CLI configs historically differ here; this
        # is cosmetic only. A sphinx bug requires "0" rather than "False".
        opts += ['-D', 'smartquotes=0']

    sphinx_cmd = ['sphinx-build'] + opts + [source_dir, output_dir]
    display("sphinx cmd: {}".format(" ".join(sphinx_cmd)))
    display(msg)
    try:
        # call sphinx-build
        check_call(sphinx_cmd,
                   stdout=sys.stdout,
                   stderr=sys.stderr,
                   env=call_env)
    except CalledProcessError:
        raise CLIError("Doc generation failed.")
# Example 11
    def _run(test_paths, pytest_args):
        """Run pytest over *test_paths* in a process pool.

        :param test_paths: Iterable of test directories/files.
        :param pytest_args: Extra arguments forwarded to pytest.
        :returns: True if any test run failed, else False.
        """
        # '--boxed' (per-test process isolation) is only available on posix
        if os.name == 'posix':
            arguments = [
                '-x', '-v', '--boxed', '-p no:warnings', '--log-level=WARN',
                '--junit-xml', log_path
            ]
        else:
            arguments = [
                '-x', '-v', '-p no:warnings', '--log-level=WARN',
                '--junit-xml', log_path
            ]

        if no_exit_first:
            arguments.remove('-x')

        if mark:
            arguments.append('-m "{}"'.format(mark))

        if parallel:
            arguments += ['-n', 'auto']
        if last_failed:
            arguments.append('--lf')
        if pytest_args:
            arguments += pytest_args
        tests_params = [(i, clean, arguments) for i in test_paths]
        test_fail = False
        with multiprocessing.Pool(multiprocessing.cpu_count()) as the_pool:
            try:
                the_pool.map(_run_test, tests_params)
            except subprocess.CalledProcessError as err:
                # BUG FIX: the original read
                # subprocess.CalledProcessError.stdout — the *class*
                # attribute — instead of the caught exception's output
                display("TEST FAILED! : " + str(err.stdout))
                test_fail = True
        return test_fail
def _add_to_codeowners(repo_path, prefix, name, github_alias):
    """Append an ownership entry for a new package to the CODEOWNERS file.

    :param repo_path: Repo root in which to locate CODEOWNERS.
    :param prefix: Package prefix; EXTENSION_PREFIX selects the src/ layout.
    :param name: Package name (without prefix).
    :param github_alias: Maintainer's GitHub alias; prompted for if empty.
    :raises CLIError: If no CODEOWNERS file can be found.
    """
    # prompt for an alias when none was supplied
    if not github_alias:
        display(
            '\nWhat is the Github alias of the person responsible for maintaining this package?'
        )
        while not github_alias:
            github_alias = prompt('Alias: ')

    # accept a raw alias or @alias
    if not github_alias.startswith('@'):
        github_alias = '@{}'.format(github_alias)

    matches = find_files(repo_path, 'CODEOWNERS')
    if not matches:
        raise CLIError('unexpected error: unable to find CODEOWNERS file.')
    codeowners = matches[0]

    if prefix == EXTENSION_PREFIX:
        new_line = '/src/{}{}/ {}'.format(prefix, name, github_alias)
    else:
        # ensure Linux-style separators when run on Windows
        new_line = '/{} {}'.format(
            os.path.join('', _MODULE_ROOT_PATH, name, ''),
            github_alias).replace('\\', '/')

    with open(codeowners, 'a') as f:
        f.write(new_line + '\n')
def _download_vendored_sdk(required_sdk, path):
    """Download *required_sdk* via pip and extract its WHL into *path*.

    :param required_sdk: pip requirement string (e.g. 'azure-mgmt-foo==1.0').
    :param path: Destination vendored_sdks folder.
    :raises CLIError: If the WHL file cannot be downloaded.
    """
    import tempfile
    import zipfile

    # matches pip's "Saved <path>.whl" / "downloaded <path>.whl" output line
    # to discover where pip placed the WHL file
    path_regex = re.compile(
        r'.*((\s*.*downloaded\s)|(\s*.*saved\s))(?P<path>.*\.whl)',
        re.IGNORECASE | re.S)
    temp_path = tempfile.mkdtemp()

    # download and extract the required SDK to the vendored_sdks folder
    downloaded_path = None
    if required_sdk:
        display('Downloading {}...'.format(required_sdk))
        vendored_sdks_path = path
        result = pip_cmd('download {} --no-deps -d {}'.format(
            required_sdk, temp_path)).result
        try:
            result = result.decode('utf-8')
        except AttributeError:
            # already a str
            pass
        for line in result.splitlines():
            try:
                downloaded_path = path_regex.match(line).group('path')
            except AttributeError:
                # line did not match the pattern; keep scanning
                continue
            break
        if not downloaded_path:
            display('Unable to download')
            raise CLIError('Unable to download: {}'.format(required_sdk))

        # extract the WHL file
        with zipfile.ZipFile(str(downloaded_path), 'r') as z:
            z.extractall(temp_path)

        _copy_vendored_sdk(temp_path, vendored_sdks_path)
def delete_groups(prefixes=None, older_than=6, product='azurecli', cause='automation', yes=False):
    """Delete Azure resource groups left behind by test automation.

    :param prefixes: Optional list of name prefixes; when given, groups are
        selected by prefix instead of by tags.
    :param older_than: Hour threshold used in the tag-based date filter.
    :param product: Expected value of the group's 'product' tag.
    :param cause: Expected value of the group's 'cause' tag.
    :param yes: When True, delete without prompting for confirmation.
    :raises CLIError: If no groups match, or the user cancels.
    """
    from datetime import datetime, timedelta

    require_azure_cli()

    groups = json.loads(run_cmd('az group list -ojson').result)
    groups_to_delete = []

    def _filter_by_tags():
        # select groups whose product/cause tags match and whose date tag
        # is inside the time window
        for group in groups:
            group = Data(**group)

            if not group.tags:  # pylint: disable=no-member
                continue

            tags = Data(**group.tags)  # pylint: disable=no-member
            try:
                date_tag = datetime.strptime(tags.date, '%Y-%m-%dT%H:%M:%SZ')
                curr_time = datetime.utcnow()
                # NOTE(review): '<=' selects groups *newer* than the cutoff,
                # which reads oddly next to the 'older_than' parameter name —
                # confirm the intended comparison direction
                if (tags.product == product and tags.cause == cause and
                        (curr_time - date_tag <= timedelta(hours=older_than + 1))):
                    groups_to_delete.append(group.name)
            except AttributeError:
                # group is missing one of the expected tags; skip it
                continue

    def _filter_by_prefix():
        # select groups whose name starts with any given prefix
        for group in groups:
            group = Data(**group)

            for prefix in prefixes:
                if group.name.startswith(prefix):
                    groups_to_delete.append(group.name)

    def _delete():
        # fire-and-forget deletion of each selected group
        for group in groups_to_delete:
            run_cmd('az group delete -g {} -y --no-wait'.format(group), message=True)

    if prefixes:
        logger.info('Filter by prefix')
        _filter_by_prefix()
    else:
        logger.info('Filter by tags')
        _filter_by_tags()

    if not groups_to_delete:
        raise CLIError('No groups meet the criteria to delete.')

    if yes:
        _delete()
    else:
        # show the candidates and ask for confirmation first
        subheading('Groups to Delete')
        for group in groups_to_delete:
            display('\t{}'.format(group))

        if prompt_y_n('Delete {} resource groups?'.format(len(groups_to_delete)), 'y'):
            _delete()
        else:
            raise CLIError('Command cancelled.')
# Example 15
 def add_ext_repo(path):
     """Validate *path* as an extension repo and register it.

     :param path: Candidate repo path.
     :returns: True on success; False (after logging) if validation failed.
     """
     try:
         _check_repo(path)
     except CLIError as ex:
         logger.error(ex)
         return False
     ext_repos.append(path)
     display('Repo {} OK.'.format(path))
     return True
    def __exit__(self, exc_type, exc_val, exc_tb):
        """Switch az cloud back to the origin profile and, if the with-block
        raised, print the traceback (without suppressing the exception)."""
        if self.target_profile is not None and self.target_profile != self.origin_profile:
            display('Switching back to origin profile "{}"...'.format(
                self.origin_profile))
            call('az cloud update --profile {}'.format(self.origin_profile))

        if exc_tb:
            # returning None lets the exception propagate after printing
            display('')
            traceback.print_exception(exc_type, exc_val, exc_tb)
 def __enter__(self):
     """Switch az cloud to the target profile if it differs from the
     current one.

     :raises CLIError: If the profile switch command fails.
     """
     if self.target_profile is None or self.target_profile == self.origin_profile:
         # no switch needed; just report the profile in use
         display(
             'The tests are set to run against current profile "{}"'.format(
                 self.origin_profile))
     else:
         result = cmd(
             'az cloud update --profile {}'.format(self.target_profile),
             'Switching to target profile "{}"...'.format(
                 self.target_profile))
         if result.exit_code != 0:
             raise CLIError(result.error.output.decode('utf-8'))
# Example 18
def generate_cli_ref_docs(output_dir=None, output_type=None):
    """Generate sphinx reference docs for all CLI commands.

    :param output_dir: Directory for generated output (validated/expanded by
        _process_ref_doc_output_dir).
    :param output_type: Sphinx builder name passed to sphinx-build.
    """
    # require that azure cli installed
    require_azure_cli()
    output_dir = _process_ref_doc_output_dir(output_dir)

    heading('Generate CLI Reference Docs')
    display("Docs will be placed in {}.".format(output_dir))

    # Generate documentation for all commands
    _call_sphinx_build(output_type, output_dir)

    display("\nThe {} files are in {}".format(output_type, output_dir))
# Example 19
def verify_versions():
    """Compare local core-module versions against PyPI and report modules
    whose version needs to be bumped.

    Exits with status 1 (sys.exit) if any module requires a version bump.
    :raises CLIError: If no modules are selected.
    """
    import tempfile
    import shutil

    require_azure_cli()

    heading('Verify CLI Versions')

    path_table = get_path_table()
    modules = list(path_table['core'].items())
    modules = [x for x in modules if x[0] not in EXCLUDED_MODULES]

    if not modules:
        raise CLIError('No modules selected to test.')

    display('MODULES: {}'.format(', '.join([x[0] for x in modules])))

    results = {}

    original_cwd = os.getcwd()
    temp_dir = tempfile.mkdtemp()
    try:
        for mod, mod_path in modules:
            if not mod.startswith(COMMAND_MODULE_PREFIX) and mod != 'azure-cli':
                mod = '{}{}'.format(COMMAND_MODULE_PREFIX, mod)
            results[mod] = {}
            results.update(
                _compare_module_against_pypi(results, temp_dir, mod, mod_path))
    finally:
        # BUG FIX: always clean up the temp dir and restore the working
        # directory, even when a comparison raises
        shutil.rmtree(temp_dir)
        os.chdir(original_cwd)

    logger.info('Module'.ljust(40) + 'Local Version'.rjust(20) +
                'Public Version'.rjust(20))  # pylint: disable=logging-not-lazy
    for mod, data in results.items():
        logger.info(
            mod.ljust(40) + data['local_version'].rjust(20) +
            data['public_version'].rjust(20))

    bump_mods = {k: v for k, v in results.items() if v['status'] == 'BUMP'}
    subheading('RESULTS')
    if bump_mods:
        logger.error(
            'The following modules need their versions bumped. '
            'Scroll up for details: %s', ', '.join(bump_mods.keys()))
        # typo fix: "resuling" -> "resulting"
        logger.warning(
            '\nNote that before changing versions, you should consider '
            'running `git clean` to remove untracked files from your repo. '
            'Files that were once tracked but removed from the source may '
            'still be on your machine, resulting in false positives.')
        sys.exit(1)
    else:
        display('OK!')
def _generate_ref_docs_for_public_exts(output_type, base_output_dir):
    """Download every compatible public extension, install each into its own
    temp directory, and generate sphinx ref docs for it.

    :param output_type: Sphinx builder name.
    :param base_output_dir: Root directory; one subdirectory per extension.
    :raises CLIError: If the public extension list cannot be retrieved.
    """
    # TODO: this shouldn't define the env key, but should reference it from a central place in the cli repo.
    ENV_KEY_AZURE_EXTENSION_DIR = 'AZURE_EXTENSION_DIR'

    extensions_url_tups = _get_available_extension_urls()
    if not extensions_url_tups:
        raise CLIError("Failed to retrieve public extensions.")

    temp_dir = tempfile.mkdtemp(prefix="temp_whl_ext_dir")
    _logger.debug("Created temp directory to store downloaded whl files: %s",
                  temp_dir)

    try:
        for name, file_name, download_url in extensions_url_tups:
            # for every compatible public extensions
            # download the whl file
            whl_file_name = _get_whl_from_url(download_url, file_name,
                                              temp_dir)

            # install the whl file in a new temp directory
            installed_ext_dir = tempfile.mkdtemp(prefix="temp_extension_dir_",
                                                 dir=temp_dir)
            _logger.debug(
                "Created temp directory %s to use as the extension installation dir for %s extension.",
                installed_ext_dir, name)
            # NOTE(review): this local list shadows any module-level pip_cmd
            # helper within this function
            pip_cmd = [
                sys.executable, '-m', 'pip', 'install', '--target',
                os.path.join(installed_ext_dir, 'extension'), whl_file_name,
                '--disable-pip-version-check', '--no-cache-dir'
            ]
            display('Executing "{}"'.format(' '.join(pip_cmd)))
            check_call(pip_cmd)

            # set the directory as the extension directory in the environment used to call sphinx-build
            env = os.environ.copy()
            env[ENV_KEY_AZURE_EXTENSION_DIR] = installed_ext_dir
            # generate documentation for installed extensions

            ext_output_dir = os.path.join(base_output_dir, name)
            os.makedirs(ext_output_dir)
            _call_sphinx_build(output_type,
                               ext_output_dir,
                               for_extensions_alone=True,
                               call_env=env,
                               msg="\nGenerating ref docs for {}".format(name))
    finally:
        # finally delete the temp dir
        shutil.rmtree(temp_dir)
        _logger.debug("Deleted temp whl extension directory: %s", temp_dir)
# Example 21
def _run_test(test_args):
    """Worker that runs pytest for a single test path.

    :param test_args: (test_path, clean_flag, pytest_arguments) tuple.
    :raises subprocess.CalledProcessError: If the pytest run fails.
    """
    test_path, clean, arguments = test_args
    # '-B' suppresses .pyc files when a clean run was requested
    flag = '-B ' if clean else ' '
    command = "python " + flag + "-m pytest {}".format(
        ' '.join([test_path] + arguments))
    try:
        subprocess.check_call(command.split(), shell=const.IS_WINDOWS)
    except subprocess.CalledProcessError as e:
        if clean:
            # drop any recordings produced by the failed run
            display("Test failed, cleaning up recordings")
            recordings = os.path.join(test_path, 'recordings')
            if os.path.isdir(recordings):
                for file_name in os.listdir(recordings):
                    if file_name.endswith(".yaml"):
                        os.remove(os.path.join(recordings, file_name))
        raise e
# Example 22
def _generate_extension(ext_name, repo_path, swagger_readme_file_path, use):
    """Generate a CLI extension from a swagger readme using autorest.

    :param ext_name: Name of the extension being generated (display only).
    :param repo_path: azure-cli-extensions repo folder passed to autorest.
    :param swagger_readme_file_path: Path/URL of the swagger readme.
    :param use: Optional value for autorest's --use flag.
    :raises CLIError: If npm is missing or autorest installation fails.
    """
    heading('Start generating extension {}.'.format(ext_name))
    # check if npm is installed
    try:
        shell_cmd('npm --version', stdout=subprocess.DEVNULL, raise_ex=False)
    except CLIError as ex:
        raise CLIError('{}\nPlease install npm.'.format(ex))
    display('Installing autorest...\n')
    if const.IS_WINDOWS:
        try:
            shell_cmd('npm install -g autorest', raise_ex=False)
        except CLIError as ex:
            raise CLIError("Failed to install autorest.\n{}".format(ex))
    else:
        try:
            shell_cmd('npm install -g autorest',
                      stderr=subprocess.DEVNULL,
                      raise_ex=False)
        except CLIError as ex:
            # global install failed, likely due to permissions; try to find
            # or create a user-writable npm prefix before retrying
            path = os.environ['PATH']
            # check if npm is installed through nvm
            if os.environ.get('NVM_DIR'):
                raise ex
            # check if user using specific node version and manually add it to the os env PATH
            node_version = shell_cmd('node --version',
                                     capture_output=True).result
            if 'node/' + node_version + '/bin' in path:
                raise ex
            # create a new directory for npm global installations, to avoid using sudo in installing autorest
            npm_path = os.path.join(os.environ['HOME'], '.npm-packages')
            if not os.path.isdir(npm_path):
                os.mkdir(npm_path)
            npm_prefix = shell_cmd('npm prefix -g', capture_output=True).result
            shell_cmd('npm config set prefix ' + npm_path)
            os.environ['PATH'] = path + ':' + os.path.join(npm_path, 'bin')
            os.environ['MANPATH'] = os.path.join(npm_path, 'share', 'man')
            shell_cmd('npm install -g autorest')
            # restore the previous npm prefix after installing
            shell_cmd('npm config set prefix ' + npm_prefix)
    # update autorest core
    shell_cmd('autorest --latest')
    if not use:
        cmd = 'autorest --az --azure-cli-extension-folder={} {}'.format(
            repo_path, swagger_readme_file_path)
    else:
        cmd = 'autorest --az --azure-cli-extension-folder={} {} --use={}'.format(
            repo_path, swagger_readme_file_path, use)
    shell_cmd(cmd, message=True)
# Example 23
def check_license_headers():
    """Verify license headers across the CLI repo and any registered
    extension repos.

    A file passes if it contains any of the three recognized header forms.
    :raises CLIError: Listing every file missing a header.
    """
    heading('Verify License Headers')

    cli_path = get_cli_repo_path()
    all_paths = [cli_path]
    try:
        all_paths.extend(get_ext_repo_paths())
    except CLIError:
        display("No CLI ext path, running check only on modules")

    files_without_header = []
    for path in all_paths:
        # pathlib glob patterns use '/' regardless of platform, so avoid
        # the os.sep-based pattern the original built
        py_files = pathlib.Path(path).glob('**/*.py')

        for py_file in py_files:
            py_file = str(py_file)

            if py_file.endswith('azure_bdist_wheel.py'):
                continue

            # for/else: only check files not under an ignored subdirectory
            for ignore_token in _IGNORE_SUBDIRS:
                if ignore_token in py_file:
                    break
            else:
                with open(py_file, 'r', encoding='utf-8') as f:
                    file_text = f.read()

                # empty files are exempt
                if not file_text:
                    continue

                # accept any of the three recognized header forms
                has_header = (LICENSE_HEADER in file_text or
                              WRAPPED_LICENSE_HEADER in file_text or
                              CODEGEN_LICENSE_HEADER in file_text)
                if not has_header:
                    files_without_header.append(py_file)

    subheading('Results')
    if files_without_header:
        raise CLIError(
            "{}\nError: {} files don't have the required license headers.".
            format('\n'.join(files_without_header), len(files_without_header)))
    display('License headers verified OK.')
# Example 24
def install_cli(cli_path, venv_path):
    """Install the azure-cli packages from a local repo into a virtualenv.

    Installs, in order: azure-common, telemetry, core, testsdk, the azure-cli
    package itself, and finally the platform requirements file.

    :param cli_path: Root of the azure-cli repo checkout.
    :param venv_path: Root of the target virtual environment.
    """
    src_path = os.path.join(cli_path, 'src')
    # on posix the activate script must be 'source'd inside a bash shell
    activate_path = (os.path.join(venv_path, 'Scripts', 'activate')
                     if const.IS_WINDOWS else 'source ' +
                     os.path.join(venv_path, const.UN_BIN, const.UN_ACTIVATE))
    delimiter = ' && ' if const.IS_WINDOWS else '; '
    executable = None if const.IS_WINDOWS else '/bin/bash'
    display("\nvenv activate path is " + str(activate_path))
    # install azure-common first, ignoring any system-wide copy
    shell_cmd(activate_path + delimiter +
              'pip install --ignore-installed azure-common',
              stdout=subprocess.DEVNULL,
              stderr=subprocess.DEVNULL,
              raise_ex=False,
              executable=executable)
    display("\nInstalling telemetry ")
    shell_cmd(activate_path + delimiter + const.PIP_E_CMD +
              os.path.join(src_path, 'azure-cli-telemetry'),
              stdout=subprocess.DEVNULL,
              raise_ex=False,
              stderr=subprocess.DEVNULL,
              executable=executable)
    display("\nInstalling core ")
    shell_cmd(activate_path + delimiter + const.PIP_E_CMD +
              os.path.join(src_path, 'azure-cli-core'),
              stdout=subprocess.DEVNULL,
              raise_ex=False,
              stderr=subprocess.DEVNULL,
              executable=executable)
    shell_cmd(activate_path + delimiter + const.PIP_E_CMD +
              os.path.join(src_path, 'azure-cli-testsdk'),
              stdout=subprocess.DEVNULL,
              stderr=subprocess.DEVNULL,
              raise_ex=False,
              executable=executable)
    display("\nInstalling cli ")
    shell_cmd(activate_path + delimiter + const.PIP_E_CMD +
              os.path.join(src_path, 'azure-cli'),
              raise_ex=False,
              executable=executable)
    # requirements files are named e.g. requirements.py3.windows.txt (lower
    # case) on Windows but requirements.py3.Darwin/Linux.txt elsewhere
    req_file = 'requirements.py3.{}.txt'.format(
        platform.system().lower() if const.IS_WINDOWS else platform.system())
    req_file = "{}/src/azure-cli/{}".format(cli_path, req_file)
    display("Installing " + req_file)
    shell_cmd(activate_path + delimiter + const.PIP_R_CMD + req_file,
              raise_ex=False,
              executable=executable)
def update_setup_py(pin=False):
    """Regenerate the azure-cli setup.py dependency list.

    :param pin: When True, pin each dependency to its exact local version.
    """
    require_azure_cli()

    heading('Update azure-cli setup.py')

    path_table = get_path_table()
    azure_cli_path = path_table['core']['azure-cli']
    azure_cli_setup_path = find_files(azure_cli_path, SETUP_PY_NAME)[0]

    all_modules = list(path_table['core'].items())
    all_modules += list(path_table['mod'].items())
    modules = [m for m in all_modules if m[0] not in EXCLUDED_MODULES]

    results = {name: {} for name, _ in modules}
    results = _get_module_versions(results, modules)
    _update_setup_py(results, azure_cli_setup_path, pin)

    display('OK!')
def _check_setup_py(results, update, pin):
    """Audit (or rewrite) azure-cli's setup.py against local module versions.

    :param results: dict of module name -> version-info dict; augmented in place
        with each entry's 'setup_version' (and 'status' on a mismatch).
    :param update: None to skip entirely; truthy to rewrite setup.py via
        _update_setup_py; falsy (but not None) to audit only.
    :param pin: forwarded to _update_setup_py — whether to pin exact versions.
    :returns: the (possibly augmented) results dict.
    """
    # only audit or update setup.py when all modules are being considered
    # otherwise, edge cases could arise
    if update is None:
        return results

    # retrieve current versions from azure-cli's setup.py file
    azure_cli_path = get_path_table(include_only='azure-cli')['core']['azure-cli']
    azure_cli_setup_path = find_files(azure_cli_path, SETUP_PY_NAME)[0]
    with open(azure_cli_setup_path, 'r') as f:
        # Matches quoted requirement strings like 'azure-cli-foo==1.2.3' or
        # 'azure-cli-foo'; 'ver' is absent when the requirement is unpinned.
        setup_py_version_regex = re.compile(r"(?P<quote>[\"'])(?P<mod>[^'=]*)(==(?P<ver>[\d.]*))?(?P=quote)")
        for line in f.readlines():
            # Only requirement lines of interest start with 'azure-cli-
            if line.strip().startswith("'azure-cli-"):
                match = setup_py_version_regex.match(line.strip())
                mod = match.group('mod')
                # the nspkg module's PyPI name uses '-' where its repo name uses '_'
                if mod == 'azure-cli-command-modules-nspkg':
                    mod = 'azure-cli-command_modules-nspkg'
                try:
                    results[mod]['setup_version'] = match.group('ver')
                except KeyError:
                    # something that is in setup.py but isn't module is
                    # inherently a mismatch
                    results[mod] = {
                        'local_version': 'Unavailable',
                        'public_version': 'Unknown',
                        'setup_version': match.group('ver'),
                        'status': 'MISMATCH'
                    }

    if update:
        _update_setup_py(results, azure_cli_setup_path, pin)
    else:
        display('\nAuditing azure-cli setup.py against local module versions...')
        for mod, data in results.items():
            # azure-cli itself is not listed as its own requirement
            if mod == 'azure-cli':
                continue
            setup_version = data['setup_version']
            if not setup_version:
                # unpinned requirements carry no version to compare against
                logger.warning('The azure-cli setup.py file is not using pinned versions. Aborting audit.')
                break
            elif setup_version != data['local_version']:
                data['status'] = 'MISMATCH'
    return results
def generate_cli_ref_docs(output_dir=None,
                          output_type=None,
                          all_profiles=None):
    """Generate sphinx-built reference docs for the installed Azure CLI.

    :param output_dir: destination directory for the generated docs;
        resolved/validated by _process_ref_doc_output_dir when None.
    :param output_type: sphinx output format passed to the build (e.g. 'html').
    :param all_profiles: truthy to generate docs once per CLI profile.
    """
    # require that azure cli installed and warn the users if extensions are installed.
    require_azure_cli()
    output_dir = _process_ref_doc_output_dir(output_dir)

    _warn_if_exts_installed()

    heading('Generate CLI Reference Docs')
    display("Docs will be placed in {}.".format(output_dir))

    if all_profiles:
        # Generate documentation for all commands and for all CLI profiles
        _generate_ref_docs_for_all_profiles(output_type, output_dir)
    else:
        # Generate documentation for all commands (current profile only)
        _call_sphinx_build(output_type, output_dir)

    display("\nThe {} files are in {}".format(output_type, output_dir))
# Esempio n. 28 / 0 -- stray pasted text (scraper artifact, not Python); commented out to keep the module importable
def _summarize_test_results(xml_path):
    """Print a summary (counts plus failed test names) from a junit-style XML report.

    :param xml_path: path to the XML file produced by the test run.
    """
    import xml.etree.ElementTree as ElementTree

    subheading('Results')

    root = ElementTree.parse(xml_path).getroot()
    # Newer pytest wraps results as <testsuites><testsuite ...>; the count
    # attributes live on the inner <testsuite> in that layout. Fall back to
    # the root itself for the older flat <testsuite> layout.
    stats = root.find('testsuite') if root.tag == 'testsuites' else root
    if stats is None:
        stats = root
    summary = {
        'time': stats.get('time'),
        'tests': stats.get('tests'),
        'skips': stats.get('skips'),
        'failures': stats.get('failures'),
        'errors': stats.get('errors')
    }
    display('Time: {time} sec\tTests: {tests}\tSkipped: {skips}\tFailures: {failures}\tErrors: {errors}'.format(
        **summary))

    failed = []
    # iter() walks the whole tree, so <testcase> elements are found whether
    # they sit directly under the root or nested inside a <testsuite>
    # (root.findall only matched direct children and missed nested cases).
    for item in root.iter('testcase'):
        if item.findall('failure'):
            # keep just "module.Class" from the dotted classname for brevity
            file_and_class = '.'.join(item.get('classname').split('.')[-2:])
            failed.append('{}.{}'.format(file_and_class, item.get('name')))

    if failed:
        subheading('FAILURES')
        for name in failed:
            display(name)
    display('')
def check_history(modules=None):
    """Verify that each selected module's README/HISTORY renders correctly.

    Exits the process with status 1 when any module fails validation.

    :param modules: optional iterable of module names to restrict the check to.
    """
    # TODO: Does not work with extensions
    paths = get_path_table(include_only=modules)
    targets = list(paths['core'].items()) + list(paths['mod'].items())

    heading('Verify History')

    names = sorted(name for name, _ in targets)
    display('Verifying README and HISTORY files for modules: {}'.format(' '.join(names)))

    broken = []
    for mod_name, mod_path in targets:
        render_errors = _check_readme_render(mod_path)
        if not render_errors:
            continue
        broken.append(mod_name)
        subheading('{} errors'.format(mod_name))
        for err in render_errors:
            logger.error('%s\n', err)

    subheading('Results')
    if broken:
        display('The following modules have invalid README/HISTORYs:')
        logger.error('\n'.join(broken))
        logger.warning('See above for the full warning/errors')
        logger.warning('note: Line numbers in the errors map to the long_description of your setup.py.')
        sys.exit(1)
    display('OK')
# Esempio n. 30 / 0 -- stray pasted text (scraper artifact, not Python); commented out to keep the module importable
def _install_modules():
    """pip-install every command module in editable mode.

    :returns: True when every module installed successfully, False otherwise.
    """
    all_modules = list(get_path_table()['mod'].items())

    failures = []
    total_mods = len(all_modules)
    # enumerate keeps the "(i/total)" progress counter accurate even when an
    # install fails — the old manual counter only advanced on success, so the
    # displayed position stalled after the first failure.
    for mod_num, (name, path) in enumerate(all_modules, start=1):
        try:
            pip_cmd(
                "install -q -e {}".format(path),
                "Installing module `{}` ({}/{})...".format(
                    name, mod_num, total_mods))
        except CalledProcessError as err:
            # exit code is not zero
            failures.append("Failed to install {}. Error message: {}".format(
                name, err.output))

    for f in failures:
        display(f)

    # messages are never empty strings, so this equals the old `not any(failures)`
    return not failures