コード例 #1
0
ファイル: executor.py プロジェクト: ernstp/ansible
def integration_environment(args, target, cmd):
    """Create the environment dictionary for running an integration test target.
    :type args: IntegrationConfig
    :type target: IntegrationTarget
    :type cmd: list[str]
    :rtype: dict[str, str]
    """
    env = ansible_environment(args)

    if args.debug_strategy:
        env['ANSIBLE_STRATEGY'] = 'debug'

    if 'non_local/' in target.aliases:
        if args.coverage:
            # coverage collection only works for tests which run on the local system
            display.warning('Skipping coverage reporting for non-local test: %s' % target.name)

        env['ANSIBLE_TEST_REMOTE_INTERPRETER'] = ''

    # integration specific settings are applied last so they always take precedence
    env.update(
        JUNIT_OUTPUT_DIR=os.path.abspath('test/results/junit'),
        ANSIBLE_CALLBACK_WHITELIST='junit',
        ANSIBLE_TEST_CI=args.metadata.ci_provider,
    )

    cloud = get_cloud_environment(args, target)

    if cloud:
        cloud.configure_environment(env, cmd)

    return env
コード例 #2
0
def extract_powershell_module_utils_imports(path, module_utils):
    """Return the set of module_utils imports found in the specified source file.
    :type path: str path to the powershell source file to scan
    :type module_utils: set[str] names of known module_utils
    :rtype: set[str]
    """
    imports = set()

    with open(path, 'r') as module_fd:
        code = module_fd.read()

    # the legacy marker implicitly pulls in the legacy module_utils
    if '# POWERSHELL_COMMON' in code:
        imports.add('Ansible.ModuleUtils.Legacy')

    # use enumerate instead of a manually maintained line counter
    for line_number, line in enumerate(code.splitlines(), start=1):
        match = re.search(r'(?i)^#\s*requires\s+-module(?:s?)\s*(Ansible\.ModuleUtils\..+)', line)

        if not match:
            continue

        import_name = match.group(1)

        if import_name in module_utils:
            imports.add(import_name)
        else:
            # unknown names are reported but do not fail extraction
            display.warning('%s:%d Invalid module_utils import: %s' % (path, line_number, import_name))

    return imports
コード例 #3
0
ファイル: shippable.py プロジェクト: awiddersheim/ansible
def main():
    """Main program function."""
    try:
        args = parse_args()
        display.verbosity = args.verbosity
        display.color = args.color

        try:
            run_id = os.environ['SHIPPABLE_BUILD_ID']
        except KeyError as missing:
            raise MissingEnvironmentVariable(missing.args[0])

        client = HttpClient(args)
        jobs = client.get('https://api.shippable.com/jobs?runIds=%s' % run_id).json()

        # an error response is a dict rather than the expected list of jobs
        if not isinstance(jobs, list):
            raise ApplicationError(json.dumps(jobs, indent=4, sort_keys=True))

        if len(jobs) == 1:
            raise ApplicationError('Shippable run %s has only one job. Did you use the "Rebuild with SSH" option?' % run_id)
    except ApplicationWarning as warning:
        display.warning(str(warning))
        exit(0)
    except ApplicationError as error:
        display.error(str(error))
        exit(1)
    except KeyboardInterrupt:
        exit(2)
    except IOError as io_error:
        # a broken pipe is expected when output is truncated by the reader
        if io_error.errno != errno.EPIPE:
            raise
        exit(3)
コード例 #4
0
ファイル: test.py プロジェクト: 2ndQuadrant/ansible
def main():
    """Main program function."""
    try:
        # run from the repository root regardless of the invocation directory
        git_root = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..'))
        os.chdir(git_root)
        args = parse_args()
        config = args.config(args)
        display.verbosity = config.verbosity
        display.color = config.color

        try:
            args.func(config)
        except Delegate as delegation:
            # re-run the command in the environment requested by the command handler
            delegate(config, delegation.exclude, delegation.require)

        display.review_warnings()
    except ApplicationWarning as warning:
        display.warning(str(warning))
        exit(0)
    except ApplicationError as error:
        display.error(str(error))
        exit(1)
    except KeyboardInterrupt:
        exit(2)
    except IOError as io_error:
        # a broken pipe is expected when output is truncated by the reader
        if io_error.errno != errno.EPIPE:
            raise
        exit(3)
コード例 #5
0
ファイル: changes.py プロジェクト: awiddersheim/ansible
    def get_last_successful_commit(git, merge_runs):
        """Return the most recent commit with a successful merge run, if any.
        :type git: Git
        :type merge_runs: dict | list[dict]
        :rtype: str
        """
        # an error response is a dict; id 4004 indicates the project was not found
        if 'id' in merge_runs and merge_runs['id'] == 4004:
            display.warning('Unable to find project. Cannot determine changes. All tests will be executed.')
            return None

        seen_commits = set()
        successful_commit = None

        for run in sorted(merge_runs, key=lambda r: r['createdAt']):
            sha = run['commitSha']

            if sha in seen_commits:
                continue

            seen_commits.add(sha)

            # statusCode 30 marks a successful run; presumably per the Shippable API — confirm
            if run['statusCode'] == 30 and git.is_valid_ref(sha):
                successful_commit = sha

        if successful_commit is None:
            display.warning('No successful commit found. All tests will be executed.')

        return successful_commit
コード例 #6
0
ファイル: core_ci.py プロジェクト: awiddersheim/ansible
    def _start_at_threshold(self, data, headers, threshold):
        """Attempt to start an instance at the given threshold, retrying transient errors.
        :type data: dict[str, any]
        :type headers: dict[str, str]
        :type threshold: int
        :rtype: HttpResponse | None
        """
        retry_delay = 15  # seconds between attempts

        data['threshold'] = threshold

        display.info('Trying endpoint: %s (threshold %d)' % (self.endpoint, threshold), verbosity=1)

        # count down remaining attempts instead of decrementing a counter in the loop
        for attempts_left in range(2, -1, -1):
            response = self.client.put(self._uri, data=json.dumps(data), headers=headers)

            if response.status_code == 200:
                return response

            error = self._create_http_error(response)

            # 503 is not retried here; the caller may try again at a different threshold
            if response.status_code == 503:
                raise error

            if not attempts_left:
                raise error

            display.warning('%s. Trying again after %d seconds.' % (error, retry_delay))
            time.sleep(retry_delay)
コード例 #7
0
ファイル: test.py プロジェクト: ernstp/ansible
def main():
    """Main program function."""
    try:
        # run from the repository root regardless of the invocation directory
        git_root = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..'))
        os.chdir(git_root)
        initialize_cloud_plugins()
        sanity_init()
        args = parse_args()
        config = args.config(args)
        display.verbosity = config.verbosity
        display.color = config.color
        display.info_stderr = (isinstance(config, SanityConfig) and config.lint) or (isinstance(config, IntegrationConfig) and config.list_targets)
        check_startup()

        try:
            args.func(config)
        except Delegate as delegation:
            # re-run the command in the environment requested by the command handler
            delegate(config, delegation.exclude, delegation.require)

        display.review_warnings()
    except ApplicationWarning as warning:
        display.warning(str(warning))
        exit(0)
    except ApplicationError as error:
        display.error(str(error))
        exit(1)
    except KeyboardInterrupt:
        exit(2)
    except IOError as io_error:
        # a broken pipe is expected when output is truncated by the reader
        if io_error.errno != errno.EPIPE:
            raise
        exit(3)
コード例 #8
0
ファイル: executor.py プロジェクト: ernstp/ansible
def get_integration_docker_filter(args, targets):
    """Return aliases of targets which cannot run in the current docker configuration.
    :type args: IntegrationConfig
    :type targets: tuple[IntegrationTarget]
    :rtype: list[str]
    """
    exclude = []

    def exclude_alias(alias, warning):
        """Exclude targets carrying the alias and report them with the given warning template."""
        matched = [target.name for target in targets if alias in target.aliases]

        if matched:
            exclude.append(alias)
            display.warning(warning % (alias.rstrip('/'), ', '.join(matched)))

    if not args.docker_privileged:
        exclude_alias('needs/privileged/',
                      'Excluding tests marked "%s" which require --docker-privileged to run under docker: %s')

    if args.docker.endswith('py3'):
        exclude_alias('skip/python3/',
                      'Excluding tests marked "%s" which are not yet supported on python 3: %s')

    return exclude
コード例 #9
0
ファイル: executor.py プロジェクト: 2ndQuadrant/ansible
def get_integration_local_filter(args, targets):
    """Return aliases of targets which cannot run in the local environment.
    :type args: IntegrationConfig
    :type targets: tuple[IntegrationTarget]
    :rtype: list[str]
    """
    exclude = []

    def exclude_alias(alias, warning):
        """Exclude targets carrying the alias and report them with the given warning template."""
        matched = [target.name for target in targets if alias in target.aliases]

        if matched:
            exclude.append(alias)
            display.warning(warning % (alias.rstrip('/'), ', '.join(matched)))

    if os.getuid() != 0:
        exclude_alias('needs/root/',
                      'Excluding tests marked "%s" which require running as root: %s')

    # consider explicit testing of destructive as though --allow-destructive was given
    include_destructive = any(target.startswith('destructive/') for target in args.include)

    if not args.allow_destructive and not include_destructive:
        exclude_alias('destructive/',
                      'Excluding tests marked "%s" which require --allow-destructive to run locally: %s')

    return exclude
コード例 #10
0
ファイル: core_ci.py プロジェクト: ernstp/ansible
    def _start(self, auth):
        """Start the remote instance, retrying the request on transient failures.

        The instance configuration is sent to the service endpoint along with
        the supplied auth payload. On success the started state is persisted so
        a later call skips provisioning.

        :rtype: dict | None  # response body on success, {} with --explain, None if already started
        """
        if self.started:
            display.info('Skipping started %s/%s instance %s.' % (self.platform, self.version, self.instance_id),
                         verbosity=1)
            return

        display.info('Initializing new %s/%s instance %s.' % (self.platform, self.version, self.instance_id), verbosity=1)

        # Windows instances are provisioned with a WinRM setup script; other platforms send none.
        if self.platform == 'windows':
            with open('examples/scripts/ConfigureRemotingForAnsible.ps1', 'rb') as winrm_config_fd:
                winrm_config = winrm_config_fd.read().decode('utf-8')
        else:
            winrm_config = None

        data = dict(
            config=dict(
                platform=self.platform,
                version=self.version,
                public_key=self.ssh_key.pub_contents if self.ssh_key else None,
                query=False,
                winrm_config=winrm_config,
            )
        )

        data.update(dict(auth=auth))

        headers = {
            'Content-Type': 'application/json',
        }

        tries = 3
        sleep = 15  # seconds between retries

        while True:
            tries -= 1
            response = self.client.put(self._uri, data=json.dumps(data), headers=headers)

            if response.status_code == 200:
                break

            error = self._create_http_error(response)

            if not tries:
                # retries exhausted; surface the last error
                raise error

            display.warning('%s. Trying again after %d seconds.' % (error, sleep))
            time.sleep(sleep)

        # persist the started state so a subsequent _start call is a no-op
        self.started = True
        self._save()

        display.info('Started %s/%s from: %s' % (self.platform, self.version, self._uri), verbosity=1)

        if self.args.explain:
            return {}

        return response.json()
コード例 #11
0
ファイル: http.py プロジェクト: ernstp/ansible
    def request(self, method, url, data=None, headers=None):
        """Issue an HTTP request via curl and return the parsed response.
        :type method: str
        :type url: str
        :type data: str | None
        :type headers: dict[str, str] | None
        :rtype: HttpResponse
        """
        if headers is None:
            headers = {}

        headers['Expect'] = ''  # don't send expect continue header

        cmd = ['curl', '-s', '-S', '-i', '-X', method]

        for name, value in headers.items():
            cmd += ['-H', '%s: %s' % (name, value)]

        if data is not None:
            cmd += ['-d', data]

        cmd.append(url)

        max_attempts = 3
        sleep_seconds = 3

        # curl error codes which are safe to retry (request never sent to server)
        retry_on_status = (
            6,  # CURLE_COULDNT_RESOLVE_HOST
        )

        attempts = 0

        while True:
            attempts += 1

            try:
                stdout, _ = run_command(self.args, cmd, capture=True, always=self.always, cmd_verbosity=2)
                break
            except SubprocessError as ex:
                if ex.status not in retry_on_status or attempts >= max_attempts:
                    raise

                display.warning(u'%s' % ex)
                time.sleep(sleep_seconds)

        if self.args.explain and not self.always:
            return HttpResponse(method, url, 200, '')

        # split the raw curl output into the header block and the body
        header, body = stdout.split('\r\n\r\n', 1)

        status_line = header.split('\r\n')[0]
        status_code = int(status_line.split(' ')[1])

        return HttpResponse(method, url, status_code, body)
コード例 #12
0
ファイル: rstcheck.py プロジェクト: awiddersheim/ansible
    def test(self, args, targets):
        """Run rstcheck over the included .rst files.
        :type args: SanityConfig
        :type targets: SanityTargets
        :rtype: TestResult
        """
        if args.python_version in UNSUPPORTED_PYTHON_VERSIONS:
            display.warning('Skipping rstcheck on unsupported Python version %s.' % args.python_version)
            return SanitySkipped(self.name)

        with open('test/sanity/rstcheck/ignore-substitutions.txt', 'r') as ignore_fd:
            ignore_substitutions = sorted(set(ignore_fd.read().splitlines()))

        paths = sorted(target.path for target in targets.include if os.path.splitext(target.path)[1] in ('.rst',))

        if not paths:
            return SanitySkipped(self.name)

        cmd = [
            args.python_executable,
            '-m', 'rstcheck',
            '--report', 'warning',
            '--ignore-substitutions', ','.join(ignore_substitutions),
        ] + paths

        try:
            stdout, stderr = run_command(args, cmd, capture=True)
            status = 0
        except SubprocessError as ex:
            stdout, stderr, status = ex.stdout, ex.stderr, ex.status

        # findings are expected on stderr only; anything on stdout is unexpected
        if stdout:
            raise SubprocessError(cmd=cmd, status=status, stderr=stderr, stdout=stdout)

        if args.explain:
            return SanitySuccess(self.name)

        pattern = r'^(?P<path>[^:]*):(?P<line>[0-9]+): \((?P<level>INFO|WARNING|ERROR|SEVERE)/[0-4]\) (?P<message>.*)$'

        messages = [
            SanityMessage(
                message=parsed['message'],
                path=parsed['path'],
                line=int(parsed['line']),
                column=0,
                level=parsed['level'],
            )
            for parsed in (parse_to_dict(pattern, line) for line in stderr.splitlines())
        ]

        if messages:
            return SanityFailure(self.name, messages=messages)

        return SanitySuccess(self.name)
コード例 #13
0
ファイル: __init__.py プロジェクト: awiddersheim/ansible
    def filter(self, targets, exclude):
        """Filter out the cloud tests when the necessary config and resources are not available.
        :type targets: tuple[TestTarget]
        :type exclude: list[str]
        """
        skip = 'cloud/%s/' % self.platform
        skipped = [target.name for target in targets if skip in target.aliases]

        if not skipped:
            return

        exclude.append(skip)
        display.warning('Excluding tests marked "%s" which require config (see "%s"): %s'
                        % (skip.rstrip('/'), self.config_template_path, ', '.join(skipped)))
コード例 #14
0
ファイル: openshift.py プロジェクト: awiddersheim/ansible
    def _setup_static(self):
        """Configure OpenShift tests for use with static configuration."""
        with open(self.config_static_path, 'r') as config_fd:
            config = config_fd.read()

        # locate the server endpoint in the kubeconfig
        match = re.search(r'^ *server: (?P<server>.*)$', config, flags=re.MULTILINE)

        if not match:
            display.warning('Could not find OpenShift endpoint in kubeconfig. Skipping check for OpenShift service availability.')
            return

        self._wait_for_service(match.group('server'))
コード例 #15
0
ファイル: classification.py プロジェクト: 2ndQuadrant/ansible
def categorize_changes(paths, verbose_command=None):
    """Map the given changed paths to the test commands which should run for them.
    :type paths: list[str]
    :type verbose_command: str | None
    :rtype: dict[str, list[str]]
    """
    mapper = PathMapper()

    commands = {
        'sanity': set(),
        'compile': set(),
        'units': set(),
        'integration': set(),
        'windows-integration': set(),
        'network-integration': set(),
    }

    display.info('Mapping %d changed file(s) to tests.' % len(paths))

    for path in paths:
        tests = mapper.classify(path)

        if tests is None:
            display.info('%s -> all' % path, verbosity=1)
            tests = all_tests()  # not categorized, run all tests
            display.warning('Path not categorized: %s' % path)
        else:
            tests = {key: value for key, value in tests.items() if value}

            if verbose_command:
                result = '%s: %s' % (verbose_command, tests.get(verbose_command) or 'none')

                # identify targeted integration tests (those which only target a single integration command)
                if 'integration' in verbose_command and tests.get(verbose_command):
                    if not any('integration' in command for command in tests.keys() if command != verbose_command):
                        result += ' (targeted)'
            else:
                result = '%s' % tests

            display.info('%s -> %s' % (path, result), verbosity=1)

        for command, target in tests.items():
            commands[command].add(target)

    # collapse a command's targets to just 'all' when any of them is 'all'
    for command in commands:
        if 'all' in commands[command]:
            commands[command] = {'all'}

    # drop commands with no targets and return sorted target lists
    return {command: sorted(targets) for command, targets in commands.items() if targets}
コード例 #16
0
ファイル: test.py プロジェクト: ernstp/ansible
    def write(self, args):
        """Write the test results in all requested formats.
        :type args: TestConfig
        """
        self.write_console()
        self.write_bot(args)

        if args.lint:
            self.write_lint()

        if not args.junit:
            return

        if self.junit:
            self.write_junit(args)
        else:
            # the optional dependency for junit output is missing
            display.warning('Skipping junit xml output because the `junit-xml` python package was not found.', unique=True)
コード例 #17
0
ファイル: vcenter.py プロジェクト: awiddersheim/ansible
    def filter(self, targets, exclude):
        """Filter out the cloud tests when the necessary config and resources are not available.
        :type targets: tuple[TestTarget]
        :type exclude: list[str]
        """
        # when the docker command is available the tests can run, so nothing is filtered
        if find_executable('docker', required=False):
            return

        skip = 'cloud/%s/' % self.platform
        skipped = [target.name for target in targets if skip in target.aliases]

        if not skipped:
            return

        exclude.append(skip)
        display.warning('Excluding tests marked "%s" which require the "docker" command: %s'
                        % (skip.rstrip('/'), ', '.join(skipped)))
コード例 #18
0
ファイル: docker_util.py プロジェクト: awiddersheim/ansible
def docker_pull(args, image):
    """Pull the specified docker image, retrying on failure.
    :type args: EnvironmentConfig
    :type image: str
    """
    if not args.docker_pull:
        display.warning('Skipping docker pull for "%s". Image may be out-of-date.' % image)
        return

    for _ in range(9):
        try:
            docker_command(args, ['pull', image])
            return
        except SubprocessError:
            display.warning('Failed to pull docker image "%s". Waiting a few seconds before trying again.' % image)
            time.sleep(3)

    raise ApplicationError('Failed to pull docker image "%s".' % image)
コード例 #19
0
ファイル: executor.py プロジェクト: ernstp/ansible
def detect_changes_local(args):
    """Detect changes in the local working tree and record change metadata.
    :type args: TestConfig
    :rtype: list[str]
    """
    git = Git(args)
    result = LocalChanges(args, git)

    display.info('Detected branch %s forked from %s at commit %s' % (
        result.current_branch, result.fork_branch, result.fork_point))

    # warn about each category of change which is being excluded from the run
    ignored = (
        ('untracked', 'Ignored %s untracked file(s). Use --untracked to include them.'),
        ('committed', 'Ignored %s committed change(s). Omit --ignore-committed to include them.'),
        ('staged', 'Ignored %s staged change(s). Omit --ignore-staged to include them.'),
        ('unstaged', 'Ignored %s unstaged change(s). Omit --ignore-unstaged to include them.'),
    )

    for category, message in ignored:
        changes = getattr(result, category)

        if changes and not getattr(args, category):
            display.warning(message % len(changes))

    names = set()

    for category in ('tracked', 'untracked', 'committed', 'staged', 'unstaged'):
        if getattr(args, category):
            names |= set(getattr(result, category))

    if not args.metadata.changes:
        args.metadata.populate_changes(result.diff)

        for path in result.untracked:
            if is_binary_file(path):
                # binary files are recorded with an empty line range
                args.metadata.changes[path] = ((0, 0),)
                continue

            # untracked text files are recorded as fully changed
            with open(path, 'r') as source_fd:
                line_count = len(source_fd.read().splitlines())

            args.metadata.changes[path] = ((1, line_count),)

    return sorted(names)
コード例 #20
0
ファイル: docker_util.py プロジェクト: ernstp/ansible
def docker_run(args, image, options):
    """Run the specified docker image, retrying once on failure.
    :type args: EnvironmentConfig
    :type image: str
    :type options: list[str] | None
    :rtype: str | None, str | None
    """
    options = options or []

    for _ in range(2):
        try:
            return docker_command(args, ['run'] + options + [image], capture=True)
        except SubprocessError as ex:
            display.error(ex)
            display.warning('Failed to run docker image "%s". Waiting a few seconds before trying again.' % image)
            time.sleep(3)

    raise ApplicationError('Failed to run docker image "%s".' % image)
コード例 #21
0
ファイル: executor.py プロジェクト: ernstp/ansible
def get_integration_remote_filter(args, targets):
    """Return aliases of targets which cannot run on the remote platform under test.
    :type args: IntegrationConfig
    :type targets: tuple[IntegrationTarget]
    :rtype: list[str]
    """
    # the remote spec is "platform/version"; only the platform matters here
    platform = args.remote.split('/', 1)[0]

    skip = 'skip/%s/' % platform
    skipped = [target.name for target in targets if skip in target.aliases]

    if not skipped:
        return []

    display.warning('Excluding tests marked "%s" which are not yet supported on %s: %s'
                    % (skip.rstrip('/'), platform, ', '.join(skipped)))

    return [skip]
コード例 #22
0
def extract_python_module_utils_imports(path, module_utils):
    """Return the set of module_utils imports found in the specified source file.
    :type path: str
    :type module_utils: set[str]
    :rtype: set[str]
    """
    with open(path, 'r') as module_fd:
        code = module_fd.read()

    try:
        tree = ast.parse(code)
    except SyntaxError as ex:
        # Treat this error as a warning so tests can be executed as best as possible.
        # The compile test will detect and report this syntax error.
        display.warning('%s:%s Syntax error extracting module_utils imports: %s' % (path, ex.lineno, ex.msg))
        return set()

    finder = ModuleUtilFinder(path, module_utils)
    finder.visit(tree)

    return finder.imports
コード例 #23
0
ファイル: core_ci.py プロジェクト: awiddersheim/ansible
    def _get_parallels_endpoints(self):
        """
        :rtype: tuple[str]
        """
        client = HttpClient(self.args, always=True)
        display.info('Getting available endpoints...', verbosity=1)
        sleep = 3

        for _ in range(1, 10):
            response = client.get('https://s3.amazonaws.com/ansible-ci-files/ansible-test/parallels-endpoints.txt')

            if response.status_code == 200:
                endpoints = tuple(response.response.splitlines())
                display.info('Available endpoints (%d):\n%s' % (len(endpoints), '\n'.join(' - %s' % endpoint for endpoint in endpoints)), verbosity=1)
                return endpoints

            display.warning('HTTP %d error getting endpoints, trying again in %d seconds.' % (response.status_code, sleep))
            time.sleep(sleep)

        raise ApplicationError('Unable to get available endpoints.')
コード例 #24
0
    def add_import(self, name, line_number):
        """Record a module_utils import, resolving it to the longest known module_utils name.
        :type name: str
        :type line_number: int
        """
        original_name = name

        # walk from the most specific name to the least specific by stripping trailing components
        while len(name) > len('ansible.module_utils.'):
            if name in self.module_utils:
                if name not in self.imports:
                    display.info('%s:%d imports module_utils: %s' % (self.path, line_number, name), verbosity=5)
                    self.imports.add(name)

                return  # duplicate imports are ignored

            name = name.rpartition('.')[0]

        if self.path.startswith('test/'):
            return  # invalid imports in tests are ignored

        # Treat this error as a warning so tests can be executed as best as possible.
        # This error should be detected by unit or integration tests.
        display.warning('%s:%d Invalid module_utils import: %s' % (self.path, line_number, original_name))
コード例 #25
0
def get_powershell_module_utils_imports(powershell_targets):
    """Return a dictionary of module_utils names mapped to sets of powershell file paths.
    :type powershell_targets: list[TestTarget]
    :rtype: dict[str, set[str]]
    """
    module_utils = enumerate_module_utils()

    # start with an empty path set for every known module_util
    imports = {module_util: set() for module_util in module_utils}

    for target in powershell_targets:
        for module_util in extract_powershell_module_utils_imports(target.path, module_utils):
            imports[module_util].add(target.path)

    for module_util in sorted(imports):
        if not imports[module_util]:
            display.warning('No imports found which use the "%s" module_util.' % module_util)

    return imports
コード例 #26
0
ファイル: executor.py プロジェクト: 2ndQuadrant/ansible
def detect_changes_local(args):
    """Detect changes in the local working tree.
    :type args: TestConfig
    :rtype: list[str]
    """
    git = Git(args)
    result = LocalChanges(args, git)

    display.info('Detected branch %s forked from %s at commit %s' % (
        result.current_branch, result.fork_branch, result.fork_point))

    # warn about each category of change which is being excluded from the run
    ignored = (
        ('untracked', 'Ignored %s untracked file(s). Use --untracked to include them.'),
        ('committed', 'Ignored %s committed change(s). Omit --ignore-committed to include them.'),
        ('staged', 'Ignored %s staged change(s). Omit --ignore-staged to include them.'),
        ('unstaged', 'Ignored %s unstaged change(s). Omit --ignore-unstaged to include them.'),
    )

    for category, message in ignored:
        changes = getattr(result, category)

        if changes and not getattr(args, category):
            display.warning(message % len(changes))

    names = set()

    for category in ('tracked', 'untracked', 'committed', 'staged', 'unstaged'):
        if getattr(args, category):
            names |= set(getattr(result, category))

    return sorted(names)
コード例 #27
0
ファイル: pylint.py プロジェクト: trishnaguha/ansible-fork
    def test(self, args, targets):
        """Run pylint over the included python targets, grouped into per-area contexts.
        :type args: SanityConfig
        :type targets: SanityTargets
        :rtype: SanityResult
        """
        if args.python_version in UNSUPPORTED_PYTHON_VERSIONS:
            display.warning('Skipping pylint on unsupported Python version %s.' % args.python_version)
            return SanitySkipped(self.name)

        # paths listed in the skip file are excluded from linting
        with open(PYLINT_SKIP_PATH, 'r') as skip_fd:
            skip_paths = skip_fd.read().splitlines()

        skip_paths_set = set(skip_paths)

        # lint .py files and scripts under bin/, minus the skipped paths
        paths = sorted(i.path for i in targets.include if (os.path.splitext(i.path)[1] == '.py' or i.path.startswith('bin/')) and i.path not in skip_paths_set)

        contexts = {}
        remaining_paths = set(paths)

        def add_context(available_paths, context_name, context_filter):
            """Assign matching paths to the named context and remove them from the pool.
            :type available_paths: set[str]
            :type context_name: str
            :type context_filter: (str) -> bool
            """
            filtered_paths = set(p for p in available_paths if context_filter(p))
            contexts[context_name] = sorted(filtered_paths)
            available_paths -= filtered_paths

        # order matters: each context claims its paths before the more general ones below it
        add_context(remaining_paths, 'ansible-test', lambda p: p.startswith('test/runner/'))
        add_context(remaining_paths, 'units', lambda p: p.startswith('test/units/'))
        add_context(remaining_paths, 'test', lambda p: p.startswith('test/'))
        add_context(remaining_paths, 'hacking', lambda p: p.startswith('hacking/'))
        add_context(remaining_paths, 'modules', lambda p: p.startswith('lib/ansible/modules/'))
        add_context(remaining_paths, 'module_utils', lambda p: p.startswith('lib/ansible/module_utils/'))
        add_context(remaining_paths, 'ansible', lambda p: True)  # catch-all for everything left

        messages = []
        context_times = []

        test_start = datetime.datetime.utcnow()

        for context in sorted(contexts):
            context_paths = contexts[context]

            if not context_paths:
                continue

            # time each context so slow ones can be identified at high verbosity
            context_start = datetime.datetime.utcnow()
            messages += self.pylint(args, context, context_paths)
            context_end = datetime.datetime.utcnow()

            context_times.append('%s: %d (%s)' % (context, len(context_paths), context_end - context_start))

        test_end = datetime.datetime.utcnow()

        for context_time in context_times:
            display.info(context_time, verbosity=4)

        display.info('total: %d (%s)' % (len(paths), test_end - test_start), verbosity=4)

        errors = [SanityMessage(
            message=m['message'].replace('\n', ' '),
            path=m['path'],
            line=int(m['line']),
            column=int(m['column']),
            level=m['type'],
            code=m['symbol'],
        ) for m in messages]

        line = 0

        # flag stale skip-file entries so they get cleaned up
        for path in skip_paths:
            line += 1

            if not os.path.exists(path):
                # Keep files out of the list which no longer exist in the repo.
                errors.append(SanityMessage(
                    code='A101',
                    message='Remove "%s" since it does not exist' % path,
                    path=PYLINT_SKIP_PATH,
                    line=line,
                    column=1,
                    confidence=calculate_best_confidence(((PYLINT_SKIP_PATH, line), (path, 0)), args.metadata) if args.metadata.changes else None,
                ))

        if errors:
            return SanityFailure(self.name, messages=errors)

        return SanitySuccess(self.name)
コード例 #28
0
ファイル: delegation.py プロジェクト: psaintlaurent/ansible
def delegate_tox(args, exclude, require, integration_targets):
    """Delegate execution of ansible-test to tox, once per selected Python version.

    :type args: EnvironmentConfig
    :type exclude: list[str]
    :type require: list[str]
    :type integration_targets: tuple[IntegrationTarget]
    """
    if args.python:
        # a single, explicitly requested version -- verify tox can handle it
        if args.python_version not in SUPPORTED_PYTHON_VERSIONS:
            raise ApplicationError('tox does not support Python version %s' % args.python_version)

        versions = (args.python_version,)
    else:
        versions = SUPPORTED_PYTHON_VERSIONS

    if args.httptester:
        # tox delegation cannot provide httptester; warn about targets that need it
        needs_httptester = sorted(
            target.name
            for target in integration_targets
            if 'needs/httptester/' in target.aliases
        )

        if needs_httptester:
            display.warning(
                'Use --docker or --remote to enable httptester for tests marked "needs/httptester": %s'
                % ', '.join(needs_httptester))

    options = {
        '--tox': args.tox_args,
        '--tox-sitepackages': 0,
    }

    # temporary solution to permit ansible-test delegated to tox to provision remote resources
    optional = (
        'SHIPPABLE',
        'SHIPPABLE_BUILD_ID',
        'SHIPPABLE_JOB_NUMBER',
    )

    for version in versions:
        tox = ['tox', '-c', 'test/runner/tox.ini', '-e', 'py' + version.replace('.', '')]

        if args.tox_sitepackages:
            tox.append('--sitepackages')

        tox.append('--')

        cmd = generate_command(args, os.path.abspath('test/runner/test.py'), options, exclude, require)

        if not args.python:
            cmd += ['--python', version]

        if isinstance(args, TestConfig) and args.coverage and not args.coverage_label:
            # label coverage data with the python version used under tox
            cmd += ['--coverage-label', 'tox-%s' % version]

        env = common_environment()
        env.update(pass_vars(required=[], optional=optional))

        run_command(args, tox + cmd, env=env)
コード例 #29
0
def get_python_module_utils_imports(compile_targets):
    """Return a dictionary of python file paths mapped to sets of module_utils names.
    :type compile_targets: list[TestTarget]
    :rtype: dict[str, set[str]]
    """
    # Collect the available module_utils names from the module_utils directory,
    # keeping only real python modules (dropping __init__ and non-.py entries).
    module_utils_files = (
        os.path.splitext(filename)
        for filename in os.listdir('lib/ansible/module_utils'))
    module_utils = sorted(name[0] for name in module_utils_files
                          if name[0] != '__init__' and name[1] == '.py')

    # Map each target path to the set of module_utils names it imports directly.
    imports_by_target_path = {}

    for target in compile_targets:
        imports_by_target_path[
            target.path] = extract_python_module_utils_imports(
                target.path, module_utils)

    def recurse_import(import_name, depth=0, seen=None):
        """Recursively expand module_utils imports from module_utils files.
        :type import_name: str
        :type depth: int
        :type seen: set[str] | None
        :rtype set[str]
        """
        display.info('module_utils import: %s%s' % ('  ' * depth, import_name),
                     verbosity=4)

        if seen is None:
            # First call: seed the cycle guard with the root import.
            seen = set([import_name])

        results = set([import_name])

        import_path = os.path.join('lib/ansible/module_utils',
                                   '%s.py' % import_name)

        # Recurse into each direct import of this module_utils file, skipping
        # anything already visited to avoid infinite loops on import cycles.
        for name in sorted(imports_by_target_path.get(import_path, set())):
            if name in seen:
                continue

            seen.add(name)

            matches = sorted(recurse_import(name, depth + 1, seen))

            for result in matches:
                results.add(result)

        return results

    for module_util in module_utils:
        # recurse over module_utils imports while excluding self
        module_util_imports = recurse_import(module_util)
        module_util_imports.remove(module_util)

        # add recursive imports to all path entries which import this module_util
        # NOTE(review): this mutates imports_by_target_path while later
        # iterations of the enclosing loop call recurse_import against it, so
        # later module_utils see already-expanded sets -- presumably intentional
        # (it converges to the same transitive closure), but confirm.
        for target_path in imports_by_target_path:
            if module_util in imports_by_target_path[target_path]:
                for module_util_import in sorted(module_util_imports):
                    if module_util_import not in imports_by_target_path[
                            target_path]:
                        display.info(
                            '%s inherits import %s via %s' %
                            (target_path, module_util_import, module_util),
                            verbosity=6)
                        imports_by_target_path[target_path].add(
                            module_util_import)

    # Invert the mapping: module_util name -> set of target paths which use it.
    imports = dict([(module_util, set()) for module_util in module_utils])

    for target_path in imports_by_target_path:
        for module_util in imports_by_target_path[target_path]:
            imports[module_util].add(target_path)

    # Warn about module_utils that nothing imports; they may be dead code.
    for module_util in sorted(imports):
        if not len(imports[module_util]):
            display.warning(
                'No imports found which use the "%s" module_util.' %
                module_util)

    return imports
コード例 #30
0
ファイル: cover.py プロジェクト: Rajeshkumar90/Ansible
def command_coverage_combine(args):
    """Patch paths in coverage files and merge into a single file.
    :type args: CoverageConfig
    """
    coverage = initialize_coverage(args)

    # module name -> source path, for mapping remote module filenames back to the tree
    modules = dict((t.module, t.path) for t in list(walk_module_targets()))

    coverage_files = [
        os.path.join(COVERAGE_DIR, f) for f in os.listdir(COVERAGE_DIR)
        if f.startswith('coverage') and f != 'coverage'
    ]

    arc_data = {}

    ansible_path = os.path.abspath('lib/ansible/') + '/'
    root_path = os.getcwd() + '/'

    for coverage_file in coverage_files:
        original = coverage.CoverageData()

        if os.path.getsize(coverage_file) == 0:
            display.warning('Empty coverage file: %s' % coverage_file)
            continue

        try:
            original.read_file(coverage_file)
        except Exception as ex:  # pylint: disable=locally-disabled, broad-except
            display.error(str(ex))
            continue

        for filename in original.measured_files():
            # arcs() can return None (e.g. data recorded by an unsupported
            # version of coverage); treat that as no data instead of failing
            # later when concatenating onto a list.
            arcs = original.arcs(filename) or []

            if not arcs:
                display.warning('No arcs found for "%s" in coverage file: %s' %
                                (filename, coverage_file))
                continue

            if '/ansible_modlib.zip/ansible/' in filename:
                # module library extracted from the ansiballz zip payload
                new_name = re.sub('^.*/ansible_modlib.zip/ansible/',
                                  ansible_path, filename)
                display.info('%s -> %s' % (filename, new_name), verbosity=3)
                filename = new_name
            elif '/ansible_module_' in filename:
                module = re.sub('^.*/ansible_module_(?P<module>.*).py$',
                                '\\g<module>', filename)

                # guard against modules not present in the local tree,
                # which would otherwise raise a KeyError below
                if module not in modules:
                    display.warning('Skipping coverage of unknown module: %s' %
                                    module)
                    continue

                new_name = os.path.abspath(modules[module])
                display.info('%s -> %s' % (filename, new_name), verbosity=3)
                filename = new_name
            elif filename.startswith('/root/ansible/'):
                # remote test runs execute from /root/ansible; remap to the local tree
                new_name = re.sub('^/.*?/ansible/', root_path, filename)
                display.info('%s -> %s' % (filename, new_name), verbosity=3)
                filename = new_name

            if filename not in arc_data:
                arc_data[filename] = []

            arc_data[filename] += arcs

    updated = coverage.CoverageData()

    for filename in arc_data:
        if not os.path.isfile(filename):
            # skip entries whose remapped path does not exist locally
            display.warning('Invalid coverage path: %s' % filename)
            continue

        updated.add_arcs({filename: arc_data[filename]})

    if not args.explain:
        updated.write_file(COVERAGE_FILE)
コード例 #31
0
def command_integration_filtered(args, targets):
    """Run the given integration test targets, with retry/resume support.
    :type args: IntegrationConfig
    :type targets: tuple[IntegrationTarget]
    """
    found = False  # becomes True once the --start-at target has been reached
    passed = []
    failed = []

    targets_iter = iter(targets)

    test_dir = os.path.expanduser('~/ansible_testing')

    # If any selected target requires ssh, verify the local ssh service
    # responds before starting, retrying for up to max_tries attempts.
    if not args.explain and any('needs/ssh/' in target.aliases
                                for target in targets):
        max_tries = 20
        display.info(
            'SSH service required for tests. Checking to make sure we can connect.'
        )
        for i in range(1, max_tries + 1):
            try:
                run_command(args,
                            ['ssh', '-o', 'BatchMode=yes', 'localhost', 'id'],
                            capture=True)
                display.info('SSH service responded.')
                break
            except SubprocessError:
                if i == max_tries:
                    raise
                seconds = 3
                display.warning(
                    'SSH service not responding. Waiting %d second(s) before checking again.'
                    % seconds)
                time.sleep(seconds)

    # only applies to the first target run; cleared after the first role run
    start_at_task = args.start_at_task

    for target in targets_iter:
        # skip targets until the --start-at target is found
        if args.start_at and not found:
            found = target.name == args.start_at

            if not found:
                continue

        if args.list_targets:
            print(target.name)
            continue

        # allow one retry when --retry-on-error is given
        tries = 2 if args.retry_on_error else 1
        verbosity = args.verbosity

        cloud_environment = get_cloud_environment(args, target)

        # snapshot of the environment, validated after the target runs to
        # detect tests which leave the environment in a changed state
        original_environment = EnvironmentDescription(args)

        display.info('>>> Environment Description\n%s' % original_environment,
                     verbosity=3)

        try:
            while tries:
                tries -= 1

                if not args.explain:
                    # create a fresh test directory for each test target
                    remove_tree(test_dir)
                    make_dirs(test_dir)

                try:
                    if target.script_path:
                        command_integration_script(args, target)
                    else:
                        command_integration_role(args, target, start_at_task)
                        start_at_task = None
                    break
                except SubprocessError:
                    if cloud_environment:
                        cloud_environment.on_failure(target, tries)

                    # do not retry if the environment was altered by the test
                    if not original_environment.validate(target.name,
                                                         throw=False):
                        raise

                    if not tries:
                        raise

                    # retry once with maximum verbosity to aid debugging
                    display.warning(
                        'Retrying test target "%s" with maximum verbosity.' %
                        target.name)
                    display.verbosity = args.verbosity = 6

            original_environment.validate(target.name, throw=True)
            passed.append(target)
        except Exception as ex:
            failed.append(target)

            if args.continue_on_error:
                # NOTE(review): ex is an exception object, not a string --
                # confirm display.error accepts non-str arguments.
                display.error(ex)
                continue

            display.notice(
                'To resume at this test target, use the option: --start-at %s'
                % target.name)

            next_target = next(targets_iter, None)

            if next_target:
                display.notice(
                    'To resume after this test target, use the option: --start-at %s'
                    % next_target.name)

            raise
        finally:
            # restore the verbosity that may have been raised for a retry
            display.verbosity = args.verbosity = verbosity

    if failed:
        raise ApplicationError(
            'The %d integration test(s) listed below (out of %d) failed. See error output above for details:\n%s'
            % (len(failed), len(passed) + len(failed), '\n'.join(
                target.name for target in failed)))
コード例 #32
0
ファイル: cover.py プロジェクト: awiddersheim/ansible
def command_coverage_combine(args):
    """Patch paths in coverage files and merge into a single file.
    :type args: CoverageConfig
    :rtype: list[str]
    """
    coverage = initialize_coverage(args)

    # module name -> source path, for mapping remote module filenames back to the tree
    modules = dict((t.module, t.path) for t in list(walk_module_targets()))

    coverage_files = [os.path.join(COVERAGE_DIR, name) for name in os.listdir(COVERAGE_DIR) if '=coverage.' in name]

    ansible_path = os.path.abspath('lib/ansible/') + '/'
    root_path = os.getcwd() + '/'

    groups = {}

    if args.all or args.stub:
        sources = sorted(os.path.abspath(target.path) for target in walk_compile_targets())
    else:
        sources = []

    if args.stub:
        groups['=stub'] = dict((source, set()) for source in sources)

    def remap_filename(filename):
        """Translate a remote/temporary measured path back to a local source path.
        Returns None when the file should be skipped entirely.
        """
        if '/ansible_modlib.zip/ansible/' in filename:
            # module library extracted from the ansiballz zip payload
            remapped = re.sub('^.*/ansible_modlib.zip/ansible/', ansible_path, filename)
            display.info('%s -> %s' % (filename, remapped), verbosity=3)
            return remapped

        if '/ansible_module_' in filename:
            module_name = re.sub('^.*/ansible_module_(?P<module>.*).py$', '\\g<module>', filename)

            if module_name not in modules:
                display.warning('Skipping coverage of unknown module: %s' % module_name)
                return None

            remapped = os.path.abspath(modules[module_name])
            display.info('%s -> %s' % (filename, remapped), verbosity=3)
            return remapped

        if re.search('^(/.*?)?/root/ansible/', filename):
            # remote test runs execute from a root-owned checkout; remap to the local tree
            remapped = re.sub('^(/.*?)?/root/ansible/', root_path, filename)
            display.info('%s -> %s' % (filename, remapped), verbosity=3)
            return remapped

        return filename

    for counter, coverage_file in enumerate(coverage_files, 1):
        display.info('[%4d/%4d] %s' % (counter, len(coverage_files), coverage_file), verbosity=2)

        original = coverage.CoverageData()

        group = get_coverage_group(args, coverage_file)

        if group is None:
            display.warning('Unexpected name for coverage file: %s' % coverage_file)
            continue

        if os.path.getsize(coverage_file) == 0:
            display.warning('Empty coverage file: %s' % coverage_file)
            continue

        try:
            original.read_file(coverage_file)
        except Exception as ex:  # pylint: disable=locally-disabled, broad-except
            display.error(str(ex))
            continue

        for filename in original.measured_files():
            arcs = set(original.arcs(filename) or [])

            if not arcs:
                # This is most likely due to using an unsupported version of coverage.
                display.warning('No arcs found for "%s" in coverage file: %s' % (filename, coverage_file))
                continue

            filename = remap_filename(filename)

            if filename is None:
                continue

            arc_data = groups.setdefault(group, {})
            arc_data.setdefault(filename, set()).update(arcs)

    output_files = []

    for group in sorted(groups):
        arc_data = groups[group]

        updated = coverage.CoverageData()

        for filename in arc_data:
            if not os.path.isfile(filename):
                display.warning('Invalid coverage path: %s' % filename)
                continue

            updated.add_arcs({filename: list(arc_data[filename])})

        if args.all:
            # include every source file so reports show untested files as 0%
            updated.add_arcs(dict((source, []) for source in sources))

        if not args.explain:
            output_file = COVERAGE_FILE + group
            updated.write_file(output_file)
            output_files.append(output_file)

    return sorted(output_files)
コード例 #33
0
ファイル: __init__.py プロジェクト: sourcepole/ansible
    def test(self, args, targets):
        """Run this code-smell test script against the included target paths.

        :type args: SanityConfig
        :type targets: SanityTargets
        :rtype: TestResult
        """
        if args.python_version in self.UNSUPPORTED_PYTHON_VERSIONS:
            display.warning('Skipping %s on unsupported Python version %s.' %
                            (self.name, args.python_version))
            return SanitySkipped(self.name)

        # python scripts run under the selected interpreter; anything else runs directly
        if self.path.endswith('.py'):
            cmd = [args.python_executable, self.path]
        else:
            cmd = [self.path]

        env = ansible_environment(args, color=False)

        pattern = None
        data = None

        if self.config:
            output = self.config.get('output')
            extensions = self.config.get('extensions')
            prefixes = self.config.get('prefixes')
            files = self.config.get('files')
            always = self.config.get('always')
            text = self.config.get('text')
            ignore_changes = self.config.get('ignore_changes')

            if output == 'path-line-column-message':
                pattern = '^(?P<path>[^:]*):(?P<line>[0-9]+):(?P<column>[0-9]+): (?P<message>.*)$'
            elif output == 'path-message':
                pattern = '^(?P<path>[^:]*): (?P<message>.*)$'
            else:
                # Fail fast on a bad config. Previously the exception was
                # assigned to `pattern` instead of raised, deferring the
                # failure to a confusing error when the pattern was later
                # used as a regex.
                raise ApplicationError('Unsupported output type: %s' % output)

            if ignore_changes:
                # run against all targets, not just changed ones
                paths = sorted(i.path for i in targets.targets)
                always = False
            else:
                paths = sorted(i.path for i in targets.include)

            if always:
                # the script runs unconditionally without a path list
                paths = []

            # filter by text/binary content when requested
            if text is not None:
                if text:
                    paths = [p for p in paths if not is_binary_file(p)]
                else:
                    paths = [p for p in paths if is_binary_file(p)]

            if extensions:
                # bin/ scripts count as python files when '.py' is accepted
                paths = [
                    p for p in paths
                    if os.path.splitext(p)[1] in extensions or (
                        p.startswith('bin/') and '.py' in extensions)
                ]

            if prefixes:
                paths = [
                    p for p in paths if any(
                        p.startswith(pre) for pre in prefixes)
                ]

            if files:
                paths = [p for p in paths if os.path.basename(p) in files]

            if not paths and not always:
                # nothing to test and the script is not marked always-run
                return SanitySkipped(self.name)

            # the path list is passed to the script on stdin
            data = '\n'.join(paths)

            if data:
                display.info(data, verbosity=4)

        try:
            stdout, stderr = run_command(args,
                                         cmd,
                                         data=data,
                                         env=env,
                                         capture=True)
            status = 0
        except SubprocessError as ex:
            stdout = ex.stdout
            stderr = ex.stderr
            status = ex.status

        if stdout and not stderr:
            if pattern:
                # parse script output into structured sanity messages
                matches = parse_to_list_of_dict(pattern, stdout)

                messages = [
                    SanityMessage(
                        message=m['message'],
                        path=m['path'],
                        line=int(m.get('line', 0)),
                        column=int(m.get('column', 0)),
                    ) for m in matches
                ]

                return SanityFailure(self.name, messages=messages)

        if stderr or status:
            # any stderr output or non-zero exit is a failure
            summary = u'%s' % SubprocessError(
                cmd=cmd, status=status, stderr=stderr, stdout=stdout)
            return SanityFailure(self.name, summary=summary)

        return SanitySuccess(self.name)
コード例 #34
0
def command_network_integration(args):
    """Run network integration tests, optionally provisioning remote instances.

    :type args: NetworkIntegrationConfig
    """
    default_filename = 'test/integration/inventory.networking'

    filename = os.path.join('test/integration', args.inventory) if args.inventory else default_filename

    if not args.explain and not args.platform and not os.path.exists(filename):
        if args.inventory:
            filename = os.path.abspath(filename)

        raise ApplicationError(
            'Inventory not found: %s\n'
            'Use --inventory to specify the inventory path.\n'
            'Use --platform to provision resources and generate an inventory file.\n'
            'See also inventory template: %s.template' %
            (filename, default_filename))

    all_targets = tuple(walk_network_integration_targets(include_hidden=True))
    internal_targets = command_integration_filter(args, all_targets)
    platform_targets = set(alias for target in internal_targets for alias in target.aliases if alias.startswith('network/'))

    if not args.platform:
        install_command_requirements(args)
    else:
        instances = []  # type: list [lib.thread.WrappedThread]

        # generate an ssh key (if needed) up front once, instead of for each instance
        SshKey(args)

        for platform_version in args.platform:
            platform, version = platform_version.split('/', 1)

            if 'network/%s/' % platform not in platform_targets and 'network/basics/' not in platform_targets:
                display.warning(
                    'Skipping "%s" because selected tests do not target the "%s" platform.'
                    % (platform_version, platform))
                continue

            # provision each platform/version in its own background thread
            worker = lib.thread.WrappedThread(functools.partial(network_run, args, platform, version))
            worker.daemon = True
            worker.start()
            instances.append(worker)

        install_command_requirements(args)

        # wait for all provisioning threads to finish before collecting results
        while any(worker.is_alive() for worker in instances):
            time.sleep(1)

        remotes = [worker.wait_for_result() for worker in instances]
        inventory = network_inventory(remotes)

        display.info('>>> Inventory: %s\n%s' % (filename, inventory.strip()), verbosity=3)

        if not args.explain:
            with open(filename, 'w') as inventory_fd:
                inventory_fd.write(inventory)

    command_integration_filtered(args, internal_targets, all_targets)
コード例 #35
0
ファイル: cover.py プロジェクト: zdoop/ansible
def command_coverage_combine(args):
    """Patch paths in coverage files and merge into a single file.

    :type args: CoverageConfig
    :rtype: list[str]
    """
    coverage = initialize_coverage(args)

    # module name -> source path, for mapping remote module filenames back to the tree
    modules = {target.module: target.path for target in walk_module_targets()}

    coverage_files = [
        os.path.join(COVERAGE_DIR, entry)
        for entry in os.listdir(COVERAGE_DIR)
        if '=coverage.' in entry
    ]

    ansible_path = os.path.abspath('lib/ansible/') + '/'
    root_path = os.getcwd() + '/'

    groups = {}

    if args.all or args.stub:
        sources = sorted(os.path.abspath(target.path) for target in walk_compile_targets())
    else:
        sources = []

    if args.stub:
        groups['=stub'] = {source: set() for source in sources}

    file_count = len(coverage_files)

    for index, coverage_file in enumerate(coverage_files):
        display.info('[%4d/%4d] %s' % (index + 1, file_count, coverage_file), verbosity=2)

        original = coverage.CoverageData()

        group = get_coverage_group(args, coverage_file)

        if group is None:
            display.warning('Unexpected name for coverage file: %s' % coverage_file)
            continue

        if os.path.getsize(coverage_file) == 0:
            display.warning('Empty coverage file: %s' % coverage_file)
            continue

        try:
            original.read_file(coverage_file)
        except Exception as ex:  # pylint: disable=locally-disabled, broad-except
            display.error(str(ex))
            continue

        for filename in original.measured_files():
            arcs = set(original.arcs(filename) or [])

            if not arcs:
                # This is most likely due to using an unsupported version of coverage.
                display.warning('No arcs found for "%s" in coverage file: %s' %
                                (filename, coverage_file))
                continue

            if '/ansible_modlib.zip/ansible/' in filename:
                # module library extracted from the ansiballz zip payload
                remapped = re.sub('^.*/ansible_modlib.zip/ansible/', ansible_path, filename)
                display.info('%s -> %s' % (filename, remapped), verbosity=3)
                filename = remapped
            elif '/ansible_module_' in filename:
                module = re.sub('^.*/ansible_module_(?P<module>.*).py$', '\\g<module>', filename)

                if module not in modules:
                    display.warning('Skipping coverage of unknown module: %s' % module)
                    continue

                remapped = os.path.abspath(modules[module])
                display.info('%s -> %s' % (filename, remapped), verbosity=3)
                filename = remapped
            elif re.search('^(/.*?)?/root/ansible/', filename):
                # remote test runs execute from a root-owned checkout; remap to the local tree
                remapped = re.sub('^(/.*?)?/root/ansible/', root_path, filename)
                display.info('%s -> %s' % (filename, remapped), verbosity=3)
                filename = remapped

            if group not in groups:
                groups[group] = {}

            arc_data = groups[group]

            if filename not in arc_data:
                arc_data[filename] = set()

            arc_data[filename].update(arcs)

    output_files = []

    for group_name in sorted(groups):
        merged = coverage.CoverageData()

        for filename, arcs in groups[group_name].items():
            if not os.path.isfile(filename):
                display.warning('Invalid coverage path: %s' % filename)
                continue

            merged.add_arcs({filename: list(arcs)})

        if args.all:
            # include every source file so reports show untested files as 0%
            merged.add_arcs({source: [] for source in sources})

        if not args.explain:
            output_file = COVERAGE_FILE + group_name
            merged.write_file(output_file)
            output_files.append(output_file)

    return sorted(output_files)
コード例 #36
0
    def test(self, args, targets):
        """Run pylint over the target paths, applying the skip and ignore lists.

        :type args: SanityConfig
        :type targets: SanityTargets
        :rtype: TestResult
        """
        if args.python_version in UNSUPPORTED_PYTHON_VERSIONS:
            display.warning('Skipping pylint on unsupported Python version %s.' % args.python_version)
            return SanitySkipped(self.name)

        # discover the custom pylint plugins shipped with this sanity test
        plugin_dir = os.path.join(ANSIBLE_ROOT, 'test/sanity/pylint/plugins')
        plugin_names = sorted(p[0] for p in [os.path.splitext(p) for p in os.listdir(plugin_dir)]
                              if p[1] == '.py' and p[0] != '__init__')

        skip_paths = read_lines_without_comments(PYLINT_SKIP_PATH, optional=True)

        invalid_ignores = []

        # version qualifiers in the ignore file may be "2.7" style or a bare major version
        supported_versions = set(SUPPORTED_PYTHON_VERSIONS) - set(UNSUPPORTED_PYTHON_VERSIONS)
        supported_versions = set([v.split('.')[0] for v in supported_versions]) | supported_versions

        # parse the ignore file: path -> code -> line number of the ignore entry
        ignore_entries = read_lines_without_comments(PYLINT_IGNORE_PATH, optional=True)
        ignore = collections.defaultdict(dict)  # type: t.Dict[str, t.Dict[str, int]]
        line = 0

        for ignore_entry in ignore_entries:
            line += 1

            if not ignore_entry:
                continue

            if ' ' not in ignore_entry:
                invalid_ignores.append((line, 'Invalid syntax'))
                continue

            path, code = ignore_entry.split(' ', 1)

            if not os.path.exists(path):
                invalid_ignores.append((line, 'Remove "%s" since it does not exist' % path))
                continue

            if ' ' in code:
                code, version = code.split(' ', 1)

                if version not in supported_versions:
                    invalid_ignores.append((line, 'Invalid version: %s' % version))
                    continue

                if version not in (args.python_version, args.python_version.split('.')[0]):
                    continue  # ignore version specific entries for other versions

            ignore[path][code] = line

        skip_paths_set = set(skip_paths)

        paths = sorted(i.path for i in targets.include
                       if (os.path.splitext(i.path)[1] == '.py' or is_subdir(i.path, 'bin/'))
                       and i.path not in skip_paths_set)

        # split module paths into components so large module directories can
        # be broken into their own pylint contexts for performance
        module_paths = [os.path.relpath(p, data_context().content.module_path).split(os.path.sep)
                        for p in paths if is_subdir(p, data_context().content.module_path)]
        module_dirs = sorted(set([p[0] for p in module_paths if len(p) > 1]))

        large_module_group_threshold = 500
        large_module_groups = [key for key, value in
                               itertools.groupby(module_paths, lambda p: p[0] if len(p) > 1 else '')
                               if len(list(value)) > large_module_group_threshold]

        large_module_group_paths = [os.path.relpath(p, data_context().content.module_path).split(os.path.sep)
                                    for p in paths
                                    if any(is_subdir(p, os.path.join(data_context().content.module_path, g))
                                           for g in large_module_groups)]
        large_module_group_dirs = sorted(set([os.path.sep.join(p[:2]) for p in large_module_group_paths if len(p) > 2]))

        contexts = []
        remaining_paths = set(paths)

        def add_context(available_paths, context_name, context_filter):
            """Add a named context with the matching paths, removing them from the available set.
            :type available_paths: set[str]
            :type context_name: str
            :type context_filter: (str) -> bool
            """
            filtered_paths = set(p for p in available_paths if context_filter(p))
            contexts.append((context_name, sorted(filtered_paths)))
            available_paths -= filtered_paths

        def filter_path(path_filter=None):
            """Return a predicate which matches paths under the given directory.
            :type path_filter: str
            :rtype: (str) -> bool
            """
            def context_filter(path_to_filter):
                """
                :type path_to_filter: str
                :rtype: bool
                """
                return is_subdir(path_to_filter, path_filter)

            return context_filter

        # most specific contexts first, so paths are claimed by the narrowest match
        for large_module_dir in large_module_group_dirs:
            add_context(remaining_paths, 'modules/%s' % large_module_dir,
                        filter_path(os.path.join(data_context().content.module_path, large_module_dir)))

        for module_dir in module_dirs:
            add_context(remaining_paths, 'modules/%s' % module_dir,
                        filter_path(os.path.join(data_context().content.module_path, module_dir)))

        add_context(remaining_paths, 'modules', filter_path(data_context().content.module_path))
        add_context(remaining_paths, 'module_utils', filter_path(data_context().content.module_utils_path))

        add_context(remaining_paths, 'units', filter_path(data_context().content.unit_path))

        if data_context().content.collection:
            add_context(remaining_paths, 'collection', lambda p: True)
        else:
            add_context(remaining_paths, 'validate-modules', filter_path('test/sanity/validate-modules/'))
            add_context(remaining_paths, 'sanity', filter_path('test/sanity/'))
            add_context(remaining_paths, 'ansible-test', filter_path('test/runner/'))
            add_context(remaining_paths, 'test', filter_path('test/'))
            add_context(remaining_paths, 'hacking', filter_path('hacking/'))
            add_context(remaining_paths, 'ansible', lambda p: True)

        messages = []
        context_times = []

        test_start = datetime.datetime.utcnow()

        for context, context_paths in sorted(contexts):
            if not context_paths:
                continue

            context_start = datetime.datetime.utcnow()
            messages += self.pylint(args, context, context_paths, plugin_dir, plugin_names)
            context_end = datetime.datetime.utcnow()

            context_times.append('%s: %d (%s)' % (context, len(context_paths), context_end - context_start))

        test_end = datetime.datetime.utcnow()

        for context_time in context_times:
            display.info(context_time, verbosity=4)

        display.info('total: %d (%s)' % (len(paths), test_end - test_start), verbosity=4)

        errors = [SanityMessage(
            message=m['message'].replace('\n', ' '),
            path=m['path'],
            line=int(m['line']),
            column=int(m['column']),
            level=m['type'],
            code=m['symbol'],
        ) for m in messages]

        if args.explain:
            return SanitySuccess(self.name)

        line = 0

        filtered = []

        # drop errors covered by the ignore file, marking used entries by
        # zeroing their recorded line number
        for error in errors:
            if error.code in ignore[error.path]:
                ignore[error.path][error.code] = 0  # error ignored, clear line number of ignore entry to track usage
            else:
                filtered.append(error)  # error not ignored

        errors = filtered

        for invalid_ignore in invalid_ignores:
            errors.append(SanityMessage(
                code='A201',
                message=invalid_ignore[1],
                path=PYLINT_IGNORE_PATH,
                line=invalid_ignore[0],
                column=1,
                # use the invalid entry's own line; `line` was reset to 0 above
                # and previously produced a confidence for the wrong location
                confidence=calculate_confidence(PYLINT_IGNORE_PATH, invalid_ignore[0], args.metadata)
                if args.metadata.changes else None,
            ))

        for path in skip_paths:
            line += 1

            if not path:
                continue

            if not os.path.exists(path):
                # Keep files out of the list which no longer exist in the repo.
                errors.append(SanityMessage(
                    code='A101',
                    message='Remove "%s" since it does not exist' % path,
                    path=PYLINT_SKIP_PATH,
                    line=line,
                    column=1,
                    confidence=calculate_best_confidence(((PYLINT_SKIP_PATH, line), (path, 0)), args.metadata)
                    if args.metadata.changes else None,
                ))

        # flag ignore entries that were never used to suppress an error
        for path in paths:
            if path not in ignore:
                continue

            for code in ignore[path]:
                line = ignore[path][code]

                if not line:
                    continue  # a line of 0 means the entry suppressed an error

                errors.append(SanityMessage(
                    code='A102',
                    message='Remove since "%s" passes "%s" pylint test' % (path, code),
                    path=PYLINT_IGNORE_PATH,
                    line=line,
                    column=1,
                    confidence=calculate_best_confidence(((PYLINT_IGNORE_PATH, line), (path, 0)), args.metadata)
                    if args.metadata.changes else None,
                ))

        if errors:
            return SanityFailure(self.name, messages=errors)

        return SanitySuccess(self.name)
コード例 #37
0
ファイル: sanity.py プロジェクト: youngdev/ansible
def command_sanity_validate_modules(args, targets):
    """Run the validate-modules sanity test against changed module files.

    :type args: SanityConfig
    :type targets: SanityTargets
    :rtype: SanityResult
    """
    test = 'validate-modules'
    env = ansible_environment(args, color=False)

    # Reduce each included path to its deepest ancestor under the modules tree,
    # dropping paths outside it, then de-duplicate.
    module_paths = [deepest_path(target.path, 'lib/ansible/modules/') for target in targets.include_external]
    paths = sorted(set(module_path for module_path in module_paths if module_path))

    if not paths:
        return SanitySkipped(test)

    cmd = ['test/sanity/validate-modules/validate-modules', '--format', 'json'] + paths

    with open('test/sanity/validate-modules/skip.txt', 'r') as skip_fd:
        skip_paths = skip_fd.read().splitlines()

    skip_paths.extend(target.path for target in targets.exclude_external)

    if skip_paths:
        # Anchored alternation regex matching any of the skipped paths.
        cmd += ['--exclude', '^(%s)' % '|'.join(skip_paths)]

    if args.base_branch:
        cmd += ['--base-branch', args.base_branch]
    else:
        display.warning('Cannot perform module comparison against the base branch. Base branch not detected when running locally.')

    try:
        stdout, stderr = run_command(args, cmd, env=env, capture=True)
        status = 0
    except SubprocessError as ex:
        stdout = ex.stdout
        stderr = ex.stderr
        status = ex.status

    # Status 3 is accepted alongside 0; any other status (or stderr output)
    # is treated as a tool failure. Findings are parsed from stdout below.
    if stderr or status not in (0, 3):
        raise SubprocessError(cmd=cmd, status=status, stderr=stderr, stdout=stdout)

    if args.explain:
        return SanitySkipped(test)

    results = []

    for filename, output in json.loads(stdout).items():
        for item in output['errors']:
            results.append(SanityMessage(
                path=filename,
                line=int(item['line']) if 'line' in item else 0,
                column=int(item['column']) if 'column' in item else 0,
                level='error',
                code='E%s' % item['code'],
                message=item['msg'],
            ))

    if results:
        return SanityFailure(test, messages=results)

    return SanitySuccess(test)
コード例 #38
0
ファイル: classification.py プロジェクト: silvinux/ansible
def categorize_changes(paths, verbose_command=None):
    """Map changed file paths to the test commands and targets they require.

    :type paths: list[str]
    :type verbose_command: str
    :rtype paths: dict[str, list[str]]
    """
    mapper = PathMapper()

    commands = {
        'sanity': set(),
        'compile': set(),
        'units': set(),
        'integration': set(),
        'windows-integration': set(),
        'network-integration': set(),
    }

    additional_paths = set()

    # Expand each existing changed path into the files that depend on it.
    for changed_path in paths:
        if not os.path.exists(changed_path):
            continue

        dependent_paths = mapper.get_dependent_paths(changed_path)

        if not dependent_paths:
            continue

        display.info('Expanded "%s" to %d dependent file(s):' % (changed_path, len(dependent_paths)), verbosity=1)

        for dependent_path in dependent_paths:
            display.info(dependent_path, verbosity=1)
            additional_paths.add(dependent_path)

    additional_paths -= set(paths)  # don't count changed paths as additional paths

    if additional_paths:
        display.info('Expanded %d changed file(s) into %d additional dependent file(s).' % (len(paths), len(additional_paths)))
        paths = sorted(set(paths) | additional_paths)

    display.info('Mapping %d changed file(s) to tests.' % len(paths))

    for path in paths:
        tests = mapper.classify(path)

        if tests is None:
            display.info('%s -> all' % path, verbosity=1)
            tests = all_tests()  # not categorized, run all tests
            display.warning('Path not categorized: %s' % path)
        else:
            # Drop commands which mapped to an empty target.
            tests = dict((name, target) for name, target in tests.items() if target)

            if verbose_command:
                result = '%s: %s' % (verbose_command, tests.get(verbose_command) or 'none')

                # identify targeted integration tests (those which only target a single integration command)
                if 'integration' in verbose_command and tests.get(verbose_command):
                    if not any('integration' in command for command in tests if command != verbose_command):
                        result += ' (targeted)'
            else:
                result = '%s' % tests

            display.info('%s -> %s' % (path, result), verbosity=1)

        for command, target in tests.items():
            commands[command].add(target)

    # An 'all' target subsumes every other target for that command.
    for command in commands:
        if 'all' in commands[command]:
            commands[command] = set(['all'])

    return dict((command, sorted(targets)) for command, targets in commands.items() if targets)
コード例 #39
0
ファイル: http.py プロジェクト: sivakrishnaa/ansible-1
    def request(self, method, url, data=None, headers=None):
        """Perform an HTTP request using curl and return the parsed response.

        Retries up to 3 times when curl fails before the request reaches the
        server (currently only DNS resolution failures, exit code 6).

        :type method: str
        :type url: str
        :type data: str | None
        :type headers: dict[str, str] | None
        :rtype: HttpResponse
        """
        cmd = ['curl', '-s', '-S', '-i', '-X', method]

        if self.insecure:
            cmd += ['--insecure']

        if headers is None:
            headers = {}

        headers['Expect'] = ''  # don't send expect continue header

        if self.username:
            if self.password:
                # mark the password as sensitive so it is masked in output
                display.sensitive.add(self.password)
                cmd += ['-u', '%s:%s' % (self.username, self.password)]
            else:
                cmd += ['-u', self.username]

        # iterate items() instead of keys() with a per-key lookup
        for header, value in headers.items():
            cmd += ['-H', '%s: %s' % (header, value)]

        if data is not None:
            cmd += ['-d', data]

        if self.proxy:
            cmd += ['-x', self.proxy]

        cmd += [url]

        attempts = 0
        max_attempts = 3
        sleep_seconds = 3

        # curl error codes which are safe to retry (request never sent to server)
        retry_on_status = (
            6,  # CURLE_COULDNT_RESOLVE_HOST
        )

        stdout = ''

        while True:
            attempts += 1

            try:
                stdout = run_command(self.args, cmd, capture=True, always=self.always, cmd_verbosity=2)[0]
                break
            except SubprocessError as ex:
                if ex.status in retry_on_status and attempts < max_attempts:
                    display.warning(u'%s' % ex)
                    time.sleep(sleep_seconds)
                    continue

                raise

        if self.args.explain and not self.always:
            # no request was made; fabricate a successful empty response
            return HttpResponse(method, url, 200, '')

        # split the raw curl output into headers and body at the first blank line
        header, body = stdout.split('\r\n\r\n', 1)

        response_headers = header.split('\r\n')
        first_line = response_headers[0]
        http_response = first_line.split(' ')
        status_code = int(http_response[1])  # e.g. "HTTP/1.1 200 OK" -> 200

        return HttpResponse(method, url, status_code, body)
コード例 #40
0
ファイル: executor.py プロジェクト: ernstp/ansible
def command_integration_filtered(args, targets, all_targets):
    """Run the filtered integration test targets, with retry and resume support.

    :type args: IntegrationConfig
    :type targets: tuple[IntegrationTarget]
    :type all_targets: tuple[IntegrationTarget]
    """
    found = False  # set True once the --start-at target has been reached
    passed = []
    failed = []

    targets_iter = iter(targets)
    all_targets_dict = dict((target.name, target) for target in all_targets)

    setup_errors = []
    setup_targets_executed = set()  # names of setup targets already run

    # Validate setup aliases up front so a bad reference fails before any test runs.
    for target in all_targets:
        for setup_target in target.setup_once + target.setup_always:
            if setup_target not in all_targets_dict:
                setup_errors.append('Target "%s" contains invalid setup target: %s' % (target.name, setup_target))

    if setup_errors:
        raise ApplicationError('Found %d invalid setup aliases:\n%s' % (len(setup_errors), '\n'.join(setup_errors)))

    test_dir = os.path.expanduser('~/ansible_testing')

    # Wait (up to max_tries attempts) for a local SSH service when any target requires one.
    if not args.explain and any('needs/ssh/' in target.aliases for target in targets):
        max_tries = 20
        display.info('SSH service required for tests. Checking to make sure we can connect.')
        for i in range(1, max_tries + 1):
            try:
                run_command(args, ['ssh', '-o', 'BatchMode=yes', 'localhost', 'id'], capture=True)
                display.info('SSH service responded.')
                break
            except SubprocessError:
                if i == max_tries:
                    raise
                seconds = 3
                display.warning('SSH service not responding. Waiting %d second(s) before checking again.' % seconds)
                time.sleep(seconds)

    start_at_task = args.start_at_task

    results = {}  # per-target result metadata, written to a JSON file below

    for target in targets_iter:
        if args.start_at and not found:
            found = target.name == args.start_at

            if not found:
                continue  # skip targets before the --start-at target

        if args.list_targets:
            print(target.name)
            continue  # listing only; don't run anything

        tries = 2 if args.retry_on_error else 1
        verbosity = args.verbosity  # saved so the retry verbosity bump can be undone

        cloud_environment = get_cloud_environment(args, target)

        original_environment = EnvironmentDescription(args)

        display.info('>>> Environment Description\n%s' % original_environment, verbosity=3)

        try:
            while tries:
                tries -= 1

                try:
                    run_setup_targets(args, test_dir, target.setup_once, all_targets_dict, setup_targets_executed, False)

                    start_time = time.time()

                    run_setup_targets(args, test_dir, target.setup_always, all_targets_dict, setup_targets_executed, True)

                    if not args.explain:
                        # create a fresh test directory for each test target
                        remove_tree(test_dir)
                        make_dirs(test_dir)

                    if target.script_path:
                        command_integration_script(args, target)
                    else:
                        command_integration_role(args, target, start_at_task)
                        start_at_task = None  # only applies to the first role target run

                    end_time = time.time()

                    results[target.name] = dict(
                        name=target.name,
                        type=target.type,
                        aliases=target.aliases,
                        modules=target.modules,
                        run_time_seconds=int(end_time - start_time),
                        setup_once=target.setup_once,
                        setup_always=target.setup_always,
                        coverage=args.coverage,
                        coverage_label=args.coverage_label,
                        python_version=args.python_version,
                    )

                    break
                except SubprocessError:
                    if cloud_environment:
                        cloud_environment.on_failure(target, tries)

                    # if the environment changed, re-raise instead of retrying
                    if not original_environment.validate(target.name, throw=False):
                        raise

                    if not tries:
                        raise

                    display.warning('Retrying test target "%s" with maximum verbosity.' % target.name)
                    display.verbosity = args.verbosity = 6

            original_environment.validate(target.name, throw=True)
            passed.append(target)
        except Exception as ex:
            failed.append(target)

            if args.continue_on_error:
                display.error(ex)
                continue

            display.notice('To resume at this test target, use the option: --start-at %s' % target.name)

            next_target = next(targets_iter, None)

            if next_target:
                display.notice('To resume after this test target, use the option: --start-at %s' % next_target.name)

            raise
        finally:
            display.verbosity = args.verbosity = verbosity  # restore verbosity changed during retries

    if not args.explain:
        # write timestamped per-target results for later analysis
        results_path = 'test/results/data/%s-%s.json' % (args.command, re.sub(r'[^0-9]', '-', str(datetime.datetime.utcnow().replace(microsecond=0))))

        data = dict(
            targets=results,
        )

        with open(results_path, 'w') as results_fd:
            results_fd.write(json.dumps(data, sort_keys=True, indent=4))

    if failed:
        raise ApplicationError('The %d integration test(s) listed below (out of %d) failed. See error output above for details:\n%s' % (
            len(failed), len(passed) + len(failed), '\n'.join(target.name for target in failed)))
コード例 #41
0
def command_integration_filtered(args, targets, all_targets):
    """Run the filtered integration test targets, with retry and resume support.

    :type args: IntegrationConfig
    :type targets: tuple[IntegrationTarget]
    :type all_targets: tuple[IntegrationTarget]
    """
    found = False  # set True once the --start-at target has been reached
    passed = []
    failed = []

    targets_iter = iter(targets)
    all_targets_dict = dict((target.name, target) for target in all_targets)

    setup_errors = []
    setup_targets_executed = set()  # names of setup targets already run

    # Validate setup aliases up front so a bad reference fails before any test runs.
    for target in all_targets:
        for setup_target in target.setup_once + target.setup_always:
            if setup_target not in all_targets_dict:
                setup_errors.append(
                    'Target "%s" contains invalid setup target: %s' %
                    (target.name, setup_target))

    if setup_errors:
        raise ApplicationError('Found %d invalid setup aliases:\n%s' %
                               (len(setup_errors), '\n'.join(setup_errors)))

    test_dir = os.path.expanduser('~/ansible_testing')

    # Wait (up to max_tries attempts) for a local SSH service when any target requires one.
    if not args.explain and any('needs/ssh/' in target.aliases
                                for target in targets):
        max_tries = 20
        display.info(
            'SSH service required for tests. Checking to make sure we can connect.'
        )
        for i in range(1, max_tries + 1):
            try:
                run_command(args,
                            ['ssh', '-o', 'BatchMode=yes', 'localhost', 'id'],
                            capture=True)
                display.info('SSH service responded.')
                break
            except SubprocessError:
                if i == max_tries:
                    raise
                seconds = 3
                display.warning(
                    'SSH service not responding. Waiting %d second(s) before checking again.'
                    % seconds)
                time.sleep(seconds)

    start_at_task = args.start_at_task

    results = {}  # per-target result metadata, written to a JSON file below

    for target in targets_iter:
        if args.start_at and not found:
            found = target.name == args.start_at

            if not found:
                continue  # skip targets before the --start-at target

        if args.list_targets:
            print(target.name)
            continue  # listing only; don't run anything

        tries = 2 if args.retry_on_error else 1
        verbosity = args.verbosity  # saved so the retry verbosity bump can be undone

        cloud_environment = get_cloud_environment(args, target)

        original_environment = EnvironmentDescription(args)

        display.info('>>> Environment Description\n%s' % original_environment,
                     verbosity=3)

        try:
            while tries:
                tries -= 1

                try:
                    run_setup_targets(args, test_dir, target.setup_once,
                                      all_targets_dict, setup_targets_executed,
                                      False)

                    start_time = time.time()

                    run_setup_targets(args, test_dir, target.setup_always,
                                      all_targets_dict, setup_targets_executed,
                                      True)

                    if not args.explain:
                        # create a fresh test directory for each test target
                        remove_tree(test_dir)
                        make_dirs(test_dir)

                    if target.script_path:
                        command_integration_script(args, target)
                    else:
                        command_integration_role(args, target, start_at_task)
                        start_at_task = None  # only applies to the first role target run

                    end_time = time.time()

                    results[target.name] = dict(
                        name=target.name,
                        type=target.type,
                        aliases=target.aliases,
                        modules=target.modules,
                        run_time_seconds=int(end_time - start_time),
                        setup_once=target.setup_once,
                        setup_always=target.setup_always,
                        coverage=args.coverage,
                        coverage_label=args.coverage_label,
                        python_version=args.python_version,
                    )

                    break
                except SubprocessError:
                    if cloud_environment:
                        cloud_environment.on_failure(target, tries)

                    # if the environment changed, re-raise instead of retrying
                    if not original_environment.validate(target.name,
                                                         throw=False):
                        raise

                    if not tries:
                        raise

                    display.warning(
                        'Retrying test target "%s" with maximum verbosity.' %
                        target.name)
                    display.verbosity = args.verbosity = 6

            original_environment.validate(target.name, throw=True)
            passed.append(target)
        except Exception as ex:
            failed.append(target)

            if args.continue_on_error:
                display.error(ex)
                continue

            display.notice(
                'To resume at this test target, use the option: --start-at %s'
                % target.name)

            next_target = next(targets_iter, None)

            if next_target:
                display.notice(
                    'To resume after this test target, use the option: --start-at %s'
                    % next_target.name)

            raise
        finally:
            display.verbosity = args.verbosity = verbosity  # restore verbosity changed during retries

    if not args.explain:
        # write timestamped per-target results for later analysis
        results_path = 'test/results/data/%s-%s.json' % (
            args.command,
            re.sub(r'[^0-9]', '-',
                   str(datetime.datetime.utcnow().replace(microsecond=0))))

        data = dict(targets=results, )

        with open(results_path, 'w') as results_fd:
            results_fd.write(json.dumps(data, sort_keys=True, indent=4))

    if failed:
        raise ApplicationError(
            'The %d integration test(s) listed below (out of %d) failed. See error output above for details:\n%s'
            % (len(failed), len(passed) + len(failed), '\n'.join(
                target.name for target in failed)))
コード例 #42
0
    def test(self, args, targets):
        """Run pylint over the included paths and report new or stale issues.

        :type args: SanityConfig
        :type targets: SanityTargets
        :rtype: SanityResult
        """
        if args.python_version in UNSUPPORTED_PYTHON_VERSIONS:
            display.warning('Skipping pylint on unsupported Python version %s.' % args.python_version)
            return SanitySkipped(self.name)

        with open(PYLINT_SKIP_PATH, 'r') as skip_fd:
            skip_paths = skip_fd.read().splitlines()

        invalid_ignores = []

        # Version-specific ignore entries may name a full version ("2.7") or just a major version ("2").
        supported_versions = set(SUPPORTED_PYTHON_VERSIONS) - set(UNSUPPORTED_PYTHON_VERSIONS)
        supported_versions = set([v.split('.')[0] for v in supported_versions]) | supported_versions

        with open(PYLINT_IGNORE_PATH, 'r') as ignore_fd:
            ignore_entries = ignore_fd.read().splitlines()
            ignore = collections.defaultdict(dict)  # path -> code -> line number of the ignore entry
            line = 0

            for ignore_entry in ignore_entries:
                line += 1

                if ' ' not in ignore_entry:
                    invalid_ignores.append((line, 'Invalid syntax'))
                    continue

                path, code = ignore_entry.split(' ', 1)

                if ' ' in code:
                    code, version = code.split(' ', 1)

                    if version not in supported_versions:
                        invalid_ignores.append((line, 'Invalid version: %s' % version))
                        continue

                    if version != args.python_version and version != args.python_version.split('.')[0]:
                        continue  # ignore version specific entries for other versions

                ignore[path][code] = line

        skip_paths_set = set(skip_paths)

        paths = sorted(
            i.path for i in targets.include
            if (os.path.splitext(i.path)[1] == '.py'
                or i.path.startswith('bin/')) and i.path not in skip_paths_set)

        contexts = {}
        remaining_paths = set(paths)

        def add_context(available_paths, context_name, context_filter):
            """Assign matching paths to a context and remove them from the pool.
            :type available_paths: set[str]
            :type context_name: str
            :type context_filter: (str) -> bool
            """
            filtered_paths = set(p for p in available_paths if context_filter(p))
            contexts[context_name] = sorted(filtered_paths)
            available_paths -= filtered_paths

        # Order matters: each context claims its paths before the broader ones below it.
        add_context(remaining_paths, 'ansible-test', lambda p: p.startswith('test/runner/'))
        add_context(remaining_paths, 'units', lambda p: p.startswith('test/units/'))
        add_context(remaining_paths, 'test', lambda p: p.startswith('test/'))
        add_context(remaining_paths, 'hacking', lambda p: p.startswith('hacking/'))
        add_context(remaining_paths, 'modules', lambda p: p.startswith('lib/ansible/modules/'))
        add_context(remaining_paths, 'module_utils', lambda p: p.startswith('lib/ansible/module_utils/'))
        add_context(remaining_paths, 'ansible', lambda p: True)

        messages = []
        context_times = []

        test_start = datetime.datetime.utcnow()

        for context in sorted(contexts):
            context_paths = contexts[context]

            if not context_paths:
                continue

            context_start = datetime.datetime.utcnow()
            messages += self.pylint(args, context, context_paths)
            context_end = datetime.datetime.utcnow()

            context_times.append('%s: %d (%s)' % (context, len(context_paths), context_end - context_start))

        test_end = datetime.datetime.utcnow()

        for context_time in context_times:
            display.info(context_time, verbosity=4)

        display.info('total: %d (%s)' % (len(paths), test_end - test_start), verbosity=4)

        errors = [
            SanityMessage(
                message=m['message'].replace('\n', ' '),
                path=m['path'],
                line=int(m['line']),
                column=int(m['column']),
                level=m['type'],
                code=m['symbol'],
            ) for m in messages
        ]

        line = 0

        filtered = []

        for error in errors:
            if error.code in ignore[error.path]:
                ignore[error.path][error.code] = None  # error ignored, clear line number of ignore entry to track usage
            else:
                filtered.append(error)  # error not ignored

        errors = filtered

        for invalid_ignore in invalid_ignores:
            errors.append(
                SanityMessage(
                    code='A201',
                    message=invalid_ignore[1],
                    path=PYLINT_IGNORE_PATH,
                    line=invalid_ignore[0],
                    column=1,
                    # fixed: compute confidence for the invalid entry's own line,
                    # not the stale `line` counter (which is 0 at this point)
                    confidence=calculate_confidence(PYLINT_IGNORE_PATH, invalid_ignore[0],
                                                    args.metadata)
                    if args.metadata.changes else None,
                ))

        for path in skip_paths:
            line += 1

            if not path:
                continue  # fixed: skip blank lines instead of reporting them as missing files

            if not os.path.exists(path):
                # Keep files out of the list which no longer exist in the repo.
                errors.append(
                    SanityMessage(
                        code='A101',
                        message='Remove "%s" since it does not exist' % path,
                        path=PYLINT_SKIP_PATH,
                        line=line,
                        column=1,
                        confidence=calculate_best_confidence(
                            ((PYLINT_SKIP_PATH, line), (path, 0)),
                            args.metadata) if args.metadata.changes else None,
                    ))

        for path in paths:
            if path not in ignore:
                continue

            for code in ignore[path]:
                line = ignore[path][code]

                if not line:
                    continue  # the ignore entry was used (line cleared above)

                errors.append(
                    SanityMessage(
                        code='A102',
                        message='Remove since "%s" passes "%s" pylint test' %
                        (path, code),
                        path=PYLINT_IGNORE_PATH,
                        line=line,
                        column=1,
                        confidence=calculate_best_confidence(
                            ((PYLINT_IGNORE_PATH, line), (path, 0)),
                            args.metadata) if args.metadata.changes else None,
                    ))

        if errors:
            return SanityFailure(self.name, messages=errors)

        return SanitySuccess(self.name)
コード例 #43
0
    def _classify(self, path):
        """
        :type path: str
        :rtype: dict[str, str] | None
        """
        dirname = os.path.dirname(path)
        filename = os.path.basename(path)
        name, ext = os.path.splitext(filename)

        minimal = {}

        if path.startswith('.github/'):
            return minimal

        if path.startswith('bin/'):
            return all_tests(self.args)  # broad impact, run all tests

        if path.startswith('contrib/'):
            return {
                'units': 'test/units/contrib/'
            }

        if path.startswith('changelogs/'):
            return minimal

        if path.startswith('docs/'):
            return minimal

        if path.startswith('examples/'):
            if path == 'examples/scripts/ConfigureRemotingForAnsible.ps1':
                return {
                    'windows-integration': 'connection_winrm',
                }

            return minimal

        if path.startswith('hacking/'):
            return minimal

        if path.startswith('lib/ansible/modules/'):
            module_name = self.module_names_by_path.get(path)

            if module_name:
                return {
                    'units': module_name if module_name in self.units_modules else None,
                    'integration': self.posix_integration_by_module.get(module_name) if ext == '.py' else None,
                    'windows-integration': self.windows_integration_by_module.get(module_name) if ext == '.ps1' else None,
                    'network-integration': self.network_integration_by_module.get(module_name),
                    FOCUSED_TARGET: True,
                }

            return minimal

        if path.startswith('lib/ansible/module_utils/'):
            if ext == '.psm1':
                return minimal  # already expanded using get_dependent_paths

            if ext == '.py':
                return minimal  # already expanded using get_dependent_paths

        if path.startswith('lib/ansible/plugins/action/'):
            if ext == '.py':
                if name.startswith('net_'):
                    network_target = 'network/.*_%s' % name[4:]

                    if any(re.search(r'^%s$' % network_target, alias) for alias in self.integration_targets_by_alias):
                        return {
                            'network-integration': network_target,
                            'units': 'all',
                        }

                    return {
                        'network-integration': self.integration_all_target,
                        'units': 'all',
                    }

                if self.prefixes.get(name) == 'network':
                    network_platform = name
                elif name.endswith('_config') and self.prefixes.get(name[:-7]) == 'network':
                    network_platform = name[:-7]
                elif name.endswith('_template') and self.prefixes.get(name[:-9]) == 'network':
                    network_platform = name[:-9]
                else:
                    network_platform = None

                if network_platform:
                    network_target = 'network/%s/' % network_platform

                    if network_target in self.integration_targets_by_alias:
                        return {
                            'network-integration': network_target,
                            'units': 'all',
                        }

                    display.warning('Integration tests for "%s" not found.' % network_target, unique=True)

                    return {
                        'units': 'all',
                    }

        if path.startswith('lib/ansible/plugins/connection/'):
            if name == '__init__':
                return {
                    'integration': self.integration_all_target,
                    'windows-integration': self.integration_all_target,
                    'network-integration': self.integration_all_target,
                    'units': 'test/units/plugins/connection/',
                }

            units_path = 'test/units/plugins/connection/test_%s.py' % name

            if units_path not in self.units_paths:
                units_path = None

            integration_name = 'connection_%s' % name

            if integration_name not in self.integration_targets_by_name:
                integration_name = None

            # entire integration test commands depend on these connection plugins

            if name == 'winrm':
                return {
                    'windows-integration': self.integration_all_target,
                    'units': units_path,
                }

            if name == 'local':
                return {
                    'integration': self.integration_all_target,
                    'network-integration': self.integration_all_target,
                    'units': units_path,
                }

            if name == 'network_cli':
                return {
                    'network-integration': self.integration_all_target,
                    'units': units_path,
                }

            # other connection plugins have isolated integration and unit tests

            return {
                'integration': integration_name,
                'units': units_path,
            }

        if (path.startswith('lib/ansible/plugins/terminal/') or
                path.startswith('lib/ansible/plugins/cliconf/') or
                path.startswith('lib/ansible/plugins/netconf/')):
            if ext == '.py':
                if name in self.prefixes and self.prefixes[name] == 'network':
                    network_target = 'network/%s/' % name

                    if network_target in self.integration_targets_by_alias:
                        return {
                            'network-integration': network_target,
                            'units': 'all',
                        }

                    display.warning('Integration tests for "%s" not found.' % network_target, unique=True)

                    return {
                        'units': 'all',
                    }

                return {
                    'network-integration': self.integration_all_target,
                    'units': 'all',
                }

        if path.startswith('lib/ansible/utils/module_docs_fragments/'):
            return {
                'sanity': 'all',
            }

        if path.startswith('lib/ansible/'):
            return all_tests(self.args)  # broad impact, run all tests

        if path.startswith('packaging/'):
            if path.startswith('packaging/requirements/'):
                if name.startswith('requirements-') and ext == '.txt':
                    component = name.split('-', 1)[1]

                    candidates = (
                        'cloud/%s/' % component,
                    )

                    for candidate in candidates:
                        if candidate in self.integration_targets_by_alias:
                            return {
                                'integration': candidate,
                            }

                return all_tests(self.args)  # broad impact, run all tests

            return minimal

        if path.startswith('test/cache/'):
            return minimal

        if path.startswith('test/results/'):
            return minimal

        if path.startswith('test/legacy/'):
            return minimal

        if path.startswith('test/integration/roles/'):
            return minimal

        if path.startswith('test/integration/targets/'):
            if not os.path.exists(path):
                return minimal

            target = self.integration_targets_by_name[path.split('/')[3]]

            if 'hidden/' in target.aliases:
                if target.type == 'role':
                    return minimal  # already expanded using get_dependent_paths

                return {
                    'integration': self.integration_all_target,
                    'windows-integration': self.integration_all_target,
                    'network-integration': self.integration_all_target,
                }

            return {
                'integration': target.name if 'posix/' in target.aliases else None,
                'windows-integration': target.name if 'windows/' in target.aliases else None,
                'network-integration': target.name if 'network/' in target.aliases else None,
                FOCUSED_TARGET: True,
            }

        if path.startswith('test/integration/'):
            if dirname == 'test/integration':
                if self.prefixes.get(name) == 'network' and ext == '.yaml':
                    return minimal  # network integration test playbooks are not used by ansible-test

                if filename == 'network-all.yaml':
                    return minimal  # network integration test playbook not used by ansible-test

                if filename == 'platform_agnostic.yaml':
                    return minimal  # network integration test playbook not used by ansible-test

                for command in (
                        'integration',
                        'windows-integration',
                        'network-integration',
                ):
                    if name == command and ext == '.cfg':
                        return {
                            command: self.integration_all_target,
                        }

                if name.startswith('cloud-config-'):
                    cloud_target = 'cloud/%s/' % name.split('-')[2].split('.')[0]

                    if cloud_target in self.integration_targets_by_alias:
                        return {
                            'integration': cloud_target,
                        }

            return {
                'integration': self.integration_all_target,
                'windows-integration': self.integration_all_target,
                'network-integration': self.integration_all_target,
            }

        if path.startswith('test/sanity/'):
            return {
                'sanity': 'all',  # test infrastructure, run all sanity checks
            }

        if path.startswith('test/units/'):
            if path in self.units_paths:
                return {
                    'units': path,
                }

            # changes to files which are not unit tests should trigger tests from the nearest parent directory

            test_path = os.path.dirname(path)

            while test_path:
                if test_path + '/' in self.units_paths:
                    return {
                        'units': test_path + '/',
                    }

                test_path = os.path.dirname(test_path)

        if path.startswith('test/runner/completion/'):
            if path == 'test/runner/completion/docker.txt':
                return all_tests(self.args, force=True)  # force all tests due to risk of breaking changes in new test environment

        if path.startswith('test/runner/docker/'):
            return minimal  # not used by tests, only used to build the default container

        if path.startswith('test/runner/lib/cloud/'):
            cloud_target = 'cloud/%s/' % name

            if cloud_target in self.integration_targets_by_alias:
                return {
                    'integration': cloud_target,
                }

            return all_tests(self.args)  # test infrastructure, run all tests

        if path.startswith('test/runner/lib/sanity/'):
            return {
                'sanity': 'all',  # test infrastructure, run all sanity checks
            }

        if path.startswith('test/runner/requirements/'):
            if name in (
                    'integration',
                    'network-integration',
                    'windows-integration',
            ):
                return {
                    name: self.integration_all_target,
                }

            if name in (
                    'sanity',
                    'units',
            ):
                return {
                    name: 'all',
                }

            if name.startswith('integration.cloud.'):
                cloud_target = 'cloud/%s/' % name.split('.')[2]

                if cloud_target in self.integration_targets_by_alias:
                    return {
                        'integration': cloud_target,
                    }

        if path.startswith('test/runner/'):
            if dirname == 'test/runner' and name in (
                    'Dockerfile',
                    '.dockerignore',
            ):
                return minimal  # not used by tests, only used to build the default container

            return all_tests(self.args)  # test infrastructure, run all tests

        if path.startswith('test/utils/shippable/tools/'):
            return minimal  # not used by tests

        if path.startswith('test/utils/shippable/'):
            if dirname == 'test/utils/shippable':
                test_map = {
                    'cloud.sh': 'integration:cloud/',
                    'freebsd.sh': 'integration:all',
                    'linux.sh': 'integration:all',
                    'network.sh': 'network-integration:all',
                    'osx.sh': 'integration:all',
                    'rhel.sh': 'integration:all',
                    'sanity.sh': 'sanity:all',
                    'units.sh': 'units:all',
                    'windows.sh': 'windows-integration:all',
                }

                test_match = test_map.get(filename)

                if test_match:
                    test_command, test_target = test_match.split(':')

                    return {
                        test_command: test_target,
                    }

            return all_tests(self.args)  # test infrastructure, run all tests

        if path.startswith('test/utils/'):
            return minimal

        if path == 'test/README.md':
            return minimal

        if path.startswith('ticket_stubs/'):
            return minimal

        if '/' not in path:
            if path in (
                    '.gitattributes',
                    '.gitignore',
                    '.gitmodules',
                    '.mailmap',
                    'tox.ini',  # obsolete
                    'COPYING',
                    'VERSION',
                    'Makefile',
            ):
                return minimal

            if path in (
                    'shippable.yml',
                    '.coveragerc',
            ):
                return all_tests(self.args)  # test infrastructure, run all tests

            if path == 'setup.py':
                return all_tests(self.args)  # broad impact, run all tests

            if path == '.yamllint':
                return {
                    'sanity': 'all',
                }

            if ext in ('.md', '.rst', '.txt', '.xml', '.in'):
                return minimal

        return None  # unknown, will result in fall-back to run all tests
コード例 #44
0
ファイル: classification.py プロジェクト: lefth/ansible
def categorize_changes(args, paths, verbose_command=None):
    """
    Categorize changed paths into the test commands and targets which need to run.

    :type args: TestConfig
    :type paths: list[str]
    :type verbose_command: str
    :rtype: ChangeDescription
    """
    mapper = PathMapper(args)

    # targets accumulated per test command
    commands = {
        'sanity': set(),
        'units': set(),
        'integration': set(),
        'windows-integration': set(),
        'network-integration': set(),
    }

    # targets which came from focused (directly changed) paths, per command
    focused_commands = collections.defaultdict(set)

    deleted_paths = set()  # changed paths which no longer exist on disk
    original_paths = set()  # changed paths which still exist on disk
    additional_paths = set()  # dependent paths discovered through expansion
    no_integration_paths = set()  # focused paths which trigger no integration tests

    for path in paths:
        if not os.path.exists(path):
            deleted_paths.add(path)
            continue

        original_paths.add(path)

        dependent_paths = mapper.get_dependent_paths(path)

        if not dependent_paths:
            continue

        display.info('Expanded "%s" to %d dependent file(s):' %
                     (path, len(dependent_paths)),
                     verbosity=1)

        for dependent_path in dependent_paths:
            display.info(dependent_path, verbosity=1)
            additional_paths.add(dependent_path)

    additional_paths -= set(
        paths)  # don't count changed paths as additional paths

    if additional_paths:
        display.info(
            'Expanded %d changed file(s) into %d additional dependent file(s).'
            % (len(paths), len(additional_paths)))
        paths = sorted(set(paths) | additional_paths)

    display.info('Mapping %d changed file(s) to tests.' % len(paths))

    for path in paths:
        tests = mapper.classify(path)

        if tests is None:
            focused_target = False

            display.info('%s -> all' % path, verbosity=1)
            tests = all_tests(args)  # not categorized, run all tests
            display.warning('Path not categorized: %s' % path)
        else:
            # a focused target only counts when the path itself changed, not when it was pulled in as a dependency
            focused_target = tests.pop(FOCUSED_TARGET,
                                       False) and path in original_paths

            tests = dict((key, value) for key, value in tests.items() if value)

            if focused_target and not any('integration' in command
                                          for command in tests):
                no_integration_paths.add(
                    path)  # path triggers no integration tests

            if verbose_command:
                result = '%s: %s' % (verbose_command,
                                     tests.get(verbose_command) or 'none')

                # identify targeted integration tests (those which only target a single integration command)
                if 'integration' in verbose_command and tests.get(
                        verbose_command):
                    if not any('integration' in command for command in tests
                               if command != verbose_command):
                        if focused_target:
                            result += ' (focused)'

                        result += ' (targeted)'
            else:
                result = '%s' % tests

            display.info('%s -> %s' % (path, result), verbosity=1)

        for command, target in tests.items():
            commands[command].add(target)

            if focused_target:
                focused_commands[command].add(target)

    for command in commands:
        commands[command].discard('none')  # 'none' is a placeholder, not a real target

        if any(t == 'all' for t in commands[command]):
            commands[command] = set(['all'])  # 'all' supersedes any specific targets

    # drop commands with no targets; sort targets for deterministic output
    commands = dict((c, sorted(commands[c])) for c in commands if commands[c])
    focused_commands = dict(
        (c, sorted(focused_commands[c])) for c in focused_commands)

    for command in commands:
        if commands[command] == ['all']:
            commands[command] = [
            ]  # changes require testing all targets, do not filter targets

    changes = ChangeDescription()
    changes.command = verbose_command
    changes.changed_paths = sorted(original_paths)
    changes.deleted_paths = sorted(deleted_paths)
    changes.regular_command_targets = commands
    changes.focused_command_targets = focused_commands
    changes.no_integration_paths = sorted(no_integration_paths)

    return changes
コード例 #45
0
    def get(self, tries=3, sleep=15, always_raise_on=None):
        """
        Get instance connection information.
        :type tries: int
        :type sleep: int
        :type always_raise_on: list[int] | None
        :rtype: InstanceConnection
        """
        # an instance which never started has nothing to connect to
        if not self.started:
            display.info('Skipping invalid %s/%s instance %s.' % (self.platform, self.version, self.instance_id), verbosity=1)
            return None

        always_raise_on = always_raise_on or []

        # re-use cached connection details while the instance is still running
        if self.connection and self.connection.running:
            return self.connection

        # poll the endpoint, retrying transient failures up to the allowed attempts
        while True:
            tries -= 1
            http_response = self.client.get(self._uri)

            if http_response.status_code == 200:
                break

            http_error = self._create_http_error(http_response)

            if not tries or http_response.status_code in always_raise_on:
                raise http_error

            display.warning('%s. Trying again after %d seconds.' % (http_error, sleep))
            time.sleep(sleep)

        if not self.args.explain:
            payload = http_response.json()
            details = payload['connection']

            self.connection = InstanceConnection(
                running=payload['status'] == 'running',
                hostname=details['hostname'],
                port=int(details.get('port', self.port)),
                username=details['username'],
                password=details.get('password'),
            )

            if self.connection.password:
                display.sensitive.add(str(self.connection.password))
        else:
            # placeholder connection details for explain (dry run) mode
            self.connection = InstanceConnection(
                running=True,
                hostname='cloud.example.com',
                port=self.port or 12345,
                username='******',
                password='******' if self.platform == 'windows' else None,
            )

        state = 'running' if self.connection.running else 'starting'

        display.info('Status update: %s/%s on instance %s is %s.' % (self.platform, self.version, self.instance_id, state), verbosity=1)

        return self.connection
コード例 #46
0
    def test(self, args, targets):
        """
        Run pylint over the included targets, batched into separate contexts.

        :type args: SanityConfig
        :type targets: SanityTargets
        :rtype: TestResult
        """
        if args.python_version in UNSUPPORTED_PYTHON_VERSIONS:
            display.warning('Skipping pylint on unsupported Python version %s.' % args.python_version)
            return SanitySkipped(self.name)

        # discover the custom pylint plugins shipped with ansible-test
        plugin_dir = os.path.join(ANSIBLE_ROOT, 'test/sanity/pylint/plugins')
        plugin_names = sorted(p[0] for p in [
            os.path.splitext(p) for p in os.listdir(plugin_dir)] if p[1] == '.py' and p[0] != '__init__')

        settings = self.load_settings(args, 'ansible-test')

        paths = sorted(i.path for i in targets.include if os.path.splitext(i.path)[1] == '.py' or is_subdir(i.path, 'bin/'))
        paths = settings.filter_skipped_paths(paths)

        # split module paths into components relative to the module root
        module_paths = [os.path.relpath(p, data_context().content.module_path).split(os.path.sep) for p in
                        paths if is_subdir(p, data_context().content.module_path)]
        module_dirs = sorted(set([p[0] for p in module_paths if len(p) > 1]))

        # module directories with more paths than this are subdivided one level deeper
        large_module_group_threshold = 500
        # module_paths is derived from the sorted paths list, so groupby sees contiguous groups
        large_module_groups = [key for key, value in
                               itertools.groupby(module_paths, lambda p: p[0] if len(p) > 1 else '') if len(list(value)) > large_module_group_threshold]

        large_module_group_paths = [os.path.relpath(p, data_context().content.module_path).split(os.path.sep) for p in paths
                                    if any(is_subdir(p, os.path.join(data_context().content.module_path, g)) for g in large_module_groups)]
        large_module_group_dirs = sorted(set([os.path.sep.join(p[:2]) for p in large_module_group_paths if len(p) > 2]))

        contexts = []  # (context_name, sorted_paths) tuples, each linted as a separate pylint run
        remaining_paths = set(paths)

        def add_context(available_paths, context_name, context_filter):
            """
            Claim the paths matched by context_filter for the named context, removing them from available_paths.
            :type available_paths: set[str]
            :type context_name: str
            :type context_filter: (str) -> bool
            """
            filtered_paths = set(p for p in available_paths if context_filter(p))
            contexts.append((context_name, sorted(filtered_paths)))
            available_paths -= filtered_paths

        def filter_path(path_filter=None):
            """
            Return a predicate which matches paths under the given directory prefix.
            :type path_filter: str
            :rtype: (str) -> bool
            """
            def context_filter(path_to_filter):
                """
                :type path_to_filter: str
                :rtype: bool
                """
                return is_subdir(path_to_filter, path_filter)

            return context_filter

        # order matters: each add_context call claims paths from remaining_paths,
        # so the most specific contexts must come first
        for large_module_dir in large_module_group_dirs:
            add_context(remaining_paths, 'modules/%s' % large_module_dir, filter_path(os.path.join(data_context().content.module_path, large_module_dir)))

        for module_dir in module_dirs:
            add_context(remaining_paths, 'modules/%s' % module_dir, filter_path(os.path.join(data_context().content.module_path, module_dir)))

        add_context(remaining_paths, 'modules', filter_path(data_context().content.module_path))
        add_context(remaining_paths, 'module_utils', filter_path(data_context().content.module_utils_path))

        add_context(remaining_paths, 'units', filter_path(data_context().content.unit_path))

        if data_context().content.collection:
            add_context(remaining_paths, 'collection', lambda p: True)
        else:
            add_context(remaining_paths, 'validate-modules', filter_path('test/sanity/validate-modules/'))
            add_context(remaining_paths, 'sanity', filter_path('test/sanity/'))
            add_context(remaining_paths, 'ansible-test', filter_path('test/runner/'))
            add_context(remaining_paths, 'test', filter_path('test/'))
            add_context(remaining_paths, 'hacking', filter_path('hacking/'))
            add_context(remaining_paths, 'ansible', lambda p: True)  # catch-all for anything left

        messages = []
        context_times = []  # per-context timing strings for verbose output

        test_start = datetime.datetime.utcnow()

        for context, context_paths in sorted(contexts):
            if not context_paths:
                continue

            context_start = datetime.datetime.utcnow()
            messages += self.pylint(args, context, context_paths, plugin_dir, plugin_names)
            context_end = datetime.datetime.utcnow()

            context_times.append('%s: %d (%s)' % (context, len(context_paths), context_end - context_start))

        test_end = datetime.datetime.utcnow()

        for context_time in context_times:
            display.info(context_time, verbosity=4)

        display.info('total: %d (%s)' % (len(paths), test_end - test_start), verbosity=4)

        # convert raw pylint message dicts into SanityMessage objects
        errors = [SanityMessage(
            message=m['message'].replace('\n', ' '),
            path=m['path'],
            line=int(m['line']),
            column=int(m['column']),
            level=m['type'],
            code=m['symbol'],
        ) for m in messages]

        if args.explain:
            return SanitySuccess(self.name)

        errors = settings.process_errors(errors, paths)

        if errors:
            return SanityFailure(self.name, messages=errors)

        return SanitySuccess(self.name)
コード例 #47
0
def command_integration_filtered(args, targets):
    """
    Run the given filtered integration test targets, with retry and resume support.

    :type args: IntegrationConfig
    :type targets: tuple[IntegrationTarget]
    """
    found = False  # becomes True once the --start-at target has been reached

    targets_iter = iter(targets)

    test_dir = os.path.expanduser('~/ansible_testing')

    if not args.explain:
        remove_tree(test_dir)
        make_dirs(test_dir)

    # wait for the local SSH service when any selected target requires it
    if any('needs/ssh/' in target.aliases for target in targets):
        max_tries = 20
        display.info('SSH service required for tests. Checking to make sure we can connect.')
        for i in range(1, max_tries + 1):
            try:
                run_command(args, ['ssh', '-o', 'BatchMode=yes', 'localhost', 'id'], capture=True)
                display.info('SSH service responded.')
                break
            except SubprocessError:
                if i == max_tries:
                    raise  # bare raise preserves the original traceback (unlike `raise ex`)
                seconds = 3
                display.warning('SSH service not responding. Waiting %d second(s) before checking again.' % seconds)
                time.sleep(seconds)

    start_at_task = args.start_at_task

    for target in targets_iter:
        if args.start_at and not found:
            found = target.name == args.start_at

            if not found:
                continue

        tries = 2 if args.retry_on_error else 1
        verbosity = args.verbosity

        try:
            while tries:
                tries -= 1

                try:
                    if target.script_path:
                        command_integration_script(args, target)
                    else:
                        command_integration_role(args, target, start_at_task)
                        start_at_task = None  # --start-at-task only applies to the first role run
                    break
                except SubprocessError:
                    if not tries:
                        raise

                    display.warning('Retrying test target "%s" with maximum verbosity.' % target.name)
                    display.verbosity = args.verbosity = 6
        # explicit BaseException replaces a bare `except:` with identical semantics:
        # KeyboardInterrupt/SystemExit are included so the resume hint is always shown
        except BaseException:
            display.notice('To resume at this test target, use the option: --start-at %s' % target.name)

            next_target = next(targets_iter, None)

            if next_target:
                display.notice('To resume after this test target, use the option: --start-at %s' % next_target.name)

            raise
        finally:
            display.verbosity = args.verbosity = verbosity
コード例 #48
0
ファイル: __init__.py プロジェクト: awiddersheim/ansible
def command_sanity(args):
    """
    Execute the sanity test command.
    :type args: SanityConfig
    """
    changes = get_changes_filter(args)
    required = (args.require or []) + changes
    targets = SanityTargets(args.include, args.exclude, required)

    if not targets.include:
        raise AllTargetsSkipped()

    if args.delegate:
        raise Delegate(require=changes)

    install_command_requirements(args)

    selected = sanity_get_tests()

    if args.test:
        selected = [test for test in selected if test.name in args.test]
    else:
        # without --allow-disabled, drop tests which are disabled by default
        disabled_names = [test.name for test in selected if not test.enabled and not args.allow_disabled]
        selected = [test for test in selected if test.enabled or args.allow_disabled]

        if disabled_names:
            display.warning('Skipping tests disabled by default without --allow-disabled: %s' % ', '.join(sorted(disabled_names)))

    if args.skip_test:
        selected = [test for test in selected if test.name not in args.skip_test]

    tests_run = 0
    failures = []

    for test in selected:
        if args.list_tests:
            display.info(test.name)
            continue

        # multi-version tests are run once per supported Python version
        versions = SUPPORTED_PYTHON_VERSIONS if isinstance(test, SanityMultipleVersion) else (None,)

        for version in versions:
            if args.python and version and version != args.python_version:
                continue

            display.info('Sanity check using %s%s' % (test.name, ' with Python %s' % version if version else ''))

            options = ''

            if isinstance(test, SanityCodeSmellTest):
                result = test.test(args, targets)
            elif isinstance(test, SanityMultipleVersion):
                result = test.test(args, targets, python_version=version)
                options = ' --python %s' % version
            elif isinstance(test, SanitySingleVersion):
                result = test.test(args, targets)
            else:
                raise Exception('Unsupported test type: %s' % type(test))

            result.write(args)

            tests_run += 1

            if isinstance(result, SanityFailure):
                failures.append(result.test + options)

    if failures:
        message = 'The %d sanity test(s) listed below (out of %d) failed. See error output above for details.\n%s' % (
            len(failures), tests_run, '\n'.join(failures))

        if args.failure_ok:
            display.error(message)
        else:
            raise ApplicationError(message)
コード例 #49
0
ファイル: __init__.py プロジェクト: kirlics/ansible-1
def command_sanity(args):
    """
    Execute the sanity test command.
    :type args: SanityConfig
    """
    changes = get_changes_filter(args)
    require = args.require + changes
    targets = SanityTargets(args.include, args.exclude, require)

    if not targets.include:
        raise AllTargetsSkipped()

    if args.delegate:
        # hand off execution to the configured delegation environment
        raise Delegate(require=changes, exclude=args.exclude)

    install_command_requirements(args)

    tests = sanity_get_tests()

    if args.test:
        tests = [t for t in tests if t.name in args.test]
    else:
        # without --allow-disabled, drop tests which are disabled by default
        disabled = [t.name for t in tests if not t.enabled and not args.allow_disabled]
        tests = [t for t in tests if t.enabled or args.allow_disabled]

        if disabled:
            display.warning('Skipping tests disabled by default without --allow-disabled: %s' % ', '.join(sorted(disabled)))

    if args.skip_test:
        tests = [t for t in tests if t.name not in args.skip_test]

    total = 0  # number of test runs performed
    failed = []  # descriptions of failed test runs

    for test in tests:
        if args.list_tests:
            display.info(test.name)
            continue

        # multi-version tests are run once per supported Python version
        if isinstance(test, SanityMultipleVersion):
            versions = SUPPORTED_PYTHON_VERSIONS
        else:
            versions = (None,)

        for version in versions:
            if args.python and version and version != args.python_version:
                continue

            display.info('Sanity check using %s%s' % (test.name, ' with Python %s' % version if version else ''))

            options = ''

            if isinstance(test, SanityCodeSmellTest):
                result = test.test(args, targets)
            elif isinstance(test, SanityMultipleVersion):
                result = test.test(args, targets, python_version=version)
                options = ' --python %s' % version
            elif isinstance(test, SanitySingleVersion):
                result = test.test(args, targets)
            else:
                raise Exception('Unsupported test type: %s' % type(test))

            result.write(args)

            total += 1

            if isinstance(result, SanityFailure):
                failed.append(result.test + options)

    if failed:
        message = 'The %d sanity test(s) listed below (out of %d) failed. See error output above for details.\n%s' % (
            len(failed), total, '\n'.join(failed))

        if args.failure_ok:
            display.error(message)
        else:
            raise ApplicationError(message)
コード例 #50
0
ファイル: __init__.py プロジェクト: zship/ansible
def integration_test_environment(args, target, inventory_path):
    """
    Prepare the on-disk environment for an integration test target and yield it.

    Yields an IntegrationEnvironment, normally backed by a temporary copy of the
    required files which is removed on exit. (Presumably used as a context
    manager; the decorator is not visible here — TODO confirm.)

    :type args: IntegrationConfig
    :type target: IntegrationTarget
    :type inventory_path: str
    """
    vars_file = 'integration_config.yml'

    # debugging escape hatch: run directly from the source tree instead of a temp copy
    if args.no_temp_workdir or 'no/temp_workdir/' in target.aliases:
        display.warning('Disabling the temp work dir is a temporary debugging feature that may be removed in the future without notice.')

        integration_dir = 'test/integration'
        ansible_config = os.path.join(integration_dir, '%s.cfg' % args.command)

        inventory_name = os.path.relpath(inventory_path, integration_dir)

        if '/' in inventory_name:
            # inventory is not directly inside the integration dir; use the original path
            inventory_name = inventory_path

        yield IntegrationEnvironment(integration_dir, inventory_name, ansible_config, vars_file)
        return

    root_temp_dir = os.path.expanduser('~/.ansible/test/tmp')

    prefix = '%s-' % target.name
    # non-ASCII suffix, presumably to exercise unicode path handling in tests
    # (see the no/temp_unicode/ escape hatch below) — TODO confirm
    suffix = u'-\u00c5\u00d1\u015a\u00cc\u03b2\u0141\u00c8'

    if args.no_temp_unicode or 'no/temp_unicode/' in target.aliases:
        display.warning('Disabling unicode in the temp work dir is a temporary debugging feature that may be removed in the future without notice.')
        suffix = '-ansible'

    if isinstance('', bytes):
        suffix = suffix.encode('utf-8')  # Python 2: native str is bytes, so encode the suffix

    if args.explain:
        # dry run: compute a representative path without creating anything
        temp_dir = os.path.join(root_temp_dir, '%stemp%s' % (prefix, suffix))
    else:
        make_dirs(root_temp_dir)
        temp_dir = tempfile.mkdtemp(prefix=prefix, suffix=suffix, dir=root_temp_dir)

    try:
        display.info('Preparing temporary directory: %s' % temp_dir, verbosity=2)

        # inventory filename depends on the type of integration command being run
        inventory_names = {
            PosixIntegrationConfig: 'inventory',
            WindowsIntegrationConfig: 'inventory.winrm',
            NetworkIntegrationConfig: 'inventory.networking',
        }

        inventory_name = inventory_names[type(args)]

        cache = IntegrationCache(args)

        # include the target itself plus everything it depends on
        target_dependencies = sorted([target] + list(cache.dependency_map.get(target.name, set())))

        files_needed = get_files_needed(target_dependencies)

        integration_dir = os.path.join(temp_dir, 'test/integration')
        ansible_config = os.path.join(integration_dir, '%s.cfg' % args.command)

        file_copies = [
            ('test/integration/%s.cfg' % args.command, ansible_config),
            ('test/integration/integration_config.yml', os.path.join(integration_dir, vars_file)),
            (inventory_path, os.path.join(integration_dir, inventory_name)),
        ]

        file_copies += [(path, os.path.join(temp_dir, path)) for path in files_needed]

        directory_copies = [
            (os.path.join('test/integration/targets', target.name), os.path.join(integration_dir, 'targets', target.name)) for target in target_dependencies
        ]

        inventory_dir = os.path.dirname(inventory_path)

        # copy inventory variable directories if present alongside the inventory
        host_vars_dir = os.path.join(inventory_dir, 'host_vars')
        group_vars_dir = os.path.join(inventory_dir, 'group_vars')

        if os.path.isdir(host_vars_dir):
            directory_copies.append((host_vars_dir, os.path.join(integration_dir, os.path.basename(host_vars_dir))))

        if os.path.isdir(group_vars_dir):
            directory_copies.append((group_vars_dir, os.path.join(integration_dir, os.path.basename(group_vars_dir))))

        # de-duplicate and sort for deterministic copy order
        directory_copies = sorted(set(directory_copies))
        file_copies = sorted(set(file_copies))

        if not args.explain:
            make_dirs(integration_dir)

        for dir_src, dir_dst in directory_copies:
            display.info('Copying %s/ to %s/' % (dir_src, dir_dst), verbosity=2)

            if not args.explain:
                shutil.copytree(dir_src, dir_dst, symlinks=True)

        for file_src, file_dst in file_copies:
            display.info('Copying %s to %s' % (file_src, file_dst), verbosity=2)

            if not args.explain:
                make_dirs(os.path.dirname(file_dst))
                shutil.copy2(file_src, file_dst)

        yield IntegrationEnvironment(integration_dir, inventory_name, ansible_config, vars_file)
    finally:
        # always clean up the temporary directory (nothing was created in explain mode)
        if not args.explain:
            shutil.rmtree(temp_dir)
コード例 #51
0
    def _start(self, auth):
        """
        Start the remote instance, retrying transient HTTP failures.

        Returns the decoded JSON response on success, an empty dict in explain
        (dry run) mode, or None if the instance was already started.
        """
        if self.started:
            display.info('Skipping started %s/%s instance %s.' %
                         (self.platform, self.version, self.instance_id),
                         verbosity=1)
            return

        display.info('Initializing new %s/%s instance %s.' %
                     (self.platform, self.version, self.instance_id),
                     verbosity=1)

        # Windows instances receive the WinRM remoting setup script as part of their config
        if self.platform == 'windows':
            with open('examples/scripts/ConfigureRemotingForAnsible.ps1',
                      'rb') as winrm_config_fd:
                winrm_config = winrm_config_fd.read().decode('utf-8')
        else:
            winrm_config = None

        data = dict(config=dict(
            platform=self.platform,
            version=self.version,
            public_key=self.ssh_key.pub_contents if self.ssh_key else None,
            query=False,
            winrm_config=winrm_config,
        ))

        data.update(dict(auth=auth))

        headers = {
            'Content-Type': 'application/json',
        }

        tries = 3
        sleep = 15  # seconds between retry attempts

        # retry the PUT until it succeeds or the attempts are exhausted
        while True:
            tries -= 1
            response = self.client.put(self._uri,
                                       data=json.dumps(data),
                                       headers=headers)

            if response.status_code == 200:
                break

            error = self._create_http_error(response)

            if not tries:
                raise error

            display.warning('%s. Trying again after %d seconds.' %
                            (error, sleep))
            time.sleep(sleep)

        self.started = True
        self._save()

        display.info('Started %s/%s from: %s' %
                     (self.platform, self.version, self._uri),
                     verbosity=1)

        if self.args.explain:
            return {}  # no real response body is available in explain (dry run) mode

        return response.json()
コード例 #52
0
ファイル: executor.py プロジェクト: ernstp/ansible
def command_network_integration(args):
    """Run network integration tests, provisioning remote platform instances when requested.

    :type args: NetworkIntegrationConfig
    """
    default_filename = 'test/integration/inventory.networking'

    if args.inventory:
        filename = os.path.join('test/integration', args.inventory)
    else:
        filename = default_filename

    # An inventory file is required unless we are only explaining or will generate one via --platform.
    if not args.explain and not args.platform and not os.path.exists(filename):
        if args.inventory:
            filename = os.path.abspath(filename)

        raise ApplicationError(
            'Inventory not found: %s\n'
            'Use --inventory to specify the inventory path.\n'
            'Use --platform to provision resources and generate an inventory file.\n'
            'See also inventory template: %s.template' % (filename, default_filename)
        )

    all_targets = tuple(walk_network_integration_targets(include_hidden=True))
    internal_targets = command_integration_filter(args, all_targets)
    # aliases such as 'network/ios/' identify which platforms the selected tests cover
    platform_targets = set(a for t in internal_targets for a in t.aliases if a.startswith('network/'))

    if args.platform:
        instances = []  # type: list [lib.thread.WrappedThread]

        # generate an ssh key (if needed) up front once, instead of for each instance
        SshKey(args)

        for platform_version in args.platform:
            platform, version = platform_version.split('/', 1)
            platform_target = 'network/%s/' % platform

            if platform_target not in platform_targets and 'network/basics/' not in platform_targets:
                # no selected test targets this platform, so skip provisioning it
                display.warning('Skipping "%s" because selected tests do not target the "%s" platform.' % (
                    platform_version, platform))
                continue

            # provision each platform instance on its own thread so they start in parallel
            instance = lib.thread.WrappedThread(functools.partial(network_run, args, platform, version))
            instance.daemon = True
            instance.start()
            instances.append(instance)

        install_command_requirements(args)

        # wait for all provisioning threads to complete
        while any(instance.is_alive() for instance in instances):
            time.sleep(1)

        # wait_for_result() re-raises any exception captured on the thread
        remotes = [instance.wait_for_result() for instance in instances]
        inventory = network_inventory(remotes)

        display.info('>>> Inventory: %s\n%s' % (filename, inventory.strip()), verbosity=3)

        if not args.explain:
            with open(filename, 'w') as inventory_fd:
                inventory_fd.write(inventory)
    else:
        install_command_requirements(args)

    command_integration_filtered(args, internal_targets, all_targets)
コード例 #53
0
ファイル: cover.py プロジェクト: wolfman-rack/ansible
def command_coverage_combine(args):
    """Patch paths in coverage files and merge into a single file per group.

    Coverage data collected on remote hosts or in containers records paths
    from those environments; each measured path is rewritten to point back
    into the local source tree before the arcs are merged.

    :type args: CoverageConfig
    :rtype: list[str] a sorted list of the combined coverage files written
    """
    coverage = initialize_coverage(args)

    # map module name -> local path, for rewriting remote module paths
    modules = dict((t.module, t.path) for t in walk_module_targets() if t.path.endswith('.py'))

    coverage_files = [os.path.join(COVERAGE_DIR, f) for f in os.listdir(COVERAGE_DIR) if '=coverage.' in f]

    ansible_path = os.path.abspath('lib/ansible/') + '/'
    root_path = os.getcwd() + '/'

    groups = {}

    if args.all or args.stub:
        sources = sorted(os.path.abspath(target.path) for target in walk_compile_targets())
    else:
        sources = []

    if args.stub:
        # seed empty-arc stub groups so untested files still show up in reports
        groups.update(_build_stub_groups(sources))

    for counter, coverage_file in enumerate(coverage_files, start=1):
        display.info('[%4d/%4d] %s' % (counter, len(coverage_files), coverage_file), verbosity=2)

        original = coverage.CoverageData()

        group = get_coverage_group(args, coverage_file)

        if group is None:
            display.warning('Unexpected name for coverage file: %s' % coverage_file)
            continue

        if os.path.getsize(coverage_file) == 0:
            display.warning('Empty coverage file: %s' % coverage_file)
            continue

        try:
            original.read_file(coverage_file)
        except Exception as ex:  # pylint: disable=locally-disabled, broad-except
            display.error(u'%s' % ex)
            continue

        for filename in original.measured_files():
            arcs = set(original.arcs(filename) or [])

            if not arcs:
                # This is most likely due to using an unsupported version of coverage.
                display.warning('No arcs found for "%s" in coverage file: %s' % (filename, coverage_file))
                continue

            filename = _rewrite_coverage_path(filename, modules, ansible_path, root_path)

            if filename is None:
                continue  # path referenced an unknown module; skip it

            arc_data = groups.setdefault(group, {})
            arc_data.setdefault(filename, set()).update(arcs)

    output_files = []
    invalid_path_count = 0
    invalid_path_chars = 0

    for group in sorted(groups):
        arc_data = groups[group]

        updated = coverage.CoverageData()

        for filename in arc_data:
            if not os.path.isfile(filename):
                # paths that could not be mapped back to the local tree are ignored
                invalid_path_count += 1
                invalid_path_chars += len(filename)

                if args.verbosity > 1:
                    display.warning('Invalid coverage path: %s' % filename)

                continue

            updated.add_arcs({filename: list(arc_data[filename])})

        if args.all:
            # include every compile target so files without coverage still appear
            updated.add_arcs(dict((source, []) for source in sources))

        if not args.explain:
            output_file = COVERAGE_FILE + group
            updated.write_file(output_file)
            output_files.append(output_file)

    if invalid_path_count > 0:
        display.warning('Ignored %d characters from %d invalid coverage path(s).' % (invalid_path_chars, invalid_path_count))

    return sorted(output_files)


def _build_stub_groups(sources):
    """Partition source paths into stub groups of roughly 500,000 lines each.

    :type sources: list[str] sorted absolute source paths
    :rtype: dict[str, dict[str, set]] group name -> {source path -> empty arc set}
    """
    stub_group = []
    stub_groups = [stub_group]
    stub_line_limit = 500000
    stub_line_count = 0

    for source in sources:
        with open(source, 'r') as source_fd:
            source_line_count = len(source_fd.read().splitlines())

        stub_group.append(source)
        stub_line_count += source_line_count

        if stub_line_count > stub_line_limit:
            # start a new group once the line budget is exceeded
            stub_line_count = 0
            stub_group = []
            stub_groups.append(stub_group)

    return dict(('=stub-%02d' % (stub_index + 1), dict((source, set()) for source in stub_group))
                for stub_index, stub_group in enumerate(stub_groups) if stub_group)


def _rewrite_coverage_path(filename, modules, ansible_path, root_path):
    """Map a coverage path recorded on a remote host or container back to the local tree.

    :type filename: str
    :type modules: dict[str, str] module name -> local module path
    :type ansible_path: str local 'lib/ansible/' absolute path with trailing slash
    :type root_path: str local repository root with trailing slash
    :rtype: str | None the rewritten (or unchanged) path, or None to skip an unknown module
    """
    new_name = None

    if '/ansible_modlib.zip/ansible/' in filename:
        # Rewrite the module_utils path from the remote host to match the controller. Ansible 2.6 and earlier.
        new_name = re.sub('^.*/ansible_modlib.zip/ansible/', ansible_path, filename)
    elif re.search(r'/ansible_[^/]+_payload\.zip/ansible/', filename):
        # Rewrite the module_utils path from the remote host to match the controller. Ansible 2.7 and later.
        new_name = re.sub(r'^.*/ansible_[^/]+_payload\.zip/ansible/', ansible_path, filename)
    elif '/ansible_module_' in filename:
        # Rewrite the module path from the remote host to match the controller. Ansible 2.6 and earlier.
        module_name = re.sub('^.*/ansible_module_(?P<module>.*).py$', '\\g<module>', filename)
        if module_name not in modules:
            display.warning('Skipping coverage of unknown module: %s' % module_name)
            return None
        new_name = os.path.abspath(modules[module_name])
    elif re.search(r'/ansible_[^/]+_payload(_[^/]+|\.zip)/__main__\.py$', filename):
        # Rewrite the module path from the remote host to match the controller. Ansible 2.7 and later.
        # AnsiballZ versions using zipimporter will match the `.zip` portion of the regex.
        # AnsiballZ versions not using zipimporter will match the `_[^/]+` portion of the regex.
        module_name = re.sub(r'^.*/ansible_(?P<module>[^/]+)_payload(_[^/]+|\.zip)/__main__\.py$', '\\g<module>', filename).rstrip('_')
        if module_name not in modules:
            display.warning('Skipping coverage of unknown module: %s' % module_name)
            return None
        new_name = os.path.abspath(modules[module_name])
    elif re.search('^(/.*?)?/root/ansible/', filename):
        # Rewrite the path of code running on a remote host or in a docker container as root.
        new_name = re.sub('^(/.*?)?/root/ansible/', root_path, filename)
    elif '/.ansible/test/tmp/' in filename:
        # Rewrite the path of code running from an integration test temporary directory.
        new_name = re.sub(r'^.*/\.ansible/test/tmp/[^/]+/', root_path, filename)

    if new_name is not None:
        display.info('%s -> %s' % (filename, new_name), verbosity=3)
        return new_name

    return filename
コード例 #54
0
ファイル: delegation.py プロジェクト: psaintlaurent/ansible
def delegate_remote(args, exclude, require, integration_targets):
    """Delegate a test run to a freshly provisioned remote CI instance.

    :type args: EnvironmentConfig
    :type exclude: list[str]
    :type require: list[str]
    :type integration_targets: tuple[IntegrationTarget]
    """
    parts = args.remote.split('/', 1)

    platform = parts[0]
    version = parts[1]

    core_ci = AnsibleCoreCI(args,
                            platform,
                            version,
                            stage=args.remote_stage,
                            provider=args.remote_provider)
    success = False

    if isinstance(args, ShellConfig):
        use_httptester = args.httptester
    else:
        # only start httptester if at least one selected target declares it needs it
        use_httptester = args.httptester and any(
            'needs/httptester/' in target.aliases
            for target in integration_targets)

    if use_httptester and not docker_available():
        display.warning(
            'Assuming --disable-httptester since `docker` is not available.')
        use_httptester = False

    httptester_id = None
    ssh_options = []

    try:
        core_ci.start()

        if use_httptester:
            httptester_id, ssh_options = start_httptester(args)

        core_ci.wait()

        if platform == 'windows':
            # Windows doesn't need the ansible-test fluff, just run the SSH command
            manage = ManageWindowsCI(core_ci)
            cmd = ['powershell.exe']
        else:
            options = {
                '--remote': 1,
            }

            # rebuild the ansible-test command line to run on the remote instance
            cmd = generate_command(args, 'ansible/test/runner/test.py',
                                   options, exclude, require)

            if httptester_id:
                cmd += ['--inject-httptester']

            if isinstance(args, TestConfig):
                if args.coverage and not args.coverage_label:
                    cmd += [
                        '--coverage-label',
                        'remote-%s-%s' % (platform, version)
                    ]

            if isinstance(args, IntegrationConfig):
                if not args.allow_destructive:
                    # NOTE(review): the flag is added when the user did NOT pass
                    # --allow-destructive -- presumably because remote instances are
                    # disposable, so destructive tests are always enabled there.
                    # Confirm intent before changing this condition.
                    cmd.append('--allow-destructive')

            # remote instances are only expected to have a single python version available
            if isinstance(args, UnitsConfig) and not args.python:
                cmd += ['--python', 'default']

            manage = ManagePosixCI(core_ci)

        manage.setup()
        if isinstance(args, IntegrationConfig):
            cloud_platforms = get_cloud_providers(args)

            for cloud_platform in cloud_platforms:
                ssh_options += cloud_platform.get_remote_ssh_options()

        try:
            manage.ssh(cmd, ssh_options)
            success = True
        finally:
            if platform != 'windows':
                # always retrieve test results, even when the remote run failed
                manage.ssh(
                    'rm -rf /tmp/results && cp -a ansible/test/results /tmp/results && chmod -R a+r /tmp/results'
                )
                manage.download('/tmp/results', 'test')
    finally:
        # terminate the instance according to the --remote-terminate policy
        if args.remote_terminate == 'always' or (args.remote_terminate
                                                 == 'success' and success):
            core_ci.stop()

        if httptester_id:
            docker_rm(args, httptester_id)
コード例 #55
0
    def test(self, args, targets):
        """Run the validate-modules sanity test against the included module paths.

        Ignore entries are tracked by line number; entries that match an error
        are marked used (set to None), and any entry left unused afterwards is
        itself reported as an error (A102), as are entries or skip paths that
        reference files which no longer exist (A101) and malformed entries (A201).

        :type args: SanityConfig
        :type targets: SanityTargets
        :rtype: TestResult
        """
        if args.python_version in UNSUPPORTED_PYTHON_VERSIONS:
            display.warning(
                'Skipping validate-modules on unsupported Python version %s.' %
                args.python_version)
            return SanitySkipped(self.name)

        skip_paths = read_lines_without_comments(VALIDATE_SKIP_PATH,
                                                 optional=True)
        skip_paths_set = set(skip_paths)

        env = ansible_environment(args, color=False)

        # only validate module targets that are not explicitly skipped
        paths = sorted([
            i.path for i in targets.include
            if i.module and i.path not in skip_paths_set
        ])

        if not paths:
            return SanitySkipped(self.name)

        cmd = [
            args.python_executable,
            os.path.join(INSTALL_ROOT,
                         'test/sanity/validate-modules/validate-modules'),
            '--format',
            'json',
            '--arg-spec',
        ] + paths

        invalid_ignores = []

        ignore_entries = read_lines_without_comments(VALIDATE_IGNORE_PATH,
                                                     optional=True)
        # ignore[path][code] = line number of the ignore entry (used to track usage)
        ignore = collections.defaultdict(dict)
        line = 0

        for ignore_entry in ignore_entries:
            line += 1

            if not ignore_entry:
                continue

            if ' ' not in ignore_entry:
                # entries must be "<path> <code>"
                invalid_ignores.append((line, 'Invalid syntax'))
                continue

            path, code = ignore_entry.split(' ', 1)

            ignore[path][code] = line

        if args.base_branch:
            cmd.extend([
                '--base-branch',
                args.base_branch,
            ])
        else:
            display.warning(
                'Cannot perform module comparison against the base branch. Base branch not detected when running locally.'
            )

        try:
            stdout, stderr = run_command(args, cmd, env=env, capture=True)
            status = 0
        except SubprocessError as ex:
            stdout = ex.stdout
            stderr = ex.stderr
            status = ex.status

        # status 3 indicates validation errors were found; anything else (besides 0) is a failure
        if stderr or status not in (0, 3):
            raise SubprocessError(cmd=cmd,
                                  status=status,
                                  stderr=stderr,
                                  stdout=stdout)

        if args.explain:
            return SanitySuccess(self.name)

        messages = json.loads(stdout)

        errors = []

        for filename in messages:
            output = messages[filename]

            for item in output['errors']:
                errors.append(
                    SanityMessage(
                        path=filename,
                        line=int(item['line']) if 'line' in item else 0,
                        column=int(item['column']) if 'column' in item else 0,
                        level='error',
                        code='E%s' % item['code'],
                        message=item['msg'],
                    ))

        # drop errors covered by ignore entries, marking those entries as used
        filtered = []

        for error in errors:
            if error.code in ignore[error.path]:
                ignore[error.path][
                    error.
                    code] = None  # error ignored, clear line number of ignore entry to track usage
            else:
                filtered.append(error)  # error not ignored

        errors = filtered

        # report malformed ignore entries (A201)
        for invalid_ignore in invalid_ignores:
            errors.append(
                SanityMessage(
                    code='A201',
                    message=invalid_ignore[1],
                    path=VALIDATE_IGNORE_PATH,
                    line=invalid_ignore[0],
                    column=1,
                    confidence=calculate_confidence(VALIDATE_IGNORE_PATH, line,
                                                    args.metadata)
                    if args.metadata.changes else None,
                ))

        # report skip entries for files which no longer exist (A101)
        line = 0

        for path in skip_paths:
            line += 1

            if not path:
                continue

            if not os.path.exists(path):
                # Keep files out of the list which no longer exist in the repo.
                errors.append(
                    SanityMessage(
                        code='A101',
                        message='Remove "%s" since it does not exist' % path,
                        path=VALIDATE_SKIP_PATH,
                        line=line,
                        column=1,
                        confidence=calculate_best_confidence(
                            ((VALIDATE_SKIP_PATH, line), (path, 0)),
                            args.metadata) if args.metadata.changes else None,
                    ))

        # report ignore entries for files which no longer exist (A101)
        for path in sorted(ignore.keys()):
            if os.path.exists(path):
                continue

            for line in sorted(ignore[path].values()):
                # Keep files out of the list which no longer exist in the repo.
                errors.append(
                    SanityMessage(
                        code='A101',
                        message='Remove "%s" since it does not exist' % path,
                        path=VALIDATE_IGNORE_PATH,
                        line=line,
                        column=1,
                        confidence=calculate_best_confidence(
                            ((VALIDATE_IGNORE_PATH, line), (path, 0)),
                            args.metadata) if args.metadata.changes else None,
                    ))

        # report ignore entries which were never matched by an error (A102)
        for path in paths:
            if path not in ignore:
                continue

            for code in ignore[path]:
                line = ignore[path][code]

                if not line:
                    continue  # entry was used; its line number was cleared above

                errors.append(
                    SanityMessage(
                        code='A102',
                        message='Remove since "%s" passes "%s" test' %
                        (path, code),
                        path=VALIDATE_IGNORE_PATH,
                        line=line,
                        column=1,
                        confidence=calculate_best_confidence(
                            ((VALIDATE_IGNORE_PATH, line), (path, 0)),
                            args.metadata) if args.metadata.changes else None,
                    ))

        if errors:
            return SanityFailure(self.name, messages=errors)

        return SanitySuccess(self.name)
コード例 #56
0
ファイル: classification.py プロジェクト: silvinux/ansible
    def _classify(self, path):
        """Map a changed file path to the test commands and targets it should trigger.

        Prefix checks are order-sensitive: specific prefixes (such as
        'lib/ansible/modules/') must be tested before the broad
        'lib/ansible/' catch-all further down.

        :type path: str
        :rtype: dict[str, str] | None a mapping of test command -> target
            ('all' for everything, None entry values mean no target for that
            command), an empty dict when no tests are needed, or None when the
            path is unrecognized (caller falls back to running all tests)
        """
        filename = os.path.basename(path)
        name, ext = os.path.splitext(filename)

        # empty dict: change requires no tests
        minimal = {}

        if path.startswith('.github/'):
            return minimal

        if path.startswith('bin/'):
            return minimal

        if path.startswith('contrib/'):
            return {'units': 'test/units/contrib/'}

        if path.startswith('docs/'):
            return minimal

        if path.startswith('examples/'):
            if path == 'examples/scripts/ConfigureRemotingForAnsible.ps1':
                return {
                    'windows-integration': 'connection_winrm',
                }

            return minimal

        if path.startswith('hacking/'):
            return minimal

        if path.startswith('lib/ansible/modules/'):
            module = self.module_names_by_path.get(path)

            if module:
                # run only the tests that cover this specific module, keyed by extension
                return {
                    'units':
                    module if module in self.units_modules else None,
                    'integration':
                    self.posix_integration_by_module.get(module)
                    if ext == '.py' else None,
                    'windows-integration':
                    self.windows_integration_by_module.get(module)
                    if ext == '.ps1' else None,
                    'network-integration':
                    self.network_integration_by_module.get(module),
                }

            return minimal

        if path.startswith('lib/ansible/module_utils/'):
            if ext == '.ps1':
                return {
                    'windows-integration': 'all',
                }

            if ext == '.py':
                return minimal  # already expanded using get_dependent_paths

        if path.startswith('lib/ansible/plugins/connection/'):
            if name == '__init__':
                return {
                    'integration': 'all',
                    'windows-integration': 'all',
                    'network-integration': 'all',
                    'units': 'test/units/plugins/connection/',
                }

            units_path = 'test/units/plugins/connection/test_%s.py' % name

            if units_path not in self.units_paths:
                units_path = None

            integration_name = 'connection_%s' % name

            if integration_name not in self.integration_targets_by_name:
                integration_name = None

            # entire integration test commands depend on these connection plugins

            if name == 'winrm':
                return {
                    'windows-integration': 'all',
                    'units': units_path,
                }

            if name == 'local':
                return {
                    'integration': 'all',
                    'network-integration': 'all',
                    'units': units_path,
                }

            if name == 'network_cli':
                return {
                    'network-integration': 'all',
                    'units': units_path,
                }

            # other connection plugins have isolated integration and unit tests

            return {
                'integration': integration_name,
                'units': units_path,
            }

        if path.startswith('lib/ansible/plugins/terminal/'):
            if ext == '.py':
                if name in self.prefixes and self.prefixes[name] == 'network':
                    network_target = 'network/%s/' % name

                    if network_target in self.integration_targets_by_alias:
                        return {
                            'network-integration': network_target,
                            'units': 'all',
                        }

                    display.warning('Integration tests for "%s" not found.' %
                                    network_target)

                    return {
                        'units': 'all',
                    }

                return {
                    'network-integration': 'all',
                    'units': 'all',
                }

        if path.startswith('lib/ansible/utils/module_docs_fragments/'):
            return {
                'sanity': 'all',
            }

        if path.startswith('lib/ansible/'):
            return all_tests()  # broad impact, run all tests

        if path.startswith('packaging/'):
            return minimal

        if path.startswith('test/compile/'):
            return {
                'compile': 'all',
            }

        if path.startswith('test/results/'):
            return minimal

        if path.startswith('test/integration/roles/'):
            return minimal

        if path.startswith('test/integration/targets/'):
            if not os.path.exists(path):
                return minimal

            # target directory name is the fourth path component
            target = self.integration_targets_by_name[path.split('/')[3]]

            if 'hidden/' in target.aliases:
                if target.type == 'role':
                    return minimal  # already expanded using get_dependent_paths

                return {
                    'integration': 'all',
                    'windows-integration': 'all',
                    'network-integration': 'all',
                }

            # run the target under whichever command its aliases declare
            return {
                'integration':
                target.name if 'posix/' in target.aliases else None,
                'windows-integration':
                target.name if 'windows/' in target.aliases else None,
                'network-integration':
                target.name if 'network/' in target.aliases else None,
            }

        if path.startswith('test/integration/'):
            if self.prefixes.get(name) == 'network' and ext == '.yaml':
                return minimal  # network integration test playbooks are not used by ansible-test

            if filename == 'platform_agnostic.yaml':
                return minimal  # network integration test playbook not used by ansible-test

            return {
                'integration': 'all',
                'windows-integration': 'all',
                'network-integration': 'all',
            }

        if path.startswith('test/sanity/'):
            return {
                'sanity': 'all',  # test infrastructure, run all sanity checks
            }

        if path.startswith('test/units/'):
            if path in self.units_paths:
                return {
                    'units': path,
                }

            # changes to files which are not unit tests should trigger tests from the nearest parent directory

            test_path = os.path.dirname(path)

            while test_path:
                if test_path + '/' in self.units_paths:
                    return {
                        'units': test_path + '/',
                    }

                test_path = os.path.dirname(test_path)

        if path.startswith('test/runner/lib/cloud/'):
            cloud_target = 'cloud/%s/' % name

            if cloud_target in self.integration_targets_by_alias:
                return {
                    'integration': cloud_target,
                }

            return all_tests()  # test infrastructure, run all tests

        if path.startswith('test/runner/'):
            return all_tests()  # test infrastructure, run all tests

        if path.startswith('test/utils/shippable/'):
            return all_tests()  # test infrastructure, run all tests

        if path.startswith('test/utils/'):
            return minimal

        if path == 'test/README.md':
            return minimal

        if path.startswith('ticket_stubs/'):
            return minimal

        if '/' not in path:
            # top-level files
            if path in (
                    '.gitattributes',
                    '.gitignore',
                    '.gitmodules',
                    '.mailmap',
                    'tox.ini',  # obsolete
                    'COPYING',
                    'VERSION',
                    'Makefile',
                    'setup.py',
            ):
                return minimal

            if path in (
                    'shippable.yml',
                    '.coveragerc',
            ):
                return all_tests()  # test infrastructure, run all tests

            if path == '.yamllint':
                return {
                    'sanity': 'all',
                }

            if ext in ('.md', '.rst', '.txt', '.xml', '.in'):
                return minimal

        return None  # unknown, will result in fall-back to run all tests
コード例 #57
0
ファイル: __init__.py プロジェクト: sivakrishnaa/ansible-1
def command_sanity(args):
    """Run the selected sanity tests against the selected targets and report results.

    Raises AllTargetsSkipped when no targets remain after filtering, Delegate when
    execution should be handed off to a delegated environment, and ApplicationError
    when one or more tests fail (unless --failure-ok was given).

    :type args: SanityConfig
    """
    changes = get_changes_filter(args)
    required = args.require + changes
    targets = SanityTargets.create(args.include, args.exclude, required)

    if not targets.include:
        raise AllTargetsSkipped()

    if args.delegate:
        raise Delegate(require=changes, exclude=args.exclude)

    install_command_requirements(args)

    tests = sanity_get_tests()

    if args.test:
        # an explicit test selection overrides the enabled/disabled defaults
        tests = [candidate for candidate in tests if candidate.name in args.test]
    elif not args.allow_disabled:
        # drop tests that are disabled by default unless --allow-disabled was given
        disabled_names = [candidate.name for candidate in tests if not candidate.enabled]
        tests = [candidate for candidate in tests if candidate.enabled]

        if disabled_names:
            display.warning('Skipping tests disabled by default without --allow-disabled: %s' % ', '.join(sorted(disabled_names)))

    if args.skip_test:
        tests = [candidate for candidate in tests if candidate.name not in args.skip_test]

    run_count = 0
    failures = []

    for test in tests:
        if args.list_tests:
            display.info(test.name)
            continue

        installed_versions = get_available_python_versions(SUPPORTED_PYTHON_VERSIONS)

        if args.python:
            # a specific interpreter was requested on the command line
            versions_to_try = (args.python,)
        elif isinstance(test, SanityMultipleVersion):
            # multi-version tests run once per supported interpreter when no version was requested
            versions_to_try = test.supported_python_versions
        elif not test.supported_python_versions or args.python_version in test.supported_python_versions:
            # the test accepts any version, or the version already in use
            versions_to_try = (args.python_version,)
        else:
            # choose the lowest installed interpreter supported by the test,
            # falling back to the current version (which will then be skipped below)
            compatible = tuple(sorted(set(installed_versions) & set(test.supported_python_versions)))
            versions_to_try = compatible[:1] or (args.python_version,)

        for candidate_version in versions_to_try:
            # multi-version tests record which interpreter was skipped; others do not
            skip_version = candidate_version if isinstance(test, SanityMultipleVersion) else None
            extra_options = ''

            if test.supported_python_versions and candidate_version not in test.supported_python_versions:
                display.warning("Skipping sanity test '%s' on unsupported Python %s." % (test.name, candidate_version))
                test_result = SanitySkipped(test.name, skip_version)
            elif not args.python and candidate_version not in installed_versions:
                display.warning("Skipping sanity test '%s' on Python %s due to missing interpreter." % (test.name, candidate_version))
                test_result = SanitySkipped(test.name, skip_version)
            else:
                check_pyyaml(args, candidate_version)

                if test.supported_python_versions:
                    display.info("Running sanity test '%s' with Python %s" % (test.name, candidate_version))
                else:
                    display.info("Running sanity test '%s'" % test.name)

                # NOTE(review): the isinstance order below is preserved from the original;
                # SanityCodeSmellTest appears to subclass one of the other test types, so
                # it must be checked first — do not reorder without confirming the hierarchy.
                if isinstance(test, SanityCodeSmellTest):
                    settings = test.load_processor(args)
                elif isinstance(test, SanityMultipleVersion):
                    settings = test.load_processor(args, candidate_version)
                elif isinstance(test, (SanitySingleVersion, SanityVersionNeutral)):
                    settings = test.load_processor(args)
                else:
                    raise Exception('Unsupported test type: %s' % type(test))

                # decide which targets this test operates on
                if test.all_targets:
                    selected = targets.targets
                elif test.no_targets:
                    selected = tuple()
                else:
                    selected = targets.include

                if test.include_directories:
                    directory_paths = paths_to_dirs([item.path for item in selected])
                    selected += tuple(TestTarget(directory, None, None, '') for directory in directory_paths)

                selected = settings.filter_skipped_targets(sorted(test.filter_targets(list(selected))))
                scoped_targets = SanityTargets(targets.targets, tuple(selected))

                if selected or test.no_targets:
                    # same ordering caveat as the load_processor dispatch above
                    if isinstance(test, SanityCodeSmellTest):
                        test_result = test.test(args, scoped_targets, candidate_version)
                    elif isinstance(test, SanityMultipleVersion):
                        test_result = test.test(args, scoped_targets, candidate_version)
                        extra_options = ' --python %s' % candidate_version
                    elif isinstance(test, SanitySingleVersion):
                        test_result = test.test(args, scoped_targets, candidate_version)
                    elif isinstance(test, SanityVersionNeutral):
                        test_result = test.test(args, scoped_targets)
                    else:
                        raise Exception('Unsupported test type: %s' % type(test))
                else:
                    # every target was filtered out, so there is nothing to run
                    test_result = SanitySkipped(test.name, skip_version)

            test_result.write(args)

            run_count += 1

            if isinstance(test_result, SanityFailure):
                failures.append(test_result.test + extra_options)

    if failures:
        message = 'The %d sanity test(s) listed below (out of %d) failed. See error output above for details.\n%s' % (
            len(failures), run_count, '\n'.join(failures))

        if args.failure_ok:
            display.error(message)
        else:
            raise ApplicationError(message)