def command_sanity_ansible_doc(args, targets, python_version):
    """
    :type args: SanityConfig
    :type targets: SanityTargets
    :type python_version: str
    """
    with open('test/sanity/ansible-doc/skip.txt', 'r') as skip_fd:
        skip_modules = set(skip_fd.read().splitlines())

    modules = sorted(set(m for i in targets.include_external for m in i.modules) -
                     set(m for i in targets.exclude_external for m in i.modules) -
                     skip_modules)

    if not modules:
        display.info('No tests applicable.', verbosity=1)
        return

    env = ansible_environment(args)
    cmd = ['ansible-doc'] + modules

    stdout, stderr = intercept_command(args, cmd, env=env, capture=True, python_version=python_version)

    if stderr:
        # consider any output on stderr an error, even though the return code is zero
        raise SubprocessError(cmd, stderr=stderr)

    if stdout:
        display.info(stdout.strip(), verbosity=3)
def _setup_dynamic(self):
    """Request Tower credentials through the Ansible Core CI service."""
    display.info('Provisioning %s cloud environment.' % self.platform, verbosity=1)

    # temporary solution to allow version selection
    self.version = os.environ.get('TOWER_VERSION', '3.2.3')
    self.check_tower_version(os.environ.get('TOWER_CLI_VERSION'))

    aci = get_tower_aci(self.args, self.version)
    aci.start()

    connection = aci.get()

    config = self._read_config_template()

    if not self.args.explain:
        self.aci = aci

        values = dict(
            VERSION=self.version,
            HOST=connection.hostname,
            USERNAME=connection.username,
            PASSWORD=connection.password,
        )

        config = self._populate_config_template(config, values)

    self._write_config(config)
def _setup_static(self):
    """Configure CloudStack tests for use with static configuration."""
    parser = configparser.RawConfigParser()
    parser.read(self.config_static_path)

    self.endpoint = parser.get('cloudstack', 'endpoint')

    parts = urlparse(self.endpoint)

    self.host = parts.hostname

    if not self.host:
        raise ApplicationError('Could not determine host from endpoint: %s' % self.endpoint)

    if parts.port:
        self.port = parts.port
    elif parts.scheme == 'http':
        self.port = 80
    elif parts.scheme == 'https':
        self.port = 443
    else:
        raise ApplicationError('Could not determine port from endpoint: %s' % self.endpoint)

    display.info('Read cs host "%s" and port %d from config: %s' % (self.host, self.port, self.config_static_path), verbosity=1)

    self._wait_for_service()
def _start(self, auth):
    """Start instance."""
    if self.started:
        display.info('Skipping started %s/%s instance %s.' % (self.platform, self.version, self.instance_id),
                     verbosity=1)
        return

    data = dict(
        config=dict(
            platform=self.platform,
            version=self.version,
            public_key=self.ssh_key.pub_contents if self.ssh_key else None,
            query=False,
        )
    )

    data.update(dict(auth=auth))

    headers = {
        'Content-Type': 'application/json',
    }

    response = self.client.put(self._uri, data=json.dumps(data), headers=headers)

    if response.status_code != 200:
        raise self._create_http_error(response)

    self.started = True
    self._save()

    display.info('Started %s/%s instance %s.' % (self.platform, self.version, self.instance_id),
                 verbosity=1)
def _start_at_threshold(self, data, headers, threshold):
    """
    :type data: dict[str, any]
    :type headers: dict[str, str]
    :type threshold: int
    :rtype: HttpResponse | None
    """
    tries = 3
    sleep = 15

    data['threshold'] = threshold

    display.info('Trying endpoint: %s (threshold %d)' % (self.endpoint, threshold), verbosity=1)

    while True:
        tries -= 1
        response = self.client.put(self._uri, data=json.dumps(data), headers=headers)

        if response.status_code == 200:
            return response

        error = self._create_http_error(response)

        if response.status_code == 503:
            raise error

        if not tries:
            raise error

        display.warning('%s. Trying again after %d seconds.' % (error, sleep))
        time.sleep(sleep)
def _start_try_endpoints(self, data, headers):
    """
    :type data: dict[str, any]
    :type headers: dict[str, str]
    :rtype: HttpResponse
    """
    threshold = 1

    while threshold <= self.max_threshold:
        for self.endpoint in self.endpoints:
            try:
                return self._start_at_threshold(data, headers, threshold)
            except CoreHttpError as ex:
                if ex.status == 503:
                    display.info('Service Unavailable: %s' % ex.remote_message, verbosity=1)
                    continue
                display.error(ex.remote_message)
            except HttpError as ex:
                display.error(u'%s' % ex)

            time.sleep(3)

        threshold += 1

    raise ApplicationError('Maximum threshold reached and all endpoints exhausted.')
def command_sanity_validate_modules(args, targets):
    """
    :type args: SanityConfig
    :type targets: SanityTargets
    """
    env = ansible_environment(args)

    paths = [deepest_path(i.path, 'lib/ansible/modules/') for i in targets.include_external]
    paths = sorted(set(p for p in paths if p))

    if not paths:
        display.info('No tests applicable.', verbosity=1)
        return

    cmd = ['test/sanity/validate-modules/validate-modules'] + paths

    with open('test/sanity/validate-modules/skip.txt', 'r') as skip_fd:
        skip_paths = skip_fd.read().splitlines()

    skip_paths += [e.path for e in targets.exclude_external]

    if skip_paths:
        cmd += ['--exclude', '^(%s)' % '|'.join(skip_paths)]

    run_command(args, cmd, env=env)
def setup_cli(self):
    """Install the correct Tower CLI for the version of Tower being tested."""
    tower_cli_version = self._get_cloud_config('tower_cli_version')

    display.info('Installing Tower CLI version: %s' % tower_cli_version)

    cmd = self.args.pip_command + ['install', '--disable-pip-version-check', 'ansible-tower-cli==%s' % tower_cli_version]

    run_command(self.args, cmd)
def command_units(args):
    """
    :type args: UnitsConfig
    """
    changes = get_changes_filter(args)
    require = (args.require or []) + changes
    include, exclude = walk_external_targets(walk_units_targets(), args.include, args.exclude, require)

    if not include:
        raise AllTargetsSkipped()

    if args.delegate:
        raise Delegate(require=changes)

    install_command_requirements(args)

    version_commands = []

    for version in SUPPORTED_PYTHON_VERSIONS:
        # run all versions unless version given, in which case run only that version
        if args.python and version != args.python:
            continue

        env = ansible_environment(args)

        cmd = [
            'pytest',
            '--boxed',
            '-r', 'a',
            '--color', 'yes' if args.color else 'no',
            '--junit-xml', 'test/results/junit/python%s-units.xml' % version,
        ]

        if args.collect_only:
            cmd.append('--collect-only')

        if args.verbosity:
            cmd.append('-' + ('v' * args.verbosity))

        if exclude:
            cmd += ['--ignore=%s' % target.path for target in exclude]

        cmd += [target.path for target in include]

        version_commands.append((version, cmd, env))

    for version, command, env in version_commands:
        display.info('Unit test with Python %s' % version)

        try:
            intercept_command(args, command, target_name='units', env=env, python_version=version)
        except SubprocessError as ex:
            # pytest exits with status code 5 when all tests are skipped, which isn't an error for our use case
            if ex.status != 5:
                raise
def start(self):
    """Start instance."""
    if self.started:
        display.info('Skipping started %s/%s instance %s.' % (self.platform, self.version, self.instance_id),
                     verbosity=1)
        return

    if is_shippable():
        return self.start_shippable()

    return self.start_remote()
def detect_changes_local(args):
    """
    :type args: TestConfig
    :rtype: list[str]
    """
    git = Git(args)
    result = LocalChanges(args, git)

    display.info('Detected branch %s forked from %s at commit %s' % (
        result.current_branch, result.fork_branch, result.fork_point))

    if result.untracked and not args.untracked:
        display.warning('Ignored %s untracked file(s). Use --untracked to include them.' % len(result.untracked))

    if result.committed and not args.committed:
        display.warning('Ignored %s committed change(s). Omit --ignore-committed to include them.' % len(result.committed))

    if result.staged and not args.staged:
        display.warning('Ignored %s staged change(s). Omit --ignore-staged to include them.' % len(result.staged))

    if result.unstaged and not args.unstaged:
        display.warning('Ignored %s unstaged change(s). Omit --ignore-unstaged to include them.' % len(result.unstaged))

    names = set()

    if args.tracked:
        names |= set(result.tracked)
    if args.untracked:
        names |= set(result.untracked)
    if args.committed:
        names |= set(result.committed)
    if args.staged:
        names |= set(result.staged)
    if args.unstaged:
        names |= set(result.unstaged)

    if not args.metadata.changes:
        args.metadata.populate_changes(result.diff)

        for path in result.untracked:
            if is_binary_file(path):
                args.metadata.changes[path] = ((0, 0),)
                continue

            with open(path, 'r') as source_fd:
                line_count = len(source_fd.read().splitlines())

            args.metadata.changes[path] = ((1, line_count),)

    return sorted(names)
def create_tarfile(dst_path, src_path, tar_filter):
    """
    :type dst_path: str
    :type src_path: str
    :type tar_filter: (tarfile.TarInfo) -> tarfile.TarInfo | None
    """
    display.info('Creating a compressed tar archive of path: %s' % src_path, verbosity=1)

    with tarfile.TarFile.gzopen(dst_path, mode='w', compresslevel=4) as tar:
        tar.add(src_path, filter=tar_filter)

    display.info('Resulting archive is %d bytes.' % os.path.getsize(dst_path), verbosity=1)
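# Example usage (illustrative, not from the repo): a tar_filter that skips VCS
# metadata. Returning None from the filter excludes the entry, which is standard
# tarfile behavior for the filter argument to TarFile.add().
def exclude_vcs_filter(tar_info):
    """
    :type tar_info: tarfile.TarInfo
    :rtype: tarfile.TarInfo | None
    """
    if '/.git' in tar_info.name or tar_info.name.endswith('/.git'):
        return None  # exclude the entry from the archive
    return tar_info  # keep the entry

# create_tarfile('/tmp/src.tgz', 'src', exclude_vcs_filter)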
def command_sanity_yamllint(args, targets):
    """
    :type args: SanityConfig
    :type targets: SanityTargets
    """
    paths = sorted(i.path for i in targets.include
                   if os.path.splitext(i.path)[1] in ('.yml', '.yaml'))

    if not paths:
        display.info('No tests applicable.', verbosity=1)
        return

    run_command(args, ['yamllint'] + paths)
def command_integration_role(args, target, start_at_task):
    """
    :type args: IntegrationConfig
    :type target: IntegrationTarget
    :type start_at_task: str
    """
    display.info('Running %s integration test role' % target.name)

    vars_file = 'integration_config.yml'

    if 'windows/' in target.aliases:
        inventory = 'inventory.winrm'
        hosts = 'windows'
        gather_facts = False
    elif 'network/' in target.aliases:
        inventory = 'inventory.network'
        hosts = target.name[:target.name.find('_')]
        gather_facts = False
    else:
        inventory = 'inventory'
        hosts = 'testhost'
        gather_facts = True

    playbook = '''
- hosts: %s
  gather_facts: %s
  roles:
    - { role: %s }
''' % (hosts, gather_facts, target.name)

    with tempfile.NamedTemporaryFile(dir='test/integration', prefix='%s-' % target.name, suffix='.yml') as pb_fd:
        pb_fd.write(playbook.encode('utf-8'))
        pb_fd.flush()

        filename = os.path.basename(pb_fd.name)

        display.info('>>> Playbook: %s\n%s' % (filename, playbook.strip()), verbosity=3)

        cmd = ['ansible-playbook', filename, '-i', inventory, '-e', '@%s' % vars_file]

        if start_at_task:
            cmd += ['--start-at-task', start_at_task]

        if args.verbosity:
            cmd.append('-' + ('v' * args.verbosity))

        env = integration_environment(args)
        cwd = 'test/integration'

        env['ANSIBLE_ROLES_PATH'] = os.path.abspath('test/integration/targets')

        intercept_command(args, cmd, env=env, cwd=cwd)
def recurse_import(import_name, depth=0, seen=None):
    """Recursively expand module_utils imports from module_utils files.
    :type import_name: str
    :type depth: int
    :type seen: set[str] | None
    :rtype: set[str]
    """
    display.info('module_utils import: %s%s' % (' ' * depth, import_name), verbosity=4)

    if seen is None:
        seen = set([import_name])

    results = set([import_name])

    # virtual packages depend on the modules they contain instead of the reverse
    if import_name in VIRTUAL_PACKAGES:
        for sub_import in sorted(virtual_utils):
            if sub_import.startswith('%s.' % import_name):
                if sub_import in seen:
                    continue

                seen.add(sub_import)

                matches = sorted(recurse_import(sub_import, depth + 1, seen))

                for result in matches:
                    results.add(result)

    import_path = os.path.join('lib/', '%s.py' % import_name.replace('.', '/'))

    if import_path not in imports_by_target_path:
        import_path = os.path.join('lib/', import_name.replace('.', '/'), '__init__.py')

        if import_path not in imports_by_target_path:
            raise ApplicationError('Cannot determine path for module_utils import: %s' % import_name)

    # process imports in reverse so the deepest imports come first
    for name in sorted(imports_by_target_path[import_path], reverse=True):
        if name in virtual_utils:
            continue

        if name in seen:
            continue

        seen.add(name)

        matches = sorted(recurse_import(name, depth + 1, seen))

        for result in matches:
            results.add(result)

    return results
def command_compile(args):
    """
    :type args: CompileConfig
    """
    changes = get_changes_filter(args)
    require = (args.require or []) + changes
    include, exclude = walk_external_targets(walk_compile_targets(), args.include, args.exclude, require)

    if not include:
        raise AllTargetsSkipped()

    if args.delegate:
        raise Delegate(require=changes)

    install_command_requirements(args)

    version_commands = []

    for version in COMPILE_PYTHON_VERSIONS:
        # run all versions unless version given, in which case run only that version
        if args.python and version != args.python:
            continue

        # optional list of regex patterns to exclude from tests
        skip_file = 'test/compile/python%s-skip.txt' % version

        if os.path.exists(skip_file):
            with open(skip_file, 'r') as skip_fd:
                skip_paths = skip_fd.read().splitlines()
        else:
            skip_paths = []

        # augment file exclusions
        skip_paths += [e.path for e in exclude]
        skip_paths.append('/.tox/')

        skip_paths = sorted(skip_paths)

        python = 'python%s' % version
        cmd = [python, '-m', 'compileall', '-fq']

        if skip_paths:
            cmd += ['-x', '|'.join(skip_paths)]

        cmd += [target.path if target.path == '.' else './%s' % target.path for target in include]

        version_commands.append((version, cmd))

    for version, command in version_commands:
        display.info('Compile with Python %s' % version)
        run_command(args, command)
def _use_static_config(self):
    """
    :rtype: bool
    """
    if os.path.isfile(self.config_static_path):
        display.info('Using existing %s cloud config: %s' % (self.platform, self.config_static_path), verbosity=1)
        self.config_path = self.config_static_path
        static = True
    else:
        static = False

    self.managed = not static

    return static
def command_sanity_code_smell(args, _):
    """
    :type args: SanityConfig
    :type _: SanityTargets
    """
    with open('test/sanity/code-smell/skip.txt', 'r') as skip_fd:
        skip_tests = skip_fd.read().splitlines()

    tests = glob.glob('test/sanity/code-smell/*')
    tests = sorted(p for p in tests if os.access(p, os.X_OK) and os.path.basename(p) not in skip_tests)

    for test in tests:
        display.info('Code smell check using %s' % os.path.basename(test))
        run_command(args, [test])
def get_powershell_module_utils_usage(self, path):
    """
    :type path: str
    :rtype: list[str]
    """
    if not self.powershell_module_utils_imports:
        display.info('Analyzing powershell module_utils imports...')
        before = time.time()
        self.powershell_module_utils_imports = get_powershell_module_utils_imports(self.powershell_targets)
        after = time.time()
        display.info('Processed %d powershell module_utils in %d second(s).' % (len(self.powershell_module_utils_imports), after - before))

    name = os.path.splitext(os.path.basename(path))[0]

    return sorted(self.powershell_module_utils_imports[name])
def command_sanity_shellcheck(args, targets):
    """
    :type args: SanityConfig
    :type targets: SanityTargets
    """
    with open('test/sanity/shellcheck/skip.txt', 'r') as skip_fd:
        skip_paths = set(skip_fd.read().splitlines())

    paths = sorted(i.path for i in targets.include
                   if os.path.splitext(i.path)[1] == '.sh' and i.path not in skip_paths)

    if not paths:
        display.info('No tests applicable.', verbosity=1)
        return

    run_command(args, ['shellcheck'] + paths)
def _setup_dynamic(self):
    """Create a CloudStack simulator using docker."""
    config = self._read_config_template()

    self.container_name = self.DOCKER_SIMULATOR_NAME

    results = docker_inspect(self.args, self.container_name)

    if results and not results[0]['State']['Running']:
        docker_rm(self.args, self.container_name)
        results = []

    if results:
        display.info('Using the existing CloudStack simulator docker container.', verbosity=1)
    else:
        display.info('Starting a new CloudStack simulator docker container.', verbosity=1)
        docker_pull(self.args, self.image)
        docker_run(self.args, self.image, ['-d', '-p', '8888:8888', '--name', self.container_name])

        if not self.args.explain:
            display.notice('The CloudStack simulator will probably be ready in 5 - 10 minutes.')

    container_id = get_docker_container_id()

    if container_id:
        display.info('Running in docker container: %s' % container_id, verbosity=1)
        self.host = self._get_simulator_address()
        display.info('Found CloudStack simulator container address: %s' % self.host, verbosity=1)
    else:
        self.host = 'localhost'

    self.port = 8888
    self.endpoint = 'http://%s:%d' % (self.host, self.port)

    self._wait_for_service()

    if self.args.explain:
        values = dict(
            HOST=self.host,
            PORT=str(self.port),
        )
    else:
        credentials = self._get_credentials()

        if self.args.docker:
            host = self.DOCKER_SIMULATOR_NAME
        else:
            host = self.host

        values = dict(
            HOST=host,
            PORT=str(self.port),
            KEY=credentials['apikey'],
            SECRET=credentials['secretkey'],
        )

    config = self._populate_config_template(config, values)

    self._write_config(config)
def command_integration_script(args, target):
    """
    :type args: IntegrationConfig
    :type target: IntegrationTarget
    """
    display.info('Running %s integration test script' % target.name)

    cmd = ['./%s' % os.path.basename(target.script_path)]

    if args.verbosity:
        cmd.append('-' + ('v' * args.verbosity))

    env = integration_environment(args, target, cmd)
    cwd = target.path

    intercept_command(args, cmd, target_name=target.name, env=env, cwd=cwd)
def _write_config(self, content):
    """
    :type content: str
    """
    prefix = '%s-' % os.path.splitext(os.path.basename(self.config_static_path))[0]

    with tempfile.NamedTemporaryFile(dir=self.TEST_DIR, prefix=prefix, suffix=self.config_extension, delete=False) as config_fd:
        filename = os.path.join(self.TEST_DIR, os.path.basename(config_fd.name))

        self.config_path = config_fd.name
        self.remove_config = True

        self._set_cloud_config('config_path', filename)

        display.info('>>> Config: %s\n%s' % (filename, content.strip()), verbosity=3)

        config_fd.write(content.encode('utf-8'))
        config_fd.flush()
def get(self):
    """
    Get instance connection information.
    :rtype: InstanceConnection
    """
    if not self.started:
        display.info('Skipping invalid %s/%s instance %s.' % (self.platform, self.version, self.instance_id),
                     verbosity=1)
        return None

    if self.connection and self.connection.running:
        return self.connection

    response = self.client.get(self._uri)

    if response.status_code != 200:
        raise self._create_http_error(response)

    if self.args.explain:
        self.connection = InstanceConnection(
            running=True,
            hostname='cloud.example.com',
            port=self.port or 12345,
            username='******',
            password='******' if self.platform == 'windows' else None,
        )
    else:
        response_json = response.json()
        status = response_json['status']
        con = response_json['connection']

        self.connection = InstanceConnection(
            running=status == 'running',
            hostname=con['hostname'],
            port=int(con.get('port', self.port)),
            username=con['username'],
            password=con.get('password'),
        )

    status = 'running' if self.connection.running else 'starting'

    display.info('Retrieved %s %s/%s instance %s.' % (status, self.platform, self.version, self.instance_id),
                 verbosity=1)

    return self.connection
def detect_changes_shippable(args):
    """Initialize change detection on Shippable.
    :type args: CommonConfig
    :rtype: list[str]
    """
    git = Git(args)
    result = ShippableChanges(args, git)

    if result.is_pr:
        job_type = 'pull request'
    elif result.is_tag:
        job_type = 'tag'
    else:
        job_type = 'merge commit'

    display.info('Processing %s for branch %s commit %s' % (job_type, result.branch, result.commit))

    return result.paths
def command_sanity(args):
    """
    :type args: SanityConfig
    """
    changes = get_changes_filter(args)
    require = (args.require or []) + changes
    targets = SanityTargets(args.include, args.exclude, require)

    if not targets.include:
        raise AllTargetsSkipped()

    if args.delegate:
        raise Delegate(require=changes)

    install_command_requirements(args)

    tests = SANITY_TESTS

    if args.test:
        tests = [t for t in tests if t.name in args.test]

    if args.skip_test:
        tests = [t for t in tests if t.name not in args.skip_test]

    for test in tests:
        if args.list_tests:
            display.info(test.name)
            continue

        if test.intercept:
            versions = SUPPORTED_PYTHON_VERSIONS
        else:
            versions = None,

        for version in versions:
            if args.python and version and version != args.python:
                continue

            display.info('Sanity check using %s%s' % (test.name, ' with Python %s' % version if version else ''))

            if test.intercept:
                test.func(args, targets, python_version=version)
            else:
                test.func(args, targets)
def command_compile(args):
    """
    :type args: CompileConfig
    """
    changes = get_changes_filter(args)
    require = (args.require or []) + changes
    include, exclude = walk_external_targets(walk_compile_targets(), args.include, args.exclude, require)

    if not include:
        raise AllTargetsSkipped()

    if args.delegate:
        raise Delegate(require=changes)

    install_command_requirements(args)

    total = 0
    failed = []

    for version in COMPILE_PYTHON_VERSIONS:
        # run all versions unless version given, in which case run only that version
        if args.python and version != args.python:
            continue

        display.info('Compile with Python %s' % version)

        result = compile_version(args, version, include, exclude)
        result.write(args)

        total += 1

        if isinstance(result, TestFailure):
            failed.append('compile --python %s' % version)

    if failed:
        message = 'The %d compile test(s) listed below (out of %d) failed. See error output above for details.\n%s' % (
            len(failed), total, '\n'.join(failed))

        if args.failure_ok:
            display.error(message)
        else:
            raise ApplicationError(message)
def _wait_for_service(self):
    """Wait for the CloudStack service endpoint to accept connections."""
    if self.args.explain:
        return

    client = HttpClient(self.args, always=True)
    endpoint = self.endpoint

    for _ in range(1, 30):
        display.info('Waiting for CloudStack service: %s' % endpoint, verbosity=1)

        try:
            client.get(endpoint)
            return
        except SubprocessError:
            pass

        time.sleep(30)

    raise ApplicationError('Timeout waiting for CloudStack service.')
def _get_parallels_endpoints(self):
    """
    :rtype: tuple[str]
    """
    client = HttpClient(self.args, always=True)

    display.info('Getting available endpoints...', verbosity=1)

    sleep = 3

    for _ in range(1, 10):
        response = client.get('https://s3.amazonaws.com/ansible-ci-files/ansible-test/parallels-endpoints.txt')

        if response.status_code == 200:
            endpoints = tuple(response.response.splitlines())
            display.info('Available endpoints (%d):\n%s' % (len(endpoints), '\n'.join(' - %s' % endpoint for endpoint in endpoints)), verbosity=1)
            return endpoints

        display.warning('HTTP %d error getting endpoints, trying again in %d seconds.' % (response.status_code, sleep))
        time.sleep(sleep)

    raise ApplicationError('Unable to get available endpoints.')
def command_windows_integration(args):
    """
    :type args: WindowsIntegrationConfig
    """
    filename = 'test/integration/inventory.winrm'

    if not args.explain and not args.windows and not os.path.isfile(filename):
        raise ApplicationError('Use the --windows option or provide an inventory file (see %s.template).' % filename)

    all_targets = tuple(walk_windows_integration_targets(include_hidden=True))
    internal_targets = command_integration_filter(args, all_targets)

    if args.windows:
        instances = []  # type: list[lib.thread.WrappedThread]

        for version in args.windows:
            instance = lib.thread.WrappedThread(functools.partial(windows_run, args, version))
            instance.daemon = True
            instance.start()
            instances.append(instance)

        install_command_requirements(args)

        while any(instance.is_alive() for instance in instances):
            time.sleep(1)

        remotes = [instance.wait_for_result() for instance in instances]
        inventory = windows_inventory(remotes)

        display.info('>>> Inventory: %s\n%s' % (filename, inventory.strip()), verbosity=3)

        if not args.explain:
            with open(filename, 'w') as inventory_fd:
                inventory_fd.write(inventory)
    else:
        install_command_requirements(args)

    try:
        command_integration_filtered(args, internal_targets, all_targets)
    finally:
        pass
def test(self, args, targets, python_version):
    """
    :type args: SanityConfig
    :type targets: SanityTargets
    :type python_version: str
    :rtype: TestResult
    """
    skip_file = 'test/sanity/compile/python%s-skip.txt' % python_version

    if os.path.exists(skip_file):
        skip_paths = read_lines_without_comments(skip_file)
    else:
        skip_paths = []

    paths = sorted(i.path for i in targets.include
                   if (os.path.splitext(i.path)[1] == '.py' or i.path.startswith('bin/')) and i.path not in skip_paths)

    if not paths:
        return SanitySkipped(self.name, python_version=python_version)

    cmd = [find_python(python_version), os.path.join(ANSIBLE_ROOT, 'test/sanity/compile/compile.py')]

    data = '\n'.join(paths)

    display.info(data, verbosity=4)

    try:
        stdout, stderr = run_command(args, cmd, data=data, capture=True)
        status = 0
    except SubprocessError as ex:
        stdout = ex.stdout
        stderr = ex.stderr
        status = ex.status

    if stderr:
        raise SubprocessError(cmd=cmd, status=status, stderr=stderr, stdout=stdout)

    if args.explain:
        return SanitySuccess(self.name, python_version=python_version)

    pattern = r'^(?P<path>[^:]*):(?P<line>[0-9]+):(?P<column>[0-9]+): (?P<message>.*)$'

    results = parse_to_list_of_dict(pattern, stdout)

    results = [SanityMessage(
        message=r['message'],
        path=r['path'].replace('./', ''),
        line=int(r['line']),
        column=int(r['column']),
    ) for r in results]

    line = 0

    for path in skip_paths:
        line += 1

        if not path:
            continue

        if not os.path.exists(path):
            # Keep files out of the list which no longer exist in the repo.
            results.append(SanityMessage(
                code='A101',
                message='Remove "%s" since it does not exist' % path,
                path=skip_file,
                line=line,
                column=1,
                confidence=calculate_best_confidence(((skip_file, line), (path, 0)), args.metadata) if args.metadata.changes else None,
            ))

    if results:
        return SanityFailure(self.name, messages=results, python_version=python_version)

    return SanitySuccess(self.name, python_version=python_version)
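# For reference, a minimal sketch of the parse step above, assuming
# parse_to_list_of_dict (not shown here) simply applies the regex to each line
# of output and collects groupdict() results. The sample line is made up for
# illustration.
import re

compile_pattern = r'^(?P<path>[^:]*):(?P<line>[0-9]+):(?P<column>[0-9]+): (?P<message>.*)$'
sample_line = './lib/ansible/example.py:10:1: SyntaxError: invalid syntax'

match = re.search(compile_pattern, sample_line)
if match:
    # {'path': './lib/ansible/example.py', 'line': '10', 'column': '1', 'message': 'SyntaxError: invalid syntax'}
    print(match.groupdict())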
def command_integration_filtered(args, targets, all_targets):
    """
    :type args: IntegrationConfig
    :type targets: tuple[IntegrationTarget]
    :type all_targets: tuple[IntegrationTarget]
    """
    found = False
    passed = []
    failed = []

    targets_iter = iter(targets)
    all_targets_dict = dict((target.name, target) for target in all_targets)

    setup_errors = []
    setup_targets_executed = set()

    for target in all_targets:
        for setup_target in target.setup_once + target.setup_always:
            if setup_target not in all_targets_dict:
                setup_errors.append('Target "%s" contains invalid setup target: %s' % (target.name, setup_target))

    if setup_errors:
        raise ApplicationError('Found %d invalid setup aliases:\n%s' % (len(setup_errors), '\n'.join(setup_errors)))

    test_dir = os.path.expanduser('~/ansible_testing')

    if not args.explain and any('needs/ssh/' in target.aliases for target in targets):
        max_tries = 20
        display.info('SSH service required for tests. Checking to make sure we can connect.')

        for i in range(1, max_tries + 1):
            try:
                run_command(args, ['ssh', '-o', 'BatchMode=yes', 'localhost', 'id'], capture=True)
                display.info('SSH service responded.')
                break
            except SubprocessError:
                if i == max_tries:
                    raise

                seconds = 3
                display.warning('SSH service not responding. Waiting %d second(s) before checking again.' % seconds)
                time.sleep(seconds)

    start_at_task = args.start_at_task
    results = {}

    for target in targets_iter:
        if args.start_at and not found:
            found = target.name == args.start_at

            if not found:
                continue

        if args.list_targets:
            print(target.name)
            continue

        tries = 2 if args.retry_on_error else 1
        verbosity = args.verbosity

        cloud_environment = get_cloud_environment(args, target)

        original_environment = EnvironmentDescription(args)

        display.info('>>> Environment Description\n%s' % original_environment, verbosity=3)

        try:
            while tries:
                tries -= 1

                try:
                    run_setup_targets(args, test_dir, target.setup_once, all_targets_dict, setup_targets_executed, False)

                    start_time = time.time()

                    run_setup_targets(args, test_dir, target.setup_always, all_targets_dict, setup_targets_executed, True)

                    if not args.explain:
                        # create a fresh test directory for each test target
                        remove_tree(test_dir)
                        make_dirs(test_dir)

                    if target.script_path:
                        command_integration_script(args, target)
                    else:
                        command_integration_role(args, target, start_at_task)
                        start_at_task = None

                    end_time = time.time()

                    results[target.name] = dict(
                        name=target.name,
                        type=target.type,
                        aliases=target.aliases,
                        modules=target.modules,
                        run_time_seconds=int(end_time - start_time),
                        setup_once=target.setup_once,
                        setup_always=target.setup_always,
                        coverage=args.coverage,
                        coverage_label=args.coverage_label,
                        python_version=args.python_version,
                    )

                    break
                except SubprocessError:
                    if cloud_environment:
                        cloud_environment.on_failure(target, tries)

                    if not original_environment.validate(target.name, throw=False):
                        raise

                    if not tries:
                        raise

                    display.warning('Retrying test target "%s" with maximum verbosity.' % target.name)
                    display.verbosity = args.verbosity = 6

            original_environment.validate(target.name, throw=True)

            passed.append(target)
        except Exception as ex:
            failed.append(target)

            if args.continue_on_error:
                display.error(ex)
                continue

            display.notice('To resume at this test target, use the option: --start-at %s' % target.name)

            next_target = next(targets_iter, None)

            if next_target:
                display.notice('To resume after this test target, use the option: --start-at %s' % next_target.name)

            raise
        finally:
            display.verbosity = args.verbosity = verbosity

    if not args.explain:
        results_path = 'test/results/data/%s-%s.json' % (args.command, re.sub(r'[^0-9]', '-', str(datetime.datetime.utcnow().replace(microsecond=0))))

        data = dict(
            targets=results,
        )

        with open(results_path, 'w') as results_fd:
            results_fd.write(json.dumps(data, sort_keys=True, indent=4))

    if failed:
        raise ApplicationError('The %d integration test(s) listed below (out of %d) failed. See error output above for details:\n%s' % (
            len(failed), len(passed) + len(failed), '\n'.join(target.name for target in failed)))
def _setup_dynamic(self):
    """Create an ACME test container using docker."""
    container_id = get_docker_container_id()

    if container_id:
        display.info('Running in docker container: %s' % container_id, verbosity=1)

    self.container_name = self.DOCKER_SIMULATOR_NAME

    results = docker_inspect(self.args, self.container_name)

    if results and not results[0].get('State', {}).get('Running'):
        docker_rm(self.args, self.container_name)
        results = []

    if results:
        display.info('Using the existing ACME docker test container.', verbosity=1)
    else:
        display.info('Starting a new ACME docker test container.', verbosity=1)

        if not self.args.docker and not container_id:
            # publish the simulator ports when not running inside docker
            publish_ports = [
                '-p', '5000:5000',  # control port for flask app in container
                '-p', '14000:14000',  # Pebble ACME CA
            ]
        else:
            publish_ports = []

        if not os.environ.get('ANSIBLE_ACME_CONTAINER'):
            docker_pull(self.args, self.image)

        docker_run(
            self.args,
            self.image,
            ['-d', '--name', self.container_name] + publish_ports,
        )

    if self.args.docker:
        acme_host = self.DOCKER_SIMULATOR_NAME
        acme_host_ip = self._get_simulator_address()
    elif container_id:
        acme_host = self._get_simulator_address()
        acme_host_ip = acme_host
        display.info('Found ACME test container address: %s' % acme_host, verbosity=1)
    else:
        acme_host = 'localhost'
        acme_host_ip = acme_host

    self._set_cloud_config('acme_host', acme_host)

    self._wait_for_service('http', acme_host_ip, 5000, '', 'ACME controller')
    self._wait_for_service('https', acme_host_ip, 14000, 'dir', 'ACME CA endpoint')
def test(self, args, targets):
    """
    :type args: SanityConfig
    :type targets: SanityTargets
    :rtype: SanityResult
    """
    cmd = [self.path]

    env = ansible_environment(args, color=False)

    pattern = None
    data = None

    if self.config:
        with open(self.config, 'r') as config_fd:
            config = json.load(config_fd)

        output = config.get('output')
        extensions = config.get('extensions')
        prefixes = config.get('prefixes')
        files = config.get('files')
        always = config.get('always')

        if output == 'path-line-column-message':
            pattern = '^(?P<path>[^:]*):(?P<line>[0-9]+):(?P<column>[0-9]+): (?P<message>.*)$'
        elif output == 'path-message':
            pattern = '^(?P<path>[^:]*): (?P<message>.*)$'
        else:
            raise ApplicationError('Unsupported output type: %s' % output)

        paths = sorted(i.path for i in targets.include)

        if always:
            paths = []

        # short-term work-around for paths being str instead of unicode on python 2.x
        if sys.version_info[0] == 2:
            paths = [p.decode('utf-8') for p in paths]

        if extensions:
            paths = [p for p in paths if os.path.splitext(p)[1] in extensions or (p.startswith('bin/') and '.py' in extensions)]

        if prefixes:
            paths = [p for p in paths if any(p.startswith(pre) for pre in prefixes)]

        if files:
            paths = [p for p in paths if os.path.basename(p) in files]

        if not paths and not always:
            return SanitySkipped(self.name)

        data = '\n'.join(paths)

        if data:
            display.info(data, verbosity=4)

    try:
        stdout, stderr = run_command(args, cmd, data=data, env=env, capture=True)
        status = 0
    except SubprocessError as ex:
        stdout = ex.stdout
        stderr = ex.stderr
        status = ex.status

    if stdout and not stderr:
        if pattern:
            matches = [parse_to_dict(pattern, line) for line in stdout.splitlines()]

            messages = [SanityMessage(
                message=m['message'],
                path=m['path'],
                line=int(m.get('line', 0)),
                column=int(m.get('column', 0)),
            ) for m in matches]

            return SanityFailure(self.name, messages=messages)

    if stderr or status:
        summary = u'%s' % SubprocessError(cmd=cmd, status=status, stderr=stderr, stdout=stdout)
        return SanityFailure(self.name, summary=summary)

    return SanitySuccess(self.name)
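# A hypothetical code-smell test config, shown as the dict json.load() would
# produce for the keys read above. The values are illustrative assumptions,
# not taken from any real config in the repo.
example_code_smell_config = dict(
    output='path-line-column-message',  # selects the regex used to parse the test's output
    extensions=['.py'],                 # restrict checking to these file extensions
    prefixes=['lib/ansible/'],          # restrict checking to paths under these prefixes
    files=[],                           # optionally restrict to specific basenames
    always=False,                       # when true, run the test with an empty path list
)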
def categorize_changes(args, paths, verbose_command=None):
    """
    :type args: TestConfig
    :type paths: list[str]
    :type verbose_command: str
    :rtype: ChangeDescription
    """
    mapper = PathMapper(args)

    commands = {
        'sanity': set(),
        'units': set(),
        'integration': set(),
        'windows-integration': set(),
        'network-integration': set(),
    }

    focused_commands = collections.defaultdict(set)

    deleted_paths = set()
    original_paths = set()
    additional_paths = set()
    no_integration_paths = set()

    for path in paths:
        if not os.path.exists(path):
            deleted_paths.add(path)
            continue

        original_paths.add(path)

        dependent_paths = mapper.get_dependent_paths(path)

        if not dependent_paths:
            continue

        display.info('Expanded "%s" to %d dependent file(s):' % (path, len(dependent_paths)), verbosity=1)

        for dependent_path in dependent_paths:
            display.info(dependent_path, verbosity=1)
            additional_paths.add(dependent_path)

    additional_paths -= set(paths)  # don't count changed paths as additional paths

    if additional_paths:
        display.info('Expanded %d changed file(s) into %d additional dependent file(s).' % (len(paths), len(additional_paths)))
        paths = sorted(set(paths) | additional_paths)

    display.info('Mapping %d changed file(s) to tests.' % len(paths))

    for path in paths:
        tests = mapper.classify(path)

        if tests is None:
            focused_target = False

            display.info('%s -> all' % path, verbosity=1)
            tests = all_tests(args)  # not categorized, run all tests
            display.warning('Path not categorized: %s' % path)
        else:
            focused_target = tests.pop(FOCUSED_TARGET, False) and path in original_paths

            tests = dict((key, value) for key, value in tests.items() if value)

            if focused_target and not any('integration' in command for command in tests):
                no_integration_paths.add(path)  # path triggers no integration tests

        if verbose_command:
            result = '%s: %s' % (verbose_command, tests.get(verbose_command) or 'none')

            # identify targeted integration tests (those which only target a single integration command)
            if 'integration' in verbose_command and tests.get(verbose_command):
                if not any('integration' in command for command in tests if command != verbose_command):
                    if focused_target:
                        result += ' (focused)'

                    result += ' (targeted)'
        else:
            result = '%s' % tests

        display.info('%s -> %s' % (path, result), verbosity=1)

        for command, target in tests.items():
            commands[command].add(target)

            if focused_target:
                focused_commands[command].add(target)

    for command in commands:
        commands[command].discard('none')

        if any(t == 'all' for t in commands[command]):
            commands[command] = set(['all'])

    commands = dict((c, sorted(commands[c])) for c in commands if commands[c])
    focused_commands = dict((c, sorted(focused_commands[c])) for c in focused_commands)

    for command in commands:
        if commands[command] == ['all']:
            commands[command] = []  # changes require testing all targets, do not filter targets

    changes = ChangeDescription()
    changes.command = verbose_command
    changes.changed_paths = sorted(original_paths)
    changes.deleted_paths = sorted(deleted_paths)
    changes.regular_command_targets = commands
    changes.focused_command_targets = focused_commands
    changes.no_integration_paths = sorted(no_integration_paths)

    return changes
def integration_test_environment(args, target, inventory_path):
    """
    :type args: IntegrationConfig
    :type target: IntegrationTarget
    :type inventory_path: str
    """
    vars_file = 'integration_config.yml'

    if args.no_temp_workdir or 'no/temp_workdir/' in target.aliases:
        display.warning('Disabling the temp work dir is a temporary debugging feature that may be removed in the future without notice.')

        integration_dir = os.path.abspath('test/integration')
        inventory_path = os.path.abspath(inventory_path)
        ansible_config = os.path.join(integration_dir, '%s.cfg' % args.command)
        vars_file = os.path.join(integration_dir, vars_file)

        yield IntegrationEnvironment(integration_dir, inventory_path, ansible_config, vars_file)
        return

    root_temp_dir = os.path.expanduser('~/.ansible/test/tmp')

    prefix = '%s-' % target.name
    suffix = u'-\u00c5\u00d1\u015a\u00cc\u03b2\u0141\u00c8'

    if args.no_temp_unicode or 'no/temp_unicode/' in target.aliases:
        display.warning('Disabling unicode in the temp work dir is a temporary debugging feature that may be removed in the future without notice.')
        suffix = '-ansible'

    if isinstance('', bytes):
        suffix = suffix.encode('utf-8')

    if args.explain:
        temp_dir = os.path.join(root_temp_dir, '%stemp%s' % (prefix, suffix))
    else:
        make_dirs(root_temp_dir)
        temp_dir = tempfile.mkdtemp(prefix=prefix, suffix=suffix, dir=root_temp_dir)

    try:
        display.info('Preparing temporary directory: %s' % temp_dir, verbosity=2)

        inventory_names = {
            PosixIntegrationConfig: 'inventory',
            WindowsIntegrationConfig: 'inventory.winrm',
            NetworkIntegrationConfig: 'inventory.networking',
        }

        inventory_name = inventory_names[type(args)]

        cache = IntegrationCache(args)

        target_dependencies = sorted([target] + list(cache.dependency_map.get(target.name, set())))

        files_needed = get_files_needed(target_dependencies)

        integration_dir = os.path.join(temp_dir, 'test/integration')
        ansible_config = os.path.join(integration_dir, '%s.cfg' % args.command)

        file_copies = [
            (os.path.join(INSTALL_ROOT, 'test/integration/%s.cfg' % args.command), ansible_config),
            (os.path.join(INSTALL_ROOT, 'test/integration/integration_config.yml'), os.path.join(integration_dir, vars_file)),
            (os.path.join(INSTALL_ROOT, inventory_path), os.path.join(integration_dir, inventory_name)),
        ]

        file_copies += [(path, os.path.join(temp_dir, path)) for path in files_needed]

        directory_copies = [
            (os.path.join('test/integration/targets', target.name), os.path.join(integration_dir, 'targets', target.name))
            for target in target_dependencies
        ]

        inventory_dir = os.path.dirname(inventory_path)

        host_vars_dir = os.path.join(inventory_dir, 'host_vars')
        group_vars_dir = os.path.join(inventory_dir, 'group_vars')

        if os.path.isdir(host_vars_dir):
            directory_copies.append((host_vars_dir, os.path.join(integration_dir, os.path.basename(host_vars_dir))))

        if os.path.isdir(group_vars_dir):
            directory_copies.append((group_vars_dir, os.path.join(integration_dir, os.path.basename(group_vars_dir))))

        directory_copies = sorted(set(directory_copies))
        file_copies = sorted(set(file_copies))

        if not args.explain:
            make_dirs(integration_dir)

        for dir_src, dir_dst in directory_copies:
            display.info('Copying %s/ to %s/' % (dir_src, dir_dst), verbosity=2)

            if not args.explain:
                shutil.copytree(dir_src, dir_dst, symlinks=True)

        for file_src, file_dst in file_copies:
            display.info('Copying %s to %s' % (file_src, file_dst), verbosity=2)

            if not args.explain:
                make_dirs(os.path.dirname(file_dst))
                shutil.copy2(file_src, file_dst)

        inventory_path = os.path.join(integration_dir, inventory_name)
        vars_file = os.path.join(integration_dir, vars_file)

        yield IntegrationEnvironment(integration_dir, inventory_path, ansible_config, vars_file)
    finally:
        if not args.explain:
            shutil.rmtree(temp_dir)
def command_coverage_combine(args):
    """Patch paths in coverage files and merge into a single file.
    :type args: CoverageConfig
    :rtype: list[str]
    """
    coverage = initialize_coverage(args)

    modules = dict((t.module, t.path) for t in list(walk_module_targets()) if t.path.endswith('.py'))

    coverage_files = [os.path.join(COVERAGE_DIR, f) for f in os.listdir(COVERAGE_DIR) if '=coverage.' in f]

    ansible_path = os.path.abspath('lib/ansible/') + '/'
    root_path = os.getcwd() + '/'

    counter = 0
    groups = {}

    if args.all or args.stub:
        sources = sorted(os.path.abspath(target.path) for target in walk_compile_targets())
    else:
        sources = []

    if args.stub:
        groups['=stub'] = dict((source, set()) for source in sources)

    for coverage_file in coverage_files:
        counter += 1
        display.info('[%4d/%4d] %s' % (counter, len(coverage_files), coverage_file), verbosity=2)

        original = coverage.CoverageData()

        group = get_coverage_group(args, coverage_file)

        if group is None:
            display.warning('Unexpected name for coverage file: %s' % coverage_file)
            continue

        if os.path.getsize(coverage_file) == 0:
            display.warning('Empty coverage file: %s' % coverage_file)
            continue

        try:
            original.read_file(coverage_file)
        except Exception as ex:  # pylint: disable=locally-disabled, broad-except
            display.error(str(ex))
            continue

        for filename in original.measured_files():
            arcs = set(original.arcs(filename) or [])

            if not arcs:
                # This is most likely due to using an unsupported version of coverage.
                display.warning('No arcs found for "%s" in coverage file: %s' % (filename, coverage_file))
                continue

            if '/ansible_modlib.zip/ansible/' in filename:
                # Rewrite the module_utils path from the remote host to match the controller. Ansible 2.6 and earlier.
                new_name = re.sub('^.*/ansible_modlib.zip/ansible/', ansible_path, filename)
                display.info('%s -> %s' % (filename, new_name), verbosity=3)
                filename = new_name
            elif re.search(r'/ansible_[^/]+_payload\.zip/ansible/', filename):
                # Rewrite the module_utils path from the remote host to match the controller. Ansible 2.7 and later.
                new_name = re.sub(r'^.*/ansible_[^/]+_payload\.zip/ansible/', ansible_path, filename)
                display.info('%s -> %s' % (filename, new_name), verbosity=3)
                filename = new_name
            elif '/ansible_module_' in filename:
                # Rewrite the module path from the remote host to match the controller. Ansible 2.6 and earlier.
                module_name = re.sub('^.*/ansible_module_(?P<module>.*).py$', '\\g<module>', filename)

                if module_name not in modules:
                    display.warning('Skipping coverage of unknown module: %s' % module_name)
                    continue

                new_name = os.path.abspath(modules[module_name])
                display.info('%s -> %s' % (filename, new_name), verbosity=3)
                filename = new_name
            elif re.search(r'/ansible_[^/]+_payload(_[^/]+|\.zip)/__main__\.py$', filename):
                # Rewrite the module path from the remote host to match the controller. Ansible 2.7 and later.
                # AnsiballZ versions using zipimporter will match the `.zip` portion of the regex.
                # AnsiballZ versions not using zipimporter will match the `_[^/]+` portion of the regex.
                module_name = re.sub(r'^.*/ansible_(?P<module>[^/]+)_payload(_[^/]+|\.zip)/__main__\.py$',
                                     '\\g<module>', filename).rstrip('_')

                if module_name not in modules:
                    display.warning('Skipping coverage of unknown module: %s' % module_name)
                    continue

                new_name = os.path.abspath(modules[module_name])
                display.info('%s -> %s' % (filename, new_name), verbosity=3)
                filename = new_name
            elif re.search('^(/.*?)?/root/ansible/', filename):
                # Rewrite the path of code running on a remote host or in a docker container as root.
                new_name = re.sub('^(/.*?)?/root/ansible/', root_path, filename)
                display.info('%s -> %s' % (filename, new_name), verbosity=3)
                filename = new_name

            if group not in groups:
                groups[group] = {}

            arc_data = groups[group]

            if filename not in arc_data:
                arc_data[filename] = set()

            arc_data[filename].update(arcs)

    output_files = []

    for group in sorted(groups):
        arc_data = groups[group]

        updated = coverage.CoverageData()

        for filename in arc_data:
            if not os.path.isfile(filename):
                display.warning('Invalid coverage path: %s' % filename)
                continue

            updated.add_arcs({filename: list(arc_data[filename])})

        if args.all:
            updated.add_arcs(dict((source, []) for source in sources))

        if not args.explain:
            output_file = COVERAGE_FILE + group
            updated.write_file(output_file)
            output_files.append(output_file)

    return sorted(output_files)
def _start(self, auth):
    """Start instance."""
    if self.started:
        display.info('Skipping started %s/%s instance %s.' % (self.platform, self.version, self.instance_id),
                     verbosity=1)
        return

    display.info('Initializing new %s/%s instance %s.' % (self.platform, self.version, self.instance_id), verbosity=1)

    if self.platform == 'windows':
        with open('examples/scripts/ConfigureRemotingForAnsible.ps1', 'rb') as winrm_config_fd:
            winrm_config = winrm_config_fd.read().decode('utf-8')
    else:
        winrm_config = None

    data = dict(
        config=dict(
            platform=self.platform,
            version=self.version,
            public_key=self.ssh_key.pub_contents if self.ssh_key else None,
            query=False,
            winrm_config=winrm_config,
        )
    )

    data.update(dict(auth=auth))

    headers = {
        'Content-Type': 'application/json',
    }

    tries = 3
    sleep = 15

    while True:
        tries -= 1
        response = self.client.put(self._uri, data=json.dumps(data), headers=headers)

        if response.status_code == 200:
            break

        error = self._create_http_error(response)

        if not tries:
            raise error

        display.warning('%s. Trying again after %d seconds.' % (error, sleep))
        time.sleep(sleep)

    self.started = True
    self._save()

    display.info('Started %s/%s from: %s' % (self.platform, self.version, self._uri), verbosity=1)

    if self.args.explain:
        return {}

    return response.json()
def __init__(self, args, platform, version, stage='prod', persist=True, name=None):
    """
    :type args: EnvironmentConfig
    :type platform: str
    :type version: str
    :type stage: str
    :type persist: bool
    :type name: str
    """
    self.args = args
    self.platform = platform
    self.version = version
    self.stage = stage
    self.client = HttpClient(args)
    self.connection = None
    self.instance_id = None
    self.name = name if name else '%s-%s' % (self.platform, self.version)
    self.ci_key = os.path.expanduser('~/.ansible-core-ci.key')

    aws_platforms = (
        'aws',
        'windows',
        'freebsd',
        'rhel',
        'vyos',
        'junos',
        'ios',
    )

    osx_platforms = (
        'osx',
    )

    if self.platform in aws_platforms:
        if args.remote_aws_region:
            # permit command-line override of region selection
            region = args.remote_aws_region
            # use a dedicated CI key when overriding the region selection
            self.ci_key += '.%s' % args.remote_aws_region
        elif is_shippable():
            # split Shippable jobs across multiple regions to maximize use of launch credits
            if self.platform == 'windows':
                region = 'us-east-2'
            else:
                region = 'us-east-1'
        else:
            # send all non-Shippable jobs to us-east-1 to reduce api key maintenance
            region = 'us-east-1'

        self.endpoint = AWS_ENDPOINTS[region]

        if self.platform == 'windows':
            self.ssh_key = None
            self.port = 5986
        else:
            self.ssh_key = SshKey(args)
            self.port = 22
    elif self.platform in osx_platforms:
        self.endpoint = 'https://osx.testing.ansible.com'
        self.ssh_key = SshKey(args)
        self.port = None
    else:
        raise ApplicationError('Unsupported platform: %s' % platform)

    self.path = os.path.expanduser('~/.ansible/test/instances/%s-%s' % (self.name, self.stage))

    if persist and self._load():
        try:
            display.info('Checking existing %s/%s instance %s.' % (self.platform, self.version, self.instance_id),
                         verbosity=1)

            self.connection = self.get(always_raise_on=[404])

            display.info('Loaded existing %s/%s from: %s' % (self.platform, self.version, self._uri), verbosity=1)
        except HttpError as ex:
            if ex.status != 404:
                raise

            self._clear()

            display.info('Cleared stale %s/%s instance %s.' % (self.platform, self.version, self.instance_id),
                         verbosity=1)

            self.instance_id = None
    else:
        self.instance_id = None
        self._clear()

    if self.instance_id:
        self.started = True
    else:
        self.started = False
        self.instance_id = str(uuid.uuid4())
def get(self, tries=3, sleep=15, always_raise_on=None):
    """
    Get instance connection information.
    :type tries: int
    :type sleep: int
    :type always_raise_on: list[int] | None
    :rtype: InstanceConnection
    """
    if not self.started:
        display.info('Skipping invalid %s/%s instance %s.' % (self.platform, self.version, self.instance_id),
                     verbosity=1)
        return None

    if not always_raise_on:
        always_raise_on = []

    if self.connection and self.connection.running:
        return self.connection

    while True:
        tries -= 1
        response = self.client.get(self._uri)

        if response.status_code == 200:
            break

        error = self._create_http_error(response)

        if not tries or response.status_code in always_raise_on:
            raise error

        display.warning('%s. Trying again after %d seconds.' % (error, sleep))
        time.sleep(sleep)

    if self.args.explain:
        self.connection = InstanceConnection(
            running=True,
            hostname='cloud.example.com',
            port=self.port or 12345,
            username='******',
            password='******' if self.platform == 'windows' else None,
        )
    else:
        response_json = response.json()
        status = response_json['status']
        con = response_json['connection']

        self.connection = InstanceConnection(
            running=status == 'running',
            hostname=con['hostname'],
            port=int(con.get('port', self.port)),
            username=con['username'],
            password=con.get('password'),
        )

    status = 'running' if self.connection.running else 'starting'

    display.info('Status update: %s/%s on instance %s is %s.' % (self.platform, self.version, self.instance_id, status),
                 verbosity=1)

    return self.connection
def test(self, args, targets):
    """
    :type args: SanityConfig
    :type targets: SanityTargets
    :rtype: SanityResult
    """
    if args.python_version in UNSUPPORTED_PYTHON_VERSIONS:
        display.warning('Skipping pylint on unsupported Python version %s.' % args.python_version)
        return SanitySkipped(self.name)

    with open(PYLINT_SKIP_PATH, 'r') as skip_fd:
        skip_paths = skip_fd.read().splitlines()

    invalid_ignores = []

    supported_versions = set(SUPPORTED_PYTHON_VERSIONS) - set(UNSUPPORTED_PYTHON_VERSIONS)
    supported_versions = set([v.split('.')[0] for v in supported_versions]) | supported_versions

    with open(PYLINT_IGNORE_PATH, 'r') as ignore_fd:
        ignore_entries = ignore_fd.read().splitlines()

    ignore = collections.defaultdict(dict)
    line = 0

    for ignore_entry in ignore_entries:
        line += 1

        if ' ' not in ignore_entry:
            invalid_ignores.append((line, 'Invalid syntax'))
            continue

        path, code = ignore_entry.split(' ', 1)

        if not os.path.exists(path):
            invalid_ignores.append((line, 'Remove "%s" since it does not exist' % path))
            continue

        if ' ' in code:
            code, version = code.split(' ', 1)

            if version not in supported_versions:
                invalid_ignores.append((line, 'Invalid version: %s' % version))
                continue

            if version != args.python_version and version != args.python_version.split('.')[0]:
                continue  # ignore version specific entries for other versions

        ignore[path][code] = line

    skip_paths_set = set(skip_paths)

    paths = sorted(i.path for i in targets.include
                   if (os.path.splitext(i.path)[1] == '.py' or i.path.startswith('bin/')) and i.path not in skip_paths_set)

    contexts = {}
    remaining_paths = set(paths)

    def add_context(available_paths, context_name, context_filter):
        """
        :type available_paths: set[str]
        :type context_name: str
        :type context_filter: (str) -> bool
        """
        filtered_paths = set(p for p in available_paths if context_filter(p))
        contexts[context_name] = sorted(filtered_paths)
        available_paths -= filtered_paths

    add_context(remaining_paths, 'ansible-test', lambda p: p.startswith('test/runner/'))
    add_context(remaining_paths, 'units', lambda p: p.startswith('test/units/'))
    add_context(remaining_paths, 'test', lambda p: p.startswith('test/'))
    add_context(remaining_paths, 'hacking', lambda p: p.startswith('hacking/'))
    add_context(remaining_paths, 'modules', lambda p: p.startswith('lib/ansible/modules/'))
    add_context(remaining_paths, 'module_utils', lambda p: p.startswith('lib/ansible/module_utils/'))
    add_context(remaining_paths, 'ansible', lambda p: True)

    messages = []
    context_times = []

    test_start = datetime.datetime.utcnow()

    for context in sorted(contexts):
        context_paths = contexts[context]

        if not context_paths:
            continue

        context_start = datetime.datetime.utcnow()
        messages += self.pylint(args, context, context_paths)
        context_end = datetime.datetime.utcnow()

        context_times.append('%s: %d (%s)' % (context, len(context_paths), context_end - context_start))

    test_end = datetime.datetime.utcnow()

    for context_time in context_times:
        display.info(context_time, verbosity=4)

    display.info('total: %d (%s)' % (len(paths), test_end - test_start), verbosity=4)

    errors = [SanityMessage(
        message=m['message'].replace('\n', ' '),
        path=m['path'],
        line=int(m['line']),
        column=int(m['column']),
        level=m['type'],
        code=m['symbol'],
    ) for m in messages]

    if args.explain:
        return SanitySuccess(self.name)

    line = 0

    filtered = []

    for error in errors:
        if error.code in ignore[error.path]:
            ignore[error.path][error.code] = None  # error ignored, clear line number of ignore entry to track usage
        else:
            filtered.append(error)  # error not ignored

    errors = filtered

    for invalid_ignore in invalid_ignores:
        errors.append(SanityMessage(
            code='A201',
            message=invalid_ignore[1],
            path=PYLINT_IGNORE_PATH,
            line=invalid_ignore[0],
            column=1,
            confidence=calculate_confidence(PYLINT_IGNORE_PATH, line, args.metadata) if args.metadata.changes else None,
        ))

    for path in skip_paths:
        line += 1

        if not os.path.exists(path):
            # Keep files out of the list which no longer exist in the repo.
            errors.append(SanityMessage(
                code='A101',
                message='Remove "%s" since it does not exist' % path,
                path=PYLINT_SKIP_PATH,
                line=line,
                column=1,
                confidence=calculate_best_confidence(((PYLINT_SKIP_PATH, line), (path, 0)), args.metadata) if args.metadata.changes else None,
            ))

    for path in paths:
        if path not in ignore:
            continue

        for code in ignore[path]:
            line = ignore[path][code]

            if not line:
                continue

            errors.append(SanityMessage(
                code='A102',
                message='Remove since "%s" passes "%s" pylint test' % (path, code),
                path=PYLINT_IGNORE_PATH,
                line=line,
                column=1,
                confidence=calculate_best_confidence(((PYLINT_IGNORE_PATH, line), (path, 0)), args.metadata) if args.metadata.changes else None,
            ))

    if errors:
        return SanityFailure(self.name, messages=errors)

    return SanitySuccess(self.name)
def command_sanity(args):
    """
    :type args: SanityConfig
    """
    changes = get_changes_filter(args)
    require = args.require + changes
    targets = SanityTargets(args.include, args.exclude, require)

    if not targets.include:
        raise AllTargetsSkipped()

    if args.delegate:
        raise Delegate(require=changes, exclude=args.exclude)

    install_command_requirements(args)

    tests = sanity_get_tests()

    if args.test:
        tests = [target for target in tests if target.name in args.test]
    else:
        disabled = [target.name for target in tests if not target.enabled and not args.allow_disabled]
        tests = [target for target in tests if target.enabled or args.allow_disabled]

        if disabled:
            display.warning('Skipping tests disabled by default without --allow-disabled: %s' % ', '.join(sorted(disabled)))

    if args.skip_test:
        tests = [target for target in tests if target.name not in args.skip_test]

    total = 0
    failed = []

    for test in tests:
        if args.list_tests:
            display.info(test.name)
            continue

        if isinstance(test, SanityMultipleVersion):
            versions = SUPPORTED_PYTHON_VERSIONS
        else:
            versions = (None,)

        for version in versions:
            if args.python and version and version != args.python_version:
                continue

            check_pyyaml(args, version or args.python_version)

            display.info('Sanity check using %s%s' % (test.name, ' with Python %s' % version if version else ''))

            options = ''

            if isinstance(test, SanityCodeSmellTest):
                result = test.test(args, targets)
            elif isinstance(test, SanityMultipleVersion):
                result = test.test(args, targets, python_version=version)
                options = ' --python %s' % version
            elif isinstance(test, SanitySingleVersion):
                result = test.test(args, targets)
            else:
                raise Exception('Unsupported test type: %s' % type(test))

            result.write(args)

            total += 1

            if isinstance(result, SanityFailure):
                failed.append(result.test + options)

    if failed:
        message = 'The %d sanity test(s) listed below (out of %d) failed. See error output above for details.\n%s' % (
            len(failed), total, '\n'.join(failed))

        if args.failure_ok:
            display.error(message)
        else:
            raise ApplicationError(message)
def _setup_dynamic(self):
    """Create a CloudStack simulator using docker."""
    config = self._read_config_template()

    self.container_name = self.DOCKER_SIMULATOR_NAME

    results = docker_inspect(self.args, self.container_name)

    if results and not results[0]['State']['Running']:
        docker_rm(self.args, self.container_name)
        results = []

    if results:
        display.info('Using the existing CloudStack simulator docker container.', verbosity=1)
    else:
        display.info('Starting a new CloudStack simulator docker container.', verbosity=1)
        docker_pull(self.args, self.image)
        docker_run(self.args, self.image, ['-d', '-p', '8888:8888', '--name', self.container_name])

        if not self.args.explain:
            display.notice('The CloudStack simulator will probably be ready in 2 - 4 minutes.')

    container_id = get_docker_container_id()

    if container_id:
        display.info('Running in docker container: %s' % container_id, verbosity=1)
        self.host = self._get_simulator_address()
        display.info('Found CloudStack simulator container address: %s' % self.host, verbosity=1)
    else:
        self.host = 'localhost'

    self.port = 8888
    self.endpoint = 'http://%s:%d' % (self.host, self.port)

    self._wait_for_service()

    if self.args.explain:
        values = dict(
            HOST=self.host,
            PORT=str(self.port),
        )
    else:
        credentials = self._get_credentials()

        if self.args.docker:
            host = self.DOCKER_SIMULATOR_NAME
        else:
            host = self.host

        values = dict(
            HOST=host,
            PORT=str(self.port),
            KEY=credentials['apikey'],
            SECRET=credentials['secretkey'],
        )

    config = self._populate_config_template(config, values)

    self._write_config(config)
def test(self, args, targets, python_version): """ :type args: SanityConfig :type targets: SanityTargets :type python_version: str :rtype: SanityResult """ with open('test/sanity/ansible-doc/skip.txt', 'r') as skip_fd: skip_modules = set(skip_fd.read().splitlines()) modules = sorted( set(m for i in targets.include_external for m in i.modules) - set(m for i in targets.exclude_external for m in i.modules) - skip_modules) if not modules: return SanitySkipped(self.name, python_version=python_version) module_paths = dict( (t.module, t.path) for t in targets.targets if t.module) env = ansible_environment(args, color=False) cmd = ['ansible-doc'] + modules try: stdout, stderr = intercept_command(args, cmd, target_name='ansible-doc', env=env, capture=True, python_version=python_version) status = 0 except SubprocessError as ex: stdout = ex.stdout stderr = ex.stderr status = ex.status if stderr: errors = stderr.strip().splitlines() messages = [self.parse_error(e, module_paths) for e in errors] if messages and all(messages): return SanityFailure(self.name, messages=messages, python_version=python_version) if status: summary = u'%s' % SubprocessError( cmd=cmd, status=status, stderr=stderr) return SanityFailure(self.name, summary=summary, python_version=python_version) if stdout: display.info(stdout.strip(), verbosity=3) if stderr: summary = u'Output on stderr from ansible-doc is considered an error.\n\n%s' % SubprocessError( cmd, stderr=stderr) return SanityFailure(self.name, summary=summary, python_version=python_version) return SanitySuccess(self.name, python_version=python_version)
def command_sanity(args): """ :type args: SanityConfig """ changes = get_changes_filter(args) require = (args.require or []) + changes targets = SanityTargets(args.include, args.exclude, require) if not targets.include: raise AllTargetsSkipped() if args.delegate: raise Delegate(require=changes) install_command_requirements(args) tests = sanity_get_tests() if args.test: tests = [t for t in tests if t.name in args.test] if args.skip_test: tests = [t for t in tests if t.name not in args.skip_test] total = 0 failed = [] for test in tests: if args.list_tests: display.info(test.name) continue if test.intercept: versions = SUPPORTED_PYTHON_VERSIONS else: versions = None, for version in versions: if args.python and version and version != args.python: continue display.info('Sanity check using %s%s' % (test.name, ' with Python %s' % version if version else '')) options = '' if test.script: result = test.func(args, targets, test.script) elif test.intercept: result = test.func(args, targets, python_version=version) options = ' --python %s' % version else: result = test.func(args, targets) result.write(args) total += 1 if isinstance(result, SanityFailure): failed.append(result.test + options) if failed: message = 'The %d sanity test(s) listed below (out of %d) failed. See error output above for details.\n%s' % ( len(failed), total, '\n'.join(failed)) if args.failure_ok: display.error(message) else: raise ApplicationError(message)
def cleanup_python_paths(): """Clean up all temporary python directories.""" for path in sorted(PYTHON_PATHS.values()): display.info('Cleaning up temporary python directory: %s' % path, verbosity=2) shutil.rmtree(path)
def write_console(self): """Write results to console.""" display.info('No tests applicable.', verbosity=1)
def get_python_path(args, interpreter):
    """
    :type args: TestConfig
    :type interpreter: str
    :rtype: str
    """
    # When the python interpreter is already named "python" its directory can simply be added to the path.
    # Using another level of indirection is only required when the interpreter has a different name.
    if os.path.basename(interpreter) == 'python':
        return os.path.dirname(interpreter)

    python_path = PYTHON_PATHS.get(interpreter)

    if python_path:
        return python_path

    prefix = 'python-'
    suffix = '-ansible'

    root_temp_dir = '/tmp'

    if args.explain:
        return os.path.join(root_temp_dir, ''.join((prefix, 'temp', suffix)))

    python_path = tempfile.mkdtemp(prefix=prefix, suffix=suffix, dir=root_temp_dir)
    injected_interpreter = os.path.join(python_path, 'python')

    # A symlink is faster than the execv wrapper, but isn't compatible with virtual environments.
    # Attempt to detect when it is safe to use a symlink by checking the real path of the interpreter.
    use_symlink = os.path.dirname(os.path.realpath(interpreter)) == os.path.dirname(interpreter)

    if use_symlink:
        display.info('Injecting "%s" as a symlink to the "%s" interpreter.' % (injected_interpreter, interpreter), verbosity=1)

        os.symlink(interpreter, injected_interpreter)
    else:
        display.info('Injecting "%s" as an execv wrapper for the "%s" interpreter.' % (injected_interpreter, interpreter), verbosity=1)

        code = textwrap.dedent('''
        #!%s
        from __future__ import absolute_import
        from os import execv
        from sys import argv
        python = '%s'
        execv(python, [python] + argv[1:])
        ''' % (interpreter, interpreter)).lstrip()

        with open(injected_interpreter, 'w') as python_fd:
            python_fd.write(code)

        os.chmod(injected_interpreter, MODE_FILE_EXECUTE)

    os.chmod(python_path, MODE_DIRECTORY)

    if not PYTHON_PATHS:
        atexit.register(cleanup_python_paths)

    PYTHON_PATHS[interpreter] = python_path

    return python_path
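# Usage sketch for get_python_path (the helper below is hypothetical, not part
# of the runner): prepend the returned directory to PATH so child processes
# resolve "python" to the requested interpreter.


def env_with_injected_interpreter(args, interpreter, env):
    """Return a copy of env with the injected interpreter directory first on PATH."""
    env = dict(env)
    env['PATH'] = '%s:%s' % (get_python_path(args, interpreter), env['PATH'])
    return env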
def categorize_changes(args, paths, verbose_command=None):
    """
    :type args: TestConfig
    :type paths: list[str]
    :type verbose_command: str
    :rtype: dict[str, list[str]]
    """
    mapper = PathMapper(args)

    commands = {
        'sanity': set(),
        'units': set(),
        'integration': set(),
        'windows-integration': set(),
        'network-integration': set(),
    }

    additional_paths = set()

    for path in paths:
        if not os.path.exists(path):
            continue

        dependent_paths = mapper.get_dependent_paths(path)

        if not dependent_paths:
            continue

        display.info('Expanded "%s" to %d dependent file(s):' % (path, len(dependent_paths)), verbosity=1)

        for dependent_path in dependent_paths:
            display.info(dependent_path, verbosity=1)
            additional_paths.add(dependent_path)

    additional_paths -= set(paths)  # don't count changed paths as additional paths

    if additional_paths:
        display.info('Expanded %d changed file(s) into %d additional dependent file(s).' % (len(paths), len(additional_paths)))
        paths = sorted(set(paths) | additional_paths)

    display.info('Mapping %d changed file(s) to tests.' % len(paths))

    for path in paths:
        tests = mapper.classify(path)

        if tests is None:
            display.info('%s -> all' % path, verbosity=1)
            tests = all_tests(args)  # not categorized, run all tests
            display.warning('Path not categorized: %s' % path)
        else:
            tests = dict((key, value) for key, value in tests.items() if value)

        if verbose_command:
            result = '%s: %s' % (verbose_command, tests.get(verbose_command) or 'none')

            # identify targeted integration tests (those which only target a single integration command)
            if 'integration' in verbose_command and tests.get(verbose_command):
                if not any('integration' in command for command in tests if command != verbose_command):
                    result += ' (targeted)'
        else:
            result = '%s' % tests

        display.info('%s -> %s' % (path, result), verbosity=1)

        for command, target in tests.items():
            commands[command].add(target)

    for command in commands:
        commands[command].discard('none')

        if any(t == 'all' for t in commands[command]):
            commands[command] = set(['all'])

    commands = dict((c, sorted(commands[c])) for c in commands if commands[c])

    return commands
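# Illustrative only: for a hypothetical change set, the returned mapping has
# one entry per test command with sorted target names, e.g.:
#
#     {
#         'sanity': ['all'],
#         'units': ['test/units/modules/system/'],
#         'integration': ['ping'],
#     }
#
# Commands with no remaining targets are omitted, 'none' targets are dropped,
# and a command containing 'all' collapses to just ['all'], as handled above.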
def command_integration_role(args, target, start_at_task): """ :type args: IntegrationConfig :type target: IntegrationTarget :type start_at_task: str | None """ display.info('Running %s integration test role' % target.name) vars_file = 'integration_config.yml' if isinstance(args, WindowsIntegrationConfig): inventory = 'inventory.winrm' hosts = 'windows' gather_facts = False elif isinstance(args, NetworkIntegrationConfig): inventory = args.inventory or 'inventory.networking' hosts = target.name[:target.name.find('_')] gather_facts = False else: inventory = 'inventory' hosts = 'testhost' gather_facts = True cloud_environment = get_cloud_environment(args, target) if cloud_environment: hosts = cloud_environment.inventory_hosts or hosts playbook = ''' - hosts: %s gather_facts: %s roles: - { role: %s } ''' % (hosts, gather_facts, target.name) with tempfile.NamedTemporaryFile(dir='test/integration', prefix='%s-' % target.name, suffix='.yml') as pb_fd: pb_fd.write(playbook.encode('utf-8')) pb_fd.flush() filename = os.path.basename(pb_fd.name) display.info('>>> Playbook: %s\n%s' % (filename, playbook.strip()), verbosity=3) cmd = ['ansible-playbook', filename, '-i', inventory, '-e', '@%s' % vars_file] if start_at_task: cmd += ['--start-at-task', start_at_task] if args.tags: cmd += ['--tags', args.tags] if args.skip_tags: cmd += ['--skip-tags', args.skip_tags] if args.diff: cmd += ['--diff'] if isinstance(args, NetworkIntegrationConfig): if args.testcase: cmd += ['-e', 'testcase=%s' % args.testcase] if args.verbosity: cmd.append('-' + ('v' * args.verbosity)) env = integration_environment(args, target, cmd) cwd = 'test/integration' env['ANSIBLE_ROLES_PATH'] = os.path.abspath('test/integration/targets') intercept_command(args, cmd, target_name=target.name, env=env, cwd=cwd)
def command_network_integration(args):
    """
    :type args: NetworkIntegrationConfig
    """
    default_filename = 'test/integration/inventory.networking'

    if args.inventory:
        filename = os.path.join('test/integration', args.inventory)
    else:
        filename = default_filename

    if not args.explain and not args.platform and not os.path.exists(filename):
        if args.inventory:
            filename = os.path.abspath(filename)

        raise ApplicationError(
            'Inventory not found: %s\n'
            'Use --inventory to specify the inventory path.\n'
            'Use --platform to provision resources and generate an inventory file.\n'
            'See also inventory template: %s.template' % (filename, default_filename)
        )

    all_targets = tuple(walk_network_integration_targets(include_hidden=True))
    internal_targets = command_integration_filter(args, all_targets, init_callback=network_init)
    instances = []  # type: list[lib.thread.WrappedThread]

    if args.platform:
        configs = dict((config['platform_version'], config) for config in args.metadata.instance_config)

        for platform_version in args.platform:
            platform, version = platform_version.split('/', 1)
            config = configs.get(platform_version)

            if not config:
                continue

            instance = lib.thread.WrappedThread(functools.partial(network_run, args, platform, version, config))
            instance.daemon = True
            instance.start()
            instances.append(instance)

        while any(instance.is_alive() for instance in instances):
            time.sleep(1)

        remotes = [instance.wait_for_result() for instance in instances]
        inventory = network_inventory(remotes)

        display.info('>>> Inventory: %s\n%s' % (filename, inventory.strip()), verbosity=3)

        if not args.explain:
            with open(filename, 'w') as inventory_fd:
                inventory_fd.write(inventory)

    success = False

    try:
        command_integration_filtered(args, internal_targets, all_targets)
        success = True
    finally:
        if args.remote_terminate == 'always' or (args.remote_terminate == 'success' and success):
            for instance in instances:
                instance.result.stop()
def command_units(args): """ :type args: UnitsConfig """ changes = get_changes_filter(args) require = (args.require or []) + changes include, exclude = walk_external_targets(walk_units_targets(), args.include, args.exclude, require) if not include: raise AllTargetsSkipped() if args.delegate: raise Delegate(require=changes) version_commands = [] for version in SUPPORTED_PYTHON_VERSIONS: # run all versions unless version given, in which case run only that version if args.python and version != args.python_version: continue if args.requirements_mode != 'skip': install_command_requirements(args, version) env = ansible_environment(args) cmd = [ 'pytest', '--boxed', '-r', 'a', '-n', 'auto', '--color', 'yes' if args.color else 'no', '--junit-xml', 'test/results/junit/python%s-units.xml' % version, ] if args.collect_only: cmd.append('--collect-only') if args.verbosity: cmd.append('-' + ('v' * args.verbosity)) if exclude: cmd += ['--ignore=%s' % target.path for target in exclude] cmd += [target.path for target in include] version_commands.append((version, cmd, env)) if args.requirements_mode == 'only': sys.exit() for version, command, env in version_commands: display.info('Unit test with Python %s' % version) try: intercept_command(args, command, target_name='units', env=env, python_version=version) except SubprocessError as ex: # pytest exits with status code 5 when all tests are skipped, which isn't an error for our use case if ex.status != 5: raise
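# For illustration, with Python 3.7, color enabled and -v, the loop above runs
# a command along these lines (the target paths here are hypothetical):
#
#     pytest --boxed -r a -n auto --color yes \
#         --junit-xml test/results/junit/python3.7-units.xml \
#         -v --ignore=test/units/skip test/units/modules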
def command_network_integration(args):
    """
    :type args: NetworkIntegrationConfig
    """
    default_filename = 'test/integration/inventory.networking'

    if args.inventory:
        filename = os.path.join('test/integration', args.inventory)
    else:
        filename = default_filename

    if not args.explain and not args.platform and not os.path.exists(filename):
        if args.inventory:
            filename = os.path.abspath(filename)

        raise ApplicationError(
            'Inventory not found: %s\n'
            'Use --inventory to specify the inventory path.\n'
            'Use --platform to provision resources and generate an inventory file.\n'
            'See also inventory template: %s.template' % (filename, default_filename))

    all_targets = tuple(walk_network_integration_targets(include_hidden=True))
    internal_targets = command_integration_filter(args, all_targets)
    platform_targets = set(a for t in internal_targets for a in t.aliases if a.startswith('network/'))

    if args.platform:
        instances = []  # type: list[lib.thread.WrappedThread]

        # generate an ssh key (if needed) up front once, instead of for each instance
        SshKey(args)

        for platform_version in args.platform:
            platform, version = platform_version.split('/', 1)
            platform_target = 'network/%s/' % platform

            if platform_target not in platform_targets and 'network/basics/' not in platform_targets:
                display.warning('Skipping "%s" because selected tests do not target the "%s" platform.' % (platform_version, platform))
                continue

            instance = lib.thread.WrappedThread(functools.partial(network_run, args, platform, version))
            instance.daemon = True
            instance.start()
            instances.append(instance)

        install_command_requirements(args)

        while any(instance.is_alive() for instance in instances):
            time.sleep(1)

        remotes = [instance.wait_for_result() for instance in instances]
        inventory = network_inventory(remotes)

        display.info('>>> Inventory: %s\n%s' % (filename, inventory.strip()), verbosity=3)

        if not args.explain:
            with open(filename, 'w') as inventory_fd:
                inventory_fd.write(inventory)
    else:
        install_command_requirements(args)

    command_integration_filtered(args, internal_targets, all_targets)
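# The generated inventory is plain Ansible INI. A hypothetical
# inventory.networking for a single provisioned platform might look like this
# (group names and variables depend on network_inventory(), not shown here):
#
#     [ios]
#     ios-4.6.3 ansible_host=10.0.0.5 ansible_user=admin ansible_network_os=ios
#
#     [net:children]
#     ios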
def command_integration_filtered(args, targets): """ :type args: IntegrationConfig :type targets: tuple[IntegrationTarget] """ found = False targets_iter = iter(targets) test_dir = os.path.expanduser('~/ansible_testing') if any('needs/ssh/' in target.aliases for target in targets): max_tries = 20 display.info('SSH service required for tests. Checking to make sure we can connect.') for i in range(1, max_tries + 1): try: run_command(args, ['ssh', '-o', 'BatchMode=yes', 'localhost', 'id'], capture=True) display.info('SSH service responded.') break except SubprocessError: if i == max_tries: raise seconds = 3 display.warning('SSH service not responding. Waiting %d second(s) before checking again.' % seconds) time.sleep(seconds) start_at_task = args.start_at_task for target in targets_iter: if args.start_at and not found: found = target.name == args.start_at if not found: continue tries = 2 if args.retry_on_error else 1 verbosity = args.verbosity cloud_environment = get_cloud_environment(args, target) original_environment = EnvironmentDescription() display.info('>>> Environment Description\n%s' % original_environment, verbosity=3) try: while tries: tries -= 1 if not args.explain: # create a fresh test directory for each test target remove_tree(test_dir) make_dirs(test_dir) try: if target.script_path: command_integration_script(args, target) else: command_integration_role(args, target, start_at_task) start_at_task = None break except SubprocessError: if cloud_environment: cloud_environment.on_failure(target, tries) if not original_environment.validate(target.name, throw=False): raise if not tries: raise display.warning('Retrying test target "%s" with maximum verbosity.' % target.name) display.verbosity = args.verbosity = 6 original_environment.validate(target.name, throw=True) except: display.notice('To resume at this test target, use the option: --start-at %s' % target.name) next_target = next(targets_iter, None) if next_target: display.notice('To resume after this test target, use the option: --start-at %s' % next_target.name) raise finally: display.verbosity = args.verbosity = verbosity
def test(self, args, targets, python_version): """ :type args: SanityConfig :type targets: SanityTargets :type python_version: str :rtype: TestResult """ skip_file = 'test/sanity/ansible-doc/skip.txt' skip_modules = set( read_lines_without_comments(skip_file, remove_blank_lines=True)) plugin_type_blacklist = set([ # not supported by ansible-doc 'action', 'cliconf', 'filter', 'httpapi', 'netconf', 'terminal', 'test', ]) modules = sorted( set(m for i in targets.include_external for m in i.modules) - set(m for i in targets.exclude_external for m in i.modules) - skip_modules) plugins = [ os.path.splitext(i.path)[0].split('/')[-2:] + [i.path] for i in targets.include if os.path.splitext(i.path)[1] == '.py' and os.path.basename(i.path) != '__init__.py' and re.search(r'^lib/ansible/plugins/[^/]+/', i.path) and i.path != 'lib/ansible/plugins/cache/base.py' ] doc_targets = collections.defaultdict(list) target_paths = collections.defaultdict(dict) for module in modules: doc_targets['module'].append(module) for plugin_type, plugin_name, plugin_path in plugins: if plugin_type in plugin_type_blacklist: continue doc_targets[plugin_type].append(plugin_name) target_paths[plugin_type][plugin_name] = plugin_path if not doc_targets: return SanitySkipped(self.name, python_version=python_version) target_paths['module'] = dict( (t.module, t.path) for t in targets.targets if t.module) env = ansible_environment(args, color=False) error_messages = [] for doc_type in sorted(doc_targets): cmd = ['ansible-doc', '-t', doc_type] + sorted( doc_targets[doc_type]) try: stdout, stderr = intercept_command( args, cmd, target_name='ansible-doc', env=env, capture=True, python_version=python_version) status = 0 except SubprocessError as ex: stdout = ex.stdout stderr = ex.stderr status = ex.status if stderr: errors = stderr.strip().splitlines() messages = [self.parse_error(e, target_paths) for e in errors] if messages and all(messages): error_messages += messages continue if status: summary = u'%s' % SubprocessError( cmd=cmd, status=status, stderr=stderr) return SanityFailure(self.name, summary=summary, python_version=python_version) if stdout: display.info(stdout.strip(), verbosity=3) if stderr: summary = u'Output on stderr from ansible-doc is considered an error.\n\n%s' % SubprocessError( cmd, stderr=stderr) return SanityFailure(self.name, summary=summary, python_version=python_version) if error_messages: return SanityFailure(self.name, messages=error_messages, python_version=python_version) return SanitySuccess(self.name, python_version=python_version)
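# parse_error is referenced above but not part of this excerpt. A minimal
# sketch of such a helper; the stderr line format matched here is an
# assumption, not the actual ansible-doc output contract:

import re


def parse_error_sketch(error, target_paths):
    """Return a SanityMessage for a recognized ansible-doc error line, or None."""
    # hypothetical example line: "[ERROR]: module ping missing documentation"
    match = re.search(r'^\[ERROR\]: (?P<type>\w+) (?P<name>\S+) (?P<message>.*)$', error)

    if match:
        path = target_paths.get(match.group('type'), {}).get(match.group('name'))

        if path:
            return SanityMessage(message=match.group('message'), path=path)

    return None  # unrecognized lines fall through to the generic stderr failure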
def test(self, args, targets, python_version):
    """
    :type args: SanityConfig
    :type targets: SanityTargets
    :type python_version: str
    :rtype: TestResult
    """
    settings = self.load_processor(args, python_version)

    paths = [target.path for target in targets.include]

    env = ansible_environment(args, color=False)

    # create a clean virtual environment to minimize the available imports beyond the python standard library
    virtual_environment_path = os.path.abspath('test/runner/.tox/minimal-py%s' % python_version.replace('.', ''))
    virtual_environment_bin = os.path.join(virtual_environment_path, 'bin')

    remove_tree(virtual_environment_path)

    python = find_python(python_version)

    cmd = [python, '-m', 'virtualenv', virtual_environment_path, '--python', python, '--no-setuptools', '--no-wheel']

    if not args.coverage:
        cmd.append('--no-pip')

    run_command(args, cmd, capture=True)

    # add the importer to our virtual environment so it can be accessed through the coverage injector
    importer_path = os.path.join(virtual_environment_bin, 'importer.py')

    if not args.explain:
        os.symlink(os.path.abspath(os.path.join(ANSIBLE_ROOT, 'test/sanity/import/importer.py')), importer_path)

    # create a minimal python library
    python_path = os.path.abspath('test/runner/.tox/import/lib')
    ansible_path = os.path.join(python_path, 'ansible')
    ansible_init = os.path.join(ansible_path, '__init__.py')
    ansible_link = os.path.join(ansible_path, 'module_utils')

    if not args.explain:
        remove_tree(ansible_path)
        make_dirs(ansible_path)

        with open(ansible_init, 'w'):
            pass

        os.symlink(os.path.join(ANSIBLE_ROOT, 'lib/ansible/module_utils'), ansible_link)

        if data_context().content.collection:
            # inject just enough Ansible code for the collections loader to work on all supported Python versions
            # the __init__.py files are needed only for Python 2.x
            # the empty modules directory is required for the collection loader to generate the synthetic packages list
            make_dirs(os.path.join(ansible_path, 'utils'))

            with open(os.path.join(ansible_path, 'utils/__init__.py'), 'w'):
                pass

            os.symlink(os.path.join(ANSIBLE_ROOT, 'lib/ansible/utils/collection_loader.py'), os.path.join(ansible_path, 'utils/collection_loader.py'))
            os.symlink(os.path.join(ANSIBLE_ROOT, 'lib/ansible/utils/singleton.py'), os.path.join(ansible_path, 'utils/singleton.py'))

            make_dirs(os.path.join(ansible_path, 'modules'))

            with open(os.path.join(ansible_path, 'modules/__init__.py'), 'w'):
                pass

    # activate the virtual environment
    env['PATH'] = '%s:%s' % (virtual_environment_bin, env['PATH'])
    env['PYTHONPATH'] = python_path

    # make sure coverage is available in the virtual environment if needed
    if args.coverage:
        run_command(args, generate_pip_install(['pip'], 'sanity.import', packages=['setuptools']), env=env)
        run_command(args, generate_pip_install(['pip'], 'sanity.import', packages=['coverage']), env=env)
        run_command(args, ['pip', 'uninstall', '--disable-pip-version-check', '-y', 'setuptools'], env=env)
        run_command(args, ['pip', 'uninstall', '--disable-pip-version-check', '-y', 'pip'], env=env)

    cmd = ['importer.py']

    data = '\n'.join(paths)

    display.info(data, verbosity=4)

    results = []

    virtualenv_python = os.path.join(virtual_environment_bin, 'python')

    try:
        with coverage_context(args):
            stdout, stderr = intercept_command(args, cmd, self.name, env, capture=True, data=data, python_version=python_version,
                                               virtualenv=virtualenv_python)

        if stdout or stderr:
            raise SubprocessError(cmd, stdout=stdout, stderr=stderr)
    except SubprocessError as ex:
        if ex.status != 10 or ex.stderr or not ex.stdout:
            raise

        pattern = r'^(?P<path>[^:]*):(?P<line>[0-9]+):(?P<column>[0-9]+): (?P<message>.*)$'

        results = parse_to_list_of_dict(pattern, ex.stdout)

        results = [SanityMessage(
            message=r['message'],
            path=r['path'],
            line=int(r['line']),
            column=int(r['column']),
        ) for r in results]

        results = settings.process_errors(results, paths)

    if results:
        return SanityFailure(self.name, messages=results, python_version=python_version)

    return SanitySuccess(self.name, python_version=python_version)
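# parse_to_list_of_dict, used above, maps a named-group pattern over each line
# of output. A minimal sketch of such a helper (the real one may differ in how
# unmatched lines are handled):

import re


def parse_to_list_of_dict_sketch(pattern, value):
    """Return one dict of named groups per line; fail loudly on unmatched lines."""
    matched = []
    unmatched = []

    for line in value.splitlines():
        match = re.search(pattern, line)

        if match:
            matched.append(match.groupdict())
        else:
            unmatched.append(line)

    if unmatched:
        raise Exception('Pattern "%s" did not match values:\n%s' % (pattern, '\n'.join(unmatched)))

    return matched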
def analyze_integration_target_dependencies(integration_targets): """ :type integration_targets: list[IntegrationTarget] :rtype: dict[str,set[str]] """ real_target_root = os.path.realpath('test/integration/targets') + '/' role_targets = [target for target in integration_targets if target.type == 'role'] hidden_role_target_names = set(target.name for target in role_targets if 'hidden/' in target.aliases) dependencies = collections.defaultdict(set) # handle setup dependencies for target in integration_targets: for setup_target_name in target.setup_always + target.setup_once: dependencies[setup_target_name].add(target.name) # handle target dependencies for target in integration_targets: for need_target in target.needs_target: dependencies[need_target].add(target.name) # handle symlink dependencies between targets # this use case is supported, but discouraged for target in integration_targets: for path in data_context().content.walk_files(target.path): if not os.path.islink(path): continue real_link_path = os.path.realpath(path) if not real_link_path.startswith(real_target_root): continue link_target = real_link_path[len(real_target_root):].split('/')[0] if link_target == target.name: continue dependencies[link_target].add(target.name) # intentionally primitive analysis of role meta to avoid a dependency on pyyaml # script based targets are scanned as they may execute a playbook with role dependencies for target in integration_targets: meta_dir = os.path.join(target.path, 'meta') if not os.path.isdir(meta_dir): continue meta_paths = data_context().content.get_files(meta_dir) for meta_path in meta_paths: if os.path.exists(meta_path): with open(meta_path, 'rb') as meta_fd: # try and decode the file as a utf-8 string, skip if it contains invalid chars (binary file) try: meta_lines = to_text(meta_fd.read()).splitlines() except UnicodeDecodeError: continue for meta_line in meta_lines: if re.search(r'^ *#.*$', meta_line): continue if not meta_line.strip(): continue for hidden_target_name in hidden_role_target_names: if hidden_target_name in meta_line: dependencies[hidden_target_name].add(target.name) while True: changes = 0 for dummy, dependent_target_names in dependencies.items(): for dependent_target_name in list(dependent_target_names): new_target_names = dependencies.get(dependent_target_name) if new_target_names: for new_target_name in new_target_names: if new_target_name not in dependent_target_names: dependent_target_names.add(new_target_name) changes += 1 if not changes: break for target_name in sorted(dependencies): consumers = dependencies[target_name] if not consumers: continue display.info('%s:' % target_name, verbosity=4) for consumer in sorted(consumers): display.info(' %s' % consumer, verbosity=4) return dependencies
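# The while-loop above is a fixed-point iteration computing the transitive
# closure of the consumer sets. A self-contained toy version of the same idea:

import collections


def expand_dependencies(dependencies):
    """Propagate consumers until no new entries appear (transitive closure)."""
    while True:
        changes = 0

        for dependent_target_names in dependencies.values():
            for dependent_target_name in list(dependent_target_names):
                for new_target_name in dependencies.get(dependent_target_name, ()):
                    if new_target_name not in dependent_target_names:
                        dependent_target_names.add(new_target_name)
                        changes += 1

        if not changes:
            return dependencies


# setup_a is needed by target_b, and target_b is needed by target_c, so after
# expansion setup_a's consumers include target_c as well:
deps = collections.defaultdict(set, {'setup_a': {'target_b'}, 'target_b': {'target_c'}})
assert expand_dependencies(deps)['setup_a'] == {'target_b', 'target_c'}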
def command_sanity_pep8(args, targets):
    """
    :type args: SanityConfig
    :type targets: SanityTargets
    :rtype: SanityResult
    """
    test = 'pep8'

    with open(PEP8_SKIP_PATH, 'r') as skip_fd:
        skip_paths = skip_fd.read().splitlines()

    with open(PEP8_LEGACY_PATH, 'r') as legacy_fd:
        legacy_paths = legacy_fd.read().splitlines()

    with open('test/sanity/pep8/legacy-ignore.txt', 'r') as ignore_fd:
        legacy_ignore = set(ignore_fd.read().splitlines())

    with open('test/sanity/pep8/current-ignore.txt', 'r') as ignore_fd:
        current_ignore = sorted(ignore_fd.read().splitlines())

    skip_paths_set = set(skip_paths)
    legacy_paths_set = set(legacy_paths)

    paths = sorted(i.path for i in targets.include if os.path.splitext(i.path)[1] == '.py' and i.path not in skip_paths_set)

    if not paths:
        return SanitySkipped(test)

    cmd = [
        'pycodestyle',
        '--max-line-length', '160',
        '--config', '/dev/null',
        '--ignore', ','.join(sorted(current_ignore)),
    ] + paths

    try:
        stdout, stderr = run_command(args, cmd, capture=True)
        status = 0
    except SubprocessError as ex:
        stdout = ex.stdout
        stderr = ex.stderr
        status = ex.status

    if stderr:
        raise SubprocessError(cmd=cmd, status=status, stderr=stderr, stdout=stdout)

    if args.explain:
        return SanitySkipped(test)

    pattern = '^(?P<path>[^:]*):(?P<line>[0-9]+):(?P<column>[0-9]+): (?P<code>[WE][0-9]{3}) (?P<message>.*)$'

    results = [re.search(pattern, line).groupdict() for line in stdout.splitlines()]

    results = [SanityMessage(
        message=r['message'],
        path=r['path'],
        line=int(r['line']),
        column=int(r['column']),
        level='warning' if r['code'].startswith('W') else 'error',
        code=r['code'],
    ) for r in results]

    failed_result_paths = set([result.path for result in results])
    used_paths = set(paths)

    errors = []
    summary = {}

    line = 0

    for path in legacy_paths:
        line += 1

        if not os.path.exists(path):
            # Keep files out of the list which no longer exist in the repo.
            errors.append(SanityMessage(
                code='A101',
                message='Remove "%s" since it does not exist' % path,
                path=PEP8_LEGACY_PATH,
                line=line,
                column=1,
                confidence=calculate_best_confidence(((PEP8_LEGACY_PATH, line), (path, 0)), args.metadata) if args.metadata.changes else None,
            ))

        if path in used_paths and path not in failed_result_paths:
            # Keep files out of the list which no longer require the relaxed rule set.
            errors.append(SanityMessage(
                code='A201',
                message='Remove "%s" since it passes the current rule set' % path,
                path=PEP8_LEGACY_PATH,
                line=line,
                column=1,
                confidence=calculate_best_confidence(((PEP8_LEGACY_PATH, line), (path, 0)), args.metadata) if args.metadata.changes else None,
            ))

    line = 0

    for path in skip_paths:
        line += 1

        if not os.path.exists(path):
            # Keep files out of the list which no longer exist in the repo.
            errors.append(SanityMessage(
                code='A101',
                message='Remove "%s" since it does not exist' % path,
                path=PEP8_SKIP_PATH,
                line=line,
                column=1,
                confidence=calculate_best_confidence(((PEP8_SKIP_PATH, line), (path, 0)), args.metadata) if args.metadata.changes else None,
            ))

    for result in results:
        if result.path in legacy_paths_set and result.code in legacy_ignore:
            # Files on the legacy list are permitted to have errors on the legacy ignore list.
            # However, we want to report on their existence to track progress towards eliminating these exceptions.
            display.info('PEP 8: %s (legacy)' % result, verbosity=3)

            key = '%s %s' % (result.code, re.sub('[0-9]+', 'NNN', result.message))

            if key not in summary:
                summary[key] = 0

            summary[key] += 1
        else:
            # Files not on the legacy list and errors not on the legacy ignore list are PEP 8 policy errors.
            errors.append(result)

    if summary:
        lines = []
        count = 0

        for key in sorted(summary):
            count += summary[key]
            lines.append('PEP 8: %5d %s' % (summary[key], key))

        display.info('PEP 8: There were %d different legacy issues found (%d total):' % (len(summary), count), verbosity=1)
        display.info('PEP 8: Count Code Message', verbosity=1)

        for line in lines:
            display.info(line, verbosity=1)

    if errors:
        return SanityFailure(test, messages=errors)

    return SanitySuccess(test)
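# The pycodestyle output parsed above is one "path:line:column: code message"
# record per line. A quick self-contained check of the pattern in use (the
# sample file name is hypothetical):

import re

pattern = '^(?P<path>[^:]*):(?P<line>[0-9]+):(?P<column>[0-9]+): (?P<code>[WE][0-9]{3}) (?P<message>.*)$'
sample = 'lib/ansible/example.py:10:161: E501 line too long (170 > 160 characters)'
match = re.search(pattern, sample)

assert match.group('code') == 'E501'
assert (int(match.group('line')), int(match.group('column'))) == (10, 161)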
def test(self, args, targets):
    """
    :type args: SanityConfig
    :type targets: SanityTargets
    :rtype: TestResult
    """
    if args.python_version in self.UNSUPPORTED_PYTHON_VERSIONS:
        display.warning('Skipping %s on unsupported Python version %s.' % (self.name, args.python_version))
        return SanitySkipped(self.name)

    if self.path.endswith('.py'):
        cmd = [args.python_executable, self.path]
    else:
        cmd = [self.path]

    env = ansible_environment(args, color=False)

    pattern = None
    data = None

    if self.config:
        output = self.config.get('output')
        extensions = self.config.get('extensions')
        prefixes = self.config.get('prefixes')
        files = self.config.get('files')
        always = self.config.get('always')
        text = self.config.get('text')
        ignore_changes = self.config.get('ignore_changes')

        if output == 'path-line-column-message':
            pattern = '^(?P<path>[^:]*):(?P<line>[0-9]+):(?P<column>[0-9]+): (?P<message>.*)$'
        elif output == 'path-message':
            pattern = '^(?P<path>[^:]*): (?P<message>.*)$'
        else:
            raise ApplicationError('Unsupported output type: %s' % output)

        if ignore_changes:
            paths = sorted(i.path for i in targets.targets)
            always = False
        else:
            paths = sorted(i.path for i in targets.include)

        if always:
            paths = []

        # short-term work-around for paths being str instead of unicode on python 2.x
        if sys.version_info[0] == 2:
            paths = [p.decode('utf-8') for p in paths]

        if text is not None:
            if text:
                paths = [p for p in paths if not is_binary_file(p)]
            else:
                paths = [p for p in paths if is_binary_file(p)]

        if extensions:
            paths = [p for p in paths if os.path.splitext(p)[1] in extensions or (p.startswith('bin/') and '.py' in extensions)]

        if prefixes:
            paths = [p for p in paths if any(p.startswith(pre) for pre in prefixes)]

        if files:
            paths = [p for p in paths if os.path.basename(p) in files]

        if not paths and not always:
            return SanitySkipped(self.name)

        data = '\n'.join(paths)

        if data:
            display.info(data, verbosity=4)

    try:
        stdout, stderr = run_command(args, cmd, data=data, env=env, capture=True)
        status = 0
    except SubprocessError as ex:
        stdout = ex.stdout
        stderr = ex.stderr
        status = ex.status

    if stdout and not stderr:
        if pattern:
            matches = parse_to_list_of_dict(pattern, stdout)

            messages = [SanityMessage(
                message=m['message'],
                path=m['path'],
                line=int(m.get('line', 0)),
                column=int(m.get('column', 0)),
            ) for m in matches]

            return SanityFailure(self.name, messages=messages)

    if stderr or status:
        summary = u'%s' % SubprocessError(cmd=cmd, status=status, stderr=stderr, stdout=stdout)
        return SanityFailure(self.name, summary=summary)

    return SanitySuccess(self.name)
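# The self.config consumed above comes from a per-test settings file. A
# hypothetical example showing the keys the code reads (values illustrative):

EXAMPLE_CODE_SMELL_CONFIG = {
    'output': 'path-line-column-message',  # selects the stdout parsing pattern
    'extensions': ['.py'],                 # restrict targets to these extensions
    'prefixes': ['lib/ansible/'],          # restrict targets to these path prefixes
    'files': [],                           # restrict targets to specific file names
    'always': False,                       # when true, run without passing any paths
    'text': True,                          # restrict to text (non-binary) files
    'ignore_changes': False,               # when true, use all targets, not just changed ones
}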
def command_coverage_combine(args): """Patch paths in coverage files and merge into a single file. :type args: CoverageConfig """ coverage = initialize_coverage(args) modules = dict((t.module, t.path) for t in list(walk_module_targets())) coverage_files = [ os.path.join(COVERAGE_DIR, f) for f in os.listdir(COVERAGE_DIR) if f.startswith('coverage') and f != 'coverage' ] arc_data = {} ansible_path = os.path.abspath('lib/ansible/') + '/' root_path = os.getcwd() + '/' counter = 0 for coverage_file in coverage_files: counter += 1 display.info('[%4d/%4d] %s' % (counter, len(coverage_files), coverage_file), verbosity=2) original = coverage.CoverageData() if os.path.getsize(coverage_file) == 0: display.warning('Empty coverage file: %s' % coverage_file) continue try: original.read_file(coverage_file) except Exception as ex: # pylint: disable=locally-disabled, broad-except display.error(str(ex)) continue for filename in original.measured_files(): arcs = original.arcs(filename) if '/ansible_modlib.zip/ansible/' in filename: new_name = re.sub('^.*/ansible_modlib.zip/ansible/', ansible_path, filename) display.info('%s -> %s' % (filename, new_name), verbosity=3) filename = new_name elif '/ansible_module_' in filename: module = re.sub('^.*/ansible_module_(?P<module>.*).py$', '\\g<module>', filename) new_name = os.path.abspath(modules[module]) display.info('%s -> %s' % (filename, new_name), verbosity=3) filename = new_name elif filename.startswith('/root/ansible/'): new_name = re.sub('^/.*?/ansible/', root_path, filename) display.info('%s -> %s' % (filename, new_name), verbosity=3) filename = new_name if filename not in arc_data: arc_data[filename] = [] arc_data[filename] += arcs updated = coverage.CoverageData() for filename in arc_data: if not os.path.isfile(filename): display.warning('Invalid coverage path: %s' % filename) continue updated.add_arcs({filename: arc_data[filename]}) if not args.explain: updated.write_file(COVERAGE_FILE)
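# Quick illustration of two of the path rewrites performed above (all paths
# here are hypothetical):

import re

ansible_path = '/src/ansible/lib/ansible/'

assert re.sub('^.*/ansible_modlib.zip/ansible/', ansible_path,
              '/tmp/x/ansible_modlib.zip/ansible/module_utils/basic.py') == '/src/ansible/lib/ansible/module_utils/basic.py'

assert re.sub('^.*/ansible_module_(?P<module>.*).py$', '\\g<module>',
              '/tmp/x/ansible_module_ping.py') == 'ping'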