def test_external_dependency_is_ok(
        default_rules_collection: RulesCollection) -> None:
    """A role with an external dependency in meta/main.yml must pass lint.

    Fix: the original called ``.format_map(locals())`` on a literal that
    contains no ``{}`` placeholders — a pure no-op; the call is removed.
    """
    playbook_path = 'examples/roles/dependency_in_meta/meta/main.yml'
    good_runner = Runner(playbook_path, rules=default_rules_collection)
    assert [] == good_runner.run()
def main() -> int:
    """Linter CLI entry point.

    Parses CLI options, builds the rule collection, lints the requested
    playbooks (or auto-detects them) and prints matches.
    Returns 0 on success, 2 when any match was found.
    """
    cwd = pathlib.Path.cwd()
    options = cli.get_config(sys.argv[1:])
    initialize_logger(options.verbosity)
    _logger.debug("Options: %s", options)
    formatter_factory = choose_formatter_factory(options)
    formatter = formatter_factory(cwd, options.display_relative_path)
    # -R appends the defaults to any -r dirs; otherwise -r dirs replace them.
    if options.use_default_rules:
        rulesdirs = options.rulesdir + [DEFAULT_RULESDIR]
    else:
        rulesdirs = options.rulesdir or [DEFAULT_RULESDIR]
    rules = RulesCollection(rulesdirs)
    # Informational modes: print and exit without linting anything.
    if options.listrules:
        formatted_rules = rules if options.format == 'plain' else rules_as_rst(
            rules)
        print(formatted_rules)
        return 0
    if options.listtags:
        print(rules.listtags())
        return 0
    # Tags may arrive as a single comma-separated string from the CLI.
    if isinstance(options.tags, str):
        options.tags = options.tags.split(',')
    # Each -x occurrence may itself hold comma-separated values.
    skip = set()
    for s in options.skip_list:
        skip.update(str(s).split(','))
    options.skip_list = frozenset(skip)
    if not options.playbook:
        # no args triggers auto-detection mode
        playbooks = get_playbooks_and_roles(options=options)
    else:
        playbooks = sorted(set(options.playbook))
    matches = list()
    checked_files: Set[str] = set()
    # checked_files is shared across runners so files are linted only once.
    for playbook in playbooks:
        runner = Runner(rules, playbook, options.tags,
                        options.skip_list, options.exclude_paths,
                        options.verbosity, checked_files)
        matches.extend(runner.run())
    # Assure we do not print duplicates and the order is consistent
    matches = sorted(set(matches))
    for match in matches:
        print(formatter.format(match, options.colored))
    # If run under GitHub Actions we also want to emit output recognized by it.
    if os.getenv('GITHUB_ACTIONS') == 'true' and os.getenv('GITHUB_WORKFLOW'):
        formatter = formatters.AnnotationsFormatter(cwd, True)
        for match in matches:
            print(formatter.format(match))
    if matches:
        return 2
    else:
        return 0
def test_file_negative(self) -> None:
    """The failing jinja2-when fixture must yield exactly two matches."""
    playbook = 'examples/playbooks/jinja2-when-failure.yml'
    results = Runner(playbook, rules=self.collection).run()
    self.assertEqual(2, len(results))
def test_example_custom_module(default_rules_collection):
    """custom_module.yml is expected to pass."""
    matches = Runner('examples/playbooks/custom_module.yml',
                     rules=default_rules_collection).run()
    assert len(matches) == 0
def main(args):
    """Parse CLI arguments, lint the requested playbooks and return matches.

    :param args: raw CLI argument list (without the program name)
    :return: 0 for the informational list modes, otherwise the sorted list
        of matches found across all playbooks.

    Fix: corrected the user-facing help-string typo "whosef" -> "whose";
    removed commented-out dead code.
    """
    parser = optparse.OptionParser(
        "%prog [options] [playbook.yml [playbook2 ...]]|roledirectory",
        version="%prog " + __version__)
    parser.add_option('-L', dest='listrules', default=False,
                      action='store_true', help="list all the rules")
    parser.add_option('-q', dest='quiet', default=False, action='store_true',
                      help="quieter, although not silent output")
    parser.add_option('-p', dest='parseable', default=False,
                      action='store_true',
                      help="parseable output in the format of pep8")
    parser.add_option('--parseable-severity', dest='parseable_severity',
                      default=False, action='store_true',
                      help="parseable output including severity of rule")
    parser.add_option('-r', action='append', dest='rulesdir', default=[],
                      type='str',
                      help="specify one or more rules directories using "
                           "one or more -r arguments. Any -r flags override "
                           "the default rules in %s, unless -R is also used."
                           % DEFAULT_RULESDIR)
    parser.add_option('-R', action='store_true', default=False,
                      dest='use_default_rules',
                      help="Use default rules in %s in addition to any extra "
                           "rules directories specified with -r. There is "
                           "no need to specify this if no -r flags are used"
                           % DEFAULT_RULESDIR)
    parser.add_option('-t', dest='tags', action='append', default=[],
                      help="only check rules whose id/tags match these values")
    parser.add_option('-T', dest='listtags', action='store_true',
                      help="list all the tags")
    parser.add_option('-v', dest='verbosity', action='count',
                      help="Increase verbosity level", default=0)
    parser.add_option('-x', dest='skip_list', default=[], action='append',
                      help="only check rules whose id/tags do not " +
                           "match these values")
    parser.add_option('--nocolor', dest='colored',
                      default=hasattr(sys.stdout, 'isatty')
                      and sys.stdout.isatty(),
                      action='store_false', help="disable colored output")
    parser.add_option('--force-color', dest='colored', action='store_true',
                      help="Try force colored output (relying on "
                           "ansible's code)")
    parser.add_option('--exclude', dest='exclude_paths', action='append',
                      help='path to directories or files to skip. This option'
                           ' is repeatable.',
                      default=[])
    parser.add_option('-c', dest='config_file',
                      help='Specify configuration file to use. '
                           'Defaults to ".ansible-lint"')
    options, args = parser.parse_args(args)

    # Merge configuration-file values: booleans are OR-ed with the CLI,
    # verbosity is additive, and list-valued options are concatenated.
    config = load_config(options.config_file)
    if config:
        if 'quiet' in config:
            options.quiet = options.quiet or config['quiet']
        if 'parseable' in config:
            options.parseable = options.parseable or config['parseable']
        if 'parseable_severity' in config:
            options.parseable_severity = options.parseable_severity or \
                config['parseable_severity']
        if 'use_default_rules' in config:
            options.use_default_rules = (options.use_default_rules
                                         or config['use_default_rules'])
        if 'verbosity' in config:
            options.verbosity = options.verbosity + config['verbosity']
        options.exclude_paths.extend(config.get('exclude_paths', []))
        if 'rulesdir' in config:
            options.rulesdir = options.rulesdir + config['rulesdir']
        if 'skip_list' in config:
            options.skip_list = options.skip_list + config['skip_list']
        if 'tags' in config:
            options.tags = options.tags + config['tags']

    # When several formatter flags are given, the last assignment wins.
    # NOTE(review): `formatter` is not referenced below; kept because the
    # constructors are still invoked as in the original — confirm intent.
    if options.quiet:
        formatter = formatters.QuietFormatter()
    if options.parseable:
        formatter = formatters.ParseableFormatter()
    if options.parseable_severity:
        formatter = formatters.ParseableSeverityFormatter()

    # -R appends the defaults to any -r dirs; otherwise -r dirs replace them.
    if options.use_default_rules:
        rulesdirs = options.rulesdir + [DEFAULT_RULESDIR]
    else:
        rulesdirs = options.rulesdir or [DEFAULT_RULESDIR]
    rules = RulesCollection(rulesdirs)

    # Informational modes: print and exit without linting anything.
    if options.listrules:
        print(rules)
        return 0
    if options.listtags:
        print(rules.listtags())
        return 0

    # Tags may arrive as a single comma-separated string.
    if isinstance(options.tags, six.string_types):
        options.tags = options.tags.split(',')
    # Each -x occurrence may itself hold comma-separated values.
    skip = set()
    for s in options.skip_list:
        skip.update(str(s).split(','))
    options.skip_list = frozenset(skip)

    playbooks = sorted(set(args))
    matches = list()
    checked_files = set()
    for playbook in playbooks:
        runner = Runner(playbook=playbook, rules=rules)
        matches.extend(runner.run())

    # Stable, human-friendly ordering: by file, then line, then rule id.
    matches.sort(
        key=lambda x: (normpath(x.filename), x.linenumber, x.rule.id))
    return matches
def test_example(default_rules_collection):
    """example.yml is expected to have 16 match errors inside."""
    # NOTE(review): docstring previously said 15 while the assertion checks
    # 16; the docstring was aligned with the assertion below.
    result = Runner(default_rules_collection, 'examples/example.yml', [], [], []).run()
    assert len(result) == 16
def test_package_not_latest_negative(self) -> None:
    """The failing package-check fixture must yield four matches."""
    playbook = 'examples/playbooks/package-check-failure.yml'
    results = Runner(playbook, rules=self.collection).run()
    self.assertEqual(4, len(results))
def test_no_same_owner_rule(default_rules_collection, test_file, failures) -> None:
    """Test rule matches."""
    matches = Runner(test_file, rules=default_rules_collection).run()
    assert len(matches) == failures
    # Every reported match must come from the NoSameOwnerRule.
    assert all(m.message == NoSameOwnerRule.shortdesc for m in matches)
def test_package_not_latest_positive(self):
    """The passing package-check fixture must yield no matches."""
    runner = Runner(self.collection, 'test/package-check-success.yml',
                    [], [], [])
    self.assertEqual([], runner.run())
def test_runner(default_rules_collection, playbook, exclude, length):
    """Lint ``playbook`` with ``exclude`` paths and check the match count."""
    results = Runner(default_rules_collection, playbook, [], [], exclude).run()
    assert len(results) == length
def test_runner_with_directory(default_rules_collection, directory_name):
    """A directory target should be classified as a 'role' playbook."""
    dir_runner = Runner(default_rules_collection, directory_name, [], [], [])
    first_entry_kind = list(dir_runner.playbooks)[0][1]
    assert first_entry_kind == 'role'
def test_file_negative(self):
    """The failing command-instead-of-shell fixture yields two matches."""
    runner = Runner(self.collection,
                    'test/command-instead-of-shell-failure.yml', [], [], [])
    self.assertEqual(2, len(runner.run()))
def test_file_positive(self):
    """The passing command-instead-of-shell fixture yields no matches."""
    runner = Runner(self.collection,
                    'test/command-instead-of-shell-success.yml', [], [], [])
    self.assertEqual([], runner.run())
def test_included_tasks(default_rules_collection, filename, playbooks_count):
    """Check that linting loads the expected number of playbooks.

    Fix: the ``filename`` parameter was never used — the path literal had
    lost its ``{filename}`` placeholder (it read ``(unknown)``), so the
    ``.format(**locals())`` call substituted nothing. The placeholder is
    restored so each parametrized file is actually linted.
    """
    lintable = Lintable('test/{filename}.yml'.format(**locals()))
    runner = Runner(default_rules_collection, lintable, [], [], [])
    runner.run()
    assert len(runner.playbooks) == playbooks_count
def test_file(self):
    """Run the rules over the hard-coded password fixture and print results."""
    password_runner = Runner(
        playbook='testResources/ansible-smell/hardcodepassword5.yml',
        rules=self.collection)
    print(password_runner.run())
def test_package_not_latest_negative(self):
    """The failing package-check fixture must yield three matches."""
    runner = Runner(self.collection, 'test/package-check-failure.yml',
                    [], [], [])
    self.assertEqual(3, len(runner.run()))
def test_example_plain_string(default_rules_collection):
    """Validates that loading valid YAML string produce error."""
    matches = Runner(default_rules_collection, 'examples/plain_string.yml',
                     [], [], []).run()
    assert len(matches) == 1
    assert "Failed to load or parse file" in matches[0].message
def test_command_changes_positive(self):
    """The passing command-check fixture must yield no matches."""
    runner = Runner(self.collection, 'test/command-check-success.yml',
                    [], [], [])
    self.assertEqual([], runner.run())
def test_package_not_latest_positive(self) -> None:
    """The passing package-check fixture must yield no matches."""
    playbook = 'examples/playbooks/package-check-success.yml'
    self.assertEqual([], Runner(playbook, rules=self.collection).run())
def test_command_changes_negative(self):
    """The failing command-check fixture must yield two matches."""
    runner = Runner(self.collection, 'test/command-check-failure.yml',
                    [], [], [])
    self.assertEqual(2, len(runner.run()))
def test_example(default_rules_collection):
    """example.yml is expected to have 15 match errors inside."""
    matches = Runner('playbooks/example.yml',
                     rules=default_rules_collection).run()
    assert len(matches) == 15
def test_file_positive(self):
    """The passing task-has-name fixture must yield no matches."""
    runner = Runner(self.collection, 'test/task-has-name-success.yml',
                    [], [], [])
    self.assertEqual([], runner.run())
def test_file(self):
    """Run the rules over the unnamed-constructs fixture and print results."""
    constructs_runner = Runner(
        playbook='testResources/ansible-smell/unnamedconstructs.yml',
        rules=self.collection)
    print(constructs_runner.run())
def test_file_negative(self):
    """The failing task-has-name fixture must yield two matches."""
    runner = Runner(self.collection, 'test/task-has-name-failure.yml',
                    [], [], [])
    self.assertEqual(2, len(runner.run()))
def _call_runner(self, path):
    """Lint ``path`` with the shared rule collection and return matches."""
    return Runner(self.collection, path, [], [], []).run()
def test_skip_import_playbook(default_rules_collection, playbook):
    """Linting ``playbook`` is expected to produce no matches."""
    matches = Runner(playbook, rules=default_rules_collection).run()
    assert len(matches) == 0
def test_file_positive(self) -> None:
    """The passing jinja2-when fixture must yield no matches."""
    playbook = 'examples/playbooks/jinja2-when-success.yml'
    self.assertEqual([], Runner(playbook, rules=self.collection).run())
def _call_runner(self, path) -> List["MatchError"]:
    """Lint ``path`` with the shared rule collection and return matches."""
    return Runner(self.collection, path).run()
def runner(
        play_file_path: Union[Lintable, str],
        default_rules_collection: RulesCollection) -> Runner:
    """Fixture to return a Runner() instance."""
    lint_runner = Runner(play_file_path, rules=default_rules_collection)
    return lint_runner
def test_file(self):
    """Register the suspicious-comment rule, lint its fixture, print matches."""
    rules = RulesCollection()
    rules.register(SuspiciousComment())
    comment_runner = Runner(
        playbook='testResources/ansible-smell/suspiciouscomment.yml',
        rules=rules)
    print(comment_runner.run())