def should_run_with_config(robocop_instance, cfg):
    """Configure *robocop_instance* from the option string *cfg* and run it.

    Robocop ends a run by raising SystemExit, so that exit is expected and
    swallowed here.  Returns the configured instance for further assertions.
    """
    parsed = Config()
    parsed.parse_opts(cfg.split())
    robocop_instance.config = parsed
    with pytest.raises(SystemExit):
        robocop_instance.run()
    return robocop_instance
def configure_robocop(robocop_instance, args):
    """Parse *args* (whitespace-separated option string) into a fresh Config,
    attach it to *robocop_instance*, then load and configure checkers/reports."""
    fresh = Config()
    fresh.parse_opts(args.split())
    robocop_instance.config = fresh
    robocop_instance.load_checkers()
    robocop_instance.load_reports()
    robocop_instance.configure_checkers_or_reports()
def test_use_nested_argument_file(self, test_data_dir):
    """An argument file that references another argument file is rejected."""
    nested_args_path = str(test_data_dir / "argument_file" / "args_nested.txt")
    cfg = Config()
    with pytest.raises(NestedArgumentFileError) as err:
        cfg.parse_opts(["-A", nested_args_path, str(test_data_dir)])
    assert "Nested argument file in " in str(err)
def test_run_non_existing_file(self, robocop_instance):
    """Running against a path that does not exist raises FileError."""
    cfg = Config()
    cfg.parse_opts(['some_path'])
    robocop_instance.config = cfg
    with pytest.raises(FileError) as err:
        robocop_instance.run()
    assert 'File "some_path" does not exist' in str(err)
def test_use_argument_file(self, robocop_instance):
    """Options can be supplied through an argument file via -A."""
    root = Path(__file__).parent.parent
    cfg = Config()
    cfg.parse_opts([
        '-A',
        str(root / 'test_data/argument_file/args.txt'),
        str(root / 'test_data'),
    ])
    robocop_instance.config = cfg
    with pytest.raises(SystemExit):
        robocop_instance.run()
def test_use_nested_argument_file(self):
    """An argument file that references another argument file is rejected."""
    root = Path(__file__).parent.parent
    nested_args_path = str(root / 'test_data/argument_file/args_nested.txt')
    cfg = Config()
    with pytest.raises(NestedArgumentFileError) as err:
        cfg.parse_opts(['-A', nested_args_path, str(root / 'test_data')])
    assert 'Nested argument file in ' in str(err)
def test_use_not_existing_argument_file(self, test_data_dir):
    """A missing argument file raises ArgumentFileNotFoundError."""
    cfg = Config()
    opts = ["--argumentfile", "some_file", str(test_data_dir)]
    with pytest.raises(ArgumentFileNotFoundError) as err:
        cfg.parse_opts(opts)
    assert 'Argument file "some_file" does not exist' in str(err)
def test_run_all_checkers(self, robocop_instance):
    """A default run over test_data completes (robocop exits via SystemExit)."""
    cfg = Config()
    cfg.parse_opts([str(Path(__file__).parent.parent / 'test_data')])
    robocop_instance.config = cfg
    with pytest.raises(SystemExit):
        robocop_instance.run()
def test_configure_return_status_with_non_exist(self, robocop_instance):
    """Configuring a parameter the report does not have raises ConfigGeneralError."""
    cfg = Config()
    cfg.parse_opts([
        '--configure', 'return_status:smth:E=0:W=0',
        str(Path(__file__).parent.parent / 'test_data'),
    ])
    robocop_instance.config = cfg
    with pytest.raises(ConfigGeneralError) as err:
        robocop_instance.configure_checkers_or_reports()
    assert "Provided param 'smth' for report 'return_status' does not exist" in str(err)
def test_configure_return_status_invalid_value(self, robocop_instance):
    """A malformed quality_gate value ('E0') still configures and runs."""
    cfg = Config()
    cfg.parse_opts([
        '--configure', 'return_status:quality_gate:E0',
        str(Path(__file__).parent.parent / 'test_data'),
    ])
    robocop_instance.config = cfg
    robocop_instance.configure_checkers_or_reports()
    with pytest.raises(SystemExit):
        robocop_instance.run()
def test_configure_invalid_config(self, robocop_instance):
    """A --configure entry missing its param/value parts is rejected."""
    cfg = Config()
    cfg.parse_opts([
        '--configure', '0202:',
        str(Path(__file__).parent.parent / 'test_data'),
    ])
    robocop_instance.config = cfg
    with pytest.raises(ConfigGeneralError) as err:
        robocop_instance.configure_checkers_or_reports()
    assert "Provided invalid config: '0202:' (general pattern: <rule>:<param>:<value>)" in str(err)
def test_configure_invalid_rule(self, robocop_instance):
    """Configuring an unknown rule name raises ConfigGeneralError."""
    cfg = Config()
    cfg.parse_opts([
        '--configure', 'idontexist:severity:E',
        str(Path(__file__).parent.parent / 'test_data'),
    ])
    robocop_instance.config = cfg
    with pytest.raises(ConfigGeneralError) as err:
        robocop_instance.configure_checkers_or_reports()
    assert "Provided rule or report 'idontexist' does not exist" in str(err)
def test_configure_rule_option(self, robocop_instance):
    """A rule parameter (line_length) can be reconfigured via -c."""
    cfg = Config()
    cfg.parse_opts([
        '-c', 'line-too-long:line_length:1000',
        str(Path(__file__).parent.parent / 'test_data'),
    ])
    robocop_instance.config = cfg
    robocop_instance.configure_checkers_or_reports()
    with pytest.raises(SystemExit):
        robocop_instance.run()
def test_configure_rule_severity(self, robocop_instance):
    """Severity of rules can be changed (comma-separated -c entries)."""
    cfg = Config()
    cfg.parse_opts([
        '-c', '0201:severity:E,E0202:severity:I',
        str(Path(__file__).parent.parent / 'test_data'),
    ])
    robocop_instance.config = cfg
    robocop_instance.configure_checkers_or_reports()
    with pytest.raises(SystemExit):
        robocop_instance.run()
def test_use_not_existing_argument_file(self):
    """A missing argument file raises ArgumentFileNotFoundError."""
    root = Path(__file__).parent.parent
    cfg = Config()
    with pytest.raises(ArgumentFileNotFoundError) as err:
        cfg.parse_opts(['--argumentfile', 'some_file', str(root / 'test_data')])
    assert 'Argument file "some_file" does not exist' in str(err)
def test_configure_invalid_param(self, robocop_instance):
    """Configuring a parameter a rule does not have raises ConfigGeneralError."""
    cfg = Config()
    cfg.parse_opts([
        '--configure', '0202:idontexist:E',
        str(Path(__file__).parent.parent / 'test_data'),
    ])
    robocop_instance.config = cfg
    with pytest.raises(ConfigGeneralError) as err:
        robocop_instance.configure_checkers_or_reports()
    # NOTE(review): the expected text is a raw string, so r"\n" is the two
    # literal characters backslash + n, not a newline.  Confirm the error
    # message really contains a literal "\n"; if it contains an actual
    # newline this containment check can never match.
    assert r"Provided param 'idontexist' for rule '0202' does not exist. " \
           r"Available configurable(s) for this rule:\n severity" in str(err)
def test_all_reports(self, robocop_instance):
    """A run with several reports enabled via -r completes (SystemExit)."""
    root = Path(__file__).parent.parent
    cfg = Config()
    cfg.parse_opts([
        '-r', 'rules_by_id,rules_by_error_type,scan_timer',
        str(root / 'test_data'),
    ])
    robocop_instance.config = cfg
    with pytest.raises(SystemExit):
        robocop_instance.run()
def test_include_exclude_invalid_rule(self, robocop_instance, rules, expected):
    """Both --include and --exclude reject unknown rule names the same way."""
    for flag in ("--include", "--exclude"):
        cfg = Config()
        cfg.parse_opts([flag, rules, "."])
        robocop_instance.config = cfg
        with pytest.raises(ConfigGeneralError) as err:
            robocop_instance.reload_config()
        assert expected in str(err)
def test_ignore_file_with_pattern(self, robocop_instance):
    """--ignore with a glob pattern skips matching files during the run."""
    root = Path(__file__).parent.parent
    cfg = Config()
    cfg.parse_opts([
        '--ignore', '*.robot',
        '--include', '0502',
        str(root / 'test_data'),
    ])
    robocop_instance.config = cfg
    with pytest.raises(SystemExit):
        robocop_instance.run()
def test_set_rule_invalid_threshold(self, robocop_instance):
    """An unexpected --threshold value still lets the run complete."""
    root = Path(__file__).parent.parent
    cfg = Config()
    cfg.parse_opts(['--threshold', '3', str(root / 'test_data')])
    robocop_instance.config = cfg
    with pytest.raises(SystemExit):
        robocop_instance.run()
def test_include_one_rule(self, robocop_instance):
    """A run restricted to a single rule via --include completes."""
    root = Path(__file__).parent.parent
    cfg = Config()
    cfg.parse_opts(['--include', '0503', str(root / 'test_data')])
    robocop_instance.config = cfg
    with pytest.raises(SystemExit):
        robocop_instance.run()
def test_disable_all_pattern(self, robocop_instance):
    """Excluding every rule with the '*' pattern still yields a clean exit."""
    root = Path(__file__).parent.parent
    cfg = Config()
    cfg.parse_opts(['--exclude', '*', str(root / 'test_data')])
    robocop_instance.config = cfg
    with pytest.raises(SystemExit):
        robocop_instance.run()
def test_all_reports(self, robocop_instance):
    """Enabling every report with '-r all' completes the run (SystemExit)."""
    root = Path(__file__).parent.parent
    cfg = Config()
    cfg.parse_opts(['-r', 'all', str(root / 'test_data')])
    robocop_instance.config = cfg
    with pytest.raises(SystemExit):
        robocop_instance.run()
def configure_robocop_with_rule(runner, rule, path):
    """Configure *runner* to check only *rule* under *path*; return *runner*.

    Uses a fixed output format and zero-tolerance quality gates so any issue
    raised by the rule flips the return status.
    """
    cfg = Config()
    cfg.parse_opts([
        '--include', rule,
        '--format', '{source}:{line}:{col} [{severity}] {rule_id} {desc}',
        '--configure', 'return_status:quality_gate:E=0:W=0:I=0',
        str(path),
    ])
    runner.config = cfg
    return runner
def test_run_with_return_status_bigger_than_zero(self, robocop_instance):
    """With zero-tolerance quality gates a run over test_data ends non-zero."""
    root = Path(__file__).parent.parent
    cfg = Config()
    cfg.parse_opts([
        '--configure', 'return_status:quality_gate:E=0:W=0',
        str(root / 'test_data'),
    ])
    robocop_instance.config = cfg
    with pytest.raises(SystemExit):
        robocop_instance.run()
    assert robocop_instance.reports['return_status'].return_status > 0
def test_run_with_return_status_0(self, robocop_instance):
    """Quality gates of -1 never trip, so the return status stays 0."""
    root = Path(__file__).parent.parent
    cfg = Config()
    cfg.parse_opts([
        '-c', 'return_status:quality_gate:E=-1:W=-1',
        str(root / 'test_data'),
    ])
    robocop_instance.config = cfg
    with pytest.raises(SystemExit):
        robocop_instance.run()
    assert robocop_instance.reports['return_status'].return_status == 0
def test_run_with_return_status_1(self, robocop_instance):
    """Zero-tolerance quality gates yield return status 1 for test_data."""
    root = Path(__file__).parent.parent
    cfg = Config()
    cfg.parse_opts([
        '--configure', 'return_status:quality_gate:E=0:W=0',
        str(root / 'test_data'),
    ])
    robocop_instance.config = cfg
    robocop_instance.configure_checkers_or_reports()
    with pytest.raises(SystemExit):
        robocop_instance.run()
    # NOTE(review): sibling tests index reports as a mapping
    # (robocop_instance.reports['return_status']); here it is iterated and
    # each item is expected to expose .name/.return_status.  Confirm reports
    # is a list of report objects in this version -- if it is a dict, this
    # loop iterates keys and the assertion never executes.
    for report in robocop_instance.reports:
        if report.name == 'return_status':
            assert report.return_status == 1
def test_no_issues_all_reports(self, robocop_instance):
    """A fully passing file runs cleanly with multiple reports enabled."""
    root = Path(__file__).parent.parent
    cfg = Config()
    cfg.parse_opts([
        '-r', 'rules_by_id,rules_by_error_type',
        str(root / 'test_data/all_passing.robot'),
    ])
    robocop_instance.config = cfg
    robocop_instance.load_reports()
    with pytest.raises(SystemExit):
        robocop_instance.run()
def configure_robocop_with_rule(args, runner, rule, path):
    """Configure *runner* as a CLI invocation limited to *rule*.

    Extra command-line options from *args* are spliced in before the path;
    zero-tolerance quality gates make any reported issue fail the run.
    Returns the configured *runner*.
    """
    runner.from_cli = True
    opts = [
        "--include", rule,
        "--format", "{source}:{line}:{col} [{severity}] {rule_id} {desc}",
        "--configure", "return_status:quality_gate:E=0:W=0:I=0",
        *args,
        str(path),
    ]
    cfg = Config()
    cfg.parse_opts(opts)
    runner.config = cfg
    return runner
class TestArgumentValidation(unittest.TestCase):
    """Unit tests for Config's argument parser: parser metadata, default
    option values, de-duplication of repeated options, and the SystemExit
    side effects of -h/--help and -v/--version."""

    def setUp(self):
        # A fresh Config per test keeps parsed state isolated between cases.
        self.config = Config()

    def test_prog_name(self):
        self.assertEqual(self.config.parser.prog, "robocop")

    def test_parser_default_help_disabled(self):
        # Help is added manually elsewhere, so argparse's built-in -h is off.
        self.assertFalse(self.config.parser.add_help)

    def test_default_args(self):
        # Defaults visible before parse_opts() is ever called.
        self.assertSetEqual(self.config.filetypes, {".resource", ".robot", ".tsv"})
        self.assertSetEqual(self.config.include, set())
        self.assertSetEqual(self.config.exclude, set())
        self.assertSetEqual(self.config.reports, {"return_status"})
        self.assertListEqual(self.config.configure, [])
        self.assertEqual(
            self.config.format,
            "{source}:{line}:{col} [{severity}] {rule_id} {desc} ({name})",
        )
        self.assertListEqual(self.config.paths, ["."])
        self.assertIsNone(self.config.output)
        self.assertFalse(self.config.list_reports)

    def test_default_args_after_parse(self):
        # Parsing an empty path argument must leave every default untouched
        # except paths, which echoes the given (empty) value.
        args = self.config.parse_opts([""])
        self.assertSetEqual(args.filetypes, {".resource", ".robot", ".tsv"})
        self.assertSetEqual(args.include, set())
        self.assertSetEqual(args.exclude, set())
        self.assertSetEqual(args.reports, {"return_status"})
        self.assertListEqual(args.configure, [])
        self.assertEqual(
            args.format,
            "{source}:{line}:{col} [{severity}] {rule_id} {desc} ({name})")
        self.assertListEqual(args.paths, [""])
        self.assertIsNone(args.output)

    def test_filetypes_duplicate_defaults(self):
        # Re-supplying default filetypes must not create duplicates.
        args = self.config.parse_opts(["--filetypes", "robot,resource", ""])
        self.assertSetEqual(args.filetypes, {".resource", ".robot", ".tsv"})

    def test_filetypes_duplicate_dot_prefixed_defaults(self):
        # Dot-prefixed spellings normalise to the same set.
        args = self.config.parse_opts(["--filetypes", ".robot,.resource", ""])
        self.assertSetEqual(args.filetypes, {".resource", ".robot", ".tsv"})

    def test_include_one_rule(self):
        rule_name = "missing-keyword-doc"
        args = self.config.parse_opts(["--include", rule_name, ""])
        self.assertSetEqual(args.include, {rule_name})

    def test_include_two_same_rules_comma_separated(self):
        # Duplicate names in one comma-separated value collapse to one entry.
        rule_name = "missing-keyword-doc"
        args = self.config.parse_opts(
            ["--include", ",".join([rule_name, rule_name]), ""])
        self.assertSetEqual(args.include, {rule_name})

    def test_include_two_same_rules_provided_separately(self):
        # Duplicate names across repeated flags also collapse to one entry.
        rule_name = "missing-keyword-doc"
        args = self.config.parse_opts(
            ["--include", rule_name, "--include", rule_name, ""])
        self.assertSetEqual(args.include, {rule_name})

    def test_include_two_different_rules_comma_separated(self):
        rule_name1 = "missing-keyword-doc"
        rule_name2 = "not-allowed-char-in-name"
        rules_names = ",".join([rule_name1, rule_name2])
        args = self.config.parse_opts(["--include", rules_names, ""])
        self.assertSetEqual(args.include, {rule_name1, rule_name2})

    def test_include_two_different_rules_provided_separately(self):
        rule_name1 = "missing-keyword-doc"
        rule_name2 = "not-allowed-char-in-name"
        args = self.config.parse_opts(
            ["--include", rule_name1, "--include", rule_name2, ""])
        self.assertSetEqual(args.include, {rule_name1, rule_name2})

    def test_exclude_one_rule(self):
        rule_name = "missing-keyword-doc"
        args = self.config.parse_opts(["--exclude", rule_name, ""])
        self.assertSetEqual(args.exclude, {rule_name})

    def test_exclude_two_same_rules_comma_separated(self):
        rule_name = "missing-keyword-doc"
        args = self.config.parse_opts(
            ["--exclude", ",".join([rule_name, rule_name]), ""])
        self.assertSetEqual(args.exclude, {rule_name})

    def test_exclude_two_same_rules_provided_separately(self):
        rule_name = "missing-keyword-doc"
        args = self.config.parse_opts(
            ["--exclude", rule_name, "--exclude", rule_name, ""])
        self.assertSetEqual(args.exclude, {rule_name})

    def test_exclude_two_different_rules_comma_separated(self):
        rule_name1 = "missing-keyword-doc"
        rule_name2 = "not-allowed-char-in-name"
        rules_names = ",".join([rule_name1, rule_name2])
        args = self.config.parse_opts(["--exclude", rules_names, ""])
        self.assertSetEqual(args.exclude, {rule_name1, rule_name2})

    def test_exclude_two_different_rules_provided_separately(self):
        rule_name1 = "missing-keyword-doc"
        rule_name2 = "not-allowed-char-in-name"
        args = self.config.parse_opts(
            ["--exclude", rule_name1, "--exclude", rule_name2, ""])
        self.assertSetEqual(args.exclude, {rule_name1, rule_name2})

    def test_format_overwrite_default(self):
        default_format = "{source}:{line}:{col} [{severity}] {rule_id} {desc}"
        args = self.config.parse_opts(["--format", default_format, ""])
        self.assertEqual(args.format, default_format)

    def test_format_empty(self):
        # An empty format string is accepted as-is.
        empty_format = ""
        args = self.config.parse_opts(["--format", empty_format, ""])
        self.assertEqual(args.format, "")

    def test_format_new_value(self):
        new_format = "{source}: {rule_id} {desc}"
        args = self.config.parse_opts(["--format", new_format, ""])
        self.assertEqual(args.format, new_format)

    def test_output_new_value(self):
        # --output opens the target file for writing as a side effect.
        output_file = "results"
        args = self.config.parse_opts(["--output", output_file, ""])
        self.assertIsNotNone(args.output)
        self.assertIsInstance(args.output, io.TextIOWrapper)
        self.assertEqual(args.output.name, output_file)
        self.assertEqual(args.output.mode, "w")
        self.assertTrue(pathlib.Path(output_file).exists())
        # parser will not close the file itself
        if not self.config.output.closed:
            self.config.output.close()
        # remove created file
        pathlib.Path(output_file).unlink()

    @patch("sys.stdout", new_callable=StringIO)
    def test_help_message(self, mock_stdout):
        # -h prints usage and exits via SystemExit.
        with self.assertRaises(SystemExit):
            self.config.parse_opts(["-h"])
        self.assertRegex(mock_stdout.getvalue(), r"usage:")

    @patch("sys.stdout", new_callable=StringIO)
    def test_help_message_long(self, mock_stdout):
        with self.assertRaises(SystemExit):
            self.config.parse_opts(["--help"])
        self.assertRegex(mock_stdout.getvalue(), r"usage:")

    @patch("sys.stdout", new_callable=StringIO)
    def test_version_number(self, mock_stdout):
        # -v prints the package version and exits via SystemExit.
        with self.assertRaises(SystemExit):
            self.config.parse_opts(["-v"])
        self.assertRegex(mock_stdout.getvalue(), __version__)

    @patch("sys.stdout", new_callable=StringIO)
    def test_version_number_long(self, mock_stdout):
        with self.assertRaises(SystemExit):
            self.config.parse_opts(["--version"])
        self.assertRegex(mock_stdout.getvalue(), __version__)

    def test_paths_new_value(self):
        args = self.config.parse_opts(["tests.robot"])
        self.assertListEqual(args.paths, ["tests.robot"])

    def test_paths_two_values(self):
        args = self.config.parse_opts(["tests.robot", "test2.robot"])
        self.assertListEqual(args.paths, ["tests.robot", "test2.robot"])

    def test_list_reports(self):
        args = self.config.parse_opts(["--list-reports"])
        self.assertTrue(args.list_reports)