# Example 1
class TestOctalPermissionsRuleWithFile(unittest.TestCase):
    """Tests for OctalPermissionsRule via text-based playbook runs."""

    # Register the rule once at class level: ``collection`` is a class
    # attribute shared by every test, so registering inside setUp() (as the
    # previous version did) appended a duplicate rule instance before each
    # test.  This also matches how the sibling test classes register rules.
    collection = RulesCollection()
    rule = OctalPermissionsRule()
    collection.register(rule)

    # Permission modes written as proper octal literals -> must be accepted.
    VALID_MODES = [
        0o777, 0o775, 0o770, 0o755, 0o750, 0o711, 0o710, 0o700, 0o666, 0o664,
        0o660, 0o644, 0o640, 0o600, 0o555, 0o551, 0o550, 0o511, 0o510, 0o500,
        0o444, 0o440, 0o400
    ]

    # The same permissions written as plain decimal integers -> must be
    # flagged as invalid.
    INVALID_MODES = [
        777,
        775,
        770,
        755,
        750,
        711,
        710,
        700,
        666,
        664,
        660,
        644,
        640,
        622,
        620,
        600,
        555,
        551,
        550,  # 511 == 0o777, 510 == 0o776, 500 == 0o764
        444,
        440,
        400
    ]

    def setUp(self):
        # ``self.rule`` resolves to the class attribute registered above.
        self.runner = RunFromText(self.collection)

    def test_success(self):
        """Playbook with correct octal modes produces no matches."""
        results = self.runner.run_playbook(SUCCESS_TASKS)
        self.assertEqual(0, len(results))

    def test_fail(self):
        """Playbook with bad modes produces the expected four matches."""
        results = self.runner.run_playbook(FAIL_TASKS)
        self.assertEqual(4, len(results))

    def test_valid_modes(self):
        """Every octal-literal mode is accepted by the rule."""
        for mode in self.VALID_MODES:
            self.assertFalse(self.rule.is_invalid_permission(mode),
                             msg="0o%o should be a valid mode" % mode)

    def test_invalid_modes(self):
        """Every decimal-written mode is rejected by the rule."""
        for mode in self.INVALID_MODES:
            self.assertTrue(self.rule.is_invalid_permission(mode),
                            msg="%d should be an invalid mode" % mode)
# Example 2
class TestEnvVarsInCommand(unittest.TestCase):
    """Exercise EnvVarsInCommandRule against passing and failing plays."""

    collection = RulesCollection()
    collection.register(EnvVarsInCommandRule())

    def setUp(self):
        # A fresh runner for every test keeps cases independent.
        self.runner = RunFromText(self.collection)

    def test_success(self):
        """A clean play yields no matches."""
        matches = self.runner.run_playbook(SUCCESS_PLAY_TASKS)
        self.assertEqual(len(matches), 0)

    def test_fail(self):
        """A play with env vars in commands yields two matches."""
        matches = self.runner.run_playbook(FAIL_PLAY_TASKS)
        self.assertEqual(len(matches), 2)
# Example 3
class TestShellWithoutPipeFail(unittest.TestCase):
    """Exercise the ShellWithoutPipefail rule."""

    collection = RulesCollection()
    collection.register(ShellWithoutPipefail())

    def setUp(self) -> None:
        # Build a fresh text runner for each test case.
        self.runner = RunFromText(self.collection)

    def test_success(self) -> None:
        """Compliant shell tasks produce no matches."""
        found = self.runner.run_playbook(SUCCESS_TASKS)
        self.assertEqual(len(found), 0)

    def test_fail(self) -> None:
        """Shell tasks missing pipefail produce three matches."""
        found = self.runner.run_playbook(FAIL_TASKS)
        self.assertEqual(len(found), 3)
# Example 4
def test_pre_tasks(default_text_runner: RunFromText, playbook: str,
                   length: int) -> None:
    # When
    results = default_text_runner.run_playbook(playbook)

    # Then
    assert len(results) == length
 def test_rule_command_instead_of_shell(
     default_text_runner: RunFromText, text: str, expected: int
 ) -> None:
     """Validate that rule works as intended."""
     matches = default_text_runner.run_playbook(text)
     # Every match must come from this rule, and the total must match.
     for match in matches:
         assert match.rule.id == UseCommandInsteadOfShellRule.id, match
     assert len(matches) == expected
# Example 6
class TestSudoRule(unittest.TestCase):
    """Exercise SudoRule against roles and playbooks."""

    collection = RulesCollection()
    collection.register(SudoRule())

    def setUp(self):
        # Fresh runner per test.
        self.runner = RunFromText(self.collection)

    def test_run_role_pass(self):
        """A clean role produces no matches."""
        found = self.runner.run_role_tasks_main(ROLE_0_ERRORS)
        self.assertEqual(len(found), 0)

    def test_run_role_fail(self):
        """A role using sudo produces two matches."""
        found = self.runner.run_role_tasks_main(ROLE_2_ERRORS)
        self.assertEqual(len(found), 2)

    def test_play_task_fail(self):
        """A play with one sudo usage produces one match."""
        found = self.runner.run_playbook(PLAY_1_ERROR)
        self.assertEqual(len(found), 1)

    def test_play_root_and_task_fail(self):
        """Sudo at both play root and task level produces four matches."""
        found = self.runner.run_playbook(PLAY_4_ERRORS)
        self.assertEqual(len(found), 4)
class TestComparisonToEmptyStringRule(unittest.TestCase):
    """Exercise ComparisonToEmptyStringRule."""

    collection = RulesCollection()
    collection.register(ComparisonToEmptyStringRule())

    def setUp(self):
        # Fresh runner per test.
        self.runner = RunFromText(self.collection)

    def test_success(self):
        """Compliant role tasks produce no matches."""
        found = self.runner.run_role_tasks_main(SUCCESS_TASKS)
        self.assertEqual(len(found), 0)

    def test_fail(self):
        """Comparisons to empty string produce two matches."""
        found = self.runner.run_playbook(FAIL_TASKS)
        self.assertEqual(len(found), 2)
# Example 8
 def test_failed_when(rule_runner: RunFromText) -> None:
     """Instead of ignore_errors, this task uses failed_when."""
     found = rule_runner.run_playbook(FAILED_WHEN)
     # failed_when is the sanctioned alternative, so no matches expected.
     assert len(found) == 0
# Example 9
 def test_ignore_errors_register(rule_runner: RunFromText) -> None:
     """The task uses ignore_errors: but output is registered and managed."""
     found = rule_runner.run_playbook(IGNORE_ERRORS_REGISTER)
     # Registering and handling the output makes ignore_errors acceptable.
     assert len(found) == 0
# Example 10
 def test_rule_empty_string_compare_pass(rule_runner: RunFromText) -> None:
     """A compliant play triggers no empty-string-comparison matches."""
     found = rule_runner.run_playbook(SUCCESS_PLAY)
     assert len(found) == 0, found
 def test_systemd_environment(rule_runner: RunFromText) -> None:
     """Showing the environment is not supported by the systemd module."""
     found = rule_runner.run_playbook(SYSTEMD_ENVIRONMENT)
     # The rule is expected to stay silent for this fixture.
     assert len(found) == 0
# Example 12
 def test_invalid_var_name_playbook(rule_runner: RunFromText) -> None:
     """Invalid variable names in a play raise two matches from this rule."""
     found = rule_runner.run_playbook(FAIL_PLAY)
     assert len(found) == 2
     # Each match must originate from the variable-naming rule.
     for match in found:
         assert match.rule.id == VariableNamingRule.id
# Example 13
 def test_fqcn_builtin_pass(rule_runner: RunFromText) -> None:
     """A play already using fully-qualified names produces no matches."""
     found = rule_runner.run_playbook(SUCCESS_PLAY)
     assert len(found) == 0, found
# Example 14
 def test_password_with_lock(rule_runner: RunFromText) -> None:
     """The task sets a password but also lock the user."""
     found = rule_runner.run_playbook(PASSWORD_WITH_LOCK)
     # Setting a password on a locked user is flagged once.
     assert len(found) == 1
# Example 15
 def test_no_log_no(rule_runner: RunFromText) -> None:
     """The task sets no_log to no."""
     found = rule_runner.run_playbook(NO_LOG_NO)
     # no_log: no is flagged exactly once.
     assert len(found) == 1
# Example 16
 def test_no_log_false(rule_runner: RunFromText) -> None:
     """The task sets no_log to false."""
     found = rule_runner.run_playbook(NO_LOG_FALSE)
     # no_log: false is flagged exactly once.
     assert len(found) == 1
# Example 17
 def test_no_log_unused(rule_runner: RunFromText) -> None:
     """The task does not use no_log but also no loop."""
     found = rule_runner.run_playbook(NO_LOG_UNUSED)
     # Nothing to flag when neither no_log nor a loop is present.
     assert len(found) == 0
# Example 18
def test_playbook(
    default_text_runner: RunFromText, playbook_src: str, results_num: int
) -> None:
    results = default_text_runner.run_playbook(playbook_src)
    assert len(results) == results_num
# Example 19
def test_null_tasks(default_text_runner: RunFromText) -> None:
    """Assure we do not fail when encountering null tasks."""
    found = default_text_runner.run_playbook(PB_WITH_NULL_TASKS)
    # No matches expected; the point is that linting does not crash.
    assert not found
# Example 20
 def test_ignore_errors_true(rule_runner: RunFromText) -> None:
     """The task uses ignore_errors."""
     found = rule_runner.run_playbook(IGNORE_ERRORS_TRUE)
     # A bare ignore_errors: true is flagged once.
     assert len(found) == 1
# Example 21
 def test_ignore_errors_false(rule_runner: RunFromText) -> None:
     """The task uses ignore_errors: false, oddly enough."""
     found = rule_runner.run_playbook(IGNORE_ERRORS_FALSE)
     # Explicitly disabling ignore_errors is not flagged.
     assert len(found) == 0
# Example 22
 def test_no_log_yes(rule_runner: RunFromText) -> None:
     """The task sets no_log to yes."""
     found = rule_runner.run_playbook(NO_LOG_YES)
     # Enabling no_log is compliant, so no matches.
     assert len(found) == 0
# Example 23
 def test_fqcn_builtin_fail(rule_runner: RunFromText) -> None:
     """A play with a short module name raises one FQCN match."""
     found = rule_runner.run_playbook(FAIL_PLAY)
     assert len(found) == 1
     # The match message must be the rule's own short description.
     for match in found:
         assert match.message == FQCNBuiltinsRule.shortdesc
# Example 24
 def test_no_log_true(rule_runner: RunFromText) -> None:
     """The task sets no_log to true."""
     found = rule_runner.run_playbook(NO_LOG_TRUE)
     # Enabling no_log is compliant, so no matches.
     assert len(found) == 0
 def test_systemd_status(rule_runner: RunFromText) -> None:
     """Querying status is not supported by the systemd module."""
     # NOTE(review): the previous docstring said "Set-default", apparently
     # copy-pasted from the runlevel test; this fixture runs systemctl status.
     found = rule_runner.run_playbook(SYSTEMCTL_STATUS)
     assert len(found) == 0
# Example 26
 def test_password_lock_false(rule_runner: RunFromText) -> None:
     """The task does not actually lock the user."""
     found = rule_runner.run_playbook(PASSWORD_LOCK_FALSE)
     # password_lock: false means no conflicting lock, so no matches.
     assert len(found) == 0
 def test_systemd_runlevel(rule_runner: RunFromText) -> None:
     """Set-default is not supported by the systemd module."""
     found = rule_runner.run_playbook(SYSTEMD_RUNLEVEL)
     # The rule is expected to stay silent for this fixture.
     assert len(found) == 0
# Example 28
 def test_ignore_errors_check_mode(rule_runner: RunFromText) -> None:
     """The task uses ignore_errors: "{{ ansible_check_mode }}"."""
     # A conditional ignore_errors tied to check mode is acceptable.
     results = rule_runner.run_playbook(IGNORE_ERRORS_CHECK_MODE)
     # Removed a leftover debug print(results); the assertion message is
     # enough diagnostics on failure.
     assert len(results) == 0, results
# Example 29
 def test_password_lock_yes(rule_runner: RunFromText) -> None:
     """The task only locks the user."""
     found = rule_runner.run_playbook(PASSWORD_LOCK_YES)
     # Locking without setting a password is fine, so no matches.
     assert len(found) == 0
# Example 30
 def test_rule_empty_string_compare_fail(rule_runner: RunFromText) -> None:
     """Empty-string comparisons in the play raise two matches."""
     found = rule_runner.run_playbook(FAIL_PLAY)
     assert len(found) == 2
     # Every match carries the rule's short description as its message.
     for match in found:
         assert match.message == ComparisonToEmptyStringRule.shortdesc