Example #1
def test_integration_has_no_test_playbook_should_fail_on_validation(mocker):
    """
    Given
    - integration_a was modified
    - no tests provided for integration_a

    When
    - filtering tests to run

    Then
    - ensure the validation is failing
    """
    from Tests.scripts import configure_tests
    configure_tests._FAILED = False  # reset the FAILED flag

    try:
        # Given
        # - integration_a was modified
        # - no tests provided for integration_a
        fake_integration = TestUtils.create_integration(name='integration_a', with_commands=['a-command'])

        # mark as modified
        TestUtils.mock_get_modified_files(mocker,
                                          modified_files_list=[
                                              fake_integration['path']
                                          ])

        # - no test configured for integration_a in conf.json
        fake_conf = TestUtils.create_tests_conf()

        fake_id_set = TestUtils.create_id_set(
            with_integration=fake_integration['id_set']
        )

        # When
        # - filtering tests to run
        get_test_list(
            files_string='',
            branch_name='dummy_branch',
            two_before_ga_ver=TWO_BEFORE_GA_VERSION,
            conf=fake_conf,
            id_set=fake_id_set
        )

        # Then
        # - ensure the validation is failing
        assert configure_tests._FAILED
    finally:
        # delete the mocked files
        TestUtils.delete_files([
            fake_integration['path']
        ])

        # reset _FAILED flag
        configure_tests._FAILED = False
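
The repeated reset of configure_tests._FAILED could also be centralized. A minimal sketch, assuming pytest; the fixture name is hypothetical and this is not part of the original test suite:

import pytest

from Tests.scripts import configure_tests


@pytest.fixture(autouse=True)
def reset_failed_flag():
    # Hypothetical autouse fixture: clears the module-level _FAILED flag
    # before and after every test, replacing the per-test try/finally resets.
    configure_tests._FAILED = False
    yield
    configure_tests._FAILED = False
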
Example #2
def get_mock_test_list(two_before_ga=TWO_BEFORE_GA_VERSION, get_modified_files_ret=None, mocker=None, git_diff_ret=''):
    branch_name = 'BranchA'
    if get_modified_files_ret is not None:
        mocker.patch('Tests.scripts.configure_tests.get_modified_files', return_value=get_modified_files_ret)

    tests = get_test_list(git_diff_ret, branch_name, two_before_ga, id_set=MOCK_ID_SET, conf=TestConf(MOCK_CONF))
    return tests
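
A hedged usage sketch for this helper; the test name, the changed-file path, and the shape of get_modified_files_ret are assumptions, since the real get_modified_files may return a richer tuple:

def test_collect_tests_with_patched_modified_files(mocker):
    # Hypothetical example: pretend one script file changed and let the
    # collection logic run against MOCK_ID_SET / MOCK_CONF.
    changed_files = ['Scripts/script_a/script_a.yml']
    tests = get_mock_test_list(mocker=mocker, get_modified_files_ret=changed_files)

    # The concrete contents depend on MOCK_CONF; only check something came back.
    assert tests is not None
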
Example #3
def test_conf_has_modified(mocker):
    """
    Given
    - Tests/conf.json has been modified

    When
    - filtering tests to run

    Then
    - ensure the validation is not failing
    """
    from Tests.scripts import configure_tests
    configure_tests._FAILED = False  # reset the FAILED flag

    try:
        # Given
        # - Tests/conf.json has been modified
        TestUtils.mock_get_modified_files(mocker,
                                          modified_files_list=[],
                                          is_conf_json=True)

        TestUtils.mock_run_command(
            mocker,
            on_command='git diff origin/master...dummy_branch Tests/conf.json',
            return_value='something'
        )
        # - default fake conf.json
        fake_conf = TestUtils.create_tests_conf()

        fake_id_set = TestUtils.create_id_set()

        # When
        # - filtering tests to run
        get_test_list(
            files_string='',
            branch_name='dummy_branch',
            two_before_ga_ver=TWO_BEFORE_GA_VERSION,
            conf=fake_conf,
            id_set=fake_id_set
        )

        # Then
        # - ensure the validation is not failing
        assert not configure_tests._FAILED
    finally:
        # reset _FAILED flag
        configure_tests._FAILED = False
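
TestUtils.mock_run_command is only referenced above; a minimal sketch of what such a helper could do, assuming run_command is patchable at Tests.scripts.configure_tests.run_command (both the patch target and the signature are assumptions):

@staticmethod
def mock_run_command(mocker, on_command, return_value):
    # Hypothetical helper: return `return_value` only for the expected git
    # command and an empty string for anything else.
    def _fake_run_command(command, *args, **kwargs):
        return return_value if command == on_command else ''

    mocker.patch('Tests.scripts.configure_tests.run_command',
                 side_effect=_fake_run_command)
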
Example #4
def test_skipped_integration_should_not_be_tested(mocker):
    """
    Given
    - conf.json file in which integration_a is skipped
    - no tests provided for integration_a

    When
    - filtering tests to run

    Then
    - ensure integration_a is skipped
    - ensure the validation is not failing
    """
    from Tests.scripts import configure_tests
    configure_tests._FAILED = False  # reset the FAILED flag

    # Given
    # - conf.json file in which integration_a is skipped
    # - no tests provided for integration_a
    fake_integration = TestUtils.create_integration(name='integration_a', with_commands=['a-command'])

    # mark as modified
    TestUtils.mock_get_modified_files(mocker,
                                      modified_files_list=[
                                          fake_integration['path']
                                      ])

    mock_conf_dict = copy.deepcopy(MOCK_CONF)
    mock_conf_dict['skipped_integrations']['integration_a'] = 'comment'

    fake_id_set = TestUtils.create_id_set()

    # When
    # - filtering tests to run
    filtered_tests = get_test_list(
        files_string='',
        branch_name='dummy_branch',
        two_before_ga_ver=TWO_BEFORE_GA_VERSION,
        conf=TestConf(mock_conf_dict),
        id_set=fake_id_set
    )

    # Then
    # - ensure integration_a is skipped
    assert 'integration_a' not in filtered_tests

    # - ensure the validation is not failing
    assert not configure_tests._FAILED
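
For reference, a minimal sketch of the conf.json shape this test manipulates; keys other than 'tests' and 'skipped_integrations' are assumptions about MOCK_CONF:

MOCK_CONF_SKETCH = {
    'tests': [
        {'integrations': 'integration_a', 'playbookID': 'test_playbook_a'},
    ],
    'skipped_tests': {},
    # integration id -> free-text reason; the test above adds this entry
    'skipped_integrations': {
        'integration_a': 'comment',
    },
}
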
Example #5
    def create_test_file(self):
        branches = self.run_git_command("git branch")
        branch_name_reg = re.search(r"(?<=\* )\w+", branches)
        branch_name = branch_name_reg.group(0)

        print("Getting changed files from the branch: {0}".format(branch_name))
        tests_string = ''
        if branch_name != 'master':
            files_string = self.run_git_command("git diff --name-status origin/master...{0}".format(branch_name))

            tests = get_test_list(files_string, branch_name)
            tests_string = '\n'.join(tests)
            print('Collected the following tests:\n{0}'.format(tests_string))

        print("Creating filter_file.txt")
        with open(FILTER_CONF, "w") as filter_file:
            filter_file.write(tests_string)
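
Parsing `git branch` output with a lookbehind regex is fragile (branch names containing '/' or '-' do not match \w+). An alternative sketch, not the original author's code, asks git for the current branch directly:

    def get_current_branch(self):
        # Alternative sketch: let git report the checked-out branch instead of
        # scraping `git branch` output with a regex.
        return self.run_git_command("git rev-parse --abbrev-ref HEAD").strip()
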
Example #6
def test_dont_fail_integration_on_no_tests_if_it_has_test_playbook_in_conf(mocker):
    """
    If an integration in conf.json is configured with a test playbook,
    ensure that this integration does not fail validation.

    Given
    - integration_a that fetches incidents
    - test_playbook_a exists that should test FetchFromInstance of integration_a
    - both in conf.json

    When
    - filtering tests to run

    Then
    - ensure test_playbook_a is collected and will run
    - ensure the validation is not failing
    """
    from Tests.scripts import configure_tests
    configure_tests._FAILED = False  # reset the FAILED flag

    # Given
    # - integration_a exists
    fake_integration = TestUtils.create_integration(name='integration_a', with_commands=['a-command'])

    # mark as modified
    TestUtils.mock_get_modified_files(mocker,
                                      modified_files_list=[
                                          fake_integration['path']
                                      ])

    # - test_playbook_a exists that should test FetchFromInstance of integration_a
    fake_test_playbook = TestUtils.create_test_playbook(name='test_playbook_a',
                                                        with_scripts=['FetchFromInstance'])

    try:
        # - both in conf.json
        fake_conf = TestUtils.create_tests_conf(
            with_test_configuration={
                'integrations': 'integration_a',
                'playbookID': 'test_playbook_a'
            }
        )

        fake_id_set = TestUtils.create_id_set(
            with_integration=fake_integration['id_set'],
            with_test_playbook=fake_test_playbook['id_set']
        )

        # When
        # - filtering tests to run
        filtered_tests = get_test_list(
            files_string='',
            branch_name='dummy_branch',
            two_before_ga_ver=TWO_BEFORE_GA_VERSION,
            conf=fake_conf,
            id_set=fake_id_set
        )

        # Then
        # - ensure test_playbook_a is collected and will run
        assert 'test_playbook_a' in filtered_tests

        # - ensure the validation is not failing
        assert not configure_tests._FAILED
    finally:
        # delete the mocked files
        TestUtils.delete_files([
            fake_integration['path'],
            fake_test_playbook['path']
        ])

        # reset _FAILED flag
        configure_tests._FAILED = False
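
The try/finally cleanup above relies on TestUtils.delete_files; a minimal sketch of what such a helper might do (the real implementation may differ):

import os


def delete_files(paths):
    # Hypothetical sketch of TestUtils.delete_files: remove the temporary YAML
    # files the tests write to disk, ignoring paths that were never created.
    for path in paths:
        if path and os.path.exists(path):
            os.remove(path)
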
Example #7
    def test_mismatching_script_id(self, mocker):
        """
        Given
        - script_a was modified
        - tests were provided for script_a with a mismatching id

        When
        - filtering tests to run

        Then
        - ensure test_playbook_a is collected and will run
        """
        from Tests.scripts import configure_tests
        configure_tests._FAILED = False  # reset the FAILED flag

        # Given
        # - script_a exists
        script_name = 'script_a'
        fake_script = TestUtils.create_script(name=script_name)

        # - tests were provided for script_a with a mismatching id
        id_set_obj = fake_script['id_set'][script_name]
        fake_script['id_set'] = {'wrong_id': id_set_obj}

        # mark as modified
        TestUtils.mock_get_modified_files(mocker,
                                          modified_files_list=[
                                              fake_script['path']
                                          ])

        # - test_playbook_a exists that should test script_a
        fake_test_playbook = TestUtils.create_test_playbook(name='test_playbook_a',
                                                            with_scripts=[script_name])

        try:
            # - both in conf.json
            fake_conf = TestUtils.create_tests_conf(
                with_test_configuration={
                    'playbookID': 'test_playbook_a'
                }
            )

            fake_id_set = TestUtils.create_id_set(
                with_scripts=fake_script['id_set'],
                with_test_playbook=fake_test_playbook['id_set']
            )

            # When
            # - filtering tests to run
            filtered_tests = get_test_list(
                files_string='',
                branch_name='dummy_branch',
                two_before_ga_ver=TWO_BEFORE_GA_VERSION,
                conf=fake_conf,
                id_set=fake_id_set
            )

            # Then
            # - ensure test_playbook_a is collected and will run
            assert 'test_playbook_a' in filtered_tests

            # - ensure the validation is not failing
            assert not configure_tests._FAILED
        finally:
            # delete the mocked files
            TestUtils.delete_files([
                fake_script['path'],
                fake_test_playbook['path']
            ])

            # reset _FAILED flag
            configure_tests._FAILED = False
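
For context, a minimal sketch of the id_set entry the mismatching-id manipulation above operates on; field names other than 'name' are assumptions about TestUtils.create_script:

# Shape assumed for fake_script['id_set'] as returned by TestUtils.create_script:
script_id_set_entry = {
    'script_a': {
        'name': 'script_a',
        'file_path': 'Scripts/script_a/script_a.yml',  # hypothetical path
    }
}

# The test re-keys the same object under a different id to simulate a mismatch
# between the script's id_set key and its actual id:
mismatched_entry = {'wrong_id': script_id_set_entry['script_a']}
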