Example #1
0
    def test_expect_true_second_criterion_pass_pass_pass_fail(self):
        """test1 passes on the base, left and right commits but fails on the
        merge commit, so the second criterion must report a test conflict
        while the first criterion must not.  The commit-pair comparison must
        likewise flag only the merge/left pair."""
        checker = Behaviour_check()

        def passing_run():
            # JUnitResult where test1 ran and passed; empty coverage.
            return JUnitResult({"test1"}, set(), 1, set(), set(), set(), 0, set(),
                               set(), set(), set(), set(), set(), set(), 0,
                               Coverage(set(), set(), 0, dict()), False)

        base_run = passing_run()
        left_run = passing_run()
        right_run = passing_run()
        # On the merge commit test1 is in the failing sets instead.
        merge_run = JUnitResult(set(), set(), 0, set(), {"test1"},
                                {"RegressionText.test1()"}, 0, set(), set(), set(),
                                set(), set(), set(), set(), 0,
                                Coverage(set(), set(), 0, dict()), False)

        # Args: parent_one, parent_two, parent_tree, path_suite, commitBase,
        # commitParentTestSuite, commitMerge, tool
        first_criterion = checker.check_conflict_occurrence_for_first_criterion(
            base_run, left_run, merge_run, ["", ""], "aaa", "bbb", "ccc", "AUX")
        # Args: parent_base, parent_left, parent_right, parent_merge, path_suite,
        # commitBase, commitParentTestSuite, commitParentOther, commitMerge, tool
        second_criterion = checker.check_conflict_occurrence_for_second_criterion(
            base_run, left_run, right_run, merge_run, ["", ""],
            "aaa", "bbb", "ccc", "ddd", "AUX")

        self.assertFalse(first_criterion[0])
        self.assertTrue(second_criterion[0])

        # Args: parent_one, parent_two, path_suite, commitOneSHA, commitTwoSHA, tool
        pair_base_left = checker.check_different_test_results_for_commit_pair(
            base_run, left_run, ["", ""], "aaa", "bbb", "Aux")
        pair_merge_left = checker.check_different_test_results_for_commit_pair(
            merge_run, left_run, ["", ""], "aaa", "bbb", "Aux")

        self.assertFalse(pair_base_left[0])
        self.assertTrue(pair_merge_left[0])
Example #2
0
 def sum_nimrod_result(ran, evo, differential, is_equal_coverage):
     """Combine two tool runs into a single Nimrod result.

     When both runs are present their JUnit counters, failing-test sets,
     run times and coverage are merged into one JUnitResult; when only one
     exists it is forwarded unchanged.  Falls through (returning None) when
     neither run produced a result.
     """
     if ran and evo:
         merged_coverage = Coverage(
             call_points=ran.coverage.call_points.union(
                 evo.coverage.call_points),
             test_cases=ran.coverage.test_cases.union(
                 evo.coverage.test_cases),
             executions=ran.coverage.executions + evo.coverage.executions,
             # dict(evo, **ran): entries from `ran` win on duplicate keys.
             class_coverage=dict(evo.coverage.class_coverage,
                                 **ran.coverage.class_coverage),
         )
         merged_result = JUnitResult(
             ok_tests=ran.ok_tests + evo.ok_tests,
             fail_tests=ran.fail_tests + evo.fail_tests,
             fail_test_set=ran.fail_test_set.union(evo.fail_test_set),
             run_time=ran.run_time + evo.run_time,
             coverage=merged_coverage,
             timeout=ran.timeout or evo.timeout)
         return Nimrod.create_nimrod_result(merged_result, differential, '',
                                            is_equal_coverage)
     if ran:
         return Nimrod.create_nimrod_result(ran, differential, '',
                                            is_equal_coverage)
     if evo:
         return Nimrod.create_nimrod_result(evo, differential, '',
                                            is_equal_coverage)
Example #3
0
    def test_expect_false_for_both_criteria(self):
        """All merge-scenario commits have completely empty JUnit results,
        so neither conflict criterion nor any commit-pair comparison may
        report a behavior change."""
        behaviorChange = Behaviour_check()

        def empty_run():
            # JUnitResult with no executed tests and empty coverage.
            return JUnitResult(set(), set(), 0, set(), set(), set(), 0, set(), set(),
                               set(), set(), set(), set(), set(), 0,
                               Coverage(set(), set(), 0, dict()), False)

        base_run = empty_run()
        left_run = empty_run()
        right_run = empty_run()
        merge_run = empty_run()

        aux = behaviorChange.check_conflict_occurrence_for_first_criterion(
            base_run, left_run, merge_run, ["", ""], "aaa", "bbb", "ccc", "AUX")
        aux2 = behaviorChange.check_conflict_occurrence_for_second_criterion(
            base_run, left_run, right_run, merge_run, ["", ""],
            "aaa", "bbb", "ccc", "ddd", "AUX")

        self.assertFalse(aux[0])
        self.assertFalse(aux2[0])

        behavior_change = behaviorChange.check_different_test_results_for_commit_pair(
            base_run, left_run, ["", ""], "aaa", "bbb", "Aux")
        # FIX: this call previously repeated the base-vs-left comparison
        # verbatim (copy-paste); the variable name and the analogous
        # pass/pass/pass/fail test show it is meant to compare the merge
        # commit against the left parent.  With all-empty results both
        # comparisons are still expected to report no change.
        behavior_change_merge = behaviorChange.check_different_test_results_for_commit_pair(
            merge_run, left_run, ["", ""], "aaa", "bbb", "Aux")
        self.assertFalse(behavior_change[0])
        self.assertFalse(behavior_change_merge[0])
Example #4
0
    def test_expect_false_for_first_criterion_fail_fail_fail(self):
        """test1 fails on the base, left and merge commits and was not
        executed on the right commit, so no conflict may be reported by
        either criterion for either parent."""
        checker = Behaviour_check()

        def failing_run():
            # JUnitResult where test1 ran and failed; empty coverage.
            return JUnitResult(set(), set(), 0, set(), {"test1"},
                               {"RegressionText.test1()"}, 0, set(), set(), set(),
                               set(), set(), set(), set(), 0,
                               Coverage(set(), set(), 0, dict()), False)

        base_run = failing_run()
        left_run = failing_run()
        # test1 was not executed on the right commit.
        right_run = JUnitResult(set(), set(), 0, set(), set(), set(), 0, set(), set(),
                                set(), set(), {"test1"}, set(), set(), 0,
                                Coverage(set(), set(), 0, dict()), False)
        merge_run = failing_run()

        left_first_criterion = checker.check_conflict_occurrence_for_first_criterion(
            base_run, left_run, merge_run, ["", ""], "aaa", "bbb", "ccc", "AUX")
        right_first_criterion = checker.check_conflict_occurrence_for_first_criterion(
            base_run, right_run, merge_run, ["", ""], "aaa", "bbb", "ccc", "AUX")
        second_criterion = checker.check_conflict_occurrence_for_second_criterion(
            base_run, left_run, right_run, merge_run, ["", ""],
            "aaa", "bbb", "ccc", "ddd", "AUX")

        self.assertFalse(left_first_criterion[0])
        self.assertFalse(right_first_criterion[0])
        self.assertFalse(second_criterion[0])
Example #5
0
    def test_expect_false_for_second_criterion_fail_fail_fail_fail(self):
        """test1 fails on every merge-scenario commit, so no test conflict
        may be reported by either criterion."""
        checker = Behaviour_check()

        def failing_run():
            # JUnitResult where test1 ran and failed; empty coverage.
            return JUnitResult(set(), set(), 0, set(), {"test1"},
                               {"RegressionText.test1()"}, 0, set(), set(), set(),
                               set(), set(), set(), set(), 0,
                               Coverage(set(), set(), 0, dict()), False)

        base_run = failing_run()
        left_run = failing_run()
        right_run = failing_run()
        merge_run = failing_run()

        first_criterion = checker.check_conflict_occurrence_for_first_criterion(
            base_run, left_run, merge_run, ["", ""], "aaa", "bbb", "ccc", "AUX")
        second_criterion = checker.check_conflict_occurrence_for_second_criterion(
            base_run, left_run, right_run, merge_run, ["", ""],
            "aaa", "bbb", "ccc", "ddd", "AUX")

        self.assertFalse(first_criterion[0])
        self.assertFalse(second_criterion[0])
Example #6
0
    def test_expect_false_for_first_criterion_not_executed(self):
        """test1 passes on the base and merge commits but was not executed
        on the left and right commits, so no conflict may be reported by
        either criterion for either parent."""
        checker = Behaviour_check()

        def passing_run():
            # JUnitResult where test1 ran and passed; empty coverage.
            return JUnitResult({"test1"}, set(), 1, set(), set(), set(), 0, set(),
                               set(), set(), set(), set(), set(), set(), 0,
                               Coverage(set(), set(), 0, dict()), False)

        def not_executed_run():
            # JUnitResult where test1 appears only in the not-executed set.
            return JUnitResult(set(), set(), 0, set(), set(), set(), 0, set(), set(),
                               set(), set(), {"test1"}, set(), set(), 0,
                               Coverage(set(), set(), 0, dict()), False)

        base_run = passing_run()
        left_run = not_executed_run()
        right_run = not_executed_run()
        merge_run = passing_run()

        left_first_criterion = checker.check_conflict_occurrence_for_first_criterion(
            base_run, left_run, merge_run, ["", ""], "aaa", "bbb", "ccc", "AUX")
        right_first_criterion = checker.check_conflict_occurrence_for_first_criterion(
            base_run, right_run, merge_run, ["", ""], "aaa", "bbb", "ccc", "AUX")
        second_criterion = checker.check_conflict_occurrence_for_second_criterion(
            base_run, left_run, right_run, merge_run, ["", ""],
            "aaa", "bbb", "ccc", "ddd", "AUX")

        self.assertFalse(left_first_criterion[0])
        self.assertFalse(right_first_criterion[0])
        self.assertFalse(second_criterion[0])