Example #1
0
    def test_coverage_file_reading(self):
        """Parses the action/enum test data, then checks that the unprocessed
        coverage file is read into the expected number of coverage tests.
        """
        actions_filename = os.path.join(TEST_DATA_DIR, "test_actions.tsv")
        supported_actions_filename = os.path.join(
            TEST_DATA_DIR, "framework_supported_actions.csv")
        enums_filename = os.path.join(TEST_DATA_DIR, "test_enums.tsv")

        actions: ActionsByName = {}
        action_base_name_to_default_param = {}
        # Open with an explicit encoding (consistent with the other tests in
        # this file), and keep the open file handles named distinctly from the
        # parsed results so the handles are not shadowed while still open.
        with open(actions_filename, "r", encoding="utf-8") as actions_file, \
                open(supported_actions_filename, "r", encoding="utf-8") \
                    as supported_actions_file, \
                open(enums_filename, "r", encoding="utf-8") as enums_file:
            supported_actions = read_platform_supported_actions(
                csv.reader(supported_actions_file, delimiter=','))
            actions_tsv = csv.reader(actions_file, delimiter='\t')
            enums = read_enums_file(csv.reader(enums_file, delimiter='\t'))
            (actions, action_base_name_to_default_param) = read_actions_file(
                actions_tsv, enums, supported_actions)

        coverage_filename = os.path.join(TEST_DATA_DIR,
                                         "test_unprocessed_coverage.tsv")
        coverage_tests: List[CoverageTest] = []
        with open(coverage_filename, "r", encoding="utf-8") as coverage_file:
            coverage_tsv = csv.reader(coverage_file, delimiter='\t')
            coverage_tests = read_unprocessed_coverage_tests_file(
                coverage_tsv, actions, action_base_name_to_default_param)

        # The checked-in test data defines exactly 4 unprocessed tests.
        self.assertEqual(len(coverage_tests), 4)
Example #2
0
    def test_action_file_reading(self):
        """Parses the action/enum test data and verifies the resulting action
        dictionary, including parameterized actions and output actions.
        """
        actions_filename = os.path.join(TEST_DATA_DIR, "test_actions.tsv")
        supported_actions_filename = os.path.join(
            TEST_DATA_DIR, "framework_supported_actions.csv")
        enums_filename = os.path.join(TEST_DATA_DIR, "test_enums.tsv")

        # Keep the open file handles named distinctly from the parsed results
        # so the handles are not shadowed while still open inside the `with`.
        with open(actions_filename, "r", encoding="utf-8") as actions_file, \
                open(supported_actions_filename, "r", encoding="utf-8") \
                    as supported_actions_file, \
                open(enums_filename, "r", encoding="utf-8") as enums_file:
            supported_actions = read_platform_supported_actions(
                csv.reader(supported_actions_file, delimiter=','))
            actions_tsv = csv.reader(actions_file, delimiter='\t')
            enums = read_enums_file(csv.reader(enums_file, delimiter='\t'))

            (actions, action_base_name_to_default_param) = read_actions_file(
                actions_tsv, enums, supported_actions)
            self.assertEqual(len(actions), 13)
            self.assertEqual(len(action_base_name_to_default_param), 3)

            # Check parameterized action state.
            self.assertIn('changes_Chicken', actions)
            self.assertIn('changes_Dog', actions)

            # assertIn for consistency with the membership checks above.
            self.assertIn('checks', actions)
            checks_output_actions = actions['checks'].output_actions
            self.assertEqual(len(checks_output_actions), 2)
            self.assertCountEqual(
                checks_output_actions,
                [actions['check_a_Chicken'], actions['check_b_Chicken_Green']])
Example #3
0
    def test_action_file_reading(self):
        """Parses the CSV action test data and verifies the resulting action
        dictionary, including parameterized actions and output actions.
        """
        actions_filename = os.path.join(TEST_DATA_DIR, "test_actions.csv")
        supported_actions_filename = os.path.join(
            TEST_DATA_DIR, "framework_supported_actions.csv")
        # Open with an explicit encoding (consistent with the second open
        # below), and keep the file handle named distinctly from the parsed
        # result so the handle is not shadowed while still open.
        with open(actions_filename, "r", encoding="utf-8") as actions_file, \
                open(supported_actions_filename, "r", encoding="utf-8") \
                    as supported_actions_file:
            supported_actions = read_platform_supported_actions(
                csv.reader(supported_actions_file))
            actions_csv = csv.reader(actions_file, delimiter=',')
            (actions, action_base_name_to_default_param) = read_actions_file(
                actions_csv, supported_actions)
            self.assertEqual(len(actions), 10)
            self.assertEqual(len(action_base_name_to_default_param), 4)

            # Check parameterized action state.
            self.assertIn('changes_Mode1', actions)
            self.assertIn('changes_Mode2', actions)

            self.assertIn('checks', actions)
            checks_output_actions = actions['checks'].output_actions
            self.assertEqual(len(checks_output_actions), 2)
            self.assertCountEqual(
                checks_output_actions,
                [actions['check_a_Mode1'], actions['check_b_Mode1']])
Example #4
0
    def test_processed_coverage(self):
        """Checks that expanding the parameterized coverage tests yields the
        checked-in expected processed coverage.
        """
        actions_filename = os.path.join(TEST_DATA_DIR, "test_actions.tsv")
        supported_actions_filename = os.path.join(
            TEST_DATA_DIR, "framework_supported_actions.csv")
        enums_filename = os.path.join(TEST_DATA_DIR, "test_enums.tsv")

        actions: ActionsByName = {}
        action_base_name_to_default_param = {}
        with open(actions_filename, "r", encoding="utf-8") as actions_file, \
                open(supported_actions_filename, "r", encoding="utf-8") \
                    as supported_actions_file, \
                open(enums_filename, "r", encoding="utf-8") as enums_file:
            supported_actions = read_platform_supported_actions(
                csv.reader(supported_actions_file, delimiter=','))
            actions_tsv = csv.reader(actions_file, delimiter='\t')
            # Keep the parsed enums separate from the open file handle so the
            # handle is not shadowed while still open.
            enums = read_enums_file(csv.reader(enums_file, delimiter='\t'))
            (actions, action_base_name_to_default_param) = read_actions_file(
                actions_tsv, enums, supported_actions)

        coverage_filename = os.path.join(TEST_DATA_DIR,
                                         "test_unprocessed_coverage.tsv")
        coverage_tests: List[CoverageTest] = []
        with open(coverage_filename, "r", encoding="utf-8") as coverage_file:
            # Name reflects the tab-delimited format being read.
            coverage_tsv = csv.reader(coverage_file, delimiter='\t')
            coverage_tests = read_unprocessed_coverage_tests_file(
                coverage_tsv, actions, action_base_name_to_default_param)
        coverage_tests = expand_parameterized_tests(coverage_tests)

        # Compare with expected.
        expected_processed_tests = []
        processed_filename = os.path.join(TEST_DATA_DIR,
                                          "expected_processed_coverage.tsv")
        with open(processed_filename, "r", encoding="utf-8") as expected_file:
            expected_tsv = csv.reader(expected_file, delimiter='\t')
            expected_processed_tests = read_unprocessed_coverage_tests_file(
                expected_tsv, actions, action_base_name_to_default_param)

        # Hack for easy comparison and printing: transform coverage tests into
        # a Tuple[List[str], Set[TestPlatform]].
        self.assertCountEqual([([action.name
                                 for action in test.actions], test.platforms)
                               for test in coverage_tests],
                              [([action.name
                                 for action in test.actions], test.platforms)
                               for test in expected_processed_tests])
Example #5
0
    def test_supported_actions(self):
        """Verifies the per-action (partial, full) platform support counts
        parsed from the framework supported-actions csv.
        """
        supported_actions_filename = os.path.join(
            TEST_DATA_DIR, "framework_supported_actions.csv")

        with open(supported_actions_filename, "r", encoding="utf-8") \
                    as supported_actions:
            supported = read_platform_supported_actions(
                csv.reader(supported_actions, delimiter=','))
            self.assertEqual(len(supported), 4)
            # Expected (partial-support count, full-support count) per action.
            expected_counts = {
                "check_a": (1, 3),
                "check_b": (1, 3),
                "state_change_a": (0, 4),
                "state_change_b": (0, 3),
            }
            for action_name, (num_partial, num_full) in \
                    expected_counts.items():
                (partial, full) = supported[action_name]
                self.assertEqual(len(partial), num_partial)
                self.assertEqual(len(full), num_full)
Example #6
0
def _write_generated_graph_file(filename: str, graph_contents: str):
    """Writes a graphviz dot file prefixed with a generated-file header."""
    # Note: the original open calls here used encoding="'utf-8" (stray
    # apostrophe); this only worked due to CPython's lenient codec-name
    # normalization. The codec name is now spelled correctly.
    with open(filename, "w", encoding="utf-8") as graph_file:
        graph_file.write("# This is a generated file.\n")
        graph_file.write(graph_contents)


def generate_framework_tests_and_coverage(
        supported_framework_action_file: TextIOWrapper,
        actions_file: TextIOWrapper, coverage_required_file: TextIOWrapper,
        custom_partitions: List[TestPartitionDescription],
        default_partition: TestPartitionDescription, coverage_output_dir: str,
        graph_output_dir: Optional[str]):
    """Generates framework tests and coverage output from the given inputs.

    Reads the supported-actions, actions, and required-coverage files,
    generates framework tests per platform, prints the diffs (tests to add
    and remove) against the existing test files, and writes one coverage tsv
    per platform into |coverage_output_dir|. When |graph_output_dir| is set,
    graphviz dot files for the coverage and generated-test graphs are also
    written there.
    """
    # Partition prefixes must not overlap, otherwise a test could belong to
    # more than one partition.
    for partition_a in custom_partitions:
        check_partition_prefixes(partition_a, default_partition)
        for partition_b in custom_partitions:
            if partition_a == partition_b:
                continue
            check_partition_prefixes(partition_a, partition_b)
    actions_csv = csv.reader(actions_file, delimiter=',')
    platform_supported_actions = read_platform_supported_actions(
        csv.reader(supported_framework_action_file))
    (actions, action_base_name_to_default_param) = read_actions_file(
        actions_csv, platform_supported_actions)

    coverage_csv = csv.reader(coverage_required_file, delimiter=',')
    required_coverage_tests = read_unprocessed_coverage_tests_file(
        coverage_csv, actions, action_base_name_to_default_param)

    required_coverage_tests = expand_parameterized_tests(
        required_coverage_tests)

    if graph_output_dir:
        coverage_root_node = ActionNode.CreateRootNode()
        build_action_node_graph(coverage_root_node, required_coverage_tests)
        graph_file = generage_graphviz_dot_file(coverage_root_node, None)
        output_coverage_graph_file_name = os.path.join(
            graph_output_dir, "coverage_required_graph.dot")
        _write_generated_graph_file(output_coverage_graph_file_name,
                                    graph_file)

    # Each platform can have unique tests. Start by generating the required
    # tests per platform, and the generated tests per platform.
    required_coverage_by_platform: CoverageTestsByPlatform = {}
    generated_tests_by_platform: CoverageTestsByPlatform = {}
    for platform in TestPlatform:
        platform_tests = filter_coverage_tests_for_platform(
            required_coverage_tests.copy(), platform)
        required_coverage_by_platform[platform] = platform_tests

        generated_tests_root_node = ActionNode.CreateRootNode()
        build_action_node_graph(generated_tests_root_node, platform_tests)
        trim_graph_to_platform_actions(generated_tests_root_node, platform)
        generated_tests_by_platform[platform] = generate_framework_tests(
            generated_tests_root_node, platform)
        if graph_output_dir:
            graph_file = generage_graphviz_dot_file(generated_tests_root_node,
                                                    platform)
            output_coverage_graph_file_name = os.path.join(
                graph_output_dir,
                "generated_tests_graph_" + platform.suffix + ".dot")
            _write_generated_graph_file(output_coverage_graph_file_name,
                                        graph_file)

    # A test can be required to run on multiple platforms, and we group
    # required tests by platform set to output minimal number of browser tests
    # files. This allows the test to exist only in one place for ease of
    # sheriffing. Example:
    # Linux:    testA, testB
    # Mac:      testA, testB
    # Windows:  testA
    # ChromeOS: testA, testC
    # ->
    # {Linux, Mac, Windows, ChromeOS} -> testA
    # {Linux, Mac} -> testB
    # {ChromeOS} -> testC
    required_coverage_by_platform_set: CoverageTestsByPlatformSet = (
        partition_framework_tests_per_platform_combination(
            generated_tests_by_platform))

    # Find all existing tests.
    all_partitions = [default_partition]
    all_partitions.extend(custom_partitions)
    (existing_tests_ids_by_platform_set, disabled_test_ids_by_platform
     ) = find_existing_and_disabled_tests(all_partitions)

    # Print all diffs that are required.
    compare_and_print_tests_to_remove_and_add(
        existing_tests_ids_by_platform_set, required_coverage_by_platform_set,
        custom_partitions, default_partition)

    # To calculate coverage we need to incorporate any disabled tests.
    # Remove any disabled tests from the generated tests per platform.
    for platform, tests in generated_tests_by_platform.items():
        disabled_tests = disabled_test_ids_by_platform.get(platform, [])
        tests_minus_disabled: List[CoverageTest] = []
        for test in tests:
            if test.id not in disabled_tests:
                tests_minus_disabled.append(test)
            else:
                logging.info("Removing disabled test from coverage: " +
                             test.id)
        generated_tests_root_node = ActionNode.CreateRootNode()
        build_action_node_graph(generated_tests_root_node,
                                tests_minus_disabled)
        (coverage_file, full, partial) = generate_coverage_file_and_percents(
            required_coverage_by_platform[platform], generated_tests_root_node,
            platform)
        coverage_filename = os.path.join(coverage_output_dir,
                                         f"coverage_{platform.suffix}.tsv")
        # Codec name corrected from the original "'utf-8" (stray apostrophe).
        with open(coverage_filename, 'w+', encoding="utf-8") as file:
            file.write("# This is a generated file.\n")
            file.write(f"# Full coverage: {full:.0%}, "
                       f"with partial coverage: {partial:.0%}\n")
            file.write(coverage_file + "\n")
    def test_test_generation(self):
        """Generates framework tests from the test data and compares each
        platform-set's output against a checked-in expected .txt file.
        """
        # Show unabridged diffs on failure; generated test text is long.
        self.maxDiff = None
        actions_filename = os.path.join(TEST_DATA_DIR, "test_actions.csv")
        supported_actions_filename = os.path.join(
            TEST_DATA_DIR, "framework_supported_actions.csv")

        coverage_filename = os.path.join(TEST_DATA_DIR,
                                         "test_unprocessed_coverage.csv")

        # Partition used both to locate the expected files (its dir/prefix)
        # and to render the generated browsertests below.
        test_partition = TestPartitionDescription(
            action_name_prefixes=set(),
            browsertest_dir=os.path.join(TEST_DATA_DIR, "expected_test_txt"),
            test_file_prefix="tests_default",
            test_fixture="TestName")

        with open(actions_filename, "r", encoding="utf-8") as actions_file, \
                open(supported_actions_filename, "r", encoding="utf-8") \
                    as supported_actions_file, \
                open(coverage_filename, "r", encoding="utf-8") \
                    as coverage_file:
            # Parse actions, then the required coverage tests, and expand the
            # parameterized tests into concrete ones.
            actions_csv = csv.reader(actions_file, delimiter=',')
            platform_supported_actions = read_platform_supported_actions(
                csv.reader(supported_actions_file))
            (actions, action_base_name_to_default_param) = read_actions_file(
                actions_csv, platform_supported_actions)

            coverage_csv = csv.reader(coverage_file, delimiter=',')
            required_coverage_tests = read_unprocessed_coverage_tests_file(
                coverage_csv, actions, action_base_name_to_default_param)

            required_coverage_tests = expand_parameterized_tests(
                required_coverage_tests)

            # Generate the framework tests independently for each platform.
            required_coverage_by_platform: CoverageTestsByPlatform = {}
            generated_tests_by_platform: CoverageTestsByPlatform = {}
            for platform in TestPlatform:
                platform_tests = filter_coverage_tests_for_platform(
                    required_coverage_tests.copy(), platform)
                required_coverage_by_platform[platform] = platform_tests

                generated_tests_root_node = ActionNode.CreateRootNode()
                build_action_node_graph(generated_tests_root_node,
                                        platform_tests)
                trim_graph_to_platform_actions(generated_tests_root_node,
                                               platform)
                generated_tests_by_platform[
                    platform] = generate_framework_tests(
                        generated_tests_root_node, platform)

            # Group the generated tests by the set of platforms that share
            # them, then compare each group against its expected file.
            required_coverage_by_platform_set: CoverageTestsByPlatformSet = (
                partition_framework_tests_per_platform_combination(
                    generated_tests_by_platform))
            for platform_set, tests in required_coverage_by_platform_set.items(
            ):
                expected_filename = os.path.join(
                    test_partition.browsertest_dir,
                    test_partition.test_file_prefix)
                # Expected filenames carry per-platform suffixes unless the
                # tests apply to every platform.
                if len(platform_set) != len(TestPlatform):
                    for platform in TestPlatform:
                        if platform in platform_set:
                            expected_filename += "_" + platform.suffix
                expected_filename += ".txt"
                with open(expected_filename, "r",
                          encoding="utf-8") as expected_tests_file:
                    expected_tests_str = expected_tests_file.read()
                    actual_tests_str = "\n".join([
                        test.generate_browsertest(test_partition)
                        for test in tests
                    ])
                    self.assertEqual(expected_tests_str, actual_tests_str)