Example 1
    def test_coverage_file_reading(self):
        actions_filename = os.path.join(TEST_DATA_DIR, "test_actions.tsv")
        supported_actions_filename = os.path.join(
            TEST_DATA_DIR, "framework_supported_actions.csv")
        enums_filename = os.path.join(TEST_DATA_DIR, "test_enums.tsv")

        actions: ActionsByName = {}
        action_base_name_to_default_param = {}
        with open(actions_filename, "r", encoding="utf-8") as f, \
                open(supported_actions_filename, "r", encoding="utf-8") \
                    as supported_actions, \
                open(enums_filename, "r", encoding="utf-8") as enums:
            supported_actions = read_platform_supported_actions(
                csv.reader(supported_actions, delimiter=','))
            actions_tsv = csv.reader(f, delimiter='\t')
            enums = read_enums_file(csv.reader(enums, delimiter='\t'))
            (actions, action_base_name_to_default_param) = read_actions_file(
                actions_tsv, enums, supported_actions)

        coverage_filename = os.path.join(TEST_DATA_DIR,
                                         "test_unprocessed_coverage.tsv")
        coverage_tests: List[CoverageTest] = []
        with open(coverage_filename, "r", encoding="utf-8") as f:
            coverage_tsv = csv.reader(f, delimiter='\t')
            coverage_tests = read_unprocessed_coverage_tests_file(
                coverage_tsv, actions, action_base_name_to_default_param)

        self.assertEqual(len(coverage_tests), 4)
Example 2
    def test_action_file_reading(self):
        actions_filename = os.path.join(TEST_DATA_DIR, "test_actions.tsv")
        supported_actions_filename = os.path.join(
            TEST_DATA_DIR, "framework_supported_actions.csv")
        enums_filename = os.path.join(TEST_DATA_DIR, "test_enums.tsv")

        with open(actions_filename, "r", encoding="utf-8") as f, \
                open(supported_actions_filename, "r", encoding="utf-8") \
                    as supported_actions, \
                open(enums_filename, "r", encoding="utf-8") as enums:
            supported_actions = read_platform_supported_actions(
                csv.reader(supported_actions, delimiter=','))
            actions_tsv = csv.reader(f, delimiter='\t')
            enums = read_enums_file(csv.reader(enums, delimiter='\t'))

            (actions, action_base_name_to_default_param) = read_actions_file(
                actions_tsv, enums, supported_actions)
            self.assertEqual(len(actions), 13)
            self.assertEqual(len(action_base_name_to_default_param), 3)

            # Check parameterized action state.
            self.assertIn('changes_Chicken', actions)
            self.assertIn('changes_Dog', actions)

            self.assertIn('checks', actions)
            checks_output_actions = actions['checks'].output_actions
            self.assertEqual(len(checks_output_actions), 2)
            self.assertCountEqual(
                checks_output_actions,
                [actions['check_a_Chicken'], actions['check_b_Chicken_Green']])
Example 3
    def test_action_file_reading(self):
        actions_filename = os.path.join(TEST_DATA_DIR, "test_actions.csv")
        supported_actions_filename = os.path.join(
            TEST_DATA_DIR, "framework_supported_actions.csv")
        with open(actions_filename, "r", encoding="utf-8") as f, \
                open(supported_actions_filename, "r", encoding="utf-8") \
                    as supported_actions:
            supported_actions = read_platform_supported_actions(
                csv.reader(supported_actions))
            actions_csv = csv.reader(f, delimiter=',')
            (actions, action_base_name_to_default_param) = read_actions_file(
                actions_csv, supported_actions)
            self.assertEqual(len(actions), 10)
            self.assertEqual(len(action_base_name_to_default_param), 4)

            # Check parameterized action state.
            self.assertTrue('changes_Mode1' in actions)
            self.assertTrue('changes_Mode2' in actions)

            self.assertTrue('checks' in actions)
            checks_output_actions = actions['checks'].output_actions
            self.assertEqual(len(checks_output_actions), 2)
            self.assertCountEqual(
                checks_output_actions,
                [actions['check_a_Mode1'], actions['check_b_Mode1']])
Example 4
    def test_processed_coverage(self):
        actions_filename = os.path.join(TEST_DATA_DIR, "test_actions.tsv")
        supported_actions_filename = os.path.join(
            TEST_DATA_DIR, "framework_supported_actions.csv")
        enums_filename = os.path.join(TEST_DATA_DIR, "test_enums.tsv")

        actions: ActionsByName = {}
        action_base_name_to_default_param = {}
        with open(actions_filename, "r", encoding="utf-8") as f, \
                open(supported_actions_filename, "r", encoding="utf-8") \
                    as supported_actions_file, \
                open(enums_filename, "r", encoding="utf-8") as enums:
            supported_actions = read_platform_supported_actions(
                csv.reader(supported_actions_file, delimiter=','))
            actions_tsv = csv.reader(f, delimiter='\t')
            enums = read_enums_file(csv.reader(enums, delimiter='\t'))
            (actions, action_base_name_to_default_param) = read_actions_file(
                actions_tsv, enums, supported_actions)

        coverage_filename = os.path.join(TEST_DATA_DIR,
                                         "test_unprocessed_coverage.tsv")
        coverage_tests: List[CoverageTest] = []
        with open(coverage_filename, "r", encoding="utf-8") as f:
            coverage_csv = csv.reader(f, delimiter='\t')
            coverage_tests = read_unprocessed_coverage_tests_file(
                coverage_csv, actions, action_base_name_to_default_param)
        coverage_tests = expand_parameterized_tests(coverage_tests)

        # Compare with expected
        expected_processed_tests = []
        processed_filename = os.path.join(TEST_DATA_DIR,
                                          "expected_processed_coverage.tsv")
        with open(processed_filename, "r", encoding="utf-8") as f:
            coverage_csv = csv.reader(f, delimiter='\t')
            expected_processed_tests = read_unprocessed_coverage_tests_file(
                coverage_csv, actions, action_base_name_to_default_param)

        # Hack for easy comparison and printing: transform coverage tests into
        # a Tuple[List[str], Set[TestPlatform]].
        self.assertCountEqual([([action.name
                                 for action in test.actions], test.platforms)
                               for test in coverage_tests],
                              [([action.name
                                 for action in test.actions], test.platforms)
                               for test in expected_processed_tests])
Example 5
def generate_framework_tests_and_coverage(
        supported_framework_action_file: TextIOWrapper,
        actions_file: TextIOWrapper, coverage_required_file: TextIOWrapper,
        custom_partitions: List[TestPartitionDescription],
        default_partition: TestPartitionDescription, coverage_output_dir: str,
        graph_output_dir: Optional[str]):

    for partition_a in custom_partitions:
        check_partition_prefixes(partition_a, default_partition)
        for partition_b in custom_partitions:
            if partition_a == partition_b:
                continue
            check_partition_prefixes(partition_a, partition_b)
    actions_csv = csv.reader(actions_file, delimiter=',')
    platform_supported_actions = read_platform_supported_actions(
        csv.reader(supported_framework_action_file))
    (actions, action_base_name_to_default_param) = read_actions_file(
        actions_csv, platform_supported_actions)

    coverage_csv = csv.reader(coverage_required_file, delimiter=',')
    required_coverage_tests = read_unprocessed_coverage_tests_file(
        coverage_csv, actions, action_base_name_to_default_param)

    required_coverage_tests = expand_parameterized_tests(
        required_coverage_tests)

    if graph_output_dir:
        coverage_root_node = ActionNode.CreateRootNode()
        build_action_node_graph(coverage_root_node, required_coverage_tests)
        graph_file = generage_graphviz_dot_file(coverage_root_node, None)
        output_coverage_graph_file_name = os.path.join(
            graph_output_dir, "coverage_required_graph.dot")
        with open(output_coverage_graph_file_name, "w",
                  encoding="'utf-8") as coverage_graph_file:
            coverage_graph_file.write("# This is a generated file.\n")
            coverage_graph_file.write(graph_file)

    # Each platform can have unique tests. Start by computing the required
    # tests per platform and the generated tests per platform.
    required_coverage_by_platform: CoverageTestsByPlatform = {}
    generated_tests_by_platform: CoverageTestsByPlatform = {}
    for platform in TestPlatform:
        platform_tests = filter_coverage_tests_for_platform(
            required_coverage_tests.copy(), platform)
        required_coverage_by_platform[platform] = platform_tests

        generated_tests_root_node = ActionNode.CreateRootNode()
        build_action_node_graph(generated_tests_root_node, platform_tests)
        trim_graph_to_platform_actions(generated_tests_root_node, platform)
        generated_tests_by_platform[platform] = generate_framework_tests(
            generated_tests_root_node, platform)
        if graph_output_dir:
            graph_file = generage_graphviz_dot_file(generated_tests_root_node,
                                                    platform)
            output_coverage_graph_file_name = os.path.join(
                graph_output_dir,
                "generated_tests_graph_" + platform.suffix + ".dot")
            with open(output_coverage_graph_file_name, "w",
                      encoding="'utf-8") as coverage_graph_file:
                coverage_graph_file.write("# This is a generated file.\n")
                coverage_graph_file.write(graph_file)

    # A test can be required to run on multiple platforms, and we group
    # required tests by platform set to output a minimal number of browser
    # test files. This allows each test to exist in only one place for ease
    # of sheriffing. Example:
    # Linux:    testA, testB
    # Mac:      testA, testB
    # Windows:  testA
    # ChromeOS: testA, testC
    # ->
    # {Linux, Mac, Windows, ChromeOS} -> testA
    # {Linux, Mac} -> testB
    # {ChromeOS} -> testC
    required_coverage_by_platform_set: CoverageTestsByPlatformSet = (
        partition_framework_tests_per_platform_combination(
            generated_tests_by_platform))

    # Find all existing tests.
    all_partitions = [default_partition]
    all_partitions.extend(custom_partitions)
    (existing_tests_ids_by_platform_set, disabled_test_ids_by_platform
     ) = find_existing_and_disabled_tests(all_partitions)

    # Print all diffs that are required.
    compare_and_print_tests_to_remove_and_add(
        existing_tests_ids_by_platform_set, required_coverage_by_platform_set,
        custom_partitions, default_partition)

    # To calculate coverage we need to incorporate any disabled tests.
    # Remove any disabled tests from the generated tests per platform.
    for platform, tests in generated_tests_by_platform.items():
        disabled_tests = disabled_test_ids_by_platform.get(platform, [])
        tests_minus_disabled: List[CoverageTest] = []
        for test in tests:
            if test.id not in disabled_tests:
                tests_minus_disabled.append(test)
            else:
                logging.info("Removing disabled test from coverage: " +
                             test.id)
        generated_tests_root_node = ActionNode.CreateRootNode()
        build_action_node_graph(generated_tests_root_node,
                                tests_minus_disabled)
        (coverage_file, full, partial) = generate_coverage_file_and_percents(
            required_coverage_by_platform[platform], generated_tests_root_node,
            platform)
        coverage_filename = os.path.join(coverage_output_dir,
                                         f"coverage_{platform.suffix}.tsv")
        with open(coverage_filename, 'w+', encoding="utf-8") as file:
            file.write("# This is a generated file.\n")
            file.write(f"# Full coverage: {full:.0%}, "
                       f"with partial coverage: {partial:.0%}\n")
            file.write(coverage_file + "\n")
    return
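The platform-set grouping described in the comment above can be illustrated on its own. The sketch below is only an illustration, not the partition_framework_tests_per_platform_combination implementation used by this function; the helper name is made up, and it assumes each test exposes an id attribute, as the disabled-test handling above already relies on.

# Illustrative sketch only; the helper name is hypothetical and this is not
# the implementation used above. Assumes each test object has an `id`.
from collections import defaultdict

def group_tests_by_platform_set(tests_by_platform):
    # For every test id, collect the set of platforms that require it.
    platforms_by_test_id = defaultdict(set)
    test_by_id = {}
    for platform, tests in tests_by_platform.items():
        for test in tests:
            platforms_by_test_id[test.id].add(platform)
            test_by_id[test.id] = test
    # Invert the mapping: tests that share the same platform set end up in
    # the same bucket, so each test is emitted in exactly one file.
    tests_by_platform_set = defaultdict(list)
    for test_id, platforms in platforms_by_test_id.items():
        tests_by_platform_set[frozenset(platforms)].append(
            test_by_id[test_id])
    return tests_by_platform_set

Applied to the example in the comment, testA lands in the bucket keyed by all four platforms, testB in {Linux, Mac}, and testC in {ChromeOS}.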
Example 6
def main():
    parser = argparse.ArgumentParser(
        description='WebApp Integration Test Analysis CLI Tool')
    parser.add_argument('-v',
                        dest='v',
                        action='store_true',
                        help='Include info logging.',
                        required=False)

    script_dir = os.path.dirname(os.path.realpath(__file__))

    parser.add_argument(
        '--test_dir',
        dest='test_dir',
        action='store',
        help=('Specify a directory to find all required files, instead of ' +
              'specifying each file individually. Overrides those options.'),
        required=False)

    parser.add_argument(
        '--coverage_required',
        dest='coverage_required',
        action='store',
        default=(script_dir + '/data/coverage_required.csv'),
        help=(
            'Test list csv file, which lists all integration tests that would '
            + 'give the required full coverage of the system. The first two ' +
            'lines are skipped.'),
        required=False)
    parser.add_argument('--coverage_test_row',
                        dest='coverage_test_row',
                        action='append',
                        help='Individually select a coverage test row.',
                        required=False)

    parser.add_argument('--actions',
                        dest='actions',
                        action='store',
                        default=(script_dir + '/data/actions.csv'),
                        help='Actions csv file, defining all actions.',
                        required=False)

    parser.add_argument('--framework_actions',
                        dest='framework_actions',
                        default=(script_dir +
                                 '/data/framework_actions_linux.csv'),
                        help=('Framework actions csv file, enumerating ' +
                              'all actions supported by the framework'),
                        action='store',
                        required=False)
    parser.add_argument(
        '--tests',
        dest='tests',
        action='append',
        help=('Test csv files, enumerating all existing tests for coverage ' +
              'calculations. First column is expected to be the test name.'),
        required=False)

    subparsers = parser.add_subparsers(dest="cmd", required=True)
    subparsers.add_parser('list_actions')
    subparsers.add_parser('list_coverage_tests')
    subparsers.add_parser('list_processed_coverage_tests')
    subparsers.add_parser('coverage_required_graph')

    framework_parse = subparsers.add_parser(
        'generate_framework_tests_for_platform')
    framework_parse.add_argument('--platform',
                                 dest='platform',
                                 required=True,
                                 choices=["M", "W", "L", "C"],
                                 action="store")
    framework_parse.add_argument('--graph_framework_tests',
                                 dest='graph_framework_tests',
                                 default=False,
                                 action='store_true')

    subparsers.add_parser('print_all_framework_tests')

    save_files_parse = subparsers.add_parser(
        'save_or_modify_framework_test_files')
    save_files_parse.add_argument('--dir',
                                  dest='dir',
                                  default='.',
                                  action="store")
    save_files_parse.add_argument('--base_filename',
                                  dest='base_filename',
                                  default='web_app_integration_browsertest',
                                  action="store")

    subparsers.add_parser('generate_test_coverage')

    options = parser.parse_args()

    actions_file = options.actions
    coverage_required_file = options.coverage_required

    if options.test_dir:
        actions_file = options.test_dir + "/actions.csv"
        coverage_required_file = options.test_dir + "/coverage_required.csv"

    logging.basicConfig(level=logging.INFO if options.v else logging.WARN,
                        format='[%(asctime)s %(levelname)s] %(message)s',
                        datefmt='%H:%M:%S')

    logging.info('Script directory: ' + script_dir)

    actions_csv = csv.reader(open(actions_file, "r", encoding="utf-8"),
                             delimiter=',')
    (actions,
     action_base_name_to_default_param) = read_actions_file(actions_csv)

    default_partition = TestPartitionDescription([], os.path.join(script_dir),
                                                 "test_browsertest",
                                                 "WebAppIntegrationTestBase")

    if options.cmd == 'list_actions':
        for action in actions.values():
            print(action)
        return
    if options.cmd == 'list_coverage_tests':
        coverage_csv = csv.reader(open(coverage_required_file,
                                       "r",
                                       encoding="utf-8"),
                                  delimiter=',')
        required_coverage_tests = read_unprocessed_coverage_tests_file(
            coverage_csv, actions, action_base_name_to_default_param)
        required_coverage_tests = MaybeFilterCoverageTests(
            required_coverage_tests, options.coverage_test_row)
        for test in required_coverage_tests:
            print(test if options.v else test.id)
        return
    if options.cmd == 'list_processed_coverage_tests':
        coverage_csv = csv.reader(open(coverage_required_file,
                                       "r",
                                       encoding="utf-8"),
                                  delimiter=',')
        required_coverage_tests = read_unprocessed_coverage_tests_file(
            coverage_csv, actions, action_base_name_to_default_param)
        required_coverage_tests = MaybeFilterCoverageTests(
            required_coverage_tests, options.coverage_test_row)
        required_coverage_tests = expand_parameterized_tests(
            required_coverage_tests)
        for test in required_coverage_tests:
            print(test if options.v else test.name)
        return
    if options.cmd == 'coverage_required_graph':
        coverage_csv = csv.reader(open(coverage_required_file,
                                       "r",
                                       encoding="utf-8"),
                                  delimiter=',')
        required_coverage_tests = read_unprocessed_coverage_tests_file(
            coverage_csv, actions, action_base_name_to_default_param)
        required_coverage_tests = MaybeFilterCoverageTests(
            required_coverage_tests, options.coverage_test_row)
        required_coverage_tests = expand_parameterized_tests(
            required_coverage_tests)
        coverage_root_node = ActionNode.CreateRootNode()
        build_action_node_graph(coverage_root_node, required_coverage_tests)
        graph_file = generage_graphviz_dot_file(coverage_root_node)
        print(graph_file)
        return
    if options.cmd == 'generate_framework_tests_for_platform':
        coverage_csv = csv.reader(open(coverage_required_file,
                                       "r",
                                       encoding="utf-8"),
                                  delimiter=',')
        required_coverage_tests = read_unprocessed_coverage_tests_file(
            coverage_csv, actions, action_base_name_to_default_param)
        required_coverage_tests = MaybeFilterCoverageTests(
            required_coverage_tests, options.coverage_test_row)
        required_coverage_tests = expand_parameterized_tests(
            required_coverage_tests)
        platform_lookup = {
            "M": TestPlatform.MAC,
            "W": TestPlatform.WINDOWS,
            "L": TestPlatform.LINUX,
            "C": TestPlatform.CHROME_OS
        }
        platform = platform_lookup[options.platform]
        required_coverage_tests = filter_coverage_tests_for_platform(
            required_coverage_tests, platform)
        coverage_root_node = ActionNode.CreateRootNode()
        build_action_node_graph(coverage_root_node, required_coverage_tests)
        trim_graph_to_platform_actions(coverage_root_node, platform)
        if options.graph_framework_tests:
            print(generage_graphviz_dot_file(coverage_root_node))
            return
        lines = []
        tests = generate_framework_tests(coverage_root_node)
        for test in tests:
            lines.append(test.GenerateBrowsertest(default_partition))
        print("\n".join(lines))
        return
    if options.cmd == 'print_all_framework_tests':
        coverage_csv = csv.reader(open(coverage_required_file,
                                       "r",
                                       encoding="utf-8"),
                                  delimiter=',')
        required_coverage_tests = read_unprocessed_coverage_tests_file(
            coverage_csv, actions, action_base_name_to_default_param)
        required_coverage_tests = MaybeFilterCoverageTests(
            required_coverage_tests, options.coverage_test_row)
        required_coverage_tests = expand_parameterized_tests(
            required_coverage_tests)
        platform_set_to_tests = partition_framework_tests_per_platform_combination(
            required_coverage_tests)

        for platforms, tests in platform_set_to_tests.items():
            print(f"\n\n\nTests for {platforms!r}!")
            for test in tests:
                print(test.GenerateBrowsertest(default_partition))
        return
    if options.cmd == 'save_or_modify_framework_test_files':
        coverage_csv = csv.reader(open(coverage_required_file,
                                       "r",
                                       encoding="utf-8"),
                                  delimiter=',')
        required_coverage_tests = read_unprocessed_coverage_tests_file(
            coverage_csv, actions, action_base_name_to_default_param)
        required_coverage_tests = MaybeFilterCoverageTests(
            required_coverage_tests, options.coverage_test_row)
        required_coverage_tests = expand_parameterized_tests(
            required_coverage_tests)
        platform_set_to_tests = partition_framework_tests_per_platform_combination(
            required_coverage_tests)

        existing_tests_by_platform_set = {}

        for file in os.listdir(options.dir):
            if not file.startswith(options.base_filename):
                continue
            platforms = TestPlatform.get_platforms_from_browsertest_filename(
                file)
            platforms = frozenset(platforms)
            with open(options.dir + os.path.sep + file, "r",
                      encoding="utf-8") as f:
                contents = f.read()
                tests = get_tests_in_browsertest(contents)
                existing_tests_by_platform_set[platforms] = list(tests.keys())

        for platforms, tests in platform_set_to_tests.items():
            tests_to_add = []
            for test in tests:
                if platforms in existing_tests_by_platform_set:
                    existing_tests = existing_tests_by_platform_set[platforms]
                    if test.name not in existing_tests:
                        tests_to_add.append(test)
                    else:
                        existing_tests.remove(test.name)
                else:
                    tests_to_add.append(test)
            if not tests_to_add:
                continue
            print(f"\n\nAdd this following tests to "
                  f"{default_partition.generate_test_filename(platforms)}:\n")
            for test in tests_to_add:
                print(test.GenerateBrowsertest(default_partition) + "\n")

        for platforms, test_names in existing_tests_by_platform_set.items():
            if not test_names:
                continue
            print(f"\n\nRemove this following tests from "
                  f"{default_partition.generate_test_filename(platforms)}: "
                  f"{', '.join(test_names)}")
        return
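Each subcommand branch above repeats the same open/read/filter sequence and leaves the csv file handle unclosed. As a possible cleanup (this helper is hypothetical, not part of the original script), the shared steps could be factored out, with expand_parameterized_tests left to the callers that need it, since list_coverage_tests skips the expansion:

# Hypothetical helper, not part of the original script; it only reuses
# functions that already appear above and closes the file via `with`.
def load_required_coverage_tests(coverage_required_file, actions,
                                 action_base_name_to_default_param,
                                 coverage_test_rows):
    with open(coverage_required_file, "r", encoding="utf-8") as f:
        coverage_csv = csv.reader(f, delimiter=',')
        tests = read_unprocessed_coverage_tests_file(
            coverage_csv, actions, action_base_name_to_default_param)
    return MaybeFilterCoverageTests(tests, coverage_test_rows)

Each branch could then start with required_coverage_tests = load_required_coverage_tests(coverage_required_file, actions, action_base_name_to_default_param, options.coverage_test_row).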
Example 7
    def test_test_generation(self):
        self.maxDiff = None
        actions_filename = os.path.join(TEST_DATA_DIR, "test_actions.csv")
        supported_actions_filename = os.path.join(
            TEST_DATA_DIR, "framework_supported_actions.csv")

        coverage_filename = os.path.join(TEST_DATA_DIR,
                                         "test_unprocessed_coverage.csv")

        test_partition = TestPartitionDescription(
            action_name_prefixes=set(),
            browsertest_dir=os.path.join(TEST_DATA_DIR, "expected_test_txt"),
            test_file_prefix="tests_default",
            test_fixture="TestName")

        with open(actions_filename, "r", encoding="utf-8") as actions_file, \
                open(supported_actions_filename, "r", encoding="utf-8") \
                    as supported_actions_file, \
                open(coverage_filename, "r", encoding="utf-8") \
                    as coverage_file:
            actions_csv = csv.reader(actions_file, delimiter=',')
            platform_supported_actions = read_platform_supported_actions(
                csv.reader(supported_actions_file))
            (actions, action_base_name_to_default_param) = read_actions_file(
                actions_csv, platform_supported_actions)

            coverage_csv = csv.reader(coverage_file, delimiter=',')
            required_coverage_tests = read_unprocessed_coverage_tests_file(
                coverage_csv, actions, action_base_name_to_default_param)

            required_coverage_tests = expand_parameterized_tests(
                required_coverage_tests)

            required_coverage_by_platform: CoverageTestsByPlatform = {}
            generated_tests_by_platform: CoverageTestsByPlatform = {}
            for platform in TestPlatform:
                platform_tests = filter_coverage_tests_for_platform(
                    required_coverage_tests.copy(), platform)
                required_coverage_by_platform[platform] = platform_tests

                generated_tests_root_node = ActionNode.CreateRootNode()
                build_action_node_graph(generated_tests_root_node,
                                        platform_tests)
                trim_graph_to_platform_actions(generated_tests_root_node,
                                               platform)
                generated_tests_by_platform[
                    platform] = generate_framework_tests(
                        generated_tests_root_node, platform)

            required_coverage_by_platform_set: CoverageTestsByPlatformSet = (
                partition_framework_tests_per_platform_combination(
                    generated_tests_by_platform))
            for platform_set, tests in required_coverage_by_platform_set.items(
            ):
                expected_filename = os.path.join(
                    test_partition.browsertest_dir,
                    test_partition.test_file_prefix)
                if len(platform_set) != len(TestPlatform):
                    for platform in TestPlatform:
                        if platform in platform_set:
                            expected_filename += "_" + platform.suffix
                expected_filename += ".txt"
                with open(expected_filename, "r",
                          encoding="utf-8") as expected_tests_file:
                    expected_tests_str = expected_tests_file.read()
                    actual_tests_str = "\n".join([
                        test.generate_browsertest(test_partition)
                        for test in tests
                    ])
                    self.assertEqual(expected_tests_str, actual_tests_str)