def test_adds_yaml_data(self):
        """analyze_json should delegate YAML enrichment to yaml_utils."""
        json_path = os.path.join(_TEST_DIR, 'polyglot_snippet_data.json')
        with mock.patch(
                'ast_parser.core.analyze.yaml_utils') as mock_yaml_utils:
            analyze.analyze_json(json_path, _TEST_DIR)

            mock_yaml_utils.add_yaml_data_to_source_methods.assert_called()
    def test_adds_child_drift_data(self):
        """Region-tag regions should be requested for each source file."""
        with mock.patch(
                'ast_parser.core.analyze.polyglot_parser') as mock_parser:
            mock_parser.get_region_tag_regions.return_value = ([], [])

            json_path = os.path.join(_TEST_DIR, 'polyglot_snippet_data.json')
            analyze.analyze_json(json_path, _TEST_DIR)

            expected_path = os.path.abspath(
                os.path.join(_TEST_DIR, 'http/http_main.py'))
            mock_parser.get_region_tag_regions.assert_any_call(expected_path)
def validate_yaml(data_json: str,
                  root_dir: str,
                  output_file: str = None) -> None:
    """Validates .drift-data.yml files in a directory

    Coordinates the calls needed to validate .drift-data.yml files in a
    given directory. (Validation needs the tag data produced by
    analyze_json(), which this method obtains and passes along.)

    Args:
        data_json: A path to a polyglot_drift_data.json file for the specified
                   root directory
        root_dir: A path to the target root directory.
        output_file: (Optional) A filepath to write the YAML validation
                     results to. Results will be written to stdout if this
                     argument is omitted.
    """
    analysis = analyze.analyze_json(data_json, root_dir)
    grep_tags, source_tags, _ignored_tags, _source_methods = analysis

    is_valid, output = cli_yaml.validate_yaml_syntax(
        root_dir, grep_tags, source_tags)

    # Append a one-line human-readable verdict to the detailed results.
    summary = 'All files are valid.' if is_valid else 'Invalid file(s) found!'
    output.append(summary)

    _write_output(output, output_file)
    def test_dedupes_region_tags(self):
        """Exactly one parsed method should have the tag set ['not_main']."""
        _, _, _, source_methods = analyze.analyze_json(
            os.path.join(_TEST_DIR, 'polyglot_snippet_data.json'), _TEST_DIR)

        matches = [method for method in source_methods
                   if method.region_tags == ['not_main']]
        assert len(matches) == 1
    def _get_analyze_result(self):
        """Analyze the edge_cases fixture and stash the results on self."""
        edge_case_dir = os.path.join(_TEST_DIR, 'edge_cases')
        json_path = os.path.join(edge_case_dir, 'polyglot_snippet_data.json')

        (self.grep_tags, self.source_tags, self.ignored_tags,
         self.source_methods) = analyze.analyze_json(json_path, edge_case_dir)
def inject_snippet_mapping(data_json: str,
                           root_dir: str,
                           stdin_lines: List[str],
                           output_file: str = None) -> None:
    """Adds snippet mapping to XUnit results

    This method injects test-snippet mappings into XUnit test results provided
    via stdin. It then saves the modified XUnit results to a file (if
    output_file is specified) or prints them to stdout (if output_file is *not*
    specified).

    Args:
        data_json: A path to a polyglot_drift_data.json file for the specified
                   root directory
        root_dir: A path to the target root directory.
        stdin_lines: The lines of an XUnit test result file.
        output_file: (Optional) A filepath to write the modified XUnit test
                     output to. Modified XUnit output will be written to
                     stdout if this argument is omitted.
    """

    grep_tags, source_tags, ignored_tags, source_methods = (
        analyze.analyze_json(data_json, root_dir))

    xunit_tree = etree.fromstring(''.join(stdin_lines))

    for elem in xunit_tree.findall('.//testcase'):
        # Drop test-class segments (e.g. "TestFoo") so the remaining
        # trailing segment is the test module name.
        class_parts = [
            part for part in elem.attrib['classname'].split('.')
            if not part.startswith('Test')
        ]
        test_key = (class_parts[-1], elem.attrib['name'])
        for method in source_methods:
            # (module-name, test-name) pairs recorded for this method
            method_test_keys = [
                (os.path.splitext(os.path.basename(test[0]))[0], test[1])
                for test in method['test_methods']
            ]

            if test_key in method_test_keys:
                # Inject region tags into region_tags XML attribute,
                # merging with any tags already present on the element.
                existing_tag_str = elem.attrib.get('region_tags')
                existing_tag_list = (existing_tag_str.split(',')
                                     if existing_tag_str else [])

                # Sort after deduping: iterating a bare set makes the
                # attribute's tag order vary between runs, which breaks
                # diffs and byte-for-byte comparisons of the XML output.
                deduped_tag_list = sorted(
                    set(existing_tag_list + method['region_tags']))

                elem.set('region_tags', ','.join(deduped_tag_list))

    _write_output([etree.tostring(xunit_tree).decode()], output_file)
    def test_handle_snippet_invocation_methods(self):
        """Snippet-invocation fixtures should parse into three methods."""
        invocation_dir = os.path.join(
            _TEST_DIR, 'snippet_invocation_methods')
        _, _, _, source_methods = analyze.analyze_json(
            os.path.join(invocation_dir, 'polyglot_snippet_data.json'),
            invocation_dir)

        assert len(source_methods) == 3

        # Methods are expected in a fixed order to keep assertions simple.
        first, second, third = source_methods
        assert first.name == 'some_method'
        assert second.name == 'another_method'
        assert third.name in constants.SNIPPET_INVOCATION_METHODS

        # The first two methods should have had their tests detected.
        assert first.test_methods
        assert second.test_methods
def process_list_source_files(
    invocation: cli_datatypes.ListSourceFilesInvocation
) -> cli_datatypes.ListSourceFilesResult:
    """Compute values displayed in list_source_files

    This method is a helper method that computes the values displayed in
    list_source_files. (Some of the returned values may not be displayed,
    depending on CLI options.)

    Args:
        invocation: A CLI invocation object with the requisite user input.

    Returns:
        A CLI response object with the required processed data.
    """

    _, _, _, source_methods = analyze.analyze_json(
        invocation.data_json, invocation.root_dir)

    # Methods without region tags are irrelevant to file listings.
    tagged_methods = [m for m in source_methods if m['region_tags']]

    # "any" sets: a file appears if at least one of its methods qualifies.
    any_tested_files = {m['source_path'] for m in tagged_methods
                        if m['test_methods']}
    any_untested_files = {m['source_path'] for m in tagged_methods
                          if not m['test_methods']}

    all_files = {m['source_path'] for m in tagged_methods}

    # "all tested" = every method tested; "not tested" = no method tested.
    all_tested_files = [path for path in any_tested_files
                        if path not in any_untested_files]
    not_tested_files = [path for path in any_untested_files
                        if path not in any_tested_files]

    return cli_datatypes.ListSourceFilesResult(
        all_files,
        all_tested_files,
        any_tested_files,
        not_tested_files,
    )
# Example #9
def process_list_region_tags(
    invocation: cli_datatypes.ListRegionTagsInvocation
) -> cli_datatypes.ListRegionTagsResult:
    """Compute values displayed in list_region_tags

    This method is a helper method that computes the values displayed in
    list_region_tags. (Some of the returned values may not be displayed,
    depending on CLI options.)

    Args:
        invocation: A CLI invocation object with the requisite user input.

    Returns:
        A CLI response object with the required processed data.
    """
    grep_tags, source_tags, ignored_tags, source_methods = (
        analyze.analyze_json(invocation.data_json, invocation.root_dir))

    def _get_test_count_str(region_tag):
        # Total number of tests across all methods carrying this tag.
        total_tests = sum(
            len(method['test_methods']) for method in source_methods
            if region_tag in method['region_tags'])

        return f'({total_tests} test(s))'

    test_count_map = {tag: _get_test_count_str(tag) for tag in source_tags}

    # Tags grep found that neither source parsing nor the ignore list explain.
    undetected_tags = [
        tag for tag in grep_tags
        if tag not in source_tags and tag not in ignored_tags
    ]

    return cli_datatypes.ListRegionTagsResult(source_methods, source_tags,
                                              undetected_tags, ignored_tags,
                                              test_count_map)
    def _get_analyze_result(self):
        """Analyze the top-level test fixture and stash the results on self."""
        json_path = os.path.join(_TEST_DIR, 'polyglot_snippet_data.json')

        (self.grep_tags, self.source_tags, self.ignored_tags,
         self.source_methods) = analyze.analyze_json(json_path, _TEST_DIR)