Example #1
def generate_docs(
        config_file,
        verbosity,
        report_files
):
    """
    Generate documentation from a code annotations report.
    """
    start_time = datetime.datetime.now()

    try:
        config = AnnotationConfig(config_file, verbosity)

        for key in (
                'report_template_dir',
                'rendered_report_dir',
                'rendered_report_file_extension',
                'rendered_report_source_link_prefix'
        ):
            if not getattr(config, key):
                raise ConfigurationException(f"No {key} key in {config_file}")

        config.echo("Rendering the following reports: \n{}".format("\n".join([r.name for r in report_files])))

        renderer = ReportRenderer(config, report_files)
        renderer.render()

        elapsed = datetime.datetime.now() - start_time
        click.echo(f"Report rendered in {elapsed.total_seconds()} seconds.")
    except Exception as exc:  # pylint: disable=broad-except
        # format_exc() returns the traceback as a string; print_exc() returns None.
        click.echo(traceback.format_exc())
        fail(str(exc))
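
The snippets on this page are excerpts and omit their imports. They presumably rely on something like the preamble below: the standard-library, click, and pytest imports follow directly from usage, while the code_annotations module paths are assumptions about that package's layout, not taken from this page.

# Imports implied by the snippets on this page. The code_annotations
# module paths are assumptions about the package layout.
import datetime
import os
import sys
import traceback

import click
import pkg_resources
import pytest

from code_annotations import annotation_errors                             # assumed path
from code_annotations.base import AnnotationConfig                         # assumed path
from code_annotations.exceptions import ConfigurationException             # assumed path
from code_annotations.extensions.python import PythonAnnotationExtension   # assumed path
from code_annotations.find_django import DjangoSearch                      # assumed path
from code_annotations.find_static import StaticSearch                      # assumed path
from code_annotations.generate_docs import ReportRenderer                  # assumed path
from code_annotations.helpers import VerboseEcho, fail                     # assumed path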
Example #2
def test_annotation_errors():
    config = AnnotationConfig(
        "tests/test_configurations/.annotations_test",
        verbosity=-1,
        source_path_override="tests/extensions/python_test_files/choice_failures_1.pyt",
    )
    search = StaticSearch(config)
    results = search.search()
    search.check_results(results)

    # The first error should be an invalid choice error
    annotation, error_type, args = search.annotation_errors[0]
    assert {
        "annotation_data": ["doesnotexist"],
        "annotation_token": ".. ignored:",
        "filename": "choice_failures_1.pyt",
        "found_by": "python",
        "line_number": 1,
    } == annotation
    assert annotation_errors.InvalidChoice == error_type
    assert (
        "doesnotexist",
        ".. ignored:",
        ["irrelevant", "terrible", "silly-silly"],
    ) == args
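
Reconstructed from the assertions above (the fixture itself is not shown on this page), line 1 of choice_failures_1.pyt presumably carries an ".. ignored:" annotation in a Python comment, with data that is not among the configured choices ("irrelevant", "terrible", "silly-silly"):

# .. ignored: doesnotexist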
Example #3
def find_annotations(source_path, config_path, group_by_key):
    """
    Find the feature toggles as defined in the configuration file.

    Return:
        toggles (dict): feature toggles indexed by name.
    """
    config = AnnotationConfig(config_path,
                              verbosity=-1,
                              source_path_override=source_path)
    search = StaticSearch(config)
    all_results = search.search()
    toggles = {}
    for filename in all_results:
        for annotations in search.iter_groups(all_results[filename]):
            current_entry = {}
            for annotation in annotations:
                key = annotation["annotation_token"]
                value = annotation["annotation_data"]
                if key == group_by_key:
                    toggle_name = value
                    toggles[toggle_name] = current_entry
                    current_entry["filename"] = filename
                    current_entry["line_number"] = annotation["line_number"]
                else:
                    current_entry[key] = value

    return toggles
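
A minimal usage sketch for find_annotations(); the paths and the group-by token below are illustrative placeholders, not values from this page:

# Hypothetical invocation; the source path, config path, and the
# ".. toggle_name:" token are illustrative assumptions.
toggles = find_annotations(
    source_path="my_project/",
    config_path=".annotations",
    group_by_key=".. toggle_name:",
)
for name, entry in toggles.items():
    # Every entry records where its annotation group was found.
    print(name, entry["filename"], entry["line_number"])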
Example #4
def test_annotation_configuration_errors(test_config, expected_message):
    with pytest.raises(ConfigurationException) as exception:
        AnnotationConfig(f'tests/test_configurations/{test_config}', None, 3)

    exc_msg = str(exception.value)
    assert expected_message in exc_msg
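
The test_config and expected_message arguments are presumably injected by a pytest.mark.parametrize decorator that the excerpt omits; the same applies to Examples #5 and #11. A plausible sketch, with illustrative config names and message fragments:

# Assumed parametrization; the config file names and expected message
# fragments are illustrative, not taken from this page.
@pytest.mark.parametrize("test_config,expected_message", [
    (".annotations_test_missing_key", "required keys are missing"),
    (".annotations_test_broken_yaml", "could not be parsed"),
])
def test_annotation_configuration_errors(test_config, expected_message):
    ...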
Example #5
def test_missing_config(test_config, expected_message):
    with pytest.raises(ConfigurationException) as exception:
        AnnotationConfig(f'tests/test_configurations/{test_config}', None, 3)

    exc_msg = str(exception.value)
    assert "required keys are missing from the configuration file" in exc_msg
    assert expected_message in exc_msg
Example #6
def test_multi_line_annotations(test_file, annotations):
    config = AnnotationConfig('tests/test_configurations/.annotations_test')
    annotator = PythonAnnotationExtension(config, VerboseEcho())

    with open(f'tests/extensions/python_test_files/{test_file}', encoding='utf-8') as fi:
        result_annotations = annotator.search(fi)

    assert len(annotations) == len(result_annotations)
    for annotation, result_annotation in zip(annotations, result_annotations):
        assert result_annotation['annotation_token'] == annotation[0]
        assert result_annotation['annotation_data'] == annotation[1]
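
Each expected annotation is compared as an (annotation_token, annotation_data) pair, so the annotations parameter presumably looks like the sketch below; the tokens and data are illustrative, not taken from this page:

# Assumed shape of the `annotations` parameter: (token, data) pairs in
# the order they appear in the test file. Values are illustrative.
annotations = [
    (".. pii:", "A multi-line description of the PII stored here"),
    (".. pii_types:", ["id", "name"]),
]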
Example #7
def __init__(self, *args, **kwargs):
    super().__init__(*args, **kwargs)
    self.config_search = []
    for config_filename in self.CONFIG_FILENAMES:
        config_path = pkg_resources.resource_filename(
            "code_annotations",
            os.path.join("contrib", "config", config_filename),
        )
        config = AnnotationConfig(config_path, verbosity=-1)
        search = StaticSearch(config)
        self.config_search.append((config, search))
    # Annotations collected for the module currently being checked;
    # initialized once, after all bundled configs are loaded.
    self.current_module_annotations = []
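
pkg_resources is deprecated in current setuptools; on Python 3.9+ the bundled config files could be located with importlib.resources instead. A sketch, assuming the same contrib/config layout inside the package:

# Standard-library alternative to pkg_resources.resource_filename();
# assumes code_annotations ships its configs under contrib/config.
from importlib.resources import as_file, files

resource = files("code_annotations") / "contrib" / "config" / config_filename
with as_file(resource) as config_path:
    config = AnnotationConfig(str(config_path), verbosity=-1)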
Example #8
def static_find_annotations(config_file, source_path, report_path, verbosity, lint, report):
    """
    Subcommand to find annotations via static file analysis.
    """
    try:
        start_time = datetime.datetime.now()
        config = AnnotationConfig(config_file, report_path, verbosity, source_path)
        searcher = StaticSearch(config)
        all_results = searcher.search()

        if lint:
            click.echo("Performing linting checks...")
            # Check grouping and choices
            searcher.check_results(all_results)

            # If there are any errors, do not generate the report
            if searcher.errors:
                click.secho("\nSearch failed due to linting errors!", fg="red")
                click.secho("{} errors:".format(len(searcher.errors)), fg="red")
                click.secho("---------------------------------", fg="red")
                click.echo("\n".join(searcher.errors))
                sys.exit(1)
            click.echo("Linting passed without errors.")

        if report:
            click.echo("Writing report...")
            report_filename = searcher.report(all_results)
            click.echo(f"Report written to {report_filename}.")

        elapsed = datetime.datetime.now() - start_time
        annotation_count = sum(len(annotations) for annotations in all_results.values())
        click.echo(f"Search found {annotation_count} annotations in {elapsed.total_seconds()} seconds.")

    except Exception as exc:  # pylint: disable=broad-except
        click.echo(traceback.format_exc())
        fail(str(exc))
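
The function reads like the body of a click subcommand; a plausible decorator stack is sketched below. The option names and defaults are assumptions for illustration, not the tool's documented CLI:

# Illustrative click wiring for the subcommand above; option names and
# defaults are assumptions.
@click.command("static_find_annotations")
@click.option("--config_file", default=".annotations", type=click.Path(exists=True))
@click.option("--source_path", default=None, type=click.Path(exists=True))
@click.option("--report_path", default=None)
@click.option("-v", "--verbosity", count=True)
@click.option("--lint/--no_lint", default=True)
@click.option("--report/--no_report", default=True)
def static_find_annotations(config_file, source_path, report_path, verbosity, lint, report):
    ...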
Example #9
def django_find_annotations(
        config_file,
        seed_safelist,
        list_local_models,
        app_name,
        report_path,
        verbosity,
        lint,
        report,
        coverage
):
    """
    Subcommand for dealing with annotations in Django models.
    """
    try:
        start_time = datetime.datetime.now()
        config = AnnotationConfig(config_file, report_path, verbosity)
        searcher = DjangoSearch(config)

        # Early out if we're trying to do coverage, but a coverage target is not configured
        if coverage and not config.coverage_target:
            raise ConfigurationException(
                "Please add 'coverage_target' to your configuration before running --coverage"
            )

        if seed_safelist:
            searcher.seed_safelist()

        if list_local_models:
            searcher.list_local_models()

        if lint or report or coverage:
            annotated_models = searcher.search()

            if lint:
                click.echo("Performing linting checks...")

                # Check grouping and choices
                if not searcher.check_results(annotated_models):
                    click.secho("\nSearch failed due to linting errors!", fg="red")
                    click.secho("{} errors:".format(len(searcher.errors)), fg="red")
                    click.secho("---------------------------------", fg="red")
                    click.echo("\n".join(searcher.errors))
                    # If there are any errors, do not continue
                    sys.exit(1)
                click.echo("Linting passed without errors.")

            if coverage:
                if not searcher.check_coverage():
                    # If there are any errors, do not continue
                    sys.exit(1)

                click.echo("Coverage passed without errors.")

            if report:
                searcher.report(annotated_models, app_name)

            annotation_count = sum(len(annotations) for annotations in annotated_models.values())

            elapsed = datetime.datetime.now() - start_time
            click.echo(f"Search found {annotation_count} annotations in {elapsed.total_seconds()} seconds.")

    except Exception as exc:  # pylint: disable=broad-except
        click.echo(traceback.format_exc())
        fail(str(exc))
Example #10
def test_coverage_target_int():
    # We just care that this doesn't throw an exception. The second and
    # third positional arguments are presumably the report path override
    # and the verbosity level, as in Example #8.
    AnnotationConfig('tests/test_configurations/.annotations_test_coverage_int', None, 3)
Example #11
def test_bad_coverage_targets(test_config, expected_message):
    with pytest.raises(ConfigurationException) as exception:
        AnnotationConfig(f'tests/test_configurations/{test_config}', None, 3)

    exc_msg = str(exception.value)
    assert expected_message in exc_msg