def test_seeding_safelist(local_models, non_local_models, **kwargs):
    """
    Test the success case for seeding the safelist.
    """
    mock_get_models_requiring_annotations = kwargs['get_models_requiring_annotations']
    mock_get_models_requiring_annotations.return_value = (
        local_models,
        non_local_models,
        0,  # Number of total models found, irrelevant here
        []  # List of model ids that need annotations, irrelevant here
    )

    def test_safelist_callback():
        assert os.path.exists(DEFAULT_FAKE_SAFELIST_PATH)
        with open(DEFAULT_FAKE_SAFELIST_PATH) as fake_safelist_file:
            fake_safelist = fake_safelist_file.read()
        for model in non_local_models:
            assert DjangoSearch.get_model_id(model) in fake_safelist
        for model in local_models:
            assert DjangoSearch.get_model_id(model) not in fake_safelist

    result = call_script_isolated(
        ['django_find_annotations', '--config_file', 'test_config.yml', '--seed_safelist'],
        test_filesystem_cb=test_safelist_callback,
        fake_safelist_data=None
    )
    assert result.exit_code == EXIT_CODE_SUCCESS
    assert 'Successfully created safelist file' in result.output
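
# Each example below pulls its mock out of ``**kwargs``. A minimal sketch of how that
# wiring could look (an assumption for illustration, not part of the original listing):
# mock.patch.multiple passes every patched attribute to the decorated test as a keyword
# argument, so patching DjangoSearch.get_models_requiring_annotations makes it available
# as kwargs['get_models_requiring_annotations'].
from unittest.mock import DEFAULT, patch


@patch.multiple(DjangoSearch, get_models_requiring_annotations=DEFAULT)
def _example_mock_wiring(**kwargs):
    mock_get_models = kwargs['get_models_requiring_annotations']
    # Return value shape: (local models, non-local models, total model count, model ids
    # still needing annotations), matching how the tests in this listing configure the mock.
    mock_get_models.return_value = (set(), set(), 0, [])
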
def test_find_django_model_in_safelist_annotated(**kwargs):
    """
    Test that a safelisted model succeeds.
    """
    test_models = {
        FakeBaseModelNoAnnotation,
    }
    mock_get_models_requiring_annotations = kwargs[
        'get_models_requiring_annotations']
    mock_get_models_requiring_annotations.return_value = (test_models, set(),
                                                          0, [])

    fake_safelist_data = """
    {
        fake_app_1.FakeBaseModelNoAnnotation: {".. no_pii:": "This model is annotated."}
    }
    """

    result = call_script_isolated(
        [
            'django_find_annotations', '--config_file', 'test_config.yml',
            '--lint', '--report'
        ],
        fake_safelist_data=fake_safelist_data,
    )

    assert result.exit_code == EXIT_CODE_SUCCESS
    assert 'Linting passed without errors' in result.output
    assert 'Generating report to' in result.output
    assert 'Search found 1 annotations' in result.output
def test_coverage_all_models(mock_get_app_configs, mock_is_non_local,
                             mock_setup_django, mock_issubclass):
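    """
    Test that the coverage check finds all of the fake models and reports covered vs. uncovered ones.
    """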
    # Lots of fakery going on here. This class mocks Django AppConfigs to deliver our fake models.
    class FakeAppConfig:
        def get_models(self):
            return ALL_FAKE_MODELS

    # This lets us deterministically decide that one model is local, and the other isn't, for testing both branches.
    mock_is_non_local.side_effect = [True, False] * 8

    # This just fakes setting up Django
    mock_setup_django.return_value = True

    # This mocks out Django's get_app_configs to return our fake AppConfig
    mock_get_app_configs.return_value = [FakeAppConfig()]

    # This lets us pretend that all of our fake models inherit from Django's model.Model.
    # If we try to do that inheritance Django will throw errors unless we do a full Django
    # testing setup.
    mock_issubclass.return_value = True

    result = call_script_isolated([
        'django_find_annotations', '--config_file', 'test_config.yml',
        '--coverage', '-vvv'
    ], )

    assert result.exit_code == EXIT_CODE_SUCCESS
    assert 'Found 11 total models.' in result.output
    assert 'Coverage is 66.7%' in result.output
    assert 'Coverage found 3 uncovered models:' in result.output
    assert 'Search found 10 annotations' in result.output
def test_find_django_model_in_safelist_not_annotated(**kwargs):
    """
    Test that a safelisted model with no annotations fails.
    """
    test_models = {
        FakeBaseModelNoAnnotation,
    }
    mock_get_models_requiring_annotations = kwargs[
        'get_models_requiring_annotations']
    mock_get_models_requiring_annotations.return_value = (test_models, set(),
                                                          0, [])

    fake_safelist_data = """
    {
        fake_app_1.FakeBaseModelNoAnnotation: {}
    }
    """

    result = call_script_isolated(
        [
            'django_find_annotations', '--config_file', 'test_config.yml',
            '--lint', '--report'
        ],
        fake_safelist_data=fake_safelist_data,
    )

    assert result.exit_code == EXIT_CODE_FAILURE
    assert 'fake_app_1.FakeBaseModelNoAnnotation is in the safelist but has no annotations!' in result.output
    assert '1 errors:' in result.output
    assert 'Generating report to' not in result.output
def test_safelist_exists(**kwargs):
    """
    Test that seeding fails when the safelist file already exists.
    """
    mock_get_models_requiring_annotations = kwargs['get_models_requiring_annotations']
    mock_get_models_requiring_annotations.return_value = ([], [], 0, [])

    result = call_script_isolated(
        ['django_find_annotations', '--config_file', 'test_config.yml', '--seed_safelist']
    )
    assert result.exit_code == EXIT_CODE_FAILURE
    assert 'already exists, not overwriting.' in result.output
def test_coverage_thresholds(local_models, should_succeed, expected_message,
                             **kwargs):
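    """
    Test that the coverage check passes or fails depending on the configured threshold.
    """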
    mock_get_models_requiring_annotations = kwargs[
        'get_models_requiring_annotations']
    mock_get_models_requiring_annotations.return_value = (
        set(local_models), set(), len(local_models),
        [DjangoSearch.get_model_id(m) for m in local_models])

    result = call_script_isolated([
        'django_find_annotations', '--config_file', 'test_config.yml',
        '--coverage', '-vvv'
    ], )

    assert result.exit_code == (EXIT_CODE_SUCCESS if should_succeed else EXIT_CODE_FAILURE)
    assert expected_message in result.output
def test_find_django_no_viable_models(**kwargs):
    """
    Tests the case where the search finds no models requiring annotations, with an empty safelist.
    """
    mock_get_models_requiring_annotations = kwargs[
        'get_models_requiring_annotations']
    mock_get_models_requiring_annotations.return_value = (set(), set(), 0, [])

    result = call_script_isolated([
        'django_find_annotations', '--config_file', 'test_config.yml',
        '--lint', '--report'
    ], )

    assert result.exit_code == EXIT_CODE_SUCCESS
    assert 'Linting passed without errors' in result.output
    assert 'Generating report to' in result.output
    assert 'Search found 0 annotations' in result.output
def test_find_django_without_report(**kwargs):
    """
    Tests that no report is written when the --report option is not requested.
    """
    test_models = {FakeChildModelSingleAnnotation}
    mock_get_models_requiring_annotations = kwargs[
        'get_models_requiring_annotations']
    mock_get_models_requiring_annotations.return_value = (test_models, set(),
                                                          0, [])

    result = call_script_isolated([
        'django_find_annotations', '--config_file', 'test_config.yml', '--lint'
    ])

    assert result.exit_code == EXIT_CODE_SUCCESS
    assert 'Linting passed without errors' in result.output
    assert 'Generating report to' not in result.output
def test_find_django_simple_success(**kwargs):
    """
    Tests the basic success case where every model is either annotated or covered by the safelist.
    """
    test_models = {
        FakeChildModelSingleAnnotation, FakeChildModelMultiAnnotation,
        FakeChildModelSingleWithAnnotation, FakeBaseModelWithNoDocstring
    }
    mock_get_models_requiring_annotations = kwargs[
        'get_models_requiring_annotations']
    mock_get_models_requiring_annotations.return_value = (
        test_models, set(), len(test_models),
        [DjangoSearch.get_model_id(m) for m in test_models])

    def report_callback(report_contents):
        """
        Get the text of the report and make sure all of the expected models are in it.

        Args:
            report_contents:

        Returns:
            Raw text contents of the generated report file
        """
        for model in test_models:
            assert 'object_id: {}'.format(
                DjangoSearch.get_model_id(model)) in report_contents

    fake_safelist = """
    fake_app_2.FakeBaseModelWithNoDocstring:
        ".. no_pii:": "No PII"
    """

    result = call_script_isolated([
        'django_find_annotations', '--config_file', 'test_config.yml',
        '--lint', '--report', '--coverage', '-vvv'
    ],
                                  test_filesystem_report_cb=report_callback,
                                  fake_safelist_data=fake_safelist)

    assert result.exit_code == EXIT_CODE_SUCCESS
    assert 'Linting passed without errors' in result.output
    assert 'Generating report to' in result.output
    assert 'Search found 6 annotations' in result.output
def test_find_django_no_safelist(**kwargs):
    """
    Test that we fail when there is no safelist.
    """
    mock_get_models_requiring_annotations = kwargs[
        'get_models_requiring_annotations']
    mock_get_models_requiring_annotations.return_value = (set(), set(), 0, [])

    result = call_script_isolated(
        [
            'django_find_annotations', '--config_file', 'test_config.yml',
            '--lint', '--report'
        ],
        fake_safelist_data=None,
    )

    assert result.exit_code == EXIT_CODE_FAILURE
    assert 'Safelist not found!' in result.output
    assert 'Generating report to' not in result.output
def test_find_django_ordering_error(**kwargs):
    """
    Tests broken annotations to make sure the error paths work.
    """
    test_models = {
        FakeChildModelSingleAnnotation,
        FakeChildModelMultiWithBrokenAnnotations
    }
    mock_get_models_requiring_annotations = kwargs[
        'get_models_requiring_annotations']
    mock_get_models_requiring_annotations.return_value = (test_models, set(),
                                                          0, [])

    result = call_script_isolated([
        'django_find_annotations', '--config_file', 'test_config.yml',
        '--lint', '--report'
    ])

    assert result.exit_code == EXIT_CODE_FAILURE
    assert "missing non-optional annotation: '.. pii:'" in result.output
def test_find_django_no_docstring(**kwargs):
    """
    Test that a model with no docstring doesn't break anything.
    """
    test_models = {
        FakeBaseModelWithNoDocstring,
    }
    mock_get_models_requiring_annotations = kwargs[
        'get_models_requiring_annotations']
    mock_get_models_requiring_annotations.return_value = (test_models, set(),
                                                          0, [])
    result = call_script_isolated([
        'django_find_annotations', '--config_file', 'test_config.yml',
        '--lint', '--report', '-vv'
    ])

    assert result.exit_code == EXIT_CODE_SUCCESS
    assert 'fake_app_2.FakeBaseModelWithNoDocstring has no annotations' in result.output
    assert 'Linting passed without errors.' in result.output
    assert 'Generating report to' in result.output
def test_find_django_model_not_annotated(**kwargs):
    """
    Test that a model with no annotations still passes linting.
    """
    test_models = {
        FakeBaseModelNoAnnotation,
    }
    mock_get_models_requiring_annotations = kwargs[
        'get_models_requiring_annotations']
    mock_get_models_requiring_annotations.return_value = (test_models, set(),
                                                          0, [])

    result = call_script_isolated([
        'django_find_annotations', '--config_file', 'test_config.yml',
        '--lint', '--report', '-vv'
    ])

    assert result.exit_code == EXIT_CODE_SUCCESS
    assert 'fake_app_1.FakeBaseModelNoAnnotation has no annotations' in result.output
    assert 'Linting passed without errors.' in result.output
    assert 'Generating report to' in result.output
def test_listing_local_models(local_model_ids, non_local_model_ids, **kwargs):
    """
    Test the success case for listing local models.
    """
    mock_get_models_requiring_annotations = kwargs[
        'get_models_requiring_annotations']
    mock_get_models_requiring_annotations.return_value = (
        local_model_ids,
        non_local_model_ids,
        0,  # Number of total models found, irrelevant here
        []  # List of model ids that need annotations, irrelevant here
    )
    result = call_script_isolated([
        'django_find_annotations', '--config_file', 'test_config.yml',
        '--list_local_models'
    ])
    assert result.exit_code == 0
    if not local_model_ids:
        assert 'No local models requiring annotations.' in result.output
    else:
        assert 'Listing {} local models requiring annotations'.format(
            len(local_model_ids)) in result.output
def test_find_django_in_safelist_and_annotated(**kwargs):
    """
    Test that a model which is annotated and also in the safelist fails.
    """
    test_models = {
        FakeBaseModelWithAnnotation,
    }
    mock_get_models_requiring_annotations = kwargs[
        'get_models_requiring_annotations']
    mock_get_models_requiring_annotations.return_value = (test_models, set(),
                                                          0, [])

    result = call_script_isolated(
        [
            'django_find_annotations', '--config_file', 'test_config.yml',
            '--lint', '--report'
        ],
        fake_safelist_data='{{{}: ".. no_pii:"}}'.format(
            DjangoSearch.get_model_id(FakeBaseModelWithAnnotation)))

    assert result.exit_code == EXIT_CODE_FAILURE
    assert 'fake_app_2.FakeBaseModelWithAnnotation is annotated, but also in the safelist.' in result.output
    assert '1 errors:' in result.output
    assert 'Generating report to' not in result.output
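
# The ``call_script_isolated`` helper used throughout these examples is not shown in the
# listing. Below is a minimal sketch of what such a helper could look like, built on
# click's CliRunner. The ``entry_point`` import, the handling of the config file and
# report, and the output parsing are assumptions for illustration only, not the actual
# implementation from the code_annotations test suite.
from click.testing import CliRunner

from code_annotations.cli import entry_point  # assumed location of the click command group


def _call_script_isolated_sketch(script_args, test_filesystem_cb=None,
                                 test_filesystem_report_cb=None, fake_safelist_data='{}'):
    runner = CliRunner()
    with runner.isolated_filesystem():
        # The real helper presumably also copies a test_config.yml fixture in here.
        if fake_safelist_data is not None:
            with open(DEFAULT_FAKE_SAFELIST_PATH, 'w') as safelist_file:
                safelist_file.write(fake_safelist_data)

        result = runner.invoke(entry_point, script_args)

        if test_filesystem_cb:
            # Let the caller assert against files created inside the isolated filesystem.
            test_filesystem_cb()
        if test_filesystem_report_cb:
            # Assumes the CLI names the report file on a 'Generating report to ...' line.
            for line in result.output.splitlines():
                if line.startswith('Generating report to'):
                    report_path = line.replace('Generating report to', '').strip()
                    with open(report_path) as report_file:
                        test_filesystem_report_cb(report_file.read())
    return result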