def test_safelist_callback():
    """
    Verify the generated fake safelist lists every non-local model and no local ones.
    """
    assert os.path.exists(DEFAULT_FAKE_SAFELIST_PATH)
    with open(DEFAULT_FAKE_SAFELIST_PATH) as safelist_file:
        safelist_text = safelist_file.read()
    # Every non-local model id must appear in the safelist...
    for non_local_model in non_local_models:
        assert DjangoSearch.get_model_id(non_local_model) in safelist_text
    # ...and no local model id should be present.
    for local_model in local_models:
        assert DjangoSearch.get_model_id(local_model) not in safelist_text
def test_get_models_requiring_annotations(mock_get_app_configs, mock_is_non_local, mock_setup_django, mock_issubclass):
    """
    Exercise DjangoSearch.get_models_requiring_annotations against two fake
    models, one classified local and one non-local, covering both branches.
    """
    # Lots of fakery going on here. This class mocks Django AppConfigs to deliver our fake models.
    class FakeAppConfig:
        def get_models(self):
            return [FakeBaseModelBoring, FakeBaseModelBoringWithAnnotations]

    # Deterministically decide that one model is non-local and the other is local.
    mock_is_non_local.side_effect = [True, False]
    # Pretend Django setup succeeded.
    mock_setup_django.return_value = True
    # Have Django's get_app_configs hand back our fake AppConfig.
    mock_get_app_configs.return_value = [FakeAppConfig()]
    # Pretend all of our fake models inherit from django's model.Model. Doing the
    # real inheritance would require a full Django testing setup.
    mock_issubclass.return_value = True

    local_models, non_local_models, total, needing_annotations = \
        DjangoSearch.get_models_requiring_annotations()

    assert len(local_models) == 1
    assert len(non_local_models) == 1
    assert list(local_models)[0] == FakeBaseModelBoringWithAnnotations
    assert list(non_local_models)[0] == FakeBaseModelBoring
    assert total == 2
    assert len(needing_annotations) == 2
def report_callback(report_contents):
    """
    Assert that every expected model id appears in the generated report text.

    Args:
        report_contents: Raw text contents of the generated report file.
    """
    for model in test_models:
        model_id = DjangoSearch.get_model_id(model)
        assert 'object_id: {}'.format(model_id) in report_contents
def test_coverage_thresholds(local_models, should_succeed, expected_message, **kwargs):
    """
    Run the --coverage subcommand against a mocked set of local models and
    check both the exit code and the expected message in the output.
    """
    mock_get_models = kwargs['get_models_requiring_annotations']
    mock_get_models.return_value = (
        set(local_models),
        set(),
        len(local_models),
        [DjangoSearch.get_model_id(model) for model in local_models],
    )
    result = call_script_isolated(
        ['django_find_annotations', '--config_file', 'test_config.yml', '--coverage', '-vvv'],
    )
    expected_code = EXIT_CODE_SUCCESS if should_succeed else EXIT_CODE_FAILURE
    assert result.exit_code == expected_code
    assert expected_message in result.output
def test_is_non_local_site(mock_getsourcefile):
    """
    Try to test the various non-local paths, if the environment allows.
    """
    # This prefix discovery is duplicated from the method under test.
    non_local_path_prefixes = [
        path for path in sys.path
        if 'dist-packages' in path or 'site-packages' in path
    ]
    # If there are no matching prefixes in the test environment, the loop body
    # never runs and there's really nothing to do here.
    for prefix in non_local_path_prefixes:
        mock_getsourcefile.return_value = f'{prefix}/bar.py'
        assert DjangoSearch.is_non_local(FakeBaseModelAbstract) is True
def test_find_django_simple_success(**kwargs):
    """
    Tests the basic case where all models have annotations, with an empty safelist.
    """
    test_models = {
        FakeChildModelSingleAnnotation,
        FakeChildModelMultiAnnotation,
        FakeChildModelSingleWithAnnotation,
        FakeBaseModelWithNoDocstring,
    }
    mock_get_models = kwargs['get_models_requiring_annotations']
    mock_get_models.return_value = (
        test_models,
        set(),
        len(test_models),
        [DjangoSearch.get_model_id(model) for model in test_models],
    )

    def report_callback(report_contents):
        """
        Assert that every expected model id appears in the generated report.

        Args:
            report_contents: Raw text contents of the generated report file.
        """
        for model in test_models:
            assert 'object_id: {}'.format(DjangoSearch.get_model_id(model)) in report_contents

    fake_safelist = """
fake_app_2.FakeBaseModelWithNoDocstring:
    ".. no_pii:": "No PII"
"""

    result = call_script_isolated(
        [
            'django_find_annotations', '--config_file', 'test_config.yml',
            '--lint', '--report', '--coverage', '-vvv',
        ],
        test_filesystem_report_cb=report_callback,
        fake_safelist_data=fake_safelist,
    )

    assert result.exit_code == EXIT_CODE_SUCCESS
    assert 'Linting passed without errors' in result.output
    assert 'Generating report to' in result.output
    assert 'Search found 6 annotations' in result.output
def test_find_django_in_safelist_and_annotated(**kwargs):
    """
    Test that a model which is annotated and also in the safelist fails.
    """
    test_models = {FakeBaseModelWithAnnotation}
    mock_get_models = kwargs['get_models_requiring_annotations']
    mock_get_models.return_value = (test_models, set(), 0, [])

    # Safelist the very model that is also annotated, which linting must reject.
    safelist_yaml = '{{{}: ".. no_pii:"}}'.format(
        DjangoSearch.get_model_id(FakeBaseModelWithAnnotation)
    )
    result = call_script_isolated(
        ['django_find_annotations', '--config_file', 'test_config.yml', '--lint', '--report'],
        fake_safelist_data=safelist_yaml,
    )

    assert result.exit_code == EXIT_CODE_FAILURE
    assert 'fake_app_2.FakeBaseModelWithAnnotation is annotated, but also in the safelist.' in result.output
    assert '1 errors:' in result.output
    assert 'Generating report to' not in result.output
def django_find_annotations(
        config_file,
        seed_safelist,
        list_local_models,
        app_name,
        report_path,
        verbosity,
        lint,
        report,
        coverage
):
    """
    Subcommand for dealing with annotations in Django models.

    Args (wired up by the surrounding click option decorators):
        config_file: Path to the annotation configuration file.
        seed_safelist: If truthy, generate an initial safelist.
        list_local_models: If truthy, list local models needing annotations.
        app_name: Optional app name to scope the report to.
        report_path: Directory to write the report to.
        verbosity: Output verbosity level.
        lint: If truthy, run linting checks on found annotations.
        report: If truthy, generate the annotation report.
        coverage: If truthy, enforce the configured coverage target.
    """
    try:
        start_time = datetime.datetime.now()
        config = AnnotationConfig(config_file, report_path, verbosity)
        searcher = DjangoSearch(config)

        # Early out if we're trying to do coverage, but a coverage target is not configured
        if coverage and not config.coverage_target:
            raise ConfigurationException("Please add 'coverage_target' to your configuration before running --coverage")

        if seed_safelist:
            searcher.seed_safelist()

        if list_local_models:
            searcher.list_local_models()

        if lint or report or coverage:
            annotated_models = searcher.search()

            if lint:
                click.echo("Performing linting checks...")
                # Check grouping and choices
                if not searcher.check_results(annotated_models):
                    click.secho("\nSearch failed due to linting errors!", fg="red")
                    click.secho("{} errors:".format(len(searcher.errors)), fg="red")
                    click.secho("---------------------------------", fg="red")
                    click.echo("\n".join(searcher.errors))
                    # If there are any errors, do not continue
                    sys.exit(1)
                click.echo("Linting passed without errors.")

            if coverage:
                if not searcher.check_coverage():
                    # If there are any errors, do not continue
                    sys.exit(1)
                click.echo("Coverage passed without errors.")

            if report:
                searcher.report(annotated_models, app_name)

            # Total annotation count across every file found in the search.
            annotation_count = sum(len(annotations) for annotations in annotated_models.values())

            elapsed = datetime.datetime.now() - start_time
            click.echo("Search found {} annotations in {} seconds.".format(
                annotation_count, elapsed.total_seconds()
            ))
    except Exception as exc:  # pylint: disable=broad-except
        # Fix: traceback.print_exc() prints to stderr and returns None, so the
        # original click.echo(traceback.print_exc()) echoed the literal "None".
        # format_exc() returns the traceback text so it can actually be echoed.
        click.echo(traceback.format_exc())
        fail(str(exc))
def test_setup_django(mock_django_setup):
    """
    Smoke test for DjangoSearch.setup_django; exists purely for coverage.
    """
    mock_django_setup.return_value = True
    DjangoSearch.setup_django()
def test_is_non_local_simple():
    """
    A model defined in this repo is local, and should be reported as such.
    """
    assert DjangoSearch.is_non_local(FakeBaseModelAbstract) is False
def test_requires_annotations_not_a_model():
    """
    A class that is not a Django model should not require annotations.
    """
    assert DjangoSearch.requires_annotations(dict) is False
def test_requires_annotations_normal(mock_issubclass):
    """
    A concrete, non-proxy model should require annotations.
    """
    # Pretend the fake model subclasses django.db.models.Model.
    mock_issubclass.return_value = True
    assert DjangoSearch.requires_annotations(FakeBaseModelBoring) is True
def test_requires_annotations_proxy(mock_issubclass):
    """
    A proxy model should not require annotations.
    """
    # Pretend the fake model subclasses django.db.models.Model.
    mock_issubclass.return_value = True
    assert DjangoSearch.requires_annotations(FakeBaseModelProxy) is False
def test_requires_annotations_abstract(mock_issubclass):
    """
    An abstract model should not require annotations.
    """
    # Pretend the fake model subclasses django.db.models.Model.
    mock_issubclass.return_value = True
    assert DjangoSearch.requires_annotations(FakeBaseModelAbstract) is False