# Example no. 1 (0)
def _get_renderer_dict_for_expectation(expectation_name):
    """Instantiate the named registered Expectation, run it against its own
    example data, and return the renderer dict built from the first example
    validation result.

    The dict is also pretty-printed so a failing assertion in the caller is
    easy to debug from the pytest output.
    """
    my_expectation = _registered_expectations[expectation_name]()

    # NOTE(review): the original code bound this return value but never used
    # it; the call is kept in case it has side effects -- confirm before
    # removing it entirely.
    my_expectation._get_supported_renderers(expectation_name)

    examples = my_expectation._get_examples()
    example_data, example_test = my_expectation._choose_example(examples)

    my_batch = Batch(data=example_data)

    my_expectation_config = ExpectationConfiguration(
        expectation_type=expectation_name,
        kwargs=example_test,
    )

    my_validation_results = my_expectation._instantiate_example_validation_results(
        test_batch=my_batch,
        expectation_config=my_expectation_config,
    )
    my_validation_result = my_validation_results[0]

    renderer_dict = my_expectation._get_renderer_dict(
        expectation_name,
        my_expectation_config,
        my_validation_result,
    )
    print(json.dumps(renderer_dict, indent=2))
    return renderer_dict


def test_expectation__get_renderers():
    """_get_renderer_dict should produce the expected standard/custom renderer
    output for Expectations with custom, inherited, and no renderers."""

    # Expectation that defines its own renderers.
    renderer_dict = _get_renderer_dict_for_expectation(
        "expect_column_values_to_match_regex")
    assert renderer_dict == {
        "standard": {
            "renderer.answer":
            'Less than 90.0% of values in column "a" match the regular expression ^a.',
            "renderer.diagnostic.unexpected_statement":
            "\n\n1 unexpected values found. 20% of 5 total rows.",
            "renderer.diagnostic.observed_value":
            "20% unexpected",
            "renderer.diagnostic.status_icon":
            "",
            "renderer.diagnostic.unexpected_table":
            None,
            "renderer.prescriptive":
            "a values must match this regular expression: ^a, at least 90 % of the time.",
            "renderer.question":
            'Do at least 90.0% of values in column "a" match the regular expression ^a?',
        },
        "custom": [],
    }

    # Expectation with no new renderers specified: prescriptive falls back to
    # rendering the raw configuration, answer/question render as None.
    print([x for x in _registered_expectations.keys() if "second" in x])
    renderer_dict = _get_renderer_dict_for_expectation(
        "expect_column_values_to_equal_three___second_iteration")
    assert renderer_dict == {
        "standard": {
            "renderer.answer": None,
            "renderer.diagnostic.observed_value": "20% unexpected",
            "renderer.diagnostic.status_icon": "",
            "renderer.diagnostic.unexpected_statement": "",
            "renderer.diagnostic.unexpected_table": None,
            "renderer.prescriptive":
            "expect_column_values_to_equal_three___second_iteration(**{'column': 'mostly_threes', 'mostly': 0.6})",
            "renderer.question": None,
        },
        "custom": [],
    }

    # Expectation with no renderers specified.
    # Fixed copy-paste bug: this debug print previously filtered on "second".
    print([x for x in _registered_expectations.keys() if "third" in x])
    renderer_dict = _get_renderer_dict_for_expectation(
        "expect_column_values_to_equal_three___third_iteration")
    assert renderer_dict == {
        "standard": {
            "renderer.answer":
            'At least 60.0% of values in column "mostly_threes" equal 3.',
            "renderer.diagnostic.observed_value":
            "20% unexpected",
            "renderer.diagnostic.status_icon":
            "",
            "renderer.diagnostic.unexpected_statement":
            "",
            "renderer.diagnostic.unexpected_table":
            None,
            "renderer.prescriptive":
            "mostly_threes values must be equal to 3, at least 60 % of the time.",
            "renderer.question":
            'Do at least 60.0% of values in column "mostly_threes" equal 3?',
        },
        "custom": [],
    }
# Example no. 2 (0)
def _get_renderer_diagnostics_for_expectation(
    expectation_name, registered_metrics, registered_renderers
):
    """Run the named registered Expectation's gallery examples end to end and
    return its renderer diagnostics list."""
    my_expectation = _registered_expectations[expectation_name]()

    examples = my_expectation._get_examples()
    my_expectation_config = my_expectation._get_expectation_configuration_from_examples(
        examples)
    my_metric_diagnostics_list = my_expectation._get_metric_diagnostics_list(
        expectation_config=my_expectation_config)
    my_execution_engine_diagnostics = my_expectation._get_execution_engine_diagnostics(
        metric_diagnostics_list=my_metric_diagnostics_list,
        registered_metrics=registered_metrics,
    )
    my_test_results = my_expectation._get_test_results(
        expectation_type=expectation_name,
        test_data_cases=examples,
        execution_engine_diagnostics=my_execution_engine_diagnostics,
        raise_exceptions_for_backends=False,
    )
    return my_expectation._get_renderer_diagnostics(
        expectation_type=expectation_name,
        test_diagnostics=my_test_results,
        registered_renderers=registered_renderers,
    )


def _check_renderer_diagnostics(renderer_diagnostics, expected_names):
    """Assert the diagnostics list has the right shape, element type, and
    exactly the expected renderer names; pretty-print each for debugging."""
    assert isinstance(renderer_diagnostics, list)
    assert len(renderer_diagnostics) == len(expected_names)
    for element in renderer_diagnostics:
        print(json.dumps(element.to_dict(), indent=2))
        assert isinstance(element, ExpectationRendererDiagnostics)
    print([rd.name for rd in renderer_diagnostics])
    assert {rd.name for rd in renderer_diagnostics} == expected_names


def test_expectation__get_renderers():
    """_get_renderer_diagnostics should report the same ten renderer names for
    Expectations with custom, inherited, and no renderers.

    NOTE(review): this redefines test_expectation__get_renderers from earlier
    in this file, shadowing it so only this version runs -- confirm whether
    the earlier definition should be renamed or removed.
    """
    from great_expectations.expectations.registry import (
        _registered_metrics,
        _registered_renderers,
    )

    # All three expectations are expected to report exactly these renderers.
    expected_names = {
        "renderer.diagnostic.unexpected_statement",
        "renderer.diagnostic.meta_properties",
        "renderer.diagnostic.unexpected_table",
        "renderer.diagnostic.status_icon",
        "renderer.answer",
        "atomic.prescriptive.summary",
        "atomic.diagnostic.observed_value",
        "renderer.question",
        "renderer.prescriptive",
        "renderer.diagnostic.observed_value",
    }

    # Expectation that defines its own renderers.
    renderer_diagnostics = _get_renderer_diagnostics_for_expectation(
        "expect_column_values_to_match_regex",
        _registered_metrics,
        _registered_renderers,
    )
    _check_renderer_diagnostics(renderer_diagnostics, expected_names)

    # Expectation with no new renderers specified.
    print([x for x in _registered_expectations.keys() if "second" in x])
    renderer_diagnostics = _get_renderer_diagnostics_for_expectation(
        "expect_column_values_to_equal_three___second_iteration",
        _registered_metrics,
        _registered_renderers,
    )
    _check_renderer_diagnostics(renderer_diagnostics, expected_names)

    # Expectation with no renderers specified.
    # Fixed copy-paste bug: this debug print previously filtered on "second".
    print([x for x in _registered_expectations.keys() if "third" in x])
    renderer_diagnostics = _get_renderer_diagnostics_for_expectation(
        "expect_column_values_to_equal_three___third_iteration",
        _registered_metrics,
        _registered_renderers,
    )
    _check_renderer_diagnostics(renderer_diagnostics, expected_names)