def test_StoreAction():
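    """Verify that StoreValidationResultAction persists a validation result in
    its target store and that the stored identifier round-trips intact."""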
    fake_in_memory_store = ValidationsStore(
        store_backend={
            "class_name": "InMemoryStoreBackend",
        }
    )
    stores = {"fake_in_memory_store": fake_in_memory_store}

    # Minimal stand-in for a DataContext: the action only reads
    # ge_cloud_mode and stores from it.
    class FakeDataContext:
        ge_cloud_mode = False

    data_context = FakeDataContext()
    data_context.stores = stores

    action = StoreValidationResultAction(
        data_context=data_context,
        target_store_name="fake_in_memory_store",
    )
    assert fake_in_memory_store.list_keys() == []

    action.run(
        validation_result_suite_identifier=ValidationResultIdentifier(
            expectation_suite_identifier=ExpectationSuiteIdentifier(
                expectation_suite_name="default_expectations"
            ),
            run_id=RunIdentifier(run_name="prod_20190801"),
            batch_identifier="1234",
        ),
        validation_result_suite=ExpectationSuiteValidationResult(
            success=False, results=[]
        ),
        data_asset=None,
    )

    # The fixed run_time below assumes the test clock is frozen at
    # 2019-09-26T13:42:41Z (e.g. with freezegun's @freeze_time); otherwise the
    # run_id comparison would pick up the real current time and fail.
    expected_run_id = RunIdentifier(
        run_name="prod_20190801", run_time="20190926T134241.000000Z"
    )

    assert len(fake_in_memory_store.list_keys()) == 1
    stored_identifier = fake_in_memory_store.list_keys()[0]
    assert stored_identifier.batch_identifier == "1234"
    assert (
        stored_identifier.expectation_suite_identifier.expectation_suite_name
        == "default_expectations"
    )
    assert stored_identifier.run_id == expected_run_id

    assert fake_in_memory_store.get(
        ValidationResultIdentifier(
            expectation_suite_identifier=ExpectationSuiteIdentifier(
                expectation_suite_name="default_expectations"
            ),
            run_id=expected_run_id,
            batch_identifier="1234",
        )
    ) == ExpectationSuiteValidationResult(success=False, results=[])
def test_render_DefaultJinjaPageView_meta_info():
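    """Render profiling results through ProfilingResultsPageRenderer and
    DefaultJinjaPageView, writing the resulting HTML page to disk."""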
    validation_results = ExpectationSuiteValidationResult(
        **{
            "results": [],
            "statistics": {
                "evaluated_expectations": 156,
                "successful_expectations": 139,
                "unsuccessful_expectations": 17,
                "success_percent": 89.1025641025641,
            },
            "meta": {
                "great_expectations_version": "0.7.0-beta",
                "data_asset_name": "datasource/generator/tetanusvaricella",
                "expectation_suite_name": "my_suite",
                "run_id": "2019-06-25T14:58:09.960521",
                "batch_kwargs": {
                    "path":
                    "/Users/user/project_data/public_healthcare_datasets/tetanusvaricella/tetvardata.csv",
                    "timestamp": 1561474688.693565,
                },
            },
        })

    document = ProfilingResultsPageRenderer().render(validation_results)
    html = DefaultJinjaPageView().render(document)
    with open(
        file_relative_path(
            __file__, "./output/test_render_DefaultJinjaPageView_meta_info.html"
        ),
        "w",
    ) as outfile:
        outfile.write(html)
def ge_validation_result_suite() -> ExpectationSuiteValidationResult:
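    """Build a small ExpectationSuiteValidationResult with one successful
    row-count expectation; used as a test fixture in the surrounding suite."""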
    validation_result_suite = ExpectationSuiteValidationResult(
        results=[
            {
                "success": True,
                "result": {"observed_value": 10000},
                "expectation_config": {
                    "expectation_type": "expect_table_row_count_to_be_between",
                    "kwargs": {
                        "max_value": 10000,
                        "min_value": 10000,
                        "batch_id": "010ef8c1cd417910b971f4468f024ec5",
                    },
                    "meta": {},
                },
            }
        ],
        success=True,
        statistics={
            "evaluated_expectations": 1,
            "successful_expectations": 1,
            "unsuccessful_expectations": 0,
            "success_percent": 100,
        },
        meta={
            "great_expectations_version": "v0.13.40",
            "expectation_suite_name": "asset.default",
            "run_id": {
                "run_name": "test_100",
            },
            "validation_time": "20211228T120000.000000Z",
        },
    )
    return validation_result_suite
def test_OpsgenieRenderer_validation_results_failure():
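    """Check the Opsgenie rendering of a failed validation result."""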

    validation_result_suite = ExpectationSuiteValidationResult(
        results=[],
        success=False,
        statistics={
            "evaluated_expectations": 1,
            "successful_expectations": 0,
            "unsuccessful_expectations": 1,
            "success_percent": None,
        },
        meta={
            "great_expectations_version": "v0.12.2__develop",
            "batch_kwargs": {
                "data_asset_name": "x/y/z"
            },
            "data_asset_name": {
                "datasource": "x",
                "generator": "y",
                "generator_asset": "z",
            },
            "expectation_suite_name": "default",
            "run_id": "2021-01-01T000000.000000Z",
        },
    )

    rendered_output = OpsgenieRenderer().render(validation_result_suite)

    expected_output = "Batch Validation Status: Failed ❌\nExpectation suite name: default\nData asset name: x/y/z\nRun ID: 2021-01-01T000000.000000Z\nBatch ID: data_asset_name=x/y/z\nSummary: 0 of 1 expectations were met"

    assert rendered_output == expected_output
def test_OpsgenieRenderer_checkpoint_validation_results_success():
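    """Check the Opsgenie rendering when batch identity comes from an
    active_batch_definition, as in a checkpoint-style run."""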
    batch_definition = BatchDefinition(
        datasource_name="test_datasource",
        data_connector_name="test_dataconnector",
        data_asset_name="test_data_asset",
        batch_identifiers=IDDict({"id": "my_id"}),
    )
    validation_result_suite = ExpectationSuiteValidationResult(
        results=[],
        success=True,
        statistics={
            "evaluated_expectations": 0,
            "successful_expectations": 0,
            "unsuccessful_expectations": 0,
            "success_percent": None,
        },
        meta={
            "great_expectations_version": "v0.12.2__develop",
            "active_batch_definition": batch_definition,
            "expectation_suite_name": "default",
            "run_id": "2021-01-01T000000.000000Z",
        },
    )

    rendered_output = OpsgenieRenderer().render(validation_result_suite)

    expected_output = "Batch Validation Status: Success 🎉\nExpectation suite name: default\nData asset name: test_data_asset\nRun ID: 2021-01-01T000000.000000Z\nBatch ID: ()\nSummary: 0 of 0 expectations were met"

    assert rendered_output == expected_output
def test_EmailRenderer_validation_results_with_datadocs():
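    """Check the email rendering of a validation result without Data Docs
    links, with a configured site, and with an unconfigured site."""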

    validation_result_suite = ExpectationSuiteValidationResult(
        results=[],
        success=True,
        statistics={
            "evaluated_expectations": 0,
            "successful_expectations": 0,
            "unsuccessful_expectations": 0,
            "success_percent": None,
        },
        meta={
            "great_expectations_version": "v0.8.0__develop",
            "batch_kwargs": {
                "data_asset_name": "x/y/z"
            },
            "data_asset_name": {
                "datasource": "x",
                "generator": "y",
                "generator_asset": "z",
            },
            "expectation_suite_name": "default",
            "run_id": "2019-09-25T060538.829112Z",
        },
    )

    rendered_output = EmailRenderer().render(validation_result_suite)

    expected_output = (
        "default: Success 🎉",
        '<p><strong>Batch Validation Status</strong>: Success 🎉</p>\n<p><strong>Expectation suite name</strong>: default</p>\n<p><strong>Data asset name</strong>: x/y/z</p>\n<p><strong>Run ID</strong>: 2019-09-25T060538.829112Z</p>\n<p><strong>Batch ID</strong>: data_asset_name=x/y/z</p>\n<p><strong>Summary</strong>: <strong>0</strong> of <strong>0</strong> expectations were met</p><p>Learn <a href="https://docs.greatexpectations.io/en/latest/guides/tutorials/getting_started/set_up_data_docs.html">here</a> how to review validation results in Data Docs</p>',
    )
    assert rendered_output == expected_output

    data_docs_pages = {"local_site": "file:///localsite/index.html"}
    notify_with = ["local_site"]
    rendered_output = EmailRenderer().render(validation_result_suite,
                                             data_docs_pages, notify_with)

    expected_output = (
        "default: Success 🎉",
        '<p><strong>Batch Validation Status</strong>: Success 🎉</p>\n<p><strong>Expectation suite name</strong>: default</p>\n<p><strong>Data asset name</strong>: x/y/z</p>\n<p><strong>Run ID</strong>: 2019-09-25T060538.829112Z</p>\n<p><strong>Batch ID</strong>: data_asset_name=x/y/z</p>\n<p><strong>Summary</strong>: <strong>0</strong> of <strong>0</strong> expectations were met</p><p><strong>DataDocs</strong> can be found here: <a href="file:///localsite/index.html">file:///localsite/index.html</a>.</br>(Please copy and paste link into a browser to view)</p><p>Learn <a href="https://docs.greatexpectations.io/en/latest/guides/tutorials/getting_started/set_up_data_docs.html">here</a> how to review validation results in Data Docs</p>',
    )
    assert rendered_output == expected_output

    # not configured
    notify_with = ["fake_site"]
    rendered_output = EmailRenderer().render(validation_result_suite,
                                             data_docs_pages, notify_with)

    expected_output = (
        "default: Success 🎉",
        '<p><strong>Batch Validation Status</strong>: Success 🎉</p>\n<p><strong>Expectation suite name</strong>: default</p>\n<p><strong>Data asset name</strong>: x/y/z</p>\n<p><strong>Run ID</strong>: 2019-09-25T060538.829112Z</p>\n<p><strong>Batch ID</strong>: data_asset_name=x/y/z</p>\n<p><strong>Summary</strong>: <strong>0</strong> of <strong>0</strong> expectations were met</p><strong>ERROR</strong>: The email is trying to provide a link to the following DataDocs: `fake_site`, but it is not configured under data_docs_sites in the great_expectations.yml</br><p>Learn <a href="https://docs.greatexpectations.io/en/latest/guides/tutorials/getting_started/set_up_data_docs.html">here</a> how to review validation results in Data Docs</p>',
    )

    assert rendered_output == expected_output
    def self_check(self, pretty_print):
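        """Smoke-test the store: list existing keys, then set and get a
        throwaway test key/value pair, printing progress when requested."""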
        return_obj = {}

        if pretty_print:
            print("Checking for existing keys...")

        return_obj["keys"] = self.list_keys()
        return_obj["len_keys"] = len(return_obj["keys"])
        len_keys = return_obj["len_keys"]

        if pretty_print:
            if return_obj["len_keys"] == 0:
                print(f"\t{len_keys} keys found")
            else:
                print(f"\t{len_keys} keys found:")
                for key in return_obj["keys"][:10]:
                    print(f"\t\t{key}")
            if len_keys > 10:
                print("\t\t...")
            print()

        test_key_name = "test-key-" + "".join(
            random.choice("0123456789ABCDEF") for _ in range(20)
        )

        if self.ge_cloud_mode:
            test_key: GeCloudIdentifier = self.key_class(
                resource_type=GeCloudRESTResource.CONTRACT,
                ge_cloud_id=str(uuid.uuid4()),
            )

        else:
            test_key: ValidationResultIdentifier = self.key_class(
                expectation_suite_identifier=ExpectationSuiteIdentifier(
                    expectation_suite_name="temporary_test_suite"
                ),
                run_id="temporary_test_run_id",
                batch_identifier=test_key_name,
            )
        test_value = ExpectationSuiteValidationResult(success=True)

        if pretty_print:
            print(f"Attempting to add a new test key: {test_key}...")
        self.set(key=test_key, value=test_value)
        if pretty_print:
            print("\tTest key successfully added.")
            print()

        if pretty_print:
            print(
                f"Attempting to retrieve the test value associated with key: {test_key}..."
            )
        test_value = self.get(key=test_key)
        if pretty_print:
            print("\tTest value successfully retrieved.")
            print()

        return return_obj
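
# A minimal usage sketch of self_check, assuming the ValidationsStore and
# InMemoryStoreBackend names used in the tests above; report fields other
# than "keys" and "len_keys" may vary by version.
def demo_self_check():
    store = ValidationsStore(
        store_backend={"class_name": "InMemoryStoreBackend"}
    )
    # On an empty in-memory store, this lists keys, then sets and gets one
    # throwaway test key; the returned report snapshots the initial key list.
    report = store.self_check(pretty_print=True)
    assert report["len_keys"] == len(report["keys"])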
def test_validate():
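    """Validate the Titanic data against a saved expectation suite, compare
    the results to a stored fixture, then confirm only_return_failures
    returns just the failing expectation without changing statistics."""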
    with open(
        file_relative_path(__file__, "./test_sets/titanic_expectations.json")
    ) as f:
        my_expectation_suite = expectationSuiteSchema.loads(f.read())

    with mock.patch("uuid.uuid1") as uuid:
        uuid.return_value = "1234"
        my_df = ge.read_csv(
            file_relative_path(__file__, "./test_sets/Titanic.csv"),
            expectation_suite=my_expectation_suite,
        )
    my_df.set_default_expectation_argument("result_format", "COMPLETE")

    results = my_df.validate(catch_exceptions=False)

    with open(
        file_relative_path(
            __file__, "./test_sets/titanic_expected_data_asset_validate_results.json"
        )
    ) as f:
        expected_results = expectationSuiteValidationResultSchema.loads(f.read())

    del results.meta["great_expectations_version"]
    del results.meta["expectation_suite_meta"]["great_expectations_version"]
    assert results.to_json_dict() == expected_results.to_json_dict()

    # Now, change the results and ensure they are no longer equal
    results.results[0] = ExpectationValidationResult()
    assert results.to_json_dict() != expected_results.to_json_dict()

    # Finally, confirm that only_return_failures works and does not affect
    # the "statistics" field. (The fixed run_time and validation_time in the
    # expected meta below assume the test clock is frozen at 1955-11-05,
    # e.g. with freezegun; otherwise the timestamps would not match.)
    validation_results = my_df.validate(only_return_failures=True)
    del validation_results.meta["great_expectations_version"]
    del validation_results.meta["expectation_suite_meta"]["great_expectations_version"]
    expected_results = ExpectationSuiteValidationResult(
        meta={
            "expectation_suite_name": "titanic",
            "run_id": {"run_name": None, "run_time": "1955-11-05T00:00:00+00:00"},
            "validation_time": "19551105T000000.000000Z",
            "batch_kwargs": {"ge_batch_id": "1234"},
            "expectation_suite_meta": {},
            "batch_markers": {},
            "batch_parameters": {},
        },
        results=[
            ExpectationValidationResult(
                expectation_config=ExpectationConfiguration(
                    expectation_type="expect_column_values_to_be_in_set",
                    kwargs={"column": "PClass", "value_set": ["1st", "2nd", "3rd"]},
                ),
                success=False,
                exception_info={
                    "exception_message": None,
                    "exception_traceback": None,
                    "raised_exception": False,
                },
                result={
                    "partial_unexpected_index_list": [456],
                    "unexpected_count": 1,
                    "unexpected_list": ["*"],
                    "unexpected_percent": 0.07616146230007616,
                    "element_count": 1313,
                    "missing_percent": 0.0,
                    "partial_unexpected_counts": [{"count": 1, "value": "*"}],
                    "partial_unexpected_list": ["*"],
                    "unexpected_percent_total": 0.07616146230007616,
                    "unexpected_percent_nonmissing": 0.07616146230007616,
                    "missing_count": 0,
                    "unexpected_index_list": [456],
                },
            )
        ],
        success=expected_results.success,  # unaffected
        statistics=expected_results["statistics"],  # unaffected
    )
    assert validation_results.to_json_dict() == expected_results.to_json_dict()
def test_evaluation_parameter_store_methods(
    data_context_parameterized_expectation_suite: DataContext,
):
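    """Store evaluation parameters from two validation results and confirm
    their bound parameter URNs accumulate in the evaluation parameter store."""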
    run_id = RunIdentifier(run_name="20191125T000000.000000Z")
    source_patient_data_results = ExpectationSuiteValidationResult(
        meta={
            "expectation_suite_name": "source_patient_data.default",
            "run_id": run_id,
        },
        results=[
            ExpectationValidationResult(
                expectation_config=ExpectationConfiguration(
                    expectation_type="expect_table_row_count_to_equal",
                    kwargs={
                        "value": 1024,
                    },
                ),
                success=True,
                exception_info={
                    "exception_message": None,
                    "exception_traceback": None,
                    "raised_exception": False,
                },
                result={
                    "observed_value": 1024,
                    "element_count": 1024,
                    "missing_percent": 0.0,
                    "missing_count": 0,
                },
            )
        ],
        success=True,
    )

    data_context_parameterized_expectation_suite.store_evaluation_parameters(
        source_patient_data_results
    )

    bound_parameters = data_context_parameterized_expectation_suite.evaluation_parameter_store.get_bind_params(
        run_id
    )
    assert bound_parameters == {
        "urn:great_expectations:validations:source_patient_data.default:expect_table_row_count_to_equal.result.observed_value": 1024
    }
    source_diabetes_data_results = ExpectationSuiteValidationResult(
        meta={
            "expectation_suite_name": "source_diabetes_data.default",
            "run_id": run_id,
        },
        results=[
            ExpectationValidationResult(
                expectation_config=ExpectationConfiguration(
                    expectation_type="expect_column_unique_value_count_to_be_between",
                    kwargs={"column": "patient_nbr", "min": 2048, "max": 2048},
                ),
                success=True,
                exception_info={
                    "exception_message": None,
                    "exception_traceback": None,
                    "raised_exception": False,
                },
                result={
                    "observed_value": 2048,
                    "element_count": 5000,
                    "missing_percent": 0.0,
                    "missing_count": 0,
                },
            )
        ],
        success=True,
    )

    data_context_parameterized_expectation_suite.store_evaluation_parameters(
        source_diabetes_data_results
    )
    bound_parameters = data_context_parameterized_expectation_suite.evaluation_parameter_store.get_bind_params(
        run_id
    )
    assert bound_parameters == {
        "urn:great_expectations:validations:source_patient_data.default:expect_table_row_count_to_equal.result.observed_value": 1024,
        "urn:great_expectations:validations:source_diabetes_data.default:expect_column_unique_value_count_to_be_between.result.observed_value:column=patient_nbr": 2048,
    }
def test_StoreMetricsAction(basic_in_memory_data_context_for_validation_operator):
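    """Run StoreMetricsAction for two suites and confirm the requested
    statistics land in the metrics store under the right identifiers."""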
    action = StoreMetricsAction(
        data_context=basic_in_memory_data_context_for_validation_operator,
        requested_metrics={
            "*": [
                "statistics.evaluated_expectations",
                "statistics.successful_expectations",
            ]
        },
        target_store_name="metrics_store",
    )

    run_id = RunIdentifier(run_name="bar")

    validation_result = ExpectationSuiteValidationResult(
        success=False,
        meta={"expectation_suite_name": "foo", "run_id": run_id},
        statistics={"evaluated_expectations": 5, "successful_expectations": 3},
    )

    # Run the action and store our metrics
    action.run(
        validation_result,
        ValidationResultIdentifier.from_object(validation_result),
        data_asset=None,
    )

    validation_result = ExpectationSuiteValidationResult(
        success=False,
        meta={"expectation_suite_name": "foo.warning", "run_id": run_id},
        statistics={"evaluated_expectations": 8, "successful_expectations": 4},
    )

    action.run(
        validation_result,
        ValidationResultIdentifier.from_object(validation_result),
        data_asset=None,
    )

    assert (
        basic_in_memory_data_context_for_validation_operator.stores[
            "metrics_store"
        ].get(
            ValidationMetricIdentifier(
                run_id=run_id,
                data_asset_name=None,
                expectation_suite_identifier=ExpectationSuiteIdentifier("foo"),
                metric_name="statistics.evaluated_expectations",
                metric_kwargs_id=None,
            )
        )
        == 5
    )

    assert (
        basic_in_memory_data_context_for_validation_operator.stores[
            "metrics_store"
        ].get(
            ValidationMetricIdentifier(
                run_id=run_id,
                data_asset_name=None,
                expectation_suite_identifier=ExpectationSuiteIdentifier("foo"),
                metric_name="statistics.successful_expectations",
                metric_kwargs_id=None,
            )
        )
        == 3
    )

    assert (
        basic_in_memory_data_context_for_validation_operator.stores[
            "metrics_store"
        ].get(
            ValidationMetricIdentifier(
                run_id=run_id,
                data_asset_name=None,
                expectation_suite_identifier=ExpectationSuiteIdentifier("foo.warning"),
                metric_name="statistics.evaluated_expectations",
                metric_kwargs_id=None,
            )
        )
        == 8
    )

    assert (
        basic_in_memory_data_context_for_validation_operator.stores[
            "metrics_store"
        ].get(
            ValidationMetricIdentifier(
                run_id=run_id,
                data_asset_name=None,
                expectation_suite_identifier=ExpectationSuiteIdentifier("foo.warning"),
                metric_name="statistics.successful_expectations",
                metric_kwargs_id=None,
            )
        )
        == 4
    )
def test_StoreMetricsAction_column_metric(
    basic_in_memory_data_context_for_validation_operator,
):
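    """Confirm StoreMetricsAction can extract a column-scoped metric
    (unexpected_count for column provider_id) alongside suite statistics."""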
    action = StoreMetricsAction(
        data_context=basic_in_memory_data_context_for_validation_operator,
        requested_metrics={
            "*": [
                {
                    "column": {
                        "provider_id": [
                            "expect_column_values_to_be_unique.result.unexpected_count"
                        ]
                    }
                },
                "statistics.evaluated_expectations",
                "statistics.successful_expectations",
            ]
        },
        target_store_name="metrics_store",
    )

    run_id = RunIdentifier(run_name="bar")

    validation_result = ExpectationSuiteValidationResult(
        success=False,
        meta={"expectation_suite_name": "foo", "run_id": run_id},
        results=[
            ExpectationValidationResult(
                meta={},
                result={
                    "element_count": 10,
                    "missing_count": 0,
                    "missing_percent": 0.0,
                    "unexpected_count": 7,
                    "unexpected_percent": 0.0,
                    "unexpected_percent_nonmissing": 0.0,
                    "partial_unexpected_list": [],
                },
                success=True,
                expectation_config=ExpectationConfiguration(
                    expectation_type="expect_column_values_to_be_unique",
                    kwargs={"column": "provider_id", "result_format": "BASIC"},
                ),
                exception_info=None,
            )
        ],
        statistics={"evaluated_expectations": 5, "successful_expectations": 3},
    )

    action.run(
        validation_result,
        ValidationResultIdentifier.from_object(validation_result),
        data_asset=None,
    )

    assert (
        basic_in_memory_data_context_for_validation_operator.stores[
            "metrics_store"
        ].get(
            ValidationMetricIdentifier(
                run_id=run_id,
                data_asset_name=None,
                expectation_suite_identifier=ExpectationSuiteIdentifier("foo"),
                metric_name="expect_column_values_to_be_unique.result.unexpected_count",
                metric_kwargs_id="column=provider_id",
            )
        )
        == 7
    )
def test_SlackNotificationAction(
    data_context_parameterized_expectation_suite,
    validation_result_suite,
    validation_result_suite_id,
):
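    """Exercise SlackNotificationAction configuration rules (webhook vs.
    token/channel) and the notify_on="all"/"failure" behavior."""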
    renderer = {
        "module_name": "great_expectations.render.renderer.slack_renderer",
        "class_name": "SlackRenderer",
    }
    slack_webhook = "https://hooks.slack.com/services/test/slack/webhook"
    slack_token = "test"
    slack_channel = "test"
    notify_on = "all"

    # Test with just slack_webhook set; expect pass
    slack_action = SlackNotificationAction(
        data_context=data_context_parameterized_expectation_suite,
        renderer=renderer,
        slack_webhook=slack_webhook,
        notify_on=notify_on,
    )

    assert slack_action.run(
        validation_result_suite_identifier=validation_result_suite_id,
        validation_result_suite=validation_result_suite,
        data_asset=None,
    ) == {"slack_notification_result": "Slack notification succeeded."}

    # Test with slack_token and slack_channel set; expect pass
    slack_action = SlackNotificationAction(
        data_context=data_context_parameterized_expectation_suite,
        renderer=renderer,
        slack_token=slack_token,
        slack_channel=slack_channel,
        notify_on=notify_on,
    )

    assert slack_action.run(
        validation_result_suite_identifier=validation_result_suite_id,
        validation_result_suite=validation_result_suite,
        data_asset=None,
    ) == {"slack_notification_result": "Slack notification succeeded."}

    # Test with just slack_token set; expect fail
    with pytest.raises(AssertionError):
        SlackNotificationAction(
            data_context=data_context_parameterized_expectation_suite,
            renderer=renderer,
            slack_token=slack_token,
            notify_on=notify_on,
        )

    # Test with just slack_channel set; expect fail
    with pytest.raises(AssertionError):
        slack_action = SlackNotificationAction(
            data_context=data_context_parameterized_expectation_suite,
            renderer=renderer,
            slack_channel=slack_channel,
            notify_on=notify_on,
        )

    # Test with slack_channel, slack_token, and slack_webhook set; expect fail
    with pytest.raises(AssertionError):
        SlackNotificationAction(
            data_context=data_context_parameterized_expectation_suite,
            renderer=renderer,
            slack_channel=slack_channel,
            slack_token=slack_token,
            slack_webhook=slack_webhook,
            notify_on=notify_on,
        )

    # Test notify_on="failure" with a failed run; expect a notification
    notify_on = "failure"
    slack_action = SlackNotificationAction(
        data_context=data_context_parameterized_expectation_suite,
        renderer=renderer,
        slack_webhook=slack_webhook,
        notify_on=notify_on,
    )

    assert slack_action.run(
        validation_result_suite_identifier=validation_result_suite_id,
        validation_result_suite=ExpectationSuiteValidationResult(
            success=False,
            results=[],
            statistics={
                "successful_expectations": [],
                "evaluated_expectations": [],
            },
        ),
        data_asset=None,
    ) == {"slack_notification_result": "Slack notification succeeded."}

    # Test notify_on="failure" with a successful run; expect "none required"
    notify_on = "failure"
    validation_result_suite.success = False
    slack_action = SlackNotificationAction(
        data_context=data_context_parameterized_expectation_suite,
        renderer=renderer,
        slack_webhook=slack_webhook,
        notify_on=notify_on,
    )

    assert slack_action.run(
        validation_result_suite_identifier=validation_result_suite_id,
        validation_result_suite=ExpectationSuiteValidationResult(
            success=True,
            results=[],
            statistics={
                "successful_expectations": [],
                "evaluated_expectations": [],
            },
        ),
        data_asset=None,
    ) == {"slack_notification_result": "none required"}
def test_SlackRenderer_validation_results_with_datadocs():
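    """Check the Slack rendering of a validation result without Data Docs
    links, with a configured site, and with an unconfigured site."""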

    validation_result_suite = ExpectationSuiteValidationResult(
        results=[],
        success=True,
        statistics={
            "evaluated_expectations": 0,
            "successful_expectations": 0,
            "unsuccessful_expectations": 0,
            "success_percent": None,
        },
        meta={
            "great_expectations_version": "v0.8.0__develop",
            "batch_kwargs": {
                "data_asset_name": "x/y/z"
            },
            "data_asset_name": {
                "datasource": "x",
                "generator": "y",
                "generator_asset": "z",
            },
            "expectation_suite_name": "default",
            "run_id": "2019-09-25T060538.829112Z",
        },
    )

    rendered_output = SlackRenderer().render(validation_result_suite)

    expected_output = {
        "blocks": [
            {
                "type": "section",
                "text": {
                    "type": "mrkdwn",
                    "text": "*Batch Validation Status*: Success :tada:\n*Expectation suite name*: `default`\n*Data asset name*: `x/y/z`\n*Run ID*: `2019-09-25T060538.829112Z`\n*Batch ID*: `data_asset_name=x/y/z`\n*Summary*: *0* of *0* expectations were met",
                },
            },
            {"type": "divider"},
            {
                "type": "context",
                "elements": [
                    {
                        "type": "mrkdwn",
                        "text": "Learn how to review validation results in Data Docs: https://docs.greatexpectations.io/en/latest/guides/tutorials/getting_started/set_up_data_docs.html",
                    }
                ],
            },
        ],
        "text": "default: Success :tada:",
    }
    print(rendered_output)
    print(expected_output)
    assert rendered_output == expected_output

    data_docs_pages = {"local_site": "file:///localsite/index.html"}
    notify_with = ["local_site"]
    rendered_output = SlackRenderer().render(validation_result_suite,
                                             data_docs_pages, notify_with)

    expected_output = {
        "blocks": [
            {
                "type": "section",
                "text": {
                    "type": "mrkdwn",
                    "text": "*Batch Validation Status*: Success :tada:\n*Expectation suite name*: `default`\n*Data asset name*: `x/y/z`\n*Run ID*: `2019-09-25T060538.829112Z`\n*Batch ID*: `data_asset_name=x/y/z`\n*Summary*: *0* of *0* expectations were met",
                },
            },
            {
                "type": "section",
                "text": {
                    "type": "mrkdwn",
                    "text": "*DataDocs* can be found here: `file:///localsite/index.html` \n (Please copy and paste link into a browser to view)\n",
                },
            },
            {"type": "divider"},
            {
                "type": "context",
                "elements": [
                    {
                        "type": "mrkdwn",
                        "text": "Learn how to review validation results in Data Docs: https://docs.greatexpectations.io/en/latest/guides/tutorials/getting_started/set_up_data_docs.html",
                    }
                ],
            },
        ],
        "text": "default: Success :tada:",
    }
    assert rendered_output == expected_output

    # not configured
    notify_with = ["fake_site"]
    rendered_output = SlackRenderer().render(validation_result_suite,
                                             data_docs_pages, notify_with)

    expected_output = {
        "blocks": [
            {
                "type": "section",
                "text": {
                    "type": "mrkdwn",
                    "text": "*Batch Validation Status*: Success :tada:\n*Expectation suite name*: `default`\n*Data asset name*: `x/y/z`\n*Run ID*: `2019-09-25T060538.829112Z`\n*Batch ID*: `data_asset_name=x/y/z`\n*Summary*: *0* of *0* expectations were met",
                },
            },
            {
                "type": "section",
                "text": {
                    "type": "mrkdwn",
                    "text": "*ERROR*: Slack is trying to provide a link to the following DataDocs: `fake_site`, but it is not configured under `data_docs_sites` in the `great_expectations.yml`\n",
                },
            },
            {"type": "divider"},
            {
                "type": "context",
                "elements": [
                    {
                        "type": "mrkdwn",
                        "text": "Learn how to review validation results in Data Docs: https://docs.greatexpectations.io/en/latest/guides/tutorials/getting_started/set_up_data_docs.html",
                    }
                ],
            },
        ],
        "text": "default: Success :tada:",
    }

    assert rendered_output == expected_output