# Imports assumed by the test snippets below (Great Expectations ~0.13-era module
# paths; later releases may have moved some of these):
import json

import mistune
import pypandoc

from great_expectations.core import ExpectationConfiguration, ExpectationSuite
from great_expectations.data_context.util import file_relative_path
from great_expectations.render.renderer import (
    ExpectationSuitePageRenderer,
    ProfilingResultsPageRenderer,
)
from great_expectations.render.types import RenderedContent
from great_expectations.render.view import DefaultJinjaPageView, DefaultMarkdownPageView


def test_expectation_summary_in_ExpectationSuitePageRenderer_render_expectation_suite_notes():
    result = ExpectationSuitePageRenderer._render_expectation_suite_notes(
        ExpectationSuite(expectation_suite_name="test",
                         meta={},
                         expectations=None))
    # print(RenderedContent.rendered_content_list_to_json(result.text))
    assert RenderedContent.rendered_content_list_to_json(result.text) == [
        'This Expectation suite currently contains 0 total Expectations across 0 columns.'
    ]

    result = ExpectationSuitePageRenderer._render_expectation_suite_notes(
        ExpectationSuite(
            expectation_suite_name="test",
            meta={"notes": {
                "format": "markdown",
                "content": ["hi"]
            }}))
    # print(RenderedContent.rendered_content_list_to_json(result.text))

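    # Probe whether markdown rendering is available: if the probe raises OSError,
    # the renderer is expected to fall back to emitting the note as a plain string.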
    try:
        mistune.markdown("*test*")
        assert RenderedContent.rendered_content_list_to_json(result.text) == [
            'This Expectation suite currently contains 0 total Expectations across 0 columns.',
            {
                'content_block_type': 'markdown',
                'styling': {
                    'parent': {}
                },
                'markdown': 'hi'
            }
        ]
    except OSError:
        assert RenderedContent.rendered_content_list_to_json(result.text) == [
            'This Expectation suite currently contains 0 total Expectations across 0 columns.',
            'hi',
        ]

    result = ExpectationSuitePageRenderer._render_expectation_suite_notes(
        ExpectationSuite(
            expectation_suite_name="test",
            meta={},
            expectations=[
                ExpectationConfiguration(
                    expectation_type="expect_table_row_count_to_be_between",
                    kwargs={
                        "min_value": 0,
                        "max_value": None
                    }),
                ExpectationConfiguration(
                    expectation_type="expect_column_to_exist",
                    kwargs={"column": "x"}),
                ExpectationConfiguration(
                    expectation_type="expect_column_to_exist",
                    kwargs={"column": "y"})
            ]))
    # print(RenderedContent.rendered_content_list_to_json(result.text)[0])
    assert RenderedContent.rendered_content_list_to_json(
        result.text
    )[0] == 'This Expectation suite currently contains 3 total Expectations across 2 columns.'


def test_expectation_summary_in_render_asset_notes():
    result = ExpectationSuitePageRenderer._render_asset_notes({
        "meta": {},
        "expectations": {}
    })
    print(result)
    assert result["content"] == [
        'This Expectation suite currently contains 0 total Expectations across 0 columns.'
    ]

    result = ExpectationSuitePageRenderer._render_asset_notes({
        "meta": {
            "notes": {
                "format": "markdown",
                "content": ["hi"]
            }
        },
        "expectations": {}
    })
    print(result)

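    # pypandoc raises OSError when the pandoc binary is not installed; in that case
    # the renderer cannot convert markdown notes to HTML and the raw strings pass through.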
    try:
        pypandoc.convert_text("*test*", format='md', to="html")
        assert result["content"] == [
            'This Expectation suite currently contains 0 total Expectations across 0 columns.',
            '<p>hi</p>\n',
        ]
    except OSError:
        assert result["content"] == [
            'This Expectation suite currently contains 0 total Expectations across 0 columns.',
            'hi',
        ]

    result = ExpectationSuitePageRenderer._render_asset_notes({
        "meta": {},
        "expectations": [
            {
                "expectation_type": "expect_table_row_count_to_be_between",
                "kwargs": {
                    "min_value": 0,
                    "max_value": None,
                }
            },
            {
                "expectation_type": "expect_column_to_exist",
                "kwargs": {
                    "column": "x",
                }
            },
            {
                "expectation_type": "expect_column_to_exist",
                "kwargs": {
                    "column": "y",
                }
            },
        ]
    })
    print(result)
    assert result["content"][
        0] == 'This Expectation suite currently contains 3 total Expectations across 2 columns.'
def test_render_expectation_suite_for_Markdown(expectation_suite_to_render_with_notes):
    expectation_suite_page_renderer = ExpectationSuitePageRenderer()
    rendered_document_content_list = expectation_suite_page_renderer.render(
        expectation_suite_to_render_with_notes
    )
    md_str = DefaultMarkdownPageView().render(rendered_document_content_list)
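    # render() returns a list of markdown strings (one per rendered document); join them
    # and strip all whitespace so the comparison ignores indentation and line breaks.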
    md_str = " ".join(md_str)
    md_str = md_str.replace(" ", "").replace("\t", "").replace("\n", "")
    assert (
        md_str
        == r"""
   # Validation Results
## Overview
### Info
 |  |  |
 | ------------  | ------------ |
Expectation Suite Name  | default
Great Expectations Version  | 0.13.0-test
### Notes
    This Expectation suite currently contains 5 total Expectations across 5 columns.
## infinities
  * is a required field.
  * ***
## irrationals
  * distinct values must belong to this set: \* **1st** **2nd**.
  * ***
## naturals
  * is a required field.
  * ***
## nulls
  * is a required field.
  * ***
## testings
  * values must be unique.
    #### Notes:
      Example notes about this expectation. **Markdown** `Supported`.

      Second example note **with** *Markdown*
  * ***
-----------------------------------------------------------
Powered by [Great Expectations](https://greatexpectations.io/)
    """.replace(
            " ", ""
        )
        .replace("\t", "")
        .replace("\n", "")
    )


def test_render_profiled_fixture_expectation_suite(titanic_dataset_profiler_expectations):
    rendered_content = ExpectationSuitePageRenderer().render(titanic_dataset_profiler_expectations)
    rendered_page = DefaultJinjaPageView().render(rendered_content)

    with open(file_relative_path(__file__, './output/test_render_profiled_fixture_expectation_suite.html'), 'wb') as f:
        f.write(rendered_page.encode("utf-8"))

    assert rendered_page[:15] == "<!DOCTYPE html>"
    assert rendered_page[-7:] == "</html>"


def test_render_profiled_fixture_expectation_suite(titanic_dataset_profiler_expectations):
    rendered_json = ExpectationSuitePageRenderer().render(titanic_dataset_profiler_expectations)
    rendered_page = DefaultJinjaPageView().render(rendered_json)

    with open('./tests/render/output/test_render_profiled_fixture_expectation_suite.html', 'wb') as f:
        f.write(rendered_page.encode("utf-8"))

    assert rendered_page[:15] == "<!DOCTYPE html>"
    assert rendered_page[-7:] == "</html>"


def render(render_object):
    """Render a great expectations object to documentation.

    RENDER_OBJECT: path to a GE object to render
    """
    with open(render_object, "r") as infile:
        raw = json.load(infile)

    if "results" in raw:
        model = ProfilingResultsPageRenderer.render(raw)
    else:
        model = ExpectationSuitePageRenderer.render(raw)
    print(DefaultJinjaPageView.render(model))
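

# A minimal usage sketch for the render() helper above (hypothetical path; in its
# original source the helper may be wrapped in CLI decorators that are not shown here):
def example_render_usage():
    # Point it at a saved Expectation Suite (or validation result) JSON file and it
    # prints the rendered HTML documentation page to stdout.
    render("great_expectations/expectations/my_suite.json")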


def test_render_profiled_fixture_expectation_suite_with_distribution(
    titanic_dataset_profiler_expectations_with_distribution, ):
    # Tests sparkline
    rendered_content = ExpectationSuitePageRenderer().render(
        titanic_dataset_profiler_expectations_with_distribution)
    rendered_page = DefaultJinjaPageView().render(rendered_content)

    with open(
            file_relative_path(
                __file__,
                "./output/titanic_dataset_profiler_expectation_suite_with_distribution.html",
            ),
            "wb",
    ) as f:
        f.write(rendered_page.encode("utf-8"))

    assert rendered_page[:15] == "<!DOCTYPE html>"
    assert rendered_page[-7:] == "</html>"


def test_ExpectationSuitePageRenderer_render_expectation_suite_notes():
    result = ExpectationSuitePageRenderer._render_expectation_suite_notes(
        ExpectationSuite(expectation_suite_name="test", meta={"notes": "*hi*"})
    )
    # print(RenderedContent.rendered_content_list_to_json(result.text))
    assert RenderedContent.rendered_content_list_to_json(result.text) == [
        "This Expectation suite currently contains 0 total Expectations across 0 columns.",
        "*hi*",
    ]

    result = ExpectationSuitePageRenderer._render_expectation_suite_notes(
        ExpectationSuite(
            expectation_suite_name="test",
            meta={"notes": ["*alpha*", "_bravo_", "charlie"]},
        )
    )
    # print(RenderedContent.rendered_content_list_to_json(result.text))
    assert RenderedContent.rendered_content_list_to_json(result.text) == [
        "This Expectation suite currently contains 0 total Expectations across 0 columns.",
        "*alpha*",
        "_bravo_",
        "charlie",
    ]

    result = ExpectationSuitePageRenderer._render_expectation_suite_notes(
        ExpectationSuite(
            expectation_suite_name="test",
            meta={
                "notes": {
                    "format": "string",
                    "content": ["*alpha*", "_bravo_", "charlie"],
                }
            },
        )
    )
    # print(RenderedContent.rendered_content_list_to_json(result.text))
    assert RenderedContent.rendered_content_list_to_json(result.text) == [
        "This Expectation suite currently contains 0 total Expectations across 0 columns.",
        "*alpha*",
        "_bravo_",
        "charlie",
    ]

    result = ExpectationSuitePageRenderer._render_expectation_suite_notes(
        ExpectationSuite(
            expectation_suite_name="test",
            meta={"notes": {"format": "markdown", "content": "*alpha*"}},
        )
    )
    # print(RenderedContent.rendered_content_list_to_json(result.text))

    try:
        mistune.markdown("*test*")
        assert RenderedContent.rendered_content_list_to_json(result.text) == [
            "This Expectation suite currently contains 0 total Expectations across 0 columns.",
            {
                "content_block_type": "markdown",
                "styling": {"parent": {}},
                "markdown": "*alpha*",
            },
        ]
    except OSError:
        assert RenderedContent.rendered_content_list_to_json(result.text) == [
            "This Expectation suite currently contains 0 total Expectations across 0 columns.",
            "*alpha*",
        ]

    result = ExpectationSuitePageRenderer._render_expectation_suite_notes(
        ExpectationSuite(
            expectation_suite_name="test",
            meta={
                "notes": {
                    "format": "markdown",
                    "content": ["*alpha*", "_bravo_", "charlie"],
                }
            },
        )
    )
    # print(RenderedContent.rendered_content_list_to_json(result.text))

    try:
        mistune.markdown("*test*")
        assert RenderedContent.rendered_content_list_to_json(result.text) == [
            "This Expectation suite currently contains 0 total Expectations across 0 columns.",
            {
                "content_block_type": "markdown",
                "styling": {"parent": {}},
                "markdown": "*alpha*",
            },
            {
                "content_block_type": "markdown",
                "styling": {"parent": {}},
                "markdown": "_bravo_",
            },
            {
                "content_block_type": "markdown",
                "styling": {"parent": {}},
                "markdown": "charlie",
            },
        ]
    except OSError:
        assert RenderedContent.rendered_content_list_to_json(result.text) == [
            "This Expectation suite currently contains 0 total Expectations across 0 columns.",
            "*alpha*",
            "_bravo_",
            "charlie",
        ]


def test_ExpectationSuitePageRenderer_render_expectation_suite_notes():
    result = ExpectationSuitePageRenderer._render_expectation_suite_notes(ExpectationSuite(
        expectation_suite_name="test",
        meta={
            "notes": "*hi*"
        }
    ))
    # print(RenderedContent.rendered_content_list_to_json(result.text))
    assert RenderedContent.rendered_content_list_to_json(
        result.text) == ['This Expectation suite currently contains 0 total Expectations across 0 columns.', "*hi*"]

    result = ExpectationSuitePageRenderer._render_expectation_suite_notes(ExpectationSuite(
        expectation_suite_name="test",
        meta={
            "notes": ["*alpha*", "_bravo_", "charlie"]
        }
    ))
    # print(RenderedContent.rendered_content_list_to_json(result.text))
    assert RenderedContent.rendered_content_list_to_json(
        result.text) == ['This Expectation suite currently contains 0 total Expectations across 0 columns.',
                              "*alpha*", "_bravo_", "charlie"]

    result = ExpectationSuitePageRenderer._render_expectation_suite_notes(ExpectationSuite(
        expectation_suite_name="test",
        meta={
            "notes": {
                "format": "string",
                "content": ["*alpha*", "_bravo_", "charlie"]
            }
        }
    ))
    # print(RenderedContent.rendered_content_list_to_json(result.text))
    assert RenderedContent.rendered_content_list_to_json(
        result.text) == ['This Expectation suite currently contains 0 total Expectations across 0 columns.',
                           "*alpha*", "_bravo_", "charlie"]

    result = ExpectationSuitePageRenderer._render_expectation_suite_notes(ExpectationSuite(
        expectation_suite_name="test",
        meta={
            "notes": {
                "format": "markdown",
                "content": "*alpha*"
            }
        }
    ))
    # print(RenderedContent.rendered_content_list_to_json(result.text))

    try:
        mistune.markdown("*test*")
        assert RenderedContent.rendered_content_list_to_json(
            result.text) == [
            'This Expectation suite currently contains 0 total Expectations across 0 columns.',
            {'content_block_type': 'markdown', 'styling': {'parent': {}}, 'markdown': '*alpha*'}
        ]
    except OSError:
        assert RenderedContent.rendered_content_list_to_json(
            result.text) == ['This Expectation suite currently contains 0 total Expectations across 0 columns.',
                               "*alpha*"]

    result = ExpectationSuitePageRenderer._render_expectation_suite_notes(ExpectationSuite(
        expectation_suite_name="test",
        meta={
            "notes": {
                "format": "markdown",
                "content": ["*alpha*", "_bravo_", "charlie"]
            }
        }
    ))
    # print(RenderedContent.rendered_content_list_to_json(result.text))

    try:
        mistune.markdown("*test*")
        assert RenderedContent.rendered_content_list_to_json(
            result.text) == [
            'This Expectation suite currently contains 0 total Expectations across 0 columns.',
            {'content_block_type': 'markdown', 'styling': {'parent': {}}, 'markdown': '*alpha*'},
            {'content_block_type': 'markdown', 'styling': {'parent': {}}, 'markdown': '_bravo_'},
            {'content_block_type': 'markdown', 'styling': {'parent': {}}, 'markdown': 'charlie'}
        ]
    except OSError:
        assert RenderedContent.rendered_content_list_to_json(
            result.text) == ['This Expectation suite currently contains 0 total Expectations across 0 columns.',
                               "*alpha*", "_bravo_", "charlie"]


def test_ExpectationSuitePageRenderer_render_asset_notes():
    # import pypandoc
    # print(pypandoc.convert_text("*hi*", to='html', format="md"))

    result = ExpectationSuitePageRenderer._render_asset_notes(
        {"meta": {
            "notes": "*hi*"
        }})
    print(result)
    assert result["content"] == ["*hi*"]

    result = ExpectationSuitePageRenderer._render_asset_notes(
        {"meta": {
            "notes": ["*alpha*", "_bravo_", "charlie"]
        }})
    print(result)
    assert result["content"] == ["*alpha*", "_bravo_", "charlie"]

    result = ExpectationSuitePageRenderer._render_asset_notes({
        "meta": {
            "notes": {
                "format": "string",
                "content": ["*alpha*", "_bravo_", "charlie"]
            }
        }
    })
    print(result)
    assert result["content"] == ["*alpha*", "_bravo_", "charlie"]

    result = ExpectationSuitePageRenderer._render_asset_notes(
        {"meta": {
            "notes": {
                "format": "markdown",
                "content": "*alpha*"
            }
        }})
    print(result)

    try:
        pypandoc.convert_text("*test*", format='md', to="html")
        assert result["content"] == ["<p><em>alpha</em></p>\n"]
    except OSError:
        assert result["content"] == ["*alpha*"]

    result = ExpectationSuitePageRenderer._render_asset_notes({
        "meta": {
            "notes": {
                "format": "markdown",
                "content": ["*alpha*", "_bravo_", "charlie"]
            }
        }
    })
    print(result)

    try:
        pypandoc.convert_text("*test*", format='md', to="html")
        assert result["content"] == [
            "<p><em>alpha</em></p>\n", "<p><em>bravo</em></p>\n",
            "<p>charlie</p>\n"
        ]
    except OSError:
        assert result["content"] == ["*alpha*", "_bravo_", "charlie"]


def test_expectation_summary_in_ExpectationSuitePageRenderer_render_expectation_suite_notes(
    empty_data_context, ):
    context = empty_data_context
    result = ExpectationSuitePageRenderer._render_expectation_suite_notes(
        ExpectationSuite(
            expectation_suite_name="test",
            meta={},
            expectations=None,
            data_context=context,
        ))
    # print(RenderedContent.rendered_content_list_to_json(result.text))
    assert RenderedContent.rendered_content_list_to_json(result.text) == [
        "This Expectation suite currently contains 0 total Expectations across 0 columns."
    ]

    result = ExpectationSuitePageRenderer._render_expectation_suite_notes(
        ExpectationSuite(
            expectation_suite_name="test",
            meta={"notes": {
                "format": "markdown",
                "content": ["hi"]
            }},
            data_context=context,
        ))
    # print(RenderedContent.rendered_content_list_to_json(result.text))

    try:
        mistune.markdown("*test*")
        assert RenderedContent.rendered_content_list_to_json(result.text) == [
            "This Expectation suite currently contains 0 total Expectations across 0 columns.",
            {
                "content_block_type": "markdown",
                "styling": {
                    "parent": {}
                },
                "markdown": "hi",
            },
        ]
    except OSError:
        assert RenderedContent.rendered_content_list_to_json(result.text) == [
            "This Expectation suite currently contains 0 total Expectations across 0 columns.",
            "hi",
        ]

    result = ExpectationSuitePageRenderer._render_expectation_suite_notes(
        ExpectationSuite(
            expectation_suite_name="test",
            meta={},
            expectations=[
                ExpectationConfiguration(
                    expectation_type="expect_table_row_count_to_be_between",
                    kwargs={
                        "min_value": 0,
                        "max_value": None
                    },
                ),
                ExpectationConfiguration(
                    expectation_type="expect_column_to_exist",
                    kwargs={"column": "x"}),
                ExpectationConfiguration(
                    expectation_type="expect_column_to_exist",
                    kwargs={"column": "y"}),
            ],
            data_context=context,
        ))
    # print(RenderedContent.rendered_content_list_to_json(result.text)[0])
    assert (
        RenderedContent.rendered_content_list_to_json(result.text)[0] ==
        "This Expectation suite currently contains 3 total Expectations across 2 columns."
    )