def test_smoke_render_validation_results_page_renderer(titanic_profiler_evrs):
    """Smoke test: the page renderer produces a document with more than 5 sections.

    Also dumps the rendered document to a JSON file for manual inspection.
    """
    document = ValidationResultsPageRenderer.render(titanic_profiler_evrs)
    output_path = (
        './tests/render/output/test_render_validation_results_page_renderer.json'
    )
    with open(output_path, 'w') as outfile:
        json.dump(document, outfile, indent=2)
    assert len(document["sections"]) > 5
def _ge_validation_fn(context, dataset):
    """Validate `dataset` against a Great Expectations suite inside a Dagster op.

    Builds a runtime-batch validator from the `ge_data_context` resource, runs
    the validation, renders the results to markdown, then yields:
      1. an ExpectationResult whose metadata carries the markdown summary, and
      2. an Output containing the raw validation results as a JSON dict.

    NOTE(review): `datasource_name`, `data_connector_name`, `data_asset_name`,
    `runtime_method_type`, `batch_identifiers`, `suite_name` and `extra_kwargs`
    are closed-over variables from an enclosing factory that is not visible in
    this chunk — their exact semantics cannot be confirmed here.
    """
    data_context = context.resources.ge_data_context
    validator_kwargs = {
        "datasource_name": datasource_name,
        "data_connector_name": data_connector_name,
        # NOTE(review): `datasource_name or data_asset_name` means
        # data_asset_name is used only when datasource_name is falsy — the
        # operands look like they may be reversed; confirm this is intended.
        "data_asset_name": datasource_name or data_asset_name,
        # Hand the in-memory dataset to the runtime data connector.
        "runtime_parameters": {runtime_method_type: dataset},
        "batch_identifiers": batch_identifiers,
        "expectation_suite_name": suite_name,
        **extra_kwargs,
    }
    validator = data_context.get_validator(**validator_kwargs)
    run_id = {
        "run_name": datasource_name + " run",
        "run_time": datetime.datetime.utcnow(),
    }
    results = validator.validate(run_id=run_id)
    # Render the validation results to a markdown document (run info last).
    validation_results_page_renderer = ValidationResultsPageRenderer(
        run_info_at_end=True)
    rendered_document_content_list = validation_results_page_renderer.render(
        validation_results=results)
    md_str = "".join(
        DefaultMarkdownPageView().render(rendered_document_content_list))
    # Attach the markdown summary as metadata on the Dagster expectation event.
    meta_stats = MetadataEntry("Expectation Results",
                               value=MetadataValue.md(md_str))
    yield ExpectationResult(
        success=bool(results["success"]),
        metadata_entries=[meta_stats],
    )
    yield Output(results.to_json_dict())
def render_multiple_validation_result_pages_markdown(
    validation_operator_result: ValidationOperatorResult,
    run_info_at_end: bool = True,
):
    """
    Loop through and render multiple validation results to markdown.
    Args:
        validation_operator_result: (ValidationOperatorResult) Result of validation operator run
        run_info_at_end: move run info below expectation results
    Returns:
        string containing formatted markdown validation results
    """
    page_renderer = ValidationResultsPageRenderer(run_info_at_end=run_info_at_end)
    # Render each validation result to its own markdown document; a single
    # space separates consecutive documents.
    rendered_pages = []
    for result in validation_operator_result.list_validation_results():
        document = page_renderer.render(result)
        rendered_pages.append(DefaultMarkdownPageView().render(document) + " ")
    return "".join(rendered_pages)
def test_render_validation_results(titanic_profiled_evrs_1):
    """Render validation results to an HTML page and check it is a full document."""
    document = ValidationResultsPageRenderer.render(titanic_profiled_evrs_1)
    html = DefaultJinjaPageView.render(document)
    # Persist the page for manual inspection of the rendered output.
    with open('./tests/render/output/test_render_validation_results.html',
              'w') as f:
        f.write(html)
    assert html.startswith("<!DOCTYPE html>")
    assert html.endswith("</html>")
def test_snapshot_ValidationResultsPageRenderer_render_with_run_info_at_start(
    titanic_profiled_evrs_1,
    ValidationResultsPageRenderer_render_with_run_info_at_start,
):
    """Snapshot test: rendering with run info at the START matches the stored fixture.

    Args:
        titanic_profiled_evrs_1: profiled validation results fixture.
        ValidationResultsPageRenderer_render_with_run_info_at_start: expected
            rendered-document snapshot fixture.
    """
    validation_results_page_renderer = ValidationResultsPageRenderer(
        run_info_at_end=False)
    rendered_validation_results = validation_results_page_renderer.render(
        titanic_profiled_evrs_1).to_json_dict()
    # Removed a leftover debug `print(rendered_validation_results)` that dumped
    # the entire rendered dict on every run; pytest shows the assertion diff on
    # failure, so the unconditional print was pure noise.
    # To regenerate the snapshot fixture, uncomment:
    # with open(file_relative_path(__file__, "./fixtures/ValidationResultsPageRenderer_render_with_run_info_at_start_nc.json"), "w") as f:
    #     json.dump(rendered_validation_results, f, indent=2)
    assert (rendered_validation_results ==
            ValidationResultsPageRenderer_render_with_run_info_at_start)
def test_snapshot_ValidationResultsPageRenderer_render_with_run_info_at_end(
    titanic_profiled_evrs_1,
    ValidationResultsPageRenderer_render_with_run_info_at_end,
):
    """Snapshot test: rendering with run info at the END matches the stored fixture.

    NOTE(review): a test with this same name appears again later in this chunk;
    if both live in one module, the later definition shadows this one — confirm.
    """
    renderer = ValidationResultsPageRenderer(run_info_at_end=True)
    actual = renderer.render(titanic_profiled_evrs_1).to_json_dict()
    # Pin the vega-lite schema version so the comparison is independent of the
    # installed vega-lite release.
    graph_block = (
        actual["sections"][5]["content_blocks"][1]["table"][10][2]
        ["content_blocks"][1]
    )
    schema_url = graph_block["graph"]["$schema"]
    graph_block["graph"]["$schema"] = re.sub(r"v\d*\.\d*\.\d*", "v4.8.1",
                                             schema_url)
    assert (actual == ValidationResultsPageRenderer_render_with_run_info_at_end)
def test_snapshot_ValidationResultsPageRenderer_render_with_run_info_at_end(
    titanic_profiled_evrs_1,
    ValidationResultsPageRenderer_render_with_run_info_at_end,
):
    """Snapshot test: rendering with run info at the END matches the stored fixture.

    NOTE(review): a test with this same name appears earlier in this chunk; if
    both live in one module, this later definition shadows the earlier — confirm.
    """
    validation_results_page_renderer = ValidationResultsPageRenderer(
        run_info_at_end=True)
    rendered_validation_results = validation_results_page_renderer.render(
        titanic_profiled_evrs_1).to_json_dict()
    # Removed leftover debug output: a function-local `import pprint` and two
    # `pprint.pprint(...)` calls that dumped both the actual sections and the
    # expected fixture on every run. pytest's assertion diff already surfaces
    # mismatches on failure.
    # To regenerate the snapshot fixture, uncomment:
    # with open(file_relative_path(__file__, "./fixtures/ValidationResultsPageRenderer_render_with_run_info_at_end_nc.json"), "w") as f:
    #     json.dump(rendered_validation_results, f, indent=2)
    assert (rendered_validation_results ==
            ValidationResultsPageRenderer_render_with_run_info_at_end)
def test_snapshot_ValidationResultsPageRenderer_render_with_run_info_at_start(
    titanic_profiled_evrs_1,
    ValidationResultsPageRenderer_render_with_run_info_at_start,
):
    """Snapshot test: rendering with run info at the START matches the stored fixture.

    NOTE(review): a test with this same name appears earlier in this chunk; if
    both live in one module, this later definition shadows the earlier — confirm.
    """
    renderer = ValidationResultsPageRenderer(run_info_at_end=False)
    actual = renderer.render(titanic_profiled_evrs_1).to_json_dict()
    # Pin the vega-lite schema version so the comparison is independent of the
    # installed vega-lite release.
    graph_block = (
        actual["sections"][5]["content_blocks"][1]["table"][10][2]
        ["content_blocks"][1]
    )
    schema_url = graph_block["graph"]["$schema"]
    graph_block["graph"]["$schema"] = re.sub(r"v\d*\.\d*\.\d*", "v4.8.1",
                                             schema_url)
    # To regenerate the snapshot fixture, uncomment:
    # with open(file_relative_path(__file__, "./fixtures/ValidationResultsPageRenderer_render_with_run_info_at_start_nc.json"), "w") as f:
    #     json.dump(rendered_validation_results, f, indent=2)
    assert (actual == ValidationResultsPageRenderer_render_with_run_info_at_start)
def test_snapshot_render_section_page_with_fixture_data(
        validation_operator_result):
    """
    Make sure the appropriate markdown rendering is done for the applied fixture.
    Args:
        validation_operator_result: test fixture
    Returns:
        None
    """
    # Re-hydrate the raw fixture dict into a ValidationOperatorResult object.
    validation_operator_result = ValidationOperatorResult(
        **validation_operator_result)
    md_str = ""
    validation_results_page_renderer = ValidationResultsPageRenderer(
        run_info_at_end=True)
    # Render every validation result in the operator run and concatenate the
    # markdown documents (separated by a single space).
    for validation_result in validation_operator_result.list_validation_results(
    ):
        rendered_document_content = validation_results_page_renderer.render(
            validation_result)
        md_str += DefaultMarkdownPageView().render(
            rendered_document_content) + " "
    # Strip ALL whitespace from both the actual and the expected markdown so the
    # comparison below is insensitive to spacing/indentation/newline layout.
    md_str = md_str.replace(" ", "").replace("\t", "").replace("\n", "")
    print(md_str)  # captured by pytest; shown on failure to ease snapshot diffs
    assert (md_str == """ # Validation Results ## Overview ### **Expectation Suite:** **basic.warning** **Status:** **Failed** ### Statistics | | | | ------------ | ------------ | Evaluated Expectations | 11 Successful Expectations | 9 Unsuccessful Expectations | 2 Success Percent | ≈81.82% ## Table-Level Expectations | Status | Expectation | Observed Value | | ------------ | ------------ | ------------ | ❌ | Must have between **27000** and **33000** rows. | 30 ✅ | Must have exactly **3** columns. | 3 ✅ | Must have these columns in this order: **Team**, ** "Payroll (millions)"**, ** "Wins"** | ['Team', ' "Payroll (millions)"', ' "Wins"'] ## "Payroll (millions)" | Status | Expectation | Observed Value | | ------------ | ------------ | ------------ | ✅ | values must never be null. | 100% not null ✅ | minimum value must be between **54.24** and **56.24**. | 55.24 ✅ | maximum value must be between **196.96** and **198.96**. | 197.96 ✅ | mean must be between **97.01899999999998** and **99.01899999999998**. | ≈98.019 ❌ | median must be between **84000.75** and **86000.75**. | 85.75 ✅ | quantiles must be within the following value ranges. 
| Quantile | Min Value | Max Value | | ------------ | ------------ | ------------ | 0.05 | 54.37 | 56.37 Q1 | 74.48 | 76.48 Median | 82.31 | 84.31 Q3 | 116.62 | 118.62 0.95 | 173.54 | 175.54 | | Quantile | Value | | ------------ | ------------ | 0.05 | 55.37 Q1 | 75.48 Median | 83.31 Q3 | 117.62 0.95 | 174.54 ## Team | Status | Expectation | Observed Value | | ------------ | ------------ | ------------ | ✅ | values must never be null. | 100% not null ✅ | values must always be more than **1** characters long. | 0% unexpected ### Info | | | | ------------ | ------------ | Great Expectations Version | 0.11.8+4.g4ab34df3.dirty Run Name | getest run Run Time | 2020-07-27T17:19:32.959193+00:00 ### Batch Markers | | | | ------------ | ------------ | **ge_load_time** | **20200727T171932.954810Z** **pandas_data_fingerprint** | **8c46fdaf0bd356fd58b7bcd9b2e6012d** ### Batch Kwargs | | | | ------------ | ------------ | **PandasInMemoryDF** | **True** **datasource** | **getest** **ge_batch_id** | **56615f40-d02d-11ea-b6ea-acde48001122** ----------------------------------------------------------- Powered by [Great Expectations](https://greatexpectations.io/) # Validation Results ## Overview ### **Expectation Suite:** **basic.warning** **Status:** **Failed** ### Statistics | | | | ------------ | ------------ | Evaluated Expectations | 11 Successful Expectations | 9 Unsuccessful Expectations | 2 Success Percent | ≈81.82% ## Table-Level Expectations | Status | Expectation | Observed Value | | ------------ | ------------ | ------------ | ❌ | Must have between **27000** and **33000** rows. | 30 ✅ | Must have exactly **3** columns. | 3 ✅ | Must have these columns in this order: **Team**, ** "Payroll (millions)"**, ** "Wins"** | ['Team', ' "Payroll (millions)"', ' "Wins"'] ## "Payroll (millions)" | Status | Expectation | Observed Value | | ------------ | ------------ | ------------ | ✅ | values must never be null. 
| 100% not null ✅ | minimum value must be between **54.24** and **56.24**. | 55.24 ✅ | maximum value must be between **196.96** and **198.96**. | 197.96 ✅ | mean must be between **97.01899999999998** and **99.01899999999998**. | ≈98.019 ❌ | median must be between **84000.75** and **86000.75**. | 85.75 ✅ | quantiles must be within the following value ranges. | Quantile | Min Value | Max Value | | ------------ | ------------ | ------------ | 0.05 | 54.37 | 56.37 Q1 | 74.48 | 76.48 Median | 82.31 | 84.31 Q3 | 116.62 | 118.62 0.95 | 173.54 | 175.54 | | Quantile | Value | | ------------ | ------------ | 0.05 | 55.37 Q1 | 75.48 Median | 83.31 Q3 | 117.62 0.95 | 174.54 ## Team | Status | Expectation | Observed Value | | ------------ | ------------ | ------------ | ✅ | values must never be null. | 100% not null ✅ | values must always be more than **1** characters long. | 0% unexpected ### Info | | | | ------------ | ------------ | Great Expectations Version | 0.11.8+4.g4ab34df3.dirty Run Name | getest run Run Time | 2020-07-27T17:19:32.959193+00:00 ### Batch Markers | | | | ------------ | ------------ | **ge_load_time** | **20200727T171932.954810Z** **pandas_data_fingerprint** | **8c46fdaf0bd356fd58b7bcd9b2e6012d** ### Batch Kwargs | | | | ------------ | ------------ | **PandasInMemoryDF** | **True** **datasource** | **getest** **ge_batch_id** | **56615f40-d02d-11ea-b6ea-acde48001122** ----------------------------------------------------------- Powered by [Great Expectations](https://greatexpectations.io/) """.replace(" ", "").replace("\t", "").replace("\n", ""))