def test_cli_profile(titanic_data_context):
    """Profile the single configured datasource, then build a site from an
    explicit site_config and check the index location and per-asset link counts.
    """
    datasource_name = titanic_data_context.list_datasources()[0]["name"]
    titanic_data_context.profile_datasource(datasource_name)

    # Both validation-backed stores read the same directory; the run_id_filter
    # decides which results land in the validations vs. profiling sections.
    validations_base_directory = "uncommitted/validations/"
    renderer_module = "great_expectations.render.renderer"
    site_config = {
        "site_store": {
            "type": "filesystem",
            "base_directory": "uncommitted/documentation/local_site",
        },
        "validations_store": {
            "type": "filesystem",
            "base_directory": validations_base_directory,
            "run_id_filter": {"ne": "profiling"},
        },
        "profiling_store": {
            "type": "filesystem",
            "base_directory": validations_base_directory,
            "run_id_filter": {"eq": "profiling"},
        },
        "datasources": "*",
        "sections": {
            "index": {
                "renderer": {
                    "module": renderer_module,
                    "class": "SiteIndexPageRenderer",
                },
            },
            "expectations": {
                "renderer": {
                    "module": renderer_module,
                    "class": "ExpectationSuitePageRenderer",
                },
                "view": {
                    "module": "great_expectations.render.view",
                    "class": "DefaultJinjaPageView",
                },
            },
        },
    }

    res = SiteBuilder.build(titanic_data_context, site_config)
    index_page_locator_info = res[0]
    index_links_dict = res[1]

    expected_index_path = (
        titanic_data_context.data_doc_directory + '/local_site/index.html')
    assert index_page_locator_info['path'] == expected_index_path

    # Profiling produced exactly one suite and no (non-profiling) validations;
    # profiling links are excluded here because this run predates the build.
    titanic_links = index_links_dict['mydatasource']['mygenerator']['Titanic']
    assert len(titanic_links['expectation_suite_links']) == 1
    assert len(titanic_links['validation_links']) == 0
    assert len(titanic_links['profiling_links']) == 0
def test_configuration_driven_site_builder(site_builder_data_context_with_html_store_titanic_random):
    """End-to-end test of SiteBuilder driven by the 'local_site' data docs config.

    Profiles the titanic datasource, runs one additional validation with a
    non-"profiling" run id, builds the site, and verifies:
    * index links are filtered by ``datasource_whitelist`` and ``run_id_filter``;
    * an incremental rebuild adds the new validation page without re-rendering
      pages produced by the previous build (checked via file mtimes).

    Fixes vs. previous revision: removed a leftover debug ``print("mmm")`` and a
    stray no-op expression statement (an attribute access whose value was discarded).
    """
    context = site_builder_data_context_with_html_store_titanic_random
    context.add_validation_operator(
        "validate_and_store",
        {
            "class_name": "ActionListValidationOperator",
            "action_list": [{
                "name": "store_validation_result",
                "action": {
                    "class_name": "StoreAction",
                    "target_store_name": "validations_store",
                }
            }, {
                "name": "extract_and_store_eval_parameters",
                "action": {
                    "class_name": "ExtractAndStoreEvaluationParamsAction",
                    "target_store_name": "evaluation_parameter_store",
                }
            }]
        }
    )

    # profiling the Titanic datasource will generate one expectation suite and one validation
    # that is a profiling result
    context.profile_datasource(context.list_datasources()[0]["name"])

    # creating another validation result using the profiler's suite (no need to use a new expectation suite
    # for this test). having two validation results - one with run id "profiling" - allows us to test
    # the logic of run_id_filter that helps filtering validation results to be included in
    # the profiling and the validation sections.
    batch = context.get_batch('Titanic',
                              expectation_suite_name='BasicDatasetProfiler',
                              batch_kwargs=context.yield_batch_kwargs('Titanic'))
    run_id = "test_run_id_12345"
    context.run_validation_operator(
        assets_to_validate=[batch],
        run_id=run_id,
        validation_operator_name="validate_and_store",
    )

    data_docs_config = context._project_config.get('data_docs_sites')
    local_site_config = data_docs_config['local_site']
    # local_site_config.pop('module_name')  # This isn't necessary
    local_site_config.pop('class_name')

    # set datasource_whitelist
    local_site_config['datasource_whitelist'] = ['titanic']

    keys_as_strings = [x.to_string() for x in context.stores["validations_store"].list_keys()]
    assert set(keys_as_strings) == set([
        "ValidationResultIdentifier.titanic.default.Titanic.BasicDatasetProfiler.test_run_id_12345",
        "ValidationResultIdentifier.titanic.default.Titanic.BasicDatasetProfiler.profiling",
        "ValidationResultIdentifier.random.default.f2.BasicDatasetProfiler.profiling",
        "ValidationResultIdentifier.random.default.f1.BasicDatasetProfiler.profiling",
    ])

    site_builder = SiteBuilder(
        data_context=context,
        **local_site_config
    )
    res = site_builder.build()

    index_page_locator_info = res[0]
    index_links_dict = res[1]

    print(json.dumps(index_page_locator_info, indent=2))
    assert index_page_locator_info == context.root_directory + '/uncommitted/data_docs/local_site/index.html'

    print(json.dumps(index_links_dict, indent=2))
    # round-trip through json normalizes project-specific container types
    assert json.loads(json.dumps(index_links_dict)) == json.loads("""\
{
  "titanic": {
    "default": {
      "Titanic": {
        "profiling_links": [
          {
            "full_data_asset_name": "titanic/default/Titanic",
            "expectation_suite_name": "BasicDatasetProfiler",
            "filepath": "validations/profiling/titanic/default/Titanic/BasicDatasetProfiler.html",
            "source": "titanic",
            "generator": "default",
            "asset": "Titanic",
            "run_id": "profiling",
            "validation_success": false
          }
        ],
        "validations_links": [
          {
            "full_data_asset_name": "titanic/default/Titanic",
            "expectation_suite_name": "BasicDatasetProfiler",
            "filepath": "validations/test_run_id_12345/titanic/default/Titanic/BasicDatasetProfiler.html",
            "source": "titanic",
            "generator": "default",
            "asset": "Titanic",
            "run_id": "test_run_id_12345",
            "validation_success": false
          }
        ],
        "expectations_links": [
          {
            "full_data_asset_name": "titanic/default/Titanic",
            "expectation_suite_name": "BasicDatasetProfiler",
            "filepath": "expectations/titanic/default/Titanic/BasicDatasetProfiler.html",
            "source": "titanic",
            "generator": "default",
            "asset": "Titanic",
            "run_id": null,
            "validation_success": null
          }
        ]
      }
    }
  }
}
""")

    assert "random" not in index_links_dict, \
        """`random` must not appear in this documentation, because `datasource_whitelist` config option specifies only `titanic`"""

    assert len(index_links_dict['titanic']['default']['Titanic']['validations_links']) == 1, \
        """
    The only rendered validation should be the one not generated by the profiler
    """

    # save documentation locally
    safe_mmkdir("./tests/render/output")
    safe_mmkdir("./tests/render/output/documentation")
    if os.path.isdir("./tests/render/output/documentation"):
        shutil.rmtree("./tests/render/output/documentation")
    shutil.copytree(
        os.path.join(
            site_builder_data_context_with_html_store_titanic_random.root_directory,
            "uncommitted/data_docs/"
        ),
        "./tests/render/output/documentation"
    )

    # let's create another validation result and run the site builder to add it
    # to the data docs
    # the operator does not have an StoreAction action configured, so the site
    # will not be updated without our call to site builder
    ts_last_mod_0 = os.path.getmtime(os.path.join(
        site_builder.site_index_builder.target_store.store_backends[ValidationResultIdentifier].full_base_directory,
        "validations/test_run_id_12345/titanic/default/Titanic/BasicDatasetProfiler.html"))

    run_id = "test_run_id_12346"
    operator_result = context.run_validation_operator(
        assets_to_validate=[batch],
        run_id=run_id,
        validation_operator_name="validate_and_store",
    )
    validation_result_id = ValidationResultIdentifier(
        expectation_suite_identifier=[key for key in operator_result["details"].keys()][0],
        run_id=run_id)
    res = site_builder.build(resource_identifiers=[validation_result_id])
    index_links_dict = res[1]

    # verify that an additional validation result HTML file was generated
    assert len(index_links_dict["titanic"]["default"]["Titanic"]["validations_links"]) == 2

    # verify that the validation result HTML file rendered in the previous run was NOT updated
    ts_last_mod_1 = os.path.getmtime(os.path.join(
        site_builder.site_index_builder.target_store.store_backends[ValidationResultIdentifier].full_base_directory,
        "validations/test_run_id_12345/titanic/default/Titanic/BasicDatasetProfiler.html"))
    assert ts_last_mod_0 == ts_last_mod_1
def test_configuration_driven_site_builder(
    site_builder_data_context_with_html_store_titanic_random,
):
    """End-to-end test of SiteBuilder driven by the 'local_site' data docs config.

    Profiles the titanic datasource, runs an extra validation under a named
    RunIdentifier, builds the site, and verifies:
    * index link counts for expectations / validations / profiling sections;
    * incremental rebuild adds only the new validation page (mtime check);
    * ``get_resource_url`` returns the correct ``file://`` URLs;
    * ``clean_site`` removes the rendered site for one site or all sites.

    Fixes vs. previous revision: removed a stray no-op expression statement
    (an attribute access whose value was discarded) and deduplicated three
    byte-identical "profiling" membership assertions.
    """
    context = site_builder_data_context_with_html_store_titanic_random
    context.add_validation_operator(
        "validate_and_store",
        {
            "class_name": "ActionListValidationOperator",
            "action_list": [
                {
                    "name": "store_validation_result",
                    "action": {
                        "class_name": "StoreValidationResultAction",
                        "target_store_name": "validations_store",
                    },
                },
                {
                    "name": "extract_and_store_eval_parameters",
                    "action": {
                        "class_name": "StoreEvaluationParametersAction",
                        "target_store_name": "evaluation_parameter_store",
                    },
                },
            ],
        },
    )
    # profiling the Titanic datasource will generate one expectation suite and one validation
    # that is a profiling result
    datasource_name = "titanic"
    data_asset_name = "Titanic"
    profiler_name = "BasicDatasetProfiler"
    generator_name = "subdir_reader"
    context.profile_datasource(datasource_name)

    # creating another validation result using the profiler's suite (no need to use a new expectation suite
    # for this test). having two validation results - one with run id "profiling" - allows us to test
    # the logic of run_name_filter that helps filtering validation results to be included in
    # the profiling and the validation sections.
    batch_kwargs = context.build_batch_kwargs(
        datasource=datasource_name,
        batch_kwargs_generator=generator_name,
        name=data_asset_name,
    )
    expectation_suite_name = "{}.{}.{}.{}".format(datasource_name,
                                                  generator_name,
                                                  data_asset_name,
                                                  profiler_name)
    batch = context.get_batch(
        batch_kwargs=batch_kwargs,
        expectation_suite_name=expectation_suite_name,
    )
    run_id = RunIdentifier(run_name="test_run_id_12345")
    context.run_validation_operator(
        assets_to_validate=[batch],
        run_id=run_id,
        validation_operator_name="validate_and_store",
    )
    data_docs_config = context._project_config.data_docs_sites
    local_site_config = data_docs_config["local_site"]

    validations_set = set(context.stores["validations_store"].list_keys())
    assert len(validations_set) == 6
    assert (ValidationResultIdentifier(
        expectation_suite_identifier=ExpectationSuiteIdentifier(
            expectation_suite_name=expectation_suite_name),
        run_id="test_run_id_12345",
        batch_identifier=batch.batch_id,
    ) in validations_set)
    # NOTE(review): the original repeated this identical "profiling" membership
    # check three times (copy/paste); one check is equivalent.
    assert (ValidationResultIdentifier(
        expectation_suite_identifier=ExpectationSuiteIdentifier(
            expectation_suite_name=expectation_suite_name),
        run_id="profiling",
        batch_identifier=batch.batch_id,
    ) in validations_set)

    site_builder = SiteBuilder(
        data_context=context,
        runtime_environment={"root_directory": context.root_directory},
        **local_site_config)
    res = site_builder.build()

    index_page_locator_info = res[0]
    index_links_dict = res[1]

    # assert that how-to buttons and related elements are rendered (default behavior)
    assert_how_to_buttons(context, index_page_locator_info, index_links_dict)
    print(json.dumps(index_page_locator_info, indent=2))
    assert (index_page_locator_info == "file://" + context.root_directory +
            "/uncommitted/data_docs/local_site/index.html")

    # print(json.dumps(index_links_dict, indent=2))
    assert "site_name" in index_links_dict
    assert "expectations_links" in index_links_dict
    assert len(index_links_dict["expectations_links"]) == 5
    assert "validations_links" in index_links_dict
    assert (len(index_links_dict["validations_links"]) == 1), """
    The only rendered validation should be the one not generated by the profiler
    """
    assert "profiling_links" in index_links_dict
    assert len(index_links_dict["profiling_links"]) == 5

    # save documentation locally
    os.makedirs("./tests/render/output", exist_ok=True)
    os.makedirs("./tests/render/output/documentation", exist_ok=True)
    if os.path.isdir("./tests/render/output/documentation"):
        shutil.rmtree("./tests/render/output/documentation")
    shutil.copytree(
        os.path.join(
            site_builder_data_context_with_html_store_titanic_random.root_directory,
            "uncommitted/data_docs/",
        ),
        "./tests/render/output/documentation",
    )

    # let's create another validation result and run the site builder to add it
    # to the data docs
    # the operator does not have a StoreValidationResultAction action configured, so the site
    # will not be updated without our call to site builder
    expectation_suite_path_component = expectation_suite_name.replace(".", "/")
    validation_result_page_path = os.path.join(
        site_builder.site_index_builder.target_store.store_backends[ValidationResultIdentifier].full_base_directory,
        "validations",
        expectation_suite_path_component,
        run_id.run_name,
        run_id.run_time.strftime("%Y%m%dT%H%M%S.%fZ"),
        batch.batch_id + ".html",
    )
    ts_last_mod_0 = os.path.getmtime(validation_result_page_path)

    run_id = RunIdentifier(run_name="test_run_id_12346")
    operator_result = context.run_validation_operator(
        assets_to_validate=[batch],
        run_id=run_id,
        validation_operator_name="validate_and_store",
    )
    validation_result_id = operator_result.list_validation_result_identifiers()[0]
    res = site_builder.build(resource_identifiers=[validation_result_id])
    index_links_dict = res[1]

    # verify that an additional validation result HTML file was generated
    assert len(index_links_dict["validations_links"]) == 2

    # verify that the validation result HTML file rendered in the previous run was NOT updated
    ts_last_mod_1 = os.path.getmtime(validation_result_page_path)
    assert ts_last_mod_0 == ts_last_mod_1

    # verify the site builder method that returns the URL of the HTML file that renders
    # a resource
    new_validation_result_page_path = os.path.join(
        site_builder.site_index_builder.target_store.store_backends[ValidationResultIdentifier].full_base_directory,
        "validations",
        expectation_suite_path_component,
        run_id.run_name,
        run_id.run_time.strftime("%Y%m%dT%H%M%S.%fZ"),
        batch.batch_id + ".html",
    )
    html_url = site_builder.get_resource_url(
        resource_identifier=validation_result_id)
    assert "file://" + new_validation_result_page_path == html_url

    # with no resource identifier, get_resource_url returns the index page URL
    html_url = site_builder.get_resource_url()
    assert ("file://" + os.path.join(
        site_builder.site_index_builder.target_store.store_backends[ValidationResultIdentifier].full_base_directory,
        "index.html",
    ) == html_url)

    team_site_config = data_docs_config["team_site"]
    team_site_builder = SiteBuilder(
        data_context=context,
        runtime_environment={"root_directory": context.root_directory},
        **team_site_config)
    team_site_builder.clean_site()
    obs = [
        url_dict
        for url_dict in context.get_docs_sites_urls(site_name="team_site")
        if url_dict.get("site_url")
    ]
    assert len(obs) == 0

    # exercise clean_site
    site_builder.clean_site()
    obs = [
        url_dict for url_dict in context.get_docs_sites_urls()
        if url_dict.get("site_url")
    ]
    assert len(obs) == 0

    # restore site
    context = site_builder_data_context_with_html_store_titanic_random
    site_builder = SiteBuilder(
        data_context=context,
        runtime_environment={"root_directory": context.root_directory},
        **local_site_config)
    res = site_builder.build()
def test_configuration_driven_site_builder_without_how_to_buttons(
    site_builder_data_context_with_html_store_titanic_random,
):
    """Verify that setting ``show_how_to_buttons: False`` in a site config
    suppresses how-to buttons and related elements in the rendered data docs.

    The check itself is delegated to ``assert_how_to_buttons`` with
    ``show_how_to_buttons=False``.
    """
    context = site_builder_data_context_with_html_store_titanic_random
    # operator stores each validation result plus its evaluation parameters
    context.add_validation_operator(
        "validate_and_store",
        {
            "class_name": "ActionListValidationOperator",
            "action_list": [
                {
                    "name": "store_validation_result",
                    "action": {
                        "class_name": "StoreValidationResultAction",
                        "target_store_name": "validations_store",
                    },
                },
                {
                    "name": "extract_and_store_eval_parameters",
                    "action": {
                        "class_name": "StoreEvaluationParametersAction",
                        "target_store_name": "evaluation_parameter_store",
                    },
                },
            ],
        },
    )
    # profiling the Titanic datasource will generate one expectation suite and one validation
    # that is a profiling result
    datasource_name = "titanic"
    data_asset_name = "Titanic"
    profiler_name = "BasicDatasetProfiler"
    generator_name = "subdir_reader"
    context.profile_datasource(datasource_name)

    # creating another validation result using the profiler's suite (no need to use a new expectation suite
    # for this test). having two validation results - one with run id "profiling" - allows us to test
    # the logic of run_name_filter that helps filtering validation results to be included in
    # the profiling and the validation sections.
    batch_kwargs = context.build_batch_kwargs(
        datasource=datasource_name,
        batch_kwargs_generator=generator_name,
        name=data_asset_name,
    )
    # suite name convention: <datasource>.<generator>.<asset>.<profiler>
    expectation_suite_name = "{}.{}.{}.{}".format(datasource_name,
                                                  generator_name,
                                                  data_asset_name,
                                                  profiler_name)
    batch = context.get_batch(
        batch_kwargs=batch_kwargs,
        expectation_suite_name=expectation_suite_name,
    )
    run_id = "test_run_id_12345"
    context.run_validation_operator(
        assets_to_validate=[batch],
        run_id=run_id,
        validation_operator_name="validate_and_store",
    )
    data_docs_config = context._project_config.data_docs_sites
    local_site_config = data_docs_config["local_site"]

    # set this flag to false in config to hide how-to buttons and related elements
    local_site_config["show_how_to_buttons"] = False

    site_builder = SiteBuilder(
        data_context=context,
        runtime_environment={"root_directory": context.root_directory},
        **local_site_config)
    res = site_builder.build()

    index_page_locator_info = res[0]
    index_links_dict = res[1]

    assert_how_to_buttons(context,
                          index_page_locator_info,
                          index_links_dict,
                          show_how_to_buttons=False)
def test_configuration_driven_site_builder_skip_and_clean_missing(
    site_builder_data_context_with_html_store_titanic_random,
):
    """Auto-cleaning of stale HTML pages by DefaultSiteIndexBuilder.

    When the index page is built, if an HTML page is present without a
    corresponding suite or validation result in the backing store, the HTML
    page should be removed and not appear on the index page. Exercised for
    both expectation suite pages and validation result pages.
    """
    # tests auto-cleaning functionality of DefaultSiteIndexBuilder
    # when index page is built, if an HTML page is present without corresponding suite or validation result,
    # the HTML page should be removed and not appear on index page
    context = site_builder_data_context_with_html_store_titanic_random
    context.add_validation_operator(
        "validate_and_store",
        {
            "class_name": "ActionListValidationOperator",
            "action_list": [
                {
                    "name": "store_validation_result",
                    "action": {
                        "class_name": "StoreValidationResultAction",
                        "target_store_name": "validations_store",
                    },
                },
                {
                    "name": "extract_and_store_eval_parameters",
                    "action": {
                        "class_name": "StoreEvaluationParametersAction",
                        "target_store_name": "evaluation_parameter_store",
                    },
                },
            ],
        },
    )
    # profiling the Titanic datasource will generate one expectation suite and one validation
    # that is a profiling result
    datasource_name = "titanic"
    data_asset_name = "Titanic"
    profiler_name = "BasicDatasetProfiler"
    generator_name = "subdir_reader"
    context.profile_datasource(datasource_name)

    # creating another validation result using the profiler's suite (no need to use a new expectation suite
    # for this test). having two validation results - one with run id "profiling" - allows us to test
    # the logic of run_name_filter that helps filtering validation results to be included in
    # the profiling and the validation sections.
    batch_kwargs = context.build_batch_kwargs(
        datasource=datasource_name,
        batch_kwargs_generator=generator_name,
        data_asset_name=data_asset_name,
    )
    # suite name convention: <datasource>.<generator>.<asset>.<profiler>
    expectation_suite_name = "{}.{}.{}.{}".format(
        datasource_name, generator_name, data_asset_name, profiler_name
    )
    batch = context.get_batch(
        batch_kwargs=batch_kwargs,
        expectation_suite_name=expectation_suite_name,
    )
    run_id = RunIdentifier(run_name="test_run_id_12345")
    context.run_validation_operator(
        assets_to_validate=[batch],
        run_id=run_id,
        validation_operator_name="validate_and_store",
    )
    data_docs_config = context._project_config.data_docs_sites
    local_site_config = data_docs_config["local_site"]

    # counts established by the fixture plus the profiling/validation runs above
    validations_set = set(context.stores["validations_store"].list_keys())
    assert len(validations_set) == 6

    expectation_suite_set = set(context.stores["expectations_store"].list_keys())
    assert len(expectation_suite_set) == 5

    site_builder = SiteBuilder(
        data_context=context,
        runtime_environment={"root_directory": context.root_directory},
        **local_site_config
    )
    site_builder.build()

    # test expectation suite pages
    expectation_suite_html_pages = {
        ExpectationSuiteIdentifier.from_tuple(suite_tuple)
        for suite_tuple in site_builder.target_store.store_backends[
            ExpectationSuiteIdentifier
        ].list_keys()
    }
    # suites in expectations store should match html pages
    assert expectation_suite_set == expectation_suite_html_pages

    # remove suites from expectations store
    for i in range(2):
        context.stores["expectations_store"].remove_key(list(expectation_suite_set)[i])

    # re-build data docs, which should remove suite HTML pages that no longer have corresponding suite in
    # expectations store
    site_builder.build()
    expectation_suite_set = set(context.stores["expectations_store"].list_keys())
    expectation_suite_html_pages = {
        ExpectationSuiteIdentifier.from_tuple(suite_tuple)
        for suite_tuple in site_builder.target_store.store_backends[
            ExpectationSuiteIdentifier
        ].list_keys()
    }
    assert expectation_suite_set == expectation_suite_html_pages

    # test validation result pages
    validation_html_pages = {
        ValidationResultIdentifier.from_tuple(result_tuple)
        for result_tuple in site_builder.target_store.store_backends[
            ValidationResultIdentifier
        ].list_keys()
    }
    # validations in store should match html pages
    assert validations_set == validation_html_pages

    # remove validations from store
    # NOTE(review): this goes through .store_backend (unlike the suite removal
    # above, which calls remove_key on the store directly) — presumably because
    # the validations store lacks a public remove_key; verify against the store API.
    for i in range(2):
        context.stores["validations_store"].store_backend.remove_key(
            list(validations_set)[i]
        )

    # re-build data docs, which should remove validation HTML pages that no longer have corresponding validation in
    # validations store
    site_builder.build()
    validations_set = set(context.stores["validations_store"].list_keys())
    validation_html_pages = {
        ValidationResultIdentifier.from_tuple(result_tuple)
        for result_tuple in site_builder.target_store.store_backends[
            ValidationResultIdentifier
        ].list_keys()
    }
    assert validations_set == validation_html_pages
def test_configuration_driven_site_builder(
        site_builder_data_context_with_html_store_titanic_random):
    """End-to-end test of SiteBuilder driven by the 'local_site' data docs config
    (string run_id API variant).

    Profiles the titanic datasource, runs an extra validation, builds the site,
    checks index link counts, verifies that an incremental rebuild does not
    re-render unchanged validation pages (mtime check), and checks the URLs
    returned by ``get_resource_url``.

    Fixes vs. previous revision: removed a stray no-op expression statement
    (an attribute access whose value was discarded) and deduplicated three
    byte-identical "profiling" membership assertions.
    """
    context = site_builder_data_context_with_html_store_titanic_random
    context.add_validation_operator(
        "validate_and_store", {
            "class_name": "ActionListValidationOperator",
            "action_list": [{
                "name": "store_validation_result",
                "action": {
                    "class_name": "StoreValidationResultAction",
                    "target_store_name": "validations_store",
                }
            }, {
                "name": "extract_and_store_eval_parameters",
                "action": {
                    "class_name": "StoreEvaluationParametersAction",
                    "target_store_name": "evaluation_parameter_store",
                }
            }]
        })
    # profiling the Titanic datasource will generate one expectation suite and one validation
    # that is a profiling result
    datasource_name = 'titanic'
    data_asset_name = "Titanic"
    profiler_name = 'BasicDatasetProfiler'
    generator_name = "subdir_reader"
    context.profile_datasource(datasource_name)

    # creating another validation result using the profiler's suite (no need to use a new expectation suite
    # for this test). having two validation results - one with run id "profiling" - allows us to test
    # the logic of run_id_filter that helps filtering validation results to be included in
    # the profiling and the validation sections.
    batch_kwargs = context.build_batch_kwargs(datasource=datasource_name,
                                              generator=generator_name,
                                              name=data_asset_name)
    expectation_suite_name = "{}.{}.{}.{}".format(datasource_name,
                                                  generator_name,
                                                  data_asset_name,
                                                  profiler_name)
    batch = context.get_batch(
        batch_kwargs=batch_kwargs,
        expectation_suite_name=expectation_suite_name,
    )
    run_id = "test_run_id_12345"
    context.run_validation_operator(
        assets_to_validate=[batch],
        run_id=run_id,
        validation_operator_name="validate_and_store",
    )
    data_docs_config = context._project_config.data_docs_sites
    local_site_config = data_docs_config['local_site']
    # local_site_config.pop('module_name')  # This isn't necessary
    local_site_config.pop('class_name')

    validations_set = set(context.stores["validations_store"].list_keys())
    assert len(validations_set) == 4
    assert ValidationResultIdentifier(
        expectation_suite_identifier=ExpectationSuiteIdentifier(
            expectation_suite_name=expectation_suite_name),
        run_id="test_run_id_12345",
        batch_identifier=batch.batch_id) in validations_set
    # NOTE(review): the original repeated this identical "profiling" membership
    # check three times (copy/paste); one check is equivalent.
    assert ValidationResultIdentifier(
        expectation_suite_identifier=ExpectationSuiteIdentifier(
            expectation_suite_name=expectation_suite_name),
        run_id="profiling",
        batch_identifier=batch.batch_id) in validations_set

    site_builder = SiteBuilder(
        data_context=context,
        runtime_environment={"root_directory": context.root_directory},
        **local_site_config)
    res = site_builder.build()

    index_page_locator_info = res[0]
    index_links_dict = res[1]

    # assert that how-to buttons and related elements are rendered (default behavior)
    assert_how_to_buttons(context, index_page_locator_info, index_links_dict)

    print(json.dumps(index_page_locator_info, indent=2))
    assert index_page_locator_info == context.root_directory + '/uncommitted/data_docs/local_site/index.html'

    print(json.dumps(index_links_dict, indent=2))
    assert "site_name" in index_links_dict
    assert "expectations_links" in index_links_dict
    assert len(index_links_dict["expectations_links"]) == 3
    assert "validations_links" in index_links_dict
    assert len(index_links_dict["validations_links"]) == 1, \
        """
    The only rendered validation should be the one not generated by the profiler
    """
    assert "profiling_links" in index_links_dict
    assert len(index_links_dict["profiling_links"]) == 3

    # save documentation locally
    safe_mmkdir("./tests/render/output")
    safe_mmkdir("./tests/render/output/documentation")
    if os.path.isdir("./tests/render/output/documentation"):
        shutil.rmtree("./tests/render/output/documentation")
    shutil.copytree(
        os.path.join(
            site_builder_data_context_with_html_store_titanic_random.root_directory,
            "uncommitted/data_docs/"),
        "./tests/render/output/documentation")

    # let's create another validation result and run the site builder to add it
    # to the data docs
    # the operator does not have a StoreValidationResultAction action configured, so the site
    # will not be updated without our call to site builder
    expectation_suite_path_component = expectation_suite_name.replace('.', '/')
    validation_result_page_path = os.path.join(
        site_builder.site_index_builder.target_store.store_backends[ValidationResultIdentifier].full_base_directory,
        "validations",
        expectation_suite_path_component,
        run_id,
        batch.batch_id + ".html")
    ts_last_mod_0 = os.path.getmtime(validation_result_page_path)

    run_id = "test_run_id_12346"
    operator_result = context.run_validation_operator(
        assets_to_validate=[batch],
        run_id=run_id,
        validation_operator_name="validate_and_store",
    )
    validation_result_id = ValidationResultIdentifier(
        expectation_suite_identifier=[
            key for key in operator_result["details"].keys()
        ][0],
        run_id=run_id,
        batch_identifier=batch.batch_id)
    res = site_builder.build(resource_identifiers=[validation_result_id])
    index_links_dict = res[1]

    # verify that an additional validation result HTML file was generated
    assert len(index_links_dict["validations_links"]) == 2

    # verify that the validation result HTML file rendered in the previous run was NOT updated
    ts_last_mod_1 = os.path.getmtime(validation_result_page_path)
    assert ts_last_mod_0 == ts_last_mod_1

    # verify the site builder method that returns the URL of the HTML file that renders
    # a resource
    new_validation_result_page_path = os.path.join(
        site_builder.site_index_builder.target_store.store_backends[ValidationResultIdentifier].full_base_directory,
        "validations",
        expectation_suite_path_component,
        run_id,
        batch.batch_id + ".html")
    html_url = site_builder.get_resource_url(
        resource_identifier=validation_result_id)
    assert "file://" + new_validation_result_page_path == html_url

    # with no resource identifier, get_resource_url returns the index page URL
    html_url = site_builder.get_resource_url()
    assert "file://" + os.path.join(
        site_builder.site_index_builder.target_store.store_backends[ValidationResultIdentifier].full_base_directory,
        "index.html") == html_url