def test_suite_delete_with_one_suite(
    mock_emit, caplog, empty_data_context_stats_enabled
):
    project_dir = empty_data_context_stats_enabled.root_directory
    context = DataContext(project_dir)
    suite = context.create_expectation_suite("a.warning")
    context.save_expectation_suite(suite)
    mock_emit.reset_mock()

    suite_dir = os.path.join(project_dir, "expectations", "a")
    suite_path = os.path.join(suite_dir, "warning.json")
    assert os.path.isfile(suite_path)

    runner = CliRunner(mix_stderr=False)
    result = runner.invoke(
        cli,
        "suite delete a.warning -d {}".format(project_dir),
        catch_exceptions=False,
    )
    assert result.exit_code == 0
    assert "Deleted the expectation suite named: a.warning" in result.output

    # assert not os.path.isdir(suite_dir)
    assert not os.path.isfile(suite_path)

    assert mock_emit.call_count == 2
    assert mock_emit.call_args_list == [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call({"event": "cli.suite.delete", "event_payload": {}, "success": True}),
    ]
    assert_no_logging_messages_or_tracebacks(caplog, result)
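# NOTE: `mock_emit` above is not a pytest fixture - it implies a @mock.patch
# decorator attached to the test in the original source, lost in extraction.
# A minimal sketch of the assumed setup (the usage-statistics emitter path is
# an assumption about the surrounding module, not confirmed here):
#
# @mock.patch(
#     "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
# )
# def test_suite_delete_with_one_suite(mock_emit, caplog, ...):
#     ...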
def create_empty_suite(
    context: DataContext, expectation_suite_name: str, batch_kwargs
) -> None:
    suite = context.create_expectation_suite(
        expectation_suite_name, overwrite_existing=False
    )
    suite.add_citation(comment="New suite added via CLI", batch_kwargs=batch_kwargs)
    context.save_expectation_suite(suite, expectation_suite_name)
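# Usage sketch for create_empty_suite - the context root path and batch_kwargs
# below are hypothetical values, shown only to illustrate the call shape:
#
# context = DataContext("/path/to/project/great_expectations")
# create_empty_suite(
#     context,
#     "my_table.warning",
#     batch_kwargs={"path": "/path/to/data/my_table.csv", "datasource": "my_datasource"},
# )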
def test_suite_edit_multiple_datasources_with_generator_with_no_additional_args_with_suite_without_citations(
    caplog, site_builder_data_context_with_html_store_titanic_random,
):
    """
    Here we verify that the "suite edit" command helps the user specify batch
    kwargs when it is called without the optional arguments that identify the
    batch.

    First, we call the "suite new" command to create the expectation suite our
    test will edit - this step is just setup.

    We then call the "suite edit" command without any optional arguments. This
    means the command will help us specify the batch kwargs interactively.

    The data context has two datasources - we choose one of them. It has a
    generator configured. We choose to use the generator and select a
    generator asset from the list.
    """
    root_dir = site_builder_data_context_with_html_store_titanic_random.root_directory
    os.chdir(root_dir)
    runner = CliRunner(mix_stderr=False)
    result = runner.invoke(
        cli,
        ["suite", "new", "-d", root_dir, "--suite", "foo_suite", "--no-view"],
        input="2\n1\n1\n\n",
        catch_exceptions=False,
    )
    stdout = result.stdout
    assert result.exit_code == 0
    assert "A new Expectation suite 'foo_suite' was added to your project" in stdout

    # remove the citations from the suite
    context = DataContext(root_dir)
    suite = context.get_expectation_suite("foo_suite")
    suite.meta.pop("citations")
    context.save_expectation_suite(suite)

    runner = CliRunner(mix_stderr=False)
    result = runner.invoke(
        cli,
        ["suite", "edit", "foo_suite", "-d", root_dir, "--no-jupyter"],
        input="2\n1\n1\n\n",
        catch_exceptions=False,
    )

    assert result.exit_code == 0
    stdout = result.stdout
    assert "Select a datasource" in stdout
    assert "Which data would you like to use" in stdout
    assert "To continue editing this suite, run" in stdout

    expected_notebook_path = os.path.join(root_dir, "uncommitted", "foo_suite.ipynb")
    assert os.path.isfile(expected_notebook_path)
    expected_suite_path = os.path.join(root_dir, "expectations", "foo_suite.json")
    assert os.path.isfile(expected_suite_path)
    assert_no_logging_messages_or_tracebacks(caplog, result)
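# For reference: suite.add_citation(...) appends an entry to
# suite.meta["citations"], which is why these tests pop that key to simulate a
# suite saved without citations. A sketch of the assumed entry shape (field
# names are an assumption, not verified against the library):
#
# suite.meta["citations"] == [
#     {
#         "citation_date": "...",  # timestamp recorded when the citation was added
#         "comment": "New suite added via CLI",
#         "batch_kwargs": {...},   # the kwargs passed to add_citation
#     },
# ]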
def create_empty_suite(
    context: DataContext, expectation_suite_name: str, batch_kwargs
) -> None:
    cli_message(
        """
Great Expectations will create a new Expectation Suite '{0:s}' and store it here:

  {1:s}
""".format(
            expectation_suite_name,
            context.stores[
                context.expectations_store_name
            ].store_backend.get_url_for_key(
                ExpectationSuiteIdentifier(
                    expectation_suite_name=expectation_suite_name
                ).to_tuple()
            ),
        )
    )
    suite = context.create_expectation_suite(expectation_suite_name)
    suite.add_citation(comment="New suite added via CLI", batch_kwargs=batch_kwargs)
    context.save_expectation_suite(suite, expectation_suite_name)
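# NOTE: the next two tests take `mock_webbrowser` and `mock_subprocess`
# arguments, which implies stacked @mock.patch decorators in the original
# source (the decorator closest to the function maps to the first argument).
# A hedged sketch of the assumed setup:
#
# @mock.patch("subprocess.call", return_value=True, side_effect=None)
# @mock.patch("webbrowser.open", return_value=True, side_effect=None)
# def test_...(mock_webbrowser, mock_subprocess, caplog, ...):
#     ...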
def test_suite_edit_multiple_datasources_with_generator_with_no_additional_args_with_suite_without_citations(
    mock_webbrowser,
    mock_subprocess,
    caplog,
    site_builder_data_context_with_html_store_titanic_random,
):
    """
    Here we verify that the "suite edit" command helps the user specify batch
    kwargs when it is called without the optional arguments that identify the
    batch.

    First, we call the "suite demo" command to create the expectation suite our
    test will edit - this step is just setup.

    We then call the "suite edit" command without any optional arguments. This
    means the command will help us specify the batch kwargs interactively.

    The data context has two datasources - we choose one of them. It has a
    generator configured. We choose to use the generator and select a
    generator asset from the list.

    The command should:
    - NOT open Data Docs
    - open jupyter
    """
    root_dir = site_builder_data_context_with_html_store_titanic_random.root_directory
    os.chdir(root_dir)
    runner = CliRunner(mix_stderr=False)
    result = runner.invoke(
        cli,
        ["suite", "demo", "-d", root_dir, "--suite", "foo_suite"],
        input="2\n1\n1\n\n",
        catch_exceptions=False,
    )
    assert result.exit_code == 0
    assert mock_webbrowser.call_count == 2
    assert mock_subprocess.call_count == 0
    mock_webbrowser.reset_mock()
    mock_subprocess.reset_mock()

    # remove the citations from the suite
    context = DataContext(root_dir)
    suite = context.get_expectation_suite("foo_suite")
    assert isinstance(suite, ExpectationSuite)
    suite.meta.pop("citations")
    context.save_expectation_suite(suite)

    # Actual testing really starts here
    runner = CliRunner(mix_stderr=False)
    result = runner.invoke(
        cli,
        ["suite", "edit", "foo_suite", "-d", root_dir],
        input="2\n1\n1\n\n",
        catch_exceptions=False,
    )

    assert result.exit_code == 0
    stdout = result.stdout
    assert "A batch of data is required to edit the suite" in stdout
    assert "Select a datasource" in stdout
    assert "Which data would you like to use" in stdout

    expected_notebook_path = os.path.join(
        root_dir, "uncommitted", "edit_foo_suite.ipynb"
    )
    assert os.path.isfile(expected_notebook_path)
    expected_suite_path = os.path.join(root_dir, "expectations", "foo_suite.json")
    assert os.path.isfile(expected_suite_path)
    assert mock_webbrowser.call_count == 0
    assert mock_subprocess.call_count == 1
    assert_no_logging_messages_or_tracebacks(caplog, result)
def test_suite_edit_one_datasources_no_generator_with_no_additional_args_and_no_citations(
    mock_webbrowser, mock_subprocess, caplog, empty_data_context, filesystem_csv_2
):
    """
    Here we verify that the "suite edit" command helps the user specify batch
    kwargs when it is called without the optional arguments that identify the
    batch.

    First, we call the "suite demo" command to create the expectation suite our
    test will edit - this step is just setup.

    We then call the "suite edit" command without any optional arguments. This
    means the command will help us specify the batch kwargs interactively.

    The data context has one datasource, with no generators configured, so the
    command prompts us to enter the file path.
    """
    empty_data_context.add_datasource(
        "my_datasource",
        module_name="great_expectations.datasource",
        class_name="PandasDatasource",
    )
    not_so_empty_data_context = empty_data_context
    project_root_dir = not_so_empty_data_context.root_directory
    root_dir = project_root_dir
    os.chdir(root_dir)

    runner = CliRunner(mix_stderr=False)
    result = runner.invoke(
        cli,
        ["suite", "demo", "-d", root_dir],
        input="{0:s}\nmy_new_suite\n\n".format(
            os.path.join(filesystem_csv_2, "f1.csv")
        ),
        catch_exceptions=False,
    )
    stdout = result.stdout
    assert mock_webbrowser.call_count == 1
    assert mock_subprocess.call_count == 0
    mock_subprocess.reset_mock()
    mock_webbrowser.reset_mock()
    assert result.exit_code == 0
    assert (
        "Great Expectations will store these expectations in a new Expectation Suite 'my_new_suite' here:"
        in stdout
    )

    # remove the citations from the suite
    context = DataContext(project_root_dir)
    suite = context.get_expectation_suite("my_new_suite")
    suite.meta.pop("citations")
    context.save_expectation_suite(suite)

    runner = CliRunner(mix_stderr=False)
    result = runner.invoke(
        cli,
        ["suite", "edit", "my_new_suite", "-d", root_dir],
        input="{0:s}\n\n".format(os.path.join(filesystem_csv_2, "f1.csv")),
        catch_exceptions=False,
    )

    assert result.exit_code == 0
    stdout = result.stdout
    assert "Select a datasource" not in stdout
    assert "Which data would you like to use" not in stdout
    assert "Enter the path" in stdout

    expected_notebook_path = os.path.join(
        root_dir, "uncommitted", "edit_my_new_suite.ipynb"
    )
    assert os.path.isfile(expected_notebook_path)
    expected_suite_path = os.path.join(root_dir, "expectations", "my_new_suite.json")
    assert os.path.isfile(expected_suite_path)
    assert mock_webbrowser.call_count == 0
    assert mock_subprocess.call_count == 1
    assert_no_logging_messages_or_tracebacks(caplog, result)
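# For context: assert_no_logging_messages_or_tracebacks is a shared test
# helper used by every test above. A minimal sketch of the checks it is
# assumed to perform (the real helper may do more):
#
# def assert_no_logging_messages_or_tracebacks(my_caplog, click_result):
#     assert not my_caplog.messages                   # no log records were emitted
#     assert "Traceback" not in click_result.output   # CLI output shows no stack trace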