Example #1
0
def test_logging_failures_are_handled_as_expected():
    """Verify that a batch-logging failure during flush raises MlflowException
    while run creation and termination still complete successfully."""
    experiment_name = "test_run_creation_termination"
    MlflowClient().create_experiment(experiment_name)
    experiment = MlflowClient().get_experiment_by_name(experiment_name)
    experiment_id = experiment.experiment_id

    patch_target = "mlflow.utils.autologging_utils.client.MlflowClient.log_batch"
    with mock.patch(patch_target) as log_batch_mock:
        # Force every batched logging call (metrics/params/tags) to fail.
        log_batch_mock.side_effect = Exception("Batch logging failed!")

        queueing_client = MlflowAutologgingQueueingClient()
        run_id = queueing_client.create_run(experiment_id=experiment_id)
        queueing_client.log_metrics(run_id=run_id, metrics={"a": 1})
        queueing_client.set_terminated(run_id=run_id, status="KILLED")

        with pytest.raises(MlflowException) as exc:
            queueing_client.flush()

        runs = mlflow.search_runs(
            experiment_ids=[experiment_id], output_format="list"
        )
        assert len(runs) == 1
        run = runs[0]
        # Metrics must be absent because the patched log_batch always raised
        assert not run.data.metrics
        # Run termination is independent of batch logging and still succeeds
        assert run.info.status == "KILLED"

        expected_message = (
            "Failed to perform one or more operations on the run with ID "
            "{run_id}".format(run_id=run.info.run_id)
        )
        assert expected_message in str(exc.value)
        assert "Batch logging failed!" in str(exc.value)
def test_client_run_creation_and_termination_are_successful():
    """Verify that a queued run is created with the requested start time and
    tags, and terminated with the requested status and end time after flush."""
    experiment_name = "test_run_creation_termination"
    MlflowClient().create_experiment(experiment_name)
    experiment = MlflowClient().get_experiment_by_name(experiment_name)
    experiment_id = experiment.experiment_id

    queueing_client = MlflowAutologgingQueueingClient()
    run_id = queueing_client.create_run(
        experiment_id=experiment_id, start_time=5, tags={"a": "b"}
    )
    queueing_client.set_terminated(run_id=run_id, status="FINISHED", end_time=6)
    queueing_client.flush()

    runs = mlflow.search_runs(experiment_ids=[experiment_id], output_format="list")
    assert len(runs) == 1
    run = runs[0]
    assert run.info.start_time == 5
    assert run.info.end_time == 6
    assert run.info.status == "FINISHED"
    # The requested tags must be a subset of the run's tags (MLflow may add
    # system tags of its own)
    assert {"a": "b"}.items() <= run.data.tags.items()
Example #3
0
def test_flush_clears_pending_operations():
    """Verify that flush() drains the operation queue: a second flush with no
    newly queued content performs no additional MLflow API calls."""
    client_patch = mock.patch(
        "mlflow.utils.autologging_utils.client.MlflowClient", autospec=True
    )
    with client_patch as mlflow_client_mock:
        queueing_client = MlflowAutologgingQueueingClient()

        run_id = queueing_client.create_run(experiment_id=5)
        queueing_client.log_params(run_id=run_id, params={"a": "b"})
        queueing_client.log_metrics(run_id=run_id, metrics={"c": 1})
        queueing_client.set_terminated(run_id=run_id, status="FINISHED")
        queueing_client.flush()

        # The first flush should issue at least create_run, log_batch, and
        # set_terminated against the mocked MlflowClient
        calls_after_first_flush = len(mlflow_client_mock.method_calls)
        assert calls_after_first_flush >= 3

        queueing_client.flush()

        # No content was queued between flushes, so the second flush must not
        # trigger any further logging API calls
        calls_after_second_flush = len(mlflow_client_mock.method_calls)
        assert calls_after_second_flush == calls_after_first_flush