    # start_run should reuse the pre-existing run execution and metrics artifact,
    # attach the execution to the experiment context, and record the artifact as
    # an output event of the execution.
    def test_start_run_with_existing_execution_and_artifact(
        self,
        get_execution_mock,
        add_context_artifacts_and_executions_mock,
        get_artifact_mock,
        add_execution_events_mock,
    ):
        aiplatform.init(
            project=_TEST_PROJECT, location=_TEST_LOCATION, experiment=_TEST_EXPERIMENT
        )
        aiplatform.start_run(_TEST_RUN)

        get_execution_mock.assert_called_once_with(
            name=_TEST_EXECUTION_NAME, retry=base._DEFAULT_RETRY
        )
        add_context_artifacts_and_executions_mock.assert_called_once_with(
            context=_TEST_CONTEXT_NAME,
            artifacts=None,
            executions=[_TEST_EXECUTION_NAME],
        )
        get_artifact_mock.assert_called_once_with(
            name=_TEST_ARTIFACT_NAME, retry=base._DEFAULT_RETRY
        )
        add_execution_events_mock.assert_called_once_with(
            execution=_TEST_EXECUTION_NAME,
            events=[Event(artifact=_TEST_ARTIFACT_NAME, type_=Event.Type.OUTPUT)],
        )
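
    # Passing a non-numeric value to log_metrics should raise a TypeError.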
    def test_log_metrics_string_value_raise_error(self):
        aiplatform.init(
            project=_TEST_PROJECT, location=_TEST_LOCATION, experiment=_TEST_EXPERIMENT
        )
        aiplatform.start_run(_TEST_RUN)
        with pytest.raises(TypeError):
            aiplatform.log_metrics({"test": "string"})
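
    # start_run should fail when the run's existing metrics artifact has an
    # unexpected schema.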
    def test_start_run_with_wrong_metrics_artifact_schema(
        self,
    ):
        aiplatform.init(
            project=_TEST_PROJECT, location=_TEST_LOCATION, experiment=_TEST_EXPERIMENT
        )
        with pytest.raises(ValueError):
            aiplatform.start_run(_TEST_RUN)
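
    # Re-initializing with the same project keeps the active experiment and run;
    # initializing with a different project resets both.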
    def test_init_experiment_reset(self):
        aiplatform.init(
            project=_TEST_PROJECT, location=_TEST_LOCATION, experiment=_TEST_EXPERIMENT
        )
        aiplatform.start_run(_TEST_RUN)

        aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)

        assert metadata.metadata_service.experiment_name == _TEST_EXPERIMENT
        assert metadata.metadata_service.run_name == _TEST_RUN

        aiplatform.init(project=_TEST_OTHER_PROJECT, location=_TEST_LOCATION)

        assert metadata.metadata_service.experiment_name is None
        assert metadata.metadata_service.run_name is None
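
    # log_metrics should write the metric key/values into the run's metrics
    # artifact metadata.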
    def test_log_metrics(
        self, update_artifact_mock,
    ):
        aiplatform.init(
            project=_TEST_PROJECT, location=_TEST_LOCATION, experiment=_TEST_EXPERIMENT
        )
        aiplatform.start_run(_TEST_RUN)
        aiplatform.log_metrics(_TEST_METRICS)

        updated_artifact = GapicArtifact(
            name=_TEST_ARTIFACT_NAME,
            display_name=_TEST_ARTIFACT_ID,
            schema_title=constants.SYSTEM_METRICS,
            schema_version=constants.SCHEMA_VERSIONS[constants.SYSTEM_METRICS],
            metadata=_TEST_METRICS,
        )

        update_artifact_mock.assert_called_once_with(artifact=updated_artifact)
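
    # log_params should write the parameter key/values into the run execution's
    # metadata.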
    def test_log_params(
        self, update_execution_mock,
    ):
        aiplatform.init(
            project=_TEST_PROJECT, location=_TEST_LOCATION, experiment=_TEST_EXPERIMENT
        )
        aiplatform.start_run(_TEST_RUN)
        aiplatform.log_params(_TEST_PARAMS)

        updated_execution = GapicExecution(
            name=_TEST_EXECUTION_NAME,
            display_name=_TEST_RUN,
            schema_title=constants.SYSTEM_RUN,
            schema_version=constants.SCHEMA_VERSIONS[constants.SYSTEM_RUN],
            metadata=_TEST_PARAMS,
        )

        update_execution_mock.assert_called_once_with(execution=updated_execution)
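
# A minimal sketch of the experiment-tracking flow these tests exercise. The
# project, location, experiment, run, and logged values below are illustrative
# placeholders rather than the test constants used above.
from google.cloud import aiplatform

aiplatform.init(
    project="my-project",
    location="us-central1",
    experiment="my-experiment",
)
aiplatform.start_run("my-run")
aiplatform.log_params({"learning_rate": 0.01, "batch_size": 32})
aiplatform.log_metrics({"accuracy": 0.95})
experiment_df = aiplatform.get_experiment_df()
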
Example #7
    def test_experiment_logging(self, shared_state):

        # Truncating the name because of resource id constraints from the service
        experiment_name = self._make_display_name("experiment")[:56]

        aiplatform.init(
            project=e2e_base._PROJECT,
            location=e2e_base._LOCATION,
            experiment=experiment_name,
        )

        shared_state["resources"] = [
            aiplatform.metadata.metadata_service._experiment
        ]

        # Truncating the name because of resource id constraints from the service
        run_name = self._make_display_name("run")[:56]

        aiplatform.start_run(run_name)

        shared_state["resources"].extend([
            aiplatform.metadata.metadata_service._run,
            aiplatform.metadata.metadata_service._metrics,
        ])

        aiplatform.log_params(PARAMS)

        aiplatform.log_metrics(METRICS)

        df = aiplatform.get_experiment_df()
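        # get_experiment_df returns one row per run; the row for this run should
        # contain experiment_name and run_name columns plus a param.<key> and
        # metric.<key> column for everything logged above.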

        true_df_dict = {
            f"metric.{key}": value
            for key, value in METRICS.items()
        }
        for key, value in PARAMS.items():
            true_df_dict[f"param.{key}"] = value

        true_df_dict["experiment_name"] = experiment_name
        true_df_dict["run_name"] = run_name

        assert true_df_dict == df.to_dict("records")[0]