def test_get_experiment_df(
        self, list_executions_mock, query_execution_inputs_and_outputs_mock
    ):
        try:
            import pandas as pd
        except ImportError:
            raise ImportError(
                "Pandas is not installed and is required to test the get_experiment_df method. "
                'Please install the SDK using "pip install python-aiplatform[full]"'
            )
        aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)

        experiment_df = aiplatform.get_experiment_df(_TEST_EXPERIMENT)

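        # get_experiment_df is expected to list run executions scoped to the experiment's context.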
        expected_filter = f'schema_title="{constants.SYSTEM_RUN}" AND in_context("{_TEST_CONTEXT_NAME}")'
        list_executions_mock.assert_called_once_with(
            request=ListExecutionsRequest(
                parent=_TEST_PARENT,
                filter=expected_filter,
            )
        )
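        # Each listed execution should have its inputs and outputs queried (one call per run).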
        query_execution_inputs_and_outputs_mock.assert_has_calls(
            [
                call(execution=_TEST_EXECUTION_NAME),
                call(execution=_TEST_OTHER_EXECUTION_NAME),
            ]
        )
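        # Expected result: one row per run, with the experiment/run names plus
        # "param.<key>" and "metric.<key>" columns.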
        experiment_df_truth = pd.DataFrame(
            [
                {
                    "experiment_name": _TEST_EXPERIMENT,
                    "run_name": _TEST_RUN,
                    "param.%s" % _TEST_PARAM_KEY_1: 0.01,
                    "param.%s" % _TEST_PARAM_KEY_2: 0.2,
                    "metric.%s" % _TEST_METRIC_KEY_1: 222,
                    "metric.%s" % _TEST_METRIC_KEY_2: 1,
                },
                {
                    "experiment_name": _TEST_EXPERIMENT,
                    "run_name": _TEST_OTHER_RUN,
                    "param.%s" % _TEST_PARAM_KEY_1: 0.02,
                    "param.%s" % _TEST_PARAM_KEY_2: 0.3,
                    "metric.%s" % _TEST_METRIC_KEY_2: 0.9,
                },
            ]
        )

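        # Column order is not guaranteed, so the helper compares frames with sorted columns.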
        _assert_frame_equal_with_sorted_columns(experiment_df, experiment_df_truth)
Example 2
    def test_experiment_logging(self, shared_state):

        # Truncating the name because of resource id constraints from the service
        experiment_name = self._make_display_name("experiment")[:56]

        aiplatform.init(
            project=e2e_base._PROJECT,
            location=e2e_base._LOCATION,
            experiment=experiment_name,
        )

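        # Record created resources in shared_state so they can be cleaned up after the test.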
        shared_state["resources"] = [
            aiplatform.metadata.metadata_service._experiment
        ]

        # Truncating the name because of resource id constraints from the service
        run_name = self._make_display_name("run")[:56]

        aiplatform.start_run(run_name)

        shared_state["resources"].extend([
            aiplatform.metadata.metadata_service._run,
            aiplatform.metadata.metadata_service._metrics,
        ])

        aiplatform.log_params(PARAMS)

        aiplatform.log_metrics(METRICS)

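        # The experiment DataFrame should include the params and metrics logged above.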
        df = aiplatform.get_experiment_df()

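        # Build the expected single-row record from the logged metrics and params.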
        true_df_dict = {
            f"metric.{key}": value
            for key, value in METRICS.items()
        }
        for key, value in PARAMS.items():
            true_df_dict[f"param.{key}"] = value

        true_df_dict["experiment_name"] = experiment_name
        true_df_dict["run_name"] = run_name

        assert true_df_dict == df.to_dict("records")[0]
Example 3
    def test_get_experiment_df_wrong_schema(self):
        aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)

        # An experiment context with the wrong schema should be rejected with a ValueError.
        with pytest.raises(ValueError):
            aiplatform.get_experiment_df(_TEST_EXPERIMENT)
Example 4
    def test_get_experiment_df_not_exist(self):
        aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)

        # Requesting the DataFrame for a nonexistent experiment should surface NotFound.
        with pytest.raises(exceptions.NotFound):
            aiplatform.get_experiment_df(_TEST_EXPERIMENT)