Example 1
    def create_run(self, experiment_id, user_id, run_name, source_type,
                   source_name, entry_point_name, start_time, source_version,
                   tags):
        """
        Creates a run with the specified attributes.
        """
        if self.get_experiment(experiment_id) is None:
            raise Exception(
                "Could not create run under experiment with ID %s - no such experiment "
                "exists." % experiment_id)
        run_uuid = uuid.uuid4().hex
        artifact_uri = self._get_artifact_dir(experiment_id, run_uuid)
        num_runs = len(self._list_run_uuids(experiment_id))
        run_info = RunInfo(run_uuid=run_uuid,
                           experiment_id=experiment_id,
                           name="Run %s" % num_runs,
                           artifact_uri=artifact_uri,
                           source_type=source_type,
                           source_name=source_name,
                           entry_point_name=entry_point_name,
                           user_id=user_id,
                           status=RunStatus.RUNNING,
                           start_time=start_time,
                           end_time=None,
                           source_version=source_version,
                           tags=tags)
        # Persist run metadata and create directories for logging metrics, parameters, artifacts
        run_dir = self._get_run_dir(run_info.experiment_id, run_info.run_uuid)
        mkdir(run_dir)
        write_yaml(run_dir, FileStore.META_DATA_FILE_NAME, dict(run_info))
        mkdir(run_dir, FileStore.METRICS_FOLDER_NAME)
        mkdir(run_dir, FileStore.PARAMS_FOLDER_NAME)
        mkdir(run_dir, FileStore.ARTIFACTS_FOLDER_NAME)
        return Run(run_info=run_info, run_data=None)
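A minimal usage sketch for this file-backed store follows. The mlruns root path, the integer experiment ID, and the import paths are assumptions for illustration; module layouts vary across early MLflow versions, and tags would be a list of RunTag entities.

import time
from mlflow.store.file_store import FileStore
from mlflow.entities import SourceType

store = FileStore("/tmp/mlruns")  # assumed root directory
run = store.create_run(experiment_id=0, user_id="alice", run_name=None,
                       source_type=SourceType.LOCAL, source_name="train.py",
                       entry_point_name="main",
                       start_time=int(time.time() * 1000),  # epoch millis
                       source_version=None, tags=[])
# The run is created in RUNNING state. Note that run_name is accepted but
# ignored by this implementation: the run is named after the experiment's
# run count, e.g. "Run 0" for the first run.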
Example 2
    def create_run(self, experiment_id, user_id, run_name, source_type,
                   source_name, entry_point_name, start_time, source_version,
                   tags):
        """
        Creates a run under the specified experiment ID, setting the run's status to "RUNNING"
        and the start time to the current time.

        :param experiment_id: ID of the experiment for this run
        :param user_id: ID of the user launching this run
        :param source_type: Enum (integer) describing the source of the run
        :return: The created Run object
        """
        tag_protos = [tag.to_proto() for tag in tags]
        req_body = _message_to_json(
            CreateRun(experiment_id=experiment_id,
                      user_id=user_id,
                      run_name=run_name,
                      source_type=source_type,
                      source_name=source_name,
                      entry_point_name=entry_point_name,
                      start_time=start_time,
                      source_version=source_version,
                      tags=tag_protos))
        response_proto = self._call_endpoint(CreateRun, req_body)
        return Run.from_proto(response_proto.run)
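This REST-backed counterpart serializes the same arguments to JSON and posts them to the tracking server's CreateRun endpoint. A construction sketch follows; the host URL is illustrative, and the exact shape of the get_host_creds callable is an assumption that varies by MLflow version.

from mlflow.store.rest_store import RestStore
from mlflow.utils.rest_utils import MlflowHostCreds

# RestStore takes a zero-argument callable returning host credentials.
store = RestStore(lambda: MlflowHostCreds("http://localhost:5000"))
# The same create_run call as in Example 1 now becomes an HTTP request;
# the Run is rebuilt from the server's protobuf response.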
Example 3
    def get_run(self, run_uuid):
        """
        Fetches a run (info, metrics, and params) from the file store,
        raising an Exception if no run directory exists for the UUID.
        """
        run_dir = self._find_run_root(run_uuid)
        if run_dir is None:
            raise Exception("Run '%s' not found" % run_uuid)
        run_info = self.get_run_info(run_dir)
        metrics = self.get_all_metrics(run_uuid)
        params = self.get_all_params(run_uuid)
        return Run(run_info, RunData(metrics, params))
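A short usage sketch against the file store from Example 1 (the UUID value is illustrative):

try:
    run = store.get_run("0123456789abcdef0123456789abcdef")
except Exception as exc:
    # Unknown UUIDs raise a plain Exception rather than returning None.
    print(exc)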
Example 4
    def get_run(self, run_uuid):
        """
        Fetches the run from the backend store.

        :param run_uuid: Unique identifier for the run
        :return: A single Run object if it exists, otherwise raises an Exception
        """
        req_body = _message_to_json(GetRun(run_uuid=run_uuid))
        response_proto = self._call_endpoint(GetRun, req_body)
        return Run.from_proto(response_proto.run)
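Since the file-backed and REST-backed stores expose the same get_run contract, callers can be written once against either backend. A tiny illustrative helper is shown below; the run_info accessor mirrors the constructor argument seen in Example 1, though some MLflow versions expose it as run.info instead.

def fetch_status(store, run_uuid):
    # Works identically with the file-backed and REST-backed stores.
    return store.get_run(run_uuid).run_info.status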
Example 5
    def test_creation_and_hydration(self):
        run_data, metrics, params = TestRunData._create()
        (run_info, run_uuid, experiment_id, name, source_type, source_name,
         entry_point_name, user_id, status, start_time, end_time,
         source_version, tags, artifact_uri) = TestRunInfo._create()

        run1 = Run(run_info, run_data)

        self._check_run(run1, run_info, run_data)

        as_dict = {
            "info": {
                "run_uuid": run_uuid,
                "experiment_id": experiment_id,
                "name": name,
                "source_type": source_type,
                "source_name": source_name,
                "entry_point_name": entry_point_name,
                "user_id": user_id,
                "status": status,
                "start_time": start_time,
                "end_time": end_time,
                "source_version": source_version,
                "tags": tags,
                "artifact_uri": artifact_uri,
            },
            "data": {
                "metrics": metrics,
                "params": params
            }
        }
        self.assertEqual(dict(run1), as_dict)

        # proto = run1.to_proto()
        # run2 = Run.from_proto(proto)
        # self._check_run(run2, run_info, run_data)

        run3 = Run.from_dictionary(as_dict)
        self._check_run(run3, run_info, run_data)
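TestRunData._create and TestRunInfo._create are fixtures defined elsewhere in the test module. A self-contained sketch of the same dict round-trip might look like this; all field values are illustrative, and the RunInfo keyword list mirrors Example 1:

info = RunInfo(run_uuid="abc123", experiment_id=0, name="Run 0",
               artifact_uri="/tmp/mlruns/0/abc123/artifacts",
               source_type=SourceType.LOCAL, source_name="train.py",
               entry_point_name="main", user_id="alice",
               status=RunStatus.RUNNING, start_time=100, end_time=200,
               source_version=None, tags=[])
run = Run(info, RunData(metrics=[], params=[]))
# Serializing to a dict and hydrating back should yield an equal dict.
assert dict(Run.from_dictionary(dict(run))) == dict(run)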
Example 6
    def search_runs(self, experiment_ids, search_expressions):
        """
        Returns runs that match the given list of search expressions within the experiments.
        When multiple search expressions are given, they are ANDed together.

        :param experiment_ids: List of experiment IDs to scope the search
        :param search_expressions: List of search expressions

        :return: A list of Run objects that satisfy the search expressions
        """
        search_expressions_protos = [expr.to_proto() for expr in search_expressions]
        req_body = _message_to_json(SearchRuns(experiment_ids=experiment_ids,
                                               search_expressions=search_expressions_protos))
        response_proto = self._call_endpoint(SearchRuns, req_body)
        return [Run.from_proto(proto_run) for proto_run in response_proto.runs]
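A minimal call sketch follows. The experiment IDs are illustrative; because the expressions are ANDed, an empty list applies no constraints, and whether that matches every run depends on the store implementation.

runs = store.search_runs(experiment_ids=[0, 1], search_expressions=[])
for run in runs:
    print(run)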
Example 7
# Imports required by the snippet below (mlflow 1.x module layout).
import datetime as dt
from unittest.mock import patch

from mlflow.entities import Run, RunData, RunInfo
from mlflow.tracking.fluent import ActiveRun

from dbx.commands.deploy import deploy, _update_job  # noqa
from dbx.utils.common import write_json, DEFAULT_DEPLOYMENT_FILE_PATH
from .utils import DbxTest, invoke_cli_runner, test_dbx_config

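# Module-level mock of an active run, returned by the patched
# mlflow.start_run below.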
run_info = RunInfo(
    run_uuid="1",
    experiment_id="1",
    user_id="dbx",
    status="STATUS",
    start_time=dt.datetime.now(),
    end_time=dt.datetime.now(),
    lifecycle_stage="STAGE",
    artifact_uri="dbfs:/Shared/dbx-testing",
)
run_data = RunData()
run_mock = ActiveRun(Run(run_info, run_data))


class DeployTest(DbxTest):
    @patch("databricks_cli.sdk.service.DbfsService.get_status", return_value=None)
    @patch(
        "databricks_cli.configure.provider.ProfileConfigProvider.get_config",
        return_value=test_dbx_config,
    )
    @patch("databricks_cli.workspace.api.WorkspaceService.mkdirs", return_value=True)
    @patch("mlflow.set_experiment", return_value=None)
    @patch("mlflow.start_run", return_value=run_mock)