def get_metric(self, project_name, experiment_id, metric_name):
    """Read a single metric's metadata from the configured filesystem.

    Parameters
    ----------
    project_name : str
        The name of the project the experiment with ID
        `experiment_id` is logged to.
    experiment_id : str
        The ID of the experiment the metric with name
        `metric_name` is logged to.
    metric_name : str
        The name of the metric to retrieve.

    Returns
    -------
    rubicon.domain.Metric
        The metric with name `metric_name`.
    """
    path = self._get_metric_metadata_path(project_name, experiment_id, metric_name)

    # A missing metadata file means the metric was never logged — surface
    # that as a rubicon error rather than a raw filesystem error.
    try:
        metric_file = self.filesystem.open(path)
    except FileNotFoundError:
        raise RubiconException(f"No metric with name '{metric_name}' found.")

    with metric_file as f:
        metric_metadata = json.load(f)

    return domain.Metric(**metric_metadata)
def get_metrics(self, project_name, experiment_id):
    """Read all of an experiment's metrics from the configured filesystem.

    Parameters
    ----------
    project_name : str
        The name of the project the experiment with ID
        `experiment_id` is logged to.
    experiment_id : str
        The ID of the experiment to retrieve all metrics from.

    Returns
    -------
    list of rubicon.domain.Metric
        The metrics logged to the experiment with ID `experiment_id`.
    """
    root = self._get_metric_metadata_root(project_name, experiment_id)

    # If the experiment has no metric directory at all, there is simply
    # nothing logged — return an empty list instead of erroring.
    try:
        metadata_paths = self._ls_directories_only(root)
        raw_metrics = self.filesystem.cat(metadata_paths).values()
    except FileNotFoundError:
        return []

    return [domain.Metric(**json.loads(raw)) for raw in raw_metrics]
def log_metric(self, name, value, directionality="score", description=None):
    """Create a metric under the experiment.

    Parameters
    ----------
    name : str
        The metric's name.
    value : float
        The metric's value.
    directionality : str, optional
        The metric's directionality. Must be one of ["score", "loss"],
        where "score" represents a metric to maximize, while "loss"
        represents a metric to minimize. Defaults to "score".
    description : str, optional
        The metric's description. Use to provide additional context.

    Returns
    -------
    rubicon.client.Metric
        The created metric.
    """
    # Build the domain object, persist it, then hand back the client wrapper.
    domain_metric = domain.Metric(
        name, value, directionality=directionality, description=description
    )
    self.repository.create_metric(domain_metric, self.project.name, self.id)

    return Metric(domain_metric, self._config)
async def get_metric(self, project_name, experiment_id, metric_name):
    """Overrides `rubicon.repository.BaseRepository.get_metric` to
    asynchronously retrieve a metric from the configured filesystem.

    Parameters
    ----------
    project_name : str
        The name of the project this metric belongs to.
    experiment_id : str
        The ID of the experiment the metric with name
        `metric_name` is logged to.
    metric_name : str
        The name of the metric to retrieve.

    Returns
    -------
    rubicon.domain.Metric
        The metric with name `metric_name`.
    """
    path = self._get_metric_metadata_path(project_name, experiment_id, metric_name)

    # Only the filesystem read can raise FileNotFoundError — translate it
    # into a rubicon error so callers see a consistent exception type.
    try:
        raw_metadata = await self.filesystem._cat_file(path)
    except FileNotFoundError:
        raise RubiconException(f"No metric with name '{metric_name}' found.")

    return domain.Metric(**json.loads(raw_metadata))
async def get_metrics(self, project_name, experiment_id):
    """Overrides `rubicon.repository.BaseRepository.get_metrics` to
    asynchronously retrieve all metrics from the configured filesystem
    that belong to the experiment with ID `experiment_id`.

    Parameters
    ----------
    project_name : str
        The name of the project the experiment with ID
        `experiment_id` is logged to.
    experiment_id : str
        The ID of the experiment to retrieve all metrics from.

    Returns
    -------
    list of rubicon.domain.Metric
        The metrics logged to the experiment with ID `experiment_id`.
    """
    root = self._get_metric_metadata_root(project_name, experiment_id)

    # No metric directory means nothing was logged — that's an empty
    # result, not an error.
    try:
        metadata_paths = await self._ls_directories_only(root)

        # Fetch every metric's metadata file concurrently.
        raw_metrics = await asyncio.gather(
            *[self.filesystem._cat_file(path) for path in metadata_paths]
        )
    except FileNotFoundError:
        return []

    return [domain.Metric(**json.loads(raw)) for raw in raw_metrics]
def _create_metric(repository, experiment=None):
    """Log a randomly named test metric, creating a parent experiment if needed."""
    if experiment is None:
        experiment = _create_experiment(repository)

    test_metric = domain.Metric(name=f"Test Metric {uuid.uuid4()}", value=24)
    repository.create_metric(test_metric, experiment.project_name, experiment.id)

    return test_metric
def test_properties():
    """The client `Metric` should expose its underlying domain object's fields."""
    underlying = domain.Metric("Accuracy", 99, description="some description")
    metric = Metric(underlying)

    assert metric.name == "Accuracy"
    assert metric.value == 99
    # "score" is the domain object's default directionality
    assert metric.directionality == "score"
    assert metric.description == "some description"

    # generated fields pass straight through from the domain object
    assert metric.id == underlying.id
    assert metric.created_at == underlying.created_at
def test_to_dask_df(asyn_client_w_mock_repo):
    """`to_dask_df` should emit one row per experiment with the expected columns."""
    rubicon = asyn_client_w_mock_repo
    project_name = f"Test Project {uuid.uuid4()}"
    project = asyncio.run(rubicon.create_project(project_name))

    # stub the repository reads that to_dask_df performs
    rubicon.repository.get_experiments.return_value = [
        domain.Experiment(
            project_name=project_name, name=f"Test Experiment {uuid.uuid4()}"
        )
        for _ in range(0, 2)
    ]
    rubicon.repository.get_tags.return_value = [
        {"added_tags": [], "removed_tags": []}
    ]
    rubicon.repository.get_parameters.return_value = [
        domain.Parameter("n_components")
    ]
    rubicon.repository.get_metrics.return_value = [domain.Metric("accuracy", 90)]

    df = asyncio.run(project.to_dask_df()).compute()

    # check that all experiments made it into df
    assert len(df) == 2

    # check the cols within the df
    for detail in [
        "id",
        "name",
        "description",
        "model_name",
        "commit_hash",
        "tags",
        "created_at",
    ]:
        assert detail in df.columns
def _create_metric_domain(experiment=None):
    """Return an `(experiment, metric)` pair of domain objects for testing."""
    if experiment is None:
        # build a throwaway parent project/experiment when none is supplied
        experiment = _create_experiment_domain(
            domain.Project(f"Test Project {uuid.uuid4()}")
        )

    return experiment, domain.Metric(name="test metric", value=0)