    def testBestLogdir(self):
        analysis = ExperimentAnalysis(self.test_dir)
        logdir = analysis.get_best_logdir(self.metric, mode="max")
        self.assertTrue(logdir.startswith(self.test_dir))
        logdir2 = analysis.get_best_logdir(self.metric, mode="min")
        self.assertTrue(logdir2.startswith(self.test_dir))
        self.assertNotEqual(logdir, logdir2)
Example #2
def load_best_model(exp: tune.ExperimentAnalysis,
                    metric="val_loss",
                    mode="min"):
    """Load the best checkpoint across all trials of the experiment."""
    trial = exp.get_best_trial(metric=metric, mode=mode, scope="all")
    # In older Ray versions get_best_checkpoint returns the checkpoint
    # directory as a path string; the Lightning checkpoint file sits inside it.
    checkpoint_path = exp.get_best_checkpoint(trial, metric=metric, mode=mode)
    model = CLIPFineTunedModel.load_from_checkpoint(checkpoint_path + "/checkpoint")
    return model
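A minimal usage sketch for the helper above, assuming a completed Tune run; the results path is a placeholder and CLIPFineTunedModel comes from the snippet's own codebase:

from ray import tune

# Point the analysis at a finished experiment (placeholder path).
exp = tune.ExperimentAnalysis("~/ray_results/clip_finetune")
model = load_best_model(exp, metric="val_loss", mode="min")
model.eval()  # the loaded Lightning module is ready for inference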
Example #3
    def register_analysis(self, analysis: ExperimentAnalysis):
        """Integrate the given analysis into the Gaussian process.

        Args:
            analysis (ExperimentAnalysis): The previous analysis
                to integrate.
        """
        for (_, report), params in zip(
                analysis.dataframe(metric=self._metric,
                                   mode=self._mode).iterrows(),
                analysis.get_all_configs().values()):
            # Add each trial's observed result to the Gaussian process
            # optimizer so the new search can warm-start from it.
            self._register_result(params, report)
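A hedged sketch of warm-starting a search through register_analysis; the searcher instance and the results path below are assumptions, not part of the source:

from ray.tune import ExperimentAnalysis

# Analysis of a previous, already finished experiment (placeholder path).
previous = ExperimentAnalysis("~/ray_results/previous_run")
# Replay its trials into the searcher's Gaussian process before a new run.
searcher.register_analysis(previous)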
Example #4
    def testInitLegacy(self):
        """Should still work if checkpoints are not json strings"""
        experiment_checkpoint_path = os.path.join(self.test_dir,
                                                  "experiment_state.json")
        checkpoint_data = {
            "checkpoints": [{
                "trial_id": "abcd1234",
                "status": Trial.TERMINATED,
                "trainable_name": "MockTrainable",
                "local_dir": self.test_dir,
                "relative_logdir": "MockTrainable_0_id=3_2020-07-12",
            }]
        }

        with open(experiment_checkpoint_path, "w") as f:
            f.write(json.dumps(checkpoint_data))

        experiment_analysis = ExperimentAnalysis(experiment_checkpoint_path)
        self.assertEqual(len(experiment_analysis._checkpoints_and_paths), 1)
        self.assertTrue(experiment_analysis.trials)
Example #5
    def testInit(self):
        experiment_checkpoint_path = os.path.join(
            self.test_dir, "experiment_state.json"
        )
        checkpoint_data = {
            "checkpoints": [
                json.dumps(
                    {
                        "trial_id": "abcd1234",
                        "status": Trial.TERMINATED,
                        "trainable_name": "MockTrainable",
                        "local_dir": self.test_dir,
                        "relative_logdir": "MockTrainable_0_id=3_2020-07-12",
                    },
                    cls=TuneFunctionEncoder,
                )
            ]
        }

        with open(experiment_checkpoint_path, "w") as f:
            f.write(json.dumps(checkpoint_data))

        experiment_analysis = ExperimentAnalysis(experiment_checkpoint_path)
        self.assertEqual(len(experiment_analysis._checkpoints_and_paths), 1)
        self.assertTrue(experiment_analysis.trials)
Example #6
def raytune_analysis(exp_dir, save, skip, mode, metric):
    from ray.tune import ExperimentAnalysis

    experiment_analysis = ExperimentAnalysis(exp_dir,
                                             default_metric=metric,
                                             default_mode=mode)
    plot_ray_analysis(experiment_analysis, save=save, skip=skip)
    analyze_ray_experiment(exp_dir, default_metric=metric, default_mode=mode)
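For context, a hypothetical invocation of this entry point; every argument value below is a placeholder:

raytune_analysis(
    exp_dir="~/ray_results/my_experiment",
    save=True,       # forwarded to plot_ray_analysis
    skip=10,         # forwarded to plot_ray_analysis
    mode="min",
    metric="val_loss",
)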
Example #7
    async def collect(self):
        """
        Collects and cleans data on the running Tune experiment from the
        Tune logs so that users can see this information in the front-end
        client
        """
        self._trial_records = {}
        self._errors = {}
        if not self._logdir or not ExperimentAnalysis:
            return

        # search through all the subdirectories in the log directory
        analysis = ExperimentAnalysis(str(self._logdir))
        df = analysis.dataframe(metric=None, mode=None)

        if len(df) == 0 or "trial_id" not in df.columns:
            return

        self._trials_available = True

        # make sure that data will convert to JSON without error
        df["trial_id_key"] = df["trial_id"].astype(str)
        df = df.fillna(0)

        trial_ids = df["trial_id"]
        # iteritems() was removed in pandas 2.0; items() is the equivalent
        for i, value in df["trial_id"].items():
            if not isinstance(value, (str, int)):
                trial_ids[i] = int(value)

        df["trial_id"] = trial_ids

        # convert df to python dict
        df = df.set_index("trial_id_key")
        trial_data = df.to_dict(orient="index")

        # clean data and update class attribute
        if len(trial_data) > 0:
            trial_data = self.clean_trials(trial_data)
            self._trial_records.update(trial_data)

        self.collect_errors(df)
Example #8
    def testInit(self):
        experiment_checkpoint_path = os.path.join(self.test_dir,
                                                  "experiment_state.json")
        checkpoint_data = {
            "checkpoints": [{
                "trainable_name": "MockTrainable",
                "logdir": "/mock/test/MockTrainable_0_id=3_2020-07-12"
            }]
        }

        with open(experiment_checkpoint_path, "w") as f:
            f.write(json.dumps(checkpoint_data))

        experiment_analysis = ExperimentAnalysis(experiment_checkpoint_path)
        self.assertEqual(len(experiment_analysis._checkpoints), 1)
        self.assertIsNone(experiment_analysis.trials)
Example #9
    def testInitLegacy(self):
        """Should still work if checkpoints are not json strings"""
        experiment_checkpoint_path = os.path.join(self.test_dir,
                                                  "experiment_state.json")
        checkpoint_data = {
            "checkpoints": [{
                "trainable_name": "MockTrainable",
                "logdir": "/mock/test/MockTrainable_0_id=3_2020-07-12"
            }]
        }

        with open(experiment_checkpoint_path, "w") as f:
            f.write(json.dumps(checkpoint_data))

        experiment_analysis = ExperimentAnalysis(experiment_checkpoint_path)
        self.assertEqual(len(experiment_analysis._checkpoints), 1)
        self.assertFalse(experiment_analysis.trials)
Example #10
    def testFromPath(self):
        self.run_test_exp()
        analysis = ExperimentAnalysis(self.test_path)

        self.assertTrue(analysis.get_best_trial(metric=self.metric, mode="max"))

        ray.shutdown()
        ray.tune.registry._global_registry = ray.tune.registry._Registry(
            prefix="global"
        )

        analysis = ExperimentAnalysis(self.test_path)

        # This will be None if validate_trainable fails during loading
        self.assertTrue(analysis.get_best_trial(metric=self.metric, mode="max"))
Example #11
    def testInitException(self):
        experiment_checkpoint_path = os.path.join(self.test_dir, "mock.json")
        with pytest.raises(ValueError):
            ExperimentAnalysis(experiment_checkpoint_path)
Example #12
    def testBestConfigIsLogdir(self):
        analysis = ExperimentAnalysis(self.test_dir)
        for metric, mode in [(self.metric, "min"), (self.metric, "max")]:
            logdir = analysis.get_best_logdir(metric, mode=mode)
            best_config = analysis.get_best_config(metric, mode=mode)
            self.assertEqual(analysis.get_all_configs()[logdir], best_config)
Example #13
    def testDataframe(self):
        analysis = ExperimentAnalysis(self.test_dir)
        df = analysis.dataframe(self.metric, mode="max")
        self.assertIsInstance(df, pd.DataFrame)
        self.assertEqual(df.shape[0], self.num_samples * 2)
Example #14
def get_best_trial(analysis: tune.ExperimentAnalysis, objective: Objective, scope: str):
    return analysis.get_best_trial(
        full_metric_name(objective), mode=objective.mode, scope=scope
    )
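A self-contained sketch of how this wrapper might be called; Objective and full_metric_name are not defined in the snippet, so the shapes below are assumptions:

from dataclasses import dataclass

from ray import tune

@dataclass
class Objective:
    # Assumed shape: a metric name plus an optimization direction.
    name: str
    mode: str  # "min" or "max"

def full_metric_name(objective: Objective) -> str:
    # Assumed to map an objective onto the metric key that Tune logged.
    return objective.name

analysis = tune.ExperimentAnalysis("~/ray_results/my_experiment")  # placeholder
best = get_best_trial(analysis, Objective("val_loss", "min"), scope="all")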