def testProgressStr(self):
    trials = []
    for i in range(5):
        t = Mock()
        if i == 0:
            t.status = "TERMINATED"
        elif i == 1:
            t.status = "PENDING"
        else:
            t.status = "RUNNING"
        t.trial_id = "%05d" % i
        t.local_dir = "/foo"
        t.location = "here"
        t.config = {"a": i, "b": i * 2}
        t.evaluated_params = t.config
        t.last_result = {"config": {"a": i, "b": i * 2}}
        t.__str__ = lambda self: self.trial_id
        trials.append(t)

    prog1 = trial_progress_str(trials, ["a", "b"], fmt="psql", max_rows=3)
    print(prog1)
    assert prog1 == EXPECTED_RESULT_1

    prog2 = trial_progress_str(trials, ["a", "b"], fmt="psql", max_rows=None)
    print(prog2)
    assert prog2 == EXPECTED_RESULT_2
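# The "psql" value passed as `fmt` above matches one of tabulate's table
# formats; the assumption here is that trial_progress_str forwards it to
# tabulate. A standalone sketch of rendering that format:
from tabulate import tabulate

# Prints a "+---+"-bordered table in the same style as the expected
# progress strings asserted against above.
print(tabulate([["00000", "TERMINATED"], ["00001", "PENDING"]],
               headers=["Trial name", "status"], tablefmt="psql"))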
def testProgressStr(self):
    trials = []
    for i in range(5):
        t = Mock()
        if i == 0:
            t.status = "TERMINATED"
        elif i == 1:
            t.status = "PENDING"
        else:
            t.status = "RUNNING"
        t.trial_id = "%05d" % i
        t.local_dir = "/foo"
        t.location = "here"
        t.config = {"a": i, "b": i * 2, "n": {"k": [i, 2 * i]}}
        t.evaluated_params = {
            "a": i,
            "b": i * 2,
            "n/k/0": i,
            "n/k/1": 2 * i
        }
        t.last_result = {
            "config": {
                "a": i,
                "b": i * 2,
                "n": {
                    "k": [i, 2 * i]
                }
            },
            "metric_1": i / 2,
            "metric_2": i / 4
        }
        t.__str__ = lambda self: self.trial_id
        trials.append(t)

    # One metric, two parameters
    prog1 = trial_progress_str(
        trials, ["metric_1"], ["a", "b"], fmt="psql", max_rows=3)
    print(prog1)
    assert prog1 == EXPECTED_RESULT_1

    # No metric, all parameters
    prog2 = trial_progress_str(trials, [], None, fmt="psql", max_rows=None)
    print(prog2)
    assert prog2 == EXPECTED_RESULT_2

    # Both metrics, one parameter, all with custom representation
    prog3 = trial_progress_str(
        trials, {
            "metric_1": "Metric 1",
            "metric_2": "Metric 2"
        }, {"a": "A"},
        fmt="psql",
        max_rows=3)
    print(prog3)
    assert prog3 == EXPECTED_RESULT_3

    # Current best trial
    best1 = best_trial_str(trials[1], "metric_1")
    assert best1 == EXPECTED_BEST_1
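# The evaluated_params above rely on nested config values being flattened
# into "/"-delimited keys, so {"n": {"k": [i, 2 * i]}} yields "n/k/0" and
# "n/k/1". A minimal sketch of that flattening; the helper name
# `flatten_config` is an illustrative assumption, not the code under test.
def flatten_config(config, prefix=""):
    flat = {}
    for key, value in config.items():
        path = "{}/{}".format(prefix, key) if prefix else str(key)
        if isinstance(value, dict):
            flat.update(flatten_config(value, path))
        elif isinstance(value, list):
            # Lists are flattened by index, like a dict keyed by position.
            flat.update(flatten_config(dict(enumerate(value)), path))
        else:
            flat[path] = value
    return flat

assert flatten_config({"a": 1, "n": {"k": [3, 6]}}) == {
    "a": 1,
    "n/k/0": 3,
    "n/k/1": 6
}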
def debug_string(self, delim="\n"):
    messages = [
        self._scheduler_alg.debug_string(),
        self.trial_executor.debug_string(),
        trial_progress_str(self.get_trials()),
    ]
    return delim.join(messages)
def debug_string(self, delim="\n"):
    result_keys = [
        list(t.last_result) for t in self.get_trials() if t.last_result
    ]
    metrics = set().union(*result_keys)
    messages = [
        self._scheduler_alg.debug_string(),
        self.trial_executor.debug_string(),
        trial_progress_str(self.get_trials(), metrics),
    ]
    return delim.join(messages)
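# Standalone illustration of the metric collection above: the set of
# metric columns is the union of last_result keys across all trials that
# have reported at least once. Note that set().union() with no arguments
# returns an empty set, so the no-results-yet case is handled gracefully.
results = [{"loss": 0.5, "acc": 0.9}, {}, {"loss": 0.4}]
metrics = set().union(*[list(r) for r in results if r])
assert metrics == {"loss", "acc"}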
def debug_string(self, delim="\n"):
    # Imported here rather than at module level, presumably to avoid a
    # circular import between the trial runner and the progress reporter.
    from ray.tune.progress_reporter import trial_progress_str

    result_keys = [
        list(t.last_result) for t in self.get_trials() if t.last_result
    ]
    metrics = set().union(*result_keys)
    messages = [
        self._scheduler_alg.debug_string(),
        self.trial_executor.debug_string(),
        trial_progress_str(self.get_trials(), metrics, force_table=True),
    ]
    return delim.join(messages)
def testMaxLen(self):
    trials = []
    for i in range(5):
        t = Mock()
        t.status = "TERMINATED"
        t.trial_id = "%05d" % i
        t.local_dir = "/foo"
        t.location = "here"
        t.config = {"verylong" * 20: i}
        t.evaluated_params = {"verylong" * 20: i}
        t.last_result = {"some_metric": "evenlonger" * 100}
        t.__str__ = lambda self: self.trial_id
        trials.append(t)

    progress_str = trial_progress_str(
        trials, metric_columns=["some_metric"], force_table=True)
    assert any(len(row) <= 90 for row in progress_str.split("\n"))
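# A minimal sketch of the truncation behavior the test above exercises:
# over-long cell values are clipped so table rows stay within a width
# budget. The helper name and cutoff are illustrative assumptions, not
# the reporter's actual implementation.
def truncate(value, max_len=20):
    s = str(value)
    return s[:max_len - 3] + "..." if len(s) > max_len else s

assert len(truncate("evenlonger" * 100)) == 20
assert truncate("short") == "short"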