# Imports below are inferred from usage in this excerpt; the exact module
# paths may differ in the actual repo layout.
import copy
import json
import pickle
from typing import Any, Dict, Tuple

import numpy as np
import pandas as pd
from pytest import raises
from triad import FileSystem, ParamDict
from triad.utils.hash import to_uuid

from tune import (
    NonIterativeObjectiveFunc,
    Rand,
    RandInt,
    Trial,
    TrialDecision,
    TrialReport,
    noniterative_objective,
    to_noniterative_objective,
    validate_iterative_objective,
    validate_noniterative_objective,
)
from tune.concepts.flow.report import TrialReportHeap
from tune.constants import (
    TUNE_DATASET_DF_PREFIX,
    TUNE_DATASET_PARAMS_PREFIX,
    TUNE_DATASET_TRIALS,
    TUNE_PREFIX,
)
from tune.exceptions import TuneCompileError
from tune_hyperopt import HyperoptRunner


def test_to_noniterative_objective():
    def f1(a, b: int) -> float:
        return a - b

    func = to_noniterative_objective(f1)
    assert func.min_better
    trial = Trial("abc", dict(b=20, a=10), dict(c=3))
    report = func.run(trial)
    assert report.trial is trial
    assert report.metric == -10
    assert report.params == trial.params
    assert report.metadata == {}

    func = to_noniterative_objective("f1", min_better=False)
    assert not func.min_better
    trial = Trial("abc", dict(b=20, a=10), dict(c=3))
    report = func.run(trial)
    assert report.trial is trial
    assert report.metric == -10
    assert report.params == trial.params
    assert report.metadata == {}
    assert -1 == func(1, 2)

    def f2(a, b: int) -> Tuple[float, Dict[str, Any]]:
        return a - b, dict(c=5)

    func = to_noniterative_objective(f2)
    trial = Trial("abc", dict(b=20, a=10), dict(c=3))
    report = func.run(trial)
    assert report.trial is trial
    assert report.metric == -10
    assert report.params == trial.params
    assert report.metadata == dict(c=5)

    def f3(t: Trial) -> TrialReport:
        return TrialReport(
            t, t.params["a"] - t.params["b"], params=dict(a=1), metadata=dict(c=6)
        )

    func = to_noniterative_objective(f3)
    trial = Trial("abc", dict(b=20, a=10), dict(c=3))
    report = func.run(trial)
    assert report.trial is trial
    assert report.metric == -10
    assert report.params == dict(a=1)
    assert report.metadata == dict(c=6)

    class F4(NonIterativeObjectiveFunc):
        def run(self, t: Trial) -> TrialReport:
            return TrialReport(
                t, t.params["a"] - t.params["b"], params=dict(a=1), metadata=dict(c=6)
            )

    f4 = F4()
    f4_ = to_noniterative_objective(f4)
    assert isinstance(f4_, F4)
    assert f4 is not f4_

    raises(TuneCompileError, lambda: to_noniterative_objective("abc"))
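# Summary of the conversions exercised above (inferred from the assertions,
# not an exhaustive spec): to_noniterative_objective accepts a plain function
# returning a float, a function returning a (float, metadata dict) tuple, a
# function taking a Trial and returning a TrialReport, a
# NonIterativeObjectiveFunc instance (which is copied, not reused), or a name
# string resolvable in the caller's scope; anything else raises
# TuneCompileError.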
def test_trial_report_heap():
    t1 = Trial("a", {})
    r1 = TrialReport(t1, 0.1)
    t2 = Trial("b", {})
    r2 = TrialReport(t2, 0.2)
    t3 = Trial("c", {})
    r3 = TrialReport(t3, 0.3)
    r4 = TrialReport(t3, -0.3)

    h = TrialReportHeap(min_heap=True)
    for r in [r1, r2, r3, r4]:
        h.push(r)
    assert "a" in h
    assert "x" not in h
    for r in [r4, r1, r2]:
        assert h.pop() is r
    assert 0 == len(h)

    h = TrialReportHeap(min_heap=False)
    for r in [r1, r2, r3, r4]:
        h.push(r)
    for r in [r1, r2, r4]:
        assert r in list(h.values())
    for r in [r2, r1, r4]:
        assert h.pop() is r
    assert 0 == len(h)

    # test __lt__, the sort key is sort_metric!
    r5 = TrialReport(t1, metric=0.1, sort_metric=-0.1)
    r6 = TrialReport(t2, metric=0.2, sort_metric=-0.2)
    r7 = TrialReport(t3, metric=0.3, sort_metric=-0.3)
    h = TrialReportHeap(min_heap=True)
    for r in [r7, r6, r5]:
        h.push(r)
    for r in [r7, r6, r5]:
        assert h.pop() is r
    assert 0 == len(h)

    r5 = TrialReport(t1, metric=0.1, cost=0.2, rung=5)
    r6 = TrialReport(t2, metric=0.1, cost=0.3, rung=5)
    r7 = TrialReport(t3, metric=0.1, cost=0.3, rung=6)
    h = TrialReportHeap(min_heap=True)
    for r in [r7, r6, r5]:
        h.push(r)
    for r in [r5, r6, r7]:
        assert h.pop() is r
    assert 0 == len(h)

    # equal case
    r8 = TrialReport(t1, metric=0.1, cost=0.3, rung=6)
    r9 = TrialReport(t2, metric=0.1, cost=0.3, rung=6)
    h = TrialReportHeap(min_heap=False)
    for r in [r8, r9]:
        h.push(r)
    for r in [r8, r9]:
        assert h.pop() is r
    assert 0 == len(h)
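# The pop orders asserted above imply the heap's comparison contract (inferred
# from this test, not from the implementation): entries are keyed by trial_id,
# so pushing r4 replaced r3 and three pops drain a heap of four pushes;
# ordering uses sort_metric (not metric), with ties broken by smaller cost,
# then smaller rung, and by insertion order when everything is equal.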
def test_noniterative_objective():
    @noniterative_objective
    def f1(a, b: int) -> float:
        return a - b

    assert isinstance(f1, NonIterativeObjectiveFunc)
    trial = Trial("abc", dict(b=20, a=10), dict(c=3))
    report = f1.run(trial)
    assert report.trial is trial
    assert report.metric == -10
    assert report.params == trial.params
    assert report.metadata == {}

    func = to_noniterative_objective("f1")
    report = func.run(trial)
    assert report.trial is trial
    assert report.metric == -10
    assert report.params == trial.params
    assert report.metadata == {}

    @noniterative_objective(min_better=False)
    def f2(a, b: int) -> float:
        return a - b

    assert isinstance(f2, NonIterativeObjectiveFunc)
    assert not f2.min_better
def test_encode_decode():
    p = {"a": 1, "b": Rand(1, 2)}
    trial = Trial("abc", p, {}, keys=["x", "y"], dfs={"v": ""})
    d = trial.jsondict
    assert isinstance(d["params"]["b"], dict)
    t = Trial.from_jsondict(d)
    assert isinstance(t.params["b"], Rand)
    assert ["x", "y"] == t.keys
    assert {} == t.dfs  # dfs will not be serialized
def test_copy():
    trial = Trial("abc", {"a": 1})
    t1 = trial.with_dfs({"c": pd.DataFrame([[0]])})
    t2 = copy.copy(t1)
    t3 = copy.deepcopy(t1)
    assert trial.trial_id == t2.trial_id == t3.trial_id
    assert t1.dfs is t2.dfs is t3.dfs
    assert 0 == len(trial.dfs)
    assert 1 == len(t1.dfs)
    assert [] == t3.keys
def test_trial():
    trial = Trial("abc", {"a": 1}, {"b": 2}, keys=["a", "b"])
    assert "abc" == trial.trial_id
    assert {"a": 1} == trial.params
    assert {"b": 2} == trial.metadata
    assert ["a", "b"] == trial.keys

    t2 = trial.with_params({"c": 3})
    assert "abc" == t2.trial_id
    assert {"c": 3} == t2.params
    assert {"b": 2} == t2.metadata
    assert ["a", "b"] == t2.keys
def test_validator():
    m = M()
    for cont in [True, False]:
        validate_iterative_objective(
            F(),
            Trial("abc", {"a": 1}),
            [3, 3, 2],
            lambda reports: [-3.0, -6.0, -8.0] == [x.sort_metric for x in reports],
            continuous=cont,
            monitor=m,
        )
    assert 6 == len(m._reports)
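# F and M above are test fixtures defined elsewhere in this suite; inferred
# from the assertions: F is an iterative objective whose per-rung sort metrics
# accumulate to -3.0, -6.0 and -8.0 under the budget sequence [3, 3, 2], and M
# is a Monitor that records every report it receives, hence the six reports
# collected across the continuous and non-continuous runs (three rungs each).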
def test_trial_report():
    trial = Trial("abc", {"a": Rand(3, 4)}, {"b": 2})

    report = copy.copy(
        TrialReport(
            trial,
            metric=np.float64(0.1),  # numpy scalars should be coerced to float
            params={"c": Rand(1, 2)},
            metadata={"d": 4},
            cost=2.0,
        )
    )
    assert trial is report.trial
    assert 0.1 == report.metric
    assert type(report.metric) == float
    assert {"c": Rand(1, 2)} == report.params
    assert {"d": 4} == report.metadata
    assert 2.0 == report.cost
    assert 0 == report.rung
    assert 0.1 == report.sort_metric

    report = copy.deepcopy(
        TrialReport(
            trial, metric=np.float64(0.111), cost=2.0, rung=4, sort_metric=1.23
        )
    )
    assert trial is report.trial
    assert 0.111 == report.metric
    assert type(report.metric) == float
    assert {"a": Rand(3, 4)} == report.params
    assert {} == report.metadata
    assert 2.0 == report.cost
    assert 4 == report.rung

    r1 = report.generate_sort_metric(True, 2)
    r2 = report.generate_sort_metric(False, 1)
    r3 = report.with_sort_metric(0.234)
    assert 1.23 == report.sort_metric
    assert 0.11 == r1.sort_metric
    assert -0.1 == r2.sort_metric
    assert 0.234 == r3.sort_metric

    report = TrialReport.from_jsondict(report.jsondict)
    assert trial.trial_id == report.trial_id
    assert 0.111 == report.metric
    assert type(report.metric) == float
    assert {"a": Rand(3, 4)} == report.params
    assert {} == report.metadata
    assert 2.0 == report.cost

    assert 3.0 == report.with_cost(3.0).cost
    assert 5 == report.with_rung(5).rung
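# Inferred from the assertions above: generate_sort_metric(min_better, digits)
# rounds the metric to `digits` decimal places and negates it when min_better
# is False, so better reports always sort first in a min-heap; the with_*
# methods return modified copies and leave the original report untouched.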
def test_objective_func(tmpdir):
    fs = FileSystem().opendir(str(tmpdir))
    j = J([3, 3, 2])
    f = F().copy()
    t = Trial("abc", {"a": 1})

    f.run(t, judge=j, checkpoint_basedir_fs=fs)
    assert -10 == f.v

    f.run(t, judge=j, checkpoint_basedir_fs=fs)
    assert -10 == f.v
    assert 6.0 == j.report.metric
    assert -6.0 == j.report.sort_metric

    f.run(t, judge=j, checkpoint_basedir_fs=fs)
    assert -10 == f.v
    assert 8.0 == j.report.metric
    assert -8.0 == j.report.sort_metric
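# Inferred behavior (J and F are fixtures defined elsewhere in this suite):
# with checkpoint_basedir_fs set, each run() resumes the same trial from its
# last checkpoint instead of restarting, so successive runs under the judge's
# budgets [3, 3, 2] keep improving the judged metric (6.0, then 8.0) while
# sort_metric stays its negation.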
def _to_trail_row(data: Dict[str, Any], metadata: Dict[str, Any]) -> Dict[str, Any]:
    key_names = sorted(k for k in data.keys() if not k.startswith(TUNE_PREFIX))
    keys = [data[k] for k in key_names]
    trials: Dict[str, Dict[str, Any]] = {}
    for param in pickle.loads(data[TUNE_DATASET_PARAMS_PREFIX]):
        p = ParamDict(sorted(((k, v) for k, v in param.items()), key=lambda x: x[0]))
        tid = to_uuid(keys, p)
        trials[tid] = Trial(
            trial_id=tid, params=p, metadata=metadata, keys=keys
        ).jsondict
    data[TUNE_DATASET_TRIALS] = json.dumps(list(trials.values()))
    del data[TUNE_DATASET_PARAMS_PREFIX]
    return data
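# A minimal usage sketch (hypothetical values; constants come from
# tune.constants). Params are key-sorted before hashing, so dicts that differ
# only in key order map to the same uuid and collapse into a single trial:
#
#   row = {"a": 1, TUNE_DATASET_PARAMS_PREFIX: pickle.dumps(
#       [{"x": 1, "y": 2}, {"y": 2, "x": 1}])}
#   out = _to_trail_row(row, metadata={})
#   assert TUNE_DATASET_PARAMS_PREFIX not in out
#   assert 1 == len(json.loads(out[TUNE_DATASET_TRIALS]))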
def test_hyperopt():
    params = dict(a=Rand(-10.0, 10.0), b=RandInt(-100, 100), c=2.0)
    trial = Trial("a", params, metadata={})
    h = HyperoptRunner(max_iter=200, seed=0)

    @noniterative_objective
    def objective(a, b, c) -> Tuple[float, Dict[str, Any]]:
        return a ** 2 + b ** 2 + c, dict(a=1)

    def v(report):
        assert report.metric < 7
        assert report.params["a"] ** 2 < 2
        assert report.params["b"] ** 2 < 2
        assert 2.0 == report.params["c"]

    validate_noniterative_objective(objective, trial, v, runner=h)
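# Inferred from v(): constant parameters (c=2.0) pass through the runner
# unchanged, while Rand/RandInt expressions are searched; 200 iterations is
# enough for the convex objective a**2 + b**2 + c to land near a = b = 0.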
def test_trial_decision():
    trial = Trial("abc", {"a": 1}, {"b": Rand(0, 2)})
    report = TrialReport(
        trial, metric=np.float64(0.1), params={"c": Rand(0, 3)}, metadata={"d": 4}
    )
    decision = TrialDecision(
        report,
        budget=0.0,
        should_checkpoint=True,
        metadata={"x": 1},
        reason="p",
    )
    assert trial is decision.trial
    assert report is decision.report
    assert decision.should_stop
    assert decision.should_checkpoint
    assert {"x": 1} == decision.metadata
    assert "p" == decision.reason
    assert 0.0 == decision.budget

    assert copy.copy(decision) is decision
    assert copy.deepcopy(decision) is decision

    d2 = TrialDecision.from_jsondict(decision.jsondict)
    assert d2.trial_id == trial.trial_id
    assert Rand(0, 3) == d2.report.params["c"]
    assert decision.should_stop
    assert decision.should_checkpoint
    assert {"x": 1} == decision.metadata
    assert "p" == decision.reason

    decision = TrialDecision(
        report, budget=1.0, should_checkpoint=True, metadata={"x": 1}
    )
    assert 1.0 == decision.budget
    assert not decision.should_stop
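# Inferred from the two decisions above: should_stop is simply budget <= 0,
# and TrialDecision is treated as immutable, which is why copy.copy and
# copy.deepcopy return the same object.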
def rp(tid, metric, rung=0, keys=None):
    # use None instead of a shared mutable default argument
    t = Trial(tid, {}, keys=keys or [])
    return TrialReport(t, metric=metric, rung=rung)
def test_to_trial_row():
    data1 = {
        "b": 2,
        "a": 1,
        TUNE_DATASET_DF_PREFIX + "x": "x",
        TUNE_DATASET_PARAMS_PREFIX: pickle.dumps(
            [{"b": 10, "a": 11}, {"a": 11, "b": 10}, {"b": 100, "a": 110}]
        ),
    }
    res1 = _to_trail_row(data1, {"m": 1})
    trials1 = [Trial(**p) for p in json.loads(res1[TUNE_DATASET_TRIALS])]
    # the first two param dicts are equal after key-sorting, so they dedupe
    assert 2 == len(trials1)

    data2 = {
        "a": 1,
        "b": 2,
        TUNE_DATASET_DF_PREFIX + "y": "x",
        TUNE_DATASET_PARAMS_PREFIX: pickle.dumps(
            [{"b": 10, "a": 11}, {"b": 100, "a": 110}]
        ),
    }
    res2 = _to_trail_row(data2, {"m": 1})
    assert TUNE_DATASET_PARAMS_PREFIX not in res2
    trials2 = [Trial(**p) for p in json.loads(res2[TUNE_DATASET_TRIALS])]
    assert 2 == len(trials2)
    # same keys and params produce the same deterministic trial ids
    assert any(trials2[0].trial_id == x.trial_id for x in trials1)
    assert any(trials2[1].trial_id == x.trial_id for x in trials1)

    data3 = {
        "a": 10,
        "b": 2,
        TUNE_DATASET_DF_PREFIX + "y": "x",
        TUNE_DATASET_PARAMS_PREFIX: pickle.dumps(
            [{"b": 10, "a": 11}, {"b": 100, "a": 110}]
        ),
    }
    res3 = _to_trail_row(data3, {"m": 1})
    trials3 = [Trial(**p) for p in json.loads(res3[TUNE_DATASET_TRIALS])]
    assert 2 == len(trials3)
    # different keys produce different trial ids
    assert not any(trials3[0].trial_id == x.trial_id for x in trials1)
    assert not any(trials3[1].trial_id == x.trial_id for x in trials1)