Example #1
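The snippets below are standalone test functions; they assume a shared module header and a `ray_start_2_cpus` pytest fixture that the original listing does not show. A minimal sketch of that setup, assuming a Ray ~2.x layout (the exact import paths vary by version):

import json
import os

import pytest

import ray
from ray import tune
from ray.air.checkpoint import Checkpoint
from ray.tune.result_grid import ResultGrid


@pytest.fixture
def ray_start_2_cpus():
    # Start a small local Ray cluster for each test, then tear it down.
    address_info = ray.init(num_cpus=2)
    yield address_info
    ray.shutdown()
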
def test_best_result_no_report(ray_start_2_cpus):
    # Trainable that never calls tune.report(), so no trial has metrics.
    def f(config):
        pass

    analysis = tune.run(f, config={"x": tune.grid_search([1, 2])})
    result_grid = ResultGrid(analysis)
    # With no reported results there is no best trial to return.
    with pytest.raises(RuntimeError, match="No best trial found*"):
        result_grid.get_best_result(metric="x", mode="max")
Example #2
def test_best_result_checkpoint_history(ray_start_2_cpus):
    def f(config):
        # Write a checkpoint and report a result for each of two steps.
        for i in range(2):
            with tune.checkpoint_dir(step=i) as checkpoint_dir:
                path = os.path.join(checkpoint_dir, "checkpoint")
                with open(path, "w") as fp:
                    fp.write(json.dumps(dict(x=config["x"], step=i)))
            tune.report(x=config["x"], step=i)

    analysis = tune.run(f, config={"x": tune.grid_search([1, 3])})

    # No checkpoint scoring was configured, so pass metric and mode explicitly.
    result_grid = ResultGrid(analysis)
    best_result = result_grid.get_best_result(metric="x", mode="max")
    assert best_result.metrics["x"] == 3
    print(best_result.best_checkpoints)
    print([
        checkpoint.get_internal_representation()
        for checkpoint, _ in best_result.best_checkpoints
    ])
    # Both checkpoints of the best trial are kept, in step order.
    assert len(best_result.best_checkpoints) == 2
    for i, (checkpoint, metrics) in enumerate(best_result.best_checkpoints):
        assert isinstance(checkpoint, Checkpoint)
        assert metrics["x"] == 3
        assert metrics["step"] == i
Example #3
def test_best_result(ray_start_2_cpus):
    def f(config):
        for _ in range(2):
            tune.report(x=config["x"])

    analysis = tune.run(f, config={"x": tune.grid_search([1, 2])})
    result_grid = ResultGrid(analysis)
    # The trial with the largest reported "x" wins under mode="max".
    best_result = result_grid.get_best_result(metric="x", mode="max")
    assert best_result.config["x"] == 2
    assert best_result.metrics["x"] == 2
Example #4
def test_no_metric_mode(ray_start_2_cpus):
    def f(config):
        tune.report(x=1)

    analysis = tune.run(f)
    result_grid = ResultGrid(analysis)

    # get_best_result() requires both metric and mode when neither was set
    # in tune.run(); passing only one of them is also an error.
    with pytest.raises(ValueError):
        result_grid.get_best_result()

    with pytest.raises(ValueError):
        result_grid.get_best_result(metric="x")

    with pytest.raises(ValueError):
        result_grid.get_best_result(mode="max")
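Taken together, the examples show the typical flow: run an experiment with tune.run, wrap the returned analysis in a ResultGrid, and query it with both metric and mode. A minimal sketch outside of a test, assuming the same Ray version and APIs used above:

from ray import tune
from ray.tune.result_grid import ResultGrid


def train_fn(config):
    # Report one metric per iteration so get_best_result has something to rank.
    for _ in range(2):
        tune.report(score=config["x"])


analysis = tune.run(train_fn, config={"x": tune.grid_search([1, 2, 3])})
result_grid = ResultGrid(analysis)

# Both metric and mode are required here because they were not set in tune.run.
best = result_grid.get_best_result(metric="score", mode="max")
print(best.config["x"], best.metrics["score"])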