Code example #1 (votes: 0)
def test_run_training_function_return_value():
    """Check that tune.run accepts both dict-valued and scalar-valued objectives."""
    from flaml import tune

    def dict_objective(config):
        # Quadratic bowl around x ~= 85000, perturbed by x / y; wrapped in a dict.
        score = (round(config["x"]) - 85000) ** 2 - config["x"] / config["y"]
        return {"metric": score}

    # Dict return value: the metric name must be passed explicitly.
    tune.run(
        dict_objective,
        config={
            "x": tune.qloguniform(lower=1, upper=100000, q=1),
            "y": tune.qrandint(lower=2, upper=100000, q=2),
        },
        metric="metric",
        mode="max",
        num_samples=100,
    )

    def scalar_objective(config):
        # Same objective, but returned as a bare number.
        return (round(config["x"]) - 85000) ** 2 - config["x"] / config["y"]

    # Scalar return value: no metric name is needed, only the optimization mode.
    tune.run(
        scalar_objective,
        config={
            "x": tune.qloguniform(lower=1, upper=100000, q=1),
            "y": tune.qlograndint(lower=2, upper=100000, q=2),
        },
        num_samples=100,
        mode="max",
    )
Code example #2 (votes: 0)
File: test_automl.py — Project: pplonski/FLAML
 def search_space(cls, data_size, task):
     """Return the hyperparameter search space for this estimator.

     Each entry maps a hyperparameter name to a dict with a sampling
     ``'domain'`` and, where applicable, an ``'init_value'`` used as the
     low-cost starting point.
     """
     # Integer-valued parameters sampled on a log scale, quantized to step 1.
     return {
         'max_leaf': {
             'domain': tune.qloguniform(lower=4, upper=data_size, q=1),
             'init_value': 4
         },
         'n_iter': {
             'domain': tune.qloguniform(lower=1, upper=data_size, q=1),
             'init_value': 1
         },
         'n_tree_search': {
             'domain': tune.qloguniform(lower=1, upper=32768, q=1),
             'init_value': 1
         },
         'opt_interval': {
             'domain': tune.qloguniform(lower=1, upper=10000, q=1),
             'init_value': 100
         },
         # Continuous log-scale parameter; no explicit init value.
         'learning_rate': {
             'domain': tune.loguniform(lower=0.01, upper=20.0)
         },
         'min_samples_leaf': {
             'domain': tune.qloguniform(lower=1, upper=20, q=1),
             'init_value': 20
         },
     }
Code example #3 (votes: 0)
 def search_space(cls, data_size, task):
     """Return the search space for tree-ensemble size parameters.

     The upper bound for both parameters is capped at 32768 or the data
     size, whichever is smaller.
     """
     cap = min(32768, int(data_size))
     tree_param = lambda: {
         'domain': tune.qloguniform(lower=4, upper=cap, q=1),
         'init_value': 4,
     }
     # Both parameters share the same log-scale integer domain.
     return {
         'n_estimators': tree_param(),
         'max_leaves': tree_param(),
     }
Code example #4 (votes: 0)
File: test_constraints.py — Project: sonichi/FLAML
def test_config_constraint():
    """Check that tune.run enforces a user-supplied config constraint."""
    from flaml import tune

    def dict_objective(config):
        # Quadratic bowl around x ~= 85000, perturbed by x / y; dict return.
        score = (round(config["x"]) - 85000) ** 2 - config["x"] / config["y"]
        return {"metric": score}

    def constraint_fn(config):
        # 1 when the constraint y >= x holds, 0 otherwise.
        return 1 if config["y"] >= config["x"] else 0

    # Only configs where constraint_fn(config) > 0.5 (i.e. y >= x) are evaluated.
    tune.run(
        dict_objective,
        config={
            "x": tune.qloguniform(lower=1, upper=100000, q=1),
            "y": tune.qrandint(lower=2, upper=100000, q=2),
        },
        config_constraints=[(constraint_fn, ">", 0.5)],
        metric="metric",
        mode="max",
        num_samples=100,
    )
Code example #5 (votes: 0)
def test_define_by_run():
    """Exercise hierarchical space helpers with BlendSearch and CFO searchers."""
    from flaml.tune.space import (
        unflatten_hierarchical,
        normalize,
        indexof,
        complete_config,
    )

    # A flat space touching every supported sampler type plus a constant.
    leaf_space = {
        # float uniform in [-5.0, -1.0]
        "uniform": tune.uniform(-5, -1),
        # float uniform in [3.2, 5.4], quantized to steps of 0.2
        "quniform": tune.quniform(3.2, 5.4, 0.2),
        # float log-uniform in [1e-4, 1e-2]
        "loguniform": tune.loguniform(1e-4, 1e-2),
        # float log-uniform in [1e-4, 1e-1], quantized to steps of 5e-5
        "qloguniform": tune.qloguniform(1e-4, 1e-1, 5e-5),
        # normal samplers are added later (see the mutation below);
        # kept here for reference:
        # "randn": tune.randn(10, 2),
        # "qrandn": tune.qrandn(10, 2, 0.2),
        # integer uniform in [-9, 15) — upper bound exclusive
        "randint": tune.randint(-9, 15),
        # integer uniform in [-21, 12], step 3 — upper bound inclusive
        "qrandint": tune.qrandint(-21, 12, 3),
        # integer log-uniform in [1, 10) — upper bound exclusive
        "lograndint": tune.lograndint(1, 10),
        # integer log-uniform in [2, 10], step 2 — upper bound inclusive
        "qlograndint": tune.qlograndint(2, 10, 2),
        # categorical choice
        "choice": tune.choice(["a", "b", "c"]),
        # plain constant
        "const": 5,
    }
    wrapped = {"nested": leaf_space}
    searcher = BlendSearch(
        space={"c": tune.choice([wrapped])},
        low_cost_partial_config={"c": wrapped},
        metric="metric",
        mode="max",
    )
    # Locate the nested space (and a partial match) inside the choice domain.
    print(indexof(searcher._gs.space["c"], wrapped))
    print(indexof(searcher._gs.space["c"], {"nested": {"const": 1}}))
    suggestion = searcher._gs.suggest("t1")
    print(suggestion)
    suggestion = unflatten_hierarchical(suggestion, searcher._gs.space)[0]
    print(suggestion)
    print(normalize({"c": [wrapped]}, searcher._gs.space, suggestion, {}, False))
    # Mutate the space after BlendSearch construction, then drive CFO over it.
    leaf_space["randn"] = tune.randn(10, 2)
    local_search = CFO(
        space={"c": tune.choice([0, wrapped])},
        metric="metric",
        mode="max",
    )
    for trial in range(5):
        local_search.suggest(f"t{trial}")
    # print(normalize(config, bs._gs.space, config, {}, False))
    print(complete_config({}, local_search._ls.space, local_search._ls))