Example #1
import time

import numpy as np
from gradient_free_optimizers import HillClimbingOptimizer


def test_max_score_1():
    def objective_function(para):
        score = -para["x1"] * para["x1"]
        time.sleep(0.01)
        return score

    search_space = {
        "x1": np.arange(0, 100, 0.1),
    }

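    # early-stopping threshold: search() ends once a score exceeds max_score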
    max_score = -9999

    c_time = time.time()
    opt = HillClimbingOptimizer(
        search_space, initialize={"warm_start": [{"x1": 99}]}
    )
    opt.search(
        objective_function, n_iter=100000, max_score=max_score,
    )
    diff_time = time.time() - c_time

    print("\n Results head \n", opt.results.head())
    print("\n Results tail \n", opt.results.tail())

    print("\nN iter:", len(opt.results))

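    # the warm start at x1=99 scores -9801, already above max_score, so the
    # run stops long before 100000 iterations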
    assert diff_time < 1
Example #2
import numpy as np
from gradient_free_optimizers import HillClimbingOptimizer


def test_max_score_0():
    def objective_function(para):
        score = -para["x1"] * para["x1"]
        return score

    search_space = {
        "x1": np.arange(0, 100, 0.1),
    }

    max_score = -9999

    opt = HillClimbingOptimizer(
        search_space,
        initialize={"warm_start": [{"x1": 99}]},
        epsilon=0.01,
        rand_rest_p=0,
    )
    opt.search(
        objective_function, n_iter=100000, max_score=max_score,
    )

    print("\n Results head \n", opt.results.head())
    print("\n Results tail \n", opt.results.tail())

    print("\nN iter:", len(opt.results))

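    # the run stops at the first score above max_score (~ -9801 from the
    # warm start), far from the optimum at x1 = 0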
    assert -100 > opt.best_score > max_score
Example #3

import numpy as np

from sklearn.tree import DecisionTreeClassifier
from sklearn.datasets import load_wine
from sklearn.model_selection import cross_val_score

from gradient_free_optimizers import HillClimbingOptimizer

data = load_wine()
X, y = data.data, data.target


def model(para):
    dtc = DecisionTreeClassifier(
        min_samples_split=para["min_samples_split"],
        min_samples_leaf=para["min_samples_leaf"],
    )
    # mean 5-fold cross-validation accuracy is the score to maximize
    scores = cross_val_score(dtc, X, y, cv=5)

    return scores.mean()


search_space = {
    "min_samples_split": np.arange(2, 25, 1),
    "min_samples_leaf": np.arange(1, 25, 1),
}

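# memory=False: every position is evaluated from scratch, even if it was
# visited before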
opt = HillClimbingOptimizer(search_space)
opt.search(model, n_iter=500, memory=False)

print("\n\nMemory activated:")
opt = HillClimbingOptimizer(search_space)
opt.search(model, n_iter=500, memory=True)
Example #4

import numpy as np
from gradient_free_optimizers import HillClimbingOptimizer


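# negated convex bowl: GFO maximizes, so the best score 0 lies at the origin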
def convex_function(pos_new):
    score = -(pos_new["x1"] * pos_new["x1"] + pos_new["x2"] * pos_new["x2"])
    return score


search_space = {
    "x1": np.arange(-100, 101, 0.1),
    "x2": np.arange(-100, 101, 0.1),
}

opt = HillClimbingOptimizer(search_space)
opt.search(convex_function, n_iter=300000)
Example #5
import numpy as np
from gradient_free_optimizers import HillClimbingOptimizer

# Hypothetical stand-in: the original search_space definition is missing
# from this truncated fragment.
search_space = {"x1": np.arange(0, 100, 1)}

# Parameter variations for a population-based optimizer, e.g. for use with
# pytest.mark.parametrize. The opening of the list and its first entries are
# cut off in the source, so the list name is a hypothetical reconstruction.
optimizer_para = [
    ({
        "crossover_rate": 2
    }),
    ({
        "population": 1
    }),
    ({
        "population": 2
    }),
    ({
        "population": 100
    }),
    ({
        "population": [
            HillClimbingOptimizer(search_space),
            HillClimbingOptimizer(search_space),
            HillClimbingOptimizer(search_space),
            HillClimbingOptimizer(search_space),
        ]
    }),
    ({
        "rand_rest_p": 0
    }),
    ({
        "rand_rest_p": 0.5
    }),
    ({
        "rand_rest_p": 1
    }),
]
Example #6

Unlike the examples above, this one drives the optimizer through a lower-level, position-based interface: positions are requested one at a time and their scores are reported back via evaluate(), instead of calling search().
import numpy as np
from gradient_free_optimizers import HillClimbingOptimizer

n_iter = 10


def get_score(pos_new):
    x1 = pos_new[0]

    return -x1 * x1


space_dim = np.array([100])
init_positions = [np.array([10])]

opt = HillClimbingOptimizer(init_positions, space_dim, opt_para={})

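# initialization phase: score each initial position once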
for nth_init in range(len(init_positions)):
    pos_new = opt.init_pos(nth_init)
    score_new = get_score(pos_new)
    opt.evaluate(score_new)

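# main loop: ask for a new position, evaluate it, and report the score back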
for nth_iter in range(len(init_positions), n_iter):
    pos_new = opt.iterate(nth_iter)
    score_new = get_score(pos_new)
    opt.evaluate(score_new)