Example 1
def test_dump_and_load_optimizer():
    """Round-trip a fitted Optimizer through dump/load via a temp file."""
    base_estimator = ExtraTreesRegressor(random_state=2)
    opt = Optimizer([(-2.0, 2.0)], base_estimator, n_random_starts=1,
                    acq_optimizer="sampling")

    opt.run(bench1, n_iter=3)

    with tempfile.TemporaryFile() as f:
        dump(opt, f)
        # Rewind to the beginning of the file: after dump() the file
        # position is at EOF, so load() would otherwise read nothing
        # and fail to deserialize the optimizer.
        f.seek(0)
        load(f)
Example 2
def test_dump_and_load_optimizer():
    """Check that a run Optimizer survives a dump/load round trip."""
    opt = Optimizer(
        [(-2.0, 2.0)],
        ExtraTreesRegressor(random_state=2),
        n_random_starts=1,
        acq_optimizer="sampling",
    )
    opt.run(bench1, n_iter=3)

    with tempfile.TemporaryFile() as f:
        dump(opt, f)
        f.seek(0)  # rewind so load() reads from the start of the file
        load(f)
Example 3
 def train_batchwise(self):
     """Optimize ``self.params`` ten parameters at a time.

     For each batch, runs ten rounds of optimization; each round fits a
     fresh GP-based Optimizer over a 10-element slice of
     ``self.param_ranges`` and writes the best point found back into the
     matching slice of ``self.params``.
     """
     # NOTE(review): the original also built an Optimizer over the full
     # ``self.param_ranges`` before the loop, but it was rebuilt on the
     # first inner iteration before any use — a dead (and potentially
     # expensive) construction, removed here.
     for batch in range(self.n_batches):
         print("starting a batch")
         for start in range(10):
             print("starting a round of optimization")
             lo, hi = start * 10, start * 10 + 10
             self.training_param_range = [lo, hi]
             opt = Optimizer(self.param_ranges[lo:hi], "GP")
             r = opt.run(self.objective, n_iter=self.n_iter)
             self.params[lo:hi] = r.x
Example 4
def base_optimizer(black_box_function, mp_opt=False):
    """Run Bayesian optimization on transfer parameters until the
    objective signals termination.

    Parameters
    ----------
    black_box_function : callable
        Objective evaluated once per iteration via ``optimizer.run``.
    mp_opt : bool, optional
        If True, search concurrency, parallelism and pipeline jointly;
        otherwise search concurrency alone, bounded by global ``max_cc``.

    Loops indefinitely (one optimization step per pass) and exits when
    the latest objective value equals -1 — presumably a sentinel set by
    the objective on shutdown; verify against the caller.
    """
    global max_cc
    # limit_obs: cap on retained observations; count: iteration counter;
    # init_points: random samples before the surrogate model kicks in.
    limit_obs, count, init_points = 30, 0, 6 if mp_opt else 5
    
    if mp_opt:
        search_space  = [
            Integer(1, 32), # Concurrency
            Integer(1, 4), # Parallelism
            Integer(1, 10), # Pipeline
            ]
    else:
        search_space  = [
            Integer(1, max_cc), # Concurrency
            ]
        
    optimizer = BO(
        dimensions=search_space,
        base_estimator="GP", #[GP, RF, ET, GBRT],
        acq_func="gp_hedge", # [LCB, EI, PI, gp_hedge]
        acq_optimizer="auto", #[sampling, lbfgs, auto]
        n_initial_points=init_points,
        model_queue_size= limit_obs,
    )
        
    while True:
        count += 1

        # Keep only the newest observations so the surrogate model
        # stays cheap to refit on long runs.
        if len(optimizer.yi) > limit_obs:
            optimizer.yi = optimizer.yi[-limit_obs:]
            optimizer.Xi = optimizer.Xi[-limit_obs:]
            
        logger.info("Iteration {0} Starts ...".format(count))

        t1 = time.time()
        # One suggest/evaluate/tell cycle per loop pass.
        res = optimizer.run(func=black_box_function, n_iter=1)
        t2 = time.time()

        logger.info("Iteration {0} Ends, Took {3} Seconds. Best Params: {1} and Score: {2}.".format(
            count, res.x, np.round(res.fun), np.round(t2-t1, 2)))

        # -1 as the latest observed value is treated as the stop signal.
        if optimizer.yi[-1] == -1:
            logger.info("Optimizer Exits ...")
            break
Example 5
        return target_score


if __name__ == '__main__':
    dimensions = [Real(name='alpha', low=-180.0, high=180.0)]

    for dataset_size in [2000, 3000, 5000, 10000, 20000, 35000, 70000]:
        s_time = time.time()
        opt = Optimizer(dimensions=dimensions,
                        n_initial_points=n_initial_points)
        optimization_attempt = myOptimizer(dataset_size=dataset_size,
                                           alpha0=alpha0,
                                           n_epochs=n_epochs,
                                           batch_size=batch_size)
        p_time = time.time()
        res = opt.run(optimization_attempt.target_function, n_iter=n_steps)
        e_time = time.time()

        with open('{}_{}_start.pcl'.format(alpha0, dataset_size),
                  'wb') as iofile:
            save_dict = {
                'opt': opt,
                'alpha0': alpha0,
                'dataset_size': dataset_size,
                'results_dict': optimization_attempt.results_dict,
                'raw_sysargv_params': params,
                'time': (s_time, p_time, e_time)
            }
            pickle.dump(save_dict, iofile)

        print('Process with alpha0 = {} finished with dataset size {}'.format(
#############################################################################
# We see that some minima is found and "exploited"
#
# Now let's try setting kappa and xi to other values and
# pass them to the optimizer:
acq_func_kwargs = {"xi": 10000, "kappa": 10000}
#############################################################################

opt = Optimizer([(-2.0, 2.0)],
                "GP",
                n_initial_points=1,
                acq_optimizer="sampling",
                acq_func_kwargs=acq_func_kwargs)
#############################################################################
opt.run(objective, n_iter=20)
plot_optimizer(opt, x, fx)
#############################################################################
# We see that the points are more random now.
#
# This also works for kappa when using acq_func="LCB":

#############################################################################
opt = Optimizer([(-2.0, 2.0)],
                "GP",
                n_initial_points=1,
                acq_func="LCB",
                acq_optimizer="sampling",
                acq_func_kwargs=acq_func_kwargs)
#############################################################################
opt.run(objective, n_iter=20)
Example 7
 def train(self):
     """Fit a GP-based optimizer over the full parameter ranges and
     store the best parameters found on the instance."""
     optimizer = Optimizer(self.param_ranges, "GP")
     result = optimizer.run(self.objective, n_iter=self.n_iter)
     self.params = result.x
Example 8
def base_optimizer(configurations, black_box_function, logger, verbose=True):
    """Bayesian-optimize transfer parameters described by ``configurations``.

    Parameters
    ----------
    configurations : dict
        Expects keys "thread_limit", "mp_opt", and "bayes" (itself with
        "num_of_exp" and "initial_run").
    black_box_function : callable
        Objective evaluated once per ``optimizer.run`` step.
    logger : logging.Logger
        Destination for progress messages.
    verbose : bool, optional
        When False, per-iteration progress logging is suppressed.

    Returns
    -------
    list
        Best parameter vector found after ``num_of_exp`` iterations, or
        ``[]`` if the loop exits via the sentinel objective value.
    """
    limit_obs, count = 25, 0
    max_thread = configurations["thread_limit"]
    iterations = configurations["bayes"]["num_of_exp"]
    mp_opt = configurations["mp_opt"]

    if mp_opt:
        search_space = [
            Integer(1, max_thread),  # Concurrency
            Integer(1, 10),  # Parallelism
            Integer(1, 10),  # Pipeline
            Integer(5, 20),  # Chunk/Block Size in KB: power of 2
        ]
    else:
        search_space = [
            Integer(1, max_thread),  # Concurrency
        ]

    params = []
    optimizer = Optimizer(
        dimensions=search_space,
        base_estimator="GP",  #[GP, RF, ET, GBRT],
        acq_func="gp_hedge",  # [LCB, EI, PI, gp_hedge]
        acq_optimizer="auto",  #[sampling, lbfgs, auto]
        n_random_starts=configurations["bayes"]["initial_run"],
        model_queue_size=limit_obs,
        # acq_func_kwargs= {},
        # acq_optimizer_kwargs={}
    )

    while True:
        count += 1

        # Keep only the newest observations so surrogate refits stay cheap.
        if len(optimizer.yi) > limit_obs:
            optimizer.yi = optimizer.yi[-limit_obs:]
            optimizer.Xi = optimizer.Xi[-limit_obs:]

        if verbose:
            logger.info("Iteration {0} Starts ...".format(count))

        t1 = time.time()
        # One suggest/evaluate/tell cycle per loop pass.
        res = optimizer.run(func=black_box_function, n_iter=1)
        t2 = time.time()

        if verbose:
            logger.info(
                "Iteration {0} Ends, Took {3} Seconds. Best Params: {1} and Score: {2}."
                .format(count, res.x, res.fun, np.round(t2 - t1, 2)))

        # 10**10 is treated as a sentinel objective value signalling
        # shutdown — presumably produced by black_box_function; verify
        # against the caller.
        last_value = optimizer.yi[-1]
        if last_value == 10**10:
            logger.info("Optimizer Exits ...")
            break

        # cc: concurrency chosen in the last evaluated point.
        cc = optimizer.Xi[-1][0]
        # num_of_exp < 1 means "run indefinitely": adaptively shrink or
        # grow the concurrency bound around the observed optimum.
        if iterations < 1:
            reset = False
            # Positive score with headroom below the bound: tighten it.
            if (last_value > 0) and (cc < max_thread):
                max_thread = max(cc, 2)
                reset = True

            # Negative score while pinned at the bound: relax it, but
            # never beyond the configured hard thread limit.
            if (last_value < 0) and (cc == max_thread) and (
                    cc < configurations["thread_limit"]):
                max_thread = min(cc + 5, configurations["thread_limit"])
                reset = True

            if reset:
                # Rebuild the optimizer from scratch over the new bound;
                # all previous observations are discarded.
                # NOTE(review): this reset uses n_initial_points/lbfgs
                # while the first construction used n_random_starts/auto
                # — looks inconsistent; confirm whether intentional.
                search_space[0] = Integer(1, max_thread)
                optimizer = Optimizer(
                    dimensions=search_space,
                    n_initial_points=configurations["bayes"]["initial_run"],
                    acq_optimizer="lbfgs",
                    model_queue_size=limit_obs)

        if iterations == count:
            logger.info("Best parameters: {0} and score: {1}".format(
                res.x, res.fun))
            params = res.x
            break

    return params