Example #1
def estimate_opt(x, y, my_pwlf):
    # define the lower and upper bound for the number of line segments
    bounds = [{
        'name': 'var_1',
        'type': 'discrete',
        'domain': np.arange(2, 40)
    }]

    def my_obj(x):
        # Penalty parameter l must be chosen by hand; a sensible value
        # depends on the noise in your data and on the magnitude of
        # your sum of squared residuals.
        l = y.mean() * 0.001
        f = np.zeros(x.shape[0])
        for i, j in enumerate(x):
            my_pwlf.fit(j[0])
            # penalize the number of line segments to discourage overfitting
            f[i] = my_pwlf.ssr + (l * j[0])
        return f

    np.random.seed(12121)

    myBopt = BayesianOptimization(my_obj,
                                  domain=bounds,
                                  model_type='GP',
                                  initial_design_numdata=10,
                                  initial_design_type='latin',
                                  exact_feval=True,
                                  verbosity=True,
                                  verbosity_model=False)
    max_iter = 30

    # perform the bayesian optimization to find the optimum number
    # of line segments
    myBopt.run_optimization(max_iter=max_iter, verbosity=True)

    print('\nOptimum found\n')
    print('Optimum number of line segments:', myBopt.x_opt)
    print('Function value:', myBopt.fx_opt)
    myBopt.plot_acquisition()
    myBopt.plot_convergence()

    return myBopt.x_opt
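
# --- A minimal sketch of how estimate_opt might be driven. It assumes the
# pwlf library (whose PiecewiseLinFit exposes the .fit(n_segments) and .ssr
# attributes used above); the data is synthetic and purely illustrative.
import numpy as np
import pwlf

from GPyOpt.methods import BayesianOptimization  # needed by estimate_opt above

x = np.linspace(0.0, 10.0, 200)
y = np.piecewise(x, [x < 4, x >= 4], [lambda v: 2 * v, lambda v: 8 - v])
y = y + np.random.normal(scale=0.1, size=x.size)

my_pwlf = pwlf.PiecewiseLinFit(x, y)
best_n_segments = estimate_opt(x, y, my_pwlf)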
Example #2
    def gaussian_process(self, loss, **kwargs):
        """
        Tunes our model's hyper-parameter using gaussian process (a bayesian optimization method)

        :param loss: loss function to minimize
        :param kwargs: - nbr_initial_evals : number of points to evaluate before the beginning of the test
                       - method_type : one method available in ['GP', 'GP_MCMC']
                       - acquisition_function : one function available in ['EI', 'MPI']
        """

        # We look for extra parameters
        nbr_initial_evals = kwargs.get('nbr_initial_evals', 5)
        method_type = kwargs.get('method_type', 'GP')
        acquisition_fct = kwargs.get('acquisition_function', 'EI')

        # We verify that the values are valid for the method
        if not isinstance(nbr_initial_evals, int) or nbr_initial_evals < 0:
            raise ValueError(
                'Value passed as nbr_initial_evals is not a non-negative integer')

        if method_type not in gaussian_process_methods:
            raise ValueError('Gaussian process method must be in {}'.format(
                gaussian_process_methods))

        if acquisition_fct not in acquistions_type:
            raise ValueError(
                'Acquisition function must be in {}'.format(acquistions_type))

        # We update tuning history method type
        self.tuning_history.method_type = method_type + acquisition_fct

        # We make sure that the acquisition function and method type fit together
        if method_type == 'GP_MCMC':
            acquisition_fct += '_MCMC'

        # We execute the hyper-parameter optimization
        optimizer = BayesianOptimization(
            loss,
            domain=self.search_space.space,
            model_type=method_type,
            initial_design_numdata=nbr_initial_evals,
            acquisition_type=acquisition_fct)

        optimizer.run_optimization(max_iter=(self.nb_configs() -
                                             nbr_initial_evals))
        optimizer.plot_acquisition()
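
# A hedged usage sketch: `tuner` stands for an instance of the surrounding
# class and `my_loss` for a scalar loss over its search space; both names
# are hypothetical stand-ins for the enclosing project's objects.
# tuner.gaussian_process(my_loss,
#                        nbr_initial_evals=10,
#                        method_type='GP_MCMC',
#                        acquisition_function='EI')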
Example #3
def objective(inputs):
    # NOTE: only the tail of this function appeared in the original snippet;
    # the header and the accumulator initialisation are reconstructed from the
    # calls below (GPyOpt passes a 2-D array, hence the inputs[0][...] indexing).
    val = 0.0
    bar = progressbar.ProgressBar()
    for i in bar(range(12)):
        prob = getProbReorg(alpha=alphas_geq105[i],
                            length=20,
                            init_endorsers=inputs[0][0],
                            delay_priority=inputs[0][1],
                            delay_endorse=inputs[0][2],
                            sample_size=int(1e5))
        val += prob / length_20_probs_nonzero_geq105[i]
    print("value: ", val)
    return val


domain = [{
    'name': 'init_endorsers',
    'type': 'discrete',
    'domain': tuple(range(33))
}, {
    'name': 'delay_priority',
    'type': 'discrete',
    'domain': tuple(range(100))
}, {
    'name': 'delay_endorse',
    'type': 'discrete',
    'domain': tuple(range(100))
}]

opt = BayesianOptimization(f=objective, domain=domain)
opt.run_optimization(max_iter=100)
opt.plot_acquisition()
Example #4
    #print("True rain occ {}".format(calc_rain))
    #print("True sun occ {}".format(calc_sun))
    #print("Predicted occ sun {}".format(ts))
    #print("Predicted occ rain {}".format(tr))
    # combined error between the predicted and true occupancy fractions
    loss = math.sqrt((calc_rain - tr)**2 + (calc_sun - ts)**2) / 2
    #print("Loss {}".format(loss))
    #print(loss.shape)
    return loss


optimizer = BayesianOptimization(f=GH_black_box,
                                 domain=bounds,
                                 model_type='GP',
                                 acquisition_type='EI',
                                 maximize=False)
optimizer.run_optimization(max_iter=2, verbosity=True)
print("The minumum value obtained by the function was {} (x = {})".format(
    optimizer.fx_opt, optimizer.x_opt))

optimizer.plot_acquisition()
"""
for _ in range(5):
    next_point = optimizer.suggest(utility)
    target = GH_black_box(**next_point)
    print(next_point)
    print(target)
    optimizer.register(params=next_point, target=target)

    print(target, next_point)
print(optimizer.max)
"""
Example #5
import numpy as np
import matplotlib.pyplot as plt

from GPyOpt.methods import BayesianOptimization
from matplotlib2tikz import save as tikz_save


def f(x):
    return (6 * x - 2)**2 * np.sin(12 * x - 4)


domain = [{'name': 'var_1', 'type': 'continuous', 'domain': (0, 1)}]

myBopt = BayesianOptimization(f=f, domain=domain)
myBopt.run_optimization(max_iter=5)
myBopt.plot_acquisition()
"""
tikz_save("example_prior.tex", 
            figure=fig1, 
            figureheight='\\figureheight', 
            figurewidth='\\figurewidth')

tikz_save("example_post.tex", 
            figure=fig2, 
            figureheight='\\figureheight', 
            figurewidth='\\figurewidth')
"""
Example #6
# create the plot
plt.plot(x, f_x, 'b-')
plt.show()

# plot previous line using streamlit
st.pyplot()

# ========== set up and run Bayesian optimization ==========
# run Bayesian optimization on charge
# 'domain' is a list of dictionaries describing the input variables
# (see the GPyOpt.core.space.Design_space class for details)
domain = [{'name': 'var_1', 'type': 'continuous', 'domain': (-5, 4)}]

# f = objective function for the Bayesian optimization; domain = [see above]
myBopt_1d = BayesianOptimization(f=obj_func, domain=domain)
myBopt_1d.run_optimization(max_iter=5)
myBopt_1d.plot_acquisition()

# plot the acquisition function using streamlit
st.pyplot()

# ========== get the output of the bayesian optimization ==========

# get_evaluations() returns (X, Y): inputs first, outputs second
ins = myBopt_1d.get_evaluations()[0].flatten()
outs = myBopt_1d.get_evaluations()[1].flatten()
evals = pd.DataFrame({'x': ins, 'y': outs})

# plot
st.write(evals)

st.markdown("The minumum value obtained by the function was %.4f (x = %.4f)" %
            (myBopt_1d.fx_opt, myBopt_1d.x_opt))
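
# obj_func is defined earlier in the app and is not shown here. A minimal
# compatible objective (hypothetical) accepts GPyOpt's 2-D input array and
# returns one value per row, for example:
# def obj_func(X):
#     return np.sin(3 * X) + X ** 2 - 0.7 * X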
Example #7
            
    def simulator(parameters):
        # NOTE: the opening of this function was missing from the original
        # snippet; the header and loop below are a reconstruction that maps
        # each value of the GPyOpt input row onto its named variable in _BDS.
        params = {}
        for bd, value in zip(_BDS, parameters[0]):
            param_name = bd['name']
            params[param_name] = value
        
        gem5_result = gem5.main(params, rm_sim_dir=True, bench_name=_BENCHMARK)

        try:
            success = 1
            result = gem5_results.get_target_value(gem5_result, _TARGET)
        except Exception:
            success = 0
            result = 0.0
            
        write_to_file(_RESULTS_FILE, _BDS, parameters=parameters, success=success, result=result)

        print("Params: ", params, " Result: ", result)

        return result

    optimizer = BayesianOptimization(f=simulator,
                                     domain=_BDS,
                                     model_type='GP',
                                     acquisition_type='EI',
                                     exact_feval=True,
                                     maximize=True,
                                     de_duplication=True)

    optimizer.run_optimization(max_iter=10, verbosity=True)

    optimizer.plot_acquisition(filename="acquisition.png")

    optimizer.plot_convergence(filename="convergence.png")
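
    # _BDS is defined elsewhere in this script. For reference, a GPyOpt domain
    # of this kind typically looks like the following (the names and ranges
    # here are hypothetical):
    # _BDS = [
    #     {'name': 'l1_cache_kb', 'type': 'discrete',   'domain': (16, 32, 64, 128)},
    #     {'name': 'clock_ghz',   'type': 'continuous', 'domain': (1.0, 4.0)},
    # ]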
Example #8
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation

from GPyOpt.methods import BayesianOptimization


# --- Define your problem
def f(x):
    #return (6*x-2)**2*np.sin(12*x-4)
    return -x * np.sin(x) + 0.5 * x


domain = [{'name': 'var_1', 'type': 'continuous', 'domain': (-5, 5)}]

# --- Solve your problem : acquisition_type='EI','MPI'
myBopt = BayesianOptimization(f=f, domain=domain, acquisition_type='EI')

filepath = "gpyopt_img/img"
num = 5
for i in range(num):
    myBopt.run_optimization(max_iter=1)
    myBopt.plot_acquisition(filename=filepath + str(i) + ".png")
    #myBopt.plot_acquisition(filename="gpyopt_img/img"+"{0:04d}".format(num)+".png")
print(myBopt.x_opt)
print(myBopt.fx_opt)

#################################################################################

fig = plt.figure()
ims = []

for i in range(num):
    im = plt.imread(filepath + str(i) + ".png")
    ims.append([plt.imshow(im)])

ani = animation.ArtistAnimation(fig, ims, interval=1000)
plt.show()
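
# To keep the animation, Matplotlib can write it to disk (a sketch; the
# 'pillow' writer requires the Pillow package):
# ani.save(filepath + "_anim.gif", writer="pillow")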
Example #9
            else:
                sess.run(tf.global_variables_initializer())

            # This is where the asynchronous magic happens.
            # Start the "work" process for each worker in a separate thread.

            worker_threads = []
            temp_best_solutions = np.zeros([len(workers)])
            for worker in workers:
                # bind `worker` as a default argument so each thread closes
                # over its own worker instead of the shared loop variable
                worker_work = lambda worker=worker: worker.work(
                    max_episode_length, gamma, sess, coord, saver, saver_best)
                t = threading.Thread(target=worker_work)
                t.start()
                sleep(0.5)
                worker_threads.append(t)
            coord.join(worker_threads)
            for index, worker in enumerate(workers):
                temp_best_solutions[index] = worker.best_solution
            best_solution_found = np.min(temp_best_solutions)
            return -best_solution_found


if __name__ == "__main__":
    BO = BayesianOptimization(f=objective, domain=dec_ranges)
    BO.run_optimization(max_iter=25,
                        verbosity=True,
                        report_file='./report.txt')
    BO.save_evaluations('./evaluations.txt')
    BO.plot_acquisition()
    BO.plot_convergence()
Example #10
# --- Load GPyOpt
from GPyOpt.methods import BayesianOptimization
#import numpy as np

# GPyOpt expects continuous domains as (lower, upper).
# The other variables below were disabled in the original run.
bounds = [
    #{'name': 'x0', 'type': 'discrete',   'domain': (40, 70)},
    #{'name': 'x1', 'type': 'discrete',   'domain': (20, 40)},
    {'name': 'x2', 'type': 'continuous', 'domain': (0.0000001, 0.001)},
    #{'name': 'x3', 'type': 'discrete',   'domain': (20, 50)},
    #{'name': 'x4', 'type': 'continuous', 'domain': (0.95, 0.999)},
    #{'name': 'x5', 'type': 'discrete',   'domain': (20, 30)},
    #{'name': 'x6', 'type': 'continuous', 'domain': (0.00001, 0.01)},
]

# --- Solve your problem


test = BayesianOptimization(f=obj_function, domain=bounds)

test.run_optimization(max_iter=20, verbosity=True, report_file='./report.txt')
test.save_evaluations('./evaluations.txt')
test.plot_acquisition()
test.plot_convergence()

#test.plot_acquisition(filename='./test.png')
test.plot_convergence(filename='./test2.png')
Example #11
            {'name': 'attention_dropout', 'type': 'continuous', 'domain': (0.0, 0.3)},
            {'name': 'relu_dropout', 'type': 'continuous', 'domain': (0.0, 0.3)},
            {'name': 'emb_dim', 'type': 'discrete', 'domain': tuple(range(60, 500 + 1))},
            {'name': 'hop', 'type': 'discrete', 'domain': tuple(range(1, 10 + 1))},
            {'name': 'heads', 'type': 'discrete', 'domain': tuple(range(1, 10 + 1))},
            {'name': 'depth_key', 'type': 'discrete', 'domain': tuple(range(20, 80 + 1))},
            {'name': 'depth_val', 'type': 'discrete', 'domain': tuple(range(20, 80 + 1))},
            {'name': 'filter', 'type': 'discrete', 'domain': tuple(range(60, 300 + 1))},
            {'name': 'batch_size', 'type': 'discrete', 'domain': tuple(range(32, 64 + 1))}]

    X_init = np.array([[0.001, 0.0, 0.0, 0.0, 0.0, 100, 6, 4, 40, 40, 50, 32]])
    Y_init = h_trs(X_init)
    # GPyOpt expects warm-start data as X with shape (n_points, n_vars) and
    # Y with shape (n_points, 1), hence the extra nesting around Y_init.
    optimizer = BayesianOptimization(f=h_trs,
                                     domain=bds,
                                     model_type='GP',
                                     acquisition_type='EI',
                                     acquisition_jitter=0.05,
                                     exact_feval=False,
                                     maximize=True,
                                     X=X_init,
                                     Y=np.array([[Y_init]]),
                                     verbosity_model=True)

    # Run the optimization (the warm-start evaluation above seeds the
    # initial design)
    optimizer.run_optimization(max_iter=100,
                               verbosity=True,
                               report_file="save/{}/report.txt".format(gp_folder))
    optimizer.save_evaluations(evaluations_file="save/{}/evaluation.txt".format(gp_folder))
    optimizer.plot_acquisition(filename="save/{}/acquisition.pdf".format(gp_folder))
    optimizer.plot_convergence(filename="save/{}/convergence.pdf".format(gp_folder))


Example #12

dom = [{'name': 'PatientCPRA', 'type': 'continuous', 'domain': (0, 1)}]
for BTP in range(4):
    print("done {}".format(BTP))
    for BTD in range(4):
        print("intermediate {}".format(BTD))
        maxi = 5
        X = np.empty([0, 1])
        Y = np.empty([0, 1])
        jargs = [BTP, BTD, 0, 0]
        if jargs == [1, 3, 0, 0]:
            TRAJECTORIES = 64
        elif jargs == [2, 1, 0, 0]:
            TRAJECTORIES = 128
        elif jargs == [3, 3, 0, 0]:
            TRAJECTORIES = 128
        else:
            TRAJECTORIES = 32
        print("Bayesian optimization for master features " + str(jargs))
        myBopt = BayesianOptimization(f=f,
                                      domain=dom,
                                      acquisition_type='LCB',
                                      num_cores=8)
        myBopt.run_optimization(
            max_iter=maxi,
            eps=0,
            evaluations_file=output_dir + "E" + l2f(jargs) + ".txt",
            models_file=output_dir + "M" + l2f(jargs) + ".txt")
        myBopt.plot_acquisition(output_dir + "Plot" + l2f(jargs) + ".png")