Example #1
import numpy as np
from GPyOpt.methods import BayesianOptimization


def estimate_opt(x, y, my_pwlf):
    # define the lower and upper bound for the number of line segments
    bounds = [{
        'name': 'var_1',
        'type': 'discrete',
        'domain': np.arange(2, 40)
    }]

    def my_obj(x):
        # define a penalty parameter; you'll have to pick it somewhat
        # arbitrarily, since it depends on the noise in your data
        # and on the magnitude of your sum of squared residuals
        penalty = y.mean() * 0.001
        f = np.zeros(x.shape[0])
        for i, j in enumerate(x):
            my_pwlf.fit(j[0])
            f[i] = my_pwlf.ssr + (penalty * j[0])
        return f

    np.random.seed(12121)

    myBopt = BayesianOptimization(my_obj,
                                  domain=bounds,
                                  model_type='GP',
                                  initial_design_numdata=10,
                                  initial_design_type='latin',
                                  exact_feval=True,
                                  verbosity=True,
                                  verbosity_model=False)
    max_iter = 30

    # perform the Bayesian optimization to find the optimum number
    # of line segments
    myBopt.run_optimization(max_iter=max_iter, verbosity=True)

    print('\nOptimum found\n')
    print('Optimum number of line segments:', myBopt.x_opt)
    print('Function value:', myBopt.fx_opt)
    myBopt.plot_acquisition()
    myBopt.plot_convergence()

    return myBopt.x_opt
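For context, a minimal usage sketch for this helper (an illustration only: it presumes the pwlf package and uses synthetic data purely to show the expected shapes):

import numpy as np
import pwlf

# synthetic noisy piecewise-linear data (illustrative only)
x = np.linspace(0.0, 1.0, 200)
y = np.where(x < 0.5, 2.0 * x, 1.0 - 0.5 * (x - 0.5))
y += np.random.normal(0.0, 0.01, x.size)

my_pwlf = pwlf.PiecewiseLinFit(x, y)
best_n_segments = estimate_opt(x, y, my_pwlf)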
Example #2
optimizer = BayesianOptimization(f=Schwefel,
                                 domain=bds,
                                 model_type='GP',
                                 kernel=kernel,
                                 acquisition_type='EI',
                                 maximize=False)

t0 = time.time()
optimizer.run_optimization(max_iter=200, max_time=7200)
t1 = time.time()
print("time:")
print(t1 - t0)

#optimizer.plot_acquisition()
optimizer.plot_convergence()

# get the candidate solutions and their evaluations
ins, outs = optimizer.get_evaluations()
outputs = outs.flatten()
# track the best (minimum) objective value found so far at each evaluation,
# so the plot actually matches its title
best_so_far = np.minimum.accumulate(outputs)

plt.figure(figsize=(6.4, 4.8))
plt.plot(best_so_far, color='blue')
plt.xlabel("Iterations")
plt.ylabel("Objective Value")
plt.title("Best Candidate Solution at each Iteration", fontsize='small')
Example #3
myBopt = BayesianOptimization(f=FV_2D,
                              X=inputs_all[:, 0, :],
                              Y=runup_mat,
                              domain=domain,
                              constraints=constrains,
                              kernel=kernel,
                              acquisition_type='EI',
                              initial_design_numdata=0,
                              exact_feval=True,
                              verbosity=True,
                              cost_withGradients=None,
                              model_type='GP',
                              acquisition_optimizer_type='lbfgs')

myBopt.run_optimization(max_iter=100)
myBopt.plot_convergence()

Bopt_out = open('Bopt_regular.pickle', 'wb')
pickle.dump([myBopt.X, myBopt.Y, kernel.variance[0],
             list(kernel.lengthscale)], Bopt_out)
#pickle.dump([we, rt], f_FV_input)
Bopt_out.close()

min_runup = np.minimum.accumulate(myBopt.Y).ravel()
iteration = np.arange(1, min_runup.shape[0] + 1)
fig2 = plt.figure()
ax2 = fig2.add_subplot(1, 1, 1)
ax2.plot(iteration, min_runup, 'r-')
ax2.set_xlabel('Iteration no.', fontsize=13)
ax2.set_ylabel('minimum runup (m)', fontsize=13)
plt.show()
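For later reuse, the pickled results can be read back with a matching load (a small sketch mirroring the dump order above):

import pickle

with open('Bopt_regular.pickle', 'rb') as f:
    X, Y, kernel_variance, kernel_lengthscales = pickle.load(f)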
Example #4
domain = [
    {'name': 'n_in', 'type': 'discrete', 'domain': [1, 2, 3, 4, 5, 6, 7, 8]},
    # {'name': 'n_out', 'type': 'discrete', 'domain': [1]},
    {'name': 'g_layer_size', 'type': 'discrete', 'domain': [2, 4, 8, 16, 32, 64]},
    {'name': 'g_dropout', 'type': 'continuous', 'domain': (0, 0.5)},

    {'name': 'd_layer_size', 'type': 'discrete', 'domain': [2, 4, 8, 16, 32, 64]},
    {'name': 'd_dropout', 'type': 'continuous', 'domain': (0, 0.5)},
    # {'name': 'learning_rate', 'type': 'continuous', 'domain': (0.0001, 0.1)},
    {'name': 'num_train_d', 'type': 'discrete', 'domain': [1, 2, 3, 4, 5]},
    {'name': 'batch_size', 'type': 'discrete', 'domain': [4, 8, 16, 32]},
]
constraints = []

opt = BayesianOptimization(f=custom_fitness,
                           domain=domain,
                           constraints=constraints,
                           num_cores=6,
                           batch_size=6,
                           initial_design_numdata=30)

opt.run_optimization(max_iter=100, max_time=np.inf, verbosity=True,
                     report_file=os.path.join(path_log, 'report.txt'),
                     evaluations_file=os.path.join(path_log, 'evaluations.txt'),
                     models_file=os.path.join(path_log, 'model_file.txt'))
opt.plot_convergence()
# print(opt.X)
print(opt.Y)
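custom_fitness itself is defined elsewhere; the one convention worth knowing is that GPyOpt calls the objective with a 2-D array, one row per candidate, with columns ordered like the domain list above. A hypothetical stub showing the unpacking:

def custom_fitness(x):
    # x has shape (n_points, n_variables); columns follow the domain order
    n_in, g_layer_size, g_dropout, d_layer_size, d_dropout, \
        num_train_d, batch_size = x[0]
    # ... build and train the model with these hyperparameters (omitted) ...
    return 0.0  # placeholder: return the real validation metric here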
Example #5
            params[param_name] = value
        
        gem5_result = gem5.main(params, rm_sim_dir=True, bench_name=_BENCHMARK)

        try:
            success = 1
            result = gem5_results.get_target_value(gem5_result, _TARGET)
        except Exception:
            success = 0
            result = 0.0

        write_to_file(_RESULTS_FILE, _BDS, parameters=parameters, success=success, result=result)

        print("Params: ", params, " Result: ", result)

        return result  

    optimizer = BayesianOptimization(f=simulator,
                                     domain=_BDS,
                                     model_type='GP',
                                     acquisition_type='EI',
                                     exact_feval=True,
                                     maximize=True,
                                     de_duplication=True)

    optimizer.run_optimization(max_iter=10, verbosity=True)

    optimizer.plot_acquisition(filename="acquisition.png")

    optimizer.plot_convergence(filename="convergence.png")
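The top of simulator is cut off in this excerpt. A hypothetical reconstruction of the missing lines (assuming _BDS is the usual list of GPyOpt domain dicts, whose names match gem5 parameters):

def simulator(parameters):
    # GPyOpt passes a 2-D array; map its single row onto named gem5 params
    params = {}
    for bd, value in zip(_BDS, parameters[0]):
        param_name = bd['name']
        params[param_name] = value
    # ... the function then continues as shown above ...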
Example #6
        synthesized_programs = int(out.decode("utf-8"))
        print(synthesized_programs)
        return synthesized_programs


# read optional configuration from the environment, with defaults;
# unlike a single try/except around both reads, this honors ACQUISITION
# even when DOMAIN_MAX is unset
domainMax = int(os.environ.get('DOMAIN_MAX', 2))
acquisition_type = os.environ.get('ACQUISITION', 'EI')

domain = [{
    'name': "var-" + gadget,
    'type': 'discrete',
    'domain': tuple(range(0, domainMax))
} for gadget in vocab]
print(domain, acquisition_type)

myBopt = BayesianOptimization(f,
                              domain,
                              maximize=True,
                              exact_feval=True,
                              acquisition_optimizer_type='lbfgs',
                              acquisition_type=acquisition_type)
myBopt.run_optimization(max_iter=35)
myBopt.save_report("report")
myBopt.save_evaluations("evals")
myBopt.plot_convergence("convergence.png")
Example #7
#%%
import numpy as np
import torch
from GAN_utils import upconvGAN
G = upconvGAN("fc6")
#%%
G.cuda()
def img_contrast(code):
    # per-image pixel standard deviation as a simple contrast measure
    with torch.no_grad():
        img = G.visualize(torch.from_numpy(code).cuda().float())
    out = img.std(dim=(1, 2, 3)).cpu().numpy()
    return out[:, np.newaxis]
domain = [{'name': 'code', 'type': 'continuous', 'domain': (-3,3), 'dimensionality': 4096}]
# maximize=True makes GPyOpt negate the objective internally
GANBopt = BayesianOptimization(f=img_contrast, domain=domain, batch_size=1,
                               acquisition_optimizer_type='lbfgs',
                               verbosity=True, maximize=True,
                               acquisition_type="LCB")
GANBopt.run_optimization(max_iter=600, max_time=600, verbosity=True)
GANBopt.plot_convergence()
#%%
import cma
# CMA-ES minimizes, so the contrast objective is negated here
def img_contrast(code):
    with torch.no_grad():
        img = G.visualize(torch.from_numpy(code).cuda().float())
    out = img.std(dim=(1, 2, 3)).cpu().numpy()
    return -out[:, np.newaxis]

def img_contrast_batch(codes):
    # same objective, but for a list of candidate codes at once
    with torch.no_grad():
        img = G.visualize(torch.from_numpy(np.array(codes)).cuda().float())
    out = img.std(dim=(1, 2, 3)).cpu().numpy()
    return (-out).tolist()
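img_contrast_batch is defined here but never driven; a minimal CMA-ES loop using the standard cma ask/tell interface could look like this (start point, sigma, and the iteration cap are arbitrary choices):

es = cma.CMAEvolutionStrategy(4096 * [0.0], 1.0, {'maxiter': 50})
while not es.stop():
    codes = es.ask()                           # sample a population of codes
    es.tell(codes, img_contrast_batch(codes))  # report (negated) contrasts
es.result_pretty()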
Example #8
            else:
                sess.run(tf.global_variables_initializer())

            # This is where the asynchronous magic happens.
            # Start the "work" process for each worker in a separate thread.

            worker_threads = []
            temp_best_solutions = np.zeros([len(workers)])
            for worker in workers:
                # bind the current worker explicitly to avoid the
                # late-binding closure pitfall inside the loop
                worker_work = lambda worker=worker: worker.work(
                    max_episode_length, gamma, sess, coord, saver, saver_best)
                t = threading.Thread(target=worker_work)
                t.start()
                sleep(0.5)
                worker_threads.append(t)
            coord.join(worker_threads)
            for index, worker in enumerate(workers):
                temp_best_solutions[index] = worker.best_solution
            best_solution_found = np.min(temp_best_solutions)
            return -best_solution_found


if __name__ == "__main__":
    BO = BayesianOptimization(f=objective, domain=dec_ranges)
    BO.run_optimization(max_iter=25,
                        verbosity=True,
                        report_file='./report.txt')
    BO.save_evaluations('./evaluations.txt')
    BO.plot_acquisition()
    BO.plot_convergence()
Example #9
def optimize_parameters(input_path: str, output_path: str,
                        google_files_aligner: GoogleFilesAligner,
                        alignment_parameters: Dict[str, Any],
                        convergence_plot_file: str, verbosity: int) -> None:
    """
    Tries to find the best parameters for google alignment.

    :param input_path:            Path to load all alignments from
    :param output_path:           Path to write the alignments to
    :param google_files_aligner:  GoogleFilesAligner to re-align every epoch
    :param alignment_parameters:  Alignment parameters for comparison
    :param convergence_plot_file: Where to save the convergence plot
    :param verbosity:             Verbosity of the output

    :return: None
    """
    def optimize_function(params: List) -> float:
        """
        Function to optimize against

        :param params: Parameters given by BOpt

        :return: Calculated score
        """
        bin_print(verbosity, 1, "Starting new iteration...")

        google_files_aligner.alignment_parameters["algorithm"][
            "match_reward"] = params[0][0]
        google_files_aligner.alignment_parameters["algorithm"][
            "mismatch_penalty"] = params[0][1]
        google_files_aligner.alignment_parameters["algorithm"][
            "gap_penalty"] = params[0][2]

        bin_print(verbosity, 3, "Configured params: ",
                  google_files_aligner.alignment_parameters)

        google_files_aligner.align_files(input_path, output_path, 0)

        # Not "training_only", because we're using a further boiled down training set.
        result = compare_alignments(input_path, 0, "hand", "google", False,
                                    alignment_parameters)

        # Configurable, see config.example.yml
        score = eval(
            google_files_aligner.
            alignment_parameters["optimize_params_formula"],
            {"__builtins__": None}, {
                "deviation": result["scores"]["deviation"]["mean"],
                "iou": result["ious"]["mean"],
                "f1": result["appearance"]["f1_score"],
                "precision": result["appearance"]["precision"],
                "recall": result["appearance"]["recall"],
            })

        bin_print(verbosity, 1, "Parameters:                         ", params)
        bin_print(verbosity, 1, "Achieved score (smaller == better): ", score)

        return score

    domain = [
        {
            "name": "match_reward",
            "type": "continuous",
            "domain": (0, 100)
        },
        {
            "name": "mismatch_penalty",
            "type": "continuous",
            "domain": (-100, 0)
        },
        {
            "name": "gap_penalty",
            "type": "continuous",
            "domain": (-100, 0)
        },
    ]

    bopt = BayesianOptimization(f=optimize_function,
                                domain=domain,
                                model_type="GP",
                                acquisition_type="EI",
                                acquisition_jitter=0.05)

    bopt.run_optimization(max_iter=25)

    bopt.plot_convergence(filename=convergence_plot_file)

    bin_print(verbosity, 0, "Best values:", bopt.x_opt)
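The scoring expression itself lives in configuration (optimize_params_formula, see config.example.yml). A hypothetical formula, evaluated exactly as above with builtins disabled, might be:

formula = "deviation * (1 - iou) + (1 - f1)"  # hypothetical example
score = eval(formula, {"__builtins__": None},
             {"deviation": 0.12, "iou": 0.85, "f1": 0.9,
              "precision": 0.92, "recall": 0.88})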
Example #10
# --- Load GPyOpt
from GPyOpt.methods import BayesianOptimization
#import numpy as np

bounds = [
    # {'name': 'x0', 'type': 'discrete', 'domain': (40, 70)},
    # {'name': 'x1', 'type': 'discrete', 'domain': (20, 40)},
    {'name': 'x2', 'type': 'continuous', 'domain': (0.0000001, 0.001)},
    # {'name': 'x3', 'type': 'discrete', 'domain': (20, 50)},
    # {'name': 'x4', 'type': 'continuous', 'domain': (0.95, 0.999)},
    # {'name': 'x5', 'type': 'discrete', 'domain': (20, 30)},
    # {'name': 'x6', 'type': 'continuous', 'domain': (0.00001, 0.01)},
]

# bounds = [{'name': 'x', 'type': 'continuous', 'domain': [(-1, 1), (-1, 1)]}]

# --- Solve your problem
test = BayesianOptimization(f=obj_function, domain=bounds)
# myBopt = BayesianOptimization(f=f, domain=domain)

test.run_optimization(max_iter=20, verbosity=True, report_file='./report.txt')
test.save_evaluations('./evaluations.txt')
test.plot_acquisition()
test.plot_convergence()

#test.plot_acquisition(filename='./test.png')
test.plot_convergence(filename='./test2.png')
#myBopt.run_optimization(max_iter=5)
#myBopt.plot_acquisition()
Example #11
def optimize_score(input_path: str, alignment_parameters: Dict[str, Any],
                   convergence_plot_file: str, verbosity: int) -> None:
    """
    Tries to find the best parameters for overall score.

    :param input_path:            Path to load all alignments from
    :param alignment_parameters:  Alignment parameters for comparison
    :param convergence_plot_file: Where to save the convergence plot
    :param verbosity:             Verbosity of the output

    :return: None
    """
    def optimize_function(params: List) -> float:
        """
        Function to optimize against

        :param params: Parameters given by BOpt

        :return: Calculated score
        """
        bin_print(verbosity, 2, "Parameters: ", params)

        alignment_parameters["score_weights"]["gaps_google"] = params[0][0]
        alignment_parameters["score_weights"]["gaps_transcript"] = params[0][1]
        alignment_parameters["score_weights"]["alignment_score"] = params[0][2]
        alignment_parameters["score_weights"]["google_confidence"] = params[0][
            3]

        results = compare_alignments(input_path, 0, "hand", "google", True,
                                     alignment_parameters)

        correlation_ious = pearsonr_lists(
            results["scores"]["ious"]["all"],
            results["scores"]["calculated"]["all"])
        correlation_deviation = pearsonr_lists(
            results["scores"]["deviation"]["all"],
            results["scores"]["calculated"]["all"])

        bin_print(verbosity, 1, "Correlation IOUs: ", correlation_ious)
        bin_print(verbosity, 1, "Correlation deviation: ",
                  correlation_deviation)

        # Only maximize correlation with IOU
        return abs(correlation_ious)

    domain = [
        {
            "name": "gaps_google",
            "type": "continuous",
            "domain": (-100, 100)
        },
        {
            "name": "gaps_transcript",
            "type": "continuous",
            "domain": (-100, 100)
        },
        {
            "name": "alignment_score",
            "type": "continuous",
            "domain": (-100, 100)
        },
        {
            "name": "google_confidence",
            "type": "continuous",
            "domain": (-100, 100)
        },
    ]

    bopt = BayesianOptimization(f=optimize_function,
                                domain=domain,
                                model_type="GP",
                                acquisition_type="EI",
                                acquisition_jitter=0.05,
                                maximize=True)

    bopt.run_optimization(max_iter=250)

    bopt.plot_convergence(filename=convergence_plot_file)

    bin_print(verbosity, 0, "Best values:", bopt.x_opt)
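pearsonr_lists is project-specific; a plausible implementation sketch on top of scipy (assuming it returns only the correlation coefficient, which matches how the result is used above):

from scipy.stats import pearsonr

def pearsonr_lists(xs, ys):
    # Pearson correlation coefficient of two equal-length lists
    r, _p_value = pearsonr(xs, ys)
    return r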
Example #12
            {'name': 'attention_dropout', 'type': 'continuous', 'domain': (0.0, 0.3)},
            {'name': 'relu_dropout', 'type': 'continuous', 'domain': (0.0, 0.3)},
            {'name': 'emb_dim', 'type': 'discrete', 'domain': tuple(range(60, 501))},
            {'name': 'hop', 'type': 'discrete', 'domain': tuple(range(1, 11))},
            {'name': 'heads', 'type': 'discrete', 'domain': tuple(range(1, 11))},
            {'name': 'depth_key', 'type': 'discrete', 'domain': tuple(range(20, 81))},
            {'name': 'depth_val', 'type': 'discrete', 'domain': tuple(range(20, 81))},
            {'name': 'filter', 'type': 'discrete', 'domain': tuple(range(60, 301))},
            {'name': 'batch_size', 'type': 'discrete', 'domain': tuple(range(32, 65))}]

    X_init = np.array([[0.001, 0.0, 0.0, 0.0, 0.0, 100, 6, 4, 40, 40, 50, 32]])
    Y_init = h_trs(X_init)
    optimizer = BayesianOptimization(f=h_trs,
                                     domain=bds,
                                     model_type='GP',
                                     acquisition_type='EI',
                                     acquisition_jitter=0.05,
                                     exact_feval=False,
                                     maximize=True,
                                     X=X_init,
                                     Y=np.array([[Y_init]]),
                                     verbosity_model=True)

    # run the optimization, warm-started from the single (X_init, Y_init) point
    optimizer.run_optimization(max_iter=100, verbosity=True,
                               report_file="save/{}/report.txt".format(gp_folder))
    optimizer.save_evaluations(evaluations_file="save/{}/evaluation.txt".format(gp_folder))
    optimizer.plot_acquisition(filename="save/{}/acquisition.pdf".format(gp_folder))
    optimizer.plot_convergence(filename="save/{}/convergence.pdf".format(gp_folder))