Example #1
    def optimize(self, reset=False):
        # Bounded region of parameter space
        pbounds = {
            'men_kappa_fulltime': (0.3, 0.6),
            'men_kappa_parttime': (0.3, 0.7),
            'women_kappa_fulltime': (0.3, 0.6),
            'women_kappa_parttime': (0.3, 0.7),
            # 'men_mu_scale': (0.01, 0.3), 'men_mu_age': (57, 62),
            # 'women_mu_scale': (0.01, 0.3), 'women_mu_age': (57, 62),
        }

        optimizer = BayesianOptimization(
            f=self.black_box_function,
            pbounds=pbounds,
            verbose=2,  # verbose=1 prints only when a maximum is observed, verbose=0 is silent
            random_state=1,
        )

        LOG_DIR = Path().absolute() / 'bayes_opt_logs'
        LOG_DIR.mkdir(exist_ok=True)
        filename = 'log_0.json'

        # Save progress: load any previous log before attaching the logger,
        # since constructing a JSONLogger deletes an existing file at its path.
        logfile = str(LOG_DIR / filename)
        if Path(logfile).exists() and not reset:
            load_logs(optimizer, logs=[logfile])
        logger = JSONLogger(path=logfile)
        optimizer.subscribe(Events.OPTIMIZATION_STEP, logger)

        optimizer.maximize(
            init_points=2,
            n_iter=20,
        )

        print('The best parameters found: {}'.format(optimizer.max))
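
A note on resuming that applies to several examples here: constructing a JSONLogger deletes any existing file at its path, and points replayed by load_logs are re-dispatched to loggers that are already subscribed. A minimal resume sketch, assuming bayes_opt 1.2+ (file names are hypothetical; import paths and the newer JSONLogger reset flag vary by version):

from pathlib import Path
from bayes_opt import BayesianOptimization
from bayes_opt.logger import JSONLogger
from bayes_opt.event import Events
from bayes_opt.util import load_logs

optimizer = BayesianOptimization(f=lambda x: -x ** 2, pbounds={'x': (-2, 2)})
if Path('log_0.json').exists():
    load_logs(optimizer, logs=['log_0.json'])  # replay old points into memory
# Attach the logger only after loading, and write each run to a fresh file.
optimizer.subscribe(Events.OPTIMIZATION_STEP, JSONLogger(path='log_1.json'))
optimizer.maximize(init_points=2, n_iter=5)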
Example #2
    def optimize(self, load_from: str = None):
        self.idx = 0
        # Raise the open-file limit for this process; a `ulimit` subshell
        # (Popen('ulimit -n 4096', shell=True)) would only affect the child.
        import resource
        soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
        resource.setrlimit(resource.RLIMIT_NOFILE, (4096, hard))  # may fail if hard < 4096

        optimizer = BayesianOptimization(
            f=self._black_box_function,
            pbounds=self.p_bounds,
            random_state=1,
        )

        if load_from is not None:
            logfile = os.path.join(self.out_path, load_from)
            if Path(logfile).is_file():
                logging.info('Loading logs from %s', logfile)
                load_logs(optimizer, logs=[logfile])
            else:
                logging.info('Could not find a log file under %s', logfile)

        # Subscribe the logger only after loading, so restored points are not re-logged.
        logger = JSONLogger(path=self.opt_filepath)
        optimizer.subscribe(Events.OPTIMIZATION_STEP, logger)

        optimizer.maximize(
            init_points=self.init_points,
            n_iter=self.n_iter,
        )
Example #3
    def optimizer(self,
                  log_json=False,
                  load_log=False,
                  log_path=None,
                  **kwargs):
        allow = ['n_init_explore_point', 'n_bayesian_iterations']
        self.__dict__.update((k, v) for k, v in kwargs.items() if k in allow)

        b_opt = BayesianOptimization(
            f=self.bb_partial,
            pbounds=self.parameters,
            verbose=self.opt_verbose,
            random_state=0,
        )

        if log_json:
            logger = JSONLogger(path=r'./Logs/' + self.type_nn + '_' +
                                str(self.rnn_kind) + '.json')
            b_opt.subscribe(Events.OPTIMIZATION_STEP, logger)

        if load_log and log_path is not None:
            load_logs(b_opt, logs=log_path)

        b_opt.maximize(
            init_points=self.n_init_explore_point,
            n_iter=self.n_bayesian_iterations,
            acq="poi",  # "Probability of Improvement" acquisition prefers exploration (with xi=0.1)
            xi=1e-1)
        print('best parameters:', b_opt.max, '\n')

        return b_opt
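
The acq/xi pair passed to maximize above can also drive the loop by hand through the ask-tell interface. A minimal sketch, assuming bayes_opt 1.x, where UtilityFunction, suggest and register are public:

from bayes_opt import BayesianOptimization, UtilityFunction

opt = BayesianOptimization(f=None, pbounds={'x': (-2, 2)}, random_state=0)
util = UtilityFunction(kind='poi', xi=0.1)  # same acquisition as above
for _ in range(5):
    x_probe = opt.suggest(util)                  # ask for the next point
    target = -(x_probe['x'] ** 2)                # evaluate the black box yourself
    opt.register(params=x_probe, target=target)  # tell the optimizer the result
print(opt.max)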
Example #4
def get_optimizer():

    optimizer = BayesianOptimization(f=train,
                                     pbounds=pbounds,
                                     random_state=3,
                                     verbose=2)

    if os.path.isfile('./oldlogs.json'):
        load_logs(optimizer, logs=["./oldlogs.json"])
        print("New optimizer is now aware of {} points.".format(
            len(optimizer.space)))

    else:
        optimizer.probe(
            params={
                "contrast": 1,
                "hue": 0,
                "lr": 0.12,
                "rotations": 0
            },
            lazy=True,
        )
    logger = BasicObserver("./logs.json")
    optimizer.subscribe(Events.OPTIMIZATION_STEP, logger)
    optimizer.subscribe(Events.OPTIMIZATION_START, logger.screenlogger)
    optimizer.subscribe(Events.OPTIMIZATION_END, logger.screenlogger)
    return optimizer
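
BasicObserver above is project-specific, not part of bayes_opt; the library only requires a subscriber to expose update(event, instance). A minimal stand-in sketch (the screenlogger attribute is likewise an assumption of that project):

class MinimalObserver:
    """Prints the latest result whenever a subscribed event fires."""

    def update(self, event, instance):
        # `instance` is the BayesianOptimization object dispatching the event
        latest = instance.res[-1] if instance.res else None
        print(event, latest)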
Example #5
def optimize():
    global ITERATIONS
    ITERATIONS = 1
    MAX_EVALS = 10

    from bayes_opt import BayesianOptimization

    # Bounded region of parameter space
    pbounds = {'alpha': (0.001, 0.999), 'beta': (0.001, 1.5)}

    optimizer = BayesianOptimization(
        f=objective,
        pbounds=pbounds,
        random_state=1,
    )

    try:
        from bayes_opt.util import load_logs
        load_logs(optimizer, logs=["logs.json"])
        print("Rerunning from {} trials".format(len(optimizer.res)))
    except FileNotFoundError:
        print("Starting from scratch: new trials.")

    from bayes_opt.logger import JSONLogger
    from bayes_opt.event import Events

    logger = JSONLogger(path="logs.json")
    optimizer.subscribe(Events.OPTMIZATION_STEP, logger)

    # Results will be saved in ./logs.json
    optimizer.maximize(
        init_points=20,  #max(0, 5 - len(optimizer.res)),
        n_iter=MAX_EVALS,
    )
    print(optimizer.max)
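
For reference, the objects printed above have a simple shape (the values in this sketch are made up):

# optimizer.res -- list of all evaluated points, in order:
#   [{'target': 0.42, 'params': {'alpha': 0.5, 'beta': 1.0}}, ...]
# optimizer.max -- the single best entry of optimizer.res:
#   {'target': 0.97, 'params': {'alpha': 0.1, 'beta': 0.3}}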
Example #6
def get():
    pbounds = {
        'batch_size_continuous': (20, 20),
        # As small as possible to reduce model construction time; a large
        # batch size acts like a large lr because the training batch is
        # repetitive (no variance between batches).
        'lr_exp': (1.02, 1.02),
        # As large as possible to allow a larger initial gradient.
        'momentum': (0.8, 0.8),
        'layer_size_continuous': (20, 20),
        # As large as possible to increase model complexity, since no
        # overfitting is present.
        'layer_count_continuous': (1, 1)
        # As small as possible, because a large layer count slows optimization.
    }
    optimizer = BayesianOptimization(
        f=initial_acc,  # initial_slop,
        pbounds=pbounds
    )
    load_logs(optimizer, logs=["./baysian_logs.json"])

    res_list = []
    for i, res in enumerate(optimizer.res):
        print("Iteration {}: \n\t{}".format(i, res))
        res_list.append(res)

    # print("Final Max:", optimizer.max)
    return res_list
Example #7
    def maximize(self,
                 LHS_path=None,
                 init_points=5,
                 is_LHS=False,
                 n_iter=25,
                 acq='ucb',
                 kappa=2.576,
                 xi=0.0,
                 **gp_params):
        """Maximize your function"""
        self._prime_subscriptions()
        self.dispatch(Events.OPTIMIZATION_START)
        if LHS_path is None:
            if is_LHS:
                self._prime_queue_LHS(init_points)
            else:
                self._prime_queue(init_points)
        else:
            from bayes_opt.util import load_logs
            load_logs(self, logs=[LHS_path])
        self.set_gp_params(**gp_params)
        util = UtilityFunction(kind=acq, kappa=kappa, xi=xi)
        iteration = 0
        while not self._queue.empty or iteration < n_iter:
            try:
                x_probe = next(self._queue)
            except StopIteration:
                x_probe = self.suggest(util)
                iteration += 1
            self.probe(x_probe, lazy=False)
        self.dispatch(Events.OPTIMIZATION_END)
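
A hypothetical call of this override, resuming from a saved log instead of priming the queue with random or LHS points (the path is made up):

opt.maximize(LHS_path='bayes_opt_logs/log_0.json', n_iter=25, acq='ucb')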
Example #8
    def optimize_bayes_wo_param(parse_model_param):
        def crossval(*args_model, **kwargs_model):
            estimator = parse_model_param(*args_model, **kwargs_model)
            return cross_val_score(estimator,
                                   X=X,
                                   y=y,
                                   *args_eval,
                                   **kwargs_eval).mean()

        optimizer = BayesianOptimization(crossval, pbounds=pbounds)
        optimizer_log_dir = (LOG_DIR / log_dir)
        if optimizer_log_dir.exists():
            all_log = [str(path) for path in optimizer_log_dir.iterdir()]
            load_logs(optimizer, logs=all_log)
            filename = 'log_{}.json'.format(len(all_log))
        else:
            optimizer_log_dir.mkdir()
            filename = 'log_0.json'
        logger = JSONLogger(path=str(optimizer_log_dir / filename))
        optimizer.subscribe(Events.OPTIMIZATION_STEP, logger)

        optimizer.maximize(init_points, n_iter, kappa=kappa, acq=acq)
        best_model = parse_model_param(**optimizer.max['params'])
        best_model.fit(X=X, y=y)
        return best_model
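
Since the closure returns a fitted estimator, the enclosing factory is presumably applied as a decorator. A hedged usage sketch (Ridge and the alpha bound are illustrative, not from the original):

from sklearn.linear_model import Ridge

@optimize_bayes_wo_param
def make_ridge(alpha):
    return Ridge(alpha=alpha)

# make_ridge is now the fitted best model, not a function
predictions = make_ridge.predict(X)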
Example #9
    def load_hyperparameters(self):
        """
        This method loads the best hyperparameters from the Hyperparameter_Selection History to the current instance
        """
        optimizer = BayesianOptimization(
            f=None,
            pbounds=self.hyperparameter,
            verbose=2,  # verbose=1 prints only when a maximum is observed, verbose=0 is silent
            random_state=1)

        # Load Hyperparameter Selection History Logs
        logger_name = self.model_name
        if os.path.exists(saved_models_path + logger_name + ".json"):
            load_logs(optimizer,
                      logs=[saved_models_path + logger_name + ".json"])

            # Save best hyperparameters
            best_model = optimizer.max
            if self.ridge_2 is not None:
                self.ridge_1 = best_model['params']['ridge_1']
                self.ridge_2 = best_model['params']['ridge_2']
            else:
                self.ridge_1 = best_model['params']['ridge_1']

            print(
                "The optimizer is now aware of {num_pts} points and the best result is {Result}."
                .format(Result=best_model['target'],
                        num_pts=len(optimizer.space)))

        else:
            print('No hyperparameters were tested.')

        return None
Example #10
def hyperParameterOptimizer():
    def blackbox(c1Filters, c1KernelSize, c1Strides, c2Filters, c2KernelSize,
                 c2Strides, c3Filters, c3KernelSize, c3Strides, fcc1Units,
                 fcc2Units, dropout1, dropout2):
        hyperParams = HyperParams()

        hyperParams.c1Filters = int(round(c1Filters))
        hyperParams.c1KernelSize = int(round(c1KernelSize))
        hyperParams.c1Strides = int(round(c1Strides))

        hyperParams.c2Filters = int(round(c2Filters))
        hyperParams.c2KernelSize = int(round(c2KernelSize))
        hyperParams.c2Strides = int(round(c2Strides))

        hyperParams.c3Filters = int(round(c3Filters))
        hyperParams.c3KernelSize = int(round(c3KernelSize))
        hyperParams.c3Strides = int(round(c3Strides))

        hyperParams.fcc1Units = int(round(fcc1Units))
        hyperParams.fcc2Units = int(round(fcc2Units))

        hyperParams.dropout1 = round(dropout1, 2)
        hyperParams.dropout2 = round(dropout2, 2)

        checkpoint = train(200, None, hyperParams)
        return checkpoint.validationAccuracy

    bounds = {
        'c1Filters': (100, 128),
        'c1KernelSize': (2, 2),
        'c1Strides': (2, 2),
        'c2Filters': (64, 100),
        'c2KernelSize': (2, 2),
        'c2Strides': (2, 2),
        'c3Filters': (32, 64),
        'c3KernelSize': (2, 2),
        'c3Strides': (2, 2),
        'fcc1Units': (32, 150),
        'fcc2Units': (32, 150),
        'dropout1': (0.2, 0.5),
        'dropout2': (0.2, 0.5),
    }

    optimizer = BayesianOptimization(
        f=blackbox,
        pbounds=bounds,
        random_state=1,
    )

    logger = JSONLogger(path="./logs.json")
    optimizer.subscribe(Events.OPTMIZATION_STEP, logger)
    load_logs(optimizer, logs=["./oldlogs.json"])
    optimizer.maximize(
        init_points=2,
        n_iter=36,
    )

    print(optimizer.max)
Example #11
def xgb_optimization(X, y, params, random_state=1337):
    training_data = xgb.DMatrix(X, y)

    def xgb_model(
        feature_fraction,
        bagging_fraction,
        lambda_l1,
        lambda_l2,
        max_depth,
        num_leaves,
        min_split_gain,
        min_child_weight,
        learning_rate,
        n_estimators,
    ):

        params["feature_fraction"] = max(min(feature_fraction, 1), 0)
        params["bagging_fraction"] = max(min(bagging_fraction, 1), 0)
        params["lambda_l1"] = max(lambda_l1, 0)
        params["lambda_l2"] = max(lambda_l2, 0)
        params["max_depth"] = int(round(max_depth))
        params["num_leaves"] = int(round(num_leaves))
        params["min_split_gain"] = min_split_gain
        params["min_child_weight"] = min_child_weight
        params["learning_rate"] = learning_rate
        params["n_estimators"] = int(round(n_estimators))

        params.update({
            "objective": "reg:squarederror",
            "max_bin": 255,
            "bagging_freq": 1,
            "min_child_samples": 20,
            "boosting": "gbdt",
            "verbosity": 1,
            "early_stopping_round": 200,
            "metric": "rmse",
        })

        clf = xgb.cv(params,
                     training_data,
                     nfold=5,
                     seed=random_state,
                     verbose_eval=1)
        return (-1 * np.array(clf["test-rmse-mean"])).max()

    optimizer = BayesianOptimization(f=xgb_model,
                                     pbounds=params,
                                     random_state=1337)
    logger_path = os.path.join(LOGS_DIR, "logs_xgb.json")

    if os.path.exists(logger_path):
        load_logs(optimizer, logs=[logger_path])

    # Constructing JSONLogger deletes the file at logger_path, so the restored
    # history survives only in memory; each run writes a fresh log.
    logger = JSONLogger(path=logger_path)
    optimizer.subscribe(Events.OPTIMIZATION_STEP, logger)
    optimizer.maximize(init_points=5, n_iter=25, acq="ucb")

    return optimizer.max["params"]
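
Because bayes_opt only maximizes, the objective above returns the negated CV RMSE; a sketch of recovering the actual error from the result:

best = optimizer.max         # e.g. {'target': -3.21, 'params': {...}}
best_rmse = -best['target']  # undo the negation used inside xgb_model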
Example #12
def optimize_2d(path=None,
                steps=None,
                init_points=None,
                bounds=None,
                true_function=None,
                plot=False,
                load=False):
    def wrapper(x, y):
        os.environ['NW'] = "%f" % (x)
        os.environ['NN'] = "%f" % (y)
        res = -F.func_para()
        return res

    opt = BayesianOptimization(f=wrapper,
                               pbounds=bounds,
                               verbose=2,
                               random_state=92898)
    log_file = new_log_file_name()
    logger = JSONLogger(path=log_file)
    screen_logger = ScreenLogger(verbose=2)
    opt.subscribe(Events.OPTIMIZATION_STEP, logger)
    opt.subscribe(Events.OPTIMIZATION_START, screen_logger)
    opt.subscribe(Events.OPTIMIZATION_STEP, screen_logger)
    opt.subscribe(Events.OPTIMIZATION_END, screen_logger)
    print('Logging to logfile: ', os.path.abspath(log_file))
    dump_bounds(log_file, bounds)

    no_log_files_found = False
    if load:
        files = find_log_files()
        if len(files) > 0:
            print('Loading previous runs from logfile(s):')
            for f in files:
                print(f)
            load_logs(opt, logs=files)
        else:
            no_log_files_found = True
    if (init_points is not None) and (init_points > 0):
        if no_log_files_found or not load:
            opt.maximize(init_points=init_points, n_iter=0, alpha=1e-5)

    first_step = True
    opt.unsubscribe(Events.OPTIMIZATION_END, screen_logger)
    print('')
    if _check_steps_finite(steps):
        for _ in range(steps):
            opt.maximize(init_points=0, n_iter=1, alpha=1e-5)
            if first_step:
                opt.unsubscribe(Events.OPTIMIZATION_START, screen_logger)
                first_step = False
    else:
        while True:
            opt.maximize(init_points=0, n_iter=1, alpha=1e-5)
    print("MAX: ", opt.max)
    return opt
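
Driving maximize one iteration at a time like this yields a checkpoint after every single evaluation, but each call re-dispatches OPTIMIZATION_START and OPTIMIZATION_END; that is why the screen logger is unsubscribed from those events once the first step has printed its header.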
Example #13
def optimize():
    # Define bounds for the params you want to optimize (can be multivariate);
    # see https://github.com/fmfn/BayesianOptimization for details.
    bounds = {
        'localAreaDensity': (0.01, 0.15),
        'permanenceIncrement': (0.01, 0.1),
    }

    optimizer = BayesianOptimization(
        f=target_func,
        pbounds=bounds,
        random_state=1,
    )

    # We can start from saved logs
    if os.path.isfile('./local_area_density_optimization_logs_base.json'):
        print('Loading Logs...')
        load_logs(optimizer, logs=["./local_area_density_optimization_logs_base.json"]);

    # The new log file to write to
    json_logger = JSONLogger(path="./local_area_density_optimization_logs.json")
    optimizer.subscribe(Events.OPTIMIZATION_STEP, json_logger)

    # Additionally log to console
    screen_logger = ScreenLogger()
    optimizer.subscribe(Events.OPTIMIZATION_STEP, screen_logger)

    # If you want to guide the optimization process
    val = 0.02
    while val <= 0.04:
        optimizer.probe(
            params={
                'localAreaDensity': val,
                'permanenceIncrement': 0.04,
            },
            lazy=True,
        )
        val = round(val + 0.001, 3)

    optimizer.maximize(
        init_points=20,
        n_iter=50,
    )

    print(optimizer.max)

    # cleanup temp dir
    shutil.rmtree('temp')
Example #14
def bayes_opt(fn, params, probes=None):
    name = fn.__name__
    os.makedirs('./bayes_opt_logs', exist_ok=True)
    opt = BayesianOptimization(fn, params, verbose=2)
    if os.path.exists(f'./bayes_opt_logs/{name}.json'):
        print('Loading logs...')
        load_logs(opt, logs=[f'./bayes_opt_logs/{name}.json'])

    # Note: JSONLogger truncates this file, so earlier entries survive only in
    # memory (they were just replayed by load_logs above).
    logger = JSONLogger(path=f'./bayes_opt_logs/{name}.json')
    opt.subscribe(Events.OPTIMIZATION_STEP, logger)

    # Probe with a set of known "good" params
    if probes:
        for probe in probes:
            opt.probe(params=probe, lazy=True)

    opt.maximize(n_iter=100, init_points=60)
    print(opt.max)
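
A hypothetical call of this helper, seeding the search with known-good parameters (the objective and bounds are illustrative):

def train_model(lr, dropout):
    ...  # train and return a validation score

bayes_opt(train_model,
          params={'lr': (1e-4, 1e-1), 'dropout': (0.0, 0.5)},
          probes=[{'lr': 0.01, 'dropout': 0.2}])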
Example #15
    def print_max(self):
        optimizer = BayesianOptimization(
            f=self.cost,
            pbounds=self.pbounds,
            random_state=1,
            verbose=2,
        )
        load_logs(optimizer,
                  logs=[
                      "ABC_Results/user_parameters_p{}.json".format(
                          self.participant)
                  ])
        x_obs = optimizer.space._params
        y_obs = optimizer.space._target
        gp = optimizer._gp
        gp.fit(x_obs, y_obs)
        print(optimizer.max)
Example #16
def hyper():
    opt = BayesianOptimization(f=train,
                               pbounds={
                                   'lr': (0.0001, 0.001),
                                   'lm': (0.75, 0.95),
                                   'tpri': (10, 150),
                                   'rpr': (0.25, 0.75),
                                   'dmc': (0.6, 0.9),
                                   'wd': (0.00001, 0.001)
                               },
                               verbose=2)

    logger = JSONLogger(path="./bo_logs.json")
    opt.subscribe(Events.OPTIMIZATION_STEP, logger)

    opt.maximize(init_points=3, n_iter=10)
    load_logs(opt, logs=["./bo_logs.json"])
    print('maximum: ', opt.max)
Example #17
def get():
    pbounds = {
        'squeeze_scale_exp': (1.5, 1.5),  # 2
        'small_filter_rate': (0.5, 0.5),  # 10
        'max_lr_exp': (-4, -2),  # 6
        'max_momentum': (0.8, 0.99),
        'num_epoch': (20, 50)
    }
    optimizer = BayesianOptimization(f=OneCycleTrain, pbounds=pbounds)
    load_logs(optimizer, logs=["./one_cycle_baysian_logs.json"])

    res_list = []
    for i, res in enumerate(optimizer.res):
        print("Iteration {}: \n\t{}".format(i, res))
        res_list.append(res)

    # print("Final Max:", optimizer.max)
    return res_list
Example #18
    def optimize(self):
        """
		Main function for optimization
		"""
        # Initialize optimizer; load any previous log before subscribing the logger
        self.bo = BayesianOptimization(self._eval_fun, self.param_boundaries)
        if self.load_log:
            load_logs(self.bo, logs=[self.prev_log])
        self.bo.subscribe(Events.OPTIMIZATION_STEP, self.logger)
        # Explore the input and target space on predefined points
        if self.probe:
            self._explore_target_space()

        # Set parameters for Gaussian Process
        gp_params = {}
        # {'kernel': None, 'alpha': 1e-5}
        self.bo.maximize(init_points=self.NR_INIT_POINTS,
                         n_iter=self.NR_ITERATIONS,
                         acq='ei',
                         **gp_params)
Example #19
def test_logs():
    import pytest

    def f(x, y):
        return -x**2 - (y - 1)**2 + 1

    optimizer = BayesianOptimization(f=f,
                                     pbounds={
                                         "x": (-2, 2),
                                         "y": (-2, 2)
                                     },
                                     ptypes={
                                         'x': float,
                                         'y': float
                                     })
    assert len(optimizer.space) == 0

    load_logs(optimizer, "./tests/test_logs.json")
    assert len(optimizer.space) == 5

    load_logs(optimizer, ["./tests/test_logs.json"])
    assert len(optimizer.space) == 5

    other_optimizer = BayesianOptimization(f=lambda x: -x**2,
                                           pbounds={"x": (-2, 2)})
    with pytest.raises(ValueError):
        load_logs(other_optimizer, ["./tests/test_logs.json"])
Example #20
def optimize_local_area_density():
    # optimize localAreaDensity
    bounds = {
        'localAreaDensity': (0.01, 0.15),
    }

    optimizer = BayesianOptimization(
        f=target_func,
        pbounds=bounds,
        random_state=1,
    )

    if os.path.isfile('./local_area_density_optimization_logs_base.json'):
        print('Loading Logs...')
        load_logs(optimizer,
                  logs=["./local_area_density_optimization_logs_base.json"])

    logger = JSONLogger(path="./local_area_density_optimization_logs.json")
    optimizer.subscribe(Events.OPTIMIZATION_STEP, logger)

    val = 0.02
    while val <= 0.04:
        print('Adding', val)
        optimizer.probe(
            params={
                'localAreaDensity': val,
            },
            lazy=True,
        )
        val = round(val + 0.001, 3)

    print('Starting optimization...')

    optimizer.maximize(
        init_points=20,
        n_iter=50,
    )

    print(optimizer.max)
Example #21
    return accuracy


pbounds = {
    'alphabet_size': (64.0, 64.0),
    'dropout': (0.0, 0.4),
    'embedding_size': (32.0, 64.0),
    'label_smoothing': (0.0, 0.2),
    'layer_size': (1280.0, 1280.0),
    'learning_rate': (-4.0, -2.0),
    'learning_rate_final': (-5.0, -3.0),
    'window': (8.0, 8.0)
}

optimizer = BayesianOptimization(f=model_accuracy,
                                 pbounds=pbounds,
                                 verbose=2,
                                 random_state=1)
if os.path.isfile("./parameters_log.json"):
    load_logs(optimizer, logs=["./parameters_log.json"])
    print("Loaded {} model evaluations".format(len(optimizer.space)))

logger = JSONLogger(path="./parameters_log_new.json")
optimizer.subscribe(Events.OPTIMIZATION_STEP, logger)
optimizer.subscribe(Events.OPTIMIZATION_STEP, ScreenLogger())

optimizer.maximize(
    init_points=max(0, 20 - len(optimizer.space)),
    n_iter=40 - max(len(optimizer.space) - 20, 0),
)
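
The init_points/n_iter arithmetic above tops the run up to a fixed budget of 20 random plus 40 guided evaluations in total: with, say, 12 points restored from parameters_log.json, init_points = max(0, 20 - 12) = 8 and n_iter = 40 - max(12 - 20, 0) = 40; with 25 restored, no new random points are added and n_iter drops to 40 - 5 = 35.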
Example #22
    def plot2(self):
        optimizer = BayesianOptimization(
            f=self.cost,
            pbounds=self.pbounds,
            random_state=1,
            verbose=2,
        )
        load_logs(optimizer,
                  logs=[
                      "ABC_Results/user_parameters_p{}.json".format(
                          self.participant)
                  ])

        x_obs = optimizer.space._params
        y_obs = optimizer.space._target
        gp = optimizer._gp  # note: replaced below by a hand-tuned GP

        # kernel = Matern(nu=4.5)
        kernel = RBF(length_scale=1e-5)
        # kernel = RationalQuadratic(length_scale=1,
        #                            alpha=10)  # length_scale_bounds=(1e-05, 100000.0), alpha_bounds=(1e-05, 100000.0)),
        # kernel=DotProduct(sigma_0=3e7)
        # sigma = 1e1
        # kernel = DotProduct(sigma_0=sigma) * DotProduct(sigma_0=sigma)

        gp = GaussianProcessRegressor(
            kernel=kernel,
            normalize_y=True,
            alpha=0.5,
            n_restarts_optimizer=25,
        )

        gp.fit(x_obs, y_obs)

        max_x = np.array([
            optimizer.max['params']['max_motor_units'],
            optimizer.max['params']['w_vel']
        ])
        max_y = optimizer.max['target']

        num_vals = 101
        heatmap = np.zeros((num_vals, num_vals))
        w_vel = np.linspace(self.pbounds["w_vel"][0], self.pbounds["w_vel"][1],
                            num_vals)
        max_motor_units = np.linspace(self.pbounds["max_motor_units"][0],
                                      self.pbounds["max_motor_units"][1],
                                      num_vals)

        # calculate heat map
        for i in range(num_vals):
            for j in range(num_vals):
                heatmap[i, j] = gp.predict(
                    np.array([max_motor_units[j], w_vel[i]]).reshape(1, -1))

        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        ax.scatter(x_obs[:, 0], x_obs[:, 1], y_obs, color='g')
        ax.scatter(max_x[0], max_x[1], max_y, color='r')
        xv, yv = np.meshgrid(max_motor_units, w_vel)
        ax.plot_wireframe(xv, yv, heatmap)
        # ax.plot_surface(xv, yv, heatmap)
        ax.set_xlabel("motor")
        ax.set_ylabel("w_vel")
        plt.show()
Example #23
        'ridge_alpha': 0.0006696012127733874
    }
]

optimizer = BayesianOptimization(
    f=black_box_function,
    pbounds=pbounds,
    random_state=42,
)
for point in start_points:
    optimizer.probe(
        params=point,
        lazy=True,
    )

if LOAD_PREV:
    load_logs(optimizer, logs=["./calc_shifts_params.json"]);

logger = JSONLogger(path="./calc_shifts_params.json")
scrlogger = ScreenLogger()
optimizer.subscribe(Events.OPTIMIZATION_STEP, scrlogger)
optimizer.subscribe(Events.OPTIMIZATION_STEP, logger)

optimizer.maximize(
    init_points=500,
    n_iter=500,
)

print(optimizer.max)

Example #24
            'T': (0, 1),
            'Tsin': (0, 1),
        }  # constrained optimization: specify the minimum and maximum values that can be probed for each parameter

        optimizer = BayesianOptimization(
            f=bayesOpt_function,  #function that is optimized
            pbounds=pbounds,  #opt.-range of parameters
            random_state=1,
            verbose=0  # verbose=1 prints only when a maximum is observed, verbose=0 is silent, verbose=2 prints everything
        )

        #load existing optimizer
        log_already_available = 0
        if os.path.isfile("./logs_CNN_seq2seq_GWLt-1_" + Well_ID + ".json"):
            load_logs(optimizer,
                      logs=["./logs_CNN_seq2seq_GWLt-1_" + Well_ID + ".json"])
            print("\nExisting optimizer is already aware of {} points.".format(
                len(optimizer.space)))
            log_already_available = 1

        # Saving progress
        logger = newJSONLogger(path="./logs_CNN_seq2seq_GWLt-1_" + Well_ID +
                               ".json")
        optimizer.subscribe(Events.OPTIMIZATION_STEP, logger)

        # random exploration as a start
        f = open('./timelog_CNN_seq2seq_GWLt-1_' + Well_ID + '.txt', "w")
        print("Starttime of first iteration: {}\n".format(
            datetime.datetime.now()),
              file=f)  # this is not logged in the json file
Example #25
    def plot(self, p, i):
        optimizer = BayesianOptimization(
            f=self.cost,
            pbounds=self.pbounds,
            random_state=1,
            verbose=2,
        )
        load_logs(optimizer,
                  logs=[
                      "ABC_Results/user_parameters_p{}.json".format(
                          self.participant)
                  ])

        x_obs = np.array([[res["params"][p]] for res in optimizer.res])[:i + 1]
        y_obs = np.array([res["target"] for res in optimizer.res])[:i + 1]
        gp = optimizer._gp
        gp.fit(x_obs, y_obs)

        # # kernel = Matern(nu=4.5)
        # kernel = RBF(length_scale=1e-5)
        # kernel = RationalQuadratic(length_scale=1,
        #                            alpha=10)  # length_scale_bounds=(1e-05, 100000.0), alpha_bounds=(1e-05, 100000.0)),
        # # kernel=DotProduct(sigma_0=3e7)
        sigma = 1e0
        kernel = DotProduct(sigma_0=sigma) * DotProduct(sigma_0=sigma)
        #
        gp = GaussianProcessRegressor(
            kernel=kernel,
            normalize_y=True,
            alpha=1e-6,
            n_restarts_optimizer=5,
        )

        gp.fit(x_obs, y_obs)

        # max_x = np.array([optimizer.max['params'][p]])
        # max_y = optimizer.max['target']

        xmin, xmax = self.pbounds[p]
        num_vals = 100
        w_vel = np.linspace(xmin, xmax, num_vals)
        mu, sigma = gp.predict(w_vel.reshape(-1, 1), return_std=True)

        fig = plt.figure(i)
        ax = fig.add_subplot(111)
        ax.scatter(x_obs.flatten(), y_obs, color='g', label="observations")
        # ax.scatter(max_x, max_y, color='r', label="Max value")
        ax.plot(w_vel, mu, 'k--', label="prediction")
        ax.fill_between(w_vel,
                        mu - sigma,
                        mu + sigma,
                        label="SD Confidence",
                        alpha=0.5)
        ax.set_xlabel(p)
        ax.set_ylabel("target")
        ax.set_xlim(0, 1000)
        ax.set_ylim(-15, 1)
        plt.legend()
        counter = str(i).zfill(3)
        plt.savefig("ABC_Results\images\w_vel_p999_{}.png".format(counter))
        plt.close()
Example #26
def bayesian_search(
    config_path,
    inferencecfg,
    pbounds,
    edgewisecondition=True,
    shuffle=1,
    trainingsetindex=0,
    modelprefix="",
    snapshotindex=-1,
    target="rpck_test",
    maximize=True,
    init_points=20,
    n_iter=50,
    acq="ei",
    log_file=None,
    dcorr=5,
    leastbpts=3,
    printingintermediatevalues=True,
):

    if "rpck" in target:
        assert maximize == True

    if "rmse" in target:
        assert maximize == False

    cfg = auxiliaryfunctions.read_config(config_path)
    evaluationfolder = os.path.join(
        cfg["project_path"],
        str(
            auxiliaryfunctions.GetEvaluationFolder(
                cfg["TrainingFraction"][int(trainingsetindex)],
                shuffle,
                cfg,
                modelprefix=modelprefix,
            )),
    )

    DLCscorer, DLCscorerlegacy = auxiliaryfunctions.GetScorerName(
        cfg,
        shuffle,
        cfg["TrainingFraction"][int(trainingsetindex)],
        cfg["iteration"],
        modelprefix=modelprefix,
    )

    # load params
    fns = return_evaluate_network_data(
        config_path,
        shuffle=shuffle,
        trainingsetindex=trainingsetindex,
        modelprefix=modelprefix,
    )
    predictionsfn = fns[snapshotindex]
    data, metadata = auxfun_multianimal.LoadFullMultiAnimalData(predictionsfn)
    params = set_up_evaluation(data)
    columns = ["train_iter", "train_frac", "shuffle"]
    columns += [
        "_".join((b, a)) for a in ("train", "test")
        for b in ("rmse", "hits", "misses", "falsepos", "ndetects", "pck",
                  "rpck")
    ]

    train_iter = trainingsetindex  # int(predictionsfn.split('-')[-1].split('.')[0])
    train_frac = cfg["TrainingFraction"][
        train_iter]  # int(predictionsfn.split('trainset')[1].split('shuffle')[0])
    trainIndices = metadata["data"]["trainIndices"]
    testIndices = metadata["data"]["testIndices"]

    if edgewisecondition:
        mf = str(
            auxiliaryfunctions.GetModelFolder(
                cfg["TrainingFraction"][int(trainingsetindex)],
                shuffle,
                cfg,
                modelprefix=modelprefix,
            ))
        modelfolder = os.path.join(cfg["project_path"], mf)
        path_inferencebounds_config = (Path(modelfolder) / "test" /
                                       "inferencebounds.yaml")
        try:
            inferenceboundscfg = auxiliaryfunctions.read_plainconfig(
                path_inferencebounds_config)
        except FileNotFoundError:
            print("Computing distances...")
            from deeplabcut.pose_estimation_tensorflow import calculatepafdistancebounds

            inferenceboundscfg = calculatepafdistancebounds(
                config_path, shuffle, trainingsetindex)
            auxiliaryfunctions.write_plainconfig(path_inferencebounds_config,
                                                 inferenceboundscfg)

        partaffinityfield_graph = params["paf_graph"]
        upperbound = np.array([
            float(inferenceboundscfg[str(edge[0]) + "_" +
                                     str(edge[1])]["intra_max"])
            for edge in partaffinityfield_graph
        ])
        lowerbound = np.array([
            float(inferenceboundscfg[str(edge[0]) + "_" +
                                     str(edge[1])]["intra_min"])
            for edge in partaffinityfield_graph
        ])

        upperbound *= inferencecfg["upperbound_factor"]
        lowerbound *= inferencecfg["lowerbound_factor"]

    else:
        lowerbound = None
        upperbound = None

    def dlc_hyperparams(**kwargs):
        inferencecfg.update(kwargs)
        # Ensure type consistency
        for k, (bound, _) in pbounds.items():
            inferencecfg[k] = type(bound)(inferencecfg[k])

        stats = compute_crossval_metrics_preloadeddata(
            params,
            columns,
            inferencecfg,
            data,
            trainIndices,
            testIndices,
            train_iter,
            train_frac,
            shuffle,
            lowerbound,
            upperbound,
            dcorr=dcorr,
            leastbpts=leastbpts,
        )

        # stats = compute_crossval_metrics(config_path, inferencecfg, shuffle,trainingsetindex,
        #                                    dcorr=dcorr,leastbpts=leastbpts,modelprefix=modelprefix)

        if printingintermediatevalues:
            print(
                "rpck",
                stats["rpck_test"].values[0],
                "rpck train:",
                stats["rpck_train"].values[0],
            )
            print(
                "rmse",
                stats["rmse_test"].values[0],
                "miss",
                stats["misses_test"].values[0],
                "hit",
                stats["hits_test"].values[0],
            )

        # val = stats['rmse_test'].values[0]*(1+stats['misses_test'].values[0]*1./stats['hits_test'].values[0])
        val = stats[target].values[0]
        if np.isnan(val):
            if maximize:  # pck case: return a very bad (large negative) score
                val = -1e9
            else:  # rmse case: return a very large error
                val = 1e9

        if not maximize:
            val = -val

        return val

    opt = BayesianOptimization(f=dlc_hyperparams,
                               pbounds=pbounds,
                               random_state=42)
    if log_file:
        load_logs(opt, log_file)
    logger = JSONLogger(path=os.path.join(evaluationfolder, "opti_log" +
                                          DLCscorer + ".json"))
    opt.subscribe(Events.OPTIMIZATION_STEP, logger)
    opt.maximize(init_points=init_points, n_iter=n_iter, acq=acq)

    inferencecfg.update(opt.max["params"])
    for k, (bound, _) in pbounds.items():
        tmp = type(bound)(inferencecfg[k])
        if isinstance(tmp, np.floating):
            tmp = np.round(tmp, 2).item()
        inferencecfg[k] = tmp

    return inferencecfg, opt
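
The type-consistency trick used in dlc_hyperparams, and again on opt.max at the end, is worth isolating: every float suggested by the optimizer is cast back to the type of its parameter's lower bound. A small sketch with made-up names:

pbounds = {'minsize': (1, 6), 'pcutoff': (0.01, 0.9)}  # int vs. float bounds
suggestion = {'minsize': 3.7, 'pcutoff': 0.4321}       # raw floats from the GP
for k, (bound, _) in pbounds.items():
    suggestion[k] = type(bound)(suggestion[k])         # int(3.7) == 3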
Example #27
def main():
    # Parse arguments
    import argparse
    parser = argparse.ArgumentParser(description='Runs a bayesian optimisation for some of the algorithms defined in the PTSP framework')
    parser.add_argument('--algorithm',
                        choices=["QD-MCTS", "S-MCTS", "MS-MCTS", "VanillaGA", "VanillaMCTS"],
                        help='The algorithm that should be optimized',
                        default="S-MCTS")
    parser.add_argument('--outputDir', 
                        default="./optimizationResults",
                        help='The output directory for all data generated by the optimization')
    parser.add_argument("--ptspPath",
                        default="./ptsp.jar",
                        help="The path to the .jar file containing the PTSP framework")
    parser.add_argument("--iters",
                        default="10",
                        type=int,
                        help="Number of parameter-points to test by the bayesian optimization")
    args = parser.parse_args()
    args.outputPath = f"{args.outputDir}/{args.algorithm}"
    
    # Find all previous logs for this optimization
    logs = glob.glob(f"{args.outputPath}/optimizationLogs*.json")
    csvLogs = glob.glob(f"{args.outputPath}/*.csv")
    
    # Launch the JVM
    jpype.startJVM()
    jpype.addClassPath(args.ptspPath)
    import framework.Optimization as optim
    
    # Move java output into a file
    from java.lang import System
    from java.io import PrintStream, FileOutputStream
    pathlib.Path(args.outputPath).mkdir(parents=True, exist_ok=True)
    System.setOut(PrintStream(FileOutputStream(f"{args.outputPath}/cout.txt", True)))
    
    # Algorithm specific data
    bounds = {
        "QD-MCTS" : {
                "lowER": (0.01, 10), # Exploration rate low-level search
                "highER": (0.01, 10), # Exploration rate high-level search
                "steps": (300, 600),  # Number of steps for low-level search
                "rd": (10, 30) # rolloutDepth
            },
        "S-MCTS" : {
                "cellSize": (5, 30), # Size of a cell in the subgoal grid (aka distance between subgoals)
                "er": (0.01, 10), # Exploration rate high-level search
                "steps": (300, 600),  # Number of steps for low-level search
                "rd": (10, 30) # rolloutDepth
            },
        "MS-MCTS" : {
                "cellSize": (5, 30), # Size of a cell in the subgoal grid (aka distance between subgoals)
                "er": (0.01, 10), # Exploration rate high-level search
                "steps": (300, 600),  # Number of steps for low-level search
                "rd": (10, 30) # rolloutDepth
            },
        "VanillaGA" : {
                "gl": (10, 30), # How many base-actions does a genome contain
                "ps": (1, 100), # How many genomes in one population
                "mr": (0.1, 0.9),  # Probability that an action is mutated
            },
        "VanillaMCTS" : {
                "er": (0.01, 10), # Exploration rate
                "rd": (10, 30), # RolloutDepth
            }
    }
    
    funcs = {
        "QD-MCTS" : lambda lowER, highER, steps, rd: execSafe(optim.runQD_MCTS, lowER, highER, round(steps), round(rd)),
        "S-MCTS" : lambda cellSize, er, steps, rd: execSafe(optim.runSMCTS, cellSize, er, round(steps), round(rd)),
        "MS-MCTS" : lambda cellSize, er, steps, rd: execSafe(optim.runMS_MCTS, cellSize, er, round(steps), round(rd)),
        "VanillaGA" : lambda gl, ps, mr: execSafe(optim.runVanillaGA, round(gl), round(ps), mr),
        "VanillaMCTS" : lambda er, rd: execSafe(optim.runVanillaMCTS, er, round(rd))
    }
    
    # Probe points for each algorithm, only one which I've used previously
    probes = {
        "QD-MCTS" : {"lowER": math.sqrt(2), "highER": 4, "steps": 400, "rd": 25},
        "S-MCTS" : {"cellSize": 20, "er": 4, "steps": 400, "rd": 25},
        "MS-MCTS" : {"cellSize": 20, "er": 4, "steps": 400, "rd": 25},
        "VanillaGA" : {"gl": 20, "ps": 50, "mr": 1. / 20},
        "VanillaMCTS" : {"er": math.sqrt(2), "rd": 12}
    }
    
    # Initialize optimization
    optim.setupRun(len(logs) * 11) # Different seed for each run
    optim.RUN_COUNTER = len(csvLogs) # Make sure java logs into a new csv file
    optim.NUM_TRIALS = 10
    optim.OutputDir = args.outputPath
    optim.m_mapNames = glob.glob("./maps/**/*.map", recursive=True)
    
    optimizer = BayesianOptimization(
        f=funcs[args.algorithm],
        pbounds=bounds[args.algorithm],
        random_state=len(logs) * 11, # Change behaviour for each run
    )
    print(f"Optimizing {args.algorithm} with bounds:")
    print(bounds[args.algorithm])
    
    # Probe if necessary
    init_points = 0
    if len(logs) == 0:
        print("Found no previous logs... Probing to improve results:")
        print(probes[args.algorithm])
        optimizer.probe(params=probes[args.algorithm], lazy=True)
        init_points = 5
    else: # If we found logs, load them
        print(f"Reading previous logs into optimizer...")
        load_logs(optimizer, logs=logs);
        for log in logs:
            print(f"Successfully loaded {log}")
    
    logger = JSONLogger(path=f"{args.outputPath}/optimizationLogs{len(logs) + 1}.json")
    optimizer.subscribe(Events.OPTIMIZATION_STEP, logger)
    
    # Run optimization
    print(f"Starting optimisation for {args.algorithm}...")
    optimizer.maximize(init_points=init_points, n_iter=args.iters)
    print("Finished optimisation")
    print(optimizer.max)
Example #28
        model_paths.append(os.path.join(dirpath, filename))

model_paths = sorted(model_paths, key=str.lower)

optimizer = BayesianOptimization(f=None,  # no objective needed: load-only
                                 pbounds={
                                     'batch_size': (0, 1),
                                     'n_hidden_1': (32, 4096),
                                     'n_hidden_2': (32, 4096),
                                     'learning_rate': (1e-5, 1e-3),
                                     'dropout': (0, 1)
                                 },
                                 verbose=0,
                                 random_state=5)

load_logs(optimizer, logs=[log_path])

print("Loading train dataset ... ", end='')
with open(train_data_path, 'rb') as train_data_file:
    X, y = pickle.load(train_data_file)
    dataset = Dataset(X, y)
print("Done.")

print("Splitting dataset ... ", end='')
num_train = int(len(dataset) * 0.8)
num_val = len(dataset) - num_train
torch.manual_seed(123)
[train_set, val_set] = torch.utils.data.random_split(dataset,
                                                     (num_train, num_val))

X_train = X[train_set.indices]
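
With f=None the optimizer cannot run maximize, but after load_logs the restored history is fully queryable, which is the point of a load-only optimizer like this one:

best_params = optimizer.max['params']  # best logged configuration
history = [(r['target'], r['params']) for r in optimizer.res]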
Example #29
    return latency


# Bounded region of parameter space
pbounds = {'buffer_pool': (128, buffer_pool_max), 'log_file': (48, log_file_max),'flush_method': (0, flush_method_max),
           'thread_cache': (9, thread_cache_max),'thread_sleep': (0, thread_sleep_max), 'max_connect': (151, max_connect_max)}

optimizer = BayesianOptimization(
    f=objFunction,
    pbounds=pbounds,
    random_state=1,
)

load_logs(optimizer, logs=[setLoggerRoute()])
logger = JSONLogger(path="./logsResumed.json")
optimizer.subscribe(Events.OPTIMIZATION_STEP, logger)

print("New optimizer is now aware of {} points.".format(len(optimizer.space)))
count=len(optimizer.space)

optimizer.maximize(
    init_points=0,
    n_iter=defineOptIterations(),
)

print(optimizer.max)
Example #30
)

logger = JSONLogger(path="./logs.json")
optimizer.subscribe(Events.OPTIMIZATION_STEP, logger)

# By default these will be explored lazily (lazy=True), meaning these points will be evaluated only the next time you call maximize.

print(optimizer.space.keys)

optimizer.maximize(init_points=0, n_iter=6)

for i, res in enumerate(optimizer.res):
    print("Iteration {}: \n\t{}".format(i, res))

print(optimizer.max)

#load_logs
new_optimizer = BayesianOptimization(
    f=black_box_function,
    pbounds={"x": (-2, 2), "y": (-2, 2)},
    verbose=2,
    random_state=7,
)

print(len(new_optimizer.space))
load_logs(new_optimizer, logs=["./logs.json"])
print("New optimizer is now aware of {} points.".format(len(new_optimizer.space)))
new_optimizer.maximize(
    init_points=0,
    n_iter=10,
)