Example #1
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import gridspec
from bayes_opt import UtilityFunction


def plot_gp(optimizer, x=None, y=None, set_xlim=(-2, 10)):
    """
    Plot the Gaussian posterior and the utility function after one or more
    optimization steps.

    Taken from

       https://github.com/fmfn/BayesianOptimization/blob/master/examples/visualization.ipynb
    """
    fig = plt.figure(figsize=(10, 5))
    steps = len(optimizer.space)
    fig.suptitle(
        'Gaussian Process and Utility Function After {} Steps'.format(steps),
        fontsize=30
    )

    gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1])
    axis = plt.subplot(gs[0])
    acq = plt.subplot(gs[1])

    x_obs = np.array([[res["params"]["x"]] for res in optimizer.res])
    y_obs = np.array([res["target"] for res in optimizer.res])

    if x is None:
        # Default to a dense grid over the first parameter's bounds.
        x0, x1 = optimizer._space._bounds[0]
        x = np.linspace(x0, x1, 1000).reshape(-1, 1)
    mu, sigma = posterior(optimizer, x_obs, y_obs, x)
    # x is guaranteed to be set here, so only y needs checking.
    if y is not None:
        axis.plot(x, y, linewidth=3, label='Target')

    axis.plot(x_obs.flatten(), y_obs, 'D', markersize=8,
              label=u'Observations', color='r')
    axis.plot(x, mu, '--', color='k', label='Prediction')

    axis.fill(np.concatenate([x, x[::-1]]),
              np.concatenate([mu - 1.9600 * sigma,
                             (mu + 1.9600 * sigma)[::-1]]),
              alpha=.6, fc='c', ec='None', label='95% confidence interval')

    axis.set_xlim(set_xlim)
    axis.set_ylim((None, None))
    axis.set_ylabel('f(x)', fontdict={'size': 20})
    axis.set_xlabel('x', fontdict={'size': 20})

    utility_function = UtilityFunction(kind="ucb", kappa=5, xi=0)
    utility = utility_function.utility(x, optimizer._gp, 0)
    acq.plot(x, utility, label='Utility Function', color='purple')
    acq.plot(x[np.argmax(utility)], np.max(utility), '*', markersize=15,
             label=u'Next Best Guess', markerfacecolor='gold',
             markeredgecolor='k', markeredgewidth=1)
    acq.set_xlim(set_xlim)
    acq.set_ylim((0, np.max(utility) + 0.5))
    acq.set_ylabel('Utility', fontdict={'size': 20})
    acq.set_xlabel('x', fontdict={'size': 20})

    axis.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
    acq.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
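The posterior helper called above is not part of bayes_opt itself. A minimal sketch, following the visualization notebook linked in the docstring: refit the optimizer's internal GP on the observations and query its predictive mean and standard deviation on the grid.

def posterior(optimizer, x_obs, y_obs, grid):
    # Refit the wrapped scikit-learn GP on the observed points, then
    # evaluate the posterior mean and standard deviation on the grid.
    optimizer._gp.fit(x_obs, y_obs)
    mu, sigma = optimizer._gp.predict(grid, return_std=True)
    return mu, sigma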
Example #2
def plot_gp(optimizer, x, y):
    fig = plt.figure(figsize=(16, 10))
    steps = len(optimizer.space)
    fig.suptitle(
        'Gaussian Process and Utility Function After {} Steps'.format(steps),
        fontsize=30)

    gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1])
    axis = plt.subplot(gs[0])
    acq = plt.subplot(gs[1])

    x_obs = np.array([[res["params"]["x"]] for res in optimizer.res])
    y_obs = np.array([res["target"] for res in optimizer.res])

    mu, sigma = posterior(optimizer, x_obs, y_obs, x)
    axis.plot(x, y, linewidth=3, label='Target')
    axis.plot(x_obs.flatten(),
              y_obs,
              'D',
              markersize=8,
              label=u'Observations',
              color='r')
    axis.plot(x, mu, '--', color='k', label='Prediction')

    axis.fill(np.concatenate([x, x[::-1]]),
              np.concatenate(
                  [mu - 1.9600 * sigma, (mu + 1.9600 * sigma)[::-1]]),
              alpha=.6,
              fc='c',
              ec='None',
              label='95% confidence interval')

    axis.set_xlim((-2, 10))
    axis.set_ylim((None, None))
    axis.set_ylabel('f(x)', fontdict={'size': 20})
    axis.set_xlabel('x', fontdict={'size': 20})

    utility_function = UtilityFunction(kind="ei", kappa=None, xi=0)
    utility = utility_function.utility(x, optimizer._gp, 0)
    acq.plot(x, utility, label='Utility Function', color='purple')
    acq.plot(x[np.argmax(utility)],
             np.max(utility),
             '*',
             markersize=15,
             label=u'Next Best Guess',
             markerfacecolor='gold',
             markeredgecolor='k',
             markeredgewidth=1)

    acq.set_xlim((-2, 10))
    acq.set_ylim((0, np.max(utility) + 0.5))
    acq.set_ylabel('Utility', fontdict={'size': 20})
    acq.set_xlabel('x', fontdict={'size': 20})

    axis.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
    acq.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
    plt.show()
Example #3
def plot_gp(optimizer, x):
    fig = plt.figure()
    steps = len(optimizer.space)
    fig.suptitle('Gaussian Process after {} steps'.format(steps),
                 fontsize=30)

    axis = fig.add_subplot(111)
    #gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1])
    #axis = plt.subplot(gs[0])
    #acq = plt.subplot(gs[1])

    x_obs = numpy.array([[res["params"]["learning_rate"]]
                         for res in optimizer.res])
    y_obs = numpy.array([res["target"] for res in optimizer.res])

    mu, sigma = posterior(optimizer, x_obs, y_obs, x)
    #axis.plot(x, y, linewidth=3, label='Target')
    unc = 0.0033  # calculated for Leptonic ttH vs ttGG
    axis.errorbar(x_obs.flatten(),
                  y_obs,
                  yerr=numpy.ones(len(y_obs)) * unc,
                  label='Observations',
                  color='r',
                  marker='o',
                  markersize=8,
                  ls="none")
    axis.plot(x, mu, '--', color='k', label='Prediction')

    axis.fill(numpy.concatenate([x, x[::-1]]),
              numpy.concatenate(
                  [mu - 1.9600 * sigma, (mu + 1.9600 * sigma)[::-1]]),
              alpha=.6,
              fc='c',
              ec='None',
              label='95% confidence interval')

    axis.set_xlim((-5, -1))
    axis.set_ylim((0.7, 0.82))
    axis.set_ylabel('AUC', fontdict={'size': 20})
    axis.set_xlabel('Learning Rate', fontdict={'size': 20})

    utility_function = UtilityFunction(kind="ucb", kappa=5, xi=0)
    #utility_function = UtilityFunction(kind="ei", xi=float(args.xi))
    utility = utility_function.utility(x, optimizer._gp, 0)
    #acq.plot(x, utility, label='Utility Function', color='purple')
    #acq.plot(x[numpy.argmax(utility)], numpy.max(utility), '*', markersize=15,
    #label=u'Next Best Guess', markerfacecolor='gold', markeredgecolor='k', markeredgewidth=1)
    #acq.set_xlim((-2, 10))
    #acq.set_ylim((0, numpy.max(utility) + 0.5))
    #acq.set_ylabel('Utility', fontdict={'size':20})
    #acq.set_xlabel('x', fontdict={'size':20})

    axis.legend(loc='upper left')
    #acq.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
    plt.savefig('optimization_step_%d.pdf' % (steps), bbox_inches='tight')
    plt.clf()
Example #4
    def __init__(self, hyperparam_space, early_stopper, ensembler,
                 working_folder):

        super(BayesianOptimizer,
              self).__init__(hyperparam_space, early_stopper, ensembler,
                             working_folder)

        self._cur_config = self.get_default()
        self.bo_params = {}
        pbounds = {}
        for key, value in self._hyperparam_space.items():
            if isinstance(value, Categoric):
                sorted_categories = sorted(value.categories)
                pbounds[key] = [0, len(sorted_categories) - 1e-10]
                self._hyperparam_space[key].categories = sorted_categories
                self.bo_params[key] = float(
                    list(self._hyperparam_space[key].categories).index(
                        self._cur_config[key]))
            elif isinstance(value, Numeric):
                assert value.low <= value.high
                pbounds[key] = [value.low, value.high]
                self.bo_params[key] = self._cur_config[key]
            else:
                raise NotImplementedError

        self.bayesian_optimizer = BayesianOptimization(
            f=None,
            pbounds=pbounds,
            verbose=2,
            random_state=random.randint(0, 10**5))  # randint requires integer bounds

        self.utility = UtilityFunction(kind="ucb", kappa=2.5, xi=0.0)
Example #5
    def maximize(self, n_iter, kappa, acq='ucb', xi=0.0):

        if self.params_init_probe_json:
            # Probe with all known good points
            for p in self.init_points_to_probe:
                self.MaximizeStep(p, is_target_point=True)

        # Suggest-Evaluate-Register paradigm
        utility = UtilityFunction(kind=acq, kappa=kappa, xi=xi)

        n = 0
        while n < n_iter:
            # Create the next point to probe
            p_opt = self.optimizer.suggest(utility)

            n += self.MaximizeStep(p_opt, is_target_point=False)

        p_opt_max = self.optimizer.max
        p_target_max = self.TransformParams(p_opt_max['params'])

        # For each parameter, also probe the edge values of its range
        # around the best point found so far.
        for key, values in self.params_typed_range.items():
            # Skip if this parameter depends on another variable that is zero.
            if 'depends_on' in self.params_typed_range[key] and p_target_max[
                    self.params_typed_range[key]['depends_on']] == 0:
                continue
            new_target_point = copy.deepcopy(p_target_max)
            for v in values['range']:
                new_target_point[key] = v
                self.MaximizeStep(new_target_point, is_target_point=True)

        return self.TransformParams(
            self.optimizer.max['params']), self.optimizer.max['target']
Example #6
def optimize():
    """
    1. Basic 사용법
    """
    # Parameter 들의 범위를 지정해줌
    pbounds = {"x": (2, 4), "z": (0, 1), "y": (-3, 3)}
    discrete = ["y"]

    discrete_indices = get_discrete_idx(pbounds, discrete)

    optimizer = BayesianOptimization(
        f=black_box_function,  # The function to maximize
        pbounds=pbounds,
        verbose=2,  # verbose=1 prints only when a maximum is observed, verbose=0 is silent
        random_state=1,  # Optional seed to control randomness in BO
        discrete=discrete_indices,
    )

    # init_points: number of initial random-exploration evaluations
    # n_iter: number of BO steps to run afterwards
    optimizer.maximize(init_points=2, n_iter=3)
    """
    2. 'maximize' 함수는 'Suggest-Evaluate(Probe)-Register' 반복하는 loop 의 wrapper
    """
    optimizer = BayesianOptimization(f=None,
                                     pbounds={
                                         "x": (-2, 2),
                                         "y": (-3, 3)
                                     },
                                     verbose=2,
                                     random_state=1)

    # Exploration strategy: UCB (Upper Confidence Bound), EI (Expected Improvement), etc.
    utility = UtilityFunction(kind="ucb", kappa=2.5, xi=0.0)

    # 1) Suggest
    next_point_to_probe = optimizer.suggest(utility)
    # 2) Evaluate
    target = black_box_function(**next_point_to_probe)
    # 3) Register
    optimizer.register(params=next_point_to_probe, target=target)
    """
    3. 추가로 가능한 것들
    """
    # (일부) Parameter 의 범위 변경
    optimizer.set_bounds(new_bounds={"x": (-2, 3)})

    # Specify a particular point to evaluate
    optimizer.probe(
        params={
            "x": 0.5,
            "y": 0.7
        },
        lazy=True,  # Evaluate only when maximize is called
    )
    optimizer.maximize(init_points=0, n_iter=0)

    # Change the Gaussian Process regressor's parameters
    optimizer.set_gp_params(normalize_y=True)
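The black_box_function used above is assumed to be defined elsewhere. A hypothetical stand-in with a known maximum (name and formula are illustrative only; z is defaulted so the two-parameter calls later in the example still work):

def black_box_function(x, y, z=0.0):
    # Toy objective for illustration: maximum value 1 at x=0, y=1, z=0.
    return -x ** 2 - (y - 1) ** 2 - z ** 2 + 1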
Example #7
def train_prominent():
    # prominence
    def black_box_function(weight_pos: float):
        preds_dict = process_dataset(train,
                                     cutoff=96.703,
                                     scorer=1,
                                     weight_pos=weight_pos)
        accuracy = evaluate_prominence(preds_dict)
        return accuracy

    pbounds = {"weight_pos": [0, 1]}

    optimizer = BayesianOptimization(f=black_box_function,
                                     pbounds=pbounds,
                                     verbose=2,
                                     random_state=4)

    utility = UtilityFunction(kind="ucb", kappa=1.96, xi=0.01)

    # TRAINING LOOP
    for i in range(25):
        next_point = optimizer.suggest(utility)

        target = black_box_function(weight_pos=next_point["weight_pos"])

        try:
            optimizer.register(params=next_point, target=target)
        except KeyError:
            # bayes_opt raises KeyError when a duplicate point is registered.
            pass
        print(f"TARGET: {target} \n")
        print(f'Weight_pos: {next_point["weight_pos"]}')

    plot_results(optimizer)
Example #8
    def __init__(self,
                 reference_env,
                 randomized_env,
                 seed,
                 **kwargs):

        self.reference_env = reference_env
        self.randomized_env = randomized_env
        self.seed = seed

        self.statedifference_rewarder = StateDifferenceRewarder(weights=-1)

        self.nparams = randomized_env.randomization_space.shape[0]
        self.nenvs = randomized_env.nenvs

        pbounds = {}
        for i in range(self.nparams):
            pbounds[str(i)] = (0, 1)

        self.bayesianoptimizer = BayesianOptimization(
            f=None,
            pbounds=pbounds,
            verbose=2,
            random_state=seed,
        )

        self.utility = UtilityFunction(kind="ucb", kappa=2.5, xi=0.0)
        self.registered_points = {}
        self.agent_timesteps = 0
Example #9
    def compute_POIs(self):
        # reset map
        self.POIs = {"POIs": []}
        # print("compute")

        pbounds = {
            'x': (self.x_low, self.x_up),
            'y': (self.y_low, self.y_high)
        }
        # pbounds = {'x': (0, dim_x), 'y': (0, dim_y)}
        num_suggested_points = self.num_of_points
        samples = [({
            'x': s['x'],
            'y': s['y']
        }, s['sample_value']) for s in self.json_dict_samples['sample_values']]
        rospy.loginfo(samples)
        utility = UtilityFunction(kind="ucb", kappa=10, xi=0.0)

        suggested_points = suggest_points(num_suggested_points, utility,
                                          pbounds, samples)
        for i, point in enumerate(suggested_points):
            # Occupancy-grid check is stubbed out for now, so every point passes.
            # occupancy_val = self.map.get_cell_val(point['x'], point['y'])
            occupancy_val = -888
            if occupancy_val < 0.1:
                self.POIs['POIs'].append({
                    "x": point['x'],
                    "y": point['y'],
                    # "z": 0,
                    "poi_id": i,
                    "poi_reward": point['reward']
                })
        print('computed POIs are: ')
        print(self.POIs)
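suggest_points is not shown in this snippet. A sketch of what it plausibly does, assuming it registers the prior samples and then draws a batch using the "kriging believer" heuristic (registering the GP's predicted mean between suggestions so candidates don't repeat); the 'reward' key mirrors how the caller reads point['reward']:

import numpy as np
from bayes_opt import BayesianOptimization

def suggest_points(n, utility, pbounds, samples):
    opt = BayesianOptimization(f=None, pbounds=pbounds, verbose=0)
    for params, value in samples:
        opt.register(params=params, target=value)
    points = []
    for _ in range(n):
        p = opt.suggest(utility)
        x = opt.space.params_to_array(p).reshape(1, -1)
        reward = float(utility.utility(x, opt._gp, opt.space.target.max())[0])
        # "Kriging believer": register the GP's predicted mean so the next
        # suggestion moves elsewhere instead of repeating this point.
        opt.register(params=p, target=float(opt._gp.predict(x)[0]))
        points.append({**p, 'reward': reward})
    return points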
Example #10
    def __init__(self,
                 # Maximizes target function
                 target_function,
                 parameters: dict,
                 alpha: float = 1e-5,
                 n_restarts: int = 20,
                 acquisition: str = 'ei',
                 logger: Logger = NullLogger(),
                 ):
        """Set up BO class, utility (acquisition) function, and Gaussian process.

        :param float alpha: Handles how much noise the GP can deal with
        :param int n_restarts: Higher => more expensive, but more accurate
        """
        super().__init__(target_function, parameters, logger)

        self.optimizer = BayesianOptimization(
            f=None,
            pbounds=parameters,
            verbose=0,
        )
        self.optimizer.set_gp_params(alpha=alpha, n_restarts_optimizer=n_restarts)
        self.utility = UtilityFunction(kind=acquisition, kappa=2.5, xi=0.2)

        self.logger(f"Created Bayesian Optimizer with alpha = {alpha} and {n_restarts} restarts for each optimization. Acquisition function is {acquisition}.")
Example #11
def generate_new_points(input_points, opt_space, max_points, num_points):
    # pbounds={'x': (-2, 2), 'y': (-3, 3)}
    if len(input_points) > max_points:
        return []
    num_points = min(num_points, max_points - len(input_points))

    utility = UtilityFunction(kind="ucb", kappa=2.5, xi=0.0)
    optimizer = BayesianOptimization(f=None,
                                     pbounds=opt_space,
                                     verbose=2,
                                     random_state=1)

    unfinished_points = []
    for input_point in input_points:
        point, loss = input_point
        if loss:
            optimizer.register(point, loss)
        else:
            unfinished_points.append(point)

    new_points = []
    for _ in range(num_points):
        x = optimizer.suggest(utility)
        # TODO: check duplication
        if x in unfinished_points:
            continue
        new_points.append(x)
    # recommendation = optimizer.provide_recommendation()
    return new_points
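A hypothetical call, to show the expected shape of input_points: finished points carry a recorded loss and get registered; unfinished ones (loss None) only suppress duplicate suggestions:

history = [({'x': 0.5, 'y': 1.0}, 0.8),    # finished: registered with its loss
           ({'x': -1.0, 'y': 2.0}, None)]  # still running: duplicate-suppressed only
suggestions = generate_new_points(history, {'x': (-2, 2), 'y': (-3, 3)},
                                  max_points=20, num_points=3)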
Example #12
def train_fuzzy():
    def black_box_function(cutoff: int, scorer):
        preds_dict = process_dataset(train, cutoff=cutoff, scorer=scorer)
        accuracy, diagnosis_dict = evaluate_fuzzy_matching(preds_dict)
        return accuracy

    pbounds = {"cutoff": [0, 100], "scorer": [0, len(FUZZUP_SCORERS) - 1]}

    optimizer = BayesianOptimization(f=black_box_function,
                                     pbounds=pbounds,
                                     verbose=2,
                                     random_state=4)

    utility = UtilityFunction(kind="ucb", kappa=1.96, xi=0.01)

    # TRAINING LOOP
    for i in range(25):
        next_point = optimizer.suggest(utility)

        next_point["scorer"] = int(next_point["scorer"])

        target = black_box_function(cutoff=next_point["cutoff"],
                                    scorer=next_point["scorer"])

        try:
            optimizer.register(params=next_point, target=target)
        except KeyError:
            # Duplicate suggestions raise KeyError in bayes_opt; skip them.
            pass
        print(f"TARGET: {target} \n")
        print(
            f'cutoff: {next_point["cutoff"]} \n  scorer: {FUZZUP_SCORERS[next_point["scorer"]]} '
        )

    plot_results(optimizer)
Example #13
def plot_gp(optimizer, x, y):
    steps = len(optimizer.space)

    x_obs = np.array([[res["params"]["x"]] for res in optimizer.res])
    y_obs = np.array([res["target"] for res in optimizer.res])
    mu, sigma = posterior(optimizer, x_obs, y_obs, x)

    fig, axes = plt.subplots(2, 1, figsize=(9, 7))

    ax = axes[0]
    ax.plot(x, y, linewidth=3, label='Target')
    ax.plot(x_obs.flatten(), y_obs, 'd', label=u'Observations', color='r')
    ax.plot(x, mu, '--', color='k', label='Prediction')

    ax.fill(np.concatenate([x, x[::-1]]),
            np.concatenate([mu - 1.9600 * sigma, (mu + 1.9600 * sigma)[::-1]]),
            alpha=.6,
            fc='c',
            ec='None',
            label='95% confidence interval')

    ax.set_ylabel('f(x)')
    ax.set_xlabel('x')
    ax.set_title(f'Gaussian Process and Utility Function After {steps} Steps')
    ax.grid()
    ax.legend(bbox_to_anchor=(1, 0.5))

    utility_function = UtilityFunction(kind="ucb", kappa=5, xi=0)
    utility = utility_function.utility(x, optimizer._gp, 0)

    ax = axes[1]
    ax.plot(x, utility, label='Utility function', color='purple')
    ax.plot(x[np.argmax(utility)],
            np.max(utility),
            '*',
            markersize=15,
            label=u'Next best guess',
            markerfacecolor='gold',
            markeredgecolor='k',
            markeredgewidth=1)
    ax.set_ylim((0, np.max(utility) + 0.5))
    ax.set_ylabel('Utility')
    ax.set_xlabel('x')
    ax.grid()
    ax.legend(bbox_to_anchor=(1, 0.5))

    return fig
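A usage sketch in the spirit of the upstream visualization notebook; the target function here is the notebook's toy objective and is only illustrative (the posterior helper from Example #1 is assumed to be in scope):

import numpy as np
from bayes_opt import BayesianOptimization

def target(x):
    return np.exp(-(x - 2) ** 2) + np.exp(-(x - 6) ** 2 / 10) + 1 / (x ** 2 + 1)

optimizer = BayesianOptimization(f=target, pbounds={'x': (-2, 10)}, random_state=1)
optimizer.maximize(init_points=2, n_iter=5)

x = np.linspace(-2, 10, 1000).reshape(-1, 1)
fig = plot_gp(optimizer, x, target(x))
fig.savefig('gp_after_steps.png', bbox_inches='tight')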
Example #14
    def bo(self, opt_u_index=(1, 1), u_range=(0, 10)):
        data = pd.read_csv(self.input, header=None, delimiter=r"\s", engine='python')
        num_rows, d = data.shape
        num_variables = sum(opt_u_index)
        variables_string = ascii_lowercase[:num_variables]
        pbounds = {}
        # Every selected variable shares the same U-value range.
        for variable in variables_string:
            pbounds[variable] = u_range

        utility = UtilityFunction(kind="ucb", kappa=self.kappa, xi=0.0)
        optimizer = BayesianOptimization(
                                        f=None,
                                        pbounds=pbounds,
                                        verbose=2,
                                        random_state=1,
                                       )
        for i in range(num_rows):
            values = list()
            for j in range(len(opt_u_index)):
                if opt_u_index[j]:
                    values.append(data.iloc[i][j])
            params = {}
            for (value, variable) in zip(values, variables_string):
                params[variable] = value
            target = self.loss(self.gap, list(data.iloc[i])[-2], list(data.iloc[i])[-1], self.a1, self.a2)

            optimizer.register(
                               params=params,
                               target=target,
                              )
        next_point_to_probe = optimizer.suggest(utility)
        
        points = list(next_point_to_probe.values())
        # if num_variables == 1:
        #     if opt_u_index[0] == 1 and opt_u_index[1] == 0:
        #         points.append(0)
        #     elif opt_u_index[0] == 0 and opt_u_index[1] == 1:
        #         points.insert(0,0)
        points = [round(elem, 5) for elem in points]
        U = [str(x) for x in points]
        with open('input.json', 'r') as f:
            data = json.load(f)
        # The context manager closes the file; no explicit close needed.
        elements = list(data["pbe"]["ldau_luj"].keys())
        for i in range(len(opt_u_index)):
            if opt_u_index[i]:
                try:
                    data["pbe"]["ldau_luj"][elements[i]]["U"] = round(float(U[i]), 4)
                except IndexError:
                    # Fewer suggested values than flags: reuse the previous one.
                    data["pbe"]["ldau_luj"][elements[i]]["U"] = round(float(U[i - 1]), 4)

        with open('input.json', 'w') as f:
            json.dump(data, f, indent=4)

        return target
Example #15
def test_suggest_at_random():
    util = UtilityFunction(kind="ucb", kappa=5, xi=0)
    optimizer = BayesianOptimization(target_func, PBOUNDS, PTYPES, random_state=1)

    for _ in range(50):
        sample = optimizer.space.params_to_array(optimizer.suggest(util))
        assert len(sample) == optimizer.space.dim
        assert all(sample >= optimizer.space.bounds[:, 0])
        assert all(sample <= optimizer.space.bounds[:, 1])
Example #16
    def __init__(self, f, acquisition, x0, sigma, kappa=2.576, xi=0.0, **opts):
        self.f = f
        self.optimizer = BayesianOptimization_(
            f=f,
            pbounds=opts['bounds'],
            random_state=1,
        )
        self.util = UtilityFunction(kind=acquisition, kappa=kappa, xi=xi)
        opts['bounds'] = self.optimizer._space._bounds.T.tolist()
        self.es = cma.CMAEvolutionStrategy(x0, sigma, opts)
Example #17
def main():
    pbounds = {
        "batch_size_16": (0, 1),
        "batch_size_32": (0, 1),
        "batch_size_64": (0, 1),
        "batch_size_128": (0, 1),
        "batch_size_256": (0, 1),
        "learning_rate": (0.1, 2.0),
    }

    discrete = []
    # TODO: support multiple categorical parameters
    # Generated directly from the parameter type and its possible values
    categorical = [
        "batch_size_16",
        "batch_size_32",
        "batch_size_64",
        "batch_size_128",
        "batch_size_256",
    ]

    discrete_indices = get_idx(pbounds, discrete)
    categorical_indices = get_idx(pbounds, categorical)

    optimizer = BayesianOptimization(
        strategy=FLAGS.strategy,
        f=train,
        pbounds=pbounds,
        verbose=2,  # verbose=1 prints only when a maximum is observed, verbose=0 is silent
        random_state=1,  # Optional seed to control randomness in BO
        discrete=discrete_indices,
        categorical=categorical_indices,
    )

    # TODO: points trained with init_points are not discretized as of now.
    #       Explicitly probing initial points with discrete values
    # optimizer.probe(params={"batch_size": 20, "learning_rate": 1.0}, lazy=True)
    # optimizer.probe(params={"batch_size": 20, "learning_rate": 0.1}, lazy=True)

    optimizer.probe(
        params={
            "batch_size_16": 0,
            "batch_size_32": 1,
            "batch_size_64": 0,
            "batch_size_128": 0,
            "batch_size_256": 0,
            "learning_rate": 0.001,
        },
        lazy=False,
    )

    utility = UtilityFunction(kind="ucb", kappa=2.5, xi=0.0)
    next_point_to_probe = optimizer.suggest(utility)
    print("Next point to probe is:", next_point_to_probe)
Example #18
def test_suggest_with_one_observation():
    util = UtilityFunction(kind="ucb", kappa=5, xi=0)
    optimizer = BayesianOptimization(target_func, PBOUNDS, PTYPES, random_state=1)

    optimizer.register(params={"p1": 1, "p2": 2}, target=3)

    for _ in range(5):
        sample = optimizer.space.params_to_array(optimizer.suggest(util))
        assert len(sample) == optimizer.space.dim
        assert all(sample >= optimizer.space.bounds[:, 0])
        assert all(sample <= optimizer.space.bounds[:, 1])
Example #19
    def _update_ax_utility(self):
        self.ax_utility.clear()
        # 2.576 is the default kappa in the bayes_opt source: it is the
        # two-sided z-score for a 99% confidence interval. The xi parameter
        # is ignored when using the UCB utility.
        self.utility_function = UtilityFunction(kind='ucb', kappa=2.576, xi=0)

        # Last argument (y_max) ignored when using UCB utility.
        utility = self.utility_function.utility(self.design_matrix,
                                                self.optimizer._gp, 0)
        utility = np.resize(utility, (self.N, self.N))

        self.ax_utility.plot_surface(self.X, self.Y, utility, alpha=0.8)
        ind = utility.argmax()
        i, j = np.unravel_index(ind, utility.shape)
        self.ax_utility.scatter(self.X_[i],
                                self.Y_[j],
                                np.amax(utility),
                                s=50,
                                c='r')
        self.ax_utility.view_init(self.phi, self.theta)
        self.next_point = [self.X_[i], self.Y_[j], np.amax(utility)]
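For reference, the UCB utility evaluated above is just the GP posterior mean plus kappa standard deviations. A minimal standalone sketch, assuming a fitted scikit-learn GaussianProcessRegressor gp and a 2-D array of query points X:

def ucb(X, gp, kappa=2.576):
    # Upper Confidence Bound acquisition: mu(x) + kappa * sigma(x).
    mu, sigma = gp.predict(X, return_std=True)
    return mu + kappa * sigma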
Example #20
    def generate_batch(self, batch_size=BATCH, verbose=0, random_state=None, utility_kind="ucb", kappa=2.5, xi=0.0,
                       sampler='greedy', **kwargs):
        '''
        Creates optimizer, registers all previous data, and generates a proposed batch.
        Arguments
        ----------
        batch_size: integer number of points to suggest per batch
        verbose: 0 (quiet), 1 (printing only maxima as found), 2 (print every registered point)
        random_state: integer for random number generator
        utility_kind: Utility function to use ('ucb', 'ei', 'poi')
        kappa: float, necessary for 'ucb' utility function
        xi: float, translation of gaussian function
        sampler: batch sampling strategy, e.g. 'greedy' or 'capitalist'
        **kwargs: dictionary passed to suggestion function. See bayes_opt.parallel_opt.disc_acq_max() for options

        Returns
        ----------
        batch: list of dictionaries containing parameters for each variable in the experiment
        '''
        batch = []
        # Update kwargs
        if sampler in ('greedy', 'capitalist'):
            kwargs['complements'] = bool(self.complements)
        # Initialize optimizer and utility function
        fname = os.path.join(self.directory_path, 'optimizer.pickle')
        if os.path.isfile(fname):
            with open(fname, 'rb') as handle:
                data = pickle.load(handle)
                dbo = data['model']
                running_points = self.get_running_points()
                for point in running_points:
                    dbo.partner_register(params=point, clear=False)
                self.model_uuid = data['uuid']
        else:
            dbo = self.generate_model(verbose=verbose, random_state=random_state)
            self.model_uuid = self.get_saved_model_uuid()
        utility = UtilityFunction(kind=utility_kind, kappa=kappa, xi=xi)

        # Generate batch of suggestions
        dbo.reset_rng()
        batch = dbo.suggest(utility, sampler=sampler, n_acqs=batch_size, fit_gp=False, **kwargs)

        # Clear and re-register running data to partner space in optimizer (can be adjusted in capitalist)
        running_points = self.get_running_points()
        for idx, point in enumerate(running_points):
            if idx == 0:
                dbo.partner_register(params=point, clear=True)
            else:
                dbo.partner_register(params=point, clear=False)
        for point in batch:
            self.complement_mapping(point)
        return batch
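A hypothetical invocation of generate_batch, assuming a configured campaign object exp of the class above:

batch = exp.generate_batch(batch_size=4, utility_kind='ei', xi=0.01, sampler='greedy')
for trial in batch:
    print(trial)  # one dict of parameter values per suggested experiment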
Example #21
    def __init__(self, starting_point=None):
        self.hyperopt = BayesianOptimization(
            f=None,
            pbounds={'m': (0, 100)},
            verbose=2,
            random_state=1,
        )

        if starting_point:
            self.hyperopt.probe({'m': starting_point})

        # Prefer exploitation. We should experiment with some exploration, though.
        self.util_func = UtilityFunction(kind="ucb", kappa=2.5, xi=0.0)

        self._next_probe = None
Example #22
    def __init__(self, n_init, n_iter, utility=None):
        # These parameters are initialized by the runner
        # updated by setExperiment()
        self.experiment_id = None
        self.parameters_by_name = None
        self.optimizer = None
        self.previous_trials = []

        self.utility = utility if utility is not None else UtilityFunction(
            kind="ucb", kappa=2.5, xi=0.0)
        self.n_init = n_init
        self.n_iter = n_iter

        self.n_initted = 0
        self.n_itered = 0
        self.previous_trials_loaded = False
Example #23
    def __init__(self, optimization_problem: OptimizationProblem,
                 optimizer_config: SimpleBayesianOptimizerConfig):
        assert len(optimization_problem.objectives
                   ) == 1, "This is a single-objective optimizer."
        OptimizerInterface.__init__(self, optimization_problem)
        self.minimize = self.optimization_problem.objectives[0].minimize

        self._ordered_parameter_names = [
            dimension.name for dimension in
            self.optimization_problem.parameter_space.dimensions
            if dimension.name not in OptimizationProblem.META_DIMENSION_NAMES
        ]

        self._ordered_feature_names = [
            dimension.name
            for dimension in self.optimization_problem.feature_space.dimensions
            if dimension.name not in OptimizationProblem.META_DIMENSION_NAMES
        ]

        assert SimpleBayesianOptimizerConfig.contains(optimizer_config)
        self._optimizer_config = optimizer_config
        self._utility_function = UtilityFunction(
            kind=self._optimizer_config.utility_function,
            kappa=self._optimizer_config.kappa,
            xi=self._optimizer_config.xi)

        self._full_parameter_space_bounds = self._format_search_space(
            self.optimization_problem.parameter_space)
        self._full_feature_space_bounds = self._format_search_space(
            self.optimization_problem.feature_space)

        self._optimizer = BayesianOptimization(
            f=None,
            # Both parameters and context are used for regression.
            pbounds=self._full_feature_space_bounds,
        )

        # Optionally the optimizer can focus on a subspace of the parameter search space.
        #
        self.focused = False
        self._focused_parameter_space = None
        self._focused_parameter_space_bounds = None

        # HISTORY
        self._registered_param_combos = []
        self._observations = []
        self._models = []
Example #24
def maximize(optimizer, pbounds, n_probe, n_points, random, do_plot=False):
    optimizer.set_bounds(new_bounds=pbounds)
    utility_function = UtilityFunction(kind="ucb", kappa=5, xi=0)
    for i in range(n_points):
        if i % 3 == 0 or random:
            print("Probing a RANDOM point")
            optimizer.maximize(init_points=1,
                               n_iter=0,
                               alpha=float(args.alpha))  # `args` is parsed at module level
        else:
            next_point = optimizer.suggest(utility_function)
            print("Probing this point next: ", next_point)
            target = auc(**next_point)
            print("Found AUC to be: ", target)
            optimizer.register(params=next_point, target=target)
        if do_plot:
            plot_gp(optimizer, x)  # `x` is assumed to be a module-level evaluation grid
Example #25
def setup_gaussian(param_ranges, kappa, xi):

    pbounds = {}
    for param in param_ranges.keys():
        pbounds[param] = (0, 1)

    print(pbounds)

    optimizer = BayesianOptimization(
        f=None,
        pbounds=pbounds,
        verbose=0,  # verbose=1 prints only when a maximum is observed, verbose=0 is silent
        random_state=None)
    utility = UtilityFunction(kind="ucb", kappa=kappa, xi=xi)

    return optimizer, utility
Example #26
    def __init__(self, space, max_num_steps):
        super(self.__class__, self).__init__(space, max_num_steps)

        # For UCB acquisition function, smaller kappa prefers exploitation (e.g., 1.0), larger kappa prefers
        # exploration (e.g., 10.0). For EI or PI acquisition function, smaller xi prefers exploitation (e.g., 0.0),
        # larger xi prefers exploration (e.g., 0.1).
        # Check https://github.com/fmfn/BayesianOptimization/blob/master/examples/exploitation_vs_exploration.ipynb.
        # self._utility = UtilityFunction(kind='ucb', kappa=2.5, xi=0.0)
        self._utility = UtilityFunction(kind='ei', kappa=0.0, xi=0.1)
        self._opt = BayesianOptimization(
            f=None,
            pbounds=self._space,
        )
        self._next_point = None
        self._logger.info(
            "Bayesian Search is enabled, space {}, max_num_steps {}.".format(
                space, max_num_steps))
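To make the comment above concrete, a sketch of the two ends of the exploration spectrum; the specific values are illustrative, not prescriptive:

exploit_ucb = UtilityFunction(kind='ucb', kappa=1.0, xi=0.0)   # prefers exploitation
explore_ucb = UtilityFunction(kind='ucb', kappa=10.0, xi=0.0)  # prefers exploration
exploit_ei = UtilityFunction(kind='ei', kappa=0.0, xi=0.0)     # prefers exploitation
explore_ei = UtilityFunction(kind='ei', kappa=0.0, xi=0.1)     # prefers exploration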
Example #27
    def __init__(self,
                 n_init,
                 n_iter,
                 alpha=1e-6,
                 kappa=2.5,
                 utility=None,
                 budget=None,
                 converge_thres=None,
                 converge_steps=None):
        # These parameters are initialized by the runner
        # updated by setExperiment()

        self.optimizer = None
        if alpha is None:
            self.alpha = 1e-6
        else:
            self.alpha = alpha
        if kappa is None:
            self.kappa = 2.5
        else:
            self.kappa = kappa

        self.utility = utility if utility is not None else UtilityFunction(
            kind="ucb", kappa=self.kappa, xi=0.0)

        self.experiment_id = None
        self.parameters_by_name = None
        self.n_init = n_init
        self.n_iter = n_iter
        self.budget = budget
        self.converge_thres = converge_thres
        self.converge_steps = converge_steps
        self.converge_steps_count = 0
        self.stop_flag = False

        self.using_budget_flag = False  # whether the current trial uses the budget
        self.using_converge_flag = False  # whether the current trial counts toward convergence
        self.previous_trials = []
        self.n_initted = 0
        self.n_itered = 0
        self.previous_trials_loaded = False

        self.all_trials = []
        self.visited_config = {}  # key: config string; value: index into previous_trials
Example #28
def main():
    bounds = {'alpha': (0.3, 0.9), 'temperature': (3, 15)}
    optimizer = BayesianOptimization(
        f=KD_train,
        pbounds=bounds,
        verbose=2,
        random_state=0)
    utility = UtilityFunction(kind='ei', kappa=1, xi=0.0)

    for _ in range(5):
        next_p = optimizer.suggest(utility)
        print('suggest for next:', next_p)
        result = KD_train(**next_p)
        optimizer.register(params=next_p, target=result)

    for i, res in enumerate(optimizer.res):
        print("iter {} \t {}".format(i, res))

    # Note: the logger only records steps taken after subscribing,
    # so it should be attached before the optimization loop.
    logger = JSONLogger(path='./BO_logs.json')
    optimizer.subscribe(Events.OPTIMIZATION_STEP, logger)
Example #29
def optimize():
    pbounds = {"x": (-10, 10)}
    discrete = ["x"]

    optimizer = BayesianOptimization(
        f=f,  # The function to maximize
        pbounds=pbounds,
        verbose=2,  # verbose=1 prints only when a maximum is observed, verbose=0 is silent
        random_state=1,  # Optional seed to control randomness in BO
        strategy="proposed",
        discrete=discrete,
    )

    optimizer.maximize(init_points=1, n_iter=10)

    utility = UtilityFunction(kind="ucb", kappa=2.5, xi=0.0)
    next_to_probe = optimizer.suggest(utility)
    print(f"Next Point to Probe = {next_to_probe}")

    plot_bo(f, optimizer, pbounds)
Example #30
def bayes_opt_xgb(
    log,
    dmatrix,
    opt_fun,
    opt_rounds,
    init_rounds,
    params_ranges,
    max_estimators
    ):

    """ Function to perform bayesian optimization of model hyperparameters for xgboost models """

    # Instantiate BO object, utility function, and log
    opt_xgb = BayesianOptimization(opt_fun, params_ranges)
    utility = UtilityFunction(kind="ei", kappa=2.5, xi=0.0)
    log_params = {}

    # Manually loop through the optimization process (wanted better logs than the built-in function)
    for round_idx in range(opt_rounds):
        # Random points for the first init_rounds rounds, then BO suggestions
        if round_idx < init_rounds:
            np.random.seed()
            next_point = {key: np.random.uniform(value[0], value[1]) for key, value in params_ranges.items()}
        else:
            next_point = opt_xgb.suggest(utility)
        # Fit xgb model with the selected hyperparameters
        opt_params, fit_props = opt_fun(
            dmatrix=dmatrix,
            max_estimators=max_estimators,
            **next_point,
        )
        
        target = fit_props['val_score']
        # Register results to BO object
        opt_xgb.register(params=next_point, target=target)
        # Print to keep the user updated and save to the log
        log.info(str(round_idx) + str(fit_props) + str(opt_params))
        log_params.update({target: {'params': opt_params, 'fit_props': fit_props}})

    return log_params
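A hypothetical invocation; logging, dtrain, and fit_xgb stand in for the caller's logger, xgboost DMatrix, and objective wrapper, and the parameter ranges are illustrative:

import logging

log = logging.getLogger(__name__)
results = bayes_opt_xgb(
    log=log,
    dmatrix=dtrain,
    opt_fun=fit_xgb,
    opt_rounds=30,
    init_rounds=10,
    params_ranges={'max_depth': (3, 10), 'eta': (0.01, 0.3)},
    max_estimators=500,
)
best = results[max(results)]  # entry with the highest validation score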