Example #1
 def EI(self):
     """
     construct a GP model for scalarized output data
     applying EI for this model
     """
     kernel = GPy.kern.RBF(self.x_train.shape[1])
     self.model = GPy.models.GPRegression(self.x_train,
                                          self.f_theta[:, None],
                                          kernel=kernel,
                                          normalizer=None)
     self.model['.*Gaussian_noise.variance'].constrain_fixed(1.0e-2)
     self.model['.*rbf.variance'].constrain_fixed(1.0)
     # determine the bounds for the lengthscale
     x_dist = distance.cdist(self.x_train, self.x_train)
     median = np.median(x_dist)
     if median == 0:
         lower = 1.0e-3
         upper = 100
     else:
         lower = 1.0e-3 * median
         upper = 100 * median
     self.model['.*rbf.lengthscale'].constrain_bounded(lower, upper)
     self.model.optimize_restarts()
     # compute the termination condition
     array_bounds = np.array(self.x_bounds)
     max_bound = np.argmax(array_bounds[:, 0] - array_bounds[:, 1])
     terminate_vol = (0.1**self.x_train.shape[1]) / (
         array_bounds[max_bound, 1] - array_bounds[max_bound, 0])
     res = minimize(self.obj,
                    bounds=self.x_bounds,
                    algmethod=1,
                    volper=terminate_vol)
     return res
 def __init__(self, noise_var, seed=1):
     self.dim = 2
     self.bounds = [[-5, 5], [-5, 5]]
     self.lengthscale_bound = [[0, 5]]
     super().__init__(self.dim, self.bounds, noise_var, seed)
     res = minimize(self.value, self.bounds, maxf=self.dim * 1000, algmethod=1)
     self.x_opt = res['x']
     self.y_opt = -self.value(self.x_opt)
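For reference, a minimal self-contained sketch of the scipydirect call pattern used in these snippets (algmethod=1 plus a volper stopping volume); the quadratic objective and bounds below are illustrative only:

import numpy as np
from scipydirect import minimize

def sphere(x):
    # toy objective with its global minimum at the origin
    return float(np.sum(np.asarray(x) ** 2))

bounds = [(-5.0, 5.0), (-5.0, 5.0)]
# volper terminates DIRECT once the best hyperrectangle's volume falls
# below this fraction of the original search box
res = minimize(sphere, bounds, algmethod=1, volper=1e-4)
print(res.x, res.fun)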
Example #3
def test_minimize():

    # basic test
    bounds = [(-10, 10) for i in range(4)]
    def func(x):
        x = x - np.array([-1, 2, -4, 3])  # avoid mutating the input in place
        return np.dot(x, x)
    res = minimize(func, bounds)
    npt.assert_allclose(res.x, np.array([-1, 2, -4, 3]), atol=0.1)

    # test with function not defined everywhere
    def func(x):
        if np.sum(np.abs(x)) > 20:
            raise Exception("func not defined")
        x = x - np.array([-1, 2, -4, 3])  # avoid mutating the input in place
        return np.dot(x, x)
    res = minimize(func, bounds)
    npt.assert_allclose(res.x, np.array([-1, 2, -4, 3]), atol=0.1)
def next_point(data,trans):
    num_fidelities = data['num_fidelities']
    multi_fid_values = data['multi_fidelity_params']

    bounds = trans.get_bounds()
    if len(data['data_points']) == 0:
        first_point = (bounds[:,1]-bounds[:,0])/2 + bounds[:,0]
        first_fidelity = 0
        return first_point,first_fidelity

    regressors = []
    for fidelity in range(num_fidelities):
        reg = sklearn.gaussian_process.GaussianProcessRegressor(
            kernel=sklearn.gaussian_process.kernels.RBF(length_scale=1.0),# length scales get handled in data preprocessing
            alpha=data['guassian_params']['noise'],
            optimizer=None
        )
        ys,xs = parse_data(data,trans,fidelity)
        if len(ys) > 0:
            reg.fit(xs,ys)

        regressors.append(reg)

    t = len(data['data_points'])
    d = len(data['data_points'][0]['data'])
    beta = 0.2*math.log(t+1)*d #standard formula for beta
    zetas = multi_fid_values['err_bounds'] + [0.0]
    def neg_upper_confidence_bound(x):
        x = np.asarray(x)
        if len(x.shape) == 1:
            x = np.stack([x])
        ucbs = []
        for zeta,reg in zip(zetas,regressors):
            mean,stdev = reg.predict(x, return_std=True)
            ucbs.append(mean + stdev * beta + zeta)
        true_ucb = min(ucbs)
        return -true_ucb
    min_val = scipydirect.minimize(neg_upper_confidence_bound,bounds)
    xval = min_val.x

    acc_targets = multi_fid_values['accuracy_targets']+[0.0]
    out_fid_level = num_fidelities-1# defaults to highest fidelity function
    for fid_level,(acc,reg) in enumerate(zip(acc_targets,regressors)):
        mean,stdev = reg.predict([min_val.x], return_std=True)
        if stdev*beta > acc:
            out_fid_level = fid_level
            break

    yval = -neg_upper_confidence_bound([xval])
    return xval,yval,out_fid_level
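As a standalone illustration of the min-over-fidelities upper confidence bound assembled above (the one-dimensional data, beta, and zeta values here are synthetic):

import numpy as np
import sklearn.gaussian_process

rng = np.random.default_rng(0)
xs = rng.uniform(0.0, 1.0, size=(8, 1))

regressors = []
for noise in (0.1, 0.01):  # a cheap fidelity and an accurate one
    reg = sklearn.gaussian_process.GaussianProcessRegressor(
        kernel=sklearn.gaussian_process.kernels.RBF(length_scale=1.0),
        alpha=noise,
        optimizer=None)
    reg.fit(xs, np.sin(3.0 * xs).ravel() + rng.normal(0.0, noise, size=8))
    regressors.append(reg)

beta = 1.0           # exploration weight
zetas = [0.05, 0.0]  # error bound of each fidelity against the target function
x = np.array([[0.5]])
ucbs = []
for zeta, reg in zip(zetas, regressors):
    mean, stdev = reg.predict(x, return_std=True)
    ucbs.append(mean + beta * stdev + zeta)
print(min(u[0] for u in ucbs))  # the tightest upper bound wins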
Example #5
    def _optimize(self) -> Tuple[np.ndarray, np.ndarray]:
        def objective(x):
            return self.acquisition_function(x.reshape(1, -1))

        res = minimize(
            objective,
            bounds=list(zip(self.bounds.lowers, self.bounds.uppers)),
            **self.direct_kwargs
        )
        x_min = res.x
        f_min = res.fun

        return np.array([x_min]), np.array([f_min])
 def __init__(self, seed=1):
     self.dim = 1
     self.bounds = [[-3, 3]]
     self.y_bounds = [-2, 2]
     super().__init__(self.dim, self.bounds, seed)
     self.fit()
     self.min, self.max = self.get_min_max()
     res = minimize(self.value_std,
                    self.bounds,
                    maxf=self.dim * 1000,
                    algmethod=1)
     self.x_opt = res['x'][0]
     self.y_opt = -self.value_std(self.x_opt)
Example #7
def direct_minimize(acq_function, bounds, return_best_only=True, **kwargs):
    def obj(x: np.ndarray):
        if x.ndim == 1:
            x = x[None, :]
        x = torch.tensor(x).double().unsqueeze(-2)
        y = acq_function(x)
        y = float(y.view(-1).item())

        return y

    res = minimize(obj, bounds.T)

    return torch.tensor(res.x).float().unsqueeze(0), torch.tensor(
        res.fun).float().unsqueeze(0)
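A usage sketch with a stand-in acquisition function (the quadratic and bounds are hypothetical). Note that direct_minimize expects bounds of shape (2, d) with lower and upper rows, since it transposes them before handing them to scipydirect:

import numpy as np
import torch

def dummy_acq(x: torch.Tensor) -> torch.Tensor:
    # x arrives with shape (n, 1, d); return one value per candidate
    return -(x.squeeze(-2) ** 2).sum(dim=-1)

bounds = np.array([[-1.0, -1.0],   # lower bounds
                   [1.0, 1.0]])    # upper bounds
x_best, f_best = direct_minimize(dummy_acq, bounds)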
Example #8
    def maximize(self, model_predict: callable, lower_bound: np.ndarray,
                 upper_bound: np.ndarray):
        bound = []
        dim = len(lower_bound)
        for i in range(dim):
            bound.append((lower_bound[i], upper_bound[i]))

        def acquisition_curve(x: np.ndarray):
            _, uncertainty = model_predict(x[None])
            return -uncertainty[:, None]

        res = minimize(acquisition_curve, bound)
        print("Selected point", res.x, res.fun)
        return res.x, res.fun
Example #9
    def calc_smsego(self):
        """
        optimize SMSego

        Returns
        -------
        res : OptimizeResult
            result of the optimization by DIRECT
        """
        # compute the Pareto hypervolume formed by the points acquired so far
        # compute the termination condition
        array_bounds = np.array(self.x_bounds)
        max_bound = np.argmax(array_bounds[:, 0] - array_bounds[:, 1])
        terminate_vol = (0.1 ** self.x_train.shape[1]) / (
            array_bounds[max_bound, 1] - array_bounds[max_bound, 0])
        res = minimize(self.obj, bounds=self.x_bounds, algmethod=1,
                       volper=terminate_vol)
        return res
Example #10
    def calc_ieipv(self, MOGPI):
        """
        obtain the result of optimizing IEIPV

        Parameters
        ----------
        MOGPI : MultiOutputGPIndep
            Gaussian Process Regression model for each objective function

        Returns
        -------
        res : OptimizeResult
            result of the optimization by DIRECT
        """
        # create the cells
        v, w = Create_vw.create_vw(self.y_train, self.v_ref, self.w_ref)

        def obj(x):
            if np.any(np.all(self.x_train == x, axis=1)):
                return 1.0e5
            else:
                # mean, var = MTGPR.multitaskGP_predict(np.atleast_2d(x))
                mean, var = MOGPI.predict_one(x)
                alpha = (mean - v) / np.sqrt(var)
                beta = (mean - w) / np.sqrt(var)
                ieipv_each_cell = var * (
                    (norm.pdf(beta) - norm.pdf(alpha)) + beta *
                    (norm.cdf(beta) - norm.cdf(alpha)))
                ieipv = (-1) * np.sum(np.prod(ieipv_each_cell, axis=1))
                return ieipv

        # compute the termination condition
        array_bounds = np.array(self.x_bounds)
        max_bound = np.argmax(array_bounds[:, 0] - array_bounds[:, 1])
        terminate_vol = (0.1**self.x_train.shape[1]) / (
            array_bounds[max_bound, 1] - array_bounds[max_bound, 0])
        res = minimize(obj,
                       bounds=self.x_bounds,
                       algmethod=1,
                       volper=terminate_vol)
        return res
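For reference, the per-cell quantity inside obj can be evaluated in isolation; the scalar values below are made up:

import numpy as np
from scipy.stats import norm

mean, var = 0.3, 0.04  # GP prediction for one cell (hypothetical)
v, w = 0.0, 1.0        # lower and upper cell boundaries (hypothetical)
alpha = (mean - v) / np.sqrt(var)
beta = (mean - w) / np.sqrt(var)
cell = var * ((norm.pdf(beta) - norm.pdf(alpha))
              + beta * (norm.cdf(beta) - norm.cdf(alpha)))
print(cell)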
Example #11
 def fit(self, X_train, y_train, noise_var=None, n_iter=8, decompo=None, verbose=True):
     
     if self.normalize_X:
         self.X_train = (X_train - self.lowBounds) / (self.highBounds - self.lowBounds)
     else:
         self.X_train = X_train
         
     if self.normalize_y:
         self.mean_y_train = y_train.mean()
         self.std_y_train = y_train.std()
         self.y_train = (y_train - self.mean_y_train) / self.std_y_train
     else:
         self.y_train = y_train
         
     self.nbOfSamples = self.X_train.shape[0]
     if decompo is None:
         decompo = self.decompo.copy()
     if noise_var is None:
         noise_var = self.alpha
     
     tempListOfLogLik = []
     tempListOfTheta = []
     
     # Maximize the log-likelihood to find the shared hyperparameter
     res = scipydirect.minimize(lambda x: -self.loglik(np.exp(x), self.X_train, self.y_train, noise_var, decompo),
                                bounds=np.log([[1e-5,1e5] for i in range(2)]),
                                maxT=n_iter)
     if verbose:
         print(res)
         
     sol = res.x
     self.kernel.sigma = np.exp(sol[0])
     self.kernel.lengthscale = np.exp(sol[1])
     
     self.K_w_noise = noise_var * np.eye(self.nbOfSamples)
     for i in range(len(decompo)):
         self.K_w_noise += self.kernel.compute(self.X_train[:,decompo[i]],self.X_train[:,decompo[i]])
     
     self.L_ = cholesky(self.K_w_noise, lower=True)
     self.alpha_ = cho_solve((self.L_,True), self.y_train)
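The hyperparameter search above runs in log space: DIRECT splits the box np.log([[1e-5, 1e5], [1e-5, 1e5]]) uniformly, which amounts to a multiplicative search over the original scales. A minimal sketch of the same pattern with a toy stand-in for the negative log-likelihood:

import numpy as np
from scipydirect import minimize

def neg_loglik(log_theta):
    # stand-in objective; its optimum sits at sigma = e**0.3, lengthscale = e**-1.0
    return float(np.sum((np.asarray(log_theta) - np.array([0.3, -1.0])) ** 2))

res = minimize(neg_loglik, bounds=np.log([[1e-5, 1e5]] * 2), maxT=8)
sigma, lengthscale = np.exp(res.x)  # back-transform, as in fit() above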
Example #12
    def optimize_acq_f(self, n_iter=50, method="LBFGS"):
        # optimization of the acquisition function to get the next query point x
        def obj_LBFGS(x):
            return -self.acq_f(x)

        x_tries = np.random.uniform(self.bounds[0, :], self.bounds[1, :],
                                    size=(10000, self.bounds.shape[1]))
        x_seeds = np.random.uniform(self.bounds[0, :], self.bounds[1, :], size=(n_iter, self.bounds.shape[1]))
        ys = -obj_LBFGS(x_tries)
        x_max = x_tries[ys.argmax()].reshape((1, -1))
        max_acq = ys.max()
        if(method == "LBFGS"):
            for x_try in x_seeds:
                # Find the minimum of minus the acquisition function
                res = minimize(obj_LBFGS,
                                x_try.reshape(1, -1),
                                bounds=self.reformat_bounds(self.bounds),
                                method="L-BFGS-B")

                # See if success
                if not res.success:
                    continue

                # Store it if better than previous minimum(maximum).
                if max_acq is None or -res.fun[0] > max_acq:
                    x_max = res.x
                    max_acq = -res.fun[0]
        elif(method == "DIRECT"):
            ys = -obj_LBFGS(x_tries)
            x_max = x_tries[ys.argmax()].reshape((1, -1))
            max_acq = ys.max()
            x = scipydirect.minimize(obj_LBFGS, self.reformat_bounds(self.bounds)).x
            acq = -obj_LBFGS(x)[0,0]
            if (acq > max_acq):
                x_max = x
        else:
            raise NotImplementedError

        return np.clip(x_max, self.bounds[0, :], self.bounds[1, :]).reshape((1, -1))
Example #13
paramscomb = params_combination((aenvs, pienvs, maxfs, deltatols))
niter = int(niter)
nburnin = int(nburnin)
if parametercheck(datadir, sys.argv, paramscomb, nbatch):
    njob = int(sys.argv[1])
    data = []
    for i in progressbar(range(nbatch)):
        n = (njob - 1) * nbatch + i
        aenv, pienv, maxf, deltatol = paramscomb[n]
        if disp:
            print(paramscomb[n])
        args = lambda_, mus, cup, aenv, pienv, niter, nburnin
        bounds = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0], [0.0, 1.0]])
        res1 = scipydirect.minimize(minus(evolimmune.Lambda_pq),
                                    maxf=maxf,
                                    args=args,
                                    bounds=bounds,
                                    disp=disp)
        if disp:
            print('results of first phase optimization', res1)
        res2 = noisyopt.minimize(minus(evolimmune.Lambda_pq),
                                 res1.x,
                                 scaling=(1.0, 1.0, 5.0, 1.0),
                                 args=args,
                                 bounds=bounds,
                                 deltainit=deltainit,
                                 deltatol=deltatol,
                                 alpha=alpha,
                                 feps=feps,
                                 errorcontrol=True,
                                 paired=True,
                                 disp=disp)
Example #14
def runNVOptimize():
    global env, countfigure
    starttime = time.time()
    res = []
    locreslist = []
    if (optimizer == 0 or optimizer == 2):
        numsteps = env.totalsteps
        env_fn = lambda: env
        hidden_sizes = np.ones(
            circuitdepth) * nneurons  #size of neural network
        ac_kwargs = dict(hidden_sizes=hidden_sizes, activation=tf.nn.relu)
        logger_kwargs = dict(
            output_dir=output_dir + dataset + "/",
            exp_name=dataset)  #this logger is not used, we have our own
        if (optimizer == 0):  #Use PPO
            with tf.Graph().as_default():
                ppo_tf1(env_fn=env_fn,
                        ac_kwargs=ac_kwargs,
                        steps_per_epoch=(numsteps) * iterations_per_epoch,
                        pi_lr=pi_lr,
                        vf_lr=vf_lr,
                        train_pi_iters=iterations_per_epoch,
                        train_v_iters=iterations_per_epoch,
                        epochs=big_epochs,
                        lam=0.99,
                        logger_kwargs=logger_kwargs,
                        target_kl=100,
                        save_freq=100,
                        clip_ratio=clip_ratio)
        elif (optimizer == 2):  #Use SAC, does not work properly yet
            with tf.Graph().as_default():
                startsteps = 10000
                sac_tf1(env_fn,
                        ac_kwargs=ac_kwargs,
                        seed=0,
                        steps_per_epoch=(numsteps) * iterations_per_epoch,
                        epochs=big_epochs,
                        replay_size=1000000,
                        gamma=0.99,
                        polyak=0.995,
                        lr=sac_lr,
                        alpha=0.05,
                        batch_size=500,
                        start_steps=startsteps,
                        max_ep_len=1000,
                        logger_kwargs=logger_kwargs,
                        save_freq=100)

        tf.reset_default_graph()

    elif (optimizer == 1):  #Use DIRECT algorithm
        from scipydirect import minimize

        print("start DIRECT")
        bounds = np.transpose([
            env.action_space.low[0] *
            np.ones(env.action_spaceLength * env.totalsteps),
            env.action_space.high[0] *
            np.ones(env.totalsteps * env.action_spaceLength)
        ])

        def gymWrapper(
                x):  #Wrap gym environment into format understood by optimizer
            env.reset()
            actions = np.reshape(x, [env.totalsteps, env.action_spaceLength])

            for i in range(np.shape(actions)[0]):
                env.step(actions[i])

            return -env.fidel_val

        res = minimize(gymWrapper,
                       bounds,
                       fglobal=-1,
                       maxf=maxiterations,
                       algmethod=1)

        actions = np.reshape(res["x"],
                             [env.totalsteps, env.action_spaceLength])
        print(res)
        print('DIRECT result', -res["fun"], actions)
    elif (optimizer == 3):  #Use Nelder-Mead
        steadytime = False  #Keep time per bin constant, but can vary time overall
        if (len(t_var) > 0 and steadytime == True):
            actionspace = (env.action_spaceLength - 1) * env.totalsteps + 1
        else:
            actionspace = env.action_spaceLength * env.totalsteps
        #action is bounded between -0.5 and 0.5
        bounds = np.transpose([
            env.action_space.low[0] * np.ones(actionspace),
            env.action_space.high[0] * np.ones(actionspace)
        ])
        print("start scipy optimize")

        def reshapeActions(x):
            if (len(t_var) > 0 and steadytime == True):
                xtime = x[0]  #time per bin, mapped between -0.5 and 0.5
                xrest = np.reshape(
                    x[1:], [env.totalsteps, (env.action_spaceLength - 1)])
                actions = np.zeros([env.totalsteps, env.action_spaceLength])
                for i in range(env.totalsteps):
                    actions[i] = list(xrest[i]) + [xtime]
            else:
                actions = np.reshape(x,
                                     [env.totalsteps, env.action_spaceLength])

            return actions

        def gymWrapper(x):
            env.reset()
            actions = reshapeActions(x)
            totalreward = env.fidel_val
            for i in range(np.shape(actions)[0]):
                _, reward, done, _ = env.step(actions[i])
                totalreward += reward

            #totalreward=env.fidel_val
            return -totalreward

        for i in range(
                repeatOptimize
        ):  #Repeat optimization repeatOptimize times, with random initial state
            x0 = (np.random.rand(actionspace) -
                  0.5) * 0.5  #Random initial parameters
            res = sp.optimize.minimize(gymWrapper,
                                       x0,
                                       method="Nelder-Mead",
                                       bounds=bounds,
                                       options={
                                           "maxiter": maxiterations,
                                           "adaptive": True
                                       })

            actions = reshapeActions(res["x"])
            print(res)
            print('scipy result', -res["fun"], actions)
            locreslist.append(res)
            if (-res["fun"] > 0.99):
                break

    print(dataset)

    #Get results from logger in gym environment
    finalfidelresults = [env.logger[i][0]
                         for i in range(len(env.logger))]  #Fidelity at end
    #fidelresults=[env.logger[i][1] for i in range(len(env.logger))]
    penaltyresults = [
        np.sum(env.logger[i][3]) for i in range(len(env.logger))
    ]  #Penalty for going out of bounds, decreases to zero over training
    rewardresults = [
        finalfidelresults[i] - penaltyresults[i]
        for i in range(len(env.logger))
    ]  #Reward including penalty, e.g. reward=fidelity-penalty

    omegaresults = [
        env.logger[i][2].flatten() for i in range(len(env.logger))
    ]  #Driving parameters found during training

    argmax = np.argmax(rewardresults)  #Get best result
    print(rewardresults[argmax], omegaresults[argmax])

    #Re/run best result found and store it in variable maxdata
    env.reset()
    fidelmaxlist = []  #Best fidelity found
    fidelmaxlist.append(env.fidel_val)

    omegamax = np.reshape(
        omegaresults[argmax], [env.totalsteps, env.action_spaceLength]
    )  #Best driving parameters found, parameters are mapped between -0.5 and +0.5
    for i in range(np.shape(omegamax)
                   [0]):  #Run system with driving parameters and save fidelity
        env.step(omegamax[i])
        fidelmaxlist.append(env.fidel_val)

    tlist = env.tlist  #np.linspace(0,t_max,num=N_bins+1) #Time
    #tlistOmega=env.tlist[1:]#np.linspace(0,t_max,num=N_bins)

    actualparammax = env.logger[argmax][6]  #driving parameters in real values
    maxdata = [
        finalfidelresults[argmax], penaltyresults[argmax],
        omegaresults[argmax], tlist, actualparammax
    ]

    totaltime = time.time() - starttime
    print(time.time() - starttime)  #Total runtime
    return env.logger, totaltime, maxdata, locreslist  #Return result
Example #15
def twostage_optimization(func, args, kwargs):
    res1 = scipydirect.minimize(func, args=args, **kwargs)
    return noisyopt.minimize(func, res1.x, args=args, **kwargs)
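A usage sketch (hypothetical noisy objective; the kwargs dict is restricted to keys both optimisers accept, such as bounds). DIRECT first locates the basin globally, then noisyopt refines locally with error control suited to stochastic objectives:

import numpy as np

def noisy_sphere(x, scale):
    return float(np.sum(np.asarray(x) ** 2)) + np.random.normal(0.0, scale)

res = twostage_optimization(noisy_sphere, (0.01,),
                            {'bounds': np.array([[-1.0, 1.0], [-1.0, 1.0]])})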
Example #17
def test_minimize():
    bounds = [(-10, 10) for i in range(4)]
    res = minimize(func, bounds)
    npt.assert_allclose(res.x, np.array([-1, 2, -4, 3]), atol=0.1)
Example #18
 def opt_x(self):
     res = minimize(self.mu_minus,
                    self.f.bounds,
                    maxf=self.f.dim * 1000,
                    algmethod=1)
     return res['x']
Example #20
 def fit(self):
     bound = self.f.lengthscale_bound
     res = minimize(self.marginal_liklihood, bound, maxf=1000, algmethod=1)
     lengthscale = res['x'][0]
     self.kernel.lengthscale = lengthscale
Example #21
    def _optimise_acq_func(self, acq, max_or_min='max', acq_opt_params=None):
        """
        Run the chosen optimisation procedure
        """

        if self.verbose:
            print(f"Optimising acquisition function ({max_or_min})")

        if acq_opt_params is None:
            acq_opt_params = self.acq_opt_params

        if max_or_min == 'max':
            def optimiser_func(x):
                return -acq.evaluate(np.atleast_2d(x))
        elif max_or_min == 'min':
            def optimiser_func(x):
                return acq.evaluate(np.atleast_2d(x))
        else:
            raise NotImplementedError

        if acq_opt_params['method'] == 'direct':
            n_direct_evals = acq_opt_params['n_direct_evals']
            res = scipydirect.minimize(optimiser_func,
                                       self.bounds,
                                       maxf=n_direct_evals)

        elif acq_opt_params['method'] == 'multigrad':
            num_restarts = acq_opt_params['num_restarts']
            minimize_options = acq_opt_params.get('minimize_options')

            res = minimize_with_restarts(optimiser_func,
                                         self.bounds,
                                         num_restarts=num_restarts,
                                         hard_bounds=self.bounds,
                                         minimize_options=minimize_options,
                                         verbose=False)

        elif acq_opt_params['method'] == 'samplegrad':
            minimize_options = acq_opt_params.get('minimize_options')
            num_samples = acq_opt_params.get('num_samples', 1000)
            num_local = acq_opt_params.get('num_local', 5)
            num_chunks = acq_opt_params.get('num_chunks', 5)
            evaluate_sequentially = acq_opt_params.get(
                'evaluate_sequentially', True)

            res = sample_then_minimize(
                optimiser_func,
                self.bounds,
                num_samples=num_samples,
                num_local=num_local,
                num_chunks=num_chunks,
                minimize_options=minimize_options,
                evaluate_sequentially=evaluate_sequentially,
                verbose=False)
        else:
            raise NotImplementedError

        best_x = np.atleast_2d(res.x)

        # Return the correct value for the acquisition function depending on
        # whether we minimized or maximized
        if max_or_min == 'max':
            best_eval = best_x, -res.fun
        elif max_or_min == 'min':
            best_eval = best_x, res.fun
        else:
            raise NotImplementedError

        return best_eval
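For reference, parameter dictionaries that would exercise each branch above (all values illustrative):

acq_opt_params = {'method': 'direct', 'n_direct_evals': 2000}
acq_opt_params = {'method': 'multigrad', 'num_restarts': 10}
acq_opt_params = {'method': 'samplegrad', 'num_samples': 2000, 'num_local': 5,
                  'num_chunks': 5, 'evaluate_sequentially': True}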
Example #22
 def calc_next_point(self):
     self._update_surface_model()  # 1)
     self._update_acquisition_function()  # 2)
     af = lambda x: -1 * self.acquisition_function(x)  # 3) A
     next_point = minimize(af, self.bounds)  # 3) B
     return next_point
Example #23
datadir = 'data/'

paramscomb = params_combination((aenvs, pienvs, maxfs, deltatols))
niter = int(niter)
nburnin = int(nburnin)
if parametercheck(datadir, sys.argv, paramscomb, nbatch):
    njob = int(sys.argv[1])
    data = []
    for i in progressbar(range(nbatch)):
        n = (njob-1) * nbatch + i
        aenv, pienv, maxf, deltatol = paramscomb[n]
        if disp:
            print(paramscomb[n])
        args = lambda_, mus, cup, aenv, pienv, niter, nburnin
        bounds = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0], [0.0, 1.0]])
        res1 = scipydirect.minimize(minus(evolimmune.Lambda_pq),
                                    maxf=maxf, args=args, bounds=bounds,
                                    disp=disp)
        if disp:
            print('results of first phase optimization', res1)
        res2 = noisyopt.minimize(minus(evolimmune.Lambda_pq),
                                 res1.x,
                                 scaling=(1.0, 1.0, 5.0, 1.0),
                                 args=args, bounds=bounds,
                                 deltainit=deltainit,
                                 deltatol=deltatol,
                                 alpha=alpha,
                                 feps=feps,
                                 errorcontrol=True,
                                 paired=True,
                                 disp=disp)
        res2.x[res2.free] = np.nan
        p, q, epsilon, pup = res2.x
Example #24
    def _get_y_min(self):
        """
        Get y_min for EI computation

        Returns the smallest observed y of the model. This could later be
        changed to evaluate the lowest y over the whole domain.
        """
        if self.verbose:
            print("Finding y_min")

        def optimiser_func(x):
            return self.surrogate.predict(np.atleast_2d(x))[0].flatten()

        if self.y_min_opt_params['method'] == 'standard':
            idx = np.argmin(self.surrogate.Y_raw)
            x_min = self.surrogate.X[idx]
            y_min = self.surrogate.Y_raw[idx]
        elif self.y_min_opt_params['method'] == 'direct':
            def optimiser_func(x):
                return self.surrogate.predict(np.array([x]))[0]

            n_direct_evals = self.y_min_opt_params['n_direct_evals']
            res = scipydirect.minimize(optimiser_func,
                                       self.bounds,
                                       maxf=n_direct_evals)
            x_min = res.x
            y_min = res.fun
        elif self.y_min_opt_params['method'] == 'multigrad':

            num_restarts = self.y_min_opt_params['num_restarts']
            res = minimize_with_restarts(optimiser_func, self.bounds,
                                         num_restarts=num_restarts,
                                         verbose=False)
            x_min = res.x
            y_min = res.fun

        elif self.y_min_opt_params['method'] == 'samplegrad':
            op = self.y_min_opt_params
            minimize_options = op.get('minimize_options')
            num_samples = op.get('num_samples', 1000)
            num_local = op.get('num_local', 5)
            evaluate_sequentially = op.get('evaluate_sequentially', False)

            res = sample_then_minimize(
                optimiser_func,
                self.bounds,
                num_samples=num_samples,
                num_local=num_local,
                minimize_options=minimize_options,
                evaluate_sequentially=evaluate_sequentially,
                extra_locs=self.surrogate.X,
                verbose=False)

            x_min = res.x
            y_min = res.fun

        else:
            raise NotImplementedError

        _, var_at_y_min = self.surrogate.predict(np.atleast_2d(x_min))

        if self.verbose:
            print(f"Current y_min = {y_min}")

        return x_min, y_min.item(), var_at_y_min.item()
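Matching parameter dictionaries for the y_min search above (values illustrative):

y_min_opt_params = {'method': 'standard'}
y_min_opt_params = {'method': 'direct', 'n_direct_evals': 1000}
y_min_opt_params = {'method': 'multigrad', 'num_restarts': 10}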
Example #25
from mpl_toolkits.mplot3d import axes3d
import matplotlib.pyplot as plt
from matplotlib import cm
import numpy as np

from scipydirect import minimize

def obj(x):
    """Six-hump camelback function"""
    
    x1 = x[0]
    x2 = x[1]
    
    f = (4 - 2.1*(x1*x1) + (x1*x1*x1*x1)/3.0)*(x1*x1) + x1*x2 + (-4 + 4*(x2*x2))*(x2*x2)
    return f

bounds = [(-3, 3), (-2, 2)]
res = minimize(obj, bounds)

#
# Plot the results.
#
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')

x = res.x
X, Y = np.mgrid[x[0]-1:x[0]+1:50j, x[1]-1:x[1]+1:50j]
Z = np.zeros_like(X)

for i in range(X.size):
    Z.ravel()[i] = obj([X.flatten()[i], Y.flatten()[i]])
    
ax.plot_wireframe(X, Y, Z, rstride=1, cstride=1, cmap=cm.jet)
Example #26
    def optimize(
        self, sample_x: np.ndarray, sample_y: np.ndarray
    ) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray,
               np.ndarray, np.ndarray, np.ndarray]:
        """finds the optima of the acquisition function. For that defines a internal function that evaluates
        the acquisition function and returns it's negative value. Then executes the scipydirect minimization
        algorithm to find the maxima of the acquisition function. While doing this keeps track of evaluated points

        :param sample_x: input of samples
        :param sample_y: outputs of samples
        :return: input value with maximal acquisition value, mu value for point with maximal acquisition value,
            sigma value for point with maximal acquisition value, maximal acquisition function value, all mu estimates,
            all sigma estimates, all acq. values evaluated, all input samples for the acquisition function
        """
        incumbent = np.max(sample_y)
        incumbent_x = sample_x[np.argmax(sample_y)]
        mu = []
        sigma = []
        acq = []
        xs = []

        def func(x: np.ndarray, inc: float, mu_f: list, sigma_f: list,
                 acq_f: list, x_f: list) -> float:
            """function that is internally used as it is passed to the minimize
            takes input and calculates the respective acquisition value and negates it as minimize is used
            but the original problem is maximization

            :param x: input value
            :param inc: incumbent
            :param mu_f: list of mu values to append mu-prediction to for persistence
            :param sigma_f: list of sigma values to append sigma-prediction to for persistence
            :param acq_f: list of acquisition function values to append to for persistence
            :param x_f: list of acquisition function sample inputs to append to for persistence
            :return: the negated acquisition value
            """
            m, s = self.context.estimator.regress(np.array([x]))
            mu_f.append(m)
            sigma_f.append(s)
            a = self.context.acq.evaluate(m, s, inc)
            acq_f.append(a)
            x_f.append(x)
            if len(x_f) % 200 == 0:
                gc.collect()

            return -a

        old_stdout = sys.stdout  # backup current stdout
        sys.stdout = open(os.devnull, "w")
        result = minimize(func,
                          args=(incumbent, mu, sigma, acq, xs),
                          bounds=np.vstack((self.lower_search_bounds,
                                            self.upper_search_bounds)).T,
                          maxT=100,
                          maxf=5000)
        sys.stdout = old_stdout  # reset old stdout

        new_sample_x = result.x
        new_sample_mu, new_sample_sigma = self.context.estimator.regress(
            np.array([new_sample_x]))
        new_sample_acq = result.fun

        return new_sample_x, new_sample_mu, new_sample_sigma, new_sample_acq, mu, sigma, acq, xs
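The negate-and-record pattern above can be reproduced standalone; the acquisition below is a hypothetical stand-in for the estimator and acquisition held in self.context:

import numpy as np
from scipydirect import minimize

history = []

def neg_acq(x):
    # stand-in acquisition, negated so that minimize() effectively maximizes it
    value = float(np.exp(-np.sum(np.asarray(x) ** 2)))
    history.append((np.array(x, copy=True), value))  # record every evaluation
    return -value

res = minimize(neg_acq, bounds=[(-2.0, 2.0), (-2.0, 2.0)], maxT=100, maxf=5000)
print(res.x, -res.fun, len(history))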