def increase_p(prev_best_angles, h, jr, jc, qubits, p, num_trials):
    next_guess = [0.0] * (2 * p + 2)
    prev_x_rots = prev_best_angles[0::2]
    prev_z_rots = prev_best_angles[1::2]
    next_x_guess = [0.0] * (p + 1)
    next_z_guess = [0.0] * (p + 1)
    next_x_guess[-1] = prev_x_rots[-1]
    next_x_guess[0] = prev_x_rots[0]
    next_z_guess[-1] = prev_z_rots[-1]
    next_z_guess[0] = prev_z_rots[0]

    upper_bound = [8] * (p + 1) * 2
    lower_bound = [-8] * (p + 1) * 2
    upper_bound[0::2] = [1.1] * (p + 1)
    lower_bound[0::2] = [-0.1] * (p + 1)
    bound = (np.array(lower_bound), np.array(upper_bound))

    for i in range(1, p):
        next_x_guess[i] = prev_x_rots[i - 1] * float(i) / float(
            p) + prev_x_rots[i] * float(p - i) / float(p)
        next_z_guess[i] = prev_z_rots[i - 1] * float(i) / float(
            p) + prev_z_rots[i] * float(p - i) / float(p)
    next_guess[0::2] = next_x_guess
    next_guess[1::2] = next_z_guess
    res = pybobyqa.solve(cost_function_multistep,
                         np.array(next_guess) * 1.0,
                         args=(h, jr, jc, qubits, num_trials, p + 1),
                         bounds=bound,
                         maxfun=1000)
    cost = res.f
    ground_prob = ground_state_prob(res.x, h, jr, jc, qubits, p + 1)  # depth is now p + 1
    return cost, ground_prob, res.x
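A minimal, self-contained sketch of the interpolation step above (the "INTERP" warm-start heuristic: p optimised angles are stretched into p + 1 initial guesses, keeping the endpoints fixed); cost_function_multistep and ground_state_prob are assumed to be defined elsewhere in the same module.

import numpy as np

def interp_angles(prev, p):
    # stretch p optimised angles into p + 1 starting guesses, keeping the endpoints
    nxt = np.zeros(p + 1)
    nxt[0], nxt[-1] = prev[0], prev[-1]
    for i in range(1, p):
        nxt[i] = prev[i - 1] * i / p + prev[i] * (p - i) / p
    return nxt

print(interp_angles(np.array([0.1, 0.5, 0.9]), 3))  # approximately [0.1, 0.3667, 0.6333, 0.9]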
Example n. 2
    def optimize(self, observable, buffer, optimizer_args, execParams):
        super().optimize(observable, buffer, optimizer_args, execParams)

        import pybobyqa

        if 'options' in self.opt_args:
            base_args = self.opt_args['options']
            opt_result = pybobyqa.solve(self.energy, self.init_args, **base_args)
        else:
            opt_result = pybobyqa.solve(self.energy, self.init_args, **self.opt_args)

        # Optimizer adds the results to the buffer automatically
        buffer.addExtraInfo('vqe-energies', self.energies)
        buffer.addExtraInfo('vqe-parameters', self.angles)
        optimal_angles = [float(x) for x in self.angles[self.energies.index(min(self.energies))].split(",")]
        buffer.addExtraInfo('vqe-angles', optimal_angles)
        buffer.addExtraInfo('vqe-energy', min(self.energies))
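A minimal, self-contained sketch of the two call paths above, with a toy objective standing in for self.energy; maxfun and rhobeg are standard pybobyqa.solve keyword arguments.

import numpy as np
import pybobyqa

def energy(x):
    return float(np.sum(x ** 2))

init_args = np.array([0.5, -0.3])
opt_args = {'options': {'maxfun': 200, 'rhobeg': 0.1}}

if 'options' in opt_args:
    opt_result = pybobyqa.solve(energy, init_args, **opt_args['options'])
else:
    opt_result = pybobyqa.solve(energy, init_args, **opt_args)
print(opt_result.f, opt_result.x)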
Example n. 3
    def otimizar(self, p_inicial):
        # "optimize": run Py-BOBYQA on the Himmelblau objective from the given
        # initial point, timing the run and recording the solver statistics.
        self.ponto_inicial = np.array(p_inicial)  # initial point

        self.iniciar_tempo()  # start timer
        sol = pybobyqa.solve(self.func_himmelblau, self.ponto_inicial)
        self.finalizar_tempo()  # stop timer

        self.ponto_final = sol.x         # final point
        self.valor_final = sol.f         # final objective value
        self.chamadas_func_obj = sol.nf  # number of objective evaluations
        self.num_iteracoes = sol.nruns   # number of optimisation runs
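For reference, func_himmelblau presumably evaluates Himmelblau's function f(x, y) = (x^2 + y - 11)^2 + (x + y^2 - 7)^2; a stand-alone equivalent of the call above:

import numpy as np
import pybobyqa

def himmelblau(x):
    return (x[0] ** 2 + x[1] - 11) ** 2 + (x[0] + x[1] ** 2 - 7) ** 2

sol = pybobyqa.solve(himmelblau, np.array([0.0, 0.0]))
print(sol.x, sol.f, sol.nf, sol.nruns)  # minimiser, value, evaluations, runs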
Example n. 4
 def runTest(self):
     # n, m = 2, 2
     x0 = np.array([-1.2, 1.0])
     np.random.seed(0)
     soln = pybobyqa.solve(rosenbrock, x0, npt=6)
     self.assertTrue(array_compare(soln.x, np.array([1.0, 1.0]), thresh=1e-4), "Wrong xmin")
     self.assertTrue(array_compare(soln.f, rosenbrock(soln.x), thresh=1e-10), "Wrong fmin")
     self.assertTrue(array_compare(soln.gradient, rosenbrock_gradient(soln.x), thresh=1e-2), "Wrong gradient")
     # Hessian entries are quite large, O(100-1000), so can have a fairly large tolerance
     # self.assertTrue(array_compare(soln.hessian, rosenbrock_hessian(soln.x), thresh=1e-0), "Wrong Hessian")
     self.assertLessEqual(np.max(np.abs(rosenbrock_hessian(soln.x) / soln.hessian)) - 1, 1e-1, "Wrong Hessian")
     self.assertTrue(abs(soln.f) < 1e-10, "Wrong fmin")
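array_compare is not shown in any of these snippets; a plausible helper matching its use in the assertions (hypothetical, inferred from the calls above):

import numpy as np

def array_compare(x, y, thresh=1e-8):
    # true when the two arrays agree elementwise within an absolute tolerance
    return np.max(np.abs(np.asarray(x) - np.asarray(y))) < thresh

Example n. 5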
    def blackbox_optimizer_ordered_domain(self, iteration, ord_domain):
        # build new inputs, based on current variable value
        var = self.real_modifier.copy()
        var_size = self.real_modifier.size

        NN = self.var_list.size
        nn = self.batch_size

        if ((iteration + 1) * nn <= NN):
            var_indice = ord_domain[iteration * nn:(iteration + 1) * nn]
        else:
            var_indice = ord_domain[list(range(iteration * nn, NN)) + list(
                range(0, (self.batch_size -
                          (NN - iteration * nn))))]  #check if this is true

        indice = self.var_list[var_indice]
        opt_fun = Objfun(lambda c: self.sess.run(
            [self.loss],
            feed_dict={
                self.use_log: self.use_log2,
                self.modifier: vec2mod(c, indice, var)
            })[0])

        x_o = np.zeros(self.batch_size, )
        b = self.modifier_up[indice]
        a = self.modifier_down[indice]

        soln = pybobyqa.solve(opt_fun,
                              x_o,
                              rhobeg=self.L_inf / 3,
                              bounds=(a, b),
                              maxfun=self.q + 5,
                              npt=self.q + 1)
        # print(soln)
        evaluations = soln.nf
        # adjust sample probability, sample around the points with large gradient
        nimgs = vec2mod(soln.x, indice, var)

        if self.real_modifier.shape[0] > self.resize_init_size:
            self.sample_prob = self.get_new_prob(self.real_modifier)
            self.sample_prob = self.sample_prob.reshape(var_size)

        summary = opt_fun.get_summary(with_xs=False)

        distance = self.sess.run(self.distance,
                                 feed_dict={
                                     self.use_log: self.use_log2,
                                     self.modifier: nimgs
                                 })

        return soln.f, evaluations, nimgs, summary
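Objfun, used here and in several later snippets, wraps the loss so that every evaluation is recorded; a minimal sketch of such a wrapper (hypothetical, covering only the __call__ and get_summary usage seen in these examples; the dvals field used later is omitted):

import numpy as np

class Objfun:
    # callable wrapper that records every objective evaluation
    def __init__(self, f):
        self._f = f
        self._fvals = []

    def __call__(self, x):
        val = self._f(x)
        self._fvals.append(val)
        return val

    def get_summary(self, with_xs=False):
        n = len(self._fvals)
        return {'fvals': np.array(self._fvals), 'neval': np.arange(1, n + 1)}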
Example n. 6
 def runTest(self):
     # n, m = 2, 2
     x0 = np.array([-1.2, 0.7])  # standard start point too close to upper bounds
     lower = np.array([-2.0, -2.0])
     upper = np.array([0.9, 0.9])
     xmin = np.array([0.9, 0.81])  # approximate
     fmin = rosenbrock(xmin)
     np.random.seed(0)
     soln = pybobyqa.solve(rosenbrock, x0, bounds=(lower, upper))
     self.assertTrue(array_compare(soln.x, xmin, thresh=1e-2), "Wrong xmin")
     self.assertTrue(abs(soln.f - fmin) < 1e-4, "Wrong fmin")
     self.assertTrue(array_compare(soln.gradient, rosenbrock_gradient(soln.x), thresh=1e-2), "Wrong gradient")
     # Hessian entries are quite large, O(100-1000), so can have a fairly large tolerance (use relative terms)
     self.assertLessEqual(np.max(np.abs(rosenbrock_hessian(soln.x) / soln.hessian)) - 1, 1e-1, "Wrong Hessian")
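Example n. 7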
def optimal_angle_solver(
        qubit_ring: QubitRing,
        p_val: int,
        num_inits: int = 20) -> Tuple[float, List[Tuple[float, float]]]:
    """Solves for the optimal approximation ratio at a given circuit depth (p).

    Args:
        qubit_ring: Stores information of the Ising Hamiltonian. See
            implementation of the QubitRing class.
        p_val: Circuit depth (number of (\gamma, \beta) pairs).
        num_inits: How many restarts with random initial guesses.

    Returns:
        best_ratio: The best approximation ratio at circuit depth p.
        best_angles: The optimal angles that give the best approximation ratio.
    """
    energy_extrema, indices, e_list = qubit_ring.get_all_energies()

    def cost_function(angles_list: Sequence[float]) -> float:
        angles_list = [(angles_list[k], angles_list[k + 1])
                       for k in numpy.array(range(p_val)) * 2]
        _, state_probs = qubit_ring.compute_wavefunction(angles_list)
        return -float(numpy.sum(e_list * state_probs))

    lower_bounds = numpy.array([0.0] * (2 * p_val))
    upper_bounds = numpy.array([1.0, 2.0] * p_val) * 16

    best_cost = None
    best_angles = None

    for i in range(num_inits):
        guess = numpy.random.uniform(0, 4, p_val * 2)
        guess[0::2] = guess[0::2] / 2.0
        res = pybobyqa.solve(cost_function,
                             guess,
                             bounds=(lower_bounds, upper_bounds),
                             maxfun=1000)
        cost = res.f
        if best_cost is None or cost < best_cost:
            best_cost = cost
            best_angles = res.x

    best_angles = [(best_angles[i], best_angles[i + 1])
                   for i in numpy.array(range(p_val)) * 2]

    e_max, e_min = energy_extrema['E_max'], energy_extrema['E_min']
    best_ratio = (-best_cost - e_min) / (e_max - e_min)

    return best_ratio, best_angles
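The restart loop above is a generic pattern: keep the best of several bounded Py-BOBYQA runs started from random guesses. A self-contained version on a toy multimodal function:

import numpy as np
import pybobyqa

def rastrigin(x):
    return float(10 * len(x) + np.sum(x ** 2 - 10 * np.cos(2 * np.pi * x)))

lower, upper = -5.12 * np.ones(2), 5.12 * np.ones(2)
best = None
for _ in range(20):
    guess = np.random.uniform(lower, upper)
    res = pybobyqa.solve(rastrigin, guess, bounds=(lower, upper), maxfun=200)
    if best is None or res.f < best.f:
        best = res
print(best.x, best.f)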
Example n. 8
def minimize(func, x0, bounds, budget, optin, **optkwds):
     objfunc = ObjectiveFunction(func, {'simple_function' : True })

     # massage bounds (force reshaping as bobyqa is picky)
     lower = numpy.asarray(bounds[:,0]).reshape(-1)
     upper = numpy.asarray(bounds[:,1]).reshape(-1)

     x0 = numpy.asarray(x0).reshape(-1)

     # actual Py-BOBYQA call
     result = pybobyqa.solve(
        objfunc, x0, maxfun=budget, bounds=(lower,upper), seek_global_minimum=True, objfun_has_noise=True, **optkwds)

     # get collected history and repackage return result
     return Result(result.f, result.x), objfunc.get_history()
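ObjectiveFunction and Result are assumed helpers of the surrounding package; the wrapper itself only reshapes an (n, 2) bounds array into the (lower, upper) pair that Py-BOBYQA expects. A hypothetical call:

import numpy as np

bounds = np.array([[-2.0, 2.0], [-2.0, 2.0]])  # one (low, high) row per variable
x0 = np.array([0.5, 0.5])
result, history = minimize(lambda x: float(np.sum(x ** 2)), x0, bounds,
                           budget=100, optin=None)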
Example n. 9
 def runTest(self):
     n, m = 2, 5
     np.random.seed(0)  # (fixing random seed)
     A = np.random.rand(m, n)
     b = np.random.rand(m)
     objfun = lambda x: sumsq(np.dot(A, x) - b)
     gradfun = lambda x: 2.0 * np.dot(A.T, np.dot(A,x)) - 2.0 * np.dot(A.T, b)
     hessfun = 2.0 * np.dot(A.T, A)  # constant Hessian
     xmin = np.linalg.lstsq(A, b, rcond=None)[0]
     fmin = objfun(xmin)
     x0 = np.zeros((n,))
     np.random.seed(0)
     soln = pybobyqa.solve(objfun, x0, npt=n+1)
     self.assertTrue(array_compare(soln.x, xmin, thresh=1e-2), "Wrong xmin")
     self.assertTrue(array_compare(soln.gradient, gradfun(soln.x), thresh=1e-2), "Wrong gradient")
     # self.assertTrue(array_compare(soln.hessian, hessfun, thresh=1e-1), "Wrong Hessian")  # not for linear models
     self.assertTrue(abs(soln.f - fmin) < 1e-4, "Wrong fmin")
Example n. 10
    def bobyqa_cube_factory(objective, n_trials, n_dim, with_count, **kwargs):
        global feval_count
        feval_count = 0

        lb = np.array([0. for _ in range(n_dim)])
        ub = np.array([1. for _ in range(n_dim)])
        x0 = np.array([0.5]*n_dim)

        def _objective(u) -> float:
            global feval_count
            feval_count += 1
            return objective(u)

        soln = solve(_objective, x0, bounds=(lb, ub), maxfun=n_trials, do_logging=False)

        best_x, best_val = list(soln.x), soln.f

        return (best_val, best_x, feval_count) if with_count else (best_val, best_x)
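A hypothetical call to the factory above, assuming it is available at module level and that solve was imported via "from pybobyqa import solve":

import numpy as np

def sphere(u):
    return float(np.sum((np.asarray(u) - 0.3) ** 2))

best_val, best_x, n_evals = bobyqa_cube_factory(sphere, n_trials=50, n_dim=3,
                                                with_count=True)

Example n. 11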
    def blackbox_optimizer(self, img, img0):
        # build new inputs, based on current variable value
        var = 0*np.array([img])
        nn = self.batch_size
        x_o = np.zeros(nn,)
        Random_Matrix = np.random.normal(size=(self.var_size_b,nn))*self.delta
        # Define the bounds of the optimisation variable
        a = -np.ones((nn,))
        b = np.ones((nn,))

        bb = self.modifier_up
        aa = self.modifier_down
        # define the loss function
        opt_fun = Objfun(lambda c: self.loss_f(img, vec2modMatRand3(c, Random_Matrix, bb, aa, var), 
                                               only_loss=False)
                        )
        initial_loss = opt_fun(x_o)
        
        user_params = {'init.random_initial_directions':False, 
                       'init.random_directions_make_orthogonal':False}
        soln = pybobyqa.solve(opt_fun, x_o, rhobeg=np.min(b-a)/3,
                              bounds=(a, b), maxfun=nn*self.max_f,
                              rhoend=np.min(b-a)/6,
                              npt=nn+1, scaling_within_bounds=False,
                              user_params=user_params)
        summary = opt_fun.get_summary(with_xs=False)
        minimiser = np.min(summary['fvals'])
        distances = np.array(summary['dvals'])
        early_discovery = 0
        if np.any(distances <= 0):
            # do not count the evaluations made after an example with
            # distance <= 0 (i.e. an adversarial example) was found.
            early_discovery = (np.max(summary['neval']) -
                               summary['neval'][np.where(distances <= 0)[0][0]] + 2)
            print('Early discovery made at', early_discovery)

        real_oe = self.loss_f(img, vec2modMatRand3(soln.x, Random_Matrix, bb, aa, var), only_loss=True)
        if (minimiser != real_oe) and (initial_loss > minimiser):
            print('[WARNING] BOBYQA did not return the minimal sampled function value.')
        evaluations = soln.nf

        nimgs = vec2modMatRand3(soln.x, Random_Matrix, bb, aa, var)
        distance = self.loss_f(img,nimgs, only_loss=True)
        return distance[0], evaluations + 2 - early_discovery, nimgs, summary
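Example n. 12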
def initial_guess_sweep_bobyqa(h, jr, jc, qubits, p, num_trials=1000):
    best_cost = 1000
    best_angles = None
    best_ground_prob = None
    initial_guesses = list(
        itertools.product(
            [[0.8, 0.3], [0.8, 0.25], [0.8, 0.35], [0.9, 0.3], [0.9, 0.25],
             [0.9, 0.35], [0.95, 0.3], [0.95, 0.25], [0.95, 0.35], [0.85, 0.3],
             [0.85, 0.25], [0.85, 0.35]],
            repeat=p))
    upper_bound = [8] * p * 2
    lower_bound = [-8] * p * 2
    upper_bound[0::2] = [1.1] * p
    lower_bound[0::2] = [-0.1] * p
    bound = (np.array(lower_bound), np.array(upper_bound))
    result_gnd_prob = []
    result_cost_func = []
    for guess in initial_guesses:
        guess = np.array(sum(guess, []))
        res = pybobyqa.solve(cost_function_multistep,
                             guess,
                             args=(h, jr, jc, qubits, num_trials, p),
                             bounds=bound,
                             maxfun=1000)
        cost = res.f
        ground_prob = ground_state_prob(res.x, h, jr, jc, qubits, p)

        if cost < best_cost:
            best_cost = cost
            best_angles = res.x
            best_ground_prob = ground_prob
        print("Current solution is {}".format(cost))
        print("Current best solution is {}".format(best_cost))
        print("Current ground state prob is {}".format(ground_prob))
        print("Current best ground state prob is {}".format(best_ground_prob))
        print("")
        result_gnd_prob.append(ground_prob)
        result_cost_func.append(cost)

    print("Final best solution is {}".format(best_cost))

    return best_angles, best_ground_prob, best_cost, result_gnd_prob, result_cost_func
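The guess grid above is the Cartesian product of twelve (gamma, beta) pairs repeated p times, i.e. 12**p restarts; in miniature:

import itertools

pairs = [[0.8, 0.3], [0.9, 0.25]]  # two of the twelve pairs, for brevity
guesses = list(itertools.product(pairs, repeat=2))
print(len(guesses), guesses[0])  # 4 guesses, each of the form ([g1, b1], [g2, b2])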
Example n. 13
def search_bobyqa(arguments, state, log_dir):
    arguments.logger = f"{log_dir}/{EVAL_LOG}"
    if not arguments.continue_iter:
        make_logger(log_dir)

    lower = np.array([0 for _ in range(state.size)])
    upper = np.array([1 for _ in range(state.size)])

    for i in range(arguments.pop_size):
        x0 = np.array(state.get_random_individual())

        soln = pybobyqa.solve(evaluator_wrapper,
                              x0,
                              args=(
                                  arguments,
                                  state,
                              ),
                              maxfun=arguments.max_iter,
                              rhobeg=0.3,
                              bounds=(lower, upper),
                              seek_global_minimum=True)
        print(soln)
Example n. 14
def main():
    lowerbound=-3*np.ones(50*seq_length)
    upperbound=3*np.ones(50*seq_length)
    z0=np.load('Healthy_Z1000.npy')
    print('Z0 is',z0)
    z0=z0.reshape((50*seq_length))
    print('Z0 is', z0)
    soln=pybobyqa.solve(optimizing_func,z0,maxfun=100)
    print(soln)
    print('X part is',soln.x)

    Z = soln.x
    #Z=z0
    Z = Z.reshape(seq_length, 1, -1)
    model = SeqVaeFull()
    modelfull = torch.load('output/modelGaussFull1000', map_location={'cuda:0': 'cpu'})
    model.load_state_dict(modelfull['state_dict'])
    # ----Loading of previous model ends-----
    # ----- Read H,Y,beta-----
    pathH = '/Users/sg9872/Desktop/Research/Data/Halifax-EC/Simulation/1862/Input/'
    pathU = '/Users/sg9872/Desktop/Research_Projects/Sequence_VAE/BigData/'
    H = readH(pathH)
    U = readU(pathU)
    Y = genNoisy(np.matmul(H, U))
    beta = 1e5

    Mu, logvar = model.decode(Variable(torch.FloatTensor(Z)))  # Converting into torch variable and decoding
    Sigma = logvar.exp()
    Mu = (Mu.data.view(seq_length, -1)).numpy()
    Sigma = (Sigma.data.view(seq_length, -1)).numpy()
    MuZ = Mu.transpose()
    SigmaZ = Sigma.transpose()
    # Sampling ends---------
    #print(MuZ.shape, H.shape)

    MeanU, logdetPrecisionU = Posterior(MuZ, SigmaZ, H, Y, beta) # Posterior calculation of U given Z, Y
    print('Error is',np.linalg.norm(U-MeanU))
    sio.savemat('Useg12exc71solBob.mat', {"U": MeanU})
Example n. 15
    def run(self):
        """
        Runs `scipy.Minimize`
        """
        results = []
        successes = []

        def minuslogp_transf(x):
            return -self.logp(self.inv_affine_transform(x))

        for i, initial_point in enumerate(self.initial_points):

            self.log.debug("Starting minimization for starting point %s.", i)

            self._affine_transform_baseline = initial_point
            initial_point = self.affine_transform(initial_point)
            np.testing.assert_allclose(initial_point, np.zeros(initial_point.shape))
            bounds = np.array(
                [self.affine_transform(self._bounds[:, i]) for i in range(2)]).T

            try:
                # Configure method
                if self.method.lower() == "bobyqa":
                    self.kwargs = {
                        "objfun": minuslogp_transf,
                        "x0": initial_point,
                        "bounds": np.array(list(zip(*bounds))),
                        "maxfun": self.max_iter,
                        "rhobeg": 1.,
                        "do_logging": (self.log.getEffectiveLevel() == logging.DEBUG)}
                    self.kwargs = recursive_update(self.kwargs,
                                                   self.override_bobyqa or {})
                    self.log.debug("Arguments for pybobyqa.solve:\n%r",
                                   {k: v for k, v in self.kwargs.items() if
                                    k != "objfun"})
                    result = pybobyqa.solve(**self.kwargs)
                    success = result.flag == result.EXIT_SUCCESS
                    if not success:
                        self.log.error("Finished unsuccessfully. Reason: "
                                       + _bobyqa_errors[result.flag])
                else:
                    self.kwargs = {
                        "fun": minuslogp_transf,
                        "x0": initial_point,
                        "bounds": bounds,
                        "options": {
                            "maxiter": self.max_iter,
                            "disp": (self.log.getEffectiveLevel() == logging.DEBUG)}}
                    self.kwargs = recursive_update(self.kwargs, self.override_scipy or {})
                    self.log.debug("Arguments for scipy.optimize.Minimize:\n%r",
                                   {k: v for k, v in self.kwargs.items() if k != "fun"})
                    result = optimize.minimize(**self.kwargs)
                    success = result.success
                    if not success:
                        self.log.error("Finished unsuccessfully.")
            except:
                self.log.error("Minimizer '%s' raised an unexpected error:", self.method)
                raise
            results += [result]
            successes += [success]

        self.process_results(*mpi.zip_gather(
            [results, successes, self.initial_points,
             [self._inv_affine_transform_matrix] * len(self.initial_points)]))
Example n. 16
    ),
    1,
)

lower = optim_paras_start * 0.9
upper = optim_paras_start * 1.1

# Log
logging.basicConfig(level=logging.INFO, format="%(message)s")

# Optimize
soln = pybobyqa.solve(
    objective,
    optim_paras_start,
    rhobeg=0.01,
    rhoend=1e-4,
    maxfun=2,
    bounds=(lower, upper),
    scaling_within_bounds=True,
)

print(soln)

# print("")
# print("** SciPy results **")
# print("Solution xmin = %s" % str(soln.x))
# print("Objective value f(xmin) = %.10g" % (soln.fun))
# print("Needed %g objective evaluations" % soln.nfev)
# print("Exit flag = %g" % soln.status)
# print(soln.message)
Example n. 17
def deslant(img: np.ndarray,
            optim_algo: str = 'grid',
            lower_bound: float = -2,
            upper_bound: float = 2,
            num_steps: int = 20,
            bg_color=255) -> DeslantRes:
    """
    Deslants the image by applying a shear transform.

    The function searches for a shear transform that yields many long connected vertical lines.

    Args:
        img: The image to be deslanted with text in black and background in white.
        optim_algo: Specify optimization algorithm searching for the best scoring shear value:
            'grid': Search on grid defined by the bounds and the number of steps.
            'powell': Apply the derivative-free BOBYQA optimizer from Powell within given bounds.
        lower_bound: Lower bound of shear values to be considered by optimizer.
        upper_bound: Upper bound of shear values to be considered by optimizer.
        num_steps: Number of grid points if optim_algo is 'grid'.
        bg_color: Color that is used to fill the gaps of the returned sheared image.

    Returns:
        Object of DeslantRes, holding the deslanted image and (only for optim_algo 'grid') the candidates
        with shear value and score.
    """
    assert img.ndim == 2
    assert img.dtype == np.uint8
    assert optim_algo in ['grid', 'powell']
    assert lower_bound < upper_bound

    # apply Otsu's threshold method to inverted input image
    img_binary = cv2.threshold(255 - img, 0, 255,
                               cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1] // 255

    # variables to be set by optimization method
    best_shear_val = None
    candidates = None

    # compute scores on grid points
    if optim_algo == 'grid':
        step = (upper_bound - lower_bound) / num_steps
        shear_vals = _get_shear_vals(lower_bound, upper_bound, step)
        candidates = [
            Candidate(s, _compute_score(img_binary, s)) for s in shear_vals
        ]
        best_shear_val = sorted(candidates,
                                key=lambda c: c.score,
                                reverse=True)[0].shear_val

    # use Powell's derivative-free optimization method to find best scoring shear value
    elif optim_algo == 'powell':
        bounds = [[lower_bound], [upper_bound]]
        s0 = [(lower_bound + upper_bound) / 2]

        # minimize the negative score
        def obj_fun(s):
            return -_compute_score(img_binary, s)

        # the heuristic to find a global minimum is used, as the negative score contains many small local minima
        res = pybobyqa.solve(obj_fun,
                             x0=s0,
                             bounds=bounds,
                             seek_global_minimum=True)
        best_shear_val = res.x[0]

    res_img = _shear_img(img, best_shear_val, bg_color, cv2.INTER_LINEAR)
    return DeslantRes(res_img, best_shear_val, candidates)
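A minimal usage sketch on a synthetic image (assumes OpenCV and the helpers above are importable; the stroke is drawn slanted so there is something to correct):

import numpy as np

img = np.full((64, 64), 255, dtype=np.uint8)  # white background, as required
for r in range(10, 54):
    img[r, 20 + (54 - r) // 3] = 0            # a slanted black stroke
res = deslant(img, optim_algo='powell')
print(res)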
Example n. 18
            else:
                ssz.append(table1[i,2] + delta[0])
                idx_ssz.append(i)
    ssz = np.asarray(np.sort(ssz))
        
    # table2 = np.concatenate((table2[:122,:],table2[121,:].reshape((1,256)),table2[122:,:]))
    # table3 = np.concatenate((table3[:122,:],table3[121,:].reshape((1,256)),table3[122:,:]))
             
    # best_approx = np.asarray([table1,table2,table3,table4,table5])
    
    best_approx = [np.asarray(Table1['MUX'][idx_ssz])*2*np.pi,np.asarray(Table1['MUY'][idx_ssz])*2*np.pi]

    env = env_mod.OptEnv(ssz[ss], ssz, focusing_list, solver, n_iter, best_approx)
    
    if solver == 'BOBYQA':
        solution = pybobyqa.solve(env.step, ssz[ss], bounds=(env.lower, env.upper), seek_global_minimum=True)
    elif solver == 'ZOOpt':  
        solution = ExpOpt.min(env.step, env.parameter, plot=True)
    elif solver == 'Bayesian':
        env.optimizer.probe(params = ssz[ss], lazy = True)
        env.optimizer.maximize(n_iter = int(env._n_iter*0.7), init_points = int(env._n_iter*0.3) )
        solution = env.optimizer.max
        
    else:
        solution = minimize(env.step, ssz[ss], method=solver, bounds=env.bounds, options={'maxiter': n_iter,
                                                                                    'xtol': 2,
                                                                                    'adaptive': True
                                                                                        })

    
    timer[solver] = env.timer
Example n. 19
# Py-BOBYQA example: minimize the Rosenbrock function
from __future__ import print_function
import numpy as np
import pybobyqa


# Define the objective function
def rosenbrock(x):
    return 100.0 * (x[1] - x[0]**2)**2 + (1.0 - x[0])**2


# Define the starting point
x0 = np.array([-1.2, 1.0])

# Set random seed (for reproducibility)
np.random.seed(0)

# For optional extra output details
# import logging
# logging.basicConfig(level=logging.INFO, format='%(message)s')

# Call Py-BOBYQA
soln = pybobyqa.solve(rosenbrock, x0)

# Display output
print(soln)
Example n. 20
        REML = True
        if REML:
            ld += cholesky(DD).logdet()
            fn -= len(beta)

        deviance = ld + fn * (1. + np.log(2. * np.pi * pwrss) - np.log(fn))
        np.array([deviance]).tofile('deviance-py.bin')
        return deviance

    return devfun


devfun = pls(X, y, Z, Lambdat, thfun)
upper = np.repeat(np.inf, len(theta0))
lower = np.where(theta0, 0, -np.inf)

# defaults used in minqa::bobyqa according to
# https://www.rdocumentation.org/packages/minqa/versions/1.2.4/topics/bobyqa
n = len(theta0)
npt = min(2 * n, n + 2)
rhobeg = min(0.95, 0.2 * np.max(np.abs(theta0)))
rhoend = 1e-6 * rhobeg

soln = pybobyqa.solve(devfun,
                      theta0,
                      bounds=(lower, upper),
                      npt=npt,
                      rhobeg=rhobeg,
                      rhoend=rhoend)
print(soln)
Example n. 21
     'logging.save_diagnostic_info': True,
     'logging.save_xk': True,
     "noise.quit_on_noise_level": False,
     'init.run_in_parallel': True,
     'general.check_objfun_for_overflow': False
 }
 # merge in values from namedSetting into userParams
 namedSettings = optimise.get('BOBYQA_namedSettings', {})
 for k in namedSettings.keys():  # loop over keys
     if not re.search(r'_comment\s*$', k):  # not a comment
         userParams[k] = namedSettings[k]
 solution = pybobyqa.solve(optFunctionBOBYQA,
                           start.values,
                           objfun_has_noise=False,
                           bounds=prange,
                           maxfun=optimise.get('maxfun', 100),
                           rhobeg=optimise.get('rhobeg', 1e-1),
                           rhoend=optimise.get('rhoend', 1e-3),
                           user_params=userParams,
                           scaling_within_bounds=True)
 if solution.flag == solution.EXIT_LINALG_ERROR:  # linear algebra error
     raise np.linalg.LinAlgError  # re-raise the linear algebra error which will trigger doing more runs..
 elif solution.flag not in (solution.EXIT_SUCCESS,
                            solution.EXIT_MAXFUN_WARNING):
     print("bobyqa failed with flag %i error : %s" %
           (solution.flag, solution.msg))
     raise Exception("Problem with bobyqa")
 ## code here will be run when PYBOBYQA has completed. It mostly puts stuff in the final JSON file
 ## so it can easily be looked at for subsequent analysis.
 ## some of it could be done even if Py-BOBYQA did not complete.
 print("BOBYQA completed: Solution status: %s" % (solution.msg))
Example n. 22
def freudenstein_roth(x):
    # Freudenstein & Roth test function: two residuals returned as a sum of squares
    r1 = -13.0 + x[0] + ((5.0 - x[1]) * x[1] - 2.0) * x[1]
    r2 = -29.0 + x[0] + ((1.0 + x[1]) * x[1] - 14.0) * x[1]
    return r1**2 + r2**2


# Define the starting point
x0 = np.array([5.0, -20.0])

# Define bounds (required for global optimization)
lower = np.array([-30.0, -30.0])
upper = np.array([30.0, 30.0])

# Set random seed (for reproducibility)
np.random.seed(0)

print("First run - search for local minimum only")
print("")
soln = pybobyqa.solve(freudenstein_roth, x0, maxfun=500, bounds=(lower, upper))
print(soln)

print("")
print("")

print("Second run - search for global minimum")
print("")
soln = pybobyqa.solve(freudenstein_roth,
                      x0,
                      maxfun=500,
                      bounds=(lower, upper),
                      seek_global_minimum=True)
print(soln)
Example n. 23
# Py-BOBYQA example: minimize the Rosenbrock function with bounds
from __future__ import print_function
import numpy as np
import pybobyqa


# Define the objective function
def rosenbrock(x):
    return 100.0 * (x[1] - x[0]**2)**2 + (1.0 - x[0])**2


# Define the starting point
x0 = np.array([-1.2, 1.0])

# Set random seed (for reproducibility)
np.random.seed(0)

# Define bound constraints (lower <= x <= upper)
lower = np.array([-10.0, -10.0])
upper = np.array([0.9, 0.85])

# For optional extra output details
#import logging
#logging.basicConfig(level=logging.INFO, format='%(message)s')

# Call Py-BOBYQA (with bounds)
soln = pybobyqa.solve(rosenbrock, x0, bounds=(lower, upper))

# Display output
print(soln)
Example n. 24
def rosenbrock_noisy(x):
    # Rosenbrock value perturbed by 1% multiplicative Gaussian noise
    return rosenbrock(x) * (1.0 + 1e-2 * np.random.normal(size=(1, ))[0])


# Define the starting point
x0 = np.array([-1.2, 1.0])

# Set random seed (for reproducibility)
np.random.seed(0)

print("Demonstrate noise in function evaluation:")
for i in range(5):
    print("objfun(x0) = %s" % str(rosenbrock_noisy(x0)))
print("")

# Call Py-BOBYQA
soln = pybobyqa.solve(rosenbrock_noisy, x0)
#soln = pybobyqa.solve(rosenbrock_noisy, x0, objfun_has_noise=True)

# Display output
print(soln)

# Compare with a derivative-based solver
import scipy.optimize as opt
soln = opt.minimize(rosenbrock_noisy, x0)

print("")
print("** SciPy results **")
print("Solution xmin = %s" % str(soln.x))
print("Objective value f(xmin) = %.10g" % (soln.fun))
print("Needed %g objective evaluations" % soln.nfev)
print("Exit flag = %g" % soln.status)
f1 = rosenbrock_f
g1 = rosenbrock_g1
g2 = rosenbrock_g2


aug_functions = PenaltyFunctions(f1, [g1, g2], type_penalty='le', mu=100)  # functools.partial(penalized_objective, f1, [g1, g2], 100)

f_pen = aug_functions.aug_obj

bounds = np.array([[-1.5,1.5],[-1.5,1.5]])
x0 = np.array([0.5,0.5])
user_params = {'logging.save_diagnostic_info': True}
user_params['logging.save_xk'] = True

soln = pybobyqa.solve(f_pen, x0, bounds=bounds.T, user_params=user_params, maxfun=100)


def quadratic_g(x):
    '''
    test constraint
    g(x) <= 0
    '''
    return 1 - x[0] - x[1]

def quadratic_f(x):
    '''
    test objective
    '''
    return x[0]**2 + 10 * x[1]**2 + x[0] * x[1]
f1 = quadratic_f
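PenaltyFunctions comes from the surrounding project; a minimal hand-rolled equivalent for this quadratic test problem (an exterior penalty: the objective plus mu times the squared constraint violation):

import numpy as np
import pybobyqa

def penalised(x, mu=100.0):
    # quadratic_f plus mu * max(g, 0)^2 for the single constraint g(x) <= 0
    return quadratic_f(x) + mu * max(quadratic_g(x), 0.0) ** 2

soln = pybobyqa.solve(penalised, np.array([0.5, 0.5]),
                      bounds=(np.array([-1.5, -1.5]), np.array([1.5, 1.5])))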
Example n. 26
def optimize(inputInfos, outputInfos, aspenModel, calculator, outDir):
    '''
    Parameters
    inputInfos: df, input infos for optimization, columns are ['Input', 'Path', 'Range', 'Fortran']
    outputInfos: df, output infos, columns are ['Output', 'Unit', 'Location']
    aspenModel: instance of Aspen class
    calculator: instance of Excel class
    outDir: str, output directory

    Returns
    solutions: df, index is the outputs, columns are ['Objective'] + the inputs
    '''

    tmpDir = outDir + '/tmp'
    os.makedirs(tmpDir, exist_ok=True)

    ## setting
    inputSettings = inputInfos.copy()
    inputSettings[['LB', 'UB']] = inputSettings['Range'].str.split(
        ',', expand=True).astype(float)  # np.float was removed from NumPy

    outputSettings = outputInfos.copy()
    outputSettings[['Sheet', 'Cell'
                    ]] = outputSettings['Location'].str.split('!', expand=True)

    ## optimization
    lb = inputSettings['LB'].values
    ub = inputSettings['UB'].values

    nvars = inputSettings.shape[0]
    x0 = uniform(low=lb, high=ub, size=nvars)

    rhoend = 0.001  #!
    maxfun = 100  #!

    solutions = pd.DataFrame(columns=['Objective'] +
                             inputSettings['Input'].tolist())
    for idx, row in outputSettings.iterrows():
        output, sheet, cell = row[['Output', 'Sheet', 'Cell']]

        count = 0

        def f(x, aspenModel, calculator, inputSettings, tmpDir):

            # set ASPEN model variables
            for idx, row in inputSettings.iterrows():
                input, path, ifFortran = row[['Input', 'Path', 'Fortran']]

                aspenModel.set_value(path, x[idx], bool(ifFortran))

            # run ASPEN model
            aspenModel.run_model()

            nonlocal count
            count += 1
            tmpFile = '%s/%s.bkp' % (tmpDir, count)
            aspenModel.save_model(tmpFile)

            # run excel calculator
            calculator.load_aspenModel(tmpFile)

            calculator.run_macro('solvedcfror')

            res = float(calculator.get_cell(sheet, loc=cell))

            return res

        basicConfig(filename='%s/%s_opt.log' % (outDir, output),
                    level=INFO,
                    format='%(message)s',
                    filemode='w')

        res = solve(f,
                    x0,
                    args=(aspenModel, calculator, inputSettings, tmpDir),
                    bounds=(lb, ub),
                    rhoend=rhoend,
                    maxfun=maxfun,
                    scaling_within_bounds=True)

        solutions.loc[output, :] = [res.f] + res.x.tolist()

    return solutions
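The expected inputInfos layout can be sketched with a toy DataFrame (hypothetical values; the 'Range' string is what gets split into the LB and UB columns):

import pandas as pd

inputInfos = pd.DataFrame({
    'Input':   ['feed_rate'],
    'Path':    [r'\Data\Blocks\FEED'],  # hypothetical ASPEN variable path
    'Range':   ['0.5,2.0'],             # split into LB and UB
    'Fortran': [0],
})

Example n. 27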
    def blackbox_optimizer_ordered_domain(self, iteration, ord_domain,
                                          Random_Matrix, super_dependency, img,
                                          k, img0):
        # build new inputs, based on current variable value
        times = np.zeros(8, )
        var = 0 * np.array([img])
        # print('the type of var is', type(var[0]))
        # print('the shape of var is',var[0].shape)

        NN = self.var_list.size

        if len(ord_domain) < self.batch_size:
            nn = len(ord_domain)
        else:
            nn = self.batch_size

        # We choose the elements of ord_domain that are inherent to the step. So it is already
        # limited to the variable's dimension
        # print('inner iteration', iteration)
        if (iteration + 1) * nn <= NN:
            var_indice = ord_domain[iteration * nn:(iteration + 1) * nn]
        else:
            var_indice = ord_domain[
                list(range(iteration * nn, NN)) +
                list(range(0, (self.batch_size - (NN - iteration * nn))))]

        # print('======> optimised indices', var_indice)
        indice = self.var_list[var_indice]
        x_o = np.zeros(nn, )
        # Changing the bounds according to the problem being resized or not

        eta_hat = np.array(
            cv2.resize((img - img0)[0], (self.small_x, self.small_y),
                       interpolation=cv2.INTER_LINEAR)).reshape(-1, )

        mod_up = self.modifier_up.reshape((self.size_img, self.size_img, 3))
        mod_up_hat = cv2.resize(mod_up, (self.small_x, self.small_y),
                                interpolation=cv2.INTER_LINEAR).reshape(-1, )
        mod_down = self.modifier_down.reshape(
            (self.size_img, self.size_img, 3))
        mod_down_hat = cv2.resize(mod_down, (self.small_x, self.small_y),
                                  interpolation=cv2.INTER_LINEAR).reshape(
                                      -1, )

        # check if the interpolations are correct
        if self.use_resize:
            a = -np.ones((nn, ))
            b = np.ones((nn, ))

            for i in range(nn):
                # indices = finding_indices(super_dependency.reshape(-1, ),
                x_o[i] = np.clip(
                    eta_hat[indice[i]] /
                    (mod_up_hat[indice[i]] - mod_down_hat[indice[i]]) * 2, -1,
                    1)
                # print(x_o[i])

        else:
            b = self.modifier_up[indice]
            a = self.modifier_down[indice]
        bb = self.modifier_up
        aa = self.modifier_down
        # print(indice)img
        opt_fun = Objfun(lambda c: self.loss_f(img, [
            vec2modMatRand3(c, indice, eta_hat, b, a, bb, aa, self.image_size,
                            self.small_x)
        ],
                                               only_loss=True)[0])
        initial_loss = opt_fun(x_o)
        print('Initial Loss', initial_loss)
        if initial_loss != self.l:
            print('[WARNING] Could not rebuild the perturbation; loss differs by',
                  initial_loss - self.l)
        user_params = {
            'init.random_initial_directions': False,
            'init.random_directions_make_orthogonal': False
        }
        soln = pybobyqa.solve(opt_fun,
                              x_o,
                              rhobeg=np.min(b - a) / 3,
                              bounds=(a, b),
                              maxfun=nn * 1.3,
                              rhoend=np.min(b - a) / 6,
                              npt=nn + 1,
                              scaling_within_bounds=False,
                              user_params=user_params)
        summary = opt_fun.get_summary(with_xs=False)
        minimiser = np.min(summary['fvals'])
        # real_oe = self.loss_f(img, vec2modMatRand3(soln.x, indice, var, Random_Matrix, super_dependency, bb, aa,
        #                                                        self.overshoot), only_loss=True)
        if (minimiser != soln.f):  # and (initial_loss > minimiser):
            print('########################## ERROR: minimiser differs from soln.f')
        # print(a,b)
        evaluations = soln.nf
        # print(soln)
        # print('==========   a  ', a)
        # print('==========   b  ', b)
        # print('========== soln ', soln.x)
        # print(soln)

        nimgs = vec2modMatRand3(soln.x, indice, eta_hat, b, a, bb, aa,
                                self.image_size, self.small_x)

        loss_at_min = opt_fun(soln.x)
        print('SOLUTION ANSWER', minimiser, ' EMPIRICAL ', loss_at_min)

        nimg2 = nimgs.copy()
        nimg2.reshape(
            -1, )[bb - nimgs.reshape(-1, ) < nimgs.reshape(-1, ) -
                  aa] = bb[bb - nimgs.reshape(-1, ) < nimgs.reshape(-1, ) - aa]
        nimg2.reshape(
            -1, )[bb - nimgs.reshape(-1, ) > nimgs.reshape(-1, ) -
                  aa] = aa[bb - nimgs.reshape(-1, ) > nimgs.reshape(-1, ) - aa]

        distance = [minimiser]  #self.loss_f(img,nimgs, only_loss=True)
        distance2 = self.loss_f(img, [nimg2], only_loss=True)
        # print(distance, real_oe)
        if soln.f > initial_loss:
            print('The optimisation is not working. The diff is',
                  initial_loss - soln.f)
            return initial_loss, evaluations + 2, var, times, summary
        elif distance2 < distance:
            print('USING ROUNDED  with loss ', distance2)
            return distance2[0], evaluations + 2, np.array([nimg2
                                                            ]), times, summary
        else:
            # print('The optimisation is working. THe diff is ', initial_loss - soln.f)
            return distance[0], evaluations + 2, np.array([nimgs
                                                           ]), times, summary
Example n. 28

f1 = rosenbrock_f
g1 = rosenbrock_g1
g2 = rosenbrock_g2


f_pen = PenaltyFunctions(f1, [g1, g2], type_penalty='l2', mu=100)  # functools.partial(penalized_objective, f1, [g1, g2], 100)




bounds = np.array([[-1.5,1.5],[-1.5,1.5]])
x0 = np.array([0.5,0.5])

soln = pybobyqa.solve(f_pen, x0, bounds=bounds.T)


#solution1 = BayesOpt().solve(f1, x0, bounds=bounds.T, print_iteration=True, constraints=[g1,g2])
solution = BayesOpt().solve(f1, x0, acquisition='EIC',bounds=bounds.T, print_iteration=True, constraints=[g1, g2], casadi=True)




def quadratic_g(x):
    '''
    test constraint
    g(x) <= 0
    '''
    return 1 - x[0] - x[1]
Example n. 29
    def blackbox_optimizer(self, iteration, ord_domain, super_dependency, img, img0):
        # build new inputs, based on current variable value
        var = 0*np.array([img])
        NN = self.var_list.size
        if len(ord_domain)<self.batch_size:
            nn = len(ord_domain)
        else:
            nn = self.batch_size
        # We choose the elements of ord_domain that are inherent to the step. 
        # So it is already limited to the variable's dimension
        if (iteration+1)*nn <= NN:
            var_indice = ord_domain[iteration*nn: (iteration+1)*nn]
        else:
            var_indice = ord_domain[list(range(iteration*nn, NN))]
            nn = NN - iteration*nn
            # + list(range(0, (self.batch_size-(NN-iteration*nn))))]
        indice = self.var_list[var_indice]
        x_o = np.zeros(nn,)
        # Define the bounds of the optimisation variable
        if self.use_resize:
            a = -np.ones((nn,))
            b = np.ones((nn,))
            # find the initial condition that identifies the previous perturbation
            for i in range(nn):
                indices = finding_indices(super_dependency.reshape(-1, ), indice[i])
                up = self.modifier_up[indices] 
                down = self.modifier_down[indices] 
                max_ind = np.argmax(up-down)
                xs =  np.divide( -(up+down),
                                (up-down))
                x_o[i] = np.clip(xs[max_ind],-1,1)
        else:
            b = self.modifier_up[indice]
            a = self.modifier_down[indice]
        bb = self.modifier_up
        aa = self.modifier_down
        # define the loss function
        opt_fun = Objfun(lambda c: self.loss_f(img, vec2modMatRand3(c, indice, var, super_dependency, bb, aa,
                                                               self.overshoot), only_loss=False))
        initial_loss = opt_fun(x_o)
        if np.abs(initial_loss - self.l) > 10e-6:
            print('[WARNING] Rebuilt initial vector has a loss differing by', initial_loss - self.l)
        user_params = {'init.random_initial_directions':False, 
                       'init.random_directions_make_orthogonal':False}
        soln = pybobyqa.solve(opt_fun, x_o, rhobeg=np.min(b-a)/3,
                              bounds=(a, b), maxfun=nn*self.max_f,
                              rhoend=np.min(b-a)/6,
                              npt=nn+1, scaling_within_bounds=False,
                              user_params=user_params)
        summary = opt_fun.get_summary(with_xs=False)
        minimiser = np.min(summary['fvals'])
        distances = np.array(summary['dvals'])
        early_discovery=0
        if np.any(distances<=0):
            # not counting the evaluations done after having found an example
            # for which distance <=0 i.e. an adversarial ex was found.
            early_discovery = (np.max(summary['neval']) - 
                               summary['neval'][np.where(distances<=0)[0][0]] + 2)
            print('Early Discover made at ', early_discovery)

        real_oe = self.loss_f(img, vec2modMatRand3(soln.x, indice, var, super_dependency, bb, aa,
                                                    self.overshoot), only_loss=True)
        if (minimiser != real_oe) and (initial_loss > minimiser):
            print('[WARNING] BOBYQA did not return the minimal sampled function value.')
        evaluations = soln.nf

        nimgs = vec2modMatRand3(soln.x, indice, var, super_dependency, bb, aa, self.overshoot)
        distance = self.loss_f(img,nimgs, only_loss=True)
        if self.rounding:
            # checking if the rounded image works better
            nimg2 = nimgs.copy()
            nimg2.reshape(-1,)[bb-nimgs.reshape(-1,)<nimgs.reshape(-1,)-aa] = bb[bb-nimgs.reshape(-1,)<nimgs.reshape(-1,)-aa]
            nimg2.reshape(-1,)[bb-nimgs.reshape(-1,)>nimgs.reshape(-1,)-aa] = aa[bb-nimgs.reshape(-1,)>nimgs.reshape(-1,)-aa]
            distance2 = self.loss_f(img, nimg2, only_loss=True)
            if distance2 < distance:
                print('[WARNING][L5] Using rounded perturbation to the domain')
                return distance2[0], evaluations + 2 - early_discovery, nimg2, summary
        return distance[0], evaluations + 2 - early_discovery, nimgs, summary
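Example n. 30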
    def blackbox_optimizer_ordered_domain(self, iteration, ord_domain,
                                          Random_Matrix, super_dependency, img,
                                          k, img0):
        # build new inputs, based on current variable value
        times = np.zeros(8, )
        var = 0 * np.array([img])
        # print('the type of var is', type(var[0]))
        # print('the shape of var is',var[0].shape)

        NN = self.var_list.size

        nn = len(ord_domain)
        nn_var = nn if nn < self.batch_size else self.batch_size

        # We choose the elements of ord_domain that are inherent to the step. So it is already
        # limited to the variable's dimension
        # print('inner iteration', iteration)
        var_indice = np.arange(nn)
        # print('======> optimised indices', var_indice)
        indice = self.var_list[var_indice]
        x_o = np.zeros(nn_var, )
        s_o = np.zeros(nn, )
        # Changing the bounds according to the problem being resized or not
        # print('############')
        # print('nn_var',nn_var)
        # print('nn',nn)

        if self.use_resize:
            a = -3 * np.ones((nn_var, ))
            b = +3 * np.ones((nn_var, ))

            for i in range(nn):
                # if nn < self.batch_size:
                indices = finding_indices(super_dependency.reshape(-1, ),
                                          indice[i])
                up = self.modifier_up[indices]
                down = self.modifier_down[indices]
                max_ind = np.argmax(up - down)
                xs = np.divide(-(up + down), (up - down))
                s_o[i] = np.clip(np.arctanh(np.clip(xs[max_ind], -1, 1)), -3,
                                 3)
                if i == 0:
                    x_o[i] = 3

            if nn < self.batch_size:
                S = np.eye(nn_var) / np.sqrt(nn_var)
                # print('nn_var',nn_var)
                # print('nn',nn)
                # print(S.shape)
                x_o = s_o
            else:
                # print('s_o', s_o)
                # print('S_k', (nn_var-1,nn_var))
                S_n = np.concatenate(
                    (s_o.reshape(1, nn) / 3, np.random.randn(nn_var - 1, nn) /
                     np.sqrt(nn_var))).transpose()
                S = Graham_Schmidt(S_n)

                # print(S[:,:2])
                # print(S_n[:,:2])
                # np.array([s_o,
                #         np.random.randn(nn_var-1,nn_var)/np.sqrt(nn_var)]).transpose()

        else:
            b = self.modifier_up[indice]
            a = self.modifier_down[indice]
        bb = self.modifier_up
        aa = self.modifier_down
        # print(x_o)
        # print(np.dot(S,x_o))
        opt_fun = Objfun(lambda c: self.loss_f(
            img,
            vec2modMatRand3(np.dot(S, c), indice, var, Random_Matrix,
                            super_dependency, bb, aa, self.overshoot),
            only_loss=True)[0])
        initial_loss = opt_fun(x_o)
        if initial_loss != self.l:
            print('[WARNING] Could not rebuild the perturbation; loss differs by',
                  initial_loss - self.l)
        user_params = {
            'init.random_initial_directions': False,
            'init.random_directions_make_orthogonal': False
        }
        soln = pybobyqa.solve(opt_fun,
                              x_o,
                              rhobeg=np.min(b - a) / 3,
                              bounds=(a, b),
                              maxfun=nn_var * 1.5,
                              rhoend=np.min(b - a) / 6,
                              npt=nn_var + 1,
                              scaling_within_bounds=False,
                              user_params=user_params)
        summary = opt_fun.get_summary(with_xs=True)
        minimiser = np.min(summary['fvals'])
        # print(soln)
        real_oe = self.loss_f(img,
                              vec2modMatRand3(np.dot(S, soln.x), indice, var,
                                              Random_Matrix, super_dependency,
                                              bb, aa, self.overshoot),
                              only_loss=True)
        if (minimiser != real_oe) and (initial_loss > minimiser):
            print('########################## ERROR: minimiser differs from the re-evaluated loss')
        # print(a,b)
        evaluations = soln.nf

        # print('==========   a  ', a)
        # print('==========   b  ', b)
        print('========== soln ', soln.x)
        # print(soln)

        nimgs = vec2modMatRand3(np.dot(S, soln.x), indice, var, Random_Matrix,
                                super_dependency, bb, aa, self.overshoot)
        nimg2 = nimgs.copy()
        nimg2.reshape(
            -1, )[bb - nimgs.reshape(-1, ) < nimgs.reshape(-1, ) -
                  aa] = bb[bb - nimgs.reshape(-1, ) < nimgs.reshape(-1, ) - aa]
        nimg2.reshape(
            -1, )[bb - nimgs.reshape(-1, ) > nimgs.reshape(-1, ) -
                  aa] = aa[bb - nimgs.reshape(-1, ) > nimgs.reshape(-1, ) - aa]

        distance = self.loss_f(img, nimgs, only_loss=True)
        distance2 = self.loss_f(img, nimg2, only_loss=True)

        if soln.f > initial_loss:
            print('The optimisation is not working. The diff is',
                  initial_loss - soln.f)
            return initial_loss, evaluations + 2, var, times, summary
        elif distance2 < distance:
            print('USING ROUNDED')
            return distance2[0], evaluations + 2, nimg2, times, summary
        else:
            # print('The optimisation is working. THe diff is ', initial_loss - soln.f)
            return distance[0], evaluations + 2, nimgs, times, summary