Example #1
    def test_3_2_disp_sobol(self):
        """Iterative sampling on TestFunction 1 and 2 (multi and univariate)"""

        def callback_func(x):
            print("Local minimization callback test")

        for test in [test1_1, test2_1]:
            res = shgo(test.f, test.bounds, iters=1, sampling_method='sobol',
                       callback=callback_func, options={'disp': True})

            res = shgo(test.f, test.bounds, n=1, sampling_method='simplicial',
                       callback=callback_func, options={'disp': True})
Example #2
def run_test(test, args=(), test_atol=1e-5, n=100, iters=None,
             callback=None, minimizer_kwargs=None, options=None,
             sampling_method='sobol'):
    res = shgo(test.f, test.bounds, args=args, constraints=test.cons,
               n=n, iters=iters, callback=callback,
               minimizer_kwargs=minimizer_kwargs, options=options,
               sampling_method=sampling_method)

    logging.info(res)

    if test.expected_x is not None:
        numpy.testing.assert_allclose(res.x, test.expected_x,
                                      rtol=test_atol,
                                      atol=test_atol)

    # (Optional tests)
    if test.expected_fun is not None:
        numpy.testing.assert_allclose(res.fun,
                                      test.expected_fun,
                                      atol=test_atol)

    if test.expected_xl is not None:
        numpy.testing.assert_allclose(res.xl,
                                      test.expected_xl,
                                      atol=test_atol)

    if test.expected_funl is not None:
        numpy.testing.assert_allclose(res.funl,
                                      test.expected_funl,
                                      atol=test_atol)
    return
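A quick way to exercise run_test outside the original suite is a stand-in object carrying the attributes it reads (a hedged sketch; the real suite defines these as TestFunction instances, and the example's own numpy/logging imports are assumed):

from types import SimpleNamespace

# Hypothetical stand-in for the suite's test objects
toy = SimpleNamespace(
    f=lambda x: x[0] ** 2 + x[1] ** 2,  # sphere; global minimum at the origin
    bounds=[(-1, 1), (-1, 1)],
    cons=None,                 # shgo accepts constraints=None
    expected_x=[0.0, 0.0],
    expected_fun=0.0,
    expected_xl=None,          # None skips the optional assertions
    expected_funl=None,
)
run_test(toy)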
Example #3
 def test_6_2_simplicial_min_iter(self):
     """Test that maximum iteration option works on TestFunction 3"""
     options = {'min_iter': 2}
     res = shgo(test3_1.f, test3_1.bounds, constraints=test3_1.cons,
                options=options, sampling_method='simplicial')
     numpy.testing.assert_allclose(res.x, test3_1.expected_x, rtol=1e-5,
                                   atol=1e-5)
     numpy.testing.assert_allclose(res.fun, test3_1.expected_fun, atol=1e-5)
Example #4
 def test_3_2_no_min_pool_simplicial(self):
     """Check that the routine stops when no minimiser is found
        after maximum specified sampling evaluations"""
     options = {'maxev': 10,
                'disp': True}
     res = shgo(test_table.f, test_table.bounds, n=3, options=options,
                sampling_method='simplicial')
     numpy.testing.assert_equal(False, res.success)
Example #5
    def test_2_2_sobol_iter(self):
        """Iterative Sobol sampling on TestFunction 2 (univariate)"""
        res = shgo(test2_1.f, test2_1.bounds, constraints=test2_1.cons,
                   n=None, iters=1, sampling_method='sobol')

        numpy.testing.assert_allclose(res.x, test2_1.expected_x, rtol=1e-5,
                                      atol=1e-5)
        numpy.testing.assert_allclose(res.fun, test2_1.expected_fun, atol=1e-5)
Example #6
    def test_1_maxiter(self):
        """Test failure on insufficient iterations"""
        options = {'maxiter': 2}
        res = shgo(test4_1.f, test4_1.bounds, n=2, iters=None,
                   options=options, sampling_method='sobol')

        numpy.testing.assert_equal(False, res.success)
        numpy.testing.assert_equal(4, res.nfev)
Example #7
    def test_5_2_infeasible_simplicial(self):
        """Ensures the algorithm terminates on infeasible problems
           after maxev is exceeded."""
        options = {'maxev': 1000,
                   'disp': False}

        res = shgo(test_infeasible.f, test_infeasible.bounds,
                   constraints=test_infeasible.cons, n=100, options=options,
                   sampling_method='simplicial')

        numpy.testing.assert_equal(False, res.success)
Example #8
    def shgo(self):
        from scipy.optimize import shgo

        res = shgo(self.error,
                   self.bounds,
                   constraints=None,
                   n=60,
                   sampling_method='sobol',
                   iters=30)

        self.param = res.x
        self.fit = lambda x: func(x, *self.param)
        self._measureGoodness()
Example #9
def optima_clement1a(i, oldfolder, training_master, ytrue,
                     theshape, iniclemz, Nop, iniclemb, clfx, clfy, nclus, minnss, maxss):
    import numpy as np
    print('%d|%d' % ((i + 1), Nop))
    yuse = np.reshape(ytrue[i, :], (1, -1), 'F')
    initial_theta = np.reshape(iniclemz[i, :], (1, -1), 'F')
    iniclem = np.reshape(iniclemb[i, :], (1, -1), 'F')
    # .item() replaces np.asscalar, which was removed in NumPy 1.23
    bnds = [(minnss[:, 0].item(), maxss[:, 0].item()),
            (minnss[:, 1].item(), maxss[:, 1].item())]
    resultt = opt.shgo(func=costFunc1, bounds=bnds,
                       args=(yuse, clfx, clfy, theshape, iniclem, nclus),
                       n=100, iters=5)
    Xe = np.reshape(resultt.x, (1, -1), 'F')
    return Xe
Example #10
    def test_5_1_1_infeasible_sobol(self):
        """Ensures the algorithm terminates on infeasible problems
           after maxev is exceeded. Use infty constraints option"""
        options = {'maxev': 100, 'disp': True}

        res = shgo(test_infeasible.f,
                   test_infeasible.bounds,
                   constraints=test_infeasible.cons,
                   n=100,
                   options=options,
                   sampling_method='sobol')

        numpy.testing.assert_equal(False, res.success)
Example #11
def shgo_cube(objective,scale, n_trials, n_dim, with_count=False):
    bounds = [(-scale,scale)]*n_dim

    global feval_count
    feval_count = 0

    def _objective(x):
        global feval_count
        feval_count += 1
        return objective(list(x))[0]

    result = shgo(_objective, bounds,
                  options={'maxfev': n_trials, 'minimize_every_iter': False, 'maxfun': n_trials},
                  sampling_method='sobol')
    return (result.fun, feval_count) if with_count else result.fun
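A minimal way to drive shgo_cube (a hedged sketch: sphere is a made-up objective returning a one-element tuple, matching the objective(list(x))[0] call above):

sphere = lambda u: (sum(v * v for v in u),)  # 1-tuple, as _objective expects
best, n_evals = shgo_cube(sphere, scale=2.0, n_trials=200, n_dim=3, with_count=True)
print(best, n_evals)  # n_evals counts every call, including those from the local minimizer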
Example #12
def test_shgo(problem, num):
    best = math.inf
    lb = problem.bounds.lb
    ub = problem.bounds.ub
    t0 = time.perf_counter()
    for i in range(num):
        ret = shgo(problem.fun,
                   bounds=list(zip(lb, ub)),
                   n=300,
                   sampling_method='sobol')
        best = min(ret.fun, best)
        print("{0}: time = {1:.1f} best = {2:.1f} f(xmin) = {3:.1f}".format(
            i + 1, dtime(t0), best, ret.fun))
Example #13
 def test_6_2_simplicial_min_iter(self):
     """Test that maximum iteration option works on TestFunction 3"""
     options = {'min_iter': 2}
     res = shgo(test3_1.f,
                test3_1.bounds,
                constraints=test3_1.cons,
                options=options,
                sampling_method='simplicial')
     numpy.testing.assert_allclose(res.x,
                                   test3_1.expected_x,
                                   rtol=1e-5,
                                   atol=1e-5)
     numpy.testing.assert_allclose(res.fun, test3_1.expected_fun, atol=1e-5)
Example #14
    def _calc_target_function(loss_function, bounds_type, method_optimization):
        solution = None
        if method_optimization == 'diff':
            solution = optimize.differential_evolution(loss_function,
                                                       bounds_type)

        if method_optimization == 'shgo':
            solution = optimize.shgo(loss_function, bounds_type)

        if method_optimization == 'dual':
            solution = optimize.dual_annealing(loss_function,
                                               bounds_type,
                                               initial_temp=1)
        return solution.x
Example #15
def test_eval_count():
    scale = 5
    bounds = [(-scale,scale)]*6
    global feval_count
    feval_count = 0

    def _objective(x):
        global feval_count
        feval_count += 1
        return x[0]*x[1]*x[1]-3*x[0]

    result = shgo(_objective, bounds, options={'maxfev':700}, sampling_method='sobol')
    print(result.fun)
    print(str(feval_count))
Example #16
def adiabatic_theorem_check(beta, time):


    # Performance counter
    # GAMMA MAXIMIZATION
    par_bnds = ([0, 1],)
    energy_min = 1

    minimization = shgo(compute_gamma, par_bnds, n=25, iters=1,
                        args=(beta,), sampling_method='sobol')
    gamma_max = -minimization.fun

    # ENERGY MINIMUM
    minimization = shgo(compute_energy_diff, par_bnds, n=25, iters=1,
                        args=(beta,), sampling_method='sobol')
    energy_min = minimization.fun

    # TIME BOUNDS FOR ADIABATIC THEOREM
    adiabatic_time = gamma_max / (energy_min ** 2)

    if time < adiabatic_time:
        return 0
    else:
        return 1
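The gamma step uses the usual maximize-by-negation trick: shgo only minimizes, so compute_gamma presumably returns the negated quantity and the sign is flipped back on minimization.fun. A self-contained toy sketch of the same pattern:

from scipy.optimize import shgo

gamma = lambda x: x[0] * (1 - x[0])  # toy function to maximize on [0, 1]
res = shgo(lambda x: -gamma(x), ([0, 1],), n=25, iters=1, sampling_method='sobol')
gamma_max = -res.fun  # sign flipped back: roughly 0.25 at x = 0.5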
Example #17
    def test_2_2_sobol_iter(self):
        """Iterative Sobol sampling on TestFunction 2 (univariate)"""
        res = shgo(test2_1.f,
                   test2_1.bounds,
                   constraints=test2_1.cons,
                   n=None,
                   iters=1,
                   sampling_method='sobol')

        numpy.testing.assert_allclose(res.x,
                                      test2_1.expected_x,
                                      rtol=1e-5,
                                      atol=1e-5)
        numpy.testing.assert_allclose(res.fun, test2_1.expected_fun, atol=1e-5)
Example #18
def minimize_global_abs_diff(space_vals, model_vals, tolerance_mm,
                             normal_vectors):
    (n, space_vals, space_vals_flat,
     model_vals) = extract_optimization_vars(space_vals, model_vals)
    bounds = extract_bounds(space_vals_flat, tolerance_mm)
    a0 = extract_abs_diff_ratios(space_vals, model_vals)
    minimized_vals = optimize.shgo(abs_diff_ratio_function_no_bounds,
                                   bounds,
                                   args=(model_vals, a0, n, space_vals_flat,
                                         normal_vectors),
                                   options={
                                       'disp': True
                                   }).x
    return np.array(minimized_vals).reshape(n, 3).tolist()
Example #19
    def test_4_4_known_f_min(self):
        """Test Global mode limiting local evalutions for 1-D functions"""
        options = {  # Specify known function value
            'f_min': test2_1.expected_fun,
            'f_tol': 1e-6,
            # Specify number of local iterations to perform
            'minimize_every_iter': True,
            'local_iter': 1,
            'infty_constraints': False}

        res = shgo(test2_1.f, test2_1.bounds, constraints=test2_1.cons,
                   n=None, iters=None, options=options,
                   sampling_method='sobol')
        numpy.testing.assert_allclose(res.x, test2_1.expected_x, rtol=1e-5,
                                      atol=1e-5)
Example #20
    def test_4_4_known_f_min(self):
        """Test Global mode limiting local evalutions for 1D funcs"""
        options = {  # Specify known function value
            'f_min': test2_1.expected_fun,
            'f_tol': 1e-6,
            # Specify number of local iterations to perform
            'minimize_every_iter': True,
            'local_iter': 1,
            'infty_constraints': False}

        res = shgo(test2_1.f, test2_1.bounds, constraints=test2_1.cons,
                   n=None, iters=None, options=options,
                   sampling_method='sobol')
        numpy.testing.assert_allclose(res.x, test2_1.expected_x, rtol=1e-5,
                                      atol=1e-5)
Example #21
def optimize(model, data, workflow):

    #print(data)
    model_gp = model['model']
    selected_feature = model['selected_feature']
    scaler = model['scaler']

    cpu_loc = model['cpu_loc']
    slo_target = slo_target_list[workflow]

    ### initialize data
    X = data[selected_feature]

    #normalized values
    norm_temp = scaler.transform(X)[0]
    length = len(norm_temp)

    def cpusum(x):
        """sum of normalized cpu utilization of various microservices"""
        return -1 * sum(x)

    def cons_f(x):
        temp = update_data(norm_temp, x)
        #temp = np.concatenate(( x, norm_vm, norm_workload), axis=None).reshape(1,length)
        y_pred, sigma = model_gp.predict(temp, return_std=True)
        #print(temp, y_pred, sigma, slo_target)
        return -1.0 * (y_pred + 2 * sigma) + slo_target

    def update_data(ret, update_list):
        length = len(ret)
        temp = ret.copy()
        for val in update_list:
            temp[cpu_loc] = val
        return np.array(temp).reshape(1, length)

    bounds = [(-1, 1)] * len(cpu_loc)
    cons = ({'type': 'ineq', 'fun': cons_f})
    res = shgo(cpusum, bounds, iters=10, constraints=cons)
    print(res.x, cpusum(res.x))

    temp = update_data(norm_temp, res.x)
    y_pred, sigma = model_gp.predict(temp, return_std=True)
    print("response time=", y_pred, "stdev=", sigma, "target=", slo_target)
    final_result = scaler.inverse_transform(temp)[0]
    cputhresholds = [final_result[loc] for loc in cpu_loc]
    print("denormalized cpu utilization % thresholds=", cputhresholds)
    print("sum of denormalized cpu utilization % =", sum(cputhresholds))
    print("\n")
Example #22
def max_3_shgo_0():
    bnd = ([1e-2, 1], [1e-2, 1], [0., 1], [0., 1], [d0 + thr, l0])

    def cs_p(x):
        return epsilon - prob_3(*x)

    def cs_fair(x):
        return fair_cond_3(*x) - (1 - alpha) * a0

    cs = [{'type': 'ineq', 'fun': cs_p}, {'type': 'ineq', 'fun': cs_fair}]

    def aim(x):
        return -ce(util_3(*x)) / (1 + (vartheta0_(*x) / l0))

    t = shgo(aim, bounds=bnd, constraints=cs, sampling_method='sobol')
    return t
Example #23
def max_1_shgo_0():
    bnd = ([1e-2, 1.], [1e-2, 1.], [0., 1])

    def cs_p(x):
        return epsilon - prob_1(*x)

    def cs_fair(x):
        return fair_cond_1(*x) - (1 - alpha) * a0

    cs = [{'type': 'ineq', 'fun': cs_p}, {'type': 'ineq', 'fun': cs_fair}]

    def aim(x):
        return -ce(util_1(*x))

    t = shgo(aim, bounds=bnd, constraints=cs, sampling_method='sobol')
    return t
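Both examples rely on SciPy's dict-style constraints, where an 'ineq' entry is feasible when fun(x) >= 0; epsilon - prob_1(*x) therefore encodes prob_1(x) <= epsilon. The same pattern on a self-contained toy problem:

from scipy.optimize import shgo

# minimize x + y subject to x*y >= 1; 'ineq' means fun(x) >= 0 is feasible
cons = [{'type': 'ineq', 'fun': lambda x: x[0] * x[1] - 1.0}]
res = shgo(lambda x: x[0] + x[1], bounds=[(0.1, 5), (0.1, 5)], constraints=cons)
print(res.x, res.fun)  # approximately [1, 1] and 2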
Example #24
    def runAllocation2(self):
        #####################################
        ## Optimization 2: Minimize budget ##
        ##  identify m additional faults   ##
        #####################################

        cons2 = ({
            'type': 'eq',
            'fun': self.optimization2,
            'args': (self.covariate_data, )
        })
        bnds = tuple((0, None) for i in range(self.model.numCovariates))

        self.res2 = shgo(
            lambda x: sum([x[i] for i in range(self.model.numCovariates)]),
            bounds=bnds,
            constraints=cons2)
        self.effort = np.sum(self.res2.x)
Example #25
def shgo_cube(objective,
              n_trials,
              n_dim,
              with_count: bool = False,
              local_method=None,
              sampling_method='sobol'):
    """ Minimize a function on the cube using SHGO
    :param objective:    function on (0,1)^n_dim
    :param n_trials:
    :param n_dim:
    :param with_count:
    :return:
    """
    minimizer_kwargs = MINIMIZER_KWARGS[local_method]
    assert sampling_method in ['sobol', 'simplicial'], \
        'did not understand sampling method'
    bounds = [(0, 1)] * n_dim

    global feval_count
    feval_count = 0

    def _objective(x):
        global feval_count
        feval_count += 1
        return objective(list(x))

    # Try to induce roughly the right number of function evaluations. This can be improved!
    n_trials_reduced = int(n_trials / 2 + 1)
    n_iters = int(1 + n_trials / 80)
    n = int(5 + n_trials / 40)
    result = shgo(_objective,
                  bounds,
                  n=n,
                  iters=n_iters,
                  options={
                      'maxfev': n_trials_reduced,
                      'minimize_every_iter': False,
                      'maxfun': n_trials_reduced,
                      'minimizer_kwargs': minimizer_kwargs
                  },
                  sampling_method=sampling_method)
    return (result.fun, list(result.x),
            feval_count) if with_count else (result.fun, result.x)
Example #26
def run_test(test,
             args=(),
             test_atol=1e-5,
             n=100,
             iters=None,
             callback=None,
             minimizer_kwargs=None,
             options=None,
             sampling_method='sobol'):
    res = shgo(test.f,
               test.bounds,
               args=args,
               constraints=test.cons,
               n=n,
               iters=iters,
               callback=callback,
               minimizer_kwargs=minimizer_kwargs,
               options=options,
               sampling_method=sampling_method)

    logging.info(res)

    if test.expected_x is not None:
        numpy.testing.assert_allclose(res.x,
                                      test.expected_x,
                                      rtol=test_atol,
                                      atol=test_atol)

    # (Optional tests)
    if test.expected_fun is not None:
        numpy.testing.assert_allclose(res.fun,
                                      test.expected_fun,
                                      atol=test_atol)

    if test.expected_xl is not None:
        numpy.testing.assert_allclose(res.xl, test.expected_xl, atol=test_atol)

    if test.expected_funl is not None:
        numpy.testing.assert_allclose(res.funl,
                                      test.expected_funl,
                                      atol=test_atol)
    return
Example #27
def max_2_shgo():
    bnd = ([1e-2, 1], [0., 1], [0., 1], [1e-2, l0], [1e-2, l0])

    def cs_p(x):
        return epsilon - prob_2(*x)

    def cs_fair(x):
        return fair_cond_2(*x) - (1 - alpha) * a0

    cs = [{'type': 'ineq', 'fun': cs_p}, {'type': 'ineq', 'fun': cs_fair}]

    def aim(x):
        return -ce(util_2(*x)) / (1 + (vartheta0(*x) / l0))

    def cs_k(x):
        return x[3] - (1 + fac) * x[4]

    cs.append({'type': 'ineq', 'fun': cs_k})
    t = shgo(aim, bounds=bnd, constraints=cs, sampling_method='sobol')
    return t
Example #28
def main():
    args = create_cmd_args()
    if not args:
        args = get_input_args()
    # Select subjects for fitting
    if args.subject == 'all':
        # Fit model to all subjects
        subjects = range(100)
    elif args.subject[0] == '-':
        # For leave-one-subject-out cross validation
        subjects = [int(x) for x in range(100) if x != -int(args.subject)]
    else:
        # For single subject fitting
        subjects = [int(args.subject)]
    notes = 'na'
    bounds = model_bounds[args.training_model]
    simulator, trials = build_simulator(args.experiment_name, subjects)
    logfile = 'fitting_log_' + ymdhms() + '_' + str(args.subject) + '.txt'
    with open(logfile, 'a') as file:
        file.write(f'experiment_name: {args.experiment_name}\nsubject: {args.subject}\ntrials: {trials}\n'
            f'approach_experiment: {args.approach_experiment}\n'
            f'approach_model: {args.approach_model}, avoid_model: {args.avoid_model}\n'
            f'training_model: {args.training_model}\n'            
            f'method: {args.method}\nbounds: {bounds}\nt_start: {args.t_start}, t_end: {args.t_end}\n'
            f'preferred speed: {args.ps}\nnotes: {notes}\n')              
    if args.method == 'nelder-mead':
        res = optimize.minimize(error, x0, args=(simulator, trials, logfile, args), method='nelder-mead',
                        options={'xatol': 1e-6, 'disp': True, 'adaptive': True})
    elif args.method == 'shgo':
        res = optimize.shgo(error, bounds, args=(simulator, trials, logfile, args))
    elif args.method == 'dual_annealing':
        res = optimize.dual_annealing(error, bounds, args=(simulator, trials, logfile, args),
                                      initial_temp=25000)
    elif args.method == 'differential_evolution':
        res = optimize.differential_evolution(error, bounds, args=(simulator, trials, logfile, args),
                                    updating='immediate', workers=1)
    elif args.method == 'basinhopping':
        res = optimize.basinhopping(error, bounds, minimizer_kwargs={'args':(simulator, trials, logfile, args)})
    with open(logfile, 'a') as file:
        file.write(f'The optimal x: {res.x}')
    print(res.x)
Example #29
def findFractionsComplete(specsignal, specspecies, lifesignal, lifespecies):
    #signal is in the form [(s,g),harmonics] ie shape = (2,har)
    #species is in form [(s,g),Pure components,harmonics] ie shape = (2,nComponents,harmonics)
    def funcFs(x):
        chisq = np.sum(
            np.sum((np.squeeze(lifesignal) -
                    np.squeeze(np.sum(x * lifespecies, 2)))**2)) + np.sum(
                        np.sum((np.squeeze(specsignal) - np.squeeze(
                            np.sum(x * specspecies, 2)))**2))  #spectrum
        return chisq

    sz = np.size(lifespecies, 2)
    bnds = [(0, 1) for x in range(sz)]
    # init= [0.1 for x in range(sz)]
    #init=[0.08,0.12,0.3,0.15,0.22,0.13]
    cons = ({'type': 'eq', 'fun': lambda x: 1 - np.sum(x)})
    # res = minimize(funcFs,init , method='SLSQP',bounds=bnds,constraints = cons,tol=1e-10)
    res = optimize.shgo(funcFs, bounds=bnds, constraints=cons)
    #res = minimize(funcFs,init , method='Nelder-Mead')#,bounds=bnds)#,constraints = cons,tol=1e-10)
    res = res.x
    return res
Example #30
    def optimize_gp(self):
        """
		Optimisation of Gaussian Process hyperparameters, including lengthscale, amplitude, and correlation coeffcients.
		Optimisation is done via maximising the log marginal likelihood.
		"""
        print(
            "Optimizing GP hyperparameters and correlation coefficients, this may take a while..."
        )
        self.datastd = np.mean([
            np.nanstd(self.gravfield),
            np.nanstd(self.magfield),
            np.nanstd(self.drillfield)
        ])
        # run optimisation
        bopt_res = shgo(self.calc_logl,
                        bounds=((0.5, 2), (0.5 * gp_lengthscale,
                                           10 * gp_lengthscale),
                                (0.5 * gp_coeff[0], 1), (0.5 * gp_coeff[1], 1),
                                (0.5 * gp_coeff[2], 1)),
                        n=10,
                        iters=10,
                        sampling_method='sobol')  #tol =1e-6, method='SLSQP'
        #bopt_res = minimize(self.calc_logl, x0 = np.asarray([self.gp_amp, gp_lengthscale, gp_coeff[0], gp_coeff[1], gp_coeff[2]]),
        #	method = 'SLSQP', options={'maxiter': 10, 'disp': True, 'ftol': 1e-02})
        if not bopt_res.success:
            # Don't update parameters
            print('WARNING: ' + bopt_res.message)
        else:
            # Update parameters with optimised solution
            print(
                "Initial parameter [amplitude, lengthscale, corr1, corr2, corr3]:"
            )
            print(self.gp_amp, self.gp_length, self.coeffm)
            self.gp_amp = bopt_res.x[0]
            self.gp_length = bopt_res.x[1]
            self.coeffm = np.asarray([bopt_res.x[2:]]).flatten()
            print(
                "Optimized parameter [amplitude, lengthscale, corr1, corr2, corr3]:"
            )
            print(self.gp_amp, self.gp_length, self.coeffm)
Example #31
    def optimize(self) -> Match:
        """
        Starts the optimization process and translates into a Match()
        """

        opt: OptimizeResult = shgo(func=self.score, bounds=self.bounds)

        if not opt["success"]:
            return Match(
                score=0.0,
                matched=[
                    NoMatch() for _ in range(0, len(self.interpretations))
                ],
            )

        score = opt["fun"]
        max_score = sqrt(sum(r.weight**2 for r in self.parser.rules.values()))

        return Match(
            score=max([0, (max_score - score) / max_score]),
            matched=self._get_selection(opt["x"]),
        )
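The closing expression maps the residual score onto [0, 1]: e.g. with hypothetical rule weights 3 and 4, max_score is 5, so a raw score of 1 gives a match quality of 0.8. A quick numeric check of that formula:

from math import sqrt

max_score = sqrt(3 ** 2 + 4 ** 2)  # hypothetical rule weights 3 and 4 -> 5.0
score = 1.0
print(max(0, (max_score - score) / max_score))  # 0.8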
Example #32
def test8():
    f = lambda x: (4 * x[0]**2 + 2 * x[1]**2 + 4 * x[0] * x[1] + 2 * x[1] + 1
                   ) * exp(x[0])

    # Find the global minimum of the function with the basin-hopping algorithm
    result1 = optimize.basinhopping(f, x0=[-1, 1])

    # Minimize the function by brute force over the given ranges.
    # result2 = optimize.brute(f, ranges=(-2, 2))
    # print(result2)

    # Find the global minimum of a multivariate function.
    result3 = optimize.differential_evolution(f, bounds=([-1, 1], [-2, 0]))
    # print(result3)

    # Find the global minimum with SHGO (simplicial homology global optimization)
    result4 = optimize.shgo(f, bounds=([-1, 1], [-2, 0]))
    print(result4)

    # Find the global minimum with dual annealing
    result5 = optimize.dual_annealing(f, bounds=([-1, 1], [-2, 0]))
    print(result5)
Example #33
    def train(self):
        self.weights = brute(self._loss, ranges=[(0, 1), (0, 1), (0, 1), (0, 1)])[0]
        logging.info("RecommenderWeightsComplicated: brute train done, weights = " + str(self.weights))

        self.weights = np.array([1., 1., 1., 1.])

        self.weights = shgo(self._loss, bounds=[(0, 1), (0, 1), (0, 1), (0, 1)])['x']
        logging.info("RecommenderWeightsComplicated: shgo train done, weights = " + str(self.weights))

        self.weights = np.array([1., 1., 1., 1.])

        self.weights = basinhopping(self._loss, np.array([1., 1., 1., 1.]), niter=1).x
        logging.info("RecommenderWeightsComplicated: basinhopping train done, weights = " + str(self.weights))

        self.weights = np.array([1., 1., 1., 1.])

        self.weights = differential_evolution(self._loss, bounds=[(0, 1), (0, 1), (0, 1), (0, 1)])['x']
        logging.info("RecommenderWeightsComplicated: differential_evolution train done, weights = " + str(self.weights))

        self.weights = np.array([1., 1., 1., 1.])

        self.weights = dual_annealing(self._loss, bounds=[(0, 1), (0, 1), (0, 1), (0, 1)])['x']
        logging.info("RecommenderWeightsComplicated: dual_annealing train done, weights = " + str(self.weights))
Example #34
 def solve_global(self, Ks, Ps, callput):
     func = lambda x: np.sum(
         np.square(self._parse_param(x).cal_price(Ks, callput) - Ps))
     jac = lambda x: self._jac(x, Ks, Ps, callput)
     N = self.shape[0]
     lb = np.array([1e-5] * N * 2 + [0.01] * N + [0])
     ub = np.array([1] * N + [3] * N + [3] * N + [1])
     bounds = list(zip(lb, ub))
     constr_func = lambda x: np.array(
         [np.sum(x[:N]) + x[-1] - 1, x[:N].dot(x[N:2 * N]) - 1])
     constr_jac = lambda x: np.array(
         [[1] * N + [0] * N * 2 + [1],
          np.concatenate([x[N:2 * N], x[:N], [0] * N, [0]])])
     eq_cons = {'type': 'eq', 'fun': constr_func, 'jac': constr_jac}
     reg = opt.shgo(func,
                    bounds=bounds,
                    constraints=[eq_cons],
                    minimizer_kwargs={'method': 'SLSQP'},
                    options={
                        'jac': jac,
                        'maxtime': 10
                    })
     self._parse_param(reg.x)
     return reg
Example #35
 def test_12_sobol_inf_cons(self):
     """Test to cover the case where f_lowest == 0"""
     options = {'maxtime': 1e-15,
                'f_min': 0.0}
     res = shgo(test1_2.f, test1_2.bounds, n=1, iters=None,
                options=options, sampling_method='sobol')
Example #36
                if robot_node.getPosition()[1] > best_height:
                    best_height = robot_node.getPosition()[1]
            elif abs(velocity[1]) > 10:
                best_height = 0
                break

    #reset the robot position
    robot_node.remove()
    children.importMFNode(-1, "Robot.wbo")

    #return the negative max jump height
    #this is because the scipy minimizes a function
    return -best_height


#uncomment this line and the four lines in the jump_height function to optimize for lever length as well
#x0 = np.array([KNEE_INITIAL[1], HEEL_INITIAL[2], KNEE_TENDON_INITIAL, HEEL_TENDON_INITIAL])

x0 = np.array([KNEE_TENDON_INITIAL, HEEL_TENDON_INITIAL])

#use a global optimization algorithm to find the optimal value
#the bounds for the variables being optimized for need to be specified
#the iters parameter will give more precise results for a higher value, but will take more time
minimizer_kwargs = dict(method="Powell")

#to simply test the leg with the initial values provided, just comment this line.
optimized = shgo(jump_height, ((0, 30000), (0, 30000)),
                 iters=7,
                 minimizer_kwargs=minimizer_kwargs)
print(optimized.x)
print(optimized.fun)
Example #37
def main():

    global DF, PTABLE, OPTIONS

    parser = argparse.ArgumentParser()

    parser.add_argument("glob")
    parser.add_argument("--period", type=int)
    parser.add_argument("--bb-low", type=float)
    parser.add_argument("--bb-high", type=float)
    parser.add_argument("--lo-zone", type=float)
    parser.add_argument("--hi-zone", type=float)
    parser.add_argument("--lo-sigma", type=float)
    parser.add_argument("--hi-sigma", type=float)
    parser.add_argument("--protect-loss", type=bool)
    parser.add_argument("--method", default="dual_annealing")
    parser.add_argument("--finish", default=None)

    args = parser.parse_args()

    DF = pd.DataFrame(columns=["time", "mark", "ask", "bid"])

    for csvfile in glob.glob(args.glob):

        csvdf = pd.read_csv(csvfile, index_col=0)

        csvdf["time"] = csvdf.apply(timefunc, axis=1)
        csvdf["mark"] = pd.to_numeric(csvdf["mark"])
        csvdf["ask"] = pd.to_numeric(csvdf["ask"])
        csvdf["bid"] = pd.to_numeric(csvdf["bid"])

        if DF.shape[0] > 0:
            prev_time = DF.iloc[DF.shape[0] - 1]["time"]
            prev_mark = DF.iloc[DF.shape[0] - 1]["mark"]

            dt = csvdf.iloc[0]["time"] - prev_time
            scale = csvdf.iloc[0]["mark"] - prev_mark

            csvdf["time"] = csvdf["time"] - dt
            csvdf["mark"] = csvdf["mark"] - scale
            csvdf["ask"] = csvdf["ask"] - scale
            csvdf["bid"] = csvdf["bid"] - scale

        DF = pd.concat([DF, csvdf], ignore_index=True)  # DataFrame.append was removed in pandas 2.0

    bounds_dict = {
        "period": (12, 48 * 3600 / 5),
        "bb_low": (0.25, 4),
        "bb_high": (0.25, 4),
        "lo_zone": (-0.1, 0.5),
        "hi_zone": (0.5, 1.1),
        "lo_sigma": (0, 4),
        "hi_sigma": (0, 4),
        "protect_loss": (0, 1),
    }

    abs_dict = {
        "period": 1,
        "bb_low": 0.1,
        "bb_high": 0.1,
        "lo_zone": 0.01,
        "hi_zone": 0.01,
        "lo_sigma": 0.1,
        "hi_sigma": 0.1,
        "protect_loss": 1,
    }

    PTABLE = PrettyTable([
        "Iteration",
        "Time",
        "Period",
        "BB Low",
        "BB High",
        "Low Zone",
        "High Zone",
        "Low Sigma",
        "High Sigma",
        "Protect",
        "Return",
    ])
    PTABLE.float_format = ".4"

    bounds = []
    bounds.append((0, 100))
    bounds.append((
        f"{datetime(2021, 1, 1, 0, 0, 0):%X}",
        f"{datetime(2021, 1, 1, 23, 59, 59):%X}",
    ))
    bounds.append([int(v) for v in bounds_dict["period"]])
    bounds.append([float(v) for v in bounds_dict["bb_low"]])
    bounds.append([float(v) for v in bounds_dict["bb_high"]])
    bounds.append([float(v) for v in bounds_dict["lo_zone"]])
    bounds.append([float(v) for v in bounds_dict["hi_zone"]])
    bounds.append([float(v) for v in bounds_dict["lo_sigma"]])
    bounds.append([float(v) for v in bounds_dict["hi_sigma"]])
    bounds.append((False, True))
    bounds.append((-99.0, 99.0))
    for i in product([0, 1], repeat=len(bounds)):
        PTABLE.add_row([bounds[j][i[j]] for j in range(len(bounds))])
    OPTIONS = PTABLE._get_options({})
    frows = PTABLE._format_rows(PTABLE._get_rows(OPTIONS), OPTIONS)
    PTABLE._compute_widths(frows, OPTIONS)
    PTABLE._hrule = PTABLE._stringify_hrule(OPTIONS)
    print(PTABLE._stringify_header(OPTIONS))

    fixed = []
    bounds = []
    abs_diff = []
    for arg in [
            "period",
            "bb_low",
            "bb_high",
            "lo_zone",
            "hi_zone",
            "lo_sigma",
            "hi_sigma",
            "protect_loss",
    ]:
        if getattr(args, arg) is not None:
            fixed.append(getattr(args, arg))
        else:
            fixed.append(None)
            bounds.append(bounds_dict[arg])
            abs_diff.append(abs_dict[arg])

    res = None
    if args.method == "brute":

        x0, fval, grid, Jout = optimize.brute(
            func=run,
            args=tuple(fixed),
            ranges=bounds,
            full_output=True,
            finish=args.finish,
        )

        if grid.ndim == 1:
            plt.plot(grid, -np.log(Jout))
            plt.title(args.glob)
            plt.show()

        elif grid.ndim == 3:
            fig = plt.figure(figsize=(10, 6))
            ax1 = fig.add_subplot(111, projection="3d")

            mycmap = plt.get_cmap("gist_earth")
            surf1 = ax1.plot_surface(grid[0, :],
                                     grid[1, :],
                                     -np.log(Jout),
                                     cmap=mycmap)
            fig.colorbar(surf1, ax=ax1, shrink=0.5, aspect=5)

            plt.title(args.glob)
            plt.show()

    elif args.method == "basinhopping":
        res = optimize.basinhopping(
            func=run,
            x0=tuple(fixed),
            minimizer_kwargs={"args": tuple(7 * [None])},
        )

    elif args.method == "shgo-sobol":

        constraints = []
        if args.period is None:
            constraints.append({
                "type": "eq",
                "fun": lambda x: np.array([x[0] - int(x[0])])
            })

        if args.protect_loss is None:
            constraints.append({
                "type": "eq",
                "fun": lambda x: np.array([x[7] - int(x[7])])
            })

        res = optimize.shgo(
            func=run,
            args=tuple(fixed),
            bounds=bounds,
            constraints=constraints,
            options={"disp": True},
            sampling_method="sobol",
            minimizer_kwargs={"options": {
                "eps": np.array(abs_diff)
            }},
        )

        tbl = PrettyTable([
            "Period",
            "BB Low",
            "BB High",
            "Low Zone",
            "High Zone",
            "Low Sigma",
            "High Sigma",
            "Protect",
            "Return",
        ])
        tbl.float_format = ".4"

        for minim in res.xl:
            row = []
            i = 0
            for val in fixed:
                if val is None:
                    row.append(minim[i])
                    i += 1
                else:
                    row.append(val)
            score = run(minim, *fixed)
            row.append(-np.log(score))
            tbl.add_row(row)

        print(PTABLE._hrule)
        print()

        print(tbl)

    elif args.method == "hyperopt":

        space = [
            hp.quniform("period", 12, 48 * 3600 / 5, 1),
            hp.uniform("bb_low", 0.25, 4),
            hp.uniform("bb_high", 0.25, 4),
            hp.uniform("lo_zone", -0.1, 0.5),
            hp.uniform("hi_zone", 0.5, 1.1),
            hp.uniform("lo_sigma", 0, 4),
            hp.uniform("hi_sigma", 0, 4),
            hp.quniform("protect_loss", 0, 1, 1),
        ]

        res = fmin(run, space, algo=tpe.suggest, max_evals=200)

        print(run(space_eval(space, res)))

    elif len(bounds) == 0:
        run([], *fixed)

    elif len(bounds) == 1:

        x0 = [(bounds[0][0] + bounds[0][1]) / 2]
        constraints = ()
        options = {"disp": True}
        if args.period is None:
            constraints = [{
                "type": "eq",
                "fun": lambda x: np.array([x[0] - int(x[0])])
            }]
            options["finite_diff_rel_step"] = (1 / x0[0], )

        res = optimize.minimize(
            fun=run,
            x0=x0,
            method="trust-constr",
            args=tuple(fixed),
            bounds=Bounds(bounds[0][0], bounds[0][1]),
            constraints=constraints,
            options=options,
        )

    else:
        res = getattr(optimize, args.method)(
            func=run,
            args=tuple(fixed),
            bounds=bounds,
            maxiter=1000000,
            local_search_options={
                "options": {
                    "disp": True
                }
            },
        )

    if res is not None:
        print(res)

    print(f"Glob = {args.glob}")
    print(f"Default = {DF.iloc[DF.shape[0] - 1]['mark']/DF.iloc[0]['mark']}")
Example #38
def custom_optimizer(fun, **kwargs):
    r"""Wrapper for ``scipy.optimize.shgo`` that does not return y_min."""
    opt_res = shgo(fun, **kwargs)
    return opt_res.x, None
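The (x, None) return shape suits callers that expect a (minimizer, minimum-or-None) pair from a pluggable optimizer. A minimal hedged usage of the wrapper above:

x_min, y_min = custom_optimizer(lambda x: (x[0] - 1) ** 2 + x[1] ** 2,
                                bounds=[(-2, 2), (-2, 2)])
print(x_min)  # approximately [1., 0.]
print(y_min)  # always None, by construction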
Example #39
def calculate_densities(chain):
    def objective(chain, x):
        T = chain.Time_to_exp / 252
        Rf = chain.Rf

        c_total = len(chain.Call_Strike)
        p_total = len(chain.Put_Strike)

        w = x[0]
        F1 = x[1]
        sigma1 = x[2]
        F2 = x[3]
        sigma2 = x[4]

        c_market = (chain.Call_Ask + chain.Call_Bid) / 2
        p_market = (chain.Put_Ask + chain.Put_Bid) / 2

        S1 = F1 * math.exp(-1 * Rf * T)
        S2 = F2 * math.exp(-1 * Rf * T)
        c_the =np.array([w*(american_option('c',S1,chain.Call_Dummy_Strike[i],T,Rf,sigma1)[0])\
                + (1-w)*(american_option('c',S2,chain.Call_Dummy_Strike[i],T,Rf,sigma2)[0])\
                for i in range(c_total)])

        p_the = np.array([w*(american_option('p',S1,chain.Put_Dummy_Strike[i],T,Rf,sigma1)[0])\
                + (1-w)*(american_option('p',S2,chain.Put_Dummy_Strike[i],T,Rf,sigma2)[0])\
                for i in range(p_total)])

        c_sse = np.sum((c_market - c_the)**2)
        p_sse = np.sum((p_market - p_the)**2)

        return c_sse + p_sse

    def stock_density(S, sigma, r, T, S_T):
        dens = (1 / (S_T * sigma * math.sqrt(2 * math.pi * T))) * (math.exp(
            -0.5 *
            (((math.log(S_T) - math.log(S) - r * T + 0.5 * T * sigma**2) /
              (sigma * math.sqrt(T)))**2)))
        return dens

    def risk_neutral_density(x, chain, S_T):
        w = x[0]
        F1 = x[1]
        sigma1 = x[2]
        F2 = x[3]
        sigma2 = x[4]
        r = chain.Rf
        T = chain.Time_to_exp / 252
        S1 = F1 * math.exp(-r * T)
        S2 = F2 * math.exp(-r * T)
        dens = w * stock_density(S1, sigma1, r, T, S_T) + (
            1 - w) * stock_density(S2, sigma2, r, T, S_T)
        return dens

    def real_world_density(x, chain, gamma, S_T):
        r = chain.Rf
        T = chain.Time_to_exp / 252
        w = x[0]
        F1 = x[1]
        sigma1 = x[2]
        F2 = x[3]
        sigma2 = x[4]
        F1_new = F1 * math.exp(gamma * T * sigma1**2)
        F2_new = F2 * math.exp(gamma * T * sigma2**2)
        one_by_w_new = 1 + ((1 - w) / w) * (
            (F2 / F1)**gamma) * (math.exp(0.5 * T * (gamma**2 - gamma) *
                                          (sigma2**2 - sigma1**2)))
        w_new = 1 / one_by_w_new
        S1 = F1_new * math.exp(-r * T)
        S2 = F2_new * math.exp(-r * T)
        dens = w_new * stock_density(S1, sigma1, r, T, S_T) + (
            1 - w_new) * stock_density(S2, sigma2, r, T, S_T)
        return dens

    res_dict = dict()

    fun_to_min = partial(objective, chain)

    T = chain.Time_to_exp / 252
    Rf = chain.Rf
    S = chain.Stock_Last

    F = S * math.exp(Rf * T)

    bounds = [(0.0, 1.0), (0.5 * S, 1.5 * S), (0.01, 0.99), (0.5 * S, 1.5 * S),
              (0.01, 0.99)]

    eq_cons = {
        'type': 'eq',
        'fun': lambda x: np.array([x[0] * (x[1]) + (1 - x[0]) * (x[3]) - F])
    }

    ineq_cons = {'type': 'ineq', 'fun': lambda x: np.array([x[3] - x[1]])}

    cons = (eq_cons, ineq_cons)

    res = shgo(fun_to_min,
               bounds,
               n=120,
               iters=5,
               minimizer_kwargs={'method': "L-BFGS-B"},
               constraints=cons,
               options={'disp': False},
               sampling_method='sobol')

    # print(res.x)

    res_dict['Name'] = chain.Name
    res_dict['total_calls'] = chain.Call_total
    res_dict['termination'] = res.success
    res_dict['Prob Bearish'] = res.x[0]
    res_dict['Prob Bullish'] = 1 - res.x[0]

    #check again
    if res.x[0] > 0.5:
        res_dict['Direction_Price'] = 'Bullish'
    else:
        res_dict['Direction_Price'] = 'Bearish'

    res_dict['F1'] = res.x[1]
    res_dict['Stock_Last'] = chain.Stock_Last
    res_dict['F2'] = res.x[3]
    res_dict['sigma1'] = res.x[2]
    res_dict['Impl_Vol'] = chain.Stock_Volt
    res_dict['sigma2'] = res.x[4]

    risk_neutral_dens_par = partial(risk_neutral_density, res.x, chain)
    vec_risk_neutral_dens_par = np.vectorize(risk_neutral_dens_par)

    real_world_dens_par_1 = partial(real_world_density, res.x, chain, 2)
    vec_real_world_dens_par_1 = np.vectorize(real_world_dens_par_1)

    real_world_dens_par_2 = partial(real_world_density, res.x, chain, 4)
    vec_real_world_dens_par_2 = np.vectorize(real_world_dens_par_2)

    prices = np.arange(S - 0.5 * S, S + 0.5 * S, 0.1)

    risk_neutral = vec_risk_neutral_dens_par(prices)
    real_world_1 = vec_real_world_dens_par_1(prices)
    real_world_2 = vec_real_world_dens_par_2(prices)

    return res_dict, prices, risk_neutral, real_world_1, real_world_2
Example #40
 def test_10_finite_time(self):
     """Test single function constraint passing"""
     options = {'maxtime': 1e-15}
     res = shgo(test1_1.f, test1_1.bounds, n=1, iters=None,
                options=options, sampling_method='sobol')
Example #41
 def test_5_2_sobol_argless(self):
     """Test Default sobol sampling settings on TestFunction 1"""
     res = shgo(test1_1.f, test1_1.bounds, constraints=test1_1.cons,
                sampling_method='sobol')
     numpy.testing.assert_allclose(res.x, test1_1.expected_x, rtol=1e-5,
                                   atol=1e-5)
Example #42
    def run(self):
        """
        Optimize the problem using selected Scipy optimizer.

        Returns
        -------
        boolean
            Failure flag; True if failed to converge, False if successful.
        """
        problem = self._problem
        opt = self.options['optimizer']
        model = problem.model
        self.iter_count = 0
        self._total_jac = None

        # Initial Run
        with RecordingDebugging(self._get_name(), self.iter_count, self) as rec:
            model.run_solve_nonlinear()
            self.iter_count += 1

        self._con_cache = self.get_constraint_values()
        desvar_vals = self.get_design_var_values()
        self._dvlist = list(self._designvars)

        # maxiter and disp get passed into scipy with all the other options.
        self.opt_settings['maxiter'] = self.options['maxiter']
        self.opt_settings['disp'] = self.options['disp']

        # Size Problem
        nparam = 0
        for param in itervalues(self._designvars):
            nparam += param['size']
        x_init = np.empty(nparam)

        # Initial Design Vars
        i = 0
        use_bounds = (opt in _bounds_optimizers)
        if use_bounds:
            bounds = []
        else:
            bounds = None

        for name, meta in iteritems(self._designvars):
            size = meta['size']
            x_init[i:i + size] = desvar_vals[name]
            i += size

            # Bounds if our optimizer supports them
            if use_bounds:
                meta_low = meta['lower']
                meta_high = meta['upper']
                for j in range(size):

                    if isinstance(meta_low, np.ndarray):
                        p_low = meta_low[j]
                    else:
                        p_low = meta_low

                    if isinstance(meta_high, np.ndarray):
                        p_high = meta_high[j]
                    else:
                        p_high = meta_high

                    bounds.append((p_low, p_high))

        if use_bounds and (opt in _supports_new_style) and _use_new_style:
            # For 'trust-constr' it is better to use the new type bounds, because it seems to work
            # better (for the current examples in the tests) with the "keep_feasible" option
            try:
                from scipy.optimize import Bounds
                from scipy.optimize._constraints import old_bound_to_new
            except ImportError:
                msg = ('The "trust-constr" optimizer is supported for SciPy 1.1.0 and above. '
                       'The installed version is {}')
                raise ImportError(msg.format(scipy_version))

            # Convert "old-style" bounds to "new_style" bounds
            lower, upper = old_bound_to_new(bounds)  # tuple, tuple
            keep_feasible = self.opt_settings.get('keep_feasible_bounds', True)
            bounds = Bounds(lb=lower, ub=upper, keep_feasible=keep_feasible)

        # Constraints
        constraints = []
        i = 1  # start at 1 since row 0 is the objective.  Constraints start at row 1.
        lin_i = 0  # counter for linear constraint jacobian
        lincons = []  # list of linear constraints
        self._obj_and_nlcons = list(self._objs)

        if opt in _constraint_optimizers:
            for name, meta in iteritems(self._cons):
                size = meta['size']
                upper = meta['upper']
                lower = meta['lower']
                equals = meta['equals']
                if 'linear' in meta and meta['linear']:
                    lincons.append(name)
                    self._con_idx[name] = lin_i
                    lin_i += size
                else:
                    self._obj_and_nlcons.append(name)
                    self._con_idx[name] = i
                    i += size

                # In scipy constraint optimizers take constraints in two separate formats

                # Type of constraints is list of NonlinearConstraint
                if opt in _supports_new_style and _use_new_style:
                    try:
                        from scipy.optimize import NonlinearConstraint
                    except ImportError:
                        msg = ('The "trust-constr" optimizer is supported for SciPy 1.1.0 and '
                               'above. The installed version is {}')
                        raise ImportError(msg.format(scipy_version))

                    if equals is not None:
                        lb = ub = equals
                    else:
                        lb = lower
                        ub = upper
                    # Loop over every index separately,
                    # because scipy calls each constraint by index.
                    for j in range(size):
                        # Double-sided constraints are accepted by the algorithm
                        args = [name, False, j]
                        # TODO linear constraint if meta['linear']
                        # TODO add option for Hessian
                        con = NonlinearConstraint(fun=signature_extender(self._con_val_func, args),
                                                  lb=lb, ub=ub,
                                                  jac=signature_extender(self._congradfunc, args))
                        constraints.append(con)
                else:  # Type of constraints is list of dict
                    # Loop over every index separately,
                    # because scipy calls each constraint by index.
                    for j in range(size):
                        con_dict = {}
                        if meta['equals'] is not None:
                            con_dict['type'] = 'eq'
                        else:
                            con_dict['type'] = 'ineq'
                        con_dict['fun'] = self._confunc
                        if opt in _constraint_grad_optimizers:
                            con_dict['jac'] = self._congradfunc
                        con_dict['args'] = [name, False, j]
                        constraints.append(con_dict)

                        if isinstance(upper, np.ndarray):
                            upper = upper[j]

                        if isinstance(lower, np.ndarray):
                            lower = lower[j]

                        dblcon = (upper < openmdao.INF_BOUND) and (lower > -openmdao.INF_BOUND)

                        # Add extra constraint if double-sided
                        if dblcon:
                            dcon_dict = {}
                            dcon_dict['type'] = 'ineq'
                            dcon_dict['fun'] = self._confunc
                            if opt in _constraint_grad_optimizers:
                                dcon_dict['jac'] = self._congradfunc
                            dcon_dict['args'] = [name, True, j]
                            constraints.append(dcon_dict)

            # precalculate gradients of linear constraints
            if lincons:
                self._lincongrad_cache = self._compute_totals(of=lincons, wrt=self._dvlist,
                                                              return_format='array')
            else:
                self._lincongrad_cache = None

        # Provide gradients for optimizers that support it
        if opt in _gradient_optimizers:
            jac = self._gradfunc
        else:
            jac = None

        # Hessian calculation method for optimizers, which require it
        if opt in _hessian_optimizers:
            if 'hess' in self.opt_settings:
                hess = self.opt_settings.pop('hess')
            else:
                # Defaults to BFGS, if not in opt_settings
                from scipy.optimize import BFGS
                hess = BFGS()
        else:
            hess = None

        # compute dynamic simul deriv coloring if option is set
        if coloring_mod._use_sparsity and self.options['dynamic_simul_derivs']:
            coloring_mod.dynamic_simul_coloring(self, run_model=False, do_sparsity=False)

        # optimize
        try:
            if opt in _optimizers:
                result = minimize(self._objfunc, x_init,
                                  # args=(),
                                  method=opt,
                                  jac=jac,
                                  hess=hess,
                                  # hessp=None,
                                  bounds=bounds,
                                  constraints=constraints,
                                  tol=self.options['tol'],
                                  # callback=None,
                                  options=self.opt_settings)
            elif opt == 'basinhopping':
                from scipy.optimize import basinhopping

                def fun(x):
                    return self._objfunc(x), jac(x)

                if 'minimizer_kwargs' not in self.opt_settings:
                    self.opt_settings['minimizer_kwargs'] = {"method": "L-BFGS-B", "jac": True}
                self.opt_settings.pop('maxiter')  # It does not have this argument

                def accept_test(f_new, x_new, f_old, x_old):
                    # Used to implement bounds besides the original functionality
                    if bounds is not None:
                        bound_check = all([b[0] <= xi <= b[1] for xi, b in zip(x_new, bounds)])
                        user_test = self.opt_settings.pop('accept_test', None)  # callable
                        # has to satisfy both the bounds and the acceptance test defined by the
                        # user
                        if user_test is not None:
                            test_res = user_test(f_new, x_new, f_old, x_old)
                            if test_res == 'force accept':
                                return test_res
                            else:  # result is boolean
                                return bound_check and test_res
                        else:  # no user acceptance test, check only the bounds
                            return bound_check
                    else:
                        return True

                result = basinhopping(fun, x_init,
                                      accept_test=accept_test,
                                      **self.opt_settings)
            elif opt == 'dual_annealing':
                from scipy.optimize import dual_annealing
                self.opt_settings.pop('disp')  # It does not have this argument
                # There is no "options" param, so "opt_settings" can be used to set the (many)
                # keyword arguments
                result = dual_annealing(self._objfunc,
                                        bounds=bounds,
                                        **self.opt_settings)
            elif opt == 'differential_evolution':
                from scipy.optimize import differential_evolution
                # There is no "options" param, so "opt_settings" can be used to set the (many)
                # keyword arguments
                result = differential_evolution(self._objfunc,
                                                bounds=bounds,
                                                **self.opt_settings)
            elif opt == 'shgo':
                from scipy.optimize import shgo
                kwargs = dict()
                for param in ('minimizer_kwargs', 'sampling_method', 'n', 'iters'):
                    if param in self.opt_settings:
                        kwargs[param] = self.opt_settings[param]
                # Set the Jacobian and the Hessian to the value calculated in OpenMDAO
                if 'minimizer_kwargs' not in kwargs or kwargs['minimizer_kwargs'] is None:
                    kwargs['minimizer_kwargs'] = {}
                kwargs['minimizer_kwargs'].setdefault('jac', jac)
                kwargs['minimizer_kwargs'].setdefault('hess', hess)
                # Objective function tolerance
                self.opt_settings['f_tol'] = self.options['tol']
                result = shgo(self._objfunc,
                              bounds=bounds,
                              constraints=constraints,
                              options=self.opt_settings,
                              **kwargs)
            else:
                msg = 'Optimizer "{}" is not implemented yet. Choose from: {}'
                raise NotImplementedError(msg.format(opt, _all_optimizers))

        # If an exception was swallowed in one of our callbacks, we want to raise it
        # rather than the cryptic message from scipy.
        except Exception as msg:
            if self._exc_info is not None:
                self._reraise()
            else:
                raise

        if self._exc_info is not None:
            self._reraise()

        self.result = result

        if hasattr(result, 'success'):
            self.fail = False if result.success else True
            if self.fail:
                print('Optimization FAILED.')
                print(result.message)
                print('-' * 35)

            elif self.options['disp']:
                print('Optimization Complete')
                print('-' * 35)
        else:
            self.fail = True  # It is not known, so the worst option is assumed
            print('Optimization Complete (success not known)')
            print(result.message)
            print('-' * 35)

        return self.fail
Example #43
 def test_5_1_simplicial_argless(self):
     """Test Default simplicial sampling settings on TestFunction 1"""
     res = shgo(test1_1.f, test1_1.bounds, constraints=test1_1.cons)
     numpy.testing.assert_allclose(res.x, test1_1.expected_x, rtol=1e-5,
                                   atol=1e-5)