Code Example #1
    def minimizeCustom(self, p, q, **kwargs):
        S = numpy.matrix(numpy.identity(4))
        # TODO: try using functions from the nlopt module

        def objectiveFunc(*args, **kwargs):
            d = p
            m = q
            params = args[0]
            if args[1].size > 0:  # gradient
                args[1][:] = numpy.array([pi / 100, pi / 100, pi / 100, 0.01, 0.01, 0.01])  # arbitrary gradient

#            transform = numpy.matrix(numpy.identity(4))
            translate = numpyTransform.translation(params[3:6])
            rotx = numpyTransform.rotation(params[0], [1, 0, 0], N=4)
            roty = numpyTransform.rotation(params[1], [0, 1, 0], N=4)
            rotz = numpyTransform.rotation(params[2], [0, 0, 1], N=4)
            transform = translate * rotx * roty * rotz

            Dicp = numpyTransform.transformPoints(transform, d)

#            err = self.rms_error(m, Dicp)
            err = numpy.mean(numpy.sqrt(numpy.sum((m - Dicp) ** 2, axis=1)))
#            err = numpy.sqrt(numpy.sum((m - Dicp) ** 2, axis=1))
            return err

        x0 = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
        if 'optAlg' in kwargs:
            opt = nlopt.opt(kwargs['optAlg'], 6)
        else:
            opt = nlopt.opt(nlopt.GN_CRS2_LM, 6)

        opt.set_min_objective(objectiveFunc)
        opt.set_lower_bounds([-pi, -pi, -pi, -3.0, -3.0, -3.0])
        opt.set_upper_bounds([pi, pi, pi, 3.0, 3.0, 3.0])
        opt.set_maxeval(1500)
        params = opt.optimize(x0)

#        output = scipy.optimize.leastsq(objectiveFunc, x0, args=funcArgs)
#        params = output[0]

#        params = scipy.optimize.fmin(objectiveFunc, x0, args=funcArgs)

#        constraints = []
#        varBounds = [(-pi, pi), (-pi, pi), (-pi, pi), (-3.0, 3.0), (-3.0, 3.0), (-3.0, 3.0)]
#        params = scipy.optimize.fmin_slsqp(objectiveFunc, x0, eqcons=constraints, bounds=varBounds, args=funcArgs)

#        output = scipy.optimize.fmin_l_bfgs_b(objectiveFunc, x0, bounds=varBounds, args=funcArgs, approx_grad=True)
#        params = output[0]
#        print('Min error:', output[1])

#        params = scipy.optimize.fmin_tnc(objectiveFunc, x0, bounds=varBounds, args=funcArgs, approx_grad=True)
#        params = scipy.optimize.fmin_slsqp(objectiveFunc, x0, eqcons=constraints, bounds=varBounds, args=funcArgs)

        translate = numpyTransform.translation(params[3:6])
        rotx = numpyTransform.rotation(params[0], [1, 0, 0], N=4)
        roty = numpyTransform.rotation(params[1], [0, 1, 0], N=4)
        rotz = numpyTransform.rotation(params[2], [0, 0, 1], N=4)
        transform = translate * rotx * roty * rotz
        return transform, S  # return the full transform (translation included), not just the rotation
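A side note on the gradient branch above: GN_CRS2_LM is a global, derivative-free algorithm (the GN prefix), so NLopt calls the objective with an empty grad array and the arbitrary gradient is never used. A minimal standalone sketch illustrating this:

import nlopt
import numpy as np

calls = {"with_grad": 0, "without_grad": 0}

def probe(x, grad):
    # for GN_* (global, derivative-free) algorithms, grad.size is always 0
    if grad.size > 0:
        calls["with_grad"] += 1
    else:
        calls["without_grad"] += 1
    return float(np.sum(x ** 2))

opt = nlopt.opt(nlopt.GN_CRS2_LM, 2)
opt.set_lower_bounds([-1.0, -1.0])
opt.set_upper_bounds([1.0, 1.0])
opt.set_min_objective(probe)
opt.set_maxeval(200)
opt.optimize(np.array([0.5, 0.5]))
assert calls["with_grad"] == 0  # the gradient branch is never taken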
Code Example #2
File: train.py Project: KelvyHsu/ocean-exploration
def optimise_hypers(criterion, optParams):
    objective = lambda theta, grad: criterion(*unpack(theta, unpackinfo))
    theta_low, _ = pack(optParams.sigma.lowerBound, optParams.noise.lowerBound)
    theta_0, unpackinfo = pack(optParams.sigma.initialVal, optParams.noise.initialVal)
    theta_high, _ = pack(optParams.sigma.upperBound, optParams.noise.upperBound)

    nParams = theta_0.shape[0]
    if optParams.global_opt is True:
        # MLSL needs a local optimizer; create the global optimizer first so the
        # bounds, objective, and walltime below are set on the object actually used
        opt = nl.opt(nl.G_MLSL_LDS, nParams)
        local_opt = nl.opt(nl.LN_BOBYQA, nParams)
        local_opt.set_ftol_rel(1e-4)
        opt.set_local_optimizer(local_opt)
    else:
        opt = nl.opt(nl.LN_BOBYQA, nParams)
        opt.set_ftol_rel(1e-6)
    opt.set_lower_bounds(theta_low)
    opt.set_upper_bounds(theta_high)
    opt.set_min_objective(objective)
    opt.set_maxtime(optParams.walltime)

    assert (theta_low <= theta_0).all()
    assert (theta_high >= theta_0).all()

    theta_opt = opt.optimize(theta_0)
    sigma, noise_sigma = unpack(theta_opt, unpackinfo)
    opt_val = opt.last_optimum_value()
    return sigma, noise_sigma, opt_val
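Two details worth noting about the MLSL setup above: NLopt's MLSL variants require finite bounds and a local optimizer registered via set_local_optimizer (the local object is copied in, so configure it before registering), and tolerances set on the local optimizer control each local search, while those set on the MLSL object control the overall run.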
Code Example #3
File: dihedral.py Project: alejandrovr/htmd
    def _optimize_CRS2_LM(self, vector):
        """
        Controlled random search with local mutations
        """

        # Create a global optimizer
        opt = nlopt.opt(nlopt.GN_CRS2_LM, vector.size)
        opt.set_min_objective(self._objective)
        lower_bounds, upper_bounds = self._getBounds()
        opt.set_lower_bounds(lower_bounds)
        opt.set_upper_bounds(upper_bounds)
        neval = 10000 * opt.get_dimension()  # TODO allow to tune this parameter
        opt.set_maxeval(neval)

        # Optimize parameters
        vector = opt.optimize(vector)  # TODO check optimizer status
        self.loss = opt.last_optimum_value()
        assert self._objective(vector, None) == self.loss

        # Create a local optimizer
        opt = nlopt.opt(nlopt.LN_BOBYQA, opt.get_dimension())
        opt.set_min_objective(self._objective)
        opt.set_lower_bounds(lower_bounds)
        opt.set_upper_bounds(upper_bounds)
        opt.set_xtol_rel(1e-3)
        opt.set_maxeval(neval)
        opt.set_initial_step(1e-3 * (upper_bounds-lower_bounds))

        # Optimize parameters
        vector = opt.optimize(vector)  # TODO check optimizer status
        self.loss = opt.last_optimum_value()
        assert self._objective(vector, None) == self.loss

        return vector
Code Example #4
File: Pyfuzzy.py Project: yanfeng1022/samt2
def start_training(f):
    """ define the training parameters
    """
    opt = nlopt.opt(nlopt.GN_DIRECT_L, f.get_len_output())
    # build the boundaries
    minout=[]
    maxout=[]
    startout=[]
    for i in range(f.get_len_output()-1):
        minout.append(f.get_output(i))
    for i in range(1,f.get_len_output()):
        maxout.append(f.get_output(i))
    for i in range(f.get_len_output()):
        startout.append(f.get_output(i))
    minout.insert(0,minout[0]-(minout[1]-minout[0]))
    maxout.append(maxout[-1]+(maxout[-1]-maxout[-2]))
    print('minout:', minout)
    print('maxout:', maxout)
    print('start:', startout)
    opt.set_lower_bounds(np.array(minout))
    opt.set_upper_bounds(np.array(maxout))
    opt.set_initial_step((f.get_output(1)-f.get_output(0))/500.)
    opt.set_min_objective(f.myfunc)
    opt.set_ftol_rel((f.get_output(1)-f.get_output(0))/100000.)
    opt.set_maxtime(60)  # 60 s
    xopt = opt.optimize(np.array(startout))
    opt_val = opt.last_optimum_value()
    result = opt.last_optimize_result()
    print(' *************Result of Optimization*****************')
    print('max:', opt_val)
    print('parameter:', xopt)
    # set the best values
    for i in range(f.get_len_output()):
        f.set_output(i,xopt[i])
Code Example #5
File: optimize.py Project: ziyuw/AHMC
    def direct(self, alpha):
	import nlopt
	
	fn = lambda x, grad: self.objective_func(x, grad, alpha)
	
	# Using DIRECT as the optimization scheme
	opt = nlopt.opt(nlopt.GN_DIRECT, self.dim)

	# Set the objective
	opt.set_max_objective(fn)

	# Set the maximum number of iterations
	opt.set_maxeval(self.maxeval)

	# Set lower and upper bounds
	opt.set_lower_bounds(self.lb)
	opt.set_upper_bounds(self.ub)

	# Optimize with starting point
	x = opt.optimize(self.start_point)
	#minf = opt.last_optimum_value()
	#print("optimum at ", x[0])
	#print("minimum value = ", minf)
	#print("result code = ", opt.last_optimize_result())

	return x
Code Example #6
def test_make_nlopt_fun_neldermead(start_point):
    x0 = start_point
    opt = nlopt.opt(nlopt.LN_NELDERMEAD, len(x0))
    obj_fun = make_nlopt_fun(rosen, jac=False)
    opt.set_min_objective(obj_fun)
    assert np.allclose(opt.optimize(x0), np.array([1., 1., 1., 1., 1.]))
    assert np.isclose(opt.last_optimum_value(), 0)
Code Example #7
File: esp.py Project: jeiros/htmd
    def run(self):
        """
        Run ESP charge fitting

        Returns
        -------
        results : dict
            Dictionary with the fitted charges and fitting loss value
        """

        # Get charge bounds
        lower_bounds, upper_bounds = self._get_bounds()

        # Set up NLopt
        opt = nlopt.opt(nlopt.LN_COBYLA, self.ngroups)
        opt.set_min_objective(self._compute_objective)
        opt.set_lower_bounds(lower_bounds)
        opt.set_upper_bounds(upper_bounds)
        opt.add_equality_constraint(self._compute_constraint)
        opt.set_xtol_rel(1.e-6)
        opt.set_maxeval(1000*self.ngroups)
        opt.set_initial_step(0.001)

        # Optimize the charges
        group_charges = opt.optimize(np.zeros(self.ngroups) + 0.001) # TODO: a more elegant way to set initial charges
        # TODO: check optimizer status
        charges = self._map_groups_to_atoms(group_charges)
        loss = self._compute_objective(group_charges, None)

        return {'charges': charges, 'loss': loss}
Code Example #8
File: fake.py Project: jeiros/htmd
    def run(self):

        ff = FFEvaluate(self.molecule)

        results = []
        for iframe in range(self.molecule.numFrames):
            self.molecule.frame = iframe

            directory = os.path.join(self.directory, '%05d' % iframe)
            os.makedirs(directory, exist_ok=True)
            pickleFile = os.path.join(directory, 'data.pkl')

            if self._completed(directory):
                with open(pickleFile, 'rb') as fd:
                    result = pickle.load(fd)
                logger.info('Loading QM data from %s' % pickleFile)

            else:
                result = QMResult()
                result.errored = False
                result.coords = self.molecule.coords[:, :, iframe:iframe + 1].copy()

                if self.optimize:
                    opt = nlopt.opt(nlopt.LN_COBYLA, result.coords.size)
                    opt.set_min_objective(lambda x, _: ff.run(x.reshape((-1, 3)))['total'])
                    if self.restrained_dihedrals is not None:
                        for dihedral in self.restrained_dihedrals:
                            indices = dihedral.copy()
                            ref_angle = np.deg2rad(dihedralAngle(self.molecule.coords[indices, :, iframe]))
                            # Bind loop variables via default arguments so each
                            # constraint keeps its own dihedral (late-binding fix)
                            def constraint(x, _, indices=indices, ref_angle=ref_angle):
                                coords = x.reshape((-1, 3))
                                angle = np.deg2rad(dihedralAngle(coords[indices]))
                                return np.sin(.5*(angle - ref_angle))
                            opt.add_equality_constraint(constraint)
                    opt.set_xtol_abs(1e-3) # Similar to Psi4 default
                    opt.set_maxeval(1000*opt.get_dimension())
                    opt.set_initial_step(1e-3)
                    result.coords = opt.optimize(result.coords.ravel()).reshape((-1, 3, 1))
                    logger.info('Optimization status: %d' % opt.last_optimize_result())

                result.energy = ff.run(result.coords[:, :, 0])['total']
                result.dipole = self.molecule.getDipole()

                if self.optimize:
                    assert opt.last_optimum_value() == result.energy # A self-consistency test

                # Compute ESP values
                if self.esp_points is not None:
                    assert self.molecule.numFrames == 1
                    result.esp_points = self.esp_points
                    distances = cdist(result.esp_points, result.coords[:, :, 0])  # Angstrom
                    distances *= const.physical_constants['Bohr radius'][0] / const.angstrom  # Angstrom --> Bohr
                    result.esp_values = np.dot(np.reciprocal(distances), self.molecule.charge)  # Hartree/Bohr

                with open(pickleFile, 'wb') as fd:
                    pickle.dump(result, fd)

            results.append(result)

        return results
Code Example #9
File: minimization.py Project: sybenzvi/3ML
        def __init__(self, function, parameters, ftol=1e-5, verbosity=1):
            super(BOBYQAMinimizer, self).__init__(function, parameters, ftol, verbosity)

            # setup the bobyqa minimizer
            self.x0 = list(map(lambda x: x.value, self.parameters.values()))
            self.lowerBounds = list(map(lambda x: x.minValue, self.parameters.values()))
            self.upperBounds = list(map(lambda x: x.maxValue, self.parameters.values()))
            self.steps = list(map(lambda x: x.delta, self.parameters.values()))
            self.objectiveFunction = function

            def wrapper(x, grad):
                if grad.size > 0:
                    print("This won't ever happen, since BOBYQA does not use derivatives")
                return self.objectiveFunction(x)

            self.wrapper = wrapper

            self.bob = nlopt.opt(nlopt.LN_BOBYQA, self.Npar)
            self.bob.set_min_objective(self.wrapper)
            self.bob.set_ftol_abs(ftol)
            # Stop if all the parameters change by less than 0.1% (xtol_rel = 0.001)
            self.bob.set_xtol_rel(0.001)
            self.bob.set_initial_step(self.steps)

            self.bob.set_lower_bounds(self.lowerBounds)
            self.bob.set_upper_bounds(self.upperBounds)
Code Example #10
def test_make_nlopt_fun_grad1(start_point):
    x0 = start_point
    opt = nlopt.opt(nlopt.LD_LBFGS, len(x0))
    obj_fun = make_nlopt_fun(rosen_couple, jac=rosen_der)
    opt.set_min_objective(obj_fun)
    assert np.allclose(opt.optimize(x0), np.array([1., 1., 1., 1., 1.]))
    assert np.isclose(opt.last_optimum_value(), 0)
Code Example #11
def nlopt_test():
    '''This is from the tutorial'''
    raise SkipTest
    def myfunc(x, grad):
        if grad.size > 0:
            grad[0] = 0.0
            grad[1] = old_div(0.5, math.sqrt(x[1]))
        return math.sqrt(x[1])

    def myconstraint(x, grad, a, b):
        if grad.size > 0:
            grad[0] = 3 * a * (a*x[0] + b)**2
            grad[1] = -1.0
        return (a*x[0] + b)**3 - x[1]

    opt = nlopt.opt(nlopt.LD_MMA, 2)
    opt.set_lower_bounds([-float('inf'), 0])
    opt.set_min_objective(myfunc)
    opt.add_inequality_constraint(lambda x, grad: myconstraint(x, grad, 2, 0), 1e-8)
    opt.add_inequality_constraint(lambda x, grad: myconstraint(x, grad, -1, 1), 1e-8)
    opt.set_xtol_rel(1e-4)
    x = opt.optimize([1.234, 5.678])
    minf = opt.last_optimum_value()
    print("optimum at ", x[0],x[1])
    print("minimum value = ", minf)
    print("result code = ", opt.last_optimize_result())
Code Example #12
def _minimize_nlopt(
    criterion_and_derivative,
    x,
    lower_bounds,
    upper_bounds,
    algorithm,
    algorithm_name,
    *,
    convergence_xtol_rel=None,
    convergence_xtol_abs=None,
    convergence_ftol_rel=None,
    convergence_ftol_abs=None,
    stopping_max_eval=None,
    population_size=None,
    algo_info=None,
):
    """Run actual nlopt optimization argument, set relevant attributes."""
    if algo_info is None:
        algo_info = DEFAULT_ALGO_INFO.copy()
    else:
        algo_info = algo_info.copy()
    algo_info["name"] = algorithm_name

    def func(x, grad):
        if grad.size > 0:
            criterion, derivative = criterion_and_derivative(
                x,
                task="criterion_and_derivative",
                algorithm_info=algo_info,
            )
            grad[:] = derivative
        else:
            criterion = criterion_and_derivative(
                x,
                task="criterion",
                algorithm_info=algo_info,
            )
        return criterion

    opt = nlopt.opt(algorithm, x.shape[0])
    if convergence_ftol_rel is not None:
        opt.set_ftol_rel(convergence_ftol_rel)
    if convergence_ftol_abs is not None:
        opt.set_ftol_abs(convergence_ftol_abs)
    if convergence_xtol_rel is not None:
        opt.set_xtol_rel(convergence_xtol_rel)
    if convergence_xtol_abs is not None:
        opt.set_xtol_abs(convergence_xtol_abs)
    if lower_bounds is not None:
        opt.set_lower_bounds(lower_bounds)
    if upper_bounds is not None:
        opt.set_upper_bounds(upper_bounds)
    if stopping_max_eval is not None:
        opt.set_maxeval(stopping_max_eval)
    if population_size is not None:
        opt.set_population(population_size)
    opt.set_min_objective(func)
    solution_x = opt.optimize(x)
    return _process_nlopt_results(opt, solution_x)
Code Example #13
    def fit(self, X, y): # -1 for unlabeled
        unlabeledX = X[y==-1, :]
        labeledX = X[y!=-1, :]
        labeledy = y[y!=-1]
        
        M = unlabeledX.shape[0]
        
        # train on labeled data
        self.model.fit(labeledX, labeledy)

        unlabeledy = self.predict(unlabeledX)
        
        #re-train, labeling unlabeled instances pessimistically
        
        # pessimistic soft labels ('weights') q for unlabelled points, q=P(k=0|Xu)
        f = lambda softlabels, grad=[]: self.discriminative_likelihood_objective(self.model, labeledX, labeledy=labeledy, unlabeledData=unlabeledX, unlabeledWeights=numpy.vstack((softlabels, 1-softlabels)).T, gradient=grad) #- supLL
        lblinit = numpy.random.random(len(unlabeledy))

        try:
            self.it = 0
            opt = nlopt.opt(nlopt.GN_DIRECT_L_RAND, M)
            opt.set_lower_bounds(numpy.zeros(M))
            opt.set_upper_bounds(numpy.ones(M))
            opt.set_min_objective(f)
            opt.set_maxeval(self.max_iter)
            self.bestsoftlbl = opt.optimize(lblinit)
            if self.verbose >= 1:
                print(" max_iter exceeded.")
        except Exception as e:
            print(e)
            self.bestsoftlbl = self.bestlbls
            
        if numpy.any(self.bestsoftlbl != self.bestlbls):
            self.bestsoftlbl = self.bestlbls
        
        self.bestsoftlbl = numpy.array(self.bestsoftlbl)
        ll = f(self.bestsoftlbl)
        
        unlabeledy = (self.bestsoftlbl<0.5)*1
        uweights = numpy.copy(self.bestsoftlbl) # large prob. for k=0 instances, small prob. for k=1 instances 
        uweights[unlabeledy==1] = 1-uweights[unlabeledy==1] # subtract from 1 for k=1 instances to reflect confidence
        weights = numpy.hstack((numpy.ones(len(labeledy)), uweights))
        labels = numpy.hstack((labeledy, unlabeledy))
        if self.use_sample_weighting:
            self.model.fit(numpy.vstack((labeledX, unlabeledX)), labels, sample_weight=weights)
        else:
            self.model.fit(numpy.vstack((labeledX, unlabeledX)), labels)
        
        if self.verbose > 1:
            print("number of non-one soft labels: ", numpy.sum(self.bestsoftlbl != 1), ", balance:", numpy.sum(self.bestsoftlbl<0.5), " / ", len(self.bestsoftlbl))
            print("current likelihood: ", ll)
        
        if not getattr(self.model, "predict_proba", None):
            # Platt scaling
            self.plattlr = LR()
            preds = self.model.predict(labeledX)
            self.plattlr.fit( preds.reshape( -1, 1 ), labeledy )
            
        return self
Code Example #14
def test_make_nlopt_fun_bobyqa(start_point):
    x0 = start_point
    opt = nlopt.opt(nlopt.LN_BOBYQA, len(x0))
    obj_fun = make_nlopt_fun(rosen, jac=False)
    opt.set_min_objective(obj_fun)
    opt.set_ftol_abs(1e-11)
    assert np.allclose(opt.optimize(x0), np.array([1., 1., 1., 1., 1.]))
    assert np.isclose(opt.last_optimum_value(), 0)
Code Example #15
def calibrate_nlopt_test(alg, pguess, bounds, meas, sim, platooninfo, platoon, makeleadfolinfo, platoonobjfn, platoonobjfn_der, model, modeladjsys, modeladj, *args, evalper=20):
    # This takes a specified platoon and calibrates it using one of the NLopt algorithms.

    # Refer to calibrate_bfgs2 for the most up-to-date documentation of what all the parameters are.
    # Note that NLopt doesn't support the objective and gradient being returned at the same time
    # (not ideal for automatic differentiation or the adjoint method, because it's slower), so
    # platoonobjfn_der needs to return only the grad, which is written into NLopt's grad array in place.
    
    N = len(pguess) #total number of parameters
    m = args[1] #number of parameters per vehicle 
    opt = nlopt.opt(alg,N)
    if alg == nlopt.G_MLSL_LDS or alg == nlopt.G_MLSL: 
        optlocal = nlopt.opt(nlopt.LD_TNEWTON_PRECOND_RESTART, N)
        opt.set_local_optimizer(optlocal)
    lb = []; ub = [];
    for i in bounds: 
        lb.append(i[0]); ub.append(i[1])
    
    opt.set_lower_bounds(lb)
    opt.set_upper_bounds(ub)
    
    maxfun = max(200, evalper * N)
    opt.set_maxeval(maxfun)
    
    leadinfo, folinfo, rinfo = makeleadfolinfo(platoon, platooninfo, sim) #note that this needs to be done with sim and not meas 
    
    count = 0
    countgrad = 0
    
    def nlopt_fun(p, grad):
        nonlocal count
        nonlocal countgrad
        if len(grad) > 0:
            # write the gradient in place; the objective value must still be returned below
            grad[:] = platoonobjfn_der(p, model, modeladjsys, modeladj, meas, sim, platooninfo, platoon, leadinfo, folinfo, rinfo, *args)
            countgrad += 1
        obj = platoonobjfn(p, model, modeladjsys, modeladj, meas, sim, platooninfo, platoon, leadinfo, folinfo, rinfo, *args)
        count += 1
        return obj
            
    opt.set_min_objective(nlopt_fun)
    
    ans = opt.optimize(pguess) #returns answer
    
    return ans, count, countgrad
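The comments above touch on the core NLopt calling convention: a Python objective always returns the function value as a float, and gradients are communicated only by writing into the grad array in place. A minimal sketch of that convention, independent of the calibration code above:

import nlopt
import numpy as np

def objective(x, grad):
    if grad.size > 0:
        grad[:] = 2.0 * x          # the gradient is written in place...
    return float(np.sum(x ** 2))   # ...and the value is always returned

opt = nlopt.opt(nlopt.LD_LBFGS, 3)
opt.set_min_objective(objective)
opt.set_xtol_rel(1e-8)
x_opt = opt.optimize(np.array([1.0, -2.0, 0.5]))  # converges to the origin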
Code Example #16
def set_up_and_run_nlopt(Run_H, user_specs):
    """ Set up objective and runs nlopt

    Declares the appropriate syntax for our special objective function to read
    through Run_H, sets the parameters and starting points for the run.
    """

    assert any(k in user_specs for k in ('xtol_rel', 'xtol_abs', 'ftol_rel', 'ftol_abs')), "NLopt can cycle if xtol_rel, xtol_abs, ftol_rel, or ftol_abs are not set"

    def nlopt_obj_fun(x, grad, Run_H):
        out = look_in_history(x, Run_H)

        if user_specs['localopt_method'] in ['LD_MMA']:
            grad[:] = out[1]
            out = out[0]

        return out

    n = len(user_specs['ub'])

    opt = nlopt.opt(getattr(nlopt, user_specs['localopt_method']), n)

    lb = np.zeros(n)
    ub = np.ones(n)
    opt.set_lower_bounds(lb)
    opt.set_upper_bounds(ub)
    x0 = Run_H['x_on_cube'][0]

    # Care must be taken here because a too-large initial step causes nlopt to move the starting point!
    dist_to_bound = min(min(ub - x0), min(x0 - lb))
    assert dist_to_bound > np.finfo(
        np.float32
    ).eps, "The distance to the boundary is too small for NLopt to handle"

    if 'dist_to_bound_multiple' in user_specs:
        opt.set_initial_step(dist_to_bound *
                             user_specs['dist_to_bound_multiple'])
    else:
        opt.set_initial_step(dist_to_bound)

    opt.set_maxeval(len(Run_H) + 1)  # evaluate one more point
    opt.set_min_objective(lambda x, grad: nlopt_obj_fun(x, grad, Run_H))
    if 'xtol_rel' in user_specs:
        opt.set_xtol_rel(user_specs['xtol_rel'])
    if 'ftol_rel' in user_specs:
        opt.set_ftol_rel(user_specs['ftol_rel'])
    if 'xtol_abs' in user_specs:
        opt.set_xtol_abs(user_specs['xtol_abs'])
    if 'ftol_abs' in user_specs:
        opt.set_ftol_abs(user_specs['ftol_abs'])

    x_opt = opt.optimize(x0)
    exit_code = opt.last_optimize_result()

    if exit_code == 5:  # NLOPT code for exhausting budget of evaluations, so not at a minimum
        exit_code = 0

    return x_opt, exit_code
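The magic number 5 above can also be compared symbolically, since NLopt exposes its result codes as module-level constants. A short reference sketch:

import nlopt

# Positive codes indicate normal termination:
#   nlopt.SUCCESS == 1, nlopt.STOPVAL_REACHED == 2, nlopt.FTOL_REACHED == 3,
#   nlopt.XTOL_REACHED == 4, nlopt.MAXEVAL_REACHED == 5, nlopt.MAXTIME_REACHED == 6
assert nlopt.MAXEVAL_REACHED == 5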
Code Example #17
 def mle(self, params, maxiter=100):
     opt = nlopt.opt(nlopt.LN_COBYLA, params.size)
     opt.set_min_objective(self.likelihood)
     opt.set_maxeval(maxiter)
     opt.set_lower_bounds(np.zeros( params.size) )
     opt.set_initial_step(np.linalg.norm(params))
     opt.set_ftol_rel(1e-3)
     params = opt.optimize( params )
     return params
Code Example #18
File: shockt.py Project: gjhunt/app
 def init_opt(self, new_options, npar):
     ret_options = self.optim_params_init.copy()
     for k, v in new_options.items():
         ret_options[k] = v
     opt = nlopt.opt(ret_options['algo'], npar)
     opt.set_xtol_rel(ret_options['xtol_rel'])
     opt.set_maxeval(ret_options['maxeval'])
     opt.set_maxtime(ret_options['maxtime'])
     return opt
Code Example #19
def test_make_nlopt_fun_grad5(start_point):
    # Of course, you can use gradient-based optimization and not supply
    # any gradient information at your own discretion.
    # No warnings are raised.
    x0 = start_point
    opt = nlopt.opt(nlopt.LD_LBFGS, len(x0))
    obj_fun = make_nlopt_fun(rosen, jac=False)
    opt.set_min_objective(obj_fun)
    assert np.allclose(opt.optimize(x0), x0)
Code Example #20
File: final.py Project: chicagohawk/twinmodel
 def mle(self, params, maxiter=100):
     opt = nlopt.opt(nlopt.LN_COBYLA, params.size)
     opt.set_min_objective(self.likelihood)
     opt.set_maxeval(maxiter)
     opt.set_lower_bounds(np.zeros( params.size) )
     opt.set_initial_step(np.linalg.norm(params))
     opt.set_ftol_rel(1e-3)
     params = opt.optimize( params )
     return params
Code Example #21
    def __init__(self, dim, config_dict, logger=None):

        self.debug = False
        self.logger = logger
        self.dim = dim
        self.options = SimpleNamespace(**config_dict)

        self.beta = None
        self.beta_0 = self.options.beta_0
        self.gamma = self.options.gamma
        self.epsilon = self.options.epsilon
        self.eta_0 = self.options.eta_0
        self.omega_0 = self.options.omega_0
        self.h_0 = self.options.h_0  # minimum entropy of the search distribution

        # Setting up optimizer
        opt = nlopt.opt(nlopt.LD_LBFGS, 2)

        opt.set_lower_bounds((1e-20, 1e-20))
        opt.set_upper_bounds((np.inf, np.inf))

        opt.set_ftol_abs(1e-12)
        opt.set_ftol_rel(1e-12)
        opt.set_xtol_abs(1e-12)
        opt.set_xtol_rel(1e-12)
        opt.set_maxeval(1000)
        opt.set_maxtime(5 * 60 * 60)

        def opt_func(x, grad):
            g = self.dual_and_grad(x, grad)
            if np.isinf(g):
                opt.set_lower_bounds((float(x[0]), 1e-20))
            return float(g.flatten())

        opt.set_min_objective(opt_func)

        self.opt = opt
        self._grad_bound = 1e-5

        # constant values
        self._log_2_pi_k = self.dim * (np.log(2 * np.pi))
        self._entropy_const = self.dim * (np.log(2 * np.pi) + 1)

        # cached values
        self._eta = 1
        self._omega = 1
        self._old_term = None
        self._old_dist = None
        self._current_model = None
        self._dual = np.inf
        self._grad = np.zeros(2)
        self._kl = np.inf
        self._kl_mean = np.inf
        self._kl_cov = np.inf
        self._new_entropy = np.inf
        self._new_mean = None
        self._new_cov = None
Code Example #22
def test_make_nlopt_fun_grad_free1(start_point):
    # When using derivative-free optimization methods, gradient information
    # supplied in any form is disregarded without warning.
    x0 = start_point
    opt = nlopt.opt(nlopt.LN_NELDERMEAD, len(x0))
    obj_fun = make_nlopt_fun(rosen_couple, jac=True)
    opt.set_min_objective(obj_fun)
    assert np.allclose(opt.optimize(x0), np.array([1., 1., 1., 1., 1.]))
    assert np.isclose(opt.last_optimum_value(), 0)
Code Example #23
File: spearmint_pes.py Project: ntienvu/ICDM2017_FBO
def acq_max_nlopt_pes(ac,bounds):
    import nlopt
    
    def objective(x, grad):
            """Objective function in the form required by nlopt."""
            #print "=================================="
            if grad.size > 0:
                fx, gx = ac(x[None], grad=True)
                grad[:] = gx[0][:]
            else:
                try:
                    fx = ac(x)
                    if isinstance(fx,list):
                        fx=fx[0]
                except:
                    return 0
            return fx[0]
            
    tol=1e-6
    bounds = np.array(bounds, ndmin=2)

    dim=bounds.shape[0]
    opt = nlopt.opt(nlopt.GN_DIRECT, dim)

    opt.set_lower_bounds(bounds[:, 0])
    opt.set_upper_bounds(bounds[:, 1])
    #opt.set_ftol_rel(tol)
    opt.set_maxeval(5000*dim)
    opt.set_xtol_abs(tol)

    #opt.set_ftol_abs(tol)   # absolute tolerance on the function value
    #opt.set_xtol_rel(tol)   # relative tolerance on the optimization parameters
    #opt.set_xtol_abs(tol)   # absolute tolerance on the optimization parameters

    opt.set_maxtime(5000 * dim)
    
    opt.set_max_objective(objective)    

    xinit=random.uniform(bounds[:,0],bounds[:,1])
    
    try:
        xoptimal = opt.optimize(xinit.copy())

    except:
        xoptimal=xinit
     
    fmax= opt.last_optimum_value()
    
    code=opt.last_optimize_result()
    status=1

    if code<4:
        #print "nlopt code = {:d}".format(code)
        status=0

    return xoptimal, fmax, status
Code Example #24
File: solver.py Project: jomorlier/byextopopt
    def __init__(self, nelx, nely, params, problem_type, bc, gui=None):
        """
        Allocate and initialize internal data structures.
        """
        n = nelx * nely
        self.nelx = nelx
        self.nely = nely
        self.opt = nlopt.opt(nlopt.LD_MMA, n)
        self.problem_type = problem_type

        # Alloc arrays
        self.x_phys = numpy.ones(n)
        self.x_rgb = numpy.ones((nelx, nely, 3))

        # Set bounds
        lb = numpy.array(params.densityMin * numpy.ones(n, dtype=float))
        ub = numpy.array(1.0 * numpy.ones(n, dtype=float))
        bc.set_passive_elements(nelx, nely, lb, ub, params.problemOptions)
        self.opt.set_upper_bounds(ub)
        self.opt.set_lower_bounds(lb)

        # Set stopping criteria
        self.opt.set_maxeval(params.maxSolverStep)
        self.opt.set_ftol_rel(0.0)

        # Setup topopt problem
        if problem_type.involves_compliance():
            self.problem = TopoptProblem(nelx, nely, params, bc)

        # Setup filter
        self.filtering = Filter(nelx, nely, params, problem_type)

        # Setup appearance matcher
        if problem_type.involves_appearance():
            self.exemplar_rgb = images.load_exemplar(params)
            self.appearance_cl = AppearanceCL(params.lambdaOccurrenceMap,
                                              params.exponentSimilarityMetric,
                                              params.appearanceNormWeight)

        # Setup user parameters
        self.params = params

        # Setup functional right-hand sides
        self.volume_max = self.params.volumeFracMax * n
        self.volume_min = self.params.volumeFracMin * n
        self.appearance_max = 0
        self.compliance_max = 0
        if problem_type.involves_compliance():
            if 'complianceMax' in params:
                self.compliance_max = params.complianceMax

        # Define optimization problem
        self.init_problem()

        # Set GUI callback
        self.gui = gui
Code Example #25
  def _optimize_inputs_nlopt(self, sess, inputs_new, losses_op, feed_dict=None,
    var_list=None, config=None, time_limit=np.inf, **kwargs):
    '''
    Optimize a loss function w.r.t. a set of inputs using
    NLopt methods such as Dividing Rectangles (DIRECT).
    '''
    feed_dict = dict() if (feed_dict is None) else feed_dict.copy()

    # Prepare requisite graph elements
    inputs_new_shape = inputs_new.get_shape().as_list()

    # Create TensorFlow wrapper
    inputs_seq, loss_seq, start_time = [], [], self.timer()
    def wrapper(inputs_new_vect, user_data):
      if self.timer() - start_time > time_limit:
        raise StopIteration('Runtime limit exhausted.')
      feed_dict[inputs_new] = np.reshape(inputs_new_vect, inputs_new_shape)
      losses_per_step = sess.run(losses_op, feed_dict)
      inputs_seq.append(feed_dict[inputs_new].copy())
      loss_seq.append(losses_per_step)
      return np.mean(losses_per_step)

    # Initialize and configure NLopt optimizer
    import nlopt # pylint: disable=import-not-at-top
    config = self.update_dict(self.configs['nlopt'], config, as_copy=True)
    num_params = int(np.prod(inputs_new_shape))
    optimizer = nlopt.opt(getattr(nlopt, config['method']), num_params)
    optimizer.set_min_objective(wrapper)
    optimizer.set_maxeval(config.get('maxfun', 1024))
    optimizer.set_lower_bounds(config.get('lower', 0.0))
    optimizer.set_upper_bounds(config.get('upper', 1.0))

    use_initial = config.pop('use_initial', True)
    if use_initial:
      starts = np.ravel(sess.run(inputs_new))
    else:
      starts = self.rng.rand(num_params)  # random starting point on the unit cube

    # Run optimizer until convergence or time limit is exhausted
    try:
      _ = optimizer.optimize(starts)
    except StopIteration:
      pass  # allows us to stop early

    inputs_seq, loss_seq = np.stack(inputs_seq), np.ravel(loss_seq)
    argmins = np.where(loss_seq == np.min(loss_seq))[0]
    inputs_new_src = inputs_seq[self.rng.choice(argmins)]

    # Assign optimized values to <tf.Variable>
    ref = self.get_or_create_ref('assignment', dtype=inputs_new.dtype)
    assign_op = self.get_or_create_node('assign', tf.assign, (inputs_new, ref))
    sess.run(assign_op, {ref:inputs_new_src})

    logger.info('NLopt evaluated {:d} losses in {:.3e}s'\
          .format(len(loss_seq), self.timer() - start_time))
    return loss_seq
Code Example #26
File: test_nlopt.py Project: alexeyignatiev/pyodide
def test_nlopt(selenium):
    import nlopt
    import numpy as np

    # objective function
    def f(x, grad):
        x0 = x[0]
        x1 = x[1]
        y = (
            67.8306620138889
            - 13.5689721666667 * x0
            - 3.83269458333333 * x1
            + 0.720841066666667 * x0**2
            + 0.3427605 * x0 * x1
            + 0.0640322916666664 * x1**2
        )

        grad[0] = 1.44168213333333 * x0 + 0.3427605 * x1 - 13.5689721666667
        grad[1] = 0.3427605 * x0 + 0.128064583333333 * x1 - 3.83269458333333

        return y

    # inequality constraint (constrained to be <= 0)
    def h(x, grad):
        x0 = x[0]
        x1 = x[1]
        z = (
            -3.72589930555515
            + 128.965158333333 * x0
            + 0.341479166666643 * x1
            - 0.19642666666667 * x0**2
            + 2.78692500000002 * x0 * x1
            - 0.0000104166666686543 * x1**2
            - 468.897287036862
        )

        grad[0] = -0.39285333333334 * x0 + 2.78692500000002 * x1 + 128.965158333333
        grad[1] = 2.78692500000002 * x0 - 2.08333333373086e-5 * x1 + 0.341479166666643

        return z

    opt = nlopt.opt(nlopt.LD_SLSQP, 2)
    opt.set_min_objective(f)

    opt.set_lower_bounds(np.array([2.5, 7]))
    opt.set_upper_bounds(np.array([7.5, 15]))

    opt.add_inequality_constraint(h)

    opt.set_ftol_rel(1.0e-6)

    x0 = np.array([5, 11])

    xopt = opt.optimize(x0)

    assert np.linalg.norm(xopt - np.array([2.746310775, 15.0])) < 1e-7
Code Example #27
    def __init__(self, vars, solver_name):
        try:
            import nlopt as N
        except ImportError:
            raise Exception(
                'Error: In order to use an NLopt solver, you must have NLopt installed.'
            )

        GrooveType.__init__(self, vars)
        self.solver_name = solver_name

        if solver_name == 'slsqp':
            self.opt = N.opt(N.LD_SLSQP, len(self.vars.init_state))
        elif solver_name == 'ccsaq':
            self.opt = N.opt(N.LD_CCSAQ, len(self.vars.init_state))
        elif solver_name == 'mma':
            self.opt = N.opt(N.LD_MMA, len(self.vars.init_state))
        elif solver_name == 'bobyqa':
            self.opt = N.opt(N.LN_BOBYQA, len(self.vars.init_state))
        elif solver_name == 'cobyla':
            self.opt = N.opt(N.LN_COBYLA, len(self.vars.init_state))
        elif solver_name == 'lbfgs':
            self.opt = N.opt(N.LD_LBFGS, len(self.vars.init_state))
        elif solver_name == 'mlsl':
            self.opt = N.opt(N.GD_MLSL, len(self.vars.init_state))
        elif solver_name == 'direct':
            self.opt = N.opt(N.GN_DIRECT_L_RAND, len(self.vars.init_state))
        elif solver_name == 'newuoa':
            self.opt = N.opt(N.LN_NEWUOA_BOUND, len(self.vars.init_state))
        else:
            raise Exception(
                'Invalid solver_name in subroutine [GrooveType_nlopt]!')

        self.opt.set_min_objective(obj.objective_master_nlopt)
        self.opt.set_xtol_rel(1e-4)
        # self.opt.set_maxtime(.025)
        if self.vars.bounds == ():
            self.opt.set_lower_bounds(len(self.vars.init_state) * [-50.0])
            self.opt.set_upper_bounds(len(self.vars.init_state) * [50.0])
        else:
            u = []
            l = []
            for b in self.vars.bounds:
                u.append(b[1])
                l.append(b[0])

            self.opt.set_lower_bounds(l)
            self.opt.set_upper_bounds(u)
Code Example #28
File: fit.py Project: alasfar-lina/pyik
 def MinimizeWithMethod(method):
     if method == "PRAXIS":
         method = nlopt.LN_PRAXIS
     elif method == "BOBYQA":
         method = nlopt.LN_BOBYQA
     elif method == "COBYLA":
         method = nlopt.LN_COBYLA
     elif method == "SBPLX":
         method = nlopt.LN_SBPLX
     elif method == "MLSL":
         method = nlopt.G_MLSL_LDS, nlopt.LN_BOBYQA
     elif method == "DIRECT":
         method = nlopt.GN_DIRECT
     elif method == "DIRECT-L":
         method = nlopt.GN_DIRECT_L
     npar = len(iStarts)
     if type(method) == tuple:
         opt = nlopt.opt(method[0], npar)
         local_opt = nlopt.opt(method[1], npar)
         local_opt.set_lower_bounds(iLower)
         local_opt.set_upper_bounds(iUpper)
         opt.set_local_optimizer(local_opt)
         opt.set_population(self.stochastic_population)
     else:
         opt = nlopt.opt(method, npar)
     opt.set_min_objective(WrappedFunction)
     opt.set_ftol_abs(self.ftolabs)
     opt.set_ftol_rel(self.ftolrel)
     opt.set_maxtime(self.max_time)
     if self.maxeval is None:
         opt.set_maxeval(1000 + 100 * npar**2)
     else:
         opt.set_maxeval(self.maxeval)
     opt.set_lower_bounds(iLower)
     opt.set_upper_bounds(iUpper)
     sqrtdbleps = np.sqrt(np.finfo(np.double).eps)
     opt.set_xtol_rel(sqrtdbleps)
     iresult = opt.optimize(iStarts)
     if np.isinf(opt.last_optimum_value()):
         raise ValueError("got inf")
     if np.isnan(opt.last_optimum_value()):
         raise ValueError("got nan")
     return iresult
Code Example #29
def min_dist(X,
             dist='Cramer-von Mises',
             n_component=3,
             w0=None,
             g0=None,
             m0=None,
             s0=None,
             tol=1e-4,
             max_iter=1000,
             method='PRAXIS'):
    m = n_component
    if method == 'PRAXIS':
        local_opter = nlopt.opt(nlopt.LN_PRAXIS, 4 * m - 1)
        local_opter.set_ftol_abs(tol)
        local_opter.set_maxeval(max_iter)
        opter = nlopt.opt(nlopt.AUGLAG, 4 * m - 1)
        opter.set_local_optimizer(local_opter)
    elif method == 'COBYLA':
        opter = nlopt.opt(nlopt.LN_COBYLA, 4 * m - 1)
    else:
        raise ValueError("method must be 'PRAXIS' or 'COBYLA'")
    opter.set_min_objective(
        lambda theta, grad: dist_fun_allinone(theta, X, name=dist))
    lb = np.array([0.] * (m - 1) + [-np.inf] * (2 * m) + [0.] * m)
    ub = np.array([1.] * (m - 1) + [np.inf] * (3 * m))
    opter.set_lower_bounds(lb)
    opter.set_upper_bounds(ub)
    opter.add_inequality_constraint(
        lambda theta, grad: np.sum(theta[:(m - 1)]) - 1)
    theta0 = np.append(
        np.array(w0[:-1]),
        np.append(np.array(g0), np.append(np.array(m0), np.array(s0))))
    opter.set_ftol_abs(tol)
    opter.set_maxeval(max_iter)
    try:
        theta = opter.optimize(theta0)
    except (nlopt.RoundoffLimited, ValueError):
        theta = np.array([np.nan] * (4 * m - 1))
    w = theta[:(m - 1)]
    w = np.append(w, [1 - w.sum()])
    gamma = theta[(m - 1):(2 * m - 1)]
    mu = theta[(2 * m - 1):(3 * m - 1)]
    sigma = theta[(3 * m - 1):(4 * m - 1)]
    r = opter.last_optimize_result()
    return w, gamma, mu, sigma, r  # in [nlopt.XTOL_REACHED, nlopt.FTOL_REACHED]
Code Example #30
def amoroso_binned_max_log_likelihood(samples, initial_guess=None, nbins=50):
    if initial_guess is None:
        initial_guess = np.array([1.005 * samples.max(), samples.std(), 1.1, 1])
        # for negative skew, use negative scale parameter
        if scipy.stats.skew(samples) < 0:
            initial_guess[1] *= -1

    # initial_guess = np.array([ 1.44631991, -0.02302599,  1.370993,    1.00922993])
    # initial_guess = np.array([ 1.44506214, -0.02157434,  1.28101393,  0.90385331])

    bounds = [(None, None),
              (0.0, None) if initial_guess[1] > 0 else (None, 0.0),
              (0, None),
              (0.0, None)
              ]

    # bin the data with Bayesian blocks
    # from astroML.plotting import hist
    # bin_counts, bin_edges, _ = hist(samples, bins='blocks')

    from matplotlib.pyplot import hist
    bin_counts, bin_edges, _ = hist(samples, bins=nbins)

    print()
    print("initial guess", initial_guess, "f", amoroso_binned_log_likelihood(initial_guess, bin_edges, bin_counts))

    # scipy.optimize
    # kwargs = dict(bounds=bounds, options=dict(disp=True, maxiter=100),
    #               method='Powell',
    #               args=(bin_edges, bin_counts))
    # return scipy.optimize.minimize(amoroso_binned_log_likelihood, initial_guess, **kwargs)

    # nlopt
    import nlopt

    # best results with LN_COBYLA, LN_SBPLX, GN_CRS2_LM
    # not good: LN_BOBYQA, LN_PRAXIS, GN_DIRECT_L, GN_ISRES, GN_ESCH
    # opt = nlopt.opt(nlopt.GN_CRS2_LM, 4)
    # opt = nlopt.opt(nlopt.LN_SBPLX, 4)
    opt = nlopt.opt(nlopt.LN_COBYLA, 4)
    opt.set_min_objective(lambda x, grad: amoroso_binned_log_likelihood(x, bin_edges, bin_counts))

    opt.set_lower_bounds([0.95 * bin_edges[0], 0.0 if initial_guess[1] > 0.0 else -20.0, 0.0, 0.0])
    opt.set_upper_bounds([1.05 * bin_edges[-1], 50.0 if initial_guess[1] > 0.0 else 0.0, 10, 10.0])

    tol = 1e-12
    opt.set_ftol_abs(tol)
    opt.set_xtol_rel(math.sqrt(tol))
    opt.set_maxeval(1500)

    xopt = opt.optimize(initial_guess)
    fmin = opt.last_optimum_value()

    print("Mode", repr(xopt), ", min. f =", fmin)
    return xopt
Code Example #31
def optimize_obj(obj_val, num_parameters, params=None):
    options = {}
    try:
        init_points = params['sample_points'][0]
    except (KeyError, TypeError):
        init_points = np.random.uniform(-np.pi, np.pi, num_parameters)
    try:
        options['maxiter'] = params['n_iter'] + params['init_points']
    except (KeyError, TypeError):
        options['maxiter'] = 100

    def objective(x, grad):
        f = obj_val(x)
        return f

    if params['ansatz'] == 'QAOA':
        lb = np.array([0, 0] * params['ansatz_depth'])
        ub = np.array([np.pi, 2*np.pi] * params['ansatz_depth'])
    elif params['ansatz'] == 'RYRZ':
        lb = np.array([-np.pi] * num_parameters)
        ub = np.array([np.pi] * num_parameters)
    else:
        raise ValueError("Unknown ansatz: {}".format(params['ansatz']))

    nlopt.srand(params['seed'])
    opt = nlopt.opt(nlopt.G_MLSL_LDS, num_parameters)
    try:
        local_opt_method = getattr(nlopt, params['localopt_method'])
    except AttributeError:
        raise ValueError("Incorrect local opt method: {}".format(
            params['localopt_method']))
    local_opt = nlopt.opt(local_opt_method, num_parameters)
    local_opt.set_lower_bounds(lb)
    local_opt.set_upper_bounds(ub)
    opt.set_ftol_rel(params['ftol_rel'])     
    opt.set_xtol_rel(params['xtol_rel'])
    opt.set_local_optimizer(local_opt)
    opt.set_min_objective(objective)
    opt.set_population(params['max_active_runs'])
    opt.set_maxeval(options['maxiter'])
    opt.set_lower_bounds(lb)
    opt.set_upper_bounds(ub)
    x = opt.optimize(init_points)
    return x
Code Example #32
def test_make_nlopt_fun_grad4(start_point):
    # Likewise, if you *do* supply gradient information, but set `jac=False`
    # you will be reminded of the fact that the gradient information is
    # being ignored through a `RuntimeWarning`.
    x0 = start_point
    opt = nlopt.opt(nlopt.LD_LBFGS, len(x0))
    obj_fun = make_nlopt_fun(rosen_couple, jac=False)
    opt.set_min_objective(obj_fun)
    with pytest.warns(RuntimeWarning):
        x_opt = opt.optimize(x0)
    assert np.allclose(x_opt, x0)
Code Example #33
def test_make_nlopt_fun_grad3(start_point):
    # If you use a gradient-based optimization method with `jac=True` but
    # fail to supply any gradient information, you will receive a
    # `RuntimeWarning` and poor results.
    x0 = start_point
    opt = nlopt.opt(nlopt.LD_LBFGS, len(x0))
    obj_fun = make_nlopt_fun(rosen, jac=True)
    opt.set_min_objective(obj_fun)
    with pytest.warns(RuntimeWarning):
        x_opt = opt.optimize(x0)
    assert np.allclose(x_opt, x0)
Code Example #34
def test_make_nlopt_fun_grad2(start_point):
    # If a callable jacobian `jac` is specified, it will take precedence
    # over the gradient given by a function that returns a tuple with the
    # gradient as its second value.
    x0 = start_point
    opt = nlopt.opt(nlopt.LD_LBFGS, len(x0))
    # We give some function that is clearly not the correct derivative.
    obj_fun = make_nlopt_fun(couple(rosen, lambda x: 2 * x), jac=rosen_der)
    opt.set_min_objective(obj_fun)
    assert np.allclose(opt.optimize(x0), np.array([1., 1., 1., 1., 1.]))
    assert np.isclose(opt.last_optimum_value(), 0)
Code Example #35
def lorentzfit(p0, x, y, alg=nlopt.LD_LBFGS, tol=1e-25, maxeval=10000):
    """Return the optimal Lorentzian polarizability parameters and error
       which minimize the error in ε(p0,x) relative to y for an initial
       set of Lorentzian polarizability parameters p0 over a set of
       frequencies x using the NLopt algorithm alg for a relative
       tolerance tol and a maximum number of iterations maxeval.
    """
    opt = nlopt.opt(alg, len(p0))
    opt.set_ftol_rel(tol)
    opt.set_maxeval(maxeval)
    opt.set_lower_bounds(np.zeros(len(p0)))
    opt.set_upper_bounds(float('inf') * np.ones(len(p0)))
    opt.set_min_objective(lambda p, grad: lorentzerr(p, x, y, grad))
    local_opt = nlopt.opt(nlopt.LD_LBFGS, len(p0))
    local_opt.set_ftol_rel(1e-10)
    local_opt.set_xtol_rel(1e-8)
    opt.set_local_optimizer(local_opt)
    popt = opt.optimize(p0)
    minf = opt.last_optimum_value()
    return popt, minf
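A caveat on this example: in NLopt, set_local_optimizer is only consulted by the MLSL family and the AUGLAG wrapper, so with the default alg=nlopt.LD_LBFGS the local optimizer configured here has no effect; the local settings matter only when an MLSL or AUGLAG algorithm is passed as alg.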
Code Example #36
    def __init__(self, mol, calculator):
        super().__init__()
        self.opt = nlopt.opt(nlopt.LN_COBYLA, mol.coords.size)

        def objective(x, _):
            return float(
                calculator.calculate(x.reshape((-1, 3, 1)),
                                     mol.element,
                                     units="kcalmol")[0])

        self.opt.set_min_objective(objective)
Code Example #37
File: solvers.py Project: zfergus/topopt
    def __init__(self,
                 problem: Problem,
                 volfrac: float,
                 filter: Filter,
                 gui: GUI,
                 maxeval=2000,
                 ftol_rel=1e-3):
        """
        Create a solver to solve the problem.

        Parameters
        ----------
        problem: :obj:`topopt.problems.Problem`
            The topology optimization problem to solve.
        volfrac: float
            The maximum fraction of the volume to use.
        filter: :obj:`topopt.filters.Filter`
            A filter for the solutions to reduce artefacts.
        gui: :obj:`topopt.guis.GUI`
            The graphical user interface to visualize intermediate results.
        maxeval: int
            The maximum number of evaluations to perform.
        ftol_rel: float
            A floating point tolerance for relative change.

        """
        self.problem = problem
        self.filter = filter
        self.gui = gui

        n = problem.nelx * problem.nely
        self.opt = nlopt.opt(nlopt.LD_MMA, n)
        self.xPhys = numpy.ones(n)

        # set bounds on the value of x (0 ≤ x ≤ 1)
        self.opt.set_lower_bounds(numpy.zeros(n))
        self.opt.set_upper_bounds(numpy.ones(n))

        # set stopping criteria
        self.maxeval = maxeval
        self.ftol_rel = ftol_rel

        # set objective and constraint functions
        self.opt.set_min_objective(self.objective_function)
        self.opt.add_inequality_constraint(self.volume_function, 0)
        self.volfrac = volfrac  # max volume fraction to use

        # setup filter
        self.passive = problem.bc.passive_elements
        if self.passive.size > 0:
            self.xPhys[self.passive] = 0
        self.active = problem.bc.active_elements
        if self.active.size > 0:
            self.xPhys[self.active] = 1
Code Example #38
File: fit_pdfs.py Project: schlafly/galstar
def min_nlopt(pdfs, guess, p0=1.e-5, regulator=1000., maxtime=25., maxeval=10000, algorithm='CRS', Delta_Ar_neighbor=None, weight_neighbor=None):
	N_regions = guess.size - 1
	
	opt = None
	if algorithm == 'CRS':
		opt = nlopt.opt(nlopt.GN_CRS2_LM, N_regions+1)
	elif algorithm == 'MLSL':
		opt = nlopt.opt(nlopt.G_MLSL_LDS, N_regions+1)
	
	# Set lower and upper bounds on Delta_Ar
	lower = np.empty(N_regions+1, dtype=np.float64)
	upper = np.empty(N_regions+1, dtype=np.float64)
	lower.fill(1.e-10)
	upper.fill(max(float(pdfs.shape[2]), 1.2*np.max(guess)))
	opt.set_lower_bounds(lower)
	opt.set_upper_bounds(upper)
	
	# Set local optimizer (if required)
	if algorithm == 'MLSL':
		local_opt = nlopt.opt(nlopt.LN_COBYLA, N_regions+1)
		local_opt.set_lower_bounds(lower)
		local_opt.set_upper_bounds(upper)
		local_opt.set_initial_step(15.)
		opt.set_local_optimizer(local_opt)
	
	opt.set_initial_step(15.)
	
	# Set stopping conditions
	opt.set_maxtime(maxtime)
	opt.set_maxeval(maxeval)
	#opt.set_xtol_abs(0.1)
	
	# Set the objective function
	opt.set_min_objective(lambda x, grad: nlopt_measure(x, grad, pdfs, p0, regulator, Delta_Ar_neighbor, weight_neighbor))
	
	# Run optimization algorithm
	x = opt.optimize(guess)
	measure = opt.last_optimum_value()
	success = opt.last_optimize_result()
	
	return x, success, measure
Code Example #39
        def get_optimizer(method):
            """
            Get the optimizer associated with the given method.

            Args:
                method (str): optimizer string

            Returns:
                nlopt.opt: an optimizer instance for the given method
            """
            if method == 'ISRES':
                return nlopt.opt(nlopt.GN_ISRES, M)
            elif method == 'COBYLA':
                return nlopt.opt(nlopt.LN_COBYLA, M)
            elif method == 'SLSQP':
                return nlopt.opt(nlopt.LD_SLSQP, M)
            elif method == 'AUGLAG':
                return nlopt.opt(nlopt.AUGLAG, M)
            else:
                raise NotImplementedError(
                    "The given method has not been implemented")
Code Example #40
    def __init__(self, ndim, k, search_int, opt_maxeval=10, steps_btw_opt=20, custom_gp=False, rbf_lengthscale=5.0,
                 rbf_variance=20.0):
        """
        Initialization of BO.
        :param ndim: Number of dimensions.
        :param k: exploration-exploitation UCB parameter.
        :param search_int: search inverval for all dimensions. Should be a 2-element list.
        :param opt_maxeval: Maximum number of evaluations used when optimizing the acquisition function. (default 10)
        :param steps_btw_opt: Number of steps between every BO optimization. (default 20)
        :param custom_gp: Whether or not to use a custom GP. Using GPy otherwise. (default False)
        :param rbf_lengthscale: lengthscale of the GPy RBF kernel. (default 5.0)
        :param rbf_variance: variance of the GPy RBF kernel. (default 20.0)
        """
        self.__acq_fun = putils.UCB(k)
        self.search_min = search_int[0]
        self.search_max = search_int[1]
        self.d = ndim
        self.last_x_diff = sys.float_info.max
        self.last_x = None
        self.__recompute_x_diff = True
        self.x = []
        self.y = []
        self.custom_gp = custom_gp
        self.steps_btw_opt = steps_btw_opt

        # What GP model to use
        if not self.custom_gp:
            self.cf = GPy.kern.RBF(self.d, variance=rbf_variance, lengthscale=rbf_lengthscale)
            self.gp = None
        else:
            self.gp = GP()

        # Prepare acquisition function
        if custom_gp:
            def __acq_fun_maximize(_x, grad):
                for xi in _x:
                    if xi < self.search_min or xi > self.search_max:
                        return 0.0
                vals = self.gp.estimate(_x)
                return float(self.__acq_fun(vals[0], np.sqrt(vals[1])))
        else:
            def __acq_fun_maximize(_x, grad):
                for xi in _x:
                    if xi < self.search_min or xi > self.search_max:
                        return 0.0

                vals = self.gp.predict(np.array([_x]))
                return float(self.__acq_fun(vals[0], np.sqrt(vals[1])))

        # Prepare acquisition function optimization algorithm
        self.opt = nlopt.opt(nlopt.LN_COBYLA, self.d)
        self.opt.set_lower_bounds(self.search_min)
        self.opt.set_upper_bounds(self.search_max)
        self.opt.set_maxeval(opt_maxeval)
        self.opt.set_max_objective(__acq_fun_maximize)
Code Example #41
    def fit_pif(self,source_models,data,data_variance,efficiency):

        p0=[]
        for a in source_models:
            p0.append(a['fit_cppix'])
            if isinstance(a['pif'],tuple):
                p0+=list(a['pif'][0])

        def residual_func(p,gp):
            s=None
            i=0
            for a in source_models:
                if isinstance(a['pif'],tuple):
                    model=p[i]*a['pif'][1](*p[i+1:i+1+len(a['pif'][0])])
                    i+=1+len(a['pif'][0])
                else:
                    model=p[i]*a['pif'] # a duck
                    i+=1
                if s is None: s=zeros_like(model)
                s+=model

            r=((s*efficiency-data)**2/data_variance)
            rs=r[(efficiency>0.1) & (data_variance>0)].sum()
            if isnan(rs): return float('inf')
            return rs.astype(float64)
    
        import nlopt
        print "p0",p0

        #opt=nlopt.opt(nlopt.LN_PRAXIS, len(p0))
        #opt=nlopt.opt(nlopt.LN_COBYLA, len(p0))
        opt=nlopt.opt(nlopt.LN_BOBYQA, len(p0))
        opt.set_lower_bounds([-1000]*len(p0))
        opt.set_upper_bounds([1000]*len(p0))
        opt.set_min_objective(residual_func)
        opt.set_xtol_rel(1e-6)
        p_best=opt.optimize(p0)
        
        i=0
        for a in source_models:
            print(a['source_name'], a['fit_cppix'], a['fit_cppix'] / a['fit_sigma'], p_best[i])
            a['pfit_cppix']=p_best[i]
            if isinstance(a['pif'],tuple):
                model=p_best[i]*a['pif'][1](*p_best[i+1:i+1+len(a['pif'][0])])
                a['pfit_model']=model
                print "...",p_best[i+1:i+1+len(a['pif'][0])],a['pif'][0],i,len(p_best)
                pyfits.PrimaryHDU(model).writeto(a['source_name']+".fits",clobber=True)
                fn=a['source_name']+".txt"
                savetxt(fn,p_best[i:i+1+len(a['pif'][0])])
                setattr(self,fn,da.DataFile(fn))
                i+=1+len(a['pif'][0])
            else:
                i+=1
Code Example #42
File: Analysis.py Project: riskey95/HollowRC
def bendingSolution(x0, section, SF, Mat):
    opt = nlopt.opt(nlopt.LN_NELDERMEAD, len(x0))  # faster than LN_SBPLX
    opt.set_min_objective(lambda x, grad: errorFunBending(x, section, SF, Mat))
    opt.set_xtol_rel(1e-8)
    x = opt.optimize(x0)  # Optimized strain state
    # print("result code = ", opt.last_optimize_result())

    # check result for high error margin
    if opt.last_optimum_value() > 1e-6:
        print("Failed to find bending equilibrium, try with less load")
        raise MyOptimizerError(
            "Failed to find bending equilibrium",
            "The section cannot sustain the bending loads applied. Try with less load."
        )

        # If bending equilibrium fails -> capacity is insufficient -> introduce a
        # load factor and minimize it under an error constraint -> yields a UR / lambda_bending factor
        # ------- need to implement gradients for optimization below ------
        # print("Error too big!, steps over to bigger problem including load factor")
        # # initial guess
        # x0 = np.append(x0, 0.9)
        # print('x0 = ', x0)
        # # initiate optimization instance
        # opt = nlopt.opt(nlopt.LN_COBYLA, len(x0))  # only LN_COBYLA support constraints
        # # set bounds
        # # opt.set_lower_bounds([-float('inf'), -float('inf'), -float('inf'), 1e-4])
        # # opt.set_upper_bounds([float('inf'), float('inf'), float('inf'), 1.0])
        # # set objective
        # # opt.set_max_objective(lambda x, grad: myObjective(x))
        # opt.set_max_objective(myObjective)
        # # set constraint
        # # opt.add_equality_constraint(lambda x, grad: errorFun(x, Geometry, SF, Mat), 1e-8)
        # opt.add_inequality_constraint(lambda x, grad: errorFunBending(x, section, SF, Mat), 1e-8)  # feasible if func < tol
        # # set tolerances
        # opt.set_xtol_rel(1e-8)
        # # solve
        # xopt = opt.optimize(x0)
        # x = xopt[:-1]
        # # print result
        # print("bending load factor =", xopt[-1])
        # print("bending optimum at x =", x)
        # print("bending error at opt =", errorFunBending(xopt, section, SF, Mat))
        # # print("result code = ", opt.last_optimize_result())
        #
        # if opt.last_optimum_value() > 1e-4:
        #     print("Error too big!, bigger problem including load factor failed, try with less load")
        #     error_msg = ["Failed to find bending equilibrium", "try with less load"]
        # else:
        #     print("Bigger problem including load factor succeeded, Maximum bending load factor is: " + str(xopt[-1]))
        #     error_msg = ["Failed to find bending equilibrium", "Maximum bending load factor is: " + str(xopt[-1])]
        # return None, error_msg

    return x
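For reference, the load-factor fallback outlined in the commented block could be sketched roughly as below. This is a hypothetical reconstruction: bendingLoadFactor is an invented name, and errorFunBending is assumed to be non-negative, near zero at equilibrium, and to accept the extended state vector including the load factor, as the commented calls suggest.

import numpy as np
import nlopt

def bendingLoadFactor(x0, section, SF, Mat):
    x0 = np.append(x0, 0.9)  # append the load factor, using the initial guess from the comments
    opt = nlopt.opt(nlopt.LN_COBYLA, len(x0))  # COBYLA because it supports constraints
    opt.set_max_objective(lambda x, grad: x[-1])  # maximise the load factor itself
    # Feasible only while the bending error stays (approximately) at zero
    opt.add_inequality_constraint(
        lambda x, grad: errorFunBending(x, section, SF, Mat), 1e-8)
    opt.set_xtol_rel(1e-8)
    xopt = opt.optimize(x0)
    return xopt[:-1], xopt[-1]  # strain state and maximum bending load factor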
Code example #43
def denomMinMLSL(rapp, box, center, popsize=4, maxeval=1000):
    import numpy as np

    def my_func(x, grad):
        if grad.size > 0:
            _grad = fast_grad(x, rapp)
            for _i in range(grad.size):
                grad[_i] = _grad[_i]  # copy the computed gradient into nlopt's array
        return rapp.denom(x)

    import nlopt
    locopt = nlopt.opt(nlopt.LD_MMA, center.size)
    glopt = nlopt.opt(nlopt.GD_MLSL_LDS, center.size)
    glopt.set_min_objective(my_func)
    glopt.set_lower_bounds(np.array([b[0] for b in box]))
    glopt.set_upper_bounds(np.array([b[1] for b in box]))
    glopt.set_local_optimizer(locopt)
    glopt.set_population(popsize)
    glopt.set_maxeval(maxeval)
    xmin = glopt.optimize(center)
    return xmin
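GD_MLSL_LDS restarts a gradient-based local optimizer (here LD_MMA) from low-discrepancy sample points, so the objective must write its gradient into grad in place. A minimal self-contained sketch of that global/local pattern on a quadratic with a known analytic gradient; the function and bounds are illustrative only:

import numpy as np
import nlopt

def mlsl_quadratic_demo():
    # Minimize ||x - 0.5||^2 over [-1, 1]^2 with MLSL on top of MMA
    def f(x, grad):
        if grad.size > 0:
            grad[:] = 2.0 * (x - 0.5)  # write the gradient into nlopt's array in place
        return float(np.sum((x - 0.5) ** 2))

    local = nlopt.opt(nlopt.LD_MMA, 2)
    glob = nlopt.opt(nlopt.GD_MLSL_LDS, 2)
    glob.set_min_objective(f)
    glob.set_lower_bounds(np.array([-1.0, -1.0]))
    glob.set_upper_bounds(np.array([1.0, 1.0]))
    glob.set_local_optimizer(local)
    glob.set_population(4)
    glob.set_maxeval(500)
    return glob.optimize(np.array([0.0, 0.0]))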
Code example #44
File: explore.py Project: KelvyHsu/ocean-exploration
def optimal_difference_path(theta_stack_init, r, x, memory, feature_fn, white_params,
    Fs, Fw, y, inference_learn, turn_limit = np.deg2rad(30), bound = 100,
    theta_stack_low = None, theta_stack_high = None,
    walltime = None, xtol_rel = 0, ftol_rel = 0, ctol = 1e-6):

    def objective(theta_stack, grad):
        return difference_acquisition(theta_stack, r, x, memory, feature_fn, white_params, Fs, Fw, y, inference_learn)

    # Define the path constraint
    def constraint(result, theta_stack, grad):
        # Fill nlopt's preallocated constraint array in place
        result[:] = path_bounds_model(theta_stack, r, x, feature_fn.Xq_ref, bound)

    # Obtain the number of parameters involved
    n_params = theta_stack_init.shape[0]

    # Prepare for global or local optimisation according to specification
    opt = nlopt.opt(nlopt.LN_COBYLA, n_params)

    # Set lower and upper bound
    if theta_stack_low is not None:
        theta_stack_low[0] = theta_stack_init[0] - turn_limit
        opt.set_lower_bounds(theta_stack_low)
    if theta_stack_high is not None:
        theta_stack_high[0] = theta_stack_init[0] + turn_limit
        opt.set_upper_bounds(theta_stack_high)

    # Set tolerances
    if xtol_rel > 0:
        opt.set_xtol_rel(xtol_rel)
    if ftol_rel > 0:
        opt.set_ftol_rel(ftol_rel)
    
    # Set maximum optimisation time, if one was given (walltime defaults to None)
    if walltime is not None:
        opt.set_maxtime(walltime)

    # Set the objective and constraint and optimise!
    opt.set_max_objective(objective)
    opt.add_inequality_mconstraint(constraint, ctol * np.ones(n_params))
    theta_stack_opt = opt.optimize(theta_stack_init)
    entropy_opt = opt.last_optimum_value()

    # Compute optimal path
    x_path_opt = forward_path_model(theta_stack_opt, r, x)

    # Replace the optimal coordinates with the closest query locations
    x_path_opt = feature_fn.closest_locations(x_path_opt)

    # Approximate the corresponding path angles
    theta_stack_opt = backward_path_model(x_path_opt, x)

    # Return path coordinates, path angles, and path entropy
    return x_path_opt, theta_stack_opt, entropy_opt
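nlopt's vector-valued constraints receive a preallocated result array that the callback must fill in place, which is why the constraint above assigns through result[:]; rebinding the local name would leave the array untouched. A minimal sketch of add_inequality_mconstraint with a toy two-component constraint (all names and values are illustrative):

import numpy as np
import nlopt

def mconstraint_demo():
    # Minimize x0 + x1 subject to x0 >= 0.2 and x1 >= 0.3,
    # expressed as the vector inequality c(x) <= 0
    def objective(x, grad):
        return x[0] + x[1]

    def constraint(result, x, grad):
        result[:] = np.array([0.2 - x[0], 0.3 - x[1]])  # must fill in place

    opt = nlopt.opt(nlopt.LN_COBYLA, 2)
    opt.set_min_objective(objective)
    opt.add_inequality_mconstraint(constraint, 1e-8 * np.ones(2))
    opt.set_xtol_rel(1e-6)
    opt.set_maxeval(500)
    return opt.optimize(np.array([1.0, 1.0]))  # converges towards (0.2, 0.3)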
Code example #45
    def _minimize(
        self,
        name: str,
        objective_function: Callable,
        variable_bounds: Optional[List[Tuple[float, float]]],
        initial_point: Optional[np.ndarray] = None,
        max_evals: int = 1000,
    ) -> Tuple[float, float, int]:
        """Minimize using objective function

        Args:
            name: NLopt optimizer name
            objective_function: handle to a function that computes the
                objective function.
            variable_bounds: list of variable bounds, given as pairs
                (lower, upper). None means unbounded.
            initial_point: initial point.
            max_evals: maximum number of evaluations

        Returns:
            tuple(float, float, int): Solution at minimum found,
                    value at minimum found, num evaluations performed
        """
        threshold = 3 * np.pi
        low = [(l if l is not None else -threshold)
               for (l, u) in variable_bounds]
        high = [(u if u is not None else threshold)
                for (l, u) in variable_bounds]

        opt = nlopt.opt(name, len(low))
        logger.debug(opt.get_algorithm_name())

        opt.set_lower_bounds(low)
        opt.set_upper_bounds(high)

        eval_count = 0

        def wrap_objfunc_global(x, _grad):
            nonlocal eval_count
            eval_count += 1
            return objective_function(x)

        opt.set_min_objective(wrap_objfunc_global)
        opt.set_maxeval(max_evals)

        xopt = opt.optimize(initial_point)
        minf = opt.last_optimum_value()

        logger.debug("Global minimize found %s eval count %s", minf,
                     eval_count)
        return xopt, minf, eval_count
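A hedged usage sketch of the method above; `wrapper` is a hypothetical instance of the enclosing class. Since the code hands `name` straight to nlopt.opt, the value passed is an nlopt algorithm constant here:

import numpy as np
import nlopt

# Hypothetical call on an instance `wrapper` exposing _minimize as above
xopt, minf, nevals = wrapper._minimize(
    nlopt.GN_CRS2_LM,
    lambda x: float(np.sum(x ** 2)),              # plain objective, no grad argument
    variable_bounds=[(-1.0, 1.0), (None, None)],  # None falls back to +/- 3*pi
    initial_point=np.array([0.5, -0.5]),
    max_evals=500,
)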
Code example #46
File: optimize.py Project: zenna/ig
def optimize(init_state, cost_func):
    # reset counter
    global optim_iter
    optim_iter = 0
    init_shapes = init_state.flatten()
    nparams = init_state.size
    opt = nlopt.opt(nlopt.LD_MMA, nparams)
    opt.set_min_objective(cost_func)
    opt.set_xtol_rel(1e-4)
    x = opt.optimize(init_shapes)
    minf = opt.last_optimum_value()
    # print "optimum at ", x[0],x[1]
    print "minimum value = ", minf
Code example #47
File: final.py Project: chicagohawk/twinmodel
    def infer(self, coef, lasso_reg):
        # optimize selected basis coefficients, warm-starting each pass
        for tcutoff in np.logspace(-3, 0, 5) * self.primal.tfinal:
            opt = nlopt.opt(nlopt.LD_LBFGS, coef.size)
            opt.set_min_objective(lambda coef, grad:
                                  self.var_grad(coef, grad, lasso_reg, tcutoff))
            opt.set_stopval(1e-1)
            opt.set_ftol_rel(1e-2)
            opt.set_maxeval(100)
            if tcutoff == self.primal.tfinal:
                # tighten the tolerances on the final (full-horizon) pass
                opt.set_stopval(0.)
                opt.set_ftol_rel(1e-4)
            coef = opt.optimize(degrade(coef).copy())
        return coef
Code example #48
import numpy as np
import nlopt
from numpy.linalg import eig  # assumed source of eig; the snippet does not show its import

def so_sample():
    def my_func(x, grad):
        arr = np.array([[x[0] + x[1], -2], [-2, x[1] - 2 * (x[1] + x[0])]])
        ev, ew = eig(arr)  # eigenvalues and eigenvectors of the symmetric 2x2 matrix
        return ev[0].real
    opt = nlopt.opt(nlopt.LN_BOBYQA, 2)
    opt.set_lower_bounds([1.0,1.0])
    opt.set_min_objective(my_func)
    opt.set_xtol_rel(1e-7)
    x = opt.optimize([10.0, 3.5])
    minf = opt.last_optimum_value()
    print("optimum at ", x)
    print("minimum value = ", minf)
    print("result code = ", opt.last_optimize_result())
Code example #49
    def optimize(self, algorithm):
        t0 = time()
        self.errorSum = 0.0
        for paramSet in self.paramLayer:
            numParam = len(paramSet)
            self.count = 0
            # Optimization algorithm
            opt = nlopt.opt(algorithm, numParam)

            # Upper / lower bounds
            lowerBoundList = []
            upperBoundList = []
            for param in paramSet:
                bound = self.paramBoundDic[param]
                lowerBoundList.append(bound[0])
                upperBoundList.append(bound[1])
            opt.set_lower_bounds(lowerBoundList)
            opt.set_upper_bounds(upperBoundList)
            del lowerBoundList, upperBoundList

            # Get active joint list of the param set
            activeJntList = []
            for param in paramSet:
                activeJntList = activeJntList + self.getActiveJntList(param)
            activeJntList = list(set(activeJntList))

            # Get goal joint value from ground truth
            goalJntVal = self.getGoalJointRotList(activeJntList)

            # Set objective function / threshold
            opt.set_min_objective(lambda x, grad: self.myfunc(x, grad, goalJntVal, paramSet, activeJntList))
            opt.set_xtol_rel(1e-10)

            # Optimization
            initVal = []
            for param in paramSet:
                initVal.append(self.paramValDic[param])
            x = opt.optimize(initVal)
            for i, param in enumerate(paramSet):
                self.paramValDic[param] = x[i]
            print("count : ", self.count)
            print("error : ", opt.last_optimum_value())
            self.errorSum = self.errorSum + opt.last_optimum_value()

        t1 = time()
        # print("optimized paramSet : ", paramSet)
        print("computation time : %f" % (t1 - t0))
        print("error sum : %f" % self.errorSum)
Code example #50
File: gaussfit.py Project: npadmana/py_hydra
  def optimize(self, frel=1.e-6, fabs=1.e-8):
    opt = nlopt.opt(nlopt.LN_COBYLA, self.nparam)
    opt.set_min_objective(self.minfunc)
    opt.set_ftol_rel(frel)
    opt.set_xtol_rel(frel)
    opt.set_ftol_abs(fabs)
    opt.set_xtol_abs(fabs)

    startvec = np.zeros((self.nparam, ), dtype='f8')
    startvec[0] = 1.0
    startvec[1] = np.mean(self.x)
    startvec[2] = self.y.max()

    self.xopt = opt.optimize(startvec)
    self.fopt = opt.last_optimize_result()  # nlopt result code; the objective value itself is opt.last_optimum_value()
Code example #51
    def _get_optimizer(self, D=1, upper_bound=1, iteration_budget=None):
        """Utility function creating an NLOPT optimizer with default
        parameters depending on this objects parameters
        """
        if iteration_budget is None:
            iteration_budget = self.linear_iteration_budget

        opt = nlopt.opt(nlopt.GN_DIRECT_L_RAND, D)
        # opt.set_stopval(self.acceptance_threshold/10.0)
        opt.set_ftol_rel(1e-5)
        opt.set_maxeval(iteration_budget)
        opt.set_lower_bounds(0)
        opt.set_upper_bounds(upper_bound)

        return opt
Code example #52
File: gp.py Project: chaitan3/GPEXP
    def chooseParams(self, paramLowerBounds, paramUpperBounds, startValues, costFunction, maxiter=40, useLastParams=True):

        if NLOPT is True:
            local_opt = nlopt.opt(nlopt.LN_COBYLA, len(startValues))

            local_opt.set_xtol_rel(1e-3)
            local_opt.set_ftol_rel(1e-3)
            local_opt.set_ftol_abs(1e-3)
            local_opt.set_maxtime(10)
            local_opt.set_maxeval(50 * len(startValues))

            local_opt.set_lower_bounds(paramLowerBounds)
            local_opt.set_upper_bounds(paramUpperBounds)

            try:
                local_opt.set_min_objective(costFunction)
                sol = local_opt.optimize(startValues)
            except nlopt.RoundoffLimited:
                # nlopt raises this when roundoff errors limit further progress;
                # fall back to the last parameters the cost function saw
                if useLastParams:
                    return costFunction.last_x_value, costFunction.last_f_value
                else:
                    return startValues, None
            return sol, local_opt.last_optimum_value()
        else:
            maxeval = 100
            bounds = list(zip(paramLowerBounds, paramUpperBounds))
            objFunc = lambda x: costFunction(x, np.empty(0))
            # sol = slsqp(objFunc, np.array(startValues), bounds=bounds, iter=maxeval)
            # print("startValues ", len(startValues), len(paramLowerBounds))

            def const(x):
                # feasible (1.0) only if every component lies within its bounds
                for ii in range(len(x)):
                    if x[ii] < paramLowerBounds[ii] or x[ii] > paramUpperBounds[ii]:
                        return -1.0
                return 1.0

            # sol = cobyla(objFunc, np.array(startValues), cons=(const,), maxfun=maxeval)
            sol_bfgs = bfgs(objFunc, np.array(startValues), bounds=bounds, approx_grad=True, factr=1e10, maxfun=maxiter)
            sol = sol_bfgs[0]
            # print("sol ", np.round(sol, 4))
            val = objFunc(sol)
            return sol, val
Code example #53
File: nlopt_wrap.py Project: nishbo/simsimpy
def scipy_nlopt_cobyla(*args, **kwargs):
    """Wraps nlopt library cobyla function to be compatible with scipy optimize

    parameters:
        args[0]: target, function to be minimized
        args[1]: x0, starting point for minimization
        bounds: list of bounds for the movement
                [[min, max], [min, max], ...]
        ftol_rel: same as in nlopt
        xtol_rel: same as in nlopt
            one of the tol_rel should be specified
    returns:
        OptimizeResult() object with properly set x, fun, success.
            status is not set when nlopt.RoundoffLimited is raised
    """
    answ = OptimizeResult()
    bounds = kwargs['bounds']

    opt = nlopt.opt(nlopt.LN_COBYLA, len(args[1]))
    opt.set_lower_bounds([i[0] for i in bounds])
    opt.set_upper_bounds([i[1] for i in bounds])
    if 'ftol_rel' in kwargs:
        opt.set_ftol_rel(kwargs['ftol_rel'])
    if 'xtol_rel' in kwargs:
        opt.set_xtol_rel(kwargs['xtol_rel'])
    opt.set_min_objective(args[0])

    x0 = list(args[1])

    try:
        x1 = opt.optimize(x0)
    except nlopt.RoundoffLimited:
        answ.x = x0
        answ.fun = args[0](x0)
        answ.success = False
        answ.message = 'nlopt.RoundoffLimited'
        return answ

    answ.x = x1
    answ.fun = args[0](x1)
    answ.success = True if opt.last_optimize_result() in [3, 4] else False
    answ.status = opt.last_optimize_result()
    if answ.fun != opt.last_optimum_value():
        print("Something's wrong, ", answ.fun, opt.last_optimum_value())

    return answ
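A hedged usage sketch of the wrapper above on a simple bound-constrained quadratic. The target gets a default grad argument because the wrapper also calls it with a single argument when it fills in answ.fun; the objective itself is illustrative:

import numpy as np

def sphere(x, grad=None):
    # smooth bowl with its minimum at (0.5, -0.25)
    return float((x[0] - 0.5) ** 2 + (x[1] + 0.25) ** 2)

res = scipy_nlopt_cobyla(sphere, [0.0, 0.0],
                         bounds=[[-1.0, 1.0], [-1.0, 1.0]],
                         xtol_rel=1e-6)
print(res.x, res.fun, res.success)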
Code example #54
def nlopt_sample():
    print("[nlopt_sample] started")
    lb = [-3, -1]
    ub = [2, 6]
    opt = nlopt.opt(nlopt.LN_BOBYQA, 2)
    opt.set_lower_bounds(lb)
    opt.set_upper_bounds(ub)
    opt.set_min_objective(udf_single)
    opt.set_xtol_rel(1e-8)
    opt.set_maxeval(100)  # maximum number of function evaluations
    x = opt.optimize([0, 1])
    minf = opt.last_optimum_value()
    print("optimum at ", x[0], x[1])
    print("minimum value = ", minf)
    print("result code = ", opt.last_optimize_result())
    print("udf_single(best) = ", udf_single(x))
    print("[nlopt_sample] finished")