Example #1
from functools import partial

from openopt import NLP


def fit(fitfunc, model, data_single, y, starting_values, param_upper_bounds, param_lower_bounds):
    print_level = -1
    if model.main_constrained:
        fitting = NLP(
                        partial(fitfunc,
                                 data = data_single,
                                 ind = y,
                                 return_residuals=False,
                                 return_SSR=True
                                 ),
                        starting_values,
                        ub=param_upper_bounds,
                        lb=param_lower_bounds,
                        ftol=model.tolerence
                        )
        #results = fitting.solve('nlp:ralg', iprint=print_level)
        #return fitting.solve('nssolve', iprint = print_level)
        return fitting.solve('ralg', iprint = print_level, maxIter = model.max_runs)
    else:
        fitting = NLP(
                        partial(fitfunc,
                                 data=data_single,
                                 ind = y,
                                 return_residuals=False,
                                 return_SSR=True
                                 ),
                        starting_values,
                        ftol=model.tolerence,
                        maxIter = model.max_runs,
                        )
        return fitting.solve('scipy_leastsq', iprint=print_level)
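A hypothetical call to fit() could look like the following sketch; my_residual_SSR, model and the data arrays are illustrative stand-ins for objects with the attributes used above (main_constrained, tolerence, max_runs):

# Illustrative sketch only: my_residual_SSR, model, data_single and y are
# hypothetical stand-ins matching the signature of fit() above.
import numpy as np

starting_values = np.array([1.0, 0.5])
result = fit(my_residual_SSR, model, data_single, y,
             starting_values,
             param_upper_bounds=np.array([10.0, 10.0]),
             param_lower_bounds=np.array([0.0, 0.0]))
print(result.xf, result.ff)  # OpenOpt results expose xf (argmin) and ff (objective value)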
Example #2
    def openopt(self, *args, **kwargs):
        from openopt import NLP
        
        if 'plotting' in kwargs:
            plotting = True
            kwargs.pop('plotting')
        else:
            plotting = False
        try:
            solver = args[0]
        except IndexError:
            solver = 'ralg'
        self.args += args[1:]
        self.kwargs.update(kwargs)

        
        f = lambda p: sum(((self.func(p, self.x, self.const,self.mima) - self.y) / self.weights)**2)
        self.prob = NLP(f, x0 = self.p0, *self.args, **self.kwargs)
        self.res = self.prob.solve(solver)
        self.p = self.res.xf

        if plotting:
            self.plot(self.xdata, self.ydata, 'o', label = 'original data')
            self.plot(self.x, self.y, '+')
            self.plot(self.xdata, self.func(self.p, self.xdata,self.const,self.mima), label = 'fit with openopt:\n solver: '+ solver)
Example #3
    def _min(self, func, x0, *args, **kwargs):
        if _USE_OPENOPT:
            solver = kwargs.pop('solver', 'ralg')
            df = kwargs.pop('df', self._diff)
            p = NLP(
                func,
                x0,
                args=(
                    self.mx,
                    self.my,
                    self.size),
                df=df,
                **kwargs)
            z = p.solve(solver)

            return z.xf, z.ff, z.istop > 0, z.msg
        else:
            a = minimize(
                func,
                x0,
                args=(
                    self.mx,
                    self.my,
                    self.size),
                *args,
                **kwargs)
            return a.x, a.fun, a.success, a.message
Example #4
def test_openopt():
    p = NLP(residual, x0, lb=lb, ub=ub,
            iprint=1, plot=True)
    # uncomment the following (and set scale = 1 above) to use openopt's
    # scaling mechanism.  This only seems to work with a few solvers, though.
    #p.scale = np.array([1, 1e6, 1e6, 1e6, 1e6, 1])

    r = p.solve('ralg')             # OpenOpt solver, seems to work well,
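The fragment above references residual, x0, lb and ub defined elsewhere in the module; a minimal, purely illustrative setup under which it would run might be:

# Purely illustrative definitions; the real residual/x0/lb/ub come from
# the surrounding module and are not shown in the fragment above.
import numpy as np
from openopt import NLP

x0 = np.array([1.0, 1.0])
lb = np.zeros(2)
ub = 10.0 * np.ones(2)

def residual(x):
    # toy sum-of-squares objective with minimum at x = (2, 2)
    return ((x - 2.0)**2).sum()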
Example #5
    def max_loglik_z(self, method='scipy_slsqp'):
        message(self, 'Optimizing strain frequencies using %s' % (method))

        # Initialize frequencies
        z = []

        # Bound frequencies in (0,1)
        lb = np.zeros(self.n)
        ub = np.ones(self.n)

        # Constrain frequencies to sum to 1
        def h(a):
            return sum(a) - 1

        quiet()

        # For every subject
        for i in range(self.m):

            # Objective function
            def f(a):
                # Get frequencies (N) and error rate
                zi = a
                ei = self.e
                # Get nucleotide frequencies at every position (L,4)
                a1 = np.einsum('i...,i...', zi, self.p)[self.mask[i]]
                # Remove masked sites from alignment (L,4)
                xi = self.x[i][self.mask[i]]
                # Error correct and weight by counts
                a2 = np.einsum('ij,ij', xi,
                               np.log(((1 - ei) * a1 + ei / 4.).clip(1e-10)))
                # Return negative log-likelihood
                return -1. * a2

            # Run optimization
            g = self.z[i, :]
            soln = NLP(f,
                       g,
                       lb=lb,
                       ub=ub,
                       h=h,
                       gtol=1e-5,
                       contol=1e-5,
                       name='NLP1').solve(method, plot=0)
            if soln.ff <= f(g) and soln.isFeasible:
                zi = soln.xf
                z.append(zi.clip(0, 1))
            else:
                z.append(g)

        loud()

        # Update frequencies and error rate
        self.z = np.array(z)

        return self
Example #6
import numpy as np
import FuncDesigner as fd  # assumption: fd.* below refers to FuncDesigner
from openopt import NLP


# note: KernelModel (returned at the end) is defined elsewhere in the module
def fit_kernel_model(kernel, loss, X, y, gamma, weights=None):
    n_samples = X.shape[0]
    gamma = float(gamma)
    if weights is not None:
        weights = weights / np.sum(weights) * weights.size

    # --- optimize bias term ---

    bias = fd.oovar('bias', size=1)

    if weights is None:
        obj_fun = fd.sum(loss(y, bias))
    else:
        obj_fun = fd.sum(fd.dot(weights, loss(y, bias)))
    optimizer = NLP(obj_fun, {bias: 0.}, ftol=1e-6, iprint=-1)

    result = optimizer.solve('ralg')
    bias = result(bias)

    # --- optimize betas ---

    beta = fd.oovar('beta', size=n_samples)

    # gram matrix
    K = kernel(X, X)
    assert K.shape == (n_samples, n_samples)

    K_dot_beta = fd.dot(K, beta)

    penalization_term = gamma * fd.dot(beta, K_dot_beta)
    if weights is None:
        loss_term = fd.sum(loss(y - bias, K_dot_beta))
    else:
        loss_term = fd.sum(fd.dot(weights, loss(y - bias, K_dot_beta)))
    obj_fun = penalization_term + loss_term

    beta0 = np.zeros((n_samples, ))

    optimizer = NLP(obj_fun, {beta: beta0}, ftol=1e-4, iprint=-1)
    result = optimizer.solve('ralg')
    beta = result(beta)

    return KernelModel(X, kernel, beta, bias)
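A sketch of how fit_kernel_model might be called, assuming a kernel that returns the Gram matrix and a loss built from overloaded arithmetic so it stays FuncDesigner-compatible; all names below are illustrative:

# Hypothetical usage sketch for fit_kernel_model above.
import numpy as np

def rbf_kernel(A, B, sigma=1.0):
    # squared Euclidean distances via broadcasting -> (len(A), len(B)) Gram matrix
    d2 = ((A[:, None, :] - B[None, :, :])**2).sum(axis=-1)
    return np.exp(-d2 / (2.0 * sigma**2))

squared_loss = lambda target, prediction: (target - prediction)**2

X = np.random.rand(20, 3)
y = np.sin(X).sum(axis=1)
model = fit_kernel_model(rbf_kernel, squared_loss, X, y, gamma=0.1)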
Example #7
    def find2(self, POIMobj, motif_len, motif_start, base, path2pwm=None, solver="ralg"):
        self.motif_start = motif_start
        self.motif_len = motif_len
        x0 = tools.ini_pwm(motif_len, 1, len(base))[0]

        x0 = x0.flatten()

        lb = np.ones(x0.shape) * 0.001
        ub = np.ones(x0.shape) * 0.999
        iprint = 0
        maxIter = 1000
        ftol = 1e-04
        gradtol = 1e-03
        diffInt = 1e-05
        contol = 1e-02
        maxFunEvals = 1e04
        maxTime = 100

        lenA = len(x0)
        lenk = len(x0) // len(base)  # integer division: used as an array dimension below
        Aeq = np.zeros((lenk, lenA))
        beq = np.ones(lenk)
        for i in range(lenk):
            for pk in range(i, lenA, lenk):
                Aeq[i, pk] = 1

                # ,Aeq=Aeq,beq=beq,
        cons = {'type': 'eq', 'fun': lambda x: np.dot(Aeq, x) - beq}
        bnds = []
        for i in range(len(x0)):
            bnds.append((lb[i], ub[i]))
        # bnds = np.vstack((lb,ub))


        if solver == "ralg":
            from openopt import NLP
            p = NLP(self.f_L2, x0,lb=lb, ub=ub, Aeq=Aeq,beq=beq, args=(POIMobj.gPOIM,POIMobj.L,motif_start,POIMobj.small_k,motif_len),  diffInt=diffInt, ftol=ftol, plot=0, iprint=iprint,maxIter = maxIter, maxFunEvals = maxFunEvals, show=False, contol=contol)
            result = p.solve(solver)
            x = result.xf
            f = result.ff
        elif solver == "LBFGSB":
            x, f, d = fmin_l_bfgs_b(self.f_L2, x0,
                                    args=(POIMobj.gPOIM, POIMobj.L, motif_start, POIMobj.small_k, motif_len),
                                    approx_grad=True)#constraints=cons)#
        elif solver == "SLSQP":
            result = minimize(self.f_L2, x0,args=(POIMobj.gPOIM, POIMobj.L, motif_start, POIMobj.small_k, motif_len),method='SLSQP',bounds=bnds,constraints=cons)
            x = result.x
            f = result.fun
        self.motif_pwm = np.reshape(x, (4, motif_len))
        fopt = f
        self.normalize()
        if not(path2pwm is None):
            np.savetxt(path2pwm, self.poim_norm)

        return self.motif_pwm
Example #8
import numpy as np
from openopt import NLP


def balance(sam, debug=False):
    try:
        table = sam.array()
    except AttributeError:
        table = np.array(sam)

    assert table.shape[0] == table.shape[1]
    size = table.shape[0]
    x0 = np.array([v for v in table.flatten() if v != 0])

    def transform(ox):
        ret = np.zeros_like(table)
        i = 0
        for r in range(size):
            for c in range(size):
                if table[r, c] != 0:
                    ret[r, c] = ox[i]
                    i += 1
        return ret

    def objective(ox):
        ox = np.square((ox - x0) / x0)
        return np.sum(ox)

    def constraints(ox):
        ox = transform(ox)
        ret = np.sum(ox, 0) - np.sum(ox, 1)
        return ret

    print(constraints(x0))

    if debug:
        print("--- balance ---")
    p = NLP(objective,
            x0,
            h=constraints,
            iprint=50 * int(debug),
            maxIter=100000,
            maxFunEvals=1e7,
            name='NLP_1')
    r = p.solve('ralg', plot=0)
    if debug:
        print('constraints')
        print(constraints(r.xf))
    assert r.isFeasible

    try:
        return sam.replace(transform(r.xf))
    except AttributeError:  # plain arrays have no .replace, mirroring the input handling above
        return transform(r.xf)
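An illustrative call to balance() on a small non-symmetric table, so that the equality constraint h (row sums equal column sums) gets exercised:

# Illustrative only: balance a toy 3x3 flow table with zero diagonal.
import numpy as np

table = np.array([[0.0, 2.0, 1.0],
                  [3.0, 0.0, 2.0],
                  [1.0, 1.0, 0.0]])
balanced = balance(table, debug=True)
print(balanced)  # row sums of the result should match its column sums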
Example #9
    def run(self, plot=True):
        """
        Solves the optimization problem.
        """
        # Initial try
        p0 = self.get_p0()

        #Lower bounds and Upper bounds (HARDCODED FOR QUADTANK)
        lbound = N.array([0.0001] * len(p0))
        if self.gridsize == 1:
            ubound = [10.0] * (self.gridsize * self.nbr_us)
        else:
            ubound = [10.0] * (self.gridsize * self.nbr_us
                               ) + [0.20, 0.20, 0.20, 0.20, N.inf] * (
                                   (self.gridsize - 1))

        #UPPER BOUND FOR VDP
        #ubound = [0.75]*(self.gridsize*self.nbr_us)+[N.inf]*((self.gridsize-1)*self.nbr_ys)

        if self.verbosity >= Multiple_Shooting.NORMAL:
            print('Initial parameter vector: ')
            print(p0)
            print('Lower bound:', len(lbound))
            print('Upper bound:', len(ubound))

        # Get OpenOPT handler
        p_solve = NLP(self.f,
                      p0,
                      lb=lbound,
                      ub=ubound,
                      maxFunEvals=self.maxFeval,
                      maxIter=self.maxIter,
                      ftol=self.ftol,
                      maxTime=self.maxTime)

        #Whether multiple shooting or single shooting is performed
        if self.gridsize > 1:
            p_solve.h = self.h

        if plot:
            p_solve.plot = 1

        self.opt = p_solve.solve(self.optMethod)

        return self.opt
Example #10
from numpy import atleast_2d, size
from numpy.random import rand
from openopt import NLP


def lower_bound_predictor(model, lower, upper):

    n_sample, n_feature = model.X.shape

    theta = model.theta_
    ph = 2
    beta = model.beta
    gamma = model.gamma
    X = model.X

    x_lb = atleast_2d(lower)
    x_ub = atleast_2d(upper)

    x_lb = x_lb.T if size(x_lb, 1) != n_feature else x_lb
    x_ub = x_ub.T if size(x_ub, 1) != n_feature else x_ub

    x_lb = (x_lb - model.X_mean) / model.X_std
    x_ub = (x_ub - model.X_mean) / model.X_std

    #    a = zeros(1, m) b = zeros(1, m)
    #    midpoint = (z_min + z_max)' / 2
    #    for i = 1:m
    #        if gamma(i) >= 0
    #            # tangent line relaxation
    #            b(i) = -gamma(i) * exp(-midpoint(i))
    #            a(i) = gamma(i) * exp(-midpoint(i)) - b(i) * midpoint(i)
    #        else
    #            # chord relaxation
    #            b(i) = (gamma(i) * exp(-z_max(i)) - gamma(i) * exp(-z_min(i))) / (z_max(i)-z_min(i))
    #            a(i) = gamma(i) * exp(-z_min(i)) - b(i) * z_min(i)
    #

    x0 = ((x_ub - x_lb) * rand(1, n_feature) +
          x_lb)[0].tolist()  # Make a starting guess at the solution
    obj = model.predict
    #    x_opt = fmin_slsqp(obj, x0, bounds=zip(x_lb[0], x_ub[0]), iter=1e4, iprint=-1)
    p = NLP(obj, x0, lb=x_lb[0], ub=x_ub[0], iprint=1e6)
    r = p.solve('ralg')
    x_opt = r.xf

    #    if any(x_opt < x_lb) or any(x_opt > x_ub):
    #        pdb.set_trace()
    res = obj(x_opt)

    return res
Example #11
def milpTransfer(originProb):
    newProb = NLP(originProb.f, originProb.x0)
    originProb.inspire(newProb)
    newProb.discreteVars = originProb.discreteVars

    def err(s):  # to prevent text output
        raise OpenOptException(s)

    newProb.err = err
    for fn in ['df', 'd2f', 'c', 'dc', 'h', 'dh']:
        if hasattr(originProb, fn) and getattr(originProb.userProvided,
                                               fn) or originProb.isFDmodel:
            setattr(newProb, fn, getattr(originProb, fn))

    newProb.plot = 0
    newProb.iprint = -1
    newProb.nlpSolver = originProb.nlpSolver
    return newProb
Example #12
def getDirectionOptimPoint(p, func, x, direction, forwardMultiplier = 2.0, maxiter = 150, xtol = None, maxConstrLimit = None,  \
                           alpha_lb = 0,  alpha_ub = inf,  \
                           rightLocalization = 0,  leftLocalization = 0, \
                           rightBorderForLocalization = 0, leftBorderForLocalization = None):
    if all(direction==0): p.err('nonzero direction is required')

    if maxConstrLimit is None:
        lsFunc = funcDirectionValue
        args = (func, x, direction)
    else:
        lsFunc = funcDirectionValueWithMaxConstraintLimit
        args = (func, x, direction, maxConstrLimit, p)

    prev_alpha, new_alpha = alpha_lb, min(alpha_lb+0.5, alpha_ub)
    prev_val = lsFunc(prev_alpha, *args)
    for i in range(p.maxLineSearch):
        if lsFunc(new_alpha, *args)>prev_val or new_alpha==alpha_ub: break
        else:
            if i != 0: prev_alpha = min(alpha_lb, new_alpha)
            new_alpha *= forwardMultiplier

    if i == p.maxLineSearch-1: p.debugmsg('getDirectionOptimPoint: maxLineSearch is exceeded')
    lb, ub = prev_alpha, new_alpha

    if xtol is None: xtol = p.xtol / 2.0
    # NB! goldenSection solver ignores x0
    p_LS = NLP(lsFunc, x0=0, lb = lb,  ub = ub, iprint = -1, \
               args=args, xtol = xtol, maxIter = maxiter, contol = p.contol)# contol is used in funcDirectionValueWithMaxConstraintLimit


    r = p_LS.solve('goldenSection', useOOiterfcn=False, rightLocalization=rightLocalization, leftLocalization=leftLocalization, rightBorderForLocalization=rightBorderForLocalization, leftBorderForLocalization=leftBorderForLocalization)
    if r.istop == IS_MAX_ITER_REACHED:
        p.warn('getDirectionOptimPoint: max iter has been exceeded')
    alpha_opt = r.special.rightXOptBorder

    R = DirectionOptimPoint()
    R.leftAlphaOptBorder = r.special.leftXOptBorder
    R.leftXOptBorder = x + R.leftAlphaOptBorder * direction
    R.rightAlphaOptBorder = r.special.rightXOptBorder
    R.rightXOptBorder = x + R.rightAlphaOptBorder * direction
    R.x = x + alpha_opt * direction
    R.alpha = alpha_opt
    R.evalsF = r.evals['f']+i
    return R
Example #13
    def _fit_openopt(self):

        from openopt import NLP


        solver = self.kwargs.get('solver','ralg')
        #~ self.args += args[1:]
        #~ self.kwargs.update(kwargs)
        self.err = [1e10,1e9,1e8,1e7]
        self.i = 0
        params = []
        for p in self.model.x0:
            params.append(p()['value'])

        def f(params):
            for i in range(len(params)):
                if self.model.x0[i]()['froozen']:
                    params[i] = self.model.x0[i]()['value']
                else:
                    if isnan(params[i]) or isinf(params[i]):  # comparing against nan is always False; isnan/isinf assumed from numpy
                        params[i] = self.model.x0[i]()['value']
                        self.i = (self.i+1)%4
                        return sum([self.err[self.i]]*len(self.data.x))
                    if params[i] < self.model.x0[i]()['min']:
                        params[i] = self.model.x0[i]()['min']
                        self.i = (self.i+1)%4
                        return sum([self.err[self.i]]*len(self.data.x))
                    if params[i] > self.model.x0[i]()['max']:
                        params[i] = self.model.x0[i]()['max']
                        self.i = (self.i+1)%4
                        return sum([self.err[self.i]]*len(self.data.x))
            if self.data.we is not None:
                return sum((self.data.y-self.model.fcn(params,self.data.x,self.model.const))**2 * self.data.we)
            else:
                return sum((self.data.y-self.model.fcn(params,self.data.x,self.model.const))**2)

        #~ f = lambda p: sum(((self.func(p, self.x) - self.y) / self.weights)**2)
        self.prob = NLP(f, x0 = params)
        self.res = self.prob.solve(solver)

        self.storeResult(self.res)

        return self.result
Example #14
    def optimize(
        self, solver="ralg", plot=0, maxIter=1e5, maxCPUTime=3600, maxFunEvals=1e12
    ):
        if self.mtype == "LP" or self.mtype == "MILP":
            p = MILP(
                self.objective,
                self.init,
                constraints=self.constraints,
                maxIter=maxIter,
                maxCPUTime=maxCPUTime,
                maxFunEvals=maxFunEvals,
            )
        elif self.mtype == "NLP":
            p = NLP(
                self.objective,
                self.init,
                constraints=self.constraints,
                maxIter=maxIter,
                maxCPUTime=maxCPUTime,
                maxFunEvals=maxFunEvals,
            )
        else:
            raise TypeError("Model Type Error")

        if self.sense == GRB.MAXIMIZE:
            self.Results = p.maximize(solver, plot=plot)
        else:
            self.Results = p.minimize(solver, plot=plot)
        # print(self.Results)
        self.ObjVal = self.Results.ff

        if self.Results.isFeasible:
            self.Status = GRB.OPTIMAL
        else:
            self.Status = GRB.INFEASIBLE

        for v in self.variables:
            v.VarName = v.name
            v.X = self.Results.xf[v]
        return self.Status
Example #15
from numpy import atleast_2d, size
from numpy.random import rand
from openopt import NLP


def upper_bound_mse(model, lower, upper):

    n_sample, n_feature = model.X.shape
    x_lb = atleast_2d(lower)
    x_ub = atleast_2d(upper)

    x_lb = x_lb.T if size(x_lb, 1) != n_feature else x_lb
    x_ub = x_ub.T if size(x_ub, 1) != n_feature else x_ub

    x0 = ((x_ub - x_lb) * rand(1, n_feature) + x_lb)[0]

    obj = objective_mse_wrapper(model)
    #    x_opt = fmin_slsqp(obj, x0, bounds=zip(x_lb[0], x_ub[0]), iter=1e4, iprint=-1)

    #    # openopt optimizers
    p = NLP(obj, x0, lb=x_lb[0], ub=x_ub[0], iprint=1e6)
    r = p.solve('ralg')
    x_opt = r.xf

    #    if any(x_opt < x_lb) or any(x_opt > x_ub):
    #        pdb.set_trace()
    res = -obj(x_opt)

    return res
Example #16
from openopt import NLP
from numpy import cos, arange, ones, asarray, abs, zeros
N = 30
M = 5
ff = lambda x: ((x-M)**2).sum()
p = NLP(ff, cos(arange(N)))

def df(x):
    r = 2*(x-M)
    r[0] += 15 #incorrect derivative
    r[8] += 80 #incorrect derivative
    return r
p.df = df

p.c = lambda x: [2*x[0]**4 - 32, x[1]**2 + x[2]**2 - 8]

def dc(x):
    r = zeros((2, p.n))
    r[0,0] = 2 * 4 * x[0]**3
    r[1,1] = 2 * x[1]
    r[1,2] = 2 * x[2] + 15 #incorrect derivative
    return r
p.dc = dc

p.h = lambda x: (1e1*(x[-1]-1)**4, (x[-2]-1.5)**4)

def dh(x):
    r = zeros((2, p.n))
    r[0,-1] = 1e1*4*(x[-1]-1)**3
    r[1,-2] = 4*(x[-2]-1.5)**3 + 15 #incorrect derivative
    return r
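The listing breaks off before the derivative checks; a plausible continuation, following how checkdf/checkdc/checkdh are used in the other examples on this page, would be:

p.dh = dh

# Compare the user-supplied derivatives against finite differences;
# the deliberately wrong entries marked above should be reported.
p.checkdf()
p.checkdc()
p.checkdh()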
Example #17
x1 = 1
z1 = 5
z2 = 2
x0 = [x1, z1, z2]

# calculate the constraints first
[y1, y2, _] = GaussSeidelCoordinator(
    x1, z1, z2, 0)  # find values of y1, y2 that is consistent with x1, z1, z2
coupling.y1 = y1
coupling.y2 = y2

# print initial values
print('Initial Objective: ' + str(ObjectiveMain(x0)))
print('[x1 z1 z2] = ' + str(x0))
print('[y1 y2] = [' + str(coupling.y1) + ', ' + str(coupling.y2) + ']')

# Nonlinear programming for optimizing the Sellar problem
# documentation at: https://github.com/troyshu/openopt/blob/master/openopt/oo.py
p = NLP(f=ObjectiveMain,
        x0=x0,
        lb=(0, -10, 0),
        ub=(10, 10, 10),
        c=[Constraint1, Constraint2],
        iprint=50)
res = p.solve('scipy_cobyla')

# print the results
print('\n' + '-' * 50)
print('Final Objective: ' + str(res.ff))
print('[x1 z1 z2] = ' + str(res.xf))
print('[y1 y2] = [' + str(coupling.y1) + ', ' + str(coupling.y2) + ']')
Example #18
    def __init__(self, calibration, parameter, debug=False):
        def eqX(industry, Industry):
            i, I = industry, Industry

            def equation(x):
                X, px, pf = x[sX:sF], x[spx:spz], x[spf:epf]
                pf = np.array([float(x[spf:epf]), 1])
                return X[i] - self.calibration.alpha[I] * sum([
                    pf[h] * self.calibration.FF[H]
                    for h, H in enumerate(parameter.factors)
                ] / px[i])

            return equation

        def eqpx(i):
            def equation(x):
                X, Z = x[sX:sF], x[sZ:spx]
                return X[i] - Z[i]

            return equation

        def eqZ(i):
            def equation(x):
                px, pz = x[spx:spz], x[spz:spf]
                return px[i] - pz[i]

            return equation

        def eqpz(j, J):
            def equation(x):
                F, Z = Table.unflatten(index=parameter.factors,
                                       columns=parameter.industries,
                                       sam=x[sF:sZ]), x[sZ:spx]
                return Z[j] - self.calibration.b[J] * np.prod([
                    F[J][H]**self.calibration.beta[J][H]
                    for H in parameter.factors
                ])

            return equation

        def eqpf(H):
            def equation(x):
                F = Table.unflatten(index=parameter.factors,
                                    columns=parameter.industries,
                                    sam=x[sF:sZ])
                return sum(
                    F[J][H]
                    for J in parameter.industries) - self.calibration.FF[H]

            return equation

        def eqF(h, H, j, J):
            def equation(x):
                F, Z, pz, pf = Table.unflatten(
                    index=parameter.factors,
                    columns=parameter.industries,
                    sam=x[sF:sZ]), x[sZ:spx], x[spz:spf], x[spf:epf]
                pf = np.array([float(x[spf:epf]), 1])
                return F[J][
                    H] - self.calibration.beta[J][H] * pz[j] * Z[j] / pf[h]

            return equation

        self.calibration = copy(calibration)
        self.calibration.X0 = self.calibration.X0['HH']
        j = i = len(parameter.industries)
        h = len(parameter.factors)
        sX = 0
        sF = sX + i
        sZ = sF + i * h
        spx = sZ + j
        spz = spx + i
        spf = spz + j
        epf = spf + h - 1
        epf_numerair = epf + 1
        lb = np.array([(np.float64(0.001))] * epf)
        self.x = x = np.empty(epf, dtype='float64')
        x[sX:sF] = self.calibration.X0.data
        x[sF:sZ] = self.calibration.F0.as_matrix().flatten()
        x[sZ:spx] = self.calibration.Z0.data
        x[spx:spz] = [1] * i
        x[spz:spf] = [1] * j
        x[spf:epf] = [1] * (epf - spf)
        self.t = x[:]
        if debug:
            self.x = x = np.array([21.1] * epf)
        print(x)
        xnames = [None] * epf_numerair
        xnames[sX:sF] = self.calibration.X0.names
        xnames[sF:sZ] = [
            i + h + ' ' for i in parameter.industries
            for h in parameter.factors
        ]
        xnames[sZ:spx] = self.calibration.Z0.names
        xnames[spx:spz] = parameter.industries
        xnames[spz:spf] = parameter.industries
        xnames[spf:epf] = parameter.factors
        xnames[epf] = parameter.factors[-1]

        xtypes = [None] * epf_numerair
        xtypes[sX:sF] = ['X0'] * len(self.calibration.X0)
        xtypes[sF:sZ] = ['F'] * (len(parameter.industries) +
                                 len(parameter.factors))
        xtypes[sZ:spx] = ['F0'] * len(self.calibration.F0)
        xtypes[spx:spz] = ['pb'] * len(parameter.industries)
        xtypes[spz:spf] = ['pz'] * len(parameter.industries)
        xtypes[spf:epf] = ['pf'] * len(parameter.factors)
        xtypes[epf] = 'pf'

        self.xnametypes = [
            '%s %s' % (xnames[i], xtypes[i]) for i in range(epf - 1)
        ]

        constraints = []
        for i, I in enumerate(parameter.industries):
            industry, Industry = unlink2(i, I)
            constraints.append(eqX(industry, Industry))
            constraints.append(eqpx(industry))
            constraints.append(eqZ(industry))
            constraints.append(eqpz(industry, Industry))

        for F in parameter.factors:
            Factor = unlink(F)
            constraints.append(eqpf(Factor))

        for f, F in enumerate(parameter.factors):
            for i, I in enumerate(parameter.industries):
                industry, Industry = unlink2(i, I)
                factor, Factor = unlink2(f, F)
                constraints.append(eqF(factor, Factor, industry, Industry))

        self.UU = UU = lambda x: -np.prod([
            x[i]**self.calibration.alpha[i]
            for i in range(len(parameter.industries))
        ])

        p = NLP(UU,
                x,
                h=constraints,
                lb=lb,
                iprint=50,
                maxIter=10000,
                maxFunEvals=1e7,
                name='NLP_1')
        p.plot = debug
        self.r = p.solve('ralg', plot=0)

        if debug:
            for i, constraint in enumerate(constraints):
                print(i, '%02f' % constraint(self.r.xf))
Example #19
    #print 'len pd',len(pd)
    #print 'len p',len(p)

    x, z = precompute_r()
    X, Z = N.meshgrid(x, z)
    coslist, cosmat_list = precompute_cos(h, k, l, x, z)
    #print 'coslist',coslist[0]
    flist = N.ones(len(p0))

    flist[M::] = -flist[M::]
    #vout=chisq_hessian(p,fqerr,p,coslist,flist)
    #print 'vout', vout

    #    print 'pos',pos_sum(p0)
    #    print 'neg',neg_sum(p0)
    p = NLP(Entropy, p0, maxIter=1e3, maxFunEvals=1e5)
    #p = NLP(chisq, p0, maxIter = 1e3, maxFunEvals = 1e5)
    # f(x) gradient (optional):
    #    p.df = S_grad
    #    p.d2f=S_hessian
    #    p.userProvided.d2f=True

    # lb<= x <= ub:
    # x4 <= -2.5
    # 3.5 <= x5 <= 4.5
    # all other: lb = -5, ub = +15
    #p.lb =1e-7*N.ones(p.n)
    #p.ub = N.ones(p.n)
    p.lb = 1e-7 * N.ones(p0.shape)
    p.ub = N.ones(p0.shape)
    #p.ub[4] = -2.5
Example #20
colors = colors[:len(solvers)]
lines, results = [], {}
for j in range(len(solvers)):
    solver = solvers[j]
    color = colors[j]
    p = NLP(f,
            x0,
            name='bench2',
            df=df,
            c=c,
            dc=dc,
            h=h,
            dh=dh,
            lb=lb,
            ub=ub,
            gtol=gtol,
            ftol=ftol,
            maxFunEvals=1e7,
            maxIter=maxIter,
            maxTime=maxTime,
            plot=1,
            color=color,
            iprint=10,
            legend=[solvers[j]],
            show=False,
            contol=contol)
    #    p = NLP(f, x0, name = 'bench2', df = df, c=c, dc = dc, lb = lb, ub = ub, gtol=gtol, ftol = ftol, maxFunEvals = 1e7, maxIter = 1e4, maxTime = maxTime,  plot = 1, color = color, iprint = 0, legend = [solvers[j]], show=False,  contol = contol)
    if solver[:4] == 'ralg':
        pass
#        p.gtol = 1e-8
#        p.ftol = 1e-7
Example #21
    def max_loglik_p(self, method='scipy_slsqp'):

        message(self, 'Optimizing strain genotypes using %s' % (method))

        # Initialize frequencies
        p = []

        # Bound genotypes in (0,1)
        lb = np.zeros(4 * self.n)
        ub = np.ones(4 * self.n)

        # Constrain genotypes to sum to 1
        def h(a):
            return np.reshape(a, (self.n, 4)).sum(axis=1) - 1.0

        quiet()

        # Optimize genotypes at every position
        for j in range(self.l):

            # Copy mask
            mj = self.mask[:, j].copy()

            # Objective function
            def f(a):
                # Reshape strain genotypes
                pi = np.reshape(a, (self.n, 4))
                # Nucleotide frequencies at position j
                a1 = (1 - self.e) * (np.dot(self.z, pi)) + (self.e / 4.)
                # Site likelihoods
                l1 = (self.x[:, j, :] * np.log(a1.clip(1e-10))).sum(axis=1)
                # Robust estimation
                if self.robust:
                    # Alternative likelihoods
                    l2 = (np.log(.25) * self.x[:, j, :]).sum(axis=1)
                    # Pay likelihood penalty to mask sites
                    i = l1 >= self.penalty * l2
                    # Update mask
                    mj[i] = True
                    mj[np.logical_not(i)] = False
                    # Penalized likelihood
                    lf = l1[i].sum() + self.penalty * l2[np.logical_not(i)].sum()
                else:
                    # Normal likelihood
                    lf = l1.sum()
                # L2 penalty
                #return -1.*lf - pi.max(axis=1).sum()
                return -1. * lf - (pi**2).sum()

            # Calculate original likelihood
            x0 = self.p[:, j, :].flatten()
            l0 = f(x0)

            # Optimize genotypes
            g = [.25] * 4 * self.n
            soln = NLP(f,
                       g,
                       h=h,
                       lb=lb,
                       ub=ub,
                       gtol=1e-5,
                       contol=1e-5,
                       name='NLP1').solve(method, plot=0)

            # Update genotypes
            if soln.ff <= l0 and soln.isFeasible:
                # Discretize results
                xf = discretize_genotypes(
                    np.reshape(soln.xf.clip(0, 1), (self.n, 4)))
                lf = f(xf.flatten())
                # Validate likelihood
                if lf < l0:
                    # Update genotypes and mask
                    p.append(xf)
                    if self.robust:
                        self.mask[:, j] = mj
                    continue
            # If likelihood not improved, use original genotypes
            p.append(self.p[:, j, :])

        loud()

        # Fix shape
        self.p = np.swapaxes(np.array(p), 0, 1)
        return self
Example #22
def AMPGO(objfun,
          x0,
          args=(),
          local='L-BFGS-B',
          local_opts=None,
          bounds=None,
          maxfunevals=None,
          totaliter=20,
          maxiter=5,
          glbtol=1e-5,
          eps1=0.02,
          eps2=0.1,
          tabulistsize=5,
          tabustrategy='farthest',
          fmin=-numpy.inf,
          disp=None):
    """
    Finds the global minimum of a function using the AMPGO (Adaptive Memory Programming for
    Global Optimization) algorithm. 
    
    :param `objfun`: Function to be optimized, in the form ``f(x, *args)``.
    :type `objfun`: callable
    :param `args`: Additional arguments passed to `objfun`.
    :type `args`: tuple
    :param `local`: The local minimization method (e.g. ``"L-BFGS-B"``). It can be one of the available
     `scipy` local solvers or `OpenOpt` solvers.
    :type `local`: string
    :param `bounds`: A list of tuples specifying the lower and upper bound for each independent variable
     [(`xl0`, `xu0`), (`xl1`, `xu1`), ...]
    :type `bounds`: list
    :param `maxfunevals`: The maximum number of function evaluations allowed.
    :type `maxfunevals`: integer
    :param `totaliter`: The maximum number of global iterations allowed.
    :type `totaliter`: integer
    :param `maxiter`: The maximum number of `Tabu Tunnelling` iterations allowed during each global iteration.
    :type `maxiter`: integer
    :param `glbtol`: The optimization will stop if the absolute difference between the current minimum objective
     function value and the provided global optimum (`fmin`) is less than `glbtol`.
    :type `glbtol`: float
    :param `eps1`: A constant used to define an aspiration value for the objective function during the Tunnelling phase.
    :type `eps1`: float
    :param `eps2`: Perturbation factor used to move away from the latest local minimum at the start of a Tunnelling phase.
    :type `eps2`: float
    :param `tabulistsize`: The size of the tabu search list (a circular list).
    :type `tabulistsize`: integer
    :param `tabustrategy`: The strategy to use when the size of the tabu list exceeds `tabulistsize`. It can be
     'oldest' to drop the oldest point from the tabu list or 'farthest' to drop the element farthest from
     the last local minimum found.
    :type `tabustrategy`: string
    :param `fmin`: If known, the objective function global optimum value.
    :type `fmin`: float
    :param `disp`: If zero or defaulted, then no output is printed on screen. If a positive number, then status
     messages are printed.
    :type `disp`: integer
 
    :returns: A tuple of 5 elements, in the following order:

     1. **best_x** (`array_like`): the estimated position of the global minimum.
     2. **best_f** (`float`): the value of `objfun` at the minimum.
     3. **evaluations** (`integer`): the number of function evaluations.
     4. **msg** (`string`): a message describing the cause of the termination.
     5. **tunnel_info** (`tuple`): a tuple containing the total number of Tunnelling phases performed and the
        successful ones.

    :rtype: `tuple`

    The detailed implementation of AMPGO is described in the paper 
    "Adaptive Memory Programming for Constrained Global Optimization" located here:

    http://leeds-faculty.colorado.edu/glover/fred%20pubs/416%20-%20AMP%20(TS)%20for%20Constrained%20Global%20Opt%20w%20Lasdon%20et%20al%20.pdf

    Copyright 2014 Andrea Gavana
    """

    if local not in SCIPY_LOCAL_SOLVERS + OPENOPT_LOCAL_SOLVERS:
        raise Exception('Invalid local solver selected: %s' % local)

    if local in SCIPY_LOCAL_SOLVERS and not SCIPY:
        raise Exception(
            'The selected solver %s is not available as there is no scipy installation'
            % local)

    if local in OPENOPT_LOCAL_SOLVERS and not OPENOPT:
        raise Exception(
            'The selected solver %s is not available as there is no OpenOpt installation'
            % local)

    x0 = numpy.atleast_1d(x0)
    n = len(x0)

    if bounds is None:
        bounds = [(None, None)] * n
    if len(bounds) != n:
        raise ValueError('length of x0 != length of bounds')

    low = [0] * n
    up = [0] * n
    for i in range(n):
        if bounds[i] is None:
            low[i], up[i] = -numpy.inf, numpy.inf
        else:
            l, u = bounds[i]
            low[i] = -numpy.inf if l is None else l
            up[i] = numpy.inf if u is None else u

    if maxfunevals is None:
        maxfunevals = max(100, 10 * len(x0))

    if tabulistsize < 1:
        raise Exception(
            'Invalid tabulistsize specified: %s. It should be an integer greater than zero.'
            % tabulistsize)
    if tabustrategy not in ['oldest', 'farthest']:
        raise Exception(
            'Invalid tabustrategy specified: %s. It must be one of "oldest" or "farthest"'
            % tabustrategy)

    iprint = 50
    if disp is None or disp <= 0:
        disp = 0
        iprint = -1

    low = numpy.asarray(low)
    up = numpy.asarray(up)

    tabulist = []
    best_f = numpy.inf
    best_x = x0

    global_iter = 0
    all_tunnel = success_tunnel = 0
    evaluations = 0

    if glbtol < 1e-8:
        local_tol = glbtol
    else:
        local_tol = 1e-8

    while 1:

        if disp > 0:
            print('\n')
            print('=' * 72)
            print('Starting MINIMIZATION Phase %-3d' % (global_iter + 1))
            print('=' * 72)

        if local in OPENOPT_LOCAL_SOLVERS:
            problem = NLP(objfun,
                          x0,
                          lb=low,
                          ub=up,
                          maxFunEvals=max(1, maxfunevals),
                          ftol=local_tol,
                          iprint=iprint)
            problem.args = args

            results = problem.solve(local)
            xf, yf, num_fun = results.xf, results.ff, results.evals['f']
        else:
            options = {'maxiter': max(1, maxfunevals), 'disp': disp}
            if local_opts is not None:
                options.update(local_opts)
            res = minimize(objfun,
                           x0,
                           args=args,
                           method=local,
                           bounds=bounds,
                           tol=local_tol,
                           options=options)
            xf, yf, num_fun = res['x'], res['fun'], res['nfev']

        maxfunevals -= num_fun
        evaluations += num_fun

        if yf < best_f:
            best_f = yf
            best_x = xf

        if disp > 0:
            print('\n\n ==> Reached local minimum: %s\n' % yf)

        if best_f < fmin + glbtol:
            if disp > 0:
                print('=' * 72)
            return best_x, best_f, evaluations, 'Optimization terminated successfully', (
                all_tunnel, success_tunnel)
        if maxfunevals <= 0:
            if disp > 0:
                print('=' * 72)
            return best_x, best_f, evaluations, 'Maximum number of function evaluations exceeded', (
                all_tunnel, success_tunnel)

        tabulist = drop_tabu_points(xf, tabulist, tabulistsize, tabustrategy)
        tabulist.append(xf)

        i = improve = 0

        while i < maxiter and improve == 0:

            if disp > 0:
                print('-' * 72)
                print('Starting TUNNELLING   Phase (%3d-%3d)' %
                      (global_iter + 1, i + 1))
                print('-' * 72)

            all_tunnel += 1

            r = numpy.random.uniform(-1.0, 1.0, size=(n, ))
            beta = eps2 * numpy.linalg.norm(xf) / numpy.linalg.norm(r)

            if numpy.abs(beta) < 1e-8:
                beta = eps2

            x0 = xf + beta * r

            x0 = numpy.where(x0 < low, low, x0)
            x0 = numpy.where(x0 > up, up, x0)

            aspiration = best_f - eps1 * (1.0 + numpy.abs(best_f))

            tunnel_args = tuple([objfun, aspiration, tabulist] + list(args))

            if local in OPENOPT_LOCAL_SOLVERS:
                problem = NLP(tunnel,
                              x0,
                              lb=low,
                              ub=up,
                              maxFunEvals=max(1, maxfunevals),
                              ftol=local_tol,
                              iprint=iprint)
                problem.args = tunnel_args

                results = problem.solve(local)
                xf, yf, num_fun = results.xf, results.ff, results.evals['f']
            else:
                options = {'maxiter': max(1, maxfunevals), 'disp': disp}
                if local_opts is not None:
                    options.update(local_opts)

                res = minimize(tunnel,
                               x0,
                               args=tunnel_args,
                               method=local,
                               bounds=bounds,
                               tol=local_tol,
                               options=options)
                xf, yf, num_fun = res['x'], res['fun'], res['nfev']

            maxfunevals -= num_fun
            evaluations += num_fun

            yf = inverse_tunnel(xf, yf, aspiration, tabulist)

            if yf <= best_f + glbtol:
                oldf = best_f
                best_f = yf
                best_x = xf
                improve = 1
                success_tunnel += 1

                if disp > 0:
                    print(
                        '\n\n ==> Successful tunnelling phase. Reached local minimum: %s < %s\n'
                        % (yf, oldf))

            if best_f < fmin + glbtol:
                return best_x, best_f, evaluations, 'Optimization terminated successfully', (
                    all_tunnel, success_tunnel)

            i += 1

            if maxfunevals <= 0:
                return best_x, best_f, evaluations, 'Maximum number of function evaluations exceeded', (
                    all_tunnel, success_tunnel)

            tabulist = drop_tabu_points(xf, tabulist, tabulistsize,
                                        tabustrategy)
            tabulist.append(xf)

        if disp > 0:
            print('=' * 72)

        global_iter += 1
        x0 = xf.copy()

        if global_iter >= totaliter:
            return best_x, best_f, evaluations, 'Maximum number of global iterations exceeded', (
                all_tunnel, success_tunnel)

        if best_f < fmin + glbtol:
            return best_x, best_f, evaluations, 'Optimization terminated successfully', (
                all_tunnel, success_tunnel)
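A hypothetical smoke test for AMPGO on a two-dimensional quadratic, assuming scipy is installed so the L-BFGS-B local solver is available:

# Illustrative call only; argument names follow the docstring above.
import numpy

best_x, best_f, evaluations, msg, tunnel_info = AMPGO(
    lambda x: ((x - 1.0)**2).sum(),
    x0=numpy.array([4.0, -3.0]),
    local='L-BFGS-B',
    bounds=[(-5.0, 5.0)] * 2,
    totaliter=5,
    disp=1)
print(msg, best_x, best_f)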
Example #23
from openopt import NLP
x0 = [4, 5, 6]
#h = lambda x: log(1+abs(4+x[1]))
#f = lambda x: log(1+abs(x[0]))
f = lambda x: x[0]**4 + x[1]**4 + x[2]**4
df = lambda x: [4*x[0]**3,  4*x[1]**3, 4*x[2]**3]
h = lambda x: [(x[0]-1)**2,  (x[1]-1)**4]
dh = lambda x: [[2*(x[0]-1), 0, 0],  [0, 4*(x[1]-1)**3, 0]]
colors = ['r', 'b', 'g', 'k', 'y']
solvers = ['ralg','scipy_cobyla', 'algencan', 'ipopt', 'scipy_slsqp']
solvers = ['ralg','algencan']
contol = 1e-8
gtol = 1e-8

for i, solver in enumerate(solvers):
    p = NLP(f, x0, df=df, h=h, dh=dh, gtol = gtol, diffInt = 1e-1, contol = contol,  iprint = 1000, maxIter = 1e5, maxTime = 50, maxFunEvals = 1e8, color=colors[i], plot=0, show = i == len(solvers) - 1)
    p.checkdh()
    r = p.solve(solver)

#
#x0 = 4
##h = lambda x: log(1+abs(4+x[1]))
##f = lambda x: log(1+abs(x[0]))
#f = lambda x: x**4
#h = lambda x: (x-1)**2
#colors = ['r', 'b', 'g', 'k', 'y']
#solvers = ['ralg','scipy_cobyla', 'algencan', 'ipopt', 'scipy_slsqp']
##solvers = ['algencan']
#contol = 1e-8
#gtol = 1e-8
#for i, solver in enumerate(solvers):
Example #24
###############################################################
solvers = ['ralg', 'scipy_cobyla', 'lincher', 'scipy_slsqp', 'ipopt','algencan']
#solvers = ['ralg', 'ipopt']
solvers = ['ralg', 'scipy_cobyla', 'lincher', 'scipy_slsqp', 'ipopt']
solvers = ['ralg', 'scipy_slsqp', 'scipy_cobyla', 'algencan']
#solvers = ['ipopt','ralg', 'algencan']
solvers = ['ralg', 'scipy_cobyla']
#solvers = ['ralg', 'scipy_slsqp']
#solvers = ['ralg', 'algencan']
solvers = ['ralg']
###############################################################

lines, results = [], {}
for j, solver in enumerate(solvers):
    p = NLP(ff, startPoint, xlabel = Xlabel, gtol=gtol, diffInt = diffInt, ftol = ftol, maxIter = 1390, plot = PLOT, color = colors[j], iprint = 10, df_iter = 4, legend = solver, show=False,  contol = contol,  maxTime = maxTime,  maxFunEvals = maxFunEvals, name='NLP_bench_1')
    p.constraints = [c1<0,  c2<0,  h1.eq(0),  h2.eq(0), x > lb, x< ub]
    #p.constraints = h1.eq(0)
    
    #p._Prepare()
    #print p.dc(p.x0)
    #print h1.D(startPoint)
    #print h2.D(startPoint)
    #continue
    
    if solver =='algencan':
        p.gtol = 1e-2
    elif solver == 'ralg':
        pass
        #p.debug = 1
    
Example #25
"""
OpenOpt GUI:
     function manage() usage example
"""

from openopt import NLP, manage
from numpy import cos, arange, ones, asarray, abs, zeros
N = 50
M = 5
p = NLP(lambda x: ((x - M)**2).sum(), cos(arange(N)))
p.lb, p.ub = -6 * ones(N), 6 * ones(N)
p.lb[3] = 5.5
p.ub[4] = 4.5
p.c = lambda x: [2 * x[0]**4 - 32, x[1]**2 + x[2]**2 - 8]
p.h = (lambda x: 1e1 * (x[-1] - 1)**4, lambda x: (x[-2] - 1.5)**4)
"""
minTime is used here
for to provide enough time for user
to play with GUI
"""

minTime = 1.5  # sec
p.name = 'GUI_example'
p.minTime = minTime
"""
hence maxIter, maxFunEvals etc
will not trigger till minTime

only same iter point x_k-1=x_k
or some coords = nan
can stop calculations
        # so it should be something like 1e-3...1e-5
    gtol = 1e-6 # (default gtol = 1e-6)

        # Assign problem:
         # 1st arg - objective function
        # 2nd arg - start point
        #p = NLP(f, x0, c=c,  gtol=gtol, contol=contol, iprint = 50, maxIter = 10000, maxFunEvals = 1e7, name = 'NLP_1')
    x0 = rd.uniform(-0.8, 0.8, num_dof*(2*N+1))
    # x0[2*N] = 0
    # x0[2*(2*N)] = 0
    # x0[3*(2*N)] = 0
    #x0 = rd.uniform(-0.1, 0.1, 2*N+1).tolist() + rd.uniform(-0.01, 0.01, 2*N+1).tolist() + rd.uniform(-.001, 0.001, 2*N+1).tolist()

    #x0 = range(0, num_dof*(2*N+1))
    #p = NLP(f, x0, c=c, h=h, gtol=gtol, contol=contol, iprint = 1, maxIter = 1000, maxFunEvals = 1e7, name = 'NLP_1')
    p = NLP(f, x0, c=c, h=h, gtol=gtol, contol=contol, iprint = 1, maxIter = 700, maxFunEvals = 1e7, name = 'NLP_1')

    #p = NLP(f, x0,  gtol=gtol, contol=contol, iprint = 50, maxIter = 10000, maxFunEvals = 1e7, name = 'NLP_1')

    # optional: graphic output, requires pylab (matplotlib)
    p.plot = True

    solver = 'ralg'
    #solver = 'scipy_cobyla'
    #solver = 'algencan'
    #solver = 'ipopt'
    #solver = 'scipy_slsqp'

    # solve the problem
    r = p.solve(solver, plot=0) # string argument is solver name
Example #27
    def minimize(self, **kwargs):
        """ solve the nonlinear problem using OpenOpt

        Returns:
        obj_value, solution

        obj_value -- value of the objective function at the discovered solution
        solution  -- the solution flux vector (indexed like matrix columns)
        """
        if self.iterator is None:
            nlp = NLP(self.obj,
                      self.x0,
                      df=self.d_obj,
                      c=self.nlc,
                      dc=self.d_nlc,
                      A=self.Aineq,
                      Aeq=self.Aeq,
                      b=self.bineq,
                      beq=self.beq,
                      lb=self.lb,
                      ub=self.ub,
                      **kwargs)
            nlp.debug = 1
            nlp.plot = False
            nlp.checkdf()
            if self.nlc is not None:
                nlp.checkdc()

            r = nlp.solve(self.solver)

        else:
            self.rlist = []
            for x0 in self.iterator:
                nlp = NLP(self.obj,
                          x0,
                          df=self.d_obj,
                          c=self.nlc,
                          dc=self.d_nlc,
                          A=self.Aineq,
                          Aeq=self.Aeq,
                          b=self.bineq,
                          beq=self.beq,
                          lb=self.lb,
                          ub=self.ub,
                          **kwargs)
                r = nlp.solve(self.solver)
                if r.istop > 0 and r.ff == r.ff:  # r.ff == r.ff is False only when ff is NaN
                    self.rlist.append(r)

            if self.rlist:
                r = min(self.rlist, key=lambda x: x.ff)

        if r.istop <= 0 or r.ff != r.ff:  # check halting condition
            self.obj_value = 0.
            self.solution = []
            self.istop = r.istop
        else:
            self.obj_value = r.ff
            self.solution = r.xf
            self.istop = r.istop

        return self.obj_value, self.solution
Example #28
"""
This is test for future work on ralg
It may fail for now
"""

from FuncDesigner import *
from openopt import NLP
from numpy import nan

a, b = oovars('a', 'b')
f = a**2 + b**2

K = 1e5
minThreshold = 0.1

c1 = ifThenElse(a > minThreshold, K * a**2 + 1.0 / K * b**2,
                nan) < K * minThreshold**2
c2 = a > minThreshold

startPoint = {a: -K, b: -K}

p = NLP(f, startPoint, constraints=[c1, c2], iprint=10, maxIter=1e4)

solver = 'ipopt'
solver = 'ralg'
#solver = 'scipy_slsqp'

r = p.solve(solver)
Example #29
from FuncDesigner import *
from openopt import SNLE, NLP

# assumption: a, b, c are FuncDesigner oovars, as in the other examples here
a, b, c = oovars('a', 'b', 'c')

T = Triangle((1,2,a),(2,b,4),(c,6.5,7))

# let's create an initial estimation to the problems below
startValues = {a:1, b:0.5, c:0.1} # you could merely set any values, but sometimes a good estimation matters

# let's find a, b, c values such that r = 1.5 (required tolerance 10^-5), R = 4.2 (tol 10^-4) and a+c == 2.5 (tol 10^-7)
# if no tol is provided, p.contol is used (default 10^-6)
equations = [(T.r == 1.5)(tol=1e-5) , (T.R == 4.2)(tol=1e-4), (a+c == 2.5)(tol=1e-7)]
prob = SNLE(equations, startValues)
result = prob.solve('nssolve', iprint = 0) # nssolve is name of the solver involved
print('\nsolution has%s been found' % ('' if result.stopcase > 0 else ' not'))
print('values:' + str(result(a, b, c))) # [1.5773327492140974, -1.2582702179532217, 0.92266725078590239]
print('triangle sides: '+str(T.sides(result))) # [8.387574299361475, 7.0470774415247455, 4.1815836020856336]
print('orthocenter of the triangle: ' + str(T.H(result))) # [ 0.90789867  2.15008869  1.15609611]

# let's find the minimum inscribed radius subject to the constraints a<1.5, a>-1, b<0, a+2*c<4, log(1-b)<2:
objective = T.r
prob = NLP(objective, startValues, constraints = [a<1.5, a>-1, b<0, a+2*c<4,  log(1-b)<2])
result1 = prob.minimize('ralg', iprint = 0) # ralg is name of the solver involved, see http://openopt.org/ralg for details
print('\nminimal inscribed radius: %0.3f' % T.r(result1)) #  1.321
print('optimal values:' + str(result1(a, b, c))) # [1.4999968332804028, 2.7938728907900973e-07, 0.62272481283890913]

# let's find the minimum circumscribed radius subject to the constraints a<1.5, a>-1, b<0, (a+2*c<4)(tol=1e-7), log(1-b)<2:
prob = NLP(T.R, startValues, constraints = (a<1.5, a>-1, b<0, (a+2*c<4)(tol=1e-7),  log(1-b)<2))
result2 = prob.minimize('ralg', iprint = 0) 
print('\nminimal circumscribed radius: %0.3f' % T.R(result2)) # 3.681
print('optimal values:' + str(result2(a, b, c))) # [1.499999901863762, -1.7546960034401648e-06, 1.2499958739399943]



Example #30
# so it should be something like 1e-3...1e-5
gtol = 1e-7  # (default gtol = 1e-6)

# Assign problem:
# 1st arg - objective function
# 2nd arg - start point
p = NLP(f,
        x0,
        df=df,
        c=c,
        dc=dc,
        h=h,
        dh=dh,
        A=A,
        b=b,
        Aeq=Aeq,
        beq=beq,
        lb=lb,
        ub=ub,
        gtol=gtol,
        contol=contol,
        iprint=50,
        maxIter=10000,
        maxFunEvals=1e7,
        name='NLP_1')

#optional: graphic output, requires pylab (matplotlib)
p.plot = True

#optional: user-supplied 1st derivatives check
p.checkdf()
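The snippet stops after the derivative check; following the pattern of the other examples here, a plausible continuation would be:

# Plausible continuation (not part of the original fragment):
r = p.solve('ralg')
print(r.xf, r.ff)  # solution vector and objective value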