Example #1
def wls_fit(function, initial_guess, X, Y, weights=None, lb=None, ub=None):
    """[Inputs]
        function is of form:
            def function(coeffs, xdata)
    """

    if weights is None:
        weights = [1] * len(X)

    def penalty(c):
        fit = function(c, X)
        error = (weights * (Y - fit) ** 2).sum()
        return error

    problem = NLP(penalty, initial_guess)

    if lb is not None:
        problem.lb = lb
    if ub is not None:
        problem.ub = ub

    solver = 'ipopt'
    result = problem.solve(solver)

    coeffs = result.xf
    return coeffs
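
# A minimal usage sketch (illustrative, not from the original project): assumes
# numpy is available, openopt's NLP is imported in this module and the 'ipopt'
# solver is installed; the model and data below are made up.
import numpy as np

def exp_decay(coeffs, xdata):
    # model with the expected signature function(coeffs, xdata)
    a, k = coeffs
    return a * np.exp(-k * xdata)

xs = np.linspace(0.0, 5.0, 50)
ys = 3.0 * np.exp(-0.7 * xs) + 0.05 * np.random.randn(xs.size)

fitted_coeffs = wls_fit(exp_decay, [1.0, 1.0], xs, ys, lb=[0.0, 0.0])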
Example #2
    def _min(self, func, x0, *args, **kwargs):
        if _USE_OPENOPT:
            if 'solver' in kwargs:
                solver = kwargs['solver']
                del kwargs['solver']
            else:
                solver = 'ralg'
            if 'df' in kwargs:
                df = kwargs['df']
                del kwargs['df']
            else:
                df = self._diff
            p = NLP(
                func,
                x0,
                args=(
                    self.mx,
                    self.my,
                    self.size),
                df=df,
                **kwargs)
            z = p.solve(solver)

            return z.xf, z.ff, z.istop > 0, z.msg
        else:
            a = minimize(
                func,
                x0,
                args=(
                    self.mx,
                    self.my,
                    self.size),
                *args,
                **kwargs)
            return a.x, a.fun, a.success, a.message
Example #3
    def openopt(self, *args, **kwargs):
        from openopt import NLP
        
        if 'plotting' in kwargs:
            plotting = True
            kwargs.pop('plotting')
        else:
            plotting = False
        try:
            solver = args[0]
        except IndexError:
            solver = 'ralg'
        self.args += args[1:]
        self.kwargs.update(kwargs)

        
        f = lambda p: sum(((self.func(p, self.x, self.const,self.mima) - self.y) / self.weights)**2)
        self.prob = NLP(f, x0 = self.p0, *self.args, **self.kwargs)
        self.res = self.prob.solve(solver)
        self.p = self.res.xf

        if plotting:
            self.plot(self.xdata, self.ydata, 'o', label = 'original data')
            self.plot(self.x, self.y, '+')
            self.plot(self.xdata, self.func(self.p, self.xdata,self.const,self.mima), label = 'fit with openopt:\n solver: '+ solver)
Example #4
File: garch.py Project: pyfun/msf
    def garchfit(self,initvalue):
        """
        estimate GARCH(1,1) parameters by the maximum likelihood method.
        Optimization should be under the following constraints:
        ARCH + GARCH < 1 (Stationarity)
        All parameters >= 0 (Non-negative)
        -------------------------------------------------------------------------
        InitValue = [ARCH; GARCH; Constant]
        """

        try:
            from openopt import NLP
            lb = [0.0001, 0.0001, 0.] #lower bound
            A = [1, 1, 0]
            b = 1.0
            p = NLP(self.f,initvalue,lb=lb,A=A,b=b)
            r = p.solve('ralg')
        
            return r.xf
        except ImportError:
            print "Openopt is not installed, will use scipy.fmin_cobyla instead"
            print "the result may not accurate though"
            params = fmin_cobyla(self.f,initvalue,cons=[lambda x:1-(x[0]+x[1]),
                                                   lambda x:x[0],
                                                   lambda x:x[2],
                                                   lambda x:x[1]])
            return params
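
# Illustrative check of the linear constraint used above (hypothetical numbers):
# with x = [ARCH, GARCH, Constant], A = [1, 1, 0] and b = 1.0 encode the
# stationarity condition ARCH + GARCH <= 1 as A*x <= b.
import numpy as np
x = np.array([0.08, 0.90, 1e-6])   # a plausible GARCH(1,1) parameter vector
A = np.array([1.0, 1.0, 0.0])
assert A.dot(x) <= 1.0             # stationarity holds for this x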
Example #5
def test_openopt():
    p = NLP(residual, x0, lb=lb, ub=ub,
            iprint=1, plot=True)
    # uncomment the following (and set scale = 1 above) to use openopt's
    # scaling mechanism.  This only seems to work with a few solvers, though.
    #p.scale = np.array([1, 1e6, 1e6, 1e6, 1e6, 1])

    r = p.solve('ralg')             # OpenOpt solver, seems to work well,
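
# For reference only: the excerpt assumes residual, x0, lb and ub were defined
# earlier in the file. Purely illustrative stand-ins could look like this:
import numpy as np
from openopt import NLP

residual = lambda x: ((x - np.array([1., 2., 3.]))**2).sum()
x0 = np.zeros(3)
lb = -10 * np.ones(3)
ub = 10 * np.ones(3)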
Example #6
 def startOptimization(self, root, varsRoot, AddVar, currValues, \
                       ValsColumnName, ObjEntry, ExperimentNumber, Next, NN, goal, objtol, C):
     AddVar.destroy()
     ValsColumnName.set('Experiment parameters')
     n = len(self.NameEntriesList)
     Names, Lb, Ub, Tol, x0 = [], [], [], [], []
     for i in range(n):
         N, L, U, T, valEntry = \
         self.NameEntriesList[i], self.LB_EntriesList[i], self.UB_EntriesList[i], self.TolEntriesList[i], self.ValueEntriesList[i]
         N.config(state=DISABLED)
         L.config(state=DISABLED)
         U.config(state=DISABLED)
         T.config(state=DISABLED)
         #valEntry.config(state=DISABLED)
         name, lb, ub, tol, val = N.get(), L.get(), U.get(), T.get(), valEntry.get()
         Names.append(name)
         x0.append(float(val))
         Lb.append(float(lb) if lb != '' else -inf)
         Ub.append(float(ub) if ub != '' else inf)
         
         # TODO: fix zero
         Tol.append(float(tol) if tol != '' else 0) 
         
     x0, Tol, Lb, Ub = asfarray(x0), asfarray(Tol), asfarray(Lb), asfarray(Ub)
     x0 *= xtolScaleFactor / Tol
     #self.x0 = copy(x0)
     from openopt import NLP, oosolver
     p = NLP(objective, x0, lb = Lb * xtolScaleFactor / Tol, ub=Ub * xtolScaleFactor / Tol)
     self.prob = p
     #calculated_points = [(copy(x0), copy(float(ObjEntry.get())))
     p.args = (Tol, self, ObjEntry, p, root, ExperimentNumber, Next, NN, objtol, C)
     #p.graphics.rate = -inf
     #p.f_iter = 2
     solver = oosolver('bobyqa', useStopByException = False)
     p.solve(solver, iprint = 1, goal = goal)#, plot=1, xlabel='nf')
     self.solved = True
     if p.stopcase >= 0:
         self.ValsColumnName.set('Best parameters')
         NN.set('Best obtained objective value:')
     #Next.config(state=DISABLED)
     Next.destroy()
     #reverse = True if goal == 'min' else False
     
     calculated_items = self.calculated_points.items() if isinstance(self.calculated_points, dict) else self.calculated_points
     vals = [calculated_items[i][1] for i in range(len(calculated_items))]
     ind = argsort(vals)
     j = ind[0] if goal == 'min' else ind[-1]
     key, val = calculated_items[j]
     text_coords = key.split(' ')
     for i in range(len(self.ValueEntriesList)):
         self.ValueEntriesList[i].delete(0, END)
         self.ValueEntriesList[i].insert(0, text_coords[i])
     ObjEntry.delete(0, END)
     obj_tol = self.ObjTolEntry.get()
     val = float(val) * 1e4 * objtol
     ObjEntry.insert(0, str(val))
     ObjEntry.config(state=DISABLED)
Example #7
    def find2(self, POIMobj, motif_len, motif_start, base, path2pwm=None,solver="NLP"):
        self.motif_start = motif_start
        self.motif_len = motif_len
        x0 = tools.ini_pwm(motif_len, 1, len(base))[0]

        x0 = x0.flatten()

        lb = np.ones(x0.shape) * 0.001
        ub = np.ones(x0.shape) * 0.999
        iprint = 0
        maxIter = 1000
        ftol = 1e-04
        gradtol = 1e-03
        diffInt = 1e-05
        contol = 1e-02
        maxFunEvals = 1e04
        maxTime = 100

        lenA = int(len(x0))
        lenk = int(len(x0)) / len(base)
        Aeq = np.zeros((lenk, lenA))
        beq = np.ones(lenk)
        for i in range(lenk):
            for pk in range(i, lenA, lenk):
                Aeq[i, pk] = 1

                # ,Aeq=Aeq,beq=beq,
        cons = {'type': 'eq', 'fun': lambda x: np.dot(Aeq, x) - beq}
        bnds = []
        for i in range(len(x0)):
            bnds.append((lb[i], ub[i]))
        # bnds = np.vstack((lb,ub))


        if solver == "ralg":
            from openopt import NLP
            p = NLP(self.f_L2, x0,lb=lb, ub=ub, Aeq=Aeq,beq=beq, args=(POIMobj.gPOIM,POIMobj.L,motif_start,POIMobj.small_k,motif_len),  diffInt=diffInt, ftol=ftol, plot=0, iprint=iprint,maxIter = maxIter, maxFunEvals = maxFunEvals, show=False, contol=contol)
            result = p._solve(solver)
            x = result.xf
            f = result.ff
        elif solver == "LBFGSB":
            x, f, d = fmin_l_bfgs_b(self.f_L2, x0,
                                    args=(POIMobj.gPOIM, POIMobj.L, motif_start, POIMobj.small_k, motif_len),
                                    approx_grad=True)#constraints=cons)#
        elif solver == "SLSQP":
            result = minimize(self.f_L2, x0,args=(POIMobj.gPOIM, POIMobj.L, motif_start, POIMobj.small_k, motif_len),method='SLSQP',bounds=bnds,constraints=cons)
            x = result.x
            f = result.fun
        self.motif_pwm = np.reshape(x, (4, motif_len))
        fopt = f
        self.normalize()
        if not(path2pwm is None):
            np.savetxt(path2pwm, self.poim_norm)

        return self.motif_pwm
Example #8
def balance(sam, debug=False):
    try:
        table = sam.array()
    except AttributeError:
        table = np.array(sam)

    assert table.shape[0] == table.shape[1]
    size = table.shape[0]
    x0 = np.array([v for v in table.flatten() if v != 0])

    def transform(ox):
        ret = np.zeros_like(table)
        i = 0
        for r in range(size):
            for c in range(size):
                if table[r, c] != 0:
                    ret[r, c] = ox[i]
                    i += 1
        return ret

    def objective(ox):
        ox = np.square((ox - x0) / x0)
        return np.sum(ox)

    def constraints(ox):
        ox = transform(ox)
        ret = np.sum(ox, 0) - np.sum(ox, 1)
        return ret

    print constraints(x0)

    if debug:
        print("--- balance ---")
    p = NLP(objective,
            x0,
            h=constraints,
            iprint=50 * int(debug),
            maxIter=100000,
            maxFunEvals=1e7,
            name='NLP_1')
    r = p.solve('ralg', plot=0)
    if debug:
        print 'constraints'
        print constraints(r.xf)
    assert r.isFeasible

    try:
        return sam.replace(transform(r.xf))
    except UnboundLocalError:
        return transform(r.xf)
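
# Editorial note: balance() expects `sam` either to be a table object exposing
# .array() and .replace(), or something np.array() can consume; for a plain
# ndarray the final sam.replace(...) raises AttributeError (not the
# UnboundLocalError that is caught). A hypothetical wrapper keeps the round
# trip working:
import numpy as np

class SimpleTable(object):
    # minimal stand-in for the table object the code above assumes
    def __init__(self, data):
        self.data = np.asarray(data, dtype=float)
    def array(self):
        return self.data
    def replace(self, new_data):
        return SimpleTable(new_data)

# balanced = balance(SimpleTable([[0., 2., 1.], [3., 0., 1.], [1., 2., 0.]]))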
Example #9
def lower_bound_predictor(model, lower, upper):

    n_sample, n_feature = model.X.shape

    theta = model.theta_
    ph = 2
    beta = model.beta
    gamma = model.gamma
    X = model.X

    x_lb = atleast_2d(lower)
    x_ub = atleast_2d(upper)

    x_lb = x_lb.T if size(x_lb, 1) != n_feature else x_lb
    x_ub = x_ub.T if size(x_ub, 1) != n_feature else x_ub

    x_lb = (x_lb - model.X_mean) / model.X_std
    x_ub = (x_ub - model.X_mean) / model.X_std

    #    a = zeros(1, m) b = zeros(1, m)
    #    midpoint = (z_min + z_max)' / 2
    #    for i = 1:m
    #        if gamma(i) >= 0
    #            # tangent line relaxation
    #            b(i) = -gamma(i) * exp(-midpoint(i))
    #            a(i) = gamma(i) * exp(-midpoint(i)) - b(i) * midpoint(i)
    #        else
    #            # chord relaxation
    #            b(i) = (gamma(i) * exp(-z_max(i)) - gamma(i) * exp(-z_min(i))) / (z_max(i)-z_min(i))
    #            a(i) = gamma(i) * exp(-z_min(i)) - b(i) * z_min(i)
    #

    x0 = ((x_ub - x_lb) * rand(1, n_feature) +
          x_lb)[0].tolist()  # Make a starting guess at the solution
    obj = model.predict
    #    x_opt = fmin_slsqp(obj, x0, bounds=zip(x_lb[0], x_ub[0]), iter=1e4, iprint=-1)
    p = NLP(obj, x0, lb=x_lb[0], ub=x_ub[0], iprint=1e6)
    r = p.solve('ralg')
    x_opt = r.xf

    #    if any(x_opt < x_lb) or any(x_opt > x_ub):
    #        pdb.set_trace()
    res = obj(x_opt)

    return res
Example #10
    def run(self, plot=True):
        """
        Solves the optimization problem.
        """
        # Initial try
        p0 = self.get_p0()

        #Lower bounds and Upper bounds (HARDCODED FOR QUADTANK)
        lbound = N.array([0.0001] * len(p0))
        if self.gridsize == 1:
            ubound = [10.0] * (self.gridsize * self.nbr_us)
        else:
            ubound = [10.0] * (self.gridsize * self.nbr_us
                               ) + [0.20, 0.20, 0.20, 0.20, N.inf] * (
                                   (self.gridsize - 1))

        #UPPER BOUND FOR VDP
        #ubound = [0.75]*(self.gridsize*self.nbr_us)+[N.inf]*((self.gridsize-1)*self.nbr_ys)

        if self.verbosity >= Multiple_Shooting.NORMAL:
            print 'Initial parameter vector: '
            print p0
            print 'Lower bound:', len(lbound)
            print 'Upper bound:', len(ubound)

        # Get OpenOPT handler
        p_solve = NLP(self.f,
                      p0,
                      lb=lbound,
                      ub=ubound,
                      maxFunEvals=self.maxFeval,
                      maxIter=self.maxIter,
                      ftol=self.ftol,
                      maxTime=self.maxTime)

        #If multiple shooting is performed or single shooting
        if self.gridsize > 1:
            p_solve.h = self.h

        if plot:
            p_solve.plot = 1

        self.opt = p_solve.solve(self.optMethod)

        return self.opt
Example #11
def getDirectionOptimPoint(p, func, x, direction, forwardMultiplier = 2.0, maxiter = 150, xtol = None, maxConstrLimit = None,  \
                           alpha_lb = 0,  alpha_ub = inf,  \
                           rightLocalization = 0,  leftLocalization = 0, \
                           rightBorderForLocalization = 0, leftBorderForLocalization = None):
    if all(direction==0): p.err('nonzero direction is required')

    if maxConstrLimit is None:
        lsFunc = funcDirectionValue
        args = (func, x, direction)
    else:
        lsFunc = funcDirectionValueWithMaxConstraintLimit
        args = (func, x, direction, maxConstrLimit, p)

    prev_alpha, new_alpha = alpha_lb, min(alpha_lb+0.5, alpha_ub)
    prev_val = lsFunc(prev_alpha, *args)
    for i in range(p.maxLineSearch):
        if lsFunc(new_alpha, *args)>prev_val or new_alpha==alpha_ub: break
        else:
            if i != 0: prev_alpha = min(alpha_lb, new_alpha)
            new_alpha *= forwardMultiplier

    if i == p.maxLineSearch-1: p.debugmsg('getDirectionOptimPoint: maxLineSearch is exceeded')
    lb, ub = prev_alpha, new_alpha

    if xtol is None: xtol = p.xtol / 2.0
    # NB! goldenSection solver ignores x0
    p_LS = NLP(lsFunc, x0=0, lb = lb,  ub = ub, iprint = -1, \
               args=args, xtol = xtol, maxIter = maxiter, contol = p.contol)# contol is used in funcDirectionValueWithMaxConstraintLimit


    r = p_LS.solve('goldenSection', useOOiterfcn=False, rightLocalization=rightLocalization, leftLocalization=leftLocalization, rightBorderForLocalization=rightBorderForLocalization, leftBorderForLocalization=leftBorderForLocalization)
    if r.istop == IS_MAX_ITER_REACHED:
        p.warn('getDirectionOptimPoint: max iter has been exceeded')
    alpha_opt = r.special.rightXOptBorder

    R = DirectionOptimPoint()
    R.leftAlphaOptBorder = r.special.leftXOptBorder
    R.leftXOptBorder = x + R.leftAlphaOptBorder * direction
    R.rightAlphaOptBorder = r.special.rightXOptBorder
    R.rightXOptBorder = x + R.rightAlphaOptBorder * direction
    R.x = x + alpha_opt * direction
    R.alpha = alpha_opt
    R.evalsF = r.evals['f']+i
    return R
Example #12
def balance(sam, debug=False):
    try:
        table = sam.array()
    except AttributeError:
        table = np.array(sam)

    assert table.shape[0] == table.shape[1]
    size = table.shape[0]
    x0 = np.array([v for v in table.flatten() if v !=0])

    def transform(ox):
        ret = np.zeros_like(table)
        i = 0
        for r in range(size):
            for c in range(size):
                if table[r, c] != 0:
                    ret[r, c] = ox[i]
                    i += 1
        return ret
    
    def objective(ox):
        ox = np.square((ox - x0) / x0)
        return np.sum(ox)

    def constraints(ox):
        ox = transform(ox)
        ret = np.sum(ox, 0) - np.sum(ox, 1)
        return ret

    print constraints(x0)

    if debug:
        print("--- balance ---")
    p = NLP(objective, x0, h=constraints, iprint = 50 * int(debug), maxIter = 100000, maxFunEvals = 1e7, name = 'NLP_1') 
    r = p.solve('ralg', plot=0)
    if debug:        
        print 'constraints'
        print constraints(r.xf)
    assert r.isFeasible

    try:
        return sam.replace(transform(r.xf))
    except UnboundLocalError:
        return transform(r.xf)
Example #13
    def max_loglik_z(self, method='scipy_slsqp'):
        message(self, 'Optimizing strain frequencies using %s' % (method))

        # Initialize frequencies
        z = []

        # Bound frequencies in (0,1)
        lb = np.zeros(self.n)
        ub = np.ones(self.n)

        # Constrain frequencies to sum to 1
        def h(a):
            return sum(a) - 1

        quiet()

        # For every subject
        for i in range(self.m):

            # Objective function
            def f(a):
                # Get frequencies (N) and error rate
                zi = a
                ei = self.e
                # Get nucleotide frequencies at every position (L,4)
                a1 = np.einsum('i...,i...', zi, self.p)[self.mask[i]]
                # Remove masked sites from alignment (L,4)
                xi = self.x[i][self.mask[i]]
                # Error correct and weight by counts
                a2 = np.einsum('ij,ij', xi,
                               np.log(((1 - ei) * a1 + ei / 4.).clip(1e-10)))
                # Return negative log-likelihood
                return -1. * a2

            # Run optimization
            g = self.z[i, :]
            soln = NLP(f,
                       g,
                       lb=lb,
                       ub=ub,
                       h=h,
                       gtol=1e-5,
                       contol=1e-5,
                       name='NLP1').solve(method, plot=0)
            if soln.ff <= f(g) and soln.isFeasible == True:
                zi = soln.xf
                z.append(zi.clip(0, 1))
            else:
                z.append(g)

        loud()

        # Update frequencies and error rate
        self.z = np.array(z)

        return self
Example #14
def phScenSolve(scen,rho,verbose=0):
    Wfilename = 'work/W%d.npy' %(scen); W=load(Wfilename); 
    orho,nscen,nwells,nx,LB,UB = readref()
#    print 'rho=%g,nx=%d' %(rho,nx)
#    refcard = load('work/refcard.npy')
#    orho,nscen,nwells,nx,LB,UB = readcard(refcard)
    qfn = 'work/qhat%d.npy' %(nx); qhat = load(qfn)
    H = GenHydroScen(nx,nscen,nwells,scen)
    e = lambda q,H,W,rho,qhat: gwrfull(q,H,W,rho,qhat)
    q0 = qhat; # we should read qsol!
#    q0 = array([0.0025,-0.0038,0.0018,-0.0092])
    which_opt = 2 #0 slsqp,1 cobyla, 2 NLP
    if which_opt>0:
       if which_opt==1:
           up = lambda q,H,W,rho,qhat,scen: min(q-LB)
           dn = lambda q,H,W,rho,qhat,scen: max(UB-q)
           qopt = fmin_cobyla(e,q0,cons=[up,dn],iprint=0,
                              args=[H,W,rho,qhat],rhoend=0.0001)
#       qopt = fmin_brute(q0,H,W,rho,qhat,scen)
       else:
           eNLP = lambda q: gwrfull(q,H,W,rho,qhat)
           popt = NLP(eNLP,q0,lb=LB*ones((nwells,1)),
                      ub=UB*ones((nwells,1)),iprint=-1)
           ropt = popt.solve('ralg')
           qopt = ropt.xf
           qopt = qopt.reshape(1,-1)
    else:
       bounds = [(LB,UB) for i in range(size(qhat))]
#       print bounds
       qopt = fmin_slsqp(e,q0,bounds=bounds,iprint=0,
                         args=[H,W,rho,qhat,scen],acc=0.001)
   
    filename = 'work/qsol%d' %(scen);    save(filename,squeeze(qopt))
    print 'qsol[%d] =' %(scen),qopt
  #  qpert = zeros((1,nwells));
 #  for i in range(nwells):
##       qpert[:,i]= qopt[:,i]+0.01*random.randn()
#    print 'qpert[%d]=' %(scen),qpert
#    z1=gwrfullCH(qopt,H,W,rho,qhat,scen)
#    z2=gwrfullCH(qpert,H,W,rho,qhat,scen)
#    print 'TicToc=%g' %(z1-z2)
    if verbose: scenvecprint(scen,qopt)
    return
Example #15
    def _fit_openopt(self):

        from openopt import NLP


        solver = self.kwargs.get('solver','ralg')
        #~ self.args += args[1:]
        #~ self.kwargs.update(kwargs)
        self.err = [1e10,1e9,1e8,1e7]
        self.i = 0
        params = []
        for p in self.model.x0:
            params.append(p()['value'])

        def f(params):
            for i in xrange(len(params)):
                if self.model.x0[i]()['froozen']:
                    params[i] = self.model.x0[i]()['value']
                else:
                    if isnan(params[i]) or isinf(params[i]):  # '== nan' is always False; use numpy's isnan/isinf
                        params[i] = self.model.x0[i]()['value']
                        self.i = (self.i+1)%4
                        return sum([self.err[self.i]]*len(self.data.x))
                    if params[i] < self.model.x0[i]()['min']:
                        params[i] = self.model.x0[i]()['min']
                        self.i = (self.i+1)%4
                        return sum([self.err[self.i]]*len(self.data.x))
                    if params[i] > self.model.x0[i]()['max']:
                        params[i] = self.model.x0[i]()['max']
                        self.i = (self.i+1)%4
                        return sum([self.err[self.i]]*len(self.data.x))
            if self.data.we is not None:
                return sum((self.data.y-self.model.fcn(params,self.data.x,self.model.const))**2 * self.data.we)
            else:
                return sum((self.data.y-self.model.fcn(params,self.data.x,self.model.const))**2)

        #~ f = lambda p: sum(((self.func(p, self.x) - self.y) / self.weights)**2)
        self.prob = NLP(f, x0 = params)
        self.res = self.prob.solve(solver)

        self.storeResult(self.res)

        return self.result
Example #16
def gridmax(al,gam,w,D,th,tl,steps):
	grid = linspace(0.000,1.0,steps)
	pisep = 0
	for n in grid:
		gridh = linspace(n,1,steps-n*steps)
		for nh in gridh:
			pinowsep = profitsseparating(n,nh,al,gam,w,D,th,tl) 
			if pinowsep >pisep:
				pisep = pinowsep
				solutioneffortsep = [n,nh]
	x0 = [solutioneffortsep[0],solutioneffortsep[1]]
	lb = [0,0]
	ub = [1,1]
	A= [1,-1]
	b=[0]
	f=lambda x: -profitsseparating(x[0],x[1],al,gam,w,D,th,tl)#note that the functions below search for a minimum, hence the "-"
	p=NLP(f,x0,lb=lb,ub=ub,A=A,b=b,contol = 1e-8,gtol = 1e-10,ftol = 1e-12)
	solver='ralg'
	r=p.solve(solver)
	#the "2 program" simply assumes ef=0 and maximizes over efh only; the result is then compared to the other program above. This is done because there is a local maximum at ef=0 (see paper)
	f2=lambda x: -profitsseparating(0,x[0],al,gam,w,D,th,tl)
	lb2=[0]
	ulb2=[1]
	p2=NLP(f2,solutioneffortsep[1],lb=lb2,ub=ulb2,contol = 1e-8,gtol = 1e-10,ftol = 1e-12)
	r2=p2.solve(solver)
	if r.ff<r2.ff:
		print 'solver result with gamma=',gam,', alpha=',al,', w=',w,' and D=',D,', th=',th,', tl=',tl,' the effort levels are: ', r.xf
		ref=r.xf[0]
		refh=r.xf[1]
		piff=r.ff
	else:
		print 'solver result with gamma=',gam,', alpha=',al,', w=',w,' and D=',D,', th=',th,', tl=',tl,' the effort levels are : 0',r2.xf
		ref=0
		refh=r2.xf[0]
		piff=r2.ff
	print ref,refh
	print 'ub1 is ', ub1(ref,refh, al,gam,util(w),util(w-D),th,tl), '; ul1 is ',ul1(ref,refh, al,gam,util(w),util(w-D),th,tl)
	print 'ub2 is ', ub2(ref,refh, al,gam,util(w),util(w-D),th,tl), '; ul2 is ',ul2(ref,refh, al,gam,util(w),util(w-D),th,tl)
	euff=al*(betah(ref,al,th,tl)*ul1(ref,refh, al,gam,util(w),util(w-D),th,tl)+(1-betah(ref,al,th,tl))*ub1(ref,refh, al,gam,util(w),util(w-D),th,tl))+(1-al)*(betal(ref,al,th,tl)*ul2(ref,refh, al,gam,util(w),util(w-D),th,tl)+(1-betal(ref,al,th,tl))*ub2(ref,refh, al,gam,util(w),util(w-D),th,tl))-cost(ref,gam)
	print 'expected utility under this contract is ', euff
	print 'expected solver profits are ',-piff
	return [-piff,euff]#this return is used for creating the graph
Example #17
def fit(fitfunc, model, data_single, y, starting_values, param_upper_bounds, param_lower_bounds):
    print_level = -1
    if model.main_constrained:
        fitting = NLP(
                        partial (fitfunc,
                                 data = data_single,
                                 ind = y,
                                 return_residuals=False,
                                 return_SSR=True
                                 ),
                        starting_values,
                        ub=param_upper_bounds,
                        lb=param_lower_bounds,
                        ftol=model.tolerence
                        )
        #results = fitting.solve('nlp:ralg', iprint=print_level)
        #return fitting.solve('nssolve', iprint = print_level)
        return fitting.solve('ralg', iprint = print_level, maxIter = model.max_runs)
    else:
        fitting = NLP(
                        partial (fitfunc,
                                 data=data_single,
                                 ind = y,
                                 return_residuals=False,
                                 return_SSR=True
                                 ),
                        starting_values,
                        ftol=model.tolerence,
                        maxIter = model.max_runs,
                        )
        return fitting.solve('scipy_leastsq', iprint=print_level)
Example #18
    def run(self, plot=True):
        """
        Solves the optimization problem.
        """        
        # Initial try
        p0 = self.get_p0()
        
        #Lower bounds and Upper bounds (HARDCODED FOR QUADTANK)
        lbound = N.array([0.0001]*len(p0))
        if self.gridsize == 1:
            ubound = [10.0]*(self.gridsize*self.nbr_us)
        else:
            ubound = [10.0]*(self.gridsize*self.nbr_us) + [0.20,0.20,0.20,0.20,N.inf]*((self.gridsize-1))

        
        #UPPER BOUND FOR VDP
        #ubound = [0.75]*(self.gridsize*self.nbr_us)+[N.inf]*((self.gridsize-1)*self.nbr_ys)
        
        if self.verbosity >= Multiple_Shooting.NORMAL:
            print 'Initial parameter vector: '
            print p0
            print 'Lower bound:', len(lbound)
            print 'Upper bound:', len(ubound)

        # Get OpenOPT handler
        p_solve = NLP(self.f,p0,lb = lbound, ub=ubound,maxFunEvals = self.maxFeval, maxIter = self.maxIter, ftol=self.ftol, maxTime=self.maxTime)
        
        #If multiple shooting is performed or single shooting
        if self.gridsize > 1:
            p_solve.h  = self.h
        
        if plot:
            p_solve.plot = 1

        self.opt = p_solve.solve(self.optMethod)        
        
        return self.opt
Example #19
def optimise_openopt(target_speed):
	from openopt import NLP
	def fitfun(gene):
		ontimes = np.require(gene[:12].copy(), requirements=['C', 'A', 'O', 'W'])
		offtimes = np.require(ontimes + gene[12:].copy(), requirements=['C', 'A', 'O', 'W'])
		currents = [243.]*12
		flyer.prop.overwriteCoils(ontimes.ctypes.data_as(c_double_p), offtimes.ctypes.data_as(c_double_p))
		flyer.preparePropagation(currents)
		flyer.propagate(0)

		pos = flyer.finalPositions[0]
		vel = flyer.finalVelocities[0]
		
		ind = np.where((pos[:, 2] > 268.) & (vel[:, 2] < 1.1*target_speed) & (vel[:, 2] > 0.9*target_speed))[0] # all particles that reach the end
		print 'good particles:', ind.shape[0]
		return -1.*ind.shape[0]

	initval = np.append(flyer.ontimes, flyer.offtimes - flyer.ontimes)
	lb = np.array(24*[0])
	ub = np.array(12*[600] + 12*[85])

	p = NLP(fitfun, initval, lb=lb, ub=ub)
	r = p.solve('bobyqa', plot=0)
	return r
Example #20
def upper_bound_mse(model, lower, upper):

    n_sample, n_feature = model.X.shape
    x_lb = atleast_2d(lower)
    x_ub = atleast_2d(upper)

    x_lb = x_lb.T if size(x_lb, 1) != n_feature else x_lb
    x_ub = x_ub.T if size(x_ub, 1) != n_feature else x_ub

    x0 = ((x_ub - x_lb) * rand(1, n_feature) + x_lb)[0]

    obj = objective_mse_wrapper(model)
    #    x_opt = fmin_slsqp(obj, x0, bounds=zip(x_lb[0], x_ub[0]), iter=1e4, iprint=-1)

    #    # openopt optimizers
    p = NLP(obj, x0, lb=x_lb[0], ub=x_ub[0], iprint=1e6)
    r = p.solve('ralg')
    x_opt = r.xf

    #    if any(x_opt < x_lb) or any(x_opt > x_ub):
    #        pdb.set_trace()
    res = -obj(x_opt)

    return res
Example #21
def milpTransfer(originProb):
    newProb = NLP(originProb.f, originProb.x0)
    originProb.inspire(newProb)
    newProb.discreteVars = originProb.discreteVars

    def err(s):  # to prevent text output
        raise OpenOptException(s)

    newProb.err = err
    for fn in ['df', 'd2f', 'c', 'dc', 'h', 'dh']:
        if hasattr(originProb, fn) and getattr(originProb.userProvided,
                                               fn) or originProb.isFDmodel:
            setattr(newProb, fn, getattr(originProb, fn))

    newProb.plot = 0
    newProb.iprint = -1
    newProb.nlpSolver = originProb.nlpSolver
    return newProb
Example #22
def milpTransfer(originProb):
    newProb = NLP(originProb.f, originProb.x0)
    originProb.fill(newProb)
    newProb.discreteVars = originProb.discreteVars
    def err(s): # to prevent text output
        raise OpenOptException(s)
    newProb.err = err
    for fn in ['df', 'd2f', 'c', 'dc', 'h', 'dh']:
        if hasattr(originProb, fn) and getattr(originProb.userProvided, fn) or originProb.isFDmodel:
            setattr(newProb, fn, getattr(originProb, fn))
    
    newProb.plot = 0
    newProb.iprint = -1
    newProb.nlpSolver = originProb.nlpSolver 
    return newProb
Example #23
    def optimize(
        self, solver="ralg", plot=0, maxIter=1e5, maxCPUTime=3600, maxFunEvals=1e12
    ):
        if self.mtype == "LP" or self.mtype == "MILP":
            p = MILP(
                self.objective,
                self.init,
                constraints=self.constraints,
                maxIter=maxIter,
                maxCPUTime=maxCPUTime,
                maxFunEvals=maxFunEvals,
            )
        elif self.mtype == "NLP":
            p = NLP(
                self.objective,
                self.init,
                constraints=self.constraints,
                maxIter=maxIter,
                maxCPUTime=maxCPUTime,
                maxFunEvals=maxFunEvals,
            )
        else:
            print("Model Type Error")
            raise TypeError

        if self.sense == GRB.MAXIMIZE:
            self.Results = p.maximize(solver, plot=plot)
        else:
            self.Results = p.minimize(solver, plot=plot)
        # print(self.Results)
        self.ObjVal = self.Results.ff

        if self.Results.isFeasible:
            self.Status = GRB.OPTIMAL
        else:
            self.Status = GRB.INFEASIBLE

        for v in self.variables:
            v.VarName = v.name
            v.X = self.Results.xf[v]
        return self.Status
Example #24
def fit_kernel_model(kernel, loss, X, y, gamma, weights=None):
    n_samples = X.shape[0]
    gamma = float(gamma)
    if weights is not None:
        weights = weights / np.sum(weights) * weights.size

    # --- optimize bias term ---

    bias = fd.oovar('bias', size=1)

    if weights is None:
        obj_fun = fd.sum(loss(y, bias))
    else:
        obj_fun = fd.sum(fd.dot(weights, loss(y, bias)))
    optimizer = NLP(obj_fun, {bias: 0.}, ftol=1e-6, iprint=-1)

    result = optimizer.solve('ralg')
    bias = result(bias)

    # --- optimize betas ---

    beta = fd.oovar('beta', size=n_samples)

    # gram matrix
    K = kernel(X, X)
    assert K.shape == (n_samples, n_samples)

    K_dot_beta = fd.dot(K, beta)

    penalization_term = gamma * fd.dot(beta, K_dot_beta)
    if weights is None:
        loss_term = fd.sum(loss(y - bias, K_dot_beta))
    else:
        loss_term = fd.sum(fd.dot(weights, loss(y - bias, K_dot_beta)))
    obj_fun = penalization_term + loss_term

    beta0 = np.zeros((n_samples, ))

    optimizer = NLP(obj_fun, {beta: beta0}, ftol=1e-4, iprint=-1)
    result = optimizer.solve('ralg')
    beta = result(beta)

    return KernelModel(X, kernel, beta, bias)
Example #25
    def max_log_marginal_likelihood(self, hyp_initial_guess, maxiter=1,
            optimization_algorithm="scipy_cg", ftol=1.0e-3, fixedHypers=None,
            use_gradient=False, logscale=False):
        """
        Set up the optimization problem in order to maximize
        the log_marginal_likelihood.

        Parameters
        ----------
        parametric_model : Classifier
          the actual parameteric model to be optimized.
        hyp_initial_guess : numpy.ndarray
          set of hyperparameters' initial values where to start
          optimization.
        optimization_algorithm : string
          actual name of the optimization algorithm. See
          http://scipy.org/scipy/scikits/wiki/NLP
          for a comprehensive/updated list of available NLP solvers.
          (Defaults to 'scipy_cg')
        ftol : float
          threshold for the stopping criterion of the solver,
          which is mapped in OpenOpt NLP.ftol
          (Defaults to 1.0e-3)
        fixedHypers : numpy.ndarray (boolean array)
          boolean vector of the same size of hyp_initial_guess;
          'True' means that the corresponding hyperparameter must
          be kept fixed (so not optimized).
          (Defaults to None, which means all hyperparameters are optimized)

        Notes
        -----
        The maximization of log_marginal_likelihood is a non-linear
        optimization problem (NLP). This fact is confirmed by Dmitrey,
        author of OpenOpt.
        """
        self.problem = None
        self.use_gradient = use_gradient
        self.logscale = logscale # use log-scale on hyperparameters to enhance numerical stability
        self.optimization_algorithm = optimization_algorithm
        self.hyp_initial_guess = np.array(hyp_initial_guess)
        self.hyp_initial_guess_log = np.log(self.hyp_initial_guess)
        if fixedHypers is None:
            fixedHypers = np.zeros(self.hyp_initial_guess.shape[0],dtype=bool)
            pass
        self.freeHypers = ~fixedHypers  # logical NOT; unary '-' on boolean arrays errors in modern NumPy
        if self.logscale:
            self.hyp_running_guess = self.hyp_initial_guess_log.copy()
        else:
            self.hyp_running_guess = self.hyp_initial_guess.copy()
            pass
        self.f_last_x = None

        def f(x):
            """
            Wrapper to the log_marginal_likelihood to be
            maximized.
            """
            # XXX EO: since some OpenOpt NLP solvers does not
            # implement lower bounds the hyperparameters bounds are
            # implemented inside PyMVPA: (see dmitrey's post on
            # [SciPy-user] 20080628).
            #
            # XXX EO: OpenOpt does not implement optimization of a
            # subset of the hyperparameters so it is implemented here.
            #
            # XXX EO: OpenOpt does not implement logarithmic scale of
            # the hyperparameters (to enhance numerical stability), so
            # it is implemented here.
            self.f_last_x = x.copy()
            self.hyp_running_guess[self.freeHypers] = x
            # REMOVE print "guess:",self.hyp_running_guess,x
            try:
                if self.logscale:
                    self.parametric_model.set_hyperparameters(np.exp(self.hyp_running_guess))
                else:
                    self.parametric_model.set_hyperparameters(self.hyp_running_guess)
                    pass
            except InvalidHyperparameterError:
                if __debug__: debug("MOD_SEL","WARNING: invalid hyperparameters!")
                return -np.inf
            try:
                self.parametric_model.train(self.dataset)
            except (np.linalg.linalg.LinAlgError, SL.basic.LinAlgError, ValueError):
                # Note that ValueError could be raised when Cholesky gets Inf or Nan.
                if __debug__: debug("MOD_SEL", "WARNING: Cholesky failed! Invalid hyperparameters!")
                return -np.inf
            log_marginal_likelihood = self.parametric_model.compute_log_marginal_likelihood()
            # REMOVE print log_marginal_likelihood
            return log_marginal_likelihood

        def df(x):
            """
            Proxy to the log_marginal_likelihood first
            derivative. Necessary for OpenOpt when using derivatives.
            """
            self.hyp_running_guess[self.freeHypers] = x
            # REMOVE print "df guess:",self.hyp_running_guess,x
            # XXX EO: Most of the following lines can be skipped if
            # df() is computed just after f() with the same
            # hyperparameters. The partial results obtained during f()
            # are what is needed for df(). For now, in order to avoid
            # bugs difficult to trace, we keep this redundancy. A
            # deep check with how OpenOpt works or using memoization
            # should solve this issue.
            try:
                if self.logscale:
                    self.parametric_model.set_hyperparameters(np.exp(self.hyp_running_guess))
                else:
                    self.parametric_model.set_hyperparameters(self.hyp_running_guess)
                    pass
            except InvalidHyperparameterError:
                if __debug__: debug("MOD_SEL", "WARNING: invalid hyperparameters!")
                return -np.inf
            # Check if it is possible to avoid useless computations
            # already done in f(). According to tests and information
            # collected from OpenOpt people, it is sufficiently
            # unexpected that the following test succeeds:
            if np.any(x!=self.f_last_x):
                if __debug__: debug("MOD_SEL","UNEXPECTED: recomputing train+log_marginal_likelihood.")
                try:
                    self.parametric_model.train(self.dataset)
                except (np.linalg.linalg.LinAlgError, SL.basic.LinAlgError, ValueError):
                    if __debug__: debug("MOD_SEL", "WARNING: Cholesky failed! Invalid hyperparameters!")
                    # XXX EO: which value for the gradient to return to
                    # OpenOpt when hyperparameters are wrong?
                    return np.zeros(x.size)
                log_marginal_likelihood = self.parametric_model.compute_log_marginal_likelihood() # recompute what's needed (to be safe) REMOVE IN FUTURE!
                pass
            if self.logscale:
                gradient_log_marginal_likelihood = self.parametric_model.compute_gradient_log_marginal_likelihood_logscale()
            else:
                gradient_log_marginal_likelihood = self.parametric_model.compute_gradient_log_marginal_likelihood()
                pass
            # REMOVE print "grad:",gradient_log_marginal_likelihood
            return gradient_log_marginal_likelihood[self.freeHypers]


        if self.logscale:
            # vector of hyperparameters' values where to start the search
            x0 = self.hyp_initial_guess_log[self.freeHypers]
        else:
            x0 = self.hyp_initial_guess[self.freeHypers]
            pass
        self.contol = 1.0e-20 # Constraint tolerance level
        # XXX EO: is it necessary to use contol when self.logscale is
        # True and there is no lb? Ask dmitrey.
        if self.use_gradient:
            # actual instance of the OpenOpt non-linear problem
            self.problem = NLP(f, x0, df=df, contol=self.contol, goal='maximum')
        else:
            self.problem = NLP(f, x0, contol=self.contol, goal='maximum')
            pass
        self.problem.name = "Max LogMargLikelihood"
        if not self.logscale:
             # set lower bound for hyperparameters: avoid negative
             # hyperparameters. Note: problem.n is the size of
             # hyperparameters' vector
            self.problem.lb = np.zeros(self.problem.n)+self.contol
            pass
        # max number of iterations for the optimizer.
        self.problem.maxiter = maxiter
        # check whether the derivative of log_marginal_likelihood converged to
        # zero before ending optimization
        self.problem.checkdf = True
         # set increment of log_marginal_likelihood under which the optimizer stops
        self.problem.ftol = ftol
        self.problem.iprint = _openopt_debug()
        return self.problem
Example #26
# so it should be something like 1e-3...1e-5
gtol = 1e-7  # (default gtol = 1e-6)

# Assign problem:
# 1st arg - objective function
# 2nd arg - start point
p = NLP(f,
        x0,
        df=df,
        c=c,
        dc=dc,
        h=h,
        dh=dh,
        A=A,
        b=b,
        Aeq=Aeq,
        beq=beq,
        lb=lb,
        ub=ub,
        gtol=gtol,
        contol=contol,
        iprint=50,
        maxIter=10000,
        maxFunEvals=1e7,
        name='NLP_1')

#optional: graphic output, requires pylab (matplotlib)
p.plot = True

#optional: user-supplied 1st derivatives check
p.checkdf()
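
# For orientation: the fragment above relies on f, df, c, dc, h, dh, A, b, Aeq,
# beq, lb, ub, x0 and contol defined earlier in the original file. A minimal
# self-contained problem using only box bounds might look like this sketch:
from numpy import ones, zeros
from openopt import NLP

n = 10
f = lambda x: ((x - 3.0)**2).sum()   # simple quadratic objective
df = lambda x: 2.0 * (x - 3.0)       # its gradient
p = NLP(f, zeros(n), df=df, lb=-5*ones(n), ub=5*ones(n), iprint=50)
r = p.solve('ralg')                  # r.xf should approach 3*ones(n)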
Example #27
I can't say how successfully OpenOpt-connected solvers
will handle a problem instance with a restricted domain,
because it seems to be too problem-specific.

Still, I can say that ralg handles such problems rather well,
provided that at every point x from R^nVars at least one inequality constraint is active
(i.e. the value constr[i](x) belongs to R+).

Note also that some solvers require x0 to be inside dom objFunc.
For ralg it doesn't matter.
"""

from numpy import *
from openopt import NLP

n = 100
an = arange(n) # array [0, 1, 2, ..., n-1]
x0 = n+15*(1+cos(an))

f = lambda x: (x**2).sum() + sqrt(x**3).sum() 
df = lambda x: 2*x + 1.5*x**0.5

lb = zeros(n)
solvers = ['ralg']
#solvers = ['ipopt']
for solver in solvers:
    p = NLP(f, x0, df=df, lb=lb, xtol = 1e-6, iprint = 50, maxIter = 10000, maxFunEvals = 1e8)
    #p.checkdf()
    r = p.solve(solver)
# expected r.xf = small values near zero
Example #28
colors = colors[:len(solvers)]
lines, results = [], {}
for j in range(len(solvers)):
    solver = solvers[j]
    color = colors[j]
    p = NLP(f,
            x0,
            name='bench2',
            df=df,
            c=c,
            dc=dc,
            h=h,
            dh=dh,
            lb=lb,
            ub=ub,
            gtol=gtol,
            ftol=ftol,
            maxFunEvals=1e7,
            maxIter=maxIter,
            maxTime=maxTime,
            plot=1,
            color=color,
            iprint=10,
            legend=[solvers[j]],
            show=False,
            contol=contol)
    #    p = NLP(f, x0, name = 'bench2', df = df, c=c, dc = dc, lb = lb, ub = ub, gtol=gtol, ftol = ftol, maxFunEvals = 1e7, maxIter = 1e4, maxTime = maxTime,  plot = 1, color = color, iprint = 0, legend = [solvers[j]], show=False,  contol = contol)
    if solver[:4] == 'ralg':
        pass
#        p.gtol = 1e-8
#        p.ftol = 1e-7
Example #29
from openopt import NLP
from numpy import cos, arange, ones, asarray, abs, zeros
N = 30
M = 5
ff = lambda x: ((x-M)**2).sum()
p = NLP(ff, cos(arange(N)))
p.df =  lambda x: 2*(x-M)
p.c = lambda x: [2* x[0] **4-32, x[1]**2+x[2]**2 - 8]

def dc(x):
    r = zeros((2, p.n))
    r[0,0] = 2 * 4 * x[0]**3
    r[1,1] = 2 * x[1]
    r[1,2] = 2 * x[2]
    return r
p.dc = dc

h1 = lambda x: 1e1*(x[-1]-1)**4
h2 = lambda x: (x[-2]-1.5)**4
p.h = lambda x: (h1(x), h2(x))

def dh(x):
    r = zeros((2, p.n))
    r[0,-1] = 1e1*4*(x[-1]-1)**3
    r[1,-2] = 4*(x[-2]-1.5)**3
    return r
p.dh = dh

p.lb = -6*ones(N)
p.ub = 6*ones(N)
p.lb[3] = 5.5
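
# The excerpt stops before the problem is solved; a typical continuation,
# mirroring the other examples on this page (assumed, not original), would be:
r = p.solve('ralg')
print(r.xf)   # optimal point subject to the constraints and bounds set above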
Example #30
T = Triangle((1,2,a),(2,b,4),(c,6.5,7))

# let's create an initial estimation to the problems below
startValues = {a:1, b:0.5, c:0.1} # you could merely set anything here, but sometimes a good initial estimate matters

# let's find a, b, c values such that r = 1.5 with required tolerance 10^-5, R = 4.2 with tol 10^-4, and a+c == 2.5 with tol 10^-7
# if no tol is provided, p.contol is used (default 10^-6)
equations = [(T.r == 1.5)(tol=1e-5) , (T.R == 4.2)(tol=1e-4), (a+c == 2.5)(tol=1e-7)]
prob = SNLE(equations, startValues)
result = prob.solve('nssolve', iprint = 0) # nssolve is name of the solver involved
print('\nsolution has%s been found' % ('' if result.stopcase > 0 else ' not'))
print('values:' + str(result(a, b, c))) # [1.5773327492140974, -1.2582702179532217, 0.92266725078590239]
print('triangle sides: '+str(T.sides(result))) # [8.387574299361475, 7.0470774415247455, 4.1815836020856336]
print('orthocenter of the triangle: ' + str(T.H(result))) # [ 0.90789867  2.15008869  1.15609611]

# let's find the minimum inscribed radius subject to the constraints a<1.5, a>-1, b<0, a+2*c<4, log(1-b)<2:
objective = T.r
prob = NLP(objective, startValues, constraints = [a<1.5, a>-1, b<0, a+2*c<4,  log(1-b)<2])
result1 = prob.minimize('ralg', iprint = 0) # ralg is name of the solver involved, see http://openopt.org/ralg for details
print('\nminimal inscribed radius: %0.3f' % T.r(result1)) #  1.321
print('optimal values:' + str(result1(a, b, c))) # [1.4999968332804028, 2.7938728907900973e-07, 0.62272481283890913]

# let's find the minimum outscribed radius subject to the constraints a<1.5, a>-1, b<0, a+2*c<4, log(1-b)<2:
prob = NLP(T.R, startValues, constraints = (a<1.5, a>-1, b<0, (a+2*c<4)(tol=1e-7),  log(1-b)<2))
result2 = prob.minimize('ralg', iprint = 0) 
print('\nminimal outscribed radius: %0.3f' % T.R(result2)) # 3.681
print('optimal values:' + str(result2(a, b, c))) # [1.499999901863762, -1.7546960034401648e-06, 1.2499958739399943]



Example #31
from openopt import NLP
from numpy import cos, arange, ones, asarray, abs, zeros
N = 30
M = 5
ff = lambda x: ((x - M)**2).sum()
p = NLP(ff, cos(arange(N)))
p.df = lambda x: 2 * (x - M)
p.c = lambda x: [2 * x[0]**4 - 32, x[1]**2 + x[2]**2 - 8]


def dc(x):
    r = zeros((2, p.n))
    r[0, 0] = 2 * 4 * x[0]**3
    r[1, 1] = 2 * x[1]
    r[1, 2] = 2 * x[2]
    return r


p.dc = dc

h1 = lambda x: 1e1 * (x[-1] - 1)**4
h2 = lambda x: (x[-2] - 1.5)**4
p.h = lambda x: (h1(x), h2(x))


def dh(x):
    r = zeros((2, p.n))
    r[0, -1] = 1e1 * 4 * (x[-1] - 1)**3
    r[1, -2] = 4 * (x[-2] - 1.5)**3
    return r
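
# This excerpt (a reformatted copy of Example #29) is also cut short; the setup
# there continues by attaching dh and the box bounds before solving, roughly
# (assumed continuation, not part of the original excerpt):
p.dh = dh
p.lb = -6 * ones(N)
p.ub = 6 * ones(N)
r = p.solve('ralg')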
Example #32
def test(complexity=0, **kwargs):
    n = 15 * (complexity+1)

    x0 = 15*cos(arange(n)) + 8

    f = lambda x: ((x-15)**2).sum()
    df = lambda x: 2*(x-15)

    c = lambda x: [2* x[0] **4-32, x[1]**2+x[2]**2 - 8]

    # dc(x)/dx: non-lin ineq constraints gradients (optional):
    def dc(x):
        r = zeros((len(c(x0)), n))
        r[0,0] = 2 * 4 * x[0]**3
        r[1,1] = 2 * x[1]
        r[1,2] = 2 * x[2]
        return r

    hp = 2
    h1 = lambda x: (x[-1]-13)**hp
    h2 = lambda x: (x[-2]-17)**hp
    h = lambda x:[h1(x), h2(x)]

    # dh(x)/dx: non-lin eq constraints gradients (optional):
    def dh(x):
        r = zeros((2, n))
        r[0, -1] = hp*(x[-1]-13)**(hp-1)
        r[1, -2] = hp*(x[-2]-17)**(hp-1)
        return r

    lb = -8*ones(n)
    ub = 15*ones(n)+8*cos(arange(n))

    ind = 3

    A = zeros((2, n))
    A[0, ind:ind+2] = 1
    A[1, ind+2:ind+4] = 1
    b = [15,  8]

    Aeq = zeros(n)
    Aeq[ind+4:ind+8] = 1
    beq = 45
    ########################################################
    colors = ['b', 'k', 'y', 'g', 'r']
    #solvers = ['ipopt', 'ralg','scipy_cobyla']
    solvers = ['ralg','scipy_slsqp', 'ipopt']
    solvers = [ 'ralg', 'scipy_slsqp']
    solvers = [ 'ralg']
    solvers = [ 'r2']
    solvers = [ 'ralg', 'scipy_slsqp']
    ########################################################
    for i, solver in enumerate(solvers):
        p = NLP(f, x0, df=df, c=c, h=h, dc=dc, dh=dh, lb=lb, ub=ub, A=A, b=b, Aeq=Aeq, beq=beq, maxIter = 1e4, \
                show = solver==solvers[-1], color=colors[i],  **kwargs )
        if not kwargs.has_key('iprint'): p.iprint = -1
#        p.checkdf()
#        p.checkdc()
#        p.checkdh()
        r = p.solve(solver)
    if r.istop>0: return True, r, p
    else: return False, r, p
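
# Hypothetical call for the benchmark above (assumes openopt and the listed
# solvers are installed; Python 2, like the rest of this page):
ok, r, p = test(complexity=0, iprint=10)
print ok, r.ff   # whether the last solver converged, and its final objective value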
Example #33
#    print 'pos',pos_sum(p0)
#    print 'neg',neg_sum(p0)

    if 0:
        p0=N.ones(M*2+3)
        p0[0:3]=[.1,.1,.1]
    if 1:
        p0=N.ones(M*2)/(M)
    if 1:
        print len(p0)
        lowerm=1e-4*N.ones(len(p0))
        #lowerm[0:3]=[-1,-1,-1]
        upperm=N.ones(len(p0))
    if 1:
        p = NLP(Entropy, p0, maxIter = 1e3, maxFunEvals = 1e5)

    if 0:
        p = NLP(chisq, p0, maxIter = 1e3, maxFunEvals = 1e5)


    if 0:
        p = NLP(max_wrap, p0, maxIter = 1e3, maxFunEvals = 1e5)
    if 0:
        p.lb=lowerm
        p.ub=upperm
        p.args.f=(h,k,l,fq,fqerr,x,z,cosmat_list,coslist,flist)
        p.plot = 0
        p.iprint = 1
        p.contol = 1e-5#3 # required constraints tolerance, default for NLP is 1e-6
Example #34
res = scipy.optimize.differential_evolution(objLegrand.cost,bounds=boxBounds)

solvers = [
    'ralg',
    'lincher',
    'gsubg',
    'scipy_slsqp',
    'scipy_cobyla',
    'interalg',
    'auglag',
    'ptn',
    'mma'
    ]


pro = NLP(f=objLegrand.cost,x0=theta,df=objLegrand.sensitivity,lb=lb,ub=ub,A=A,b=b)
pro.plot = True
rRalg = pro.solve('ralg')

pro = NLP(f=objLegrand.cost,x0=theta3,df=objLegrand.sensitivity,lb=lb,ub=ub,A=A,b=b)
pro.plot = True
rRalg = pro.solve('ralg')



pro = NLP(f=objLegrand.cost,x0=theta,df=objLegrand.sensitivity,lb=lb,ub=ub,A=A,b=b)
pro.plot = True
rSLSQP = pro.solve('scipy_slsqp')

# we try to refine the solution
Example #35
    def max_loglik_p(self, method='scipy_slsqp'):

        message(self, 'Optimizing strain genotypes using %s' % (method))

        # Initialize frequencies
        p = []

        # Bound genotypes in (0,1)
        lb = np.zeros(4 * self.n)
        ub = np.ones(4 * self.n)

        # Constrain genotypes to sum to 1
        def h(a):
            return np.reshape(a, (self.n, 4)).sum(axis=1) - 1.0

        quiet()

        # Optimize genotypes at every position
        for j in range(self.l):

            # Copy mask
            mj = self.mask[:, j].copy()

            # Objective function
            def f(a):
                # Reshape strain genotypes
                pi = np.reshape(a, (self.n, 4))
                # Nucleotide frequencies at position j
                a1 = (1 - self.e) * (np.dot(self.z, pi)) + (self.e / 4.)
                # Site likelihoods
                l1 = (self.x[:, j, :] * np.log(a1.clip(1e-10))).sum(axis=1)
                # Robust estimation
                if self.robust == True:
                    # Alternative likelihoods
                    l2 = (np.log(.25) * self.x[:, j, :]).sum(axis=1)
                    # Pay likelihood penalty to mask sites
                    i = l1 >= self.penalty * l2
                    # Update mask
                    mj[i] = True
                    mj[np.logical_not(i)] = False
                    # Penalized likelihood
                    lf = l1[i].sum(
                    ) + self.penalty * l2[np.logical_not(i)].sum()
                else:
                    # Normal likelihood
                    lf = l1.sum()
                # L2 penalty
                #return -1.*lf - pi.max(axis=1).sum()
                return -1. * lf - (pi**2).sum()

            # Calculate original likelihood
            x0 = self.p[:, j, :].flatten()
            l0 = f(x0)

            # Optimize genotypes
            g = [.25] * 4 * self.n
            soln = NLP(f,
                       g,
                       h=h,
                       lb=lb,
                       ub=ub,
                       gtol=1e-5,
                       contol=1e-5,
                       name='NLP1').solve(method, plot=0)

            # Update genotypes
            if soln.ff <= l0 and soln.isFeasible == True:
                # Discretize results
                xf = discretize_genotypes(
                    np.reshape(soln.xf.clip(0, 1), (self.n, 4)))
                lf = f(xf.flatten())
                # Validate likelihood
                if lf < l0:
                    # Update genotypes and mask
                    p.append(xf)
                    if self.robust == True:
                        self.mask[:, j] = mj
                    continue
            # If likelihood not improved, use original genotypes
            p.append(self.p[:, j, :])

        loud()

        # Fix shape
        self.p = np.swapaxes(np.array(p), 0, 1)
        return self
Example #36
    n = I.shape[1]

    x0fn = 'x0_' + infn[:-4] + '.txt'
    x0 = None
    if os.path.isfile(x0fn):
        x0 = np.loadtxt(x0fn)
    if x0 is None or x0.shape[0] != 2 * m + n:
        x0 = np.ones(2 * m + n)
#        x0[m + 1: 2 * m + 1] = I0 / 10
    logger.debug(x0.shape)

    lb = np.zeros(2 * m + n)
    ub = np.ones(2 * m + n)
    ub[m:] = np.inf

    p = NLP(error_function2, x0, maxIter=1e5, maxFunEvals=1e7, lb=lb, ub=ub)
    p.args.f = (I, m)
    p.df = grad
    p.checkdf()
    r = p.solve('ralg', plot=1)
    x = r.xf
    logger.info(x)

    xfn = 'x_' + infn[:-4] + '.txt'

    np.savetxt(xfn, x)

    # p = NLP(error_function2, x, maxIter=1e4, maxFunEvals=1e6, lb=lb, ub=ub)
    # p.args.f = (I, m)
    # r = p.solve('ralg', plot=1)
    # x = r.xf
Example #37
    def fit_node(self,index):
        qnode=self.qlist[index]
        print qnode.q
        th=qnode.th_condensed['a3']
        counts=qnode.th_condensed['counts']
        counts_err=qnode.th_condensed['counts_err']
        print qnode.th_condensed['counts'].std()
        print qnode.th_condensed['counts'].mean()
        maxval=qnode.th_condensed['counts'].max()
        minval=qnode.th_condensed['counts'].min()
        diff=qnode.th_condensed['counts'].max()-qnode.th_condensed['counts'].min()\
            -qnode.th_condensed['counts'].mean()
        sig=qnode.th_condensed['counts'].std()

        if diff-2*sig>0:
            #the difference between the high and low point and
            #the mean is greater than 2 sigma so we have a signal
            p0=findpeak(th,counts,1)
            print 'p0',p0
            #Area center width Bak
            center=p0[0]
            width=p0[1]
            sigma=width/2/N.sqrt(2*N.log(2))
            Imax=maxval-minval
            area=Imax*(N.sqrt(2*pi)*sigma)
            print 'Imax',Imax
            pin=[area,center,width,0]





            if 1:
                p = NLP(chisq, pin, maxIter = 1e3, maxFunEvals = 1e5)
                #p.lb=lowerm
                #p.ub=upperm
                p.args.f=(th,counts,counts_err)
                p.plot = 0
                p.iprint = 1
                p.contol = 1e-5#3 # required constraints tolerance, default for NLP is 1e-6

    # for ALGENCAN solver gradtol is the only one stop criterium connected to openopt
    # (except maxfun, maxiter)
    # Note that in ALGENCAN gradtol means norm of projected gradient of  the Augmented Lagrangian
    # so it should be something like 1e-3...1e-5
                p.gradtol = 1e-5#5 # gradient stop criterium (default for NLP is 1e-6)
        #print 'maxiter', p.maxiter
        #print 'maxfun', p.maxfun
                p.maxIter=50
    #    p.maxfun=100

        #p.df_iter = 50
                p.maxTime = 4000
        #r=p.solve('scipy_cobyla')
            #r=p.solve('scipy_lbfgsb')
                #r = p.solve('algencan')
                print 'ralg'
                r = p.solve('ralg')
                print 'done'
                pfit=r.xf
                print 'pfit openopt',pfit
                print 'r dict', r.__dict__

            if 0:
                print 'curvefit'
                print sys.executable
                pfit,popt=curve_fit(gauss2, th, counts, p0=pfit, sigma=counts_err)
                print 'p,popt', pfit,popt
                perror=N.sqrt(N.diag(popt))
                print 'perror',perror
                chisqr=chisq(pfit,th,counts,counts_err)
                dof=len(th)-len(pfit)
                print 'chisq',chisqr
            if 0:
                oparam=scipy.odr.Model(gauss)
                mydatao=scipy.odr.RealData(th,counts,sx=None,sy=counts_err)
                myodr = scipy.odr.ODR(mydatao, oparam, beta0=pfit)
                myoutput=myodr.run()
                myoutput.pprint()
                pfit=myoutput.beta
            if 1:
                print 'mpfit'
                p0=pfit
                parbase={'value':0., 'fixed':0, 'limited':[0,0], 'limits':[0.,0.]}
                parinfo=[]
                for i in range(len(p0)):
                    parinfo.append(copy.deepcopy(parbase))
                for i in range(len(p0)):
                    parinfo[i]['value']=p0[i]
                fa = {'x':th, 'y':counts, 'err':counts_err}
                #parinfo[1]['fixed']=1
                #parinfo[2]['fixed']=1
                m = mpfit(myfunct_res, p0, parinfo=parinfo,functkw=fa)
                if (m.status <= 0):
                    print 'error message = ', m.errmsg
                params=m.params
                pfit=params
                perror=m.perror
                #chisqr=(myfunct_res(m.params, x=th, y=counts, err=counts_err)[1]**2).sum()
                chisqr=chisq(pfit,th,counts,counts_err)
                dof=m.dof
                #Icalc=gauss(pfit,th)
                #print 'mpfit chisqr', chisqr


            if 0:
                width_x=N.linspace(p0[0]-p0[1],p0[0]+p0[1],100)
                width_y=N.ones(width_x.shape)*(maxval-minval)/2
                pos_y=N.linspace(minval,maxval,100)
                pos_x=N.ones(pos_y.shape)*p0[0]
                if 1:
                    pylab.errorbar(th,counts,counts_err,marker='s',linestyle='None',mfc='black',mec='black',ecolor='black')
                    pylab.plot(width_x,width_y)
                    pylab.plot(pos_x,pos_y)
                    pylab.plot(th,Icalc)
                    pylab.show()

        else:
            #fix center
            #fix width
            print 'no peak'
            #Area center width Bak
            area=0
            center=th[len(th)/2]
            width=(th.max()-th.min())/5.0  #rather arbitrary, but we don't know if it's the first....
            Bak=qnode.th_condensed['counts'].mean()
            p0=N.array([area,center,width,Bak],dtype='float64')  #initial conditions
            parbase={'value':0., 'fixed':0, 'limited':[0,0], 'limits':[0.,0.]}
            parinfo=[]
            for i in range(len(p0)):
                parinfo.append(copy.deepcopy(parbase))
            for i in range(len(p0)):
                parinfo[i]['value']=p0[i]
            fa = {'x':th, 'y':counts, 'err':counts_err}
            parinfo[1]['fixed']=1
            parinfo[2]['fixed']=1
            m = mpfit(myfunct_res, p0, parinfo=parinfo,functkw=fa)
            if (m.status <= 0):
                print 'error message = ', m.errmsg
            params=m.params
            pfit=params
            perror=m.perror
            #chisqr=(myfunct_res(m.params, x=th, y=counts, err=counts_err)[1]**2).sum()
            chisqr=chisq(pfit,th,counts,counts_err)
            dof=m.dof
            Icalc=gauss(pfit,th)
            #print 'perror',perror
            if 0:
                pylab.errorbar(th,counts,counts_err,marker='s',linestyle='None',mfc='black',mec='black',ecolor='black')
                pylab.plot(th,Icalc)
                pylab.show()

        print 'final answer'
        print 'perror', perror
        #If the fit is unweighted (i.e. no errors were given, or the weights
        #       were uniformly set to unity), then .perror will probably not represent
        #the true parameter uncertainties.

        #       *If* you can assume that the true reduced chi-squared value is unity --
        #       meaning that the fit is implicitly assumed to be of good quality --
        #       then the estimated parameter uncertainties can be computed by scaling
        #       .perror by the measured chi-squared value.

        #          dof = len(x) - len(mpfit.params) # deg of freedom
        #          # scaled uncertainties
        #          pcerror = mpfit.perror * sqrt(mpfit.fnorm / dof)

        print 'params', pfit
        print 'chisqr', chisqr  #note that chisqr already is scaled by dof
        pcerror = perror * N.sqrt(m.fnorm / m.dof)  # scale perror by sqrt(reduced chi-squared)
        print 'pcerror', pcerror

        self.qlist[index].th_integrated_intensity=N.abs(pfit[0])
        self.qlist[index].th_integrated_intensity_err=N.abs(pcerror[0])
        Icalc=gauss(pfit,th)
        print 'perror',perror
        if 0:
            pylab.figure()
            pylab.errorbar(th,counts,counts_err,marker='s',linestyle='None',mfc='black',mec='black',ecolor='black')
            pylab.plot(th,Icalc)
            qstr=str(qnode.q['h_center'])+','+str(qnode.q['k_center'])+','+str(qnode.q['l_center'])
            pylab.title(qstr)
            #pylab.show()

        return
    # for ALGENCAN the gradient tolerance should be something like 1e-3...1e-5
    gtol = 1e-6  # (default gtol = 1e-6)

    # Assign problem:
    #   1st arg - objective function
    #   2nd arg - start point
    #p = NLP(f, x0, c=c, gtol=gtol, contol=contol, iprint=50, maxIter=10000, maxFunEvals=1e7, name='NLP_1')
    x0 = rd.uniform(-0.8, 0.8, num_dof*(2*N+1))
    #x0[2*N] = 0
    #x0[2*(2*N)] = 0
    #x0[3*(2*N)] = 0
    #x0 = rd.uniform(-0.1, 0.1, 2*N+1).tolist() + rd.uniform(-0.01, 0.01, 2*N+1).tolist() + rd.uniform(-.001, 0.001, 2*N+1).tolist()
    #x0 = range(0, num_dof*(2*N+1))
    #p = NLP(f, x0, c=c, h=h, gtol=gtol, contol=contol, iprint=1, maxIter=1000, maxFunEvals=1e7, name='NLP_1')
    p = NLP(f, x0, c=c, h=h, gtol=gtol, contol=contol, iprint=1, maxIter=700, maxFunEvals=1e7, name='NLP_1')
    #p = NLP(f, x0, gtol=gtol, contol=contol, iprint=50, maxIter=10000, maxFunEvals=1e7, name='NLP_1')

    # optional: graphic output, requires pylab (matplotlib)
    p.plot = True

    solver = 'ralg'
    #solver = 'scipy_cobyla'
    #solver = 'algencan'
    #solver = 'ipopt'
    #solver = 'scipy_slsqp'

    # solve the problem; the string argument is the solver name
    r = p.solve(solver, plot=0)
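The helpers gauss, chisq and myfunct_res used in the peak-fitting code above are defined elsewhere in the original module. Given the parameter vector [area, center, FWHM, background] and the sigma/area relations used to build pin, they presumably look roughly like the following sketch (not the original definitions):

import numpy as N

def gauss(p, x):
    # p = [area, center, FWHM, background]
    area, center, width, bak = p
    sigma = width / 2 / N.sqrt(2 * N.log(2))
    return bak + area / (N.sqrt(2 * N.pi) * sigma) * N.exp(-(x - center)**2 / (2 * sigma**2))

def chisq(p, x, y, err):
    # reduced chi-squared; the printout above notes that chisqr is already scaled by dof
    dof = len(x) - len(p)
    return (((y - gauss(p, x)) / err)**2).sum() / dof

def myfunct_res(p, fjac=None, x=None, y=None, err=None):
    # residuals in the form mpfit expects: (status, weighted residuals)
    return 0, (y - gauss(p, x)) / err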
Пример #39
0
# solvers = ['ralg', 'algencan']
solvers = ["ralg"]
###############################################################

lines, results = [], {}
for j, solver in enumerate(solvers):
    p = NLP(
        ff,
        startPoint,
        xlabel=Xlabel,
        gtol=gtol,
        diffInt=diffInt,
        ftol=ftol,
        maxIter=1390,
        plot=PLOT,
        color=colors[j],
        iprint=10,
        df_iter=4,
        legend=solver,
        show=False,
        contol=contol,
        maxTime=maxTime,
        maxFunEvals=maxFunEvals,
        name="NLP_bench_1",
    )
    p.constraints = [c1 < 0, c2 < 0, h1.eq(0), h2.eq(0), x > lb, x < ub]
    # p.constraints = h1.eq(0)

    # p._Prepare()
    # print p.dc(p.x0)
    # print h1.D(startPoint)
Пример #40
0
    #print 'len p',len(p)
    
    x,z=precompute_r()
    X,Z=N.meshgrid(x,z)
    coslist,cosmat_list=precompute_cos(h,k,l,x,z)
    #print 'coslist',coslist[0]
    flist=N.ones(len(p0))
    
    
    flist[M::]=-flist[M::]
    #vout=chisq_hessian(p,fqerr,p,coslist,flist)
    #print 'vout', vout
    
#    print 'pos',pos_sum(p0)
#    print 'neg',neg_sum(p0)
    p = NLP(Entropy, p0, maxIter = 1e3, maxFunEvals = 1e5)
    #p = NLP(chisq, p0, maxIter = 1e3, maxFunEvals = 1e5)
    # f(x) gradient (optional):
#    p.df = S_grad
#    p.d2f=S_hessian
#    p.userProvided.d2f=True
    
    
    # lb<= x <= ub:
    # x4 <= -2.5
    # 3.5 <= x5 <= 4.5
    # all other: lb = -5, ub = +15
    #p.lb =1e-7*N.ones(p.n)
    #p.ub = N.ones(p.n)
    p.lb =1e-7*N.ones(p0.shape)
    p.ub = N.ones(p0.shape)
Пример #41
0
def AMPGO(objfun,
          x0,
          args=(),
          local='L-BFGS-B',
          local_opts=None,
          bounds=None,
          maxfunevals=None,
          totaliter=20,
          maxiter=5,
          glbtol=1e-5,
          eps1=0.02,
          eps2=0.1,
          tabulistsize=5,
          tabustrategy='farthest',
          fmin=-numpy.inf,
          disp=None):
    """
    Finds the global minimum of a function using the AMPGO (Adaptive Memory Programming for
    Global Optimization) algorithm. 
    
    :param `objfun`: Function to be optimized, in the form ``f(x, *args)``.
    :type `objfun`: callable
    :param `x0`: Initial guess of the independent variables (one entry per dimension).
    :type `x0`: array-like
    :param `args`: Additional arguments passed to `objfun`.
    :type `args`: tuple
    :param `local`: The local minimization method (e.g. ``"L-BFGS-B"``). It can be one of the available
     `scipy` local solvers or `OpenOpt` solvers.
    :type `local`: string
    :param `bounds`: A list of tuples specifying the lower and upper bound for each independent variable
     [(`xl0`, `xu0`), (`xl1`, `xu1`), ...]
    :type `bounds`: list
    :param `maxfunevals`: The maximum number of function evaluations allowed.
    :type `maxfunevals`: integer
    :param `totaliter`: The maximum number of global iterations allowed.
    :type `totaliter`: integer
    :param `maxiter`: The maximum number of `Tabu Tunnelling` iterations allowed during each global iteration.
    :type `maxiter`: integer
    :param `glbtol`: The optimization will stop if the absolute difference between the current minimum objective
     function value and the provided global optimum (`fmin`) is less than `glbtol`.
    :type `glbtol`: float
    :param `eps1`: A constant used to define an aspiration value for the objective function during the Tunnelling phase.
    :type `eps1`: float
    :param `eps2`: Perturbation factor used to move away from the latest local minimum at the start of a Tunnelling phase.
    :type `eps2`: float
    :param `tabulistsize`: The size of the tabu search list (a circular list).
    :type `tabulistsize`: integer
    :param `tabustrategy`: The strategy to use when the size of the tabu list exceeds `tabulistsize`. It can be
     'oldest' to drop the oldest point from the tabu list or 'farthest' to drop the element farthest from
     the last local minimum found.
    :type `tabustrategy`: string
    :param `fmin`: If known, the objective function global optimum value.
    :type `fmin`: float
    :param `disp`: If zero or defaulted, then no output is printed on screen. If a positive number, then status
     messages are printed.
    :type `disp`: integer
 
    :returns: A tuple of 5 elements, in the following order:

     1. **best_x** (`array_like`): the estimated position of the global minimum.
     2. **best_f** (`float`): the value of `objfun` at the minimum.
     3. **evaluations** (`integer`): the number of function evaluations.
     4. **msg** (`string`): a message describing the cause of the termination.
     5. **tunnel_info** (`tuple`): a tuple containing the total number of Tunnelling phases performed and the
        successful ones.

    :rtype: `tuple`

    The detailed implementation of AMPGO is described in the paper 
    "Adaptive Memory Programming for Constrained Global Optimization" located here:

    http://leeds-faculty.colorado.edu/glover/fred%20pubs/416%20-%20AMP%20(TS)%20for%20Constrained%20Global%20Opt%20w%20Lasdon%20et%20al%20.pdf

    Copyright 2014 Andrea Gavana
    """

    if local not in SCIPY_LOCAL_SOLVERS + OPENOPT_LOCAL_SOLVERS:
        raise Exception('Invalid local solver selected: %s' % local)

    if local in SCIPY_LOCAL_SOLVERS and not SCIPY:
        raise Exception(
            'The selected solver %s is not available as there is no scipy installation'
            % local)

    if local in OPENOPT_LOCAL_SOLVERS and not OPENOPT:
        raise Exception(
            'The selected solver %s is not available as there is no OpenOpt installation'
            % local)

    x0 = numpy.atleast_1d(x0)
    n = len(x0)

    if bounds is None:
        bounds = [(None, None)] * n
    if len(bounds) != n:
        raise ValueError('length of x0 != length of bounds')

    low = [0] * n
    up = [0] * n
    for i in range(n):
        if bounds[i] is None:
            low[i], up[i] = -numpy.inf, numpy.inf
        else:
            l, u = bounds[i]
            if l is None:
                low[i] = -numpy.inf
            else:
                low[i] = l
            if u is None:
                up[i] = numpy.inf
            else:
                up[i] = u

    if maxfunevals is None:
        maxfunevals = max(100, 10 * len(x0))

    if tabulistsize < 1:
        raise Exception(
            'Invalid tabulistsize specified: %s. It should be an integer greater than zero.'
            % tabulistsize)
    if tabustrategy not in ['oldest', 'farthest']:
        raise Exception(
            'Invalid tabustrategy specified: %s. It must be one of "oldest" or "farthest"'
            % tabustrategy)

    iprint = 50
    if disp is None or disp <= 0:
        disp = 0
        iprint = -1

    low = numpy.asarray(low)
    up = numpy.asarray(up)

    tabulist = []
    best_f = numpy.inf
    best_x = x0

    global_iter = 0
    all_tunnel = success_tunnel = 0
    evaluations = 0

    if glbtol < 1e-8:
        local_tol = glbtol
    else:
        local_tol = 1e-8

    while 1:

        if disp > 0:
            print('\n')
            print('=' * 72)
            print('Starting MINIMIZATION Phase %-3d' % (global_iter + 1))
            print('=' * 72)

        if local in OPENOPT_LOCAL_SOLVERS:
            problem = NLP(objfun,
                          x0,
                          lb=low,
                          ub=up,
                          maxFunEvals=max(1, maxfunevals),
                          ftol=local_tol,
                          iprint=iprint)
            problem.args = args

            results = problem.solve(local)
            xf, yf, num_fun = results.xf, results.ff, results.evals['f']
        else:
            options = {'maxiter': max(1, maxfunevals), 'disp': disp}
            if local_opts is not None:
                options.update(local_opts)
            res = minimize(objfun,
                           x0,
                           args=args,
                           method=local,
                           bounds=bounds,
                           tol=local_tol,
                           options=options)
            xf, yf, num_fun = res['x'], res['fun'], res['nfev']

        maxfunevals -= num_fun
        evaluations += num_fun

        if yf < best_f:
            best_f = yf
            best_x = xf

        if disp > 0:
            print('\n\n ==> Reached local minimum: %s\n' % yf)

        if best_f < fmin + glbtol:
            if disp > 0:
                print('=' * 72)
            return best_x, best_f, evaluations, 'Optimization terminated successfully', (
                all_tunnel, success_tunnel)
        if maxfunevals <= 0:
            if disp > 0:
                print('=' * 72)
            return best_x, best_f, evaluations, 'Maximum number of function evaluations exceeded', (
                all_tunnel, success_tunnel)

        tabulist = drop_tabu_points(xf, tabulist, tabulistsize, tabustrategy)
        tabulist.append(xf)

        i = improve = 0

        while i < maxiter and improve == 0:

            if disp > 0:
                print('-' * 72)
                print('Starting TUNNELLING   Phase (%3d-%3d)' %
                      (global_iter + 1, i + 1))
                print('-' * 72)

            all_tunnel += 1

            r = numpy.random.uniform(-1.0, 1.0, size=(n, ))
            beta = eps2 * numpy.linalg.norm(xf) / numpy.linalg.norm(r)

            if numpy.abs(beta) < 1e-8:
                beta = eps2

            x0 = xf + beta * r

            x0 = numpy.where(x0 < low, low, x0)
            x0 = numpy.where(x0 > up, up, x0)

            aspiration = best_f - eps1 * (1.0 + numpy.abs(best_f))

            tunnel_args = tuple([objfun, aspiration, tabulist] + list(args))

            if local in OPENOPT_LOCAL_SOLVERS:
                problem = NLP(tunnel,
                              x0,
                              lb=low,
                              ub=up,
                              maxFunEvals=max(1, maxfunevals),
                              ftol=local_tol,
                              iprint=iprint)
                problem.args = tunnel_args

                results = problem.solve(local)
                xf, yf, num_fun = results.xf, results.ff, results.evals['f']
            else:
                options = {'maxiter': max(1, maxfunevals), 'disp': disp}
                if local_opts is not None:
                    options.update(local_opts)

                res = minimize(tunnel,
                               x0,
                               args=tunnel_args,
                               method=local,
                               bounds=bounds,
                               tol=local_tol,
                               options=options)
                xf, yf, num_fun = res['x'], res['fun'], res['nfev']

            maxfunevals -= num_fun
            evaluations += num_fun

            yf = inverse_tunnel(xf, yf, aspiration, tabulist)

            if yf <= best_f + glbtol:
                oldf = best_f
                best_f = yf
                best_x = xf
                improve = 1
                success_tunnel += 1

                if disp > 0:
                    print(
                        '\n\n ==> Successful tunnelling phase. Reached local minimum: %s < %s\n'
                        % (yf, oldf))

            if best_f < fmin + glbtol:
                return best_x, best_f, evaluations, 'Optimization terminated successfully', (
                    all_tunnel, success_tunnel)

            i += 1

            if maxfunevals <= 0:
                return best_x, best_f, evaluations, 'Maximum number of function evaluations exceeded', (
                    all_tunnel, success_tunnel)

            tabulist = drop_tabu_points(xf, tabulist, tabulistsize,
                                        tabustrategy)
            tabulist.append(xf)

        if disp > 0:
            print('=' * 72)

        global_iter += 1
        x0 = xf.copy()

        if global_iter >= totaliter:
            return best_x, best_f, evaluations, 'Maximum number of global iterations exceeded', (
                all_tunnel, success_tunnel)

        if best_f < fmin + glbtol:
            return best_x, best_f, evaluations, 'Optimization terminated successfully', (
                all_tunnel, success_tunnel)
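A usage sketch based on the signature and return tuple documented above; the test function, bounds and settings are illustrative, and the helper routines (drop_tabu_points, tunnel, inverse_tunnel) are not shown in this excerpt, so this is not a standalone run:

import numpy

def camelback(x):
    # six-hump camelback function, a common 2-D global-optimization test case
    x1, x2 = x
    return (4.0 - 2.1*x1**2 + x1**4/3.0)*x1**2 + x1*x2 + (-4.0 + 4.0*x2**2)*x2**2

x0 = numpy.array([-1.5, 1.5])
bounds = [(-3.0, 3.0), (-2.0, 2.0)]

best_x, best_f, evals, msg, tunnel_info = AMPGO(camelback, x0, local='L-BFGS-B',
                                                bounds=bounds, maxfunevals=2000,
                                                totaliter=20, glbtol=1e-5, disp=1)
print('best x = %s' % best_x)
print('best f = %s (global minimum is about -1.0316)' % best_f)
print('%s, tunnelling phases: %s' % (msg, str(tunnel_info)))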
Пример #42
0
from openopt import NLP
from numpy import cos, arange, ones, asarray, abs, zeros
N = 30
M = 5
ff = lambda x: ((x-M)**2).sum()
p = NLP(ff, cos(arange(N)))

def df(x):
    r = 2*(x-M)
    r[0] += 15 #incorrect derivative
    r[8] += 80 #incorrect derivative
    return r
p.df =  df

p.c = lambda x: [2*x[0]**4 - 32, x[1]**2 + x[2]**2 - 8]

def dc(x):
    r = zeros((2, p.n))
    r[0,0] = 2 * 4 * x[0]**3
    r[1,1] = 2 * x[1]
    r[1,2] = 2 * x[2] + 15 #incorrect derivative
    return r
p.dc = dc

p.h = lambda x: (1e1*(x[-1]-1)**4, (x[-2]-1.5)**4)

def dh(x):
    r = zeros((2, p.n))
    r[0,-1] = 1e1*4*(x[-1]-1)**3
    r[1,-2] = 4*(x[-2]-1.5)**3 + 15 #incorrect derivative
    return r
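The snippet cuts off before dh is attached to the problem; presumably the deliberately wrong derivative entries are there to exercise OpenOpt's finite-difference derivative checks, along these lines (a sketch):

p.dh = dh

# compare the user-supplied derivatives against finite differences;
# the entries marked "incorrect derivative" above should be reported here
p.checkdf()
p.checkdc()
p.checkdh()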
Пример #43
0
'''
Example:
minimize (x-1)^2 + (y-2)^2 + (z-3)^4
subjected to
y > 5
4x-5z < -1
(x-10)^2 + (y+1)^2 < 50
'''

from openopt import NLP
from numpy import *
 
x0 = [0,0,0] # start point estimation
 
# define objective function as a Python language function
# of course, you can use "def f(x):" for multi-line functions instead of "f = lambda x:"
f = lambda x: (x[0]-1)**2 + (x[1]-2)**2 + (x[2]-3)**4
 
# form box-bound constraints lb <= x <= ub
lb = [-inf, 5, -inf] # lower bound
 
# form general linear constraints Ax <= b
A = [4, 0, -5]
b = -1
 
# form general nonlinear constraints c(x) <= 0
c = lambda x: (x[0] - 10)**2 + (x[1]+1) ** 2 - 50
 
# optionally you can provide derivatives (user-supplied or from automatic differentiation)
# for objective function and/or nonlinear constraints, see further doc below
 
p = NLP(f, x0, lb=lb, A=A, b=b, c=c)
r = p.solve('ralg')
print r.xf # [ 6.25834211  4.99999931  5.20667372]
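The comment above mentions that derivatives can optionally be supplied. For this f and c they are easy to write down; a sketch of the same problem with analytic derivatives attached (not part of the original example):

df = lambda x: [2*(x[0]-1), 2*(x[1]-2), 4*(x[2]-3)**3]   # gradient of f
dc = lambda x: [2*(x[0]-10), 2*(x[1]+1), 0]              # gradient of the nonlinear constraint

p2 = NLP(f, x0, lb=lb, A=A, b=b, c=c, df=df, dc=dc)
r2 = p2.solve('ralg')
print r2.xf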
Пример #44
0
###############################################################

lines, results = [], {}
for j, solver in enumerate(solvers):
    p = NLP(
        ff,
        x0,
        xlabel=Xlabel,
        c=c,
        h=h,
        lb=lb,
        ub=ub,
        gtol=gtol,
        diffInt=diffInt,
        ftol=ftol,
        maxIter=1390,
        plot=PLOT,
        color=colors[j],
        iprint=10,
        df_iter=4,
        legend=solver,
        show=False,
        contol=contol,
        maxTime=maxTime,
        maxFunEvals=maxFunEvals,
        name="NLP_5",
    )

    if solver == "algencan":
        p.gtol = 1e-2
    elif solver == "ralg":
        pass
Пример #45
0
    #print 'len pd',len(pd)
    #print 'len p',len(p)

    x, z = precompute_r()
    X, Z = N.meshgrid(x, z)
    coslist, cosmat_list = precompute_cos(h, k, l, x, z)
    #print 'coslist',coslist[0]
    flist = N.ones(len(p0))

    flist[M::] = -flist[M::]
    #vout=chisq_hessian(p,fqerr,p,coslist,flist)
    #print 'vout', vout

    #    print 'pos',pos_sum(p0)
    #    print 'neg',neg_sum(p0)
    p = NLP(Entropy, p0, maxIter=1e3, maxFunEvals=1e5)
    #p = NLP(chisq, p0, maxIter = 1e3, maxFunEvals = 1e5)
    # f(x) gradient (optional):
    #    p.df = S_grad
    #    p.d2f=S_hessian
    #    p.userProvided.d2f=True

    # lb<= x <= ub:
    # x4 <= -2.5
    # 3.5 <= x5 <= 4.5
    # all other: lb = -5, ub = +15
    #p.lb =1e-7*N.ones(p.n)
    #p.ub = N.ones(p.n)
    p.lb = 1e-7 * N.ones(p0.shape)
    p.ub = N.ones(p0.shape)
    #p.ub[4] = -2.5
Пример #46
0
from openopt import NLP
from numpy import cos, arange, ones, asarray, zeros, mat, array

N = 50
# objfunc:
# (x0-1)^4 + (x2-1)^4 + ... +(x49-1)^4 -> min (N=nVars=50)
f = lambda x : ((x-1)**4).sum()
x0 = cos(arange(N))
p = NLP(f, x0, maxIter = 1e3, maxFunEvals = 1e5)

# f(x) gradient (optional):
p.df = lambda x: 4*(x-1)**3


# lb<= x <= ub:
# x4 <= -2.5
# 3.5 <= x5 <= 4.5
# all other: lb = -5, ub = +15
p.lb = -5*ones(N)
p.ub = 15*ones(N)
p.ub[4] = -2.5
p.lb[5], p.ub[5] = 3.5, 4.5



# Ax <= b
# x0+...+xN>= 1.1*N
# x9 + x19 <= 1.5
# x10+x11 >= 1.6
p.A = zeros((3, N))
p.A[0, 9] = 1
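The snippet is cut off after the first entry of A. Based on the constraints listed in the comments (and remembering that Ax <= b, so ">=" rows must be negated), the remaining rows might be filled in roughly like this (a sketch, not the original continuation):

p.A[0, 19] = 1                   # x9 + x19 <= 1.5
p.A[1, 10] = p.A[1, 11] = -1     # x10 + x11 >= 1.6      ->  -x10 - x11 <= -1.6
p.A[2, :] = -1                   # x0 + ... + xN >= 1.1*N  ->  -sum(x) <= -1.1*N
p.b = [1.5, -1.6, -1.1*N]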
Пример #47
0
    def __init__(self, calibration, parameter, debug=False):
        def eqX(industry, Industry):
            i, I = industry, Industry

            def equation(x):
                X, px, pf = x[sX:sF], x[spx:spz], x[spf:epf]
                pf = np.array([float(x[spf:epf]), 1])
                return X[i] - self.calibration.alpha[I] * sum([
                    pf[h] * self.calibration.FF[H]
                    for h, H in enumerate(parameter.factors)
                ]) / px[i]

            return equation

        def eqpx(i):
            def equation(x):
                X, Z = x[sX:sF], x[sZ:spx]
                return X[i] - Z[i]

            return equation

        def eqZ(i):
            def equation(x):
                px, pz = x[spx:spz], x[spz:spf]
                return px[i] - pz[i]

            return equation

        def eqpz(j, J):
            def equation(x):
                F, Z = Table.unflatten(index=parameter.factors,
                                       columns=parameter.industries,
                                       sam=x[sF:sZ]), x[sZ:spx]
                return Z[j] - self.calibration.b[J] * np.prod([
                    F[J][H]**self.calibration.beta[J][H]
                    for H in parameter.factors
                ])

            return equation

        def eqpf(H):
            def equation(x):
                F = Table.unflatten(index=parameter.factors,
                                    columns=parameter.industries,
                                    sam=x[sF:sZ])
                return sum(
                    F[J][H]
                    for J in parameter.industries) - self.calibration.FF[H]

            return equation

        def eqF(h, H, j, J):
            def equation(x):
                F, Z, pz, pf = Table.unflatten(
                    index=parameter.factors,
                    columns=parameter.industries,
                    sam=x[sF:sZ]), x[sZ:spx], x[spz:spf], x[spf:epf]
                pf = np.array([float(x[spf:epf]), 1])
                return F[J][
                    H] - self.calibration.beta[J][H] * pz[j] * Z[j] / pf[h]

            return equation

        self.calibration = copy(calibration)
        self.calibration.X0 = self.calibration.X0['HH']
        j = i = len(parameter.industries)
        h = len(parameter.factors)
        sX = 0
        sF = sX + i
        sZ = sF + i * h
        spx = sZ + j
        spz = spx + i
        spf = spz + j
        epf = spf + h - 1
        epf_numerair = epf + 1
        lb = np.array([(np.float64(0.001))] * epf)
        self.x = x = np.empty(epf, dtype='float64')
        x[sX:sF] = self.calibration.X0.data
        x[sF:sZ] = self.calibration.F0.as_matrix().flatten()
        x[sZ:spx] = self.calibration.Z0.data
        x[spx:spz] = [1] * i
        x[spz:spf] = [1] * j
        x[spf:epf] = [1] * (epf - spf)
        self.t = x[:]
        if debug:
            self.x = x = np.array([21.1] * epf)
        print x
        xnames = [''] * epf_numerair
        xnames[sX:sF] = self.calibration.X0.names
        xnames[sF:sZ] = [
            i + h + ' ' for i in parameter.industries
            for h in parameter.factors
        ]
        xnames[sZ:spx] = self.calibration.Z0.names
        xnames[spx:spz] = parameter.industries
        xnames[spz:spf] = parameter.industries
        xnames[spf:epf] = parameter.factors
        xnames[epf] = parameter.factors[-1]

        xtypes = [''] * epf_numerair
        xtypes[sX:sF] = ['X0'] * len(self.calibration.X0)
        xtypes[sF:sZ] = ['F'] * (len(parameter.industries) *
                                 len(parameter.factors))
        xtypes[sZ:spx] = ['F0'] * len(self.calibration.F0)
        xtypes[spx:spz] = ['pb'] * len(parameter.industries)
        xtypes[spz:spf] = ['pz'] * len(parameter.industries)
        xtypes[spf:epf] = ['pf'] * len(parameter.factors)
        xtypes[epf] = 'pf'

        self.xnametypes = [
            '%s %s' % (xnames[i], xtypes[i]) for i in range(epf - 1)
        ]

        constraints = []
        for i, I in enumerate(parameter.industries):
            industry, Industry = unlink2(i, I)
            constraints.append(eqX(industry, Industry))
            constraints.append(eqpx(industry))
            constraints.append(eqZ(industry))
            constraints.append(eqpz(industry, Industry))

        for F in parameter.factors:
            Factor = unlink(F)
            constraints.append(eqpf(Factor))

        for f, F in enumerate(parameter.factors):
            for i, I in enumerate(parameter.industries):
                industry, Industry = unlink2(i, I)
                factor, Factor = unlink2(f, F)
                constraints.append(eqF(factor, Factor, industry, Industry))

        self.UU = UU = lambda x: -np.prod([
            x[i]**self.calibration.alpha[i]
            for i in range(len(parameter.industries))
        ])

        p = NLP(UU,
                x,
                h=constraints,
                lb=lb,
                iprint=50,
                maxIter=10000,
                maxFunEvals=1e7,
                name='NLP_1')
        p.plot = debug
        self.r = p.solve('ralg', plot=0)

        if debug:
            for i, constraint in enumerate(constraints):
                print(i, '%02f' % constraint(self.r.xf))
Пример #48
0
from openopt import NLP
x0 = [4, 5, 6]
#h = lambda x: log(1+abs(4+x[1]))
#f = lambda x: log(1+abs(x[0]))
f = lambda x: x[0]**4 + x[1]**4 + x[2]**4
df = lambda x: [4*x[0]**3,  4*x[1]**3, 4*x[2]**3]
h = lambda x: [(x[0]-1)**2,  (x[1]-1)**4]
dh = lambda x: [[2*(x[0]-1), 0, 0],  [0, 4*(x[1]-1)**3, 0]]
colors = ['r', 'b', 'g', 'k', 'y']
solvers = ['ralg','scipy_cobyla', 'algencan', 'ipopt', 'scipy_slsqp']
solvers = ['ralg','algencan']
contol = 1e-8
gtol = 1e-8

for i, solver in enumerate(solvers):
    p = NLP(f, x0, df=df, h=h, dh=dh, gtol = gtol, diffInt = 1e-1, contol = contol,  iprint = 1000, maxIter = 1e5, maxTime = 50, maxFunEvals = 1e8, color=colors[i], plot=0, show = i == len(solvers))
    p.checkdh()
    r = p.solve(solver)

#
#x0 = 4
##h = lambda x: log(1+abs(4+x[1]))
##f = lambda x: log(1+abs(x[0]))
#f = lambda x: x**4
#h = lambda x: (x-1)**2
#colors = ['r', 'b', 'g', 'k', 'y']
#solvers = ['ralg','scipy_cobyla', 'algencan', 'ipopt', 'scipy_slsqp']
##solvers = ['algencan']
#contol = 1e-8
#gtol = 1e-8
#for i, solver in enumerate(solvers):
Пример #49
0
x1 = 1
z1 = 5
z2 = 2
x0 = [x1, z1, z2]

# calculate the constraints first
[y1, y2, _] = GaussSeidelCoordinator(
    x1, z1, z2, 0)  # find values of y1, y2 that is consistent with x1, z1, z2
coupling.y1 = y1
coupling.y2 = y2

# print initial values
print('Initial Objective: ' + str(ObjectiveMain(x0)))
print('[x1 z1 z2] = ' + str(x0))
print('[y1 y2] = [' + str(coupling.y1) + ', ' + str(coupling.y2) + ']')

# Non linear Programming for optimizing the Sellar problem
# documentation at: https://github.com/troyshu/openopt/blob/master/openopt/oo.py
p = NLP(f=ObjectiveMain,
        x0=x0,
        lb=(0, -10, 0),
        ub=(10, 10, 10),
        c=[Constraint1, Constraint2],
        iprint=50)
res = p.solve('scipy_cobyla')

# print the results
print('\n' + '-' * 50)
print('Final Objective: ' + str(res.ff))
print('[x1 z1 z2] = ' + str(res.xf))
print('[y1 y2] = [' + str(coupling.y1) + ', ' + str(coupling.y2) + ']')
Пример #50
0
# -*- coding: utf-8 -*-
'''
Created on 2014/3/26
min (x-1)^2 + (y-2)^2 + (z-3)^4 
s.t. 
    y > 5
    4x-5z < -1
    (x-10)^2 + (y+1)^2 < 50

'''

from FuncDesigner import *
from openopt import NLP
from time import time

t = time()
x,y,z = oovars('x', 'y', 'z')
f = (x-1)**2 + (y-2)**2 + (z-3)**4
startPoint = {x:0, y:0, z:0}
constraints = [y>5, 4*x-5*z<-1, (x-10)**2 + (y+1)**2 < 50] 
p = NLP(f, startPoint, constraints = constraints)
r = p.solve('ipopt')
x_opt, y_opt, z_opt = r(x,y,z)
print(x_opt, y_opt, z_opt) # x=6.25834212, y=4.99999936, z=5.2066737

print "elapsed %.3f secs"%(time() - t)
Пример #51
0
"""
This is test for future work on ralg
It may fail for now
"""

from FuncDesigner import *
from openopt import NLP
from numpy import nan

a, b = oovars('a', 'b')
f = a**2 + b**2

K = 1e5
minTreshold = 0.1

c1 = ifThenElse(a > minTreshold, K * a**2 + 1.0 / K * b**2,
                nan) < K * minTreshold**2
c2 = a > minTreshold

startPoint = {a: -K, b: -K}

p = NLP(f, startPoint, constraints=[c1, c2], iprint=10, maxIter=1e4)

solver = 'ipopt'
solver = 'ralg'
#solver = 'scipy_slsqp'

r = p.solve(solver)
Пример #52
0
class data_fitting():
    def __init__(self, xdata, ydata, p0, const, mimas, func, *args, **kwargs):
        self.xdata = xdata
        self.ydata = ydata
        self.p0 = p0
        self.const = const
        self.mima = mimas
        self.func = func
        self.args = args

        if "weights" in kwargs:
            self.weights0 = kwargs['weights']
            kwargs.pop('weights')
        else:
            self.weights0 = 1.

        if 'plot' in kwargs:
            self.plot = kwargs['plot']
            kwargs.pop('plot')
        else:
            self.plot = plot

        if 'fit_range' in kwargs:
            if isscalar(kwargs['fit_range']):
                start, end, right_border, left_border = set_fitrange(self.xdata, self.ydata,  _plot = self.plot)
                self.x = xdata[start[0]:end[0] + 1]
                self.y = ydata[start[0]:end[0] + 1]
                self.start = start[0]
                self.end = end[0]
            elif isinstance(kwargs['fit_range'], list):
                start, end = kwargs['fit_range']
                self.x = xdata[start:end + 1]
                self.y = ydata[start:end + 1]
                self.start = start
                self.end = end
            kwargs.pop('fit_range')
            try:
                self.weights = self.weights0[self.start:self.end + 1]
            except:
                self.weights = 1.
        else:
            self.x = xdata
            self.y = ydata
            self.weights = self.weights0
        
        self.kwargs = kwargs

    def leastsq(self, *args, **kwargs):
        from scipy.optimize import leastsq

        if 'plotting' in kwargs:
            plotting = True
            kwargs.pop('plotting')
        else:
            plotting = False

        f = lambda p, x, y, weights: (self.func(p, x,self.const,self.mima) - y) / weights
        self.p, self.cov = leastsq(f, self.p0, args = (self.x, self.y, self.weights), **kwargs)
        
        if plotting:
            self.plot(self.xdata, self.ydata, 'o', label = 'original data')
            self.plot(self.x, self.y, '+')
            self.plot(self.xdata, self.func(self.p, self.xdata,self.const,self.mima), label = 'fit with scipy.optimize.leastsq')
        


    def curve_fit(self, **kwargs):
        from scipy.optimize import curve_fit

        if 'plotting' in kwargs:
            plotting = True
            kwargs.pop('plotting')
        else:
            plotting = False

        def f(x, *p):
            return self.func(p[0], x, self.const, self.mima)

        self.p, self.cov = curve_fit(f, self.x, self.y, p0 = self.p0, sigma = self.weights, **kwargs)

        if plotting:
            self.plot(self.xdata, self.ydata, 'o', label = 'original data')
            self.plot(self.x, self.y, '+')
            self.plot(self.xdata, self.func(self.p, self.xdata,self.const,self.mima), label = 'fit with scipy.optimize.curve_fit')




    def odr(self, *args, **kwargs):
        from scipy import odr

        if 'plotting' in kwargs:
            plotting = True
            kwargs.pop('plotting')
        else:
            plotting = False


        f = lambda p, x: self.func(p, x,self.const,self.mima)
        self.mod = odr.Model(f)
        self.dat = odr.Data(self.x, self.y, we = 1. / self.weights)
        self.my_odr = odr.ODR(self.dat, self.mod, self.p0, *args, **kwargs)
        self.out = self.my_odr.run()
        self.p = self.out.beta
        self.cov = self.out.cov_beta
        #self.sd = out.sd_beta
        
        if plotting:
            self.plot(self.xdata, self.ydata, 'o', label = 'original data')
            self.plot(self.x, self.y, '+')
            self.plot(self.xdata, self.func(self.p, self.xdata,self.const,self.mima), label = 'fit with scipy.odr')

        

    def openopt(self, *args, **kwargs):
        from openopt import NLP
        
        if 'plotting' in kwargs:
            plotting = True
            kwargs.pop('plotting')
        else:
            plotting = False
        try:
            solver = args[0]
        except:
            solver = 'ralg'
        self.args += args[1:]
        self.kwargs.update(kwargs)

        
        f = lambda p: sum(((self.func(p, self.x, self.const,self.mima) - self.y) / self.weights)**2)
        self.prob = NLP(f, x0 = self.p0, *self.args, **self.kwargs)
        self.res = self.prob.solve(solver)
        self.p = self.res.xf

        if plotting:
            self.plot(self.xdata, self.ydata, 'o', label = 'original data')
            self.plot(self.x, self.y, '+')
            self.plot(self.xdata, self.func(self.p, self.xdata,self.const,self.mima), label = 'fit with openopt:\n solver: '+ solver)
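A usage sketch for the class above; the model function, the synthetic data and the choice of the 'ralg' solver are illustrative assumptions, not part of the original code:

import numpy as np
import pylab

def line(p, x, const, mima):
    # hypothetical model: a straight line; const and mima are unused here but
    # kept because data_fitting always passes them to func
    return p[0] * x + p[1]

xdata = np.linspace(0, 10, 50)
ydata = line([2.0, 1.0], xdata, None, None) + np.random.normal(0, 0.2, 50)

fit = data_fitting(xdata, ydata, p0=[1.0, 0.0], const=None, mimas=None, func=line,
                   weights=0.2 * np.ones(50), plot=pylab.plot)
fit.openopt('ralg', plotting=True)   # or fit.leastsq(), fit.curve_fit(), fit.odr()
print('fitted parameters: %s' % fit.p)
pylab.legend()
pylab.show()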
Пример #53
0
from FuncDesigner import *
from openopt import NLP
from numpy import arange, ones

n = 50
kappa = 1.05
Tol = 1e16

c = arange(1, n + 1) / 10.0

x = oovar("x")
startPoint = {x: ones(n)}

xs = sum(x)

cons = (x <= kappa / n * xs)(tol=-1e-6)

f = sum(c * x) + 1e-300 * sum(sqrt(-x + kappa / n * xs + Tol))

p = NLP(f, startPoint, constraints=cons)

r = p.minimize(
    "gsubg", dilation=False, iprint=10, ftol=1e-10, fTol=1e-4, xtol=1e-6, maxIter=1e5, maxFunEvals=1e7, T="float128"
)
print "objective func evaluations: ", r.evals["f"]
Пример #54
0
###############################################################
solvers = ['ralg', 'scipy_cobyla', 'lincher', 'scipy_slsqp', 'ipopt','algencan']
#solvers = ['ralg', 'ipopt']
solvers = ['ralg', 'scipy_cobyla', 'lincher', 'scipy_slsqp', 'ipopt']
solvers = ['ralg', 'scipy_slsqp', 'scipy_cobyla', 'algencan']
#solvers = ['ipopt','ralg', 'algencan']
solvers = ['ralg', 'scipy_cobyla']
#solvers = ['ralg', 'scipy_slsqp']
#solvers = ['ralg', 'algencan']
solvers = ['ralg']
###############################################################

lines, results = [], {}
for j, solver in enumerate(solvers):
    p = NLP(ff, startPoint, xlabel = Xlabel, gtol=gtol, diffInt = diffInt, ftol = ftol, maxIter = 1390, plot = PLOT, color = colors[j], iprint = 10, df_iter = 4, legend = solver, show=False,  contol = contol,  maxTime = maxTime,  maxFunEvals = maxFunEvals, name='NLP_bench_1')
    p.constraints = [c1<0,  c2<0,  h1.eq(0),  h2.eq(0), x > lb, x< ub]
    #p.constraints = h1.eq(0)
    
    #p._Prepare()
    #print p.dc(p.x0)
    #print h1.D(startPoint)
    #print h2.D(startPoint)
    #continue
    
    if solver =='algencan':
        p.gtol = 1e-2
    elif solver == 'ralg':
        pass
        #p.debug = 1