Example No. 1
from numpy import zeros  # import needed by these fragments; `p` is the NLP instance from the elided setup

def dc(x):
    r = zeros((2, p.n))
    r[0,0] = 2 * 4 * x[0]**3
    r[1,1] = 2 * x[1]
    r[1,2] = 2 * x[2] + 15  # deliberately incorrect derivative; p.checkdc() below should flag it
    return r
p.dc = dc

p.h = lambda x: (1e1*(x[-1]-1)**4, (x[-2]-1.5)**4)

def dh(x):
    r = zeros((2, p.n))
    r[0,-1] = 1e1*4*(x[-1]-1)**3
    r[1,-2] = 4*(x[-2]-1.5)**3 + 15  # deliberately incorrect derivative; p.checkdh() below should flag it
    return r
p.dh = dh

p.checkdf()
p.checkdc()
p.checkdh()
"""
you can use p.checkdF(x) for other point than x0 (F is f, c or h)
p.checkdc(myX)
or
p.checkdc(x=myX)
values with difference greater than
maxViolation (default 1e-5)
will be shown
p.checkdh(maxViolation=1e-4)
p.checkdh(myX, maxViolation=1e-4)
p.checkdh(x=myX, maxViolation=1e-4)
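# The checkers compare user-supplied derivatives against finite-difference
# estimates. Below is a minimal self-contained sketch (assuming only that
# openopt is installed); the toy objective and the deliberate error in its
# gradient are invented for illustration:
from openopt import NLP

f  = lambda x: (x[0] - 1)**2 + x[1]**4
df = lambda x: [2*(x[0] - 1) + 5, 4*x[1]**3]  # '+ 5' is a deliberate error

p = NLP(f, [0.5, 0.5], df=df)
p.checkdf()                   # should flag the first component of df
p.checkdf(maxViolation=1e-4)  # same check with a looser reporting threshold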
Example No. 2
        #print 'maxfun', p.maxfun
        p.maxIter = 50
        #    p.maxfun=100

        #p.df_iter = 50
        p.maxTime = 4000
        h_args = (h, k, l, fq, fqerr, x, z, cosmat_list, coslist, flist)

        if 0:
            #p.h=[pos_sum,neg_sum]
            p.h = [pos_sum, neg_sum]
            p.c = [chisq]
            #    p.h=[pos_sum,neg_sum]
            p.args.h = h_args
            p.args.c = h_args
            p.dh = [pos_sum_grad, neg_sum_grad]
            p.df = chisq_grad
        if 1:

            #p.h=[pos_sum,neg_sum,chisq]
            p.c = [chisq]
            p.h = [pos_sum, neg_sum]
            p.args.h = h_args
            p.args.c = h_args
            p.dh = [pos_sum_grad, neg_sum_grad]
            p.dc = chisq_grad
            #p.dh=[pos_sum_grad,neg_sum_grad,neg_sum_grad]
            p.df = S_grad

        if 0:
            print('checking')
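# A minimal sketch of the p.args mechanism used above (assuming openopt is
# installed): extra arguments assigned to p.args.<name> are forwarded to the
# corresponding user function after x. The objective and values here are
# invented for illustration.
from openopt import NLP

def f(x, a, b):            # the objective receives the extras after x
    return a*x[0]**2 + b*x[1]**2

p = NLP(f, [1.0, 1.0])
p.args.f = (2.0, 3.0)      # forwarded as f(x, 2.0, 3.0)
r = p.solve('ralg')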
Example No. 3
    # (fragment: tail of the DC definition from the elided setup)
    r[2,35] = 2*x[35] + x[25]
    return r
p.dc = DC

# non-linear equality constraints h(x) = 0
# 1e4*(x[last]-1)**4 = 0
# (x[last-1]-1.5)**4 = 0

p.h = lambda x: (1e4*(x[-1]-1)**4, (x[-2]-1.5)**4)
# dh(x)/dx: non-lin eq constraints gradients (optional):
def DH(x):
    r = zeros((2, p.n))
    r[0, -1] = 1e4*4 * (x[-1]-1)**3
    r[1, -2] = 4 * (x[-2]-1.5)**3
    return r
p.dh = DH

p.contol = 1e-3 # required constraints tolerance, default for NLP is 1e-6

# for the ALGENCAN solver, gtol is the only stop criterion connected to OpenOpt
# (except maxfun, maxiter)
# Note that in ALGENCAN gtol means the norm of the projected gradient of the
# Augmented Lagrangian, so it should be something like 1e-3...1e-5
p.gtol = 1e-5 # gradient stop criterion (default for NLP is 1e-6)


# see also help(NLP): maxTime, maxCPUTime, ftol and xtol,
# which are used by lincher and some other solvers

# optional: check of user-supplied derivatives
p.checkdf()
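# A minimal end-to-end sketch of how these tolerances fit into a solve
# (assuming openopt is installed; the toy objective, start point and the
# 'ralg' solver are illustrative -- ALGENCAN itself requires a separate
# install):
from openopt import NLP

f = lambda x: x[0]**2 + x[1]**2
p = NLP(f, [1.0, 2.0])
p.contol = 1e-3         # required constraints tolerance
p.gtol = 1e-5           # gradient stop criterion
p.ftol = p.xtol = 1e-6  # objective / argument stop criteria
r = p.solve('ralg')     # r.xf holds the final point, r.ff the final objective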
Example No. 4
    def run_optimization(self, plot=True, _only_check_gradients=False):
        """Start/run optimization procedure and the optimum unless.
        
        Set the keyword parameter 'plot' to False (default=True) if plotting
        should not be conducted.
        """
        grid = self.get_grid()
        model = self.get_model()

        # Initial try
        p0 = self.get_p0()

        # Less than (-0.5 < u < 1)
        # TODO: These are currently hard coded. They shouldn't be.
        #NLT = len(grid) * len(model.u)
        #Alt = N.zeros( (NLT, len(p0)) )
        #Alt[:, (len(grid) - 1) * len(model.x):] = -N.eye(len(grid) *
        #                                              len(model.u))
        #blt = -0.5*N.ones(NLT)

        # TODO: These are currently hard coded. They shouldn't be.
        #N_xvars = (len(grid) - 1) * len(model.x)
        #N_uvars = len(grid) * len(model.u)
        #N_vars = N_xvars + N_uvars
        #Alt = -N.eye(N_vars)
        #blt = N.zeros(N_vars)
        #blt[0:N_xvars] = -N.ones(N_xvars)*0.001
        #blt[N_xvars:] = -N.ones(N_uvars)*1;

        # Get OpenOPT handler
        p = NLP(
            self.f,
            p0,
            maxIter=1e3,
            maxFunEvals=1e3,
            #A=Alt, # See TODO above
            #b=blt, # See TODO above
            df=self.df,
            ftol=1e-4,
            xtol=1e-4,
            contol=1e-4)
        if len(grid) > 1:
            p.h = self.h
            p.dh = self.dh

        if plot:
            p.plot = 1
        p.iprint = 1

        if _only_check_gradients:
            # Check gradients against finite difference quotients
            p.checkdf(maxViolation=0.05)
            p.checkdh()
            return None

        #opt = p.solve('ralg') # does not work - serious convergence issues
        opt = p.solve('scipy_slsqp')

        if plot:
            plot_control_solutions(model, grid, opt.xf)

        return opt
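# Hypothetical usage sketch: 'MyProblem' stands for whatever class defines
# run_optimization above (it must supply get_grid, get_model, get_p0, f, df,
# h and dh). The returned opt is an OpenOpt result object; its xf and ff
# attributes hold the final point and final objective value.
problem = MyProblem()
opt = problem.run_optimization(plot=False)
if opt is not None:
    print('x* =', opt.xf, 'f(x*) =', opt.ff)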