Example #1
    def solve(self):
        """Solve the optimal control problem

        solve() first checks for steady-state feasibility and defines the
        optimal control problem. After solving, the instance variable sol can
        be used to examine the solution.

        TODO: Add support for other solvers
        TODO: version bump
        """
        if len(self.prob['s']) == 0:
            self.set_grid()
        # Check feasibility
        # self.check_ss_feasibility()
        # Construct optimization problem
        self.prob['solver'] = None
        N = self.options['N']
        self.prob['vars'] = cas.ssym("b", N + 1, self.sys.order)
        V = cas.vec(self.prob['vars'])
        self._make_objective()
        self._make_constraints()

        con = cas.SXFunction([V], [self.prob['con'][0]])
        obj = cas.SXFunction([V], [self.prob['obj']])
        if self.options.get('solver') == 'Ipopt':
            solver = cas.IpoptSolver(obj, con)
        else:
            print """Solvers other than Ipopt are currently not supported,
            switching to Ipopt"""
            solver = cas.IpoptSolver(obj, con)
        for option, value in self.options.iteritems():
            if solver.hasOption(option):
                solver.setOption(option, value)
        solver.init()
        # Setting constraints
        solver.setInput(cas.vertcat(self.prob['con'][1]), "lbg")
        solver.setInput(cas.vertcat(self.prob['con'][2]), "ubg")
        solver.setInput([np.inf] * self.sys.order * (N + 1), "ubx")
        solver.setInput(
            cas.vertcat(
                ([0] * (N + 1), (self.sys.order - 1) * (N + 1) * [-np.inf])),
            "lbx")

        solver.solve()
        self.prob['solver'] = solver
        self._get_solution()
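A minimal stand-alone sketch of the same objective/constraint/IpoptSolver pattern used in solve() above, assuming the legacy CasADi API this snippet relies on (ssym, SXFunction, string-named solver inputs); the toy problem and its bounds are illustrative only:

import casadi as cas
import numpy as np

x = cas.ssym("x", 2)                             # decision variables
obj = cas.SXFunction([x], [x[0]**2 + x[1]**2])   # objective: minimize x0^2 + x1^2
con = cas.SXFunction([x], [x[0] + x[1]])         # single constraint expression
solver = cas.IpoptSolver(obj, con)
solver.init()
solver.setInput([1.0], "lbg")                    # enforce x0 + x1 >= 1
solver.setInput([np.inf], "ubg")
solver.setInput([-np.inf] * 2, "lbx")
solver.setInput([np.inf] * 2, "ubx")
solver.solve()
print solver.output("x")                         # optimum should be near [0.5, 0.5]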
Example #2
    def setupSolver(self, solverOpts=[], constraintFunOpts=[], callback=None):
        if not self.collocationIsSetup:
            raise ValueError("you forgot to call setupCollocation")

        g = self._constraints.getG()
        lbg = self._constraints.getLb()
        ubg = self._constraints.getUb()

        # Objective function/constraints of the NLP
        if not hasattr(self, '_objective'):
            raise ValueError('need to set objective function')
        nlp = CS.MXFunction(CS.nlpIn(x=self._dvMap.vectorize()),
                            CS.nlpOut(f=self._objective, g=g))
        setFXOptions(nlp, constraintFunOpts)
        nlp.init()

        # solver callback (optional)
        if callback is not None:
            nd = self._dvMap.vectorize().size()
            nc = self._constraints.getG().size()

            c = CS.PyFunction(
                callback,
                CS.nlpSolverOut(x=CS.sp_dense(nd, 1),
                                f=CS.sp_dense(1, 1),
                                lam_x=CS.sp_dense(nd, 1),
                                lam_g=CS.sp_dense(nc, 1),
                                lam_p=CS.sp_dense(0, 1),
                                g=CS.sp_dense(nc, 1)), [CS.sp_dense(1, 1)])
            c.init()
            # copy instead of appending so the mutable default argument is not modified in place
            solverOpts = solverOpts + [("iteration_callback", c)]

        # Allocate an NLP solver
        self.solver = CS.IpoptSolver(nlp)
        #        self.solver = CS.WorhpSolver(nlp)
        #        self.solver = CS.SQPMethod(nlp)

        # Set options
        setFXOptions(self.solver, solverOpts)

        # initialize the solver
        self.solver.init()

        # Bounds on g
        self.solver.setInput(lbg, 'lbg')
        self.solver.setInput(ubg, 'ubg')

        ## Nonlinear constraint function, for debugging
        gfcn = CS.MXFunction([self._dvMap.vectorize()], [g])
        gfcn.init()
        setFXOptions(gfcn, constraintFunOpts)
        self._gfcn = gfcn
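A sketch of an iteration callback that could be passed to setupSolver above; it follows the same PyFunction calling convention as the MyCallback class in Example #11 (the IterateLogger name and the ocp object below are hypothetical):

import numpy as np

class IterateLogger(object):
    def __init__(self):
        self.iters = []
    def __call__(self, f, *args):
        # store a copy of the current primal iterate at each Ipopt iteration
        self.iters.append(np.array(f.getInput('x')))

# ocp.setupSolver(solverOpts=[('print_level', 0)], callback=IterateLogger())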
Example #3
def getSteadyState(dae, r0, v0):
    # make steady state model
    g = Constraints()
    g.add(dae.getResidual(), '==', 0, tag=('dae residual', None))

    def constrainInvariantErrs():
        R_n2b = dae['R_n2b']
        makeOrthonormal(g, R_n2b)
        g.add(dae['c'], '==', 0, tag=('c(0)==0', None))
        g.add(dae['cdot'], '==', 0, tag=('cdot(0)==0', None))

    constrainInvariantErrs()

    # constrain airspeed
    g.add(dae['airspeed'], '>=', v0, tag=('airspeed fixed', None))
    g.addBnds(dae['alpha_deg'], (4, 10), tag=('alpha', None))
    g.addBnds(dae['beta_deg'], (-10, 10), tag=('beta', None))

    dvs = C.veccat(
        [dae.xVec(),
         dae.zVec(),
         dae.uVec(),
         dae.pVec(),
         dae.xDotVec()])
    #    ffcn = C.SXFunction([dvs],[sum([dae[n]**2 for n in ['aileron','elevator','y','z']])])
    obj = 0
    obj += (dae['cL'] - 0.5)**2
    obj += dae.ddt('w_bn_b_x')**2
    obj += dae.ddt('w_bn_b_y')**2
    obj += dae.ddt('w_bn_b_z')**2
    ffcn = C.SXFunction([dvs], [obj])
    gfcn = C.SXFunction([dvs], [g.getG()])
    ffcn.init()
    gfcn.init()

    guess = {
        'x': r0,
        'y': 0,
        'z': -1,
        'r': r0,
        'dr': 0,
        'e11': 0,
        'e12': -1,
        'e13': 0,
        'e21': 0,
        'e22': 0,
        'e23': 1,
        'e31': -1,
        'e32': 0,
        'e33': 0,
        'dx': 0,
        'dy': -20,
        'dz': 0,
        'w_bn_b_x': 0,
        'w_bn_b_y': 0,
        'w_bn_b_z': 0,
        'aileron': 0,
        'elevator': 0,
        'rudder': 0,
        'daileron': 0,
        'delevator': 0,
        'drudder': 0,
        'nu': 300,
        'motor_torque': 10,
        'dmotor_torque': 0,
        'ddr': 0,
        'dddr': 0.0,
        'w0': 10.0
    }
    dotGuess = {
        'x': 0,
        'y': -20,
        'z': 0,
        'dx': 0,
        'dy': 0,
        'dz': 0,
        'r': 0,
        'dr': 0,
        'e11': 0,
        'e12': 0,
        'e13': 0,
        'e21': 0,
        'e22': 0,
        'e23': 0,
        'e31': 0,
        'e32': 0,
        'e33': 0,
        'w_bn_b_x': 0,
        'w_bn_b_y': 0,
        'w_bn_b_z': 0,
        'aileron': 0,
        'elevator': 0,
        'rudder': 0,
        'ddr': 0
    }

    guessVec = C.DMatrix([
        guess[n]
        for n in dae.xNames() + dae.zNames() + dae.uNames() + dae.pNames()
    ] + [dotGuess[n] for n in dae.xNames()])

    bounds = {
        'x': (0.01, r0 * 2),
        'y': (0, 0),
        'z': (-r0 * 0.2, -r0 * 0.2),
        'dx': (0, 0),
        'dy': (-50, 0),
        'dz': (0, 0),
        'r': (r0, r0),
        'dr': (0, 0),
        'ddr': (0, 0),
        'e11': (-0.5, 0.5),
        'e12': (-1.5, -0.5),
        'e13': (-0.5, 0.5),
        'e21': (-0.5, 0.5),
        'e22': (-0.5, 0.5),
        'e23': (0.5, 1.5),
        'e31': (-1.5, -0.5),
        'e32': (-0.5, 0.5),
        'e33': (-0.5, 0.5),
        'w_bn_b_x': (0, 0),
        'w_bn_b_y': (0, 0),
        'w_bn_b_z': (0, 0),
        #              'aileron':(-0.2,0.2),'elevator':(-0.2,0.2),'rudder':(-0.2,0.2),
        'aileron': (0, 0),
        'elevator': (0, 0),
        'rudder': (0, 0),
        'daileron': (0, 0),
        'delevator': (0, 0),
        'drudder': (0, 0),
        'nu': (0, 3000),
        'dddr': (0, 0),
        'w0': (10, 10)
    }
    dotBounds = {
        'dz': (-C.inf, 0)
    }  #'dx':(-500,-500),'dy':(-500,500),'dz':(-500,500),
    #                 'w_bn_b_x':(0,0),'w_bn_b_y':(0,0),'w_bn_b_z':(0,0),

    for name in dae.xNames():
        if name not in dotBounds:
            dotBounds[name] = (-C.inf, C.inf)
    boundsVec = [bounds[n] for n in dae.xNames()+dae.zNames()+dae.uNames()+dae.pNames()]+ \
                [dotBounds[n] for n in dae.xNames()]

    #    gfcn.setInput(guessVec)
    #    gfcn.evaluate()
    #    ret = gfcn.output()
    #    for k,v in enumerate(ret):
    #        if math.isnan(v):
    #            print 'index ',k,' is nan: ',g._tags[k]
    #    import sys; sys.exit()

    #    context   = zmq.Context(1)
    #    publisher = context.socket(zmq.PUB)
    #    publisher.bind("tcp://*:5563")
    #    class MyCallback:
    #        def __init__(self):
    #            import rawekite.kiteproto as kiteproto
    #            import rawekite.kite_pb2 as kite_pb2
    #            self.kiteproto = kiteproto
    #            self.kite_pb2 = kite_pb2
    #            self.iter = 0
    #        def __call__(self,f,*args):
    #            x = C.DMatrix(f.input('x'))
    #            sol = {}
    #            for k,name in enumerate(dae.xNames()+dae.zNames()+dae.uNames()+dae.pNames()):
    #                sol[name] = x[k].at(0)
    #            lookup = lambda name: sol[name]
    #            kp = self.kiteproto.toKiteProto(lookup,
    #                                            lineAlpha=0.2)
    #            mc = self.kite_pb2.MultiCarousel()
    #            mc.horizon.extend([kp])
    #            mc.messages.append("iter: "+str(self.iter))
    #            self.iter += 1
    #            publisher.send_multipart(["multi-carousel", mc.SerializeToString()])

    solver = C.IpoptSolver(ffcn, gfcn)

    def addCallback():
        nd = len(boundsVec)
        nc = g.getLb().size()
        c = C.PyFunction(
            MyCallback(),
            C.nlpsolverOut(x=C.sp_dense(nd, 1),
                           f=C.sp_dense(1, 1),
                           lam_x=C.sp_dense(nd, 1),
                           lam_p=C.sp_dense(0, 1),
                           lam_g=C.sp_dense(nc, 1),
                           g=C.sp_dense(nc, 1)), [C.sp_dense(1, 1)])
        c.init()
        solver.setOption("iteration_callback", c)
#    addCallback()

    solver.setOption('max_iter', 10000)
    solver.setOption('expand', True)
    #    solver.setOption('tol',1e-14)
    #    solver.setOption('suppress_all_output','yes')
    #    solver.setOption('print_time',False)
    solver.init()

    solver.setInput(g.getLb(), 'lbg')
    solver.setInput(g.getUb(), 'ubg')
    #guessVec = numpy.load('steady_state_guess.npy')
    solver.setInput(guessVec, 'x0')
    lb, ub = zip(*boundsVec)
    solver.setInput(C.DMatrix(lb), 'lbx')
    solver.setInput(C.DMatrix(ub), 'ubx')

    solver.solve()
    ret = solver.getStat('return_status')
    assert ret in ['Solve_Succeeded',
                   'Solved_To_Acceptable_Level'], 'Solver failed: ' + ret

    #    publisher.close()
    #    context.destroy()
    xOpt = solver.output('x')
    #numpy.save('steady_state_guess',numpy.squeeze(numpy.array(xOpt)))
    k = 0
    sol = {}
    for name in dae.xNames() + dae.zNames() + dae.uNames() + dae.pNames():
        sol[name] = xOpt[k].at(0)
        k += 1
#        print name+':\t',sol[name]
    dotSol = {}
    for name in dae.xNames():
        dotSol[name] = xOpt[k].at(0)
        k += 1
#        print 'DDT('+name+'):\t',dotSol[name]
    return sol, dotSol
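A hypothetical call of getSteadyState, assuming dae is a kite DAE model exposing the state, control, and parameter names used above (the r0 and v0 values are purely illustrative):

sol, dotSol = getSteadyState(dae, r0=100.0, v0=20.0)
print 'steady-state elevator deflection:', sol['elevator']
print 'steady-state rate of z:          ', dotSol['z']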
Example #4
        gliderOcp.guess[gliderOcp._getIdx('vz', k)] = Xguess[3, k]
        gliderOcp.guess[gliderOcp._getIdx('alphaDeg', k)] = uSim[0, k]

#    gliderOcp.guess[gliderOcp._getIdx('x')] = 0
#    gliderOcp.guess[gliderOcp._getIdx('z')] = 0
#    gliderOcp.guess[gliderOcp._getIdx('vx')] = 20
#    gliderOcp.guess[gliderOcp._getIdx('vz')] = -5

    gliderOcp.guess[gliderOcp._getIdx('tEnd')] = pSim[0]

    # Create the NLP
    gfcn = C.SXFunction([gliderOcp.designVariables],
                        [C.vertcat(gliderOcp.G)])  # constraint function

    # Allocate an NLP solver
    solver = C.IpoptSolver(ffcn, gfcn)

    # Set options
    #    solver.setOption("tol",1e-10)
    solver.setOption("tol", 1e-13)
    solver.setOption("hessian_approximation", "limited-memory")
    solver.setOption("derivative_test", "first-order")

    # initialize the solver
    solver.init()

    # Bounds on u and initial condition
    solver.setInput(gliderOcp.lb, C.NLP_SOLVER_LBX)
    solver.setInput(gliderOcp.ub, C.NLP_SOLVER_UBX)
    solver.setInput(gliderOcp.guess, C.NLP_SOLVER_X0)
Example #5
    def _bvp_setup(self):
        """ Set up the casadi ipopt solver for the bvp solution """

        # Define some variables
        deg = self.deg
        nk = self.nk
        NV = nk * (deg + 1) * self.NEQ

        # NLP variable vector
        V = cs.msym("V", NV)
        XD = np.resize(np.array([], dtype=cs.MX), (nk, deg + 1))
        P = cs.msym("P", self.NP * self.nk)
        PK = P.reshape((self.nk, self.NP))

        offset = 0
        for k in range(nk):
            for j in range(deg + 1):
                XD[k][j] = V[offset:offset + self.NEQ]
                offset += self.NEQ

        # Constraint function for the NLP
        g = []
        lbg = []
        ubg = []

        # For all finite elements
        for k in range(nk):
            # For all collocation points
            for j in range(1, deg + 1):
                # Get an expression for the state derivative at the
                # collocation point
                xp_jk = 0
                for j2 in range(deg + 1):
                    # get the time derivative of the differential states
                    # (eq 10.19b)
                    xp_jk += self.coll_setup_dict['C'][j2][j] * XD[k][j2]

                # Generate the parameter set for this element (PK[k, :]) and
                # add the collocation equations to the NLP
                [fk] = self.rfmod.call(
                    [0., xp_jk / self.h, XD[k][j], PK[k, :].T])

                # impose system dynamics (for the differential states
                # (eq
                # 10.19b))
                g += [fk[:self.NEQ]]
                lbg.append(np.zeros(self.NEQ))  # equality constraints
                ubg.append(np.zeros(self.NEQ))  # equality constraints

            # Get an expression for the state at the end of the finite
            # element
            xf_k = 0
            for j in range(deg + 1):
                xf_k += self.coll_setup_dict['D'][j] * XD[k][j]

            # Add continuity equation to NLP
            # End = Beginning of next
            if k + 1 != nk:
                g += [XD[k + 1][0] - xf_k]
            else:
                # At the last segment, impose periodicity constraints
                g += [XD[0][0] - xf_k]
            lbg.append(np.zeros(self.NEQ))
            ubg.append(np.zeros(self.NEQ))

        # Nonlinear constraint function
        gfcn = cs.MXFunction([V, P], [cs.vertcat(g)])
        # Objective function (periodicity)
        ofcn = cs.MXFunction([V, P], [cs.sumAll(g[-1]**2)])

        ## ----
        ## SOLVE THE NLP
        ## ----

        # Allocate an NLP solver
        self.solver = cs.IpoptSolver(ofcn, gfcn)

        # Set options
        self.solver.setOption("expand_f", True)
        self.solver.setOption("expand_g", True)
        self.solver.setOption("generate_hessian", True)
        self.solver.setOption("max_iter", 1000)
        self.solver.setOption("tol", self.int_opt['bvp_tol'])
        self.solver.setOption("constr_viol_tol",
                              self.int_opt['bvp_constr_tol'])
        self.solver.setOption("linear_solver",
                              self.int_opt['bvp_linear_solver'])
        self.solver.setOption('parametric', True)
        self.solver.setOption('print_level', self.int_opt['bvp_print_level'])

        # initialize the self.solver
        self.solver.init()
        self.lbg = lbg
        self.ubg = ubg

        self.need_bvp_setup = False
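The collocation relations implemented by the loops above (the "eq 10.19b" referenced in the comments) can be summarized as

$$\dot{x}(t_{k,j}) \;\approx\; \frac{1}{h}\sum_{j_2=0}^{d} C_{j_2,j}\,x_{k,j_2}, \qquad x_k(t_{k+1}) \;=\; \sum_{j=0}^{d} D_j\,x_{k,j},$$

where $C$ and $D$ are the collocation derivative and end-point weights, $h$ is the element length, and $d$ the collocation degree. Each element's end state must equal the first state of the next element, and the last element is tied back to the first one, which is exactly what the continuity and periodicity constraints above encode.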
Example #6
sigma = casadi.SX('sigma')

lam = []
Lag = sigma * cost
for i in range(len(g)):
    lam.append(casadi.SX('lambda_' + str(i)))
    Lag = Lag + g[i] * lam[i]

Lag_fcn = casadi.SXFunction([xx, lam, [sigma]], [[Lag]])
H = Lag_fcn.hess()

#H_fcn = casadi.SXFunction([xx, lam, [sigma]],[H])
H_fcn = Lag_fcn.hessian(0, 0)

# Create Solver
solver = casadi.IpoptSolver(obj, g_res, H_fcn)

# Set options
solver.setOption("tol", 1e-10)
#solver.setOption("derivative_test",'second-order')
#solver.setOption("max_iter",30)
#solver.setOption("hessian_approximation","limited-memory")

# Initialize
solver.init()

# Initial condition
x_initial_guess = len(xx) * [0]
solver.setInput(x_initial_guess, casadi.NLP_X_INIT)

# Bounds on x
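The exact Hessian assembled in this example is that of the NLP Lagrangian, scaled by the objective factor sigma that Ipopt expects:

$$\mathcal{L}(x,\lambda,\sigma) \;=\; \sigma\,f(x) + \sum_i \lambda_i\,g_i(x), \qquad H \;=\; \nabla_{xx}^2\,\mathcal{L}(x,\lambda,\sigma),$$

with $f$ the cost, $g_i$ the constraint residuals, and $\lambda_i$ the multipliers; H_fcn above is the Hessian of Lag_fcn with respect to its first input xx.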
Example #7
    def CollocationSetup(self, warmstart=False):
        """
        Sets up NLP for collocation solution. Constructs initial guess
        arrays, constructs constraint and objective functions, and
        otherwise passes arguments to the correct places. This setup looks
        inefficient and is probably unnecessary to repeat for successive
        runs with new data; it is unclear how much time it takes compared
        to the NLP solution itself.

        Run immediately before CollocationSolve.
        """

        # Dimensions of the problem
        nx = self.NVAR  # total number of states
        ndiff = nx  # number of differential states
        nalg = 0  # number of algebraic states
        nu = 0  # number of controls

        # Collocated variables
        NXD = self.NICP * self.NK * (self.DEG +
                                     1) * ndiff  # differential states
        NXA = self.NICP * self.NK * self.DEG * nalg  # algebraic states
        NU = self.NK * nu  # Parametrized controls
        NV = NXD + NXA + NU + self.NP + self.NMP  # Total variables
        self.NV = NV

        # NLP variable vector
        V = cs.msym("V", NV)

        # All variables with bounds and initial guess
        vars_lb = np.zeros(NV)
        vars_ub = np.zeros(NV)
        vars_init = np.zeros(NV)
        offset = 0

        #
        # Split NLP vector into useable slices
        #
        # Get the parameters
        P = V[offset:offset + self.NP]
        vars_init[offset:offset + self.NP] = self.NLPdata['p_init']
        vars_lb[offset:offset + self.NP] = self.NLPdata['p_min']
        vars_ub[offset:offset + self.NP] = self.NLPdata['p_max']

        offset += self.NP  # indexing variable

        # Get collocated states and parametrized control
        XD = np.resize(np.array([], dtype=cs.MX),
                       (self.NK, self.NICP, self.DEG + 1))
        # NB: same name as above
        XA = np.resize(np.array([], dtype=cs.MX),
                       (self.NK, self.NICP, self.DEG))
        # NB: same name as above
        U = np.resize(np.array([], dtype=cs.MX), self.NK)

        # Prepare the starting data matrix vars_init, vars_ub, and
        # vars_lb, by looping over finite elements, states, etc. Also
        # groups the variables in the large unknown vector V into XD and
        # XA(unused) for later indexing
        for k in range(self.NK):
            # Collocated states
            for i in range(self.NICP):
                #
                for j in range(self.DEG + 1):

                    # Get the expression for the state vector
                    XD[k][i][j] = V[offset:offset + ndiff]
                    if j != 0:
                        XA[k][i][j - 1] = V[offset + ndiff:offset + ndiff +
                                            nalg]
                    # Add the initial condition
                    index = (self.DEG + 1) * (self.NICP * k + i) + j
                    if k == 0 and j == 0 and i == 0:
                        vars_init[offset:offset+ndiff] = \
                            self.NLPdata['xD_init'][index,:]

                        vars_lb[offset:offset+ndiff] = \
                                self.NLPdata['xDi_min']
                        vars_ub[offset:offset+ndiff] = \
                                self.NLPdata['xDi_max']
                        offset += ndiff
                    else:
                        if j != 0:
                            vars_init[offset:offset+nx] = \
                            np.append(self.NLPdata['xD_init'][index,:],
                                      self.NLPdata['xA_init'][index,:])

                            vars_lb[offset:offset+nx] = \
                            np.append(self.NLPdata['xD_min'],
                                      self.NLPdata['xA_min'])

                            vars_ub[offset:offset+nx] = \
                            np.append(self.NLPdata['xD_max'],
                                      self.NLPdata['xA_max'])

                            offset += nx
                        else:
                            vars_init[offset:offset+ndiff] = \
                                    self.NLPdata['xD_init'][index,:]

                            vars_lb[offset:offset+ndiff] = \
                                    self.NLPdata['xD_min']

                            vars_ub[offset:offset+ndiff] = \
                                    self.NLPdata['xD_max']

                            offset += ndiff

            # Parametrized controls (unused here)
            U[k] = V[offset:offset + nu]

        # Attach these initial conditions to external dictionary
        self.NLPdata['v_init'] = vars_init
        self.NLPdata['v_ub'] = vars_ub
        self.NLPdata['v_lb'] = vars_lb

        # Setting up the constraint function for the NLP. Over each
        # collocated state, ensure continuity and system dynamics
        g = []
        lbg = []
        ubg = []

        # For all finite elements
        for k in range(self.NK):
            for i in range(self.NICP):
                # For all collocation points
                for j in range(1, self.DEG + 1):
                    # Get an expression for the state derivative
                    # at the collocation point
                    xp_jk = 0
                    for j2 in range(self.DEG + 1):
                        # get the time derivative of the differential
                        # states (eq 10.19b)
                        xp_jk += self.C[j2][j] * XD[k][i][j2]

                    # Add collocation equations to the NLP
                    [fk] = self.rfmod.call([
                        0., xp_jk / self.h, XD[k][i][j], XA[k][i][j - 1], U[k],
                        P
                    ])

                    # impose system dynamics (for the differential
                    # states (eq 10.19b))
                    g += [fk[:ndiff]]
                    lbg.append(np.zeros(ndiff))  # equality constraints
                    ubg.append(np.zeros(ndiff))  # equality constraints

                    # impose system dynamics (for the algebraic states
                    # (eq 10.19b)) (unused)
                    g += [fk[ndiff:]]
                    lbg.append(np.zeros(nalg))  # equality constraints
                    ubg.append(np.zeros(nalg))  # equality constraints

                # Get an expression for the state at the end of the finite
                # element
                xf_k = 0
                for j in range(self.DEG + 1):
                    xf_k += self.D[j] * XD[k][i][j]

                # if i==self.NICP-1:

                # Add continuity equation to NLP
                if k + 1 != self.NK:  # End = Beginning of next
                    g += [XD[k + 1][0][0] - xf_k]
                    lbg.append(-self.NLPdata['CONtol'] * np.ones(ndiff))
                    ubg.append(self.NLPdata['CONtol'] * np.ones(ndiff))

                else:  # At the last segment
                    # Periodicity constraints (only for NEQ)
                    g += [XD[0][0][0][:self.NEQ] - xf_k[:self.NEQ]]
                    lbg.append(-self.NLPdata['CONtol'] * np.ones(self.NEQ))
                    ubg.append(self.NLPdata['CONtol'] * np.ones(self.NEQ))

                # else:
                #     g += [XD[k][i+1][0] - xf_k]

        # Flatten constraint arrays for the last addition
        lbg = np.concatenate(lbg).tolist()
        ubg = np.concatenate(ubg).tolist()

        # Constraint to protect against fixed point solutions
        if self.NLPdata['FPgaurd'] is True:
            fout = self.model.call(
                cs.daeIn(t=self.tgrid[0],
                         x=XD[0, 0, 0][:self.NEQ],
                         p=V[:self.NP]))[0]
            g += [cs.MX(cs.sumAll(fout**2))]
            lbg.append(np.array(self.NLPdata['FPTOL']))
            ubg.append(np.array(cs.inf))

        elif self.NLPdata['FPgaurd'] == 'all':
            fout = self.model.call(
                cs.daeIn(t=self.tgrid[0],
                         x=XD[0, 0, 0][:self.NEQ],
                         p=V[:self.NP]))[0]
            g += [cs.MX(fout**2)]
            lbg += [self.NLPdata['FPTOL']] * self.NEQ
            ubg += [cs.inf] * self.NEQ

        # Nonlinear constraint function
        gfcn = cs.MXFunction([V], [cs.vertcat(g)])

        # Minimize derivative of first state variable at t=0
        xp_0 = 0
        for j in range(self.NLPdata['DEG'] + 1):
            # get the time derivative of the differential
            # states (eq 10.19b)
            xp_0 += self.C[j][0] * XD[0][0][j]  # first element, first interval, collocation point j

        obj = xp_0
        ofcn = cs.MXFunction([V], [obj])

        self.CollocationSolver = cs.IpoptSolver(ofcn, gfcn)

        for opt, val in self.IpoptOpts.iteritems():
            self.CollocationSolver.setOption(opt, val)

        self.CollocationSolver.setOption('obj_scaling_factor', len(vars_init))

        if warmstart:
            self.CollocationSolver.setOption('warm_start_init_point', 'yes')

        # initialize the self.CollocationSolver
        self.CollocationSolver.init()

        # Initial condition
        self.CollocationSolver.setInput(vars_init, cs.NLP_X_INIT)

        # Bounds on x
        self.CollocationSolver.setInput(vars_lb, cs.NLP_LBX)
        self.CollocationSolver.setInput(vars_ub, cs.NLP_UBX)

        # Bounds on g
        self.CollocationSolver.setInput(np.array(lbg), cs.NLP_LBG)
        self.CollocationSolver.setInput(np.array(ubg), cs.NLP_UBG)

        if warmstart:
            self.CollocationSolver.setInput( \
                    self.WarmStartData['NLP_X_OPT'],cs.NLP_X_INIT)
            self.CollocationSolver.setInput( \
                    self.WarmStartData['NLP_LAMBDA_G'],cs.NLP_LAMBDA_INIT)
            self.CollocationSolver.setOutput( \
                    self.WarmStartData['NLP_LAMBDA_X'],cs.NLP_LAMBDA_X)
Example #8
def idSystem(makePlots=False):
    # parameters
    k = 5.0  # spring constant
    b = 0.4  # viscous damping

    # noise
    Q = N.matrix(N.diag([5e-3, 1e-2])**2)
    R = N.matrix(N.diag([0.03, 0.06])**2)

    # observation function
    def h(x):
        yOut = x
        return yOut

    # Time length
    T = 20.0

    # Shooting length
    nu = 150  # Number of control segments
    DT = N.double(T) / nu

    # Create integrator
    #integrator = create_integrator_cvodes()
    integrator = create_integrator_rk4()

    # Initialize the integrator
    integrator.init()

    ############# simulation
    # initial state
    s0 = 0  # initial position
    v0 = 1  # initial speed

    xNext = [s0, v0]

    Utrue = []
    Xtrue = [[s0, v0]]
    Y = [[s0, v0]]
    time = [0]

    for j in range(nu):
        u = N.sin(N.double(j) / 10.0)

        integrator.setInput(j * DT, C.INTEGRATOR_T0)
        integrator.setInput((j + 1) * DT, C.INTEGRATOR_TF)
        integrator.setInput([u, k, b], C.INTEGRATOR_P)
        integrator.setInput(xNext, C.INTEGRATOR_X0)
        integrator.evaluate()

        xNext = list(integrator.getOutput())
        x = list(integrator.getOutput())
        y = list(integrator.getOutput())

        # state record
        w = N.random.multivariate_normal([0, 0], Q)
        x[0] += w[0]
        x[1] += w[1]

        # measurement
        v = N.random.multivariate_normal([0, 0], R)
        y[0] += v[0]
        y[1] += v[1]

        Xtrue.append(x)
        Y.append(y)
        Utrue.append(u)
        time.append(time[-1] + DT)

    ############## parameter estimation #################
    # Integrate over all intervals

    T0 = C.MX(0)  # Beginning of time interval (changed from k*DT due to probable Sundials bug)
    TF = C.MX(DT)  # End of time interval (changed from (k+1)*DT due to probable Sundials bug)

    design_variables = C.MX('design_variables', 2 + 2 * (nu + 1))
    k_hat = design_variables[0]
    b_hat = design_variables[1]

    x_hats = []
    for j in range(nu + 1):
        x_hats.append(design_variables[2 + 2 * j:2 + 2 * j + 2])

    logLiklihood = C.MX(0)

    # logLiklihood += sensor error
    for j in range(nu + 1):
        yj = C.MX(2, 1)
        yj[0, 0] = C.MX(Y[j][0])
        yj[1, 0] = C.MX(Y[j][1])
        xj = x_hats[j]

        # ll = -1/2 * err^T * R^-1 * err
        err = yj - h(xj)
        ll = -C.MX(0.5) * C.prod(err.T, C.prod(C.MX(R.I), err))
        logLiklihood += ll

    # logLiklihood += dynamics constraint
    xdot = C.MX('xdot', 2, 1)
    for j in range(nu):
        xj = x_hats[j]
        Xnext = integrator(
            [T0, TF, xj,
             C.vertcat([Utrue[j], k_hat, b_hat]), xdot,
             C.MX()])

        err = Xnext - x_hats[j + 1]
        ll = -C.MX(0.5) * C.prod(err.T, C.prod(C.MX(Q.I), err))
        logLiklihood += ll

    # Objective function
    F = -logLiklihood

    # Terminal constraints
    G = k_hat - C.MX(20.0)

    # Create the NLP
    ffcn = C.MXFunction([design_variables], [F])  # objective function
    gfcn = C.MXFunction([design_variables], [G])  # constraint function

    # Allocate an NLP solver
    #solver = C.IpoptSolver(ffcn,gfcn)
    solver = C.IpoptSolver(ffcn)

    # Set options
    solver.setOption("tol", 1e-10)
    solver.setOption("hessian_approximation", "limited-memory")
    solver.setOption("derivative_test", "first-order")

    # initialize the solver
    solver.init()

    # Bounds on u and initial condition
    kb_min = [1, 0.1] + [-10 for j in range(2 * (nu + 1))]  # lower bound
    solver.setInput(kb_min, C.NLP_SOLVER_LBX)

    kb_max = [10, 1] + [10 for j in range(2 * (nu + 1))]  # upper bound
    solver.setInput(kb_max, C.NLP_SOLVER_UBX)

    guess = []
    for y in Y:
        guess.append(y[0])
        guess.append(y[1])
    kb_sol = [5, 0.4] + guess  # initial guess
    solver.setInput(kb_sol, C.NLP_SOLVER_X0)

    # Bounds on g
    #Gmin = Gmax = [] #[10, 0]
    #solver.setInput(Gmin,C.NLP_SOLVER_LBG)
    #solver.setInput(Gmax,C.NLP_SOLVER_UBG)

    # Solve the problem
    solver.solve()

    # Get the solution
    xopt = solver.getOutput(C.NLP_SOLVER_X)

    # estimated parameters:
    print ""
    print "(real, estimated) k = (" + str(k) + ", " + str(
        xopt[0]) + "),\t" + str(100.0 * (k - xopt[0]) / k) + " % error"
    print "(real, estimated) b = (" + str(b) + ", " + str(
        xopt[1]) + "),\t" + str(100.0 * (b - xopt[1]) / b) + " % error"

    # estimated state:
    s_est = []
    v_est = []
    for j in range(0, nu + 1):
        s_est.append(xopt[2 + 2 * j])
        v_est.append(xopt[2 + 2 * j + 1])

    # make plots
    if makePlots:
        plt.figure()
        plt.clf()

        plt.subplot(2, 1, 1)
        plt.xlabel('time')
        plt.ylabel('position')
        plt.plot(time, [x[0] for x in Xtrue])
        plt.plot(time, [y[0] for y in Y], '.')
        plt.plot(time, s_est)
        plt.legend(['true', 'meas', 'est'])

        plt.subplot(2, 1, 2)
        plt.xlabel('time')
        plt.ylabel('velocity')
        plt.plot(time, [x[1] for x in Xtrue])
        plt.plot(time, [y[1] for y in Y], '.')
        plt.plot(time, v_est)
        plt.legend(['true', 'meas', 'est'])

        #    plt.subplot(3,1,3)
        #    plt.xlabel('time')
        #    plt.ylabel('control input')
        #    plt.plot(time[:-1], Utrue, '.')

        plt.show()

    return {'k': xopt[0], 'b': xopt[1]}
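The objective built in idSystem is the negative Gaussian log-likelihood of the measurements plus the shooting-gap penalty, i.e. up to constant terms

$$\log L \;=\; -\tfrac{1}{2}\sum_{j=0}^{n_u}\big(y_j - h(\hat{x}_j)\big)^{\top} R^{-1}\big(y_j - h(\hat{x}_j)\big) \;-\; \tfrac{1}{2}\sum_{j=0}^{n_u-1}\big(x_j^{+} - \hat{x}_{j+1}\big)^{\top} Q^{-1}\big(x_j^{+} - \hat{x}_{j+1}\big),$$

where $x_j^{+}$ is the integrator prediction started from $\hat{x}_j$ with the recorded input $u_j$ and the estimated parameters $\hat{k}, \hat{b}$; the NLP then minimizes $F = -\log L$.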
Example #9
 def __init__(self, dae, conf, omega0, r0, ref_dict = {},
              bounds = None, dotBounds = None, guess = None, dotGuess = None,
              robustVersion = False, verbose = True):
     
     self.dae, self.conf = dae, conf
     self.omega0 = omega0
     self.r0 = r0
     self.ref_dict = ref_dict
     
     self.robustVersion = robustVersion
     if robustVersion == True:
         self.dvs, self.ffcn, self.gfcn, self.lbg, self.ubg, zSol = self.getRobustSteadyStateNlpFunctions(dae, ref_dict)
         self.dvsNames = dae.xNames() + dae.uNames() + dae.pNames()
         
         zIn = C.veccat([dae[ name ] for name in self.dvsNames])
         zOut = C.veccat([zSol[ el ] for el in dae.zNames()])
         self.zFcn = C.SXFunction([ zIn ], [ zOut ])
         self.zFcn.init()
         
     else:
         self.dvs, self.ffcn, self.gfcn, self.lbg, self.ubg = self.getSteadyStateNlpFunctions(dae, ref_dict)
         self.dvsNames = dae.xNames() + dae.zNames() + dae.uNames() + dae.pNames()
     
     self.nlp = C.SXFunction(C.nlpIn(x = self.dvs), C.nlpOut(f = self.ffcn, g = self.gfcn))
     self.solver = C.IpoptSolver( self.nlp )
     
     self.solver.setOption('max_iter', 10000)
     self.solver.setOption('tol', 1e-9)
     if verbose is False:
         self.solver.setOption('suppress_all_output', 'yes')
         self.solver.setOption('print_time', False)
     self.solver.init()
     
     self.guess = guess
     if self.guess is None:
         self.guess = {'x':r0, 'y':0, 'z':0,
                       'r':r0, 'dr':0,
                       'e11':0, 'e12':-1, 'e13':0,
                       'e21':0, 'e22':0, 'e23':1,
                       'e31':-1, 'e32':0, 'e33':0,
                       'dx':0, 'dy':0, 'dz':0,
                       'w_bn_b_x':0, 'w_bn_b_y':omega0, 'w_bn_b_z':0,
                       'ddelta':omega0,
                       'cos_delta':1, 'sin_delta':0,
                       'aileron':0, 'elevator':0, 'rudder':0, 'flaps':0,
                       'daileron':0, 'delevator':0, 'drudder':0, 'dflaps':0,
                       'nu':100, 'motor_torque':0,
                       'dmotor_torque':0, 'ddr':0,
                       'dddr':0.0, 'w0':0.0,
                      
                       'dt1_disturbance':0.0, 'dt2_disturbance':0.0, 'dt3_disturbance':0.0,
                       't1_disturbance':0.0, 't2_disturbance':0.0, 't3_disturbance':0.0,
                      
                       'df1_disturbance':0.0, 'df2_disturbance':0.0, 'df3_disturbance':0.0,
                       'f1_disturbance':0.0, 'f2_disturbance':0.0, 'f3_disturbance':0.0,
                      
                       'wind_x':0.0, 'wind_y':0.0, 'wind_z':0.0,
                       'delta_wind_x':0.0, 'delta_wind_y':0.0, 'delta_wind_z':0.0,
                       'alpha0': 0.0}
     self.dotGuess = dotGuess
     if self.dotGuess is None:
         self.dotGuess = {'x':0, 'y':0, 'z':0, 'dx':0, 'dy':0, 'dz':0,
                          'r':0, 'dr':0,
                          'e11':0, 'e12':0, 'e13':0,
                          'e21':0, 'e22':0, 'e23':0,
                          'e31':0, 'e32':0, 'e33':0,
                          'w_bn_b_x':0, 'w_bn_b_y':0, 'w_bn_b_z':0,
                          'ddelta':0,
                          'cos_delta':0, 'sin_delta':omega0,
                          'aileron':0, 'elevator':0, 'rudder':0, 'flaps':0,
                          'motor_torque':0, 'ddr':0,
                     
                          'dt1_disturbance':0.0, 'dt2_disturbance':0.0, 'dt3_disturbance':0.0,
                          't1_disturbance':0.0, 't2_disturbance':0.0, 't3_disturbance':0.0,
                     
                          'df1_disturbance':0.0, 'df2_disturbance':0.0, 'df3_disturbance':0.0,
                          'f1_disturbance':0.0, 'f2_disturbance':0.0, 'f3_disturbance':0.0,
                     
                          'wind_x':0.0, 'wind_y':0.0, 'wind_z':0.0,
                          'delta_wind_x':0.0, 'delta_wind_y':0.0, 'delta_wind_z':0.0,
                          'alpha0': 0.0}
     
     self.bounds = bounds
     if self.bounds is None:
         self.bounds = {'x':(-0.1 * r0, r0 * 2), 'y':(-1.1 * r0, 1.1 * r0), 'z':(-1.1 * r0, 1.1 * r0),
                        'dx':(0, 0), 'dy':(0, 0), 'dz':(0, 0),
                        'r':(r0, r0), 'dr':(-100, 100),
                        'e11':(-2, 2), 'e12':(-2, 2), 'e13':(-2, 2),
                        'e21':(-2, 2), 'e22':(-2, 2), 'e23':(-2, 2),
                        'e31':(-2, 2), 'e32':(-2, 2), 'e33':(-2, 2),
                        'w_bn_b_x':(-50, 50), 'w_bn_b_y':(-50, 50), 'w_bn_b_z':(-50, 50),
                        'ddelta':(omega0, omega0),
                        'cos_delta':(1, 1), 'sin_delta':(0, 0),
                        'aileron':(-0.2, 0.2), 'elevator':(-0.2, 0.2), 'rudder':(-0.2, 0.2), 'flaps':(-0.2, 0.2),
                        'daileron':(0, 0), 'delevator':(0, 0), 'drudder':(0, 0), 'dflaps':(0, 0),
                        'nu':(0, 3000), 'motor_torque':(-1000, 1000),
                        'ddr':(0, 0),
                        'dmotor_torque':(0, 0), 'dddr':(0, 0), 'w0':(0, 0),
                        'dt1_disturbance':(0, 0), 'dt2_disturbance':(0, 0), 'dt3_disturbance':(0, 0),
                        't1_disturbance':(0, 0), 't2_disturbance':(0, 0), 't3_disturbance':(0, 0),
                  
                        'df1_disturbance':(0, 0), 'df2_disturbance':(0, 0), 'df3_disturbance':(0, 0),
                        'f1_disturbance':(0, 0), 'f2_disturbance':(0, 0), 'f3_disturbance':(0, 0),
                  
                        'wind_x':(0, 0), 'wind_y':(0, 0), 'wind_z':(0, 0),
                        'delta_wind_x':(0, 0), 'delta_wind_y':(0, 0), 'delta_wind_z':(0, 0),
                        'alpha0': (0.0, 0.0)}
     
     if ref_dict is not None:
         for name in ref_dict: self.bounds[name] = ref_dict[name]
     
     self.dotBounds = dotBounds
     if self.dotBounds is None:
         self.dotBounds = {'x':(-1, 1), 'y':(-1, 1), 'z':(-1, 1),
                           'dx':(0, 0), 'dy':(0, 0), 'dz':(0, 0),
                           'r':(-100, 100), 'dr':(-1, 1),
                           'e11':(-50, 50), 'e12':(-50, 50), 'e13':(-50, 50),
                           'e21':(-50, 50), 'e22':(-50, 50), 'e23':(-50, 50),
                           'e31':(-50, 50), 'e32':(-50, 50), 'e33':(-50, 50),
                           'w_bn_b_x':(0, 0), 'w_bn_b_y':(0, 0), 'w_bn_b_z':(0, 0),
                           'ddelta':(0, 0),
                           'cos_delta':(-1, 1), 'sin_delta':(omega0 - 1, omega0 + 1),
                           'aileron':(-1, 1), 'elevator':(-1, 1), 'rudder':(-1, 1), 'flaps':(-1, 1),
                           'motor_torque':(-1000, 1000), 'ddr':(-100, 100),
                           'dt1_disturbance':(0, 0), 'dt2_disturbance':(0, 0), 'dt3_disturbance':(0, 0),
                           't1_disturbance':(0, 0), 't2_disturbance':(0, 0), 't3_disturbance':(0, 0),
                          
                           'df1_disturbance':(0, 0), 'df2_disturbance':(0, 0), 'df3_disturbance':(0, 0),
                           'f1_disturbance':(0, 0), 'f2_disturbance':(0, 0), 'f3_disturbance':(0, 0),
                          
                           'wind_x':(0, 0), 'wind_y':(0, 0), 'wind_z':(0, 0),
                           'delta_wind_x':(0, 0), 'delta_wind_y':(0, 0), 'delta_wind_z':(0, 0),
                           'alpha0': (0.0, 0.0)}
         
     if self.robustVersion == True:
         # Joris's recommendation
         for name in self.dae.xNames():
             if name in ["r", "ddelta"] or "disturbance" in name:
                 continue
             
             # Override bounds
             if self.bounds[name][0] == self.bounds[name][1]:
                 self.bounds[name] = (-1e12, 1e12)
                 
             # Override dotBounds
             if self.dotBounds[name][0] == self.dotBounds[name][1]:
                 self.dotBounds[name] = (-1e12, 1e12)
             if name not in  ["aileron", "elevator", "motor_torque", "ddr", "sin_delta"]:
                 self.dotBounds[ name ] = (0, 0)
Example #10
            callerLines.append(callerName + " fun (" +
                               string.capitalize(name) +
                               " x) = unsafeSetOption' fun \"" + name + "\" x")
    return (adtLines, callerLines)


sxs = writeADT("SXFunctionOption", "sxFunctionUnsafeSetOption", "SXFunction",
               f)
for line in sxs[0]:
    print line
print ""
for line in sxs[1]:
    print line
print ""

s = casadi.IpoptSolver(f)
nlps = writeADT("NLPSolverOption", "nlpSolverUnsafeSetOption", "NLPSolver", s)
for line in nlps[0]:
    print line
for line in nlps[1]:
    print line

s = casadi.IdasIntegrator(f)
nlps = writeADT("IdasOption", "idasUnsafeSetOption", "SXFunction", s)
for line in nlps[0]:
    print line
for line in nlps[1]:
    print line

s = casadi.CVodesIntegrator(f)
nlps = writeADT("CvodesOption", "cvodesUnsafeSetOption", "SXFunction", s)
Example #11
class MyCallback:
    def __init__(self):
        self.iters = []
    def __call__(self,f,*args):
        self.iters.append(numpy.array(f.getInput("x")))
mycallback = MyCallback()
pyfun = C.PyFunction( mycallback, C.nlpSolverOut(x=C.sp_dense(dvs.size,1),
                                                 f=C.sp_dense(1,1),
                                                 lam_x=C.sp_dense(dvs.size,1),
                                                 lam_g = C.sp_dense(g.size,1),
                                                 lam_p = C.sp_dense(0,1),
                                                 g = C.sp_dense(g.size,1) ),
                      [C.sp_dense(1,1)] )
pyfun.init()

solver = C.IpoptSolver(nlp)
solver.setOption('tol',1e-11)
solver.setOption('linear_solver','ma27')
#solver.setOption('linear_solver','ma57')
solver.setOption("iteration_callback",pyfun)
solver.init()

lbg = g(solver.input("lbg"))
ubg = g(solver.input("ubg"))
lbx = dvs(solver.input("lbx"))
ubx = dvs(solver.input("ubx"))
x0 = dvs(solver.input("x0"))

lbg["makeMeZero"] = 0.0
ubg["makeMeZero"] = 0.0
Example #12
    def collocation_solver_setup(self, warmstart=False):
        """
        Sets up NLP for collocation solution. Constructs initial guess
        arrays, constructs constraint and objective functions, and
        otherwise passes arguments to the correct places. This setup looks
        inefficient and is probably unnecessary to repeat for successive
        runs with new data; it is unclear how much time it takes compared
        to the NLP solution itself.
        Run immediately before CollocationSolve.
        """
        
        # Dimensions of the problem
        nx    = self.NVAR # total number of states
        ndiff = nx        # number of differential states
        nalg  = 0         # number of algebraic states
        nu    = 0         # number of controls

        # Collocated variables
        NXD = self.nk*(self.deg+1)*ndiff # differential states 
        NXA = self.nk*self.deg*nalg      # algebraic states
        NU  = self.nk*nu                 # Parametrized controls
        NV  = NXD+NXA+NU+self.ParamCount+self.NMP # Total variables
        self.NV = NV

        # NLP variable vector
        V = cs.msym("V",NV)
          
        # All variables with bounds and initial guess
        vars_lb   = np.zeros(NV)
        vars_ub   = np.zeros(NV)
        vars_init = np.zeros(NV)
        offset    = 0
        
        ## I AM HERE ##
        
        
        #
        # Split NLP vector into useable slices
        #
        # Get the parameters
        P = V[offset:offset+self.ParamCount]
        vars_init[offset:offset+self.ParamCount] = self.NLPdata['p_init']
        vars_lb[offset:offset+self.ParamCount]   = self.NLPdata['p_min']
        vars_ub[offset:offset+self.ParamCount]   = self.NLPdata['p_max']

        # Initial conditions for measurement adjustment
        MP = V[self.NV-self.NMP:]
        vars_init[self.NV-self.NMP:] = np.ones(self.NMP)
        vars_lb[self.NV-self.NMP:] = 0.1*np.ones(self.NMP) 
        vars_ub[self.NV-self.NMP:] = 10*np.ones(self.NMP)


        pdb.set_trace()
        
        offset += self.np # indexing variable

        # Get collocated states and parametrized control
        XD = np.resize(np.array([], dtype=cs.MX), (self.nk, self.points_per_interval,
                                                   self.deg+1)) 
        # NB: same name as above
        XA = np.resize(np.array([],dtype=cs.MX),(self.nk,self.points_per_interval,self.deg)) 
        # NB: same name as above
        U = np.resize(np.array([],dtype=cs.MX),self.nk)

        # Prepare the starting data matrix vars_init, vars_ub, and
        # vars_lb, by looping over finite elements, states, etc. Also
        # groups the variables in the large unknown vector V into XD and
        # XA(unused) for later indexing
        for k in range(self.nk):  
            # Collocated states
            for i in range(self.points_per_interval):
                #
                for j in range(self.deg+1):
                              
                    # Get the expression for the state vector
                    XD[k][i][j] = V[offset:offset+ndiff]
                    if j !=0:
                        XA[k][i][j-1] = V[offset+ndiff:offset+ndiff+nalg]
                    # Add the initial condition
                    index = (self.deg+1)*(self.points_per_interval*k+i) + j
                    if k==0 and j==0 and i==0:
                        vars_init[offset:offset+ndiff] = \
                            self.NLPdata['xD_init'][index,:]
                        
                        vars_lb[offset:offset+ndiff] = \
                                self.NLPdata['xDi_min']
                        vars_ub[offset:offset+ndiff] = \
                                self.NLPdata['xDi_max']
                        offset += ndiff
                    else:
                        if j!=0:
                            vars_init[offset:offset+nx] = \
                            np.append(self.NLPdata['xD_init'][index,:],
                                      self.NLPdata['xA_init'][index,:])
                            
                            vars_lb[offset:offset+nx] = \
                            np.append(self.NLPdata['xD_min'],
                                      self.NLPdata['xA_min'])

                            vars_ub[offset:offset+nx] = \
                            np.append(self.NLPdata['xD_max'],
                                      self.NLPdata['xA_max'])

                            offset += nx
                        else:
                            vars_init[offset:offset+ndiff] = \
                                    self.NLPdata['xD_init'][index,:]

                            vars_lb[offset:offset+ndiff] = \
                                    self.NLPdata['xD_min']

                            vars_ub[offset:offset+ndiff] = \
                                    self.NLPdata['xD_max']

                            offset += ndiff
            
            # Parametrized controls (unused here)
            U[k] = V[offset:offset+nu]

        # Attach these initial conditions to external dictionary
        self.NLPdata['v_init'] = vars_init
        self.NLPdata['v_ub'] = vars_ub
        self.NLPdata['v_lb'] = vars_lb

        # Setting up the constraint function for the NLP. Over each
        # collocated state, ensure continuity and system dynamics
        g = []
        lbg = []
        ubg = []

        # For all finite elements
        for k in range(self.nk):
            for i in range(self.points_per_interval):
                # For all collocation points
                for j in range(1,self.deg+1):   		
                    # Get an expression for the state derivative
                    # at the collocation point
                    xp_jk = 0
                    for j2 in range (self.deg+1):
                        # get the time derivative of the differential
                        # states (eq 10.19b)
                        xp_jk += self.C[j2][j]*XD[k][i][j2]
                    
                    # Add collocation equations to the NLP
                    [fk] = self.rfmod.call([0., xp_jk/self.h,
                                            XD[k][i][j], XA[k][i][j-1],
                                            U[k], P])
                    
                    # impose system dynamics (for the differential
                    # states (eq 10.19b))
                    g += [fk[:ndiff]]
                    lbg.append(np.zeros(ndiff)) # equality constraints
                    ubg.append(np.zeros(ndiff)) # equality constraints

                    # impose system dynamics (for the algebraic states
                    # (eq 10.19b)) (unused)
                    g += [fk[ndiff:]]                               
                    lbg.append(np.zeros(nalg)) # equality constraints
                    ubg.append(np.zeros(nalg)) # equality constraints
                    
                # Get an expression for the state at the end of the finite
                # element
                xf_k = 0
                for j in range(self.deg+1):
                    xf_k += self.D[j]*XD[k][i][j]
                    
                # if i==self.points_per_interval-1:

                # Add continuity equation to NLP
                if k+1 != self.nk: # End = Beginning of next
                    g += [XD[k+1][0][0] - xf_k]
                    lbg.append(-self.NLPdata['CONtol']*np.ones(ndiff))
                    ubg.append(self.NLPdata['CONtol']*np.ones(ndiff))
                
                else: # At the last segment
                    # Periodicity constraints (only for NEQ)
                    g += [XD[0][0][0][:self.neq] - xf_k[:self.neq]]
                    lbg.append(-self.NLPdata['CONtol']*np.ones(self.neq))
                    ubg.append(self.NLPdata['CONtol']*np.ones(self.neq))


                # else:
                #     g += [XD[k][i+1][0] - xf_k]
                
        # Flatten constraint arrays for the last addition
        lbg = np.concatenate(lbg).tolist()
        ubg = np.concatenate(ubg).tolist()

        # Constraint to protect against fixed point solutions
        if self.NLPdata['FPgaurd'] is True:
            fout = self.model.call(cs.daeIn(t=self.tgrid[0],
                                            x=XD[0,0,0][:self.neq],
                                               p=V[:self.np]))[0]
            g += [cs.MX(cs.sumAll(fout**2))]
            lbg.append(np.array(self.NLPdata['FPTOL']))
            ubg.append(np.array(cs.inf))

        elif self.NLPdata['FPgaurd'] == 'all':
            fout = self.model.call(cs.daeIn(t=self.tgrid[0],
                                            x=XD[0,0,0][:self.neq],
                                               p=V[:self.np]))[0]
            g += [cs.MX(fout**2)]
            lbg += [self.NLPdata['FPTOL']]*self.neq
            ubg += [cs.inf]*self.neq



        # Nonlinear constraint function
        gfcn = cs.MXFunction([V],[cs.vertcat(g)])


        # Get Linear Interpolant for YDATA from TDATA
        objlist = []
        # xarr = np.array([V[self.np:][i] for i in \
        #         xrange(self.neq*self.nk*(self.deg+1))])
        # xarr = xarr.reshape([self.nk,self.deg+1,self.neq])
        
        # List of the times when each finite element starts
        felist = self.tgrid.reshape((self.nk,self.deg+1))[:,0]
        felist = np.hstack([felist, self.tgrid[-1]])

        def z(tau, zs):
            """
            Function to calculate the interpolated values of z^K at a
            given tau (0->1). zs is a matrix with the symbolic state
            values within the finite element
            """

            def l(j,t):
                """
                Intermediate values for polynomial interpolation
                """
                tau = self.NLPdata['collocation_points']\
                        [self.NLPdata['cp']][self.deg]
                return np.prod(np.array([ 
                        (t - tau[k])/(tau[j] - tau[k]) 
                        for k in xrange(0,self.deg+1) if k != j]))

            
            interp_vector = []
            for i in xrange(self.neq): # only state variables
                interp_vector += [np.sum(np.array([l(j, tau)*zs[j][i]
                                  for j in xrange(0, self.deg+1)]))]
            return interp_vector

        # Set up Objective Function by minimizing distance from
        # Collocation solution to Measurement Data

        # Number of measurement functions
        for i in xrange(self.NM):

            # Number of sampling points per measurement
            for j in xrange(len(self.tdata[i])):

                # the interpolating polynomial wants a tau value,
                # where 0 < tau < 1, distance along current element.
                
                # Get the index for which finite element of the tdata 
                # values
                feind = get_ind(self.tdata[i][j],felist)[0]

                # Get the starting time and tau (0->1) for the tdata
                taustart = felist[feind]
                tau = (self.tdata[i][j] - taustart)*(self.nk+1)/self.tf

                x_interp = z(tau, XD[feind][0])
                # Broken in newest numpy version, likely need to redo
                # this whole file with most recent versions
                y_model = self.Amatrix[i].dot(x_interp)

                # Add measurement scaling
                if self.mpars[i]: y_model *= MP[sum(self.mpars[:i])]

                # Using relative diff's is probably messing up weights
                # in Identifiability
                diff = (y_model - self.ydata[i][j])

                if   self.NLPdata['ObjMethod'] == 'lsq':
                    dist = (diff**2/self.edata[i][j]**2)

                elif self.NLPdata['ObjMethod'] == 'laplace':
                    dist = cs.fabs(diff)/np.sqrt(self.edata[i][j])
                
                try: dist *= self.NLPdata['state_weights'][i]
                except KeyError: pass
                    
                objlist += [dist]

        # Minimization Objective
        if self.minimize_f:
            # Function integral
            f_integral = 0
            # For each finite element
            for i in xrange(self.nk):
                # For each collocation point
                fvals = []
                for j in xrange(self.deg+1):
                    fvals += [self.minimize_f(XD[i][0][j], P)]
                # integrate with weights
                f_integral += sum([fvals[k] * self.E[k] for k in
                                   xrange(self.deg+1)])

            objlist += [self.NLPdata['f_minimize_weight']*f_integral]



        # Stability Objective (Floquet Multipliers)
        if self.monodromy:
            s_final = XD[-1,-1,-1][-self.neq**2:]
            s_final = s_final.reshape((self.neq,self.neq))
            trace = sum([s_final[i,i] for i in xrange(self.neq)])
            objlist += [self.NLPdata['stability']*(trace - 1)**2]




        # Objective function of the NLP
        obj = cs.sumAll(cs.vertcat(objlist))
        ofcn = cs.MXFunction([V], [obj])

        self.CollocationSolver = cs.IpoptSolver(ofcn,gfcn)

        for opt,val in self.IpoptOpts.iteritems():
            self.CollocationSolver.setOption(opt,val)

        self.CollocationSolver.setOption('obj_scaling_factor',
                                         len(vars_init))
        
        if warmstart:
            self.CollocationSolver.setOption('warm_start_init_point','yes')
        
        # initialize the self.CollocationSolver
        self.CollocationSolver.init()
          
        # Initial condition
        self.CollocationSolver.setInput(vars_init,cs.NLP_X_INIT)

        # Bounds on x
        self.CollocationSolver.setInput(vars_lb,cs.NLP_LBX)
        self.CollocationSolver.setInput(vars_ub,cs.NLP_UBX)

        # Bounds on g
        self.CollocationSolver.setInput(np.array(lbg),cs.NLP_LBG)
        self.CollocationSolver.setInput(np.array(ubg),cs.NLP_UBG)

        if warmstart:
            self.CollocationSolver.setInput( \
                    self.WarmStartData['NLP_X_OPT'],cs.NLP_X_INIT)
            self.CollocationSolver.setInput( \
                    self.WarmStartData['NLP_LAMBDA_G'],cs.NLP_LAMBDA_INIT)
            self.CollocationSolver.setOutput( \
                    self.WarmStartData['NLP_LAMBDA_X'],cs.NLP_LAMBDA_X)
                    
        
        pdb.set_trace()
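The helper z(tau, zs) above evaluates the Lagrange interpolant of the collocated states within a finite element, which in formula form is

$$z^{K}(\tau) \;=\; \sum_{j=0}^{d} \ell_j(\tau)\,z_j, \qquad \ell_j(\tau) \;=\; \prod_{k=0,\;k\neq j}^{d} \frac{\tau-\tau_k}{\tau_j-\tau_k},$$

with $\tau \in [0,1]$ the normalized time inside the element and $\tau_k$ the collocation points; the least-squares objective then compares this interpolant (mapped through the measurement matrix) with the data at each sampling time.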
Example #13
    def parameter_estimation(self):
        """
        Again, borrowed from PSJ. Gets a decent estimate of the parameters'
        initial values by looking at slopes.
        
        Here we will set up an NLP to estimate the initial parameter
        guess (potentially along with unmeasured state variables) by
        minimizing the difference between the calculated slope at each
        shooting node (a function of the parameters) and the measured
        slope (using a spline interpolant)
        """
        
        # Here we must interpolate the x_init data using a spline. We
        # will differentiate this spline to get initial values for
        # the parameters
        
        node_ts = self.tgrid.reshape((self.nk, self.deg+1))[:,0]
        try:
            f_init = self.sx(self.tgrid,1).T
        except AttributeError:
            # Create interpolation object
            sx = fnlist([])
            def flat_factory(x):
                """ Protects against nan's resulting from flat trajectories
                in spline curve """
                return lambda t, d: (np.array([x] * len(t)) if d == 0 else
                                     np.array([0] * len(t)))
            for x in np.array(self.x_init).T:
                tvals = list(node_ts)
                tvals.append(tvals[0] + self.tf)
                xvals = list(x.reshape((self.nk, self.deg+1))[:,0])
                ## here ##
                xvals.append(x[0])
                
                if np.linalg.norm(np.asarray(xvals) - xvals[0]) < 1E-8:  # xvals is a plain list; convert before subtracting
                    # flat profile
                    sx += [flat_factory(xvals[0])]

                else: sx += [spline(tvals, xvals, 0)]

            f_init = sx(self.tgrid,1).T

        # set initial guess for parameters
        logmax = np.log10(np.array(self.PARMAX))
        logmin = np.log10(np.array(self.PARMIN))
        p_init = 10**(logmax - (logmax - logmin)/2)

        f = self.model
        V = cs.MX("V", self.ParamCount)
        par = V

        # Allocate the initial conditions
        pvars_init = np.ones(self.ParamCount)
        pvars_lb = np.array(self.PARMIN)
        pvars_ub = np.array(self.PARMAX)
        pvars_init = p_init

        xin = []
        fin = []

        # For each state, add the (variable or constant) MX
        # representation of the measured x and f value to the input
        # variable list.
        for state in xrange(self.neq):
            xin += [cs.MX(self.x_init[:,state])]
            fin += [cs.MX(f_init[:,state])]

        xin = cs.horzcat(xin)
        fin = cs.horzcat(fin)

        # For all nodes
        res = []
        xmax = self.x_init.max(0)
        for ii in xrange((self.deg+1)*self.nk):
            f_out = f.call(cs.daeIn(t=self.tgrid[ii],
                                    x=xin[ii,:].T, p=par))[0]
            res += [cs.sumAll(((f_out - fin[ii,:].T) / xmax)**2)]

        F = cs.MXFunction([V],[cs.sumAll(cs.vertcat(res))])
        F.init()
        parsolver = cs.IpoptSolver(F)

        for opt,val in self.IpoptOpts.iteritems():
            if not opt == "expand_g":
                parsolver.setOption(opt,val)

        parsolver.init()
        parsolver.setInput(pvars_init,cs.NLP_X_INIT)
        parsolver.setInput(pvars_lb,cs.NLP_LBX)
        parsolver.setInput(pvars_ub,cs.NLP_UBX)
        parsolver.solve()
        
        success = parsolver.getStat('return_status') == 'Solve_Succeeded'
        assert success, "Parameter Estimation Failed"

        self.pcost = float(parsolver.output(cs.NLP_COST))

        
        pvars_opt = np.array(parsolver.output( \
                            cs.NLP_X_OPT)).flatten()

        if success: return pvars_opt
        else: return False
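The NLP solved by parameter_estimation is a plain least-squares fit of the model right-hand side to the spline-derived slopes, roughly

$$\min_{p} \;\sum_{i} \left\| \frac{f(t_i,\, x_i^{\mathrm{init}},\, p) - \dot{x}_i^{\mathrm{spline}}}{x_{\max}} \right\|_2^2,$$

where the sum runs over all collocation-grid times, $x_i^{\mathrm{init}}$ are the measured or interpolated states, $\dot{x}_i^{\mathrm{spline}}$ the slopes of the fitted spline, and $x_{\max}$ a per-state scaling taken from the data.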