Example #1
    def solveBVP_casadi(self):
        """
        Uses CasADi's interface to SUNDIALS to solve the boundary value
        problem using a single-shooting method with automatic
        differentiation.
        """

        # Create and initialize the CVODES integrator for the time-scaled model
        self.bvpint = cs.CVodesIntegrator(self.modlT)
        self.bvpint.setOption('abstol',self.intoptions['bvp_abstol'])
        self.bvpint.setOption('reltol',self.intoptions['bvp_reltol'])
        self.bvpint.setOption('tf',1)
        self.bvpint.setOption('disable_internal_warnings', True)
        self.bvpint.setOption('fsens_err_con', True)
        self.bvpint.init()

        # Vector of unknowns [y0, T]
        V = cs.msym("V",self.NEQ+1)
        y0 = V[:-1]
        T = V[-1]
        t = cs.msym('t')
        param = cs.vertcat([self.paramset, T])

        yf = self.bvpint.call(cs.integratorIn(x0=y0,p=param))[0]
        fout = self.modlT.call(cs.daeIn(t=t, x=y0,p=param))[0]

        # Periodicity residual (squared) plus a phase condition anchoring t = 0
        obj = (yf - y0)**2
        obj.append(fout[0])

        F = cs.MXFunction([V],[obj])
        F.init()

        solver = cs.KinsolSolver(F)
        solver.setOption('abstol',self.intoptions['bvp_ftol'])
        solver.setOption('ad_mode', "forward")
        solver.setOption('strategy','linesearch')
        solver.setOption('numeric_jacobian', True)
        solver.setOption('exact_jacobian', False)
        solver.setOption('pretype', 'both')
        solver.setOption('use_preconditioner', True)
        solver.setOption('numeric_hessian', True)
        solver.setOption('constraints', (2,)*(self.NEQ+1))
        solver.setOption('verbose', False)
        solver.setOption('sparse', False)
        solver.setOption('linear_solver', 'dense')
        solver.init()
        solver.output().set(self.y0)
        solver.solve()

        self.y0 = solver.output().toArray().squeeze()
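
The code above targets the legacy CasADi 1.x API (cs.msym, CVodesIntegrator, KinsolSolver, setOption). A rough sketch of the same single-shooting idea against the current CasADi 3.x API follows, assuming CasADi >= 3.6; ode_rhs, y0_guess, T_guess and neq are hypothetical stand-ins for the model right-hand side, initial state guess, period guess and state dimension, and the residual uses the plain periodicity defect yf - y0 rather than the squared form above.

import casadi as ca

def solve_periodic_bvp_sketch(ode_rhs, y0_guess, T_guess, neq):
    """Single-shooting periodic BVP sketch for CasADi >= 3.6 (untested)."""
    y = ca.MX.sym('y', neq)
    T = ca.MX.sym('T')
    # Integrate the time-scaled dynamics over tau in [0, 1], so one unit
    # of tau corresponds to one period T.
    dae = {'x': y, 'p': T, 'ode': T * ode_rhs(y)}
    F = ca.integrator('F', 'cvodes', dae, 0.0, 1.0)

    V = ca.MX.sym('V', neq + 1)            # unknowns [y0, T]
    y0, Tv = V[:neq], V[neq]
    yf = F(x0=y0, p=Tv)['xf']
    # Periodicity residual plus a phase condition pinning the orbit at t = 0.
    res = ca.vertcat(yf - y0, ode_rhs(y0)[0])
    rf = ca.rootfinder('rf', 'kinsol', ca.Function('g', [V], [res]))
    sol = rf(ca.vertcat(ca.DM(y0_guess), T_guess))
    return sol[:neq], sol[neq]

Here ca.rootfinder with the 'kinsol' plugin plays the role of KinsolSolver; the nonnegativity constraints and Jacobian options set in the original are not carried over.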
Example #2
    def __init__(self,dae,nk):
        self.dae = dae
        self.nk = nk

        self._gaussNewtonObjF = []

        mapSize = len(self.dae.xNames())*(self.nk+1) + len(self.dae.pNames())
        V = C.msym('dvs',mapSize)
        self._dvMap = nmheMaps.VectorizedReadOnlyNmheMap(self.dae,self.nk,V)

        self._boundMap = nmheMaps.WriteableNmheMap(self.dae,self.nk)
        self._guessMap = nmheMaps.WriteableNmheMap(self.dae,self.nk)

        self._U = C.msym('u',self.nk,len(self.dae.uNames()))
        self._outputMapGenerator = nmheMaps.NmheOutputMapGenerator(self,self._U)
        self._outputMap = nmheMaps.NmheOutputMap(self._outputMapGenerator, self._dvMap.vectorize(), self._U)

        self._constraints = Constraints()
Example #4
    def __init__(self, dae, nSteps):
        # check inputs
        assert(isinstance(dae, Dae))
        assert(isinstance(nSteps, int))

        # make sure dae has everything
        assert(hasattr(dae,'_odeRes'))
        assert(hasattr(dae,'_algRes'))
        assert(hasattr(dae,'stateDotDummy'))
        
        self.dae = dae
        self.dae._freeze('MultipleShootingStage(dae)')

        # set up design vars
        self.nSteps = nSteps
        self.states = C.msym("x" ,self.nStates(),self.nSteps)
        self.actions = C.msym("u",self.nActions(),self.nSteps)
        self.params = C.msym("p",self.nParams())
        
#        self._dvs = C.msym("dv",self.nStates()*self.nSteps+self.nActions()*self.nSteps+self.nParams())
#
#        numXVars = self.nStates()*self.nSteps
#        numUVars = self.nActions()*self.nSteps
#        numPVars = self.nParams()
#        
#        self.states  = C.reshape(self._dvs[:numXVars], [self.nStates(), self.nSteps])
#        self.actions = C.reshape(self._dvs[numXVars:numXVars+numUVars], [self.nActions(), self.nSteps])
#        self.params = self._dvs[numXVars+numUVars:]
#
#        assert( self.states.size() == numXVars )
#        assert( self.actions.size() == numUVars )
#        assert( self.params.size() == numPVars )

        # set up interface
        self._constraints = Constraints()
        self._bounds = Bounds(self.dae._xNames, self.dae._uNames, self.dae._pNames, self.nSteps)
        self._initialGuess = InitialGuess(self.dae._xNames, self.dae._uNames, self.dae._pNames, self.nSteps)
        self._designVars = DesignVars((self.dae._xNames,self.states),
                                      (self.dae._uNames,self.actions),
                                      (self.dae._pNames,self.params),
                                      self.nSteps)
Example #5
    def __init__(self, dae, nSteps):
        # check inputs
        assert isinstance(dae, Dae)
        assert isinstance(nSteps, int)

        # make sure dae has everything
        assert hasattr(dae, '_residual')

        self.dae = dae
        self.dae._freeze('MultipleShootingStage(dae)')

        # set up design vars
        self.nSteps = nSteps
        self.states = C.msym("x", self.nStates(), self.nSteps)
        self.actions = C.msym("u", self.nActions(), self.nSteps)
        self.params = C.msym("p", self.nParams())

        #        self._dvs = C.msym("dv",self.nStates()*self.nSteps+self.nActions()*self.nSteps+self.nParams())
        #
        #        numXVars = self.nStates()*self.nSteps
        #        numUVars = self.nActions()*self.nSteps
        #        numPVars = self.nParams()
        #
        #        self.states  = C.reshape(self._dvs[:numXVars], [self.nStates(), self.nSteps])
        #        self.actions = C.reshape(self._dvs[numXVars:numXVars+numUVars], [self.nActions(), self.nSteps])
        #        self.params = self._dvs[numXVars+numUVars:]
        #
        #        assert self.states.size() == numXVars
        #        assert self.actions.size() == numUVars
        #        assert self.params.size() == numPVars

        # set up interface
        self._constraints = Constraints()
        self._bounds = Bounds(self.dae._xNames, self.dae._uNames,
                              self.dae._pNames, self.nSteps)
        self._initialGuess = InitialGuess(self.dae._xNames, self.dae._uNames,
                                          self.dae._pNames, self.nSteps)
        self._designVars = DesignVars(
            (self.dae._xNames, self.states), (self.dae._uNames, self.actions),
            (self.dae._pNames, self.params), self.nSteps)
Example #7
        def mkParallelG():
            gs = [C.MXFunction([self.getDesignVars()],[gg]) for gg in self._constraints._g]
            for gg in gs:
                gg.init()

            pg = C.Parallelizer(gs)
#            pg.setOption("parallelization","openmp")
            pg.setOption("parallelization","serial")
#            pg.setOption("parallelization","expand")
            pg.init()

            dvsDummy = C.msym('dvs',(self.nStates()+self.nActions())*self.nSteps+self.nParams())
            g_ = C.MXFunction([dvsDummy],[C.veccat(pg.call([dvsDummy]*len(gs)))])
            g_.init()
            return g_
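
For reference, C.Parallelizer no longer exists in CasADi 3.x. When, as in mkParallelG, every constraint expression is built from the same design-variable vector, the usual modern route is simply to stack the expressions into a single Function; Function.map covers the case where one function must be evaluated at many inputs. A minimal sketch, with dvs and g_exprs as hypothetical stand-ins for self.getDesignVars() and self._constraints._g:

import casadi as ca

def make_constraint_function_sketch(dvs, g_exprs):
    """Stack constraint expressions (all built from dvs) into one Function."""
    g_all = ca.vertcat(*g_exprs)
    return ca.Function('g', [dvs], [g_all])

# If instead one function gfun must be evaluated at N different inputs,
# Function.map provides the parallel evaluation Parallelizer used to offer:
#   gmap = gfun.map(N, 'thread')    # or 'openmp' / 'serial' / 'unroll'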
    def inverse_parameterization(R, parameterizationFunction):
        angle1 = ssym('angle1')
        angle2 = ssym('angle2')
        angle3 = ssym('angle3')
        Restimated = parameterizationFunction(angle1,angle2,angle3)
        E = R - Restimated
        e = casadi.sumAll(E*E)
        f = SXFunction([casadi.vertcat([angle1,angle2,angle3])], [e])
        f.init()
        anglerange = numpy.linspace(-1*numpy.pi,1*numpy.pi,25)
        best_eTemp = 9999999999999
        best_angle1 = numpy.nan
        best_angle2 = numpy.nan
        best_angle3 = numpy.nan
        for i in xrange(len(anglerange)):
            for j in xrange(len(anglerange)):
                for k in xrange(len(anglerange)):
                    r = anglerange[i]
                    p = anglerange[j]
                    y = anglerange[k]
                    f.setInput([r,p,y])
                    f.evaluate()
                    eTemp = f.output(0)
                    if eTemp<best_eTemp:
                        best_eTemp = eTemp
                        best_angle1 = r
                        best_angle2 = p
                        best_angle3 = y
        if 0:
            V = msym("V",(3,1))
            fval = f.call([V])
            fmx = MXFunction([V],fval)
            nlp_solver = IpoptSolver(fmx)
        else:
            nlp_solver = IpoptSolver(f)

        nlp_solver.setOption('generate_hessian',True)
        nlp_solver.setOption('expand_f',True)
        nlp_solver.setOption('expand_g',True)
        nlp_solver.init()
        nlp_solver.setInput([best_angle1,best_angle2,best_angle3],NLP_X_INIT)
        nlp_solver.solve()
        sol =  nlp_solver.output(NLP_X_OPT).toArray()

        return sol
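
inverse_parameterization above mixes the old SXFunction/IpoptSolver interface with the NLP_X_INIT/NLP_X_OPT enums. A rough equivalent against the CasADi 3.x API is sketched below, keeping the same coarse grid search followed by an Ipopt refinement; inverse_parameterization_sketch and n_grid are illustrative names, parameterization_fn is assumed to accept CasADi SX angles, and R is assumed to be a numeric 3x3 rotation matrix.

import casadi as ca
import numpy as np

def inverse_parameterization_sketch(R, parameterization_fn, n_grid=25):
    """Grid search plus Ipopt refinement of three angles (CasADi 3.x, untested)."""
    angles = ca.SX.sym('angles', 3)
    Rest = parameterization_fn(angles[0], angles[1], angles[2])
    err = ca.sumsqr(ca.DM(R) - Rest)       # squared Frobenius-norm error
    f = ca.Function('f', [angles], [err])

    # Coarse grid search over the three angles for a starting point.
    grid = np.linspace(-np.pi, np.pi, n_grid)
    best_angles = min(((float(f([a, b, c])), [a, b, c])
                       for a in grid for b in grid for c in grid),
                      key=lambda t: t[0])[1]

    # Local refinement with Ipopt starting from the best grid point.
    solver = ca.nlpsol('solver', 'ipopt', {'x': angles, 'f': err})
    sol = solver(x0=best_angles)
    return np.array(sol['x']).squeeze()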
Example #9
    def __init__(self, dae, nk=None, nicp=1, deg=4, collPoly='RADAU'):
        assert nk is not None
        assert isinstance(dae, Dae)

        self.dae = dae

        self.nk = nk
        self.nicp = nicp
        self.deg = deg
        self.collPoly = collPoly

        self._bounds = BoundsMap(self,"bounds")
        self._guess = collmaps.WriteableCollMap(self,"guess")

        self._constraints = Constraints()

        # setup NLP variables
        self._dvMap = collmaps.VectorizedReadOnlyCollMap(self,"design var map",CS.msym("V",self.getNV()))

        # quadratures
        self._quadratureManager = collmaps.QuadratureManager(self)
Example #10
    def _bvp_setup(self):
        """ Set up the casadi ipopt solver for the bvp solution """

        # Define some variables
        deg = self.deg
        nk = self.nk
        NV = nk * (deg + 1) * self.NEQ

        # NLP variable vector
        V = cs.msym("V", NV)
        XD = np.resize(np.array([], dtype=cs.MX), (nk, deg + 1))
        P = cs.msym("P", self.NP * self.nk)
        PK = P.reshape((self.nk, self.NP))

        offset = 0
        for k in range(nk):
            for j in range(deg + 1):
                XD[k][j] = V[offset:offset + self.NEQ]
                offset += self.NEQ

        # Constraint function for the NLP
        g = []
        lbg = []
        ubg = []

        # For all finite elements
        for k in range(nk):
            # For all collocation points
            for j in range(1, deg + 1):
                # Get an expression for the state derivative at the
                # collocation point
                xp_jk = 0
                for j2 in range(deg + 1):
                    # get the time derivative of the differential states
                    # (eq 10.19b)
                    xp_jk += self.coll_setup_dict['C'][j2][j] * XD[k][j2]

                # Generate parameter set, accounting for variable
                #
                # Add collocation equations to the NLP
                [fk] = self.rfmod.call(
                    [0., xp_jk / self.h, XD[k][j], PK[k, :].T])

                # impose system dynamics (for the differential states
                # (eq
                # 10.19b))
                g += [fk[:self.NEQ]]
                lbg.append(np.zeros(self.NEQ))  # equality constraints
                ubg.append(np.zeros(self.NEQ))  # equality constraints

            # Get an expression for the state at the end of the finite
            # element
            xf_k = 0
            for j in range(deg + 1):
                xf_k += self.coll_setup_dict['D'][j] * XD[k][j]

            # Add continuity equation to NLP
            # End = Beginning of next
            if k + 1 != nk:
                g += [XD[k + 1][0] - xf_k]
            # At the last segment, impose periodicity constraints instead
            else:
                g += [XD[0][0] - xf_k]
            lbg.append(np.zeros(self.NEQ))
            ubg.append(np.zeros(self.NEQ))

        # Nonlinear constraint function
        gfcn = cs.MXFunction([V, P], [cs.vertcat(g)])
        # Objective function (periodicity)
        ofcn = cs.MXFunction([V, P], [cs.sumAll(g[-1]**2)])

        ## ----
        ## SOLVE THE NLP
        ## ----

        # Allocate an NLP solver
        self.solver = cs.IpoptSolver(ofcn, gfcn)

        # Set options
        self.solver.setOption("expand_f", True)
        self.solver.setOption("expand_g", True)
        self.solver.setOption("generate_hessian", True)
        self.solver.setOption("max_iter", 1000)
        self.solver.setOption("tol", self.int_opt['bvp_tol'])
        self.solver.setOption("constr_viol_tol",
                              self.int_opt['bvp_constr_tol'])
        self.solver.setOption("linear_solver",
                              self.int_opt['bvp_linear_solver'])
        self.solver.setOption('parametric', True)
        self.solver.setOption('print_level', self.int_opt['bvp_print_level'])

        # initialize the self.solver
        self.solver.init()
        self.lbg = lbg
        self.ubg = ubg

        self.need_bvp_setup = False
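
The C and D arrays read from self.coll_setup_dict above are the standard Lagrange-polynomial collocation coefficients: C holds the basis-polynomial derivatives at the collocation points, D the basis values at the end of the element. They are built elsewhere in this class; the sketch below shows one common way to compute them, following the approach used in CasADi's own collocation examples and assuming the CasADi 3.x collocation_points helper.

import numpy as np
import casadi as ca

def collocation_coefficients_sketch(deg, scheme='radau'):
    """Lagrange-polynomial collocation coefficients (sketch).

    Returns C[j][r] = dL_j/dtau evaluated at tau_r and D[j] = L_j(1),
    i.e. arrays in the same role as coll_setup_dict['C'] / ['D'] above.
    """
    # Collocation points on (0, 1], prepended with tau = 0.
    tau = [0.0] + ca.collocation_points(deg, scheme)

    C = np.zeros((deg + 1, deg + 1))
    D = np.zeros(deg + 1)
    for j in range(deg + 1):
        # Lagrange basis polynomial L_j: 1 at tau[j], 0 at the other points.
        p = np.poly1d([1.0])
        for r in range(deg + 1):
            if r != j:
                p *= np.poly1d([1.0, -tau[r]]) / (tau[j] - tau[r])
        D[j] = p(1.0)                  # basis value at the end of the element
        pder = np.polyder(p)
        for r in range(deg + 1):
            C[j][r] = pder(tau[r])     # basis derivative at each collocation point
    return C, D

As a sanity check, deg = 1 with the single Radau point tau = 1 gives D = [0, 1] and C = [[-1, -1], [1, 1]], so the collocation equation at the single point reduces to backward Euler.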
Example #11
    def CollocationSetup(self, warmstart=False):
        """
        Sets up NLP for collocation solution. Constructs initial guess
        arrays, constructs constraint and objective functions, and
        otherwise passes arguments to the correct places. This looks
        really inefficient and is likely unneccessary to run multiple
        times for repeated runs with new data. Not sure how much time it
        takes compared to the NLP solution.

        Run immediately before CollocationSolve.
        """

        # Dimensions of the problem
        nx = self.NVAR  # total number of states
        ndiff = nx  # number of differential states
        nalg = 0  # number of algebraic states
        nu = 0  # number of controls

        # Collocated variables
        NXD = self.NICP * self.NK * (self.DEG +
                                     1) * ndiff  # differential states
        NXA = self.NICP * self.NK * self.DEG * nalg  # algebraic states
        NU = self.NK * nu  # Parametrized controls
        NV = NXD + NXA + NU + self.NP + self.NMP  # Total variables
        self.NV = NV

        # NLP variable vector
        V = cs.msym("V", NV)

        # All variables with bounds and initial guess
        vars_lb = np.zeros(NV)
        vars_ub = np.zeros(NV)
        vars_init = np.zeros(NV)
        offset = 0

        #
        # Split NLP vector into useable slices
        #
        # Get the parameters
        P = V[offset:offset + self.NP]
        vars_init[offset:offset + self.NP] = self.NLPdata['p_init']
        vars_lb[offset:offset + self.NP] = self.NLPdata['p_min']
        vars_ub[offset:offset + self.NP] = self.NLPdata['p_max']

        offset += self.NP  # indexing variable

        # Get collocated states and parametrized control
        XD = np.resize(np.array([], dtype=cs.MX),
                       (self.NK, self.NICP, self.DEG + 1))
        # NB: same name as above
        XA = np.resize(np.array([], dtype=cs.MX),
                       (self.NK, self.NICP, self.DEG))
        # NB: same name as above
        U = np.resize(np.array([], dtype=cs.MX), self.NK)

        # Prepare the starting data matrix vars_init, vars_ub, and
        # vars_lb, by looping over finite elements, states, etc. Also
        # groups the variables in the large unknown vector V into XD and
        # XA(unused) for later indexing
        for k in range(self.NK):
            # Collocated states
            for i in range(self.NICP):
                #
                for j in range(self.DEG + 1):

                    # Get the expression for the state vector
                    XD[k][i][j] = V[offset:offset + ndiff]
                    if j != 0:
                        XA[k][i][j - 1] = V[offset + ndiff:offset + ndiff +
                                            nalg]
                    # Add the initial condition
                    index = (self.DEG + 1) * (self.NICP * k + i) + j
                    if k == 0 and j == 0 and i == 0:
                        vars_init[offset:offset+ndiff] = \
                            self.NLPdata['xD_init'][index,:]

                        vars_lb[offset:offset+ndiff] = \
                                self.NLPdata['xDi_min']
                        vars_ub[offset:offset+ndiff] = \
                                self.NLPdata['xDi_max']
                        offset += ndiff
                    else:
                        if j != 0:
                            vars_init[offset:offset+nx] = \
                            np.append(self.NLPdata['xD_init'][index,:],
                                      self.NLPdata['xA_init'][index,:])

                            vars_lb[offset:offset+nx] = \
                            np.append(self.NLPdata['xD_min'],
                                      self.NLPdata['xA_min'])

                            vars_ub[offset:offset+nx] = \
                            np.append(self.NLPdata['xD_max'],
                                      self.NLPdata['xA_max'])

                            offset += nx
                        else:
                            vars_init[offset:offset+ndiff] = \
                                    self.NLPdata['xD_init'][index,:]

                            vars_lb[offset:offset+ndiff] = \
                                    self.NLPdata['xD_min']

                            vars_ub[offset:offset+ndiff] = \
                                    self.NLPdata['xD_max']

                            offset += ndiff

            # Parametrized controls (unused here)
            U[k] = V[offset:offset + nu]

        # Attach these initial conditions to external dictionary
        self.NLPdata['v_init'] = vars_init
        self.NLPdata['v_ub'] = vars_ub
        self.NLPdata['v_lb'] = vars_lb

        # Setting up the constraint function for the NLP. Over each
        # collocated state, ensure continuity and system dynamics
        g = []
        lbg = []
        ubg = []

        # For all finite elements
        for k in range(self.NK):
            for i in range(self.NICP):
                # For all collocation points
                for j in range(1, self.DEG + 1):
                    # Get an expression for the state derivative
                    # at the collocation point
                    xp_jk = 0
                    for j2 in range(self.DEG + 1):
                        # get the time derivative of the differential
                        # states (eq 10.19b)
                        xp_jk += self.C[j2][j] * XD[k][i][j2]

                    # Add collocation equations to the NLP
                    [fk] = self.rfmod.call([
                        0., xp_jk / self.h, XD[k][i][j], XA[k][i][j - 1], U[k],
                        P
                    ])

                    # impose system dynamics (for the differential
                    # states (eq 10.19b))
                    g += [fk[:ndiff]]
                    lbg.append(np.zeros(ndiff))  # equality constraints
                    ubg.append(np.zeros(ndiff))  # equality constraints

                    # impose system dynamics (for the algebraic states
                    # (eq 10.19b)) (unused)
                    g += [fk[ndiff:]]
                    lbg.append(np.zeros(nalg))  # equality constraints
                    ubg.append(np.zeros(nalg))  # equality constraints

                # Get an expression for the state at the end of the finite
                # element
                xf_k = 0
                for j in range(self.DEG + 1):
                    xf_k += self.D[j] * XD[k][i][j]

                # if i==self.NICP-1:

                # Add continuity equation to NLP
                if k + 1 != self.NK:  # End = Beginning of next
                    g += [XD[k + 1][0][0] - xf_k]
                    lbg.append(-self.NLPdata['CONtol'] * np.ones(ndiff))
                    ubg.append(self.NLPdata['CONtol'] * np.ones(ndiff))

                else:  # At the last segment
                    # Periodicity constraints (only for NEQ)
                    g += [XD[0][0][0][:self.NEQ] - xf_k[:self.NEQ]]
                    lbg.append(-self.NLPdata['CONtol'] * np.ones(self.NEQ))
                    ubg.append(self.NLPdata['CONtol'] * np.ones(self.NEQ))

                # else:
                #     g += [XD[k][i+1][0] - xf_k]

        # Flatten constraint arrays for the last addition
        lbg = np.concatenate(lbg).tolist()
        ubg = np.concatenate(ubg).tolist()

        # Constraint to protect against fixed point solutions
        if self.NLPdata['FPgaurd'] is True:
            fout = self.model.call(
                cs.daeIn(t=self.tgrid[0],
                         x=XD[0, 0, 0][:self.NEQ],
                         p=V[:self.NP]))[0]
            g += [cs.MX(cs.sumAll(fout**2))]
            lbg.append(np.array(self.NLPdata['FPTOL']))
            ubg.append(np.array(cs.inf))

        elif self.NLPdata['FPgaurd'] == 'all':
            fout = self.model.call(
                cs.daeIn(t=self.tgrid[0],
                         x=XD[0, 0, 0][:self.NEQ],
                         p=V[:self.NP]))[0]
            g += [cs.MX(fout**2)]
            lbg += [self.NLPdata['FPTOL']] * self.NEQ
            ubg += [cs.inf] * self.NEQ

        # Nonlinear constraint function
        gfcn = cs.MXFunction([V], [cs.vertcat(g)])

        # Minimize derivative of first state variable at t=0
        xp_0 = 0
        for j in range(self.NLPdata['DEG'] + 1):
            # get the time derivative of the differential
            # states (eq 10.19b)
            xp_0 += self.C[j][0] * XD[0][0][j]

        obj = xp_0
        ofcn = cs.MXFunction([V], [obj])

        self.CollocationSolver = cs.IpoptSolver(ofcn, gfcn)

        for opt, val in self.IpoptOpts.iteritems():
            self.CollocationSolver.setOption(opt, val)

        self.CollocationSolver.setOption('obj_scaling_factor', len(vars_init))

        if warmstart:
            self.CollocationSolver.setOption('warm_start_init_point', 'yes')

        # initialize the self.CollocationSolver
        self.CollocationSolver.init()

        # Initial condition
        self.CollocationSolver.setInput(vars_init, cs.NLP_X_INIT)

        # Bounds on x
        self.CollocationSolver.setInput(vars_lb, cs.NLP_LBX)
        self.CollocationSolver.setInput(vars_ub, cs.NLP_UBX)

        # Bounds on g
        self.CollocationSolver.setInput(np.array(lbg), cs.NLP_LBG)
        self.CollocationSolver.setInput(np.array(ubg), cs.NLP_UBG)

        if warmstart:
            self.CollocationSolver.setInput( \
                    self.WarmStartData['NLP_X_OPT'],cs.NLP_X_INIT)
            self.CollocationSolver.setInput( \
                    self.WarmStartData['NLP_LAMBDA_G'],cs.NLP_LAMBDA_INIT)
            self.CollocationSolver.setOutput( \
                    self.WarmStartData['NLP_LAMBDA_X'],cs.NLP_LAMBDA_X)
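
The solver setup above uses the pre-3.0 IpoptSolver class and the NLP_X_INIT / NLP_LBX / NLP_LAMBDA_INIT enums. For comparison, a minimal sketch of the same setup-and-solve step with the current nlpsol interface; the function and its arguments are placeholders standing in for V, the objective, the stacked constraint list and the bound arrays built above.

import casadi as ca

def solve_collocation_nlp_sketch(V, obj, g_list, x0, lbx, ubx, lbg, ubg):
    """Set up and solve the collocation NLP with CasADi 3.x nlpsol (sketch)."""
    nlp = {'x': V, 'f': obj, 'g': ca.vertcat(*g_list)}
    solver = ca.nlpsol('solver', 'ipopt', nlp, {'ipopt.max_iter': 1000})
    sol = solver(x0=x0, lbx=lbx, ubx=ubx, lbg=lbg, ubg=ubg)
    # 'x' is the optimum; 'lam_g' / 'lam_x' replace NLP_LAMBDA_G / NLP_LAMBDA_X.
    return sol['x'], sol['lam_g'], sol['lam_x']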
Example #12
 def _setupDesignVars(self):
     ## set these:
     # self._V  = V
     # self._XD = XD
     # self._XA = XA
     # self._U  = U
     # self._P  = P
     
     ndiff = len(self.dae.xNames())
     nalg = len(self.dae.zNames())
     nu = len(self.dae.uNames())
     NP = len(self.dae.pNames())
     nx = ndiff + nalg
     
     # Total number of variables
     NXD = self.nicp*self.nk*(self.deg+1)*ndiff # Collocated differential states
     NXA = self.nicp*self.nk*self.deg*nalg      # Collocated algebraic states
     NU = self.nk*nu                  # Parametrized controls
     NXF = ndiff                 # Final state (only the differential states)
     NV = NXD+NXA+NU+NXF+NP
 
     # NLP variable vector
     V = CS.msym("V",NV)
     
     offset = 0
     
     # Get the parameters
     P = V[offset:offset+NP]
     offset += NP
     
     # Get collocated states and parametrized control
     XD = np.resize(np.array([],dtype=CS.MX),(self.nk+1,self.nicp,self.deg+1)) # NB: same name as above
     XA = np.resize(np.array([],dtype=CS.MX),(self.nk,self.nicp,self.deg)) # NB: same name as above
     U = np.resize(np.array([],dtype=CS.MX),self.nk)
     for k in range(self.nk):
         # Collocated states
         for i in range(self.nicp):
             for j in range(self.deg+1):
                 # Get the expression for the state vector
                 XD[k][i][j] = V[offset:offset+ndiff]
                 if j !=0:
                     XA[k][i][j-1] = V[offset+ndiff:offset+ndiff+nalg]
                 # Add the initial condition
                 index = (self.deg+1)*(self.nicp*k+i) + j
                 if k==0 and j==0 and i==0:
                     offset += ndiff
                 else:
                     if j!=0:
                         offset += nx
                     else:
                         offset += ndiff
         
         # Parametrized controls
         U[k] = V[offset:offset+nu]
         offset += nu
     
     # State at end time
     XD[self.nk][0][0] = V[offset:offset+ndiff]
     
     offset += ndiff
     assert(offset==NV)
 
     self._V  = V
     self._XD = XD
     self._XA = XA
     self._U  = U
     self._P  = P
Example #13
    def collocation_solver_setup(self, warmstart=False):
        """
        Sets up NLP for collocation solution. Constructs initial guess
        arrays, constructs constraint and objective functions, and
        otherwise passes arguments to the correct places. This looks
        really inefficient and is likely unneccessary to run multiple
        times for repeated runs with new data. Not sure how much time it
        takes compared to the NLP solution.
        Run immediately before CollocationSolve.
        """
        
        # Dimensions of the problem
        nx    = self.NVAR # total number of states
        ndiff = nx        # number of differential states
        nalg  = 0         # number of algebraic states
        nu    = 0         # number of controls

        # Collocated variables
        NXD = self.nk*(self.deg+1)*ndiff # differential states 
        NXA = self.nk*self.deg*nalg      # algebraic states
        NU  = self.nk*nu                 # Parametrized controls
        NV  = NXD+NXA+NU+self.ParamCount+self.NMP # Total variables
        self.NV = NV

        # NLP variable vector
        V = cs.msym("V",NV)
          
        # All variables with bounds and initial guess
        vars_lb   = np.zeros(NV)
        vars_ub   = np.zeros(NV)
        vars_init = np.zeros(NV)
        offset    = 0
        
        ## I AM HERE ##
        
        
        #
        # Split NLP vector into useable slices
        #
        # Get the parameters
        P = V[offset:offset+self.ParamCount]
        vars_init[offset:offset+self.ParamCount] = self.NLPdata['p_init']
        vars_lb[offset:offset+self.ParamCount]   = self.NLPdata['p_min']
        vars_ub[offset:offset+self.ParamCount]   = self.NLPdata['p_max']

        # Initial conditions for measurement adjustment
        MP = V[self.NV-self.NMP:]
        vars_init[self.NV-self.NMP:] = np.ones(self.NMP)
        vars_lb[self.NV-self.NMP:] = 0.1*np.ones(self.NMP) 
        vars_ub[self.NV-self.NMP:] = 10*np.ones(self.NMP)


        pdb.set_trace()
        
        offset += self.np # indexing variable

        # Get collocated states and parametrized control
        XD = np.resize(np.array([], dtype=cs.MX), (self.nk, self.points_per_interval,
                                                   self.deg+1)) 
        # NB: same name as above
        XA = np.resize(np.array([],dtype=cs.MX),(self.nk,self.points_per_interval,self.deg)) 
        # NB: same name as above
        U = np.resize(np.array([],dtype=cs.MX),self.nk)

        # Prepare the starting data matrix vars_init, vars_ub, and
        # vars_lb, by looping over finite elements, states, etc. Also
        # groups the variables in the large unknown vector V into XD and
        # XA(unused) for later indexing
        for k in range(self.nk):  
            # Collocated states
            for i in range(self.points_per_interval):
                #
                for j in range(self.deg+1):
                              
                    # Get the expression for the state vector
                    XD[k][i][j] = V[offset:offset+ndiff]
                    if j !=0:
                        XA[k][i][j-1] = V[offset+ndiff:offset+ndiff+nalg]
                    # Add the initial condition
                    index = (self.deg+1)*(self.points_per_interval*k+i) + j
                    if k==0 and j==0 and i==0:
                        vars_init[offset:offset+ndiff] = \
                            self.NLPdata['xD_init'][index,:]
                        
                        vars_lb[offset:offset+ndiff] = \
                                self.NLPdata['xDi_min']
                        vars_ub[offset:offset+ndiff] = \
                                self.NLPdata['xDi_max']
                        offset += ndiff
                    else:
                        if j!=0:
                            vars_init[offset:offset+nx] = \
                            np.append(self.NLPdata['xD_init'][index,:],
                                      self.NLPdata['xA_init'][index,:])
                            
                            vars_lb[offset:offset+nx] = \
                            np.append(self.NLPdata['xD_min'],
                                      self.NLPdata['xA_min'])

                            vars_ub[offset:offset+nx] = \
                            np.append(self.NLPdata['xD_max'],
                                      self.NLPdata['xA_max'])

                            offset += nx
                        else:
                            vars_init[offset:offset+ndiff] = \
                                    self.NLPdata['xD_init'][index,:]

                            vars_lb[offset:offset+ndiff] = \
                                    self.NLPdata['xD_min']

                            vars_ub[offset:offset+ndiff] = \
                                    self.NLPdata['xD_max']

                            offset += ndiff
            
            # Parametrized controls (unused here)
            U[k] = V[offset:offset+nu]

        # Attach these initial conditions to external dictionary
        self.NLPdata['v_init'] = vars_init
        self.NLPdata['v_ub'] = vars_ub
        self.NLPdata['v_lb'] = vars_lb

        # Setting up the constraint function for the NLP. Over each
        # collocated state, ensure continuity and system dynamics
        g = []
        lbg = []
        ubg = []

        # For all finite elements
        for k in range(self.nk):
            for i in range(self.points_per_interval):
                # For all collocation points
                for j in range(1,self.deg+1):   		
                    # Get an expression for the state derivative
                    # at the collocation point
                    xp_jk = 0
                    for j2 in range (self.deg+1):
                        # get the time derivative of the differential
                        # states (eq 10.19b)
                        xp_jk += self.C[j2][j]*XD[k][i][j2]
                    
                    # Add collocation equations to the NLP
                    [fk] = self.rfmod.call([0., xp_jk/self.h,
                                            XD[k][i][j], XA[k][i][j-1],
                                            U[k], P])
                    
                    # impose system dynamics (for the differential
                    # states (eq 10.19b))
                    g += [fk[:ndiff]]
                    lbg.append(np.zeros(ndiff)) # equality constraints
                    ubg.append(np.zeros(ndiff)) # equality constraints

                    # impose system dynamics (for the algebraic states
                    # (eq 10.19b)) (unused)
                    g += [fk[ndiff:]]                               
                    lbg.append(np.zeros(nalg)) # equality constraints
                    ubg.append(np.zeros(nalg)) # equality constraints
                    
                # Get an expression for the state at the end of the finite
                # element
                xf_k = 0
                for j in range(self.deg+1):
                    xf_k += self.D[j]*XD[k][i][j]
                    
                # if i==self.points_per_interval-1:

                # Add continuity equation to NLP
                if k+1 != self.nk: # End = Beginning of next
                    g += [XD[k+1][0][0] - xf_k]
                    lbg.append(-self.NLPdata['CONtol']*np.ones(ndiff))
                    ubg.append(self.NLPdata['CONtol']*np.ones(ndiff))
                
                else: # At the last segment
                    # Periodicity constraints (only for NEQ)
                    g += [XD[0][0][0][:self.neq] - xf_k[:self.neq]]
                    lbg.append(-self.NLPdata['CONtol']*np.ones(self.neq))
                    ubg.append(self.NLPdata['CONtol']*np.ones(self.neq))


                # else:
                #     g += [XD[k][i+1][0] - xf_k]
                
        # Flatten constraint arrays for the last addition
        lbg = np.concatenate(lbg).tolist()
        ubg = np.concatenate(ubg).tolist()

        # Constraint to protect against fixed point solutions
        if self.NLPdata['FPgaurd'] is True:
            fout = self.model.call(cs.daeIn(t=self.tgrid[0],
                                            x=XD[0,0,0][:self.neq],
                                               p=V[:self.np]))[0]
            g += [cs.MX(cs.sumAll(fout**2))]
            lbg.append(np.array(self.NLPdata['FPTOL']))
            ubg.append(np.array(cs.inf))

        elif self.NLPdata['FPgaurd'] == 'all':
            fout = self.model.call(cs.daeIn(t=self.tgrid[0],
                                            x=XD[0,0,0][:self.neq],
                                               p=V[:self.np]))[0]
            g += [cs.MX(fout**2)]
            lbg += [self.NLPdata['FPTOL']]*self.neq
            ubg += [cs.inf]*self.neq



        # Nonlinear constraint function
        gfcn = cs.MXFunction([V],[cs.vertcat(g)])


        # Get Linear Interpolant for YDATA from TDATA
        objlist = []
        # xarr = np.array([V[self.np:][i] for i in \
        #         xrange(self.neq*self.nk*(self.deg+1))])
        # xarr = xarr.reshape([self.nk,self.deg+1,self.neq])
        
        # List of the times when each finite element starts
        felist = self.tgrid.reshape((self.nk,self.deg+1))[:,0]
        felist = np.hstack([felist, self.tgrid[-1]])

        def z(tau, zs):
            """
            Functon to calculate the interpolated values of z^K at a
            given tau (0->1). zs is a matrix with the symbolic state
            values within the finite element
            """

            def l(j,t):
                """
                Intermediate values for polynomial interpolation
                """
                tau = self.NLPdata['collocation_points']\
                        [self.NLPdata['cp']][self.deg]
                return np.prod(np.array([ 
                        (t - tau[k])/(tau[j] - tau[k]) 
                        for k in xrange(0,self.deg+1) if k is not j]))

            
            interp_vector = []
            for i in xrange(self.neq): # only state variables
                interp_vector += [np.sum(np.array([l(j, tau)*zs[j][i]
                                  for j in xrange(0, self.deg+1)]))]
            return interp_vector

        # Set up Objective Function by minimizing distance from
        # Collocation solution to Measurement Data

        # Number of measurement functions
        for i in xrange(self.NM):

            # Number of sampling points per measurement
            for j in xrange(len(self.tdata[i])):

                # the interpolating polynomial wants a tau value,
                # where 0 < tau < 1, distance along current element.
                
                # Get the index for which finite element of the tdata 
                # values
                feind = get_ind(self.tdata[i][j],felist)[0]

                # Get the starting time and tau (0->1) for the tdata
                taustart = felist[feind]
                tau = (self.tdata[i][j] - taustart)*(self.nk+1)/self.tf

                x_interp = z(tau, XD[feind][0])
                # Broken in newest numpy version, likely need to redo
                # this whole file with most recent versions
                y_model = self.Amatrix[i].dot(x_interp)

                # Add measurement scaling
                if self.mpars[i]: y_model *= MP[sum(self.mpars[:i])]

                # Using relative diff's is probably messing up weights
                # in Identifiability
                diff = (y_model - self.ydata[i][j])

                if   self.NLPdata['ObjMethod'] == 'lsq':
                    dist = (diff**2/self.edata[i][j]**2)

                elif self.NLPdata['ObjMethod'] == 'laplace':
                    dist = cs.fabs(diff)/np.sqrt(self.edata[i][j])
                
                try: dist *= self.NLPdata['state_weights'][i]
                except KeyError: pass
                    
                objlist += [dist]

        # Minimization Objective
        if self.minimize_f:
            # Function integral
            f_integral = 0
            # For each finite element
            for i in xrange(self.nk):
                # For each collocation point
                fvals = []
                for j in xrange(self.deg+1):
                    fvals += [self.minimize_f(XD[i][0][j], P)]
                # integrate with weights
                f_integral += sum([fvals[k] * self.E[k] for k in
                                   xrange(self.deg+1)])

            objlist += [self.NLPdata['f_minimize_weight']*f_integral]



        # Stability Objective (Floquet Multipliers)
        if self.monodromy:
            s_final = XD[-1,-1,-1][-self.neq**2:]
            s_final = s_final.reshape((self.neq,self.neq))
            trace = sum([s_final[i,i] for i in xrange(self.neq)])
            objlist += [self.NLPdata['stability']*(trace - 1)**2]




        # Objective function of the NLP
        obj = cs.sumAll(cs.vertcat(objlist))
        ofcn = cs.MXFunction([V], [obj])

        self.CollocationSolver = cs.IpoptSolver(ofcn,gfcn)

        for opt,val in self.IpoptOpts.iteritems():
            self.CollocationSolver.setOption(opt,val)

        self.CollocationSolver.setOption('obj_scaling_factor',
                                         len(vars_init))
        
        if warmstart:
            self.CollocationSolver.setOption('warm_start_init_point','yes')
        
        # initialize the self.CollocationSolver
        self.CollocationSolver.init()
          
        # Initial condition
        self.CollocationSolver.setInput(vars_init,cs.NLP_X_INIT)

        # Bounds on x
        self.CollocationSolver.setInput(vars_lb,cs.NLP_LBX)
        self.CollocationSolver.setInput(vars_ub,cs.NLP_UBX)

        # Bounds on g
        self.CollocationSolver.setInput(np.array(lbg),cs.NLP_LBG)
        self.CollocationSolver.setInput(np.array(ubg),cs.NLP_UBG)

        if warmstart:
            self.CollocationSolver.setInput( \
                    self.WarmStartData['NLP_X_OPT'],cs.NLP_X_INIT)
            self.CollocationSolver.setInput( \
                    self.WarmStartData['NLP_LAMBDA_G'],cs.NLP_LAMBDA_INIT)
            self.CollocationSolver.setOutput( \
                    self.WarmStartData['NLP_LAMBDA_X'],cs.NLP_LAMBDA_X)
                    
        
        pdb.set_trace()