def limit_cycle(self):
    """
    Integrate the solution for one period, remembering each of time
    points along the way.

    Populates:
        self.ts  : sampled times on [0, self.T] (lc_res points)
        self.sol : trajectory at those times (from the CVODES simulator)
        self.lc  : interpolation object built from ts/sol
    """
    self.ts = np.linspace(0, self.T, self.intoptions['lc_res'])

    # CVODES integrator over one full period, tolerances from intoptions.
    intlc = cs.Integrator('cvodes', self.model)
    intlc.setOption("abstol", self.intoptions['lc_abstol'])
    intlc.setOption("reltol", self.intoptions['lc_reltol'])
    intlc.setOption("max_num_steps", self.intoptions['lc_maxnumsteps'])
    intlc.setOption("tf", self.T)

    intsim = cs.Simulator(intlc, self.ts)
    intsim.init()

    # Input Arguments
    intsim.setInput(self.y0, cs.INTEGRATOR_X0)
    intsim.setInput(self.param, cs.INTEGRATOR_P)
    intsim.evaluate()
    self.sol = intsim.output().toArray().T

    # create interpolation object
    self.lc = self.interp_sol(self.ts, self.sol.T)
def int_odes(self, tf, y0=None, numsteps=10000, return_endpt=False, ts=0):
    """
    Integrate the model ODEs from t=ts to t=tf using CasADi's CVODES
    wrapper (C++ wrapped in swig).

    Parameters
    ----------
    tf : float
        Final time of the integration.
    y0 : array-like, optional
        Initial state vector; defaults to self.y0.
    numsteps : int, optional
        Number of evenly spaced output points on [ts, tf].
    return_endpt : bool, optional
        If True, return only the final state instead of the whole
        trajectory.
    ts : float, optional
        Start time of the integration (default 0).

    Returns
    -------
    numpy.ndarray
        Full solution, shape (numsteps, nstates) — or just the final
        state vector when return_endpt is True.

    Side effects: stores the integrator, simulator, and time grid on
    self.integrator / self.simulator / self.ts.
    """
    if y0 is None:
        y0 = self.y0

    self.integrator = cs.Integrator('cvodes', self.model)

    # Set up the tolerances etc.
    self.integrator.setOption("abstol", self.intoptions['int_abstol'])
    self.integrator.setOption("reltol", self.intoptions['int_reltol'])
    self.integrator.setOption("max_num_steps",
                              self.intoptions['int_maxstepcount'])
    self.integrator.setOption("tf", tf)

    # Let's integrate
    self.integrator.init()
    self.ts = np.linspace(ts, tf, numsteps, endpoint=True)
    self.simulator = cs.Simulator(self.integrator, self.ts)
    self.simulator.init()
    self.simulator.setInput(y0, cs.INTEGRATOR_X0)
    self.simulator.setInput(self.param, cs.INTEGRATOR_P)
    self.simulator.evaluate()

    sol = self.simulator.output().toArray().T

    # Idiomatic truthiness test instead of the original `== True`.
    if return_endpt:
        return sol[-1]
    return sol
def phase_of_point(self, point, error=False, tol=1E-3):
    """
    Finds the phase at which the distance from the point to the limit
    cycle is minimized. phi=0 corresponds to the definition of y0,
    returns the phase and the minimum distance to the limit cycle.

    Strategy: augment the state with the squared distance to `point`,
    locate the distance minima over one period via a BVP solve, and —
    if the point is not yet within `tol` of the cycle — advance the
    point by one full period and retry (up to 100 cycles).

    Raises RuntimeError if the point never converges to the cycle.
    NOTE(review): the `error` argument is unused in this body.
    """
    point = np.asarray(point)

    # set up integrator so we only have to once...
    # (used at the bottom of the loop to advance `point` by one period)
    intr = cs.Integrator('cvodes', self.model)
    intr.setOption("abstol", self.intoptions['bvp_abstol'])
    intr.setOption("reltol", self.intoptions['bvp_reltol'])
    intr.setOption("tf", self.T)
    intr.setOption("max_num_steps", self.intoptions['transmaxnumsteps'])
    intr.setOption("disable_internal_warnings", True)
    intr.init()

    for i in xrange(100):
        # d/dt of ||x - point||^2 follows from the chain rule:
        # 2 (x - point) . dx/dt
        dist = cs.SX.sym("dist")
        x = self.model.inputExpr(cs.DAE_X)
        ode = self.model.outputExpr()[0]
        dist_ode = cs.sumAll(2.*(x - point)*ode)

        # Augmented state [x; dist] and its ODE.
        cat_x = cs.vertcat([x, dist])
        cat_ode = cs.vertcat([ode, dist_ode])

        dist_model = cs.SXFunction(
            cs.daeIn(t=self.model.inputExpr(cs.DAE_T), x=cat_x,
                     p=self.model.inputExpr(cs.DAE_P)),
            cs.daeOut(ode=cat_ode))
        dist_model.setOption("name", "distance model")

        dist_0 = ((self.y0 - point)**2).sum()
        if dist_0 < tol:
            # catch the case where we start at 0
            return 0.
        cat_y0 = np.hstack([self.y0, dist_0])

        # Solve the augmented system's BVP and locate distance minima
        # over one period.
        roots_class = Oscillator(dist_model, self.param, cat_y0)
        roots_class.intoptions = self.intoptions
        #return roots_class
        roots_class.solve_bvp()
        roots_class.limit_cycle()
        roots_class.roots()

        phases = self._t_to_phi(roots_class.tmin[-1])
        distances = roots_class.ymin[-1]
        distance = np.min(distances)

        if distance < tol:
            phase_ind = np.argmin(distances)  # for multiple minima
            return phases[phase_ind]#, roots_class

        # Not close enough yet: integrate the point forward one full
        # period (it relaxes toward the cycle) and try again.
        intr.setInput(point, cs.INTEGRATOR_X0)
        intr.setInput(self.param, cs.INTEGRATOR_P)
        intr.evaluate()
        point = intr.output().toArray().flatten()  # advance by one cycle

    raise RuntimeError("Point failed to converge to limit cycle")
def average(self):
    """
    Integrate the solution with quadrature over one period to find the
    average species concentration.

    Populates self.avg (time average), self.rms (root-mean-square), and
    self.std (standard deviation) for each state.
    """
    model_inputs = self.model.inputExpr()
    model_outputs = self.model.outputExpr()
    states = model_inputs[cs.DAE_X]

    # Quadrature states: running integrals of x and x**2.
    quad_expr = cs.vertcat([states, states**2])
    quad_model = cs.SXFunction(
        model_inputs, cs.daeOut(ode=model_outputs[0], quad=quad_expr))

    quad_int = cs.Integrator('cvodes', quad_model)
    quad_int.setOption("abstol", self.intoptions['lc_abstol'])
    quad_int.setOption("reltol", self.intoptions['lc_reltol'])
    quad_int.setOption("max_num_steps", self.intoptions['lc_maxnumsteps'])
    quad_int.setOption("tf", self.T)
    quad_int.init()
    quad_int.setInput(self.y0, cs.INTEGRATOR_X0)
    quad_int.setInput(self.param, cs.INTEGRATOR_P)
    quad_int.evaluate()

    integrals = quad_int.output(cs.INTEGRATOR_QF).toArray().squeeze()

    # Time-averages of x and x**2 give mean, RMS, and std deviation.
    self.avg = integrals[:self.neq]/self.T
    self.rms = np.sqrt(integrals[self.neq:]/self.T)
    self.std = np.sqrt(self.rms**2 - self.avg**2)
def solve_ode(self):
    """
    Solve the ODE using casadi's CVODES wrapper to ensure that the
    collocated dynamics match the error-controlled dynamics of the ODE.

    Stores the simulated trajectory in self.sol_sim and issues a
    warning when it deviates from the collocation solution self.sol.
    """
    self.ts.sort()  # Assert ts is increasing

    f_integrator = cs.SXFunction(
        'ode', cs.daeIn(t=self.dxdt.inputExpr(0),
                        x=self.dxdt.inputExpr(1),
                        p=self.dxdt.inputExpr(2)),
        cs.daeOut(ode=self.dxdt.outputExpr(0)))

    integrator = cs.Integrator('int', 'cvodes', f_integrator)
    simulator = cs.Simulator('sim', integrator, self.ts)
    simulator.setInput(self.sol[0], 'x0')
    simulator.setInput(self.var.p_op, 'p')
    simulator.evaluate()

    x_sim = self.sol_sim = np.array(simulator.getOutput()).T

    # Bug fix: the original averaged SIGNED per-state relative errors,
    # so positive and negative deviations could cancel and a genuine
    # mismatch would evade the threshold. Take the absolute value of
    # each state's relative error before averaging.
    err = np.abs((self.sol - x_sim).mean(0) / self.sol.mean(0)).mean()
    if err > 1E-3:
        warn('Collocation does not match ODE Solution: '
             '{:.2%} Error'.format(err))
def solve_bvp_scipy(self, root_method='hybr'): """ Use a scipy optimize function to optimize the BVP function """ # Make sure inputs are the correct format paramset = list(self.param) # Here we create and initialize the integrator SXFunction self.bvpint = cs.Integrator('cvodes', self.modlT) self.bvpint.setOption('abstol', self.intoptions['bvp_abstol']) self.bvpint.setOption('reltol', self.intoptions['bvp_reltol']) self.bvpint.setOption('tf', 1) self.bvpint.setOption('disable_internal_warnings', True) self.bvpint.setOption('fsens_err_con', True) self.bvpint.init() def bvp_minimize_function(x): """ Minimization objective. X = [y0,T] """ # perhaps penalize in try/catch? if all( [self.intoptions['constraints'] == 'positive', np.any(x < 0)]): return np.ones(len(x)) self.bvpint.setInput(x[:-1], cs.INTEGRATOR_X0) self.bvpint.setInput(paramset + [x[-1]], cs.INTEGRATOR_P) self.bvpint.evaluate() out = x[:-1] - self.bvpint.output().toArray().flatten() out = out.tolist() self.modlT.setInput(x[:-1], cs.DAE_X) self.modlT.setInput(paramset + [x[-1]], 2) self.modlT.evaluate() out += self.modlT.output()[0].toArray()[0].tolist() return np.array(out) from scipy.optimize import root options = {} root_out = root(bvp_minimize_function, np.append(self.y0, self.T), tol=self.intoptions['bvp_ftol'], method=root_method, options=options) # Check solve success if not root_out.status: raise RuntimeError("bvpsolve: " + root_out.message) # Check output convergence if np.linalg.norm(root_out.qtf) > self.intoptions['bvp_ftol'] * 1E4: raise RuntimeError("bvpsolve: nonconvergent") # save output to self.y0 self.y0 = root_out.x[:-1] self.T = root_out.x[-1]
def _initialize_polynomial_coefs(self):
    """
    Setup radau polynomials and initialize the weight factor matricies.

    Fills self.col_vars with:
        'tau_root' : Radau collocation points on [0, 1]
        'lfcn'     : Lagrange basis evaluated at a scalar tau
        'D'        : basis values at tau=1 (continuity coefficients)
        'C'        : basis time-derivatives at the collocation points
        'Phi'      : quadrature weights (integral of each basis poly)
    """
    self.col_vars['tau_root'] = cs.collocationPoints(self.d, "radau")

    # Dimensionless time inside one control interval
    tau = cs.SX.sym("tau")

    # For all collocation points
    L = [[]]*(self.d+1)
    for j in range(self.d+1):
        # Construct Lagrange polynomials to get the polynomial basis at
        # the collocation point
        L[j] = 1
        for r in range(self.d+1):
            if r != j:
                L[j] *= (
                    (tau - self.col_vars['tau_root'][r]) /
                    (self.col_vars['tau_root'][j] -
                     self.col_vars['tau_root'][r]))

    self.col_vars['lfcn'] = lfcn = cs.SXFunction(
        'lfcn', [tau], [cs.vertcat(L)])

    # Evaluate the polynomial at the final time to get the coefficients
    # of the continuity equation
    self.col_vars['D'] = lfcn([1.0])[0].toArray().squeeze()

    # Evaluate the time derivative of the polynomial at all collocation
    # points to get the coefficients of the continuity equation
    tfcn = lfcn.tangent()

    # Coefficients of the collocation equation
    self.col_vars['C'] = np.zeros((self.d+1, self.d+1))
    for r in range(self.d+1):
        self.col_vars['C'][:,r] = tfcn(
            [self.col_vars['tau_root'][r]])[0].toArray().squeeze()

    # Find weights for gaussian quadrature: approximate int_0^1 f(x) by
    # Sum( (original comment truncated in source) — each Phi[j] is the
    # integral of basis polynomial L[j] over [0, 1], computed by
    # integrating dx/dt = L[j](t) from x(0)=0 to t=1.
    xtau = cs.SX.sym("xtau")

    Phi = [[]]*(self.d+1)
    for j in range(self.d+1):
        tau_f_integrator = cs.SXFunction('ode', cs.daeIn(t=tau, x=xtau),
                                         cs.daeOut(ode=L[j]))
        tau_integrator = cs.Integrator(
            "integrator", "cvodes", tau_f_integrator, {'t0':0., 'tf':1})
        Phi[j] = np.asarray(tau_integrator({'x0' : 0})['xf'])[0][0]

    self.col_vars['Phi'] = np.array(Phi)
def solve_bvp_casadi(self):
    """
    Uses casadi's interface to sundials to solve the boundary value
    problem using a single-shooting method with automatic differen-
    tiation.

    Related to PCSJ code. Solves for [y0, T] via KINSOL on the
    residual [ (y(T) - y0)^2 ; dy0[0]/dt ] and updates self.y0 and
    self.T in place.
    """
    # Integrator over scaled time [0, 1]; T enters as the last
    # parameter of self.modlT.
    self.bvpint = cs.Integrator('cvodes', self.modlT)
    self.bvpint.setOption('abstol', self.intoptions['bvp_abstol'])
    self.bvpint.setOption('reltol', self.intoptions['bvp_reltol'])
    self.bvpint.setOption('tf', 1)
    self.bvpint.setOption('disable_internal_warnings', True)
    self.bvpint.setOption('fsens_err_con', True)
    self.bvpint.init()

    # Vector of unknowns [y0, T]
    V = cs.MX.sym("V", self.neq + 1)
    y0 = V[:-1]
    T = V[-1]
    param = cs.vertcat([self.param, T])

    yf = self.bvpint.call(cs.integratorIn(x0=y0, p=param))[0]
    fout = self.modlT.call(cs.daeIn(t=T, x=y0, p=param))[0]

    # objective: continuity
    obj = (yf - y0)**2  # yf and y0 are the same ..i.e. 2 ends of periodic fcn
    obj.append(fout[0])  # y0 is a peak for state 0, i.e. fout[0] is slope state 0

    # set up the matrix we want to solve
    F = cs.MXFunction([V], [obj])
    F.init()
    guess = np.append(self.y0, self.T)

    solver = cs.ImplicitFunction('kinsol', F)
    solver.setOption('abstol', self.intoptions['bvp_ftol'])
    solver.setOption('strategy', 'linesearch')
    solver.setOption('exact_jacobian', False)
    solver.setOption('pretype', 'both')
    solver.setOption('use_preconditioner', True)
    if self.intoptions['constraints'] == 'positive':
        # constraint code 2 = all unknowns kept non-negative
        solver.setOption('constraints', (2, )*(self.neq + 1))
    solver.setOption('linear_solver_type', 'dense')
    solver.init()
    solver.setInput(guess)
    solver.evaluate()

    sol = solver.output().toArray().squeeze()

    self.y0 = sol[:-1]
    self.T = sol[-1]
def first_order_sensitivity(self):
    """
    Function to calculate the first order period sensitivity matricies
    using the direct method. See Wilkins et al. 2009. Only calculates
    initial conditions and period sensitivities.

    Populates self.S0 (initial-condition sensitivities), self.dTdp
    (period sensitivities), and self.reldTdp (relative period
    sensitivities).
    """
    self.check_monodromy()
    monodromy = self.monodromy

    integrator = cs.Integrator('cvodes', self.model)
    integrator.setOption("abstol", self.intoptions['sensabstol'])
    integrator.setOption("reltol", self.intoptions['sensreltol'])
    integrator.setOption("max_num_steps",
                         self.intoptions['sensmaxnumsteps'])
    integrator.setOption("sensitivity_method",
                         self.intoptions['sensmethod']);
    integrator.setOption("t0", 0)
    integrator.setOption("tf", self.T)
    integrator.setOption("fsens_err_con", 1)
    integrator.setOption("fsens_abstol", self.intoptions['sensabstol'])
    integrator.setOption("fsens_reltol", self.intoptions['sensreltol'])
    integrator.init()
    integrator.setInput(self.y0, cs.INTEGRATOR_X0)
    integrator.setInput(self.param, cs.INTEGRATOR_P)

    # d(y(T))/dp via the integrator's jacobian.
    intdyfdp = integrator.jacobian(cs.INTEGRATOR_P, cs.INTEGRATOR_XF)
    intdyfdp.init()
    intdyfdp.setInput(self.y0, "x0")
    intdyfdp.setInput(self.param, "p")
    intdyfdp.evaluate()
    s0 = intdyfdp.output().toArray()

    # dy/dt at y0, needed in the bordered linear system below.
    self.model.init()
    self.model.setInput(self.y0, cs.DAE_X)
    self.model.setInput(self.param, cs.DAE_P)
    self.model.evaluate()
    ydot0 = self.model.output().toArray().squeeze()

    # Bordered system: [M - I, ydot0 ; dfdy, 0] [S0 ; dTdp] = RHS,
    # solving simultaneously for state and period sensitivities.
    LHS = np.zeros([(self.neq + 1), (self.neq + 1)])
    LHS[:-1, :-1] = monodromy - np.eye(len(monodromy))
    LHS[-1, :-1] = self.dfdy(self.y0)[0]
    LHS[:-1, -1] = ydot0

    RHS = np.zeros([(self.neq + 1), self.np])
    RHS[:-1] = -s0
    RHS[-1] = self.dfdp(self.y0)[0]

    unk = np.linalg.solve(LHS, RHS)
    self.S0 = unk[:-1]
    self.dTdp = unk[-1]
    self.reldTdp = self.dTdp*self.param/self.T
def findARC_whole(self, res=100, trans=3): """ Calculate entire sARC matrix, which will be faster than calcualting for each parameter """ # Calculate necessary quantities if not hasattr(self, 'avg'): self.average() if not hasattr(self, 'sPRC'): self.find_prc(res) # Set up quadrature integrator self.sarc_int = cs.Integrator( 'cvodes', self._create_ARC_model(numstates=self.neq)) self.sarc_int.setOption("abstol", self.intoptions['sensabstol']) self.sarc_int.setOption("reltol", self.intoptions['sensreltol']) self.sarc_int.setOption("max_num_steps", self.intoptions['sensmaxnumsteps']) self.sarc_int.setOption("t0", 0) self.sarc_int.setOption("tf", trans * self.T) #self.sarc_int.setOption("numeric_jacobian", True) self.sarc_int.init() self.arc_ts = np.linspace(0, self.T, res) amp_change = [] for t in self.arc_ts: # Initialize model and sensitivity states x0 = np.zeros(self.neq * (self.neq + 1)) x0[:self.neq] = self.lc(t) x0[self.neq:] = np.eye(self.neq).flatten() # Add dphi/dt from seed perturbation param = np.zeros(self.np + self.neq) param[:self.np] = self.param param[self.np:] = self.sPRC_interp(t) # Evaluate model self.sarc_int.setInput(x0, cs.INTEGRATOR_X0) self.sarc_int.setInput(param, cs.INTEGRATOR_P) self.sarc_int.evaluate() out = self.sarc_int.output(cs.INTEGRATOR_QF).toArray() # amp_change += [out] amp_change += [out * 2 * np.pi / self.T] #[time, state_out, state_in] self.sARC = np.array(amp_change) dfdp = np.array([self.dfdp(self.lc(t)) for t in self.arc_ts]) self.pARC = np.array([ self.sARC[i].dot(self._phi_to_t(dfdp[i])) for i in xrange(len(self.sARC)) ]) self.rel_pARC = (np.array(self.param) * self.pARC / np.atleast_2d(self.avg).T)
def fwd_simulation(self, dE_start, condition, detail=True,
                   reltol=1e-6, abstol=1e-8):
    """
    Forward-simulate the kinetic DAE at the condition's fixed
    temperature, then estimate a turnover rate for each gas-phase
    species by a linear fit over the second half of the time grid.

    Returns (out, tor_list): the full trajectory array and a dict
    mapping gas-species names to the fitted slopes.

    NOTE(review): the `detail` argument is unused in this body.
    """
    Tem = condition.Temperature
    time = condition.TimeGrid

    # CVODES options for the forward integration.
    opts = {}
    opts['abstol'] = abstol
    opts['reltol'] = reltol
    opts['disable_internal_warnings'] = True
    opts['max_num_steps'] = 1e5

    P_dae = np.hstack([dE_start, Tem])

    # Partial Pressure
    Pinlet = np.zeros(self.ngas)
    for idx, spe in enumerate(self.specieslist):
        if spe.phase == 'gaseous':
            Pinlet[idx] = condition.PartialPressure[str(spe)]
    # Initial state: inlet pressures, empty surface sites, and a free
    # site fraction of 1 in the last slot — presumably; confirm
    # against the state ordering of self._dae_.
    x0 = Pinlet.tolist() + [0]*(self.nsurf - 1) + [1]
    # print(x0)

    Fint = cas.Integrator('Fint', 'cvodes', self._dae_, opts)
    Fsim = cas.Simulator('Fsim', Fint, time)
    Fsim.setInput(x0, 'x0')
    Fsim.setInput(P_dae, 'p')
    Fsim.evaluate()
    # Evalu
    out = Fsim.getOutput().full()

    # Fit the late-time (second half) trajectory of each gas species;
    # the slope is reported as its turnover rate.
    tor_list = {}
    for i, spe in enumerate(self.specieslist):
        if spe.phase == 'gaseous':
            nt = int(condition.Ntime / 2)
            slope, intercept, r_value, p_value, std_err = stats.linregress(
                condition.TimeGrid[nt:], out[i, nt:])
            tor_list[spe.name] = slope
    return out, tor_list
def check_monodromy(self):
    """
    Check the stability of the limit cycle by finding the eigenvalues
    of the monodromy matrix.

    Stores the matrix in self.monodromy and its eigenvalue magnitudes
    in self.floquet_multipliers; returns True when every multiplier
    other than the trivial unit one lies inside the unit circle.
    """
    intg = cs.Integrator('cvodes', self.model)
    intg.setOption("abstol", self.intoptions['sensabstol'])
    intg.setOption("reltol", self.intoptions['sensreltol'])
    intg.setOption("max_num_steps", self.intoptions['int_maxstepcount'])
    intg.setOption("sensitivity_method", self.intoptions['sensmethod'])
    intg.setOption("t0", 0)
    intg.setOption("tf", self.T)
    intg.setOption("fsens_err_con", 1)
    intg.setOption("fsens_abstol", self.intoptions['sensabstol'])
    intg.setOption("fsens_reltol", self.intoptions['sensreltol'])
    intg.init()
    intg.setInput(self.y0, cs.INTEGRATOR_X0)
    intg.setInput(self.param, cs.INTEGRATOR_P)

    # Monodromy matrix: d(y(T))/d(y0) around one full period.
    jac_y0 = intg.jacobian(cs.INTEGRATOR_X0, cs.INTEGRATOR_XF)
    jac_y0.init()
    jac_y0.setInput(self.y0, "x0")
    jac_y0.setInput(self.param, "p")
    jac_y0.evaluate()
    self.monodromy = jac_y0.output().toArray()

    # Floquet multipliers are the eigenvalue magnitudes; one of them
    # is always ~1 for an autonomous limit cycle.
    multipliers = np.abs(np.linalg.eigvals(self.monodromy))
    self.floquet_multipliers = multipliers

    # Drop the multiplier closest to 1 and require the rest to be
    # strictly inside the unit circle.
    unit_idx = np.abs(multipliers - 1.0).argmin()
    others = [m for i, m in enumerate(multipliers) if i != unit_idx]
    return np.all(np.array(others) < 1)
def _findARC_seed(self, seeds, res=100, trans=3):
    """
    Compute amplitude response curves for the given seed
    perturbations, one seed per phase point.

    Returns (t_arc, arc): the phase-point times and the squeezed array
    of single-time ARC evaluations.
    """
    # Calculate necessary quantities
    if not hasattr(self, 'avg'):
        self.average()
    if not hasattr(self, 'sPRC'):
        self.find_prc(res)

    # Set up quadrature integrator
    self.sarc_int = cs.Integrator('cvodes', self._create_ARC_model())
    self.sarc_int.setOption("abstol", self.intoptions['sensabstol'])
    self.sarc_int.setOption("reltol", self.intoptions['sensreltol'])
    self.sarc_int.setOption("max_num_steps",
                            self.intoptions['sensmaxnumsteps'])
    self.sarc_int.setOption("t0", 0)
    self.sarc_int.setOption("tf", trans*self.T)
    #self.sarc_int.setOption("numeric_jacobian", True)
    self.sarc_int.init()

    # NOTE(review): the sibling findARC_whole spans [0, self.T] here;
    # self.yT looks like it may be a typo for self.T — confirm.
    t_arc = np.linspace(0, self.yT, res)
    arc = np.array([self._sarc_single_time(t, seed)
                    for t, seed in zip(t_arc, seeds)]).squeeze()

    return t_arc, arc
def fwd_simulation(self, dE_start, condition, detail=True,
                   reltol=1e-6, abstol=1e-8):
    """
    Forward-simulate the kinetic DAE over a temperature ramp
    (T = T0 + beta*t per the condition object) and return the full
    trajectory with gas-phase rows scaled by the pump ratio.

    NOTE(review): the `detail` argument is unused in this body.
    """
    # reevaluate the reaction condition
    condition._calSim()
    condition._calGrid()

    time = condition.TimeGrid
    T0 = condition.T0
    beta = condition.Beta

    # CVODES options for the forward integration.
    opts = {}
    opts['abstol'] = abstol
    opts['reltol'] = reltol
    opts['disable_internal_warnings'] = True
    opts['max_num_steps'] = 1e5

    x0 = self.init_condition(condition)
    P_dae = np.hstack([dE_start, T0, beta])
    # print(x0)
    # print(P_dae)
    # print(time)

    # opts['tf'] = 2
    # Fint = cas.Integrator('Fint', 'cvodes', self._dae_, opts)
    # F_sim = Fint(x0=x0, p=P_dae)
    Fint = cas.Integrator('Fint', 'cvodes', self._dae_, opts)
    Fsim = cas.Simulator('Fsim', Fint, time)
    Fsim.setInput(x0, 'x0')
    Fsim.setInput(P_dae, 'p')
    Fsim.evaluate()
    # Evaluate
    out = Fsim.getOutput().full()

    # Scale gas-phase rows by the pumping ratio (in place).
    out[:self.ngas, :] *= self.pump_ratio
    return out
def eval_likeli(self, dE, conditionlist, evidence_info={}):
    """
    Evaluate a (negative squared-deviation) log-likelihood for the
    energy parameters `dE` against experimental desorption peak
    positions across all conditions.

    For each condition, simulate the ramp, find the simulated peak
    temperature for each measured species, and accumulate the squared
    peak-position deviation scaled by `peak_err`**2. Returns the
    negated sum (higher is better).

    NOTE(review): mutable default `evidence_info={}` is shared across
    calls; it is only read here, but confirm callers never mutate it.
    """
    reltol = evidence_info.get('reltol', 1e-12)
    abstol = evidence_info.get('abstol', 1e-12)
    err = evidence_info.get('peak_err', 10)

    # CVODES options shared by every condition's simulation.
    opts = {}
    opts['abstol'] = abstol
    opts['reltol'] = reltol
    opts['disable_internal_warnings'] = True
    opts['max_num_steps'] = 1e5

    # Initialize simulator
    evidence = 0
    for condition in conditionlist:
        time = condition.TimeGrid
        T0 = condition.T0
        beta = condition.Beta
        x0 = self.init_condition(condition)
        P_dae = np.hstack([dE, T0, beta])

        Fint = cas.Integrator('Fint', 'cvodes', self._dae_, opts)
        Fsim = cas.Simulator('Fsim', Fint, time)
        Fsim.setInput(x0, 'x0')
        Fsim.setInput(P_dae, 'p')
        Fsim.evaluate()
        out = Fsim.getOutput().full()

        # Find the peak
        for spe, peak_exp in condition.PeakPosition.items():
            idx = get_index_species(spe, self.specieslist)
            des = out[idx, :]
            idx_peak = np.argmax(des)
            # Convert the peak's time index to a temperature via the
            # condition's temperature grid.
            peak_sim = condition.TemGrid[idx_peak]
            dev = peak_sim - peak_exp
            evidence += (dev*dev)/err**2

    return -evidence
def solve_ode(self):
    """
    Solve the ODE using casadi's CVODES wrapper to ensure that the
    collocated dynamics match the error-controlled dynamics of the ODE.

    Stores the simulated trajectory in self._output['x_sim'] and warns
    when it deviates from the collocation solution.
    """
    f_integrator = cs.SXFunction(
        'ode', cs.daeIn(t=self.model.inputExpr(0),
                        x=self.model.inputExpr(1),
                        p=self.model.inputExpr(2)),
        cs.daeOut(ode=self.model.outputExpr(0)))

    integrator = cs.Integrator('int', 'cvodes', f_integrator)
    simulator = cs.Simulator('sim', integrator, self._tgrid)
    simulator.setInput(self._output['x_opt'][0], 'x0')
    simulator.setInput(self._output['p_opt'], 'p')
    simulator.evaluate()

    x_sim = self._output['x_sim'] = np.array(simulator.getOutput()).T

    # Bug fix: the original averaged SIGNED per-state relative errors,
    # letting positive and negative deviations cancel so that a real
    # mismatch could slip under the threshold. Take the absolute value
    # of each state's relative error before averaging.
    err = np.abs((self._output['x_opt'] - x_sim).mean(0) /
                 self._output['x_opt'].mean(0)).mean()
    if err > 1E-3:
        warn('Collocation does not match ODE Solution: '
             '{:.2f}% Error'.format(100 * err))
def test_X(self):
    self.message("Extensive integrator tests")

    num = self.num
    tstart = SX.sym("tstart")
    # NOTE(review): the symbol below is *named* "tstart" as well; only
    # the Python binding differs. Harmless here because substitute()
    # operates on the symbol objects, not their names, but confirm.
    tend = SX.sym("tstart")

    for Integrator, features, options in integrators:
        self.message(Integrator)

        def variations(p_features, din, dout, rdin, rdout, *args):
            # For each ODE test case, also emit an equivalent DAE
            # variant by adding an algebraic state z that relaxes to
            # the original ode right-hand side.
            if "ode" in p_features:
                p_features_ = copy.copy(p_features)
                p_features_[p_features.index("ode")] = "dae"
                din_ = copy.copy(din)
                dout_ = copy.copy(dout)
                rdin_ = copy.copy(rdin)
                rdout_ = copy.copy(rdout)
                z = SX.sym("x", din_["x"].shape)
                din_["z"] = z
                dout_["ode"] = z
                dout_["alg"] = ( dout["ode"] - z) * (-0.8)
                if len(rdin_)>0:
                    rz = SX.sym("rx", rdin_["rx"].shape)
                    rdin_["rz"] = rz
                    rdin_["z"] = z
                    rdout_["ode"] = rz
                    rdout_["alg"] = ( rdout["ode"] - rz) * (-0.7)

                yield (p_features, din, dout, rdin, rdout) + tuple(args)
                yield (p_features_, din_, dout_, rdin_, rdout_) + tuple(args)
            else:
                yield (p_features, din, dout, rdin, rdout) + tuple(args)

        def checks():
            # Each yield is one test case: (features, dae-in, dae-out,
            # backward dae-in, backward dae-out, solution inputs,
            # analytic solution, numeric point, (t0, tf)).
            x0 = num['q0']
            p_ = num['p']
            rx0_ = 0.13
            t = SX.sym("t")
            x = SX.sym("x")
            rx = SX.sym("rx")
            p = SX.sym("p")
            dp = SX.sym("dp")
            z = SX.sym("z")
            rz = SX.sym("rz")
            rp = SX.sym("rp")
            si = {'x0':x, 'p': p, 'rx0': rx,'rp' : rp}
            pointA = {'x0':x0,'p': p_, 'rx0': rx0_, 'rp': 0.127}
            ti = (0.2,num['tend'])

            # Forward states against closed-form solutions.
            yield (["ode"],{'x':x},{'ode': 0},{},{},si,{'xf':x},pointA,ti)
            yield (["ode"],{'x':x},{'ode': 1},{},{},si,{'xf':x+(tend-tstart)},pointA,ti)
            yield (["ode"],{'x':x},{'ode': x},{},{},si,{'xf':x*exp(tend-tstart)},pointA,ti)
            yield (["ode"],{'x':x,'t':t},{'ode': t},{},{},si,{'xf':x+(tend**2/2-tstart**2/2)},pointA,ti)
            yield (["ode"],{'x':x,'t':t},{'ode': x*t},{},{},si,{'xf':x*exp(tend**2/2-tstart**2/2)},pointA,ti)
            yield (["ode"],{'x':x,'p':p},{'ode': x/p},{},{},si,{'xf':x*exp((tend-tstart)/p)},pointA,ti)
            if not(args.run_slow): return

            # Forward quadratures.
            yield (["ode"],{'x':x},{'ode': x,'quad':0},{},{},si,{'qf':0},pointA,ti)
            yield (["ode"],{'x':x},{'ode': x,'quad':1},{},{},si,{'qf':(tend-tstart)},pointA,ti)
            yield (["ode"],{'x':x},{'ode': 0,'quad':x},{},{},si,{'qf':x*(tend-tstart)},pointA,ti)
            #yield ({'x':x},{'ode': 1,'quad':x},{'qf':(x-tstart)*(tend-tstart)+(tend**2/2-tstart**2/2)}), # bug in cvodes quad_err_con
            yield (["ode"],{'x':x},{'ode': x,'quad':x},{},{},si,{'qf':x*(exp(tend-tstart)-1)},pointA,ti)
            yield (["ode"],{'x':x,'t':t},{'ode': x,'quad':t},{},{},si,{'qf':(tend**2/2-tstart**2/2)},pointA,ti)
            yield (["ode"],{'x':x,'t':t},{'ode': x,'quad':x*t},{},{},si,{'qf':x*(exp(tend-tstart)*(tend-1)-(tstart-1))},pointA,ti)
            yield (["ode"],{'x':x,'p':p},{'ode': x,'quad':x/p},{},{},si,{'qf':x*(exp((tend-tstart))-1)/p},pointA,ti)

            # Backward (adjoint) states.
            yield (["ode"],{'x':x},{'ode':x},{'x':x,'rx':rx},{'ode':0},si,{'rxf': rx},pointA,ti)
            yield (["ode"],{'x':x},{'ode':x},{'x':x,'rx':rx},{'ode':1},si,{'rxf': rx+tend-tstart},pointA,ti)
            yield (["ode"],{'x':x,'t':t},{'ode':x},{'x':x,'rx':rx,'t':t},{'ode':t},si,{'rxf': rx+tend**2/2-tstart**2/2},pointA,ti)
            yield (["ode"],{'x':x},{'ode':x},{'x':x,'rx':rx},{'ode':rx},si,{'rxf': rx*exp(tend-tstart)},pointA,ti)
            yield (["ode"],{'x':x},{'ode':x},{'x':x,'rx':rx},{'ode':x},si,{'rxf': rx+x*(exp(tend-tstart)-1)},pointA,ti)
            yield (["ode"],{'x':x,'t':t},{'ode':x},{'x':x,'rx':rx,'t':t},{'ode':x*t},si,{'rxf': rx+x*(exp(tend-tstart)*(tend-1)-(tstart-1))},pointA,ti)
            yield (["ode"],{'x':x,'t':t},{'ode':x},{'x':x,'rx':rx,'t':t},{'ode':rx*t},si,{'rxf': rx*exp(tend**2/2-tstart**2/2)},pointA,ti)

            # Backward quadratures.
            yield (["ode"],{'x':x},{'ode':x},{'x':x,'rx':rx},{'ode':rx, 'quad': 0},si,{'rqf': 0},pointA,ti)
            yield (["ode"],{'x':x},{'ode':x},{'x':x,'rx':rx},{'ode':rx, 'quad': 1},si,{'rqf': (tend-tstart)},pointA,ti)
            yield (["ode"],{'x':x},{'ode':x},{'x':x,'rx':rx},{'ode':rx, 'quad': rx},si,{'rqf': rx*(exp(tend-tstart)-1)},pointA,ti)
            yield (["ode"],{'x':x},{'ode':x},{'x':x,'rx':rx},{'ode':rx, 'quad': x},si,{'rqf': x*(exp(tend-tstart)-1)},pointA,ti)
            yield (["ode"],{'x':x,'t':t},{'ode':x},{'x':x,'rx':rx,'t':t},{'ode':rx, 'quad': t},si,{'rqf': (tend**2/2-tstart**2/2)},pointA,ti)
            yield (["ode"],{'x':x,'t':t},{'ode':x},{'x':x,'rx':rx,'t':t},{'ode':rx, 'quad': x*t},si,{'rqf': x*(exp(tend-tstart)*(tend-1)-(tstart-1))},pointA,ti)
            yield (["ode"],{'x':x,'t':t},{'ode':x},{'x':x,'rx':rx,'t':t},{'ode':rx, 'quad': rx*t},si,{'rqf': rx*(exp(tend-tstart)*(tstart+1)-(tend+1))},pointA,ti) # this one is special: integrate(t*rx*exp(tf-t),t,t0,tf)
            yield (["ode"],{'x':x,'p':p},{'ode':x},{'x':x,'rx':rx,'p':p},{'ode':rx, 'quad': p},si,{'rqf': p*(tend-tstart)},pointA,ti)
            yield (["ode"],{'x':x,'p':p},{'ode':x},{'x':x,'rx':rx,'p':p,'rp':rp},{'ode':rx, 'quad': rp},si,{'rqf': rp*(tend-tstart)},pointA,ti)
            yield (["ode"],{'x':x,'t':t},{'ode':x},{'x':x,'rx':rx,'t':t},{'ode':rx*t},si,{'rxf': rx*exp(tend**2/2-tstart**2/2)},pointA,ti)
            yield (["ode"],{'x':x,'t':t},{'ode':x},{'x':x,'rx':rx,'t':t},{'ode':x*t},si,{'rxf': rx+x*(exp(tend-tstart)*(tend-1)-(tstart-1))},pointA,ti)

            # Explicit DAE cases.
            yield (["dae"],{'x':x,'z':z},{'ode':z,'alg': -0.8*(z-x),'quad': z},{},{},si,{'qf':x*(exp(tend-tstart)-1)},pointA,ti)
            yield (["dae"],{'x':x,'z':z},{'ode':z,'alg': -0.8*(z-x)},{'x':x,'rx':rx,'rz': rz,'z':z},{'ode':rz, 'alg': -0.7*(rz-rx), 'quad': rz},si,{'rqf': rx*(exp(tend-tstart)-1)},pointA,ti)
            yield (["dae"],{'x':x,'z':z},{'ode':z,'alg': -0.8*(z-x)},{'x':x,'rx':rx,'rz': rz,'z':z},{'ode':rz, 'alg': -0.7*(rz-rx), 'quad': z},si,{'rqf': x*(exp(tend-tstart)-1)},pointA,ti)

            # Nonlinear 2-state system with a closed-form solution.
            A = array([1,0.1])
            p0 = 1.13
            q = SX.sym("y",2,1)
            y0 = q[0]
            yc0 = dy0 = q[1]
            p = SX.sym("p",1,1)
            s1 = (2*y0-log(yc0**2/p+1))/2-log(cos(arctan(yc0/sqrt(p))+sqrt(p)*(tend-tstart)))
            s2 = sqrt(p)*tan(arctan(yc0/sqrt(p))+sqrt(p)*(tend-tstart))
            yield (["ode"],{'x':q,'p':p},{'ode': vertcat([q[1],p[0]+q[1]**2 ])},{},{},{'x0':q, 'p': p} ,{'xf': vertcat([s1,s2])},{'x0': A, 'p': p0},(0,0.4) )

        for tt in checks():
            print tt
            for p_features, din, dout, rdin, rdout, solutionin, solution, point, (tstart_, tend_) in variations(*tt):
                if p_features[0] in features:
                    message = "%s: %s => %s, %s => %s, explicit (%s) tstart = %f" % (Integrator,str(din),str(dout),str(rdin),str(rdout),str(solution),tstart_)
                    print message

                    # Build the (optional) backward DAE and forward DAE.
                    g = Function()
                    if len(rdin)>1:
                        g = SXFunction(rdaeIn(**rdin),rdaeOut(**rdout))
                        g.init()
                    f = SXFunction(daeIn(**din),daeOut(**dout))
                    f.init()

                    # Pin the symbolic start/end times in the analytic
                    # solution to this case's numeric values.
                    for k in solution.keys():
                        solution[k] = substitute(solution[k],vertcat([tstart,tend]),vertcat([tstart_,tend_]))

                    fs = SXFunction(integratorIn(**solutionin),integratorOut(**solution))
                    fs.init()

                    integrator = c.Integrator(Integrator,f,g)
                    integrator.setOption(options)
                    integrator.setOption("t0",tstart_)
                    if integrator.hasOption("abstol"):
                        integrator.setOption("abstol",1e-9)
                    if integrator.hasOption("reltol"):
                        integrator.setOption("reltol",1e-9)
                    integrator.setOption("tf",tend_)
                    if integrator.hasOption("init_xdot"):
                        integrator.setOption("init_xdot",list(DMatrix(point["x0"])))
                        integrator.setOption("calc_icB",True)
                        integrator.setOption("augmented_options", {"init_xdot":None, "abstol":1e-9,"reltol":1e-9})
                    #if "dae" in p_features and integrator.hasOption("init_z"):
                    #  integrator.setOption("init_z",[0.1])
                    #  integrator.setOption("augmented_options", {"init_z":GenericType(),"init_xdot":GenericType()})
                    integrator.init()

                    # reproduce = """
#from casadi import *
#t=SX.sym("t")
#x=SX.sym("x")
#rx=SX.sym("rx")
#p=SX.sym("p")
#dp=SX.sym("dp")
#z=SX.sym("z")
#rz=SX.sym("rz")
#rp=SX.sym("rp")
#f = SXFunction(daeIn(**{din}),daeOut(**{dout}))
#f.init()
#g = SXFunction(rdaeIn(**{rdin}),rdaeOut(**{rdout}))
#g.init()
#integrator = {intclass.__name__}(f,g)
#integrator.setOption({options})
#integrator.init()
#integrator.setInput({x0},"x0")
#if not integrator.input("p").isEmpty():
#  integrator.setInput({p_},"p")
#if not integrator.input("rx0").isEmpty():
#  integrator.setInput(0.13,"rx0")
#if not integrator.input("rp").isEmpty():
#  integrator.setInput(0.127,"rp")
#          """.format(din=din,dout=dout,rdin=rdin,rdout=rdout,x0=x0,p_=p_,intclass=Integrator,options=integrator.dictionary())
#          message+="\nTo reproduce:\n" + reproduce

                    # Feed the numeric point into both the reference
                    # solution function and the integrator.
                    for ff in [fs,integrator]:
                        for k,v in point.items():
                            i = getattr(casadi,('integrator_'+k).upper())
                            if not ff.input(i).isEmpty():
                                ff.setInput(v,i)

                    integrator.evaluate()

                    self.checkfunction(integrator,fs,gradient=False,hessian=False,sens_der=False,evals=False,digits=4,digits_sens=4,failmessage=message,verbose=False)
def test_jac(self):
    self.message("Test exact jacobian #536")
    # This test is not automized, but works by inspection only.
    # To activate, recompile after ucnommenting the printout lines in cvodes.c, near "Used for validating casadi#536"
    #return
    DMatrix.setPrecision(18)

    tstart = SX.sym("tstart")
    tend = SX.sym("tend")

    integrators = [
        ("idas",["dae","ode"],{"abstol": 1e-9,"reltol":1e-9,"fsens_err_con": True,"calc_ic":True,"calc_icB":True}),
        ("cvodes",["ode"],{"abstol": 1e-5,"reltol":1e-5,"fsens_err_con": False,"quad_err_con": False})
    ]

    def variations(p_features, din, dout, rdin, rdout, *args):
        # For each ODE case, also emit an equivalent DAE variant with
        # an added algebraic state (same construction as in test_X).
        if "ode" in p_features:
            p_features_ = copy.copy(p_features)
            p_features_[p_features.index("ode")] = "dae"
            din_ = copy.copy(din)
            dout_ = copy.copy(dout)
            rdin_ = copy.copy(rdin)
            rdout_ = copy.copy(rdout)
            z = SX.sym("x", din_["x"].shape)
            din_["z"] = z
            dout_["ode"] = z
            dout_["alg"] = ( dout["ode"] - z) * (-0.8)
            if len(rdin_)>0:
                rz = SX.sym("rx", rdin_["rx"].shape)
                rdin_["rz"] = rz
                rdin_["z"] = z
                rdout_["ode"] = rz
                rdout_["alg"] = ( rdout["ode"] - rz) * (-0.7)

            yield (p_features, din, dout, rdin, rdout) + tuple(args)
            yield (p_features_, din_, dout_, rdin_, rdout_) + tuple(args)
        else:
            yield (p_features, din, dout, rdin, rdout) + tuple(args)

    def checks():
        # Single forward/backward exponential test problem.
        Ns = 1
        x = SX.sym("x")
        rx = SX.sym("rx")
        t = SX.sym("t")
        ti = (0,0.9995)
        pointA = {'x0': 1, 'rx0': 1}
        si = {'x0':x, 'rx0': rx}
        #sol = {'rxf': 1.0/(1-tend)}
        sol = {'rxf': rx*exp(tend), 'xf': x*exp(tend)}
        yield (["ode"],{'x':x,'t':t},{'ode':x},{'x':x,'rx':rx,'t':t},{'ode': rx},si,sol,pointA,ti)

    refXF = refRXF = None

    for tt in checks():
        for p_features, din, dout, rdin, rdout, solutionin, solution, point, (tstart_, tend_) in variations(*tt):
            for Integrator, features, options in integrators:
                self.message(Integrator)
                # Dummy instance used only to query allowed option values.
                dummyIntegrator = c.Integrator(Integrator,c.SXFunction())
                if p_features[0] in features:
                    g = Function()
                    if len(rdin)>1:
                        g = SXFunction(rdaeIn(**rdin),rdaeOut(**rdout))
                        g.init()
                    f = SXFunction(daeIn(**din),daeOut(**dout))
                    f.init()

                    # Pin symbolic start/end times to numeric values.
                    for k in solution.keys():
                        solution[k] = substitute(solution[k],vertcat([tstart,tend]),vertcat([tstart_,tend_]))

                    fs = SXFunction(integratorIn(**solutionin),integratorOut(**solution))
                    fs.init()

                    def itoptions(post=""):
                        # Iterative linear solver variants.
                        yield {"iterative_solver"+post: "gmres"}
                        yield {"iterative_solver"+post: "bcgstab"}
                        yield {"iterative_solver"+post: "tfqmr", "use_preconditionerB": True, "linear_solverB" : "csparse"} # Bug in Sundials? Preconditioning seems to be needed

                    def solveroptions(post=""):
                        # All linear-solver-type option combinations.
                        yield {"linear_solver_type" +post: "dense" }
                        allowedOpts = list(dummyIntegrator.getOptionAllowed("linear_solver_type" +post))
                        #allowedOpts.remove("iterative") # disabled, see #1231
                        if "iterative" in allowedOpts:
                            for it in itoptions(post):
                                d = {"linear_solver_type" +post: "iterative" }
                                d.update(it)
                                yield d
                        if "banded" in allowedOpts:
                            yield {"linear_solver_type" +post: "banded" }
                        yield {"linear_solver_type" +post: "user_defined", "linear_solver"+post: "csparse" }

                    for a_options in solveroptions("B"):
                        for f_options in solveroptions():
                            message = "f_options: %s , a_options: %s" % (str(f_options) , str(a_options))
                            print message
                            integrator = c.Integrator(Integrator,f,g)
                            integrator.setOption("exact_jacobianB",True)
                            integrator.setOption("gather_stats",True)
                            #integrator.setOption("verbose",True)
                            #integrator.setOption("monitor",["djacB","resB","djac","res"])
                            integrator.setOption("t0",tstart_)
                            integrator.setOption("tf",tend_)
                            integrator.setOption(options)
                            integrator.setOption(f_options)
                            integrator.setOption(a_options)
                            integrator.init()

                            # Feed the numeric point into both the
                            # reference and the integrator.
                            for ff in [fs,integrator]:
                                for k,v in point.items():
                                    i = getattr(casadi,('integrator_'+k).upper())
                                    if not ff.getInput(i).isEmpty():
                                        ff.setInput(v,i)

                            integrator.evaluate()
                            fs.evaluate()
                            print "res=",integrator.getOutput("xf")-fs.getOutput("xf"), fs.getOutput("xf")
                            print "Rres=",integrator.getOutput("rxf")-fs.getOutput("rxf"), fs.getOutput("rxf")
                            # self.checkarray(integrator.getOutput("rxf"),fs.getOutput("rxf"),digits=4)
                            stats = integrator.getStats()
                            print stats
                            # Guard against regressions in step /
                            # linear-setup counts (see casadi#536).
                            self.assertTrue(stats["nsteps"]<1500)
                            self.assertTrue(stats["nstepsB"]<2500)
                            self.assertTrue(stats["nlinsetups"]<100)
                            self.assertTrue(stats["nlinsetupsB"]<250)
def test_lsolvers(self):
    """Exercise every linear-solver configuration of the IDAS/CVODES integrators.

    For a set of small forward/backward (adjoint) DAE/ODE problems with known
    closed-form solutions, build a CasADi integrator under every combination of
    forward and backward linear-solver options and check its output against the
    symbolic reference solution via ``self.checkfunction``.
    """
    self.message("Test different linear solvers")
    # Symbolic start/end times; substituted with numeric values per test case.
    tstart = SX.sym("tstart")
    tend = SX.sym("tend")
    # (plugin name, supported problem kinds, base options) for each integrator.
    integrators = [
        ("idas", ["dae", "ode"], {"abstol": 1e-9, "reltol": 1e-9, "fsens_err_con": True, "calc_ic": True, "calc_icB": True}),
        ("cvodes", ["ode"], {"abstol": 1e-15, "reltol": 1e-15, "fsens_err_con": True, "quad_err_con": False})
    ]

    def checks():
        """Yield test problems as tuples:
        (kinds, forward dae-in, forward dae-out, backward dae-in, backward
        dae-out, solution inputs, analytic solution, test point, (t0, tf)).
        """
        t = SX.sym("t")
        x = SX.sym("x")
        rx = SX.sym("rx")
        p = SX.sym("p")
        dp = SX.sym("dp")
        z = SX.sym("z")
        rz = SX.sym("rz")
        rp = SX.sym("rp")
        solutionin = {'x0': x, 'p': p, 'rx0': rx, 'rp': rp}
        # Concrete numeric point at which integrator and analytic solution are compared.
        pointA = {'x0': 7.1, 'p': 2, 'rx0': 0.13, 'rp': 0.127}
        ti = (0.2, 2.3)
        # DAE with coupled forward/backward algebraic constraints.
        yield (["dae"], {'x': x, 'z': z}, {'alg': x - z, 'ode': z}, {'x': x, 'z': z, 'rx': rx, 'rz': rz}, {'alg': x - rz, 'ode': rz}, solutionin, {'rxf': rx + x * (exp(tend - tstart) - 1), 'xf': x * exp(tend - tstart)}, pointA, ti)
        # Remaining (slower) cases only run in the full test suite.
        if not(args.run_slow): return
        yield (["dae"], {'x': x, 'z': z}, {'alg': x - z, 'ode': z}, {'x': x, 'z': z, 'rx': rx, 'rz': rz}, {'alg': rx - rz, 'ode': rz}, solutionin, {'rxf': rx * exp(tend - tstart), 'xf': x * exp(tend - tstart)}, pointA, ti)
        yield (["ode"], {'x': x}, {'ode': x}, {'x': x, 'rx': rx}, {'ode': x}, solutionin, {'rxf': rx + x * (exp(tend - tstart) - 1), 'xf': x * exp(tend - tstart)}, pointA, ti)
        yield (["ode"], {'x': x}, {'ode': x}, {'x': x, 'rx': rx}, {'ode': rx}, solutionin, {'rxf': rx * exp(tend - tstart), 'xf': x * exp(tend - tstart)}, pointA, ti)
        # Nonlinear scalar ODE pair with a closed-form tan/log solution.
        A = array([1, 0.1])
        p0 = 1.13
        q = SX.sym("y", 2, 1)
        y0 = q[0]
        yc0 = dy0 = q[1]
        p = SX.sym("p", 1, 1)
        s1 = (2 * y0 - log(yc0**2 / p + 1)) / 2 - log(cos(arctan(yc0 / sqrt(p)) + sqrt(p) * (tend - tstart)))
        s2 = sqrt(p) * tan(arctan(yc0 / sqrt(p)) + sqrt(p) * (tend - tstart))
        yield (["ode"], {'x': q, 'p': p}, {'ode': vertcat([q[1], p[0] + q[1]**2])}, {}, {}, {'x0': q, 'p': p}, {'xf': vertcat([s1, s2])}, {'x0': A, 'p': p0}, (0, 0.4))

    for p_features, din, dout, rdin, rdout, solutionin, solution, point, (tstart_, tend_) in checks():
        for Integrator, features, options in integrators:
            self.message(Integrator)
            # Dummy instance used only to query which linear_solver_type values the plugin allows.
            dummyIntegrator = c.Integrator(Integrator, SXFunction())
            if p_features[0] in features:
                # Optional backward (adjoint) DAE; empty Function when the case has none.
                g = Function()
                if len(rdin) > 1:
                    g = SXFunction(rdaeIn(**rdin), rdaeOut(**rdout))
                    g.init()
                f = SXFunction(daeIn(**din), daeOut(**dout))
                f.init()
                # Pin the symbolic time horizon to this case's numeric (t0, tf).
                for k in solution.keys():
                    solution[k] = substitute(solution[k], vertcat([tstart, tend]), vertcat([tstart_, tend_]))
                # Reference function: maps integrator inputs to the analytic solution.
                fs = SXFunction(integratorIn(**solutionin), integratorOut(**solution))
                fs.init()

                def itoptions(post=""):
                    # Iterative sub-solver variants; `post` is "" (forward) or "B" (backward).
                    yield {"iterative_solver" + post: "gmres"}
                    yield {"iterative_solver" + post: "bcgstab"}
                    yield {"iterative_solver" + post: "tfqmr", "use_preconditionerB": True, "linear_solverB": "csparse"}  # Bug in Sundials? Preconditioning seems to be needed

                def solveroptions(post=""):
                    # All linear-solver option dictionaries supported by the plugin.
                    yield {"linear_solver_type" + post: "dense"}
                    allowedOpts = list(dummyIntegrator.getOptionAllowed("linear_solver_type" + post))
                    #allowedOpts.remove("iterative") # disabled, see #1231
                    if "iterative" in allowedOpts:
                        for it in itoptions(post):
                            d = {"linear_solver_type" + post: "iterative"}
                            d.update(it)
                            yield d
                    if "banded" in allowedOpts:
                        yield {"linear_solver_type" + post: "banded"}
                    yield {"linear_solver_type" + post: "user_defined", "linear_solver" + post: "csparse"}

                # Cross-product of backward ("B") and forward solver configurations.
                for a_options in solveroptions("B"):
                    for f_options in solveroptions():
                        message = "f_options: %s , a_options: %s" % (str(f_options), str(a_options))
                        print message
                        integrator = c.Integrator(Integrator, f, g)
                        integrator.setOption("exact_jacobianB", True)
                        integrator.setOption("t0", tstart_)
                        integrator.setOption("tf", tend_)
                        integrator.setOption(options)
                        integrator.setOption(f_options)
                        integrator.setOption(a_options)
                        integrator.init()
                        # Feed the numeric test point to both the integrator and the reference.
                        for ff in [fs, integrator]:
                            for k, v in point.items():
                                i = getattr(casadi, ('integrator_' + k).upper())
                                if not ff.input(i).isEmpty():
                                    ff.setInput(v, i)
                        integrator.evaluate()
                        # Compare numeric integration against the analytic reference to 4 digits.
                        self.checkfunction(integrator, fs, gradient=False, hessian=False, sens_der=False, evals=False, digits=4, digits_sens=4, failmessage=message, verbose=False)
def defineOCP(self, ocp, DT=20, controlCost=0, xOpt=[], uOpt=[], finalStateCost=1, deltaUCons=[]):
    """Build the multiple-shooting NLP for the MPC problem from a Modelica OCP.

    Scales states/algebraics/controls by their nominal values, sets up an IDAS
    integrator for one sampling interval, chains it over ``n_k`` shooting
    intervals, assembles path/continuity/rate-of-change constraints, and
    creates the IPOPT solver. Results are stored on ``self`` (``G``, ``nlp``,
    ``solver``, ``USOL``, ``XSOL``, ...).

    ocp           -- CasADi SymbolicOCP-like object (project type)
    DT            -- sampling interval; horizon is ocp.tf / DT intervals
    controlCost   -- weight matrix for the control-move penalty
    xOpt, uOpt    -- reference (optimal) state/control values, unscaled
    finalStateCost-- currently unused (terminal-cost code is commented out)
    deltaUCons    -- per-control rate limits; empty disables the constraints
    NOTE(review): xOpt/uOpt/deltaUCons use mutable default [] — they are only
    read, never mutated, so this is safe, but callers should still pass values.
    """
    self.ocp = ocp
    ocp = self.ocp
    self.DT = DT
    # Number of shooting intervals over the OCP horizon.
    self.n_k = int(self.ocp.tf / self.DT)
    self.controlCost = controlCost
    # Scaling vectors: the declared nominal value of each variable.
    stateScaling = C.vertcat([
        ocp.variable(ocp.x[k].getName()).nominal for k in range(ocp.x.size())
    ])
    algStateScaling = C.vertcat([
        ocp.variable(ocp.z[k].getName()).nominal for k in range(ocp.z.size())
    ])
    controlScaling = C.vertcat([
        ocp.variable(ocp.u[k].getName()).nominal for k in range(ocp.u.size())
    ])
    # Work in scaled coordinates from here on.
    xOpt = xOpt / stateScaling
    uOpt = uOpt / controlScaling
    self.xOpt = xOpt
    self.uOpt = uOpt
    # NOTE(review): these recompute the same vectors as above; kept as-is.
    self.stateScaling = C.vertcat([
        ocp.variable(ocp.x[k].getName()).nominal for k in range(ocp.x.size())
    ])
    self.algStateScaling = C.vertcat([
        ocp.variable(ocp.z[k].getName()).nominal for k in range(ocp.z.size())
    ])
    self.controlScaling = C.vertcat([
        ocp.variable(ocp.u[k].getName()).nominal for k in range(ocp.u.size())
    ])
    # Rewrite ODE/algebraic/Lagrange-term expressions in scaled variables.
    odeS = C.substitute(
        ocp.ode(ocp.x), C.vertcat([ocp.x, ocp.z, ocp.u]),
        C.vertcat([
            stateScaling * ocp.x, algStateScaling * ocp.z,
            controlScaling * ocp.u
        ])) / stateScaling
    algS = C.substitute(
        ocp.alg, C.vertcat([ocp.x, ocp.z, ocp.u]),
        C.vertcat([
            stateScaling * ocp.x, algStateScaling * ocp.z,
            controlScaling * ocp.u
        ]))
    ltermS = C.substitute(
        ocp.lterm, C.vertcat([ocp.x, ocp.z, ocp.u]),
        C.vertcat([
            stateScaling * ocp.x, algStateScaling * ocp.z,
            controlScaling * ocp.u
        ]))
    # Scaled DAE with the controls as integrator parameters.
    sysIn = C.daeIn(x=ocp.x, z=ocp.z, p=ocp.u, t=ocp.t)
    sysOut = C.daeOut(ode=odeS, alg=algS, quad=ltermS)
    odeF = C.SXFunction(sysIn, sysOut)
    odeF.init()
    # One-interval IDAS integrator used as the shooting map.
    C.Integrator.loadPlugin("idas")
    G = C.Integrator("idas", odeF)
    G.setOption("reltol", self.INTG_REL_TOL)  #for CVODES and IDAS
    G.setOption("abstol", self.INTG_ABS_TOL)  #for CVODES and IDAS
    G.setOption("max_multistep_order", 5)  #for CVODES and IDAS
    G.setOption("max_step_size", self.IDAS_MAX_STEP_SIZE)  #for IDAS only
    G.setOption("tf", self.DT)
    self.G = G
    #==============================================================================
    #     G.setOption('verbose',True)
    #     G.addMonitor('res')
    #     G.addMonitor('inputs')
    #     G.addMonitor('outputs')
    #G.addMonitor('djacB')
    #     G.addMonitor('bjacB')
    #     G.addMonitor('jtimesB')
    #     G.addMonitor('psetup')
    #     G.addMonitor('psetupB')
    #     G.addMonitor('psolveB')
    #     G.addMonitor('resB')
    #     G.addMonitor('resS')
    #     G.addMonitor('rhsQB')
    #==============================================================================
    G.init()
    self.n_u = self.ocp.u.size()
    self.n_x = self.ocp.x.size()
    # Decision vector: all controls followed by all shooting states.
    self.n_v = self.n_u * self.n_k + self.n_x * self.n_k
    self.V = C.MX.sym("V", int(self.n_v), 1)
    self.U, self.X = self.splitVariables(self.V)
    # Scaled simple bounds on controls and states, repeated per interval.
    uMin = C.vertcat([
        self.ocp.variable(self.ocp.u[i].getName()).min.getValue()
        for i in range(self.n_u)
    ]) / controlScaling
    uMax = C.vertcat([
        self.ocp.variable(self.ocp.u[i].getName()).max.getValue()
        for i in range(self.n_u)
    ]) / controlScaling
    UMIN = C.vertcat([uMin for k in range(self.n_k)])
    UMAX = C.vertcat([uMax for k in range(self.n_k)])
    xMin = C.vertcat([
        self.ocp.variable(self.ocp.x[i].getName()).min.getValue()
        for i in range(self.n_x)
    ]) / stateScaling
    xMax = C.vertcat([
        self.ocp.variable(self.ocp.x[i].getName()).max.getValue()
        for i in range(self.n_x)
    ]) / stateScaling
    XMIN = C.vertcat([xMin for k in range(self.n_k)])
    XMAX = C.vertcat([xMax for k in range(self.n_k)])
    # Rate-of-change (Delta-u) constraints are optional.
    if len(deltaUCons) > 0:
        addDeltaUCons = True
        deltaUCons = deltaUCons / self.controlScaling
    else:
        addDeltaUCons = False
    # Path-constraint expressions, scaled by their nominal values.
    pathIn = C.daeIn(x=ocp.x, z=ocp.z, p=ocp.u, t=ocp.t)
    pathVarNames = [sv.getName() for sv in ocp.beq(ocp.path)]
    pathScaling = C.vertcat([ocp.nominal(pv) for pv in pathVarNames])
    # NOTE(review): beq applied twice — presumably to resolve nested bound
    # equations; confirm against the ocp class before changing.
    pathS = C.substitute(
        ocp.beq(ocp.beq(ocp.path)), C.vertcat([ocp.x, ocp.z, ocp.u]),
        C.vertcat([
            stateScaling * ocp.x, algStateScaling * ocp.z,
            controlScaling * ocp.u
        ])) / pathScaling
    pathConstraints = C.SXFunction(pathIn, [pathS])
    pathMax = C.vertcat([
        ocp.variable(pathVarNames[i]).max.getValue()
        for i in range(ocp.path.size())
    ]) / pathScaling
    pathMin = C.vertcat([
        ocp.variable(pathVarNames[i]).min.getValue()
        for i in range(ocp.path.size())
    ]) / pathScaling
    pathConstraints.setOption("name", "PATH")
    pathConstraints.init()
    # Evaluate path constraints at the reference point for reporting.
    pathConstraints.setInput(xOpt, 'x')
    pathConstraints.setInput([], 'z')
    pathConstraints.setInput(uOpt, 'p')
    pathConstraints.setInput(0, 't')
    pathConstraints.evaluate()
    pathOpt = pathConstraints.getOutput()
    # Print a human-readable summary of bounds vs. reference values and
    # remember the unscaled reference value of every variable.
    optimalValues = {}
    print 'min <= (name,optimal,nominal) <= max'
    for i in range(self.n_x):
        print ocp.variable(
            ocp.x[i].getName()).min.getValue(), ' <= (', ocp.x[i].getName(
            ), ',', xOpt[i] * stateScaling[i], ',', stateScaling[
                i], ') <= ', ocp.variable(
                    ocp.x[i].getName()).max.getValue()
        optimalValues[ocp.x[i].getName()] = xOpt[i] * stateScaling[i]
    for i in range(self.n_u):
        print ocp.variable(
            ocp.u[i].getName()).min.getValue(), ' <= (', ocp.u[i].getName(
            ), ',', uOpt[i] * controlScaling[i], ',', controlScaling[
                i], ') <= ', ocp.variable(
                    ocp.u[i].getName()).max.getValue()
        if addDeltaUCons:
            print -deltaUCons[i] * controlScaling[i], ' <= (Delta(', ocp.u[
                i].getName(), ')/DTMPC,', 0, ',', controlScaling[
                    i], ') <= ', deltaUCons[i] * controlScaling[i]
        optimalValues[ocp.u[i].getName()] = uOpt[i] * controlScaling[i]
    for i in range(len(pathVarNames)):
        print ocp.variable(pathVarNames[i]).min.getValue(
        ), ' <= (', pathVarNames[i], ',', pathOpt[i] * pathScaling[
            i], ',', pathScaling[i], ') <= ', ocp.variable(
                pathVarNames[i]).max.getValue()
        optimalValues[pathVarNames[i]] = pathOpt[i] * pathScaling[i]
    # Tags used by the plotting routines (states, controls, path variables).
    plotTags = [ocp.x[i].getName() for i in range(ocp.x.size())]
    plotTags = plotTags + [ocp.u[i].getName() for i in range(ocp.u.size())]
    plotTags = plotTags + [sv.getName() for sv in ocp.beq(ocp.path)]
    self.plotTags = plotTags
    self.optimalValues = optimalValues
    # Constraint functions
    g = []
    g_min = []
    g_max = []
    # NLP parameter: initial state and previous control, concatenated.
    self.XU0 = C.MX.sym("XU0", self.n_x + self.n_u, 1)
    Z = self.XU0[0:self.n_x]
    U0 = self.XU0[self.n_x:self.n_x + self.n_u]
    # Build up a graph of integrator calls
    obj = 0
    # Algebraic initial guess from declared start values.
    zf = C.vertcat([
        ocp.variable(ocp.z[k].getName()).start for k in range(ocp.z.size())
    ]) / algStateScaling
    for k in range(self.n_k):
        # One shooting interval: integrate, accumulate quadrature cost.
        Z, QF, zf = C.integratorOut(
            G(C.integratorIn(x0=Z, p=self.U[k], z0=zf)), "xf", "qf", "zf")
        errU = self.U[k] - U0
        # Stage cost: integrated Lagrange term + control-move penalty.
        obj = obj + QF + C.mul(C.mul(errU.T, controlCost), errU)
        U0 = self.U[k]
        # include MS constraints!  (continuity: integrated state == next node)
        g.append(Z - self.X[k])
        g_min.append(NP.zeros(self.n_x))
        g_max.append(NP.zeros(self.n_x))
        Z = self.X[k]
        [pathCons] = pathConstraints.call(
            C.daeIn(t=[], x=self.X[k], z=zf, p=self.U[k]))
        g.append(pathCons)  ## be carefull on giving all inputs
        g_max.append(pathMax)
        g_min.append(pathMin)
        if addDeltaUCons:
            g.append(errU)
            g_max.append(deltaUCons * DT)
            g_min.append(-deltaUCons * DT)
    #errU = (self.U[-1]-uOpt)
    #errX = self.X[-1]-xOpt
    #obj = obj + finalStateCost*C.mul((errX).trans(),(errX))+C.mul(C.mul(errU.T,controlCost),errU)
    self.obj = obj
    ### Constrains
    g = C.vertcat(g)
    nlp = C.MXFunction(C.nlpIn(x=self.V, p=self.XU0), C.nlpOut(f=obj, g=g))
    nlp.init()
    self.odeF = odeF
    self.nlp = nlp
    solver = C.NlpSolver('ipopt', nlp)
    # remove the comment to implement the hessian
    solver.setOption('hessian_approximation',
                     'limited-memory')  # comment for exact hessian
    solver.setOption('print_user_options', 'no')
    solver.setOption("tol", self.IPOPT_tol)  # IPOPT tolerance
    solver.setOption("dual_inf_tol",
                     self.IPOPT_dual_inf_tol)  # dual infeasibility
    solver.setOption("constr_viol_tol",
                     self.IPOPT_constr_viol_tol)  # primal infeasibility
    solver.setOption("compl_inf_tol",
                     self.IPOPT_compl_inf_tol)  # complementarity
    # solver.setOption("acceptable_tol",0.01)
    # solver.setOption("acceptable_obj_change_tol",1e-6)
    # solver.setOption("acceptable_constr_viol_tol",1e-6)
    solver.setOption("max_iter",
                     self.IPOPT_max_iter)  # IPOPT maximum iterations
    solver.setOption("print_level", self.IPOPT_print_level)
    solver.setOption("max_cpu_time",
                     self.IPOPT_max_cpu_time)  # IPOPT maximum iterations
    solver.init()
    ### Variable Bounds and initial guess
    solver.setInput(C.vertcat([UMIN, XMIN]), 'lbx')  # u_L
    solver.setInput(C.vertcat([UMAX, XMAX]), 'ubx')  # u_U
    solver.setInput(C.vertcat(g_min), 'lbg')  # g_L
    solver.setInput(C.vertcat(g_max), 'ubg')  # g_U
    self.solver = solver
    # Warm-start trajectory from the declared initial guesses.
    u0N = C.vertcat([
        self.ocp.variable(self.ocp.u[i].getName()).initialGuess.getValue()
        for i in range(self.n_u)
    ]) / controlScaling
    x0N = C.vertcat([
        self.ocp.variable(self.ocp.x[i].getName()).initialGuess.getValue()
        for i in range(self.n_x)
    ]) / stateScaling
    USOL, XSOL = self.forwardSimulation(x0N, u0N)
    self.USOL = USOL
    self.XSOL = XSOL
def plotNLP(self, x0, DTplot=10, additionalplottags=None): plotH = [] plotAdd = [] plotT = [] plotTags = self.plotTags ocp = self.ocp algStateScaling = self.algStateScaling controlScaling = self.controlScaling stateScaling = self.stateScaling DT = self.DT Z = x0 / self.stateScaling odeF = self.odeF xOpt = self.xOpt uOpt = self.uOpt C.Integrator.loadPlugin("idas") G = C.Integrator("idas", odeF) G.setOption("reltol", self.INTG_REL_TOL) #for CVODES and IDAS G.setOption("abstol", self.INTG_ABS_TOL) #for CVODES and IDAS G.setOption("max_multistep_order", 5) #for CVODES and IDAS G.setOption("max_step_size", self.IDAS_MAX_STEP_SIZE) #for IDAS only G.setOption("tf", DTplot) G.init() pathIn = C.daeIn(x=ocp.x, z=ocp.z, p=ocp.u, t=ocp.t) if additionalplottags is None: Config = ConfigParser.ConfigParser() Config.read('config.ini') additionalplottags = Config.get('MultipleShooting', 'additionalplottags') additionalplottags = additionalplottags.split(',') self.additionalplottags = additionalplottags addTagScale = C.vertcat([ocp.nominal(pv) for pv in additionalplottags]) tagsFsx = C.substitute( C.vertcat([ocp.beq(sv) for sv in additionalplottags]), C.vertcat([ocp.x, ocp.z, ocp.u]), C.vertcat([ stateScaling * ocp.x, algStateScaling * ocp.z, controlScaling * ocp.u ])) tagsF = C.SXFunction(pathIn, [tagsFsx]) tagsF.init() tagsF.setInput(xOpt, 'x') tagsF.setInput([], 'z') tagsF.setInput(uOpt, 'p') tagsF.setInput(0, 't') tagsF.evaluate() addTagsOpt = tagsF.getOutput() for i in range(len(additionalplottags)): self.optimalValues[additionalplottags[i]] = addTagsOpt[i] sxPlotTags = C.vertcat([ocp.beq(tag) for tag in plotTags]) pathS = C.substitute( sxPlotTags, C.vertcat([ocp.x, ocp.z, ocp.u]), C.vertcat([ stateScaling * ocp.x, algStateScaling * ocp.z, controlScaling * ocp.u ])) pathF = C.SXFunction(pathIn, [pathS, tagsFsx]) pathF.init() nPlot = NP.int(NP.round(self.DT / DTplot)) zf = C.vertcat([ ocp.variable(ocp.z[k].getName()).start for k in range(ocp.z.size()) ]) / algStateScaling for k 
in range(self.n_k): try: for jp in range(nPlot): #Z,QF,zf = C.integratorOut(G(C.integratorIn(x0=Z,p=self.USOL[k],z0=zf)),"xf","qf","zf") G.setInput(Z, 'x0') G.setInput(self.USOL[k], 'p') G.evaluate() Z = G.getOutput('xf') QF = G.getOutput('qf') zf = G.getOutput('zf') pathF.setInput(Z, 'x') pathF.setInput(zf, 'z') pathF.setInput(self.USOL[k], 'p') t = k * DT + jp * DTplot pathF.setInput(t, 't') pathF.evaluate() p = pathF.getOutput(0) pAdd = pathF.getOutput(1) plotH.append(p) plotAdd.append(pAdd) plotT.append(t) Z = self.XSOL[k] except: print 'something bad happened', sys.exc_info()[0] break plotDic = {} plotDic['t'] = plotT for si in range(len(plotTags)): plotDic[plotTags[si]] = [i[si] for i in plotH] for si in range(len(additionalplottags)): plotDic[additionalplottags[si]] = [i[si] for i in plotAdd] self.plotDic = plotDic
def fwd_simulation(self, dE_start, condition, detail=True, reltol=1e-8,
                   abstol=1e-10, DRX=False, drc_opt={}):
    """Integrate the microkinetic model forward and collect reaction data.

    Runs a CVODES simulation of the DAE ``self._dae_`` for one experimental
    condition, stores partial pressures, coverages, rates, energies and rate
    constants on ``self``, and optionally computes degree-of-rate-control
    (XRC) and thermodynamic rate control (XTRC) coefficients.

    dE_start  -- energy-perturbation parameter vector fed to the DAE
    condition -- experiment descriptor (TotalPressure, TotalFlow, Temperature,
                 SimulationTime, InitCoverage, PartialPressure; project type)
    detail    -- unused here; kept for interface compatibility
    reltol/abstol -- CVODES tolerances
    DRX       -- when True, also compute XRC/XTRC sensitivities
    drc_opt   -- options for the DRX analysis (delG, ref, numer); read-only,
                 so the mutable default {} is safe
    Returns (tor, result): turnover frequencies per gas species, and a dict
    bundling all stored result lists.
    """
    TotalPressure = condition.TotalPressure
    TotalFlow = condition.TotalFlow
    Tem = condition.Temperature
    tf = condition.SimulationTime
    opts = {}
    opts['tf'] = tf  # Simulation time
    opts['abstol'] = abstol
    opts['reltol'] = reltol
    opts['disable_internal_warnings'] = True
    opts['max_num_steps'] = 1e8
    # `py` selects between the CasADi 2.x and 3.x integrator APIs.
    if py == 2:
        Fint = cas.Integrator('Fint', 'cvodes', self._dae_, opts)
    elif py == 3:
        Fint = cas.integrator('Fint', 'cvodes', self._dae_, opts)
    # Initial state: zero coverage everywhere, free-site fraction = 1.
    if condition.InitCoverage == {}:
        x0 = [0] * (self.nspe - 1) + [1]
    else:
        # Construct Coverage
        x0 = [0] * (self.nspe - 1) + [1]
        for spe, cov in condition.InitCoverage.items():
            idx = get_index_species(spe, self.specieslist)
            # Surface species are indexed after the ngas gas species.
            x0[idx - self.ngas] = cov
            x0[-1] -= cov
    # Partial Pressure
    Pinlet = np.zeros(self.ngas)
    for idx, spe in enumerate(self.specieslist):
        if spe.phase == 'gaseous':
            Pinlet[idx] = condition.PartialPressure[str(spe)] if str(
                spe) in condition.PartialPressure.keys() else 0
    # DAE parameter vector: [energies, inlet pressures, T, flow].
    P_dae = np.hstack([dE_start, Pinlet, Tem, TotalFlow])
    F_sim = Fint(x0=x0, p=P_dae)
    # Turnover frequency per gas species: outlet minus inlet molar flow.
    tor = {}
    for idx, spe in enumerate(self.specieslist):
        if spe.phase == 'gaseous':
            tor[str(spe)] = float(F_sim['xf'][idx] -
                                  Pinlet[idx] / TotalPressure * TotalFlow)
    # Detailed Reaction network data
    # Evaluate partial pressure and surface coverage
    self.pressure_value = list(
        (F_sim['xf'][:self.ngas] / TotalFlow * TotalPressure).full().T[0])
    self.coverage_value = list(F_sim['xf'][self.ngas:].full().T[0])
    # Evaluate Reaction Rate automatically save to Rate attribute
    x = self._x
    p = self._p
    if py == 2:
        # CasADi 2.x path: SXFunction with named inputs/outputs.
        rate_fxn = cas.SXFunction('rate_fxn', [x, p],
                                  [self._rate, self._rfor, self._rrev])
        rate_fxn.setInput(F_sim['xf'], 'i0')
        rate_fxn.setInput(P_dae, 'i1')
        rate_fxn.evaluate()
        self.rate_value = {}
        self.rate_value['rnet'] = rate_fxn.getOutput(
            'o0').full().T[0].tolist()
        self.rate_value['rfor'] = rate_fxn.getOutput(
            'o1').full().T[0].tolist()
        self.rate_value['rrev'] = rate_fxn.getOutput(
            'o2').full().T[0].tolist()
        # Evaluate Reaction Energy
        ene_fxn = cas.SXFunction('ene_fxn', [x, p], [
            self._reaction_energy_expression['activation'],
            self._reaction_energy_expression['enthalpy']
        ])
        ene_fxn.setInput(F_sim['xf'], 'i0')
        ene_fxn.setInput(P_dae, 'i1')
        ene_fxn.evaluate()
        self.energy_value = {}
        self.energy_value['activation'] = list(
            ene_fxn.getOutput('o0').full().T[0])
        self.energy_value['enthalpy'] = list(
            ene_fxn.getOutput('o1').full().T[0])
        # Evaluate Equilibrium Constant and Rate Constant
        k_fxn = cas.SXFunction('k_fxn', [x, p],
                               [self._Keq, self._Qeq, self._kf, self._kr])
        k_fxn.setInput(F_sim['xf'], 'i0')
        k_fxn.setInput(P_dae, 'i1')
        k_fxn.evaluate()
        self.equil_rate_const_value = {}
        self.equil_rate_const_value['Keq'] = list(
            k_fxn.getOutput('o0').full().T[0])
        self.equil_rate_const_value['Qeq'] = list(
            k_fxn.getOutput('o1').full().T[0])
        self.equil_rate_const_value['kf'] = list(
            k_fxn.getOutput('o2').full().T[0])
        self.equil_rate_const_value['kr'] = list(
            k_fxn.getOutput('o3').full().T[0])
    elif py == 3:
        # CasADi 3.x path: Function objects called directly.
        rate_fxn = cas.Function('rate_fxn', [x, p],
                                [self._rate, self._rfor, self._rrev])
        outs = rate_fxn(F_sim['xf'], P_dae)
        self.rate_value = {}
        self.rate_value['rnet'] = outs[0].full().T[0].tolist()
        self.rate_value['rfor'] = outs[1].full().T[0].tolist()
        self.rate_value['rrev'] = outs[2].full().T[0].tolist()
        # Evaluate Reaction Energy
        ene_fxn = cas.Function('ene_fxn', [x, p], [
            self._reaction_energy_expression['activation'],
            self._reaction_energy_expression['enthalpy']
        ])
        outs = ene_fxn(F_sim['xf'], P_dae)
        self.energy_value = {}
        self.energy_value['activation'] = list(outs[0].full().T[0])
        self.energy_value['enthalpy'] = list(outs[1].full().T[0])
        # Evaluate Equilibrium Constant and Rate Constant
        k_fxn = cas.Function('k_fxn', [x, p],
                             [self._Keq, self._Qeq, self._kf, self._kr])
        outs = k_fxn(F_sim['xf'], P_dae)
        self.equil_rate_const_value = {}
        self.equil_rate_const_value['Keq'] = list(outs[0].full().T[0])
        self.equil_rate_const_value['Qeq'] = list(outs[1].full().T[0])
        self.equil_rate_const_value['kf'] = list(outs[2].full().T[0])
        self.equil_rate_const_value['kr'] = list(outs[3].full().T[0])
    xrc, xtrc = [], []
    # TODO: degree of rate control
    if DRX:
        delG = drc_opt.get('delG', 1)
        ref_species = drc_opt.get('ref', 'H2(g)')
        # numer: 'ad' (algorithmic differentiation), 'fwd' or 'cent'
        # (one-sided / central finite differences).
        numer = drc_opt.get('numer', 'fwd')
        tor0 = tor[ref_species]
        if numer == 'ad':
            opts = fwd_sensitivity_option(tf=tf, reltol=1e-8, abstol=1e-16)
            Fint = cas.Integrator('Fint', 'cvodes', self._dae_, opts)
            Pnlp = self._Pnlp
            P_dae = cas.vertcat([Pnlp, Pinlet, Tem, TotalFlow])
            F_sim = Fint(x0=x0, p=P_dae)
            ii = [
                ii for ii, spe in enumerate(self.specieslist)
                if str(spe) == ref_species
            ][0]
            tor_ad = F_sim['xf'][
                ii] - Pinlet[ii] / TotalPressure * TotalFlow
            # define the jacobian and MX function
            jac = cas.jacobian(tor_ad, Pnlp)
            # evaluate the jacobian
            fjac = cas.MXFunction('fjac', [Pnlp], [jac])
            xrc = fjac([np.copy(dE_start)])[0]
            # Normalize the sensitivity into a dimensionless XRC.
            xrc = xrc / tor0 * (_const.Rg * Tem) / (-1000)
            xrc = xrc.full()[0].tolist()[:len(self.dEa_index)]
        # Finite-difference XRC: perturb each activation energy by delG.
        for idx, j in enumerate(self.dEa_index):
            dP = np.copy(dE_start)
            dP[idx] += delG
            # Partial Pressure
            P_dae = np.hstack([dP, Pinlet, Tem, TotalFlow])
            F_sim = Fint(x0=x0, p=P_dae)
            for ii, spe in enumerate(self.specieslist):
                if str(spe) == ref_species:
                    tor_p = float(F_sim['xf'][ii] -
                                  Pinlet[ii] / TotalPressure * TotalFlow)
            if numer == 'cent':
                # Second simulation with the opposite perturbation.
                dP = np.copy(dE_start)
                dP[idx] -= delG
                # Partial Pressure
                P_dae = np.hstack([dP, Pinlet, Tem, TotalFlow])
                F_sim = Fint(x0=x0, p=P_dae)
                for ii, spe in enumerate(self.specieslist):
                    if str(spe) == ref_species:
                        tor_n = float(F_sim['xf'][ii] - Pinlet[ii] /
                                      TotalPressure * TotalFlow)
            if numer == 'fwd':
                xrc.append((tor_p - tor0) / tor0 /
                           (-delG * 1000 / (_const.Rg * Tem)))
                # xrc.append((np.log(np.abs(tor_p)) - np.log(np.abs(tor0)))/(-delG * 1000 /(_const.Rg * Tem)))
            if numer == 'cent':
                xrc.append((tor_p - tor_n) / tor0 /
                           (-2 * delG * 1000 / (_const.Rg * Tem)))
                # xrc.append((np.log(np.abs(tor_p)) - np.log(np.abs(tor_n)))/(-2 * delG * 1000 /(_const.Rg * Tem)))
        # XTRC: perturb each binding energy and propagate through the
        # stoichiometric matrix to the activation energies.
        for idx, j in enumerate(self.dBE_index):
            spe = self.reactionlist[idx]
            dP = np.copy(dE_start)
            deltaE = np.zeros(self.nspe)
            deltaE[j] += delG
            # propagate through stoichiomatric
            deltaEa = self.stoimat.dot(deltaE)
            # dP[:len(self.dEa_index)] -= deltaEa
            dP[len(self.dEa_index):] += deltaE[self.dBE_index]
            # Partial Pressure
            P_dae = np.hstack([dP, Pinlet, Tem, TotalFlow])
            F_sim = Fint(x0=x0, p=P_dae)
            for ii, spe in enumerate(self.specieslist):
                if str(spe) == ref_species:
                    tor_p = float(F_sim['xf'][ii] -
                                  Pinlet[ii] / TotalPressure * TotalFlow)
            if numer == 'fwd':
                xtrc.append((tor_p - tor0) / tor0 /
                            (-delG * 1000 / (_const.Rg * Tem)))
                # xrc.append((np.log(np.abs(tor_p)) - np.log(np.abs(tor0)))/(-delG * 1000 /(_const.Rg * Tem)))
            if numer == 'cent':
                dP = np.copy(dE_start)
                deltaE = np.zeros(self.nspe)
                deltaE[j] -= delG
                # propagate through stoichiomatric
                deltaEa = self.stoimat.dot(deltaE)
                # dP[:len(self.dEa_index)] -= deltaEa
                dP[len(self.dEa_index):] += deltaE[self.dBE_index]
                # Partial Pressure
                P_dae = np.hstack([dP, Pinlet, Tem, TotalFlow])
                F_sim = Fint(x0=x0, p=P_dae)
                for ii, spe in enumerate(self.specieslist):
                    if str(spe) == ref_species:
                        tor_n = float(F_sim['xf'][ii] - Pinlet[ii] /
                                      TotalPressure * TotalFlow)
                xtrc.append((tor_p - tor_n) / tor0 /
                            (-2 * delG * 1000 / (_const.Rg * Tem)))
    # RESULT
    result = {}
    result['pressure'] = self.pressure_value
    result['coverage'] = self.coverage_value
    result['rate'] = self.rate_value
    result['energy'] = self.energy_value
    result['equil_rate'] = self.equil_rate_const_value
    result['xrc'] = xrc
    result['xtrc'] = xtrc
    self.xrc = xrc
    return tor, result
def init(self, ocp, DT, measuremntsList): measurementScaling = C.vertcat( [ocp.variable(k).nominal for k in measuremntsList]) stateScaling = C.vertcat([ ocp.variable(ocp.x[k].getName()).nominal for k in range(ocp.x.size()) ]) algStateScaling = C.vertcat([ ocp.variable(ocp.z[k].getName()).nominal for k in range(ocp.z.size()) ]) controlScaling = C.vertcat([ ocp.variable(ocp.u[k].getName()).nominal for k in range(ocp.u.size()) ]) odeS = C.substitute( ocp.ode(ocp.x), C.vertcat([ocp.x, ocp.z, ocp.u]), C.vertcat([ stateScaling * ocp.x, algStateScaling * ocp.z, controlScaling * ocp.u ])) / stateScaling algS = C.substitute( ocp.alg, C.vertcat([ocp.x, ocp.z, ocp.u]), C.vertcat([ stateScaling * ocp.x, algStateScaling * ocp.z, controlScaling * ocp.u ])) sysIn = C.daeIn(x=ocp.x, z=ocp.z, p=ocp.u, t=ocp.t) sysOut = C.daeOut(ode=odeS, alg=algS) odeF = C.SXFunction(sysIn, sysOut) odeF.init() C.Integrator.loadPlugin("idas") G = C.Integrator("idas", odeF) G.setOption("reltol", 1e-8) #for IDAS G.setOption("abstol", 1e-8) #for IDAS G.setOption("max_multistep_order", 5) #for IDAS G.setOption("tf", DT) G.init() mSX = C.vertcat([ ocp.variable(measuremntsList[k]).beq for k in range(len(measuremntsList)) ]) mSX = C.substitute( mSX, C.vertcat([ocp.x, ocp.z, ocp.u]), C.vertcat([ stateScaling * ocp.x, algStateScaling * ocp.z, controlScaling * ocp.u ])) / measurementScaling mSXF = C.SXFunction(sysIn, [mSX]) mSXF.init() self.measurementScaling = measurementScaling self.stateScaling = stateScaling self.algStateScaling = algStateScaling self.controlScaling = controlScaling self.ocp = ocp self.DT = DT self.measurmentList = measuremntsList self.mSXF = mSXF self.G = G self.z0 = C.vertcat([ ocp.variable(ocp.z[k].getName()).start for k in range(ocp.z.size()) ]) / algStateScaling self.x0 = C.vertcat([ ocp.variable(ocp.x[k].getName()).initialGuess.getValue() for k in range(ocp.x.size()) ]) / stateScaling
def find_prc(self, res=100, num_cycles=20):
    """ Function to calculate the phase response curve with specified
    resolution

    Computes the state PRC (sPRC) via the adjoint of the monodromy matrix
    and the parameter PRC (pPRC) by projecting df/dp onto it. Results are
    stored on self (prc_ts, sPRC, pPRC, rel_pPRC and interpolants).

    res        -- number of phase points along one period
    num_cycles -- integration horizon (in periods) for the forward solve;
                  presumably long enough for transients to decay — confirm
    """
    # Make sure the lc object exists
    if not hasattr(self, 'lc'): self.limit_cycle()
    # Get a state that is not at a local max/min (0 should be at
    # max)
    state_ind = 1
    while np.abs(self.dydt(self.y0)[state_ind]) < 1E-5:
        state_ind += 1
    # Forward-sensitivity CVODES integrator over num_cycles periods.
    integrator = cs.Integrator('cvodes', self.model)
    integrator.setOption("abstol", self.intoptions['sensabstol'])
    integrator.setOption("reltol", self.intoptions['sensreltol'])
    integrator.setOption("max_num_steps",
                         self.intoptions['sensmaxnumsteps'])
    integrator.setOption("sensitivity_method",
                         self.intoptions['sensmethod']);
    integrator.setOption("t0", 0)
    integrator.setOption("tf", num_cycles*self.T)
    #integrator.setOption("numeric_jacobian", True)
    integrator.setOption("fsens_err_con", 1)
    integrator.setOption("fsens_abstol", self.intoptions['sensabstol'])
    integrator.setOption("fsens_reltol", self.intoptions['sensreltol'])
    integrator.init()
    # Perturbation direction: unit vector along the chosen state.
    seed = np.zeros(self.neq)
    seed[state_ind] = 1.
    integrator.setInput(self.y0, cs.INTEGRATOR_X0)
    integrator.setInput(self.param, cs.INTEGRATOR_P)
    #adjseed = (seed, cs.INTEGRATOR_XF)
    integrator.evaluate()#0, 1)
    # Monodromy matrix: sensitivity of the final state w.r.t. the initial state.
    monodromy = integrator.jacobian(cs.INTEGRATOR_X0, cs.INTEGRATOR_XF)
    monodromy.init()
    monodromy.setInput(self.y0, "x0")
    monodromy.setInput(self.param, "p")
    monodromy.evaluate()
    # initial state is Kcross(T,T) = I
    adjsens = monodromy.getOutput().toArray().T.dot(seed)
    from scipy.integrate import odeint

    def adj_func(y, t):
        """ t will increase, trace limit cycle backwards through -t.
        y is the vector of adjoint sensitivities """
        jac = self.dfdy(self.lc((-t) % self.T))
        return y.dot(jac)

    seed = adjsens
    self.prc_ts = np.linspace(0, self.T, res)
    # Integrate the adjoint system backwards around one period; reverse to
    # get the PRC ordered forward in time.
    P = odeint(adj_func, seed, self.prc_ts)[::-1]  # Adjoint matrix at t
    # State PRC, normalized by the velocity of the reference state and
    # rescaled from time to phase units.
    self.sPRC = self._t_to_phi(P / self.dydt(self.y0)[state_ind])
    dfdp = np.array([self.dfdp(self.lc(t)) for t in self.prc_ts])
    # Must rescale f to \hat{f}, inverse of rescaling t
    self.pPRC = self._t_to_phi(
        np.array([
            self.sPRC[i].dot(self._phi_to_t(dfdp[i]))
            for i in xrange(len(self.sPRC))
        ]))
    # Relative parameter PRC: sensitivity per unit relative parameter change.
    self.rel_pPRC = self.pPRC * np.array(self.param)
    # Create interpolation object for the state phase response curve
    self.sPRC_interp = self.interp_sol(self.prc_ts, self.sPRC.T)  #phi units
    self.pPRC_interp = self.interp_sol(self.prc_ts, self.pPRC.T)  #phi units
def evidence_construct(self, conditionlist, evidence_info, sensitivity=True):
    """Build the (symbolic) sum-of-squares evidence over all conditions.

    For every experimental condition, simulates the DAE with the symbolic
    parameter vector ``self._Pnlp`` and accumulates a squared deviation
    between the simulated and measured turnover frequency of each gas
    species. The result is stored in ``self._evidence_`` and returned.

    conditionlist -- iterable of experiment descriptors (project type)
    evidence_info -- dict with 'type' ('abs'|'rel'|'log'), 'err', and
                     optional tolerances/thresholds
    sensitivity   -- choose integrator options with or without forward
                     sensitivities

    Raises ValueError for an unrecognized 'type'.
    """
    # simulation option
    reltol = evidence_info.get('reltol', 1e-6)
    abstol = evidence_info.get('abstol', 1e-10)
    # error value
    err_type = evidence_info['type']
    err = evidence_info['err']
    lowSurf = evidence_info.get('lowSurf', 1e4)
    # Below this TOF magnitude, absolute deviation is used regardless of type.
    lowSurf_thres = evidence_info.get('lowSurf_thres', 1e-5)
    cov_err = evidence_info.get('cov_err', 0.05)
    # FIX: unknown err_type used to fall through `else: pass` and then use a
    # stale/undefined `dev` in the evidence sum; fail fast instead.
    if err_type not in ('abs', 'rel', 'log'):
        raise ValueError("unknown err_type: %r" % (err_type,))
    # Initialize simulator
    Pnlp = self._Pnlp
    if sensitivity:
        # opts = fwd_sensitivity_option(reltol=reltol, adjtol=adjtol, fwdtol=fwdtol)
        opts = fwd_sensitivity_option()
    else:
        opts = fwd_NoSensitivity_option(reltol=reltol, abstol=abstol)
    print(opts)
    Fint = cas.Integrator('Fint', 'cvodes', self._dae_, opts)
    evidence = 0
    for condition in conditionlist:
        TotalPressure = condition.TotalPressure
        TotalFlow = condition.TotalFlow
        Tem = condition.Temperature
        # Initial state: zero coverage, free-site fraction = 1, optionally
        # overridden by the condition's initial coverages.
        if condition.InitCoverage == {}:
            x0 = [0] * (self.nspe - 1) + [1]
        else:
            # Construct Coverage
            x0 = [0] * (self.nspe - 1) + [1]
            for spe, cov in condition.InitCoverage.items():
                idx = get_index_species(spe, self.specieslist)
                x0[idx - self.ngas] = cov
                x0[-1] -= cov
        # construct initial partial pressure
        Pinlet = np.zeros(self.ngas)
        for idx, spe in enumerate(self.specieslist):
            if spe.phase == 'gaseous':
                Pinlet[idx] = condition.PartialPressure[str(spe)] if str(
                    spe) in condition.PartialPressure.keys() else 0
        # run simulation (parameters stay symbolic via Pnlp)
        P_dae = cas.vertcat([Pnlp, Pinlet, Tem, TotalFlow])
        F_sim = Fint(x0=x0, p=P_dae)
        for idx, spe in enumerate(self.specieslist):
            if spe.phase == 'gaseous':
                # construct evidence with turnover frequency
                tor = F_sim['xf'][
                    idx] - Pinlet[idx] / TotalPressure * TotalFlow
                if str(spe) in condition.TurnOverFrequency.keys():
                    exp_tor = condition.TurnOverFrequency[str(spe)]
                    if err_type == 'abs' or abs(exp_tor) <= lowSurf_thres:
                        dev = tor - exp_tor
                    elif err_type == 'rel':
                        dev = 1 - tor / exp_tor
                    elif err_type == 'log':
                        dev = cas.log(tor / exp_tor)
                    # if abs(exp_tor) <= lowSurf_thres:
                    #     evidence += (dev * dev) * lowSurf
                    # else:
                    evidence += (dev * dev) / err**2
                # if spe.phase == 'surface':
                #     cov = F_sim['xf'][idx]
                #     if str(spe) in condition.Coverage.keys():
                #         exp_cov = condition.Coverage[str(spe)]
                #         dev = cas.log(cov / exp_cov)
                #         evidence += (dev * dev) / cov_err**2
    self._evidence_ = evidence
    return evidence
def run_simulation(self, x0=None, tsim=None, usim=None, psim=None,
                   method="rk"):
    r'''
    :param x0: initial value for the states
               :math:`x_0 \in \mathbb{R}^{n_x}`
    :type x0: list, numpy.ndarray, casadi.DMatrix

    :param tsim: optional, switching time points for the controls
                 :math:`t_{sim} \in \mathbb{R}^{L}` to be used for the
                 simulation
    :type tsim: list, numpy.ndarray, casadi.DMatrix

    :param usim: optional, control values
                 :math:`u_{sim} \in \mathbb{R}^{n_u \times L}` to be used
                 for the simulation
    :type usim: list, numpy.ndarray, casadi.DMatrix

    :param psim: optional, parameter set
                 :math:`p_{sim} \in \mathbb{R}^{n_p}` to be used for the
                 simulation
    :type psim: list, numpy.ndarray, casadi.DMatrix

    :param method: optional, CasADi integrator to be used for the
                   simulation
    :type method: str

    This function performs a simulation of the system for a given
    parameter set :math:`p_{sim}`, starting from a user-provided initial
    value for the states :math:`x_0`. If the argument ``psim`` is not
    specified, the estimated parameter set :math:`\hat{p}` is used. For
    this, a parameter estimation using
    :func:`run_parameter_estimation()` has to be done beforehand, of
    course.

    By default, the switching time points for the controls :math:`t_u`
    and the corresponding controls :math:`u_N` will be used for
    simulation. If desired, other time points :math:`t_{sim}` and
    corresponding controls :math:`u_{sim}` can be passed to the function.

    For the moment, the function can only be used for systems of type
    :class:`pecas.systems.ExplODE`.
    '''
    intro.pecas_intro()
    print('\n' + 27 * '-' + ' PECas system simulation ' + 26 * '-')
    print('\nPerforming system simulation, this might take some time ...')

    if not type(self.pesetup.system) is systems.ExplODE:
        raise NotImplementedError("Until now, this function can only " + \
            "be used for systems of type ExplODE.")

    # `is None` instead of `== None`: comparing a numpy array to None
    # with `==` is elementwise and makes the truth test ambiguous.
    if x0 is None:
        raise ValueError("You have to provide an initial value x0 " + \
            "to run the simulation.")

    x0 = np.squeeze(np.asarray(x0))
    if np.atleast_1d(x0).shape[0] != self.pesetup.nx:
        raise ValueError("Wrong dimension for initial value x0.")

    if tsim is None:
        tsim = self.pesetup.tu
    if usim is None:
        usim = self.pesetup.uN

    if psim is None:
        try:
            psim = self.phat
        except AttributeError:
            errmsg = '''
You have to either perform a parameter estimation beforehand
to obtain a parameter set that can be used for simulation, or
you have to provide a parameter set in the argument psim.
'''
            raise AttributeError(errmsg)
    else:
        if not np.atleast_1d(np.squeeze(psim)).shape[0] == self.pesetup.np:
            raise ValueError("Wrong dimension for parameter set psim.")

    # ODE right-hand side with the error variables eps_e / eps_u fixed
    # to zero and the parameters fixed to psim.
    fp = ca.MXFunction("fp",
        [self.pesetup.system.t, self.pesetup.system.u,
         self.pesetup.system.x, self.pesetup.system.eps_e,
         self.pesetup.system.eps_u, self.pesetup.system.p],
        [self.pesetup.system.f])

    fpeval = fp([
        self.pesetup.system.t, self.pesetup.system.u,
        self.pesetup.system.x, np.zeros(self.pesetup.neps_e),
        np.zeros(self.pesetup.neps_u), psim])[0]

    # DAE for the simulation: controls enter as parameters.
    fsim = ca.MXFunction("fsim",
        ca.daeIn(t=self.pesetup.system.t,
                 x=self.pesetup.system.x,
                 p=self.pesetup.system.u),
        ca.daeOut(ode=fpeval))

    Xsim = [x0]
    u0 = ca.DMatrix()

    # Integrate piecewise over each control interval.
    for k, e in enumerate(tsim[:-1]):
        try:
            integrator = ca.Integrator("integrator", method,
                fsim, {"t0": e, "tf": tsim[k+1]})
        except RuntimeError:
            errmsg = '''
It seems like you want to use an integration method that is not currently
supported by CasADi. Please refer to the CasADi documentation for a list
of supported integrators, or use the default RK4-method by not setting
the method-argument of the function.
'''
            raise RuntimeError(errmsg)

        if not self.pesetup.nu == 0:
            u0 = usim[:, k]

        Xk_end = itemgetter('xf')(integrator({'x0': x0, 'p': u0}))
        Xsim.append(Xk_end)
        x0 = Xk_end  # chain the intervals: end state is next start state

    self.Xsim = ca.horzcat(Xsim)

    print('''System simulation finished.''')
] ) # ca.mul( [ x_err.T, cost_mat, x_err ] )

# Wrap the ODE right-hand side as an SXFunction (pre-CasADi-3.0 API).
dae = ca.SXFunction('dae', ca.daeIn(x=x, p=u, t=t), ca.daeOut(ode=ode))

# Create an integrator
opts = {'tf': tf / nk}  # final time
if coll:
    # Old collocation integrator: Legendre scheme with KINSOL as the
    # implicit solver and CSparse for the linear systems.
    opts['number_of_finite_elements'] = 5
    opts['interpolation_order'] = 5
    opts['collocation_scheme'] = 'legendre'
    opts['implicit_solver'] = 'kinsol'
    opts['implicit_solver_options'] = {'linear_solver': 'csparse'}
    opts['expand_f'] = True
    integrator = ca.Integrator('integrator', 'oldcollocation', dae, opts)
else:
    # CVODES with deliberately loose tolerances.
    opts['abstol'] = 1e-1  # tolerance
    opts['reltol'] = 1e-1  # tolerance
    # opts['steps_per_checkpoint'] = 1000
    opts['quad_err_con'] = True
    opts['fsens_err_con'] = True
    opts['t0'] = 0.
    # NOTE(review): this overrides the per-interval 'tf' = tf/nk set
    # above, so the CVODES branch integrates over the full horizon —
    # confirm that is intended.
    opts['tf'] = tf
    integrator = ca.Integrator('integrator', 'cvodes', dae, opts)

# One forward simulation from x0 with the control fixed to 0.
integrator.setInput(x0, 'x0')
integrator.setInput(0, 'p')
integrator.evaluate()
integrator.reset()
nk = 20 # Control discretization tf = 10.0 # End time # Declare variables (use scalar graph) u = ca.SX.sym("u") # control x = ca.SX.sym("x", 2) # states # ODE right hand side and quadratures xdot = ca.vertcat([(1 - x[1] * x[1]) * x[0] - x[1] + u, x[0]]) qdot = x[0] * x[0] + x[1] * x[1] + u * u # DAE residual function dae = ca.SXFunction("dae", ca.daeIn(x=x, p=u), ca.daeOut(ode=xdot, quad=qdot)) # Create an integrator integrator = ca.Integrator("integrator", "cvodes", dae, {"tf": tf / nk}) # All controls (use matrix graph) x = ca.MX.sym("x", nk) # nk-by-1 symbolic variable U = ca.vertsplit(x) # cheaper than x[0], x[1], ... # The initial state (x_0=0, x_1=1) X = ca.MX([0, 1]) # Objective function f = 0 # Build a graph of integrator calls for k in range(nk): X, QF = itemgetter('xf', 'qf')(integrator({'x0': X, 'p': U[k]})) f += QF