def calc_state_matrix(self):
    """
    Return state matrix and store to ``self.As``.

    Returns
    -------
    cvxopt.matrix
        state matrix
    """
    system = self.system

    gyx = matrix(system.dae.gx)
    self.solver.linsolve(system.dae.gy, gyx)
    self.As = matrix(system.dae.fx - system.dae.fy * gyx)

    # ------------------------------------------------------
    # TODO: use scipy eigs
    # self.As = sparse(self.As)
    # I = np.array(self.As.I).reshape((-1,))
    # J = np.array(self.As.J).reshape((-1,))
    # V = np.array(self.As.V).reshape((-1,))
    # self.As = csr_matrix((V, (I, J)), shape=self.As.size)
    # ------------------------------------------------------

    return self.As
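# A standalone sketch of the scipy conversion outlined in the TODO above, assuming
# `As` is a cvxopt sparse matrix; the matrix values and the choice of k are
# illustrative only, not ANDES defaults.
import numpy as np
from cvxopt import spmatrix
from scipy.sparse import csr_matrix
from scipy.sparse.linalg import eigs

# illustrative 6x6 sparse "state matrix"
As = spmatrix([-1.0, -2.0, -5.0, -0.1, -20.0, -0.5, 0.3],
              [0, 1, 2, 3, 4, 5, 0],
              [0, 1, 2, 3, 4, 5, 2])

I = np.array(As.I).reshape((-1,))
J = np.array(As.J).reshape((-1,))
V = np.array(As.V).reshape((-1,))
As_csr = csr_matrix((V, (I, J)), shape=As.size)

# eigs computes k < n-1 eigenvalues (largest magnitude by default),
# so it is a partial replacement for the dense np.linalg.eig
print(eigs(As_csr, k=2, return_eigenvectors=False))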
def nr_step(self):
    """
    Single step using Newton-Raphson method.

    Returns
    -------
    float
        maximum absolute mismatch
    """
    system = self.system

    # evaluate discrete, differential, algebraic, and Jacobians
    system.dae.clear_fg()
    system.l_update_var(self.models, niter=self.niter, err=self.mis[-1])
    system.s_update_var(self.models)
    system.f_update(self.models)
    system.g_update(self.models)
    system.l_update_eq(self.models)
    system.fg_to_dae()

    if self.config.method == 'NR':
        system.j_update(models=self.models)
    elif self.config.method == 'dishonest':
        if self.niter < self.config.n_factorize:
            system.j_update(self.models)

    # prepare and solve linear equations
    self.inc = -matrix([matrix(system.dae.f),
                        matrix(system.dae.g)])

    self.A = sparse([[system.dae.fx, system.dae.gx],
                     [system.dae.fy, system.dae.gy]])

    if not self.config.linsolve:
        self.inc = self.solver.solve(self.A, self.inc)
    else:
        self.inc = self.solver.linsolve(self.A, self.inc)

    system.dae.x += np.ravel(np.array(self.inc[:system.dae.n]))
    system.dae.y += np.ravel(np.array(self.inc[system.dae.n:]))

    # find out variables associated with maximum mismatches
    fmax = 0
    if system.dae.n > 0:
        fmax_idx = np.argmax(np.abs(system.dae.f))
        fmax = system.dae.f[fmax_idx]
        logger.debug("Max. diff mismatch %.10g on %s", fmax, system.dae.x_name[fmax_idx])

    gmax_idx = np.argmax(np.abs(system.dae.g))
    gmax = system.dae.g[gmax_idx]
    logger.debug("Max. algeb mismatch %.10g on %s", gmax, system.dae.y_name[gmax_idx])

    mis = max(abs(fmax), abs(gmax))
    if self.niter == 0:
        self.mis[0] = mis
    else:
        self.mis.append(mis)

    system.vars_to_models()

    return mis
def _debug_ac(self, xy_idx):
    """
    Debug Ac matrix by printing out equations and derivatives associated
    with the max. mismatch variable.

    Parameters
    ----------
    xy_idx
        Index of the maximum mismatch into the `xy` array.
    """
    xy_idx = xy_idx.tolist()
    assoc_eqns = self.Ac[:, xy_idx]
    assoc_vars = self.Ac[xy_idx, :]

    eqns_idx = np.where(np.ravel(matrix(assoc_eqns)))[0]
    vars_idx = np.where(np.ravel(matrix(assoc_vars)))[0]

    logger.debug('Max. correction is for variable %s [%d]',
                 self.system.dae.xy_name[xy_idx], xy_idx)
    logger.debug('Associated equation rhs is %20g', self.system.dae.fg[xy_idx])
    logger.debug('')

    logger.debug(f'{"xy_index":<10} {"Equation (row)":<20} {"Derivative":<20} {"Eq. Mismatch":<20}')
    for eq in eqns_idx:
        eq = eq.tolist()
        logger.debug(f'{eq:<10} {self.system.dae.xy_name[eq]:<20} {assoc_eqns[eq]:<20g} '
                     f'{self.system.dae.fg[eq]:<20g}')

    logger.debug('')
    logger.debug(f'{"xy_index":<10} {"Variable (col)":<20} {"Derivative":<20} {"Eq. Mismatch":<20}')
    for v in vars_idx:
        v = v.tolist()
        logger.debug(f'{v:<10} {self.system.dae.xy_name[v]:<20} {assoc_vars[v]:<20g} '
                     f'{self.system.dae.fg[v]:<20g}')
def calc_state_matrix(self):
    r"""
    Return state matrix and store to ``self.As``.

    Notes
    -----
    For systems with the form

    .. math ::

        T \dot{x} = f(x, y) \\
        0 = g(x, y)

    the state matrix is calculated from

    .. math ::

        A_s = T^{-1} (f_x - f_y g_y^{-1} g_x)

    Returns
    -------
    cvxopt.matrix
        state matrix
    """
    system = self.system

    gyx = matrix(system.dae.gx)
    self.solver.linsolve(system.dae.gy, gyx)

    Tfnz = system.dae.Tf + np.ones_like(system.dae.Tf) * np.equal(system.dae.Tf, 0.0)
    iTf = spdiag((1 / Tfnz).tolist())
    self.As = matrix(iTf * (system.dae.fx - system.dae.fy * gyx))

    return self.As
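# A minimal NumPy-only sketch of the same reduction, A_s = T^{-1} (f_x - f_y g_y^{-1} g_x),
# for a hypothetical 2-state, 1-algebraic-variable system; all Jacobian values below are
# illustrative, not taken from any ANDES case.
import numpy as np

fx = np.array([[-1.0, 0.5],
               [0.2, -2.0]])       # df/dx
fy = np.array([[0.1],
               [0.3]])             # df/dy
gx = np.array([[0.4, -0.1]])       # dg/dx
gy = np.array([[2.0]])             # dg/dy (nonsingular)
Tf = np.array([0.05, 0.0])         # left-hand-side time constants

# zero time constants are replaced by 1, as in calc_state_matrix()
Tfnz = Tf + (Tf == 0.0)
iTf = np.diag(1.0 / Tfnz)

As = iTf @ (fx - fy @ np.linalg.solve(gy, gx))
print(np.linalg.eigvals(As))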
def nr_step(self):
    """
    Single step using the Newton-Raphson method.

    Returns
    -------
    float
        maximum absolute mismatch
    """
    system = self.system

    # evaluate discrete, differential, algebraic, and Jacobians
    system.e_clear()
    system.l_update_var()
    system.f_update()
    system.g_update()
    system.l_check_eq()
    system.l_set_eq()
    system.fg_to_dae()
    system.j_update()

    # prepare and solve linear equations
    self.inc = -matrix([matrix(system.dae.f),
                        matrix(system.dae.g)])

    self.A = sparse([[system.dae.fx, system.dae.gx],
                     [system.dae.fy, system.dae.gy]])

    self.inc = self.solver.solve(self.A, self.inc)

    system.dae.x += np.ravel(np.array(self.inc[:system.dae.n]))
    system.dae.y += np.ravel(np.array(self.inc[system.dae.n:]))

    mis = np.max(np.abs(system.dae.fg))
    self.mis.append(mis)

    system.vars_to_models()

    return mis
def solve(self, A, b):
    """
    Solve linear system ``Ax = b`` using numeric factorization ``N`` and
    symbolic factorization ``F``. Store the solution in ``b``.

    This function caches the symbolic factorization in ``self.F`` and is
    faster in general. It refactorizes ``A`` if the cached symbolic
    factorization is invalid.

    Parameters
    ----------
    A
        Sparse matrix for the equation set coefficients.
    b
        RHS of the equation.

    Returns
    -------
    numpy.ndarray
        The solution in a 1-D ndarray
    """
    self.A = A
    self.b = b

    if self.factorize is True:
        self.F = self._symbolic(self.A)
        self.factorize = False

    try:
        self.N = self._numeric(self.A, self.F)
        self._solve(self.A, self.F, self.N, self.b)
        return np.ravel(self.b)
    except ValueError:
        logger.debug('Unexpected symbolic factorization.')
        self.F = self._symbolic(self.A)
        self.N = self._numeric(self.A, self.F)
        self._solve(self.A, self.F, self.N, self.b)
        return np.ravel(self.b)
    except ArithmeticError:
        logger.error('Jacobian matrix is singular.')
        diag = self.A[0:self.A.size[0] ** 2:self.A.size[0] + 1]
        idx = (np.argwhere(np.array(matrix(diag)).ravel() == 0.0)).ravel()
        logger.error('The xy indices of associated variables:')
        logger.error(idx)

        return np.ravel(matrix(np.nan, self.b.size, 'd'))
def nr_step(self):
    """
    Single step using Newton-Raphson method.

    Returns
    -------
    float
        maximum absolute mismatch
    """
    system = self.system

    # evaluate discrete, differential, algebraic, and Jacobians
    system.dae.clear_fg()
    system.l_update_var(self.models, niter=self.niter, err=self.mis[-1])
    system.s_update_var(self.models)
    system.f_update(self.models)
    system.g_update(self.models)
    system.l_update_eq(self.models)
    system.fg_to_dae()

    if self.config.method == 'NR':
        system.j_update(models=self.models)
    elif self.config.method == 'dishonest':
        if self.niter < self.config.n_factorize:
            system.j_update(self.models)

    # prepare and solve linear equations
    self.inc = -matrix([matrix(system.dae.f),
                        matrix(system.dae.g)])

    self.A = sparse([[system.dae.fx, system.dae.gx],
                     [system.dae.fy, system.dae.gy]])

    if not self.config.linsolve:
        self.inc = self.solver.solve(self.A, self.inc)
    else:
        self.inc = self.solver.linsolve(self.A, self.inc)

    system.dae.x += np.ravel(np.array(self.inc[:system.dae.n]))
    system.dae.y += np.ravel(np.array(self.inc[system.dae.n:]))

    mis = np.max(np.abs(system.dae.fg))
    if self.niter == 0:
        self.mis[0] = mis
    else:
        self.mis.append(mis)

    system.vars_to_models()

    return mis
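# A stripped-down illustration of one full Newton-Raphson solve using the same block
# Jacobian layout [[fx, fy], [gx, gy]] and sign convention as nr_step(). The scalar
# f and g below are hypothetical equations, not the ANDES power flow model.
import numpy as np

def f(x, y):
    return np.array([np.sin(x[0]) + y[0] - 1.2])

def g(x, y):
    return np.array([x[0] + 2.0 * y[0] - 2.0])

x = np.array([0.1])
y = np.array([0.5])

for niter in range(20):
    fx = np.array([[np.cos(x[0])]])   # df/dx
    fy = np.array([[1.0]])            # df/dy
    gx = np.array([[1.0]])            # dg/dx
    gy = np.array([[2.0]])            # dg/dy

    A = np.block([[fx, fy], [gx, gy]])           # same layout as `self.A`
    rhs = -np.concatenate([f(x, y), g(x, y)])    # same sign convention as `self.inc`
    inc = np.linalg.solve(A, rhs)

    x += inc[:1]
    y += inc[1:]

    mis = np.max(np.abs(np.concatenate([f(x, y), g(x, y)])))
    if mis < 1e-8:
        break

print(niter, x, y, mis)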
def _debug_g(self, y_idx):
    """
    Print out the variables associated with the given algebraic equation index.

    Parameters
    ----------
    y_idx
        Index of the equation into the `g` array. Diff. eqns. are not counted in.
    """
    y_idx = y_idx.tolist()
    logger.debug('--> Iteration Number: niter = %d', self.niter)
    logger.debug('Max. algebraic equation mismatch:')
    logger.debug('   <%s> [y_idx=%d]', self.system.dae.y_name[y_idx], y_idx)
    logger.debug('   Variable value = %.4f', self.system.dae.y[y_idx])
    logger.debug('   Mismatch value = %.4f', self.system.dae.g[y_idx])

    assoc_vars = self.system.dae.gy[y_idx, :]
    vars_idx = np.where(np.ravel(matrix(assoc_vars)))[0]

    logger.debug('Related variable values:')
    logger.debug(f'{"y_index":<10} {"Variable":<20} {"Derivative":<20}')
    for v in vars_idx:
        v = v.tolist()
        logger.debug('%10d %20s %20g', v, self.system.dae.y_name[v], assoc_vars[v])
def _debug_g(self, y_idx):
    """
    Print out the variables associated with the given algebraic equation index.

    Parameters
    ----------
    y_idx
        Index of the equation into the `g` array. Diff. eqns. are not counted in.
    """
    y_idx = y_idx.tolist()
    logger.debug(
        f'Max. algebraic mismatch associated with {self.system.dae.y_name[y_idx]} [y_idx={y_idx}]'
    )

    assoc_vars = self.system.dae.gy[y_idx, :]
    vars_idx = np.where(np.ravel(matrix(assoc_vars)))[0]

    logger.debug('')
    logger.debug(f'{"y_index":<10} {"Variable":<20} {"Derivative":<20}')
    for v in vars_idx:
        v = v.tolist()
        logger.debug(f'{v:<10} {self.system.dae.y_name[v]:<20} {assoc_vars[v]:<20g}')
def _calc_h_first(self):
    """
    Compute the first time step and save to ``self.h``.
    """
    system = self.system
    config = self.config

    if not system.dae.n:
        freq = 1.0
    elif system.dae.n == 1:
        B = matrix(system.dae.gx)
        self.solver.linsolve(system.dae.gy, B)
        As = system.dae.fx - system.dae.fy * B
        freq = max(abs(As[0, 0]), 1)
    else:
        freq = 30.0

    if freq > system.config.freq:
        freq = float(system.config.freq)

    tspan = abs(config.tf - config.t0)
    tcycle = 1 / freq

    self.deltatmax = min(tcycle, tspan / 100.0)
    self.deltat = min(tcycle, tspan / 100.0)
    self.deltatmin = min(tcycle / 500, self.deltatmax / 20)

    if config.tstep <= 0:
        logger.warning('Fixed time step is negative or zero')
        logger.warning('Switching to automatic time step')
        config.fixt = False

    if config.fixt:
        self.deltat = config.tstep
        if config.tstep < self.deltatmin:
            logger.warning('Fixed time step is smaller than the estimated minimum.')
        if config.tstep > self.deltatmax:
            logger.debug('Increased deltatmax to tstep.')
            self.deltatmax = config.tstep

    self.h = self.deltat

    # do not skip over the end time at the first step
    self.h = min(self.h, config.tf - system.dae.t)

    # if from CSV, determine `h` from data
    if self.data_csv is not None:
        if self.data_csv.shape[0] > 1:
            self.h = self.data_csv[1, 0] - self.data_csv[0, 0]
        else:
            logger.warning("CSV data does not contain more than one time step.")
            self.h = 0

    return self.h
def reorder_As(self):
    """
    Reorder `As` by moving rows and columns associated with zero time constants to the end.

    Returns `fx`, `fy`, `gx`, `gy`, `Tf`.
    """
    system = self.system
    rows = np.arange(system.dae.n, dtype=int)
    cols = np.arange(system.dae.n, dtype=int)
    vals = np.ones(system.dae.n, dtype=float)

    swaps = []
    bidx = self.non_zeros
    for ii in range(system.dae.n - self.non_zeros):
        if ii in self.singular_idx:
            while bidx in self.singular_idx:
                bidx += 1
            cols[ii] = bidx
            rows[bidx] = ii
            swaps.append((ii, bidx))

    # swap the variable names
    for fr, bk in swaps:
        bk_name = self.x_name[bk]
        self.x_name[fr] = bk_name
    self.x_name = self.x_name[:self.non_zeros]

    # compute the permutation matrix for `As` containing non-states
    perm = spmatrix(matrix(vals), matrix(rows), matrix(cols))
    As_perm = perm * sparse(self.As) * perm
    self.As_perm = As_perm

    nfx = As_perm[:self.non_zeros, :self.non_zeros]
    nfy = As_perm[:self.non_zeros, self.non_zeros:]
    ngx = As_perm[self.non_zeros:, :self.non_zeros]
    ngy = As_perm[self.non_zeros:, self.non_zeros:]

    nTf = np.delete(system.dae.Tf, self.singular_idx)

    return nfx, nfy, ngx, ngy, nTf
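# A generic NumPy sketch of the row/column reordering idea: move entries with zero
# time constants to the end using a permutation matrix. The matrix values are
# illustrative, and this uses a sort-based permutation rather than the exact
# swap-based construction in reorder_As().
import numpy as np

As = np.arange(16, dtype=float).reshape(4, 4)   # illustrative 4x4 matrix
Tf = np.array([0.1, 0.0, 0.2, 0.0])             # entries 1 and 3 are non-states

order = np.argsort(Tf == 0.0, kind='stable')    # true states first, zero-Tf last
P = np.eye(4)[order]

As_perm = P @ As @ P.T
non_zeros = int(np.count_nonzero(Tf))

nfx = As_perm[:non_zeros, :non_zeros]
nfy = As_perm[:non_zeros, non_zeros:]
ngx = As_perm[non_zeros:, :non_zeros]
ngy = As_perm[non_zeros:, non_zeros:]
print(order)    # [0 2 1 3]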
def _calc_state_matrix(self, fx, fy, gx, gy, Tf, dense=True):
    """
    Kernel function for calculating the state matrix.
    """
    gyx = matrix(gx)
    self.solver.linsolve(gy, gyx)

    Tfnz = Tf + np.ones_like(Tf) * np.equal(Tf, 0.0)
    iTf = spdiag((1 / Tfnz).tolist())

    if dense:
        return iTf * (fx - fy * gyx)
    else:
        return sparse(iTf * (fx - fy * gyx))
def calc_part_factor(self, As=None):
    """
    Compute participation factor of states in eigenvalues.

    Returns
    -------
    (cvxopt.matrix, cvxopt.matrix)
        eigenvalues ``mu`` and participation factor matrix ``part_fact``
    """
    if As is None:
        As = self.As

    mu, N = np.linalg.eig(As)
    N = matrix(N)
    n = len(mu)
    idx = range(n)

    mu_complex = np.zeros_like(mu, dtype=complex)
    W = matrix(spmatrix(1.0, idx, idx, As.size, N.typecode))
    gesv(N, W)

    partfact = mul(abs(W.T), abs(N))
    b = matrix(1.0, (1, n))
    WN = b * partfact
    partfact = partfact.T

    for item in idx:
        mu_real = float(mu[item].real)
        mu_imag = float(mu[item].imag)
        mu_complex[item] = complex(round(mu_real, 5), round(mu_imag, 5))
        partfact[item, :] /= WN[item]

    # participation factor
    self.mu = matrix(mu_complex)
    self.part_fact = matrix(partfact)

    return self.mu, self.part_fact
def calc_part_factor(self):
    """
    Compute participation factor of states in eigenvalues.

    Returns
    -------
    (cvxopt.matrix, cvxopt.matrix)
        eigenvalues ``mu`` and participation factor matrix ``part_fact``
    """
    mu, N = np.linalg.eig(self.As)
    # TODO: use scipy.sparse.linalg.eigs(self.As)

    N = matrix(N)
    n = len(mu)
    idx = range(n)

    mu_complex = np.array([0] * n, dtype=complex)
    W = matrix(spmatrix(1.0, idx, idx, (n, n), N.typecode))
    gesv(N, W)

    partfact = mul(abs(W.T), abs(N))
    b = matrix(1.0, (1, n))
    WN = b * partfact
    partfact = partfact.T

    for item in idx:
        mu_real = float(mu[item].real)
        mu_imag = float(mu[item].imag)
        mu_complex[item] = complex(round(mu_real, 4), round(mu_imag, 4))
        partfact[item, :] /= WN[item]

    # participation factor
    self.mu = matrix(mu_complex)
    self.part_fact = matrix(partfact)

    return self.mu, self.part_fact
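# A NumPy-only sketch of the participation-factor computation for a small dense
# test matrix (illustrative values). The per-mode normalization mirrors the
# row scaling by `WN` above.
import numpy as np

As = np.array([[-1.0, 10.0],
               [0.0, -5.0]])

mu, N = np.linalg.eig(As)     # right eigenvectors in the columns of N
W = np.linalg.inv(N)          # rows of W are the left eigenvectors

# raw participation of state k in mode i: |W[i, k]| * |N[k, i]|
partfact = np.abs(W) * np.abs(N.T)

# normalize so that the participations of each mode sum to one
partfact /= partfact.sum(axis=1, keepdims=True)

print(mu)
print(partfact)               # rows: modes, columns: states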
def solve(self, A, b):
    """
    Solve linear system ``Ax = b`` using numeric factorization ``N`` and
    symbolic factorization ``F``. Store the solution in ``b``.

    This function caches the symbolic factorization in ``self.F`` and is
    faster in general. It refactorizes ``A`` if the cached symbolic
    factorization is invalid, and falls back to ``Solver.linsolve`` for
    sparse libraries without factorization caching.

    Parameters
    ----------
    A
        Sparse matrix for the equation set coefficients.
    b
        RHS of the equation.

    Returns
    -------
    numpy.ndarray
        The solution in a 1-D ndarray
    """
    self.A = A
    self.b = b

    if self.sparselib in ('umfpack', 'klu'):
        if self.factorize is True:
            self.F = self._symbolic(self.A)
            self.factorize = False

        try:
            self.N = self._numeric(self.A, self.F)
            self._solve(self.A, self.F, self.N, self.b)
            return np.ravel(self.b)
        except ValueError:
            logger.debug('Unexpected symbolic factorization.')
            self.F = self._symbolic(self.A)
            self.N = self._numeric(self.A, self.F)
            self._solve(self.A, self.F, self.N, self.b)
            return np.ravel(self.b)
        except ArithmeticError:
            logger.error('Jacobian matrix is singular.')
            return np.ravel(matrix(np.nan, self.b.size, 'd'))

    elif self.sparselib in ('spsolve', 'cupy'):
        return self.linsolve(A, b)
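# A small self-contained example of the symbolic/numeric split that `solve` relies on,
# using cvxopt's UMFPACK bindings directly. The matrix values are illustrative; the
# point is that `F` depends only on the sparsity pattern and can be reused while `N`
# is recomputed whenever the values of A change.
from cvxopt import matrix, spmatrix, umfpack

A = spmatrix([4.0, 2.0, 1.0, 3.0], [0, 1, 0, 1], [0, 0, 1, 1])
b = matrix([1.0, 2.0])

F = umfpack.symbolic(A)       # symbolic factorization: sparsity pattern only
N = umfpack.numeric(A, F)     # numeric factorization: actual values
umfpack.solve(A, N, b)        # overwrites b with the solution
print(b)

# only the values of A change: reuse F, recompute N
A[0, 0] = 5.0
b = matrix([1.0, 2.0])
N = umfpack.numeric(A, F)
umfpack.solve(A, N, b)
print(b)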
def _reduce(self, fx, fy, gx, gy, Tf, dense=True):
    """
    Reduce algebraic equations (or states associated with zero time constants).

    Returns
    -------
    spmatrix
        The reduced state matrix
    """
    gyx = matrix(gx)
    self.solver.linsolve(gy, gyx)

    Tfnz = Tf + np.ones_like(Tf) * np.equal(Tf, 0.0)
    iTf = spdiag((1 / Tfnz).tolist())

    if dense:
        return iTf * (fx - fy * gyx)
    else:
        return sparse(iTf * (fx - fy * gyx))
def _calc_h_first(self):
    """
    Compute the first time step and save to ``self.h``.
    """
    system = self.system
    config = self.config

    if not system.dae.n:
        freq = 1.0
    elif system.dae.n == 1:
        B = matrix(system.dae.gx)
        self.solver.linsolve(system.dae.gy, B)
        As = system.dae.fx - system.dae.fy * B
        freq = max(abs(As[0, 0]), 1)
    else:
        freq = 30.0

    if freq > system.config.freq:
        freq = float(system.config.freq)

    tspan = abs(config.tf - config.t0)
    tcycle = 1 / freq

    self.deltatmax = min(tcycle, tspan / 100.0)
    self.deltat = min(tcycle, tspan / 100.0)
    self.deltatmin = min(tcycle / 500, self.deltatmax / 20)

    if config.tstep <= 0:
        logger.warning('Fixed time step is negative or zero')
        logger.warning('Switching to automatic time step')
        config.fixt = False

    if config.fixt:
        self.deltat = config.tstep
        if config.tstep < self.deltatmin:
            logger.warning('Fixed time step is smaller than the estimated minimum.')

    self.h = self.deltat

    return self.h
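# Worked numbers for the step-size bounds above, assuming a multi-state system
# (freq defaults to 30.0) and an illustrative 20-second simulation horizon.
freq = 30.0
tspan = 20.0
tcycle = 1 / freq                               # 0.0333 s

deltatmax = min(tcycle, tspan / 100.0)          # 0.0333 s
deltat = min(tcycle, tspan / 100.0)             # 0.0333 s
deltatmin = min(tcycle / 500, deltatmax / 20)   # 6.67e-05 s
print(deltatmax, deltat, deltatmin)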
def _solve_g(self, verbose):
    """
    Solve the algebraic equations ``g(x, y) = 0`` at the current time with a
    Newton-Raphson loop, keeping the differential variables fixed.
    """
    system = self.system
    dae = system.dae
    self.converged = False
    self.niter = 0
    self.mis = []

    # check if the next step is critical time
    if self.is_switch_time():
        self._last_switch_t = system.switch_times[self._switch_idx]
        system.switch_action(self.pflow_tds_models)

    while True:
        system.e_clear(models=self.pflow_tds_models)
        system.l_update_var(models=self.pflow_tds_models)
        system.g_update(models=self.pflow_tds_models)

        inc = -matrix(system.dae.g)
        system.j_update(models=self.pflow_tds_models)
        inc = self.solver.solve(dae.gy, inc)
        dae.y += np.ravel(np.array(inc))
        system.vars_to_models()

        mis = np.max(np.abs(inc))
        self.mis.append(mis)
        if verbose:
            print(f't={dae.t:<.4g}, iter={self.niter:<g}, mis={mis:<.4g}')

        if mis < self.config.tol:
            self.converged = True
            break
        elif self.niter > self.config.max_iter:
            raise NoConvergence(f'Convergence not reached after {self.config.max_iter} iterations')
        elif mis >= 1000 and (mis > 1e4 * self.mis[0]):
            raise NoConvergence('Mismatch increased too fast. Convergence not likely.')

        self.niter += 1
def step(tds):
    """
    Integrate with Implicit Trapezoidal Method (ITM) to the current time.

    This function has an internal Newton-Raphson loop for algebraized semi-explicit DAE.
    The function returns the convergence status when done but does NOT progress
    simulation time.

    Returns
    -------
    bool
        Convergence status in ``tds.converged``.
    """
    system = tds.system
    dae = tds.system.dae

    if tds.h == 0:
        logger.error("Current step size is zero. Integration is not permitted.")
        return False

    tds.mis = [1, 1]
    tds.niter = 0
    tds.converged = False

    tds.x0[:] = dae.x
    tds.y0[:] = dae.y
    tds.f0[:] = dae.f

    while True:
        tds.fg_update(models=system.exist.pflow_tds)

        # lazy Jacobian update
        reason = ''
        if dae.t == 0:
            reason = 't=0'
        elif tds.config.honest:
            reason = 'using honest method'
        elif tds.custom_event:
            reason = 'custom event set'
        elif not tds.last_converged:
            reason = 'non-convergence in the last step'
        elif tds.niter > 4 and (tds.niter + 1) % 3 == 0:
            reason = 'every 3 iterations beyond 4 iterations'
        elif dae.t - tds._last_switch_t < 0.1:
            reason = 'within 0.1s of event'

        if reason:
            system.j_update(models=system.exist.pflow_tds, info=reason)

            # set flag in `solver.worker.factorize`, not `solver.factorize`.
            tds.solver.worker.factorize = True

        # `Tf` should remain constant throughout the simulation, even if the
        # corresponding diff. var. is pegged by the anti-windup limiters.

        # solve implicit trapezoidal method (ITM) integration
        if tds.config.g_scale > 0:
            gxs = tds.config.g_scale * tds.h * dae.gx
            gys = tds.config.g_scale * tds.h * dae.gy
        else:
            gxs = dae.gx
            gys = dae.gy

        # calculate complete Jacobian matrix ``Ac``
        tds.Ac = tds.method.calc_jac(tds, gxs, gys)

        # equation `tds.qg[:dae.n] = 0` is the implicit form of differential equations using ITM
        tds.qg[:dae.n] = tds.method.calc_q(dae.x, dae.f, dae.Tf, tds.h, tds.x0, tds.f0)

        # reset the corresponding q elements for pegged anti-windup limiter
        for item in system.antiwindups:
            for key, _, eqval in item.x_set:
                np.put(tds.qg, key, eqval)

        # set or scale the algebraic residuals
        if tds.config.g_scale > 0:
            tds.qg[dae.n:] = tds.config.g_scale * tds.h * dae.g
        else:
            tds.qg[dae.n:] = dae.g

        # calculate variable corrections
        if not tds.config.linsolve:
            inc = tds.solver.solve(tds.Ac, matrix(tds.qg))
        else:
            inc = tds.solver.linsolve(tds.Ac, matrix(tds.qg))

        # check for np.nan first
        if np.isnan(inc).any():
            tds.err_msg = 'NaN found in solution. Convergence is not likely'
            tds.niter = tds.config.max_iter + 1
            tds.busted = True
            break

        # reset tiny values to reduce chattering
        if tds.config.reset_tiny:
            inc[np.where(np.abs(inc) < tds.tol_zero)] = 0

        # set new values
        dae.x -= inc[:dae.n].ravel()
        dae.y -= inc[dae.n:dae.n + dae.m].ravel()

        # synchronize solutions to model internal storage
        system.vars_to_models()

        # store `inc` to tds for debugging
        tds.inc = inc

        mis = np.max(np.abs(inc))

        # store initial maximum mismatch
        if tds.niter == 0:
            tds.mis[0] = mis
        else:
            tds.mis[-1] = mis

        tds.niter += 1

        # converged
        if mis <= tds.config.tol:
            tds.converged = True
            break

        # non-convergence cases
        if tds.niter > tds.config.max_iter:
            if system.options.get("verbose", 20) <= 10:
                tqdm.write(f'* Max. iter. {tds.config.max_iter} reached for t={dae.t:.6f}s, '
                           f'h={tds.h:.6f}s, max inc={mis:.4g} ')

            # debug helpers
            g_max = np.argmax(abs(dae.g))
            inc_max = np.argmax(abs(inc))
            tds._debug_g(g_max)
            tds._debug_ac(inc_max)

            break

        if (mis > 1e6) and (mis > 1e6 * tds.mis[0]):
            tds.err_msg = 'Error increased too quickly.'
            tds.busted = True
            break

    if not tds.converged:
        dae.x[:] = np.array(tds.x0)
        dae.y[:] = np.array(tds.y0)
        dae.f[:] = np.array(tds.f0)
        system.vars_to_models()

    tds.last_converged = tds.converged

    return tds.converged
def _implicit_step(self):
    """
    Integrate for a single given step.

    This function has an internal Newton-Raphson loop for algebraized semi-explicit DAE.
    The function returns the convergence status when done but does NOT progress
    simulation time.

    Returns
    -------
    bool
        Convergence status in ``self.converged``.
    """
    system = self.system
    dae = self.system.dae

    self.mis = []
    self.niter = 0
    self.converged = False

    self.x0 = np.array(dae.x)
    self.y0 = np.array(dae.y)
    self.f0 = np.array(dae.f)

    while True:
        system.e_clear(models=self.pflow_tds_models)

        system.l_update_var(models=self.pflow_tds_models)
        system.f_update(models=self.pflow_tds_models)
        system.g_update(models=self.pflow_tds_models)
        system.l_check_eq(models=self.pflow_tds_models)
        system.l_set_eq(models=self.pflow_tds_models)
        system.fg_to_dae()

        # lazy Jacobian update
        if dae.t == 0 or self.niter > 3 or (dae.t - self._last_switch_t < 0.2):
            system.j_update(models=self.pflow_tds_models)
            self.solver.factorize = True

        # solve trapezoidal rule integration
        In = spdiag([1] * dae.n)
        self.Ac = sparse([[In - self.h * 0.5 * dae.fx, dae.gx],
                          [-self.h * 0.5 * dae.fy, dae.gy]], 'd')

        # reset q as well
        q = dae.x - self.x0 - self.h * 0.5 * (dae.f + self.f0)
        for item in system.antiwindups:
            if len(item.x_set) > 0:
                for key, val in item.x_set:
                    np.put(q, key[np.where(item.zi == 0)], 0)

        qg = np.hstack((q, dae.g))

        inc = self.solver.solve(self.Ac, -matrix(qg))

        # check for np.nan first
        if np.isnan(inc).any():
            logger.error('NaN found in solution. Convergence not likely.')
            self.niter = self.config.max_iter + 1
            self.busted = True
            break

        # reset really small values to avoid anti-windup limiter flag jumps
        inc[np.where(np.abs(inc) < 1e-12)] = 0

        # set new values
        dae.x += np.ravel(np.array(inc[:dae.n]))
        dae.y += np.ravel(np.array(inc[dae.n: dae.n + dae.m]))
        system.vars_to_models()

        # calculate correction
        mis = np.max(np.abs(inc))
        self.mis.append(mis)
        self.niter += 1

        # converged
        if mis <= self.config.tol:
            self.converged = True
            break

        # non-convergence cases
        if self.niter > self.config.max_iter:
            logger.debug(f'Max. iter. {self.config.max_iter} reached for t={dae.t:.6f}, '
                         f'h={self.h:.6f}, mis={mis:.4g} '
                         f'({system.dae.xy_name[np.argmax(inc)]})')
            break

        if mis > 1000 and (mis > 1e8 * self.mis[0]):
            logger.error('Error increased too quickly. Convergence not likely.')
            self.busted = True
            break

    if not self.converged:
        dae.x = np.array(self.x0)
        dae.y = np.array(self.y0)
        dae.f = np.array(self.f0)
        system.vars_to_models()

    return self.converged
def _itm_step(self):
    """
    Integrate with Implicit Trapezoidal Method (ITM) to the current time.

    This function has an internal Newton-Raphson loop for algebraized semi-explicit DAE.
    The function returns the convergence status when done but does NOT progress
    simulation time.

    Returns
    -------
    bool
        Convergence status in ``self.converged``.
    """
    system = self.system
    dae = self.system.dae

    self.mis = 1
    self.niter = 0
    self.converged = False

    self.x0 = np.array(dae.x)
    self.y0 = np.array(dae.y)
    self.f0 = np.array(dae.f)

    while True:
        self._fg_update(models=system.exist.pflow_tds)

        # lazy Jacobian update
        if dae.t == 0 or self.niter > 3 or (dae.t - self._last_switch_t < 0.2):
            system.j_update(models=system.exist.pflow_tds)
            self.solver.factorize = True

        # TODO: set the `Tf` corresponding to the pegged anti-windup limiters to zero.
        # Although this should not affect anything since corr. mismatches in `self.qg` are reset to zero

        # solve implicit trapezoidal method (ITM) integration
        self.Ac = sparse([[self.Teye - self.h * 0.5 * dae.fx, dae.gx],
                          [-self.h * 0.5 * dae.fy, dae.gy]], 'd')

        # equation `self.qg[:dae.n] = 0` is the implicit form of differential equations using ITM
        self.qg[:dae.n] = dae.Tf * (dae.x - self.x0) - self.h * 0.5 * (dae.f + self.f0)

        # reset the corresponding q elements for pegged anti-windup limiter
        for item in system.antiwindups:
            for key, val in item.x_set:
                np.put(self.qg, key, 0)

        self.qg[dae.n:] = dae.g

        if not self.config.linsolve:
            inc = self.solver.solve(self.Ac, -matrix(self.qg))
        else:
            inc = self.solver.linsolve(self.Ac, -matrix(self.qg))

        # check for np.nan first
        if np.isnan(inc).any():
            self.err_msg = 'NaN found in solution. Convergence not likely'
            self.niter = self.config.max_iter + 1
            self.busted = True
            break

        # reset small values to reduce chattering
        inc[np.where(np.abs(inc) < self.tol_zero)] = 0

        # set new values
        dae.x += inc[:dae.n].ravel()
        dae.y += inc[dae.n: dae.n + dae.m].ravel()
        system.vars_to_models()

        # calculate correction
        mis = np.max(np.abs(inc))
        if self.niter == 0:
            self.mis = mis

        self.niter += 1

        # converged
        if mis <= self.config.tol:
            self.converged = True
            break

        # non-convergence cases
        if self.niter > self.config.max_iter:
            logger.debug(f'Max. iter. {self.config.max_iter} reached for t={dae.t:.6f}, '
                         f'h={self.h:.6f}, mis={mis:.4g} ')

            # debug helpers
            g_max = np.argmax(abs(dae.g))
            inc_max = np.argmax(abs(inc))
            self._debug_g(g_max)
            self._debug_ac(inc_max)

            break

        if mis > 1000 and (mis > 1e8 * self.mis):
            self.err_msg = 'Error increased too quickly. Convergence not likely.'
            self.busted = True
            break

    if not self.converged:
        dae.x = np.array(self.x0)
        dae.y = np.array(self.y0)
        dae.f = np.array(self.f0)
        system.vars_to_models()

    return self.converged
def _itm_step(self):
    """
    Integrate with Implicit Trapezoidal Method (ITM) to the current time.

    This function has an internal Newton-Raphson loop for algebraized semi-explicit DAE.
    The function returns the convergence status when done but does NOT progress
    simulation time.

    Returns
    -------
    bool
        Convergence status in ``self.converged``.
    """
    system = self.system
    dae = self.system.dae

    self.mis = 1
    self.niter = 0
    self.converged = False

    self.x0 = np.array(dae.x)
    self.y0 = np.array(dae.y)
    self.f0 = np.array(dae.f)

    while True:
        self._fg_update(models=system.exist.pflow_tds)

        # lazy Jacobian update
        if dae.t == 0 or \
                self.config.honest or \
                self.custom_event or \
                not self.last_converged or \
                self.niter > 4 or \
                (dae.t - self._last_switch_t < 0.1):
            system.j_update(models=system.exist.pflow_tds)
            # set flag in `solver.worker.factorize`, not `solver.factorize`.
            self.solver.worker.factorize = True

        # `Tf` should remain constant throughout the simulation, even if the
        # corresponding diff. var. is pegged by the anti-windup limiters.

        # solve implicit trapezoidal method (ITM) integration
        self.Ac = sparse([[self.Teye - self.h * 0.5 * dae.fx, dae.gx],
                          [-self.h * 0.5 * dae.fy, dae.gy]], 'd')

        # equation `self.qg[:dae.n] = 0` is the implicit form of differential equations using ITM
        self.qg[:dae.n] = dae.Tf * (dae.x - self.x0) - self.h * 0.5 * (dae.f + self.f0)

        # reset the corresponding q elements for pegged anti-windup limiter
        for item in system.antiwindups:
            for key, _, eqval in item.x_set:
                np.put(self.qg, key, eqval)

        self.qg[dae.n:] = dae.g

        if not self.config.linsolve:
            inc = self.solver.solve(self.Ac, matrix(self.qg))
        else:
            inc = self.solver.linsolve(self.Ac, matrix(self.qg))

        # check for np.nan first
        if np.isnan(inc).any():
            self.err_msg = 'NaN found in solution. Convergence is not likely'
            self.niter = self.config.max_iter + 1
            self.busted = True
            break

        # reset small values to reduce chattering
        inc[np.where(np.abs(inc) < self.tol_zero)] = 0

        # set new values
        dae.x -= inc[:dae.n].ravel()
        dae.y -= inc[dae.n: dae.n + dae.m].ravel()

        # store `inc` to self for debugging
        self.inc = inc

        system.vars_to_models()

        # calculate correction
        mis = np.max(np.abs(inc))

        # store initial maximum mismatch
        if self.niter == 0:
            self.mis = mis

        self.niter += 1

        # converged
        if mis <= self.config.tol:
            self.converged = True
            break

        # non-convergence cases
        if self.niter > self.config.max_iter:
            tqdm.write(f'* Max. iter. {self.config.max_iter} reached for t={dae.t:.6f}, '
                       f'h={self.h:.6f}, mis={mis:.4g} ')

            # debug helpers
            g_max = np.argmax(abs(dae.g))
            inc_max = np.argmax(abs(inc))
            self._debug_g(g_max)
            self._debug_ac(inc_max)

            break

        if mis > 1e6 and (mis > 1e6 * self.mis):
            self.err_msg = 'Error increased too quickly. Convergence not likely.'
            self.busted = True
            break

    if not self.converged:
        dae.x[:] = np.array(self.x0)
        dae.y[:] = np.array(self.y0)
        dae.f[:] = np.array(self.f0)
        system.vars_to_models()

    self.last_converged = self.converged

    return self.converged
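# A scalar sketch of the implicit trapezoidal update in _itm_step(), assuming the
# test ODE T * xdot = -x with no algebraic variables. The residual q and the
# Jacobian Ac take the same form as self.qg[:dae.n] and the (1,1) block of self.Ac.
import numpy as np

T = 1.0
h = 0.1
x0 = 1.0
f0 = -x0

x = x0                        # start the Newton loop from the previous value
for niter in range(10):
    f = -x
    fx = -1.0                 # df/dx

    q = T * (x - x0) - h * 0.5 * (f + f0)   # implicit trapezoidal residual
    Ac = T - h * 0.5 * fx                   # Jacobian of q w.r.t. x

    inc = q / Ac              # Newton correction (solve Ac * inc = q)
    x -= inc
    if abs(inc) <= 1e-10:
        break

print(x)                                  # ITM solution after one step
print(x0 * (1 - h / 2) / (1 + h / 2))     # closed-form trapezoidal result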