def test_str(self):
    """Test _core._base._NonparametricMixin.__str__() (string representation)."""
    # Continuous ROMs
    model = roi.InferredContinuousROM("A")
    assert str(model) == \
        "Reduced-order model structure: dx / dt = Ax(t)"
    model = roi.InferredContinuousROM("cA")
    assert str(model) == \
        "Reduced-order model structure: dx / dt = c + Ax(t)"
    model = roi.InferredContinuousROM("HB")
    assert str(model) == \
        "Reduced-order model structure: dx / dt = H(x(t) ⊗ x(t)) + Bu(t)"
    model = roi.InferredContinuousROM("G")
    assert str(model) == \
        "Reduced-order model structure: dx / dt = G(x(t) ⊗ x(t) ⊗ x(t))"
    model = roi.InferredContinuousROM("cH")
    assert str(model) == \
        "Reduced-order model structure: dx / dt = c + H(x(t) ⊗ x(t))"

    # Discrete ROMs
    model = roi.IntrusiveDiscreteROM("A")
    assert str(model) == \
        "Reduced-order model structure: x_{j+1} = Ax_{j}"
    model = roi.IntrusiveDiscreteROM("cB")
    assert str(model) == \
        "Reduced-order model structure: x_{j+1} = c + Bu_{j}"
    model = roi.IntrusiveDiscreteROM("H")
    assert str(model) == \
        "Reduced-order model structure: x_{j+1} = H(x_{j} ⊗ x_{j})"
def test_reproject_continuous(n=100, m=20, r=10):
    """Test pre.reproject_continuous()."""
    # Construct dummy operators.
    k = 1 + r + r*(r+1)//2
    I = np.eye(n)
    D = np.diag(1 - np.logspace(-1, -2, n))
    W = la.qr(np.random.normal(size=(n,n)))[0]
    A = W.T @ D @ W
    Ht = np.random.random((n,n,n))
    H = (Ht + Ht.T) / 20
    H = H.reshape((n, n**2))
    B = np.random.random((n,m))
    U = np.random.random((m,k))
    B1d = np.random.random(n)
    U1d = np.random.random(k)
    Vr = np.eye(n)[:,:r]
    X = np.random.random((n,k))

    # Try with bad initial condition shape.
    with pytest.raises(ValueError) as exc:
        roi.pre.reproject_continuous(lambda x: x, Vr, X[:-1,:])
    assert exc.value.args[0] == \
        f"X and Vr not aligned, first dimension {n-1} != {n}"

    # Linear case, no inputs.
    f = lambda x: A @ x
    X, Xdot = roi.pre.reproject_continuous(f, Vr, X)
    assert X.shape == (n,k)
    assert Xdot.shape == (n,k)
    assert np.allclose(Vr @ Vr.T @ X, X)
    model = roi.InferredContinuousROM("A").fit(Vr, X, Xdot)
    assert np.allclose(model.A_, Vr.T @ A @ Vr)

    # Linear case, 1D inputs.
    f = lambda x, u: A @ x + B1d * u
    X, Xdot = roi.pre.reproject_continuous(f, Vr, X, U1d)
    assert X.shape == (n,k)
    assert Xdot.shape == (n,k)
    model = roi.InferredContinuousROM("AB").fit(Vr, X, Xdot, U1d)
    assert np.allclose(model.A_, Vr.T @ A @ Vr)
    assert np.allclose(model.B_.flatten(), Vr.T @ B1d)

    # Linear case, 2D inputs.
    f = lambda x, u: A @ x + B @ u
    X, Xdot = roi.pre.reproject_continuous(f, Vr, X, U)
    assert X.shape == (n,k)
    assert Xdot.shape == (n,k)
    model = roi.InferredContinuousROM("AB").fit(Vr, X, Xdot, U)
    assert np.allclose(model.A_, Vr.T @ A @ Vr)
    assert np.allclose(model.B_, Vr.T @ B)

    # Quadratic case, no inputs.
    f = lambda x: A @ x + H @ np.kron(x,x)
    X, Xdot = roi.pre.reproject_continuous(f, Vr, X)
    assert X.shape == (n,k)
    assert Xdot.shape == (n,k)
    assert np.allclose(Vr @ Vr.T @ X, X)
    model = roi.InferredContinuousROM("AH").fit(Vr, X, Xdot)
    assert np.allclose(model.A_, Vr.T @ A @ Vr, atol=1e-6)
    assert np.allclose(model.H_, Vr.T @ H @ np.kron(Vr, Vr), atol=1e-1, rtol=1)
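# The sketch below records the idea behind pre.reproject_continuous() that the
# test above relies on: snapshots are projected onto the range of Vr before the
# full-order operator f is applied, so the resulting time derivatives are
# consistent with the intrusive Galerkin ROM. This is an illustrative NumPy
# sketch only, not the library's actual implementation; the name
# _reproject_continuous_sketch is hypothetical.
def _reproject_continuous_sketch(f, Vr, X, U=None):
    """Return (X_reprojected, Xdot) with X_reprojected = Vr Vr^T X and
    Xdot[:, j] = f(X_reprojected[:, j]) (or f(X_reprojected[:, j], U[:, j])).
    """
    X_re = Vr @ (Vr.T @ X)
    if U is None:
        Xdot = np.column_stack([f(x) for x in X_re.T])
    else:
        U = np.atleast_2d(U)
        Xdot = np.column_stack([f(x, u) for x, u in zip(X_re.T, U.T)])
    return X_re, Xdot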
def train_single(trainsize, r, regs):
    """Train and save a ROM with the given dimension and regularization
    hyperparameters.

    Parameters
    ----------
    trainsize : int
        Number of snapshots to use to train the ROM.
    r : int
        Dimension of the desired ROM. Also the number of retained POD
        modes (left singular vectors) used to project the training data.
    regs : two or three non-negative floats
        Regularization hyperparameters (first-order, quadratic, cubic) to use
        in the Operator Inference least-squares problem for training the ROM.
    """
    utils.reset_logger(trainsize)

    # Validate inputs.
    modelform = get_modelform(regs)
    check_lstsq_size(trainsize, r, modelform)
    check_regs(regs)

    # Load training data.
    Q_, Qdot_, t = utils.load_projected_data(trainsize, r)
    U = config.U(t)

    # Train and save the ROM.
    with utils.timed_block(f"Training ROM with k={trainsize:d}, "
                           f"{config.REGSTR(regs)}"):
        rom = opinf.InferredContinuousROM(modelform)
        rom.fit(None, Q_, Qdot_, U, P=regularizer(r, *list(regs)))
        save_trained_rom(trainsize, r, regs, rom)
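# get_modelform() is defined elsewhere in this module. The sketch below shows
# the convention its uses above appear to assume (a hypothetical stand-in, not
# the verified definition): two hyperparameters select the quadratic model,
# three add a cubic term.
def get_modelform_sketch(regs):
    """Return the modelform string implied by the number of regularization
    hyperparameters (or hyperparameter grids).
    """
    if len(regs) == 2:
        return "cAHB"       # constant + linear + quadratic + input
    elif len(regs) == 3:
        return "cAHGB"      # ... plus a cubic term G
    raise ValueError("expected 2 or 3 regularization hyperparameters")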
def train_single(trainsize, r, regs):
    """Train and save a ROM with the given dimension and regularization
    hyperparameters.

    Parameters
    ----------
    trainsize : int
        Number of snapshots to use to train the ROM.
    r : int
        Dimension of the desired ROM. Also the number of retained POD
        modes (left singular vectors) used to project the training data.
    regs : two positive floats
        Regularization hyperparameters (non-quadratic, quadratic) to use in
        the Operator Inference least-squares problem for training the ROM.
    """
    utils.reset_logger(trainsize)

    # Validate inputs.
    d = check_lstsq_size(trainsize, r)
    λ1, λ2 = check_regs(regs)

    # Load training data.
    Q_, Qdot_, t = utils.load_projected_data(trainsize, r)
    U = config.U(t)

    # Train and save the ROM.
    with utils.timed_block(f"Training ROM with k={trainsize:d}, "
                           f"r={r:d}, λ1={λ1:.0f}, λ2={λ2:.0f}"):
        rom = roi.InferredContinuousROM(config.MODELFORM)
        rom.fit(None, Q_, Qdot_, U, P=regularizer(r, d, λ1, λ2))
        save_trained_rom(trainsize, r, regs, rom)
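# regularizer() is defined elsewhere in this module; the sketch below gives one
# reasonable reading of its role here (an assumption, not the verified
# definition): a diagonal Tikhonov matrix that penalizes non-quadratic operator
# entries by λ1 and quadratic entries by λ2. It assumes the d least-squares
# unknowns per reduced dimension are ordered [c, A, H, B], so the compact
# quadratic block H occupies r(r+1)/2 columns starting at index 1 + r.
def regularizer_sketch(r, d, λ1, λ2):
    """Return a diagonal Tikhonov regularizer P of shape (d, d)."""
    diag = np.full(d, float(λ1))
    s = r*(r+1)//2                  # size of the compact quadratic block
    diag[1+r : 1+r+s] = λ2          # penalize H entries by λ2
    return np.diag(diag)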
def train_rom(*args):
    """Train a ROM with the given arguments. If there is a Linear Algebra
    Error, suppress the error and instead return None.
    """
    try:
        return roi.InferredContinuousROM(config.MODELFORM).fit(None, *args)
    except (np.linalg.LinAlgError, ValueError) as e:
        if e.args[0] in [       # Near-singular data matrix.
            "SVD did not converge in Linear Least Squares",
            "On entry to DLASCL parameter number 4 had an illegal value"
        ]:
            return None
        else:
            raise
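# Hypothetical illustration of how train_rom() is meant to be used in a sweep:
# near-singular least-squares problems come back as None and are skipped rather
# than aborting the whole search. The names Q_, Qdot_, U, and
# candidate_regularizers are placeholders, not definitions from this module.
def _sweep_regularizers_sketch(Q_, Qdot_, U, candidate_regularizers):
    """Return (P, rom) pairs for every regularizer that trained successfully."""
    results = []
    for P in candidate_regularizers:
        rom = train_rom(Q_, Qdot_, U, P)
        if rom is not None:
            results.append((P, rom))
    return results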
def test_fit(self):
    model = roi.InferredContinuousROM("cAH")

    # Get test data.
    n, k, m, r = 200, 100, 20, 10
    X, Xdot, U = _get_data(n, k, m)
    Vr = la.svd(X)[0][:,:r]

    # Fit the model with each possible modelform.
    for form in _MODEL_FORMS:
        if "B" not in form:
            model.modelform = form
            model.fit(X, Xdot, Vr)

    # Test fit output sizes.
    model.modelform = "cAHB"
    model.fit(X, Xdot, Vr, U=U)
    assert model.n == n
    assert model.r == r
    assert model.m == m
    assert model.A_.shape == (r,r)
    assert model.Hc_.shape == (r,r*(r+1)//2)
    assert model.H_.shape == (r,r**2)
    assert model.c_.shape == (r,)
    assert model.B_.shape == (r,m)
    assert hasattr(model, "residual_")

    # Try again with one-dimensional inputs.
    m = 1
    U = np.ones(k)
    model.fit(X, Xdot, Vr, U=U)
    n, r, m = model.n, model.r, model.m
    assert model.n == n
    assert model.r == r
    assert model.m == 1
    assert model.A_.shape == (r,r)
    assert model.Hc_.shape == (r,r*(r+1)//2)
    assert model.H_.shape == (r,r**2)
    assert model.c_.shape == (r,)
    assert model.B_.shape == (r,1)
    assert hasattr(model, "residual_")
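# _get_data() is a test fixture defined elsewhere in this test suite. The
# sketch below is an assumed stand-in producing random arrays of the shapes
# test_fit() expects: an n x k snapshot matrix, an n x k matrix of time
# derivatives, and an m x k input matrix.
def _get_data_sketch(n, k, m):
    """Return random test data (X, Xdot, U) of sizes (n,k), (n,k), (m,k)."""
    X = np.random.random((n, k))
    Xdot = np.random.random((n, k))
    U = np.random.random((m, k))
    return X, Xdot, U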
def _train_minimize_1D(trainsize, r, regs, testsize=None, margin=1.1):
    """Train ROMs with the given dimension(s), saving only the ROM with the
    least training error that satisfies a bound on the integrated POD
    coefficients, using a search algorithm to choose the regularization
    hyperparameter.

    Parameters
    ----------
    trainsize : int
        Number of snapshots to use to train the ROM.
    r : int
        Dimension of the desired ROM. Also the number of retained POD
        modes (left singular vectors) used to project the training data.
    regs : two non-negative floats
        Bounds for the (single) regularization hyperparameter to use in the
        Operator Inference least-squares problem for training the ROM.
    testsize : int
        Number of time steps for which a valid ROM must satisfy the POD bound.
    margin : float ≥ 1
        Amount that the integrated POD coefficients of a valid ROM are
        allowed to deviate in magnitude from the maximum magnitude of the
        training data Q, i.e., bound = margin * max(abs(Q)).
    """
    utils.reset_logger(trainsize)

    # Parse arguments.
    check_lstsq_size(trainsize, r, modelform="cAHB")
    log10regs = np.log10(regs)

    # Load training data.
    t = utils.load_time_domain(testsize)
    Q_, Qdot_, _ = utils.load_projected_data(trainsize, r)
    U = config.U(t[:trainsize])

    # Compute the bound to require for integrated POD modes.
    B = margin * np.abs(Q_).max()

    # Create a solver mapping regularization hyperparameters to operators.
    with utils.timed_block(f"Constructing least-squares solver, r={r:d}"):
        rom = opinf.InferredContinuousROM("cAHB")
        rom._construct_solver(None, Q_, Qdot_, U, 1)

    # Test each regularization hyperparameter.
    def training_error(log10reg):
        """Return the training error resulting from the regularization
        hyperparameters λ1 = λ2 = 10^log10reg. If the resulting model
        violates the POD bound, return "infinity".
        """
        λ = 10**log10reg

        # Train the ROM on all training snapshots.
        with utils.timed_block(f"Testing ROM with λ={λ:e}"):
            rom._evaluate_solver(λ)

            # Simulate the ROM over the full domain.
            with np.warnings.catch_warnings():
                np.warnings.simplefilter("ignore")
                q_rom = rom.predict(Q_[:, 0], t, config.U, method="RK45")

            # Check for boundedness of solution.
            if not is_bounded(q_rom, B):
                return _MAXFUN

            # Calculate integrated relative errors in the reduced space.
            return opinf.post.Lp_error(Q_, q_rom[:, :trainsize],
                                       t[:trainsize])[1]

    opt_result = opt.minimize_scalar(training_error,
                                     method="bounded", bounds=log10regs)
    if opt_result.success and opt_result.fun != _MAXFUN:
        λ = 10**opt_result.x
        with utils.timed_block(f"Best regularization for k={trainsize:d}, "
                               f"r={r:d}: λ={λ:.0f}"):
            rom._evaluate_solver(λ)
            save_trained_rom(trainsize, r, (λ, λ), rom)
    else:
        message = "Regularization search optimization FAILED"
        print(message)
        logging.info(message)
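# is_bounded() is imported from elsewhere; the sketch below records the
# contract the calls above appear to assume (an assumption, not the verified
# definition): the integrated POD coefficients q_rom are acceptable only if
# every entry stays within the bound B in absolute value.
def is_bounded_sketch(q_rom, B):
    """Return True if all entries of q_rom have magnitude at most B."""
    return q_rom.size > 0 and np.abs(q_rom).max() <= B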
def train_gridsearch(trainsize, r, regs, testsize=None, margin=1.1):
    """Train ROMs with the given dimension over a grid of potential
    regularization hyperparameters, saving only the ROM with the least
    training error that satisfies a bound on the integrated POD coefficients.

    Parameters
    ----------
    trainsize : int
        Number of snapshots to use to train the ROM.
    r : int
        Dimension of the desired ROM. Also the number of retained POD
        modes (left singular vectors) used to project the training data.
    regs : six or nine non-negative floats
        Bounds and sizes for the grid of regularization hyperparameters.
        First-order: search in [regs[0], regs[1]] at regs[2] points.
        Quadratic:   search in [regs[3], regs[4]] at regs[5] points.
        Cubic (if given): search in [regs[6], regs[7]] at regs[8] points.
    testsize : int
        Number of time steps for which a valid ROM must satisfy the POD bound.
    margin : float ≥ 1
        Amount that the integrated POD coefficients of a valid ROM are
        allowed to deviate in magnitude from the maximum magnitude of the
        training data Q, i.e., bound = margin * max(abs(Q)).
    """
    utils.reset_logger(trainsize)

    # Parse arguments.
    if len(regs) not in (6, 9):
        raise ValueError("6 or 9 regs required (bounds / sizes of grids)")
    grids = []
    for i in range(0, len(regs), 3):
        check_regs(regs[i:i+2])
        grids.append(np.logspace(np.log10(regs[i]),
                                 np.log10(regs[i+1]), int(regs[i+2])))
    modelform = get_modelform(grids)
    d = check_lstsq_size(trainsize, r, modelform)

    # Load training data.
    t = utils.load_time_domain(testsize)
    Q_, Qdot_, _ = utils.load_projected_data(trainsize, r)
    U = config.U(t[:trainsize])

    # Compute the bound to require for integrated POD modes.
    M = margin * np.abs(Q_).max()

    # Create a solver mapping regularization hyperparameters to operators.
    num_tests = np.prod([grid.size for grid in grids])
    print(f"TRAINING {num_tests} ROMS")
    with utils.timed_block(f"Constructing least-squares solver, r={r:d}"):
        rom = opinf.InferredContinuousROM(modelform)
        rom._construct_solver(None, Q_, Qdot_, U, np.ones(d))

    # Test each regularization hyperparameter.
    errors_pass = {}
    errors_fail = {}
    for i, regs in enumerate(itertools.product(*grids)):
        with utils.timed_block(f"({i+1:d}/{num_tests:d}) Testing ROM with "
                               f"{config.REGSTR(regs)}"):
            # Train the ROM on all training snapshots.
            rom._evaluate_solver(regularizer(r, *list(regs)))

            # Simulate the ROM over the full domain.
            with np.warnings.catch_warnings():
                np.warnings.simplefilter("ignore")
                q_rom = rom.predict(Q_[:, 0], t, config.U, method="RK45")

            # Check for boundedness of solution.
            errors = errors_pass if is_bounded(q_rom, M) else errors_fail

            # Calculate integrated relative errors in the reduced space.
            if q_rom.shape[1] > trainsize:
                errors[tuple(regs)] = opinf.post.Lp_error(
                    Q_, q_rom[:, :trainsize], t[:trainsize])[1]

    # Choose and save the ROM with the least error.
    if not errors_pass:
        message = f"NO STABLE ROMS for r={r:d}"
        print(message)
        logging.info(message)
        return

    err2reg = {err: reg for reg, err in errors_pass.items()}
    regs = list(err2reg[min(err2reg.keys())])
    with utils.timed_block(f"Best regularization for k={trainsize:d}, "
                           f"r={r:d}: {config.REGSTR(regs)}"):
        rom._evaluate_solver(regularizer(r, *regs))
        save_trained_rom(trainsize, r, regs, rom)
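# check_regs() (used in train_single() and train_gridsearch() above) is defined
# elsewhere. This assumed sketch captures the contract its call sites suggest:
# every hyperparameter must be a positive number, and the validated values are
# handed back so they can be unpacked.
def check_regs_sketch(regs):
    """Validate that each regularization hyperparameter is positive."""
    for λ in regs:
        if λ <= 0:
            raise ValueError(f"regularization hyperparameter {λ} must be positive")
    return regs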
def train_gridsearch(trainsize, r, regs, testsize=None, margin=1.5):
    """Train ROMs with the given dimension over a grid of potential
    regularization hyperparameters, saving only the ROM with the least
    training error that satisfies a bound on the integrated POD coefficients.

    Parameters
    ----------
    trainsize : int
        Number of snapshots to use to train the ROM.
    r : int
        Dimension of the desired ROM. Also the number of retained POD
        modes (left singular vectors) used to project the training data.
    regs : (float, float, int, float, float, int)
        Bounds and sizes for the grid of regularization parameters.
        Linear:    search in [regs[0], regs[1]] at regs[2] points.
        Quadratic: search in [regs[3], regs[4]] at regs[5] points.
    testsize : int
        Number of time steps for which a valid ROM must satisfy the POD bound.
    margin : float >= 1
        Amount that the integrated POD coefficients of a valid ROM are
        allowed to deviate in magnitude from the maximum magnitude of the
        training data Q, i.e., bound = margin * max(abs(Q)).
    """
    utils.reset_logger(trainsize)

    # Parse arguments.
    d = check_lstsq_size(trainsize, r)
    if len(regs) != 6:
        raise ValueError("len(regs) != 6 (bounds / sizes for parameter grid)")
    check_regs(regs[0:2])
    check_regs(regs[3:5])
    λ1grid = np.logspace(np.log10(regs[0]), np.log10(regs[1]), int(regs[2]))
    λ2grid = np.logspace(np.log10(regs[3]), np.log10(regs[4]), int(regs[5]))

    # Load training data.
    t = utils.load_time_domain(testsize)
    Q_, Qdot_, _ = utils.load_projected_data(trainsize, r)
    U = config.U(t[:trainsize])

    # Compute the bound to require for integrated POD modes.
    M = margin * np.abs(Q_).max()

    # Create a solver mapping regularization parameters to operators.
    print(f"TRAINING {λ1grid.size*λ2grid.size} ROMS")
    with utils.timed_block(f"Constructing least-squares solver, r={r:d}"):
        rom = roi.InferredContinuousROM(config.MODELFORM)
        rom._construct_solver(None, Q_, Qdot_, U, np.ones(d))

    # Test each regularization parameter.
    errors_pass = {}
    errors_fail = {}
    for λ1, λ2 in itertools.product(λ1grid, λ2grid):
        with utils.timed_block(f"Testing ROM with λ1={λ1:5e}, λ2={λ2:5e}"):
            # Train the ROM on all training snapshots.
            rom._evaluate_solver(regularizer(r, d, λ1, λ2))

            # Simulate the ROM over the full domain.
            with np.warnings.catch_warnings():
                np.warnings.simplefilter("ignore")
                q_rom = rom.predict(Q_[:, 0], t, config.U, method="RK45")

            # Check for boundedness of solution.
            errors = errors_pass if is_bounded(q_rom, M) else errors_fail

            # Calculate integrated relative errors in the reduced space.
            if q_rom.shape[1] > trainsize:
                errors[(λ1, λ2)] = roi.post.Lp_error(Q_, q_rom[:, :trainsize],
                                                     t[:trainsize])[1]

    # Choose and save the ROM with the least error.
    if not errors_pass:
        message = f"NO STABLE ROMS for r={r:d}"
        print(message)
        logging.info(message)
        return

    err2reg = {err: reg for reg, err in errors_pass.items()}
    λ1, λ2 = err2reg[min(err2reg.keys())]
    with utils.timed_block(f"Best regularization for k={trainsize:d}, "
                           f"r={r:d}: λ1={λ1:.0f}, λ2={λ2:.0f}"):
        rom._evaluate_solver(regularizer(r, d, λ1, λ2))
        save_trained_rom(trainsize, r, (λ1, λ2), rom)
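# Hypothetical driver call for the two-hyperparameter grid search above. The
# trainsize, dimension, grid bounds, and testsize are arbitrary placeholder
# values chosen for illustration, not settings from the original study.
def _example_gridsearch_call():
    train_gridsearch(trainsize=20000, r=40,
                     regs=(1e2, 1e5, 10,      # λ1: 10 log-spaced points
                           1e4, 1e7, 10),     # λ2: 10 log-spaced points
                     testsize=60000, margin=1.5)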