def train_and_save_all(trainsize, num_modes, regs):
    """Train and save ROMs with the given dimension and regularization.

    Parameters
    ----------
    trainsize : int
        Number of snapshots to use to train the ROM(s).

    num_modes : int or list(int)
        Dimension of the ROM(s) to train, i.e., the number of retained POD
        modes (left singular vectors) used to project the training data.

    regs : float or list(float)
        Regularization parameter(s) to use in the training.
    """
    utils.reset_logger(trainsize)

    # Accept scalar arguments for a single dimension / regularization,
    # as described in the docstring.
    if np.isscalar(num_modes):
        num_modes = [num_modes]
    if np.isscalar(regs):
        regs = [regs]

    logging.info(f"TRAINING {len(num_modes)*len(regs)} ROMS")
    for r in num_modes:
        # Load training data.
        X_, Xdot_, time_domain, _ = utils.load_projected_data(trainsize, r)

        # Evaluate inputs over the training time domain.
        Us = config.U(time_domain)

        # Train and save each ROM.
        for reg in regs:
            with utils.timed_block(f"Training ROM with r={r:d}, reg={reg:e}"):
                rom = train_rom(X_, Xdot_, Us, reg)
                if rom:
                    rom.save_model(config.rom_path(trainsize, r, reg),
                                   save_basis=False, overwrite=True)
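
# Usage sketch with illustrative values (not from the source); assumes the
# projected training data and the config.U input function from this pipeline.
train_and_save_all(trainsize=10000, num_modes=[22, 44], regs=[1e2, 1e3, 1e4])
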
def train_single(trainsize, r, regs):
    """Train and save a ROM with the given dimension and regularization
    hyperparameters.

    Parameters
    ----------
    trainsize : int
        Number of snapshots to use to train the ROM.

    r : int
        Dimension of the desired ROM. Also the number of retained POD modes
        (left singular vectors) used to project the training data.

    regs : two or three non-negative floats
        Regularization hyperparameters (first-order, quadratic, cubic) to use
        in the Operator Inference least-squares problem for training the ROM.
    """
    utils.reset_logger(trainsize)

    # Validate inputs.
    modelform = get_modelform(regs)
    check_lstsq_size(trainsize, r, modelform)
    check_regs(regs)

    # Load training data.
    Q_, Qdot_, t = utils.load_projected_data(trainsize, r)
    U = config.U(t)

    # Train and save the ROM.
    with utils.timed_block(f"Training ROM with k={trainsize:d}, "
                           f"{config.REGSTR(regs)}"):
        rom = opinf.InferredContinuousROM(modelform)
        rom.fit(None, Q_, Qdot_, U, P=regularizer(r, *list(regs)))
        save_trained_rom(trainsize, r, regs, rom)
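
# Usage sketch with illustrative hyperparameters (not from the source): train
# one ROM with first-order, quadratic, and cubic regularization values.
train_single(trainsize=20000, r=40, regs=(1e2, 1e4, 1e6))
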
def train_single(trainsize, r, regs):
    """Train and save a ROM with the given dimension and regularization
    hyperparameters.

    Parameters
    ----------
    trainsize : int
        Number of snapshots to use to train the ROM.

    r : int
        Dimension of the desired ROM. Also the number of retained POD modes
        (left singular vectors) used to project the training data.

    regs : two positive floats
        Regularization hyperparameters (non-quadratic, quadratic) to use in
        the Operator Inference least-squares problem for training the ROM.
    """
    utils.reset_logger(trainsize)

    # Validate inputs.
    d = check_lstsq_size(trainsize, r)
    λ1, λ2 = check_regs(regs)

    # Load training data.
    Q_, Qdot_, t = utils.load_projected_data(trainsize, r)
    U = config.U(t)

    # Train and save the ROM.
    with utils.timed_block(f"Training ROM with k={trainsize:d}, "
                           f"r={r:d}, λ1={λ1:.0f}, λ2={λ2:.0f}"):
        rom = roi.InferredContinuousROM(config.MODELFORM)
        rom.fit(None, Q_, Qdot_, U, P=regularizer(r, d, λ1, λ2))
        save_trained_rom(trainsize, r, regs, rom)
def simulate_rom(trainsize, r, regs, steps=None):
    """Load everything needed to simulate a given ROM, run the simulation,
    and return the simulation results and everything needed to reconstruct
    the results in the original high-dimensional space.
    Raise an Exception if any of the ingredients are missing.

    Parameters
    ----------
    trainsize : int
        Number of snapshots used to train the ROM.

    r : int
        Dimension of the ROM.

    regs : two or three positive floats
        Regularization hyperparameters used to train the ROM.

    steps : int or None
        Number of time steps to simulate the ROM.

    Returns
    -------
    t : (nt,) ndarray
        Time domain corresponding to the ROM outputs.

    V : (NUM_ROMVARS*DOF,r) ndarray
        POD basis used to project the training data (and for reconstructing
        the full-order scaled predictions).

    qbar : (NUM_ROMVARS*DOF,) ndarray
        Mean snapshot that the training data was shifted by after scaling
        but before projection.

    scales : (NUM_ROMVARS,4) ndarray
        Information for how the data was scaled. See data_processing.scale().

    q_rom : (nt,r) ndarray
        Prediction results from the ROM.
    """
    # Load the time domain, basis, initial conditions, and trained ROM.
    t = utils.load_time_domain(steps)
    V, qbar, scales = utils.load_basis(trainsize, r)
    Q_, _, _ = utils.load_projected_data(trainsize, r)
    rom = utils.load_rom(trainsize, r, regs)

    # Simulate the ROM over the full time domain.
    with utils.timed_block(f"Simulating ROM with k={trainsize:d}, r={r:d}, "
                           f"{config.REGSTR(regs)} over full time domain"):
        q_rom = rom.predict(Q_[:, 0], t, config.U, method="RK45")

    return t, V, qbar, scales, q_rom
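
# Usage sketch (illustrative hyperparameters): simulate a saved ROM and lift
# the prediction to the unscaled full-order variables. Following the
# reconstruction pattern used elsewhere in this file, q_rom is treated as
# (r, nt); re-adding the mean snapshot qbar before unscaling is an assumption
# about how the training data were shifted.
t, V, qbar, scales, q_rom = simulate_rom(trainsize=20000, r=40, regs=(1e2, 1e4))
q_full = dproc.unscale(V @ q_rom + qbar.reshape(-1, 1), scales)
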
def simulate_rom(trainsize, r, reg, steps=None):
    """Load everything needed to simulate a given ROM, simulate the ROM,
    and return the simulation results and everything needed to reconstruct
    the results in the original high-dimensional space.
    Raise an Exception if any of the ingredients are missing.

    Parameters
    ----------
    trainsize : int
        Number of snapshots used to train the ROM.

    r : int
        Dimension of the ROM. This is also the number of retained POD
        modes (left singular vectors) used to project the training data.

    reg : float
        Regularization parameter used to train the ROM.

    steps : int or None
        Number of time steps to simulate the ROM.

    Returns
    -------
    t : (nt,) ndarray
        Time domain corresponding to the ROM outputs.

    V : (config.NUM_ROMVARS*config.DOF,r) ndarray
        POD basis used to project the training data (and for reconstructing
        the full-order scaled predictions).

    scales : (NUM_ROMVARS,4) ndarray
        Information for how the data was scaled. See data_processing.scale().

    x_rom : (nt,r) ndarray
        Prediction results from the ROM.
    """
    # Load the time domain, basis, initial conditions, and trained ROM.
    t = utils.load_time_domain(steps)
    V, _ = utils.load_basis(trainsize, r)
    X_, _, _, scales = utils.load_projected_data(trainsize, r)
    rom = utils.load_rom(trainsize, r, reg)

    # Simulate the ROM over the full time domain.
    with utils.timed_block(f"Simulating ROM with r={r:d}, "
                           f"reg={reg:e} over full time domain"):
        x_rom = rom.predict(X_[:, 0], t, config.U, method="RK45")

    return t, V, scales, x_rom
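
# Usage sketch (illustrative hyperparameters): simulate a single-regularization
# ROM and reconstruct the scaled full-order prediction, treating x_rom as
# (r, nt) as in the reconstruction step of main() below.
t, V, scales, x_rom = simulate_rom(trainsize=15000, r=30, reg=1e4)
x_full = dproc.unscale(V[:, :30] @ x_rom, scales)
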
def main(timeindices,
         variables=None,
         snaptype=["gems", "rom", "error"],
         trainsize=None,
         r=None,
         reg=None):
    """Convert a snapshot in .h5 format to a .dat file that matches the format
    of grid.dat. The new file is saved in `config.tecplot_path()` with the same
    filename and the new file extension .dat.

    Parameters
    ----------
    timeindices : ndarray(int) or int
        Indices (one-based) in the full time domain of the snapshots to save.

    variables : str or list(str)
        The variables to save, a subset of config.ROM_VARIABLES.
        Defaults to all variables.

    snaptype : {"rom", "gems", "error"} or list(str)
        Which kinds of snapshots to save. Options:
        * "gems": snapshots from the full-order GEMS data;
        * "rom": reconstructed snapshots produced by a ROM;
        * "error": absolute error between the full-order data
                   and the reduced-order reconstruction.
        If "rom" or "error" are selected, the ROM is selected by the
        remaining arguments.

    trainsize : int
        Number of snapshots used to train the ROM.

    r : int
        Number of retained modes in the ROM.

    reg : float
        Regularization factor used to train the ROM.
    """
    utils.reset_logger(trainsize)

    # Parse parameters.
    timeindices = np.sort(np.atleast_1d(timeindices))
    simtime = timeindices.max()
    t = utils.load_time_domain(simtime + 1)

    # Parse the variables.
    if variables is None:
        variables = config.ROM_VARIABLES
    elif isinstance(variables, str):
        variables = [variables]
    varnames = '\n'.join(f'"{v}"' for v in variables)

    if isinstance(snaptype, str):
        snaptype = [snaptype]
    for stype in snaptype:
        if stype not in ("gems", "rom", "error"):
            raise ValueError(f"invalid snaptype '{stype}'")

    # Read the grid file.
    with utils.timed_block("Reading Tecplot grid data"):
        # Parse the header.
        grid_path = config.grid_data_path()
        with open(grid_path, 'r') as infile:
            grid = infile.read()
        if int(re.findall(r"Elements=(\d+)", grid)[0]) != config.DOF:
            raise RuntimeError(f"{grid_path} DOF and config.DOF do not match")
        num_nodes = int(re.findall(r"Nodes=(\d+)", grid)[0])
        end_of_header = re.findall(r"DT=.*?\n", grid)[0]
        headersize = grid.find(end_of_header) + len(end_of_header)

        # Extract geometry information.
        grid_data = grid[headersize:].split()
        x = grid_data[:num_nodes]
        y = grid_data[num_nodes:2 * num_nodes]
        cell_volume = grid_data[2 * num_nodes:3 * num_nodes]
        connectivity = grid_data[3 * num_nodes:]

    # Extract full-order data if needed.
    if ("gems" in snaptype) or ("error" in snaptype):
        gems_data, _ = utils.load_gems_data(cols=timeindices)
        with utils.timed_block("Lifting selected snapshots of GEMS data"):
            lifted_data = dproc.lift(gems_data)
            true_snaps = np.concatenate(
                [dproc.getvar(v, lifted_data) for v in variables])
    # Simulate ROM if needed.
    if ("rom" in snaptype) or ("error" in snaptype):
        # Load the SVD data.
        V, _ = utils.load_basis(trainsize, r)

        # Load the initial conditions and scales.
        X_, _, _, scales = utils.load_projected_data(trainsize, r)

        # Load the appropriate ROM.
        rom = utils.load_rom(trainsize, r, reg)

        # Simulate the ROM over the time domain.
        with utils.timed_block(f"Simulating ROM with r={r:d}, reg={reg:.0e}"):
            x_rom = rom.predict(X_[:, 0], t, config.U, method="RK45")
            if np.any(np.isnan(x_rom)) or x_rom.shape[1] < simtime:
                raise ValueError("ROM unstable!")

        # Reconstruct the results (only selected variables / snapshots).
        with utils.timed_block("Reconstructing simulation results"):
            x_rec = dproc.unscale(V[:, :r] @ x_rom[:, timeindices], scales)
            x_rec = np.concatenate([dproc.getvar(v, x_rec) for v in variables])

    dsets = {}
    if "rom" in snaptype:
        dsets["rom"] = x_rec
    if "gems" in snaptype:
        dsets["gems"] = true_snaps
    if "error" in snaptype:
        with utils.timed_block("Computing absolute error of reconstruction"):
            abs_err = np.abs(true_snaps - x_rec)
        dsets["error"] = abs_err

    # Save each of the selected snapshots in Tecplot format matching grid.dat.
    for j, tindex in enumerate(timeindices):

        header = HEADER.format(varnames, tindex, t[tindex], num_nodes,
                               config.DOF,
                               len(variables) + 2, "SINGLE " * len(variables))
        for label, dset in dsets.items():

            if label == "gems":
                save_path = config.gems_snapshot_path(tindex)
            if label in ("rom", "error"):
                folder = config.rom_snapshot_path(trainsize, r, reg)
                save_path = os.path.join(folder, f"{label}_{tindex:05d}.dat")
            with utils.timed_block(f"Writing {label} snapshot {tindex:05d}"):
                with open(save_path, 'w') as outfile:
                    # Write the header.
                    outfile.write(header)

                    # Write the geometry data (x,y coordinates).
                    for i in range(0, len(x), NCOLS):
                        outfile.write(' '.join(x[i:i + NCOLS]) + '\n')
                    for i in range(0, len(y), NCOLS):
                        outfile.write(' '.join(y[i:i + NCOLS]) + '\n')

                    # Write the data for each variable.
                    for i in range(0, dset.shape[0], NCOLS):
                        row = ' '.join(f"{v:.9E}"
                                       for v in dset[i:i + NCOLS, j])
                        outfile.write(row + '\n')

                    # Write connectivity information.
                    for i in range(0, len(connectivity), NCOLS):
                        outfile.write(' '.join(connectivity[i:i + NCOLS]) +
                                      '\n')
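
# Usage sketch (illustrative arguments; the variable names "p" and "vx" are
# placeholders for entries of config.ROM_VARIABLES): export GEMS, ROM, and
# error snapshots at three time indices for one trained ROM.
main([5000, 10000, 15000], variables=["p", "vx"],
     snaptype=["gems", "rom", "error"], trainsize=15000, r=30, reg=1e4)
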
def _train_minimize_1D(trainsize, r, regs, testsize=None, margin=1.1):
    """Train ROMs with the given dimension(s), saving only the ROM with
    the least training error that satisfies a bound on the integrated POD
    coefficients, using a search algorithm to choose the regularization
    parameter.

    Parameters
    ----------
    trainsize : int
        Number of snapshots to use to train the ROM.

    r : int
        Dimension of the desired ROM. Also the number of retained POD modes
        (left singular vectors) used to project the training data.

    regs : two non-negative floats
        Bounds for the (single) regularization hyperparameter to use in the
        Operator Inference least-squares problem for training the ROM.

    testsize : int
        Number of time steps for which a valid ROM must satisfy the POD bound.

    margin : float ≥ 1
        Amount that the integrated POD coefficients of a valid ROM are allowed
        to deviate in magnitude from the maximum magnitude of the training
        data Q, i.e., bound = margin * max(abs(Q)).
    """
    utils.reset_logger(trainsize)

    # Parse arguments.
    check_lstsq_size(trainsize, r, modelform="cAHB")
    log10regs = np.log10(regs)

    # Load training data.
    t = utils.load_time_domain(testsize)
    Q_, Qdot_, _ = utils.load_projected_data(trainsize, r)
    U = config.U(t[:trainsize])

    # Compute the bound to require for integrated POD modes.
    B = margin * np.abs(Q_).max()

    # Create a solver mapping regularization hyperparameters to operators.
    with utils.timed_block(f"Constructing least-squares solver, r={r:d}"):
        rom = opinf.InferredContinuousROM("cAHB")
        rom._construct_solver(None, Q_, Qdot_, U, 1)

    # Test each regularization hyperparameter.
    def training_error(log10reg):
        """Return the training error resulting from the regularization
        hyperparameters λ1 = λ2 = 10^log10reg. If the resulting model
        violates the POD bound, return "infinity".
        """
        λ = 10**log10reg

        # Train the ROM on all training snapshots.
        with utils.timed_block(f"Testing ROM with λ={λ:e}"):
            rom._evaluate_solver(λ)

            # Simulate the ROM over the full domain.
            with np.warnings.catch_warnings():
                np.warnings.simplefilter("ignore")
                q_rom = rom.predict(Q_[:, 0], t, config.U, method="RK45")

            # Check for boundedness of solution.
            if not is_bounded(q_rom, B):
                return _MAXFUN

            # Calculate integrated relative errors in the reduced space.
            return opinf.post.Lp_error(Q_, q_rom[:, :trainsize],
                                       t[:trainsize])[1]

    opt_result = opt.minimize_scalar(training_error,
                                     method="bounded",
                                     bounds=log10regs)
    if opt_result.success and opt_result.fun != _MAXFUN:
        λ = 10**opt_result.x
        with utils.timed_block(f"Best regularization for k={trainsize:d}, "
                               f"r={r:d}: λ={λ:.0f}"):
            rom._evaluate_solver(λ)
            save_trained_rom(trainsize, r, (λ, λ), rom)
    else:
        message = "Regularization search optimization FAILED"
        print(message)
        logging.info(message)
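
# Usage sketch (illustrative bounds): search for a single scalar regularization
# in [1e3, 1e6] that minimizes training error while the integrated POD
# coefficients stay within 10% of the training data's maximum magnitude.
_train_minimize_1D(trainsize=20000, r=40, regs=(1e3, 1e6),
                   testsize=60000, margin=1.1)
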
def train_gridsearch(trainsize, r, regs, testsize=None, margin=1.1):
    """Train ROMs with the given dimension over a grid of potential
    regularization hyperparameters, saving only the ROM with the least
    training error that satisfies a bound on the integrated POD coefficients.

    Parameters
    ----------
    trainsize : int
        Number of snapshots to use to train the ROM.

    r : int
        Dimension of the desired ROM. Also the number of retained POD modes
        (left singular vectors) used to project the training data.

    regs : (float, float, int) * 2 or (float, float, int) * 3
        Bounds and sizes for the grid of regularization hyperparameters;
        the cubic grid is optional.
        First-order: search in [regs[0], regs[1]] at regs[2] points.
        Quadratic:   search in [regs[3], regs[4]] at regs[5] points.
        Cubic:       search in [regs[6], regs[7]] at regs[8] points.

    testsize : int
        Number of time steps for which a valid ROM must satisfy the POD bound.

    margin : float ≥ 1
        Amount that the integrated POD coefficients of a valid ROM are allowed
        to deviate in magnitude from the maximum magnitude of the training
        data Q, i.e., bound = margin * max(abs(Q)).
    """
    utils.reset_logger(trainsize)

    # Parse arguments.
    if len(regs) not in [6, 9]:
        raise ValueError("6 or 9 regs required (bounds / sizes of grids")
    grids = []
    for i in range(0, len(regs), 3):
        check_regs(regs[i:i + 2])
        grids.append(
            np.logspace(np.log10(regs[i]), np.log10(regs[i + 1]),
                        int(regs[i + 2])))
    modelform = get_modelform(grids)
    d = check_lstsq_size(trainsize, r, modelform)

    # Load training data.
    t = utils.load_time_domain(testsize)
    Q_, Qdot_, _ = utils.load_projected_data(trainsize, r)
    U = config.U(t[:trainsize])

    # Compute the bound to require for integrated POD modes.
    M = margin * np.abs(Q_).max()

    # Create a solver mapping regularization hyperparameters to operators.
    num_tests = np.prod([grid.size for grid in grids])
    print(f"TRAINING {num_tests} ROMS")
    with utils.timed_block(f"Constructing least-squares solver, r={r:d}"):
        rom = opinf.InferredContinuousROM(modelform)
        rom._construct_solver(None, Q_, Qdot_, U, np.ones(d))

    # Test each regularization hyperparameter.
    errors_pass = {}
    errors_fail = {}
    for i, regs in enumerate(itertools.product(*grids)):
        with utils.timed_block(f"({i+1:d}/{num_tests:d}) Testing ROM with "
                               f"{config.REGSTR(regs)}"):
            # Train the ROM on all training snapshots.
            rom._evaluate_solver(regularizer(r, *list(regs)))

            # Simulate the ROM over the full domain.
            with np.warnings.catch_warnings():
                np.warnings.simplefilter("ignore")
                q_rom = rom.predict(Q_[:, 0], t, config.U, method="RK45")

            # Check for boundedness of solution.
            errors = errors_pass if is_bounded(q_rom, M) else errors_fail

            # Calculate integrated relative errors in the reduced space.
            if q_rom.shape[1] > trainsize:
                errors[tuple(regs)] = opinf.post.Lp_error(
                    Q_, q_rom[:, :trainsize], t[:trainsize])[1]

    # Choose and save the ROM with the least error.
    if not errors_pass:
        message = f"NO STABLE ROMS for r={r:d}"
        print(message)
        logging.info(message)
        return

    err2reg = {err: reg for reg, err in errors_pass.items()}
    regs = list(err2reg[min(err2reg.keys())])
    with utils.timed_block(f"Best regularization for k={trainsize:d}, "
                           f"r={r:d}: {config.REGSTR(regs)}"):
        rom._evaluate_solver(regularizer(r, *regs))
        save_trained_rom(trainsize, r, regs, rom)
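
# Usage sketch (illustrative grid): search first-order, quadratic, and cubic
# regularization hyperparameters over 5 logarithmically spaced points each
# (125 candidate ROMs).
train_gridsearch(trainsize=20000, r=40,
                 regs=(1e2, 1e5, 5, 1e3, 1e6, 5, 1e4, 1e7, 5),
                 testsize=60000, margin=1.1)
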
def train_with_minimization(trainsize, num_modes, regs,
                            testsize=None, margin=1.5):
    """Train ROMs with the given dimension(s), saving only the ROM with
    the least training error that satisfies a bound on the integrated POD
    coefficients, using a search algorithm to choose the regularization
    parameter.

    Parameters
    ----------
    trainsize : int
        Number of snapshots to use to train the ROM(s).

    num_modes : int or list(int)
        Dimension of the ROM(s) to train, i.e., the number of retained POD
        modes (left singular vectors) used to project the training data.

    regs : two positive floats
        Lower and upper bounds for the regularization parameter search.

    testsize : int
        Number of time steps for which a valid ROM must satisfy the POD bound.

    margin : float >= 1
        Amount that the integrated POD coefficients of a valid ROM are allowed
        to deviate in magnitude from the maximum magnitude of the training
        data Q, i.e., bound = margin * max(abs(Q)).
    """
    utils.reset_logger(trainsize)

    # Parse arguments.
    if np.isscalar(num_modes):
        num_modes = [num_modes]
    if np.isscalar(regs) or len(regs) != 2:
        raise ValueError("2 regularizations required (reg_low, reg_high)")
    bounds = np.log10(regs)

    # Load the full time domain and evaluate the input function.
    t = utils.load_time_domain(testsize)
    Us = config.U(t)

    for r in num_modes:
        # Load training data.
        X_, Xdot_, _, scales = utils.load_projected_data(trainsize, r)

        # Compute the bound to require for integrated POD modes.
        B = margin * np.abs(X_).max()

        # Test each regularization parameter.
        def training_error_from_rom(log10reg):
            reg = 10**log10reg

            # Train the ROM on all training snapshots.
            with utils.timed_block(f"Testing ROM with r={r:d}, reg={reg:e}"):
                rom = train_rom(X_, Xdot_, Us[:trainsize], reg)
                if not rom:
                    return _MAXFUN

                # Simulate the ROM over the full domain.
                with np.warnings.catch_warnings():
                    np.warnings.simplefilter("ignore")
                    x_rom = rom.predict(X_[:,0], t, config.U, method="RK45")

                # Check for boundedness of solution.
                if not is_bounded(x_rom, B):
                    return _MAXFUN

                # Calculate integrated relative errors in the reduced space.
                return roi.post.Lp_error(X_, x_rom[:, :trainsize],
                                         t[:trainsize])[1]

        opt_result = opt.minimize_scalar(training_error_from_rom,
                                         bounds=bounds, method="bounded")
        if opt_result.success and opt_result.fun != _MAXFUN:
            best_reg = 10 ** opt_result.x
            best_rom = train_rom(X_, Xdot_, Us[:trainsize], best_reg)
            save_best_trained_rom(trainsize, r, best_reg, best_rom)
        else:
            print(f"Regularization search optimization FAILED for r = {r:d}")
def train_with_gridsearch(trainsize, num_modes, regs,
                          testsize=None, margin=1.5):
    """Train ROMs with the given dimension(s) and regularization(s),
    saving only the ROM with the least training error that satisfies
    a bound on the integrated POD coefficients.

    Parameters
    ----------
    trainsize : int
        Number of snapshots to use to train the ROM(s).

    num_modes : int or list(int)
        Dimension of the ROM(s) to train, i.e., the number of retained POD
        modes (left singular vectors) used to project the training data.

    regs : float or list(float)
        Regularization parameter(s) to use in the training.

    testsize : int
        Number of time steps for which a valid ROM must satisfy the POD bound.

    margin : float >= 1
        Amount that the integrated POD coefficients of a valid ROM are allowed
        to deviate in magnitude from the maximum magnitude of the training
        data Q, i.e., bound = margin * max(abs(Q)).
    """
    utils.reset_logger(trainsize)

    # Parse arguments.
    if np.isscalar(num_modes):
        num_modes = [num_modes]
    if np.isscalar(regs):
        regs = [regs]

    # Load the full time domain and evaluate the input function.
    t = utils.load_time_domain(testsize)
    Us = config.U(t)

    logging.info(f"TRAINING {len(num_modes)*len(regs)} ROMS")
    for ii,r in enumerate(num_modes):
        # Load training data.
        X_, Xdot_, _, scales = utils.load_projected_data(trainsize, r)

        # Compute the bound to require for integrated POD modes.
        M = margin * np.abs(X_).max()

        # Test each regularization parameter.
        trained_roms = {}
        errors_pass = {}
        errors_fail = {}
        for reg in regs:

            # Train the ROM on all training snapshots.
            with utils.timed_block(f"Testing ROM with r={r:d}, reg={reg:e}"):
                rom = train_rom(X_, Xdot_, Us[:trainsize], reg)
                if not rom:
                    continue        # Skip if training fails.
                trained_roms[reg] = rom

                # Simulate the ROM over the full domain.
                with np.warnings.catch_warnings():
                    np.warnings.simplefilter("ignore")
                    x_rom = rom.predict(X_[:,0], t, config.U, method="RK45")

                # Check for boundedness of solution.
                errors = errors_pass if is_bounded(x_rom, M) else errors_fail

                # Calculate integrated relative errors in the reduced space.
                if x_rom.shape[1] > trainsize:
                    errors[reg] = roi.post.Lp_error(X_, x_rom[:, :trainsize],
                                                    t[:trainsize])[1]

        # Choose and save the ROM with the least error.
        plt.semilogx(list(errors_fail.keys()), list(errors_fail.values()),
                     f"C{ii}x", mew=1, label=fr"$r = {r:d}$, bound violated")
        if not errors_pass:
            print(f"NO STABLE ROMS for r = {r:d}")
            continue

        err2reg = {err:reg for reg,err in errors_pass.items()}
        best_reg = err2reg[min(err2reg.keys())]
        best_rom = trained_roms[best_reg]
        save_best_trained_rom(trainsize, r, best_reg, best_rom)

        plt.semilogx(list(errors_pass.keys()), list(errors_pass.values()),
                     f"C{ii}*", mew=0, label=fr"$r = {r:d}$, bound satisfied")
        plt.axvline(best_reg, lw=.5, color=f"C{ii}")

    plt.legend()
    plt.xlabel(r"Regularization parameter $\lambda$")
    plt.ylabel(r"ROM relative error $\frac"
               r"{||\widehat{\mathbf{Q}} - \widetilde{\mathbf{Q}}'||}"
               r"{||\widehat{\mathbf{Q}}||}$")
    plt.ylim(0, 1)
    plt.xlim(min(regs), max(regs))
    plt.title(fr"$n_t = {trainsize}$")
    utils.save_figure(f"regsweep_nt{trainsize:05d}.pdf")
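
# Usage sketch (illustrative grid): sweep 20 logarithmically spaced
# regularization parameters for two ROM dimensions; the best bounded ROM per
# dimension is saved and the error-vs-regularization plot written to disk.
train_with_gridsearch(trainsize=10000, num_modes=[22, 44],
                      regs=np.logspace(2, 6, 20), testsize=60000, margin=1.5)
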
def train_gridsearch(trainsize, r, regs, testsize=None, margin=1.5):
    """Train ROMs with the given dimension over a grid of potential
    regularization hyperparameters, saving only the ROM with the least
    training error that satisfies a bound on the integrated POD coefficients.

    Parameters
    ----------
    trainsize : int
        Number of snapshots to use to train the ROM.

    r : int
        Dimension of the desired ROM. Also the number of retained POD modes
        (left singular vectors) used to project the training data.

    regs : (float, float, int, float, float, int)
        Bounds and sizes for the grid of regularization parameters.
        Linear:    search in [regs[0], regs[1]] at regs[2] points.
        Quadratic: search in [regs[3], regs[4]] at regs[5] points.

    testsize : int
        Number of time steps for which a valid ROM must satisfy the POD bound.

    margin : float >= 1
        Amount that the integrated POD coefficients of a valid ROM are allowed
        to deviate in magnitude from the maximum magnitude of the training
        data Q, i.e., bound = margin * max(abs(Q)).
    """
    utils.reset_logger(trainsize)

    # Parse arguments.
    d = check_lstsq_size(trainsize, r)
    if len(regs) != 6:
        raise ValueError("len(regs) != 6 (bounds / sizes for parameter grid")
    check_regs(regs[0:2])
    check_regs(regs[3:5])
    λ1grid = np.logspace(np.log10(regs[0]), np.log10(regs[1]), int(regs[2]))
    λ2grid = np.logspace(np.log10(regs[3]), np.log10(regs[4]), int(regs[5]))

    # Load training data.
    t = utils.load_time_domain(testsize)
    Q_, Qdot_, _ = utils.load_projected_data(trainsize, r)
    U = config.U(t[:trainsize])

    # Compute the bound to require for integrated POD modes.
    M = margin * np.abs(Q_).max()

    # Create a solver mapping regularization parameters to operators.
    print(f"TRAINING {λ1grid.size*λ2grid.size} ROMS")
    with utils.timed_block(f"Constructing least-squares solver, r={r:d}"):
        rom = roi.InferredContinuousROM(config.MODELFORM)
        rom._construct_solver(None, Q_, Qdot_, U, np.ones(d))

    # Test each regularization parameter.
    errors_pass = {}
    errors_fail = {}
    for λ1, λ2 in itertools.product(λ1grid, λ2grid):
        with utils.timed_block(f"Testing ROM with λ1={λ1:5e}, λ2={λ2:5e}"):
            # Train the ROM on all training snapshots.
            rom._evaluate_solver(regularizer(r, d, λ1, λ2))

            # Simulate the ROM over the full domain.
            with np.warnings.catch_warnings():
                np.warnings.simplefilter("ignore")
                q_rom = rom.predict(Q_[:, 0], t, config.U, method="RK45")

            # Check for boundedness of solution.
            errors = errors_pass if is_bounded(q_rom, M) else errors_fail

            # Calculate integrated relative errors in the reduced space.
            if q_rom.shape[1] > trainsize:
                errors[(λ1, λ2)] = roi.post.Lp_error(Q_, q_rom[:, :trainsize],
                                                     t[:trainsize])[1]

    # Choose and save the ROM with the least error.
    if not errors_pass:
        message = f"NO STABLE ROMS for r={r:d}"
        print(message)
        logging.info(message)
        return

    err2reg = {err: reg for reg, err in errors_pass.items()}
    λ1, λ2 = err2reg[min(err2reg.keys())]
    with utils.timed_block(f"Best regularization for k={trainsize:d}, "
                           f"r={r:d}: λ1={λ1:.0f}, λ2={λ2:.0f}"):
        rom._evaluate_solver(regularizer(r, d, λ1, λ2))
        save_trained_rom(trainsize, r, (λ1, λ2), rom)
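
# Usage sketch (illustrative grid): two-hyperparameter search with 10 points
# each for the non-quadratic (λ1) and quadratic (λ2) regularizations.
train_gridsearch(trainsize=20000, r=40, regs=(1e2, 1e5, 10, 1e3, 1e6, 10),
                 testsize=60000, margin=1.5)
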