def train_single(trainsize, r, regs):
    """Train and save a ROM with the given dimension and regularization
    hyperparameters.

    Parameters
    ----------
    trainsize : int
        Number of snapshots to use to train the ROM.

    r : int
        Dimension of the desired ROM. Also the number of retained POD modes
        (left singular vectors) used to project the training data.

    regs : two or three non-negative floats
        Regularization hyperparameters (first-order, quadratic, cubic) to use
        in the Operator Inference least-squares problem for training the ROM.
    """
    utils.reset_logger(trainsize)

    # Validate inputs.
    modelform = get_modelform(regs)
    check_lstsq_size(trainsize, r, modelform)
    check_regs(regs)

    # Load training data.
    Q_, Qdot_, t = utils.load_projected_data(trainsize, r)
    U = config.U(t)

    # Train and save the ROM.
    with utils.timed_block(f"Training ROM with k={trainsize:d}, "
                           f"{config.REGSTR(regs)}"):
        rom = opinf.InferredContinuousROM(modelform)
        rom.fit(None, Q_, Qdot_, U, P=regularizer(r, *regs))
        save_trained_rom(trainsize, r, regs, rom)
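

# The validation helpers get_modelform() and check_regs() called above are
# defined elsewhere in this module. A minimal sketch of the contract implied
# by those calls (two regularization values select a quadratic model, three a
# cubic one; every value must be a non-negative scalar) might look like the
# following; the exact modelform strings and messages here are assumptions.
def get_modelform(regs):
    """Return the Operator Inference modelform string matching the number of
    regularization hyperparameters (sketch).
    """
    if len(regs) == 2:
        return "cAHB"               # constant + linear + quadratic + input
    if len(regs) == 3:
        return "cAHGB"              # ... + cubic
    raise ValueError("two or three regularization hyperparameters required")


def check_regs(regs):
    """Verify that each regularization hyperparameter is non-negative and
    return them as floats (sketch of the assumed validation).
    """
    for λ in regs:
        if λ < 0:
            raise ValueError("regularization hyperparameters must be "
                             f"non-negative (got {λ})")
    return [float(λ) for λ in regs]
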
def train_and_save_all(trainsize, num_modes, regs):
    """Train and save ROMs with the given dimension and regularization.

    Parameters
    ----------
    trainsize : int
        Number of snapshots to use to train the ROM(s).

    num_modes : int or list(int)
        Dimension of the ROM(s) to train, i.e., the number of retained POD
        modes (left singular vectors) used to project the training data.

    regs : float or list(float)
        Regularization parameter(s) to use in the training.
    """
    utils.reset_logger(trainsize)

    # Parse arguments (scalars are promoted to one-element lists).
    if np.isscalar(num_modes):
        num_modes = [num_modes]
    if np.isscalar(regs):
        regs = [regs]

    logging.info(f"TRAINING {len(num_modes)*len(regs)} ROMS")
    for r in num_modes:
        # Load training data.
        X_, Xdot_, time_domain, _ = utils.load_projected_data(trainsize, r)

        # Evaluate inputs over the training time domain.
        Us = config.U(time_domain)

        # Train and save each ROM.
        for reg in regs:
            with utils.timed_block(f"Training ROM with r={r:d}, reg={reg:e}"):
                rom = train_rom(X_, Xdot_, Us, reg)
                if rom:
                    rom.save_model(config.rom_path(trainsize, r, reg),
                                   save_basis=False, overwrite=True)
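

# The train_rom() helper used above is not shown in this excerpt. A minimal
# sketch, assuming it wraps a single rom_operator_inference fit with the
# project's fixed modelform and returns None when the least-squares solve
# fails (the exception handling below is an assumption):
def train_rom(X_, Xdot_, U, reg):
    """Fit one InferredContinuousROM with scalar Tikhonov regularization,
    returning None if training fails (sketch of the assumed helper).
    """
    try:
        rom = roi.InferredContinuousROM(config.MODELFORM)
        rom.fit(None, X_, Xdot_, U, P=reg)
        return rom
    except Exception as ex:             # the real helper may catch less broadly
        logging.info(f"ROM training failed: {ex}")
        return None
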
def train_single(trainsize, r, regs):
    """Train and save a ROM with the given dimension and regularization
    hyperparameters.

    Parameters
    ----------
    trainsize : int
        Number of snapshots to use to train the ROM.

    r : int
        Dimension of the desired ROM. Also the number of retained POD modes
        (left singular vectors) used to project the training data.

    regs : two positive floats
        Regularization hyperparameters (non-quadratic, quadratic) to use in
        the Operator Inference least-squares problem for training the ROM.
    """
    utils.reset_logger(trainsize)

    # Validate inputs.
    d = check_lstsq_size(trainsize, r)
    λ1, λ2 = check_regs(regs)

    # Load training data.
    Q_, Qdot_, t = utils.load_projected_data(trainsize, r)
    U = config.U(t)

    # Train and save the ROM.
    with utils.timed_block(f"Training ROM with k={trainsize:d}, "
                           f"r={r:d}, λ1={λ1:.0f}, λ2={λ2:.0f}"):
        rom = roi.InferredContinuousROM(config.MODELFORM)
        rom.fit(None, Q_, Qdot_, U, P=regularizer(r, d, λ1, λ2))
        save_trained_rom(trainsize, r, regs, rom)
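

# The check_lstsq_size() and regularizer() helpers used in the variant above
# are defined elsewhere (the earlier excerpt calls a regularizer with a
# different signature). A minimal sketch, assuming a model with constant,
# linear, quadratic, and scalar-input terms and least-squares columns ordered
# [constant | linear | quadratic | input]; the exact layout is an assumption:
def check_lstsq_size(trainsize, r):
    """Return the number of least-squares unknowns d per reduced mode and
    verify that the problem is overdetermined (sketch).
    """
    d = 1 + r + r * (r + 1) // 2 + 1        # c, A, H (compressed), B columns
    if trainsize < d:
        raise ValueError(f"trainsize ({trainsize}) must be at least the "
                         f"number of unknowns per mode ({d})")
    return d


def regularizer(r, d, λ1, λ2):
    """Diagonal Tikhonov regularizer that penalizes the quadratic operator
    entries by λ2 and all other entries by λ1 (sketch).
    """
    s = r * (r + 1) // 2                    # number of compressed quadratic terms
    diag = np.full(d, λ1)                   # λ1 everywhere by default
    diag[1 + r:1 + r + s] = λ2              # λ2 on the quadratic block
    return np.diag(diag)
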
def _train_minimize_1D(trainsize, r, regs, testsize=None, margin=1.1):
    """Train ROMs with the given dimension(s), saving only the ROM with
    the least training error that satisfies a bound on the integrated POD
    coefficients, using a search algorithm to choose the regularization
    parameter.

    Parameters
    ----------
    trainsize : int
        Number of snapshots to use to train the ROM.

    r : int
        Dimension of the desired ROM. Also the number of retained POD modes
        (left singular vectors) used to project the training data.

    regs : two non-negative floats
        Bounds for the (single) regularization hyperparameter to use in the
        Operator Inference least-squares problem for training the ROM.

    testsize : int
        Number of time steps for which a valid ROM must satisfy the POD bound.

    margin : float ≥ 1
        Amount that the integrated POD coefficients of a valid ROM are allowed
        to deviate in magnitude from the maximum magnitude of the training
        data Q, i.e., bound = margin * max(abs(Q)).
    """
    utils.reset_logger(trainsize)

    # Parse arguments.
    check_lstsq_size(trainsize, r, modelform="cAHB")
    log10regs = np.log10(regs)

    # Load training data.
    t = utils.load_time_domain(testsize)
    Q_, Qdot_, _ = utils.load_projected_data(trainsize, r)
    U = config.U(t[:trainsize])

    # Compute the bound to require for integrated POD modes.
    B = margin * np.abs(Q_).max()

    # Create a solver mapping regularization hyperparameters to operators.
    with utils.timed_block(f"Constructing least-squares solver, r={r:d}"):
        rom = opinf.InferredContinuousROM("cAHB")
        rom._construct_solver(None, Q_, Qdot_, U, 1)

    # Test each regularization hyperparameter.
    def training_error(log10reg):
        """Return the training error resulting from the regularization
        hyperparameters λ1 = λ2 = 10^log10reg. If the resulting model
        violates the POD bound, return "infinity".
        """
        λ = 10**log10reg

        # Train the ROM on all training snapshots.
        with utils.timed_block(f"Testing ROM with λ={λ:e}"):
            rom._evaluate_solver(λ)

            # Simulate the ROM over the full domain.
            with np.warnings.catch_warnings():
                np.warnings.simplefilter("ignore")
                q_rom = rom.predict(Q_[:, 0], t, config.U, method="RK45")

            # Check for boundedness of solution.
            if not is_bounded(q_rom, B):
                return _MAXFUN

            # Calculate integrated relative errors in the reduced space.
            return opinf.post.Lp_error(Q_, q_rom[:, :trainsize],
                                       t[:trainsize])[1]

    opt_result = opt.minimize_scalar(training_error,
                                     method="bounded",
                                     bounds=log10regs)
    if opt_result.success and opt_result.fun != _MAXFUN:
        λ = 10**opt_result.x
        with utils.timed_block(f"Best regularization for k={trainsize:d}, "
                               f"r={r:d}: λ={λ:.0f}"):
            rom._evaluate_solver(λ)
            save_trained_rom(trainsize, r, (λ, λ), rom)
    else:
        message = "Regularization search optimization FAILED"
        print(message)
        logging.info(message)
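

# The search routines above and below rely on an is_bounded() helper to decide
# whether an integrated trajectory respects the bound B. A minimal sketch
# matching the description in the docstrings (every integrated POD coefficient
# must stay within the bound in magnitude); the real helper may also log which
# time step violated the bound.
def is_bounded(q_rom, bound):
    """Return True if every entry of the integrated reduced trajectory has
    magnitude at most `bound` (sketch).
    """
    return bool(np.abs(q_rom).max() <= bound)
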
def train_gridsearch(trainsize, r, regs, testsize=None, margin=1.1):
    """Train ROMs with the given dimension over a grid of potential
    regularization hyperparameters, saving only the ROM with the least
    training error that satisfies a bound on the integrated POD coefficients.

    Parameters
    ----------
    trainsize : int
        Number of snapshots to use to train the ROM.

    r : int
        Dimension of the desired ROM. Also the number of retained POD modes
        (left singular vectors) used to project the training data.

    regs : sequence of six or nine numbers
        Bounds and sizes for the grid of regularization hyperparameters.
        First-order: search in [regs[0], regs[1]] at regs[2] points.
        Quadratic:   search in [regs[3], regs[4]] at regs[5] points.
        Cubic:       search in [regs[6], regs[7]] at regs[8] points
                     (only if nine values are given).

    testsize : int
        Number of time steps for which a valid ROM must satisfy the POD bound.

    margin : float ≥ 1
        Amount that the integrated POD coefficients of a valid ROM are allowed
        to deviate in magnitude from the maximum magnitude of the training
        data Q, i.e., bound = margin * max(abs(Q)).
    """
    utils.reset_logger(trainsize)

    # Parse arguments.
    if len(regs) not in [6, 9]:
        raise ValueError("6 or 9 regs required (bounds / sizes of grids)")
    grids = []
    for i in range(0, len(regs), 3):
        check_regs(regs[i:i + 2])
        grids.append(
            np.logspace(np.log10(regs[i]), np.log10(regs[i + 1]),
                        int(regs[i + 2])))
    modelform = get_modelform(grids)
    d = check_lstsq_size(trainsize, r, modelform)

    # Load training data.
    t = utils.load_time_domain(testsize)
    Q_, Qdot_, _ = utils.load_projected_data(trainsize, r)
    U = config.U(t[:trainsize])

    # Compute the bound to require for integrated POD modes.
    M = margin * np.abs(Q_).max()

    # Create a solver mapping regularization hyperparameters to operators.
    num_tests = np.prod([grid.size for grid in grids])
    print(f"TRAINING {num_tests} ROMS")
    with utils.timed_block(f"Constructing least-squares solver, r={r:d}"):
        rom = opinf.InferredContinuousROM(modelform)
        rom._construct_solver(None, Q_, Qdot_, U, np.ones(d))

    # Test each regularization hyperparameter.
    errors_pass = {}
    errors_fail = {}
    for i, regs in enumerate(itertools.product(*grids)):
        with utils.timed_block(f"({i+1:d}/{num_tests:d}) Testing ROM with "
                               f"{config.REGSTR(regs)}"):
            # Train the ROM on all training snapshots.
            rom._evaluate_solver(regularizer(r, *regs))

            # Simulate the ROM over the full domain.
            with np.warnings.catch_warnings():
                np.warnings.simplefilter("ignore")
                q_rom = rom.predict(Q_[:, 0], t, config.U, method="RK45")

            # Check for boundedness of solution.
            errors = errors_pass if is_bounded(q_rom, M) else errors_fail

            # Calculate integrated relative errors in the reduced space.
            if q_rom.shape[1] > trainsize:
                errors[tuple(regs)] = opinf.post.Lp_error(
                    Q_, q_rom[:, :trainsize], t[:trainsize])[1]

    # Choose and save the ROM with the least error.
    if not errors_pass:
        message = f"NO STABLE ROMS for r={r:d}"
        print(message)
        logging.info(message)
        return

    err2reg = {err: reg for reg, err in errors_pass.items()}
    regs = list(err2reg[min(err2reg.keys())])
    with utils.timed_block(f"Best regularization for k={trainsize:d}, "
                           f"r={r:d}: {config.REGSTR(regs)}"):
        rom._evaluate_solver(regularizer(r, *regs))
        save_trained_rom(trainsize, r, regs, rom)
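

# To illustrate the grid specification parsed above: six values define two
# log-spaced grids (first-order and quadratic); nine values add a cubic grid.
# The helper below mirrors the parsing loop with hypothetical, illustrative
# values only (it is not part of the original driver).
def _demo_grid_spec():
    """Expand a six-value regs specification into two log-spaced grids and
    the resulting candidate hyperparameter combinations (sketch)."""
    regs = (1e2, 1e5, 4,      # first-order: 4 points in [1e2, 1e5]
            1e4, 1e7, 4)      # quadratic:   4 points in [1e4, 1e7]
    grids = [np.logspace(np.log10(regs[i]), np.log10(regs[i + 1]),
                         int(regs[i + 2]))
             for i in range(0, len(regs), 3)]
    return grids, list(itertools.product(*grids))    # 2 grids, 16 (λ1, λ2) pairs
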
def train_with_minimization(trainsize, num_modes, regs,
                            testsize=None, margin=1.5):
    """Train ROMs with the given dimension(s), saving only the ROM with
    the least training error that satisfies a bound on the integrated POD
    coefficients, using a search algorithm to choose the regularization
    parameter.

    Parameters
    ----------
    trainsize : int
        Number of snapshots to use to train the ROM(s).

    num_modes : int or list(int)
        Dimension of the ROM(s) to train, i.e., the number of retained POD
        modes (left singular vectors) used to project the training data.

    regs : [float, float]
        Lower and upper bounds for the regularization parameter search.

    testsize : int
        Number of time steps for which a valid ROM must satisfy the POD bound.

    margin : float >= 1
        Amount that the integrated POD coefficients of a valid ROM are allowed
        to deviate in magnitude from the maximum magnitude of the training
        data Q, i.e., bound = margin * max(abs(Q)).
    """
    utils.reset_logger(trainsize)

    # Parse arguments.
    if np.isscalar(num_modes):
        num_modes = [num_modes]
    if np.isscalar(regs) or len(regs) != 2:
        raise ValueError("2 regularizations required (reg_low, reg_high)")
    bounds = np.log10(regs)

    # Load the full time domain and evaluate the input function.
    t = utils.load_time_domain(testsize)
    Us = config.U(t)

    for r in num_modes:
        # Load training data.
        X_, Xdot_, _, scales = utils.load_projected_data(trainsize, r)

        # Compute the bound to require for integrated POD modes.
        B = margin * np.abs(X_).max()

        # Test each regularization parameter.
        def training_error_from_rom(log10reg):
            reg = 10**log10reg

            # Train the ROM on all training snapshots.
            with utils.timed_block(f"Testing ROM with r={r:d}, reg={reg:e}"):
                rom = train_rom(X_, Xdot_, Us[:trainsize], reg)
                if not rom:
                    return _MAXFUN

                # Simulate the ROM over the full domain.
                with np.warnings.catch_warnings():
                    np.warnings.simplefilter("ignore")
                    x_rom = rom.predict(X_[:,0], t, config.U, method="RK45")

                # Check for boundedness of solution.
                if not is_bounded(x_rom, B):
                    return _MAXFUN

                # Calculate integrated relative errors in the reduced space.
                return roi.post.Lp_error(X_, x_rom[:, :trainsize],
                                         t[:trainsize])[1]

        opt_result = opt.minimize_scalar(training_error_from_rom,
                                         bounds=bounds, method="bounded")
        if opt_result.success and opt_result.fun != _MAXFUN:
            best_reg = 10 ** opt_result.x
            best_rom = train_rom(X_, Xdot_, Us[:trainsize], best_reg)
            save_best_trained_rom(trainsize, r, best_reg, best_rom)
        else:
            print(f"Regularization search optimization FAILED for r = {r:d}")
def train_with_gridsearch(trainsize, num_modes, regs,
                          testsize=None, margin=1.5):
    """Train ROMs with the given dimension(s) and regularization(s),
    saving only the ROM with the least training error that satisfies
    a bound on the integrated POD coefficients.

    Parameters
    ----------
    trainsize : int
        Number of snapshots to use to train the ROM(s).

    num_modes : int or list(int)
        Dimension of the ROM(s) to train, i.e., the number of retained POD
        modes (left singular vectors) used to project the training data.

    regs : float or list(float)
        Regularization parameter(s) to use in the training.

    testsize : int
        Number of time steps for which a valid ROM must satisfy the POD bound.

    margin : float >= 1
        Amount that the integrated POD coefficients of a valid ROM are allowed
        to deviate in magnitude from the maximum magnitude of the training
        data Q, i.e., bound = margin * max(abs(Q)).
    """
    utils.reset_logger(trainsize)

    # Parse arguments.
    if np.isscalar(num_modes):
        num_modes = [num_modes]
    if np.isscalar(regs):
        regs = [regs]

    # Load the full time domain and evaluate the input function.
    t = utils.load_time_domain(testsize)
    Us = config.U(t)

    logging.info(f"TRAINING {len(num_modes)*len(regs)} ROMS")
    for ii,r in enumerate(num_modes):
        # Load training data.
        X_, Xdot_, _, scales = utils.load_projected_data(trainsize, r)

        # Compute the bound to require for integrated POD modes.
        M = margin * np.abs(X_).max()

        # Test each regularization parameter.
        trained_roms = {}
        errors_pass = {}
        errors_fail = {}
        for reg in regs:

            # Train the ROM on all training snapshots.
            with utils.timed_block(f"Testing ROM with r={r:d}, reg={reg:e}"):
                rom = train_rom(X_, Xdot_, Us[:trainsize], reg)
                if not rom:
                    continue        # Skip if training fails.
                trained_roms[reg] = rom

                # Simulate the ROM over the full domain.
                with np.warnings.catch_warnings():
                    np.warnings.simplefilter("ignore")
                    x_rom = rom.predict(X_[:,0], t, config.U, method="RK45")

                # Check for boundedness of solution.
                errors = errors_pass if is_bounded(x_rom, M) else errors_fail

                # Calculate integrated relative errors in the reduced space.
                if x_rom.shape[1] > trainsize:
                    errors[reg] = roi.post.Lp_error(X_, x_rom[:, :trainsize],
                                                    t[:trainsize])[1]

        # Choose and save the ROM with the least error.
        plt.semilogx(list(errors_fail.keys()), list(errors_fail.values()),
                     f"C{ii}x", mew=1, label=fr"$r = {r:d}$, bound violated")
        if not errors_pass:
            print(f"NO STABLE ROMS for r = {r:d}")
            continue

        err2reg = {err:reg for reg,err in errors_pass.items()}
        best_reg = err2reg[min(err2reg.keys())]
        best_rom = trained_roms[best_reg]
        save_best_trained_rom(trainsize, r, best_reg, best_rom)

        plt.semilogx(list(errors_pass.keys()), list(errors_pass.values()),
                     f"C{ii}*", mew=0, label=fr"$r = {r:d}$, bound satisfied")
        plt.axvline(best_reg, lw=.5, color=f"C{ii}")

    plt.legend()
    plt.xlabel(r"Regularization parameter $\lambda$")
    plt.ylabel(r"ROM relative error $\frac"
               r"{||\widehat{\mathbf{Q}} - \widetilde{\mathbf{Q}}'||}"
               r"{||\widehat{\mathbf{Q}}||}$")
    plt.ylim(0, 1)
    plt.xlim(min(regs), max(regs))
    plt.title(fr"$n_t = {trainsize}$")
    utils.save_figure(f"regsweep_nt{trainsize:05d}.pdf")
def train_gridsearch(trainsize, r, regs, testsize=None, margin=1.5):
    """Train ROMs with the given dimension over a grid of potential
    regularization hyperparameters, saving only the ROM with the least
    training error that satisfies a bound on the integrated POD coefficients.

    Parameters
    ----------
    trainsize : int
        Number of snapshots to use to train the ROM.

    r : int
        Dimension of the desired ROM. Also the number of retained POD modes
        (left singular vectors) used to project the training data.

    regs : (float, float, int, float, float, int)
        Bounds and sizes for the grid of regularization parameters.
        Linear:    search in [regs[0], regs[1]] at regs[2] points.
        Quadratic: search in [regs[3], regs[4]] at regs[5] points.

    testsize : int
        Number of time steps for which a valid ROM must satisfy the POD bound.

    margin : float >= 1
        Amount that the integrated POD coefficients of a valid ROM are allowed
        to deviate in magnitude from the maximum magnitude of the training
        data Q, i.e., bound = margin * max(abs(Q)).
    """
    utils.reset_logger(trainsize)

    # Parse arguments.
    d = check_lstsq_size(trainsize, r)
    if len(regs) != 6:
        raise ValueError("len(regs) != 6 (bounds / sizes for parameter grid)")
    check_regs(regs[0:2])
    check_regs(regs[3:5])
    λ1grid = np.logspace(np.log10(regs[0]), np.log10(regs[1]), int(regs[2]))
    λ2grid = np.logspace(np.log10(regs[3]), np.log10(regs[4]), int(regs[5]))

    # Load training data.
    t = utils.load_time_domain(testsize)
    Q_, Qdot_, _ = utils.load_projected_data(trainsize, r)
    U = config.U(t[:trainsize])

    # Compute the bound to require for integrated POD modes.
    M = margin * np.abs(Q_).max()

    # Create a solver mapping regularization parameters to operators.
    print(f"TRAINING {λ1grid.size*λ2grid.size} ROMS")
    with utils.timed_block(f"Constructing least-squares solver, r={r:d}"):
        rom = roi.InferredContinuousROM(config.MODELFORM)
        rom._construct_solver(None, Q_, Qdot_, U, np.ones(d))

    # Test each regularization parameter.
    errors_pass = {}
    errors_fail = {}
    for λ1, λ2 in itertools.product(λ1grid, λ2grid):
        with utils.timed_block(f"Testing ROM with λ1={λ1:5e}, λ2={λ2:5e}"):
            # Train the ROM on all training snapshots.
            rom._evaluate_solver(regularizer(r, d, λ1, λ2))

            # Simulate the ROM over the full domain.
            with np.warnings.catch_warnings():
                np.warnings.simplefilter("ignore")
                q_rom = rom.predict(Q_[:, 0], t, config.U, method="RK45")

            # Check for boundedness of solution.
            errors = errors_pass if is_bounded(q_rom, M) else errors_fail

            # Calculate integrated relative errors in the reduced space.
            if q_rom.shape[1] > trainsize:
                errors[(λ1, λ2)] = roi.post.Lp_error(Q_, q_rom[:, :trainsize],
                                                     t[:trainsize])[1]

    # Choose and save the ROM with the least error.
    if not errors_pass:
        message = f"NO STABLE ROMS for r={r:d}"
        print(message)
        logging.info(message)
        return

    err2reg = {err: reg for reg, err in errors_pass.items()}
    λ1, λ2 = err2reg[min(err2reg.keys())]
    with utils.timed_block(f"Best regularization for k={trainsize:d}, "
                           f"r={r:d}: λ1={λ1:.0f}, λ2={λ2:.0f}"):
        rom._evaluate_solver(regularizer(r, d, λ1, λ2))
        save_trained_rom(trainsize, r, (λ1, λ2), rom)
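

# A hypothetical driver call for the grid search above; the numeric values
# are illustrative only and assume the project's projected data and config
# module are available.
def _demo_train_gridsearch():
    """Sweep a 5 x 5 grid of (λ1, λ2) values for an r = 43 ROM trained on
    20,000 snapshots, validating over 60,000 time steps (hypothetical
    values; sketch)."""
    train_gridsearch(trainsize=20000, r=43,
                     regs=(1e2, 1e5, 5, 1e4, 1e7, 5),
                     testsize=60000, margin=1.5)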