def main(
    grid_intervals: int = Argument(..., help='Grid interval count.'),
    time_steps: int = Argument(
        ..., help='Number of time steps used for discretization.'),
    training_samples: int = Argument(
        ..., help='Number of samples used for training the neural network.'),
    validation_samples: int = Argument(
        ...,
        help='Number of samples used for validation during the training phase.'
    ),
):
    """Model order reduction with neural networks for an instationary problem
    (approach by Hesthaven and Ubbiali).
    """
    if not config.HAVE_TORCH:
        raise TorchMissing()

    fom = create_fom(grid_intervals, time_steps)

    # Parameter domain [1, 2] for every parameter component.
    parameter_space = fom.parameters.space(1., 2.)

    from pymor.reductors.neural_network import NeuralNetworkInstationaryReductor

    training_set = parameter_space.sample_uniformly(training_samples)
    validation_set = parameter_space.sample_randomly(validation_samples)

    reductor = NeuralNetworkInstationaryReductor(fom,
                                                 training_set,
                                                 validation_set,
                                                 basis_size=10)
    # 'hidden_layers' is an expression string evaluated by the reductor.
    rom = reductor.reduce(hidden_layers='[30, 30, 30]', restarts=100)

    test_set = parameter_space.sample_randomly(10)

    speedups = []

    import time

    print(f'Performing test on set of size {len(test_set)} ...')

    U = fom.solution_space.empty(reserve=len(test_set))
    U_red = fom.solution_space.empty(reserve=len(test_set))

    for mu in test_set:
        # time.perf_counter is monotonic and has a higher resolution than
        # time.time; it also matches the timing used by the sibling demos.
        tic = time.perf_counter()
        U.append(fom.solve(mu))
        time_fom = time.perf_counter() - tic

        tic = time.perf_counter()
        U_red.append(reductor.reconstruct(rom.solve(mu)))
        time_red = time.perf_counter() - tic

        speedups.append(time_fom / time_red)

    # Use .norm() rather than .norm2() (the *squared* norm), so the reported
    # quantities are actual error norms, consistent with the other demos here.
    absolute_errors = (U - U_red).norm()
    relative_errors = (U - U_red).norm() / U.norm()

    print(f'Average absolute error: {np.average(absolute_errors)}')
    print(f'Average relative error: {np.average(relative_errors)}')
    print(f'Median of speedup: {np.median(speedups)}')
# Beispiel #2
def main(
    training_samples: int = Argument(
        ..., help='Number of samples used for training the neural network.'),
    validation_samples: int = Argument(
        ...,
        help='Number of samples used for validation during the training phase.'
    ),
):
    """Reduction of a FEniCS model using neural networks (approach by Hesthaven and Ubbiali)."""
    if not config.HAVE_TORCH:
        raise TorchMissing()

    fom, parameter_space = discretize_fenics()

    from pymor.reductors.neural_network import NeuralNetworkReductor

    training_set = parameter_space.sample_uniformly(training_samples)
    validation_set = parameter_space.sample_randomly(validation_samples)

    # Tolerances control the POD basis size (l2_err) and the accepted
    # training loss of the neural network (ann_mse).
    reductor = NeuralNetworkReductor(fom,
                                     training_set,
                                     validation_set,
                                     l2_err=1e-4,
                                     ann_mse=1e-4)
    # 'hidden_layers' is an expression string; N and P are substituted by the
    # reductor with the reduced basis size and parameter dimension.
    rom = reductor.reduce(hidden_layers='[(N+P)*3, (N+P)*3, (N+P)*3]',
                          restarts=100)

    test_set = parameter_space.sample_randomly(1)

    speedups = []

    import time

    print(f'Performing test on set of size {len(test_set)} ...')

    U = fom.solution_space.empty(reserve=len(test_set))
    U_red = fom.solution_space.empty(reserve=len(test_set))

    for mu in test_set:
        tic = time.perf_counter()
        U.append(fom.solve(mu))
        time_fom = time.perf_counter() - tic

        tic = time.perf_counter()
        U_red.append(reductor.reconstruct(rom.solve(mu)))
        time_red = time.perf_counter() - tic

        speedups.append(time_fom / time_red)

    absolute_errors = (U - U_red).norm()
    relative_errors = (U - U_red).norm() / U.norm()

    print(f'Average absolute error: {np.average(absolute_errors)}')
    print(f'Average relative error: {np.average(relative_errors)}')
    print(f'Median of speedup: {np.median(speedups)}')
# Beispiel #3
def neural_networks_demo(args):
    """Reduce a stationary model with neural networks and report test errors.

    `args` is a docopt-style dict with keys 'TRAINING_SAMPLES',
    'VALIDATION_SAMPLES' and '--vis'.
    """
    if not config.HAVE_TORCH:
        raise TorchMissing()

    fom = create_fom(args)

    # Parameter domain [0.1, 1] for every parameter component.
    parameter_space = fom.parameters.space((0.1, 1))

    from pymor.reductors.neural_network import NeuralNetworkReductor

    training_set = parameter_space.sample_uniformly(
        int(args['TRAINING_SAMPLES']))
    validation_set = parameter_space.sample_randomly(
        int(args['VALIDATION_SAMPLES']))

    # Tolerances control the POD basis size (l2_err) and the accepted
    # training loss of the neural network (ann_mse).
    reductor = NeuralNetworkReductor(fom,
                                     training_set,
                                     validation_set,
                                     l2_err=1e-5,
                                     ann_mse=1e-5)
    rom = reductor.reduce(restarts=100)

    test_set = parameter_space.sample_randomly(10)

    speedups = []

    import time

    print(f'Performing test on set of size {len(test_set)} ...')

    U = fom.solution_space.empty(reserve=len(test_set))
    U_red = fom.solution_space.empty(reserve=len(test_set))

    for mu in test_set:
        # time.perf_counter is monotonic and has a higher resolution than
        # time.time, giving more reliable speedup measurements.
        tic = time.perf_counter()
        U.append(fom.solve(mu))
        time_fom = time.perf_counter() - tic

        tic = time.perf_counter()
        U_red.append(reductor.reconstruct(rom.solve(mu)))
        time_red = time.perf_counter() - tic

        speedups.append(time_fom / time_red)

    absolute_errors = (U - U_red).norm()
    relative_errors = (U - U_red).norm() / U.norm()

    if args['--vis']:
        fom.visualize((U, U_red), legend=('Full solution', 'Reduced solution'))

    print(f'Average absolute error: {np.average(absolute_errors)}')
    print(f'Average relative error: {np.average(relative_errors)}')
    print(f'Median of speedup: {np.median(speedups)}')
def neural_networks_demo(args):
    """Reduce a FEniCS model with neural networks and report test errors.

    `args` is a docopt-style dict with keys 'TRAINING_SAMPLES' and
    'VALIDATION_SAMPLES'.
    """
    if not config.HAVE_TORCH:
        raise TorchMissing()

    TRAINING_SAMPLES = args['TRAINING_SAMPLES']
    VALIDATION_SAMPLES = args['VALIDATION_SAMPLES']

    fom, parameter_space = discretize_fenics()

    from pymor.reductors.neural_network import NeuralNetworkReductor

    training_set = parameter_space.sample_uniformly(int(TRAINING_SAMPLES))
    validation_set = parameter_space.sample_randomly(int(VALIDATION_SAMPLES))

    # Tolerances control the POD basis size (l2_err) and the accepted
    # training loss of the neural network (ann_mse).
    reductor = NeuralNetworkReductor(fom,
                                     training_set,
                                     validation_set,
                                     l2_err=1e-4,
                                     ann_mse=1e-4)
    # 'hidden_layers' is an expression string; N and P are substituted by the
    # reductor with the reduced basis size and parameter dimension.
    rom = reductor.reduce(hidden_layers='[(N+P)*3, (N+P)*3, (N+P)*3]',
                          restarts=100)

    test_set = parameter_space.sample_randomly(1)

    speedups = []

    import time

    print(f'Performing test on set of size {len(test_set)} ...')

    U = fom.solution_space.empty(reserve=len(test_set))
    U_red = fom.solution_space.empty(reserve=len(test_set))

    for mu in test_set:
        # time.perf_counter is monotonic and has a higher resolution than
        # time.time, giving more reliable speedup measurements.
        tic = time.perf_counter()
        U.append(fom.solve(mu))
        time_fom = time.perf_counter() - tic

        tic = time.perf_counter()
        U_red.append(reductor.reconstruct(rom.solve(mu)))
        time_red = time.perf_counter() - tic

        speedups.append(time_fom / time_red)

    absolute_errors = (U - U_red).norm()
    relative_errors = (U - U_red).norm() / U.norm()

    print(f'Average absolute error: {np.average(absolute_errors)}')
    print(f'Average relative error: {np.average(relative_errors)}')
    print(f'Median of speedup: {np.median(speedups)}')
# Beispiel #5
def main(
    grid_intervals: int = Argument(..., help='Grid interval count.'),
    training_samples: int = Argument(
        ..., help='Number of samples used for training the neural network.'),
    validation_samples: int = Argument(
        ...,
        help='Number of samples used for validation during the training phase.'
    ),
    fv: bool = Option(
        False,
        help='Use finite volume discretization instead of finite elements.'),
    vis: bool = Option(
        False,
        help=
        'Visualize full order solution and reduced solution for a test set.'),
):
    """Model order reduction with neural networks (approach by Hesthaven and Ubbiali)."""
    if not config.HAVE_TORCH:
        raise TorchMissing()

    fom = create_fom(fv, grid_intervals)

    # Parameter domain [0.1, 1] for every parameter component.
    parameter_space = fom.parameters.space((0.1, 1))

    from pymor.reductors.neural_network import NeuralNetworkReductor

    training_set = parameter_space.sample_uniformly(training_samples)
    validation_set = parameter_space.sample_randomly(validation_samples)

    # Tolerances control the POD basis size (l2_err) and the accepted
    # training loss of the neural network (ann_mse).
    reductor = NeuralNetworkReductor(fom,
                                     training_set,
                                     validation_set,
                                     l2_err=1e-5,
                                     ann_mse=1e-5)
    rom = reductor.reduce(restarts=100)

    test_set = parameter_space.sample_randomly(10)

    speedups = []

    import time

    print(f'Performing test on set of size {len(test_set)} ...')

    U = fom.solution_space.empty(reserve=len(test_set))
    U_red = fom.solution_space.empty(reserve=len(test_set))

    for mu in test_set:
        tic = time.perf_counter()
        U.append(fom.solve(mu))
        time_fom = time.perf_counter() - tic

        tic = time.perf_counter()
        U_red.append(reductor.reconstruct(rom.solve(mu)))
        time_red = time.perf_counter() - tic

        speedups.append(time_fom / time_red)

    absolute_errors = (U - U_red).norm()
    relative_errors = (U - U_red).norm() / U.norm()

    if vis:
        fom.visualize((U, U_red), legend=('Full solution', 'Reduced solution'))

    print(f'Average absolute error: {np.average(absolute_errors)}')
    print(f'Average relative error: {np.average(relative_errors)}')
    print(f'Median of speedup: {np.median(speedups)}')