Example #1
def main(BACKEND, ALG, SNAPSHOTS, RBSIZE, TEST):
    # discretize
    ############
    if BACKEND == 'pymor':
        fom = discretize_pymor()
    elif BACKEND == 'fenics':
        fom = discretize_fenics()
    else:
        raise NotImplementedError

    # select reduction algorithm with error estimator
    #################################################
    coercivity_estimator = ExpressionParameterFunctional('1.', fom.parameter_type)
    reductor = ParabolicRBReductor(fom, product=fom.h1_0_semi_product, coercivity_estimator=coercivity_estimator)

    # generate reduced model
    ########################
    if ALG == 'greedy':
        rom = reduce_greedy(fom, reductor, SNAPSHOTS, RBSIZE)
    elif ALG == 'adaptive_greedy':
        rom = reduce_adaptive_greedy(fom, reductor, SNAPSHOTS, RBSIZE)
    elif ALG == 'pod':
        rom = reduce_pod(fom, reductor, SNAPSHOTS, RBSIZE)
    else:
        raise NotImplementedError

    # evaluate the reduction error
    ##############################
    results = reduction_error_analysis(
        rom, fom=fom, reductor=reductor, estimator=True,
        error_norms=[lambda U: DT * np.sqrt(np.sum(fom.h1_0_semi_norm(U)[1:]**2))],
        error_norm_names=['l^2-h^1'],
        condition=False, test_mus=TEST, random_seed=999, plot=True
    )

    # show results
    ##############
    print(results['summary'])
    import matplotlib.pyplot as plt
    plt.show()  # display the figure created by reduction_error_analysis(plot=True)

    # write results to disk
    #######################
    from pymor.core.pickle import dump
    with open('reduced_model.out', 'wb') as f:
        dump(rom, f)
    results.pop('figure')  # matplotlib figures cannot be serialized
    with open('results.out', 'wb') as f:
        dump(results, f)

    # visualize reduction error for worst-approximated mu
    #####################################################
    mumax = results['max_error_mus'][0, -1]
    U = fom.solve(mumax)
    U_RB = reductor.reconstruct(rom.solve(mumax))
    if BACKEND == 'fenics':  # right now the fenics visualizer does not support time trajectories
        U = U[len(U) - 1].copy()
        U_RB = U_RB[len(U_RB) - 1].copy()
    fom.visualize((U, U_RB, U - U_RB), legend=('Detailed Solution', 'Reduced Solution', 'Error'),
                separate_colorbars=True)

    return results
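The demo above only writes the reduced model to disk. A minimal sketch of reading it back with pyMOR's pickle wrapper (assuming 'reduced_model.out' was written as in the example):

from pymor.core.pickle import load  # mirrors the stdlib pickle interface, but handles pyMOR objects

with open('reduced_model.out', 'rb') as f:
    rom = load(f)
# rom.solve(mu) can now be called for new parameters without the full-order model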
Example #2
 def set(self, key, value):
     key = base64.b64encode(key)
     now = datetime.datetime.now()
     filename = now.isoformat() + '.dat'
     file_path = os.path.join(self.path, filename)
     while os.path.exists(file_path):
         now = now + datetime.timedelta(microseconds=1)
         filename = now.isoformat() + '.dat'  # now is a datetime object, not a callable
         file_path = os.path.join(self.path, filename)
     fd = os.open(file_path, os.O_WRONLY | os.O_EXCL | os.O_CREAT)
     with os.fdopen(fd, 'wb') as f:  # binary mode; pickle streams are bytes
         dump(value, f)
         file_size = f.tell()
     conn = self.conn
     c = conn.cursor()
     try:
         c.execute("INSERT INTO entries(key, filename, size) VALUES ('{}', '{}', {})"
                   .format(key, filename, file_size))
         conn.commit()
     except sqlite3.IntegrityError:
         conn.commit()
         from pymor.core.logger import getLogger
         getLogger('pymor.core.cache.SQLiteRegion').warn('Key already present in cache region, ignoring.')
         os.unlink(file_path)
     self.bytes_written += file_size
     if self.bytes_written >= 0.1 * self.max_size:
         self.housekeeping()
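Note that the INSERT above splices values into the SQL string, which is safe here only because base64-encoded keys contain no quote characters. A more defensive variant would let sqlite3 bind the parameters (sketch, assuming the same entries table):

c.execute("INSERT INTO entries(key, filename, size) VALUES (?, ?, ?)",
          (key, filename, file_size))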
Example #3
def main(BACKEND, ALG, SNAPSHOTS, RBSIZE, TEST):
    # discretize
    ############
    if BACKEND == 'pymor':
        d = discretize_pymor()
    elif BACKEND == 'fenics':
        d = discretize_fenics()
    else:
        raise NotImplementedError

    # select reduction algorithm with error estimator
    #################################################
    coercivity_estimator = ExpressionParameterFunctional('1.', d.parameter_type)
    reductor = ParabolicRBReductor(d, product=d.h1_0_semi_product, coercivity_estimator=coercivity_estimator)

    # generate reduced model
    ########################
    if ALG == 'greedy':
        rd = reduce_greedy(d, reductor, SNAPSHOTS, RBSIZE)
    elif ALG == 'adaptive_greedy':
        rd = reduce_adaptive_greedy(d, reductor, SNAPSHOTS, RBSIZE)
    elif ALG == 'pod':
        rd = reduce_pod(d, reductor, SNAPSHOTS, RBSIZE)
    else:
        raise NotImplementedError

    # evaluate the reduction error
    ##############################
    results = reduction_error_analysis(
        rd, d=d, reductor=reductor, estimator=True,
        error_norms=[lambda U: DT * np.sqrt(np.sum(d.h1_0_semi_norm(U)[1:]**2))],
        error_norm_names=['l^2-h^1'],
        condition=False, test_mus=TEST, random_seed=999, plot=True
    )

    # show results
    ##############
    print(results['summary'])
    import matplotlib.pyplot as plt
    plt.show()  # display the figure created by reduction_error_analysis(plot=True)

    # write results to disk
    #######################
    from pymor.core.pickle import dump
    with open('reduced_model.out', 'wb') as f:
        dump(rd, f)
    results.pop('figure')  # matplotlib figures cannot be serialized
    with open('results.out', 'wb') as f:
        dump(results, f)

    # visualize reduction error for worst-approximated mu
    #####################################################
    mumax = results['max_error_mus'][0, -1]
    U = d.solve(mumax)
    U_RB = reductor.reconstruct(rd.solve(mumax))
    if BACKEND == 'fenics':  # right now the fenics visualizer does not support time trajectories
        U = U[len(U) - 1].copy()
        U_RB = U_RB[len(U_RB) - 1].copy()
    d.visualize((U, U_RB, U - U_RB), legend=('Detailed Solution', 'Reduced Solution', 'Error'),
                separate_colorbars=True)

    return results
Example #4
File: cache.py Project: emamy/pymor
 def set(self, key, value):
     fd, file_path = tempfile.mkstemp(
         '.dat',
         _safe_filename(datetime.datetime.now().isoformat()[:-7]) + '-',
         self.path)
     filename = os.path.basename(file_path)
     with os.fdopen(fd, 'wb') as f:
         dump(value, f)
         file_size = f.tell()
     conn = self.conn
     c = conn.cursor()
     try:
         c.execute(
             "INSERT INTO entries(key, filename, size) VALUES ('{}', '{}', {})"
             .format(key, filename, file_size))
         conn.commit()
     except sqlite3.IntegrityError:
         conn.commit()
         from pymor.core.logger import getLogger
         getLogger('pymor.core.cache.SQLiteRegion').warn(
             'Key already present in cache region, ignoring.')
         os.unlink(file_path)
     self.bytes_written += file_size
     if self.bytes_written >= 0.1 * self.max_size:
         self.housekeeping()
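This variant replaces the manual collision loop of Example #2 with tempfile.mkstemp, which atomically creates a uniquely named file and hands back an already-open descriptor. The pattern in isolation (directory and value are illustrative):

import os
import tempfile
from pymor.core.pickle import dump

cache_dir = tempfile.mkdtemp()                           # illustrative cache directory
fd, file_path = tempfile.mkstemp('.dat', dir=cache_dir)  # file is created atomically
with os.fdopen(fd, 'wb') as f:                           # wrap the descriptor for binary pickling
    dump({'some': 'value'}, f)
    file_size = f.tell()                                 # number of bytes written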
Example #5
def test_blockspace():
    import tempfile  # needed for TemporaryFile below
    from pymor.vectorarrays.block import BlockVectorSpace
    from pymor.core.pickle import dump

    b = BlockVectorSpace([])
    with tempfile.TemporaryFile('wb') as dp_file:
        dump(b, file=dp_file)
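The test only checks that dumping an empty BlockVectorSpace succeeds. A sketch that also reads the space back (assuming BlockVectorSpace instances compare equal after a pickle round-trip):

import tempfile
from pymor.vectorarrays.block import BlockVectorSpace
from pymor.core.pickle import dump, load

b = BlockVectorSpace([])
with tempfile.TemporaryFile('w+b') as f:  # readable and writable temporary file
    dump(b, file=f)
    f.seek(0)
    assert load(f) == b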
Example #6
 def dump_file(k, v):
     if k not in data:
         count = 0
     elif not isinstance(data[k], list):
         count = 1
     else:
         count = len(data[k])
     filename = 'DATA.' + k + '.' + str(count)
     with open(os.path.join(_current_dataset, filename), 'wb') as f:
         dump(v, f, protocol=-1)
     return filename
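dump_file derives a versioned file name from how many values are already stored under k (data presumably maps each key to a single value or to a list of values), and protocol=-1 selects the highest available pickle protocol. Hypothetical usage, assuming _current_dataset points at the dataset directory:

fn = dump_file('temperature', values)  # -> 'DATA.temperature.0' for a new key
# once data['temperature'] holds a list of n entries, the next call yields 'DATA.temperature.n'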
Example #7
 def set(self, key, value):
     key = base64.b64encode(key)
     response = self.server.set(self.secret, key)
     assert len(response) == 2 and isinstance(response[0], bool) and isinstance(response[1], str)
     if response[0]:
         with open(response[1], 'wb') as f:
             dump(value, f)
             file_size = f.tell()
         response = self.server.set_finished(self.secret, key, file_size)
         assert isinstance(response, bool) and response
     else:
         from pymor.core.logger import getLogger
         getLogger('pymor.core.network_cache.NetworkFilesystemRegion')\
             .warn('Key already present in cache region, ignoring.')
Example #8
def main(args):

    args = parse_arguments(args)

    pool = new_parallel_pool(ipython_num_engines=args['--ipython-engines'], ipython_profile=args['--ipython-profile'])

    if args['--fenics']:
        fom, fom_summary = discretize_fenics(args['XBLOCKS'], args['YBLOCKS'], args['--grid'], args['--order'])
    else:
        fom, fom_summary = discretize_pymor(args['XBLOCKS'], args['YBLOCKS'], args['--grid'], args['--list-vector-array'])

    if args['--cache-region'] != 'none':
        fom.enable_caching(args['--cache-region'])

    if args['--plot-solutions']:
        print('Showing some solutions')
        Us = ()
        legend = ()
        for mu in fom.parameter_space.sample_randomly(2):
            print(f"Solving for diffusion = \n{mu['diffusion']} ... ")
            sys.stdout.flush()
            Us = Us + (fom.solve(mu),)
            legend = legend + (str(mu['diffusion']),)
        fom.visualize(Us, legend=legend, title='Detailed Solutions for different parameters',
                      separate_colorbars=False, block=True)

    print('RB generation ...')

    # define estimator for coercivity constant
    from pymor.parameters.functionals import ExpressionParameterFunctional
    coercivity_estimator = ExpressionParameterFunctional('min(diffusion)', fom.parameter_type)

    # inner product for computation of Riesz representatives
    product = fom.h1_0_semi_product if args['--product'] == 'h1' else None

    if args['--reductor'] == 'residual_basis':
        from pymor.reductors.coercive import CoerciveRBReductor
        reductor = CoerciveRBReductor(fom, product=product, coercivity_estimator=coercivity_estimator,
                                      check_orthonormality=False)
    elif args['--reductor'] == 'traditional':
        from pymor.reductors.coercive import SimpleCoerciveRBReductor
        reductor = SimpleCoerciveRBReductor(fom, product=product, coercivity_estimator=coercivity_estimator,
                                            check_orthonormality=False)
    else:
        assert False  # this should never happen

    if args['--alg'] == 'naive':
        rom, red_summary = reduce_naive(fom=fom, reductor=reductor, basis_size=args['RBSIZE'])
    elif args['--alg'] == 'greedy':
        parallel = not (args['--fenics'] and args['--greedy-without-estimator'])  # cannot pickle FEniCS model
        rom, red_summary = reduce_greedy(fom=fom, reductor=reductor, snapshots_per_block=args['SNAPSHOTS'],
                                         extension_alg_name=args['--extension-alg'],
                                         max_extensions=args['RBSIZE'],
                                         use_estimator=not args['--greedy-without-estimator'],
                                         pool=pool if parallel else None)
    elif args['--alg'] == 'adaptive_greedy':
        parallel = not (args['--fenics'] and args['--greedy-without-estimator'])  # cannot pickle FEniCS model
        rom, red_summary = reduce_adaptive_greedy(fom=fom, reductor=reductor, validation_mus=args['SNAPSHOTS'],
                                                  extension_alg_name=args['--extension-alg'],
                                                  max_extensions=args['RBSIZE'],
                                                  use_estimator=not args['--greedy-without-estimator'],
                                                  rho=args['--adaptive-greedy-rho'],
                                                  gamma=args['--adaptive-greedy-gamma'],
                                                  theta=args['--adaptive-greedy-theta'],
                                                  pool=pool if parallel else None)
    elif args['--alg'] == 'pod':
        rom, red_summary = reduce_pod(fom=fom, reductor=reductor, snapshots_per_block=args['SNAPSHOTS'],
                                      basis_size=args['RBSIZE'])
    else:
        assert False  # this should never happen

    if args['--pickle']:
        print(f"\nWriting reduced model to file {args['--pickle']}_reduced ...")
        with open(args['--pickle'] + '_reduced', 'wb') as f:
            dump(rom, f)
        if not args['--fenics']:  # FEniCS data structures do not support serialization
            print(f"Writing detailed model and reductor to file {args['--pickle']}_detailed ...")
            with open(args['--pickle'] + '_detailed', 'wb') as f:
                dump((fom, reductor), f)

    print('\nSearching for maximum error on random snapshots ...')

    results = reduction_error_analysis(rom,
                                       fom=fom,
                                       reductor=reductor,
                                       estimator=True,
                                       error_norms=(fom.h1_0_semi_norm, fom.l2_norm),
                                       condition=True,
                                       test_mus=args['--test'],
                                       basis_sizes=0 if args['--plot-error-sequence'] else 1,
                                       plot=args['--plot-error-sequence'],
                                       pool=None if args['--fenics'] else pool,  # cannot pickle FEniCS model
                                       random_seed=999)

    print('\n*** RESULTS ***\n')
    print(fom_summary)
    print(red_summary)
    print(results['summary'])
    sys.stdout.flush()

    if args['--plot-error-sequence']:
        import matplotlib.pyplot
        matplotlib.pyplot.show()  # display the figure created by reduction_error_analysis
    if args['--plot-err']:
        mumax = results['max_error_mus'][0, -1]
        U = fom.solve(mumax)
        URB = reductor.reconstruct(rom.solve(mumax))
        fom.visualize((U, URB, U - URB), legend=('Detailed Solution', 'Reduced Solution', 'Error'),
                    title='Maximum Error Solution', separate_colorbars=True, block=True)

    return results
Example #9
def main(
    rbsize: int = Argument(..., help='Size of the reduced basis.'),

    cache_region: Choices('none memory disk persistent') = Option(
        'none',
        help='Name of cache region to use for caching solution snapshots.'
    ),
    error_estimator: bool = Option(True, help='Use error estimator for basis generation.'),
    gamma: float = Option(0.2, help='Weight factor for age penalty term in refinement indicators.'),
    grid: int = Option(100, help='Use grid with 2*NI*NI elements.'),
    ipython_engines: int = Option(
        0,
        help='If positive, the number of IPython cluster engines to use for parallel greedy search. '
             'If zero, no parallelization is performed.'
    ),
    ipython_profile: str = Option(None, help='IPython profile to use for parallelization.'),
    list_vector_array: bool = Option(
        False,
        help='Solve using ListVectorArray[NumpyVector] instead of NumpyVectorArray.'
    ),
    pickle: str = Option(
        None,
        help='Pickle reduced discretization, as well as reductor and high-dimensional model to files with this prefix.'
    ),
    plot_err: bool = Option(False, help='Plot error.'),
    plot_solutions: bool = Option(False, help='Plot some example solutions.'),
    plot_error_sequence: bool = Option(False, help='Plot reduction error vs. basis size.'),
    product: Choices('euclidean h1') = Option(
        'h1',
        help='Product w.r.t. which to orthonormalize and calculate Riesz representatives.'
    ),
    reductor: Choices('traditional residual_basis') = Option(
        'residual_basis',
        help='Reductor (error estimator) to choose (traditional, residual_basis).'
    ),
    rho: float = Option(1.1, help='Maximum allowed ratio between error on validation set and on training set.'),
    test: int = Option(10, help='Use COUNT snapshots for stochastic error estimation.'),
    theta: float = Option(0., help='Ratio of elements to refine.'),
    validation_mus: int = Option(0, help='Size of validation set.'),
    visualize_refinement: bool = Option(True, help='Visualize the training set refinement indicators.'),
):
    """Modified thermalblock demo using adaptive greedy basis generation algorithm."""

    problem = thermal_block_problem(num_blocks=(2, 2))
    functionals = [ExpressionParameterFunctional('diffusion[0]', {'diffusion': 2}),
                   ExpressionParameterFunctional('diffusion[1]**2', {'diffusion': 2}),
                   ExpressionParameterFunctional('diffusion[0]', {'diffusion': 2}),
                   ExpressionParameterFunctional('diffusion[1]', {'diffusion': 2})]
    problem = problem.with_(
        diffusion=problem.diffusion.with_(coefficients=functionals),
    )

    print('Discretize ...')
    fom, _ = discretize_stationary_cg(problem, diameter=1. / grid)

    if list_vector_array:
        from pymor.discretizers.builtin.list import convert_to_numpy_list_vector_array
        fom = convert_to_numpy_list_vector_array(fom)

    if cache_region != 'none':
        # building a cache_id is only needed for persistent CacheRegions
        cache_id = f"pymordemos.thermalblock_adaptive {grid}"
        fom.enable_caching(cache_region.value, cache_id)

    if plot_solutions:
        print('Showing some solutions')
        Us = ()
        legend = ()
        for mu in problem.parameter_space.sample_randomly(2):
            print(f"Solving for diffusion = \n{mu['diffusion']} ... ")
            sys.stdout.flush()
            Us = Us + (fom.solve(mu),)
            legend = legend + (str(mu['diffusion']),)
        fom.visualize(Us, legend=legend, title='Detailed Solutions for different parameters', block=True)

    print('RB generation ...')

    product_op = fom.h1_0_semi_product if product == 'h1' else None
    coercivity_estimator = ExpressionParameterFunctional('min([diffusion[0], diffusion[1]**2])',
                                                         fom.parameters)
    reductors = {'residual_basis': CoerciveRBReductor(fom, product=product_op,
                                                      coercivity_estimator=coercivity_estimator),
                 'traditional': SimpleCoerciveRBReductor(fom, product=product_op,
                                                         coercivity_estimator=coercivity_estimator)}
    reductor = reductors[reductor]

    pool = new_parallel_pool(ipython_num_engines=ipython_engines, ipython_profile=ipython_profile)
    greedy_data = rb_adaptive_greedy(
        fom, reductor, problem.parameter_space,
        validation_mus=validation_mus,
        rho=rho,
        gamma=gamma,
        theta=theta,
        use_error_estimator=error_estimator,
        error_norm=fom.h1_0_semi_norm,
        max_extensions=rbsize,
        visualize=visualize_refinement
    )

    rom = greedy_data['rom']

    if pickle:
        print(f"\nWriting reduced model to file {pickle}_reduced ...")
        with open(pickle + '_reduced', 'wb') as f:
            dump(rom, f)
        print(f"Writing detailed model and reductor to file {pickle}_detailed ...")
        with open(pickle + '_detailed', 'wb') as f:
            dump((fom, reductor), f)

    print('\nSearching for maximum error on random snapshots ...')

    results = reduction_error_analysis(rom,
                                       fom=fom,
                                       reductor=reductor,
                                       error_estimator=True,
                                       error_norms=(fom.h1_0_semi_norm,),
                                       condition=True,
                                       test_mus=problem.parameter_space.sample_randomly(test),
                                       basis_sizes=25 if plot_error_sequence else 1,
                                       plot=True,
                                       pool=pool)

    real_rb_size = rom.solution_space.dim

    print('''
*** RESULTS ***

Problem:
   number of blocks:                   2x2
   h:                                  sqrt(2)/{grid}

Greedy basis generation:
   error estimator enabled:            {error_estimator}
   product:                            {product}
   prescribed basis size:              {rbsize}
   actual basis size:                  {real_rb_size}
   elapsed time:                       {greedy_data[time]}
'''.format(**locals()))
    print(results['summary'])

    sys.stdout.flush()

    if plot_error_sequence:
        from matplotlib import pyplot as plt
        plt.show()
    if plot_err:
        mumax = results['max_error_mus'][0, -1]
        U = fom.solve(mumax)
        URB = reductor.reconstruct(rom.solve(mumax))
        fom.visualize((U, URB, U - URB), legend=('Detailed Solution', 'Reduced Solution', 'Error'),
                      title='Maximum Error Solution', separate_colorbars=True, block=True)
Example #10
def main(args):

    args = parse_arguments(args)

    pool = new_parallel_pool(ipython_num_engines=args["--ipython-engines"], ipython_profile=args["--ipython-profile"])

    if args["--fenics"]:
        d, d_summary = discretize_fenics(args["XBLOCKS"], args["YBLOCKS"], args["--grid"], args["--order"])
    else:
        d, d_summary = discretize_pymor(args["XBLOCKS"], args["YBLOCKS"], args["--grid"], args["--list-vector-array"])

    if args["--cache-region"] != "none":
        d.enable_caching(args["--cache-region"])

    if args["--plot-solutions"]:
        print("Showing some solutions")
        Us = ()
        legend = ()
        for mu in d.parameter_space.sample_randomly(2):
            print("Solving for diffusion = \n{} ... ".format(mu["diffusion"]))
            sys.stdout.flush()
            Us = Us + (d.solve(mu),)
            legend = legend + (str(mu["diffusion"]),)
        d.visualize(
            Us, legend=legend, title="Detailed Solutions for different parameters", separate_colorbars=False, block=True
        )

    print("RB generation ...")

    # define estimator for coercivity constant
    from pymor.parameters.functionals import ExpressionParameterFunctional

    coercivity_estimator = ExpressionParameterFunctional("min(diffusion)", d.parameter_type)

    # inner product for computation of Riesz representatives
    product = d.h1_0_semi_product if args["--estimator-norm"] == "h1" else None

    if args["--reductor"] == "residual_basis":
        from pymor.reductors.coercive import reduce_coercive

        reductor = partial(reduce_coercive, product=product, coercivity_estimator=coercivity_estimator)
    elif args["--reductor"] == "traditional":
        from pymor.reductors.coercive import reduce_coercive_simple

        reductor = partial(reduce_coercive_simple, product=product, coercivity_estimator=coercivity_estimator)
    else:
        assert False  # this should never happen

    if args["--alg"] == "naive":
        rd, rc, red_summary = reduce_naive(d=d, reductor=reductor, basis_size=args["RBSIZE"])
    elif args["--alg"] == "greedy":
        parallel = not (args["--fenics"] and args["--greedy-without-estimator"])  # cannot pickle FEniCS discretization
        rd, rc, red_summary = reduce_greedy(
            d=d,
            reductor=reductor,
            snapshots_per_block=args["SNAPSHOTS"],
            extension_alg_name=args["--extension-alg"],
            max_extensions=args["RBSIZE"],
            use_estimator=not args["--greedy-without-estimator"],
            pool=pool if parallel else None,
        )
    elif args["--alg"] == "adaptive_greedy":
        parallel = not (args["--fenics"] and args["--greedy-without-estimator"])  # cannot pickle FEniCS discretization
        rd, rc, red_summary = reduce_adaptive_greedy(
            d=d,
            reductor=reductor,
            validation_mus=args["SNAPSHOTS"],
            extension_alg_name=args["--extension-alg"],
            max_extensions=args["RBSIZE"],
            use_estimator=not args["--greedy-without-estimator"],
            rho=args["--adaptive-greedy-rho"],
            gamma=args["--adaptive-greedy-gamma"],
            theta=args["--adaptive-greedy-theta"],
            pool=pool if parallel else None,
        )
    elif args["--alg"] == "pod":
        rd, rc, red_summary = reduce_pod(
            d=d,
            reductor=reductor,
            snapshots_per_block=args["SNAPSHOTS"],
            basis_size=args["RBSIZE"],
            product_name=args["--pod-product"],
        )
    else:
        assert False  # this should never happen

    if args["--pickle"]:
        print("\nWriting reduced discretization to file {} ...".format(args["--pickle"] + "_reduced"))
        with open(args["--pickle"] + "_reduced", "wb") as f:
            dump(rd, f)
        if not args["--fenics"]:  # FEniCS data structures do not support serialization
            print(
                "Writing detailed discretization and reconstructor to file {} ...".format(
                    args["--pickle"] + "_detailed"
                )
            )
            with open(args["--pickle"] + "_detailed", "wb") as f:
                dump((d, rc), f)

    print("\nSearching for maximum error on random snapshots ...")

    results = reduction_error_analysis(
        rd,
        discretization=d,
        reconstructor=rc,
        estimator=True,
        error_norms=(d.h1_0_semi_norm, d.l2_norm),
        condition=True,
        test_mus=args["--test"],
        basis_sizes=0 if args["--plot-error-sequence"] else 1,
        plot=args["--plot-error-sequence"],
        pool=None if args["--fenics"] else pool,  # cannot pickle FEniCS discretization
        random_seed=999,
    )

    print("\n*** RESULTS ***\n")
    print(d_summary)
    print(red_summary)
    print(results["summary"])
    sys.stdout.flush()

    if args["--plot-error-sequence"]:
        import matplotlib.pyplot

        matplotlib.pyplot.show(results["figure"])
    if args["--plot-err"]:
        mumax = results["max_error_mus"][0, -1]
        U = d.solve(mumax)
        URB = rc.reconstruct(rd.solve(mumax))
        d.visualize(
            (U, URB, U - URB),
            legend=("Detailed Solution", "Reduced Solution", "Error"),
            title="Maximum Error Solution",
            separate_colorbars=True,
            block=True,
        )

    return results
Example #11
 def _store(self, i, v):
     with open(os.path.join(self.dir, str(i)), 'wb') as f:
         dump(v, f)
     self._cache[i] = v
     if len(self._cache) > self.cache_size:
         self._cache.popitem(last=False)
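_store pairs an on-disk pickle with a bounded in-memory cache; since popitem(last=False) evicts the oldest entry, self._cache is presumably a collections.OrderedDict. A hypothetical counterpart for reading could look like:

 def _load(self, i):
     from pymor.core.pickle import load
     if i in self._cache:  # serve from the in-memory cache when possible
         return self._cache[i]
     with open(os.path.join(self.dir, str(i)), 'rb') as f:
         v = load(f)       # otherwise unpickle from disk
     self._cache[i] = v
     if len(self._cache) > self.cache_size:
         self._cache.popitem(last=False)  # evict the oldest entry
     return v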
Example #12
def thermalblock_demo(args):
    args['XBLOCKS'] = int(args['XBLOCKS'])
    args['YBLOCKS'] = int(args['YBLOCKS'])
    args['--grid'] = int(args['--grid'])
    args['SNAPSHOTS'] = int(args['SNAPSHOTS'])
    args['RBSIZE'] = int(args['RBSIZE'])
    args['--test'] = int(args['--test'])
    args['--estimator-norm'] = args['--estimator-norm'].lower()
    assert args['--estimator-norm'] in {'trivial', 'h1'}
    args['--extension-alg'] = args['--extension-alg'].lower()
    assert args['--extension-alg'] in {
        'trivial', 'gram_schmidt', 'h1_gram_schmidt'
    }
    args['--reductor'] = args['--reductor'].lower()
    assert args['--reductor'] in {'traditional', 'residual_basis'}

    print('Solving on TriaGrid(({0},{0}))'.format(args['--grid']))

    print('Setup Problem ...')
    problem = ThermalBlockProblem(num_blocks=(args['XBLOCKS'],
                                              args['YBLOCKS']))

    print('Discretize ...')
    discretization, _ = discretize_elliptic_cg(problem,
                                               diameter=1. / args['--grid'])

    print('The parameter type is {}'.format(discretization.parameter_type))

    if args['--plot-solutions']:
        print('Showing some solutions')
        Us = tuple()
        legend = tuple()
        for mu in discretization.parameter_space.sample_randomly(2):
            print('Solving for diffusion = \n{} ... '.format(mu['diffusion']))
            sys.stdout.flush()
            Us = Us + (discretization.solve(mu), )
            legend = legend + (str(mu['diffusion']), )
        discretization.visualize(
            Us,
            legend=legend,
            title='Detailed Solutions for different parameters',
            block=True)

    print('RB generation ...')

    error_product = discretization.h1_product if args[
        '--estimator-norm'] == 'h1' else None
    coercivity_estimator = ExpressionParameterFunctional(
        'min(diffusion)', discretization.parameter_type)
    reductors = {
        'residual_basis':
        partial(reduce_stationary_coercive,
                error_product=error_product,
                coercivity_estimator=coercivity_estimator),
        'traditional':
        partial(reduce_stationary_affine_linear,
                error_product=error_product,
                coercivity_estimator=coercivity_estimator)
    }
    reductor = reductors[args['--reductor']]
    extension_algorithms = {
        'trivial':
        trivial_basis_extension,
        'gram_schmidt':
        gram_schmidt_basis_extension,
        'h1_gram_schmidt':
        partial(gram_schmidt_basis_extension,
                product=discretization.h1_product)
    }
    extension_algorithm = extension_algorithms[args['--extension-alg']]
    greedy_data = greedy(discretization,
                         reductor,
                         discretization.parameter_space.sample_uniformly(
                             args['SNAPSHOTS']),
                         use_estimator=args['--with-estimator'],
                         error_norm=discretization.h1_norm,
                         extension_algorithm=extension_algorithm,
                         max_extensions=args['RBSIZE'])
    rb_discretization, reconstructor = greedy_data[
        'reduced_discretization'], greedy_data['reconstructor']

    if args['--pickle']:
        print('\nWriting reduced discretization to file {} ...'.format(
            args['--pickle'] + '_reduced'))
        with open(args['--pickle'] + '_reduced', 'wb') as f:  # pickle needs binary mode
            dump(rb_discretization, f)
        print(
            'Writing detailed discretization and reconstructor to file {} ...'.
            format(args['--pickle'] + '_detailed'))
        with open(args['--pickle'] + '_detailed', 'wb') as f:
            dump((discretization, reconstructor), f)

    print('\nSearching for maximum error on random snapshots ...')

    def error_analysis(d, rd, rc, mus):
        print('N = {}: '.format(rd.operator.source.dim), end='')
        h1_err_max = -1
        h1_est_max = -1
        cond_max = -1
        for mu in mus:
            print('.', end='')
            sys.stdout.flush()
            u = rd.solve(mu)
            URB = rc.reconstruct(u)
            U = d.solve(mu)
            h1_err = d.h1_norm(U - URB)[0]
            h1_est = rd.estimate(u, mu=mu)
            cond = np.linalg.cond(rd.operator.assemble(mu)._matrix)
            if h1_err > h1_err_max:
                h1_err_max = h1_err
                mumax = mu
            if h1_est > h1_est_max:
                h1_est_max = h1_est
                mu_est_max = mu
            if cond > cond_max:
                cond_max = cond
                cond_max_mu = mu
        print()
        return h1_err_max, mumax, h1_est_max, mu_est_max, cond_max, cond_max_mu

    tic = time.time()

    real_rb_size = len(greedy_data['basis'])
    if args['--plot-error-sequence']:
        N_count = min(real_rb_size - 1, 25)
        Ns = np.linspace(1, real_rb_size, N_count).astype(int)  # np.int was removed from NumPy
    else:
        Ns = np.array([real_rb_size])
    rd_rcs = [
        reduce_to_subbasis(rb_discretization, N, reconstructor)[:2] for N in Ns
    ]
    mus = list(discretization.parameter_space.sample_randomly(args['--test']))

    errs, err_mus, ests, est_mus, conds, cond_mus = zip(
        *(error_analysis(discretization, rd, rc, mus) for rd, rc in rd_rcs))
    h1_err_max = errs[-1]
    mumax = err_mus[-1]
    cond_max = conds[-1]
    cond_max_mu = cond_mus[-1]
    toc = time.time()
    t_est = toc - tic

    print('''
    *** RESULTS ***

    Problem:
       number of blocks:                   {args[XBLOCKS]}x{args[YBLOCKS]}
       h:                                  sqrt(2)/{args[--grid]}

    Greedy basis generation:
       number of snapshots:                {args[SNAPSHOTS]}^({args[XBLOCKS]}x{args[YBLOCKS]})
       used estimator:                     {args[--with-estimator]}
       estimator norm:                     {args[--estimator-norm]}
       extension method:                   {args[--extension-alg]}
       prescribed basis size:              {args[RBSIZE]}
       actual basis size:                  {real_rb_size}
       elapsed time:                       {greedy_data[time]}

    Stochastic error estimation:
       number of samples:                  {args[--test]}
       maximal H1-error:                   {h1_err_max}  (mu = {mumax})
       maximal condition of system matrix: {cond_max}  (mu = {cond_max_mu})
       elapsed time:                       {t_est}
    '''.format(**locals()))

    sys.stdout.flush()

    if args['--plot-error-sequence']:
        plt.semilogy(Ns, errs, Ns, ests)
        plt.legend(('error', 'estimator'))
        plt.show()
    if args['--plot-err']:
        U = discretization.solve(mumax)
        URB = reconstructor.reconstruct(rb_discretization.solve(mumax))
        discretization.visualize(
            (U, URB, U - URB),
            legend=('Detailed Solution', 'Reduced Solution', 'Error'),
            title='Maximum Error Solution',
            separate_colorbars=True,
            block=True)
Example #13
def thermalblock_demo(args):
    args['--grid'] = int(args['--grid'])
    args['RBSIZE'] = int(args['RBSIZE'])
    args['--test'] = int(args['--test'])
    args['--ipython-engines'] = int(args['--ipython-engines'])
    args['--extension-alg'] = args['--extension-alg'].lower()
    assert args['--extension-alg'] in {'trivial', 'gram_schmidt'}
    args['--product'] = args['--product'].lower()
    assert args['--product'] in {'trivial', 'h1'}
    args['--reductor'] = args['--reductor'].lower()
    assert args['--reductor'] in {'traditional', 'residual_basis'}
    args['--cache-region'] = args['--cache-region'].lower()
    args['--validation-mus'] = int(args['--validation-mus'])
    args['--rho'] = float(args['--rho'])
    args['--gamma'] = float(args['--gamma'])
    args['--theta'] = float(args['--theta'])

    problem = thermal_block_problem(num_blocks=(2, 2))
    functionals = [
        ExpressionParameterFunctional('diffusion[0]', {'diffusion': 2}),
        ExpressionParameterFunctional('diffusion[1]**2', {'diffusion': 2}),
        ExpressionParameterFunctional('diffusion[0]', {'diffusion': 2}),
        ExpressionParameterFunctional('diffusion[1]', {'diffusion': 2})
    ]
    problem = problem.with_(
        diffusion=problem.diffusion.with_(coefficients=functionals), )

    print('Discretize ...')
    fom, _ = discretize_stationary_cg(problem, diameter=1. / args['--grid'])

    if args['--list-vector-array']:
        from pymor.discretizers.builtin.list import convert_to_numpy_list_vector_array
        fom = convert_to_numpy_list_vector_array(fom)

    if args['--cache-region'] != 'none':
        # building a cache_id is only needed for persistent CacheRegions
        cache_id = f"pymordemos.thermalblock_adaptive {args['--grid']}"
        fom.enable_caching(args['--cache-region'], cache_id)

    if args['--plot-solutions']:
        print('Showing some solutions')
        Us = ()
        legend = ()
        for mu in problem.parameter_space.sample_randomly(2):
            print(f"Solving for diffusion = \n{mu['diffusion']} ... ")
            sys.stdout.flush()
            Us = Us + (fom.solve(mu), )
            legend = legend + (str(mu['diffusion']), )
        fom.visualize(Us,
                      legend=legend,
                      title='Detailed Solutions for different parameters',
                      block=True)

    print('RB generation ...')

    product = fom.h1_0_semi_product if args['--product'] == 'h1' else None
    coercivity_estimator = ExpressionParameterFunctional(
        'min([diffusion[0], diffusion[1]**2])', fom.parameters)
    reductors = {
        'residual_basis':
        CoerciveRBReductor(fom,
                           product=product,
                           coercivity_estimator=coercivity_estimator),
        'traditional':
        SimpleCoerciveRBReductor(fom,
                                 product=product,
                                 coercivity_estimator=coercivity_estimator)
    }
    reductor = reductors[args['--reductor']]

    pool = new_parallel_pool(ipython_num_engines=args['--ipython-engines'],
                             ipython_profile=args['--ipython-profile'])
    greedy_data = rb_adaptive_greedy(
        fom,
        reductor,
        problem.parameter_space,
        validation_mus=args['--validation-mus'],
        rho=args['--rho'],
        gamma=args['--gamma'],
        theta=args['--theta'],
        use_estimator=not args['--without-estimator'],
        error_norm=fom.h1_0_semi_norm,
        max_extensions=args['RBSIZE'],
        visualize=not args['--no-visualize-refinement'])

    rom = greedy_data['rom']

    if args['--pickle']:
        print(
            f"\nWriting reduced model to file {args['--pickle']}_reduced ...")
        with open(args['--pickle'] + '_reduced', 'wb') as f:
            dump(rom, f)
        print(
            f"Writing detailed model and reductor to file {args['--pickle']}_detailed ..."
        )
        with open(args['--pickle'] + '_detailed', 'wb') as f:
            dump((fom, reductor), f)

    print('\nSearching for maximum error on random snapshots ...')

    results = reduction_error_analysis(
        rom,
        fom=fom,
        reductor=reductor,
        estimator=True,
        error_norms=(fom.h1_0_semi_norm, ),
        condition=True,
        test_mus=problem.parameter_space.sample_randomly(args['--test']),
        basis_sizes=25 if args['--plot-error-sequence'] else 1,
        plot=True,
        pool=pool)

    real_rb_size = rom.solution_space.dim

    print('''
*** RESULTS ***

Problem:
   number of blocks:                   2x2
   h:                                  sqrt(2)/{args[--grid]}

Greedy basis generation:
   estimator disabled:                 {args[--without-estimator]}
   extension method:                   {args[--extension-alg]}
   product:                            {args[--product]}
   prescribed basis size:              {args[RBSIZE]}
   actual basis size:                  {real_rb_size}
   elapsed time:                       {greedy_data[time]}
'''.format(**locals()))
    print(results['summary'])

    sys.stdout.flush()

    if args['--plot-error-sequence']:
        from matplotlib import pyplot as plt
        plt.show()  # display the figure created by reduction_error_analysis(plot=True)
    if args['--plot-err']:
        mumax = results['max_error_mus'][0, -1]
        U = fom.solve(mumax)
        URB = reductor.reconstruct(rom.solve(mumax))
        fom.visualize(
            (U, URB, U - URB),
            legend=('Detailed Solution', 'Reduced Solution', 'Error'),
            title='Maximum Error Solution',
            separate_colorbars=True,
            block=True)
Example #14
def thermalblock_demo(args):

    args = parse_arguments(args)

    pool = new_parallel_pool(ipython_num_engines=args['--ipython-engines'], ipython_profile=args['--ipython-profile'])

    if args['--fenics']:
        d, d_summary = discretize_fenics(args['XBLOCKS'], args['YBLOCKS'], args['--grid'], args['--order'])
    else:
        d, d_summary = discretize_pymor(args['XBLOCKS'], args['YBLOCKS'], args['--grid'], args['--list-vector-array'])

    if args['--cache-region'] != 'none':
        d.enable_caching(args['--cache-region'])

    if args['--plot-solutions']:
        print('Showing some solutions')
        Us = tuple()
        legend = tuple()
        for mu in d.parameter_space.sample_randomly(2):
            print('Solving for diffusion = \n{} ... '.format(mu['diffusion']))
            sys.stdout.flush()
            Us = Us + (d.solve(mu),)
            legend = legend + (str(mu['diffusion']),)
        d.visualize(Us, legend=legend, title='Detailed Solutions for different parameters',
                    separate_colorbars=False, block=True)

    print('RB generation ...')

    # define estimator for coercivity constant
    from pymor.parameters.functionals import ExpressionParameterFunctional
    coercivity_estimator = ExpressionParameterFunctional('min(diffusion)', d.parameter_type)

    # inner product for computation of Riesz representatives
    error_product = d.h1_0_semi_product if args['--estimator-norm'] == 'h1' else None

    if args['--reductor'] == 'residual_basis':
        from pymor.reductors.stationary import reduce_stationary_coercive
        reductor = partial(reduce_stationary_coercive, error_product=error_product,
                           coercivity_estimator=coercivity_estimator)
    elif args['--reductor'] == 'traditional':
        from pymor.reductors.linear import reduce_stationary_affine_linear
        reductor = partial(reduce_stationary_affine_linear, error_product=error_product,
                           coercivity_estimator=coercivity_estimator)
    else:
        assert False  # this should never happen

    if args['--pod']:
        rd, rc, red_summary = reduce_pod(d=d, reductor=reductor, snapshots_per_block=args['SNAPSHOTS'],
                                         basis_size=args['RBSIZE'], product_name=args['--pod-product'])
    else:
        rd, rc, red_summary = reduce_greedy(d=d, reductor=reductor, snapshots_per_block=args['SNAPSHOTS'],
                                            extension_alg_name=args['--extension-alg'],
                                            max_extensions=args['RBSIZE'],
                                            use_estimator=not args['--without-estimator'], pool=pool)

    if args['--pickle']:
        print('\nWriting reduced discretization to file {} ...'.format(args['--pickle'] + '_reduced'))
        with open(args['--pickle'] + '_reduced', 'wb') as f:  # pickle needs binary mode
            dump(rd, f)
        if not args['--fenics']:  # FEniCS data structures do not support serialization
            print('Writing detailed discretization and reconstructor to file {} ...'
                  .format(args['--pickle'] + '_detailed'))
            with open(args['--pickle'] + '_detailed', 'wb') as f:
                dump((d, rc), f)

    print('\nSearching for maximum error on random snapshots ...')

    results = reduction_error_analysis(rd,
                                       discretization=d,
                                       reconstructor=rc,
                                       estimator=True,
                                       error_norms=(d.h1_0_semi_norm, d.l2_norm),
                                       condition=True,
                                       test_mus=args['--test'],
                                       basis_sizes=0 if args['--plot-error-sequence'] else 1,
                                       plot=args['--plot-error-sequence'],
                                       pool=pool)

    print('\n*** RESULTS ***\n')
    print(d_summary)
    print(red_summary)
    print(results['summary'])
    sys.stdout.flush()

    if args['--plot-error-sequence']:
        from matplotlib import pyplot as plt
        plt.show()  # display the figure created by reduction_error_analysis
    if args['--plot-err']:
        mumax = results['max_error_mus'][0, -1]
        U = d.solve(mumax)
        URB = rc.reconstruct(rd.solve(mumax))
        d.visualize((U, URB, U - URB), legend=('Detailed Solution', 'Reduced Solution', 'Error'),
                    title='Maximum Error Solution', separate_colorbars=True, block=True)
Example #15
 def _dump_keylist(self):
     with open(self._keylist_fn, 'wb') as f:
         dump((self._keylist, self._size), f)
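A hypothetical inverse that restores the key list from the same file:

 def _load_keylist(self):
     from pymor.core.pickle import load
     with open(self._keylist_fn, 'rb') as f:
         self._keylist, self._size = load(f)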
Example #16
def thermalblock_demo(args):
    args["XBLOCKS"] = int(args["XBLOCKS"])
    args["YBLOCKS"] = int(args["YBLOCKS"])
    args["--grid"] = int(args["--grid"])
    args["--order"] = int(args["--order"])
    args["SNAPSHOTS"] = int(args["SNAPSHOTS"])
    args["RBSIZE"] = int(args["RBSIZE"])
    args["--test"] = int(args["--test"])
    args["--estimator-norm"] = args["--estimator-norm"].lower()
    assert args["--estimator-norm"] in {"trivial", "h1"}
    args["--extension-alg"] = args["--extension-alg"].lower()
    assert args["--extension-alg"] in {"trivial", "gram_schmidt", "h1_gram_schmidt"}
    args["--reductor"] = args["--reductor"].lower()
    assert args["--reductor"] in {"traditional", "residual_basis"}

    print("Discretize ...")
    discretization = discretize(args)

    print("The parameter type is {}".format(discretization.parameter_type))

    if args["--plot-solutions"]:
        print("Showing some solutions")
        Us = tuple()
        legend = tuple()
        for mu in discretization.parameter_space.sample_randomly(2):
            print("Solving for diffusion = \n{} ... ".format(mu["diffusion"]))
            sys.stdout.flush()
            Us = Us + (discretization.solve(mu),)
            legend = legend + (str(mu["diffusion"]),)
        discretization.visualize(Us, legend=legend, title="Detailed Solutions for different parameters")

    print("RB generation ...")

    error_product = discretization.h1_product if args["--estimator-norm"] == "h1" else None
    coercivity_estimator = ExpressionParameterFunctional("min(diffusion)", discretization.parameter_type)
    reductors = {
        "residual_basis": partial(
            reduce_stationary_coercive, error_product=error_product, coercivity_estimator=coercivity_estimator
        ),
        "traditional": partial(
            reduce_stationary_affine_linear, error_product=error_product, coercivity_estimator=coercivity_estimator
        ),
    }
    reductor = reductors[args["--reductor"]]
    extension_algorithms = {
        "trivial": trivial_basis_extension,
        "gram_schmidt": gram_schmidt_basis_extension,
        "h1_gram_schmidt": partial(gram_schmidt_basis_extension, product=discretization.h1_product),
    }
    extension_algorithm = extension_algorithms[args["--extension-alg"]]
    greedy_data = greedy(
        discretization,
        reductor,
        discretization.parameter_space.sample_uniformly(args["SNAPSHOTS"]),
        use_estimator=args["--with-estimator"],
        error_norm=discretization.h1_norm,
        extension_algorithm=extension_algorithm,
        max_extensions=args["RBSIZE"],
    )
    rb_discretization, reconstructor = greedy_data["reduced_discretization"], greedy_data["reconstructor"]

    if args["--pickle"]:
        print("\nWriting reduced discretization to file {} ...".format(args["--pickle"] + "_reduced"))
        with open(args["--pickle"] + "_reduced", "w") as f:
            dump(rb_discretization, f)

    print("\nSearching for maximum error on random snapshots ...")

    tic = time.time()

    real_rb_size = len(greedy_data["basis"])

    mus = list(discretization.parameter_space.sample_randomly(args["--test"]))

    h1_err_max = -1
    h1_est_max = -1
    cond_max = -1
    for mu in mus:
        print(".", end="")
        sys.stdout.flush()
        u = rb_discretization.solve(mu)
        URB = reconstructor.reconstruct(u)
        U = discretization.solve(mu)
        h1_err = discretization.h1_norm(U - URB)[0]
        h1_est = rb_discretization.estimate(u, mu=mu)
        cond = np.linalg.cond(rb_discretization.operator.assemble(mu)._matrix)
        if h1_err > h1_err_max:
            h1_err_max = h1_err
            mumax = mu
        if h1_est > h1_est_max:
            h1_est_max = h1_est
            mu_est_max = mu
        if cond > cond_max:
            cond_max = cond
            cond_max_mu = mu
    print()

    toc = time.time()
    t_est = toc - tic

    print(
        """
    *** RESULTS ***

    Problem:
       number of blocks:                   {args[XBLOCKS]}x{args[YBLOCKS]}
       h:                                  sqrt(2)/{args[--grid]}

    Greedy basis generation:
       number of snapshots:                {args[SNAPSHOTS]}^({args[XBLOCKS]}x{args[YBLOCKS]})
       used estimator:                     {args[--with-estimator]}
       estimator norm:                     {args[--estimator-norm]}
       extension method:                   {args[--extension-alg]}
       prescribed basis size:              {args[RBSIZE]}
       actual basis size:                  {real_rb_size}
       elapsed time:                       {greedy_data[time]}

    Stochastic error estimation:
       number of samples:                  {args[--test]}
       maximal H1-error:                   {h1_err_max}  (mu = {mumax})
       maximal condition of system matrix: {cond_max}  (mu = {cond_max_mu})
       elapsed time:                       {t_est}
    """.format(
            **locals()
        )
    )

    sys.stdout.flush()

    if args["--plot-err"]:
        U = discretization.solve(mumax)
        URB = reconstructor.reconstruct(rb_discretization.solve(mumax))
        discretization.visualize(
            (U, URB, U - URB), legend=("Detailed Solution", "Reduced Solution", "Error"), title="Maximum Error Solution"
        )
Example #17
def main(
    model: Choices('pymor fenics ngsolve pymor_text') = Argument(
        ..., help='High-dimensional model.'),
    alg: Choices('naive greedy adaptive_greedy pod') = Argument(
        ..., help='The model reduction algorithm to use.'),
    snapshots: int = Argument(
        ...,
        help='naive: ignored.\n\n'
        'greedy/pod: Number of training_set parameters per block '
        '(in total SNAPSHOTS^(XBLOCKS * YBLOCKS) parameters).\n\n'
        'adaptive_greedy: size of validation set.'),
    rbsize: int = Argument(..., help='Size of the reduced basis.'),
    test: int = Argument(
        ..., help='Number of parameters for stochastic error estimation.'),
):
    # discretize
    ############
    if model == 'pymor':
        fom, parameter_space = discretize_pymor()
    elif model == 'fenics':
        fom, parameter_space = discretize_fenics()
    elif model == 'ngsolve':
        fom, parameter_space = discretize_ngsolve()
    elif model == 'pymor_text':
        fom, parameter_space = discretize_pymor_text()
    else:
        raise NotImplementedError

    # select reduction algorithm with error estimator
    #################################################
    coercivity_estimator = ExpressionParameterFunctional(
        'min(diffusion)', fom.parameters)
    reductor = CoerciveRBReductor(fom,
                                  product=fom.h1_0_semi_product,
                                  coercivity_estimator=coercivity_estimator,
                                  check_orthonormality=False)

    # generate reduced model
    ########################
    if alg == 'naive':
        rom = reduce_naive(fom, reductor, parameter_space, rbsize)
    elif alg == 'greedy':
        rom = reduce_greedy(fom, reductor, parameter_space, snapshots, rbsize)
    elif alg == 'adaptive_greedy':
        rom = reduce_adaptive_greedy(fom, reductor, parameter_space, snapshots,
                                     rbsize)
    elif alg == 'pod':
        rom = reduce_pod(fom, reductor, parameter_space, snapshots, rbsize)
    else:
        raise NotImplementedError

    # evaluate the reduction error
    ##############################
    results = reduction_error_analysis(
        rom,
        fom=fom,
        reductor=reductor,
        error_estimator=True,
        error_norms=[fom.h1_0_semi_norm],
        condition=True,
        test_mus=parameter_space.sample_randomly(test),
        plot=True)

    # show results
    ##############
    print(results['summary'])
    import matplotlib.pyplot
    matplotlib.pyplot.show()

    # write results to disk
    #######################
    from pymor.core.pickle import dump
    with open('reduced_model.out', 'wb') as f:
        dump((rom, parameter_space), f)
    results.pop('figure')  # matplotlib figures cannot be serialized
    with open('results.out', 'wb') as f:
        dump(results, f)

    # visualize reduction error for worst-approximated mu
    #####################################################
    mumax = results['max_error_mus'][0, -1]
    U = fom.solve(mumax)
    U_RB = reductor.reconstruct(rom.solve(mumax))
    fom.visualize((U, U_RB, U - U_RB),
                  legend=('Detailed Solution', 'Reduced Solution', 'Error'),
                  separate_colorbars=True,
                  block=True)
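Because the model is pickled together with its parameter space, reloading yields both in one step (sketch, assuming 'reduced_model.out' was written as above):

from pymor.core.pickle import load

with open('reduced_model.out', 'rb') as f:
    rom, parameter_space = load(f)
mu = parameter_space.sample_randomly(1)[0]  # draw a fresh test parameter
U_rb = rom.solve(mu)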
Example #18
def main(BACKEND, ALG, SNAPSHOTS, RBSIZE, TEST):
    # discretize
    ############
    if BACKEND == "pymor":
        d = discretize_pymor()
    elif BACKEND == "fenics":
        d = discretize_fenics()
    else:
        raise NotImplementedError

    # select reduction algorithm with error estimator
    #################################################
    coercivity_estimator = ExpressionParameterFunctional("1.", d.parameter_type)
    reductor = partial(reduce_parabolic, product=d.h1_0_semi_product, coercivity_estimator=coercivity_estimator)

    # generate reduced model
    ########################
    if ALG == "greedy":
        rd, rc = reduce_greedy(d, reductor, SNAPSHOTS, RBSIZE)
    elif ALG == "adaptive_greedy":
        rd, rc = reduce_adaptive_greedy(d, reductor, SNAPSHOTS, RBSIZE)
    elif ALG == "pod":
        rd, rc = reduce_pod(d, reductor, SNAPSHOTS, RBSIZE)
    else:
        raise NotImplementedError

    # evaluate the reduction error
    ##############################
    results = reduction_error_analysis(
        rd,
        discretization=d,
        reconstructor=rc,
        estimator=True,
        error_norms=[lambda U: DT * np.sqrt(np.sum(d.h1_0_semi_norm(U)[1:] ** 2))],
        error_norm_names=["l^2-h^1"],
        condition=False,
        test_mus=TEST,
        random_seed=999,
        plot=True,
    )

    # show results
    ##############
    print(results["summary"])
    import matplotlib.pyplot as plt

    plt.show(results["figure"])

    # write results to disk
    #######################
    from pymor.core.pickle import dump

    dump(rd, open("reduced_model.out", "wb"))
    results.pop("figure")  # matplotlib figures cannot be serialized
    dump(results, open("results.out", "wb"))

    # visualize reduction error for worst-approximated mu
    #####################################################
    mumax = results["max_error_mus"][0, -1]
    U = d.solve(mumax)
    U_RB = rc.reconstruct(rd.solve(mumax))
    if BACKEND == "fenics":  # right now the fenics visualizer does not support time trajectories
        U = U.copy(len(U) - 1)
        U_RB = U_RB.copy(len(U_RB) - 1)
    d.visualize((U, U_RB, U - U_RB), legend=("Detailed Solution", "Reduced Solution", "Error"), separate_colorbars=True)

    return results
Example #23
def main(
    xblocks: int = Argument(..., help='Number of blocks in x direction.'),
    yblocks: int = Argument(..., help='Number of blocks in y direction.'),
    snapshots: int = Argument(
        ...,
        help='naive: ignored\n\n'
        'greedy/pod: Number of training_set parameters per block '
        '(in total SNAPSHOTS^(XBLOCKS * YBLOCKS) parameters).\n\n'
        'adaptive_greedy: size of validation set.\n\n'),
    rbsize: int = Argument(..., help='Size of the reduced basis.'),
    adaptive_greedy_gamma: float = Option(
        0.2, help='See pymor.algorithms.adaptivegreedy.'),
    adaptive_greedy_rho: float = Option(
        1.1, help='See pymor.algorithms.adaptivegreedy.'),
    adaptive_greedy_theta: float = Option(
        0., help='See pymor.algorithms.adaptivegreedy.'),
    alg: Choices('naive greedy adaptive_greedy pod') = Option(
        'greedy', help='The model reduction algorithm to use.'),
    cache_region: Choices('none memory disk persistent') = Option(
        'none',
        help='Name of cache region to use for caching solution snapshots.'),
    extension_alg: Choices('trivial gram_schmidt') = Option(
        'gram_schmidt', help='Basis extension algorithm to be used.'),
    fenics: bool = Option(False, help='Use FEniCS model.'),
    greedy_with_error_estimator: bool = Option(
        True, help='Use error estimator for basis generation.'),
    grid: int = Option(100, help='Use grid with 4*NI*NI elements'),
    ipython_engines: int = Option(
        None,
        help='If positive, the number of IPython cluster engines to use for '
        'parallel greedy search. If zero, no parallelization is performed.'),
    ipython_profile: str = Option(
        None, help='IPython profile to use for parallelization.'),
    list_vector_array: bool = Option(
        False,
        help=
        'Solve using ListVectorArray[NumpyVector] instead of NumpyVectorArray.'
    ),
    order: int = Option(
        1,
        help=
        'Polynomial order of the Lagrange finite elements to use in FEniCS.'),
    pickle: str = Option(
        None,
        help=
        'Pickle reduced model, as well as reductor and high-dimensional model '
        'to files with this prefix.'),
    product: Choices('euclidean h1') = Option(
        'h1',
        help=
        'Product w.r.t. which to orthonormalize and calculate Riesz representatives.'
    ),
    plot_err: bool = Option(False, help='Plot error'),
    plot_error_sequence: bool = Option(
        False, help='Plot reduction error vs. basis size.'),
    plot_solutions: bool = Option(False, help='Plot some example solutions.'),
    reductor: Choices('traditional residual_basis') = Option(
        'residual_basis', help='Reductor (error estimator) to choose.'),
    test: int = Option(
        10, help='Use COUNT snapshots for stochastic error estimation.'),
):
    """Thermalblock demo."""

    if fenics and cache_region != 'none':
        raise ValueError(
            'Caching of high-dimensional solutions is not supported for FEniCS model.'
        )
    if not fenics and order != 1:
        raise ValueError(
            'Higher-order finite elements only supported for FEniCS model.')

    pool = new_parallel_pool(ipython_num_engines=ipython_engines,
                             ipython_profile=ipython_profile)

    if fenics:
        fom, fom_summary = discretize_fenics(xblocks, yblocks, grid, order)
    else:
        fom, fom_summary = discretize_pymor(xblocks, yblocks, grid,
                                            list_vector_array)

    parameter_space = fom.parameters.space(0.1, 1.)

    if cache_region != 'none':
        # building a cache_id is only needed for persistent CacheRegions
        cache_id = (f"pymordemos.thermalblock {fenics} {xblocks} {yblocks}"
                    f"{grid} {order}")
        fom.enable_caching(cache_region.value, cache_id)

    if plot_solutions:
        print('Showing some solutions')
        Us = ()
        legend = ()
        for mu in parameter_space.sample_randomly(2):
            print(f"Solving for diffusion = \n{mu['diffusion']} ... ")
            sys.stdout.flush()
            Us = Us + (fom.solve(mu), )
            legend = legend + (str(mu['diffusion']), )
        fom.visualize(Us,
                      legend=legend,
                      title='Detailed Solutions for different parameters',
                      separate_colorbars=False,
                      block=True)

    print('RB generation ...')

    # define estimator for coercivity constant
    from pymor.parameters.functionals import ExpressionParameterFunctional
    coercivity_estimator = ExpressionParameterFunctional(
        'min(diffusion)', fom.parameters)

    # inner product for computation of Riesz representatives
    product = fom.h1_0_semi_product if product == 'h1' else None

    if reductor == 'residual_basis':
        from pymor.reductors.coercive import CoerciveRBReductor
        reductor = CoerciveRBReductor(
            fom,
            product=product,
            coercivity_estimator=coercivity_estimator,
            check_orthonormality=False)
    elif reductor == 'traditional':
        from pymor.reductors.coercive import SimpleCoerciveRBReductor
        reductor = SimpleCoerciveRBReductor(
            fom,
            product=product,
            coercivity_estimator=coercivity_estimator,
            check_orthonormality=False)
    else:
        assert False  # this should never happen

    if alg == 'naive':
        rom, red_summary = reduce_naive(fom=fom,
                                        reductor=reductor,
                                        parameter_space=parameter_space,
                                        basis_size=rbsize)
    elif alg == 'greedy':
        parallel = greedy_with_error_estimator or not fenics  # cannot pickle FEniCS model
        rom, red_summary = reduce_greedy(
            fom=fom,
            reductor=reductor,
            parameter_space=parameter_space,
            snapshots_per_block=snapshots,
            extension_alg_name=extension_alg.value,
            max_extensions=rbsize,
            use_error_estimator=greedy_with_error_estimator,
            pool=pool if parallel else None)
    elif alg == 'adaptive_greedy':
        parallel = greedy_with_error_estimator or not fenics  # cannot pickle FEniCS model
        rom, red_summary = reduce_adaptive_greedy(
            fom=fom,
            reductor=reductor,
            parameter_space=parameter_space,
            validation_mus=snapshots,
            extension_alg_name=extension_alg.value,
            max_extensions=rbsize,
            use_error_estimator=greedy_with_error_estimator,
            rho=adaptive_greedy_rho,
            gamma=adaptive_greedy_gamma,
            theta=adaptive_greedy_theta,
            pool=pool if parallel else None)
    elif alg == 'pod':
        rom, red_summary = reduce_pod(fom=fom,
                                      reductor=reductor,
                                      parameter_space=parameter_space,
                                      snapshots_per_block=snapshots,
                                      basis_size=rbsize)
    else:
        assert False  # this should never happen

    if pickle:
        print(f"\nWriting reduced model to file {pickle}_reduced ...")
        with open(pickle + '_reduced', 'wb') as f:
            dump((rom, parameter_space), f)
        if not fenics:  # FEniCS data structures do not support serialization
            print(
                f"Writing detailed model and reductor to file {pickle}_detailed ..."
            )
            with open(pickle + '_detailed', 'wb') as f:
                dump((fom, reductor), f)

    print('\nSearching for maximum error on random snapshots ...')

    results = reduction_error_analysis(
        rom,
        fom=fom,
        reductor=reductor,
        error_estimator=True,
        error_norms=(fom.h1_0_semi_norm, fom.l2_norm),
        condition=True,
        test_mus=parameter_space.sample_randomly(test, seed=999),
        basis_sizes=0 if plot_error_sequence else 1,
        plot=plot_error_sequence,
        pool=None if fenics else pool  # cannot pickle FEniCS model
    )

    print('\n*** RESULTS ***\n')
    print(fom_summary)
    print(red_summary)
    print(results['summary'])
    sys.stdout.flush()

    if plot_error_sequence:
        import matplotlib.pyplot
        matplotlib.pyplot.show()
    if plot_err:
        mumax = results['max_error_mus'][0, -1]
        U = fom.solve(mumax)
        URB = reductor.reconstruct(rom.solve(mumax))
        fom.visualize(
            (U, URB, U - URB),
            legend=('Detailed Solution', 'Reduced Solution', 'Error'),
            title='Maximum Error Solution',
            separate_colorbars=True,
            block=True)

    global test_results
    test_results = results
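A Typer-style signature like the one above is typically wired to the command line with a small entry point; a sketch (the guard and the run import are standard Typer usage, not shown in this excerpt):

if __name__ == '__main__':
    from typer import run
    run(main)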
Example #24
def main(
    backend: Choices('pymor fenics') = Argument(..., help='Discretization toolkit to use.'),
    alg: Choices('greedy adaptive_greedy pod') = Argument(..., help='The model reduction algorithm to use.'),
    snapshots: int = Argument(
        ...,
        help='greedy/pod: number of training set parameters\n\n'
             'adaptive_greedy: size of validation set.'
    ),
    rbsize: int = Argument(..., help='Size of the reduced basis.'),
    test: int = Argument(..., help='Number of test parameters for reduction error estimation.'),
):
    """Reduced basis approximation of the heat equation."""
    # discretize
    ############
    if backend == 'pymor':
        fom = discretize_pymor()
    elif backend == 'fenics':
        fom = discretize_fenics()
    else:
        raise NotImplementedError
    parameter_space = fom.parameters.space(1, 100)

    # select reduction algorithm with error estimator
    #################################################
    coercivity_estimator = ExpressionParameterFunctional('1.', fom.parameters)
    reductor = ParabolicRBReductor(fom, product=fom.h1_0_semi_product, coercivity_estimator=coercivity_estimator)

    # generate reduced model
    ########################
    if alg == 'greedy':
        rom = reduce_greedy(fom, reductor, parameter_space, snapshots, rbsize)
    elif alg == 'adaptive_greedy':
        rom = reduce_adaptive_greedy(fom, reductor, parameter_space, snapshots, rbsize)
    elif alg == 'pod':
        rom = reduce_pod(fom, reductor, parameter_space, snapshots, rbsize)
    else:
        raise NotImplementedError

    # evaluate the reduction error
    ##############################
    results = reduction_error_analysis(
        rom, fom=fom, reductor=reductor, error_estimator=True,
        error_norms=[lambda U: DT * np.sqrt(np.sum(fom.h1_0_semi_norm(U)[1:]**2))],
        error_norm_names=['l^2-h^1'],
        condition=False, test_mus=parameter_space.sample_randomly(test, seed=999), plot=True
    )

    # show results
    ##############
    print(results['summary'])
    import matplotlib.pyplot as plt
    plt.show()

    # write results to disk
    #######################
    from pymor.core.pickle import dump
    dump(rom, open('reduced_model.out', 'wb'))
    results.pop('figure')  # matplotlib figures cannot be serialized
    dump(results, open('results.out', 'wb'))

    # visualize reduction error for worst-approximated mu
    #####################################################
    mumax = results['max_error_mus'][0, -1]
    U = fom.solve(mumax)
    U_RB = reductor.reconstruct(rom.solve(mumax))
    if backend == 'fenics':  # right now the fenics visualizer does not support time trajectories
        U = U[len(U) - 1].copy()
        U_RB = U_RB[len(U_RB) - 1].copy()
    fom.visualize((U, U_RB, U - U_RB), legend=('Detailed Solution', 'Reduced Solution', 'Error'),
                  separate_colorbars=True)

    return results
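The lambda passed to error_norms above measures a whole time trajectory: fom.h1_0_semi_norm returns one H^1_0-seminorm per time step, the initial step is skipped, and the remaining values are accumulated in an l^2 sense and scaled by the step size DT (defined at module level in the demo). The same norm written out as a named helper, purely as a sketch:

import numpy as np

def make_l2_h1_norm(fom, DT):
    # trajectory norm: DT * sqrt(sum over time steps k >= 1 of |u_k|_{H^1_0}^2)
    def norm(U):
        spatial = fom.h1_0_semi_norm(U)   # one seminorm value per time step
        return DT * np.sqrt(np.sum(spatial[1:]**2))
    return norm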
Example #25
def main(args):

    args = parse_arguments(args)

    pool = new_parallel_pool(ipython_num_engines=args['--ipython-engines'], ipython_profile=args['--ipython-profile'])

    if args['--fenics']:
        fom, fom_summary = discretize_fenics(args['XBLOCKS'], args['YBLOCKS'], args['--grid'], args['--order'])
    else:
        fom, fom_summary = discretize_pymor(args['XBLOCKS'], args['YBLOCKS'], args['--grid'], args['--list-vector-array'])

    if args['--cache-region'] != 'none':
        # building a cache_id is only needed for persistent CacheRegions
        cache_id = (f"pymordemos.thermalblock {args['--fenics']} {args['XBLOCKS']} {args['YBLOCKS']}"
                    f"{args['--grid']} {args['--order']}")
        fom.enable_caching(args['--cache-region'], cache_id)

    if args['--plot-solutions']:
        print('Showing some solutions')
        Us = ()
        legend = ()
        for mu in fom.parameter_space.sample_randomly(2):
            print(f"Solving for diffusion = \n{mu['diffusion']} ... ")
            sys.stdout.flush()
            Us = Us + (fom.solve(mu),)
            legend = legend + (str(mu['diffusion']),)
        fom.visualize(Us, legend=legend, title='Detailed Solutions for different parameters',
                      separate_colorbars=False, block=True)

    print('RB generation ...')

    # define estimator for coercivity constant
    from pymor.parameters.functionals import ExpressionParameterFunctional
    coercivity_estimator = ExpressionParameterFunctional('min(diffusion)', fom.parameter_type)

    # inner product for computation of Riesz representatives
    product = fom.h1_0_semi_product if args['--product'] == 'h1' else None

    if args['--reductor'] == 'residual_basis':
        from pymor.reductors.coercive import CoerciveRBReductor
        reductor = CoerciveRBReductor(fom, product=product, coercivity_estimator=coercivity_estimator,
                                      check_orthonormality=False)
    elif args['--reductor'] == 'traditional':
        from pymor.reductors.coercive import SimpleCoerciveRBReductor
        reductor = SimpleCoerciveRBReductor(fom, product=product, coercivity_estimator=coercivity_estimator,
                                            check_orthonormality=False)
    else:
        assert False  # this should never happen

    if args['--alg'] == 'naive':
        rom, red_summary = reduce_naive(fom=fom, reductor=reductor, basis_size=args['RBSIZE'])
    elif args['--alg'] == 'greedy':
        parallel = not (args['--fenics'] and args['--greedy-without-estimator'])  # cannot pickle FEniCS model
        rom, red_summary = reduce_greedy(fom=fom, reductor=reductor, snapshots_per_block=args['SNAPSHOTS'],
                                         extension_alg_name=args['--extension-alg'],
                                         max_extensions=args['RBSIZE'],
                                         use_estimator=not args['--greedy-without-estimator'],
                                         pool=pool if parallel else None)
    elif args['--alg'] == 'adaptive_greedy':
        parallel = not (args['--fenics'] and args['--greedy-without-estimator'])  # cannot pickle FEniCS model
        rom, red_summary = reduce_adaptive_greedy(fom=fom, reductor=reductor, validation_mus=args['SNAPSHOTS'],
                                                  extension_alg_name=args['--extension-alg'],
                                                  max_extensions=args['RBSIZE'],
                                                  use_estimator=not args['--greedy-without-estimator'],
                                                  rho=args['--adaptive-greedy-rho'],
                                                  gamma=args['--adaptive-greedy-gamma'],
                                                  theta=args['--adaptive-greedy-theta'],
                                                  pool=pool if parallel else None)
    elif args['--alg'] == 'pod':
        rom, red_summary = reduce_pod(fom=fom, reductor=reductor, snapshots_per_block=args['SNAPSHOTS'],
                                      basis_size=args['RBSIZE'])
    else:
        assert False  # this should never happen

    if args['--pickle']:
        print(f"\nWriting reduced model to file {args['--pickle']}_reduced ...")
        with open(args['--pickle'] + '_reduced', 'wb') as f:
            dump(rom, f)
        if not args['--fenics']:  # FEniCS data structures do not support serialization
            print(f"Writing detailed model and reductor to file {args['--pickle']}_detailed ...")
            with open(args['--pickle'] + '_detailed', 'wb') as f:
                dump((fom, reductor), f)

    print('\nSearching for maximum error on random snapshots ...')

    results = reduction_error_analysis(rom,
                                       fom=fom,
                                       reductor=reductor,
                                       estimator=True,
                                       error_norms=(fom.h1_0_semi_norm, fom.l2_norm),
                                       condition=True,
                                       test_mus=args['--test'],
                                       basis_sizes=0 if args['--plot-error-sequence'] else 1,
                                       plot=args['--plot-error-sequence'],
                                       pool=None if args['--fenics'] else pool,  # cannot pickle FEniCS model
                                       random_seed=999)

    print('\n*** RESULTS ***\n')
    print(fom_summary)
    print(red_summary)
    print(results['summary'])
    sys.stdout.flush()

    if args['--plot-error-sequence']:
        import matplotlib.pyplot
        matplotlib.pyplot.show()
    if args['--plot-err']:
        mumax = results['max_error_mus'][0, -1]
        U = fom.solve(mumax)
        URB = reductor.reconstruct(rom.solve(mumax))
        fom.visualize((U, URB, U - URB), legend=('Detailed Solution', 'Reduced Solution', 'Error'),
                      title='Maximum Error Solution', separate_colorbars=True, block=True)

    return results
Example #26
def _write_data(successful):
    assert _current_dataset

    def process_data(v):
        if isinstance(v, list):
            a = np.array(v)
            if a.dtype != object:
                return a
        return v

    data = {k: process_data(v) for k, v in _current_dataset_data.items()}

    with open(os.path.join(_current_dataset, 'DATA'), 'wb') as f:
        try:
            dump(data, f, protocol=-1)
        except PicklingError:
            for kk, vv in data.items():
                try:
                    dump({kk: vv}, f, protocol=-1)
                except PicklingError as e:
                    print(f'could not pickle "{kk}"')
                    raise e

    def get_metadata(v):
        if isinstance(v, np.ndarray):
            return {'type': 'numpy.ndarray',
                    'shape': list(v.shape),
                    'dtype': str(v.dtype)}
        elif isinstance(v, list):
            info = {'len': len(v),
                    'total_elements': 0,
                    'max_depth': 0,
                    'max_len': len(v),
                    'element_types': set()}
            def process_list(l, depth):
                for x in l:
                    if isinstance(x, list):
                        info['max_len'] = max(info['max_len'], len(x))
                        info['max_depth'] = max(info['max_depth'], depth + 1)
                        process_list(x, depth+1)
                    else:
                        info['total_elements'] += 1
                        info['element_types'].add(type(x).__name__)
            process_list(v, 0)
            info['element_types'] = sorted(info['element_types'])
            if info['max_depth']:
                info['type'] = 'list of lists'
                return info
            else:
                return {'type': 'list',
                        'len': len(v),
                        'element_types': info['element_types']}
        else:
            return type(v).__name__

    with open(os.path.join(_current_dataset, 'INDEX'), 'wt') as f:
        yaml.dump({k: get_metadata(v) for k, v in _current_dataset_data.items()}, f)

    durations = {k: [stop - start for start, stop in zip_longest(v, _current_dataset_stop_times[k], fillvalue=0)]
                 for k, v in _current_dataset_start_times.items()}

    with open(os.path.join(_current_dataset, 'TIMES'), 'wt') as f:
        yaml.dump({'start': dict(_current_dataset_start_times),
                   'stop': dict(_current_dataset_stop_times),
                   'duration': durations},
                  f)

    if successful:
        with open(os.path.join(_current_dataset, 'FINISHED'), 'wt') as f:
            yaml.dump(datetime.datetime.now(), f)
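One quirk of the duration computation above: zip_longest(..., fillvalue=0) pairs any start time that lacks a matching stop time with 0, so an unfinished timing shows up as a negative duration. A minimal illustration with hypothetical data:

from itertools import zip_longest

start_times = {'solve': [0.0, 10.0, 20.5]}
stop_times = {'solve': [1.5, 12.0]}    # the last 'solve' never recorded a stop
durations = {k: [stop - start for start, stop in zip_longest(v, stop_times[k], fillvalue=0)]
             for k, v in start_times.items()}
print(durations)   # {'solve': [1.5, 2.0, -20.5]}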
Example #27
def main():
    # command line argument parsing
    ###############################
    import sys
    if len(sys.argv) != 6:
        print(__doc__)
        sys.exit(1)
    MODEL, ALG, SNAPSHOTS, RBSIZE, TEST = sys.argv[1:]
    MODEL, ALG = MODEL.lower(), ALG.lower()
    SNAPSHOTS, RBSIZE, TEST = int(SNAPSHOTS), int(RBSIZE), int(TEST)

    # discretize
    ############
    if MODEL == 'pymor':
        fom = discretize_pymor()
    elif MODEL == 'fenics':
        fom = discretize_fenics()
    elif MODEL == 'ngsolve':
        fom = discretize_ngsolve()
    elif MODEL == 'pymor-text':
        fom = discretize_pymor_text()
    else:
        raise NotImplementedError

    # select reduction algorithm with error estimator
    #################################################
    coercivity_estimator = ExpressionParameterFunctional(
        'min(diffusion)', fom.parameter_type)
    reductor = CoerciveRBReductor(fom,
                                  product=fom.h1_0_semi_product,
                                  coercivity_estimator=coercivity_estimator,
                                  check_orthonormality=False)

    # generate reduced model
    ########################
    if ALG == 'naive':
        rom = reduce_naive(fom, reductor, RBSIZE)
    elif ALG == 'greedy':
        rom = reduce_greedy(fom, reductor, SNAPSHOTS, RBSIZE)
    elif ALG == 'adaptive_greedy':
        rom = reduce_adaptive_greedy(fom, reductor, SNAPSHOTS, RBSIZE)
    elif ALG == 'pod':
        rom = reduce_pod(fom, reductor, SNAPSHOTS, RBSIZE)
    else:
        raise NotImplementedError

    # evaluate the reduction error
    ##############################
    results = reduction_error_analysis(rom,
                                       fom=fom,
                                       reductor=reductor,
                                       estimator=True,
                                       error_norms=[fom.h1_0_semi_norm],
                                       condition=True,
                                       test_mus=TEST,
                                       random_seed=999,
                                       plot=True)

    # show results
    ##############
    print(results['summary'])
    import matplotlib.pyplot
    matplotlib.pyplot.show()

    # write results to disk
    #######################
    from pymor.core.pickle import dump
    dump(rom, open('reduced_model.out', 'wb'))
    results.pop('figure')  # matplotlib figures cannot be serialized
    dump(results, open('results.out', 'wb'))

    # visualize reduction error for worst-approximated mu
    #####################################################
    mumax = results['max_error_mus'][0, -1]
    U = fom.solve(mumax)
    U_RB = reductor.reconstruct(rom.solve(mumax))
    fom.visualize((U, U_RB, U - U_RB),
                  legend=('Detailed Solution', 'Reduced Solution', 'Error'),
                  separate_colorbars=True,
                  block=True)
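The argument parsing above expects exactly five positional arguments (model, algorithm, number of snapshots, basis size, number of test parameters), so a hypothetical invocation looks like this (the script name is an assumption):

    python demo.py pymor greedy 10 20 5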
Example #28
def thermalblock_demo(args):
    args['--grid'] = int(args['--grid'])
    args['RBSIZE'] = int(args['RBSIZE'])
    args['--test'] = int(args['--test'])
    args['--ipython-engines'] = int(args['--ipython-engines'])
    args['--estimator-norm'] = args['--estimator-norm'].lower()
    assert args['--estimator-norm'] in {'trivial', 'h1'}
    args['--extension-alg'] = args['--extension-alg'].lower()
    assert args['--extension-alg'] in {'trivial', 'gram_schmidt', 'h1_gram_schmidt'}
    args['--reductor'] = args['--reductor'].lower()
    assert args['--reductor'] in {'traditional', 'residual_basis'}
    args['--cache-region'] = args['--cache-region'].lower()
    args['--validation-mus'] = int(args['--validation-mus'])
    args['--rho'] = float(args['--rho'])
    args['--gamma'] = float(args['--gamma'])
    args['--theta'] = float(args['--theta'])

    print('Solving on TriaGrid(({0},{0}))'.format(args['--grid']))

    print('Setup Problem ...')
    problem = ThermalBlockProblem(num_blocks=(2, 2))
    functionals = [ExpressionParameterFunctional('diffusion[0]', {'diffusion': (2,)}),
                   ExpressionParameterFunctional('diffusion[1]**2', {'diffusion': (2,)}),
                   ExpressionParameterFunctional('diffusion[0]', {'diffusion': (2,)}),
                   ExpressionParameterFunctional('diffusion[1]', {'diffusion': (2,)})]
    problem = EllipticProblem(domain=problem.domain,
                              diffusion_functions=problem.diffusion_functions,
                              diffusion_functionals=functionals,
                              rhs=problem.rhs,
                              parameter_space=CubicParameterSpace({'diffusion': (2,)}, 0.1, 1.))

    print('Discretize ...')
    discretization, _ = discretize_elliptic_cg(problem, diameter=1. / args['--grid'])

    if args['--list-vector-array']:
        from pymor.playground.discretizers.numpylistvectorarray import convert_to_numpy_list_vector_array
        discretization = convert_to_numpy_list_vector_array(discretization)

    if args['--cache-region'] != 'none':
        discretization.enable_caching(args['--cache-region'])

    print('The parameter type is {}'.format(discretization.parameter_type))

    if args['--plot-solutions']:
        print('Showing some solutions')
        Us = ()
        legend = ()
        for mu in discretization.parameter_space.sample_randomly(2):
            print('Solving for diffusion = \n{} ... '.format(mu['diffusion']))
            sys.stdout.flush()
            Us = Us + (discretization.solve(mu),)
            legend = legend + (str(mu['diffusion']),)
        discretization.visualize(Us, legend=legend, title='Detailed Solutions for different parameters', block=True)

    print('RB generation ...')

    product = discretization.h1_0_semi_product if args['--estimator-norm'] == 'h1' else None
    coercivity_estimator = ExpressionParameterFunctional('min([diffusion[0], diffusion[1]**2])',
                                                         discretization.parameter_type)
    reductors = {'residual_basis': partial(reduce_coercive, product=product,
                                           coercivity_estimator=coercivity_estimator),
                 'traditional': partial(reduce_coercive_simple, product=product,
                                        coercivity_estimator=coercivity_estimator)}
    reductor = reductors[args['--reductor']]
    extension_algorithms = {'trivial': trivial_basis_extension,
                            'gram_schmidt': gram_schmidt_basis_extension,
                            'h1_gram_schmidt': partial(gram_schmidt_basis_extension, product=discretization.h1_0_semi_product)}
    extension_algorithm = extension_algorithms[args['--extension-alg']]

    pool = new_parallel_pool(ipython_num_engines=args['--ipython-engines'], ipython_profile=args['--ipython-profile'])
    greedy_data = adaptive_greedy(discretization, reductor,
                                  validation_mus=args['--validation-mus'], rho=args['--rho'], gamma=args['--gamma'],
                                  theta=args['--theta'],
                                  use_estimator=not args['--without-estimator'], error_norm=discretization.h1_0_semi_norm,
                                  extension_algorithm=extension_algorithm, max_extensions=args['RBSIZE'],
                                  visualize=args['--visualize-refinement'])

    rb_discretization, reconstructor = greedy_data['reduced_discretization'], greedy_data['reconstructor']

    if args['--pickle']:
        print('\nWriting reduced discretization to file {} ...'.format(args['--pickle'] + '_reduced'))
        with open(args['--pickle'] + '_reduced', 'wb') as f:
            dump(rb_discretization, f)
        print('Writing detailed discretization and reconstructor to file {} ...'.format(args['--pickle'] + '_detailed'))
        with open(args['--pickle'] + '_detailed', 'wb') as f:
            dump((discretization, reconstructor), f)

    print('\nSearching for maximum error on random snapshots ...')

    results = reduction_error_analysis(rb_discretization,
                                       discretization=discretization,
                                       reconstructor=reconstructor,
                                       estimator=True,
                                       error_norms=(discretization.h1_0_semi_norm,),
                                       condition=True,
                                       test_mus=args['--test'],
                                       basis_sizes=25 if args['--plot-error-sequence'] else 1,
                                       plot=True,
                                       pool=pool)

    real_rb_size = rb_discretization.solution_space.dim

    print('''
*** RESULTS ***

Problem:
   number of blocks:                   2x2
   h:                                  sqrt(2)/{args[--grid]}

Greedy basis generation:
   estimator disabled:                 {args[--without-estimator]}
   estimator norm:                     {args[--estimator-norm]}
   extension method:                   {args[--extension-alg]}
   prescribed basis size:              {args[RBSIZE]}
   actual basis size:                  {real_rb_size}
   elapsed time:                       {greedy_data[time]}
'''.format(**locals()))
    print(results['summary'])

    sys.stdout.flush()

    if args['--plot-error-sequence']:
        from matplotlib import pyplot as plt
        plt.show()
    if args['--plot-err']:
        mumax = results['max_error_mus'][0, -1]
        U = discretization.solve(mumax)
        URB = reconstructor.reconstruct(rb_discretization.solve(mumax))
        discretization.visualize((U, URB, U - URB), legend=('Detailed Solution', 'Reduced Solution', 'Error'),
                                 title='Maximum Error Solution', separate_colorbars=True, block=True)
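Since the function consumes a docopt-style args mapping, it can also be smoke-tested without a command line by handing it a plain dict. The keys below are exactly those read by the function above; the concrete values are hypothetical:

args = {
    'RBSIZE': '20', '--grid': '100', '--test': '10',
    '--ipython-engines': '0', '--ipython-profile': None,
    '--estimator-norm': 'h1', '--extension-alg': 'gram_schmidt',
    '--reductor': 'residual_basis', '--cache-region': 'none',
    '--validation-mus': '20', '--rho': '1.1', '--gamma': '0.2', '--theta': '0.',
    '--without-estimator': False, '--visualize-refinement': False,
    '--list-vector-array': False, '--pickle': None,
    '--plot-solutions': False, '--plot-error-sequence': False, '--plot-err': False,
}
thermalblock_demo(args)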
Example #29
def thermalblock_demo(args):
    args['XBLOCKS'] = int(args['XBLOCKS'])
    args['YBLOCKS'] = int(args['YBLOCKS'])
    args['--grid'] = int(args['--grid'])
    args['SNAPSHOTS'] = int(args['SNAPSHOTS'])
    args['RBSIZE'] = int(args['RBSIZE'])
    args['--test'] = int(args['--test'])
    args['--estimator-norm'] = args['--estimator-norm'].lower()
    assert args['--estimator-norm'] in {'trivial', 'h1'}
    args['--extension-alg'] = args['--extension-alg'].lower()
    assert args['--extension-alg'] in {'trivial', 'gram_schmidt', 'h1_gram_schmidt'}
    args['--reductor'] = args['--reductor'].lower()
    assert args['--reductor'] in {'traditional', 'residual_basis'}

    print('Solving on TriaGrid(({0},{0}))'.format(args['--grid']))

    print('Setup Problem ...')
    problem = ThermalBlockProblem(num_blocks=(args['XBLOCKS'], args['YBLOCKS']))

    print('Discretize ...')
    discretization, _ = discretize_elliptic_cg(problem, diameter=1. / args['--grid'])

    print('The parameter type is {}'.format(discretization.parameter_type))

    if args['--plot-solutions']:
        print('Showing some solutions')
        Us = tuple()
        legend = tuple()
        for mu in discretization.parameter_space.sample_randomly(2):
            print('Solving for diffusion = \n{} ... '.format(mu['diffusion']))
            sys.stdout.flush()
            Us = Us + (discretization.solve(mu),)
            legend = legend + (str(mu['diffusion']),)
        discretization.visualize(Us, legend=legend, title='Detailed Solutions for different parameters', block=True)

    print('RB generation ...')

    error_product = discretization.h1_product if args['--estimator-norm'] == 'h1' else None
    coercivity_estimator = ExpressionParameterFunctional('min(diffusion)', discretization.parameter_type)
    reductors = {'residual_basis': partial(reduce_stationary_coercive, error_product=error_product,
                                           coercivity_estimator=coercivity_estimator),
                 'traditional': partial(reduce_stationary_affine_linear, error_product=error_product,
                                        coercivity_estimator=coercivity_estimator)}
    reductor = reductors[args['--reductor']]
    extension_algorithms = {'trivial': trivial_basis_extension,
                            'gram_schmidt': gram_schmidt_basis_extension,
                            'h1_gram_schmidt': partial(gram_schmidt_basis_extension, product=discretization.h1_product)}
    extension_algorithm = extension_algorithms[args['--extension-alg']]
    greedy_data = greedy(discretization, reductor, discretization.parameter_space.sample_uniformly(args['SNAPSHOTS']),
                         use_estimator=args['--with-estimator'], error_norm=discretization.h1_norm,
                         extension_algorithm=extension_algorithm, max_extensions=args['RBSIZE'])
    rb_discretization, reconstructor = greedy_data['reduced_discretization'], greedy_data['reconstructor']

    if args['--pickle']:
        print('\nWriting reduced discretization to file {} ...'.format(args['--pickle'] + '_reduced'))
        with open(args['--pickle'] + '_reduced', 'wb') as f:
            dump(rb_discretization, f)
        print('Writing detailed discretization and reconstructor to file {} ...'.format(args['--pickle'] + '_detailed'))
        with open(args['--pickle'] + '_detailed', 'wb') as f:
            dump((discretization, reconstructor), f)

    print('\nSearching for maximum error on random snapshots ...')

    def error_analysis(d, rd, rc, mus):
        print('N = {}: '.format(rd.operator.source.dim), end='')
        h1_err_max = -1
        h1_est_max = -1
        cond_max = -1
        for mu in mus:
            print('.', end='')
            sys.stdout.flush()
            u = rd.solve(mu)
            URB = rc.reconstruct(u)
            U = d.solve(mu)
            h1_err = d.h1_norm(U - URB)[0]
            h1_est = rd.estimate(u, mu=mu)
            cond = np.linalg.cond(rd.operator.assemble(mu)._matrix)
            if h1_err > h1_err_max:
                h1_err_max = h1_err
                mumax = mu
            if h1_est > h1_est_max:
                h1_est_max = h1_est
                mu_est_max = mu
            if cond > cond_max:
                cond_max = cond
                cond_max_mu = mu
        print()
        return h1_err_max, mumax, h1_est_max, mu_est_max, cond_max, cond_max_mu

    tic = time.time()

    real_rb_size = len(greedy_data['basis'])
    if args['--plot-error-sequence']:
        N_count = min(real_rb_size - 1, 25)
        Ns = np.linspace(1, real_rb_size, N_count).astype(int)
    else:
        Ns = np.array([real_rb_size])
    rd_rcs = [reduce_to_subbasis(rb_discretization, N, reconstructor)[:2] for N in Ns]
    mus = list(discretization.parameter_space.sample_randomly(args['--test']))

    errs, err_mus, ests, est_mus, conds, cond_mus = zip(*(error_analysis(discretization, rd, rc, mus)
                                                        for rd, rc in rd_rcs))
    h1_err_max = errs[-1]
    mumax = err_mus[-1]
    cond_max = conds[-1]
    cond_max_mu = cond_mus[-1]
    toc = time.time()
    t_est = toc - tic

    print('''
    *** RESULTS ***

    Problem:
       number of blocks:                   {args[XBLOCKS]}x{args[YBLOCKS]}
       h:                                  sqrt(2)/{args[--grid]}

    Greedy basis generation:
       number of snapshots:                {args[SNAPSHOTS]}^({args[XBLOCKS]}x{args[YBLOCKS]})
       used estimator:                     {args[--with-estimator]}
       estimator norm:                     {args[--estimator-norm]}
       extension method:                   {args[--extension-alg]}
       prescribed basis size:              {args[RBSIZE]}
       actual basis size:                  {real_rb_size}
       elapsed time:                       {greedy_data[time]}

    Stochastic error estimation:
       number of samples:                  {args[--test]}
       maximal H1-error:                   {h1_err_max}  (mu = {mumax})
       maximal condition of system matrix: {cond_max}  (mu = {cond_max_mu})
       elapsed time:                       {t_est}
    '''.format(**locals()))

    sys.stdout.flush()

    if args['--plot-error-sequence']:
        plt.semilogy(Ns, errs, Ns, ests)
        plt.legend(('error', 'estimator'))
        plt.show()
    if args['--plot-err']:
        U = discretization.solve(mumax)
        URB = reconstructor.reconstruct(rb_discretization.solve(mumax))
        discretization.visualize((U, URB, U - URB), legend=('Detailed Solution', 'Reduced Solution', 'Error'),
                                 title='Maximum Error Solution', separate_colorbars=True, block=True)
Example #30
def main():
    # command line argument parsing
    ###############################
    import sys
    if len(sys.argv) != 6:
        print(__doc__)
        sys.exit(1)
    MODEL, ALG, SNAPSHOTS, RBSIZE, TEST = sys.argv[1:]
    MODEL, ALG, SNAPSHOTS, RBSIZE, TEST = MODEL.lower(), ALG.lower(), int(SNAPSHOTS), int(RBSIZE), int(TEST)


    # discretize
    ############
    if MODEL == 'pymor':
        d = discretize_pymor()
    elif MODEL == 'fenics':
        d = discretize_fenics()
    else:
        raise NotImplementedError


    # select reduction algorithm with error estimator
    #################################################
    coercivity_estimator = ExpressionParameterFunctional('min(diffusion)', d.parameter_type)
    reductor = partial(reduce_coercive,
                       error_product=d.h1_0_semi_product, coercivity_estimator=coercivity_estimator)


    # generate reduced model
    ########################
    if ALG == 'naive':
        rd, rc = reduce_naive(d, reductor, RBSIZE)
    elif ALG == 'greedy':
        rd, rc = reduce_greedy(d, reductor, SNAPSHOTS, RBSIZE)
    elif ALG == 'adaptive_greedy':
        rd, rc = reduce_adaptive_greedy(d, reductor, SNAPSHOTS, RBSIZE)
    elif ALG == 'pod':
        rd, rc = reduce_pod(d, reductor, SNAPSHOTS, RBSIZE)
    else:
        raise NotImplementedError


    # evaluate the reduction error
    ##############################
    results = reduction_error_analysis(rd, discretization=d, reconstructor=rc, estimator=True,
                                       error_norms=[d.h1_0_semi_norm], condition=True,
                                       test_mus=TEST, random_seed=999, plot=True)


    # show results
    ##############
    print(results['summary'])
    import matplotlib.pyplot
    matplotlib.pyplot.show()


    # write results to disk
    #######################
    from pymor.core.pickle import dump
    dump(rd, open('reduced_model.out', 'wb'))
    results.pop('figure')  # matplotlib figures cannot be serialized
    dump(results, open('results.out', 'wb'))


    # visualize reduction error for worst-approximated mu
    #####################################################
    mumax = results['max_error_mus'][0, -1]
    U = d.solve(mumax)
    U_RB = rc.reconstruct(rd.solve(mumax))
    d.visualize((U, U_RB, U - U_RB), legend=('Detailed Solution', 'Reduced Solution', 'Error'),
                separate_colorbars=True, block=True)
Example #31
def dump_file(k, v):
    filename = 'DATA.' + k + '.0'
    with open(os.path.join(_current_dataset, filename), 'wb') as f:
        dump(v, f, protocol=-1)
    return filename
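A hypothetical companion for reading such a file back; the load import mirrors the dump used above, and _current_dataset is assumed to be the module-level dataset path from the surrounding code:

from pymor.core.pickle import load

def load_file(filename):
    # inverse of dump_file: unpickle the single value stored in the dataset file
    with open(os.path.join(_current_dataset, filename), 'rb') as f:
        return load(f)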