def reduce_adaptive_greedy(fom, reductor, validation_mus, extension_alg_name, max_extensions,
                           use_estimator, rho, gamma, theta, pool):

    from pymor.algorithms.adaptivegreedy import rb_adaptive_greedy

    # run greedy
    greedy_data = rb_adaptive_greedy(fom, reductor,
                                     validation_mus=-validation_mus,
                                     use_estimator=use_estimator, error_norm=fom.h1_0_semi_norm,
                                     extension_params={'method': extension_alg_name},
                                     max_extensions=max_extensions,
                                     rho=rho, gamma=gamma, theta=theta, pool=pool)
    rom = greedy_data['rom']

    # generate summary
    real_rb_size = rom.solution_space.dim
    # the validation set consists of `validation_mus` random parameters plus the centers of the
    # adaptive sample set cells
    validation_mus += 1
    summary = f'''Adaptive greedy basis generation:
   initial size of validation set:  {validation_mus}
   error estimator used:            {use_estimator}
   extension method:                {extension_alg_name}
   prescribed basis size:           {max_extensions}
   actual basis size:               {real_rb_size}
   elapsed time:                    {greedy_data["time"]}
'''
    return rom, summary
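
# A minimal usage sketch for `reduce_adaptive_greedy` (not part of the demo itself): it assumes
# `fom`, `reductor` and `pool` have been set up as in `thermalblock_demo` below; all other
# values are illustrative, not prescribed defaults.
#
#     rom, summary = reduce_adaptive_greedy(
#         fom, reductor, validation_mus=10, extension_alg_name='gram_schmidt',
#         max_extensions=25, use_estimator=True, rho=1.1, gamma=0.2, theta=0., pool=pool)
#     print(summary)
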
def thermalblock_demo(args):
    args['--grid'] = int(args['--grid'])
    args['RBSIZE'] = int(args['RBSIZE'])
    args['--test'] = int(args['--test'])
    args['--ipython-engines'] = int(args['--ipython-engines'])
    args['--extension-alg'] = args['--extension-alg'].lower()
    assert args['--extension-alg'] in {'trivial', 'gram_schmidt'}
    args['--product'] = args['--product'].lower()
    assert args['--product'] in {'trivial', 'h1'}
    args['--reductor'] = args['--reductor'].lower()
    assert args['--reductor'] in {'traditional', 'residual_basis'}
    args['--cache-region'] = args['--cache-region'].lower()
    args['--validation-mus'] = int(args['--validation-mus'])
    args['--rho'] = float(args['--rho'])
    args['--gamma'] = float(args['--gamma'])
    args['--theta'] = float(args['--theta'])

    problem = thermal_block_problem(num_blocks=(2, 2))
    functionals = [ExpressionParameterFunctional('diffusion[0]', {'diffusion': 2}),
                   ExpressionParameterFunctional('diffusion[1]**2', {'diffusion': 2}),
                   ExpressionParameterFunctional('diffusion[0]', {'diffusion': 2}),
                   ExpressionParameterFunctional('diffusion[1]', {'diffusion': 2})]
    problem = problem.with_(
        diffusion=problem.diffusion.with_(coefficients=functionals),
    )

    print('Discretize ...')
    fom, _ = discretize_stationary_cg(problem, diameter=1. / args['--grid'])

    if args['--list-vector-array']:
        from pymor.discretizers.builtin.list import convert_to_numpy_list_vector_array
        fom = convert_to_numpy_list_vector_array(fom)

    if args['--cache-region'] != 'none':
        # building a cache_id is only needed for persistent CacheRegions
        cache_id = f"pymordemos.thermalblock_adaptive {args['--grid']}"
        fom.enable_caching(args['--cache-region'], cache_id)

    if args['--plot-solutions']:
        print('Showing some solutions')
        Us = ()
        legend = ()
        for mu in problem.parameter_space.sample_randomly(2):
            print(f"Solving for diffusion = \n{mu['diffusion']} ... ")
            sys.stdout.flush()
            Us = Us + (fom.solve(mu),)
            legend = legend + (str(mu['diffusion']),)
        fom.visualize(Us, legend=legend, title='Detailed Solutions for different parameters',
                      block=True)

    print('RB generation ...')

    product = fom.h1_0_semi_product if args['--product'] == 'h1' else None
    coercivity_estimator = ExpressionParameterFunctional('min([diffusion[0], diffusion[1]**2])',
                                                         fom.parameters)
    reductors = {'residual_basis': CoerciveRBReductor(fom, product=product,
                                                      coercivity_estimator=coercivity_estimator),
                 'traditional': SimpleCoerciveRBReductor(fom, product=product,
                                                         coercivity_estimator=coercivity_estimator)}
    reductor = reductors[args['--reductor']]

    pool = new_parallel_pool(ipython_num_engines=args['--ipython-engines'],
                             ipython_profile=args['--ipython-profile'])
    greedy_data = rb_adaptive_greedy(
        fom, reductor, problem.parameter_space,
        validation_mus=args['--validation-mus'],
        rho=args['--rho'],
        gamma=args['--gamma'],
        theta=args['--theta'],
        use_estimator=not args['--without-estimator'],
        error_norm=fom.h1_0_semi_norm,
        max_extensions=args['RBSIZE'],
        visualize=not args['--no-visualize-refinement'])

    rom = greedy_data['rom']

    if args['--pickle']:
        print(f"\nWriting reduced model to file {args['--pickle']}_reduced ...")
        with open(args['--pickle'] + '_reduced', 'wb') as f:
            dump(rom, f)
        print(f"Writing detailed model and reductor to file {args['--pickle']}_detailed ...")
        with open(args['--pickle'] + '_detailed', 'wb') as f:
            dump((fom, reductor), f)

    print('\nSearching for maximum error on random snapshots ...')

    results = reduction_error_analysis(rom,
                                       fom=fom,
                                       reductor=reductor,
                                       estimator=True,
                                       error_norms=(fom.h1_0_semi_norm,),
                                       condition=True,
                                       test_mus=problem.parameter_space.sample_randomly(args['--test']),
                                       basis_sizes=25 if args['--plot-error-sequence'] else 1,
                                       plot=True,
                                       pool=pool)

    real_rb_size = rom.solution_space.dim

    print('''
*** RESULTS ***

Problem:
   number of blocks:        2x2
   h:                       sqrt(2)/{args[--grid]}

Greedy basis generation:
   estimator disabled:      {args[--without-estimator]}
   extension method:        {args[--extension-alg]}
   product:                 {args[--product]}
   prescribed basis size:   {args[RBSIZE]}
   actual basis size:       {real_rb_size}
   elapsed time:            {greedy_data[time]}
'''.format(**locals()))
    print(results['summary'])
    sys.stdout.flush()

    if args['--plot-error-sequence']:
        from matplotlib import pyplot as plt
        plt.show(results['figure'])
    if args['--plot-err']:
        mumax = results['max_error_mus'][0, -1]
        U = fom.solve(mumax)
        URB = reductor.reconstruct(rom.solve(mumax))
        fom.visualize((U, URB, U - URB),
                      legend=('Detailed Solution', 'Reduced Solution', 'Error'),
                      title='Maximum Error Solution',
                      separate_colorbars=True, block=True)
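
# Sketch of how the docopt-style entry point above might be driven directly (the args dict is
# hypothetical; with docopt, option values arrive as strings and flags as booleans, which is
# why the function converts them itself):
#
#     args = {'--grid': '100', 'RBSIZE': '25', '--test': '10', '--ipython-engines': '0',
#             '--extension-alg': 'gram_schmidt', '--product': 'h1', '--reductor': 'residual_basis',
#             '--cache-region': 'none', '--validation-mus': '0', '--rho': '1.1', '--gamma': '0.2',
#             '--theta': '0.', '--without-estimator': False, '--list-vector-array': False,
#             '--plot-solutions': False, '--plot-error-sequence': False, '--plot-err': False,
#             '--no-visualize-refinement': False, '--pickle': None, '--ipython-profile': None}
#     thermalblock_demo(args)
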
def main(
    rbsize: int = Argument(..., help='Size of the reduced basis.'),

    cache_region: Choices('none memory disk persistent') = Option(
        'none',
        help='Name of cache region to use for caching solution snapshots.'
    ),
    error_estimator: bool = Option(True, help='Use error estimator for basis generation.'),
    gamma: float = Option(0.2, help='Weight factor for age penalty term in refinement indicators.'),
    grid: int = Option(100, help='Use grid with 2*NI*NI elements.'),
    ipython_engines: int = Option(
        0,
        help='If positive, the number of IPython cluster engines to use for parallel greedy search. '
             'If zero, no parallelization is performed.'
    ),
    ipython_profile: str = Option(None, help='IPython profile to use for parallelization.'),
    list_vector_array: bool = Option(
        False,
        help='Solve using ListVectorArray[NumpyVector] instead of NumpyVectorArray.'
    ),
    pickle: str = Option(
        None,
        help='Pickle reduced discretization, as well as reductor and high-dimensional model '
             'to files with this prefix.'
    ),
    plot_err: bool = Option(False, help='Plot error.'),
    plot_solutions: bool = Option(False, help='Plot some example solutions.'),
    plot_error_sequence: bool = Option(False, help='Plot reduction error vs. basis size.'),
    product: Choices('euclidean h1') = Option(
        'h1',
        help='Product w.r.t. which to orthonormalize and calculate Riesz representatives.'
    ),
    reductor: Choices('traditional residual_basis') = Option(
        'residual_basis',
        help='Reductor (error estimator) to choose (traditional, residual_basis).'
    ),
    rho: float = Option(1.1, help='Maximum allowed ratio between error on validation set and on training set.'),
    test: int = Option(10, help='Use COUNT snapshots for stochastic error estimation.'),
    theta: float = Option(0., help='Ratio of elements to refine.'),
    validation_mus: int = Option(0, help='Size of validation set.'),
    visualize_refinement: bool = Option(True, help='Visualize the training set refinement indicators.'),
):
    """Modified thermalblock demo using adaptive greedy basis generation algorithm."""
    problem = thermal_block_problem(num_blocks=(2, 2))
    functionals = [ExpressionParameterFunctional('diffusion[0]', {'diffusion': 2}),
                   ExpressionParameterFunctional('diffusion[1]**2', {'diffusion': 2}),
                   ExpressionParameterFunctional('diffusion[0]', {'diffusion': 2}),
                   ExpressionParameterFunctional('diffusion[1]', {'diffusion': 2})]

    problem = problem.with_(
        diffusion=problem.diffusion.with_(coefficients=functionals),
    )

    print('Discretize ...')
    fom, _ = discretize_stationary_cg(problem, diameter=1. / grid)

    if list_vector_array:
        from pymor.discretizers.builtin.list import convert_to_numpy_list_vector_array
        fom = convert_to_numpy_list_vector_array(fom)

    if cache_region != 'none':
        # building a cache_id is only needed for persistent CacheRegions
        cache_id = f"pymordemos.thermalblock_adaptive {grid}"
        fom.enable_caching(cache_region.value, cache_id)

    if plot_solutions:
        print('Showing some solutions')
        Us = ()
        legend = ()
        for mu in problem.parameter_space.sample_randomly(2):
            print(f"Solving for diffusion = \n{mu['diffusion']} ... ")
            sys.stdout.flush()
            Us = Us + (fom.solve(mu),)
            legend = legend + (str(mu['diffusion']),)
        fom.visualize(Us, legend=legend, title='Detailed Solutions for different parameters',
                      block=True)

    print('RB generation ...')

    product_op = fom.h1_0_semi_product if product == 'h1' else None
    coercivity_estimator = ExpressionParameterFunctional('min([diffusion[0], diffusion[1]**2])',
                                                         fom.parameters)
    reductors = {'residual_basis': CoerciveRBReductor(fom, product=product_op,
                                                      coercivity_estimator=coercivity_estimator),
                 'traditional': SimpleCoerciveRBReductor(fom, product=product_op,
                                                         coercivity_estimator=coercivity_estimator)}
    reductor = reductors[reductor]

    pool = new_parallel_pool(ipython_num_engines=ipython_engines, ipython_profile=ipython_profile)
    greedy_data = rb_adaptive_greedy(
        fom, reductor, problem.parameter_space,
        validation_mus=validation_mus,
        rho=rho,
        gamma=gamma,
        theta=theta,
        use_error_estimator=error_estimator,
        error_norm=fom.h1_0_semi_norm,
        max_extensions=rbsize,
        visualize=visualize_refinement
    )

    rom = greedy_data['rom']

    if pickle:
        print(f"\nWriting reduced model to file {pickle}_reduced ...")
        with open(pickle + '_reduced', 'wb') as f:
            dump(rom, f)
        print(f"Writing detailed model and reductor to file {pickle}_detailed ...")
        with open(pickle + '_detailed', 'wb') as f:
            dump((fom, reductor), f)

    print('\nSearching for maximum error on random snapshots ...')

    results = reduction_error_analysis(rom,
                                       fom=fom,
                                       reductor=reductor,
                                       error_estimator=True,
                                       error_norms=(fom.h1_0_semi_norm,),
                                       condition=True,
                                       test_mus=problem.parameter_space.sample_randomly(test),
                                       basis_sizes=25 if plot_error_sequence else 1,
                                       plot=True,
                                       pool=pool)

    real_rb_size = rom.solution_space.dim

    print('''
*** RESULTS ***

Problem:
   number of blocks:        2x2
   h:                       sqrt(2)/{grid}

Greedy basis generation:
   error estimator enabled: {error_estimator}
   product:                 {product}
   prescribed basis size:   {rbsize}
   actual basis size:       {real_rb_size}
   elapsed time:            {greedy_data[time]}
'''.format(**locals()))
    print(results['summary'])
    sys.stdout.flush()

    if plot_error_sequence:
        from matplotlib import pyplot as plt
        plt.show()
    if plot_err:
        mumax = results['max_error_mus'][0, -1]
        U = fom.solve(mumax)
        URB = reductor.reconstruct(rom.solve(mumax))
        fom.visualize((U, URB, U - URB),
                      legend=('Detailed Solution', 'Reduced Solution', 'Error'),
                      title='Maximum Error Solution',
                      separate_colorbars=True, block=True)