def reduce_adaptive_greedy(d, reductor, validation_mus, extension_alg_name, max_extensions,
                           use_estimator, rho, gamma, theta, pool):

    from pymor.algorithms.adaptivegreedy import adaptive_greedy

    # run greedy
    greedy_data = adaptive_greedy(d, reductor, validation_mus=-validation_mus,
                                  use_estimator=use_estimator, error_norm=d.h1_0_semi_norm,
                                  extension_params={'method': extension_alg_name},
                                  max_extensions=max_extensions,
                                  rho=rho, gamma=gamma, theta=theta, pool=pool)
    rd = greedy_data['rd']

    # generate summary
    real_rb_size = rd.solution_space.dim
    # the validation set consists of `validation_mus` random parameters
    # plus the centers of the adaptive sample set cells
    validation_mus += 1
    summary = '''Adaptive greedy basis generation:
   initial size of validation set: {validation_mus}
   error estimator used:           {use_estimator}
   extension method:               {extension_alg_name}
   prescribed basis size:          {max_extensions}
   actual basis size:              {real_rb_size}
   elapsed time:                   {greedy_data[time]}
'''.format(**locals())

    return rd, summary

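# Illustrative usage sketch (not part of the original demo): `reduce_adaptive_greedy` above is a
# thin wrapper around `adaptive_greedy`, so it can be exercised directly once a discretization,
# a reductor and a worker pool are available. `d`, `reductor` and `pool` are assumptions here,
# standing in for the objects built in `thermalblock_demo` below (e.g. a reductor obtained via
# `partial(reduce_coercive, ...)` and a pool from `new_parallel_pool`); the numeric values are
# example choices, not prescribed defaults.
def _example_reduce_adaptive_greedy(d, reductor, pool):
    # 10 random validation parameters, Gram-Schmidt basis extension, at most 25 extensions
    rd, summary = reduce_adaptive_greedy(d, reductor, validation_mus=10,
                                         extension_alg_name='gram_schmidt', max_extensions=25,
                                         use_estimator=True, rho=1.1, gamma=0.2, theta=0.,
                                         pool=pool)
    print(summary)
    return rd
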
def reduce_adaptive_greedy(d, reductor, validation_mus, extension_alg_name, max_extensions,
                           use_estimator, rho, gamma, theta, pool):

    from pymor.algorithms.basisextension import trivial_basis_extension, gram_schmidt_basis_extension
    from pymor.algorithms.adaptivegreedy import adaptive_greedy

    # choose basis extension algorithm
    if extension_alg_name == 'trivial':
        extension_algorithm = trivial_basis_extension
    elif extension_alg_name == 'gram_schmidt':
        extension_algorithm = gram_schmidt_basis_extension
    elif extension_alg_name == 'h1_gram_schmidt':
        extension_algorithm = partial(gram_schmidt_basis_extension, product=d.h1_0_semi_product)
    else:
        assert False

    # run greedy
    greedy_data = adaptive_greedy(d, reductor, validation_mus=-validation_mus,
                                  use_estimator=use_estimator, error_norm=d.h1_0_semi_norm,
                                  extension_algorithm=extension_algorithm, max_extensions=max_extensions,
                                  rho=rho, gamma=gamma, theta=theta, pool=pool)
    rd, rc = greedy_data['reduced_discretization'], greedy_data['reconstructor']

    # generate summary
    real_rb_size = rd.solution_space.dim
    # the validation set consists of `validation_mus` random parameters
    # plus the centers of the adaptive sample set cells
    validation_mus += 1
    summary = '''Adaptive greedy basis generation:
   initial size of validation set: {validation_mus}
   error estimator used:           {use_estimator}
   extension method:               {extension_alg_name}
   prescribed basis size:          {max_extensions}
   actual basis size:              {real_rb_size}
   elapsed time:                   {greedy_data[time]}
'''.format(**locals())

    return rd, rc, summary

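# Illustrative usage sketch for the reconstructor-based variant above (not part of the original
# file): the returned reconstructor `rc` lifts reduced solutions back into the detailed solution
# space, which allows a direct error computation. `d` and `mu` are assumptions standing in for a
# detailed discretization and a parameter from its parameter space.
def _example_reconstruction_error(d, rd, rc, mu):
    U = d.solve(mu)                        # detailed solution
    u_rb = rd.solve(mu)                    # reduced coefficient vector
    U_rb = rc.reconstruct(u_rb)            # lift back to the detailed solution space
    return d.h1_0_semi_norm(U - U_rb)[0]   # H^1-seminorm of the reduction error
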
def thermalblock_demo(args):
    args['--grid'] = int(args['--grid'])
    args['RBSIZE'] = int(args['RBSIZE'])
    args['--test'] = int(args['--test'])
    args['--ipython-engines'] = int(args['--ipython-engines'])
    args['--estimator-norm'] = args['--estimator-norm'].lower()
    assert args['--estimator-norm'] in {'trivial', 'h1'}
    args['--extension-alg'] = args['--extension-alg'].lower()
    assert args['--extension-alg'] in {'trivial', 'gram_schmidt', 'h1_gram_schmidt'}
    args['--reductor'] = args['--reductor'].lower()
    assert args['--reductor'] in {'traditional', 'residual_basis'}
    args['--cache-region'] = args['--cache-region'].lower()
    args['--validation-mus'] = int(args['--validation-mus'])
    args['--rho'] = float(args['--rho'])
    args['--gamma'] = float(args['--gamma'])
    args['--theta'] = float(args['--theta'])

    print('Solving on TriaGrid(({0},{0}))'.format(args['--grid']))

    print('Setup Problem ...')
    problem = ThermalBlockProblem(num_blocks=(2, 2))
    functionals = [ExpressionParameterFunctional('diffusion[0]', {'diffusion': (2,)}),
                   ExpressionParameterFunctional('diffusion[1]**2', {'diffusion': (2,)}),
                   ExpressionParameterFunctional('diffusion[0]', {'diffusion': (2,)}),
                   ExpressionParameterFunctional('diffusion[1]', {'diffusion': (2,)})]
    problem = EllipticProblem(domain=problem.domain,
                              diffusion_functions=problem.diffusion_functions,
                              diffusion_functionals=functionals,
                              rhs=problem.rhs,
                              parameter_space=CubicParameterSpace({'diffusion': (2,)}, 0.1, 1.))

    print('Discretize ...')
    discretization, _ = discretize_elliptic_cg(problem, diameter=1. / args['--grid'])

    if args['--list-vector-array']:
        from pymor.playground.discretizers.numpylistvectorarray import convert_to_numpy_list_vector_array
        discretization = convert_to_numpy_list_vector_array(discretization)

    if args['--cache-region'] != 'none':
        discretization.enable_caching(args['--cache-region'])

    print('The parameter type is {}'.format(discretization.parameter_type))

    if args['--plot-solutions']:
        print('Showing some solutions')
        Us = ()
        legend = ()
        for mu in discretization.parameter_space.sample_randomly(2):
            print('Solving for diffusion = \n{} ... '.format(mu['diffusion']))
            sys.stdout.flush()
            Us = Us + (discretization.solve(mu),)
            legend = legend + (str(mu['diffusion']),)
        discretization.visualize(Us, legend=legend,
                                 title='Detailed Solutions for different parameters', block=True)

    print('RB generation ...')

    product = discretization.h1_0_semi_product if args['--estimator-norm'] == 'h1' else None
    coercivity_estimator = ExpressionParameterFunctional('min([diffusion[0], diffusion[1]**2])',
                                                         discretization.parameter_type)
    reductors = {'residual_basis': partial(reduce_coercive, product=product,
                                           coercivity_estimator=coercivity_estimator),
                 'traditional': partial(reduce_coercive_simple, product=product,
                                        coercivity_estimator=coercivity_estimator)}
    reductor = reductors[args['--reductor']]

    extension_algorithms = {'trivial': trivial_basis_extension,
                            'gram_schmidt': gram_schmidt_basis_extension,
                            'h1_gram_schmidt': partial(gram_schmidt_basis_extension,
                                                       product=discretization.h1_0_semi_product)}
    extension_algorithm = extension_algorithms[args['--extension-alg']]

    pool = new_parallel_pool(ipython_num_engines=args['--ipython-engines'],
                             ipython_profile=args['--ipython-profile'])
    greedy_data = adaptive_greedy(discretization, reductor, validation_mus=args['--validation-mus'],
                                  rho=args['--rho'], gamma=args['--gamma'], theta=args['--theta'],
                                  use_estimator=not args['--without-estimator'],
                                  error_norm=discretization.h1_0_semi_norm,
                                  extension_algorithm=extension_algorithm,
                                  max_extensions=args['RBSIZE'],
                                  visualize=args['--visualize-refinement'])

    rb_discretization, reconstructor = greedy_data['reduced_discretization'], greedy_data['reconstructor']

    if args['--pickle']:
        print('\nWriting reduced discretization to file {} ...'.format(args['--pickle'] + '_reduced'))
        with open(args['--pickle'] + '_reduced', 'wb') as f:
            dump(rb_discretization, f)
        print('Writing detailed discretization and reconstructor to file {} ...'
              .format(args['--pickle'] + '_detailed'))
        with open(args['--pickle'] + '_detailed', 'wb') as f:
            dump((discretization, reconstructor), f)

    print('\nSearching for maximum error on random snapshots ...')

    results = reduction_error_analysis(rb_discretization,
                                       discretization=discretization,
                                       reconstructor=reconstructor,
                                       estimator=True,
                                       error_norms=(discretization.h1_0_semi_norm,),
                                       condition=True,
                                       test_mus=args['--test'],
                                       basis_sizes=25 if args['--plot-error-sequence'] else 1,
                                       plot=True,
                                       pool=pool)

    real_rb_size = rb_discretization.solution_space.dim

    print('''
*** RESULTS ***

Problem:
   number of blocks:       2x2
   h:                      sqrt(2)/{args[--grid]}

Greedy basis generation:
   estimator disabled:     {args[--without-estimator]}
   estimator norm:         {args[--estimator-norm]}
   extension method:       {args[--extension-alg]}
   prescribed basis size:  {args[RBSIZE]}
   actual basis size:      {real_rb_size}
   elapsed time:           {greedy_data[time]}
'''.format(**locals()))
    print(results['summary'])

    sys.stdout.flush()

    if args['--plot-error-sequence']:
        from matplotlib import pyplot as plt
        plt.show(results['figure'])
    if args['--plot-err']:
        mumax = results['max_error_mus'][0, -1]
        U = discretization.solve(mumax)
        URB = reconstructor.reconstruct(rb_discretization.solve(mumax))
        discretization.visualize((U, URB, U - URB),
                                 legend=('Detailed Solution', 'Reduced Solution', 'Error'),
                                 title='Maximum Error Solution', separate_colorbars=True,
                                 block=True)

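# Illustrative driver sketch (not part of the original script): `thermalblock_demo` expects a
# docopt-style argument dictionary, so it can also be called directly with hand-built arguments.
# The keys mirror the options accessed above; the concrete values below are assumptions chosen
# for a small test run.
def _example_run_demo():
    args = {'--grid': '20', 'RBSIZE': '10', '--test': '5',
            '--ipython-engines': '0', '--ipython-profile': None,
            '--estimator-norm': 'h1', '--extension-alg': 'gram_schmidt',
            '--reductor': 'residual_basis', '--cache-region': 'none',
            '--validation-mus': '3', '--rho': '1.1', '--gamma': '0.2', '--theta': '0.',
            '--list-vector-array': False, '--plot-solutions': False,
            '--plot-error-sequence': False, '--plot-err': False,
            '--visualize-refinement': False, '--without-estimator': False,
            '--pickle': None}
    thermalblock_demo(args)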