configfile = "test_elasticity_pcg.conf" config = ExperimentStarter._parse_config(configfile=os.path.join(path, configfile)) # propagate config values for sec in config.keys(): if sec == "LOGGING": continue secconf = config[sec] for key, val in secconf.iteritems(): print "CONF_" + key + "= secconf['" + key + "'] =", secconf[key] exec "CONF_" + key + "= secconf['" + key + "']" # setup logging print "LOG_LEVEL = logging." + config["LOGGING"]["level"] exec "LOG_LEVEL = logging." + config["LOGGING"]["level"] logger = setup_logging(LOG_LEVEL) # save current settings ExperimentStarter._extract_config(globals(), savefile=os.path.join(path, "demo_resest_navierlame-save.conf")) # ============================================================ # PART A: Simulation Options # ============================================================ # flags for residual, projection, new mi refinement REFINEMENT = {"RES":CONF_refine_residual, "PROJ":CONF_refine_projection, "MI":CONF_refine_Lambda} # initial mesh elements initial_mesh_N = CONF_initial_mesh_N
def run_MC(opts, conf):
    # propagate config values
    _G = globals()
    for sec in conf.keys():
        if sec == "LOGGING":
            continue
        secconf = conf[sec]
        for key, val in secconf.iteritems():
            print "CONF_" + key + "= secconf['" + key + "'] =", secconf[key]
            _G["CONF_" + key] = secconf[key]
    # setup logging
    _G["LOG_LEVEL"] = eval("logging." + conf["LOGGING"]["level"])
    print "LOG_LEVEL = logging." + conf["LOGGING"]["level"]
    setup_logging(LOG_LEVEL, logfile=CONF_experiment_name + "_MC-P{0}".format(CONF_FEM_degree))

    # determine path of this module
    path = os.path.dirname(__file__)

    # ============================================================
    # PART A: Setup Problem
    # ============================================================

    # get boundaries
    mesh0, boundaries, dim = SampleDomain.setupDomain(CONF_domain, initial_mesh_N=CONF_initial_mesh_N)

    # define coefficient field
    coeff_types = ("EF-square-cos", "EF-square-sin", "monomials", "constant")
    from itertools import count
    if CONF_mu is not None:
        muparam = (CONF_mu, (0 for _ in count()))
    else:
        muparam = None
    coeff_field = SampleProblem.setupCF(coeff_types[CONF_coeff_type], decayexp=CONF_decay_exp, gamma=CONF_gamma,
                                        freqscale=CONF_freq_scale, freqskip=CONF_freq_skip, rvtype="uniform",
                                        scale=CONF_coeff_scale, secondparam=muparam)

    # setup boundary conditions and pde
    # initial_mesh_N = CONF_initial_mesh_N
    pde, Dirichlet_boundary, uD, Neumann_boundary, g, f = SampleProblem.setupPDE(CONF_boundary_type, CONF_domain,
                                                                                 CONF_problem_type, boundaries, coeff_field)

    # define multioperator
    A = MultiOperator(coeff_field, pde.assemble_operator, pde.assemble_operator_inner_dofs)

    # ============================================================
    # PART B: Import Solution
    # ============================================================
    import pickle
    PATH_SOLUTION = os.path.join(opts.basedir, CONF_experiment_name)
    FILE_SOLUTION = 'SFEM2-SOLUTIONS-P{0}.pkl'.format(CONF_FEM_degree)
    FILE_STATS = 'SIM2-STATS-P{0}.pkl'.format(CONF_FEM_degree)

    print "LOADING solutions from %s" % os.path.join(PATH_SOLUTION, FILE_SOLUTION)
    logger.info("LOADING solutions from %s" % os.path.join(PATH_SOLUTION, FILE_SOLUTION))
    # load solutions
    with open(os.path.join(PATH_SOLUTION, FILE_SOLUTION), 'rb') as fin:
        w_history = pickle.load(fin)
    # load simulation data
    logger.info("LOADING statistics from %s" % os.path.join(PATH_SOLUTION, FILE_STATS))
    with open(os.path.join(PATH_SOLUTION, FILE_STATS), 'rb') as fin:
        sim_stats = pickle.load(fin)
    logger.info("active indices of w after initialisation: %s", w_history[-1].active_indices())

    # ============================================================
    # PART C: MC Error Sampling
    # ============================================================

    # determine reference setting
    ref_mesh, ref_Lambda = generate_reference_setup(PATH_SOLUTION)

    MC_N = CONF_N
    MC_HMAX = CONF_maxh
    if CONF_runs > 0:
        # determine reference mesh
        w = w_history[-1]
        # ref_mesh = w.basis.basis.mesh
        for _ in range(CONF_ref_mesh_refine):
            ref_mesh = refine(ref_mesh)

    # TODO: the following association with the sampling order does not make too much sense...
    ref_maxm = CONF_sampling_order if CONF_sampling_order > 0 else max(len(mu) for mu in ref_Lambda) + CONF_sampling_order_increase
    stored_rv_samples = []
    for i, w in enumerate(w_history):
        # if i == 0:
        #     continue

        # memory usage info
        import resource
        logger.info("\n======================================\nMEMORY USED: "
                    + str(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss)
                    + "\n======================================\n")
        logger.info("================>>> MC error sampling for w[%i] (of %i) on %i cells with maxm %i <<<================"
                    % (i, len(w_history), ref_mesh.num_cells(), ref_maxm))
        MC_start = 0
        old_stats = sim_stats[i]
        if opts.continueMC:
            try:
                MC_start = sim_stats[i]["MC-N"]
                logger.info("CONTINUING MC of %s for solution (iteration) %s of %s", PATH_SOLUTION, i, len(w_history))
            except:
                logger.info("STARTING MC of %s for solution (iteration) %s of %s", PATH_SOLUTION, i, len(w_history))
        if MC_start <= 0:
            sim_stats[i]["MC-N"] = 0
            sim_stats[i]["MC-ERROR-L2"] = 0
            sim_stats[i]["MC-ERROR-H1A"] = 0
            # sim_stats[i]["MC-ERROR-L2_a0"] = 0
            # sim_stats[i]["MC-ERROR-H1_a0"] = 0

        MC_RUNS = max(CONF_runs - MC_start, 0)
        if MC_RUNS > 0:
            logger.info("STARTING %s MC RUNS", MC_RUNS)
            # L2err, H1err, L2err_a0, H1err_a0, N = sample_error_mc(w, pde, A, coeff_field, mesh0, ref_maxm, MC_RUNS, MC_N, MC_HMAX)
            L2err, H1err, L2err_a0, H1err_a0, N = sample_error_mc(w, pde, A, coeff_field, ref_mesh, ref_maxm, MC_RUNS,
                                                                  MC_N, MC_HMAX, stored_rv_samples, CONF_quadrature_degree)
            # combine current and previous results
            sim_stats[i]["MC-N"] = N + old_stats["MC-N"]
            sim_stats[i]["MC-ERROR-L2"] = (L2err * N + old_stats["MC-ERROR-L2"]) / sim_stats[i]["MC-N"]
            sim_stats[i]["MC-ERROR-H1A"] = (H1err * N + old_stats["MC-ERROR-H1A"]) / sim_stats[i]["MC-N"]
            # sim_stats[i]["MC-ERROR-L2_a0"] = (L2err_a0 * N + old_stats["MC-ERRORL2_a0"]) / sim_stats[i]["MC-N"]
            # sim_stats[i]["MC-ERROR-H1A_a0"] = (H1err_a0 * N + old_stats["MC-ERROR-H1A_a0"]) / sim_stats[i]["MC-N"]
            print "MC-ERROR-H1A (N:%i) = %f" % (sim_stats[i]["MC-N"], sim_stats[i]["MC-ERROR-H1A"])
        else:
            logger.info("SKIPPING MC RUN since sufficiently many samples are available")

    # ============================================================
    # PART D: Export Updated Data and Plotting
    # ============================================================

    # save updated data
    if opts.saveData:
        # save updated statistics
        print "SAVING statistics into %s" % os.path.join(PATH_SOLUTION, FILE_STATS)
        print sim_stats[-1].keys()
        logger.info("SAVING statistics into %s" % os.path.join(PATH_SOLUTION, FILE_STATS))
        with open(os.path.join(PATH_SOLUTION, FILE_STATS), 'wb') as fout:
            pickle.dump(sim_stats, fout)

    # plot residuals
    if opts.plotEstimator and len(sim_stats) > 1:
        try:
            from matplotlib.pyplot import figure, show, legend
            X = [s["DOFS"] for s in sim_stats]
            err_L2 = [s["MC-ERROR-L2"] for s in sim_stats]
            err_H1A = [s["MC-ERROR-H1A"] for s in sim_stats]
            err_est = [s["ERROR-EST"] for s in sim_stats]
            err_res = [s["ERROR-RES"] for s in sim_stats]
            err_tail = [s["ERROR-TAIL"] for s in sim_stats]
            mi = [s["MI"] for s in sim_stats]
            num_mi = [len(m) for m in mi]
            eff_H1A = [est / err for est, err in zip(err_est, err_H1A)]

            # --------
            # figure 1
            # --------
            fig1 = figure()
            fig1.suptitle("residual estimator")
            ax = fig1.add_subplot(111)
            if REFINEMENT["TAIL"]:
                ax.loglog(X, num_mi, '--y+', label='active mi')
            ax.loglog(X, eff_H1A, '--yo', label='efficiency')
            ax.loglog(X, err_L2, '-.b>', label='L2 error')
            ax.loglog(X, err_H1A, '-.r>', label='H1A error')
            ax.loglog(X, err_est, '-g<', label='error estimator')
            ax.loglog(X, err_res, '-.cx', label='residual')
            ax.loglog(X, err_tail, '-.m>', label='tail')
            legend(loc='upper right')
            print "error L2", err_L2
            print "error H1A", err_H1A
            print "EST", err_est
            print "RES", err_res
            print "TAIL", err_tail
            show()  # this invalidates the figure instances...
        except:
            import traceback
            print traceback.format_exc()
            logger.info("skipped plotting since matplotlib is not available...")
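# NOTE: illustrative only (not part of the original code, values are made up) -- the plotting
# in PART D above consumes sim_stats as a list with one dict per refinement iteration; the
# keys it reads are those written by the adaptive solver and the MC sampling loop:
#
#   sim_stats_example = [
#       {"DOFS": 125, "MC-N": 100, "MC-ERROR-L2": 3.1e-3, "MC-ERROR-H1A": 4.2e-2,
#        "ERROR-EST": 5.0e-2, "ERROR-RES": 3.5e-2, "ERROR-TAIL": 1.5e-2, "MI": [mu0, mu1]},
#       {"DOFS": 480, "MC-N": 100, "MC-ERROR-L2": 1.2e-3, "MC-ERROR-H1A": 2.3e-2,
#        "ERROR-EST": 2.8e-2, "ERROR-RES": 2.0e-2, "ERROR-TAIL": 0.8e-2, "MI": [mu0, mu1, mu2]},
#   ]
#
# where each "MI" entry is the list of active multiindices of that iteration (only its length
# is used for plotting).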
def run_SFEM(opts, conf):
    # propagate config values
    _G = globals()
    for sec in conf.keys():
        if sec == "LOGGING":
            continue
        secconf = conf[sec]
        for key, val in secconf.iteritems():
            print "CONF_" + key + "= secconf['" + key + "'] =", secconf[key]
            _G["CONF_" + key] = secconf[key]
    # setup logging
    _G["LOG_LEVEL"] = eval("logging." + conf["LOGGING"]["level"])
    print "LOG_LEVEL = logging." + conf["LOGGING"]["level"]
    setup_logging(LOG_LEVEL, logfile=CONF_experiment_name + "_SFEM-P{0}".format(CONF_FEM_degree))

    # determine path of this module
    path = os.path.dirname(__file__)

    # ============================================================
    # PART A: Simulation Options
    # ============================================================

    # flags for residual and tail refinement
    REFINEMENT = {"RES": CONF_refine_residual, "TAIL": CONF_refine_tail, "OSC": CONF_refine_osc}

    # ============================================================
    # PART B: Problem Setup
    # ============================================================

    # define initial multiindices
    mis = [Multiindex(mis) for mis in MultiindexSet.createCompleteOrderSet(CONF_initial_Lambda, 1)]

    # setup domain and meshes
    mesh0, boundaries, dim = SampleDomain.setupDomain(CONF_domain, initial_mesh_N=CONF_initial_mesh_N)
    # meshes = SampleProblem.setupMesh(mesh0, num_refine=10, randref=(0.4, 0.3))
    mesh0 = SampleProblem.setupMesh(mesh0, num_refine=0)

    # define coefficient field
    # NOTE: for proper treatment of corner points, see elasticity_residual_estimator
    coeff_types = ("EF-square-cos", "EF-square-sin", "monomials", "constant")
    from itertools import count
    if CONF_mu is not None:
        muparam = (CONF_mu, (0 for _ in count()))
    else:
        muparam = None
    coeff_field = SampleProblem.setupCF(coeff_types[CONF_coeff_type], decayexp=CONF_decay_exp, gamma=CONF_gamma,
                                        freqscale=CONF_freq_scale, freqskip=CONF_freq_skip, rvtype="uniform",
                                        scale=CONF_coeff_scale, secondparam=muparam)

    # setup boundary conditions and pde
    pde, Dirichlet_boundary, uD, Neumann_boundary, g, f = SampleProblem.setupPDE(CONF_boundary_type, CONF_domain,
                                                                                 CONF_problem_type, boundaries, coeff_field)

    # define multioperator
    A = MultiOperator(coeff_field, pde.assemble_operator, pde.assemble_operator_inner_dofs)

    # setup initial solution multivector
    w = SampleProblem.setupMultiVector(mis, pde, mesh0, CONF_FEM_degree)
    logger.info("active indices of w after initialisation: %s", w.active_indices())
    sim_stats = None
    w_history = []

    PATH_SOLUTION = os.path.join(opts.basedir, CONF_experiment_name)
    try:
        os.makedirs(PATH_SOLUTION)
    except:
        pass
    FILE_SOLUTION = 'SFEM2-SOLUTIONS-P{0}.pkl'.format(CONF_FEM_degree)
    FILE_STATS = 'SIM2-STATS-P{0}.pkl'.format(CONF_FEM_degree)

    if opts.continueSFEM:
        try:
            logger.info("CONTINUING EXPERIMENT: loading previous data of %s...", CONF_experiment_name)
            import pickle
            logger.info("loading solutions from %s" % os.path.join(PATH_SOLUTION, FILE_SOLUTION))
            # load solutions
            with open(os.path.join(PATH_SOLUTION, FILE_SOLUTION), 'rb') as fin:
                w_history = pickle.load(fin)
            # convert to MultiVectorSharedBasis
            for i, mv in enumerate(w_history):
                w_history[i] = MultiVectorSharedBasis(multivector=w_history[i])
            # load simulation data
            logger.info("loading statistics from %s" % os.path.join(PATH_SOLUTION, FILE_STATS))
            with open(os.path.join(PATH_SOLUTION, FILE_STATS), 'rb') as fin:
                sim_stats = pickle.load(fin)
            logger.info("active indices of w after initialisation: %s", w_history[-1].active_indices())
            w0 = w_history[-1]
        except:
            logger.warn("FAILED LOADING EXPERIMENT %s --- STARTING NEW DATA", CONF_experiment_name)
            w0 = w
    else:
        w0 = w
    # ============================================================
    # PART C: Adaptive Algorithm
    # ============================================================

    # refinement loop
    # ===============
    w, sim_stats = AdaptiveSolver(A, coeff_field, pde, mis, w0, mesh0, CONF_FEM_degree,
                                  # marking parameters
                                  rho=CONF_rho,  # tail factor
                                  theta_x=CONF_theta_x,  # residual marking bulk parameter
                                  theta_y=CONF_theta_y,  # tail bound marking bulk parameter
                                  maxh=CONF_maxh,  # maximal mesh width for coefficient maximum norm evaluation
                                  add_maxm=CONF_add_maxm,  # maximal search length for new mi
                                  # error estimator evaluation
                                  estimator_type=CONF_estimator_type,
                                  quadrature_degree=CONF_quadrature_degree,
                                  # pcg solver
                                  pcg_eps=CONF_pcg_eps, pcg_maxiter=CONF_pcg_maxiter,
                                  # adaptive algorithm threshold
                                  error_eps=CONF_error_eps,
                                  # refinements
                                  max_refinements=CONF_iterations, max_dof=CONF_max_dof,
                                  do_refinement=REFINEMENT, do_uniform_refinement=CONF_uniform_refinement,
                                  refine_osc_factor=CONF_refine_osc_factor,
                                  w_history=w_history,
                                  sim_stats=sim_stats)

    from operator import itemgetter
    active_mi = [(mu, w[mu]._fefunc.function_space().mesh().num_cells()) for mu in w.active_indices()]
    active_mi = sorted(active_mi, key=itemgetter(1), reverse=True)
    logger.info("==== FINAL MESHES ====")
    for mu in active_mi:
        logger.info("--- %s has %s cells", mu[0], mu[1])
    print "ACTIVE MI:", active_mi
    print

    # memory usage info
    import resource
    logger.info("\n======================================\nMEMORY USED: "
                + str(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss)
                + "\n======================================\n")

    # ============================================================
    # PART D: Export of Solutions and Simulation Data
    # ============================================================

    # flag for final solution export
    if opts.saveData:
        import pickle
        try:
            os.makedirs(PATH_SOLUTION)
        except:
            pass
        logger.info("saving solutions into %s" % os.path.join(PATH_SOLUTION, FILE_SOLUTION))
        # save solutions
        with open(os.path.join(PATH_SOLUTION, FILE_SOLUTION), 'wb') as fout:
            pickle.dump(w_history, fout)
        # save simulation data
        sim_stats[0]["OPTS"] = opts
        sim_stats[0]["CONF"] = conf
        logger.info("saving statistics into %s" % os.path.join(PATH_SOLUTION, FILE_STATS))
        with open(os.path.join(PATH_SOLUTION, FILE_STATS), 'wb') as fout:
            pickle.dump(sim_stats, fout)

    # ============================================================
    # PART E: Plotting
    # ============================================================

    # plot residuals
    if opts.plotEstimator and len(sim_stats) > 1:
        try:
            from matplotlib.pyplot import figure, show, legend
            X = [s["DOFS"] for s in sim_stats]
            print "DOFS", X
            err_est = [s["ERROR-EST"] for s in sim_stats]
            err_res = [s["ERROR-RES"] for s in sim_stats]
            err_tail = [s["ERROR-TAIL"] for s in sim_stats]
            res_L2 = [s["RESIDUAL-L2"] for s in sim_stats]
            res_H1A = [s["RESIDUAL-H1A"] for s in sim_stats]
            mi = [s["MI"] for s in sim_stats]
            num_mi = [len(m) for m in mi]

            # --------
            # figure 1
            # --------
            fig1 = figure()
            fig1.suptitle("residual estimator")
            ax = fig1.add_subplot(111)
            if REFINEMENT["TAIL"]:
                ax.loglog(X, num_mi, '--y+', label='active mi')
            ax.loglog(X, err_est, '-g<', label='error estimator')
            ax.loglog(X, err_res, '-.cx', label='residual')
            ax.loglog(X, err_tail, '-.m>', label='tail')
            legend(loc='upper right')
            print "RESIDUAL L2", res_L2
            print "RESIDUAL H1A", res_H1A
            print "EST", err_est
            print "RES", err_res
            print "TAIL", err_tail
            show()  # this invalidates the figure instances...
        except:
            import traceback
            print traceback.format_exc()
            logger.info("skipped plotting since matplotlib is not available...")

    # plot final meshes
    if opts.plotMesh:
        w = w_history[-1]
        viz_mesh = plot(w.basis.basis.mesh, title="shared mesh", interactive=False)
        interactive()

    # plot sample solution
    if opts.plotSolution:
        w = w_history[-1]
        # get random field sample and evaluate solution (direct and parametric)
        RV_samples = coeff_field.sample_rvs()
        ref_maxm = w_history[-1].max_order
        sub_spaces = w[Multiindex()].basis.num_sub_spaces
        degree = w[Multiindex()].basis.degree
        maxh = min(w[Multiindex()].basis.minh / 4, CONF_maxh)
        maxh = w[Multiindex()].basis.minh
        projection_basis = get_projection_basis(mesh0, maxh=maxh, degree=degree, sub_spaces=sub_spaces)
        sample_sol_param = compute_parametric_sample_solution(RV_samples, coeff_field, w, projection_basis)
        sample_sol_direct = compute_direct_sample_solution(pde, RV_samples, coeff_field, A, ref_maxm, projection_basis)
        sol_variance = compute_solution_variance(coeff_field, w, projection_basis)

        # plot
        print sub_spaces
        if sub_spaces == 0:
            viz_p = plot(sample_sol_param._fefunc, title="parametric solution")
            viz_d = plot(sample_sol_direct._fefunc, title="direct solution")
            if ref_maxm > 0:
                viz_v = plot(sol_variance._fefunc, title="solution variance")
        else:
            mesh_param = sample_sol_param._fefunc.function_space().mesh()
            mesh_direct = sample_sol_direct._fefunc.function_space().mesh()
            wireframe = True
            viz_p = plot(sample_sol_param._fefunc, title="parametric solution", mode="displacement", mesh=mesh_param, wireframe=wireframe)  # , rescale=False)
            viz_d = plot(sample_sol_direct._fefunc, title="direct solution", mode="displacement", mesh=mesh_direct, wireframe=wireframe)  # , rescale=False)
        interactive()
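# NOTE: a minimal driver sketch (not part of the original code, option names hypothetical)
# showing how run_SFEM/run_MC above could be wired up; opts only needs the attributes the two
# functions actually read (basedir, continueSFEM, continueMC, saveData, plotEstimator,
# plotEstimatorAll, plotMesh, plotSolution, plotFlux), and conf is the parsed .conf section
# dictionary, e.g. as returned by ExperimentStarter._parse_config.
#
#   import argparse
#
#   def _main():
#       parser = argparse.ArgumentParser()
#       parser.add_argument("--basedir", default=".")
#       parser.add_argument("--continueSFEM", action="store_true")
#       parser.add_argument("--continueMC", action="store_true")
#       parser.add_argument("--saveData", action="store_true")
#       parser.add_argument("--plotEstimator", action="store_true")
#       parser.add_argument("--plotEstimatorAll", action="store_true")
#       parser.add_argument("--plotMesh", action="store_true")
#       parser.add_argument("--plotSolution", action="store_true")
#       parser.add_argument("--plotFlux", action="store_true")
#       opts = parser.parse_args()
#       conf = ExperimentStarter._parse_config(configfile="test_elasticity_pcg.conf")
#       run_SFEM(opts, conf)  # adaptive SFEM solve, writes solutions and statistics
#       run_MC(opts, conf)    # MC error sampling of the stored solutions
#
#   if __name__ == "__main__":
#       _main()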
def run_SFEM(opts, conf):
    # propagate config values
    for sec in conf.keys():
        if sec == "LOGGING":
            continue
        secconf = conf[sec]
        for key, val in secconf.iteritems():
            print "CONF_" + key + "= secconf['" + key + "'] =", secconf[key]
            exec "CONF_" + key + "= secconf['" + key + "']"
    # setup logging
    print "LOG_LEVEL = logging." + conf["LOGGING"]["level"]
    exec "LOG_LEVEL = logging." + conf["LOGGING"]["level"]
    setup_logging(LOG_LEVEL, logfile=CONF_experiment_name + "_SFEM")

    # determine path of this module
    path = os.path.dirname(__file__)

    # ============================================================
    # PART A: Simulation Options
    # ============================================================

    # flags for residual, projection, new mi refinement
    REFINEMENT = {"RES": CONF_refine_residual, "PROJ": CONF_refine_projection, "MI": CONF_refine_Lambda}

    # ============================================================
    # PART B: Problem Setup
    # ============================================================

    # define initial multiindices
    mis = [Multiindex(mis) for mis in MultiindexSet.createCompleteOrderSet(CONF_initial_Lambda, 1)]

    # setup domain and meshes
    mesh0, boundaries, dim = SampleDomain.setupDomain(CONF_domain, initial_mesh_N=CONF_initial_mesh_N)
    # meshes = SampleProblem.setupMeshes(mesh0, len(mis), num_refine=10, randref=(0.4, 0.3))
    meshes = SampleProblem.setupMeshes(mesh0, len(mis), num_refine=0)

    # define coefficient field
    # NOTE: for proper treatment of corner points, see elasticity_residual_estimator
    coeff_types = ("EF-square-cos", "EF-square-sin", "monomials", "constant")
    from itertools import count
    if CONF_mu is not None:
        muparam = (CONF_mu, (0 for _ in count()))
    else:
        muparam = None
    coeff_field = SampleProblem.setupCF(coeff_types[CONF_coeff_type], decayexp=CONF_decay_exp, gamma=CONF_gamma,
                                        freqscale=CONF_freq_scale, freqskip=CONF_freq_skip, rvtype="uniform",
                                        scale=CONF_coeff_scale, secondparam=muparam)

    # setup boundary conditions and pde
    pde, Dirichlet_boundary, uD, Neumann_boundary, g, f = SampleProblem.setupPDE(CONF_boundary_type, CONF_domain,
                                                                                 CONF_problem_type, boundaries, coeff_field)

    # define multioperator
    A = MultiOperator(coeff_field, pde.assemble_operator, pde.assemble_operator_inner_dofs,
                      assembly_type=eval("ASSEMBLY_TYPE." + CONF_assembly_type))
    # setup initial solution multivector
    w = SampleProblem.setupMultiVector(dict([(mu, m) for mu, m in zip(mis, meshes)]),
                                       functools.partial(setup_vector, pde=pde, degree=CONF_FEM_degree))
    logger.info("active indices of w after initialisation: %s", w.active_indices())
    sim_stats = None
    w_history = []
    if opts.continueSFEM:
        try:
            logger.info("CONTINUING EXPERIMENT: loading previous data of %s...", CONF_experiment_name)
            import pickle
            LOAD_SOLUTION = os.path.join(opts.basedir, CONF_experiment_name)
            logger.info("loading solutions from %s" % os.path.join(LOAD_SOLUTION, 'SFEM-SOLUTIONS.pkl'))
            # load solutions
            with open(os.path.join(LOAD_SOLUTION, 'SFEM-SOLUTIONS.pkl'), 'rb') as fin:
                w_history = pickle.load(fin)
            # convert to MultiVectorWithProjection
            for i, mv in enumerate(w_history):
                w_history[i] = MultiVectorWithProjection(cache_active=True, multivector=w_history[i])
            # load simulation data
            logger.info("loading statistics from %s" % os.path.join(LOAD_SOLUTION, 'SIM-STATS.pkl'))
            with open(os.path.join(LOAD_SOLUTION, 'SIM-STATS.pkl'), 'rb') as fin:
                sim_stats = pickle.load(fin)
            logger.info("active indices of w after initialisation: %s", w_history[-1].active_indices())
            w0 = w_history[-1]
        except:
            logger.warn("FAILED LOADING EXPERIMENT %s --- STARTING NEW DATA", CONF_experiment_name)
            w0 = w
    else:
        w0 = w

    # ============================================================
    # PART C: Adaptive Algorithm
    # ============================================================

    # refinement loop
    # ===============
    w, sim_stats = AdaptiveSolver(A, coeff_field, pde, mis, w0, mesh0, CONF_FEM_degree,
                                  gamma=CONF_gamma, cQ=CONF_cQ, ceta=CONF_ceta,
                                  # marking parameters
                                  theta_eta=CONF_theta_eta, theta_zeta=CONF_theta_zeta, min_zeta=CONF_min_zeta,
                                  maxh=CONF_maxh, newmi_add_maxm=CONF_newmi_add_maxm, theta_delta=CONF_theta_delta,
                                  marking_strategy=CONF_marking_strategy, max_Lambda_frac=CONF_max_Lambda_frac,
                                  # residual error evaluation
                                  quadrature_degree=CONF_quadrature_degree,
                                  # projection error evaluation
                                  projection_degree_increase=CONF_projection_degree_increase,
                                  refine_projection_mesh=CONF_refine_projection_mesh,
                                  # pcg solver
                                  pcg_eps=CONF_pcg_eps, pcg_maxiter=CONF_pcg_maxiter,
                                  # adaptive algorithm threshold
                                  error_eps=CONF_error_eps,
                                  # refinements
                                  max_refinements=CONF_iterations, max_dof=CONF_max_dof,
                                  do_refinement=REFINEMENT, do_uniform_refinement=CONF_uniform_refinement,
                                  w_history=w_history,
                                  sim_stats=sim_stats)

    from operator import itemgetter
    active_mi = [(mu, w[mu]._fefunc.function_space().mesh().num_cells()) for mu in w.active_indices()]
    active_mi = sorted(active_mi, key=itemgetter(1), reverse=True)
    logger.info("==== FINAL MESHES ====")
    for mu in active_mi:
        logger.info("--- %s has %s cells", mu[0], mu[1])
    print "ACTIVE MI:", active_mi
    print

    # memory usage info
    import resource
    logger.info("\n======================================\nMEMORY USED: "
                + str(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss)
                + "\n======================================\n")

    # ============================================================
    # PART D: Export of Solutions and Simulation Data
    # ============================================================

    # flag for final solution export
    if opts.saveData:
        import pickle
        SAVE_SOLUTION = os.path.join(opts.basedir, CONF_experiment_name)
        try:
            os.makedirs(SAVE_SOLUTION)
        except:
            pass
        logger.info("saving solutions into %s" % os.path.join(SAVE_SOLUTION, 'SFEM-SOLUTIONS.pkl'))
        # save solutions
        with open(os.path.join(SAVE_SOLUTION, 'SFEM-SOLUTIONS.pkl'), 'wb') as fout:
            pickle.dump(w_history, fout)
        # save simulation data
        sim_stats[0]["OPTS"] = opts
        sim_stats[0]["CONF"] = conf
        logger.info("saving statistics into %s" % os.path.join(SAVE_SOLUTION, 'SIM-STATS.pkl'))
        with open(os.path.join(SAVE_SOLUTION, 'SIM-STATS.pkl'), 'wb') as fout:
            pickle.dump(sim_stats, fout)

    # ============================================================
    # PART E: Plotting
    # ============================================================

    # plot residuals
    if opts.plotEstimator and len(sim_stats) > 1:
        try:
            from matplotlib.pyplot import figure, show, legend
            X = [s["DOFS"] for s in sim_stats]
            print "DOFS", X
            L2 = [s["L2"] for s in sim_stats]
            H1 = [s["H1"] for s in sim_stats]
            errest = [sqrt(s["EST"]) for s in sim_stats]
            res_part = [s["RES-PART"] for s in sim_stats]
            proj_part = [s["PROJ-PART"] for s in sim_stats]
            pcg_part = [s["PCG-PART"] for s in sim_stats]
            _reserrmu = [s["RES-mu"] for s in sim_stats]
            _projerrmu = [s["PROJ-mu"] for s in sim_stats]
            proj_max_zeta = [s["PROJ-MAX-ZETA"] for s in sim_stats]
            proj_max_inactive_zeta = [s["PROJ-MAX-INACTIVE-ZETA"] for s in sim_stats]
            try:
                proj_inactive_zeta = sorted([v for v in sim_stats[-2]["PROJ-INACTIVE-ZETA"].values()], reverse=True)
            except:
                proj_inactive_zeta = None
            mi = [s["MI"] for s in sim_stats]
            num_mi = [len(m) for m in mi]
            time_pcg = [s["TIME-PCG"] for s in sim_stats]
            time_estimator = [s["TIME-ESTIMATOR"] for s in sim_stats]
            time_inactive_mi = [s["TIME-INACTIVE-MI"] for s in sim_stats]
            time_marking = [s["TIME-MARKING"] for s in sim_stats]
            reserrmu = defaultdict(list)
            for rem in _reserrmu:
                for mu, v in rem:
                    reserrmu[mu].append(v)
            projerrmu = defaultdict(list)
            for pem in _projerrmu:
                for mu, v in pem:
                    projerrmu[mu].append(v)
            print "errest", errest

            # --------
            # figure 2
            # --------
            fig2 = figure()
            fig2.suptitle("error estimator")
            ax = fig2.add_subplot(111)
            ax.loglog(X, errest, '-g<', label='error estimator')
            legend(loc='upper right')

            # --------
            # figure 3a
            # --------
            if opts.plotEstimatorAll:
                max_mu_plotting = 7
                fig3 = figure()
                fig3.suptitle("residual contributions")
                ax = fig3.add_subplot(111)
                for i, muv in enumerate(reserrmu.iteritems()):
                    mu, v = muv
                    if i < max_mu_plotting:
                        ms = str(mu)
                        ms = ms[ms.find('=') + 1:-1]
                        ax.loglog(X[-len(v):], v, '-g<', label=ms)
                legend(loc='upper right')

            # --------
            # figure 3b
            # --------
            if opts.plotEstimatorAll:
                fig3b = figure()
                fig3b.suptitle("projection contributions")
                ax = fig3b.add_subplot(111)
                for i, muv in enumerate(projerrmu.iteritems()):
                    mu, v = muv
                    if max(v) > 1e-10 and i < max_mu_plotting:
                        ms = str(mu)
                        ms = ms[ms.find('=') + 1:-1]
                        ax.loglog(X[-len(v):], v, '-g<', label=ms)
                legend(loc='upper right')

            # --------
            # figure 4
            # --------
            if opts.plotEstimatorAll:
                fig4 = figure()
                fig4.suptitle("projection $\zeta$")
                ax = fig4.add_subplot(111)
                ax.loglog(X[1:], proj_max_zeta[1:], '-g<', label='max active $\zeta$')
                ax.loglog(X[1:], proj_max_inactive_zeta[1:], '-b^', label='max inactive $\zeta$')
                legend(loc='upper right')

            # --------
            # figure 5
            # --------
            fig5 = figure()
            fig5.suptitle("timings")
            ax = fig5.add_subplot(111)
            ax.loglog(X, time_pcg, '-g<', label='pcg')
            ax.loglog(X, time_estimator, '-b^', label='estimator')
            ax.loglog(X, time_inactive_mi, '-c+', label='inactive_mi')
            ax.loglog(X, time_marking, '-ro', label='marking')
            legend(loc='upper right')

            # --------
            # figure 6
            # --------
            if opts.plotEstimatorAll:
                fig6 = figure()
                fig6.suptitle("projection error")
                ax = fig6.add_subplot(111)
                ax.loglog(X[1:], proj_part[1:], '-.m>', label='projection part')
                legend(loc='upper right')

            # --------
            # figure 7
            # --------
            if opts.plotEstimatorAll and proj_inactive_zeta is not None:
                fig7 = figure()
                fig7.suptitle("inactive multiindex $\zeta$")
                ax = fig7.add_subplot(111)
                ax.loglog(range(len(proj_inactive_zeta)), proj_inactive_zeta, '-.m>', label='inactive $\zeta$')
                legend(loc='lower right')

            # --------
            # figure 1
            # --------
            fig1 = figure()
            fig1.suptitle("residual estimator")
            ax = fig1.add_subplot(111)
            if REFINEMENT["MI"]:
                ax.loglog(X, num_mi, '--y+', label='active mi')
            ax.loglog(X, errest, '-g<', label='error estimator')
            ax.loglog(X, res_part, '-.cx', label='residual part')
            ax.loglog(X[1:], proj_part[1:], '-.m>', label='projection part')
            ax.loglog(X, pcg_part, '-.b>', label='pcg part')
            legend(loc='upper right')

            show()  # this invalidates the figure instances...
        except:
            import traceback
            print traceback.format_exc()
            logger.info("skipped plotting since matplotlib is not available...")

    # plot final meshes
    if opts.plotMesh:
        USE_MAYAVI = Plotter.hasMayavi() and False
        w = w_history[-1]
        for mu, vec in w.iteritems():
            if USE_MAYAVI:
                # mesh
                # Plotter.figure(bgcolor=(1, 1, 1))
                # mesh = vec.basis.mesh
                # Plotter.plotMesh(mesh.coordinates(), mesh.cells(), representation='mesh')
                # Plotter.axes()
                # Plotter.labels()
                # Plotter.title(str(mu))
                # function
                Plotter.figure(bgcolor=(1, 1, 1))
                mesh = vec.basis.mesh
                Plotter.plotMesh(mesh.coordinates(), mesh.cells(), vec.coeffs)
                Plotter.axes()
                Plotter.labels()
                Plotter.title(str(mu))
            else:
                viz_mesh = plot(vec.basis.mesh, title="mesh " + str(mu), interactive=False)
                # if SAVE_SOLUTION != '':
                #     viz_mesh.write_png(SAVE_SOLUTION + '/mesh' + str(mu) + '.png')
                #     viz_mesh.write_ps(SAVE_SOLUTION + '/mesh' + str(mu), format='pdf')
                # vec.plot(title=str(mu), interactive=False)
        if USE_MAYAVI:
            Plotter.show(stop=True)
            Plotter.close(allfig=True)
        else:
            interactive()

    # plot sample solution
    if opts.plotSolution:
        w = w_history[-1]
        # get random field sample and evaluate solution (direct and parametric)
        RV_samples = coeff_field.sample_rvs()
        ref_maxm = w_history[-1].max_order
        sub_spaces = w[Multiindex()].basis.num_sub_spaces
        degree = w[Multiindex()].basis.degree
        maxh = min(w[Multiindex()].basis.minh / 4, CONF_max_h)
        maxh = w[Multiindex()].basis.minh
        projection_basis = get_projection_basis(mesh0, maxh=maxh, degree=degree, sub_spaces=sub_spaces)
        sample_sol_param = compute_parametric_sample_solution(RV_samples, coeff_field, w, projection_basis)
        sample_sol_direct = compute_direct_sample_solution(pde, RV_samples, coeff_field, A, ref_maxm, projection_basis)
        sol_variance = compute_solution_variance(coeff_field, w, projection_basis)

        # plot
        print sub_spaces
        if sub_spaces == 0:
            viz_p = plot(sample_sol_param._fefunc, title="parametric solution")
            viz_d = plot(sample_sol_direct._fefunc, title="direct solution")
            if ref_maxm > 0:
                viz_v = plot(sol_variance._fefunc, title="solution variance")

            # debug---
            if not True:
                for mu in w.active_indices():
                    for i, wi in enumerate(w_history):
                        if i == len(w_history) - 1 or True:
                            plot(wi[mu]._fefunc, title="parametric solution " + str(mu) + " iteration " + str(i))
                            # plot(wi[mu]._fefunc.function_space().mesh(), title="parametric solution " + str(mu) + " iteration " + str(i), axes=True)
                    interactive()
            # ---debug
            # for mu in w.active_indices():
            #     plot(w[mu]._fefunc, title="parametric solution " + str(mu))
        else:
            mesh_param = sample_sol_param._fefunc.function_space().mesh()
            mesh_direct = sample_sol_direct._fefunc.function_space().mesh()
            wireframe = True
            viz_p = plot(sample_sol_param._fefunc, title="parametric solution", mode="displacement", mesh=mesh_param, wireframe=wireframe)  # , rescale=False)
            viz_d = plot(sample_sol_direct._fefunc, title="direct solution", mode="displacement", mesh=mesh_direct, wireframe=wireframe)  # , rescale=False)
            # for mu in w.active_indices():
            #     viz_p = plot(w[mu]._fefunc, title="parametric solution: " + str(mu), mode="displacement", mesh=mesh_param, wireframe=wireframe)
        interactive()

    if opts.plotFlux:
        w = w_history[-1]
        # get random field sample and evaluate solution (direct and parametric)
        RV_samples = coeff_field.sample_rvs()
        ref_maxm = w_history[-1].max_order
        sub_spaces = w[Multiindex()].basis.num_sub_spaces
        degree = w[Multiindex()].basis.degree
        maxh = min(w[Multiindex()].basis.minh / 4, CONF_max_h)
        maxh = w[Multiindex()].basis.minh
        projection_basis = get_projection_basis(mesh0, maxh=maxh, degree=degree, sub_spaces=sub_spaces)
        vec_projection_basis = get_projection_basis(mesh0, maxh=maxh, degree=degree, sub_spaces=2)
        sample_sol_param = compute_parametric_sample_solution(RV_samples, coeff_field, w, projection_basis)
        sample_sol_direct = compute_direct_sample_solution(pde, RV_samples, coeff_field, A, ref_maxm, projection_basis)
        sol_variance = compute_solution_variance(coeff_field, w, projection_basis)
        sol_param_flux = compute_solution_flux(pde, RV_samples, coeff_field, sample_sol_param, ref_maxm, projection_basis, vec_projection_basis)
        sol_direct_flux = compute_solution_flux(pde, RV_samples, coeff_field, sample_sol_direct, ref_maxm, projection_basis, vec_projection_basis)

        # plot
        if sub_spaces == 0:
            # viz_p = plot(sol_param_flux._fefunc, title="parametric solution flux")
            flux_x, flux_y = sol_param_flux._fefunc.split(deepcopy=True)
            viz_x = plot(flux_x, title="parametric solution flux x")
            viz_y = plot(flux_y, title="parametric solution flux y")
            flux_x, flux_y = sol_direct_flux._fefunc.split(deepcopy=True)
            viz_x = plot(flux_x, title="direct solution flux x")
            viz_y = plot(flux_y, title="direct solution flux y")
        else:
            raise Exception("not implemented")
        interactive()
def run_MC(opts, conf):
    # propagate config values
    _G = globals()
    for sec in conf.keys():
        if sec == "LOGGING":
            continue
        secconf = conf[sec]
        for key, val in secconf.iteritems():
            print "CONF_" + key + "= secconf['" + key + "'] =", secconf[key]
            _G["CONF_" + key] = secconf[key]
            # exec "CONF_" + key + "= secconf['" + key + "']"
    # setup logging
    _G["LOG_LEVEL"] = eval("logging." + conf["LOGGING"]["level"])
    print "LOG_LEVEL = logging." + conf["LOGGING"]["level"]
    # exec "LOG_LEVEL = logging." + conf["LOGGING"]["level"]
    setup_logging(LOG_LEVEL, logfile=CONF_experiment_name + "_MC")

    # determine path of this module
    path = os.path.dirname(__file__)

    # ============================================================
    # PART A: Setup Problem
    # ============================================================

    # get boundaries
    mesh0, boundaries, dim = SampleDomain.setupDomain(CONF_domain, initial_mesh_N=CONF_initial_mesh_N)

    # define coefficient field
    coeff_types = ("EF-square-cos", "EF-square-sin", "monomials", "constant")
    from itertools import count
    if CONF_mu is not None:
        muparam = (CONF_mu, (0 for _ in count()))
    else:
        muparam = None
    coeff_field = SampleProblem.setupCF(coeff_types[CONF_coeff_type], decayexp=CONF_decay_exp, gamma=CONF_gamma,
                                        freqscale=CONF_freq_scale, freqskip=CONF_freq_skip, rvtype="uniform",
                                        scale=CONF_coeff_scale, secondparam=muparam)

    # setup boundary conditions and pde
    # initial_mesh_N = CONF_initial_mesh_N
    pde, Dirichlet_boundary, uD, Neumann_boundary, g, f = SampleProblem.setupPDE(CONF_boundary_type, CONF_domain,
                                                                                 CONF_problem_type, boundaries, coeff_field)

    # define multioperator
    A = MultiOperator(coeff_field, pde.assemble_operator, pde.assemble_operator_inner_dofs,
                      assembly_type=eval("ASSEMBLY_TYPE." + CONF_assembly_type))

    # ============================================================
    # PART B: Import Solution
    # ============================================================
    import pickle
    LOAD_SOLUTION = os.path.join(opts.basedir, CONF_experiment_name)
    logger.info("loading solutions from %s" % os.path.join(LOAD_SOLUTION, 'SFEM-SOLUTIONS.pkl'))
    # load solutions
    with open(os.path.join(LOAD_SOLUTION, 'SFEM-SOLUTIONS.pkl'), 'rb') as fin:
        w_history = pickle.load(fin)
    # load simulation data
    logger.info("loading statistics from %s" % os.path.join(LOAD_SOLUTION, 'SIM-STATS.pkl'))
    with open(os.path.join(LOAD_SOLUTION, 'SIM-STATS.pkl'), 'rb') as fin:
        sim_stats = pickle.load(fin)
    logger.info("active indices of w after initialisation: %s", w_history[-1].active_indices())

    # ============================================================
    # PART C: MC Error Sampling
    # ============================================================
    MC_N = CONF_N
    MC_HMAX = CONF_max_h
    if CONF_runs > 0:
        # determine reference mesh
        w = w_history[-1]
        ref_mesh, _ = create_joint_mesh([w[mu].mesh for mu in w.active_indices()])
        for _ in range(CONF_ref_mesh_refine):
            ref_mesh = refine(ref_mesh)
        ref_maxm = CONF_sampling_order if CONF_sampling_order > 0 else w.max_order + CONF_sampling_order_increase

    for i, w in enumerate(w_history):
        # if i == 0:
        #     continue
        logger.info("MC error sampling for w[%i] (of %i)", i, len(w_history))

        # memory usage info
        import resource
        logger.info("\n======================================\nMEMORY USED: "
                    + str(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss)
                    + "\n======================================\n")

        MC_start = 0
        old_stats = sim_stats[i]
        if opts.continueMC:
            try:
                MC_start = sim_stats[i]["MC-N"]
                logger.info("CONTINUING MC of %s for solution (iteration) %s of %s", LOAD_SOLUTION, i, len(w_history))
            except:
logger.info("STARTING MC of %s for solution (iteration) %s of %s", LOAD_SOLUTION, i, len(w_history)) if MC_start <= 0: sim_stats[i]["MC-N"] = 0 sim_stats[i]["MC-L2ERR"] = 0 sim_stats[i]["MC-H1ERR"] = 0 sim_stats[i]["MC-L2ERR_a0"] = 0 sim_stats[i]["MC-H1ERR_a0"] = 0 MC_RUNS = max(CONF_runs - MC_start, 0) if MC_RUNS > 0: logger.info("STARTING %s MC RUNS", MC_RUNS) # L2err, H1err, L2err_a0, H1err_a0, N = sample_error_mc(w, pde, A, coeff_field, mesh0, ref_maxm, MC_RUNS, MC_N, MC_HMAX) L2err, H1err, L2err_a0, H1err_a0, N = sample_error_mc(w, pde, A, coeff_field, ref_mesh, ref_maxm, MC_RUNS, MC_N, MC_HMAX) # combine current and previous results sim_stats[i]["MC-N"] = N + old_stats["MC-N"] sim_stats[i]["MC-L2ERR"] = (L2err * N + old_stats["MC-L2ERR"]) / sim_stats[i]["MC-N"] sim_stats[i]["MC-H1ERR"] = (H1err * N + old_stats["MC-H1ERR"]) / sim_stats[i]["MC-N"] sim_stats[i]["MC-L2ERR_a0"] = (L2err_a0 * N + old_stats["MC-L2ERR_a0"]) / sim_stats[i]["MC-N"] sim_stats[i]["MC-H1ERR_a0"] = (H1err_a0 * N + old_stats["MC-H1ERR_a0"]) / sim_stats[i]["MC-N"] print "MC-H1ERR (N:%i) = %f" % (sim_stats[i]["MC-N"], sim_stats[i]["MC-H1ERR"]) else: logger.info("SKIPPING MC RUN since sufficiently many samples are available") # ============================================================ # PART D: Export Updated Data and Plotting # ============================================================ # save updated data if opts.saveData: # save updated statistics import pickle SAVE_SOLUTION = os.path.join(opts.basedir, CONF_experiment_name) try: os.makedirs(SAVE_SOLUTION) except: pass logger.info("saving statistics into %s" % os.path.join(SAVE_SOLUTION, 'SIM-STATS.pkl')) with open(os.path.join(SAVE_SOLUTION, 'SIM-STATS.pkl'), 'wb') as fout: pickle.dump(sim_stats, fout) # plot residuals if opts.plotEstimator and len(sim_stats) > 1: try: from matplotlib.pyplot import figure, show, legend x = [s["DOFS"] for s in sim_stats] L2 = [s["L2"] for s in sim_stats] H1 = [s["H1"] for s in sim_stats] errest = [sqrt(s["EST"]) for s in sim_stats] res_part = [s["RES-PART"] for s in sim_stats] proj_part = [s["PROJ-PART"] for s in sim_stats] pcg_part = [s["PCG-PART"] for s in sim_stats] _reserrmu = [s["RES-mu"] for s in sim_stats] _projerrmu = [s["PROJ-mu"] for s in sim_stats] if CONF_runs > 0: mcL2 = [s["MC-L2ERR"] for s in sim_stats] mcH1 = [s["MC-H1ERR"] for s in sim_stats] mcL2_a0 = [s["MC-L2ERR_a0"] for s in sim_stats] mcH1_a0 = [s["MC-H1ERR_a0"] for s in sim_stats] effest = [est / err for est, err in zip(errest, mcH1)] mi = [s["MI"] for s in sim_stats] num_mi = [len(m) for m in mi] reserrmu = defaultdict(list) for rem in _reserrmu: for mu, v in rem: reserrmu[mu].append(v) print "errest", errest if CONF_runs > 0: print "mcH1", mcH1 print "efficiency", [est / err for est, err in zip(errest, mcH1)] # -------- # figure 2 # -------- fig2 = figure() fig2.suptitle("residual estimator") ax = fig2.add_subplot(111) if CONF_refine_Lambda: ax.loglog(x, num_mi, '--y+', label='active mi') ax.loglog(x, errest, '-g<', label='error estimator') ax.loglog(x, res_part, '-.cx', label='residual part') ax.loglog(x[1:], proj_part[1:], '-.m>', label='projection part') ax.loglog(x, pcg_part, '-.b>', label='pcg part') if MC_RUNS > 0: ax.loglog(x, mcH1, '-b^', label='MC H1 error') ax.loglog(x, mcL2, '-ro', label='MC L2 error') # ax.loglog(x, H1, '-b^', label='H1 residual') # ax.loglog(x, L2, '-ro', label='L2 residual') legend(loc='upper right') # -------- # figure 3 # -------- fig3 = figure() fig3.suptitle("efficiency residual estimator") ax = fig3.add_subplot(111) 
            ax.loglog(x, errest, '-g<', label='error estimator')
            if MC_RUNS > 0:
                ax.loglog(x, mcH1, '-b^', label='MC H1 error')
                ax.loglog(x, effest, '-ro', label='efficiency')
            legend(loc='upper right')

            # # --------
            # # figure 4
            # # --------
            # fig4 = figure()
            # fig4.suptitle("residual contributions")
            # ax = fig4.add_subplot(111)
            # for mu, v in reserrmu.iteritems():
            #     ms = str(mu)
            #     ms = ms[ms.find('=') + 1:-1]
            #     ax.loglog(x[-len(v):], v, '-g<', label=ms)
            # legend(loc='upper right')

            show()  # this invalidates the figure instances...
        except:
            import traceback
            print traceback.format_exc()
            logger.info("skipped plotting since matplotlib is not available...")