def Rayleigh_Benard(Rayleigh=1e6, Prandtl=1, nz=64, nx=None, ny=None, aspect=4,
                    fixed_flux=False, fixed_T=False, mixed_flux_T=True,
                    stress_free=False, no_slip=True,
                    restart=None, run_time=23.5, run_time_buoyancy=None,
                    run_time_iter=np.inf, run_time_therm=1,
                    max_writes=20, max_slice_writes=20, output_dt=0.2,
                    data_dir='./', coeff_output=True, verbose=False, no_join=False,
                    do_bvp=False, num_bvps=10, bvp_convergence_factor=1e-3,
                    bvp_equil_time=10, bvp_resolution_factor=1, bvp_transient_time=30,
                    bvp_final_equil_time=None, min_bvp_time=50,
                    first_bvp_time=20, first_bvp_convergence_factor=1e-2,
                    threeD=False, seed=42, mesh=None, overwrite=False):
    import os
    from dedalus.tools.config import config
    config['logging']['filename'] = os.path.join(data_dir, 'logs/dedalus_log')
    config['logging']['file_level'] = 'DEBUG'

    import mpi4py.MPI
    if mpi4py.MPI.COMM_WORLD.rank == 0:
        if not os.path.exists('{:s}/'.format(data_dir)):
            os.makedirs('{:s}/'.format(data_dir))
        logdir = os.path.join(data_dir, 'logs')
        if not os.path.exists(logdir):
            os.mkdir(logdir)
    logger = logging.getLogger(__name__)
    logger.info("saving run in: {}".format(data_dir))

    import time
    from dedalus import public as de
    from dedalus.extras import flow_tools
    from dedalus.tools import post

    # input parameters
    logger.info("Ra = {}, Pr = {}".format(Rayleigh, Prandtl))

    # Parameters
    Lz = 1.
    Lx = aspect * Lz
    Ly = aspect * Lz
    if nx is None:
        nx = int(nz * aspect)
    if ny is None:
        ny = int(nz * aspect)

    if threeD:
        logger.info("resolution: [{}x{}x{}]".format(nx, ny, nz))
        equations = BoussinesqEquations3D(nx=nx, ny=ny, nz=nz, Lx=Lx, Ly=Ly, Lz=Lz, mesh=mesh)
    else:
        logger.info("resolution: [{}x{}]".format(nx, nz))
        equations = BoussinesqEquations2D(nx=nx, nz=nz, Lx=Lx, Lz=Lz)
    equations.set_IVP(Rayleigh, Prandtl)

    # Select boundary conditions (thermal BCs are mutually exclusive, as are velocity BCs)
    bc_dict = {
        'fixed_flux': None,
        'fixed_temperature': None,
        'mixed_flux_temperature': None,
        'mixed_temperature_flux': None,
        'stress_free': None,
        'no_slip': None
    }
    if mixed_flux_T:
        bc_dict['mixed_flux_temperature'] = True
    elif fixed_T:
        bc_dict['fixed_temperature'] = True
    elif fixed_flux:
        bc_dict['fixed_flux'] = True
    if stress_free:
        bc_dict['stress_free'] = True
    elif no_slip:
        bc_dict['no_slip'] = True

    supercrit = Rayleigh / RA_CRIT
    equations.set_BC(**bc_dict)

    # Build solver
    ts = de.timesteppers.RK443
    cfl_safety = 0.8
    solver = equations.problem.build_solver(ts)
    logger.info('Solver built')

    checkpoint = Checkpoint(data_dir)
    checkpoint_min = 30  # minutes of wall time between checkpoints; was undefined here
                         # (value taken from the other run scripts in this repository)
    if restart is None:
        equations.set_IC(solver, seed=seed)
        dt = None
        mode = 'overwrite'
    else:
        logger.info("restarting from {}".format(restart))
        checkpoint.restart(restart, solver)
        if overwrite:
            mode = 'overwrite'
        else:
            mode = 'append'
    checkpoint.set_checkpoint(solver, wall_dt=checkpoint_min * 60, mode=mode)

    # Integration parameters
    # if not isinstance(run_time_therm, type(None)):
    #     solver.stop_sim_time = run_time_therm*equations.thermal_time + solver.sim_time
    # elif not isinstance(run_time_buoyancy, type(None)):
    #     solver.stop_sim_time = run_time_buoyancy + solver.sim_time
    # else:
    solver.stop_sim_time = np.inf
    solver.stop_wall_time = run_time * 3600.
    solver.stop_iteration = run_time_iter
    Hermitian_cadence = 100

    # Analysis
    max_dt = output_dt
    analysis_tasks = equations.initialize_output(solver, data_dir, coeff_output=coeff_output,
                                                 output_dt=output_dt, mode=mode)

    # CFL
    CFL = flow_tools.CFL(solver, initial_dt=0.1, cadence=1, safety=cfl_safety,
                         max_change=1.5, min_change=0.5, max_dt=max_dt, threshold=0.1)
    if threeD:
        CFL.add_velocities(('u', 'v', 'w'))
    else:
        CFL.add_velocities(('u', 'w'))

    # Flow properties
    flow = flow_tools.GlobalFlowProperty(solver, cadence=1)
    flow.add_property("Re", name='Re')
    # flow.add_property("interp(w, z=0.95)", name='w near top')

    # u, v, w = solver.state['u'], solver.state['v'], solver.state['w']

    if do_bvp:
        if not threeD:
            ny = 0
            atmo_class = BoussinesqEquations2D
        else:
            atmo_class = BoussinesqEquations3D
        bvp_solver = BoussinesqBVPSolver(atmo_class, nx, ny, nz,
                                         flow, equations.domain.dist.comm_cart,
                                         solver, num_bvps, bvp_equil_time,
                                         threeD=threeD,
                                         bvp_transient_time=bvp_transient_time,
                                         bvp_run_threshold=bvp_convergence_factor,
                                         bvp_l2_check_time=1, mesh=mesh,
                                         first_bvp_time=first_bvp_time,
                                         first_run_threshold=first_bvp_convergence_factor,
                                         plot_dir='{}/bvp_plots/'.format(data_dir),
                                         min_avg_dt=1e-10,
                                         final_equil_time=bvp_final_equil_time,
                                         min_bvp_time=min_bvp_time)
        bc_dict.pop('stress_free')
        bc_dict.pop('no_slip')

    # print(equations.domain.grid(0), equations.domain.grid(1), equations.domain.grid(2))

    first_step = True
    # Main loop
    try:
        logger.info('Starting loop')
        # Initialize timing before the loop so the `finally` block is safe even if the
        # first iteration raises; it is reset after the first full step below.
        start_time = time.time()
        Re_avg = 0
        continue_bvps = True
        not_corrected_times = True
        init_time = solver.sim_time
        start_iter = solver.iteration
        while (solver.ok and np.isfinite(Re_avg)) and continue_bvps:
            dt = CFL.compute_dt()
            solver.step(dt)  # , trim=True)

            # Solve for blow-up over long timescales in 3D due to hermitian-ness
            effective_iter = solver.iteration - start_iter
            if threeD and effective_iter % Hermitian_cadence == 0:
                for field in solver.state.fields:
                    field.require_grid_space()

            Re_avg = flow.grid_average('Re')
            log_string = 'Iteration: {:5d}, '.format(solver.iteration)
            log_string += 'Time: {:8.3e} ({:8.3e} therm), dt: {:8.3e}, '.format(
                solver.sim_time, solver.sim_time / equations.thermal_time, dt)
            log_string += 'Re: {:8.3e}/{:8.3e}'.format(Re_avg, flow.max('Re'))
            logger.info(log_string)

            if not_corrected_times and Re_avg > 1:
                if not isinstance(run_time_therm, type(None)):
                    solver.stop_sim_time = run_time_therm * equations.thermal_time + solver.sim_time
                elif not isinstance(run_time_buoyancy, type(None)):
                    solver.stop_sim_time = run_time_buoyancy + solver.sim_time
                not_corrected_times = False

            if do_bvp:
                bvp_solver.update_avgs(dt, Re_avg, min_Re=np.sqrt(supercrit))
                if bvp_solver.check_if_solve():
                    atmo_kwargs = {'nz': nz * bvp_resolution_factor, 'Lz': Lz}
                    diff_args = [Rayleigh, Prandtl]
                    bvp_solver.solve_BVP(atmo_kwargs, diff_args, bc_dict)
                if bvp_solver.terminate_IVP():
                    continue_bvps = False

            if first_step:
                if verbose:
                    import matplotlib
                    matplotlib.use('Agg')
                    import matplotlib.pyplot as plt
                    fig = plt.figure()
                    ax = fig.add_subplot(1, 1, 1)
                    ax.spy(solver.pencils[0].L, markersize=1, markeredgewidth=0.0)
                    fig.savefig(data_dir + "sparsity_pattern.png", dpi=1200)

                    import scipy.sparse.linalg as sla
                    LU = sla.splu(solver.pencils[0].LHS.tocsc(), permc_spec='NATURAL')
                    fig = plt.figure()
                    ax = fig.add_subplot(1, 2, 1)
                    ax.spy(LU.L.A, markersize=1, markeredgewidth=0.0)
                    ax = fig.add_subplot(1, 2, 2)
                    ax.spy(LU.U.A, markersize=1, markeredgewidth=0.0)
                    fig.savefig(data_dir + "sparsity_pattern_LU.png", dpi=1200)

                    logger.info("{} nonzero entries in LU".format(LU.nnz))
                    logger.info("{} nonzero entries in LHS".format(solver.pencils[0].LHS.tocsc().nnz))
                    logger.info("{} fill in factor".format(LU.nnz / solver.pencils[0].LHS.tocsc().nnz))
                first_step = False
                start_time = time.time()
    except:
        # Log first, then re-raise; the original `raise` preceded the log call and made it unreachable.
        logger.error('Exception raised, triggering end of main loop.')
        raise
    finally:
        end_time = time.time()
        main_loop_time = end_time - start_time
        n_iter_loop = solver.iteration - 1
        logger.info('Iterations: {:d}'.format(n_iter_loop))
        logger.info('Sim end time: {:f}'.format(solver.sim_time))
        logger.info('Run time: {:f} sec'.format(main_loop_time))
        logger.info('Run time: {:f} cpu-hr'.format(main_loop_time / 60 / 60 * equations.domain.dist.comm_cart.size))
        logger.info('iter/sec: {:f} (main loop only)'.format(n_iter_loop / main_loop_time))
        try:
            final_checkpoint = Checkpoint(data_dir, checkpoint_name='final_checkpoint')
            final_checkpoint.set_checkpoint(solver, wall_dt=1, mode=mode)
            solver.step(dt)  # clean this up in the future...works for now.
            post.merge_process_files(data_dir + '/final_checkpoint/', cleanup=False)
        except:
            # Report the failure before re-raising (the original re-raised before printing).
            print('cannot save final checkpoint')
            raise
        finally:
            if not no_join:
                logger.info('beginning join operation')
                post.merge_analysis(data_dir + 'checkpoints')
                for task in analysis_tasks:
                    logger.info(task.base_path)
                    post.merge_analysis(task.base_path)
            logger.info(40 * "=")
            logger.info('Iterations: {:d}'.format(n_iter_loop))
            logger.info('Sim end time: {:f}'.format(solver.sim_time))
            logger.info('Run time: {:f} sec'.format(main_loop_time))
            logger.info('Run time: {:f} cpu-hr'.format(main_loop_time / 60 / 60 * equations.domain.dist.comm_cart.size))
            logger.info('iter/sec: {:f} (main loop only)'.format(n_iter_loop / main_loop_time))
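# Usage sketch (added for illustration, not part of the original run script): a minimal call
# that exercises the driver above on a small 2D grid. It assumes this function is defined
# alongside the module-level names it relies on (np, logging, RA_CRIT, Checkpoint, and the
# BoussinesqEquations2D/3D and BoussinesqBVPSolver classes); parameter values and the
# './rbc_test/' output path are illustrative only. Wrapped in a helper so nothing runs on import.
def _example_rayleigh_benard_run():
    Rayleigh_Benard(Rayleigh=1e5, Prandtl=1, nz=32, aspect=2,
                    mixed_flux_T=True, no_slip=True,
                    run_time=1.0, run_time_buoyancy=50, run_time_therm=None,
                    output_dt=0.5, data_dir='./rbc_test/',
                    do_bvp=False, threeD=False, verbose=False)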
def FC_convection(Rayleigh=1e6, Prandtl=1, stiffness=1e4, m_rz=3, gamma=5/3, n_rho_cz=3.5, n_rho_rz=1, nz_cz=128, nz_rz=128, nx = None, width=None, single_chebyshev=False, rk222=False, superstep=False, dense=False, nz_dense=64, oz=False, fixed_flux=False, run_time=23.5, run_time_buoyancies=np.inf, run_time_iter=np.inf, dynamic_diffusivities=False, max_writes=20,out_cadence=0.1, no_coeffs=False, no_join=False, restart=None, data_dir='./', verbose=False, label=None): def format_number(number, no_format_min=0.1, no_format_max=10): if number > no_format_max or number < no_format_min: try: mantissa = "{:e}".format(number).split("+")[0].split("e")[0].rstrip("0") or "0" power = "{:e}".format(number).split("+")[1].lstrip("0") or "0" except: mantissa = "{:e}".format(number).split("-")[0].split("e")[0].rstrip("0") or "0" power = "{:e}".format(number).split("-")[1].lstrip("0") or "0" power = "-"+power if mantissa[-1]==".": mantissa = mantissa[:-1] mantissa += "e" else: mantissa = "{:f}".format(number).rstrip("0") or "0" if mantissa[-1]==".": mantissa = mantissa[:-1] power = "" number_string = mantissa+power return number_string # save data in directory named after script if data_dir[-1] != '/': data_dir += '/' data_dir += sys.argv[0].split('.py')[0] if fixed_flux: data_dir += '_flux' if dynamic_diffusivities: data_dir += '_dynamic' if oz: data_dir += '_oz' data_dir += "_nrhocz{}_Ra{}_S{}".format(format_number(n_rho_cz), format_number(Rayleigh), format_number(stiffness)) if width: data_dir += "_erf{}".format(format_number(width)) if label: data_dir += "_{}".format(label) data_dir += '/' from dedalus.tools.config import config config['logging']['filename'] = os.path.join(data_dir,'logs/dedalus_log') config['logging']['file_level'] = 'DEBUG' import mpi4py.MPI if mpi4py.MPI.COMM_WORLD.rank == 0: if not os.path.exists('{:s}/'.format(data_dir)): os.makedirs('{:s}/'.format(data_dir)) logdir = os.path.join(data_dir,'logs') if not os.path.exists(logdir): os.mkdir(logdir) logger = logging.getLogger(__name__) logger.info("saving run in: {}".format(data_dir)) import dedalus.public as de from dedalus.tools import post from dedalus.extras import flow_tools from dedalus.core.future import FutureField from stratified_dynamics import multitropes from tools.checkpointing import Checkpoint checkpoint_min = 30 initial_time = time.time() logger.info("Starting Dedalus script {:s}".format(sys.argv[0])) constant_Prandtl=True mixed_temperature_flux=None if oz: stable_top=True if not fixed_flux: mixed_temperature_flux=True else: stable_top=False # Set domain if nx is None: nx = nz_cz*4 if single_chebyshev: nz = nz_cz nz_list = [nz_cz] else: if dense: nz = nz_rz+nz_dense+nz_cz #nz_list = [nz_rz, int(nz_dense/2), int(nz_dense/2), nz_cz] nz_list = [nz_rz, nz_dense, nz_cz] else: nz = nz_rz+nz_cz nz_list = [nz_rz, nz_cz] if dynamic_diffusivities: atmosphere = multitropes.FC_multitrope_2d_kappa_mu(nx=nx, nz=nz_list, stiffness=stiffness, m_rz=m_rz, gamma=gamma, n_rho_cz=n_rho_cz, n_rho_rz=n_rho_rz, verbose=verbose, width=width, constant_Prandtl=constant_Prandtl, stable_top=stable_top) else: atmosphere = multitropes.FC_multitrope(nx=nx, nz=nz_list, stiffness=stiffness, m_rz=m_rz, gamma=gamma, n_rho_cz=n_rho_cz, n_rho_rz=n_rho_rz, verbose=verbose, width=width, constant_Prandtl=constant_Prandtl, stable_top=stable_top) atmosphere.set_IVP_problem(Rayleigh, Prandtl) atmosphere.set_BC(mixed_temperature_flux=mixed_temperature_flux, fixed_flux=fixed_flux) problem = atmosphere.get_problem() if atmosphere.domain.distributor.rank == 0: if not 
os.path.exists('{:s}/'.format(data_dir)): os.makedirs('{:s}/'.format(data_dir)) if rk222: logger.info("timestepping using RK222") ts = de.timesteppers.RK222 cfl_safety_factor = 0.2*2 else: logger.info("timestepping using RK443") ts = de.timesteppers.RK443 cfl_safety_factor = 0.2*4 # Build solver solver = problem.build_solver(ts) # initial conditions if restart is None: mode = "overwrite" else: mode = "append" logger.info("checkpointing in {}".format(data_dir)) checkpoint = Checkpoint(data_dir) if restart is None: atmosphere.set_IC(solver) dt = None else: logger.info("restarting from {}".format(restart)) dt = checkpoint.restart(restart, solver) checkpoint.set_checkpoint(solver, wall_dt=checkpoint_min*60, mode=mode) logger.info("thermal_time = {:g}, top_thermal_time = {:g}".format(atmosphere.thermal_time, atmosphere.top_thermal_time)) max_dt = atmosphere.min_BV_time max_dt = atmosphere.buoyancy_time*out_cadence if dt is None: dt = max_dt/5 report_cadence = 1 output_time_cadence = out_cadence*atmosphere.buoyancy_time solver.stop_sim_time = solver.sim_time + run_time_buoyancies*atmosphere.buoyancy_time solver.stop_iteration = solver.iteration + run_time_iter solver.stop_wall_time = run_time*3600 logger.info("output cadence = {:g}".format(output_time_cadence)) analysis_tasks = atmosphere.initialize_output(solver, data_dir, coeffs_output=not(no_coeffs), sim_dt=output_time_cadence, max_writes=max_writes, mode=mode) cfl_cadence = 1 CFL = flow_tools.CFL(solver, initial_dt=dt, cadence=cfl_cadence, safety=cfl_safety_factor, max_change=1.5, min_change=0.5, max_dt=max_dt, threshold=0.1) if superstep: CFL_traditional = flow_tools.CFL(solver, initial_dt=max_dt, cadence=cfl_cadence, safety=cfl_safety_factor, max_change=1.5, min_change=0.5, max_dt=max_dt, threshold=0.1) CFL_traditional.add_velocities(('u', 'w')) vel_u = FutureField.parse('u', CFL.solver.evaluator.vars, CFL.solver.domain) delta_x = atmosphere.Lx/nx CFL.add_frequency(vel_u/delta_x) vel_w = FutureField.parse('w', CFL.solver.evaluator.vars, CFL.solver.domain) mean_delta_z_cz = atmosphere.Lz_cz/nz_cz CFL.add_frequency(vel_w/mean_delta_z_cz) else: CFL.add_velocities(('u', 'w')) # Flow properties flow = flow_tools.GlobalFlowProperty(solver, cadence=1) flow.add_property("Re_rms", name='Re') try: logger.info("starting main loop") start_time = time.time() start_iter = solver.iteration good_solution = True first_step = True while solver.ok and good_solution: dt = CFL.compute_dt() # advance solver.step(dt) effective_iter = solver.iteration - start_iter # update lists if effective_iter % report_cadence == 0: Re_avg = flow.grid_average('Re') log_string = 'Iteration: {:5d}, Time: {:8.3e} ({:8.3e}), '.format(solver.iteration, solver.sim_time, solver.sim_time/atmosphere.buoyancy_time) log_string += 'dt: {:8.3e}'.format(dt) if superstep: dt_traditional = CFL_traditional.compute_dt() log_string += ' (vs {:8.3e})'.format(dt_traditional) log_string += ', ' log_string += 'Re: {:8.3e}/{:8.3e}'.format(Re_avg, flow.max('Re')) logger.info(log_string) if not np.isfinite(Re_avg): good_solution = False logger.info("Terminating run. 
Trapped on Reynolds = {}".format(Re_avg)) if first_step: if verbose: import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt fig = plt.figure() ax = fig.add_subplot(1,1,1) ax.spy(solver.pencils[0].L, markersize=0.5, markeredgewidth=0.0) fig.savefig(data_dir+"sparsity_pattern.png", dpi=2400) #fig.savefig(data_dir+"sparsity_pattern.svg", format="svg") import scipy.sparse.linalg as sla LU = sla.splu(solver.pencils[0].LHS.tocsc(), permc_spec='NATURAL') fig = plt.figure() ax = fig.add_subplot(1,2,1) ax.spy(LU.L.A, markersize=1, markeredgewidth=0.0) ax = fig.add_subplot(1,2,2) ax.spy(LU.U.A, markersize=1, markeredgewidth=0.0) fig.savefig(data_dir+"sparsity_pattern_LU.png", dpi=1200) #fig.savefig(data_dir+"sparsity_pattern_LU.svg", format="svg") logger.info("{} nonzero entries in LU".format(LU.nnz)) logger.info("{} nonzero entries in LHS".format(solver.pencils[0].LHS.tocsc().nnz)) logger.info("{} fill in factor".format(LU.nnz/solver.pencils[0].LHS.tocsc().nnz)) first_step = False start_time = time.time() except: logger.error('Exception raised, triggering end of main loop.') raise finally: end_time = time.time() # Print statistics elapsed_time = end_time - start_time elapsed_sim_time = solver.sim_time N_iterations = solver.iteration - 1 logger.info('main loop time: {:e}'.format(elapsed_time)) logger.info('Iterations: {:d}'.format(N_iterations)) logger.info('iter/sec: {:g}'.format(N_iterations/(elapsed_time))) if N_iterations > 0: logger.info('Average timestep: {:e}'.format(elapsed_sim_time / N_iterations)) logger.info('beginning join operation') try: final_checkpoint = Checkpoint(data_dir, checkpoint_name='final_checkpoint') final_checkpoint.set_checkpoint(solver, wall_dt=1, mode="append") solver.step(dt) #clean this up in the future...works for now. post.merge_process_files(data_dir+'/final_checkpoint/') except: print('cannot save final checkpoint') if not(no_join): logger.info(data_dir+'/checkpoint/') post.merge_process_files(data_dir+'/checkpoint/') for task in analysis_tasks: logger.info(analysis_tasks[task].base_path) post.merge_process_files(analysis_tasks[task].base_path) if (atmosphere.domain.distributor.rank==0): N_TOTAL_CPU = atmosphere.domain.distributor.comm_cart.size # Print statistics print('-' * 40) total_time = end_time-initial_time main_loop_time = end_time - start_time startup_time = start_time-initial_time n_steps = solver.iteration-1 print(' startup time:', startup_time) print('main loop time:', main_loop_time) print(' total time:', total_time) if n_steps > 0: print(' iterations:', n_steps) print(' loop sec/iter:', main_loop_time/n_steps) print(' average dt:', solver.sim_time/n_steps) print(" N_cores, Nx, Nz, startup main loop, main loop/iter, main loop/iter/grid, n_cores*main loop/iter/grid") print('scaling:', ' {:d} {:d} {:d}'.format(N_TOTAL_CPU,nx,nz), ' {:8.3g} {:8.3g} {:8.3g} {:8.3g} {:8.3g}'.format(startup_time, main_loop_time, main_loop_time/n_steps, main_loop_time/n_steps/(nx*nz), N_TOTAL_CPU*main_loop_time/n_steps/(nx*nz))) print('-' * 40) return data_dir
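# Usage sketch (added for illustration, not part of the original script): FC_convection builds
# its own output directory name from the script name and the parameters via format_number, so
# only physical and resolution parameters are passed here, and the constructed path is returned.
# Assumes the module-level imports (np, os, sys, time, logging) and the stratified_dynamics and
# tools packages referenced above are available; values are illustrative only. Wrapped in a
# helper so nothing runs on import.
def _example_fc_multitrope_run():
    data_dir = FC_convection(Rayleigh=1e5, Prandtl=1, stiffness=1e3,
                             n_rho_cz=3.5, n_rho_rz=1,
                             nz_cz=64, nz_rz=64, single_chebyshev=False,
                             run_time=1.0, run_time_buoyancies=50,
                             label='test')
    print('run written to', data_dir)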
def FC_polytrope(dynamics_file, Rayleigh=1e4, Prandtl=1, aspect_ratio=4, Taylor=None, theta=0, nz=128, nx=None, ny=None, threeD=False, mesh=None, n_rho_cz=3, epsilon=1e-4, gamma=5 / 3, run_time=23.5, run_time_buoyancies=None, run_time_iter=np.inf, fixed_T=False, fixed_flux=False, mixed_flux_T=False, const_mu=True, const_kappa=True, dynamic_diffusivities=False, split_diffusivities=False, chemistry=True, ChemicalPrandtl=1, Qu_0=5e-8, phi_0=10, restart=None, start_new_files=False, scalar_file=None, rk222=False, safety_factor=0.2, max_writes=20, data_dir='./', out_cadence=0.1, no_coeffs=False, no_join=False, verbose=False): import dedalus.public as de from dedalus.tools import post from dedalus.extras import flow_tools import time import os import sys from stratified_dynamics import polytropes from tools.checkpointing import Checkpoint checkpoint_min = 30 initial_time = time.time() logger.info("Starting Dedalus script {:s}".format(sys.argv[0])) if nx is None: nx = int(np.round(nz * aspect_ratio)) if threeD and ny is None: ny = nx eqn_dict = { 'nx': nx, 'nz': nz, 'constant_kappa': const_kappa, 'constant_mu': const_mu, 'epsilon': epsilon, 'gamma': gamma, 'n_rho_cz': n_rho_cz, 'aspect_ratio': aspect_ratio, 'fig_dir': data_dir } if threeD: eqn_dict['mesh'] = mesh eqn_dict['nz'] = nz atmosphere = polytropes.FC_polytrope_rxn_3d(**eqn_dict) else: if dynamic_diffusivities: atmosphere = polytropes.FC_polytrope_2d_kappa(**eqn_dict) else: if chemistry: atmosphere = polytropes.FC_polytrope_rxn_2d(**eqn_dict) else: atmosphere = polytropes.FC_polytrope_2d(**eqn_dict) if epsilon < 1e-4: ncc_cutoff = 1e-14 elif epsilon > 1e-1: ncc_cutoff = 1e-6 else: ncc_cutoff = 1e-10 problem_dict = { 'ncc_cutoff': ncc_cutoff, 'split_diffusivities': split_diffusivities } if threeD: problem_dict['Taylor'] = Taylor problem_dict['theta'] = theta if chemistry: problem_dict['ChemicalPrandtl'] = ChemicalPrandtl problem_dict['Qu_0'] = Qu_0 problem_dict['phi_0'] = phi_0 atmosphere.set_IVP_problem(Rayleigh, Prandtl, **problem_dict) if fixed_flux: atmosphere.set_BC(fixed_flux=True, stress_free=True) elif mixed_flux_T: atmosphere.set_BC(mixed_flux_temperature=True, stress_free=True) else: atmosphere.set_BC(fixed_temperature=True, stress_free=True) problem = atmosphere.get_problem() if atmosphere.domain.distributor.rank == 0: if not os.path.exists('{:s}/'.format(data_dir)): os.mkdir('{:s}/'.format(data_dir)) if rk222: logger.info("timestepping using RK222") ts = de.timesteppers.RK222 cfl_safety_factor = safety_factor * 2 else: logger.info("timestepping using RK443") ts = de.timesteppers.RK443 cfl_safety_factor = safety_factor * 4 # Build solver solver = problem.build_solver(ts) #Check atmosphere logger.info("thermal_time = {:g}, top_thermal_time = {:g}".format(atmosphere.thermal_time,\ atmosphere.top_thermal_time)) logger.info("full atm HS check") atmosphere.check_atmosphere(make_plots=False, rho=atmosphere.get_full_rho(solver), T=atmosphere.get_full_T(solver)) if restart is None: mode = "overwrite" else: mode = "append" logger.info('checkpointing in {}'.format(data_dir)) checkpoint = Checkpoint(data_dir) if restart is None: atmosphere.set_IC(solver) dt = None else: logger.info("restarting from {}".format(restart)) dt = checkpoint.restart(restart, solver) checkpoint.set_checkpoint(solver, wall_dt=checkpoint_min * 60, mode=mode) if run_time_buoyancies != None: solver.stop_sim_time = solver.sim_time + run_time_buoyancies * atmosphere.buoyancy_time else: solver.stop_sim_time = 100 * atmosphere.thermal_time solver.stop_iteration = 
solver.iteration + run_time_iter solver.stop_wall_time = run_time * 3600 report_cadence = 1 output_time_cadence = out_cadence * atmosphere.buoyancy_time Hermitian_cadence = 100 logger.info("stopping after {:g} time units".format(solver.stop_sim_time)) logger.info("output cadence = {:g}".format(output_time_cadence)) analysis_tasks = atmosphere.initialize_output( solver, data_dir, sim_dt=output_time_cadence, coeffs_output=not (no_coeffs), mode=mode, max_writes=max_writes) # Reinjecting dynamics and tracers if desired logger.info("Re-injecting scalars") def reset_variable(key, h5_val, grid=False, grad=False): # Get variable k = solver.state[key] k.set_scales(1, keep_data=True) # Set grid or coefficient space if grid: gc = 'g' slice = solver.domain.dist.grid_layout.slices(k.meta[:]['scale']) else: gc = 'c' slice = solver.domain.dist.coeff_layout.slices(k.meta[:]['scale']) # Set initial value of variable k[gc] = h5_val[slice] if grad: # Set gradient if called for k.differentiate('z', out=k) logger.info("Re-injecting: {}".format(key)) return k objects = {} # Set all the dynamic variables if threeD: keys = ['u', 'u_z', 'v', 'v_z', 'w', 'w_z', 'T1', 'T1_z', 'ln_rho1'] grads = [ False, True, False, True, False, True, False, True, False, False, True, False, True ] h5_keys = ['u', 'u', 'v', 'v', 'w', 'w', 'T', 'T', 'ln_rho'] else: keys = ['u', 'u_z', 'w', 'w_z', 'T1', 'T1_z', 'ln_rho1'] grads = [ False, True, False, True, False, True, False, False, True, False, True ] h5_keys = ['u', 'u', 'w', 'w', 'T', 'T', 'ln_rho'] h5File_c = h5py.File(dynamics_file, 'r') dt = h5File_c['scales']['timestep'][-1] h5File_c = h5File_c['tasks'] for i, K in enumerate(keys): # Restarting with final profile of run objects[K] = reset_variable(K, h5File_c[h5_keys[i]][-1], grad=grads[i]) if scalar_file != 'None': keys = ['f', 'f_z', 'C', 'C_z', 'G', 'G_z'] grads = [False, True, False, True] h5_keys = ['f', 'f', 'C', 'C', 'G', 'G'] h5File_g = h5py.File(scalar_file, 'r')['tasks'] for i, K in enumerate(keys): # Restarting with initial profile objects[K] = reset_variable(K, h5File_g[h5_keys[i]][0], grid=True, grad=grads[i]) #Set up timestep defaults max_dt = output_time_cadence / 2 if dt is None: dt = max_dt cfl_cadence = 1 cfl_threshold = 0.1 CFL = flow_tools.CFL(solver, initial_dt=dt, cadence=cfl_cadence, safety=cfl_safety_factor, max_change=1.5, min_change=0.5, max_dt=max_dt, threshold=cfl_threshold) if threeD: CFL.add_velocities(('u', 'v', 'w')) else: CFL.add_velocities(('u', 'w')) # Flow properties flow = flow_tools.GlobalFlowProperty(solver, cadence=1) flow.add_property("Re_rms", name='Re') if verbose: flow.add_property("Pe_rms", name='Pe') flow.add_property("Nusselt_AB17", name='Nusselt') start_iter = solver.iteration start_sim_time = solver.sim_time try: start_time = time.time() start_iter = solver.iteration logger.info('starting main loop') good_solution = True first_step = True while solver.ok and good_solution: dt = CFL.compute_dt() # advance solver.step(dt) effective_iter = solver.iteration - start_iter if threeD and effective_iter % Hermitian_cadence == 0: for field in solver.state.fields: field.require_grid_space() # update lists if effective_iter % report_cadence == 0: Re_avg = flow.grid_average('Re') log_string = 'Iteration: {:5d}, Time: {:8.3e} ({:8.3e}), dt: {:8.3e}, '.format( solver.iteration - start_iter, solver.sim_time, (solver.sim_time - start_sim_time) / atmosphere.buoyancy_time, dt) if verbose: log_string += '\n\t\tRe: {:8.5e}/{:8.5e}'.format( Re_avg, flow.max('Re')) log_string += '; Pe: 
{:8.5e}/{:8.5e}'.format( flow.grid_average('Pe'), flow.max('Pe')) log_string += '; Nu: {:8.5e}/{:8.5e}'.format( flow.grid_average('Nusselt'), flow.max('Nusselt')) else: log_string += 'Re: {:8.3e}/{:8.3e}'.format( Re_avg, flow.max('Re')) logger.info(log_string) if not np.isfinite(Re_avg): good_solution = False logger.info( "Terminating run. Trapped on Reynolds = {}".format( Re_avg)) if first_step: if verbose: import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt fig = plt.figure() ax = fig.add_subplot(1, 1, 1) ax.spy(solver.pencils[0].L, markersize=1, markeredgewidth=0.0) fig.savefig(data_dir + "sparsity_pattern.png", dpi=1200) import scipy.sparse.linalg as sla LU = sla.splu(solver.pencils[0].LHS.tocsc(), permc_spec='NATURAL') fig = plt.figure() ax = fig.add_subplot(1, 2, 1) ax.spy(LU.L.A, markersize=1, markeredgewidth=0.0) ax = fig.add_subplot(1, 2, 2) ax.spy(LU.U.A, markersize=1, markeredgewidth=0.0) fig.savefig(data_dir + "sparsity_pattern_LU.png", dpi=1200) logger.info("{} nonzero entries in LU".format(LU.nnz)) logger.info("{} nonzero entries in LHS".format( solver.pencils[0].LHS.tocsc().nnz)) logger.info("{} fill in factor".format( LU.nnz / solver.pencils[0].LHS.tocsc().nnz)) first_step = False start_time = time.time() except: logger.error('Exception raised, triggering end of main loop.') raise finally: end_time = time.time() # Print statistics elapsed_time = end_time - start_time elapsed_sim_time = solver.sim_time N_iterations = solver.iteration - 1 logger.info('main loop time: {:e}'.format(elapsed_time)) logger.info('Iterations: {:d}'.format(N_iterations)) logger.info('iter/sec: {:g}'.format(N_iterations / (elapsed_time))) if N_iterations > 0: logger.info('Average timestep: {:e}'.format(elapsed_sim_time / N_iterations)) if not no_join: logger.info('beginning join operation') try: final_checkpoint = Checkpoint( data_dir, checkpoint_name='final_checkpoint') final_checkpoint.set_checkpoint(solver, wall_dt=1, mode="append") solver.step(dt) #clean this up in the future...works for now. 
post.merge_process_files(data_dir + '/final_checkpoint/', cleanup=True) except: print('cannot save final checkpoint') logger.info(data_dir + '/checkpoint/') post.merge_process_files(data_dir + '/checkpoint/', cleanup=True) for task in analysis_tasks.keys(): logger.info(analysis_tasks[task].base_path) post.merge_process_files(analysis_tasks[task].base_path, cleanup=True) if (atmosphere.domain.distributor.rank == 0): logger.info('main loop time: {:e}'.format(elapsed_time)) if start_iter > 1: logger.info('Iterations (this run): {:d}'.format(N_iterations - start_iter)) logger.info('Iterations (total): {:d}'.format(N_iterations - start_iter)) logger.info('iter/sec: {:g}'.format(N_iterations / (elapsed_time))) if N_iterations > 0: logger.info('Average timestep: {:e}'.format(elapsed_sim_time / N_iterations)) N_TOTAL_CPU = atmosphere.domain.distributor.comm_cart.size # Print statistics print('-' * 40) total_time = end_time - initial_time main_loop_time = end_time - start_time startup_time = start_time - initial_time n_steps = solver.iteration - 1 print(' startup time:', startup_time) print('main loop time:', main_loop_time) print(' total time:', total_time) if n_steps > 0: print(' iterations:', n_steps) print(' loop sec/iter:', main_loop_time / n_steps) print(' average dt:', solver.sim_time / n_steps) print( " N_cores, Nx, Nz, startup main loop, main loop/iter, main loop/iter/grid, n_cores*main loop/iter/grid" ) print( 'scaling:', ' {:d} {:d} {:d}'.format(N_TOTAL_CPU, nx, nz), ' {:8.3g} {:8.3g} {:8.3g} {:8.3g} {:8.3g}'.format( startup_time, main_loop_time, main_loop_time / n_steps, main_loop_time / n_steps / (nx * nz), N_TOTAL_CPU * main_loop_time / n_steps / (nx * nz))) print('-' * 40)
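# Usage sketch (added for illustration, not part of the original script): this driver re-injects
# dynamics from an existing Dedalus output file, so `dynamics_file` must be an HDF5 file with a
# 'scales/timestep' dataset and a 'tasks' group holding the velocity, temperature, and ln_rho
# fields read by reset_variable above. The path below is a hypothetical placeholder and the
# parameter values are illustrative only. Note that the function compares scalar_file against
# the string 'None' (docopt-style), so pass 'None' to skip scalar re-injection. The helper is
# never called automatically.
def _example_fc_polytrope_reinjection_run():
    FC_polytrope('./old_run/checkpoint/checkpoint_s1.h5',  # hypothetical dynamics file
                 Rayleigh=1e4, Prandtl=1, nz=64, aspect_ratio=4,
                 n_rho_cz=3, epsilon=1e-4,
                 chemistry=True, ChemicalPrandtl=1,
                 scalar_file='None',
                 run_time=1.0, run_time_buoyancies=50,
                 data_dir='./polytrope_rxn_test/')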
def FC_polytrope(Rayleigh=1e4, Prandtl=1, aspect_ratio=4, Taylor=None, theta=0, nz=128, nx=None, ny=None, threeD=False, mesh=None, n_rho_cz=3, epsilon=1e-4, gamma=5 / 3, run_time=23.5, run_time_buoyancies=None, run_time_iter=np.inf, fixed_T=False, fixed_flux=False, mixed_flux_T=False, const_mu=True, const_kappa=True, dynamic_diffusivities=False, split_diffusivities=False, restart=None, start_new_files=False, rk222=False, safety_factor=0.2, max_writes=20, no_slip=False, data_dir='./', out_cadence=0.1, no_coeffs=False, no_volumes=False, no_join=False, verbose=False): import dedalus.public as de from dedalus.tools import post from dedalus.extras import flow_tools import time import os import sys from stratified_dynamics import polytropes from tools.checkpointing import Checkpoint checkpoint_min = 30 initial_time = time.time() logger.info("Starting Dedalus script {:s}".format(sys.argv[0])) if nx is None: nx = int(np.round(nz * aspect_ratio)) if threeD and ny is None: ny = nx if threeD: atmosphere = polytropes.FC_polytrope_3d(nx=nx, ny=ny, nz=nz, mesh=mesh, constant_kappa=const_kappa, constant_mu=const_mu,\ epsilon=epsilon, gamma=gamma, n_rho_cz=n_rho_cz, aspect_ratio=aspect_ratio,\ fig_dir=data_dir) else: if dynamic_diffusivities: atmosphere = polytropes.FC_polytrope_2d_kappa_mu(nx=nx, nz=nz, constant_kappa=const_kappa, constant_mu=const_mu,\ epsilon=epsilon, gamma=gamma, n_rho_cz=n_rho_cz, aspect_ratio=aspect_ratio,\ fig_dir=data_dir) else: atmosphere = polytropes.FC_polytrope_2d(nx=nx, nz=nz, constant_kappa=const_kappa, constant_mu=const_mu,\ epsilon=epsilon, gamma=gamma, n_rho_cz=n_rho_cz, aspect_ratio=aspect_ratio,\ fig_dir=data_dir) if epsilon < 1e-4: ncc_cutoff = 1e-14 elif epsilon > 1e-1: ncc_cutoff = 1e-6 else: ncc_cutoff = 1e-10 if threeD: atmosphere.set_IVP_problem(Rayleigh, Prandtl, Taylor=Taylor, theta=theta, ncc_cutoff=ncc_cutoff, split_diffusivities=split_diffusivities) else: atmosphere.set_IVP_problem(Rayleigh, Prandtl, ncc_cutoff=ncc_cutoff, split_diffusivities=split_diffusivities) bc_dict = { 'stress_free': False, 'no_slip': False, 'fixed_flux': False, 'mixed_flux_temperature': False, 'fixed_temperature': False } if no_slip: bc_dict['no_slip'] = True else: bc_dict['stress_free'] = True if fixed_flux: bc_dict['fixed_flux'] = True elif mixed_flux_T: bc_dict['mixed_flux_temperature'] = True else: bc_dict['fixed_temperature'] = True atmosphere.set_BC(**bc_dict) problem = atmosphere.get_problem() if atmosphere.domain.distributor.rank == 0: if not os.path.exists('{:s}/'.format(data_dir)): os.mkdir('{:s}/'.format(data_dir)) if rk222: logger.info("timestepping using RK222") ts = de.timesteppers.RK222 cfl_safety_factor = safety_factor * 2 else: logger.info("timestepping using RK443") ts = de.timesteppers.RK443 cfl_safety_factor = safety_factor * 4 # Build solver solver = problem.build_solver(ts) #Check atmosphere logger.info("thermal_time = {:g}, top_thermal_time = {:g}".format(atmosphere.thermal_time,\ atmosphere.top_thermal_time)) logger.info("full atm HS check") atmosphere.check_atmosphere(make_plots=False, rho=atmosphere.get_full_rho(solver), T=atmosphere.get_full_T(solver)) if restart is None or start_new_files: mode = "overwrite" else: mode = "append" logger.info('checkpointing in {}'.format(data_dir)) checkpoint = Checkpoint(data_dir) if restart is None: atmosphere.set_IC(solver) dt = None else: logger.info("restarting from {}".format(restart)) dt = checkpoint.restart(restart, solver) checkpoint.set_checkpoint(solver, wall_dt=checkpoint_min * 60, mode=mode) if 
run_time_buoyancies != None: solver.stop_sim_time = solver.sim_time + run_time_buoyancies * atmosphere.buoyancy_time else: solver.stop_sim_time = 100 * atmosphere.thermal_time solver.stop_iteration = solver.iteration + run_time_iter solver.stop_wall_time = run_time * 3600 report_cadence = 1 output_time_cadence = out_cadence * atmosphere.buoyancy_time Hermitian_cadence = 100 logger.info("stopping after {:g} time units".format(solver.stop_sim_time)) logger.info("output cadence = {:g}".format(output_time_cadence)) if threeD: analysis_tasks = atmosphere.initialize_output( solver, data_dir, sim_dt=output_time_cadence, coeffs_output=not (no_coeffs), mode=mode, max_writes=max_writes, volumes_output=not (no_volumes)) else: analysis_tasks = atmosphere.initialize_output( solver, data_dir, sim_dt=output_time_cadence, coeffs_output=not (no_coeffs), mode=mode, max_writes=max_writes) #Set up timestep defaults max_dt = output_time_cadence if dt is None: dt = max_dt cfl_cadence = 1 cfl_threshold = 0.1 CFL = flow_tools.CFL(solver, initial_dt=dt, cadence=cfl_cadence, safety=cfl_safety_factor, max_change=1.5, min_change=0.5, max_dt=max_dt, threshold=cfl_threshold) if threeD: CFL.add_velocities(('u', 'v', 'w')) else: CFL.add_velocities(('u', 'w')) # Flow properties flow = flow_tools.GlobalFlowProperty(solver, cadence=1) flow.add_property("Re_rms", name='Re') if verbose: flow.add_property("Pe_rms", name='Pe') flow.add_property("Nusselt_AB17", name='Nusselt') start_iter = solver.iteration start_sim_time = solver.sim_time try: start_time = time.time() start_iter = solver.iteration logger.info('starting main loop') good_solution = True first_step = True while solver.ok and good_solution: dt = CFL.compute_dt() # advance solver.step(dt) effective_iter = solver.iteration - start_iter Re_avg = flow.grid_average('Re') if threeD and effective_iter % Hermitian_cadence == 0: for field in solver.state.fields: field.require_grid_space() # update lists if effective_iter % report_cadence == 0: log_string = 'Iteration: {:5d}, Time: {:8.3e} ({:8.3e}), dt: {:8.3e}, '.format( solver.iteration - start_iter, solver.sim_time, (solver.sim_time - start_sim_time) / atmosphere.buoyancy_time, dt) if verbose: log_string += '\n\t\tRe: {:8.5e}/{:8.5e}'.format( Re_avg, flow.max('Re')) log_string += '; Pe: {:8.5e}/{:8.5e}'.format( flow.grid_average('Pe'), flow.max('Pe')) log_string += '; Nu: {:8.5e}/{:8.5e}'.format( flow.grid_average('Nusselt'), flow.max('Nusselt')) else: log_string += 'Re: {:8.3e}/{:8.3e}'.format( Re_avg, flow.max('Re')) logger.info(log_string) if not np.isfinite(Re_avg): good_solution = False logger.info( "Terminating run. 
Trapped on Reynolds = {}".format( Re_avg)) if first_step: if verbose: import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt fig = plt.figure() ax = fig.add_subplot(1, 1, 1) ax.spy(solver.pencils[0].L, markersize=1, markeredgewidth=0.0) fig.savefig(data_dir + "sparsity_pattern.png", dpi=1200) import scipy.sparse.linalg as sla LU = sla.splu(solver.pencils[0].LHS.tocsc(), permc_spec='NATURAL') fig = plt.figure() ax = fig.add_subplot(1, 2, 1) ax.spy(LU.L.A, markersize=1, markeredgewidth=0.0) ax = fig.add_subplot(1, 2, 2) ax.spy(LU.U.A, markersize=1, markeredgewidth=0.0) fig.savefig(data_dir + "sparsity_pattern_LU.png", dpi=1200) logger.info("{} nonzero entries in LU".format(LU.nnz)) logger.info("{} nonzero entries in LHS".format( solver.pencils[0].LHS.tocsc().nnz)) logger.info("{} fill in factor".format( LU.nnz / solver.pencils[0].LHS.tocsc().nnz)) first_step = False start_time = time.time() except: logger.error('Exception raised, triggering end of main loop.') finally: end_time = time.time() # Print statistics elapsed_time = end_time - start_time elapsed_sim_time = solver.sim_time N_iterations = solver.iteration - 1 logger.info('main loop time: {:e}'.format(elapsed_time)) logger.info('Iterations: {:d}'.format(N_iterations)) logger.info('iter/sec: {:g}'.format(N_iterations / (elapsed_time))) if N_iterations > 0: logger.info('Average timestep: {:e}'.format(elapsed_sim_time / N_iterations)) if not no_join: logger.info('beginning join operation') try: final_checkpoint = Checkpoint( data_dir, checkpoint_name='final_checkpoint') final_checkpoint.set_checkpoint(solver, wall_dt=1, mode="append") solver.step(dt) #clean this up in the future...works for now. post.merge_process_files(data_dir + '/final_checkpoint/', cleanup=False) except: print('cannot save final checkpoint') logger.info(data_dir + '/checkpoint/') post.merge_process_files(data_dir + '/checkpoint/', cleanup=False) for task in analysis_tasks.keys(): logger.info(analysis_tasks[task].base_path) post.merge_process_files(analysis_tasks[task].base_path, cleanup=False) if (atmosphere.domain.distributor.rank == 0): logger.info('main loop time: {:e}'.format(elapsed_time)) if start_iter > 1: logger.info('Iterations (this run): {:d}'.format(N_iterations - start_iter)) logger.info('Iterations (total): {:d}'.format(N_iterations - start_iter)) logger.info('iter/sec: {:g}'.format(N_iterations / (elapsed_time))) if N_iterations > 0: logger.info('Average timestep: {:e}'.format(elapsed_sim_time / N_iterations)) N_TOTAL_CPU = atmosphere.domain.distributor.comm_cart.size # Print statistics print('-' * 40) total_time = end_time - initial_time main_loop_time = end_time - start_time startup_time = start_time - initial_time n_steps = solver.iteration - 1 print(' startup time:', startup_time) print('main loop time:', main_loop_time) print(' total time:', total_time) if n_steps > 0: print(' iterations:', n_steps) print(' loop sec/iter:', main_loop_time / n_steps) print(' average dt:', solver.sim_time / n_steps) print( " N_cores, Nx, Nz, startup main loop, main loop/iter, main loop/iter/grid, n_cores*main loop/iter/grid" ) print( 'scaling:', ' {:d} {:d} {:d}'.format(N_TOTAL_CPU, nx, nz), ' {:8.3g} {:8.3g} {:8.3g} {:8.3g} {:8.3g}'.format( startup_time, main_loop_time, main_loop_time / n_steps, main_loop_time / n_steps / (nx * nz), N_TOTAL_CPU * main_loop_time / n_steps / (nx * nz))) print('-' * 40)
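# Usage sketch (added for illustration, not part of the original script): the plain polytrope
# driver above. Assumes the module-level np/logging imports and the stratified_dynamics and
# tools packages it references; values are illustrative, and in 3D a process mesh can be
# supplied via `mesh=[n1, n2]`. Wrapped in a helper so nothing runs on import.
def _example_fc_polytrope_run():
    FC_polytrope(Rayleigh=1e4, Prandtl=1, nz=64, aspect_ratio=4,
                 n_rho_cz=3, epsilon=1e-4,
                 mixed_flux_T=True, no_slip=False,
                 run_time=1.0, run_time_buoyancies=50,
                 data_dir='./polytrope_test/')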
def FC_convection(Rayleigh=1e6, Prandtl=1, stiffness=3, m_rz=3, gamma=5/3,
                  MHD=False, MagneticPrandtl=1, B0_amplitude=1,
                  n_rho_cz=1, n_rho_rz=5, nz_cz=128, nz_rz=128, nx=None,
                  width=None, single_chebyshev=False,
                  rk222=False, superstep=False, dense=False, nz_dense=64,
                  oz=False, fixed_flux=False,
                  run_time=23.5, run_time_buoyancies=np.inf, run_time_iter=np.inf,
                  dynamic_diffusivities=False,
                  max_writes=20, out_cadence=0.1, no_coeffs=False, no_join=False,
                  restart=None, data_dir='./', verbose=False, label=None):
    def format_number(number, no_format_min=0.1, no_format_max=10):
        if number > no_format_max or number < no_format_min:
            try:
                mantissa = "{:e}".format(number).split("+")[0].split("e")[0].rstrip("0") or "0"
                power = "{:e}".format(number).split("+")[1].lstrip("0") or "0"
            except:
                mantissa = "{:e}".format(number).split("-")[0].split("e")[0].rstrip("0") or "0"
                power = "{:e}".format(number).split("-")[1].lstrip("0") or "0"
                power = "-" + power
            if mantissa[-1] == ".":
                mantissa = mantissa[:-1]
            mantissa += "e"
        else:
            mantissa = "{:f}".format(number).rstrip("0") or "0"
            if mantissa[-1] == ".":
                mantissa = mantissa[:-1]
            power = ""
        number_string = mantissa + power
        return number_string

    # save data in directory named after script
    # (the hard-coded `data_dir = './'` that shadowed the argument has been dropped
    # so the supplied data_dir is honored)
    if data_dir[-1] != '/':
        data_dir += '/'
    data_dir += sys.argv[0].split('.py')[0]
    data_dir += "_nrhocz{}_Ra{}_S{}".format(format_number(n_rho_cz),
                                            format_number(Rayleigh),
                                            format_number(stiffness))
    if width:
        data_dir += "_erf{}".format(format_number(width))
    if MHD:  # was `args['--MHD']`, which reached outside the function; use the MHD argument
        data_dir += '_MHD'
    if label:
        data_dir += "_{}".format(label)
    data_dir += '/'

    from dedalus.tools.config import config
    config['logging']['filename'] = os.path.join(data_dir, 'logs/dedalus_log')
    config['logging']['file_level'] = 'DEBUG'

    import mpi4py.MPI
    if mpi4py.MPI.COMM_WORLD.rank == 0:
        if not os.path.exists('{:s}/'.format(data_dir)):
            os.makedirs('{:s}/'.format(data_dir))
        logdir = os.path.join(data_dir, 'logs')
        if not os.path.exists(logdir):
            os.mkdir(logdir)
    logger = logging.getLogger(__name__)
    logger.info("saving run in: {}".format(data_dir))

    import dedalus.public as de
    from dedalus.tools import post
    from dedalus.extras import flow_tools
    from dedalus.core.future import FutureField

    from stratified_dynamics import multitropes
    from tools.checkpointing import Checkpoint

    checkpoint_min = 30

    initial_time = time.time()
    logger.info("Starting Dedalus script {:s}".format(sys.argv[0]))

    constant_Prandtl = True
    stable_top = True
    mixed_temperature_flux = True

    # Set domain
    if nx is None:
        nx = nz_cz * 4
    if single_chebyshev:
        nz = nz_cz
        nz_list = [nz_cz]
    else:
        nz = nz_rz + nz_cz
        nz_list = [nz_rz, nz_cz]

    eqns_dict = {
        'stiffness': stiffness,
        'nx': nx,
        'nz': nz_list,
        'n_rho_cz': n_rho_cz,
        'n_rho_rz': n_rho_rz,
        'verbose': verbose,
        'width': width,
        'constant_Prandtl': constant_Prandtl,
        'stable_top': stable_top,
        'gamma': gamma,
        'm_rz': m_rz
    }
    if MHD:
        atmosphere = multitropes.FC_MHD_multitrope_guidefield_2d(**eqns_dict)
        atmosphere.set_IVP_problem(Rayleigh, Prandtl, MagneticPrandtl,
                                   guidefield_amplitude=B0_amplitude)
    else:
        atmosphere = multitropes.FC_multitrope(**eqns_dict)
        atmosphere.set_IVP_problem(Rayleigh, Prandtl)

    atmosphere.set_BC()
    problem = atmosphere.get_problem()

    if atmosphere.domain.distributor.rank == 0:
        if not os.path.exists('{:s}/'.format(data_dir)):
            os.mkdir('{:s}/'.format(data_dir))

    if rk222:
        logger.info("timestepping using RK222")
        ts = de.timesteppers.RK222
        cfl_safety_factor = 0.2 * 2
    else:
        logger.info("timestepping using RK443")
        ts = de.timesteppers.RK443
        cfl_safety_factor = 0.2 * 4

    # Build solver
    solver = problem.build_solver(ts)

    if restart is None:
        mode = "overwrite"
    else:
        mode = "append"

    checkpoint = Checkpoint(data_dir)

    # initial conditions
    if restart is None:
        atmosphere.set_IC(solver)
        dt = None
    else:
        logger.info("restarting from {}".format(restart))
        dt = checkpoint.restart(restart, solver)
    checkpoint.set_checkpoint(solver, wall_dt=checkpoint_min * 60, mode=mode)

    logger.info("thermal_time = {:g}, top_thermal_time = {:g}".format(
        atmosphere.thermal_time, atmosphere.top_thermal_time))

    max_dt = atmosphere.min_BV_time
    max_dt = atmosphere.buoyancy_time * out_cadence
    if dt is None:
        dt = max_dt

    report_cadence = 1
    output_time_cadence = out_cadence * atmosphere.buoyancy_time
    solver.stop_sim_time = solver.sim_time + run_time_buoyancies * atmosphere.buoyancy_time
    solver.stop_iteration = solver.iteration + run_time_iter
    solver.stop_wall_time = run_time * 3600

    logger.info("output cadence = {:g}".format(output_time_cadence))
    analysis_tasks = atmosphere.initialize_output(solver, data_dir,
                                                  coeffs_output=not (no_coeffs),
                                                  sim_dt=output_time_cadence,
                                                  max_writes=max_writes, mode=mode)

    cfl_cadence = 1
    CFL = flow_tools.CFL(solver, initial_dt=dt, cadence=cfl_cadence,
                         safety=cfl_safety_factor,
                         max_change=1.5, min_change=0.5, max_dt=max_dt, threshold=0.1)
    CFL.add_velocities(('u', 'w'))
    if MHD:
        CFL.add_velocities(('Bx/sqrt(4*pi*rho_full)', 'Bz/sqrt(4*pi*rho_full)'))

    # Flow properties
    flow = flow_tools.GlobalFlowProperty(solver, cadence=1)
    flow.add_property("Re_rms", name='Re')
    if MHD:
        flow.add_property("abs(dx(Bx) + dz(Bz))", name='divB')

    try:
        start_time = time.time()
        good_solution = True
        while solver.ok and good_solution:
            dt = CFL.compute_dt()
            # advance
            solver.step(dt)

            # update lists
            if solver.iteration % report_cadence == 0:
                Re_avg = flow.grid_average('Re')
                if not np.isfinite(Re_avg):
                    # flag the loop to stop rather than assigning to solver.ok,
                    # which is a read-only property; mirrors the other run scripts
                    good_solution = False
                log_string = 'Iteration: {:5d}, Time: {:8.3e} ({:8.3e}), dt: {:8.3e}, '.format(
                    solver.iteration, solver.sim_time,
                    solver.sim_time / atmosphere.buoyancy_time, dt)
                log_string += 'Re: {:8.3e}/{:8.3e}'.format(Re_avg, flow.max('Re'))
                if MHD:
                    log_string += ', divB: {:8.3e}/{:8.3e}'.format(
                        flow.grid_average('divB'), flow.max('divB'))
                logger.info(log_string)
    except:
        logger.error('Exception raised, triggering end of main loop.')
        raise
    finally:
        end_time = time.time()

        # Print statistics
        elapsed_time = end_time - start_time
        elapsed_sim_time = solver.sim_time
        N_iterations = solver.iteration
        logger.info('main loop time: {:e}'.format(elapsed_time))
        logger.info('Iterations: {:d}'.format(N_iterations))
        logger.info('iter/sec: {:g}'.format(N_iterations / (elapsed_time)))
        logger.info('Average timestep: {:e}'.format(elapsed_sim_time / N_iterations))

        if not no_join:
            logger.info('beginning join operation')
            logger.info(data_dir + '/checkpoint/')
            post.merge_process_files(data_dir + '/checkpoint/', cleanup=False)
            for task in analysis_tasks:
                logger.info(analysis_tasks[task].base_path)
                post.merge_process_files(analysis_tasks[task].base_path, cleanup=False)

        if (atmosphere.domain.distributor.rank == 0):
            logger.info('main loop time: {:e}'.format(elapsed_time))
            logger.info('Iterations: {:d}'.format(N_iterations))
            logger.info('iter/sec: {:g}'.format(N_iterations / (elapsed_time)))
            logger.info('Average timestep: {:e}'.format(elapsed_sim_time / N_iterations))

            N_TOTAL_CPU = atmosphere.domain.distributor.comm_cart.size

            # Print statistics
            print('-' * 40)
            total_time = end_time - initial_time
            main_loop_time = end_time - start_time
            startup_time = start_time - initial_time
            n_steps = solver.iteration - 1
            print(' startup time:', startup_time)
            print('main loop time:', main_loop_time)
            print(' total time:', total_time)
            print('Iterations:', solver.iteration)
            print('Average timestep:', solver.sim_time / n_steps)
            print(" N_cores, Nx, Nz, startup main loop, main loop/iter, main loop/iter/grid, n_cores*main loop/iter/grid")
            print('scaling:',
                  ' {:d} {:d} {:d}'.format(N_TOTAL_CPU, nx, nz),
                  ' {:8.3g} {:8.3g} {:8.3g} {:8.3g} {:8.3g}'.format(startup_time,
                                                                    main_loop_time,
                                                                    main_loop_time / n_steps,
                                                                    main_loop_time / n_steps / (nx * nz),
                                                                    N_TOTAL_CPU * main_loop_time / n_steps / (nx * nz)))
            print('-' * 40)
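# Usage sketch (added for illustration, not part of the original script): the MHD variant of
# FC_convection adds the guide-field amplitude and magnetic Prandtl number, extends the CFL
# frequencies with the Alfven speeds, and tracks divB as a flow diagnostic. As in the other
# drivers, the output directory is constructed from the script name and parameters; the values
# below are illustrative only, and the helper is never called automatically.
def _example_fc_mhd_multitrope_run():
    FC_convection(Rayleigh=1e5, Prandtl=1, stiffness=3,
                  MHD=True, MagneticPrandtl=1, B0_amplitude=1,
                  n_rho_cz=1, n_rho_rz=5, nz_cz=64, nz_rz=64,
                  run_time=1.0, run_time_buoyancies=50,
                  label='mhd_test')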