def ales244_static_les_test(pp=None, sp=None):
    """
    Arguments:
    ----------
    pp: (optional) program parameters, parsed by argument parser
        provided by this file
    sp: (optional) solver parameters, parsed by spectralLES.parser
    """

    if comm.rank == 0:
        print("\n----------------------------------------------------------")
        print("MPI-parallel Python spectralLES simulation of problem \n"
              "`Homogeneous Isotropic Turbulence' started with "
              "{} tasks at {}.".format(comm.size, timeofday()))
        print("----------------------------------------------------------")

    # if function called without passing in parsed arguments, then parse
    # the arguments from the command line
    if pp is None:
        pp = hit_parser.parse_known_args()[0]

    if sp is None:
        sp = spectralLES.parser.parse_known_args()[0]

    if comm.rank == 0:
        print('\nProblem Parameters:\n-------------------')
        for k, v in vars(pp).items():
            print(k, v)
        print('\nSpectralLES Parameters:\n-----------------------')
        for k, v in vars(sp).items():
            print(k, v)
        print("\n----------------------------------------------------------\n")

    assert len(set(pp.N)) == 1, ('Error, this beta-release HIT program '
                                 'requires equal mesh dimensions')
    N = pp.N[0]
    assert len(set(pp.L)) == 1, ('Error, this beta-release HIT program '
                                 'requires equal domain dimensions')
    L = pp.L[0]

    if N % comm.size > 0:
        if comm.rank == 0:
            print('Error: job started with improper number of MPI tasks for '
                  'the size of the data specified!')
        MPI.Finalize()
        sys.exit(1)

    # -------------------------------------------------------------------------
    # Configure the solver, writer, and analyzer

    # -- construct solver instance from sp's attribute dictionary
    solver = ales244_solver(comm, **vars(sp))

    U_hat = solver.U_hat
    U = solver.U
    omega = solver.omega
    K = solver.K

    # -- configure solver instance to solve the NSE with the vorticity
    #    formulation of the advective term, linear forcing, and
    #    the ales244 SGS model
    solver.computeAD = solver.computeAD_vorticity_form
    Sources = [solver.computeSource_linear_forcing,
               solver.computeSource_ales244_SGS,
               ]

    H_244 = np.loadtxt('h_ij.dat', usecols=(1, 2, 3, 4, 5, 6), unpack=True)
    kwargs = {'H_244': H_244, 'dvScale': None}
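    # (added note, inferred from usecols/unpack above) with unpack=True and
    # six columns read, H_244 arrives with shape (6, n_rows): one coefficient
    # row per independent component of the symmetric SGS stress tensor, which
    # is presumably how computeSource_ales244_SGS consumes it. A cheap guard,
    # if desired:
    #     assert H_244.shape[0] == 6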

    # -- form HIT initial conditions from either user-defined values or
    #    physics-based relationships using epsilon and L
    Urms = 1.2*(pp.epsilon*L)**(1./3.)              # empirical coefficient
    Einit = getattr(pp, 'Einit', None) or Urms**2   # == 2*KE_equilibrium
    kexp = getattr(pp, 'kexp', None) or -1./3.      # -> E(k) ~ k^(-2./3.)
    kpeak = getattr(pp, 'kpeak', None) or N//4      # ~ kmax/2

    # -- currently using a fixed random seed of comm.rank for testing
    solver.initialize_HIT_random_spectrum(Einit, kexp, kpeak, rseed=comm.rank)

    # -- configure the writer and analyzer from both pp and sp attributes
    writer = mpiWriter(comm, odir=pp.odir, N=N)
    analyzer = mpiAnalyzer(comm, odir=pp.adir, pid=pp.pid, L=L, N=N,
                           config='hit', method='spectral')

    Ek_fmt = "\widehat{{{0}}}^*\widehat{{{0}}}".format

    # -------------------------------------------------------------------------
    # Setup the various time and IO counters

    tauK = sqrt(pp.nu/pp.epsilon)       # Kolmogorov time-scale
    taul = 0.2*L*sqrt(3)/Urms           # 0.2 is empirical coefficient
    c = pp.cfl*sqrt(2*Einit)/Urms
    dt = solver.new_dt_constant_nu(c)   # use as estimate

    if pp.tlimit == np.inf:   # put a very large but finite limit on the run
        pp.tlimit = 262*taul  # such as (256+6)*tau, for spinup and 128 samples

    dt_rst = getattr(pp, 'dt_rst', None) or 2*taul
    dt_spec = getattr(pp, 'dt_spec', None) or max(0.1*taul, tauK, 10*dt)
    dt_drv = getattr(pp, 'dt_drv', None) or max(tauK, 10*dt)

    t_sim = t_rst = t_spec = t_drv = 0.0
    tstep = irst = ispec = 0

    # -------------------------------------------------------------------------
    # Run the simulation

    while t_sim < pp.tlimit + 1.e-8:

        # -- Update the dynamic dt based on CFL constraint
        dt = solver.new_dt_constant_nu(pp.cfl)
        t_test = t_sim + 0.5*dt

        # -- output log messages every step if needed/wanted
        KE = 0.5*comm.allreduce(psum(np.square(U)))/solver.Nx
        if comm.rank == 0:
            print("cycle = %7d time = %15.8e dt = %15.8e KE = %15.8e"
                  % (tstep, t_sim, dt, KE))

        # -- output snapshots and data analysis products
        if t_test >= t_spec:
            analyzer.spectral_density(U_hat, '%3.3d_u' % ispec,
                                      'velocity PSD\t%s' % Ek_fmt('u_i'))

            irfft3(comm, 1j*(K[0]*U_hat[1] - K[1]*U_hat[0]), omega[2])
            irfft3(comm, 1j*(K[2]*U_hat[0] - K[0]*U_hat[2]), omega[1])
            irfft3(comm, 1j*(K[1]*U_hat[2] - K[2]*U_hat[1]), omega[0])

            analyzer.spectral_density(omega, '%3.3d_omga' % ispec,
                                      'vorticity PSD\t%s' % Ek_fmt('\omega_i'))

            t_spec += dt_spec
            ispec += 1

        if t_test >= t_rst:
            writer.write_scalar('Velocity1_%3.3d.rst' % irst, U[0], np.float64)
            writer.write_scalar('Velocity2_%3.3d.rst' % irst, U[1], np.float64)
            writer.write_scalar('Velocity3_%3.3d.rst' % irst, U[2], np.float64)
            t_rst += dt_rst
            irst += 1

        # -- Update the forcing pattern
        if t_test >= t_drv:
            # call solver.computeSource_linear_forcing to compute dvScale only
            kwargs['dvScale'] = Sources[0](computeRHS=False)
            t_drv += dt_drv
            if comm.rank == 0:
                print("------ updated linear forcing pattern ------")

        # -- integrate the solution forward in time
        solver.RK4_integrate(dt, *Sources, **kwargs)

        t_sim += dt
        tstep += 1

        sys.stdout.flush()  # forces Python 3 to flush print statements

    # -------------------------------------------------------------------------
    # Finalize the simulation

    irfft3(comm, 1j*(K[0]*U_hat[1] - K[1]*U_hat[0]), omega[2])
    irfft3(comm, 1j*(K[2]*U_hat[0] - K[0]*U_hat[2]), omega[1])
    irfft3(comm, 1j*(K[1]*U_hat[2] - K[2]*U_hat[1]), omega[0])

    analyzer.spectral_density(U_hat, '%3.3d_u' % ispec,
                              'velocity PSD\t%s' % Ek_fmt('u_i'))
    analyzer.spectral_density(omega, '%3.3d_omga' % ispec,
                              'vorticity PSD\t%s' % Ek_fmt('\omega_i'))

    writer.write_scalar('Velocity1_%3.3d.rst' % irst, U[0], np.float64)
    writer.write_scalar('Velocity2_%3.3d.rst' % irst, U[1], np.float64)
    writer.write_scalar('Velocity3_%3.3d.rst' % irst, U[2], np.float64)

    return
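
# A minimal helper sketch (added; not part of the original drivers) of the
# spectral curl that the drivers inline: omega = F^{-1}[ i K x U_hat ]. It
# assumes the three-argument, fill-in-place form of irfft3 used in
# ales244_static_les_test() above.
def spectral_curl(comm, K, U_hat, omega):
    # omega_i = eps_ijk d(u_k)/d(x_j), evaluated as (iK) x U_hat in spectral space
    irfft3(comm, 1j*(K[1]*U_hat[2] - K[2]*U_hat[1]), omega[0])
    irfft3(comm, 1j*(K[2]*U_hat[0] - K[0]*U_hat[2]), omega[1])
    irfft3(comm, 1j*(K[0]*U_hat[1] - K[1]*U_hat[0]), omega[2])
    return omega
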
def homogeneous_isotropic_turbulence(pp=None, sp=None):
    """
    Arguments:
    ----------
    pp: (optional) program parameters, parsed by argument parser
        provided by this file
    sp: (optional) solver parameters, parsed by spectralLES.parser
    """

    if comm.rank == 0:
        print("\n----------------------------------------------------------")
        print("MPI-parallel Python spectralLES simulation of problem \n"
              "`Homogeneous Isotropic Turbulence' started with "
              "{} tasks at {}.".format(comm.size, timeofday()))
        print("----------------------------------------------------------")

    # if function called without passing in parsed arguments, then parse
    # the arguments from the command line
    if pp is None:
        pp = hit_parser.parse_known_args()[0]

    if sp is None:
        sp = spectralLES.parser.parse_known_args()[0]

    if comm.rank == 0:
        print('\nProblem Parameters:\n-------------------')
        for k, v in vars(pp).items():
            print(k, v)
        print('\nSpectralLES Parameters:\n-----------------------')
        for k, v in vars(sp).items():
            print(k, v)
        print("\n----------------------------------------------------------\n")

    assert len(set(pp.N)) == 1, ('Error, this beta-release HIT program '
                                 'requires equal mesh dimensions')
    N = pp.N[0]
    assert len(set(pp.L)) == 1, ('Error, this beta-release HIT program '
                                 'requires equal domain dimensions')
    L = pp.L[0]

    if N % comm.size > 0:
        if comm.rank == 0:
            print('Error: job started with improper number of MPI tasks for '
                  'the size of the data specified!')
        MPI.Finalize()
        sys.exit(1)

    # -------------------------------------------------------------------------
    # Configure the solver, writer, and analyzer

    # -- construct solver instance from sp's attribute dictionary
    solver = spectralLES(comm, **vars(sp))

    # -- configure solver instance to solve the NSE with the vorticity
    #    formulation of the advective term, linear forcing, and the
    #    Smagorinsky SGS model
    solver.computeAD = solver.computeAD_vorticity_form
    Sources = [solver.computeSource_linear_forcing,
               solver.computeSource_Smagorinksy_SGS,
               ]

    Ck = 1.6
    Cs = sqrt((pi**-2)*((3*Ck)**-1.5))  # == 0.098...
    # Cs = 0.2
    kwargs = {'Cs': Cs, 'dvScale': None}
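    # (added check) quick arithmetic for the Lilly-type estimate above, with
    # Ck = 1.6:
    #     Cs = sqrt(pi**-2 * 4.8**-1.5) = sqrt(0.10132*0.09509) ~= 0.098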

    # -- form HIT initial conditions from either user-defined values or
    #    physics-based relationships using epsilon and L
    Urms = 1.2*(pp.epsilon*L)**(1./3.)              # empirical coefficient
    Einit = getattr(pp, 'Einit', None) or Urms**2   # == 2*KE_equilibrium
    kexp = getattr(pp, 'kexp', None) or -1./3.      # -> E(k) ~ k^(-2./3.)
    kpeak = getattr(pp, 'kpeak', None) or N//4      # ~ kmax/2

    # !! currently using a fixed random seed of comm.rank for testing
    solver.initialize_HIT_random_spectrum(Einit, kexp, kpeak, rseed=comm.rank)

    U_hat = solver.U_hat
    U = solver.U
    omega = solver.omega
    K = solver.K

    # -- configure the writer and analyzer from both pp and sp attributes
    writer = mpiWriter(comm, odir=pp.odir, N=N)
    analyzer = mpiAnalyzer(comm, odir=pp.adir, pid=pp.pid, L=L, N=N,
                           config='hit', method='spectral')

    Ek_fmt = "\widehat{{{0}}}^*\widehat{{{0}}}".format

    emin = np.inf
    emax = -np.inf
    analyzer.mpi_moments_file = '%s%s.moments' % (analyzer.odir, pp.pid)

    # -------------------------------------------------------------------------
    # Setup the various time and IO counters

    tauK = sqrt(pp.nu/pp.epsilon)       # Kolmogorov time-scale
    taul = 0.2*L*sqrt(3)/Urms           # 0.2 is empirical coefficient
    c = pp.cfl*sqrt(2*Einit)/Urms
    dt = solver.new_dt_constant_nu(c)   # use as estimate

    if comm.rank == 0:
        print("Integral time scale = {}".format(taul))

    if pp.tlimit == np.inf:   # put a very large but finite limit on the run
        pp.tlimit = 262*taul  # such as (256+6)*tau, for spinup and 128 samples

    dt_rst = getattr(pp, 'dt_rst', None) or 4*taul
    dt_bin = getattr(pp, 'dt_bin', None) or taul
    dt_stat = getattr(pp, 'dt_stat', None) or max(0.2*taul, 2*tauK, 20*dt)
    dt_spec = getattr(pp, 'dt_spec', None) or max(0.1*taul, tauK, 10*dt)
    dt_drv = getattr(pp, 'dt_drv', None) or max(tauK, 10*dt)

    t_sim = t_rst = t_bin = t_stat = t_spec = t_drv = 0.0
    tstep = irst = ibin = istat = ispec = 0

    # -- ensure that analysis and simulation outputs are properly synchronized
    #    This assumes that dt_spec < dt_stat < dt_bin < dt_rst, and that any
    #    division remainder smaller than 10% of the divisor is a round-off
    #    error in what should be an integer multiple, caused by the user
    #    supplying too few significant digits
    if (dt_stat % dt_spec) < 0.1*dt_spec:
        dt_stat -= dt_stat % dt_spec

    if (dt_bin % dt_spec) < 0.1*dt_spec:
        dt_bin -= dt_bin % dt_spec

    if (dt_bin % dt_stat) < 0.1*dt_stat:
        dt_bin -= dt_bin % dt_stat

    if (dt_rst % dt_bin) < 0.1*dt_bin:
        dt_rst -= dt_rst % dt_bin
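    # (added example, with assumed values) for dt_spec = 0.25 and a
    # user-supplied dt_stat = 0.5000001, the remainder dt_stat % dt_spec
    # ~= 1.0e-7 is below 0.1*dt_spec, so dt_stat snaps down to exactly
    # 2*dt_spec = 0.5.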

    # -------------------------------------------------------------------------
    # Run the simulation

    while t_sim < pp.tlimit + 1.e-8:

        # -- Update the dynamic dt based on CFL constraint
        dt = solver.new_dt_constant_nu(pp.cfl)
        t_test = t_sim + 0.5*dt

        compute_vorticity = True  # reset the vorticity computation flag

        # -- output log messages every step if needed/wanted
        KE = 0.5*comm.allreduce(np.sum(np.square(U)))*(1./N)**3
        if comm.rank == 0:
            print("cycle = %7d time = %15.8e dt = %15.8e KE = %15.8e"
                  % (tstep, t_sim, dt, KE))

        # -- output snapshots and data analysis products
        if t_test >= t_spec:
            analyzer.spectral_density(U_hat, '%3.3d_u' % ispec,
                                      'velocity PSD\t%s' % Ek_fmt('u_i'))

            omega[2] = irfft3(comm, 1j*(K[0]*U_hat[1] - K[1]*U_hat[0]))
            omega[1] = irfft3(comm, 1j*(K[2]*U_hat[0] - K[0]*U_hat[2]))
            omega[0] = irfft3(comm, 1j*(K[1]*U_hat[2] - K[2]*U_hat[1]))

            analyzer.spectral_density(omega, '%3.3d_omga' % ispec,
                                      'vorticity PSD\t%s' % Ek_fmt('\omega_i'))

            t_spec += dt_spec
            ispec += 1
            compute_vorticity = False

        # -- statistical analysis of the enstrophy field (currently disabled)
        # if t_test >= t_stat:
        #     if compute_vorticity:
        #         omega[2] = irfft3(comm, 1j*(K[0]*U_hat[1] - K[1]*U_hat[0]))
        #         omega[1] = irfft3(comm, 1j*(K[2]*U_hat[0] - K[0]*U_hat[2]))
        #         omega[0] = irfft3(comm, 1j*(K[1]*U_hat[2] - K[2]*U_hat[1]))
        #     enst = 0.5*np.sum(np.square(omega), axis=0)
        #     emin = min(emin, comm.allreduce(np.min(enst), op=MPI.MIN))
        #     emax = max(emax, comm.allreduce(np.max(enst), op=MPI.MAX))
        #     scalar_analysis(analyzer, enst, (emin, emax), None, None,
        #                     '%3.3d_enst' % istat, 'enstrophy', '\Omega')
        #     t_stat += dt_stat
        #     istat += 1

        # -- output single-precision binary files (currently disabled)
        # if t_test >= t_bin:
        #     writer.write_scalar('Enstrophy_%3.3d.bin' % ibin, enst,
        #                         np.float32)
        #     t_bin += dt_bin
        #     ibin += 1

        # -- output restart checkpoints
        if t_test >= t_rst:
            writer.write_scalar('Velocity1_%3.3d.rst' % irst, U[0], np.float64)
            writer.write_scalar('Velocity2_%3.3d.rst' % irst, U[1], np.float64)
            writer.write_scalar('Velocity3_%3.3d.rst' % irst, U[2], np.float64)
            t_rst += dt_rst
            irst += 1

        # -- Update the forcing pattern
        if t_test >= t_drv:
            # call solver.computeSource_linear_forcing to compute dvScale only
            kwargs['dvScale'] = Sources[0](computeRHS=False)
            t_drv += dt_drv
            if comm.rank == 0:
                print("------ updated dvScale for linear forcing ------")
                # print(kwargs['dvScale'])

        # -- integrate the solution forward in time
        solver.RK4_integrate(dt, *Sources, **kwargs)

        t_sim += dt
        tstep += 1

        sys.stdout.flush()  # forces Python 3 to flush print statements

    # -------------------------------------------------------------------------
    # Finalize the simulation

    omega[2] = irfft3(comm, 1j*(K[0]*U_hat[1] - K[1]*U_hat[0]))
    omega[1] = irfft3(comm, 1j*(K[2]*U_hat[0] - K[0]*U_hat[2]))
    omega[0] = irfft3(comm, 1j*(K[1]*U_hat[2] - K[2]*U_hat[1]))
    enst = 0.5*np.sum(np.square(omega), axis=0)

    analyzer.spectral_density(U_hat, '%3.3d_u' % ispec,
                              'velocity PSD\t%s' % Ek_fmt('u_i'))
    analyzer.spectral_density(omega, '%3.3d_omga' % ispec,
                              'vorticity PSD\t%s' % Ek_fmt('\omega_i'))

    # emin = min(emin, comm.allreduce(np.min(enst), op=MPI.MIN))
    # emax = max(emax, comm.allreduce(np.max(enst), op=MPI.MAX))
    # scalar_analysis(analyzer, enst, (emin, emax), None, None,
    #                 '%3.3d_enst' % istat, 'enstrophy', '\Omega')

    writer.write_scalar('Enstrophy_%3.3d.bin' % ibin, enst, np.float32)
    writer.write_scalar('Velocity1_%3.3d.rst' % irst, U[0], np.float64)
    writer.write_scalar('Velocity2_%3.3d.rst' % irst, U[1], np.float64)
    writer.write_scalar('Velocity3_%3.3d.rst' % irst, U[2], np.float64)

    return
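
# (added note) the two KE diagnostics used in this file agree under the
# assumption that solver.Nx is the global point count N**3:
#     0.5*comm.allreduce(psum(np.square(U)))/solver.Nx
#     0.5*comm.allreduce(np.sum(np.square(U)))*(1./N)**3
# both evaluate the volume-averaged kinetic energy per unit mass.
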
def ABC_static_test(pp=None, sp=None):
    """
    Arguments:
    ----------
    pp: (optional) program parameters, parsed by argument parser
        provided by this file
    sp: (optional) solver parameters, parsed by spectralLES.parser
    """

    if comm.rank == 0:
        print("\n----------------------------------------------------------")
        print("MPI-parallel Python spectralLES simulation of problem \n"
              "`Homogeneous Isotropic Turbulence' started with "
              "{} tasks at {}.".format(comm.size, timeofday()))
        print("----------------------------------------------------------")

    # ------------------------------------------------------------------
    # Get the problem and solver parameters and assert compliance
    if pp is None:
        pp = hit_parser.parse_known_args()[0]

    if sp is None:
        sp = spectralLES.parser.parse_known_args()[0]

    if comm.rank == 0:
        print('\nProblem Parameters:\n-------------------')
        for k, v in vars(pp).items():
            print(k, v)
        print('\nSpectralLES Parameters:\n-----------------------')
        for k, v in vars(sp).items():
            print(k, v)
        print("\n----------------------------------------------------------\n")

    assert len(set(pp.N)) == 1, ('Error, this beta-release HIT program '
                                 'requires equal mesh dimensions')
    N = pp.N[0]
    assert len(set(pp.L)) == 1, ('Error, this beta-release HIT program '
                                 'requires equal domain dimensions')
    L = pp.L[0]

    if N % comm.size > 0:
        if comm.rank == 0:
            print('Error: job started with improper number of MPI tasks'
                  ' for the size of the data specified!')
        MPI.Finalize()
        sys.exit(1)

    # ------------------------------------------------------------------
    # Configure the LES solver
    solver = staticGeneralizedEddyViscosityLES(Smagorinsky=True,
                                               comm=comm, **vars(sp))

    solver.computeAD = solver.computeAD_vorticity_form
    Sources = [solver.computeSource_linear_forcing,
               solver.computeSource_Smagorinsky_SGS,
               # solver.computeSource_4termGEV_SGS,
               ]

    # C1 = np.array([-6.39e-02])
    C3 = np.array([-3.75e-02, 6.2487e-02, 6.9867e-03, 0.0])
    C4 = np.array([-3.15e-02, -5.25e-02, 2.7e-02, 2.7e-02])

    kwargs = dict(C1=-6.39e-02, C=C3*solver.D_les**2, dvScale=None)

    U_hat = solver.U_hat
    U = solver.U
    Kmod = np.floor(np.sqrt(solver.Ksq)).astype(int)
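    # (added note) Kmod bins each spectral-space point into an integer
    # wavenumber shell, floor(|K|); it is the shell-index array consumed by
    # shell_average() when the 3-D spectra are reduced to 1-D below.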

    # ------------------------------------------------------------------
    # Form HIT initial conditions from either user-defined values or
    # physics-based relationships
    Urms = 1.083*(pp.epsilon*L)**(1./3.)            # empirical coefficient
    Einit = getattr(pp, 'Einit', None) or Urms**2   # == 2*KE_equilibrium
    kexp = getattr(pp, 'kexp', None) or -1./3.      # -> E(k) ~ k^(-2./3.)
    kpeak = getattr(pp, 'kpeak', None) or N//4      # ~ kmax/2

    # currently using a fixed random seed for testing
    solver.initialize_HIT_random_spectrum(Einit, kexp, kpeak, rseed=comm.rank)

    # ------------------------------------------------------------------
    # Configure a spatial field writer
    writer = mpiWriter(comm, odir=pp.odir, N=N)

    Ek_fmt = "\widehat{{{0}}}^*\widehat{{{0}}}".format

    # -------------------------------------------------------------------------
    # Setup the various time and IO counters

    tauK = sqrt(pp.nu/pp.epsilon)   # Kolmogorov time-scale
    taul = 0.11*sqrt(3)*L/Urms      # 0.11 is empirical coefficient

    if pp.tlimit == np.inf:
        pp.tlimit = 200*taul

    dt_rst = getattr(pp, 'dt_rst', None) or taul
    dt_spec = getattr(pp, 'dt_spec', None) or 0.2*taul
    dt_drv = getattr(pp, 'dt_drv', None) or 0.25*tauK

    t_sim = t_rst = t_spec = t_drv = 0.0
    tstep = irst = ispec = 0
    tseries = []

    if comm.rank == 0:
        print('\ntau_ell = %.6e\ntau_K = %.6e\n' % (taul, tauK))

    # -------------------------------------------------------------------------
    # Run the simulation

    if comm.rank == 0:
        t1 = time.time()

    while t_sim < pp.tlimit + 1.e-8:

        # -- Update the dynamic dt based on CFL constraint
        dt = solver.new_dt_constant_nu(pp.cfl)
        t_test = t_sim + 0.5*dt

        # -- output/store a log every step if needed/wanted
        KE = 0.5*comm.allreduce(psum(np.square(U)))/solver.Nx
        tseries.append([tstep, t_sim, KE])

        # -- output KE and enstrophy spectra
        if t_test >= t_spec:

            # -- output message log to screen on spectrum output only
            if comm.rank == 0:
                print("cycle = %7d time = %15.8e dt = %15.8e KE = %15.8e"
                      % (tstep, t_sim, dt, KE))

            # -- output kinetic energy spectrum to file
            spect3d = np.sum(np.real(U_hat*np.conj(U_hat)), axis=0)
            spect3d[..., 0] *= 0.5
            spect1d = shell_average(comm, spect3d, Kmod)

            if comm.rank == 0:
                fname = '%s/%s-%3.3d_KE.spectra' % (pp.adir, pp.pid, ispec)
                fh = open(fname, 'w')
                metadata = Ek_fmt('u_i')
                fh.write('%s\n' % metadata)
                spect1d.tofile(fh, sep='\n', format='% .8e')
                fh.close()

            t_spec += dt_spec
            ispec += 1

        # -- output physical-space solution fields for restarting and analysis
        if t_test >= t_rst:
            writer.write_scalar('%s-Velocity1_%3.3d.rst' % (pp.pid, irst),
                                U[0], np.float64)
            writer.write_scalar('%s-Velocity2_%3.3d.rst' % (pp.pid, irst),
                                U[1], np.float64)
            writer.write_scalar('%s-Velocity3_%3.3d.rst' % (pp.pid, irst),
                                U[2], np.float64)
            t_rst += dt_rst
            irst += 1

        # -- Update the forcing mean scaling
        if t_test >= t_drv:
            # call solver.computeSource_linear_forcing to compute dvScale only
            kwargs['dvScale'] = Sources[0](computeRHS=False)
            t_drv += dt_drv

        # -- integrate the solution forward in time
        solver.RK4_integrate(dt, *Sources, **kwargs)

        t_sim += dt
        tstep += 1

        sys.stdout.flush()  # forces Python 3 to flush print statements

    # -------------------------------------------------------------------------
    # Finalize the simulation

    if comm.rank == 0:
        t2 = time.time()
        print('Program took %12.7f s' % (t2 - t1))

    KE = 0.5*comm.allreduce(psum(np.square(U)))/solver.Nx
    tseries.append([tstep, t_sim, KE])
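    # (added usage sketch) the time series saved below is plain text with a
    # commented header, so it can be reloaded with, e.g.:
    #     tstep, t, KE = np.loadtxt(fname, unpack=True)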

    if comm.rank == 0:
        fname = '%s/%s-%3.3d_KE_tseries.txt' % (pp.adir, pp.pid, ispec)
        header = 'Kinetic Energy Timeseries,\n# columns: tstep, time, KE'
        np.savetxt(fname, tseries, fmt='%10.5e', header=header)

        print("cycle = %7d time = %15.8e dt = %15.8e KE = %15.8e"
              % (tstep, t_sim, dt, KE))
        print("\n----------------------------------------------------------")
        print("MPI-parallel Python spectralLES simulation finished at {}."
              .format(timeofday()))
        print("----------------------------------------------------------")

    # -- output kinetic energy spectrum to file
    spect3d = np.sum(np.real(U_hat*np.conj(U_hat)), axis=0)
    spect3d[..., 0] *= 0.5
    spect1d = shell_average(comm, spect3d, Kmod)

    if comm.rank == 0:
        fh = open('%s/%s-%3.3d_KE.spectra' % (pp.adir, pp.pid, ispec), 'w')
        metadata = Ek_fmt('u_i')
        fh.write('%s\n' % metadata)
        spect1d.tofile(fh, sep='\n', format='% .8e')
        fh.close()

    # -- output physical-space solution fields for restarting and analysis
    writer.write_scalar('%s-Velocity1_%3.3d.rst' % (pp.pid, irst),
                        U[0], np.float64)
    writer.write_scalar('%s-Velocity2_%3.3d.rst' % (pp.pid, irst),
                        U[1], np.float64)
    writer.write_scalar('%s-Velocity3_%3.3d.rst' % (pp.pid, irst),
                        U[2], np.float64)

    return
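
# -----------------------------------------------------------------------------
# (added sketch, an assumption: the original module may select among the three
# drivers differently) a minimal entry point so the demo can be launched
# directly under MPI, e.g. `mpiexec -n 8 python <this_file>.py`:
if __name__ == "__main__":
    homogeneous_isotropic_turbulence()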