Code example #1
def ABC_static_test(pp=None, sp=None):
    """
    Arguments:
    ----------
    pp: (optional) program parameters, parsed by argument parser
        provided by this file
    sp: (optional) solver parameters, parsed by spectralLES.parser
    """

    if comm.rank == 0:
        print("\n----------------------------------------------------------")
        print("MPI-parallel Python spectralLES simulation of problem \n"
              "`Homogeneous Isotropic Turbulence' started with "
              "{} tasks at {}.".format(comm.size, timeofday()))
        print("----------------------------------------------------------")

    # ------------------------------------------------------------------
    # Get the problem and solver parameters and assert compliance
    if pp is None:
        pp = hit_parser.parse_known_args()[0]

    if sp is None:
        sp = spectralLES.parser.parse_known_args()[0]

    if comm.rank == 0:
        print('\nProblem Parameters:\n-------------------')
        for k, v in vars(pp).items():
            print(k, v)
        print('\nSpectralLES Parameters:\n-----------------------')
        for k, v in vars(sp).items():
            print(k, v)
        print("\n----------------------------------------------------------\n")

    assert len(set(pp.N)) == 1, ('Error, this beta-release HIT program '
                                 'requires equal mesh dimensions')
    N = pp.N[0]
    assert len(set(pp.L)) == 1, ('Error, this beta-release HIT program '
                                 'requires equal domain dimensions')
    L = pp.L[0]

    if N % comm.size > 0:
        if comm.rank == 0:
            print('Error: job started with improper number of MPI tasks'
                  ' for the size of the data specified!')
        MPI.Finalize()
        sys.exit(1)

    # ------------------------------------------------------------------
    # Configure the LES solver
    solver = staticGeneralizedEddyViscosityLES(
                Smagorinsky=True, comm=comm, **vars(sp))

    solver.computeAD = solver.computeAD_vorticity_form
    Sources = [solver.computeSource_linear_forcing,
               solver.computeSource_Smagorinsky_SGS,
               # solver.computeSource_4termGEV_SGS,
               ]

    # C1 = np.array([-6.39e-02])
    C3 = np.array([-3.75e-02, 6.2487e-02, 6.9867e-03, 0.0])
    C4 = np.array([-3.15e-02, -5.25e-02, 2.7e-02, 2.7e-02])
    kwargs = dict(C1=-6.39e-02, C=C3*solver.D_les**2, dvScale=None)
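    # Assumption: these kwargs are forwarded to the source terms by
    # solver.RK4_integrate in the time loop; C1 would be used by the
    # Smagorinsky SGS source, C (the GEV coefficients pre-multiplied by
    # solver.D_les**2, presumably the squared LES filter width) by the
    # disabled 4-term GEV source, and dvScale by the linear forcing term.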

    U_hat = solver.U_hat
    U = solver.U
    Kmod = np.floor(np.sqrt(solver.Ksq)).astype(int)

    # ------------------------------------------------------------------
    # form HIT initial conditions from either user-defined values or
    # physics-based relationships
    Urms = 1.083*(pp.epsilon*L)**(1./3.)             # empirical coefficient
    Einit = getattr(pp, 'Einit', None) or Urms**2  # == 2*KE_equilibrium
    kexp = getattr(pp, 'kexp', None) or -1./3.     # -> E(k) ~ k^(-2./3.)
    kpeak = getattr(pp, 'kpeak', None) or N//4     # ~ kmax/2
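    # Note: the `getattr(..., None) or default` idiom falls back to the
    # physics-based default whenever the attribute is missing *or* falsy, so
    # an explicit value of 0 would also be replaced by the default.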

    # currently using a fixed random seed for testing
    solver.initialize_HIT_random_spectrum(Einit, kexp, kpeak, rseed=comm.rank)

    # ------------------------------------------------------------------
    # Configure a spatial field writer
    writer = mpiWriter(comm, odir=pp.odir, N=N)
    Ek_fmt = "\widehat{{{0}}}^*\widehat{{{0}}}".format

    # -------------------------------------------------------------------------
    # Setup the various time and IO counters
    tauK = sqrt(pp.nu/pp.epsilon)           # Kolmogorov time-scale
    taul = 0.11*sqrt(3)*L/Urms              # 0.11 is empirical coefficient

    if pp.tlimit == np.Inf:
        pp.tlimit = 200*taul

    dt_rst = getattr(pp, 'dt_rst', None) or taul
    dt_spec = getattr(pp, 'dt_spec', None) or 0.2*taul
    dt_drv = getattr(pp, 'dt_drv', None) or 0.25*tauK
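    # With these defaults, restart fields are written once per integral time,
    # spectra five times per integral time, and the forcing scale is refreshed
    # every quarter of a Kolmogorov time.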

    t_sim = t_rst = t_spec = t_drv = 0.0
    tstep = irst = ispec = 0
    tseries = []

    if comm.rank == 0:
        print('\ntau_ell = %.6e\ntau_K = %.6e\n' % (taul, tauK))

    # -------------------------------------------------------------------------
    # Run the simulation
    if comm.rank == 0:
        t1 = time.time()

    while t_sim < pp.tlimit+1.e-8:

        # -- Update the dynamic dt based on CFL constraint
        dt = solver.new_dt_constant_nu(pp.cfl)
        t_test = t_sim + 0.5*dt

        # -- output/store a log every step if needed/wanted
        KE = 0.5*comm.allreduce(psum(np.square(U)))/solver.Nx
        tseries.append([tstep, t_sim, KE])
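        # KE is the volume-averaged kinetic energy 0.5*<u_i u_i>: the local
        # sum of squared velocities is reduced over all ranks and divided by
        # solver.Nx, assumed here to be the global number of grid points.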

        # -- output KE and enstrophy spectra
        if t_test >= t_spec:

            # -- output message log to screen on spectrum output only
            if comm.rank == 0:
                print("cycle = %7d  time = %15.8e  dt = %15.8e  KE = %15.8e"
                      % (tstep, t_sim, dt, KE))

            # -- output kinetic energy spectrum to file
            spect3d = np.sum(np.real(U_hat*np.conj(U_hat)), axis=0)
            spect3d[..., 0] *= 0.5
            spect1d = shell_average(comm, spect3d, Kmod)
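            # spect3d is the modal energy density summed over the three
            # velocity components; halving the [..., 0] plane presumably
            # compensates for the Hermitian symmetry of the real-to-complex
            # transform, and shell_average then bins the result over the
            # integer wavenumber shells in Kmod.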

            if comm.rank == 0:
                fname = '%s/%s-%3.3d_KE.spectra' % (pp.adir, pp.pid, ispec)
                fh = open(fname, 'w')
                metadata = Ek_fmt('u_i')
                fh.write('%s\n' % metadata)
                spect1d.tofile(fh, sep='\n', format='% .8e')
                fh.close()

            t_spec += dt_spec
            ispec += 1

        # -- output physical-space solution fields for restarting and analysis
        if t_test >= t_rst:
            writer.write_scalar('%s-Velocity1_%3.3d.rst' %
                                (pp.pid, irst), U[0], np.float64)
            writer.write_scalar('%s-Velocity2_%3.3d.rst' %
                                (pp.pid, irst), U[1], np.float64)
            writer.write_scalar('%s-Velocity3_%3.3d.rst' %
                                (pp.pid, irst), U[2], np.float64)
            t_rst += dt_rst
            irst += 1

        # -- Update the forcing mean scaling
        if t_test >= t_drv:
            # call solver.computeSource_linear_forcing to compute dvScale only
            kwargs['dvScale'] = Sources[0](computeRHS=False)
            t_drv += dt_drv

        # -- integrate the solution forward in time
        solver.RK4_integrate(dt, *Sources, **kwargs)

        t_sim += dt
        tstep += 1

        sys.stdout.flush()  # forces Python 3 to flush print statements

    # -------------------------------------------------------------------------
    # Finalize the simulation
    if comm.rank == 0:
        t2 = time.time()
        print('Program took %12.7f s' % ((t2-t1)))

    KE = 0.5*comm.allreduce(psum(np.square(U)))/solver.Nx
    tseries.append([tstep, t_sim, KE])

    if comm.rank == 0:
        fname = '%s/%s-%3.3d_KE_tseries.txt' % (pp.adir, pp.pid, ispec)
        header = 'Kinetic Energy Timeseries,\n# columns: tstep, time, KE'
        np.savetxt(fname, tseries, fmt='%10.5e', header=header)

        print("cycle = %7d  time = %15.8e  dt = %15.8e  KE = %15.8e"
              % (tstep, t_sim, dt, KE))
        print("\n----------------------------------------------------------")
        print("MPI-parallel Python spectralLES simulation finished at {}."
              .format(timeofday()))
        print("----------------------------------------------------------")

    # -- output kinetic energy spectrum to file
    spect3d = np.sum(np.real(U_hat*np.conj(U_hat)), axis=0)
    spect3d[..., 0] *= 0.5
    spect1d = shell_average(comm, spect3d, Kmod)

    if comm.rank == 0:
        fh = open('%s/%s-%3.3d_KE.spectra' %
                  (pp.adir, pp.pid, ispec), 'w')
        metadata = Ek_fmt('u_i')
        fh.write('%s\n' % metadata)
        spect1d.tofile(fh, sep='\n', format='% .8e')
        fh.close()

    # -- output physical-space solution fields for restarting and analysis
    writer.write_scalar('%s-Velocity1_%3.3d.rst' %
                        (pp.pid, irst), U[0], np.float64)
    writer.write_scalar('%s-Velocity2_%3.3d.rst' %
                        (pp.pid, irst), U[1], np.float64)
    writer.write_scalar('%s-Velocity3_%3.3d.rst' %
                        (pp.pid, irst), U[2], np.float64)

    return
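
# A minimal driver sketch (assumption): this demo is presumably launched under
# MPI, e.g. `mpiexec -n 8 python <this file> ...`, from a module-level guard
# such as the commented one below; the available flags are defined by
# hit_parser and spectralLES.parser.
#
# if __name__ == "__main__":
#     ABC_static_test()
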
Code example #2
def staticSmag_HIT_demo(pp=None, sp=None):
    """
    Arguments:
    ----------
    pp: (optional) program parameters, parsed by argument parser
        provided by this file
    sp: (optional) solver parameters, parsed by staticSmagorinskyLES.parser
    """

    if comm.rank == 0:
        print("\n----------------------------------------------------------")
        print("MPI-parallel Python spectralLES simulation of problem \n"
              "`Homogeneous Isotropic Turbulence' started with "
              "{} tasks at {}.".format(comm.size, timeofday()))
        print("----------------------------------------------------------")

    # if function called without passing in parsed arguments, then parse
    # the arguments from the command line

    if pp is None:
        pp = hit_parser.parse_known_args()[0]

    if sp is None:
        sp = staticSmagorinskyLES.parser.parse_known_args()[0]

    if comm.rank == 0:
        print('\nProblem Parameters:\n-------------------')
        for k, v in vars(pp).items():
            print(k, v)
        print('\nSpectralLES Parameters:\n-----------------------')
        for k, v in vars(sp).items():
            print(k, v)
        print("\n----------------------------------------------------------\n")

    assert len(set(pp.N)) == 1, ('Error, this beta-release HIT program '
                                 'requires equal mesh dimensions')
    N = pp.N[0]
    assert len(set(pp.L)) == 1, ('Error, this beta-release HIT program '
                                 'requires equal domain dimensions')
    L = pp.L[0]

    if N % comm.size > 0:
        if comm.rank == 0:
            print('Error: job started with improper number of MPI tasks for '
                  'the size of the data specified!')
        MPI.Finalize()
        sys.exit(1)

    # ------------------------------------------------------------------
    # Configure the LES solver
    solver = staticSmagorinskyLES(comm=comm, **vars(sp))

    solver.computeAD = solver.computeAD_vorticity_form
    Sources = [
        solver.computeSource_linear_forcing,
        solver.computeSource_Smagorinsky_SGS,
    ]

    Ck = 1.6
    Cs = sqrt((pi**-2) * ((3 * Ck)**-1.5))  # == 0.098...
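    # Assumption: this appears to be a Lilly-style estimate tying the
    # Smagorinsky coefficient to the Kolmogorov constant Ck; the commented-out
    # Cs = 0.22 below is retained as an alternative empirical value.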
    # Cs = 0.22
    kwargs = {
        'dvScale': None,
        'Cs': Cs,
    }

    U_hat = solver.U_hat
    U = solver.U
    Kmod = np.floor(np.sqrt(solver.Ksq)).astype(int)

    # ------------------------------------------------------------------
    # form HIT initial conditions from either user-defined values or
    # physics-based relationships
    Urms = 1.083 * (pp.epsilon * L)**(1. / 3.)  # empirical coefficient
    Einit = getattr(pp, 'Einit', None) or Urms**2  # == 2*KE_equilibrium
    kexp = getattr(pp, 'kexp', None) or -1. / 3.  # -> E(k) ~ k^(-2./3.)
    kpeak = getattr(pp, 'kpeak', None) or N // 4  # ~ kmax/2

    # -- currently using a fixed random seed of comm.rank for testing
    solver.initialize_HIT_random_spectrum(Einit, kexp, kpeak, rseed=comm.rank)

    # ------------------------------------------------------------------
    # Configure a spatial field writer
    # writer = mpiWriter(comm, odir=pp.odir, N=N)
    # Make the output directory, checking that it is a valid path.
    if comm.rank == 0:
        try:
            os.makedirs(pp.odir)
        except OSError as e:
            if not os.path.isdir(pp.odir):
                raise e
            else:
                status = e
        finally:
            if os.path.isdir(pp.odir):
                status = 0
    else:
        status = None

    status = comm.bcast(status)
    if status != 0:
        MPI.Finalize()
        sys.exit(999)
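    # Rank 0 creates the output directory and every rank blocks on the
    # broadcast of `status`, so no rank proceeds to write into pp.odir before
    # it exists, and all ranks abort together if creation fails.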

    Ek_fmt = "\\widehat{{{0}}}^*\\widehat{{{0}}}".format

    # -------------------------------------------------------------------------
    # Setup the various time and IO counters
    tauK = sqrt(pp.nu / pp.epsilon)  # Kolmogorov time-scale
    taul = 0.11 * sqrt(3) * L / Urms  # 0.11 is empirical coefficient

    if pp.tlimit == np.Inf:
        pp.tlimit = 200 * taul

    dt_rst = getattr(pp, 'dt_rst', None) or taul
    dt_spec = getattr(pp, 'dt_spec', None) or 0.2 * taul
    dt_drv = getattr(pp, 'dt_drv', None) or 0.25 * tauK

    t_sim = t_rst = t_spec = t_drv = 0.0
    tstep = irst = ispec = 0
    tseries = []

    if comm.rank == 0:
        print('\ntau_ell = %.6e\ntau_K = %.6e\n' % (taul, tauK))

    # -------------------------------------------------------------------------
    # Run the simulation

    while t_sim < pp.tlimit + 1.e-8:

        # -- Update the dynamic dt based on CFL constraint
        dt = solver.new_dt_constant_nu(pp.cfl)
        t_test = t_sim + 0.5 * dt

        # -- output/store a log every step if needed/wanted
        KE = 0.5 * comm.allreduce(psum(np.square(U))) / solver.Nx
        tseries.append([tstep, t_sim, KE])

        # -- output KE and enstrophy spectra
        if t_test >= t_spec:

            # -- output message log to screen on spectrum output only
            if comm.rank == 0:
                print("cycle = %7d  time = %15.8e  dt = %15.8e  KE = %15.8e" %
                      (tstep, t_sim, dt, KE))

            # -- output kinetic energy spectrum to file
            spect3d = np.sum(np.real(U_hat * np.conj(U_hat)), axis=0)
            spect3d[..., 0] *= 0.5
            spect1d = shell_average(comm, spect3d, Kmod)

            if comm.rank == 0:
                fname = '%s/%s-%3.3d_KE.spectra' % (pp.odir, pp.pid, ispec)
                fh = open(fname, 'w')
                metadata = Ek_fmt('u_i')
                fh.write('%s\n' % metadata)
                spect1d.tofile(fh, sep='\n', format='% .8e')
                fh.close()

            t_spec += dt_spec
            ispec += 1

        # # -- output physical-space solution fields for restarting and analysis
        # if t_test >= t_rst:
        #     writer.write_scalar('%s-Velocity1_%3.3d.rst' %
        #                         (pp.pid, irst), U[0], np.float64)
        #     writer.write_scalar('%s-Velocity2_%3.3d.rst' %
        #                         (pp.pid, irst), U[1], np.float64)
        #     writer.write_scalar('%s-Velocity3_%3.3d.rst' %
        #                         (pp.pid, irst), U[2], np.float64)
        #     t_rst += dt_rst
        #     irst += 1

        # -- Update the forcing mean scaling
        if t_test >= t_drv:
            # call solver.computeSource_linear_forcing to compute dvScale only
            kwargs['dvScale'] = Sources[0](computeRHS=False)
            t_drv += dt_drv

        # -- integrate the solution forward in time
        solver.RK4_integrate(dt, *Sources, **kwargs)

        t_sim += dt
        tstep += 1

        sys.stdout.flush()  # forces Python 3 to flush print statements

    # -------------------------------------------------------------------------
    # Finalize the simulation

    KE = 0.5 * comm.allreduce(psum(np.square(U))) / solver.Nx
    tseries.append([tstep, t_sim, KE])

    if comm.rank == 0:
        fname = '%s/%s-%3.3d_KE_tseries.txt' % (pp.odir, pp.pid, ispec)
        header = 'Kinetic Energy Timeseries,\n# columns: tstep, time, KE'
        np.savetxt(fname, tseries, fmt='%10.5e', header=header)

        print("cycle = %7d  time = %15.8e  dt = %15.8e  KE = %15.8e" %
              (tstep, t_sim, dt, KE))
        print("\n----------------------------------------------------------")
        print("MPI-parallel Python spectralLES simulation finished at {}.".
              format(timeofday()))
        print("----------------------------------------------------------")

    # -- output kinetic energy spectrum to file
    spect3d = np.sum(np.real(U_hat * np.conj(U_hat)), axis=0)
    spect3d[..., 0] *= 0.5
    spect1d = shell_average(comm, spect3d, Kmod)

    if comm.rank == 0:
        fh = open('%s/%s-%3.3d_KE.spectra' % (pp.odir, pp.pid, ispec), 'w')
        metadata = Ek_fmt('u_i')
        fh.write('%s\n' % metadata)
        spect1d.tofile(fh, sep='\n', format='% .8e')
        fh.close()

    # # -- output physical-space solution fields for restarting and analysis
    # writer.write_scalar('%s-Velocity1_%3.3d.rst' %
    #                     (pp.pid, irst), U[0], np.float64)
    # writer.write_scalar('%s-Velocity2_%3.3d.rst' %
    #                     (pp.pid, irst), U[1], np.float64)
    # writer.write_scalar('%s-Velocity3_%3.3d.rst' %
    #                     (pp.pid, irst), U[2], np.float64)

    return
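
# A small post-processing sketch (assumption): each spectrum file written
# above holds one LaTeX metadata line followed by newline-separated
# shell-averaged values, so it can be read back with NumPy by skipping the
# header row. The file name here is illustrative only.
#
#   import numpy as np
#   Ek = np.loadtxt('hit_demo-000_KE.spectra', skiprows=1)
#   k = np.arange(Ek.size)  # one value per integer wavenumber shell
#   print(Ek.shape, Ek.min(), Ek.max())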