Example No. 1
    def dummy_setup_if_no_setup(self,
                                platform_resources: JobPlatformResources):
        """
        Set up a dummy parallelization dimension: one rank on one node, using all cores of that node.
        """

        if self.pardims is None:
            dummy = JobParallelizationDimOptions("dummy")
            dummy.num_cores = platform_resources.num_cores_per_node
            dummy.num_cores_per_rank = dummy.num_cores
            dummy.num_threads_per_rank = dummy.num_cores
            dummy.num_ranks = 1
            self.setup([dummy], platform_resources)
            self.print()
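
The method above falls back to a single rank that uses every core of one node whenever no parallelization dimensions have been configured. The snippet below is a minimal, self-contained sketch of that same fallback pattern; Resources, DimOptions and Parallelization are hypothetical stand-ins for the MULE classes (JobPlatformResources, JobParallelizationDimOptions and the owning parallelization object), used only so the sketch runs on its own.

from dataclasses import dataclass

@dataclass
class Resources:          # hypothetical stand-in for JobPlatformResources
    num_cores_per_node: int = 16

@dataclass
class DimOptions:         # hypothetical stand-in for JobParallelizationDimOptions
    name: str
    num_cores: int = 0
    num_cores_per_rank: int = 0
    num_threads_per_rank: int = 0
    num_ranks: int = 0

class Parallelization:    # hypothetical stand-in holding the configured dimensions
    def __init__(self):
        self.pardims = None

    def setup(self, pardims, platform_resources):
        self.pardims = pardims

    def dummy_setup_if_no_setup(self, platform_resources):
        # Only create the fallback dimension if nothing was configured yet
        if self.pardims is None:
            dummy = DimOptions("dummy")
            dummy.num_cores = platform_resources.num_cores_per_node
            dummy.num_cores_per_rank = dummy.num_cores
            dummy.num_threads_per_rank = dummy.num_cores
            dummy.num_ranks = 1
            self.setup([dummy], platform_resources)

p = Parallelization()
p.dummy_setup_if_no_setup(Resources())
print(p.pardims)          # one rank using all 16 cores of the node
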
Example No. 2
                ['ln_erk', 4, 4, 0],
            ]

        #
        # Reference solution
        #
        if gen_reference_solution:
            tsm = ts_methods[0]
            jg.runtime.timestep_size = timestep_size_reference

            jg.runtime.timestepping_method = tsm[0]
            jg.runtime.timestepping_order = tsm[1]
            jg.runtime.timestepping_order2 = tsm[2]

            # Update TIME parallelization
            ptime = JobParallelizationDimOptions('time')
            ptime.num_cores_per_rank = 1
            ptime.num_threads_per_rank = 1  #pspace.num_cores_per_rank
            ptime.num_ranks = 1

            pspace = JobParallelizationDimOptions('space')
            pspace.num_cores_per_rank = 1
            pspace.num_threads_per_rank = params_pspace_num_cores_per_rank[-1]
            pspace.num_ranks = 1

            # Setup parallelization
            jg.setup_parallelization([pspace, ptime])

            if verbose:
                pspace.print()
                ptime.print()
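
In this reference-solution setup, both the 'space' and 'time' dimensions request a single rank, so the reference job is essentially serial across ranks. The sketch below only illustrates that bookkeeping, under the assumption that the per-dimension rank and core counts combine multiplicatively; it does not use the MULE API itself.

# Illustration only (assumes dimensions combine multiplicatively, mirroring the
# reference setup above: 1 rank and 1 core per rank in each dimension).
dims = [
    {"name": "space", "num_ranks": 1, "num_cores_per_rank": 1},
    {"name": "time",  "num_ranks": 1, "num_cores_per_rank": 1},
]

total_ranks = 1
total_cores_per_rank = 1
for d in dims:
    total_ranks *= d["num_ranks"]
    total_cores_per_rank *= d["num_cores_per_rank"]

print(f"{total_ranks} rank(s), {total_cores_per_rank} core(s) per rank")
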
Example No. 3
            p.runtime.output_timestep_size = p.runtime.max_simulation_time

            #
            # Reference solution
            #
            if gen_reference_solution:
                tsm = ts_methods[0]
                p.runtime.timestep_size = timestep_size_reference

                p.runtime.timestepping_method = tsm[0]
                p.runtime.timestepping_order = tsm[1]
                p.runtime.timestepping_order2 = tsm[2]

                # Update TIME parallelization
                ptime = JobParallelizationDimOptions('time')
                ptime.num_cores_per_rank = 1
                ptime.num_threads_per_rank = 1  #pspace.num_cores_per_rank
                ptime.num_ranks = 1

                pspace = JobParallelizationDimOptions('space')
                pspace.num_cores_per_rank = 1
                pspace.num_threads_per_rank = params_pspace_num_cores_per_rank[-1]
                pspace.num_ranks = 1

                # Setup parallelization
                p.setup_parallelization([pspace, ptime])

                if verbose:
                    pspace.print()
Example No. 4
# Classes containing SWEET compile/run basic options
from mule_local.JobGeneration import *

# Create main compile/run options
jg = JobGeneration()

# Request dedicated compile script
jg.compilecommand_in_jobscript = True

# Wallclock time
max_wallclock_seconds = 2 * 24 * 60 * 60
ref_max_wallclock_seconds = 48 * 60 * 60
jg.parallelization.max_wallclock_seconds = ref_max_wallclock_seconds

# HPC stuff
pspace = JobParallelizationDimOptions('space')
pspace.num_cores_per_rank = jg.platform_resources.num_cores_per_node // 2  # integer division: half the cores on a node
pspace.num_threads_per_rank = 8
pspace.num_ranks = 1
jg.setup_parallelization([pspace])

# Basic plane options
CompileSWEPlane(jg)

# Activate benchmark timers
jg.compile.benchmark_timings = 'enable'

# Verbosity mode
jg.runtime.verbosity = 3

# Benchmark ID