def dummy_setup_if_no_setup(self, platform_resources: JobPlatformResources):
    """
    Set up a dummy parallelization dimension if none was configured yet.

    The dummy dimension uses a single rank on one node and claims all
    cores of that node (one thread per core).

    Args:
        platform_resources:
            Platform hardware description; only ``num_cores_per_node``
            is read here.
    """
    # Fix: identity comparison with None must use 'is', not '==' (PEP 8).
    if self.pardims is None:
        dummy = JobParallelizationDimOptions("dummy")
        dummy.num_cores = platform_resources.num_cores_per_node
        # One rank owns the whole node: cores-per-rank and
        # threads-per-rank both equal the node's core count.
        dummy.num_cores_per_rank = dummy.num_cores
        dummy.num_threads_per_rank = dummy.num_cores
        dummy.num_ranks = 1

        self.setup([dummy], platform_resources)
        self.print()
#
# Reference solution
#
if gen_reference_solution:
    # The reference run uses the first time-stepping method with the
    # dedicated reference timestep size.
    tsm = ts_methods[0]

    p.runtime.timestep_size = timestep_size_reference
    p.runtime.timestepping_method = tsm[0]
    p.runtime.timestepping_order = tsm[1]
    p.runtime.timestepping_order2 = tsm[2]

    # Spatial parallelization: a single rank whose thread count is the
    # largest configured per-rank core count.
    pspace = JobParallelizationDimOptions('space')
    pspace.num_cores_per_rank = 1
    pspace.num_threads_per_rank = params_pspace_num_cores_per_rank[-1]
    pspace.num_ranks = 1

    # Update TIME parallelization: fully serial for the reference run.
    ptime = JobParallelizationDimOptions('time')
    ptime.num_cores_per_rank = 1
    ptime.num_threads_per_rank = 1  # pspace.num_cores_per_rank
    ptime.num_ranks = 1

    # Setup parallelization
    p.setup_parallelization([pspace, ptime])

    if verbose:
        pspace.print()
        ptime.print()
        p.parallelization.print()
# # Reference solution # if gen_reference_solution: tsm = ts_methods[0] jg.runtime.timestep_size = timestep_size_reference jg.runtime.timestepping_method = tsm[0] jg.runtime.timestepping_order = tsm[1] jg.runtime.timestepping_order2 = tsm[2] # Update TIME parallelization ptime = JobParallelizationDimOptions('time') ptime.num_cores_per_rank = 1 ptime.num_threads_per_rank = 1 #pspace.num_cores_per_rank ptime.num_ranks = 1 pspace = JobParallelizationDimOptions('space') pspace.num_cores_per_rank = 1 pspace.num_threads_per_rank = params_pspace_num_cores_per_rank[-1] pspace.num_ranks = 1 # Setup parallelization jg.setup_parallelization([pspace, ptime]) if verbose: pspace.print() ptime.print() jg.parallelization.print() if len(tsm) > 4:
# p.reference_job_unique_id = None if gen_reference_solution: tsm = ts_methods[0] p.runtime.timestep_size = params_timestep_size_reference p.runtime.timestepping_method = tsm[0] p.runtime.timestepping_order = tsm[1] p.runtime.timestepping_order2 = tsm[2] pspace = JobParallelizationDimOptions('space') pspace.num_cores_per_rank = 1 pspace.num_threads_per_rank = params_pspace_num_cores_per_rank[-1] pspace.num_ranks = 1 # Setup parallelization p.setup_parallelization([pspace]) if verbose: pspace.print() p.parallelization.print() p.parallelization.max_wallclock_seconds = estimateWallclockTime(p) p.gen_jobscript_directory('job_benchref_' + p.getUniqueID()) # Use this as a reference job p.reference_job_unique_id = p.job_unique_id