def jobscript_get_exec_command(jg: JobGeneration):
    """
    Prefix to executable command

    Returns
    -------
    string
        multiline text for scripts
    """
    content = """

""" + p_gen_script_info(jg) + """

EXEC=\"""" + jg.get_program_exec() + """\"
echo \"$EXEC\"
$EXEC || exit 1

"""

    return content
#SBATCH --clusters=serial
#SBATCH --time=96:00:00
#
# if a job might take over 2 days to be executed

import os
import sys
import stat
import math

# Classes containing SWEET compile/run basic options
from mule_local.JobGeneration import *
from mule.SWEETRuntimeParametersScenarios import *

# Create main compile/run options
jg = JobGeneration()

# Request dedicated compile script
jg.compilecommand_in_jobscript = False

# Wallclock time
max_wallclock_seconds = 2 * 24 * 60 * 60
ref_max_wallclock_seconds = 48 * 60 * 60

# Get Earth parameters (if necessary)
earth = EarthMKSDimensions()

#
# Run simulation on plane or sphere
#

# Basic plane options
def __load_job_raw_data(self, jobdir=None):
    """
    Parse all output.out files and extract all kinds of job output information

    Return a dictionary with content from the job directories
    {
        #
        # Dictionary with data from job generation
        # (read from [jobdir]/jobgeneration.pickle)
        #
        'jobgeneration':    # From jobgeneration.pickle
        {
            'compile': [...],
            'runtime': [...],
            'parallelization': [...],
            'platforms_platform': [...],
            'platform_resources': [...],
        },
        'output':    # From output.out with prefix [MULE]
        {
            'simulation_benchmark_timings.main': [value],
            'simulation_benchmark_timings.main_simulationLoop': [value],
            [...]
        },
        '[filename(.pickle)]':
        {
            [...]
        }
    },
    '[filename(.pickle)]':
    {
        [...]
    }
    """

    self.__job_raw_data = {}

    if self.verbosity > 5:
        self.info("")
        self.info("Processing '" + jobdir + "'")

    """
    Process 'output.out'
    """
    if self.verbosity > 5:
        self.info("Loading job output file 'output.out'")

    outfile = jobdir + '/output.out'
    try:
        #if True:
        with open(outfile, 'r') as f:
            content = f.readlines()
            self.__job_raw_data['output'] = self.__parse_job_output(content)

    except Exception as err:
        print("*" * 80)
        print("* WARNING: opening '" + outfile + "' (ignoring)")
        print("* " + str(err))
        print("*" * 80)

    """
    Process 'jobgeneration.pickle'
    """
    # Ensure that 'jobgeneration.pickle' exists
    jobgenerationfile = jobdir + '/jobgeneration.pickle'

    if self.verbosity > 5:
        self.info("Loading 'jobgeneration.pickle'")

    j = JobGeneration(dummy_init=True)
    self.__job_raw_data['jobgeneration'] = j.load_attributes_dict(jobgenerationfile)

    """
    Process other '*.pickle'
    """
    pickle_files = glob.glob(jobdir + '/*.pickle')

    # Iterate over all found pickle files
    for picklefile in pickle_files:
        filename = os.path.basename(picklefile)
        tag = filename.replace('.pickle', '')

        if tag == 'jobgeneration':
            continue

        if self.verbosity > 5:
            self.info("Loading pickle file '" + filename + "'")

        import pickle
        with open(picklefile, 'rb') as f:
            self.__job_raw_data[tag] = pickle.load(f)
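# The 'output' entry above is filled by self.__parse_job_output(), which is
# not shown here. As a rough illustration only, the sketch below shows one
# way '[MULE]'-prefixed key/value lines could be turned into such a
# dictionary; the function name, the exact "[MULE] key: value" line format
# and the flat string values are assumptions, not the actual MULE code.
def parse_mule_output_lines_sketch(lines):
    data = {}
    for line in lines:
        line = line.strip()
        if not line.startswith("[MULE]"):
            continue
        payload = line[len("[MULE]"):].strip()
        if ':' not in payload:
            continue
        key, value = payload.split(':', 1)
        data[key.strip()] = value.strip()
    return data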
import os

from itertools import product

# REXI
from mule_local.rexi.REXICoefficients import *
from mule_local.rexi.trexi.TREXI import *
from mule_local.rexi.cirexi.CIREXI import *
from mule_local.rexi.brexi.BREXI import *

efloat_mode = "float"
#efloat_mode = "mpfloat"

from mule_local.JobGeneration import *
from mule.JobParallelization import *
from mule.JobParallelizationDimOptions import *

jg = JobGeneration()

verbose = False
#verbose = True

##################################################
##################################################

jg.compile.mode = 'release'

if '_gnu' in os.getenv('MULE_PLATFORM_ID'):
    jg.compile.compiler = 'gnu'
else:
    jg.compile.compiler = 'intel'

jg.compile.sweet_mpi = 'enable'
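# Note: os.getenv('MULE_PLATFORM_ID') returns None when the variable is not
# exported, which makes the "'_gnu' in ..." test above raise a TypeError.
# A more defensive variant of the compiler selection (a sketch, not the
# upstream script) falls back to an empty string:
platform_id = os.getenv('MULE_PLATFORM_ID', '')
jg.compile.compiler = 'gnu' if '_gnu' in platform_id else 'intel'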
#! /usr/bin/env python3

import os
import sys
import math

from itertools import product

from mule_local.JobGeneration import *
from mule.JobParallelizationDimOptions import *
from mule.JobParallelization import *

p = JobGeneration()

verbose = False
#verbose = True

##################################################
##################################################

p.compile.mode = 'release'

if '_gnu' in os.getenv('MULE_PLATFORM_ID'):
    p.compile.compiler = 'gnu'
else:
    p.compile.compiler = 'intel'

p.compile.sweet_mpi = 'enable'
p.compile.sweet_mpi = 'disable'

p.runtime.space_res_spectral = 128

p.runtime.reuse_plans = -1  # enforce using plans (TODO: enforcing not yet implemented!)
def jobscript_get_header(jg: JobGeneration):
    """
    These headers typically contain information on e.g. job execution,
    number of compute nodes, etc.

    Returns
    -------
    string
        multiline text for scripts
    """
    job_id = jg.getUniqueID()

    p = jg.parallelization
    time_str = p.get_max_wallclock_seconds_hh_mm_ss()

    # Available queues:
    #   premium (only use this in extreme cases)
    #   regular
    #   economy
    queue = 'economy'

    # Use the regular or premium queue for larger node counts;
    # otherwise, the job doesn't seem to be scheduled
    if p.num_nodes >= 32:
        queue = 'premium'
    elif p.num_nodes >= 16:
        queue = 'regular'

    #
    # See https://www.lrz.de/services/compute/linux-cluster/batch_parallel/example_jobs/
    #
    content = """#! /bin/bash
#
## project code
#PBS -A NCIS0002
#PBS -q """+queue+"""
## wall-clock time (hrs:mins:secs)
#PBS -l walltime="""+time_str+"""
## select: number of nodes
## ncpus: number of CPUs per node
## mpiprocs: number of ranks per node
#PBS -l select="""+str(p.num_nodes)+""":ncpus="""+str(p.num_cores_per_node)+""":mpiprocs="""+str(p.num_ranks_per_node)+""":ompthreads="""+str(p.num_threads_per_rank)+"\n"

    # "default": 2301000
    # "turbo":   2301000
    # "rated":   2300000
    # "slow":    1200000
    if p.force_turbo_off:
        content += "#PBS -l select=cpufreq=2300000\n"

    content += """#
#PBS -N """+job_id[0:100]+"""
#PBS -o """+jg.p_job_stdout_filepath+"""
#PBS -e """+jg.p_job_stderr_filepath+"""

#source /etc/profile.d/modules.sh
#module load openmpi
"""+("module load mkl" if jg.compile.mkl==True or jg.compile.mkl=='enable' else "")+"""

"""+p_gen_script_info(jg)+"""

echo
echo "hostname"
hostname
echo

echo
echo "lscpu -e"
lscpu -e
echo

echo
echo "CPU Frequencies (uniquely reduced):"
cat /sys/devices/system/cpu/cpu*/cpufreq/scaling_cur_freq | sort -u
echo

"""

    if jg.compile.threading != 'off':
        content += """
export OMP_NUM_THREADS="""+str(p.num_threads_per_rank)+"""
export OMP_DISPLAY_ENV=VERBOSE
"""

    if p.core_oversubscription:
        raise Exception("Not supported with this script!")
    else:
        if p.core_affinity is not None:
            content += "\necho \"Affinity: "+str(p.core_affinity)+"\"\n"
            if p.core_affinity == 'compact':
                content += "source $MULE_ROOT/platforms/bin/setup_omp_places.sh nooversubscription close\n"
                #content += "\nexport OMP_PROC_BIND=close\n"
            elif p.core_affinity == 'scatter':
                raise Exception("Affinity '"+str(p.core_affinity)+"' not supported")
                # Unreachable due to the raise above
                content += "\nexport OMP_PROC_BIND=spread\n"
            else:
                raise Exception("Affinity '"+str(p.core_affinity)+"' not supported")

    content += "\n"

    return content
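# The walltime string comes from p.get_max_wallclock_seconds_hh_mm_ss(),
# whose implementation is not shown here. A minimal sketch of such a
# conversion, assuming the method simply formats a seconds budget as
# "hh:mm:ss" (the helper name below is hypothetical):
def format_wallclock_hh_mm_ss(seconds: int) -> str:
    hours = seconds // 3600
    minutes = (seconds % 3600) // 60
    secs = seconds % 60
    return f"{hours:02d}:{minutes:02d}:{secs:02d}"

# Example: the 2-day budget used in the benchmark scripts
print(format_wallclock_hh_mm_ss(2 * 24 * 60 * 60))   # -> "48:00:00"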
#! /usr/bin/env python3

import os
import sys
import stat
import math

from mule_local.JobGeneration import *

jg = JobGeneration()

#
# Run simulation on plane or sphere
#
jg.compile.program = 'swe_plane'

jg.compile.plane_spectral_space = 'enable'
jg.compile.plane_spectral_dealiasing = 'enable'
jg.compile.sphere_spectral_space = 'disable'
jg.compile.sphere_spectral_dealiasing = 'disable'

jg.compile.numa_block_allocator = 0

# Verbosity mode
jg.runtime.verbosity = 3

#
# Mode and physical resolution
#
jg.runtime.space_res_spectral = -1
jg.runtime.space_res_physical = 512
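# Here the resolution is prescribed on the physical grid (512 points per
# dimension) and the spectral resolution is left to be derived (-1). With
# dealiasing enabled, a common choice is the 2/3 rule; the arithmetic below
# is only a back-of-the-envelope estimate and the exact formula used by
# SWEET may differ.
space_res_physical = 512
approx_spectral_modes = int(space_res_physical * 2 / 3)
print(approx_spectral_modes)   # ~341 retained modes under a 2/3-rule truncation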
def p_gen_script_info(jg: JobGeneration):
    return """#
# Generating function: """+_whoami(2)+"""
# Platform: """+get_platform_id()+"""
# Job id: """+jg.getUniqueID()+"""
#
"""
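# p_gen_script_info() relies on a _whoami() helper and get_platform_id(),
# both provided elsewhere in the platform module. As an illustration only,
# a stack-inspection helper with a similar purpose could look like the
# sketch below; treat it as an assumption about the behaviour, not the
# actual MULE implementation.
import inspect

def whoami_sketch(depth: int = 1) -> str:
    # Report the file and function 'depth' frames up the call stack,
    # roughly what the "# Generating function:" comment is meant to show.
    frame = inspect.stack()[depth]
    return f"{frame.filename}:{frame.function}"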
#! /usr/bin/env python3

import os
import sys
import math
import numpy as np

from itertools import product

from mule_local.JobGeneration import *
from mule.JobParallelization import *
from mule.JobParallelizationDimOptions import *

p = JobGeneration()

verbose = False
#verbose = True

##################################################
##################################################

p.compile.mode = 'release'
#p.compile.sweet_mpi = 'disable'

p.runtime.space_res_spectral = 128

p.parallelization.core_oversubscription = False
p.parallelization.core_affinity = 'compact'

p.compile.threading = 'omp'
#! /usr/bin/env python3

import os
import sys
import math

efloat_mode = "float"
#efloat_mode = "mpfloat"

from mule_local.JobGeneration import *
from mule.JobParallelization import *
from mule.JobParallelizationDimOptions import *

jg = JobGeneration()

verbose = False
#verbose = True

##################################################
# Software environment stuff
##################################################

jg.compile.mode = 'release'

if '_gnu' in os.getenv('MULE_PLATFORM_ID'):
    jg.compile.compiler = 'gnu'
else:
    jg.compile.compiler = 'intel'

jg.compile.sweet_mpi = 'disable'

jg.parallelization.core_oversubscription = False
jg.parallelization.core_affinity = 'compact'
#! /usr/bin/env python3

import os
import sys
import stat
import math

from mule_local.JobGeneration import *

# Create main compile/run options
jg = JobGeneration()

jg.compile.program = "swe_plane"

jg.runtime.verbosity = 2

jg.runtime.space_res_spectral = 128

jg.runtime.benchmark_name = 'benchmark_id_1'

jg.runtime.gravitation = 1
jg.runtime.sphere_rotating_coriolis_omega = 1
jg.runtime.h0 = 1
jg.runtime.plane_domain_size = 1

jg.runtime.rexi_method = 'direct'

jg.runtime.viscosity = 0.0

jg.runtime.max_simulation_time = 0.1
jg.runtime.output_timestep_size = jg.runtime.max_simulation_time
import os
import sys
import stat
import math

from mule.SWEETRuntimeParametersScenarios import *
from mule_local.JobGeneration import *
from mule.JobParallelization import *
from mule.JobParallelizationDimOptions import *

# Classes containing SWEET compile/run basic options
#from mule_local.JobGeneration import *

# Create main compile/run options
jg = JobGeneration()

# Put the compile command into the job script (no dedicated compile script)
jg.compilecommand_in_jobscript = True

# Wallclock time
max_wallclock_seconds = 2 * 24 * 60 * 60
ref_max_wallclock_seconds = 48 * 60 * 60
jg.parallelization.max_wallclock_seconds = ref_max_wallclock_seconds

# HPC stuff
pspace = JobParallelizationDimOptions('space')
# Integer division to keep the core count a whole number
pspace.num_cores_per_rank = jg.platform_resources.num_cores_per_node // 2
pspace.num_threads_per_rank = 8
pspace.num_ranks = 1
jg.setup_parallelization([pspace])
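# A small worked example of the arithmetic above; the 36-core node is a
# hypothetical value, the real one comes from
# jg.platform_resources.num_cores_per_node.
num_cores_per_node = 36
num_cores_per_rank = num_cores_per_node // 2    # -> 18 cores reserved per rank
num_threads_per_rank = 8                        # only 8 of them run OpenMP threads
num_ranks = 1
total_cores = num_ranks * num_cores_per_rank    # -> 18 cores for the space dimension
print(num_cores_per_rank, total_cores)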