class Inventory(CitcomComponent.Inventory):
    # Pyre inventory for the tracer/composition component: declares all
    # user-configurable traits and their defaults.

    import pyre.inventory as inv

    # Master switch: enable the tracer code.
    tracer = inv.bool("tracer", default=False)

    # tracer_ic_method=0 (random generated array)
    # tracer_ic_method=1 (all proc read the same file)
    # tracer_ic_method=2 (each proc reads its own file)
    tracer_ic_method = inv.int("tracer_ic_method", default=0)

    # (tracer_ic_method == 0)
    tracers_per_element = inv.int("tracers_per_element", default=10)

    # (tracer_ic_method == 1)
    tracer_file = inv.str("tracer_file", default="tracer.dat")

    # How many flavors of tracers
    # If tracer_flavors > 0, each element will report the number of
    # tracers of each flavor inside it. This information can be used
    # later for many purposes. One of it is to compute composition,
    # either using absolute method or ratio method.
    tracer_flavors = inv.int("tracer_flavors", default=0)

    # How to initialize tracer flavors
    ic_method_for_flavors = inv.int("ic_method_for_flavors", default=0)
    # Depth(s) separating the compositional layers (non-dimensional radius).
    z_interface = inv.list("z_interface", default=[0.7])
    # Grid file used when flavors are initialized from a grid.
    ictracer_grd_file = inv.str("ictracer_grd_file", default="")
    ictracer_grd_layers = inv.int("ictracer_grd_layers", default=2)

    # Warning level
    itracer_warnings = inv.bool("itracer_warnings", default=True)

    # Enriched internal heat production
    tracer_enriched = inv.bool("tracer_enriched", default=False)
    Q0_enriched = inv.float("Q0_enriched", default=0.0)

    # Regular grid parameters
    regular_grid_deltheta = inv.float("regular_grid_deltheta", default=1.0)
    regular_grid_delphi = inv.float("regular_grid_delphi", default=1.0)

    # Analytical Test Function
    #analytical_tracer_test = inv.int("analytical_tracer_test", default=0)

    # Whether tracers contribute to the buoyancy force.
    chemical_buoyancy = inv.bool("chemical_buoyancy", default=True)

    # ibuoy_type=0 (absolute method, not implemented)
    # ibuoy_type=1 (ratio method)
    buoy_type = inv.int("buoy_type", default=1)
    buoyancy_ratio = inv.list("buoyancy_ratio", default=[1.0])

    # DJB
    hybrid_method = inv.int("hybrid_method", default=0)

    # This is not used anymore and is left here for backward compatibility
    reset_initial_composition = inv.bool("reset_initial_composition",
                                         default=False)
class Launcher(Component):
    """Abstract base class for job launchers.

    Concrete subclasses must implement 'launch' and 'argv'.
    """

    import pyre.inventory as pyre

    # Configurable traits.
    dry = pyre.bool("dry")

    nodes = pyre.int("nodes", default=1)
    nodes.meta['tip'] = """number of machine nodes"""

    nodelist = pyre.slice("nodelist")
    nodelist.meta['tip'] = """a comma-separated list of machine names in square brackets (e.g., [101-103,105,107])"""

    executable = pyre.str("executable")
    arguments = pyre.list("arguments")

    def launch(self):
        # Subclass responsibility.
        message = "class '%s' must override 'launch'" % self.__class__.__name__
        raise NotImplementedError(message)

    def argv(self):
        # Subclass responsibility: the command line to execute.
        message = "class '%s' must override 'argv'" % self.__class__.__name__
        raise NotImplementedError(message)

    def comments(self):
        # A single human-readable line recording the command to be run.
        command = ' '.join(self.argv())
        return ["command: " + command]
class Inventory(CitcomComponent.Inventory):
    # Pyre inventory for the output component: file format selection,
    # optional output fields, and low-level I/O tuning parameters.

    import pyre.inventory as inv

    output_num_shells = inv.int("output_num_shells", default=0)
    output_shell_rids = inv.list("output_shell_rids", default=[])
    output_shell_num_buffered_timesteps = inv.int(
        "output_shell_num_buffered_timesteps", default=5)
    output_shell_every = inv.int("output_shell_every", default=4)

    output_format = inv.str("output_format", default="ascii",
                            validator=inv.choice(
                                ["ascii", "ascii-gz", "vtk", "hdf5"]))
    output_optional = inv.str("output_optional",
                              default="surf,botm,tracer")

    # experimental vtk output
    gzdir_vtkio = inv.int("gzdir_vtkio", default=0)
    # remove net rotation
    gzdir_rnr = inv.bool("gzdir_rnr", default=False)

    # write additional heat flux files? if yes, how frequent?
    write_q_files = inv.int("write_q_files", default=0)

    # max. degree for spherical harmonics output
    output_ll_max = inv.int("output_ll_max", default=20)

    # self-gravitation, for geoid only
    self_gravitation = inv.bool("self_gravitation", default=False)

    # compute stress/topography by consistent-boundary-flux method
    use_cbf_topo = inv.bool("use_cbf_topo", default=False)

    mega1 = 1024 * 1024
    #megaq = 256*1024

    # size of collective buffer used by MPI-IO
    cb_block_size = inv.int("cb_block_size", default=mega1)
    cb_buffer_size = inv.int("cb_buffer_size", default=mega1 * 4)

    # size of data sieve buffer used by HDF5
    sieve_buf_size = inv.int("sieve_buf_size", default=mega1)

    # memory alignment used by HDF5
    # FIX: use floor division so these defaults remain ints under
    # Python 3 ("/" would produce floats there); the result is
    # identical under Python 2, where int "/" already truncated.
    output_alignment = inv.int("output_alignment", default=mega1 // 4)
    output_alignment_threshold = inv.int("output_alignment_threshold",
                                         default=mega1 // 2)

    # cache for chunked dataset used by HDF5
    cache_mdc_nelmts = inv.int("cache_mdc_nelmts", default=10330)
    cache_rdcc_nelmts = inv.int("cache_rdcc_nelmts", default=521)
    cache_rdcc_nbytes = inv.int("cache_rdcc_nbytes", default=mega1)
class Job(Component):
    """Description of a batch job: resources, I/O redirection, and the
    command line to run."""

    name = "job"

    import pyre.inventory as pyre
    import pyre.util as util
    from pyre.units.time import minute

    task = pyre.str("name")  # 'task' internally, to avoid name conflict
    queue = pyre.str("queue")
    mail = pyre.bool("mail", default=False)
    dwalltime = pyre.dimensional("walltime", default=0 * minute)

    # Standard stream redirection for the job.
    stdin = pyre.str("stdin", default=util.devnull())
    stdout = pyre.str("stdout", default="stdout.txt")
    stderr = pyre.str("stderr", default="stderr.txt")

    environment = pyre.list("environment")
    executable = pyre.str("executable")
    arguments = pyre.list("arguments")
    comments = pyre.list("comments")

    def __init__(self):
        super(Job, self).__init__()
        # Filled in later by the scheduler/launcher.
        self.nodes = 1
        self.id = None

    def getStateArgs(self, stage):
        """Return the command-line properties that carry job state into
        the given *stage* ('launch' or 'compute')."""
        flags = []
        if stage == 'launch':
            flags.append("--macros.job.name=%s" % self.task)
        elif stage == 'compute':
            flags.append("--macros.job.id=%s" % self.id)
        return flags
class SchedulerTACCRanger(SchedulerSGE):
    """SGE scheduler specialized for TACC's Ranger system, which
    allocates whole 16-core nodes."""

    name = "tacc-ranger"

    import pyre.inventory as pyre
    command = pyre.str("command", default="qsub")
    tpn = pyre.int("tpn", default=16,
                   validator=pyre.choice([1, 2, 4, 8, 12, 15, 16]))
    tpn.meta['tip'] = 'Task per node'
    qsubOptions = pyre.list("qsub-options")

    def schedule(self, job):
        from math import ceil
        # Figure out how many whole nodes the requested tasks occupy
        # at 'tpn' tasks per node, then charge 16 cores for each node
        # before delegating to the generic SGE submission.
        node_count = ceil(job.nodes / float(self.tpn))
        self.cores = int(node_count * 16)
        SchedulerSGE.schedule(self, job)
class SchedulerPBS(BatchScheduler):
    # Batch scheduler backend for PBS/Torque: renders a batch script
    # from a template and pipes it to 'qsub' on stdin.
    # NOTE(review): Python 2 only ('print' statements, popen2,
    # 'except E, e'); relies on module-level 'import sys' not visible
    # in this chunk -- verify at file top.
    name = "pbs"

    import pyre.inventory as pyre
    command = pyre.str("command", default="qsub")
    qsubOptions = pyre.list("qsub-options")
    resourceList = pyre.list("resource-list")
    ppn = pyre.int("ppn", default=1)  # processors per node

    def schedule(self, job):
        # Submit *job* to PBS. Exits the process if the template is
        # missing or qsub fails; returns None on success, on a dry
        # run, or on an IOError talking to the child.
        import pyre.util as util

        # Fix-up the job.
        if not job.task:
            job.task = "jobname"
        job.walltime = util.hms(job.dwalltime.value)
        job.arguments = ' '.join(job.arguments)
        job.resourceList = self.buildResourceList(job)

        # Generate the main PBS batch script.
        script = self.retrieveTemplate('batch.sh', ['schedulers', 'scripts', self.name])
        if script is None:
            self._error.log("could not locate batch script template for '%s'" % self.name)
            sys.exit(1)
        script.scheduler = self
        script.job = job

        if self.dry:
            # Dry run: show the rendered script instead of submitting.
            print script
            return

        try:
            import os
            from popen2 import Popen4
            cmd = [self.command]
            self._info.log("spawning: %s" % ' '.join(cmd))
            child = Popen4(cmd)
            self._info.log("spawned process %d" % child.pid)
            # Feed the rendered batch script to qsub's stdin.
            print >> child.tochild, script
            child.tochild.close()
            # Echo qsub's combined stdout/stderr into our log.
            for line in child.fromchild:
                self._info.line(" " + line.rstrip())
            status = child.wait()
            self._info.log()
            # Decode the raw wait() status word.
            exitStatus = None
            if (os.WIFSIGNALED(status)):
                statusStr = "signal %d" % os.WTERMSIG(status)
            elif (os.WIFEXITED(status)):
                exitStatus = os.WEXITSTATUS(status)
                statusStr = "exit %d" % exitStatus
            else:
                statusStr = "status %d" % status
            self._info.log("%s: %s" % (cmd[0], statusStr))
        except IOError, e:
            # Best-effort: log the failure to spawn/talk to qsub and bail.
            self._error.log("%s: %s" % (self.command, e))
            return

        if exitStatus == 0:
            pass
        else:
            # Abort the whole program with qsub's failure status.
            sys.exit("%s: %s: %s" % (sys.argv[0], cmd[0], statusStr))
        return
class SchedulerSGE(BatchScheduler):
    # Batch scheduler backend for Sun Grid Engine: renders a batch
    # script template to a temporary file and submits it with 'qsub'.
    # NOTE(review): Python 2 only ('print' statements, 'except E, e');
    # relies on module-level 'import sys' not visible in this chunk.
    name = "sge"

    import pyre.inventory as pyre
    command = pyre.str("command", default="qsub")
    peName = pyre.str("pe-name", default="mpi")  # parallel environment name
    peNumber = pyre.str("pe-number", default="n")  # slot count for the PE
    qsubOptions = pyre.list("qsub-options")

    def schedule(self, job):
        # Submit *job* via 'qsub <scriptfile>'. Exits the process if
        # the template is missing or qsub fails; returns None on
        # success, on a dry run, or on an IOError.
        import pyre.util as util

        # Fix-up the job.
        if not job.task:
            job.task = "jobname"
        job.walltime = util.hms(job.dwalltime.value)
        job.arguments = ' '.join(job.arguments)

        # Generate the main SGE batch script.
        script = self.retrieveTemplate('batch.sh', ['schedulers', 'scripts', self.name])
        if script is None:
            self._error.log("could not locate batch script template for '%s'" % self.name)
            sys.exit(1)
        script.scheduler = self
        script.job = job

        if self.dry:
            # Dry run: show the rendered script instead of submitting.
            print script
            return

        try:
            import os, tempfile
            # HACK: tempfile.mktemp() is race-prone (the name can be
            # claimed between generation and open); mkstemp() would be
            # the safe replacement.
            filename = tempfile.mktemp()
            s = open(filename, 'w')
            print >>s, script
            s.close()
            cmd = [self.command, filename]
            self._info.log("spawning: %s" % ' '.join(cmd))
            # Run qsub synchronously, then discard the temp script.
            status = os.spawnvp(os.P_WAIT, cmd[0], cmd)
            os.remove(filename)
            # Decode the raw status word.
            exitStatus = None
            if (os.WIFSIGNALED(status)):
                statusStr = "signal %d" % os.WTERMSIG(status)
            elif (os.WIFEXITED(status)):
                exitStatus = os.WEXITSTATUS(status)
                statusStr = "exit %d" % exitStatus
            else:
                statusStr = "status %d" % status
            self._info.log("%s: %s" % (cmd[0], statusStr))
        except IOError, e:
            # Best-effort: log the failure and bail.
            self._error.log("%s: %s" % (self.command, e))
            return

        if exitStatus == 0:
            pass
        else:
            # Abort the whole program with qsub's failure status.
            sys.exit("%s: %s: %s" % (sys.argv[0], cmd[0], statusStr))
        return
class SchedulerLSF(BatchScheduler):
    # Batch scheduler backend for LSF: renders a batch script from a
    # template and pipes it to 'bsub' on stdin.
    # NOTE(review): Python 2 only ('print' statements, popen2,
    # 'except E, e'). 'os' and 'sys' are referenced below but not
    # imported in this method (unlike SchedulerPBS, which imports 'os'
    # locally) -- presumably imported at module level; verify.
    name = "lsf"

    import pyre.inventory as pyre
    command = pyre.str("command", default="bsub")
    bsubOptions = pyre.list("bsub-options")

    def schedule(self, job):
        # Submit *job* to LSF. Exits the process if the template is
        # missing or bsub fails; returns None on success, on a dry
        # run, or on an IOError talking to the child.
        import pyre.util as util

        # Fix-up the job.
        if not job.task:
            # LSF scripts must have a job name; otherwise strange
            # "/bin/sh: Event not found" errors occur (tested on
            # TACC's Lonestar system).
            job.task = "jobname"
        job.walltime = util.hms(job.dwalltime.value)
        job.arguments = ' '.join(job.arguments)

        # Generate the main LSF batch script.
        script = self.retrieveTemplate('batch.sh', ['schedulers', 'scripts', self.name])
        if script is None:
            self._error.log("could not locate batch script template for '%s'" % self.name)
            sys.exit(1)
        script.scheduler = self
        script.job = job

        if self.dry:
            # Dry run: show the rendered script instead of submitting.
            print script
            return

        try:
            from popen2 import Popen4
            cmd = [self.command]
            if self.wait:
                # -K makes bsub block until the job completes.
                cmd.append("-K")
            self._info.log("spawning: %s" % ' '.join(cmd))
            child = Popen4(cmd)
            self._info.log("spawned process %d" % child.pid)
            # Feed the rendered batch script to bsub's stdin.
            print >> child.tochild, script
            child.tochild.close()
            if self.wait:
                self._info.log("Waiting for dispatch...")
            # Echo bsub's combined stdout/stderr into our log.
            for line in child.fromchild:
                self._info.line(" " + line.rstrip())
            status = child.wait()
            self._info.log()
            # Decode the raw wait() status word.
            exitStatus = None
            if (os.WIFSIGNALED(status)):
                statusStr = "signal %d" % os.WTERMSIG(status)
            elif (os.WIFEXITED(status)):
                exitStatus = os.WEXITSTATUS(status)
                statusStr = "exit %d" % exitStatus
            else:
                statusStr = "status %d" % status
            self._info.log("%s: %s" % (cmd[0], statusStr))
        except IOError, e:
            # Best-effort: log the failure to spawn/talk to bsub and bail.
            self._error.log("%s: %s" % (self.command, e))
            return

        # "[When given the -K option], bsub will exit with the same
        # exit code as the job so that job scripts can take
        # appropriate actions based on the exit codes. bsub exits with
        # value 126 if the job was terminated while pending."
        if exitStatus == 0:
            pass
        elif self.wait:
            # Propagate the job's own exit code when we waited for it.
            sys.exit(exitStatus)
        else:
            # Abort the whole program with bsub's failure status.
            sys.exit("%s: %s: %s" % (sys.argv[0], cmd[0], statusStr))
        return