# NB: to_str, CFG, new_directory and resource_parameters are helpers from the
# surrounding module (not shown here); os/osp/salome are standard imports.
import os
import os.path as osp

import salome


def create_command_job(servcfg, params, prof, stage):
    """Create the SalomeLauncher job."""
    stagedir = stage.folder
    export = osp.join(stagedir, "export")
    fname = osp.join(stagedir, "launcher_script")
    rcdef = servcfg['rc_definition']
    with open(fname, "wb") as fobj:
        fobj.write(os.linesep.join([
            "#!/bin/bash",
            "{} shell -- as_run export",
            ""]).format(osp.join(rcdef.applipath, 'salome')))
    # 0o755 is the portable (Python 2.6+/3) spelling of the octal literal 0755
    os.chmod(fname, 0o755)
    salome_job = salome.JobParameters()
    salome_job.job_name = to_str(prof["nomjob"][0])
    salome_job.job_type = str("command")
    salome_job.wckey = to_str(CFG.get_wckey() or '')
    salome_job.job_file = to_str(fname)
    salome_job.result_directory = to_str(stagedir)
    salome_job.work_directory = str(new_directory(servcfg))
    salome_job.maximum_duration = str(params['time'])
    # Among the input files, do not take distant input databases
    dbtype, _ = prof.get_base('D')
    local_in_files = [i.path for i in prof.get_data()
                      if not (i.host and i.type == dbtype)]
    local_in_files.append(export)
    salome_job.in_files = [to_str(i) for i in local_in_files]
    out_files = [osp.basename(i.path) for i in prof.get_result()]
    salome_job.out_files = [to_str(i).split(str(":"))[-1] for i in out_files]
    salome_job.resource_required = resource_parameters(params)
    # From here on, profil methods from asrun are called (see profil.py):
    # deep copy of the profil object, then loop over the study files.
    exported = prof.copy()
    for entry in exported.get_collection():
        entry.host, entry.user, entry.passwd = '', '', ''
        entry.path = osp.basename(entry.path)
        # Warning: despite the method's name, the entry (i.e. the asrun
        # object referencing a file) is updated in place, not added, because
        # it is already referenced by the profil (see the implementation in
        # asrun/profil.py). Updating the entry is required to refresh the
        # export content.
        exported.add(entry)
    exported.WriteExportTo(export)
    return salome_job
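# --- Hedged usage sketch (not part of the original module) -----------------
# Shows how a JobParameters object built by create_command_job() is typically
# handed to the SALOME launcher. The wrapper name is hypothetical;
# createJob/launchJob are the standard SalomeLauncher methods.
def submit_command_job(servcfg, params, prof, stage):
    """Create the job, register it with the launcher and submit it."""
    salome_job = create_command_job(servcfg, params, prof, stage)
    launcher = salome.naming_service.Resolve('/SalomeLauncher')
    job_id = launcher.createJob(salome_job)   # register, returns an integer id
    launcher.launchJob(job_id)                # actually submit to the resource
    return job_id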
def __getJobParameters__(self):
    """
    Sets and returns the JobManager single-job parameters.
    """
    # ---------------------------------
    resManager = salome.lcc.getResourcesManager()
    job_params = salome.JobParameters()
    # ---------------------------------

    # ---------------------------------
    job_params.resource_required = salome.ResourceParameters()
    job_params.resource_required.name = self.host
    job_params.resource_required.nb_proc = \
        int(self.cfg.get('batch_parameters', 'nprocs'))
    job_params.resource_required.type = 'rsync'

    # JobManager does not understand SLURM's "d-hh:mm:ss" wall clock:
    # fold the days into the hours to obtain "hh:mm:ss".
    wall_clock = self.cfg.get('batch_parameters', 'wall_clock')
    days, hms = wall_clock.split("-")
    wch, wcm, wcs = hms.split(':')
    jp_wc = "%d:%s:%s" % (int(days) * 24 + int(wch), wcm, wcs)
    job_params.maximum_duration = jp_wc

    job_params.wckey = self.cfg.get('batch_parameters', 'wckey')
    job_params.job_name = "CS_OT"
    # ---------------------------------

    # ---------------------------------
    job_params.work_directory = os.path.join(self.dist_wdir,
                                             self.study_name,
                                             self.case_name)
    job_params.local_directory = os.path.join(self.case_dir,
                                              'RESU',
                                              self.run_id)
    job_params.result_directory = job_params.local_directory
    # ---------------------------------

    # ---------------------------------
    job_params.in_files = [os.path.join(self.case_dir, 'DATA'),
                           os.path.join(self.case_dir, 'SRC'),
                           os.path.join(self.case_dir, 'RESU')]
    # ---------------------------------

    # ---------------------------------
    job_params.out_files = []
    for f in (self.results_file, 'run_status.exceeded_time_limit'):
        df = os.path.join(job_params.work_directory, 'RESU', self.run_id, f)
        job_params.out_files.append(df)
    # ---------------------------------

    # ---------------------------------
    job_params.job_type = 'command'
    job_params.pre_command = os.path.join(self.case_dir, 'prepare_cs_case.sh')
    job_params.job_file = os.path.join(self.case_dir, 'run_cs_case.sh')
    # ---------------------------------

    return job_params
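# --- Hedged illustration (not in the original source) ----------------------
# The wall-clock conversion above, restated as a standalone helper so the
# days-to-hours arithmetic can be checked in isolation; the function name is
# hypothetical.
def slurm_to_jobmanager_duration(wall_clock):
    """Fold SLURM's 'd-hh:mm:ss' into 'hh:mm:ss' (days folded into hours)."""
    days, hms = wall_clock.split("-")
    hours, minutes, seconds = hms.split(":")
    return "%d:%s:%s" % (int(days) * 24 + int(hours), minutes, seconds)

assert slurm_to_jobmanager_duration("1-02:30:00") == "26:30:00"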
def __getJobParameters__(self):
    """
    Sets and returns the JobManager single-job parameters.
    """
    # ---------------------------------
    resManager = salome.lcc.getResourcesManager()
    rdef = resManager.GetResourceDefinition(self.host)
    dist_wdir = os.path.split(rdef.working_directory)[0]

    job_params = salome.JobParameters()
    # ---------------------------------

    # ---------------------------------
    job_params.resource_required = salome.ResourceParameters()
    job_params.resource_required.name = self.host
    job_params.resource_required.nb_proc = \
        int(self.cfg.get('batch_parameters', 'nprocs'))
    job_params.resource_required.type = 'rsync'

    job_params.maximum_duration = self.cfg.get('batch_parameters',
                                               'wall_clock')
    job_params.wckey = self.cfg.get('batch_parameters', 'wckey')
    job_params.job_name = "CS_OT"
    # ---------------------------------

    # ---------------------------------
    job_params.work_directory = os.path.join(dist_wdir,
                                             self.study_name,
                                             self.case_name)
    job_params.local_directory = os.path.join(self.case_dir,
                                              'RESU',
                                              self.run_id)
    job_params.result_directory = job_params.local_directory
    # ---------------------------------

    # ---------------------------------
    job_params.in_files = [os.path.join(self.case_dir, 'DATA'),
                           os.path.join(self.case_dir, 'SRC'),
                           os.path.join(self.case_dir, 'SCRIPTS'),
                           os.path.join(self.case_dir, 'RESU')]
    # ---------------------------------

    # ---------------------------------
    job_params.out_files = []
    for f in (self.results_file, 'status.exceeded_time_limit'):
        df = os.path.join(job_params.work_directory, 'RESU', self.run_id, f)
        job_params.out_files.append(df)
    # ---------------------------------

    # ---------------------------------
    job_params.job_type = 'command'
    job_params.pre_command = os.path.join(self.case_dir, 'prepare_cs_case.sh')
    job_params.job_file = os.path.join(self.case_dir, 'run_cs_case.sh')
    # ---------------------------------

    return job_params
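# --- Hedged companion sketch (not in the original class) -------------------
# Illustrates how the parameters above would typically be consumed: register
# the job with the SALOME launcher, wait for completion, then pull the
# out_files back into result_directory. The method name and the polling
# period are assumptions; createJob/launchJob/getJobState/getJobResults are
# the standard SalomeLauncher methods.
def __runSingleJob__(self):
    """Submit the job, wait until it ends, then fetch its results."""
    import time
    launcher = salome.naming_service.Resolve('/SalomeLauncher')
    job_id = launcher.createJob(self.__getJobParameters__())
    launcher.launchJob(job_id)
    state = launcher.getJobState(job_id)
    while state not in ("FINISHED", "FAILED"):
        time.sleep(30)                        # poll the batch manager
        state = launcher.getJobState(job_id)
    launcher.getJobResults(job_id, '')        # '' -> use result_directory
    return state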