예제 #1
0
def resource_parameters(params):
    """Build a SALOME ResourceParameters object from the job parameters dict.

    Reads 'server' and 'memory' (required) plus 'mode', 'mpicpu' and
    'nodes' (optional) from *params*.
    """
    debug_message2("ResourceParameters from:", params)
    resource = salome.ResourceParameters()
    resource.name = to_str(params['server'])
    # Batch launching is only enabled when the job runs in batch text mode.
    resource.can_launch_batch_jobs = params.get('mode') == Job.BatchText
    # Known issue: setting mem_mb may raise
    # "ulimit: error setting limit (Invalid argument)" on some servers.
    resource.mem_mb = int(params['memory'])
    resource.nb_proc = params.get('mpicpu', 1)
    resource.nb_node = params.get('nodes', 1)
    return resource
예제 #2
0
 def available_servers(self):
     """Return the list of available servers, computing it once and caching."""
     # Cached from a previous call: reuse it.
     if self._servers:
         return self._servers
     # Ask SALOME's resource manager for every batch-capable resource.
     criteria = salome.ResourceParameters()
     criteria.can_launch_batch_jobs = True
     manager = salome.lcc.getResourcesManager()
     self._servers = manager.GetFittingResources(criteria)
     # Promote 'localhost' to the front so the first opening of the
     # Run dialog refreshes quickly.
     try:
         self._servers.remove('localhost')
     except ValueError:
         pass
     else:
         self._servers.insert(0, 'localhost')
     for server in self._servers:
         self._set_rcdef(server)
     return self._servers
    def __getJobParameters__(self):
        """
        Sets and returns the JobManager single job parameters.

        Builds a salome.JobParameters object describing the required
        resource, the wall-clock limit, the remote/local directories,
        the input/output file lists and the command-based job scripts.
        """

        # ---------------------------------
        # NOTE(review): the original code fetched
        # salome.lcc.getResourcesManager() into an unused local here
        # (the result is only needed in the variant that calls
        # GetResourceDefinition); the dead assignment was removed.
        job_params = salome.JobParameters()
        # ---------------------------------

        # ---------------------------------
        # Required resource: run on self.host with the configured number
        # of processes; files are transferred with rsync.
        job_params.resource_required         = salome.ResourceParameters()
        job_params.resource_required.name    = self.host
        job_params.resource_required.nb_proc = int(self.cfg.get('batch_parameters', 'nprocs'))
        job_params.resource_required.type    = 'rsync'

        # Config stores the wall clock as "D-hh:mm:ss"; JobManager takes
        # no day field, so fold the days into the hours.
        # NOTE(review): the original comment claimed an "hh:mm" format but
        # the code emits "h:mm:ss" — confirm the expected format.
        wall_clock = self.cfg.get('batch_parameters','wall_clock')
        days, hms = wall_clock.split("-")
        wch, wcm, wcs = hms.split(':')

        jp_wc = "%d:%s:%s" % (int(days)*24+int(wch), wcm, wcs)
        job_params.maximum_duration = jp_wc

        job_params.wckey            = self.cfg.get('batch_parameters', 'wckey')
        job_params.job_name         = "CS_OT"
        # ---------------------------------

        # ---------------------------------
        # Remote working directory and the local directory that receives
        # the results (result_directory mirrors local_directory).
        job_params.work_directory   = os.path.join(self.dist_wdir,
                                                   self.study_name,
                                                   self.case_name)

        job_params.local_directory = os.path.join(self.case_dir,
                                                  'RESU',
                                                  self.run_id)

        job_params.result_directory = job_params.local_directory
        # ---------------------------------

        # ---------------------------------
        # Input files copied to the server before the run.
        job_params.in_files = [ os.path.join(self.case_dir, 'DATA'),
                                os.path.join(self.case_dir, 'SRC'),
                                os.path.join(self.case_dir, 'RESU') ]
        # ---------------------------------

        # ---------------------------------
        # Output files retrieved from the remote run directory afterwards.
        job_params.out_files = []
        for f in (self.results_file, 'run_status.exceeded_time_limit'):
            df = os.path.join(job_params.work_directory,
                              'RESU',
                              self.run_id,
                              f)

            job_params.out_files.append(df)
        # ---------------------------------

        # ---------------------------------
        # Command-based job: a preparation script, then the run script.
        job_params.job_type = 'command'
        job_params.pre_command = os.path.join(self.case_dir, 'prepare_cs_case.sh')

        job_params.job_file = os.path.join(self.case_dir, 'run_cs_case.sh')
        # ---------------------------------

        return job_params
예제 #4
0
    def __getJobParameters__(self):
        """
        Sets and returns the JobManager single job parameters.

        Queries the resource definition of the target host to locate the
        remote working directory, then fills a salome.JobParameters with
        the resource requirements, directories, file lists and scripts.
        """

        # Resolve the remote working directory from the host's resource
        # definition (parent of its declared working_directory).
        rc_manager = salome.lcc.getResourcesManager()
        resource_def = rc_manager.GetResourceDefinition(self.host)
        remote_wdir = os.path.split(resource_def.working_directory)[0]

        jp = salome.JobParameters()

        # Required resource: the target host, configured process count,
        # rsync file transfer.
        required = salome.ResourceParameters()
        required.name = self.host
        required.nb_proc = int(self.cfg.get('batch_parameters', 'nprocs'))
        required.type = 'rsync'
        jp.resource_required = required

        # Scheduling metadata straight from the configuration.
        jp.maximum_duration = self.cfg.get('batch_parameters',
                                           'wall_clock')
        jp.wckey = self.cfg.get('batch_parameters', 'wckey')
        jp.job_name = "CS_OT"

        # Remote work directory and the local directory receiving results
        # (result_directory mirrors local_directory).
        jp.work_directory = os.path.join(remote_wdir,
                                         self.study_name,
                                         self.case_name)
        jp.local_directory = os.path.join(self.case_dir,
                                          'RESU',
                                          self.run_id)
        jp.result_directory = jp.local_directory

        # Input files copied to the server before the run.
        jp.in_files = [os.path.join(self.case_dir, sub)
                       for sub in ('DATA', 'SRC', 'SCRIPTS', 'RESU')]

        # Output files retrieved from the remote run directory afterwards.
        jp.out_files = [os.path.join(jp.work_directory,
                                     'RESU',
                                     self.run_id,
                                     name)
                        for name in (self.results_file,
                                     'status.exceeded_time_limit')]

        # Command-based job: a preparation script, then the run script.
        jp.job_type = 'command'
        jp.pre_command = os.path.join(self.case_dir, 'prepare_cs_case.sh')
        jp.job_file = os.path.join(self.case_dir, 'run_cs_case.sh')

        return jp