def _get_account():
    account = get_project()
    return account
def configure(self, compset_name, grid_name, machine_name=None,
              project=None, pecount=None, compiler=None, mpilib=None,
              user_compset=False, pesfile=None,
              user_grid=False, gridfile=None, ninst=1, test=False,
              walltime=None, queue=None):

    #--------------------------------------------
    # compset, pesfile, and compset components
    #--------------------------------------------
    self._set_compset_and_pesfile(compset_name, user_compset=user_compset, pesfile=pesfile)

    self._components = self.get_compset_components()
    #FIXME - if --user-compset is True then need to determine that
    #all of the compset settings are valid

    #--------------------------------------------
    # grid
    #--------------------------------------------
    if user_grid is True and gridfile is not None:
        self.set_value("GRIDS_SPEC_FILE", gridfile)
    grids = Grids(gridfile)

    gridinfo = grids.get_grid_info(name=grid_name, compset=self._compsetname)

    self._gridname = gridinfo["GRID"]
    for key,value in gridinfo.items():
        logger.debug("Set grid %s %s"%(key,value))
        self.set_lookup_value(key,value)

    #--------------------------------------------
    # component config data
    #--------------------------------------------
    self._get_component_config_data()

    self.get_compset_var_settings()

    #--------------------------------------------
    # machine
    #--------------------------------------------
    # set machine values in env_xxx files
    machobj = Machines(machine=machine_name)
    machine_name = machobj.get_machine_name()
    self.set_value("MACH",machine_name)
    nodenames = machobj.get_node_names()
    nodenames = [x for x in nodenames if
                 '_system' not in x and '_variables' not in x and 'mpirun' not in x and
                 'COMPILER' not in x and 'MPILIB' not in x]

    for nodename in nodenames:
        value = machobj.get_value(nodename, resolved=False)
        type_str = self.get_type_info(nodename)
        if type_str is not None:
            logger.debug("machine nodename %s value %s"%(nodename, value))
            self.set_value(nodename, convert_to_type(value, type_str, nodename))

    if compiler is None:
        compiler = machobj.get_default_compiler()
    else:
        expect(machobj.is_valid_compiler(compiler),
               "compiler %s is not supported on machine %s" %(compiler, machine_name))

    self.set_value("COMPILER",compiler)

    if mpilib is None:
        mpilib = machobj.get_default_MPIlib({"compiler":compiler})
    else:
        expect(machobj.is_valid_MPIlib(mpilib, {"compiler":compiler}),
               "MPIlib %s is not supported on machine %s" %(mpilib, machine_name))
    self.set_value("MPILIB",mpilib)

    machdir = machobj.get_machines_dir()
    self.set_value("MACHDIR", machdir)

    # Create env_mach_specific settings from machine info.
    env_mach_specific_obj = self.get_env("mach_specific")
    env_mach_specific_obj.populate(machobj)
    self.schedule_rewrite(env_mach_specific_obj)

    #--------------------------------------------
    # pe layout
    #--------------------------------------------
    match1 = re.match('([0-9]+)x([0-9]+)', "" if pecount is None else pecount)
    match2 = re.match('([0-9]+)', "" if pecount is None else pecount)
    pes_ntasks = {}
    pes_nthrds = {}
    pes_rootpe = {}
    if match1:
        opti_tasks = match1.group(1)
        opti_thrds = match1.group(2)
    elif match2:
        opti_tasks = match2.group(1)
        opti_thrds = 1

    other = {}
    if match1 or match2:
        for component_class in self._component_classes:
            if component_class == "DRV":
                component_class = "CPL"
            string = "NTASKS_" + component_class
            pes_ntasks[string] = opti_tasks
            string = "NTHRDS_" + component_class
            pes_nthrds[string] = opti_thrds
            string = "ROOTPE_" + component_class
            pes_rootpe[string] = 0
    else:
        pesobj = Pes(self._pesfile)

        pes_ntasks, pes_nthrds, pes_rootpe, other = pesobj.find_pes_layout(self._gridname, self._compsetname,
                                                                           machine_name, pesize_opts=pecount)

    mach_pes_obj = self.get_env("mach_pes")
    totaltasks = {}
    # Since other items may include PES_PER_NODE we need to do this first;
    # we can get rid of this code when all of the perl is removed
    for key, value in other.items():
        self.set_value(key, value)
    pes_per_node = self.get_value("PES_PER_NODE")
    for key, value in pes_ntasks.items():
        totaltasks[key[-3:]] = int(value)
        mach_pes_obj.set_value(key,int(value), pes_per_node=pes_per_node)
    for key, value in pes_rootpe.items():
        totaltasks[key[-3:]] += int(value)
        mach_pes_obj.set_value(key,int(value), pes_per_node=pes_per_node)
    for key, value in pes_nthrds.items():
        totaltasks[key[-3:]] *= int(value)
        mach_pes_obj.set_value(key,int(value), pes_per_node=pes_per_node)

    maxval = 1
    if mpilib != "mpi-serial":
        for key, val in totaltasks.items():
            if val < 0:
                val = -1*val*pes_per_node
            if val > maxval:
                maxval = val

    # Make sure that every component has been accounted for;
    # set nthrds and ntasks to 1 otherwise. Also set the ninst values here.
    for compclass in self._component_classes:
        if compclass == "DRV":
            continue
        key = "NINST_%s"%compclass
        mach_pes_obj.set_value(key, ninst)
        key = "NTASKS_%s"%compclass
        if key not in pes_ntasks.keys():
            mach_pes_obj.set_value(key,1)
        key = "NTHRDS_%s"%compclass
        if key not in pes_nthrds.keys():
            mach_pes_obj.set_value(key,1)

    # FIXME - this is a short term fix for dealing with the restriction that
    # CISM1 cannot run on multiple cores
    if "CISM1" in self._compsetname:
        mach_pes_obj.set_value("NTASKS_GLC",1)
        mach_pes_obj.set_value("NTHRDS_GLC",1)

    #--------------------------------------------
    # batch system
    #--------------------------------------------
    batch_system_type = machobj.get_value("BATCH_SYSTEM")
    batch = Batch(batch_system=batch_system_type, machine=machine_name)
    bjobs = batch.get_batch_jobs()
    env_batch = self.get_env("batch")
    env_batch.set_batch_system(batch, batch_system_type=batch_system_type)
    env_batch.create_job_groups(bjobs)
    env_batch.set_job_defaults(bjobs, pesize=maxval, walltime=walltime, force_queue=queue)
    self.schedule_rewrite(env_batch)

    self.set_value("COMPSET",self._compsetname)

    self._set_pio_xml()
    logger.info(" Compset is: %s " %self._compsetname)
    logger.info(" Grid is: %s " %self._gridname )
    logger.info(" Components in compset are: %s " %self._components)

    # Set project id
    if project is None:
        project = get_project(machobj)
    if project is not None:
        self.set_value("PROJECT", project)
    elif machobj.get_value("PROJECT_REQUIRED"):
        expect(project is not None, "PROJECT_REQUIRED is true but no project found")

    # Overwriting an existing exeroot or rundir can cause problems
    exeroot = self.get_value("EXEROOT")
    rundir = self.get_value("RUNDIR")
    for wdir in (exeroot, rundir):
        logging.debug("wdir is %s"%wdir)
        if os.path.exists(wdir):
            expect(not test, "Directory %s already exists, aborting test"% wdir)
            response = raw_input("\nDirectory %s already exists, (r)eplace, (a)bort, or (u)se existing?"% wdir)
            if response.startswith("r"):
                shutil.rmtree(wdir)
            else:
                expect(response.startswith("u"), "Aborting by user request")

    # miscellaneous settings
    if self.get_value("RUN_TYPE") == 'hybrid':
        self.set_value("GET_REFCASE", True)

    # Turn on short term archiving as cesm default setting
    model = get_model()
    if model == "cesm" and not test:
        self.set_value("DOUT_S",True)
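
# A minimal usage sketch for configure() above: one way a caller might set up
# a case. The import path, caseroot, compset alias, grid alias, and the
# machine/compiler/mpilib names are illustrative assumptions, not taken from
# this module.
def _example_configure_case(caseroot):
    from CIME.case import Case  # assumed import path

    case = Case(caseroot)
    # pecount accepts "<tasks>x<threads>" (e.g. "64x2") or a bare task count
    # (e.g. "64", implying one thread per task); when omitted, the layout is
    # looked up from the pes file for the chosen grid/compset/machine.
    case.configure("A", "f19_g16",
                   machine_name="yellowstone",
                   compiler="intel",
                   mpilib="mpich2",
                   pecount="64x1")
    return case
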
def __init__(self, test_names, test_data=None,
             no_run=False, no_build=False, no_setup=False, no_batch=None,
             test_root=None, test_id=None,
             machine_name=None, compiler=None,
             baseline_root=None, baseline_cmp_name=None, baseline_gen_name=None,
             clean=False, namelists_only=False,
             project=None, parallel_jobs=None,
             walltime=None, proc_pool=None,
             use_existing=False, save_timing=False, queue=None,
             allow_baseline_overwrite=False, output_root=None,
             force_procs=None, force_threads=None, mpilib=None,
             input_dir=None, pesfile=None, mail_user=None, mail_type=None,
             allow_pnl=False, non_local=False):
###########################################################################
    self._cime_root = get_cime_root()
    self._cime_model = get_model()
    self._cime_driver = "mct"
    self._save_timing = save_timing
    self._queue = queue
    self._test_data = {} if test_data is None else test_data # Format: {test_name -> {data_name -> data}}
    self._mpilib = mpilib  # allow override of default mpilib
    self._completed_tests = 0
    self._input_dir = input_dir
    self._pesfile = pesfile
    self._allow_baseline_overwrite = allow_baseline_overwrite
    self._allow_pnl = allow_pnl
    self._non_local = non_local

    self._mail_user = mail_user
    self._mail_type = mail_type

    self._machobj = Machines(machine=machine_name)

    self._model_build_cost = 4

    # If user is forcing procs or threads, re-write test names to reflect this.
    if force_procs or force_threads:
        test_names = _translate_test_names_for_new_pecount(test_names, force_procs, force_threads)

    self._no_setup = no_setup
    self._no_build = no_build or no_setup or namelists_only
    self._no_run = no_run or self._no_build
    self._output_root = output_root

    # Figure out what project to use
    if project is None:
        self._project = get_project()
        if self._project is None:
            self._project = self._machobj.get_value("PROJECT")
    else:
        self._project = project

    # We will not use batch system if user asked for no_batch or if current
    # machine is not a batch machine
    self._no_batch = no_batch or not self._machobj.has_batch_system()
    expect(not (self._no_batch and self._queue is not None),
           "Does not make sense to request a queue without batch system")

    # Determine and resolve test_root
    if test_root is not None:
        self._test_root = test_root
    elif self._output_root is not None:
        self._test_root = self._output_root
    else:
        self._test_root = self._machobj.get_value("CIME_OUTPUT_ROOT")

    if self._project is not None:
        self._test_root = self._test_root.replace("$PROJECT", self._project)

    self._test_root = os.path.abspath(self._test_root)
    self._test_id = test_id if test_id is not None else get_timestamp()

    self._compiler = self._machobj.get_default_compiler() if compiler is None else compiler

    self._clean = clean
    self._namelists_only = namelists_only

    self._walltime = walltime

    if parallel_jobs is None:
        self._parallel_jobs = min(len(test_names),
                                  self._machobj.get_value("MAX_MPITASKS_PER_NODE"))
    else:
        self._parallel_jobs = parallel_jobs

    self._baseline_cmp_name = baseline_cmp_name # Implies comparison should be done if not None
    self._baseline_gen_name = baseline_gen_name # Implies generation should be done if not None

    # Compute baseline_root
    self._baseline_root = baseline_root if baseline_root is not None \
        else self._machobj.get_value("BASELINE_ROOT")

    if self._project is not None:
        self._baseline_root = self._baseline_root.replace("$PROJECT", self._project)

    self._baseline_root = os.path.abspath(self._baseline_root)

    if baseline_cmp_name or baseline_gen_name:
        if self._baseline_cmp_name:
            full_baseline_dir = os.path.join(self._baseline_root, self._baseline_cmp_name)
            expect(os.path.isdir(full_baseline_dir),
                   "Missing baseline comparison directory {}".format(full_baseline_dir))

        # the following is to assure that the existing generate directory is not overwritten
        if self._baseline_gen_name:
            full_baseline_dir = os.path.join(self._baseline_root, self._baseline_gen_name)
            existing_baselines = []
            for test_name in test_names:
                test_baseline = os.path.join(full_baseline_dir, test_name)
                if os.path.isdir(test_baseline):
                    existing_baselines.append(test_baseline)

            expect(allow_baseline_overwrite or len(existing_baselines) == 0,
                   "Baseline directories already exist {}\n"
                   "Use -o to avoid this error".format(existing_baselines))

    if self._cime_model == "e3sm":
        _order_tests_by_runtime(test_names, self._baseline_root)

    # This is the only data that multiple threads will simultaneously access.
    # Each test has its own value, and setting/retrieving items from a dict
    # is atomic, so this should be fine to use without a mutex.
    # name -> (phase, status)
    self._tests = OrderedDict()
    for test_name in test_names:
        self._tests[test_name] = (TEST_START, TEST_PASS_STATUS)

    # Oversubscribe by 1/4
    if proc_pool is None:
        pes = int(self._machobj.get_value("MAX_TASKS_PER_NODE"))
        self._proc_pool = int(pes * 1.25)
    else:
        self._proc_pool = int(proc_pool)

    self._procs_avail = self._proc_pool

    # Setup phases
    self._phases = list(PHASES)
    if self._no_setup:
        self._phases.remove(SETUP_PHASE)
    if self._no_build:
        self._phases.remove(SHAREDLIB_BUILD_PHASE)
        self._phases.remove(MODEL_BUILD_PHASE)
    if self._no_run:
        self._phases.remove(RUN_PHASE)

    if use_existing:
        for test in self._tests:
            with TestStatus(self._get_test_dir(test)) as ts:
                for phase, status in ts:
                    if phase in CORE_PHASES:
                        if status in [TEST_PEND_STATUS, TEST_FAIL_STATUS]:
                            if status == TEST_FAIL_STATUS:
                                # Important for potential subsequent waits
                                ts.set_status(phase, TEST_PEND_STATUS)

                            # We need to pick up here
                            break

                        else:
                            if phase != SUBMIT_PHASE:
                                # Somewhat subtle. Create_test considers submit/run to be the run phase,
                                # so don't try to update test status for a passed submit phase.
                                self._update_test_status(test, phase, TEST_PEND_STATUS)
                                self._update_test_status(test, phase, status)

                                if phase == RUN_PHASE:
                                    logger.info("Test {} passed and will not be re-run".format(test))

            logger.info("Using existing test directory {}".format(self._get_test_dir(test)))
    else:
        # None of the test directories should already exist.
        for test in self._tests:
            expect(not os.path.exists(self._get_test_dir(test)),
                   "Cannot create new case in directory '{}', it already exists."
                   " Pick a different test-id".format(self._get_test_dir(test)))
            logger.info("Creating test directory {}".format(self._get_test_dir(test)))
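
# A minimal usage sketch for the scheduler constructor above: build and run a
# small test suite. The test names, machine, compiler, and test_id are
# placeholders, and run_tests() is assumed to be the scheduler's driver method.
def _example_run_test_suite():
    from CIME.test_scheduler import TestScheduler  # assumed import path

    scheduler = TestScheduler(
        ["SMS.f19_g16.A.yellowstone_intel", "ERS.f19_g16.A.yellowstone_intel"],
        machine_name="yellowstone",
        compiler="intel",
        test_id="example",          # pair with use_existing=True to resume a prior run
        baseline_gen_name=None,     # set to a baseline tag to generate baselines
    )
    return scheduler.run_tests()
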
def configure(self, compset_name, grid_name, machine_name=None,
              project=None, pecount=None, compiler=None, mpilib=None,
              user_compset=False, pesfile=None,
              user_grid=False, gridfile=None, ninst=1, test=False,
              walltime=None, queue=None, output_root=None):

    #--------------------------------------------
    # compset, pesfile, and compset components
    #--------------------------------------------
    self._set_compset_and_pesfile(compset_name, user_compset=user_compset, pesfile=pesfile)

    self._components = self.get_compset_components()
    #FIXME - if --user-compset is True then need to determine that
    #all of the compset settings are valid

    #--------------------------------------------
    # grid
    #--------------------------------------------
    if user_grid is True and gridfile is not None:
        self.set_value("GRIDS_SPEC_FILE", gridfile)
    grids = Grids(gridfile)

    gridinfo = grids.get_grid_info(name=grid_name, compset=self._compsetname)

    self._gridname = gridinfo["GRID"]
    for key, value in gridinfo.items():
        logger.debug("Set grid %s %s" % (key, value))
        self.set_lookup_value(key, value)

    #--------------------------------------------
    # component config data
    #--------------------------------------------
    self._get_component_config_data()

    self.get_compset_var_settings()

    #--------------------------------------------
    # machine
    #--------------------------------------------
    # set machine values in env_xxx files
    machobj = Machines(machine=machine_name)
    machine_name = machobj.get_machine_name()
    self.set_value("MACH", machine_name)
    nodenames = machobj.get_node_names()
    nodenames = [x for x in nodenames if
                 '_system' not in x and '_variables' not in x and 'mpirun' not in x and
                 'COMPILER' not in x and 'MPILIB' not in x]

    for nodename in nodenames:
        value = machobj.get_value(nodename, resolved=False)
        type_str = self.get_type_info(nodename)
        if type_str is not None:
            logger.debug("machine nodename %s value %s" % (nodename, value))
            self.set_value(nodename, convert_to_type(value, type_str, nodename))

    if compiler is None:
        compiler = machobj.get_default_compiler()
    else:
        expect(machobj.is_valid_compiler(compiler),
               "compiler %s is not supported on machine %s" % (compiler, machine_name))

    self.set_value("COMPILER", compiler)

    if mpilib is None:
        mpilib = machobj.get_default_MPIlib({"compiler": compiler})
    else:
        expect(machobj.is_valid_MPIlib(mpilib, {"compiler": compiler}),
               "MPIlib %s is not supported on machine %s" % (mpilib, machine_name))
    self.set_value("MPILIB", mpilib)

    machdir = machobj.get_machines_dir()
    self.set_value("MACHDIR", machdir)

    # Create env_mach_specific settings from machine info.
    env_mach_specific_obj = self.get_env("mach_specific")
    env_mach_specific_obj.populate(machobj)
    self.schedule_rewrite(env_mach_specific_obj)

    #--------------------------------------------
    # pe layout
    #--------------------------------------------
    match1 = re.match('([0-9]+)x([0-9]+)', "" if pecount is None else pecount)
    match2 = re.match('([0-9]+)', "" if pecount is None else pecount)
    pes_ntasks = {}
    pes_nthrds = {}
    pes_rootpe = {}
    if match1:
        opti_tasks = match1.group(1)
        opti_thrds = match1.group(2)
    elif match2:
        opti_tasks = match2.group(1)
        opti_thrds = 1

    other = {}
    if match1 or match2:
        for component_class in self._component_classes:
            if component_class == "DRV":
                component_class = "CPL"
            string = "NTASKS_" + component_class
            pes_ntasks[string] = opti_tasks
            string = "NTHRDS_" + component_class
            pes_nthrds[string] = opti_thrds
            string = "ROOTPE_" + component_class
            pes_rootpe[string] = 0
    else:
        pesobj = Pes(self._pesfile)

        pes_ntasks, pes_nthrds, pes_rootpe, other = pesobj.find_pes_layout(
            self._gridname, self._compsetname, machine_name, pesize_opts=pecount)

    mach_pes_obj = self.get_env("mach_pes")
    totaltasks = {}
    # Since other items may include PES_PER_NODE we need to do this first;
    # we can get rid of this code when all of the perl is removed
    for key, value in other.items():
        self.set_value(key, value)
    pes_per_node = self.get_value("PES_PER_NODE")
    for key, value in pes_ntasks.items():
        totaltasks[key[-3:]] = int(value)
        mach_pes_obj.set_value(key, int(value), pes_per_node=pes_per_node)
    for key, value in pes_rootpe.items():
        totaltasks[key[-3:]] += int(value)
        mach_pes_obj.set_value(key, int(value), pes_per_node=pes_per_node)
    for key, value in pes_nthrds.items():
        totaltasks[key[-3:]] *= int(value)
        mach_pes_obj.set_value(key, int(value), pes_per_node=pes_per_node)

    maxval = 1
    if mpilib != "mpi-serial":
        for key, val in totaltasks.items():
            if val < 0:
                val = -1 * val * pes_per_node
            if val > maxval:
                maxval = val

    # Make sure that every component has been accounted for;
    # set nthrds and ntasks to 1 otherwise. Also set the ninst values here.
    for compclass in self._component_classes:
        if compclass == "DRV":
            continue
        key = "NINST_%s" % compclass
        mach_pes_obj.set_value(key, ninst)
        key = "NTASKS_%s" % compclass
        if key not in pes_ntasks.keys():
            mach_pes_obj.set_value(key, 1)
        key = "NTHRDS_%s" % compclass
        if key not in pes_nthrds.keys():
            mach_pes_obj.set_value(key, 1)

    # FIXME - this is a short term fix for dealing with the restriction that
    # CISM1 cannot run on multiple cores
    if "CISM1" in self._compsetname:
        mach_pes_obj.set_value("NTASKS_GLC", 1)
        mach_pes_obj.set_value("NTHRDS_GLC", 1)

    #--------------------------------------------
    # batch system
    #--------------------------------------------
    batch_system_type = machobj.get_value("BATCH_SYSTEM")
    batch = Batch(batch_system=batch_system_type, machine=machine_name)
    bjobs = batch.get_batch_jobs()
    env_batch = self.get_env("batch")
    env_batch.set_batch_system(batch, batch_system_type=batch_system_type)
    env_batch.create_job_groups(bjobs)
    env_batch.set_job_defaults(bjobs, pesize=maxval, walltime=walltime, force_queue=queue)
    self.schedule_rewrite(env_batch)

    self.set_value("COMPSET", self._compsetname)

    self._set_pio_xml()
    logger.info(" Compset is: %s " % self._compsetname)
    logger.info(" Grid is: %s " % self._gridname)
    logger.info(" Components in compset are: %s " % self._components)

    # Set project id
    if project is None:
        project = get_project(machobj)
    if project is not None:
        self.set_value("PROJECT", project)
    elif machobj.get_value("PROJECT_REQUIRED"):
        expect(project is not None, "PROJECT_REQUIRED is true but no project found")

    # Resolve the CIME_OUTPUT_ROOT variable; other than this
    # we don't want to resolve variables until we need them
    if output_root is None:
        output_root = self.get_value("CIME_OUTPUT_ROOT")
    self.set_value("CIME_OUTPUT_ROOT", output_root)

    # Overwriting an existing exeroot or rundir can cause problems
    exeroot = self.get_value("EXEROOT")
    rundir = self.get_value("RUNDIR")
    for wdir in (exeroot, rundir):
        logging.debug("wdir is %s" % wdir)
        if os.path.exists(wdir):
            expect(not test, "Directory %s already exists, aborting test" % wdir)
            response = raw_input("\nDirectory %s already exists, (r)eplace, (a)bort, or (u)se existing?" % wdir)
            if response.startswith("r"):
                shutil.rmtree(wdir)
            else:
                expect(response.startswith("u"), "Aborting by user request")

    # miscellaneous settings
    if self.get_value("RUN_TYPE") == 'hybrid':
        self.set_value("GET_REFCASE", True)

    # Turn on short term archiving as cesm default setting
    model = get_model()
    self.set_model_version(model)
    if model == "cesm" and not test:
        self.set_value("DOUT_S", True)
        self.set_value("TIMER_LEVEL", 4)
    if test:
        self.set_value("TEST", True)

    self.initialize_derived_attributes()
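
# A small, self-contained sketch of the pecount handling used by configure()
# above; the helper name is illustrative. "NNxMM" means NN MPI tasks with MM
# threads per component, a bare "NN" means NN tasks with one thread, and any
# other value falls back to the pes-file lookup. (Negative NTASKS values are
# interpreted as node counts, which is why the total-task loop multiplies by
# PES_PER_NODE when val < 0.)
def _example_parse_pecount(pecount):
    import re

    match1 = re.match('([0-9]+)x([0-9]+)', "" if pecount is None else pecount)
    match2 = re.match('([0-9]+)', "" if pecount is None else pecount)
    if match1:
        return int(match1.group(1)), int(match1.group(2))
    if match2:
        return int(match2.group(1)), 1
    return None

# _example_parse_pecount("64x2") -> (64, 2)
# _example_parse_pecount("64")   -> (64, 1)
# _example_parse_pecount(None)   -> None (use the pes file layout)
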
def configure(self, compset_name, grid_name, machine_name=None,
              project=None, pecount=None, compiler=None, mpilib=None,
              user_compset=False, pesfile=None,
              user_grid=False, gridfile=None, ninst=1, test=False):

    #--------------------------------------------
    # compset, pesfile, and compset components
    #--------------------------------------------
    self._set_compset_and_pesfile(compset_name, user_compset=user_compset, pesfile=pesfile)

    self._components = self.get_compset_components()
    #FIXME - if --user-compset is True then need to determine that
    #all of the compset settings are valid

    #--------------------------------------------
    # grid
    #--------------------------------------------
    if user_grid is True and gridfile is not None:
        self.set_value("GRIDS_SPEC_FILE", gridfile)
    grids = Grids(gridfile)

    gridinfo = grids.get_grid_info(name=grid_name, compset=self._compsetname)

    self._gridname = gridinfo["GRID"]
    for key,value in gridinfo.items():
        logger.debug("Set grid %s %s"%(key,value))
        self.set_value(key,value)

    #--------------------------------------------
    # component config data
    #--------------------------------------------
    self._get_component_config_data()

    self.get_compset_var_settings()

    # Add the group and elements for the config_files.xml
    for idx, config_file in enumerate(self._component_config_files):
        self.set_value(config_file[0],config_file[1])

    #--------------------------------------------
    # machine
    #--------------------------------------------
    # set machine values in env_xxx files
    machobj = Machines(machine=machine_name)
    machine_name = machobj.get_machine_name()
    self.set_value("MACH",machine_name)
    nodenames = machobj.get_node_names()
    nodenames = [x for x in nodenames if
                 '_system' not in x and '_variables' not in x and 'mpirun' not in x and
                 'COMPILER' not in x and 'MPILIB' not in x]

    for nodename in nodenames:
        value = machobj.get_value(nodename)
        type_str = self.get_type_info(nodename)
        if type_str is not None:
            self.set_value(nodename, convert_to_type(value, type_str, nodename))

    if compiler is None:
        compiler = machobj.get_default_compiler()
    else:
        expect(machobj.is_valid_compiler(compiler),
               "compiler %s is not supported on machine %s" %(compiler, machine_name))

    self.set_value("COMPILER",compiler)

    if mpilib is None:
        mpilib = machobj.get_default_MPIlib({"compiler":compiler})
    else:
        expect(machobj.is_valid_MPIlib(mpilib, {"compiler":compiler}),
               "MPIlib %s is not supported on machine %s" %(mpilib, machine_name))
    self.set_value("MPILIB",mpilib)

    machdir = machobj.get_machines_dir()
    self.set_value("MACHDIR", machdir)

    # Overwriting an existing exeroot or rundir can cause problems
    exeroot = self.get_value("EXEROOT")
    rundir = self.get_value("RUNDIR")
    for wdir in (exeroot, rundir):
        if os.path.exists(wdir):
            expect(not test, "Directory %s already exists, aborting test"% wdir)
            response = raw_input("\nDirectory %s already exists, (r)eplace, (a)bort, or (u)se existing?"% wdir)
            if response.startswith("r"):
                shutil.rmtree(wdir)
            else:
                expect(response.startswith("u"), "Aborting by user request")

    # the following go into the env_mach_specific file
    vars = ("module_system", "environment_variables", "mpirun")
    env_mach_specific_obj = self._get_env("mach_specific")
    for var in vars:
        nodes = machobj.get_first_child_nodes(var)
        for node in nodes:
            env_mach_specific_obj.add_child(node)

    #--------------------------------------------
    # pe layout
    #--------------------------------------------
    pesobj = Pes(self._pesfile)

    #FIXME - add pesize_opts as optional argument below
    pes_ntasks, pes_nthrds, pes_rootpe = pesobj.find_pes_layout(self._gridname, self._compsetname,
                                                                machine_name, pesize_opts=pecount)

    mach_pes_obj = self._get_env("mach_pes")
    totaltasks = {}
    for key, value in pes_ntasks.items():
        totaltasks[key[-3:]] = int(value)
        mach_pes_obj.set_value(key,int(value))
    for key, value in pes_rootpe.items():
        totaltasks[key[-3:]] += int(value)
        mach_pes_obj.set_value(key,int(value))
    for key, value in pes_nthrds.items():
        totaltasks[key[-3:]] *= int(value)
        mach_pes_obj.set_value(key,int(value))

    maxval = 1
    pes_per_node = mach_pes_obj.get_value("PES_PER_NODE")
    for key, val in totaltasks.items():
        if val < 0:
            val = -1*val*pes_per_node
        if val > maxval:
            maxval = val

    # Make sure that every component has been accounted for;
    # set nthrds and ntasks to 1 otherwise. Also set the ninst values here.
    for compclass in self._component_classes:
        if compclass == "DRV":
            continue
        key = "NINST_%s"%compclass
        mach_pes_obj.set_value(key, ninst)
        key = "NTASKS_%s"%compclass
        if key not in pes_ntasks.keys():
            mach_pes_obj.set_value(key,1)
        key = "NTHRDS_%s"%compclass
        if key not in pes_nthrds.keys():
            mach_pes_obj.set_value(key,1)

    # FIXME - this is a short term fix for dealing with the restriction that
    # CISM1 cannot run on multiple cores
    if "CISM1" in self._compsetname:
        mach_pes_obj.set_value("NTASKS_GLC",1)
        mach_pes_obj.set_value("NTHRDS_GLC",1)

    #--------------------------------------------
    # batch system
    #--------------------------------------------
    batch_system_type = machobj.get_value("BATCH_SYSTEM")
    batch = Batch(batch_system=batch_system_type, machine=machine_name)
    bjobs = batch.get_batch_jobs()
    env_batch = self._get_env("batch")
    env_batch.set_batch_system(batch, batch_system_type=batch_system_type)
    env_batch.create_job_groups(bjobs)
    env_batch.set_job_defaults(bjobs, pesize=maxval)
    self._env_files_that_need_rewrite.add(env_batch)

    self.set_value("COMPSET",self._compsetname)

    self._set_pio_xml()
    logger.info(" Compset is: %s " %self._compsetname)
    logger.info(" Grid is: %s " %self._gridname )
    logger.info(" Components in compset are: %s " %self._components)

    # miscellaneous settings
    if self.get_value("RUN_TYPE") == 'hybrid':
        self.set_value("GET_REFCASE", True)

    # Set project id
    if project is None:
        project = get_project(machobj)
    if project is not None:
        self.set_value("PROJECT", project)
    elif machobj.get_value("PROJECT_REQUIRED"):
        expect(project is not None, "PROJECT_REQUIRED is true but no project found")