def test_get_grid_info_extra_gridmaps(self):
    """Test of get_grid_info with some extra gridmaps"""
    model_grid_entries = self._MODEL_GRID_F09_G17
    domain_entries = self._DOMAIN_F09 + self._DOMAIN_G17
    gridmap_entries = self._GRIDMAP_F09_G17
    # These are some extra gridmaps that aren't in the required list
    gridmap_entries += """
<gridmap atm_grid="0.9x1.25" ocn_grid="gx1v7">
  <map name="ATM2OCN_EXTRA">map_fv0.9x1.25_TO_gx1v7_extra.nc</map>
  <map name="OCN2ATM_EXTRA">map_gx1v7_TO_fv0.9x1.25_extra.nc</map>
</gridmap>
"""
    self._create_grids_xml(
        model_grid_entries=model_grid_entries,
        domain_entries=domain_entries,
        gridmap_entries=gridmap_entries,
    )

    grids = Grids(self._xml_filepath)
    grid_info = grids.get_grid_info(
        name="f09_g17",
        compset="NOT_IMPORTANT",
        driver="nuopc",
    )

    self.assert_grid_info_f09_g17(grid_info)
    self.assertEqual(grid_info["ATM2OCN_EXTRA"], "map_fv0.9x1.25_TO_gx1v7_extra.nc")
    self.assertEqual(grid_info["OCN2ATM_EXTRA"], "map_gx1v7_TO_fv0.9x1.25_extra.nc")
def test_get_grid_info_extra_required_gridmaps(self):
    """Test of get_grid_info with some extra required gridmaps"""
    model_grid_entries = self._MODEL_GRID_F09_G17
    domain_entries = self._DOMAIN_F09 + self._DOMAIN_G17
    gridmap_entries = self._GRIDMAP_F09_G17
    # These are some extra required gridmaps that aren't explicitly specified
    extra_required_gridmaps = """
<required_gridmap grid1="atm_grid" grid2="ocn_grid">ATM2OCN_EXTRA</required_gridmap>
<required_gridmap grid1="ocn_grid" grid2="atm_grid">OCN2ATM_EXTRA</required_gridmap>
"""
    self._create_grids_xml(
        model_grid_entries=model_grid_entries,
        domain_entries=domain_entries,
        gridmap_entries=gridmap_entries,
        extra_required_gridmaps=extra_required_gridmaps,
    )

    grids = Grids(self._xml_filepath)
    grid_info = grids.get_grid_info(
        name="f09_g17",
        compset="NOT_IMPORTANT",
        driver="nuopc",
    )

    self.assert_grid_info_f09_g17(grid_info)
    self.assertEqual(grid_info["ATM2OCN_EXTRA"], "unset")
    self.assertEqual(grid_info["OCN2ATM_EXTRA"], "unset")
def test_get_grid_info_3glc(self):
    """Test of get_grid_info with 3 glc grids"""
    model_grid_entries = self._MODEL_GRID_F09_G17_3GLC
    domain_entries = (self._DOMAIN_F09 + self._DOMAIN_G17 +
                      self._DOMAIN_GRIS4 + self._DOMAIN_AIS8 + self._DOMAIN_LIS12)
    gridmap_entries = (self._GRIDMAP_F09_G17 + self._GRIDMAP_GRIS4_G17 +
                       self._GRIDMAP_AIS8_G17 + self._GRIDMAP_LIS12_G17)
    # Claim that a glc2atm gridmap is required in order to test the logic that handles
    # an unset required gridmap for a component with multiple grids.
    extra_required_gridmaps = """
<required_gridmap grid1="glc_grid" grid2="atm_grid">GLC2ATM_EXTRA</required_gridmap>
"""
    self._create_grids_xml(model_grid_entries=model_grid_entries,
                           domain_entries=domain_entries,
                           gridmap_entries=gridmap_entries,
                           extra_required_gridmaps=extra_required_gridmaps)

    grids = Grids(self._xml_filepath)
    grid_info = grids.get_grid_info(name="f09_g17_3glc",
                                    compset="NOT_IMPORTANT",
                                    driver="nuopc")

    self.assert_grid_info_f09_g17_3glc(grid_info, nuopc=True)
    self.assertEqual(grid_info['GLC2ATM_EXTRA'], 'unset')
def test_get_grid_info_basic(self):
    """Basic test of get_grid_info"""
    model_grid_entries = self._MODEL_GRID_F09_G17
    domain_entries = self._DOMAIN_F09 + self._DOMAIN_G17
    gridmap_entries = self._GRIDMAP_F09_G17
    self._create_grids_xml(model_grid_entries=model_grid_entries,
                           domain_entries=domain_entries,
                           gridmap_entries=gridmap_entries)

    grids = Grids(self._xml_filepath)
    grid_info = grids.get_grid_info(name="f09_g17",
                                    compset="NOT_IMPORTANT",
                                    driver="nuopc")

    self.assert_grid_info_f09_g17(grid_info, nuopc=True)
def query_grids(files, long_output, xml=False):
    """
    Query all grids.
    """
    config_file = files.get_value("GRIDS_SPEC_FILE")
    expect(
        os.path.isfile(config_file),
        "Cannot find config_file {} on disk".format(config_file),
    )

    grids = Grids(config_file)
    if xml:
        print("{}".format(grids.get_raw_record().decode("UTF-8")))
    elif long_output:
        grids.print_values(long_output=long_output)
    else:
        grids.print_values()
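# A minimal driver sketch for query_grids() above, included for illustration
# only. It assumes CIME's Files class is importable as CIME.XML.files.Files
# (the class that resolves GRIDS_SPEC_FILE); running it this way, and the
# default flag values chosen here, are assumptions rather than part of the
# original module.
if __name__ == "__main__":
    from CIME.XML.files import Files

    files = Files()
    # Short listing by default; long_output=True prints per-grid detail,
    # and xml=True dumps the raw grids XML record instead.
    query_grids(files, long_output=False)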
def configure(self, compset_name, grid_name, machine_name=None,
              project=None, pecount=None, compiler=None, mpilib=None,
              user_compset=False, pesfile=None,
              user_grid=False, gridfile=None, ninst=1, test=False,
              walltime=None, queue=None, output_root=None):

    #--------------------------------------------
    # compset, pesfile, and compset components
    #--------------------------------------------
    self._set_compset_and_pesfile(compset_name, user_compset=user_compset, pesfile=pesfile)

    self._components = self.get_compset_components()
    #FIXME - if --user-compset is True then need to determine that
    #all of the compset settings are valid

    #--------------------------------------------
    # grid
    #--------------------------------------------
    if user_grid is True and gridfile is not None:
        self.set_value("GRIDS_SPEC_FILE", gridfile)
    grids = Grids(gridfile)

    gridinfo = grids.get_grid_info(name=grid_name, compset=self._compsetname)

    self._gridname = gridinfo["GRID"]
    for key, value in gridinfo.items():
        logger.debug("Set grid %s %s" % (key, value))
        self.set_lookup_value(key, value)

    #--------------------------------------------
    # component config data
    #--------------------------------------------
    self._get_component_config_data()

    self.get_compset_var_settings()

    #--------------------------------------------
    # machine
    #--------------------------------------------
    # set machine values in env_xxx files
    machobj = Machines(machine=machine_name)
    machine_name = machobj.get_machine_name()
    self.set_value("MACH", machine_name)
    nodenames = machobj.get_node_names()
    nodenames = [x for x in nodenames if
                 '_system' not in x and '_variables' not in x and 'mpirun' not in x and
                 'COMPILER' not in x and 'MPILIB' not in x]

    for nodename in nodenames:
        value = machobj.get_value(nodename, resolved=False)
        type_str = self.get_type_info(nodename)
        if type_str is not None:
            logger.debug("machine nodename %s value %s" % (nodename, value))
            self.set_value(nodename, convert_to_type(value, type_str, nodename))

    if compiler is None:
        compiler = machobj.get_default_compiler()
    else:
        expect(machobj.is_valid_compiler(compiler),
               "compiler %s is not supported on machine %s" % (compiler, machine_name))
    self.set_value("COMPILER", compiler)

    if mpilib is None:
        mpilib = machobj.get_default_MPIlib({"compiler": compiler})
    else:
        expect(machobj.is_valid_MPIlib(mpilib, {"compiler": compiler}),
               "MPIlib %s is not supported on machine %s" % (mpilib, machine_name))
    self.set_value("MPILIB", mpilib)

    machdir = machobj.get_machines_dir()
    self.set_value("MACHDIR", machdir)

    # Create env_mach_specific settings from machine info.
    env_mach_specific_obj = self.get_env("mach_specific")
    env_mach_specific_obj.populate(machobj)
    self.schedule_rewrite(env_mach_specific_obj)

    #--------------------------------------------
    # pe layout
    #--------------------------------------------
    match1 = re.match('([0-9]+)x([0-9]+)', "" if pecount is None else pecount)
    match2 = re.match('([0-9]+)', "" if pecount is None else pecount)
    pes_ntasks = {}
    pes_nthrds = {}
    pes_rootpe = {}
    if match1:
        opti_tasks = match1.group(1)
        opti_thrds = match1.group(2)
    elif match2:
        opti_tasks = match2.group(1)
        opti_thrds = 1

    other = {}
    if match1 or match2:
        for component_class in self._component_classes:
            if component_class == "DRV":
                component_class = "CPL"
            string = "NTASKS_" + component_class
            pes_ntasks[string] = opti_tasks
            string = "NTHRDS_" + component_class
            pes_nthrds[string] = opti_thrds
            string = "ROOTPE_" + component_class
            pes_rootpe[string] = 0
    else:
        pesobj = Pes(self._pesfile)
        pes_ntasks, pes_nthrds, pes_rootpe, other = pesobj.find_pes_layout(
            self._gridname, self._compsetname, machine_name, pesize_opts=pecount)

    mach_pes_obj = self.get_env("mach_pes")
    totaltasks = {}
    # Since other items may include PES_PER_NODE we need to do this first;
    # we can get rid of this code when all of the perl is removed
    for key, value in other.items():
        self.set_value(key, value)
    pes_per_node = self.get_value("PES_PER_NODE")
    for key, value in pes_ntasks.items():
        totaltasks[key[-3:]] = int(value)
        mach_pes_obj.set_value(key, int(value), pes_per_node=pes_per_node)
    for key, value in pes_rootpe.items():
        totaltasks[key[-3:]] += int(value)
        mach_pes_obj.set_value(key, int(value), pes_per_node=pes_per_node)
    for key, value in pes_nthrds.items():
        totaltasks[key[-3:]] *= int(value)
        mach_pes_obj.set_value(key, int(value), pes_per_node=pes_per_node)

    maxval = 1
    if mpilib != "mpi-serial":
        for key, val in totaltasks.items():
            if val < 0:
                val = -1 * val * pes_per_node
            if val > maxval:
                maxval = val

    # Make sure that every component has been accounted for; set ntasks and
    # nthrds to 1 otherwise. Also set the ninst values here.
    for compclass in self._component_classes:
        if compclass == "DRV":
            continue
        key = "NINST_%s" % compclass
        mach_pes_obj.set_value(key, ninst)
        key = "NTASKS_%s" % compclass
        if key not in pes_ntasks:
            mach_pes_obj.set_value(key, 1)
        key = "NTHRDS_%s" % compclass
        if key not in pes_nthrds:
            mach_pes_obj.set_value(key, 1)

    # FIXME - this is a short term fix for dealing with the restriction that
    # CISM1 cannot run on multiple cores
    if "CISM1" in self._compsetname:
        mach_pes_obj.set_value("NTASKS_GLC", 1)
        mach_pes_obj.set_value("NTHRDS_GLC", 1)

    #--------------------------------------------
    # batch system
    #--------------------------------------------
    batch_system_type = machobj.get_value("BATCH_SYSTEM")
    batch = Batch(batch_system=batch_system_type, machine=machine_name)
    bjobs = batch.get_batch_jobs()
    env_batch = self.get_env("batch")
    env_batch.set_batch_system(batch, batch_system_type=batch_system_type)
    env_batch.create_job_groups(bjobs)
    env_batch.set_job_defaults(bjobs, pesize=maxval, walltime=walltime, force_queue=queue)
    self.schedule_rewrite(env_batch)

    self.set_value("COMPSET", self._compsetname)

    self._set_pio_xml()
    logger.info(" Compset is: %s " % self._compsetname)
    logger.info(" Grid is: %s " % self._gridname)
    logger.info(" Components in compset are: %s " % self._components)

    # Set project id
    if project is None:
        project = get_project(machobj)
    if project is not None:
        self.set_value("PROJECT", project)
    elif machobj.get_value("PROJECT_REQUIRED"):
        expect(project is not None, "PROJECT_REQUIRED is true but no project found")

    # Resolve the CIME_OUTPUT_ROOT variable; other than this
    # we don't want to resolve variables until we need them
    if output_root is None:
        output_root = self.get_value("CIME_OUTPUT_ROOT")
    self.set_value("CIME_OUTPUT_ROOT", output_root)

    # Overwriting an existing exeroot or rundir can cause problems
    exeroot = self.get_value("EXEROOT")
    rundir = self.get_value("RUNDIR")
    for wdir in (exeroot, rundir):
        logging.debug("wdir is %s" % wdir)
        if os.path.exists(wdir):
            expect(not test, "Directory %s already exists, aborting test" % wdir)
            response = raw_input(
                "\nDirectory %s already exists, (r)eplace, (a)bort, or (u)se existing?" % wdir)
            if response.startswith("r"):
                shutil.rmtree(wdir)
            else:
                expect(response.startswith("u"), "Aborting by user request")

    # miscellaneous settings
    if self.get_value("RUN_TYPE") == 'hybrid':
        self.set_value("GET_REFCASE", True)

    # Turn on short term archiving as cesm default setting
    model = get_model()
    self.set_model_version(model)
    if model == "cesm" and not test:
        self.set_value("DOUT_S", True)
        self.set_value("TIMER_LEVEL", 4)

    if test:
        self.set_value("TEST", True)

    self.initialize_derived_attributes()
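# A minimal sketch of how configure() above might be driven, assuming it is a
# method of CIME's Case class and that the case directory already exists; the
# caseroot path, compset alias, machine, compiler, and mpilib values are
# placeholders, not values taken from the original code.
from CIME.case import Case

case = Case("/path/to/caseroot")
case.configure("A", "f09_g17", machine_name="yellowstone",
               compiler="intel", mpilib="mpich2")
# Write the modified env_*.xml files back to the case directory.
case.flush()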
def list_grids(self):
    # Populate the ResList widget with the first field (the grid alias) of
    # each entry returned by find_valid_alias_list().
    self.ResList.gridsobj = Grids()
    all_grids = self.ResList.gridsobj.find_valid_alias_list()
    for grid in all_grids:
        self.ResList.addItem(grid[0])