def get_resolved_value(self, raw_value):
    """
    A value in the xml file may contain references to other xml
    variables or to environment variables. These are refered to in
    the perl style with $name and $ENV{name}.

    >>> obj = GenericXML()
    >>> os.environ["FOO"] = "BAR"
    >>> os.environ["BAZ"] = "BARF"
    >>> obj.get_resolved_value("one $ENV{FOO} two $ENV{BAZ} three")
    'one BAR two BARF three'
    >>> obj.get_resolved_value("2 + 3 - 1")
    '4'
    >>> obj.get_resolved_value("0001-01-01")
    '0001-01-01'
    """
    logger.debug("raw_value %s" % raw_value)
    reference_re = re.compile(r'\${?(\w+)}?')
    env_ref_re   = re.compile(r'\$ENV\{(\w+)\}')
    # BUGFIX: the class was [+-/*]; '+-/' is a character RANGE that also
    # matched ',' and '.', so "a , b" would be treated as math. Putting
    # '-' first makes every character literal.
    math_re = re.compile(r'\s[-+*/]\s')
    item_data = raw_value

    if item_data is None:
        return None

    if type(item_data) is not str:
        # non-string values (int, bool, ...) cannot contain references
        return item_data

    # First pass: expand $ENV{name} from the environment (must be defined).
    for m in env_ref_re.finditer(item_data):
        logger.debug("look for %s in env" % item_data)
        env_var = m.groups()[0]
        expect(env_var in os.environ, "Undefined env var '%s'" % env_var)
        item_data = item_data.replace(m.group(), os.environ[env_var])

    # Second pass: expand $name / ${name} from xml values, special vars,
    # then the environment as a last resort.
    for m in reference_re.finditer(item_data):
        var = m.groups()[0]
        logger.debug("find: %s" % var)
        ref = self.get_value(var)
        if ref is not None:
            logger.debug("resolve: " + str(ref))
            # recurse: the referenced value may itself contain references
            item_data = item_data.replace(m.group(), self.get_resolved_value(str(ref)))
        elif var == "CIMEROOT":
            cimeroot = get_cime_root()
            item_data = item_data.replace(m.group(), cimeroot)
        elif var == "SRCROOT":
            srcroot = os.path.join(get_cime_root(), "..")
            item_data = item_data.replace(m.group(), srcroot)
        elif var in os.environ:
            # this is a list of suppressed warnings (things normally expected to be resolved in env)
            if var not in ("USER",):
                # BUGFIX: was logging.warn (deprecated alias on the root
                # logger); use the module logger's warning() like the rest
                # of this method.
                logger.warning("Resolved from env: " + var)
            item_data = item_data.replace(m.group(), os.environ[var])

    # If the fully-resolved string looks like arithmetic, evaluate it.
    if math_re.search(item_data):
        try:
            # eval is deliberate: item_data comes from project xml files
            tmp = eval(item_data)
        except Exception:
            # not actually evaluable (e.g. a date-like string); keep as-is
            tmp = item_data
        item_data = str(tmp)

    return item_data
def get_resolved_value(self, raw_value):
    """
    A value in the xml file may contain references to other xml
    variables or to environment variables. These are refered to in
    the perl style with $name and $ENV{name}.

    >>> obj = GenericXML()
    >>> os.environ["FOO"] = "BAR"
    >>> os.environ["BAZ"] = "BARF"
    >>> obj.get_resolved_value("one $ENV{FOO} two $ENV{BAZ} three")
    'one BAR two BARF three'
    >>> obj.get_resolved_value("2 + 3 - 1")
    '4'
    >>> obj.get_resolved_value("0001-01-01")
    '0001-01-01'
    """
    logger.debug("raw_value %s" % raw_value)
    reference_re = re.compile(r'\${?(\w+)}?')
    env_ref_re   = re.compile(r'\$ENV\{(\w+)\}')
    # BUGFIX: was [+-/*]; '+-/' is a character RANGE also matching ',' and
    # '.'; '-' placed first makes every character literal.
    math_re = re.compile(r'\s[-+*/]\s')
    item_data = raw_value

    if item_data is None:
        return None

    if type(item_data) is not str:
        # non-string values cannot contain references
        return item_data

    # Expand $ENV{name} references; undefined env vars are an error.
    for m in env_ref_re.finditer(item_data):
        logger.debug("look for %s in env" % item_data)
        env_var = m.groups()[0]
        expect(env_var in os.environ, "Undefined env var '%s'" % env_var)
        item_data = item_data.replace(m.group(), os.environ[env_var])

    # Expand $name / ${name} from xml values, special vars, then the env.
    for m in reference_re.finditer(item_data):
        var = m.groups()[0]
        logger.debug("find: %s" % var)
        ref = self.get_value(var)
        if ref is not None:
            logger.debug("resolve: " + str(ref))
            item_data = item_data.replace(m.group(), self.get_resolved_value(str(ref)))
        elif var == "CIMEROOT":
            cimeroot = get_cime_root()
            item_data = item_data.replace(m.group(), cimeroot)
        elif var == "SRCROOT":
            srcroot = os.path.join(get_cime_root(), "..")
            item_data = item_data.replace(m.group(), srcroot)
        elif var in os.environ:
            # this is a list of suppressed warnings (things normally expected to be resolved in env)
            if var not in ("USER",):
                # BUGFIX: was logging.debug on the root logger; use the
                # module logger like every other statement in this method
                logger.debug("Resolved from env: " + var)
            item_data = item_data.replace(m.group(), os.environ[var])

    # Evaluate simple arithmetic in the resolved string.
    if math_re.search(item_data):
        try:
            # eval is deliberate: input comes from project xml files
            tmp = eval(item_data)
        except Exception:
            tmp = item_data
        item_data = str(tmp)

    return item_data
def __init__(self, case_root=None, read_only=True):
    """Build a Case object rooted at *case_root* (default: cwd)."""
    self._caseroot = case_root if case_root is not None else os.getcwd()
    logger.debug("Initializing Case.")

    # Bookkeeping for env files and read-only enforcement.
    self._env_files_that_need_rewrite = set()
    self._read_only_mode = True
    self._force_read_only = read_only

    self._env_entryid_files = []
    self._env_generic_files = []
    self._files = []

    self.read_xml()

    # Hold arbitary values. In create_newcase we may set values
    # for xml files that haven't been created yet. We need a place
    # to store them until we are ready to create the file. At file
    # creation we get the values for those fields from this lookup
    # table and then remove the entry.
    self.lookups = {}
    self.set_lookup_value('CIMEROOT', os.path.abspath(get_cime_root()))

    # Compset/grid state, filled in later by create_newcase machinery.
    self._compsetname = None
    self._gridname = None
    self._compsetsfile = None
    self._pesfile = None
    self._gridfile = None
    self._components = []
    self._component_classes = []
def run_phase(self):
    """Run the Fortran unit tests via run_tests.py and FAIL on nonzero status."""
    rundir = self._case.get_value("RUNDIR")
    exeroot = self._case.get_value("EXEROOT")
    mach = self._case.get_value("MACH")

    # Start from a clean log so append_testlog only sees this run.
    log = os.path.join(rundir, "funit.log")
    if os.path.exists(log):
        os.remove(log)

    test_spec_dir = self.get_test_spec_dir()
    unit_test_tool = os.path.abspath(
        os.path.join(get_cime_root(), "scripts", "fortran_unit_testing",
                     "run_tests.py"))
    args = "--build-dir {} --test-spec-dir {} --machine {}".format(
        exeroot, test_spec_dir, mach)

    # BUG(wjs, 2022-01-07, ESMCI/CIME#4136) For now, these Fortran unit tests only
    # work with the old config_compilers.xml-based configuration
    my_env = os.environ.copy()
    my_env["CIME_NO_CMAKE_MACRO"] = "ON"

    stat = run_cmd(
        "{} {} >& funit.log".format(unit_test_tool, args),
        from_dir=rundir,
        env=my_env,
    )[0]

    # BUGFIX: use a context manager so the log file handle is closed
    # (the original leaked the handle returned by open()).
    with open(os.path.join(rundir, "funit.log"), "r") as fd:
        append_testlog(fd.read())

    expect(stat == 0, "RUN FAIL for FUNIT")
def _run_pylint(on_file, interactive):
###############################################################################
    """Run pylint on one file; return (file, problem text or "")."""
    pylint = find_executable("pylint")
    cmd_options = " --disable=I,C,R,logging-not-lazy,wildcard-import,unused-wildcard-import"
    cmd_options += ",fixme,broad-except,bare-except,eval-used,exec-used,global-statement"
    cmd_options += ",logging-format-interpolation,no-name-in-module"
    cimeroot = get_cime_root()

    if "scripts/Tools" in on_file:
        cmd_options += ",relative-import"

    # add init-hook option
    hook_dirs = (os.path.join(cimeroot, "scripts", "lib"),
                 os.path.join(cimeroot, "scripts", "Tools"),
                 os.path.join(cimeroot, "scripts", "fortran_unit_testing", "python"))
    cmd_options += " --init-hook='sys.path.extend((\"%s\",\"%s\",\"%s\"))'" % hook_dirs

    cmd = "%s %s %s" % (pylint, cmd_options, on_file)
    logger.debug("pylint command is %s" % cmd)
    stat, out, err = run_cmd(cmd, verbose=False, from_dir=cimeroot)

    # Success path first; problems are reported below.
    if stat == 0:
        if interactive:
            logger.info("File %s has no pylint problems" % on_file)
        return (on_file, "")

    if interactive:
        logger.info("File %s has pylint problems, please fix\n Use command: %s" % (on_file, cmd))
        logger.info(out + "\n" + err)
    return (on_file, out + "\n" + err)
def get_all_checkable_files():
###############################################################################
    """Return repo python files (by .py suffix or executable shebang) to lint."""
    cimeroot = get_cime_root()
    all_git_files = run_cmd_no_fail("git ls-files", from_dir=cimeroot,
                                    verbose=False).splitlines()
    if get_cime_default_driver() == "nuopc":
        srcroot = get_src_root()
        nuopc_git_files = []
        try:
            nuopc_git_files = run_cmd_no_fail(
                "git ls-files",
                from_dir=os.path.join(srcroot, "components", "cmeps"),
                verbose=False,
            ).splitlines()
        # BUGFIX: was a bare except, which would also swallow
        # KeyboardInterrupt/SystemExit; the intent is best-effort lookup.
        except Exception:
            logger.warning("No nuopc driver found in source")
        all_git_files.extend([
            os.path.join(srcroot, "components", "cmeps", _file)
            for _file in nuopc_git_files
        ])
    files_to_test = [
        item for item in all_git_files
        if ((item.endswith(".py") or is_python_executable(
            os.path.join(cimeroot, item))) and not _should_pylint_skip(item))
    ]

    return files_to_test
def test_k_append_config(self):
    """Reading the template machine config must add exactly one machine."""
    before = self.MACHINE.list_available_machines()
    self.assertEqual(
        len(before) > 1, True, msg="Problem reading machine list"
    )

    template = os.path.join(
        utils.get_cime_root(),
        "config",
        "xml_schemas",
        "config_machines_template.xml",
    )
    self.MACHINE.read(template)
    after = self.MACHINE.list_available_machines()

    self.assertEqual(
        len(after) - len(before),
        1,
        msg="Not able to append config_machines.xml {} {}".format(
            len(after), len(before)
        ),
    )
    self.assertEqual(
        "mymachine" in after,
        True,
        msg="Not able to append config_machines.xml",
    )
def __init__(self, caseroot=None, infile="env_mach_specific.xml",
             components=None, unit_testing=False, read_only=False,
             standalone_configure=False, comp_interface=None):
    """
    initialize an object interface to file env_mach_specific.xml in the case directory

    Notes on some arguments:
    standalone_configure: logical - whether this is being called from the standalone
        configure utility, outside of a case
    """
    schema_path = os.path.join(get_cime_root(), "config", "xml_schemas",
                               "env_mach_specific.xsd")
    EnvBase.__init__(self, caseroot, infile, schema=schema_path,
                     read_only=read_only)
    # Attribute names that may appear on mpirun-matching nodes.
    self._allowed_mpi_attributes = ("compiler", "mpilib", "threaded",
                                    "unit_testing", "queue", "comp_interface")
    self._comp_interface = comp_interface
    self._unit_testing = unit_testing
    self._standalone_configure = standalone_configure
def _run_pylint(on_file, interactive):
###############################################################################
    """Run pylint on one file; return (file, problem text or "")."""
    pylint = find_executable("pylint")
    cmd_options = " --disable=I,C,R,logging-not-lazy,wildcard-import,unused-wildcard-import,fixme,broad-except,bare-except,eval-used,exec-used,global-statement"
    cimeroot = get_cime_root()

    if "scripts/Tools" in on_file:
        cmd_options += ",relative-import"

    # add init-hook option
    hook_dirs = (os.path.join(cimeroot, "utils", "python"),
                 os.path.join(cimeroot, "scripts", "Tools"))
    cmd_options += " --init-hook='sys.path.extend((\"%s\",\"%s\"))'" % hook_dirs

    cmd = "%s %s %s" % (pylint, cmd_options, on_file)
    logger.debug("pylint command is %s" % cmd)
    stat, out, err = run_cmd(cmd, verbose=False, from_dir=cimeroot)

    # Clean run: nothing to report.
    if stat == 0:
        if interactive:
            logger.info("File %s has no pylint problems" % on_file)
        return (on_file, "")

    if interactive:
        logger.info(
            "File %s has pylint problems, please fix\n Use command: %s" %
            (on_file, cmd))
        logger.info(out + "\n" + err)
    return (on_file, out + "\n" + err)
def test_configure(self):
    """The standalone configure tool must reproduce the case's env_mach_specific.xml."""
    testname = "SMS.f09_g16.X"
    casedir = self._create_test([testname, "--no-build"], test_id=self._baseline_name)

    manual_config_dir = os.path.join(casedir, "manual_config")
    os.mkdir(manual_config_dir)

    configure_tool = os.path.join(utils.get_cime_root(), "tools", "configure")
    utils.run_cmd_no_fail(
        "{} --machine={} --compiler={}".format(
            configure_tool, self._machine, self._compiler
        ),
        from_dir=manual_config_dir,
    )

    def _slurp(path):
        # helper: read a whole file as text
        with open(path, "r") as fd:
            return fd.read()

    self.assertEqual(
        _slurp(os.path.join(casedir, "env_mach_specific.xml")),
        _slurp(os.path.join(manual_config_dir, "env_mach_specific.xml")),
    )
def __init__(self, comp_interface="mct"):
    """
    initialize an object

    >>> files = Files()
    >>> files.get_value('CASEFILE_HEADERS',resolved=False)
    '$CIMEROOT/config/config_headers.xml'
    """
    cimeroot = get_cime_root()
    infile = os.path.join(cimeroot, "config", get_model(), "config_files.xml")
    expect(os.path.isfile(infile),
           "Could not find or open file {}".format(infile))

    schema = os.path.join(cimeroot, "config", "xml_schemas", "entry_id.xsd")
    EntryID.__init__(self, infile, schema=schema)

    # variables COMP_ROOT_DIR_{} are mutable, all other variables are read only
    self.COMP_ROOT_DIR = {}
    self._comp_interface = comp_interface

    # .config_file.xml at the top level may overwrite COMP_ROOT_DIR_ nodes in config_files
    override = os.path.join(os.path.dirname(cimeroot), ".config_files.xml")
    if os.path.isfile(override):
        self.read(override)
        self.overwrite_existing_entries()
def __init__(self, machobj, schema_path=None):
    """Construct a Build given machine-specific information.

    In the process some information about possible variables is read in
    from the schema file.

    Arguments:
    machobj - A Machines object for this machine.
    schema_path (optional) - Path to config_build.xsd within CIME.

    >>> "CFLAGS" in Build('MyMach').flag_vars
    True
    >>> "MPICC" in Build('MyMach').flag_vars
    False
    """
    self.machobj = machobj

    # The schema is used to figure out which variables contain
    # command-line arguments (e.g. compiler flags), since these are
    # processed in a more complex manner than other variables.
    if schema_path is None:
        schema_path = os.path.join(get_cime_root(), "cime_config",
                                   "xml_schemas", "config_build.xsd")

    # Run an XPath query to extract the list of flag variable names.
    ns = {"xs": "http://www.w3.org/2001/XMLSchema"}
    flag_xpath = ".//xs:group[@name='compilerVars']/xs:choice/xs:element[@type='flagsVar']"
    schema_root = ET.parse(schema_path).getroot()
    self.flag_vars = {elem.get('name')
                      for elem in schema_root.findall(flag_xpath, ns)}
def get_resolved_value(self, raw_value):
    """
    A value in the xml file may contain references to other xml
    variables or to environment variables. These are refered to in
    the perl style with $name and $ENV{name}.

    >>> obj = GenericXML()
    >>> os.environ["FOO"] = "BAR"
    >>> os.environ["BAZ"] = "BARF"
    >>> obj.get_resolved_value("one $ENV{FOO} two $ENV{BAZ} three")
    'one BAR two BARF three'
    """
    logger.debug("raw_value %s" % raw_value)
    reference_re = re.compile(r'\${?(\w+)}?')
    env_ref_re = re.compile(r'\$ENV\{(\w+)\}')
    item_data = raw_value

    if item_data is None:
        return None

    if type(item_data) is not str:
        # non-string values cannot contain references
        return item_data

    # Expand $ENV{name} references; undefined env vars are an error.
    for m in env_ref_re.finditer(item_data):
        logger.debug("look for %s in env" % item_data)
        env_var = m.groups()[0]
        expect(env_var in os.environ, "Undefined env var '%s'" % env_var)
        item_data = item_data.replace(m.group(), os.environ[env_var])

    # Expand $name / ${name} from xml values, special vars, then the env.
    for m in reference_re.finditer(item_data):
        var = m.groups()[0]
        logger.debug("find: %s" % var)
        ref = self.get_value(var)
        if ref is not None:
            logger.debug("resolve: " + str(ref))
            item_data = item_data.replace(m.group(), self.get_resolved_value(str(ref)))
        elif var == "CIMEROOT":
            cimeroot = get_cime_root()
            item_data = item_data.replace(m.group(), cimeroot)
        elif var == "SRCROOT":
            srcroot = os.path.join(get_cime_root(), "..")
            item_data = item_data.replace(m.group(), srcroot)
        elif var in os.environ:
            # BUGFIX: was logging.debug on the root logger; use the module
            # logger for consistency with the rest of this method
            logger.debug("resolve from env: " + var)
            item_data = item_data.replace(m.group(), os.environ[var])

    return item_data
def __init__(self, infile=None):
    """Initialize an archive-spec object; default to the model's archive.xml."""
    if infile is None:
        infile = os.path.join(get_cime_root(), "cime_config",
                              get_model(), "archive.xml")
    GenericXML.__init__(self, infile)
def get_all_checkable_files():
###############################################################################
    """List repo python files (by .py suffix or executable shebang), minus skips."""
    cimeroot = get_cime_root()
    git_listing = run_cmd_no_fail("git ls-files", from_dir=cimeroot, verbose=False)

    files_to_test = []
    for item in git_listing.splitlines():
        looks_python = (item.endswith(".py")
                        or is_python_executable(os.path.join(cimeroot, item)))
        if looks_python and not _should_pylint_skip(item):
            files_to_test.append(item)
    return files_to_test
def __init__(self, infile=None):
    """Initialize an archive config object; default to the model's config_archive.xml."""
    if infile is None:
        infile = os.path.join(get_cime_root(), "cime_config",
                              get_model(), "config_archive.xml")
    GenericXML.__init__(self, infile)
def __init__(self, case_root=None, infile="env_batch.xml", read_only=False):
    """
    initialize an object interface to file env_batch.xml in the case directory
    """
    self._batchtype = None

    # This arbitrary setting should always be overwritten
    self._default_walltime = "00:20:00"

    schema_path = os.path.join(get_cime_root(), "config", "xml_schemas",
                               "env_batch.xsd")
    super(EnvBatch, self).__init__(case_root, infile, schema=schema_path,
                                   read_only=read_only)
def __init__(self, caseroot=None, infile="env_mach_specific.xml",
             components=None, unit_testing=False, read_only=False):
    """
    initialize an object interface to file env_mach_specific.xml in the case directory
    """
    schema_path = os.path.join(get_cime_root(), "config", "xml_schemas",
                               "env_mach_specific.xsd")
    EnvBase.__init__(self, caseroot, infile, schema=schema_path,
                     read_only=read_only)
    # Attribute names that may appear on mpirun-matching nodes.
    self._allowed_mpi_attributes = ("compiler", "mpilib", "threaded",
                                    "unit_testing", "queue")
    self._unit_testing = unit_testing
def __init__(self, caseroot=None, infile="env_mach_specific.xml",
             components=None, unit_testing=False):
    """
    initialize an object interface to file env_mach_specific.xml in the case directory
    """
    schema_path = os.path.join(get_cime_root(), "config", "xml_schemas",
                               "env_mach_specific.xsd")
    EnvBase.__init__(self, caseroot, infile, schema=schema_path)
    # Attribute names that may appear on mpirun-matching nodes.
    self._allowed_mpi_attributes = ("compiler", "mpilib", "threaded",
                                    "unit_testing")
    self._unit_testing = unit_testing
def __init__(self, infile, attributes=None):
    """Construct a `NamelistDefinition` from an XML file."""
    super(NamelistDefinition, self).__init__(infile)
    self._attributes = attributes

    # if the file is invalid we may not be able to check the version
    # but we need to do it this way until we remove the version 1 files
    if self.get_version() == "2.0":
        schema = os.path.join(get_cime_root(), "cime_config",
                              "xml_schemas", "entry_id_namelist.xsd")
        self.validate_xml_file(infile, schema)
def __init__(self, case_root=None, infile="env_batch.xml"):
    """
    initialize an object interface to file env_batch.xml in the case directory
    """
    self.prereq_jobid = None
    self.batchtype = None

    # This arbitrary setting should always be overwritten
    self._default_walltime = "00:20:00"

    schema_path = os.path.join(get_cime_root(), "config", "xml_schemas",
                               "env_batch.xsd")
    EnvBase.__init__(self, case_root, infile, schema=schema_path)
def __init__(self, case_root=None, infile="env_batch.xml"):
    """
    initialize an object interface to file env_batch.xml in the case directory
    """
    self._prereq_jobid = None
    self._batchtype = None

    # This arbitrary setting should always be overwritten
    self._default_walltime = "00:20:00"

    schema_path = os.path.join(get_cime_root(), "config", "xml_schemas",
                               "env_batch.xsd")
    EnvBase.__init__(self, case_root, infile, schema=schema_path)
def __init__(self, machine, infile=None):
    """Initialize the long-term-archive config; default to the model's machines file."""
    if infile is None:
        infile = os.path.join(get_cime_root(), "cime_config", get_model(),
                              "machines", "config_lt_archive.xml")
    GenericXML.__init__(self, infile)
    self.machine = machine
def __init__(self):
    """
    initialize an object

    >>> files = Files()
    >>> files.get_value('CASEFILE_HEADERS',resolved=False)
    '$CIMEROOT/cime_config/config_headers.xml'
    """
    config_files = os.path.join(get_cime_root(), "cime_config",
                                get_model(), "config_files.xml")
    EntryID.__init__(self, config_files)
def __init__(self, infile):
    """Initialize from *infile*; validate against the CPL schema only for in-tree files."""
    files = Files()
    schema = None
    # not checking schema on external components yet
    if get_cime_root() in os.path.abspath(infile):
        schema = files.get_schema("CONFIG_CPL_FILE")
    EntryID.__init__(self, infile, schema=schema)
def __init__(self, infile, attributes=None):
    """Construct a `NamelistDefinition` from an XML file."""
    super(NamelistDefinition, self).__init__(infile)
    self._attributes = attributes

    # if the file is invalid we may not be able to check the version
    # but we need to do it this way until we remove the version 1 files
    if self.get_version() == "2.0":
        root = get_cime_root()
        self.validate_xml_file(
            infile,
            os.path.join(root, "cime_config", "xml_schemas",
                         "entry_id_namelist.xsd"))
def _run_pylint(all_files, interactive):
###############################################################################
    """Run pylint (json output) over *all_files*; return {path: newline-joined errors}."""
    pylint = find_executable("pylint")
    cmd_options = (
        " --disable=I,C,R,logging-not-lazy,wildcard-import,unused-wildcard-import"
    )
    cmd_options += (
        ",fixme,broad-except,bare-except,eval-used,exec-used,global-statement")
    cmd_options += ",logging-format-interpolation,no-name-in-module,arguments-renamed"
    cmd_options += " -j 0 -f json"
    cimeroot = get_cime_root()
    srcroot = get_src_root()

    # if "scripts/Tools" in on_file:
    #     cmd_options +=",relative-import"

    # add init-hook option
    cmd_options += ' --init-hook=\'sys.path.extend(("%s","%s","%s","%s"))\'' % (
        os.path.join(cimeroot, "CIME"),
        os.path.join(cimeroot, "CIME", "Tools"),
        os.path.join(cimeroot, "scripts", "fortran_unit_testing", "python"),
        os.path.join(srcroot, "components", "cmeps", "cime_config", "runseq"),
    )

    files = " ".join(all_files)
    cmd = "%s %s %s" % (pylint, cmd_options, files)
    logger.debug("pylint command is %s" % cmd)
    stat, out, err = run_cmd(cmd, verbose=False, from_dir=cimeroot)

    data = json.loads(out)

    result = {}
    for item in data:
        # only report hard errors, not warnings/conventions
        if item["type"] != "error":
            continue

        path = item["path"]
        message = item["message"]
        line = item["line"]

        # BUGFIX: the first message for a path was stored without its line
        # number while later ones got "message:line"; record them uniformly.
        result.setdefault(path, []).append(f"{message}:{line}")

    # de-duplicate identical messages per file
    for k in result:
        result[k] = "\n".join(set(result[k]))

    return result
def __init__(self, infile):
    """Initialize from *infile*; validate against the driver schema only for in-tree files."""
    files = Files()
    schema = None
    # not checking schema on external components yet
    if get_cime_root() in os.path.abspath(infile):
        schema = files.get_schema("CONFIG_DRV_FILE")
    EntryID.__init__(self, infile, schema=schema)
def create_new_case(self):
    """Create a new case from the arguments stored in self.CreateNewcaseArgs."""
    srcroot = os.path.dirname(os.path.abspath(get_cime_root()))
    params = self.CreateNewcaseArgs
    casename = params["case"]
    with Case(os.path.join(self._casedir, casename), read_only=False) as case:
        case.create(casename, srcroot,
                    params["compset"],
                    params["res"],
                    machine_name=params["machine"],
                    compiler=params["compiler"],
                    mpilib=params["mpilib"])
def __init__(self, infile):
    """Initialize from *infile*; validate in-tree files against the driver schema when one exists."""
    files = Files()
    schema = files.get_schema("CONFIG_DRV_FILE")
    # not checking schema on external components yet
    if schema is not None:
        if get_cime_root() in os.path.abspath(infile):
            self.validate_xml_file(infile, schema)
    EntryID.__init__(self, infile)
def __init__(self):
    """
    initialize an object

    >>> files = Files()
    >>> files.get_value('CASEFILE_HEADERS',resolved=False)
    '$CIMEROOT/config/config_headers.xml'
    """
    cimeroot = get_cime_root()
    infile = os.path.join(cimeroot, "config", get_model(), "config_files.xml")
    expect(os.path.isfile(infile),
           "Could not find or open file {}".format(infile))
    EntryID.__init__(self, infile,
                     schema=os.path.join(cimeroot, "config", "xml_schemas",
                                         "entry_id.xsd"))
def submit_single_job(self, case, job, depid=None, no_batch=False):
    """Submit one case job to the batch system (or run it directly when
    there is no batch system / no_batch is set). Returns the batch job id,
    or None when the job was run directly."""
    # BUGFIX: logger.warn is a deprecated alias; use warning()
    logger.warning("Submit job %s"%job)
    caseroot = case.get_value("CASEROOT")
    batch_system = self.get_value("BATCH_SYSTEM", subgroup=None)
    if batch_system is None or batch_system == "none" or no_batch:
        # Import here to avoid circular include
        from CIME.case_test import case_test # pylint: disable=unused-variable
        from CIME.case_run import case_run # pylint: disable=unused-variable
        from CIME.case_st_archive import case_st_archive # pylint: disable=unused-variable
        from CIME.case_lt_archive import case_lt_archive # pylint: disable=unused-variable

        logger.info("Starting job script %s" % job)

        # Hack until all testcases are ported to python
        testcase = case.get_value("TESTCASE")
        cimeroot = get_cime_root()
        testscript = os.path.join(cimeroot, "scripts", "Testing", "Testcases", "%s_script" % testcase)
        if job == "case.test" and testcase is not None and os.path.exists(testscript):
            run_cmd_no_fail("%s --caseroot %s" % (os.path.join(".", job), caseroot))
        else:
            # This is what we want longterm
            function_name = job.replace(".", "_")
            success = locals()[function_name](case)
            expect(success, "%s failed" % function_name)

        return

    submitargs = self.get_submit_args(case, job)

    if depid is not None:
        # splice the dependency job id into the batch system's depend flag
        dep_string = self.get_value("depend_string", subgroup=None)
        dep_string = dep_string.replace("jobid",depid.strip()) # pylint: disable=maybe-no-member
        submitargs += " " + dep_string

    batchsubmit = self.get_value("batch_submit", subgroup=None)
    expect(batchsubmit is not None,
           "Unable to determine the correct command for batch submission.")
    batchredirect = self.get_value("batch_redirect", subgroup=None)
    submitcmd = ''
    for string in (batchsubmit, submitargs, batchredirect, job):
        if  string is not None:
            submitcmd += string + " "

    logger.info("Submitting job script %s"%submitcmd)
    output = run_cmd_no_fail(submitcmd)
    jobid = self.get_job_id(output)
    logger.debug("Submitted job id is %s"%jobid)
    return jobid
def __init__(self, batch_system=None, machine=None, infile=None):
    """Initialize the batch config, optionally pre-selecting a batch system and machine."""
    if infile is None:
        infile = os.path.join(get_cime_root(), "cime_config", get_model(),
                              "machines", "config_batch.xml")
    GenericXML.__init__(self, infile)

    self.batch_system_node = None
    self.machine_node = None
    self.batch_system = batch_system
    self.machine = machine
    if batch_system is not None:
        self.set_batch_system(batch_system, machine=machine)
def check_code(files, num_procs=10, interactive=False):
###############################################################################
    """
    Check all python files in the given directory

    Returns True if all files had no problems
    """
    # Get list of files to check, we look to see if user-provided file argument
    # is a valid file, if not, we search the repo for a file with similar name.
    files_to_check = []
    if not files:
        # Check every python file
        files_to_check = get_all_checkable_files()
    else:
        repo_files = run_cmd_no_fail('git ls-files', from_dir=get_cime_root(),
                                     verbose=False).splitlines()
        for filearg in files:
            if os.path.exists(filearg):
                files_to_check.append(os.path.abspath(filearg))
                continue
            matches = [rf for rf in repo_files if rf.endswith(filearg)]
            if matches:
                files_to_check.extend(matches)  # could have multiple matches
            else:
                logger.warning(
                    "Could not find file matching argument '%s'" % filearg)

    if "scripts/lib/six.py" in files_to_check:
        files_to_check.remove("scripts/lib/six.py")
        logger.info("Not checking contributed file six.py")

    expect(len(files_to_check) > 0, "No matching files found")

    # No point in using more threads than files
    num_procs = min(num_procs, len(files_to_check))

    pool = ThreadPool(num_procs)
    results = pool.map(lambda x: _run_pylint(x, interactive), files_to_check)
    pool.close()
    pool.join()
    return dict(results)
def test_m_createnewcase_alternate_drivers(self):
    # Test that case.setup runs for nuopc and moab drivers
    cls = self.__class__
    model = utils.get_model()
    for driver in ("nuopc", "moab"):
        driver_src = os.path.join(utils.get_cime_root(), "src", "drivers", driver)
        if not os.path.exists(driver_src):
            self.skipTest(
                "Skipping driver test for {}, driver not found".format(driver)
            )
        # skip driver/model combinations that are not supported
        if (model == "cesm" and driver == "moab") or (
            model == "e3sm" and driver == "nuopc"
        ):
            continue

        testdir = os.path.join(cls._testroot, "testcreatenewcase.{}".format(driver))
        if os.path.exists(testdir):
            shutil.rmtree(testdir)

        args = " --driver {} --case {} --compset X --res f19_g16 --output-root {} --handle-preexisting-dirs=r".format(
            driver, testdir, cls._testroot
        )
        if model == "cesm":
            args += " --run-unsupported"
        if self.TEST_COMPILER is not None:
            args += " --compiler %s" % self.TEST_COMPILER
        if self.TEST_MPILIB is not None:
            args += " --mpilib %s" % self.TEST_MPILIB
        args += f" --machine {self.MACHINE.get_machine_name()}"

        cls._testdirs.append(testdir)
        self.run_cmd_assert_result(
            "./create_newcase %s" % (args), from_dir=self.SCRIPT_DIR
        )
        self.assertTrue(os.path.exists(testdir))
        self.assertTrue(os.path.exists(os.path.join(testdir, "case.setup")))

        self.run_cmd_assert_result("./case.setup", from_dir=testdir)

        # the configured case must record the driver we asked for
        with Case(testdir, read_only=False) as case:
            comp_interface = case.get_value("COMP_INTERFACE")
            self.assertTrue(
                driver == comp_interface, msg="%s != %s" % (driver, comp_interface)
            )
        cls._do_teardown.append(testdir)
def run_phase(self):
    """Run the Fortran unit tests via run_tests.py and FAIL on nonzero status."""
    rundir = self._case.get_value("RUNDIR")
    exeroot = self._case.get_value("EXEROOT")
    mach = self._case.get_value("MACH")

    # Start from a clean log so append_testlog only sees this run.
    log = os.path.join(rundir, "funit.log")
    if os.path.exists(log):
        os.remove(log)

    test_spec_dir = self.get_test_spec_dir()
    unit_test_tool = os.path.abspath(
        os.path.join(get_cime_root(), "scripts", "fortran_unit_testing",
                     "run_tests.py"))
    args = "--build-dir {} --test-spec-dir {} --machine {}".format(
        exeroot, test_spec_dir, mach)

    stat = run_cmd("{} {} >& funit.log".format(unit_test_tool, args),
                   from_dir=rundir)[0]

    # BUGFIX: use a context manager so the log file handle is closed
    # (the original leaked the handle returned by open()).
    with open(os.path.join(rundir, "funit.log"), "r") as fd:
        append_testlog(fd.read())

    expect(stat == 0, "RUN FAIL for FUNIT")
def test_gen_domain(self):
    """Exercise the gen_domain mapping tool (E3SM only)."""
    if utils.get_model() != "e3sm":
        self.skipTest("Skipping gen_domain test. Depends on E3SM tools")

    cime_root = utils.get_cime_root()
    inputdata = self.MACHINE.get_value("DIN_LOC_ROOT")

    tool_name = "test_gen_domain"
    tool_location = os.path.join(cime_root, "tools", "mapping",
                                 "gen_domain_files", "test_gen_domain.sh")

    cls = self.__class__
    test_dir = os.path.join(cls._testroot, tool_name)
    cls._testdirs.append(test_dir)
    os.makedirs(test_dir)

    cmd = "{} --cime_root={} --inputdata_root={}".format(
        tool_location, cime_root, inputdata)
    self.run_cmd_assert_result(cmd, from_dir=test_dir)
    cls._do_teardown.append(test_dir)
def check_code(files, num_procs=10, interactive=False):
###############################################################################
    """
    Check all python files in the given directory

    Returns True if all files had no problems
    """
    # Get list of files to check, we look to see if user-provided file argument
    # is a valid file, if not, we search the repo for a file with similar name.
    files_to_check = []
    if not files:
        # Check every python file
        files_to_check = get_all_checkable_files()
    else:
        repo_files = run_cmd_no_fail('git ls-files', from_dir=get_cime_root(),
                                     verbose=False).splitlines()
        for filearg in files:
            if os.path.exists(filearg):
                files_to_check.append(os.path.abspath(filearg))
                continue
            matches = [rf for rf in repo_files if rf.endswith(filearg)]
            if matches:
                files_to_check.extend(matches)  # could have multiple matches
            else:
                logger.warning("Could not find file matching argument '%s'" % filearg)

    if "scripts/lib/six.py" in files_to_check:
        files_to_check.remove("scripts/lib/six.py")
        logger.info("Not checking contributed file six.py")

    expect(len(files_to_check) > 0, "No matching files found")

    # No point in using more threads than files
    num_procs = min(num_procs, len(files_to_check))

    pool = ThreadPool(num_procs)
    results = pool.map(lambda x: _run_pylint(x, interactive), files_to_check)
    pool.close()
    pool.join()
    return dict(results)
def test_a_unit_test(self):
    """Build and run the interpolate_1d example with the unit-test driver."""
    cls = self.__class__
    if not self._has_unit_test_support():
        self.skipTest(
            "Skipping TestUnitTest - PFUNIT_PATH not found for the default compiler on this machine"
        )

    test_dir = os.path.join(cls._testroot, "unit_tester_test")
    cls._testdirs.append(test_dir)
    os.makedirs(test_dir)

    unit_test_tool = os.path.abspath(
        os.path.join(utils.get_cime_root(), "scripts",
                     "fortran_unit_testing", "run_tests.py"))
    test_spec_dir = os.path.join(os.path.dirname(unit_test_tool),
                                 "Examples", "interpolate_1d", "tests")

    args = "--build-dir {} --test-spec-dir {}".format(test_dir, test_spec_dir)
    args += " --machine {}".format(self.MACHINE.get_machine_name())
    utils.run_cmd_no_fail("{} {}".format(unit_test_tool, args))
    cls._do_teardown.append(test_dir)
def __init__(self, case_root=None, read_only=True):
    """Build a Case object rooted at *case_root* (default: cwd)."""
    self._caseroot = case_root if case_root is not None else os.getcwd()
    logger.debug("Initializing Case.")

    # Bookkeeping for env files and read-only enforcement.
    self._env_files_that_need_rewrite = set()
    self._read_only_mode = True
    self._force_read_only = read_only

    self._env_entryid_files = []
    self._env_generic_files = []
    self._files = []

    self.read_xml()

    # Hold arbitary values. In create_newcase we may set values
    # for xml files that haven't been created yet. We need a place
    # to store them until we are ready to create the file. At file
    # creation we get the values for those fields from this lookup
    # table and then remove the entry.
    self.lookups = {}
    self.set_lookup_value('CIMEROOT', os.path.abspath(get_cime_root()))

    # Compset/grid state, filled in later by create_newcase machinery.
    self._compsetname = None
    self._gridname = None
    self._compsetsfile = None
    self._pesfile = None
    self._gridfile = None
    self._components = []
    self._component_classes = []

    # Derived PE-layout attributes; computed once the case is configured.
    self._is_env_loaded = False
    self.thread_count = None
    self.tasks_per_node = None
    self.num_nodes = None
    self.tasks_per_numa = None
    self.cores_per_task = None

    # check if case has been configured and if so initialize derived
    if self.get_value("CASEROOT") is not None:
        self.initialize_derived_attributes()
def __init__(self, case_root=None, read_only=True):
    """Build a Case object rooted at *case_root* (default: cwd)."""
    self._caseroot = os.getcwd() if case_root is None else case_root
    logger.debug("Initializing Case.")

    # Bookkeeping for env files and read-only enforcement.
    self._env_files_that_need_rewrite = set()
    self._read_only_mode = True
    self._force_read_only = read_only

    self._env_entryid_files = []
    self._env_generic_files = []
    self._files = []

    self.read_xml()

    # Hold arbitary values. In create_newcase we may set values
    # for xml files that haven't been created yet. We need a place
    # to store them until we are ready to create the file. At file
    # creation we get the values for those fields from this lookup
    # table and then remove the entry.
    self.lookups = {}
    self.set_lookup_value('CIMEROOT', os.path.abspath(get_cime_root()))

    # Compset/grid state, filled in later by create_newcase machinery.
    self._compsetname = None
    self._gridname = None
    self._compsetsfile = None
    self._pesfile = None
    self._gridfile = None
    self._components = []
    self._component_classes = []

    # Derived PE-layout attributes; computed once the case is configured.
    self._is_env_loaded = False
    self.thread_count = None
    self.tasks_per_node = None
    self.num_nodes = None
    self.tasks_per_numa = None
    self.cores_per_task = None

    # check if case has been configured and if so initialize derived
    if self.get_value("CASEROOT") is not None:
        self.initialize_derived_attributes()
def test_b_cime_f90_unit_tests(self):
    """Build and run the Fortran (f90) driver unit tests via run_tests.py."""
    cls = self.__class__

    # Guard clauses: skip when only fast tests were requested, or when
    # pFUnit support is unavailable for the default compiler here.
    if self.FAST_ONLY:
        self.skipTest("Skipping slow test")
    if not self._has_unit_test_support():
        self.skipTest(
            "Skipping TestUnitTest - PFUNIT_PATH not found for the default compiler on this machine"
        )

    test_dir = os.path.join(cls._testroot, "driver_f90_tests")
    cls._testdirs.append(test_dir)
    os.makedirs(test_dir)

    test_spec_dir = utils.get_cime_root()
    unit_test_tool = os.path.abspath(
        os.path.join(test_spec_dir, "scripts", "fortran_unit_testing", "run_tests.py"))

    # Assemble the command-line arguments in one pass.
    args = " ".join([
        "--build-dir {}".format(test_dir),
        "--test-spec-dir {}".format(test_spec_dir),
        "--machine {}".format(self.MACHINE.get_machine_name()),
    ])
    utils.run_cmd_no_fail("{} {}".format(unit_test_tool, args))

    # Register the directory for cleanup in tearDown.
    cls._do_teardown.append(test_dir)
def __init__(self, batch_system=None, machine=None, infile=None):
    """Initialize a batch-system configuration object.

    Reads config_batch.xml (the model default when *infile* is None),
    then overlays the user's ~/.cime/config_batch.xml if present, and
    finally selects *batch_system* if one was given.
    """
    if infile is None:
        infile = os.path.join(get_cime_root(), "cime_config", get_model(),
                              "machines", "config_batch.xml")
    GenericXML.__init__(self, infile)

    self.batch_system_node = None
    self.machine_node = None
    self.batch_system = batch_system
    self.machine = machine

    # Append the contents of $HOME/.cime/config_batch.xml if it exists.
    # This could cause problems if node matches are repeated when only one
    # is expected.
    # Use os.path.expanduser instead of os.environ.get("HOME"): the latter
    # returns None when HOME is unset, which would make os.path.join raise
    # a TypeError.
    user_infile = os.path.join(os.path.expanduser("~"), ".cime", "config_batch.xml")
    if os.path.exists(user_infile):
        GenericXML.read(self, user_infile)

    if self.batch_system is not None:
        self.set_batch_system(self.batch_system, machine=machine)
def run_phase(self):
    """Run the FUNIT Fortran unit tests and append their log to the test log.

    Fails (via expect) if the unit-test driver exits with non-zero status.
    """
    rundir = self._case.get_value("RUNDIR")
    exeroot = self._case.get_value("EXEROOT")
    mach = self._case.get_value("MACH")

    # Start from a clean log so only this invocation's output is captured.
    log = os.path.join(rundir, "funit.log")
    if os.path.exists(log):
        os.remove(log)

    test_spec_dir = self.get_test_spec_dir()
    unit_test_tool = os.path.abspath(
        os.path.join(get_cime_root(), "scripts", "fortran_unit_testing", "run_tests.py"))
    args = "--build-dir {} --test-spec-dir {} --machine {}".format(
        exeroot, test_spec_dir, mach)

    stat = run_cmd("{} {} >& funit.log".format(unit_test_tool, args),
                   from_dir=rundir)[0]

    # Use a context manager so the log handle is closed deterministically
    # (the original leaked the file object); reuse the path computed above.
    with open(log, "r") as fd:
        append_testlog(fd.read())

    expect(stat == 0, "RUN FAIL for FUNIT")
def __init__(self, case_root=None):
    """Construct a Case from the env_*.xml files under *case_root*
    (current directory if None)."""
    if case_root is None:
        case_root = os.getcwd()

    # Set this first: if case_root is invalid, expect() tears the object
    # down and __del__ requires _env_files_that_need_rewrite to exist.
    self._env_files_that_need_rewrite = set()

    logger.debug("Initializing Case.")

    # The entry-id env files, in their canonical order.
    self._env_entryid_files = [env_cls(case_root) for env_cls in
                               (EnvRun, EnvBuild, EnvMachPes, EnvCase, EnvBatch)]
    if os.path.isfile(os.path.join(case_root, "env_test.xml")):
        self._env_entryid_files.append(EnvTest(case_root))

    self._env_generic_files = [EnvMachSpecific(case_root), EnvArchive(case_root)]
    self._files = self._env_entryid_files + self._env_generic_files

    # Hold arbitrary values. In create_newcase we may set values
    # for xml files that haven't been created yet. We need a place
    # to store them until we are ready to create the file. At file
    # creation we get the values for those fields from this lookup
    # table and then remove the entry. This was what I came up
    # with in the perl anyway and I think that we still need it here.
    self.lookups = {}
    self.lookups['CIMEROOT'] = os.path.abspath(get_cime_root())

    # Unknown until the case is configured (None is immutable, so the
    # chained assignment does not alias anything).
    self._compsetname = self._gridname = self._compsetsfile = None
    self._pesfile = self._gridfile = None
    self._components = []
    self._component_config_files = []
def __init__(self):
    """
    Load the model's config_files.xml, validated against entry_id.xsd.

    >>> files = Files()
    >>> files.get_value('CASEFILE_HEADERS',resolved=False)
    '$CIMEROOT/config/config_headers.xml'
    """
    cime_root = get_cime_root()
    infile = os.path.join(cime_root, "config", get_model(), "config_files.xml")
    expect(os.path.isfile(infile), "Could not find or open file {}".format(infile))
    EntryID.__init__(
        self, infile,
        schema=os.path.join(cime_root, "config", "xml_schemas", "entry_id.xsd"))

    # variables COMP_ROOT_DIR_{} are mutable, all other variables are read only
    self.COMP_ROOT_DIR = {}

    # A .config_files.xml located one level above CIMEROOT may overwrite
    # COMP_ROOT_DIR_ nodes from the stock config_files.xml.
    config_files_override = os.path.join(os.path.dirname(cime_root), ".config_files.xml")
    if os.path.isfile(config_files_override):
        self.read(config_files_override)
        self.overwrite_existing_entries()
def __init__(self, infile=None):
    """Initialize the XML wrapper.

    If *infile* is None, create a placeholder object with no backing file.
    If *infile* names a readable file, parse it. Otherwise build a
    skeleton tree (<xml version="1.0"><file id=infile/></xml>) so the
    file can be written out later.
    """
    self.tree = None
    self.lookups = {}
    self.lookups["CIMEROOT"] = get_cime_root()
    # PEP 8: compare to the None singleton with `is`, not `==`
    if infile is None:
        # if file is not defined just return
        self.filename = None
        return
    if os.path.isfile(infile) and os.access(infile, os.R_OK):
        # If file is defined and exists, read it
        self.filename = infile
        self.read(infile)
    else:
        # if file does not exist create a root xml element
        # and set its id to the file name
        self.filename = infile
        root = ET.Element("xml")
        root.set("version", "1.0")
        self.root = ET.SubElement(root, "file")
        self.root.set("id", infile)
        self.tree = ET.ElementTree(root)
import CIME.utils from CIME.utils import expect, convert_to_seconds, parse_test_name, get_cime_root, get_model from CIME.XML.machines import Machines import six, sys, os # Expect that, if a model wants to use python-based test lists, they will have a file # config/$model/tests.py , containing a test dictionary called _TESTS sys.path.insert(0, os.path.join(get_cime_root(), "config", get_model())) _ALL_TESTS = {} try: from tests import _TESTS # pylint: disable=import-error _ALL_TESTS.update(_TESTS) except: pass # Here are the tests belonging to e3sm suites. Format is # <test>.<grid>.<compset>. # suite_name -> (inherits_from, timelimit, [test [, mods[, machines]]]) # To elaborate, if no mods are needed, a string representing the testname is all that is needed. # If testmods are needed, a 2-ple must be provided (test, mods) # If you want to restrict the test mods to certain machines, than a 3-ple is needed (test, mods, [machines]) _CIME_TESTS = { "cime_tiny" : (None, "0:10:00", ("ERS.f19_g16_rx1.A", "NCK.f19_g16_rx1.A") ), "cime_test_only_pass" : (None, "0:10:00",
def _case_setup_impl(case, caseroot, clean=False, test_mode=False, reset=False):
###############################################################################
    """Implementation of case.setup.

    With clean/reset: back up and remove generated scripts. Without clean:
    configure Macros/env_mach_specific, validate the PE layout, write batch
    scripts, create user_nl files and case directories, and record the
    software environment.
    """
    os.chdir(caseroot)
    msg = "case.setup starting"
    append_status(msg, caseroot=caseroot, sfile="CaseStatus")

    cimeroot = get_cime_root(case)

    # Check that $DIN_LOC_ROOT exists - and abort if not a namelist compare tests
    din_loc_root = case.get_value("DIN_LOC_ROOT")
    testcase = case.get_value("TESTCASE")
    # Bug fix: interpolate the actual path instead of emitting the literal
    # perl-style string "$din_loc_root".
    expect(not (not os.path.isdir(din_loc_root) and testcase != "SBN"),
           "inputdata root is not a directory: \"%s\" " % din_loc_root)

    # Check that userdefine settings are specified before expanding variable
    for vid, value in case:
        expect(not (type(value) is str and "USERDEFINED_required_build" in value),
               "Parameter '%s' must be defined" % vid)

    # Create batch script
    if reset or clean:
        # Clean batch script
        backup_dir = "PESetupHist/b.%s" % time.strftime("%y%m%d-%H%M%S")
        if not os.path.isdir(backup_dir):
            os.makedirs(backup_dir)

        # back up relevant files
        for fileglob in ["case.run", "env_build.xml", "env_mach_pes.xml", "Macros*"]:
            for filename in glob.glob(fileglob):
                shutil.copy(filename, backup_dir)
        if os.path.exists("case.run"):
            os.remove("case.run")

        # only do the following if are NOT in testmode
        if not test_mode:
            # rebuild the models (even on restart)
            case.set_value("BUILD_COMPLETE", False)

            # backup and then clean test script
            if os.path.exists("case.test"):
                shutil.copy("case.test", backup_dir)
                os.remove("case.test")
                logger.info("Successfully cleaned test script case.test")

            if os.path.exists("case.testdriver"):
                shutil.copy("case.testdriver", backup_dir)
                os.remove("case.testdriver")
                logger.info("Successfully cleaned test script case.testdriver")

        # Bug fix: this message was logged twice; log it once.
        logger.info("Successfully cleaned batch script case.run")
        logger.info("Some files have been saved to %s" % backup_dir)

        msg = "case.setup clean complete"
        append_status(msg, caseroot=caseroot, sfile="CaseStatus")

    if not clean:
        case.load_env()

        models = case.get_values("COMP_CLASSES")
        mach = case.get_value("MACH")
        compiler = case.get_value("COMPILER")
        debug = case.get_value("DEBUG")
        mpilib = case.get_value("MPILIB")
        sysos = case.get_value("OS")
        expect(mach is not None, "xml variable MACH is not set")

        # creates the Macros.make, Depends.compiler, Depends.machine, Depends.machine.compiler
        # and env_mach_specific.xml if they don't already exist.
        if not os.path.isfile("Macros.make") or not os.path.isfile("env_mach_specific.xml"):
            configure(Machines(machine=mach), caseroot, ["Makefile"], compiler, mpilib, debug, sysos)

        # Set tasks to 1 if mpi-serial library
        if mpilib == "mpi-serial":
            for vid, value in case:
                if vid.startswith("NTASKS_") and value != 1:
                    case.set_value(vid, 1)

        # Check ninst.
        # In CIME there can be multiple instances of each component model (an ensemble) NINST is the instance of that component.
        for comp in models:
            if comp == "DRV":
                continue
            ninst = case.get_value("NINST_%s" % comp)
            ntasks = case.get_value("NTASKS_%s" % comp)
            if ninst > ntasks:
                if ntasks == 1:
                    # Run the ensemble members sequentially on a single task.
                    case.set_value("NTASKS_%s" % comp, ninst)
                else:
                    expect(False, "NINST_%s value %d greater than NTASKS_%s %d" % (comp, ninst, comp, ntasks))

        if os.path.exists("case.run"):
            logger.info("Machine/Decomp/Pes configuration has already been done ...skipping")
        else:
            _check_pelayouts_require_rebuild(case, models)

            if os.path.exists("LockedFiles/env_build.xml"):
                os.remove("LockedFiles/env_build.xml")

            case.flush()
            check_lockedfiles()

            env_mach_pes = case.get_env("mach_pes")
            pestot = env_mach_pes.get_total_tasks(models)
            logger.debug("at update TOTALPES = %s"%pestot)
            case.set_value("TOTALPES", pestot)
            thread_count = env_mach_pes.get_max_thread_count(models)
            if thread_count > 1:
                case.set_value("BUILD_THREADED", True)

            expect(not (case.get_value("BUILD_THREADED") and compiler == "nag"),
                   "it is not possible to run with OpenMP if using the NAG Fortran compiler")

            cost_pes = env_mach_pes.get_cost_pes(pestot, thread_count,
                                                 machine=case.get_value("MACH"))
            case.set_value("COST_PES", cost_pes)

            # create batch file
            logger.info("Creating batch script case.run")

            # Use BatchFactory to get the appropriate instance of a BatchMaker,
            # use it to create our batch scripts
            env_batch = case.get_env("batch")
            num_nodes = env_mach_pes.get_total_nodes(pestot, thread_count)
            tasks_per_node = env_mach_pes.get_tasks_per_node(pestot, thread_count)
            for job in env_batch.get_jobs():
                input_batch_script = os.path.join(case.get_value("MACHDIR"),
                                                  env_batch.get_value('template', subgroup=job))
                if job == "case.test" and testcase is not None and not test_mode:
                    logger.info("Writing %s script" % job)
                    testscript = os.path.join(cimeroot, "scripts", "Testing", "Testcases", "%s_script" % testcase)
                    # Short term fix to be removed when csh tests are removed
                    if not os.path.exists(testscript):
                        env_batch.make_batch_script(input_batch_script, job, case, pestot,
                                                    tasks_per_node, num_nodes, thread_count)
                elif job != "case.test":
                    logger.info("Writing %s script from input template %s" % (job, input_batch_script))
                    env_batch.make_batch_script(input_batch_script, job, case, pestot,
                                                tasks_per_node, num_nodes, thread_count)

            # Make a copy of env_mach_pes.xml in order to be able
            # to check that it does not change once case.setup is invoked
            logger.info("Locking file env_mach_pes.xml")
            case.flush()
            logger.debug("at copy TOTALPES = %s"%case.get_value("TOTALPES"))
            shutil.copy("env_mach_pes.xml", "LockedFiles")

        # Create user_nl files for the required number of instances
        if not os.path.exists("user_nl_cpl"):
            logger.info("Creating user_nl_xxx files for components and cpl")

        # loop over models
        for model in models:
            comp = case.get_value("COMP_%s" % model)
            logger.info("Building %s usernl files"%model)
            _build_usernl_files(case, model, comp)
            if comp == "cism":
                run_cmd_no_fail("%s/../components/cism/cime_config/cism.template %s" % (cimeroot, caseroot))

        _build_usernl_files(case, "drv", "cpl")

        # Create needed directories for case
        create_dirs(case)

        logger.info("If an old case build already exists, might want to run \'case.build --clean\' before building")

        # Create test script if appropriate
        # Short term fix to be removed when csh tests are removed
        if os.path.exists("env_test.xml"):
            if not os.path.exists("case.test"):
                logger.info("Starting testcase.setup")
                run_cmd_no_fail("./testcase.setup -caseroot %s" % caseroot)
                logger.info("Finished testcase.setup")

        # some tests need namelists created here (ERP)
        if test_mode:
            create_namelists(case)

        msg = "case.setup complete"
        append_status(msg, caseroot=caseroot, sfile="CaseStatus")

        # Record env information
        env_module = case.get_env("mach_specific")
        env_module.make_env_mach_specific_file(compiler, debug, mpilib, "sh")
        env_module.make_env_mach_specific_file(compiler, debug, mpilib, "csh")
        with open("software_environment.txt", "w") as f:
            f.write(env_module.list_modules())
        run_cmd_no_fail("echo -e '\n' >> software_environment.txt && \
            env >> software_environment.txt")
def write_macros_file_v1(macros, compiler, os_, machine, macros_file="Macros", output_format="make"):
    """
    Parse the config_compiler.xml file into a Macros file for the given
    machine and compiler.

    macros is the settings dict (with an optional "_COND_" key holding the
    conditional tree); output_format is "make" or "cmake".
    """
    # A few things can be used from environ if not in XML
    for item in ["MPI_PATH", "NETCDF_PATH"]:
        # Idiom fix: `item not in macros` rather than `not item in macros`.
        if item not in macros and item in os.environ:
            logger.warning("Setting {} from Environment".format(item))
            macros[item] = os.environ[item]

    with open(macros_file, "w") as fd:
        fd.write(
            """#
# COMPILER={}
# OS={}
# MACH={}
""".format(compiler, os_, machine)
        )
        if output_format == "make":
            fd.write("#\n# Makefile Macros \n")

            # print the settings out to the Macros file
            for key, value in sorted(macros.items()):
                if key == "_COND_":
                    continue  # conditionals handled by _parse_hash below
                elif key.startswith("ADD_"):
                    fd.write("{}+={}\n\n".format(key[4:], value))
                else:
                    fd.write("{}:={}\n\n".format(key, value))

        elif output_format == "cmake":
            fd.write(
                '''#
# cmake Macros generated from $compiler_file
#
set(CMAKE_MODULE_PATH %s)
include(Compilers)
set(CMAKE_C_FLAGS_RELEASE "" CACHE STRING "Flags used by c compiler." FORCE)
set(CMAKE_C_FLAGS_DEBUG "" CACHE STRING "Flags used by c compiler." FORCE)
set(CMAKE_Fortran_FLAGS_RELEASE "" CACHE STRING "Flags used by Fortran compiler." FORCE)
set(CMAKE_Fortran_FLAGS_DEBUG "" CACHE STRING "Flags used by Fortran compiler." FORCE)
set(all_build_types "None Debug Release RelWithDebInfo MinSizeRel")
set(CMAKE_BUILD_TYPE "${CMAKE_BUILD_TYPE}" CACHE STRING "Choose the type of build, options are: ${all_build_types}." FORCE)

''' % os.path.join(get_cime_root(), "src", "CMake"))

            # print the settings out to the Macros file, do it in
            # two passes so that path values appear first in the
            # file.
            for key, value in sorted(macros.items()):
                if key == "_COND_":
                    continue
                value = value.replace("(", "{").replace(")", "}")
                if key.endswith("_PATH"):
                    # Export paths as $ENV{...} so cmake resolves them lazily.
                    if value.startswith("$"):
                        value = "$ENV{}".format(value[1:])
                    cmake_var = key.replace("NETCDF_PATH", "NetCDF_PATH").replace("PNETCDF_PATH", "Pnetcdf_PATH")
                    fd.write("set({} {})\n".format(cmake_var, value))
                    fd.write("list(APPEND CMAKE_PREFIX_PATH {})\n\n".format(value))

            for key, value in sorted(macros.items()):
                if key == "_COND_":
                    continue
                value = value.replace("(", "{").replace(")", "}")
                if "CFLAGS" in key or "FFLAGS" in key or "CPPDEFS" in key or "SLIBS" in key or "LDFLAGS" in key:
                    if "shell " in value:
                        # Shell fragments become execute_process() calls whose
                        # outputs are stitched back together in TEMP.
                        components = _get_components(value)
                        idx = 0
                        for is_shell, component in components:
                            component = component.replace("NETCDF", "NetCDF").replace("PNETCDF_PATH", "Pnetcdf_PATH")
                            if is_shell:
                                fd.write('execute_process(COMMAND {} OUTPUT_VARIABLE TEMP{:d})\n'.format(component, idx))
                                fd.write('string(REGEX REPLACE "\\n$" "" TEMP{:d} "${{TEMP{:d}}}")\n'.format(idx, idx))
                            else:
                                fd.write('set(TEMP{:d} "{}")\n'.format(idx, component))
                            idx += 1
                        fd.write('set(TEMP "{}")\n'.format(" ".join(["${{TEMP{:d}}}".format(i) for i in range(idx)])))
                    else:
                        fd.write('set(TEMP "{}")\n'.format(value))
                    if "CFLAGS" in key:
                        fd.write("add_flags(CFLAGS ${TEMP})\n\n")
                    elif "FFLAGS" in key:
                        fd.write("add_flags(FFLAGS ${TEMP})\n\n")
                    elif "CPPDEFS" in key:
                        fd.write("list(APPEND CPPDEFS ${TEMP})\n\n")
                    elif "SLIBS" in key or "LDFLAGS" in key:
                        fd.write("add_flags(LDFLAGS ${TEMP})\n\n")

        # Recursively print the conditionals, combining tests to avoid repetition
        _parse_hash(macros["_COND_"], fd, 0, output_format)
def create_clone(self, newcase, keepexe=False, mach_dir=None, project=None):
    """Create and return a new Case cloned from this one.

    newcase: path of the new case root (must not already exist).
    keepexe: reuse the clone's executable (marks the build complete).
    mach_dir: optional override for MACHDIR.
    project: optional project id; falls back to the clone's PROJECT.
    """
    newcaseroot = os.path.abspath(newcase)
    expect(not os.path.isdir(newcaseroot),
           "New caseroot directory %s already exists" % newcaseroot)
    newcasename = os.path.basename(newcaseroot)
    newcase_cimeroot = os.path.abspath(get_cime_root())

    # create clone from self to case
    clone_cimeroot = self.get_value("CIMEROOT")
    if newcase_cimeroot != clone_cimeroot:
        logger.warning(" case CIMEROOT is %s " %newcase_cimeroot)
        logger.warning(" clone CIMEROOT is %s " %clone_cimeroot)
        logger.warning(" It is NOT recommended to clone cases from different versions of CIME.")

    # *** create case object as deepcopy of clone object ***
    srcroot = os.path.join(newcase_cimeroot,"..")
    newcase = self.copy(newcasename, newcaseroot, newsrcroot=srcroot)
    newcase.set_value("CIMEROOT", newcase_cimeroot)

    # determine if will use clone executable or not
    if keepexe:
        orig_exeroot = self.get_value("EXEROOT")
        newcase.set_value("EXEROOT", orig_exeroot)
        newcase.set_value("BUILD_COMPLETE","TRUE")
    else:
        newcase.set_value("BUILD_COMPLETE","FALSE")

    # set machdir
    if mach_dir is not None:
        newcase.set_value("MACHDIR", mach_dir)

    # Set project id
    # Note: we do not just copy this from the clone because it seems likely that
    # users will want to change this sometimes, especially when cloning another
    # user's case. However, note that, if a project is not given, the fallback will
    # be to copy it from the clone, just like other xml variables are copied.
    if project is None:
        project = self.get_value("PROJECT", subgroup="case.run")
    if project is not None:
        newcase.set_value("PROJECT", project)

    # create caseroot
    newcase.create_caseroot(clone=True)
    newcase.flush(flushall=True)

    # copy user_nl_files
    cloneroot = self._caseroot
    files = glob.glob(cloneroot + '/user_nl_*')
    for item in files:
        shutil.copy(item, newcaseroot)

    # copy SourceMod and Buildconf files
    for casesub in ("SourceMods", "Buildconf"):
        shutil.copytree(os.path.join(cloneroot, casesub), os.path.join(newcaseroot, casesub))

    # copy env_case.xml to LockedFiles
    shutil.copy(os.path.join(newcaseroot,"env_case.xml"),
                os.path.join(newcaseroot,"LockedFiles"))

    # Update README.case. Use context managers so both handles are closed
    # even on error (the original leaked both file objects).
    with open(cloneroot + "/README.case", "r") as fclone:
        with open(newcaseroot + "/README.case", "a") as fnewcase:
            fnewcase.write("\n *** original clone README follows ****")
            fnewcase.write("\n " + fclone.read())

    clonename = self.get_value("CASE")
    logger.info(" Successfully created new case %s from clone case %s " %(newcasename, clonename))

    case_setup(newcase, clean=False, test_mode=False)

    return newcase
def __init__(self, test_names, test_data=None,
             no_run=False, no_build=False, no_setup=False, no_batch=None,
             test_root=None, test_id=None,
             machine_name=None, compiler=None,
             baseline_root=None, baseline_cmp_name=None, baseline_gen_name=None,
             clean=False, namelists_only=False,
             project=None, parallel_jobs=None,
             walltime=None, proc_pool=None,
             use_existing=False, save_timing=False, queue=None,
             allow_baseline_overwrite=False, output_root=None,
             force_procs=None, force_threads=None, mpilib=None,
             input_dir=None, pesfile=None, mail_user=None, mail_type=None,
             allow_pnl=False, non_local=False):
###########################################################################
    """Set up the test scheduler for the given list of test names.

    Resolves machine/compiler/project settings, the test and baseline
    roots, the processor pool, the phase list, and the per-test status
    table. Many parameters default from the machine configuration when
    not given explicitly.
    """
    self._cime_root = get_cime_root()
    self._cime_model = get_model()
    # NOTE(review): driver is hard-coded to "mct" here — confirm intended.
    self._cime_driver = "mct"
    self._save_timing = save_timing
    self._queue = queue
    self._test_data = {} if test_data is None else test_data  # Format: {test_name -> {data_name -> data}}
    self._mpilib = mpilib  # allow override of default mpilib
    self._completed_tests = 0
    self._input_dir = input_dir
    self._pesfile = pesfile
    self._allow_baseline_overwrite = allow_baseline_overwrite
    self._allow_pnl = allow_pnl
    self._non_local = non_local

    self._mail_user = mail_user
    self._mail_type = mail_type

    self._machobj = Machines(machine=machine_name)

    # Relative cost weighting of the model-build phase (unitless).
    self._model_build_cost = 4

    # If user is forcing procs or threads, re-write test names to reflect this.
    if force_procs or force_threads:
        test_names = _translate_test_names_for_new_pecount(test_names, force_procs, force_threads)

    self._no_setup = no_setup
    # no_build is implied by no_setup or namelists_only; no_run by no_build.
    self._no_build = no_build or no_setup or namelists_only
    self._no_run = no_run or self._no_build
    self._output_root = output_root

    # Figure out what project to use
    if project is None:
        self._project = get_project()
        if self._project is None:
            self._project = self._machobj.get_value("PROJECT")
    else:
        self._project = project

    # We will not use batch system if user asked for no_batch or if current
    # machine is not a batch machine
    self._no_batch = no_batch or not self._machobj.has_batch_system()
    expect(not (self._no_batch and self._queue is not None),
           "Does not make sense to request a queue without batch system")

    # Determine and resolve test_root: explicit arg > output_root > machine default.
    if test_root is not None:
        self._test_root = test_root
    elif self._output_root is not None:
        self._test_root = self._output_root
    else:
        self._test_root = self._machobj.get_value("CIME_OUTPUT_ROOT")

    if self._project is not None:
        self._test_root = self._test_root.replace("$PROJECT", self._project)

    self._test_root = os.path.abspath(self._test_root)
    self._test_id = test_id if test_id is not None else get_timestamp()

    self._compiler = self._machobj.get_default_compiler() if compiler is None else compiler

    self._clean = clean
    self._namelists_only = namelists_only

    self._walltime = walltime

    if parallel_jobs is None:
        self._parallel_jobs = min(len(test_names),
                                  self._machobj.get_value("MAX_MPITASKS_PER_NODE"))
    else:
        self._parallel_jobs = parallel_jobs

    self._baseline_cmp_name = baseline_cmp_name  # Implies comparison should be done if not None
    self._baseline_gen_name = baseline_gen_name  # Implies generation should be done if not None

    # Compute baseline_root (machine default unless given), with $PROJECT expanded.
    self._baseline_root = baseline_root if baseline_root is not None \
        else self._machobj.get_value("BASELINE_ROOT")

    if self._project is not None:
        self._baseline_root = self._baseline_root.replace("$PROJECT", self._project)

    self._baseline_root = os.path.abspath(self._baseline_root)

    if baseline_cmp_name or baseline_gen_name:
        if self._baseline_cmp_name:
            full_baseline_dir = os.path.join(self._baseline_root, self._baseline_cmp_name)
            expect(os.path.isdir(full_baseline_dir),
                   "Missing baseline comparison directory {}".format(full_baseline_dir))

        # the following is to assure that the existing generate directory is not overwritten
        if self._baseline_gen_name:
            full_baseline_dir = os.path.join(self._baseline_root, self._baseline_gen_name)
            existing_baselines = []
            for test_name in test_names:
                test_baseline = os.path.join(full_baseline_dir, test_name)
                if os.path.isdir(test_baseline):
                    existing_baselines.append(test_baseline)

            expect(allow_baseline_overwrite or len(existing_baselines) == 0,
                   "Baseline directories already exists {}\n" \
                   "Use -o to avoid this error".format(existing_baselines))

    if self._cime_model == "e3sm":
        _order_tests_by_runtime(test_names, self._baseline_root)

    # This is the only data that multiple threads will simultaneously access
    # Each test has its own value and setting/retrieving items from a dict
    # is atomic, so this should be fine to use without mutex.
    # name -> (phase, status)
    self._tests = OrderedDict()
    for test_name in test_names:
        self._tests[test_name] = (TEST_START, TEST_PASS_STATUS)

    # Oversubscribe by 1/4
    if proc_pool is None:
        pes = int(self._machobj.get_value("MAX_TASKS_PER_NODE"))
        self._proc_pool = int(pes * 1.25)
    else:
        self._proc_pool = int(proc_pool)

    self._procs_avail = self._proc_pool

    # Setup phases
    self._phases = list(PHASES)
    if self._no_setup:
        self._phases.remove(SETUP_PHASE)
    if self._no_build:
        self._phases.remove(SHAREDLIB_BUILD_PHASE)
        self._phases.remove(MODEL_BUILD_PHASE)
    if self._no_run:
        self._phases.remove(RUN_PHASE)

    if use_existing:
        # Resume from existing test dirs: replay each TestStatus and restart
        # at the first pending/failed core phase.
        for test in self._tests:
            with TestStatus(self._get_test_dir(test)) as ts:
                for phase, status in ts:
                    if phase in CORE_PHASES:
                        if status in [TEST_PEND_STATUS, TEST_FAIL_STATUS]:
                            if status == TEST_FAIL_STATUS:
                                # Important for potential subsequent waits
                                ts.set_status(phase, TEST_PEND_STATUS)

                            # We need to pick up here
                            break

                        else:
                            if phase != SUBMIT_PHASE:
                                # Somewhat subtle. Create_test considers submit/run to be the run phase,
                                # so don't try to update test status for a passed submit phase
                                self._update_test_status(test, phase, TEST_PEND_STATUS)
                                self._update_test_status(test, phase, status)

                                if phase == RUN_PHASE:
                                    logger.info("Test {} passed and will not be re-run".format(test))

            logger.info("Using existing test directory {}".format(self._get_test_dir(test)))
    else:
        # None of the test directories should already exist.
        for test in self._tests:
            expect(not os.path.exists(self._get_test_dir(test)),
                   "Cannot create new case in directory '{}', it already exists."
                   " Pick a different test-id".format(self._get_test_dir(test)))
            logger.info("Creating test directory {}".format(self._get_test_dir(test)))
import CIME.utils from CIME.utils import expect, convert_to_seconds, parse_test_name, get_cime_root, get_model from CIME.XML.machines import Machines import six, sys, os # Expect that, if a model wants to use python-based test lists, they will have a file # config/$model/tests.py , containing a test dictionary called _TESTS sys.path.append(os.path.join(get_cime_root(), "config", get_model())) _ALL_TESTS = {} try: from tests import _TESTS # pylint: disable=import-error _ALL_TESTS.update(_TESTS) except: pass # Here are the tests belonging to e3sm suites. Format is # <test>.<grid>.<compset>. # suite_name -> (inherits_from, timelimit, [test [, mods[, machines]]]) # To elaborate, if no mods are needed, a string representing the testname is all that is needed. # If testmods are needed, a 2-ple must be provided (test, mods) # If you want to restrict the test mods to certain machines, than a 3-ple is needed (test, mods, [machines]) _CIME_TESTS = { "cime_tiny" : (None, "0:10:00", ("ERS.f19_g16_rx1.A", "NCK.f19_g16_rx1.A") ), "cime_test_only_pass" : (None, "0:10:00",