def infer_machine_name_from_tests(testargs):
###############################################################################
    """
    Return the machine name targeted by the given test-name arguments, or
    None if no argument names a machine.  All arguments that do name a
    machine must agree on it.

    >>> infer_machine_name_from_tests(["NCK.f19_g16_rx1.A.melvin_gnu"])
    'melvin'
    >>> infer_machine_name_from_tests(["NCK.f19_g16_rx1.A"])
    >>> infer_machine_name_from_tests(["NCK.f19_g16_rx1.A", "NCK.f19_g16_rx1.A.melvin_gnu"])
    'melvin'
    >>> infer_machine_name_from_tests(["NCK.f19_g16_rx1.A.melvin_gnu", "NCK.f19_g16_rx1.A.melvin_gnu"])
    'melvin'
    """
    suites = get_test_suites()
    inferred = None
    for raw_arg in testargs:
        name = raw_arg.strip()
        # A leading "^" marks an excluded test; it still carries machine info.
        if name.startswith("^"):
            name = name[1:]
        if name in suites:
            continue  # suite names carry no machine component
        candidate = parse_test_name(name)[4]
        if candidate is None:
            continue
        if inferred is None:
            inferred = candidate
        else:
            expect(inferred == candidate,
                   "Must have consistent machine '%s' != '%s'" % (inferred, candidate))
    return inferred
def _translate_test_names_for_new_pecount(test_names, force_procs, force_threads):
###############################################################################
    """
    Rewrite each test name so its "P" case option reflects the forced
    processor and/or thread counts.

    If a test already has a P option (P<procs> or P<procs>x<threads>), the
    forced values replace the corresponding fields; otherwise a new P option
    is appended (procs default to "M" when not forced).

    Returns the list of rewritten test names.
    """
    # Fix: removed a dead "caseopts = []" initialization that preceded the
    # loop; caseopts is unconditionally reassigned by parse_test_name() on
    # every iteration, so the initializer was never read and only misled.
    new_test_names = []
    for test_name in test_names:
        testcase, caseopts, grid, compset, machine, compiler, testmod = parse_test_name(
            test_name)
        rewrote_caseopt = False
        if caseopts is not None:
            for idx, caseopt in enumerate(caseopts):
                if caseopt.startswith("P"):
                    # Existing pe-count option: split into procs / threads.
                    caseopt = caseopt[1:]
                    if "x" in caseopt:
                        old_procs, old_thrds = caseopt.split("x")
                    else:
                        old_procs, old_thrds = caseopt, None
                    # Forced values win; otherwise keep what the name had.
                    new_procs = force_procs if force_procs is not None else old_procs
                    new_thrds = force_threads if force_threads is not None else old_thrds
                    newcaseopt = (
                        "P{}".format(new_procs)) if new_thrds is None else (
                        "P{}x{}".format(new_procs, new_thrds))
                    caseopts[idx] = newcaseopt
                    rewrote_caseopt = True
                    break

        if not rewrote_caseopt:
            # No P option present; append one (defaulting procs to "M").
            force_procs = "M" if force_procs is None else force_procs
            newcaseopt = (
                "P{}".format(force_procs)) if force_threads is None else (
                "P{}x{}".format(force_procs, force_threads))
            if caseopts is None:
                caseopts = [newcaseopt]
            else:
                caseopts.append(newcaseopt)

        new_test_name = get_full_test_name(testcase, caseopts=caseopts, grid=grid,
                                           compset=compset, machine=machine,
                                           compiler=compiler, testmod=testmod)
        new_test_names.append(new_test_name)

    return new_test_names
def _translate_test_names_for_new_pecount(test_names, force_procs, force_threads):
###############################################################################
    """
    Rewrite each test name so its "P" (pe-count) case option carries the
    forced processor and/or thread counts, appending a new P option when
    none is present.  Returns the rewritten names.
    """
    def _pe_opt(procs, thrds):
        # Format a P caseopt token from proc/thread counts.
        return ("P%s" % procs) if thrds is None else ("P%sx%s" % (procs, thrds))

    translated = []
    for name in test_names:
        testcase, caseopts, grid, compset, machine, compiler, testmod = parse_test_name(
            name)

        replaced = False
        for idx, token in enumerate(caseopts or []):
            if not token.startswith("P"):
                continue
            spec = token[1:]
            if "x" in spec:
                old_procs, old_thrds = spec.split("x")
            else:
                old_procs, old_thrds = spec, None
            # Forced values take precedence over what the name already had.
            procs = old_procs if force_procs is None else force_procs
            thrds = old_thrds if force_threads is None else force_threads
            caseopts[idx] = _pe_opt(procs, thrds)
            replaced = True
            break

        if not replaced:
            # No P option found: append one, defaulting procs to "M".
            force_procs = "M" if force_procs is None else force_procs
            fresh = _pe_opt(force_procs, force_threads)
            if caseopts is None:
                caseopts = [fresh]
            else:
                caseopts.append(fresh)

        translated.append(get_full_test_name(testcase, caseopts=caseopts, grid=grid,
                                             compset=compset, machine=machine,
                                             compiler=compiler, testmod=testmod))

    return translated
def _translate_test_names_for_new_pecount(test_names, force_procs, force_threads):
###############################################################################
    """
    Return test names rewritten so the "P" case option reflects the forced
    proc/thread counts; a P option is added (procs default "M") when absent.
    """
    # Fix: dropped the unused pre-loop "caseopts = []" — the variable is
    # rebound from parse_test_name() at the top of every iteration, so the
    # initializer was dead code that suggested state carried across tests.
    new_test_names = []
    for test_name in test_names:
        testcase, caseopts, grid, compset, machine, compiler, testmod = parse_test_name(test_name)
        rewrote_caseopt = False
        if caseopts is not None:
            for idx, caseopt in enumerate(caseopts):
                if caseopt.startswith("P"):
                    caseopt = caseopt[1:]
                    # P option is either P<procs> or P<procs>x<threads>.
                    if "x" in caseopt:
                        old_procs, old_thrds = caseopt.split("x")
                    else:
                        old_procs, old_thrds = caseopt, None
                    new_procs = force_procs if force_procs is not None else old_procs
                    new_thrds = force_threads if force_threads is not None else old_thrds
                    newcaseopt = ("P{}".format(new_procs)) if new_thrds is None else ("P{}x{}".format(new_procs, new_thrds))
                    caseopts[idx] = newcaseopt
                    rewrote_caseopt = True
                    break

        if not rewrote_caseopt:
            force_procs = "M" if force_procs is None else force_procs
            newcaseopt = ("P{}".format(force_procs)) if force_threads is None else ("P{}x{}".format(force_procs, force_threads))
            if caseopts is None:
                caseopts = [newcaseopt]
            else:
                caseopts.append(newcaseopt)

        new_test_name = get_full_test_name(testcase, caseopts=caseopts, grid=grid,
                                           compset=compset, machine=machine,
                                           compiler=compiler, testmod=testmod)
        new_test_names.append(new_test_name)

    return new_test_names
logger.info("Building %s usernl files" % model) _build_usernl_files(case, model, comp) if comp == "cism": run_cmd_no_fail( "%s/../components/cism/cime_config/cism.template %s" % (cimeroot, caseroot)) _build_usernl_files(case, "drv", "cpl") user_mods_path = case.get_value("USER_MODS_FULLPATH") if user_mods_path is not None: apply_user_mods(caseroot, user_mods_path=user_mods_path, ninst=ninst) elif case.get_value("TEST"): test_mods = parse_test_name(casebaseid)[6] if test_mods is not None: user_mods_path = os.path.join(case.get_value("TESTS_MODS_DIR"), test_mods) apply_user_mods(caseroot, user_mods_path=user_mods_path, ninst=ninst) # Run preview namelists for scripts logger.info("preview_namelists") preview_namelists(case) logger.info("See ./CaseDoc for component namelists") logger.info( "If an old case build already exists, might want to run \'case.build --clean\' before building" )
def _case_setup_impl(case, caseroot, casebaseid, clean=False, test_mode=False, reset=False):
###############################################################################
    """
    Perform case.setup: optionally clean/reset a previous setup, then (unless
    clean-only) configure Macros, pe layout, cost estimates, batch scripts,
    user_nl files and user mods for the case rooted at caseroot.

    Side effects: chdir's into caseroot, mutates case XML values, writes and
    removes files under caseroot, and appends to CaseStatus.
    """
    os.chdir(caseroot)
    msg = "case.setup starting"
    append_status(msg, caseroot=caseroot, sfile="CaseStatus")
    cimeroot = os.environ["CIMEROOT"]

    # Check that $DIN_LOC_ROOT exists - and abort if not a namelist compare tests
    din_loc_root = case.get_value("DIN_LOC_ROOT")
    testcase = case.get_value("TESTCASE")
    expect(not (not os.path.isdir(din_loc_root) and testcase != "SBN"),
           "inputdata root is not a directory: \"$din_loc_root\" ")

    # Check that userdefine settings are specified before expanding variable
    for vid, value in case:
        expect(not (type(value) is str and "USERDEFINED_required_build" in value),
               "Parameter '%s' must be defined" % vid)

    # Create batch script
    if reset or clean:
        # Clean batch script: back up the old setup products first.
        backup_dir = "PESetupHist/b.%s" % time.strftime("%y%m%d-%H%M%S")
        if not os.path.isdir(backup_dir):
            os.makedirs(backup_dir)

        # back up relevant files
        for fileglob in ["case.run", "env_build.xml", "env_mach_pes.xml", "Macros*"]:
            for filename in glob.glob(fileglob):
                shutil.copy(filename, backup_dir)
        if os.path.exists("case.run"):
            os.remove("case.run")

        # only do the following if are NOT in testmode
        if not test_mode:
            # rebuild the models (even on restart)
            case.set_value("BUILD_COMPLETE", False)

            # backup and then clean test script
            if os.path.exists("case.test"):
                shutil.copy("case.test", backup_dir)
                os.remove("case.test")
                logger.info("Successfully cleaned test script case.test")

            if os.path.exists("case.testdriver"):
                shutil.copy("case.testdriver", backup_dir)
                os.remove("case.testdriver")
                logger.info("Successfully cleaned test script case.testdriver")
        # NOTE(review): the following message is logged twice — looks like an
        # accidental duplication; confirm and drop one.
        logger.info("Successfully cleaned batch script case.run")

        logger.info("Successfully cleaned batch script case.run")
        logger.info("Some files have been saved to %s" % backup_dir)

        msg = "case.setup clean complete"
        append_status(msg, caseroot=caseroot, sfile="CaseStatus")

    if not clean:
        drv_comp = Component()
        models = drv_comp.get_valid_model_components()
        models.remove("DRV")

        mach, compiler, debug, mpilib = \
            case.get_value("MACH"), case.get_value("COMPILER"), case.get_value("DEBUG"), case.get_value("MPILIB")
        expect(mach is not None, "xml variable MACH is not set")

        # Create Macros file only if it does not exist
        if not os.path.exists("Macros"):
            logger.debug("Creating Macros file for %s" % mach)
            compilers = Compilers(compiler=compiler, machine=mach, os_=case.get_value("OS"), mpilib=mpilib)
            compilers.write_macros_file()
        else:
            logger.debug("Macros script already created ...skipping")

        # Set tasks to 1 if mpi-serial library
        if mpilib == "mpi-serial":
            for vid, value in case:
                if vid.startswith("NTASKS_") and value != 1:
                    case.set_value(vid, 1)

        # Check ninst.
        # In CIME there can be multiple instances of each component model (an ensemble) NINST is the instance of that component.
        # Save ninst in a dict to use later in apply_user_mods
        ninst = dict()
        for comp in models:
            comp_model = case.get_value("COMP_%s" % comp)
            ninst[comp_model] = case.get_value("NINST_%s" % comp)
            ntasks = case.get_value("NTASKS_%s" % comp)
            if ninst[comp_model] > ntasks:
                if ntasks == 1:
                    # Grow task count to cover the requested instances.
                    case.set_value("NTASKS_%s" % comp, ninst[comp_model])
                else:
                    expect(False, "NINST_%s value %d greater than NTASKS_%s %d" % (comp, ninst[comp_model], comp, ntasks))

        expect(not (case.get_value("BUILD_THREADED") and compiler == "nag"),
               "it is not possible to run with OpenMP if using the NAG Fortran compiler")

        if os.path.exists("case.run"):
            logger.info("Machine/Decomp/Pes configuration has already been done ...skipping")
        else:
            _check_pelayouts_require_rebuild(case, models)

            if os.path.exists("LockedFiles/env_build.xml"):
                os.remove("LockedFiles/env_build.xml")

            case.flush()
            check_lockedfiles()

            tm = TaskMaker(case)
            mtpn = case.get_value("MAX_TASKS_PER_NODE")
            pespn = case.get_value("PES_PER_NODE")
            # This is hardcoded because on yellowstone by default we
            # run with 15 pes per node
            # but pay for 16 pes per node. See github issue #518
            if case.get_value("MACH") == "yellowstone":
                pespn = 16
            pestot = tm.totaltasks
            if mtpn > pespn:
                pestot = pestot * (mtpn // pespn)
                case.set_value("COST_PES", tm.num_nodes*pespn)
            else:
                # reset cost_pes to totalpes
                case.set_value("COST_PES", 0)
            case.set_value("TOTALPES", pestot)

            # Compute cost based on PE count
            pval = 1
            pcnt = 0
            while pval < pestot:
                pval *= 2
                pcnt += 6 # (scaling like sqrt(6/10))
            pcost = 3 - pcnt / 10 # (3 is 64 with 6)

            # Compute cost based on DEBUG
            dcost = 3 if debug else 0

            # Compute cost based on run length
            # For simplicity, we use a heuristic just based on STOP_OPTION (not considering
            # STOP_N), and only deal with options longer than ndays
            lcost = 0
            if "nmonth" in case.get_value("STOP_OPTION"):
                # N months costs 30x as much as N days; since cost is based on log-base-2, add 5
                lcost = 5
            elif "nyear" in case.get_value("STOP_OPTION"):
                # N years costs 365x as much as N days; since cost is based on log-base-2, add 9
                lcost = 9

            estcost = pcost + dcost + lcost
            # NOTE(review): "CCSM_CCOST" appears twice in this list, so it is
            # added to estcost twice — verify whether the second entry should
            # be a different cost variable.
            for cost in ["CCSM_CCOST", "CCSM_GCOST", "CCSM_TCOST", "CCSM_CCOST"]:
                estcost += case.get_value(cost)

            case.set_value("CCSM_PCOST", pcost)
            case.set_value("CCSM_ESTCOST", estcost)

            # create batch file
            logger.info("Creating batch script case.run")
            # Use BatchFactory to get the appropriate instance of a BatchMaker,
            # use it to create our batch scripts
            env_batch = case.get_env("batch")
            for job in env_batch.get_jobs():
                input_batch_script = os.path.join(case.get_value("MACHDIR"), env_batch.get_value('template', subgroup=job))
                if job == "case.test" and testcase is not None and not test_mode:
                    logger.info("Writing %s script" % job)
                    testscript = os.path.join(cimeroot, "scripts", "Testing", "Testcases", "%s_script" % testcase)
                    # Short term fix to be removed when csh tests are removed
                    if not os.path.exists(testscript):
                        env_batch.make_batch_script(input_batch_script, job, case)
                elif job != "case.test":
                    logger.info("Writing %s script" % job)
                    env_batch.make_batch_script(input_batch_script, job, case)

            # Make a copy of env_mach_pes.xml in order to be able
            # to check that it does not change once case.setup is invoked
            logger.info("Locking file env_mach_pes.xml")
            case.flush()
            shutil.copy("env_mach_pes.xml", "LockedFiles")

        # Create user_nl files for the required number of instances
        if not os.path.exists("user_nl_cpl"):
            logger.info("Creating user_nl_xxx files for components and cpl")
        # loop over models
        for model in models:
            comp = case.get_value("COMP_%s" % model)
            logger.info("Building %s usernl files"%model)
            _build_usernl_files(case, model, comp)
            if comp == "cism":
                run_cmd_no_fail("%s/../components/cism/cime_config/cism.template %s" % (cimeroot, caseroot))

        _build_usernl_files(case, "drv", "cpl")

        user_mods_path = case.get_value("USER_MODS_FULLPATH")
        if user_mods_path is not None:
            apply_user_mods(caseroot, user_mods_path=user_mods_path, ninst=ninst)
        elif case.get_value("TEST"):
            # For tests, the testmods component of the test name (index 6)
            # points at a mods directory under TESTS_MODS_DIR.
            test_mods = parse_test_name(casebaseid)[6]
            if test_mods is not None:
                user_mods_path = os.path.join(case.get_value("TESTS_MODS_DIR"), test_mods)
                apply_user_mods(caseroot, user_mods_path=user_mods_path, ninst=ninst)

        # Run preview namelists for scripts
        logger.info("preview_namelists")
        preview_namelists(case)

        logger.info("See ./CaseDoc for component namelists")
        logger.info("If an old case build already exists, might want to run \'case.build --clean\' before building")

        # Create test script if appropriate
        # Short term fix to be removed when csh tests are removed
        if os.path.exists("env_test.xml"):
            if not os.path.exists("case.test"):
                logger.info("Starting testcase.setup")
                run_cmd_no_fail("./testcase.setup -caseroot %s" % caseroot)
                logger.info("Finished testcase.setup")

        msg = "case.setup complete"
        append_status(msg, caseroot=caseroot, sfile="CaseStatus")

        # Record env information
        env_module = case.get_env("mach_specific")
        env_module.make_env_mach_specific_file(compiler, debug, mpilib, "sh")
        env_module.make_env_mach_specific_file(compiler, debug, mpilib, "csh")
        with open("software_environment.txt", "w") as f:
            f.write(env_module.list_modules())
        run_cmd_no_fail("echo -e '\n' >> software_environment.txt && \
            env >> software_environment.txt")
def _xml_phase(self, test):
###########################################################################
    """
    Build and write env_test.xml for the given test, then seed the case
    with test-specific values (TEST, SAVE_TIMING, SHAREDLIBROOT, ...).

    Returns (True, "") on success, matching the other phase methods.
    """
    test_case = parse_test_name(test)[0]

    # Create, fill and write an envtest object
    test_dir = self._get_test_dir(test)
    envtest = EnvTest(test_dir)

    # Determine list of component classes that this coupler/driver knows how
    # to deal with. This list follows the same order as compset longnames follow.
    files = Files(comp_interface=self._cime_driver)
    drv_config_file = files.get_value("CONFIG_CPL_FILE")
    drv_comp = Component(drv_config_file, "CPL")

    envtest.add_elements_by_group(files, {}, "env_test.xml")
    envtest.add_elements_by_group(drv_comp, {}, "env_test.xml")
    envtest.set_value("TESTCASE", test_case)
    envtest.set_value("TEST_TESTID", self._test_id)
    envtest.set_value("CASEBASEID", test)
    if test in self._test_data and "options" in self._test_data[test] and \
            "memleak_tolerance" in self._test_data[test]['options']:
        envtest.set_value("TEST_MEMLEAK_TOLERANCE", self._test_data[test]['options']['memleak_tolerance'])

    test_argv = "-testname {} -testroot {}".format(test, self._test_root)
    if self._baseline_gen_name:
        test_argv += " -generate {}".format(self._baseline_gen_name)
        basegen_case_fullpath = os.path.join(self._baseline_root, self._baseline_gen_name, test)
        logger.debug("basegen_case is {}".format(basegen_case_fullpath))
        envtest.set_value("BASELINE_NAME_GEN", self._baseline_gen_name)
        envtest.set_value("BASEGEN_CASE", os.path.join(self._baseline_gen_name, test))
    if self._baseline_cmp_name:
        test_argv += " -compare {}".format(self._baseline_cmp_name)
        envtest.set_value("BASELINE_NAME_CMP", self._baseline_cmp_name)
        envtest.set_value("BASECMP_CASE", os.path.join(self._baseline_cmp_name, test))

    envtest.set_value("TEST_ARGV", test_argv)
    envtest.set_value("CLEANUP", self._clean)
    envtest.set_value("BASELINE_ROOT", self._baseline_root)
    envtest.set_value("GENERATE_BASELINE", self._baseline_gen_name is not None)
    envtest.set_value("COMPARE_BASELINE", self._baseline_cmp_name is not None)
    envtest.set_value("CCSM_CPRNC", self._machobj.get_value("CCSM_CPRNC", resolved=False))
    tput_tolerance = self._machobj.get_value("TEST_TPUT_TOLERANCE", resolved=False)
    if test in self._test_data and "options" in self._test_data[test] and \
            "tput_tolerance" in self._test_data[test]['options']:
        tput_tolerance = self._test_data[test]['options']['tput_tolerance']

    envtest.set_value("TEST_TPUT_TOLERANCE", 0.25 if tput_tolerance is None else tput_tolerance)

    # Add the test instructions from config_test to env_test in the case
    config_test = Tests()
    testnode = config_test.get_test_node(test_case)
    envtest.add_test(testnode)

    # Determine the test_case from the test name
    test_case, case_opts = parse_test_name(test)[:2]

    # Determine case_opts from the test_case
    if case_opts is not None:
        logger.debug("case_opts are {} ".format(case_opts))
        for opt in case_opts:  # pylint: disable=not-an-iterable
            logger.debug("case_opt is {}".format(opt))
            if opt == 'D':
                envtest.set_test_parameter("DEBUG", "TRUE")
                logger.debug(" DEBUG set to TRUE")
            elif opt == 'E':
                envtest.set_test_parameter("USE_ESMF_LIB", "TRUE")
                logger.debug(" USE_ESMF_LIB set to TRUE")
            elif opt == 'CG':
                envtest.set_test_parameter("CALENDAR", "GREGORIAN")
                logger.debug(" CALENDAR set to {}".format(opt))
            elif opt.startswith('L'):
                # L<unit><N>: run length, e.g. Ld5 = 5 ndays.
                match = re.match('L([A-Za-z])([0-9]*)', opt)
                stop_option = {"y": "nyears", "m": "nmonths", "d": "ndays",
                               "h": "nhours", "s": "nseconds", "n": "nsteps"}
                opt = match.group(1)
                envtest.set_test_parameter("STOP_OPTION", stop_option[opt])
                opti = match.group(2)
                envtest.set_test_parameter("STOP_N", opti)
                logger.debug(" STOP_OPTION set to {}".format(stop_option[opt]))
                logger.debug(" STOP_N set to {}".format(opti))
            elif opt.startswith('R'):
                # R option is for testing in PTS_MODE or Single Column Model
                # (SCM) mode
                envtest.set_test_parameter("PTS_MODE", "TRUE")
                # For PTS_MODE, compile with mpi-serial
                envtest.set_test_parameter("MPILIB", "mpi-serial")
            elif opt.startswith('IOP'):
                # Fix: this branch must precede the generic startswith('I')
                # catch-all below, otherwise "IOP..." is swallowed there and
                # this warning can never fire.
                logger.warning("IOP test option not yet implemented")
            elif (opt.startswith('I') or  # Marker to distinguish tests with same name - ignored
                  opt.startswith('M') or  # handled in create_newcase
                  opt.startswith('P') or  # handled in create_newcase
                  opt.startswith('N') or  # handled in create_newcase
                  opt.startswith('C') or  # handled in create_newcase
                  opt.startswith('V')):   # handled in create_newcase
                pass
            else:
                expect(False, "Could not parse option '{}' ".format(opt))

    envtest.write()

    lock_file("env_run.xml", caseroot=test_dir, newname="env_run.orig.xml")

    with Case(test_dir, read_only=False) as case:
        if self._output_root is None:
            self._output_root = case.get_value("CIME_OUTPUT_ROOT")
        # if we are running a single test we don't need sharedlibroot
        if len(self._tests) > 1 and self._cime_model != "e3sm":
            case.set_value("SHAREDLIBROOT",
                           os.path.join(self._output_root, "sharedlibroot.{}".format(self._test_id)))
        envtest.set_initial_values(case)
        case.set_value("TEST", True)
        case.set_value("SAVE_TIMING", self._save_timing)

        # Scale back build parallelism on systems with few cores
        if self._model_build_cost > self._proc_pool:
            case.set_value("GMAKE_J", self._proc_pool)
            self._model_build_cost = self._proc_pool

    return True, ""
def _xml_phase(self, test):
###########################################################################
    """
    Populate and write env_test.xml for the given test and apply the
    test-specific case values.

    Returns (True, "") on success, consistent with the other phase methods.
    """
    test_case = parse_test_name(test)[0]

    # Create, fill and write an envtest object
    test_dir = self._get_test_dir(test)
    envtest = EnvTest(test_dir)

    # Determine list of component classes that this coupler/driver knows how
    # to deal with. This list follows the same order as compset longnames follow.
    files = Files(comp_interface=self._cime_driver)
    drv_config_file = files.get_value("CONFIG_CPL_FILE")
    drv_comp = Component(drv_config_file, "CPL")

    envtest.add_elements_by_group(files, {}, "env_test.xml")
    envtest.add_elements_by_group(drv_comp, {}, "env_test.xml")
    envtest.set_value("TESTCASE", test_case)
    envtest.set_value("TEST_TESTID", self._test_id)
    envtest.set_value("CASEBASEID", test)
    if test in self._test_data and "options" in self._test_data[test] and \
            "memleak_tolerance" in self._test_data[test]['options']:
        envtest.set_value("TEST_MEMLEAK_TOLERANCE", self._test_data[test]['options']['memleak_tolerance'])

    test_argv = "-testname {} -testroot {}".format(test, self._test_root)
    if self._baseline_gen_name:
        test_argv += " -generate {}".format(self._baseline_gen_name)
        basegen_case_fullpath = os.path.join(self._baseline_root, self._baseline_gen_name, test)
        logger.debug("basegen_case is {}".format(basegen_case_fullpath))
        envtest.set_value("BASELINE_NAME_GEN", self._baseline_gen_name)
        envtest.set_value("BASEGEN_CASE", os.path.join(self._baseline_gen_name, test))
    if self._baseline_cmp_name:
        test_argv += " -compare {}".format(self._baseline_cmp_name)
        envtest.set_value("BASELINE_NAME_CMP", self._baseline_cmp_name)
        envtest.set_value("BASECMP_CASE", os.path.join(self._baseline_cmp_name, test))

    envtest.set_value("TEST_ARGV", test_argv)
    envtest.set_value("CLEANUP", self._clean)
    envtest.set_value("BASELINE_ROOT", self._baseline_root)
    envtest.set_value("GENERATE_BASELINE", self._baseline_gen_name is not None)
    envtest.set_value("COMPARE_BASELINE", self._baseline_cmp_name is not None)
    envtest.set_value("CCSM_CPRNC", self._machobj.get_value("CCSM_CPRNC", resolved=False))
    tput_tolerance = self._machobj.get_value("TEST_TPUT_TOLERANCE", resolved=False)
    if test in self._test_data and "options" in self._test_data[test] and \
            "tput_tolerance" in self._test_data[test]['options']:
        tput_tolerance = self._test_data[test]['options']['tput_tolerance']

    envtest.set_value("TEST_TPUT_TOLERANCE", 0.25 if tput_tolerance is None else tput_tolerance)

    # Add the test instructions from config_test to env_test in the case
    config_test = Tests()
    testnode = config_test.get_test_node(test_case)
    envtest.add_test(testnode)

    # Determine the test_case from the test name
    test_case, case_opts = parse_test_name(test)[:2]

    # Determine case_opts from the test_case
    if case_opts is not None:
        logger.debug("case_opts are {} ".format(case_opts))
        for opt in case_opts:  # pylint: disable=not-an-iterable
            logger.debug("case_opt is {}".format(opt))
            if opt == 'D':
                envtest.set_test_parameter("DEBUG", "TRUE")
                logger.debug(" DEBUG set to TRUE")
            elif opt == 'E':
                envtest.set_test_parameter("USE_ESMF_LIB", "TRUE")
                logger.debug(" USE_ESMF_LIB set to TRUE")
            elif opt == 'CG':
                envtest.set_test_parameter("CALENDAR", "GREGORIAN")
                logger.debug(" CALENDAR set to {}".format(opt))
            elif opt.startswith('L'):
                # L<unit><N>: run length, e.g. Ld5 = 5 ndays.
                match = re.match('L([A-Za-z])([0-9]*)', opt)
                stop_option = {"y": "nyears", "m": "nmonths", "d": "ndays",
                               "h": "nhours", "s": "nseconds", "n": "nsteps"}
                opt = match.group(1)
                envtest.set_test_parameter("STOP_OPTION", stop_option[opt])
                opti = match.group(2)
                envtest.set_test_parameter("STOP_N", opti)
                logger.debug(" STOP_OPTION set to {}".format(stop_option[opt]))
                logger.debug(" STOP_N set to {}".format(opti))
            elif opt.startswith('R'):
                # R option is for testing in PTS_MODE or Single Column Model
                # (SCM) mode
                envtest.set_test_parameter("PTS_MODE", "TRUE")
                # For PTS_MODE, compile with mpi-serial
                envtest.set_test_parameter("MPILIB", "mpi-serial")
            elif opt.startswith('IOP'):
                # Fix: checked before the generic startswith('I') catch-all
                # below; in the original order "IOP..." matched 'I' first and
                # this warning was unreachable.
                logger.warning("IOP test option not yet implemented")
            elif (opt.startswith('I') or  # Marker to distinguish tests with same name - ignored
                  opt.startswith('M') or  # handled in create_newcase
                  opt.startswith('P') or  # handled in create_newcase
                  opt.startswith('N') or  # handled in create_newcase
                  opt.startswith('C') or  # handled in create_newcase
                  opt.startswith('V')):   # handled in create_newcase
                pass
            else:
                expect(False, "Could not parse option '{}' ".format(opt))

    envtest.write()

    lock_file("env_run.xml", caseroot=test_dir, newname="env_run.orig.xml")

    with Case(test_dir, read_only=False) as case:
        if self._output_root is None:
            self._output_root = case.get_value("CIME_OUTPUT_ROOT")
        # if we are running a single test we don't need sharedlibroot
        if len(self._tests) > 1 and self._cime_model != "e3sm":
            case.set_value("SHAREDLIBROOT",
                           os.path.join(self._output_root, "sharedlibroot.{}".format(self._test_id)))
        envtest.set_initial_values(case)
        case.set_value("TEST", True)
        case.set_value("SAVE_TIMING", self._save_timing)

        # Scale back build parallelism on systems with few cores
        if self._model_build_cost > self._proc_pool:
            case.set_value("GMAKE_J", self._proc_pool)
            self._model_build_cost = self._proc_pool

    # Fix: restored the (success, output) return that the sibling
    # implementation of this phase method has; falling off the end returned
    # None, which does not match the other phase methods' contract.
    return True, ""
def _create_newcase_phase(self, test):
###########################################################################
    """
    Build and run the create_newcase command line for the given test name.

    Grid, compset, machine, compiler, test mods and case options are all
    parsed out of the test name; scheduler-level settings (project, queue,
    walltime, mpilib, ...) are appended from self.  Returns the
    (success, output) result of running the command via
    _shell_cmd_for_phase, or (False, error) if the testmods are invalid.
    """
    test_dir = self._get_test_dir(test)
    _, case_opts, grid, compset,\
        machine, compiler, test_mods = parse_test_name(test)

    create_newcase_cmd = "{} --case {} --res {} --compset {}"\
                         " --test".format(os.path.join(self._cime_root, "scripts", "create_newcase"),
                                          test_dir, grid, compset)
    if machine is not None:
        create_newcase_cmd += " --machine {}".format(machine)
    if compiler is not None:
        create_newcase_cmd += " --compiler {}".format(compiler)
    if self._project is not None:
        create_newcase_cmd += " --project {} ".format(self._project)
    if self._output_root is not None:
        create_newcase_cmd += " --output-root {} ".format(self._output_root)
    if self._input_dir is not None:
        create_newcase_cmd += " --input-dir {} ".format(self._input_dir)
    if self._non_local:
        create_newcase_cmd += " --non-local"

    if self._pesfile is not None:
        create_newcase_cmd += " --pesfile {} ".format(self._pesfile)

    if test_mods is not None:
        files = Files(comp_interface=self._cime_driver)
        # testmods must be "<component>/<modspath>"; reject anything without
        # a component prefix.
        if test_mods.find('/') != -1:
            (component, modspath) = test_mods.split('/', 1)
        else:
            error = "Missing testmod component. Testmods are specified as '${component}-${testmod}'"
            self._log_output(test, error)
            return False, error

        # Pin COMP_ROOT_DIR_CPL to its unresolved value before looking up
        # TESTS_MODS_DIR (read-then-write of the same key).
        comp_root_dir_cpl = files.get_value("COMP_ROOT_DIR_CPL", resolved=False)
        files.set_value("COMP_ROOT_DIR_CPL", comp_root_dir_cpl)

        testmods_dir = files.get_value("TESTS_MODS_DIR", {"component": component})
        test_mod_file = os.path.join(testmods_dir, component, modspath)
        if not os.path.exists(test_mod_file):
            error = "Missing testmod file '{}'".format(test_mod_file)
            self._log_output(test, error)
            return False, error

        create_newcase_cmd += " --user-mods-dir {}".format(test_mod_file)

    mpilib = None
    ninst = 1
    ncpl = 1
    if case_opts is not None:
        for case_opt in case_opts:  # pylint: disable=not-an-iterable
            # Single-letter prefixed options encoded in the test name.
            if case_opt.startswith('M'):
                mpilib = case_opt[1:]
                create_newcase_cmd += " --mpilib {}".format(mpilib)
                logger.debug (" MPILIB set to {}".format(mpilib))
            elif case_opt.startswith('N'):
                expect(ncpl == 1,"Cannot combine _C and _N options")
                ninst = case_opt[1:]
                create_newcase_cmd += " --ninst {}".format(ninst)
                logger.debug (" NINST set to {}".format(ninst))
            elif case_opt.startswith('C'):
                expect(ninst == 1,"Cannot combine _C and _N options")
                ncpl = case_opt[1:]
                create_newcase_cmd += " --ninst {} --multi-driver" .format(ncpl)
                logger.debug (" NCPL set to {}" .format(ncpl))
            elif case_opt.startswith('P'):
                pesize = case_opt[1:]
                create_newcase_cmd += " --pecount {}".format(pesize)
            elif case_opt.startswith('V'):
                self._cime_driver = case_opt[1:]
                create_newcase_cmd += " --driver {}".format(self._cime_driver)

    # create_test mpilib option overrides default but not explicitly set case_opt mpilib
    if mpilib is None and self._mpilib is not None:
        create_newcase_cmd += " --mpilib {}".format(self._mpilib)
        logger.debug (" MPILIB set to {}".format(self._mpilib))

    if self._queue is not None:
        create_newcase_cmd += " --queue={}".format(self._queue)
    else:
        # We need to hard code the queue for this test on cheyenne
        # otherwise it runs in share and fails intermittently
        test_case = parse_test_name(test)[0]
        if test_case == "NODEFAIL":
            machine = machine if machine is not None else self._machobj.get_machine_name()
            if machine == "cheyenne":
                create_newcase_cmd += " --queue=regular"

    if self._walltime is not None:
        create_newcase_cmd += " --walltime {}".format(self._walltime)
    else:
        # model specific ways of setting time
        if self._cime_model == "e3sm":
            # E3SM: estimate walltime from prior baseline timings.
            recommended_time = _get_time_est(test, self._baseline_root)
            if recommended_time is not None:
                create_newcase_cmd += " --walltime {}".format(recommended_time)
        else:
            # Other models: take walltime from per-test "wallclock" option.
            if test in self._test_data and "options" in self._test_data[test] and \
                    "wallclock" in self._test_data[test]['options']:
                create_newcase_cmd += " --walltime {}".format(self._test_data[test]['options']['wallclock'])

    logger.debug("Calling create_newcase: " + create_newcase_cmd)
    return self._shell_cmd_for_phase(test, create_newcase_cmd, CREATE_NEWCASE_PHASE)
def bless_test_results(
    baseline_name,
    baseline_root,
    test_root,
    compiler,
    test_id=None,
    namelists_only=False,
    hist_only=False,
    report_only=False,
    force=False,
    bless_tests=None,
    no_skip_pass=False,
    new_test_root=None,
    new_test_id=None,
):
###############################################################################
    """
    Bless (accept as new baselines) namelist and/or history results for all
    matching tests under test_root.

    Only the most recent batch of matched tests is considered.  Returns True
    if every attempted bless succeeded, False otherwise; individual failures
    are collected and logged as warnings at the end.
    """
    test_status_files = get_test_status_files(test_root, compiler, test_id=test_id)

    # auto-adjust test-id if multiple rounds of tests were matched
    timestamps = set()
    for test_status_file in test_status_files:
        timestamp = os.path.basename(os.path.dirname(test_status_file)).split(".")[-1]
        timestamps.add(timestamp)

    if len(timestamps) > 1:
        logger.warning(
            "Multiple sets of tests were matched! Selected only most recent tests."
        )

    # Timestamps sort lexicographically, so the last one is the newest batch.
    most_recent = sorted(timestamps)[-1]
    logger.info("Matched test batch is {}".format(most_recent))

    broken_blesses = []
    for test_status_file in test_status_files:
        # Only consider results belonging to the most recent batch.
        if not most_recent in test_status_file:
            logger.info("Skipping {}".format(test_status_file))
            continue

        test_dir = os.path.dirname(test_status_file)
        ts = TestStatus(test_dir=test_dir)
        test_name = ts.get_name()
        # NOTE(review): parse_test_name is called before the test_name-is-None
        # guard below — confirm it tolerates None input.
        testopts = parse_test_name(test_name)[1]
        testopts = [] if testopts is None else testopts
        build_only = "B" in testopts

        if test_name is None:
            # TestStatus file had no name; fall back to the directory name.
            case_dir = os.path.basename(test_dir)
            test_name = CIME.utils.normalize_case_id(case_dir)
            if bless_tests in [[], None] or CIME.utils.match_any(
                test_name, bless_tests
            ):
                broken_blesses.append(
                    (
                        "unknown",
                        "test had invalid TestStatus file: '{}'".format(
                            test_status_file
                        ),
                    )
                )
                continue
            else:
                continue

        if bless_tests in [[], None] or CIME.utils.match_any(test_name, bless_tests):
            overall_result = ts.get_overall_test_status()[0]

            # See if we need to bless namelist
            if not hist_only:
                if no_skip_pass:
                    nl_bless = True
                else:
                    nl_bless = ts.get_status(NAMELIST_PHASE) != TEST_PASS_STATUS
            else:
                nl_bless = False

            # See if we need to bless baselines
            if not namelists_only and not build_only:
                run_result = ts.get_status(RUN_PHASE)
                if run_result is None:
                    broken_blesses.append((test_name, "no run phase"))
                    logger.warning(
                        "Test '{}' did not make it to run phase".format(test_name)
                    )
                    hist_bless = False
                elif run_result != TEST_PASS_STATUS:
                    broken_blesses.append((test_name, "test did not pass"))
                    logger.warning(
                        "Test '{}' did not pass, not safe to bless, test status = {}".format(
                            test_name, ts.phase_statuses_dump()
                        )
                    )
                    hist_bless = False
                elif no_skip_pass:
                    hist_bless = True
                else:
                    hist_bless = ts.get_status(BASELINE_PHASE) != TEST_PASS_STATUS
            else:
                hist_bless = False

            # Now, do the bless
            if not nl_bless and not hist_bless:
                logger.info(
                    "Nothing to bless for test: {}, overall status: {}".format(
                        test_name, overall_result
                    )
                )
            else:
                logger.info(
                    "###############################################################################"
                )
                logger.info(
                    "Blessing results for test: {}, most recent result: {}".format(
                        test_name, overall_result
                    )
                )
                logger.info("Case dir: {}".format(test_dir))
                logger.info(
                    "###############################################################################"
                )
                # Give the user a moment to abort unless forced.
                if not force:
                    time.sleep(2)

                with Case(test_dir) as case:
                    # Resolve baseline_name and baseline_root
                    if baseline_name is None:
                        baseline_name_resolved = case.get_value("BASELINE_NAME_CMP")
                        if not baseline_name_resolved:
                            baseline_name_resolved = CIME.utils.get_current_branch(
                                repo=CIME.utils.get_cime_root()
                            )
                    else:
                        baseline_name_resolved = baseline_name

                    if baseline_root is None:
                        baseline_root_resolved = case.get_value("BASELINE_ROOT")
                    else:
                        baseline_root_resolved = baseline_root

                    if baseline_name_resolved is None:
                        broken_blesses.append(
                            (test_name, "Could not determine baseline name")
                        )
                        continue

                    if baseline_root_resolved is None:
                        broken_blesses.append(
                            (test_name, "Could not determine baseline root")
                        )
                        continue

                    # Bless namelists
                    if nl_bless:
                        success, reason = bless_namelists(
                            test_name,
                            report_only,
                            force,
                            baseline_name_resolved,
                            baseline_root_resolved,
                            new_test_root=new_test_root,
                            new_test_id=new_test_id,
                        )
                        if not success:
                            broken_blesses.append((test_name, reason))

                    # Bless hist files
                    if hist_bless:
                        if "HOMME" in test_name:
                            success = False
                            reason = (
                                "HOMME tests cannot be blessed with bless_for_tests"
                            )
                        else:
                            success, reason = bless_history(
                                test_name,
                                case,
                                baseline_name_resolved,
                                baseline_root_resolved,
                                report_only,
                                force,
                            )

                        if not success:
                            broken_blesses.append((test_name, reason))

    # Make sure user knows that some tests were not blessed
    success = True
    for broken_bless, reason in broken_blesses:
        logger.warning(
            "FAILED TO BLESS TEST: {}, reason {}".format(broken_bless, reason)
        )
        success = False

    return success
def compare_test_results(
    baseline_name,
    baseline_root,
    test_root,
    compiler,
    test_id=None,
    compare_tests=None,
    namelists_only=False,
    hist_only=False,
):
    ###############################################################################
    """
    Compares with baselines for all matching tests

    Outputs results for each test to stdout (one line per test); possible status
    codes are: PASS, FAIL, SKIP. (A SKIP denotes a test that did not make it to
    the run phase or a test for which the run phase did not pass: we skip
    baseline comparisons in this case.)

    In addition, creates files named compare.log.BASELINE_NAME.TIMESTAMP in each
    test directory, which contain more detailed output. Also creates
    *.cprnc.out.BASELINE_NAME.TIMESTAMP files in each run directory.

    baseline_name / baseline_root may be None, in which case they are resolved
    per-case from BASELINE_NAME_CMP / BASELINE_ROOT (falling back to the current
    branch name for the baseline name).

    Returns True if all tests generated either PASS or SKIP results, False if
    there was at least one FAIL result.
    """
    test_status_files = get_test_status_files(test_root, compiler, test_id=test_id)

    # ID to use in the log file names, to avoid file name collisions with
    # earlier files that may exist.
    log_id = CIME.utils.get_timestamp()

    all_pass_or_skip = True

    for test_status_file in test_status_files:
        test_dir = os.path.dirname(test_status_file)
        ts = TestStatus(test_dir=test_dir)
        test_name = ts.get_name()
        testopts = parse_test_name(test_name)[1]
        testopts = [] if testopts is None else testopts
        # Build-only tests ("B" testopt) never have a run phase, so there is no
        # history to compare for them.
        build_only = "B" in testopts

        if compare_tests in [[], None] or CIME.utils.match_any(
            test_name, compare_tests
        ):

            if not hist_only:
                nl_compare_result = None
                nl_compare_comment = ""
                nl_result = ts.get_status(SETUP_PHASE)
                if nl_result is None:
                    nl_compare_result = "SKIP"
                    nl_compare_comment = "Test did not make it to setup phase"
                    nl_do_compare = False
                else:
                    nl_do_compare = True
            else:
                nl_do_compare = False

            detailed_comments = ""
            if not namelists_only and not build_only:
                compare_result = None
                compare_comment = ""
                run_result = ts.get_status(RUN_PHASE)
                if run_result is None:
                    compare_result = "SKIP"
                    compare_comment = "Test did not make it to run phase"
                    do_compare = False
                elif run_result != TEST_PASS_STATUS:
                    compare_result = "SKIP"
                    compare_comment = "Run phase did not pass"
                    do_compare = False
                else:
                    do_compare = True
            else:
                do_compare = False

            with Case(test_dir) as case:
                # Resolve the baseline name/root for THIS case without mutating
                # the function parameters: the previous code assigned back into
                # baseline_name/baseline_root, which made every subsequent test
                # silently reuse the first case's resolved values. This mirrors
                # the *_resolved pattern used by the bless code.
                if baseline_name is None:
                    baseline_name_resolved = case.get_value("BASELINE_NAME_CMP")
                    if not baseline_name_resolved:
                        baseline_name_resolved = CIME.utils.get_current_branch(
                            repo=CIME.utils.get_cime_root()
                        )
                else:
                    baseline_name_resolved = baseline_name

                if baseline_root is None:
                    baseline_root_resolved = case.get_value("BASELINE_ROOT")
                else:
                    baseline_root_resolved = baseline_root

                # Baseline names may contain "/" (branch names); flatten for the
                # log file name.
                logfile_name = "compare.log.{}.{}".format(
                    baseline_name_resolved.replace("/", "_"), log_id
                )

                append_status_cprnc_log(
                    "Comparing against baseline with compare_test_results:\n"
                    "Baseline: {}\n In baseline_root: {}".format(
                        baseline_name_resolved, baseline_root_resolved
                    ),
                    logfile_name,
                    test_dir,
                )

                if nl_do_compare or do_compare:
                    if nl_do_compare:
                        nl_success = compare_namelists(
                            case,
                            baseline_name_resolved,
                            baseline_root_resolved,
                            logfile_name,
                        )
                        if nl_success:
                            nl_compare_result = TEST_PASS_STATUS
                            nl_compare_comment = ""
                        else:
                            nl_compare_result = TEST_FAIL_STATUS
                            nl_compare_comment = "See {}/{}".format(
                                test_dir, logfile_name
                            )
                            all_pass_or_skip = False

                    if do_compare:
                        success, detailed_comments = compare_history(
                            case,
                            baseline_name_resolved,
                            baseline_root_resolved,
                            log_id,
                        )
                        if success:
                            compare_result = TEST_PASS_STATUS
                        else:
                            compare_result = TEST_FAIL_STATUS
                            all_pass_or_skip = False

                        compare_comment = get_ts_synopsis(detailed_comments)

            brief_result = ""
            if not hist_only:
                brief_result += "{} {} {} {}\n".format(
                    nl_compare_result, test_name, NAMELIST_PHASE, nl_compare_comment
                )

            # Guard on build_only as well: for build-only tests compare_result
            # was never assigned above; referencing it here used to raise
            # UnboundLocalError (or print a stale value from a prior iteration).
            if not namelists_only and not build_only:
                brief_result += "{} {} {}".format(
                    compare_result, test_name, BASELINE_PHASE
                )
                if compare_comment:
                    brief_result += " {}".format(compare_comment)
                brief_result += "\n"

            print(brief_result)

            append_status_cprnc_log(brief_result, logfile_name, test_dir)

            if detailed_comments:
                append_status_cprnc_log(
                    "Detailed comments:\n" + detailed_comments,
                    logfile_name,
                    test_dir,
                )

    return all_pass_or_skip
def _case_setup_impl(case, caseroot, casebaseid, clean=False, test_mode=False, reset=False):
###############################################################################
    """
    Implementation of case.setup: configure a case directory for building/running.

    With clean/reset, backs up and removes previously generated scripts
    (case.run, case.test, case.testdriver) into a timestamped PESetupHist
    backup dir. Without clean, generates Macros, validates NINST/NTASKS,
    computes TOTALPES/COST_PES, writes batch scripts, locks env_mach_pes.xml,
    creates user_nl_* files, applies user mods, previews namelists and records
    the software environment.

    case        - the Case object being set up (also iterated for its xml vars)
    caseroot    - the case directory; we chdir() there immediately
    casebaseid  - base test name; parsed for testmods when TEST is set
    clean       - back up and remove generated scripts; skip the setup phase
    test_mode   - when cleaning, do NOT touch test scripts or BUILD_COMPLETE
    reset       - perform the clean step, then (unlike clean) re-run setup

    Note: reset implies the clean branch runs AND, because clean is False,
    the setup branch runs afterwards as well.
    """
    os.chdir(caseroot)
    msg = "case.setup starting"
    append_status(msg, caseroot=caseroot, sfile="CaseStatus")

    cimeroot = get_cime_root(case)

    # Check that $DIN_LOC_ROOT exists - and abort if not a namelist compare tests
    din_loc_root = case.get_value("DIN_LOC_ROOT")
    testcase = case.get_value("TESTCASE")
    expect(not (not os.path.isdir(din_loc_root) and testcase != "SBN"),
           "inputdata root is not a directory: \"$din_loc_root\" ")

    # Check that userdefine settings are specified before expanding variable
    for vid, value in case:
        expect(not (type(value) is str and "USERDEFINED_required_build" in value),
               "Parameter '%s' must be defined" % vid)

    # Create batch script
    if reset or clean:
        # Clean batch script
        backup_dir = "PESetupHist/b.%s" % time.strftime("%y%m%d-%H%M%S")
        if not os.path.isdir(backup_dir):
            os.makedirs(backup_dir)

        # back up relevant files
        for fileglob in ["case.run", "env_build.xml", "env_mach_pes.xml", "Macros*"]:
            for filename in glob.glob(fileglob):
                shutil.copy(filename, backup_dir)

        if os.path.exists("case.run"):
            os.remove("case.run")

        # only do the following if are NOT in testmode
        if not test_mode:
            # rebuild the models (even on restart)
            case.set_value("BUILD_COMPLETE", False)

            # backup and then clean test script
            if os.path.exists("case.test"):
                shutil.copy("case.test", backup_dir)
                os.remove("case.test")
                logger.info("Successfully cleaned test script case.test")

            if os.path.exists("case.testdriver"):
                shutil.copy("case.testdriver", backup_dir)
                os.remove("case.testdriver")
                logger.info("Successfully cleaned test script case.testdriver")

        # (this message used to be logged twice in a row; duplicate removed)
        logger.info("Successfully cleaned batch script case.run")
        logger.info("Some files have been saved to %s" % backup_dir)

        msg = "case.setup clean complete"
        append_status(msg, caseroot=caseroot, sfile="CaseStatus")

    if not clean:
        models = case.get_values("COMP_CLASSES")
        mach, compiler, debug, mpilib = \
            case.get_value("MACH"), case.get_value("COMPILER"), case.get_value("DEBUG"), case.get_value("MPILIB")
        expect(mach is not None, "xml variable MACH is not set")

        # Create Macros file only if it does not exist
        if not os.path.exists("Macros"):
            logger.debug("Creating Macros file for %s" % mach)
            compilers = Compilers(compiler=compiler, machine=mach, os_=case.get_value("OS"), mpilib=mpilib)
            compilers.write_macros_file()
        else:
            logger.debug("Macros script already created ...skipping")

        # Set tasks to 1 if mpi-serial library
        if mpilib == "mpi-serial":
            for vid, value in case:
                if vid.startswith("NTASKS_") and value != 1:
                    case.set_value(vid, 1)

        # Check ninst.
        # In CIME there can be multiple instances of each component model (an ensemble) NINST is the instance of that component.
        # Save ninst in a dict to use later in apply_user_mods
        ninst = dict()
        for comp in models:
            if comp == "DRV":
                continue
            comp_model = case.get_value("COMP_%s" % comp)
            ninst[comp_model] = case.get_value("NINST_%s" % comp)
            ntasks = case.get_value("NTASKS_%s" % comp)
            if ninst[comp_model] > ntasks:
                if ntasks == 1:
                    # grow the task count to match the instance count
                    case.set_value("NTASKS_%s" % comp, ninst[comp_model])
                else:
                    expect(False, "NINST_%s value %d greater than NTASKS_%s %d" % (comp, ninst[comp_model], comp, ntasks))

        if os.path.exists("case.run"):
            logger.info("Machine/Decomp/Pes configuration has already been done ...skipping")
        else:
            _check_pelayouts_require_rebuild(case, models)

            if os.path.exists("LockedFiles/env_build.xml"):
                os.remove("LockedFiles/env_build.xml")

            case.flush()
            check_lockedfiles()

            env_mach_pes = case.get_env("mach_pes")
            pestot = env_mach_pes.get_total_tasks(models)
            logger.debug("at update TOTALPES = %s"%pestot)
            case.set_value("TOTALPES", pestot)
            thread_count = env_mach_pes.get_max_thread_count(models)
            if thread_count > 1:
                case.set_value("BUILD_THREADED", True)

            expect(not (case.get_value("BUILD_THREADED") and compiler == "nag"),
                   "it is not possible to run with OpenMP if using the NAG Fortran compiler")

            cost_pes = env_mach_pes.get_cost_pes(pestot, thread_count, machine=case.get_value("MACH"))
            case.set_value("COST_PES", cost_pes)

            # create batch file
            logger.info("Creating batch script case.run")

            # Use BatchFactory to get the appropriate instance of a BatchMaker,
            # use it to create our batch scripts
            env_batch = case.get_env("batch")
            num_nodes = env_mach_pes.get_total_nodes(pestot, thread_count)
            tasks_per_node = env_mach_pes.get_tasks_per_node(pestot, thread_count)
            for job in env_batch.get_jobs():
                input_batch_script = os.path.join(case.get_value("MACHDIR"), env_batch.get_value('template', subgroup=job))
                if job == "case.test" and testcase is not None and not test_mode:
                    logger.info("Writing %s script" % job)
                    testscript = os.path.join(cimeroot, "scripts", "Testing", "Testcases", "%s_script" % testcase)
                    # Short term fix to be removed when csh tests are removed
                    if not os.path.exists(testscript):
                        env_batch.make_batch_script(input_batch_script, job, case, pestot, tasks_per_node, num_nodes, thread_count)
                elif job != "case.test":
                    logger.info("Writing %s script from input template %s" % (job, input_batch_script))
                    env_batch.make_batch_script(input_batch_script, job, case, pestot, tasks_per_node, num_nodes, thread_count)

            # Make a copy of env_mach_pes.xml in order to be able
            # to check that it does not change once case.setup is invoked
            logger.info("Locking file env_mach_pes.xml")
            case.flush()
            logger.debug("at copy TOTALPES = %s"%case.get_value("TOTALPES"))
            shutil.copy("env_mach_pes.xml", "LockedFiles")

        # Create user_nl files for the required number of instances
        if not os.path.exists("user_nl_cpl"):
            logger.info("Creating user_nl_xxx files for components and cpl")
        # loop over models
        for model in models:
            comp = case.get_value("COMP_%s" % model)
            logger.info("Building %s usernl files"%model)
            _build_usernl_files(case, model, comp)
            if comp == "cism":
                run_cmd_no_fail("%s/../components/cism/cime_config/cism.template %s" % (cimeroot, caseroot))

        _build_usernl_files(case, "drv", "cpl")

        user_mods_path = case.get_value("USER_MODS_FULLPATH")
        if user_mods_path is not None:
            apply_user_mods(caseroot, user_mods_path=user_mods_path, ninst=ninst)
        elif case.get_value("TEST"):
            # for tests, testmods come from the parsed test name
            test_mods = parse_test_name(casebaseid)[6]
            if test_mods is not None:
                user_mods_path = os.path.join(case.get_value("TESTS_MODS_DIR"), test_mods)
                apply_user_mods(caseroot, user_mods_path=user_mods_path, ninst=ninst)

        # Run preview namelists for scripts
        logger.info("preview_namelists")
        preview_namelists(case)

        logger.info("See ./CaseDoc for component namelists")
        logger.info("If an old case build already exists, might want to run \'case.build --clean\' before building")

        # Create test script if appropriate
        # Short term fix to be removed when csh tests are removed
        if os.path.exists("env_test.xml"):
            if not os.path.exists("case.test"):
                logger.info("Starting testcase.setup")
                run_cmd_no_fail("./testcase.setup -caseroot %s" % caseroot)
                logger.info("Finished testcase.setup")

        msg = "case.setup complete"
        append_status(msg, caseroot=caseroot, sfile="CaseStatus")

        # Record env information
        env_module = case.get_env("mach_specific")
        env_module.make_env_mach_specific_file(compiler, debug, mpilib, "sh")
        env_module.make_env_mach_specific_file(compiler, debug, mpilib, "csh")
        with open("software_environment.txt", "w") as f:
            f.write(env_module.list_modules())
        run_cmd_no_fail("echo -e '\n' >> software_environment.txt && \
env >> software_environment.txt")