def parse_command_line(args, description):
###############################################################################
    parser = argparse.ArgumentParser(
        description=description, formatter_class=RawTextHelpFormatter
    )

    model = CIME.utils.get_model()

    CIME.utils.setup_standard_logging_options(parser)

    config = get_cime_config()

    parser.add_argument(
        "--no-run", action="store_true", help="Do not run generated tests"
    )

    parser.add_argument(
        "--no-build",
        action="store_true",
        help="Do not build generated tests, implies --no-run",
    )

    parser.add_argument(
        "--no-setup",
        action="store_true",
        help="Do not setup generated tests, implies --no-build and --no-run",
    )

    parser.add_argument(
        "-u",
        "--use-existing",
        action="store_true",
        help="Use pre-existing case directories; they will pick up at the "
        "\nlatest PEND state or re-run the first failed state. Requires test-id",
    )

    default = get_default_setting(config, "SAVE_TIMING", False, check_main=False)

    parser.add_argument(
        "--save-timing",
        action="store_true",
        default=default,
        help="Enable archiving of performance data.",
    )

    parser.add_argument(
        "--no-batch",
        action="store_true",
        help="Do not submit jobs to batch system, run locally."
        "\nIf false, this will default to machine setting.",
    )

    parser.add_argument(
        "--single-exe",
        action="store_true",
        default=False,
        help="Use a single build for all cases. This can "
        "\ndrastically improve test throughput but is currently use-at-your-own-risk."
        "\nIt's up to the user to ensure that all cases are build-compatible."
        "\nE3SM tests belonging to a suite with share enabled will always share exes.",
    )

    default = get_default_setting(config, "SINGLE_SUBMIT", False, check_main=False)

    parser.add_argument(
        "--single-submit",
        action="store_true",
        default=default,
        help="Use a single interactive allocation to run all the tests. This can "
        "\ndrastically reduce queue waiting but only makes sense on batch machines.",
    )

    default = get_default_setting(config, "TEST_ROOT", None, check_main=False)

    parser.add_argument(
        "-r",
        "--test-root",
        default=default,
        help="Where test cases will be created. The default is output root"
        "\nas defined in the config_machines file",
    )

    default = get_default_setting(config, "OUTPUT_ROOT", None, check_main=False)

    parser.add_argument(
        "--output-root", default=default, help="Where the case output is written."
    )

    default = get_default_setting(config, "BASELINE_ROOT", None, check_main=False)

    parser.add_argument(
        "--baseline-root",
        default=default,
        help="Specifies a root directory for baseline datasets that will "
        "\nbe used for bit-for-bit generate and/or compare testing.",
    )

    default = get_default_setting(config, "CLEAN", False, check_main=False)

    parser.add_argument(
        "--clean",
        action="store_true",
        default=default,
        help="Specifies if tests should be cleaned after run. If set, all object"
        "\nexecutables and data files will be removed after the tests are run.",
    )

    default = get_default_setting(config, "MACHINE", None, check_main=True)

    parser.add_argument(
        "-m",
        "--machine",
        default=default,
        help="The machine for creating and building tests. This machine must be defined"
        "\nin the config_machines.xml file for the given model. The default is to "
        "\nmatch the name of the machine in the test name or the name of the "
        "\nmachine this script is run on to the NODENAME_REGEX field in "
        "\nconfig_machines.xml. WARNING: This option is highly unsafe and should "
        "\nonly be used if you are an expert.",
    )

    default = get_default_setting(config, "MPILIB", None, check_main=True)

    parser.add_argument(
        "--mpilib",
        default=default,
        help="Specify the mpilib. To see list of supported MPI libraries for each machine, "
        "\ninvoke ./query_config. The default is the first listing.",
    )

    if model in ["cesm", "ufs"]:
        parser.add_argument(
            "-c",
            "--compare",
            help="While testing, compare baselines against the given compare directory.",
        )

        parser.add_argument(
            "-g",
            "--generate",
            help="While testing, generate baselines in the given generate directory. "
            "\nNOTE: this can also be done after the fact with bless_test_results",
        )

        parser.add_argument(
            "--xml-machine",
            help="Use this machine key in the lookup in testlist.xml. "
            "\nThe default is all if any --xml- argument is used.",
        )

        parser.add_argument(
            "--xml-compiler",
            help="Use this compiler key in the lookup in testlist.xml. "
            "\nThe default is all if any --xml- argument is used.",
        )

        parser.add_argument(
            "--xml-category",
            help="Use this category key in the lookup in testlist.xml. "
            "\nThe default is all if any --xml- argument is used.",
        )

        parser.add_argument(
            "--xml-testlist",
            help="Use this testlist to lookup tests. The default is specified in config_files.xml",
        )

        parser.add_argument(
            "--xml-driver",
            choices=("mct", "nuopc", "moab"),
            help="Override driver specified in tests and use this one.",
        )

        parser.add_argument(
            "testargs",
            nargs="*",
            help="Tests to run. Testname form is TEST.GRID.COMPSET[.MACHINE_COMPILER]",
        )

    else:
        parser.add_argument(
            "testargs",
            nargs="+",
            help="Tests or test suites to run."
            " Testname form is TEST.GRID.COMPSET[.MACHINE_COMPILER]",
        )

        parser.add_argument(
            "-b",
            "--baseline-name",
            help="If comparing or generating baselines, use this directory under baseline root. "
            "\nDefault will be current branch name.",
        )

        parser.add_argument(
            "-c",
            "--compare",
            action="store_true",
            help="While testing, compare baselines",
        )

        parser.add_argument(
            "-g",
            "--generate",
            action="store_true",
            help="While testing, generate baselines. "
            "\nNOTE: this can also be done after the fact with bless_test_results",
        )

    default = get_default_setting(config, "COMPILER", None, check_main=True)

    parser.add_argument(
        "--compiler",
        default=default,
        help="Compiler for building cime. Default will be the name in the "
        "\nTestname or the default defined for the machine.",
    )

    parser.add_argument(
        "-n",
        "--namelists-only",
        action="store_true",
        help="Only perform namelist actions for tests",
    )

    parser.add_argument(
        "-p",
        "--project",
        help="Specify a project id for the case (optional)."
        "\nUsed for accounting and directory permissions when on a batch system."
        "\nThe default is user or machine specified by PROJECT."
        "\nAccounting (only) may be overridden by user or machine specified CHARGE_ACCOUNT.",
    )

    parser.add_argument(
        "-t",
        "--test-id",
        help="Specify an 'id' for the test. This is simply a string that is appended "
        "\nto the end of a test name. If no test-id is specified, a time stamp plus a "
        "\nrandom string will be used (ensuring a high probability of uniqueness). "
        "\nIf a test-id is specified, it is the user's responsibility to ensure that "
        "\neach run of create_test uses a unique test-id. WARNING: problems will occur "
        "\nif you use the same test-id twice on the same file system, even if the test "
        "\nlists are completely different.",
    )

    default = get_default_setting(config, "PARALLEL_JOBS", None, check_main=False)

    parser.add_argument(
        "-j",
        "--parallel-jobs",
        type=int,
        default=default,
        help="Number of tasks create_test should perform simultaneously. The default "
        "\nis min(num_cores, num_tests).",
    )

    default = get_default_setting(config, "PROC_POOL", None, check_main=False)

    parser.add_argument(
        "--proc-pool",
        type=int,
        default=default,
        help="The size of the processor pool that create_test can use. The default is "
        "\nMAX_MPITASKS_PER_NODE + 25 percent.",
    )

    default = os.getenv("CIME_GLOBAL_WALLTIME")
    if default is None:
        default = get_default_setting(config, "WALLTIME", None, check_main=True)

    parser.add_argument(
        "--walltime",
        default=default,
        help="Set the wallclock limit for all tests in the suite. "
        "\nUse the variable CIME_GLOBAL_WALLTIME to set this for all tests.",
    )

    default = get_default_setting(config, "JOB_QUEUE", None, check_main=True)

    parser.add_argument(
        "-q",
        "--queue",
        default=default,
        help="Force batch system to use a certain queue",
    )

    parser.add_argument(
        "-f", "--testfile", help="A file containing an ascii list of tests to run"
    )

    default = get_default_setting(
        config, "ALLOW_BASELINE_OVERWRITE", False, check_main=False
    )

    parser.add_argument(
        "-o",
        "--allow-baseline-overwrite",
        action="store_true",
        default=default,
        help="By default, if the --generate option is given, an attempt to overwrite "
        "\nan existing baseline directory will raise an error. WARNING: Specifying this "
        "\noption will allow existing baseline directories to be silently overwritten.",
    )

    default = get_default_setting(config, "WAIT", False, check_main=False)

    parser.add_argument(
        "--wait",
        action="store_true",
        default=default,
        help="On batch systems, wait for submitted jobs to complete",
    )

    default = get_default_setting(config, "ALLOW_PNL", False, check_main=False)

    parser.add_argument(
        "--allow-pnl",
        action="store_true",
        default=default,
        help="Do not pass skip-pnl to case.submit",
    )

    parser.add_argument(
        "--check-throughput",
        action="store_true",
        help="Fail if throughput check fails. Requires --wait on batch systems",
    )

    parser.add_argument(
        "--check-memory",
        action="store_true",
        help="Fail if memory check fails. Requires --wait on batch systems",
    )

    parser.add_argument(
        "--ignore-namelists",
        action="store_true",
        help="Do not fail if there are namelist diffs",
    )

    parser.add_argument(
        "--ignore-memleak", action="store_true", help="Do not fail if there's a memleak"
    )

    default = get_default_setting(config, "FORCE_PROCS", None, check_main=False)

    parser.add_argument(
        "--force-procs",
        type=int,
        default=default,
        help="Force all tests to run with this number of processors",
    )

    default = get_default_setting(config, "FORCE_THREADS", None, check_main=False)

    parser.add_argument(
        "--force-threads",
        type=int,
        default=default,
        help="Force all tests to run with this number of threads",
    )

    default = get_default_setting(config, "INPUT_DIR", None, check_main=True)

    parser.add_argument(
        "-i",
        "--input-dir",
        default=default,
        help="Use a non-default location for input files",
    )

    default = get_default_setting(config, "PESFILE", None, check_main=True)

    parser.add_argument(
        "--pesfile",
        default=default,
        help="Full pathname of an optional pes specification file. The file"
        "\ncan follow either the config_pes.xml or the env_mach_pes.xml format.",
    )

    default = get_default_setting(config, "RETRY", 0, check_main=False)

    parser.add_argument(
        "--retry",
        type=int,
        default=default,
        help="Automatically retry failed tests. >0 implies --wait",
    )

    parser.add_argument(
        "-N",
        "--non-local",
        action="store_true",
        help="Use when you've requested a machine that you aren't on. "
        "Will reduce errors for missing directories etc.",
    )

    if config and config.has_option("main", "workflow"):
        workflow_default = config.get("main", "workflow")
    else:
        workflow_default = "default"

    parser.add_argument(
        "--workflow",
        default=workflow_default,
        help="A workflow from config_workflow.xml to apply to this case.",
    )

    parser.add_argument(
        "--chksum", action="store_true", help="Verifies input data checksums."
    )

    srcroot_default = utils.get_src_root()

    parser.add_argument(
        "--srcroot",
        default=srcroot_default,
        help="Alternative pathname for source root directory. "
        f"The default is {srcroot_default}",
    )

    CIME.utils.add_mail_type_args(parser)

    args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser)

    CIME.utils.resolve_mail_type_args(args)

    # generate and compare flags may not point to the same directory
    if model in ["cesm", "ufs"]:
        if args.generate is not None:
            expect(
                not (args.generate == args.compare),
                "Cannot generate and compare baselines at the same time",
            )

        if args.xml_testlist is not None:
            expect(
                not (
                    args.xml_machine is None
                    and args.xml_compiler is None
                    and args.xml_category is None
                ),
                "If an xml-testlist is present at least one of --xml-machine, "
                "--xml-compiler, --xml-category must also be present",
            )

    else:
        expect(
            not (
                args.baseline_name is not None
                and (not args.compare and not args.generate)
            ),
            "Provided baseline name but did not specify compare or generate",
        )
        expect(
            not (args.compare and args.generate),
            "Tried to compare and generate at same time",
        )

    expect(
        not (args.namelists_only and not (args.generate or args.compare)),
        "Must provide either --compare or --generate with --namelists-only",
    )

    if args.retry > 0:
        args.wait = True

    if args.parallel_jobs is not None:
        expect(
            args.parallel_jobs > 0,
            "Invalid value for parallel_jobs: %d" % args.parallel_jobs,
        )

    if args.use_existing:
        expect(args.test_id is not None, "Must provide test-id of pre-existing cases")

    if args.no_setup:
        args.no_build = True

    if args.no_build:
        args.no_run = True

    # Namelist-only forces some other options:
    if args.namelists_only:
        expect(not args.no_setup, "Cannot compare namelists without setup")
        args.no_build = True
        args.no_run = True
        args.no_batch = True

    expect(
        not (args.non_local and not args.no_build), "Cannot build on non-local machine"
    )

    if args.single_submit:
        expect(
            not args.no_run,
            "Doesn't make sense to request single-submit if no-run is on",
        )
        args.no_build = True
        args.no_run = True
        args.no_batch = True

    if args.test_id is None:
        args.test_id = "%s_%s" % (CIME.utils.get_timestamp(), CIME.utils.id_generator())
    else:
        expect(
            CIME.utils.check_name(args.test_id, additional_chars="."),
            "invalid test-id argument provided",
        )

    if args.testfile is not None:
        with open(args.testfile, "r") as fd:
            args.testargs.extend(
                [
                    line.strip()
                    for line in fd.read().splitlines()
                    if line.strip() and not line.startswith("#")
                ]
            )

    # Propagate `srcroot` to `GenericXML` to resolve $SRCROOT
    # See call to `Machines` below
    utils.GLOBAL["SRCROOT"] = args.srcroot

    # Compute list of fully-resolved test_names
    test_extra_data = {}
    if model in ["cesm", "ufs"]:
        machine_name = args.xml_machine if args.machine is None else args.machine

        # If it's still unclear what machine to use, look at test names
        if machine_name is None:
            for test in args.testargs:
                testsplit = CIME.utils.parse_test_name(test)
                if testsplit[4] is not None:
                    if machine_name is None:
                        machine_name = testsplit[4]
                    else:
                        expect(
                            machine_name == testsplit[4],
                            "ambiguity in machine, please use the --machine option",
                        )

        mach_obj = Machines(machine=machine_name)
        if args.testargs:
            args.compiler = (
                mach_obj.get_default_compiler()
                if args.compiler is None
                else args.compiler
            )
            test_names = get_tests.get_full_test_names(
                args.testargs, mach_obj.get_machine_name(), args.compiler
            )
        else:
            expect(
                not (
                    args.xml_machine is None
                    and args.xml_compiler is None
                    and args.xml_category is None
                    and args.xml_testlist is None
                ),
                "At least one of --xml-machine, --xml-testlist, "
                "--xml-compiler, --xml-category or a valid test name must be provided.",
            )

            test_data = get_tests_from_xml(
                xml_machine=args.xml_machine,
                xml_category=args.xml_category,
                xml_compiler=args.xml_compiler,
                xml_testlist=args.xml_testlist,
                machine=machine_name,
                compiler=args.compiler,
                driver=args.xml_driver,
            )
            test_names = [item["name"] for item in test_data]
            for test_datum in test_data:
                test_extra_data[test_datum["name"]] = test_datum

        logger.info("Testnames: %s" % test_names)
    else:
        if args.machine is None:
            args.machine = get_tests.infer_machine_name_from_tests(args.testargs)

        mach_obj = Machines(machine=args.machine)
        args.compiler = (
            mach_obj.get_default_compiler() if args.compiler is None else args.compiler
        )

        test_names = get_tests.get_full_test_names(
            args.testargs, mach_obj.get_machine_name(), args.compiler
        )

    expect(
        mach_obj.is_valid_compiler(args.compiler),
        "Compiler %s not valid for machine %s"
        % (args.compiler, mach_obj.get_machine_name()),
    )

    if not args.wait and mach_obj.has_batch_system() and not args.no_batch:
        expect(
            not args.check_throughput,
            "Makes no sense to use --check-throughput without --wait",
        )
        expect(
            not args.check_memory, "Makes no sense to use --check-memory without --wait"
        )

    # Normalize compare/generate between the models
    baseline_cmp_name = None
    baseline_gen_name = None
    if args.compare or args.generate:
        if model in ["cesm", "ufs"]:
            if args.compare is not None:
                baseline_cmp_name = args.compare
            if args.generate is not None:
                baseline_gen_name = args.generate
        else:
            baseline_name = (
                args.baseline_name
                if args.baseline_name
                else CIME.utils.get_current_branch(repo=CIME.utils.get_cime_root())
            )
            expect(
                baseline_name is not None,
                "Could not determine baseline name from branch, please use -b option",
            )
            if args.compare:
                baseline_cmp_name = baseline_name
            elif args.generate:
                baseline_gen_name = baseline_name

    if args.input_dir is not None:
        args.input_dir = os.path.abspath(args.input_dir)

    # sanity check
    for name in test_names:
        dot_count = name.count(".")
        expect(dot_count > 1 and dot_count <= 4, "Invalid test name, '{}'".format(name))

    # for e3sm, sort by walltime
    if model == "e3sm":
        if args.walltime is None:
            # Longest tests should run first
            test_names.sort(key=get_tests.key_test_time, reverse=True)
        else:
            test_names.sort()

    return (
        test_names,
        test_extra_data,
        args.compiler,
        mach_obj.get_machine_name(),
        args.no_run,
        args.no_build,
        args.no_setup,
        args.no_batch,
        args.test_root,
        args.baseline_root,
        args.clean,
        baseline_cmp_name,
        baseline_gen_name,
        args.namelists_only,
        args.project,
        args.test_id,
        args.parallel_jobs,
        args.walltime,
        args.single_submit,
        args.proc_pool,
        args.use_existing,
        args.save_timing,
        args.queue,
        args.allow_baseline_overwrite,
        args.output_root,
        args.wait,
        args.force_procs,
        args.force_threads,
        args.mpilib,
        args.input_dir,
        args.pesfile,
        args.retry,
        args.mail_user,
        args.mail_type,
        args.check_throughput,
        args.check_memory,
        args.ignore_namelists,
        args.ignore_memleak,
        args.allow_pnl,
        args.non_local,
        args.single_exe,
        args.workflow,
        args.chksum,
    )
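# Hedged usage sketch (not part of the original source): parse_command_line
# returns one large positional tuple that the create_test driver unpacks in the
# same order as the return statement above. The argv below is illustrative only.
#
#   (test_names, test_extra_data, compiler, machine_name,
#    *remaining_settings) = parse_command_line(
#       ["SMS.f19_g16.A", "--no-run"], "create_test driver")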
def single_submit_impl(
    machine_name, test_id, proc_pool, _, args, job_cost_map, wall_time, test_root
):
###############################################################################
    mach = Machines(machine=machine_name)
    expect(
        mach.has_batch_system(),
        "Single submit does not make sense on non-batch machine '%s'"
        % mach.get_machine_name(),
    )

    machine_name = mach.get_machine_name()

    #
    # Compute arg list for second call to create_test
    #
    new_args = list(args)
    new_args.remove("--single-submit")
    new_args.append("--no-batch")
    new_args.append("--use-existing")
    no_arg_is_a_test_id_arg = True
    no_arg_is_a_proc_pool_arg = True
    no_arg_is_a_machine_arg = True
    for arg in new_args:
        if arg == "-t" or arg.startswith("--test-id"):
            no_arg_is_a_test_id_arg = False
        elif arg.startswith("--proc-pool"):
            no_arg_is_a_proc_pool_arg = False
        elif arg == "-m" or arg.startswith("--machine"):
            # A machine arg is already present, so do not append one below
            no_arg_is_a_machine_arg = False

    if no_arg_is_a_test_id_arg:
        new_args.append("-t %s" % test_id)
    if no_arg_is_a_proc_pool_arg:
        new_args.append("--proc-pool %d" % proc_pool)
    if no_arg_is_a_machine_arg:
        new_args.append("-m %s" % machine_name)

    #
    # Resolve batch directives manually. There is currently no other way
    # to do this without making a Case object. Make a throwaway case object
    # to help us here.
    #
    testcase_dirs = glob.glob("%s/*%s*/TestStatus" % (test_root, test_id))
    expect(testcase_dirs, "No test case dirs found!?")
    first_case = os.path.abspath(os.path.dirname(testcase_dirs[0]))
    with Case(first_case, read_only=False) as case:
        env_batch = case.get_env("batch")
        submit_cmd = env_batch.get_value("batch_submit", subgroup=None)
        submit_args = env_batch.get_submit_args(case, "case.test")

        tasks_per_node = mach.get_value("MAX_MPITASKS_PER_NODE")
        num_nodes = int(math.ceil(float(proc_pool) / tasks_per_node))
        if wall_time is None:
            wall_time = compute_total_time(job_cost_map, proc_pool)
            wall_time_bab = convert_to_babylonian_time(int(wall_time))
        else:
            wall_time_bab = wall_time

        queue = env_batch.select_best_queue(
            num_nodes, proc_pool, walltime=wall_time_bab
        )
        wall_time_max_bab = env_batch.get_queue_specs(queue)[3]
        if wall_time_max_bab is not None:
            wall_time_max = convert_to_seconds(wall_time_max_bab)
            if wall_time_max < wall_time:
                wall_time = wall_time_max
                wall_time_bab = convert_to_babylonian_time(wall_time)

        overrides = {
            "job_id": "create_test_single_submit_%s" % test_id,
            "num_nodes": num_nodes,
            "tasks_per_node": tasks_per_node,
            "totaltasks": tasks_per_node * num_nodes,
            "job_wallclock_time": wall_time_bab,
            "job_queue": env_batch.text(queue),
        }
        directives = env_batch.get_batch_directives(
            case, "case.test", overrides=overrides
        )

    #
    # Make simple submit script and submit
    #
    script = "#! /bin/bash\n"
    script += "\n%s" % directives
    script += "\n"
    script += "cd %s\n" % os.getcwd()
    script += "%s %s\n" % (__file__, " ".join(new_args))

    submit_cmd = "%s %s" % (submit_cmd, submit_args)
    logger.info("Script:\n%s" % script)

    run_cmd_no_fail(
        submit_cmd, input_str=script, arg_stdout=None, arg_stderr=None, verbose=True
    )
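# Hedged illustration (assuming the CIME.utils helpers behave as in mainline
# CIME): convert_to_babylonian_time() renders seconds as "HH:MM:SS" and
# convert_to_seconds() inverts it, which is how the queue's wallclock cap is
# compared against the estimated total time above.
#
#   from CIME.utils import convert_to_babylonian_time, convert_to_seconds
#   assert convert_to_babylonian_time(3661) == "01:01:01"
#   assert convert_to_seconds("01:01:01") == 3661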
def _compare_baseline(self):
    with self._test_status as ts:
        ts.set_status(
            CIME.test_status.BASELINE_PHASE, CIME.test_status.TEST_FAIL_STATUS
        )

        run_dir = self._case.get_value("RUNDIR")
        case_name = self._case.get_value("CASE")
        base_dir = os.path.join(
            self._case.get_value("BASELINE_ROOT"),
            self._case.get_value("BASECMP_CASE"),
        )

        test_name = "{}".format(case_name.split(".")[-1])
        evv_config = {
            test_name: {
                "module": os.path.join(evv_lib_dir, "extensions", "tsc.py"),
                "test-case": case_name,
                "test-dir": run_dir,
                "ref-case": "Baseline",
                "ref-dir": base_dir,
                "time-slice": [OUT_FREQ, SIM_LENGTH],
                "inspect-times": INSPECT_AT,
                "variables": VAR_LIST,
                "p-threshold": P_THRESHOLD,
                "component": self.atmmod,
            }
        }

        json_file = os.path.join(run_dir, ".".join([case_name, "json"]))
        with open(json_file, "w") as config_file:
            json.dump(evv_config, config_file, indent=4)

        evv_out_dir = os.path.join(run_dir, ".".join([case_name, "evv"]))
        evv(["-e", json_file, "-o", evv_out_dir])

        with open(os.path.join(evv_out_dir, "index.json"), "r") as evv_f:
            evv_status = json.load(evv_f)

        comments = ""
        for evv_ele in evv_status["Page"]["elements"]:
            if "Table" in evv_ele:
                comments = "; ".join(
                    "{}: {}".format(key, val[0])
                    for key, val in evv_ele["Table"]["data"].items()
                )
                if evv_ele["Table"]["data"]["Test status"][0].lower() == "pass":
                    self._test_status.set_status(
                        CIME.test_status.BASELINE_PHASE,
                        CIME.test_status.TEST_PASS_STATUS,
                    )
                break

        status = self._test_status.get_status(CIME.test_status.BASELINE_PHASE)
        mach_name = self._case.get_value("MACH")
        mach_obj = Machines(machine=mach_name)
        htmlroot = CIME.utils.get_htmlroot(mach_obj)
        urlroot = CIME.utils.get_urlroot(mach_obj)
        if htmlroot is not None:
            with CIME.utils.SharedArea():
                dir_util.copy_tree(
                    evv_out_dir,
                    os.path.join(htmlroot, "evv", case_name),
                    preserve_mode=False,
                )
            if urlroot is None:
                urlroot = "[{}_URL]".format(mach_name.capitalize())
            viewing = "{}/evv/{}/index.html".format(urlroot, case_name)
        else:
            viewing = (
                "{}\n"
                "    EVV viewing instructions can be found at: "
                "    https://github.com/E3SM-Project/E3SM/blob/master/cime/scripts/"
                "climate_reproducibility/README.md#test-passfail-and-extended-output"
                "".format(evv_out_dir)
            )

        comments = (
            "{} {} for test '{}'.\n"
            "    {}\n"
            "    EVV results can be viewed at:\n"
            "    {}".format(
                CIME.test_status.BASELINE_PHASE,
                status,
                test_name,
                comments,
                viewing,
            )
        )

        CIME.utils.append_testlog(comments, self._orig_caseroot)
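# Hedged note (assumption about the evv4esm package, which provides the evv()
# entry point used above): the in-process call is equivalent to running the
# command-line tool on the generated config, roughly:
#
#   # evv -e <case_name>.json -o <case_name>.evv
#
# after which index.json in the output directory carries the pass/fail table
# that the loop above inspects.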
def create_cdash_xml(results, cdash_build_name, cdash_project, cdash_build_group):
###############################################################################
    #
    # Create dart config file
    #
    current_time = time.time()

    utc_time_tuple = time.gmtime(current_time)
    cdash_timestamp = time.strftime("%H:%M:%S", utc_time_tuple)

    hostname = Machines().get_machine_name()
    if hostname is None:
        hostname = socket.gethostname().split(".")[0]
        logging.warning(
            "Could not convert hostname '%s' into an ACME machine name" % (hostname)
        )

    dart_config = """
SourceDirectory: %s
BuildDirectory: %s

# Site is something like machine.domain, i.e. pragmatic.crd
Site: %s

# Build name is osname-revision-compiler, i.e. Linux-2.4.2-2smp-c++
BuildName: %s

# Submission information
IsCDash: TRUE
CDashVersion:
QueryCDashVersion:
DropSite: my.cdash.org
DropLocation: /submit.php?project=%s
DropSiteUser:
DropSitePassword:
DropSiteMode:
DropMethod: http
TriggerSite:
ScpCommand: %s

# Dashboard start time
NightlyStartTime: %s UTC
""" % (
        os.getcwd(),
        os.getcwd(),
        hostname,
        cdash_build_name,
        cdash_project,
        distutils.spawn.find_executable("scp"),
        cdash_timestamp,
    )

    with open("DartConfiguration.tcl", "w") as dart_fd:
        dart_fd.write(dart_config)

    utc_time = time.strftime("%Y%m%d-%H%M", utc_time_tuple)
    os.makedirs(os.path.join("Testing", utc_time))

    # Make tag file
    with open("Testing/TAG", "w") as tag_fd:
        tag_fd.write("%s\n%s\n" % (utc_time, cdash_build_group))

    create_cdash_test_xml(
        results, cdash_build_name, cdash_build_group, utc_time, current_time, hostname
    )

    create_cdash_upload_xml(
        results, cdash_build_name, cdash_build_group, utc_time, hostname
    )

    CIME.utils.run_cmd_no_fail("ctest -VV -D NightlySubmit", verbose=True)
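# Hedged sketch (standard library only): the tag directory written above uses
# the UTC build stamp CTest expects, e.g. "20240131-0415" for Jan 31 2024,
# 04:15 UTC. The date below is illustrative.
#
#   import time
#   utc_time = time.strftime("%Y%m%d-%H%M", time.gmtime())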
case.load_env()

models = case.get_values("COMP_CLASSES")
mach = case.get_value("MACH")
compiler = case.get_value("COMPILER")
debug = case.get_value("DEBUG")
mpilib = case.get_value("MPILIB")
sysos = case.get_value("OS")
comp_interface = case.get_value("COMP_INTERFACE")
expect(mach is not None, "xml variable MACH is not set")

# creates the Macros.make, Depends.compiler, Depends.machine, Depends.machine.compiler
# and env_mach_specific.xml if they don't already exist.
if not os.path.isfile("Macros.make") or not os.path.isfile("env_mach_specific.xml"):
    configure(
        Machines(machine=mach),
        caseroot,
        ["Makefile"],
        compiler,
        mpilib,
        debug,
        comp_interface,
        sysos,
    )

# Also write out Cmake macro file
if not os.path.isfile("Macros.cmake"):
    configure(
        Machines(machine=mach),
        caseroot,
        ["CMake"],
        compiler,
        mpilib,
        debug,
        comp_interface,
        sysos,
    )

# Set tasks to 1 if mpi-serial library
if mpilib == "mpi-serial":
    case.set_value("NTASKS", 1)

# Check ninst.
# In CIME there can be multiple instances of each component model (an ensemble);
# NINST is the number of instances of that component.
comp_interface = case.get_value("COMP_INTERFACE")
if comp_interface == "nuopc":
def __init__(
    self,
    test_names,
    test_data=None,
    no_run=False,
    no_build=False,
    no_setup=False,
    no_batch=None,
    test_root=None,
    test_id=None,
    machine_name=None,
    compiler=None,
    baseline_root=None,
    baseline_cmp_name=None,
    baseline_gen_name=None,
    clean=False,
    namelists_only=False,
    project=None,
    parallel_jobs=None,
    walltime=None,
    proc_pool=None,
    use_existing=False,
    save_timing=False,
    queue=None,
    allow_baseline_overwrite=False,
    output_root=None,
    force_procs=None,
    force_threads=None,
    mpilib=None,
    input_dir=None,
    pesfile=None,
    mail_user=None,
    mail_type=None,
    allow_pnl=False,
):
###########################################################################
    self._cime_root = CIME.utils.get_cime_root()
    self._cime_model = get_model()
    self._save_timing = save_timing
    self._queue = queue
    # Format: {test_name -> {data_name -> data}}
    self._test_data = {} if test_data is None else test_data
    self._mpilib = mpilib  # allow override of default mpilib
    self._completed_tests = 0
    self._input_dir = input_dir
    self._pesfile = pesfile
    self._allow_baseline_overwrite = allow_baseline_overwrite
    self._allow_pnl = allow_pnl

    self._mail_user = mail_user
    self._mail_type = mail_type

    self._machobj = Machines(machine=machine_name)

    self._model_build_cost = 4

    # If user is forcing procs or threads, re-write test names to reflect this.
    if force_procs or force_threads:
        test_names = _translate_test_names_for_new_pecount(
            test_names, force_procs, force_threads
        )

    self._no_setup = no_setup
    self._no_build = no_build or no_setup or namelists_only
    self._no_run = no_run or self._no_build
    self._output_root = output_root

    # Figure out what project to use
    if project is None:
        self._project = CIME.utils.get_project()
        if self._project is None:
            self._project = self._machobj.get_value("PROJECT")
    else:
        self._project = project

    # We will not use batch system if user asked for no_batch or if current
    # machine is not a batch machine
    self._no_batch = no_batch or not self._machobj.has_batch_system()
    expect(
        not (self._no_batch and self._queue is not None),
        "Does not make sense to request a queue without batch system",
    )

    # Determine and resolve test_root
    if test_root is not None:
        self._test_root = test_root
    elif self._output_root is not None:
        self._test_root = self._output_root
    else:
        self._test_root = self._machobj.get_value("CIME_OUTPUT_ROOT")

    if self._project is not None:
        self._test_root = self._test_root.replace("$PROJECT", self._project)

    self._test_root = os.path.abspath(self._test_root)
    self._test_id = test_id if test_id is not None else CIME.utils.get_timestamp()

    self._compiler = (
        self._machobj.get_default_compiler() if compiler is None else compiler
    )

    self._clean = clean
    self._namelists_only = namelists_only

    self._walltime = walltime

    if parallel_jobs is None:
        self._parallel_jobs = min(
            len(test_names), self._machobj.get_value("MAX_MPITASKS_PER_NODE")
        )
    else:
        self._parallel_jobs = parallel_jobs

    # Implies comparison should be done if not None
    self._baseline_cmp_name = baseline_cmp_name
    # Implies generation should be done if not None
    self._baseline_gen_name = baseline_gen_name

    # Compute baseline_root
    self._baseline_root = (
        baseline_root
        if baseline_root is not None
        else self._machobj.get_value("BASELINE_ROOT")
    )
    if self._project is not None:
        self._baseline_root = self._baseline_root.replace("$PROJECT", self._project)

    self._baseline_root = os.path.abspath(self._baseline_root)

    if baseline_cmp_name or baseline_gen_name:
        if self._baseline_cmp_name:
            full_baseline_dir = os.path.join(
                self._baseline_root, self._baseline_cmp_name
            )
            expect(
                os.path.isdir(full_baseline_dir),
                "Missing baseline comparison directory {}".format(full_baseline_dir),
            )

        # the following is to assure that the existing generate directory is not overwritten
        if self._baseline_gen_name:
            full_baseline_dir = os.path.join(
                self._baseline_root, self._baseline_gen_name
            )
            existing_baselines = []
            for test_name in test_names:
                test_baseline = os.path.join(full_baseline_dir, test_name)
                if os.path.isdir(test_baseline):
                    existing_baselines.append(test_baseline)

            expect(
                allow_baseline_overwrite or len(existing_baselines) == 0,
                "Baseline directories already exist {}\n"
                "Use -o to avoid this error".format(existing_baselines),
            )

    if self._cime_model == "e3sm":
        _order_tests_by_runtime(test_names, self._baseline_root)

    # This is the only data that multiple threads will simultaneously access.
    # Each test has its own value, and setting/retrieving items from a dict
    # is atomic, so this should be fine to use without a mutex.
    # name -> (phase, status)
    self._tests = OrderedDict()
    for test_name in test_names:
        self._tests[test_name] = (TEST_START, TEST_PASS_STATUS)

    # Oversubscribe by 1/4
    if proc_pool is None:
        pes = int(self._machobj.get_value("MAX_TASKS_PER_NODE"))
        self._proc_pool = int(pes * 1.25)
    else:
        self._proc_pool = int(proc_pool)

    self._procs_avail = self._proc_pool

    # Setup phases
    self._phases = list(PHASES)
    if self._no_setup:
        self._phases.remove(SETUP_PHASE)
    if self._no_build:
        self._phases.remove(SHAREDLIB_BUILD_PHASE)
        self._phases.remove(MODEL_BUILD_PHASE)
    if self._no_run:
        self._phases.remove(RUN_PHASE)

    if use_existing:
        for test in self._tests:
            with TestStatus(self._get_test_dir(test)) as ts:
                for phase, status in ts:
                    if phase in CORE_PHASES:
                        if status in [TEST_PEND_STATUS, TEST_FAIL_STATUS]:
                            if status == TEST_FAIL_STATUS:
                                # Important for potential subsequent waits
                                ts.set_status(phase, TEST_PEND_STATUS)

                            # We need to pick up here
                            break

                        else:
                            if phase != SUBMIT_PHASE:
                                # Somewhat subtle. Create_test considers submit/run
                                # to be the run phase, so don't try to update test
                                # status for a passed submit phase.
                                self._update_test_status(test, phase, TEST_PEND_STATUS)
                                self._update_test_status(test, phase, status)

                                if phase == RUN_PHASE:
                                    logger.info(
                                        "Test {} passed and will not be re-run".format(
                                            test
                                        )
                                    )

            logger.info(
                "Using existing test directory {}".format(self._get_test_dir(test))
            )
    else:
        # None of the test directories should already exist.
        for test in self._tests:
            expect(
                not os.path.exists(self._get_test_dir(test)),
                "Cannot create new case in directory '{}', it already exists."
                " Pick a different test-id".format(self._get_test_dir(test)),
            )
            logger.info("Creating test directory {}".format(self._get_test_dir(test)))
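# Hedged arithmetic example of the "oversubscribe by 1/4" rule above: with
# MAX_TASKS_PER_NODE = 16 and no explicit proc_pool argument,
#
#   pes = 16
#   proc_pool = int(pes * 1.25)  # -> 20 concurrently schedulable tasks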
def load_balancing_submit(
    compset,
    res,
    pesfile,
    mpilib,
    compiler,
    project,
    machine,
    extra_options_file,
    test_id,
    force_purge,
    test_root,
):
################################################################################
    # Read in list of pes from given file
    expect(os.access(pesfile, os.R_OK), "ERROR: File %s not found" % pesfile)

    logger.info("Reading XML file %s. Searching for pesize entries:", pesfile)
    try:
        pesobj = Pes(pesfile)
    except ParseError:
        expect(False, "ERROR: File %s not parseable" % pesfile)

    pesize_list = []
    grid_nodes = pesobj.get_children("grid")
    for gnode in grid_nodes:
        mach_nodes = pesobj.get_children("mach", root=gnode)
        for mnode in mach_nodes:
            pes_nodes = pesobj.get_children("pes", root=mnode)
            for pnode in pes_nodes:
                pesize = pesobj.get(pnode, "pesize")
                if not pesize:
                    logger.critical("No pesize for pes node in file %s", pesfile)
                if pesize in pesize_list:
                    logger.critical("pesize %s duplicated in file %s", pesize, pesfile)
                pesize_list.append(pesize)

    expect(pesize_list, "ERROR: No grid entries found in pes file {}".format(pesfile))

    machobj = Machines(machine=machine)
    if test_root is None:
        test_root = machobj.get_value("CIME_OUTPUT_ROOT")
    if machine is None:
        machine = machobj.get_machine_name()
        print("machine is {}".format(machine))
    if compiler is None:
        compiler = machobj.get_default_compiler()
        print("compiler is {}".format(compiler))
    if mpilib is None:
        mpilib = machobj.get_default_MPIlib({"compiler": compiler})

    test_names = []
    for i in range(len(pesize_list)):
        test_names.append(
            get_full_test_name(
                "PFS_I{}".format(i),
                grid=res,
                compset=compset,
                machine=machine,
                compiler=compiler,
            )
        )
        casedir = os.path.join(test_root, test_names[-1] + "." + test_id)
        print("casedir is {}".format(casedir))
        if os.path.isdir(casedir):
            if force_purge:
                logger.info("Removing directory %s", casedir)
                shutil.rmtree(casedir)
            else:
                expect(
                    False,
                    "casedir {} already exists, use the --force-purge option, --test-root or"
                    " --test-id options".format(casedir),
                )

    tests = TestScheduler(
        test_names,
        no_setup=True,
        compiler=compiler,
        machine_name=machine,
        mpilib=mpilib,
        test_root=test_root,
        test_id=test_id,
        project=project,
    )
    success = tests.run_tests(wait=True)
    expect(success, "Error in creating cases")

    testnames = []
    for test in tests.get_testnames():
        testname = os.path.join(test_root, test + "." + test_id)
        testnames.append(testname)
        logger.info("test is {}".format(testname))
        with Case(testname) as case:
            pes_ntasks, pes_nthrds, pes_rootpe, _, _, _ = pesobj.find_pes_layout(
                "any", "any", "any", pesize_opts=pesize_list.pop(0)
            )
            for key in pes_ntasks:
                case.set_value(key, pes_ntasks[key])
            for key in pes_nthrds:
                case.set_value(key, pes_nthrds[key])
            for key in pes_rootpe:
                case.set_value(key, pes_rootpe[key])
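# Hedged example of the names generated above (values illustrative): with
# res="f19_g16", compset="X", machine="melvin", compiler="gnu", the loop
# produces one PFS timing test per pesize entry:
#
#   # "PFS_I0.f19_g16.X.melvin_gnu", "PFS_I1.f19_g16.X.melvin_gnu", ...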
if not clean:
    case.load_env()

    models = case.get_values("COMP_CLASSES")
    mach = case.get_value("MACH")
    compiler = case.get_value("COMPILER")
    debug = case.get_value("DEBUG")
    mpilib = case.get_value("MPILIB")
    sysos = case.get_value("OS")
    expect(mach is not None, "xml variable MACH is not set")

    # creates the Macros.make, Depends.compiler, Depends.machine, Depends.machine.compiler
    # and env_mach_specific.xml if they don't already exist.
    if not os.path.isfile("Macros.make") or not os.path.isfile("env_mach_specific.xml"):
        configure(
            Machines(machine=mach), caseroot, ["Makefile"], compiler, mpilib, debug, sysos
        )

    # Set tasks to 1 if mpi-serial library
    if mpilib == "mpi-serial":
        for vid, value in case:
            if vid.startswith("NTASKS") and value != 1:
                case.set_value(vid, 1)

    # Check ninst.
    # In CIME there can be multiple instances of each component model (an ensemble);
    # NINST is the number of instances of that component.
    multi_driver = case.get_value("MULTI_DRIVER")

    for comp in models:
        ntasks = case.get_value("NTASKS_{}".format(comp))
        if comp == "CPL":
            continue
        ninst = case.get_value("NINST_{}".format(comp))
def parse_command_line(args, description):
###############################################################################
    help_str = """
Solve a Mixed Integer Linear Program to find a PE layout that minimizes
the wall-clock time per model day.
"""
    parser = argparse.ArgumentParser(
        usage=help_str,
        description=description,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )

    CIME.utils.setup_standard_logging_options(parser)

    parser.add_argument(
        "--test-id", default=DEFAULT_TESTID, help="test-id to use for all timing runs"
    )

    parser.add_argument(
        "-r",
        "--test-root",
        help="Where test cases were created."
        " Will default to output root as defined in the config_machines file",
    )

    parser.add_argument(
        "--timing-dir",
        help="alternative to using casename to find timing data; instead read all"
        " files in this directory",
    )

    parser.add_argument(
        "--blocksize",
        help="default minimum size of blocks to assign to all components. Components"
        " can be assigned different blocksizes using --blocksize_XXX. Default 1",
        type=int,
    )

    for c in COMPONENT_LIST:
        parser.add_argument(
            "--blocksize-%s" % c.lower(),
            help="minimum blocksize for component %s, if different from --blocksize" % c,
            type=int,
        )

    parser.add_argument(
        "--total-tasks", type=int, help="Number of pes available for assignment"
    )

    parser.add_argument(
        "--layout", help="name of layout to solve (default selected internally)"
    )

    parser.add_argument(
        "--graph-models",
        action="store_true",
        help="plot cost v. ntasks models. requires matplotlib",
    )

    parser.add_argument(
        "--print-models", action="store_true", help="print all costs and ntasks"
    )

    parser.add_argument("--pe-output", help="write pe layout to file")

    parser.add_argument("--json-output", help="write MILP data to .json file")

    parser.add_argument("--json-input", help="solve using data from .json file")

    args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser)

    expect(
        args.total_tasks is not None or args.json_input is not None,
        "--total-tasks or --json-input option must be set",
    )

    blocksizes = {}
    for c in COMPONENT_LIST:
        attrib = "blocksize_%s" % c.lower()
        if getattr(args, attrib) is not None:
            blocksizes[c] = getattr(args, attrib)
        elif args.blocksize is not None:
            blocksizes[c] = args.blocksize

    test_root = args.test_root
    if test_root is None:
        machobj = Machines()
        test_root = machobj.get_value("CIME_OUTPUT_ROOT")

    return (
        args.test_id,
        test_root,
        args.timing_dir,
        blocksizes,
        args.total_tasks,
        args.layout,
        args.graph_models,
        args.print_models,
        args.pe_output,
        args.json_output,
        args.json_input,
    )
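# Hedged sketch of the blocksize precedence implemented above: a per-component
# flag (e.g. --blocksize-atm) wins over the global --blocksize, and components
# with neither flag are simply absent from the dict. Component names below are
# illustrative.
#
#   # --blocksize 8 --blocksize-atm 16  ->  {"ATM": 16, "LND": 8, "OCN": 8, ...}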
def _compare_baseline(self):
    """
    Compare baselines in the pergro test sense. That is,
    compare PGE from the test simulation with the baseline cloud
    """
    with self._test_status:
        self._test_status.set_status(
            CIME.test_status.BASELINE_PHASE, CIME.test_status.TEST_FAIL_STATUS
        )

        logger.debug("PGN_INFO: BASELINE COMPARISON STARTS")

        run_dir = self._case.get_value("RUNDIR")
        case_name = self._case.get_value("CASE")
        base_dir = os.path.join(
            self._case.get_value("BASELINE_ROOT"),
            self._case.get_value("BASECMP_CASE"),
        )

        var_list = self.get_var_list()

        test_name = "{}".format(case_name.split(".")[-1])
        evv_config = {
            test_name: {
                "module": os.path.join(evv_lib_dir, "extensions", "pg.py"),
                "test-case": case_name,
                "test-name": "Test",
                "test-dir": run_dir,
                "ref-name": "Baseline",
                "ref-dir": base_dir,
                "variables": var_list,
                "perturbations": PERTURBATIONS,
                "pge-cld": FCLD_NC,
                "ninit": NUMBER_INITIAL_CONDITIONS,
                "init-file-template": INIT_COND_FILE_TEMPLATE,
                "instance-file-template": INSTANCE_FILE_TEMPLATE,
                "init-model": "cam",
                "component": self.atmmod,
            }
        }

        json_file = os.path.join(run_dir, ".".join([case_name, "json"]))
        with open(json_file, "w") as config_file:
            json.dump(evv_config, config_file, indent=4)

        evv_out_dir = os.path.join(run_dir, ".".join([case_name, "evv"]))
        evv(["-e", json_file, "-o", evv_out_dir])

        with open(os.path.join(evv_out_dir, "index.json"), "r") as evv_f:
            evv_status = json.load(evv_f)

        comments = ""
        for evv_elem in evv_status["Data"]["Elements"]:
            if (
                evv_elem["Type"] == "ValSummary"
                and evv_elem["TableTitle"] == "Perturbation growth test"
            ):
                comments = "; ".join(
                    "{}: {}".format(key, val)
                    for key, val in evv_elem["Data"][test_name][""].items()
                )
                if evv_elem["Data"][test_name][""]["Test status"].lower() == "pass":
                    self._test_status.set_status(
                        CIME.test_status.BASELINE_PHASE,
                        CIME.test_status.TEST_PASS_STATUS,
                    )
                break

        status = self._test_status.get_status(CIME.test_status.BASELINE_PHASE)
        mach_name = self._case.get_value("MACH")
        mach_obj = Machines(machine=mach_name)
        htmlroot = CIME.utils.get_htmlroot(mach_obj)
        urlroot = CIME.utils.get_urlroot(mach_obj)
        if htmlroot is not None:
            with CIME.utils.SharedArea():
                dir_util.copy_tree(
                    evv_out_dir,
                    os.path.join(htmlroot, "evv", case_name),
                    preserve_mode=False,
                )
            if urlroot is None:
                urlroot = "[{}_URL]".format(mach_name.capitalize())
            viewing = "{}/evv/{}/index.html".format(urlroot, case_name)
        else:
            viewing = (
                "{}\n"
                "    EVV viewing instructions can be found at: "
                "    https://github.com/E3SM-Project/E3SM/blob/master/cime/scripts/"
                "climate_reproducibility/README.md#test-passfail-and-extended-output"
                "".format(evv_out_dir)
            )

        comments = (
            "{} {} for test '{}'.\n"
            "    {}\n"
            "    EVV results can be viewed at:\n"
            "    {}".format(
                CIME.test_status.BASELINE_PHASE, status, test_name, comments, viewing
            )
        )

        CIME.utils.append_testlog(comments, self._orig_caseroot)
def _compare_baseline(self):
    with self._test_status as ts:
        ts.set_status(
            CIME.test_status.BASELINE_PHASE, CIME.test_status.TEST_FAIL_STATUS
        )

        run_dir = self._case.get_value("RUNDIR")
        case_name = self._case.get_value("CASE")
        base_dir = os.path.join(
            self._case.get_value("BASELINE_ROOT"),
            self._case.get_value("BASECMP_CASE"),
        )

        test_name = "{}".format(case_name.split('.')[-1])
        evv_config = {
            test_name: {
                "module": os.path.join(evv_lib_dir, "extensions", "tsc.py"),
                "test-case": case_name,
                "test-dir": run_dir,
                "ref-case": "Baseline",
                "ref-dir": base_dir,
                "time-slice": [OUT_FREQ, SIM_LENGTH],
                "inspect-times": INSPECT_AT,
                "variables": VAR_LIST,
                "p-threshold": P_THRESHOLD,
                "component": self.atmmod,
            }
        }

        json_file = os.path.join(run_dir, '.'.join([case_name, 'json']))
        with open(json_file, 'w') as config_file:
            json.dump(evv_config, config_file, indent=4)

        evv_out_dir = os.path.join(run_dir, '.'.join([case_name, 'evv']))
        evv(['-e', json_file, '-o', evv_out_dir])

        with open(os.path.join(evv_out_dir, 'index.json'), 'r') as evv_f:
            evv_status = json.load(evv_f)

        comments = ""
        for evv_elem in evv_status['Data']['Elements']:
            if (
                evv_elem['Type'] == 'ValSummary'
                and evv_elem['TableTitle'] == 'Time step convergence test'
            ):
                comments = "; ".join(
                    "{}: {}".format(key, val)
                    for key, val in evv_elem['Data'][test_name][''].items()
                )
                if evv_elem['Data'][test_name]['']['Test status'].lower() == 'pass':
                    self._test_status.set_status(
                        CIME.test_status.BASELINE_PHASE,
                        CIME.test_status.TEST_PASS_STATUS,
                    )
                break

        status = self._test_status.get_status(CIME.test_status.BASELINE_PHASE)
        mach_name = self._case.get_value("MACH")
        mach_obj = Machines(machine=mach_name)
        htmlroot = CIME.utils.get_htmlroot(mach_obj)
        urlroot = CIME.utils.get_urlroot(mach_obj)
        if htmlroot is not None:
            with CIME.utils.SharedArea():
                dir_util.copy_tree(
                    evv_out_dir,
                    os.path.join(htmlroot, 'evv', case_name),
                    preserve_mode=False,
                )
            if urlroot is None:
                urlroot = "[{}_URL]".format(mach_name.capitalize())
            viewing = "{}/evv/{}/index.html".format(urlroot, case_name)
        else:
            viewing = (
                "{}\n"
                "    EVV viewing instructions can be found at: "
                "    https://github.com/E3SM-Project/E3SM/blob/master/cime/scripts/"
                "climate_reproducibility/README.md#test-passfail-and-extended-output"
                "".format(evv_out_dir)
            )

        comments = (
            "{} {} for test '{}'.\n"
            "    {}\n"
            "    EVV results can be viewed at:\n"
            "    {}".format(
                CIME.test_status.BASELINE_PHASE, status, test_name, comments, viewing
            )
        )

        CIME.utils.append_testlog(comments, self._orig_caseroot)
def __init__(
    self,
    test_names,
    test_data=None,
    no_run=False,
    no_build=False,
    no_setup=False,
    no_batch=None,
    test_root=None,
    test_id=None,
    machine_name=None,
    compiler=None,
    baseline_root=None,
    baseline_cmp_name=None,
    baseline_gen_name=None,
    clean=False,
    namelists_only=False,
    project=None,
    parallel_jobs=None,
    walltime=None,
    proc_pool=None,
    use_existing=False,
    save_timing=False,
    queue=None,
    allow_baseline_overwrite=False,
    output_root=None,
):
###########################################################################
    self._cime_root = CIME.utils.get_cime_root()
    self._cime_model = CIME.utils.get_model()
    self._allow_baseline_overwrite = allow_baseline_overwrite
    self._save_timing = save_timing
    self._queue = queue
    # Format: {test_name -> {data_name -> data}}
    self._test_data = {} if test_data is None else test_data

    self._machobj = Machines(machine=machine_name)

    self._no_setup = no_setup
    self._no_build = no_build or no_setup or namelists_only
    self._no_run = no_run or self._no_build
    self._output_root = output_root

    # Figure out what project to use
    if project is None:
        self._project = CIME.utils.get_project()
        if self._project is None:
            self._project = self._machobj.get_value("PROJECT")
    else:
        self._project = project

    # We will not use batch system if user asked for no_batch or if current
    # machine is not a batch machine
    self._no_batch = no_batch or not self._machobj.has_batch_system()
    expect(
        not (self._no_batch and self._queue is not None),
        "Does not make sense to request a queue without batch system",
    )

    # Determine and resolve test_root
    if test_root is not None:
        self._test_root = test_root
    elif self._output_root is not None:
        self._test_root = self._output_root
    else:
        self._test_root = self._machobj.get_value("CIME_OUTPUT_ROOT")

    if self._project is not None:
        self._test_root = self._test_root.replace("$PROJECT", self._project)

    self._test_root = os.path.abspath(self._test_root)
    self._test_id = test_id if test_id is not None else CIME.utils.get_timestamp()

    self._compiler = (
        self._machobj.get_default_compiler() if compiler is None else compiler
    )

    self._clean = clean
    self._namelists_only = namelists_only

    self._walltime = walltime

    if parallel_jobs is None:
        self._parallel_jobs = min(
            len(test_names), int(self._machobj.get_value("MAX_TASKS_PER_NODE"))
        )
    else:
        self._parallel_jobs = parallel_jobs

    # Implies comparison should be done if not None
    self._baseline_cmp_name = baseline_cmp_name
    # Implies generation should be done if not None
    self._baseline_gen_name = baseline_gen_name

    if baseline_cmp_name or baseline_gen_name:
        # Compute baseline_root
        self._baseline_root = (
            baseline_root
            if baseline_root is not None
            else self._machobj.get_value("CCSM_BASELINE")
        )
        if self._project is not None:
            self._baseline_root = self._baseline_root.replace(
                "$PROJECT", self._project
            )
        self._baseline_root = os.path.abspath(self._baseline_root)

        if self._baseline_cmp_name:
            full_baseline_dir = os.path.join(
                self._baseline_root, self._baseline_cmp_name
            )
            expect(
                os.path.isdir(full_baseline_dir),
                "Missing baseline comparison directory %s" % full_baseline_dir,
            )

        # the following is to assure that the existing generate directory is not overwritten
        if self._baseline_gen_name:
            full_baseline_dir = os.path.join(
                self._baseline_root, self._baseline_gen_name
            )
            existing_baselines = []
            for test_name in test_names:
                test_baseline = os.path.join(full_baseline_dir, test_name)
                if os.path.isdir(test_baseline):
                    existing_baselines.append(test_baseline)
            expect(
                allow_baseline_overwrite or len(existing_baselines) == 0,
                "Baseline directories already exist %s\n"
                "Use --allow-baseline-overwrite to avoid this error"
                % existing_baselines,
            )
    else:
        self._baseline_root = None

    # This is the only data that multiple threads will simultaneously access.
    # Each test has its own value, and setting/retrieving items from a dict
    # is atomic, so this should be fine to use without a mutex.
    # name -> (phase, status)
    self._tests = {}
    for test_name in test_names:
        self._tests[test_name] = (TEST_START, TEST_PASS_STATUS)

    # Oversubscribe by 1/4
    if proc_pool is None:
        pes = int(self._machobj.get_value("PES_PER_NODE"))
        self._proc_pool = int(pes * 1.25)
    else:
        self._proc_pool = int(proc_pool)

    self._procs_avail = self._proc_pool

    # Setup phases
    self._phases = list(PHASES)
    if self._no_setup:
        self._phases.remove(SETUP_PHASE)
    if self._no_build:
        self._phases.remove(SHAREDLIB_BUILD_PHASE)
        self._phases.remove(MODEL_BUILD_PHASE)
    if self._no_run:
        self._phases.remove(RUN_PHASE)

    if use_existing:
        for test in self._tests:
            ts = TestStatus(self._get_test_dir(test))
            for phase, status in ts:
                if phase in CORE_PHASES:
                    if status in [TEST_PEND_STATUS, TEST_FAIL_STATUS]:
                        # We need to pick up here
                        break
                    else:
                        self._update_test_status(test, phase, TEST_PEND_STATUS)
                        self._update_test_status(test, phase, status)
    else:
        # None of the test directories should already exist.
        for test in self._tests:
            expect(
                not os.path.exists(self._get_test_dir(test)),
                "Cannot create new case in directory '%s', it already exists."
                " Pick a different test-id" % self._get_test_dir(test),
            )
if not non_local:
    case.load_env()

models = case.get_values("COMP_CLASSES")
mach = case.get_value("MACH")
compiler = case.get_value("COMPILER")
debug = case.get_value("DEBUG")
mpilib = case.get_value("MPILIB")
sysos = case.get_value("OS")
comp_interface = case.get_value("COMP_INTERFACE")
expect(mach is not None, "xml variable MACH is not set")

# creates the Macros.make, Depends.compiler, Depends.machine, Depends.machine.compiler
# and env_mach_specific.xml if they don't already exist.
if not os.path.isfile("Macros.make") or not os.path.isfile("env_mach_specific.xml"):
    configure(
        Machines(machine=mach),
        caseroot,
        ["Makefile"],
        compiler,
        mpilib,
        debug,
        comp_interface,
        sysos,
    )

# Also write out Cmake macro file
if not os.path.isfile("Macros.cmake"):
    configure(
        Machines(machine=mach),
        caseroot,
        ["CMake"],
        compiler,
        mpilib,
        debug,
        comp_interface,
        sysos,
    )

# Set tasks to 1 if mpi-serial library
if mpilib == "mpi-serial":
    case.set_value("NTASKS", 1)

# Check ninst.
# In CIME there can be multiple instances of each component model (an ensemble);
# NINST is the number of instances of that component.
comp_interface = case.get_value("COMP_INTERFACE")
if comp_interface == "nuopc":
    ninst = case.get_value("NINST")
def _main():
    (
        output,
        build_dir,
        build_optimized,
        clean,
        cmake_args,
        compiler,
        enable_genf90,
        machine,
        machines_dir,
        make_j,
        use_mpi,
        mpilib,
        mpirun_command,
        test_spec_dir,
        ctest_args,
        use_openmp,
        xml_test_list,
        verbose,
    ) = parse_command_line(sys.argv)

    # =================================================
    # Find directory and file paths.
    # =================================================
    suite_specs = []
    # TODO: this violates cime policy of direct access to xml
    # should be moved to CIME/XML
    if xml_test_list is not None:
        test_xml_tree = ElementTree()
        test_xml_tree.parse(xml_test_list)
        known_paths = {
            "here": os.path.abspath(os.path.dirname(xml_test_list)),
        }
        suite_specs.extend(suites_from_xml(test_xml_tree, known_paths))

    if test_spec_dir is not None:
        suite_specs.append(
            TestSuiteSpec(
                "__command_line_test__",
                ["__command_line_test__"],
                [os.path.abspath(test_spec_dir)],
            )
        )

    if machines_dir is not None:
        machines_file = os.path.join(machines_dir, "config_machines.xml")
        machobj = Machines(infile=machines_file, machine=machine)
    else:
        machobj = Machines(machine=machine)

    # Create build directory if necessary.
    build_dir = os.path.abspath(build_dir)

    if not os.path.isdir(build_dir):
        os.mkdir(build_dir)

    # Switch to the build directory.
    os.chdir(build_dir)

    # =================================================
    # Functions to perform various stages of build.
    # =================================================
    if not use_mpi:
        mpilib = "mpi-serial"
    elif mpilib is None:
        mpilib = machobj.get_default_MPIlib()
        logger.info("Using mpilib: %s" % mpilib)

    if compiler is None:
        compiler = machobj.get_default_compiler()
        logger.info("Compiler is %s" % compiler)

    compilerobj = Compilers(machobj, compiler=compiler, mpilib=mpilib)

    find_pfunit(compilerobj, mpilib=mpilib, use_openmp=use_openmp)

    debug = not build_optimized
    os_ = machobj.get_value("OS")

    #
    # Create the environment, and the Macros.cmake file
    #
    configure(
        machobj, build_dir, ["CMake"], compiler, mpilib, debug, os_, unit_testing=True
    )
    machspecific = EnvMachSpecific(build_dir, unit_testing=True)

    fake_case = FakeCase(compiler, mpilib, debug)
    machspecific.load_env(fake_case)
    os.environ["OS"] = os_
    os.environ["COMPILER"] = compiler
    os.environ["DEBUG"] = stringify_bool(debug)
    os.environ["MPILIB"] = mpilib
    if use_openmp:
        os.environ["compile_threaded"] = "true"
    else:
        os.environ["compile_threaded"] = "false"

    os.environ["UNIT_TEST_HOST"] = socket.gethostname()
    if "NETCDF_PATH" in os.environ and not "NETCDF" in os.environ:
        # The CMake Netcdf find utility that we use (from pio2) seems to key off
        # of the environment variable NETCDF, but not NETCDF_PATH
        logger.info(
            "Setting NETCDF environment variable: %s" % os.environ["NETCDF_PATH"]
        )
        os.environ["NETCDF"] = os.environ["NETCDF_PATH"]

    if not use_mpi:
        mpirun_command = ""
    elif mpirun_command is None:
        mpi_attribs = {
            "compiler": compiler,
            "mpilib": mpilib,
            "threaded": use_openmp,
            "unit_testing": True,
        }

        # We can get away with specifying case=None since we're using exe_only=True
        mpirun_command, _ = machspecific.get_mpirun(
            case=None, attribs=mpi_attribs, exe_only=True
        )
        mpirun_command = machspecific.get_resolved_value(mpirun_command)
        logger.info("mpirun command is '%s'" % mpirun_command)

    # =================================================
    # Run tests.
    # =================================================
    for spec in suite_specs:
        os.chdir(build_dir)
        if os.path.isdir(spec.name):
            if clean:
                rmtree(spec.name)

        if not os.path.isdir(spec.name):
            os.mkdir(spec.name)

        for label, directory in spec:
            os.chdir(os.path.join(build_dir, spec.name))
            if not os.path.isdir(label):
                os.mkdir(label)
            os.chdir(label)

            name = spec.name + "/" + label

            if not os.path.islink("Macros.cmake"):
                os.symlink(os.path.join(build_dir, "Macros.cmake"), "Macros.cmake")
            use_mpiserial = not use_mpi
            cmake_stage(
                name,
                directory,
                build_optimized,
                use_mpiserial,
                mpirun_command,
                output,
                verbose=verbose,
                enable_genf90=enable_genf90,
                cmake_args=cmake_args,
            )
            make_stage(name, output, make_j, clean=clean, verbose=verbose)

    for spec in suite_specs:
        os.chdir(os.path.join(build_dir, spec.name))
        for label, directory in spec:
            name = spec.name + "/" + label
            output.print_header("Running CTest tests for " + name + ".")

            ctest_command = ["ctest", "--output-on-failure"]
            if verbose:
                ctest_command.append("-VV")
            if ctest_args is not None:
                ctest_command.extend(ctest_args.split(" "))

            run_cmd_no_fail(
                " ".join(ctest_command),
                from_dir=label,
                arg_stdout=None,
                arg_stderr=subprocess.STDOUT,
            )
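# Hedged illustration of the command assembled in the final loop above: with
# verbose=True and ctest_args="-R pfunit" (values illustrative), the tests are
# driven by
#
#   # ctest --output-on-failure -VV -R pfunit
#
# run from each suite/label directory.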
def configure(self, compset_name, grid_name, machine_name=None, project=None, pecount=None, compiler=None, mpilib=None, user_compset=False, pesfile=None, user_grid=False, gridfile=None, ninst=1, test=False, walltime=None, queue=None, output_root=None): #-------------------------------------------- # compset, pesfile, and compset components #-------------------------------------------- self._set_compset_and_pesfile(compset_name, user_compset=user_compset, pesfile=pesfile) self._components = self.get_compset_components() #FIXME - if --user-compset is True then need to determine that #all of the compset settings are valid #-------------------------------------------- # grid #-------------------------------------------- if user_grid is True and gridfile is not None: self.set_value("GRIDS_SPEC_FILE", gridfile) grids = Grids(gridfile) gridinfo = grids.get_grid_info(name=grid_name, compset=self._compsetname) self._gridname = gridinfo["GRID"] for key, value in gridinfo.items(): logger.debug("Set grid %s %s" % (key, value)) self.set_lookup_value(key, value) #-------------------------------------------- # component config data #-------------------------------------------- self._get_component_config_data() self.get_compset_var_settings() #-------------------------------------------- # machine #-------------------------------------------- # set machine values in env_xxx files machobj = Machines(machine=machine_name) machine_name = machobj.get_machine_name() self.set_value("MACH", machine_name) nodenames = machobj.get_node_names() nodenames = [x for x in nodenames if '_system' not in x and '_variables' not in x and 'mpirun' not in x and\ 'COMPILER' not in x and 'MPILIB' not in x] for nodename in nodenames: value = machobj.get_value(nodename, resolved=False) type_str = self.get_type_info(nodename) if type_str is not None: logger.debug("machine nodname %s value %s" % (nodename, value)) self.set_value(nodename, convert_to_type(value, type_str, nodename)) if compiler is None: compiler = machobj.get_default_compiler() else: expect( machobj.is_valid_compiler(compiler), "compiler %s is not supported on machine %s" % (compiler, machine_name)) self.set_value("COMPILER", compiler) if mpilib is None: mpilib = machobj.get_default_MPIlib({"compiler": compiler}) else: expect( machobj.is_valid_MPIlib(mpilib, {"compiler": compiler}), "MPIlib %s is not supported on machine %s" % (mpilib, machine_name)) self.set_value("MPILIB", mpilib) machdir = machobj.get_machines_dir() self.set_value("MACHDIR", machdir) # Create env_mach_specific settings from machine info. 
env_mach_specific_obj = self.get_env("mach_specific") env_mach_specific_obj.populate(machobj) self.schedule_rewrite(env_mach_specific_obj) #-------------------------------------------- # pe layout #-------------------------------------------- match1 = re.match('([0-9]+)x([0-9]+)', "" if pecount is None else pecount) match2 = re.match('([0-9]+)', "" if pecount is None else pecount) pes_ntasks = {} pes_nthrds = {} pes_rootpe = {} if match1: opti_tasks = match1.group(1) opti_thrds = match1.group(2) elif match2: opti_tasks = match2.group(1) opti_thrds = 1 other = {} if match1 or match2: for component_class in self._component_classes: string_ = "NTASKS_" + component_class pes_ntasks[string_] = opti_tasks string_ = "NTHRDS_" + component_class pes_nthrds[string_] = opti_thrds string_ = "ROOTPE_" + component_class pes_rootpe[string_] = 0 else: pesobj = Pes(self._pesfile) pes_ntasks, pes_nthrds, pes_rootpe, other = pesobj.find_pes_layout( self._gridname, self._compsetname, machine_name, pesize_opts=pecount, mpilib=mpilib) mach_pes_obj = self.get_env("mach_pes") totaltasks = {} if other is not None: for key, value in other.items(): self.set_value(key, value) for key, value in pes_ntasks.items(): totaltasks[key[-3:]] = int(value) mach_pes_obj.set_value(key, int(value)) for key, value in pes_rootpe.items(): totaltasks[key[-3:]] += int(value) mach_pes_obj.set_value(key, int(value)) for key, value in pes_nthrds.items(): totaltasks[key[-3:]] *= int(value) mach_pes_obj.set_value(key, int(value)) maxval = 1 pes_per_node = self.get_value("PES_PER_NODE") for key, val in totaltasks.items(): if val < 0: val = -1 * val * pes_per_node if val > maxval: maxval = val # Make sure that every component has been accounted for # set, nthrds and ntasks to 1 otherwise. Also set the ninst values here. 
    for compclass in self._component_classes:
        if compclass == "CPL":
            continue
        key = "NINST_%s" % compclass
        mach_pes_obj.set_value(key, ninst)
        key = "NTASKS_%s" % compclass
        if key not in pes_ntasks.keys():
            mach_pes_obj.set_value(key, 1)
        key = "NTHRDS_%s" % compclass
        if key not in pes_nthrds.keys():
            mach_pes_obj.set_value(key, 1)

    #--------------------------------------------
    # batch system
    #--------------------------------------------
    env_batch = self.get_env("batch")
    batch_system_type = machobj.get_value("BATCH_SYSTEM")
    batch = Batch(batch_system=batch_system_type, machine=machine_name)
    bjobs = batch.get_batch_jobs()
    env_batch.set_batch_system(batch, batch_system_type=batch_system_type)
    env_batch.create_job_groups(bjobs)
    env_batch.set_job_defaults(bjobs, pesize=maxval, walltime=walltime,
                               force_queue=queue)
    self.schedule_rewrite(env_batch)

    self.set_value("COMPSET", self._compsetname)

    self._set_pio_xml()
    logger.info(" Compset is: %s " % self._compsetname)
    logger.info(" Grid is: %s " % self._gridname)
    logger.info(" Components in compset are: %s " % self._components)

    # Set project id
    if project is None:
        project = get_project(machobj)
    if project is not None:
        self.set_value("PROJECT", project)
    elif machobj.get_value("PROJECT_REQUIRED"):
        expect(project is not None,
               "PROJECT_REQUIRED is true but no project found")

    # Resolve the CIME_OUTPUT_ROOT variable, other than this
    # we don't want to resolve variables until we need them
    if output_root is None:
        output_root = self.get_value("CIME_OUTPUT_ROOT")
    self.set_value("CIME_OUTPUT_ROOT", output_root)

    # Overwriting an existing exeroot or rundir can cause problems
    exeroot = self.get_value("EXEROOT")
    rundir = self.get_value("RUNDIR")
    for wdir in (exeroot, rundir):
        logging.debug("wdir is %s" % wdir)
        if os.path.exists(wdir):
            expect(not test, "Directory %s already exists, aborting test" % wdir)
            response = input(
                "\nDirectory %s already exists, (r)eplace, (a)bort, or (u)se existing?"
                % wdir)
            if response.startswith("r"):
                shutil.rmtree(wdir)
            else:
                expect(response.startswith("u"), "Aborting by user request")

    # miscellaneous settings
    if self.get_value("RUN_TYPE") == 'hybrid':
        self.set_value("GET_REFCASE", True)

    # Turn on short term archiving as cesm default setting
    model = get_model()
    self.set_model_version(model)
    if model == "cesm" and not test:
        self.set_value("DOUT_S", True)
        self.set_value("TIMER_LEVEL", 4)
    if test:
        self.set_value("TEST", True)

    total_tasks = self.initialize_derived_attributes()

    # Make sure that parallel IO is not specified if total_tasks==1
    if total_tasks == 1:
        for compclass in self._component_classes:
            key = "PIO_TYPENAME_%s" % compclass
            pio_typename = self.get_value(key)
            if pio_typename in ("pnetcdf", "netcdf4p"):
                self.set_value(key, "netcdf")
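# --- Added usage sketch (not part of the original source) ---
# A hedged illustration of how configure() above is typically reached. The
# machine, grid, and compset names are hypothetical placeholders; in practice
# this call is made on your behalf by create_newcase/create_test.
#
#   case.configure("A", "f19_g16",
#                  machine_name="mymachine",  # must exist in config_machines.xml
#                  compiler="gnu", mpilib="openmpi",
#                  pecount="64x1")            # 64 MPI tasks x 1 OpenMP thread
#
# A pecount of "64x1" takes the regex fast path in the pe-layout block;
# omitting pecount falls back to the layout resolved from the pesfile.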
def configure_tests(timeout, no_fortran_run, fast, no_batch, no_cmake,
                    no_teardown, machine, compiler, mpilib, test_root,
                    **kwargs):
    config = CIME.utils.get_cime_config()

    if timeout:
        BaseTestCase.GLOBAL_TIMEOUT = str(timeout)

    BaseTestCase.NO_FORTRAN_RUN = no_fortran_run or False
    BaseTestCase.FAST_ONLY = fast or no_fortran_run
    BaseTestCase.NO_BATCH = no_batch or False
    BaseTestCase.NO_CMAKE = no_cmake or False
    BaseTestCase.NO_TEARDOWN = no_teardown or False

    # make sure we have default values
    MACHINE = None
    TEST_COMPILER = None
    TEST_MPILIB = None

    if machine is not None:
        MACHINE = Machines(machine=machine)
        os.environ["CIME_MACHINE"] = machine
    elif "CIME_MACHINE" in os.environ:
        MACHINE = Machines(machine=os.environ["CIME_MACHINE"])
    elif config.has_option("create_test", "MACHINE"):
        MACHINE = Machines(machine=config.get("create_test", "MACHINE"))
    elif config.has_option("main", "MACHINE"):
        MACHINE = Machines(machine=config.get("main", "MACHINE"))
    else:
        MACHINE = Machines()

    BaseTestCase.MACHINE = MACHINE

    if compiler is not None:
        TEST_COMPILER = compiler
    elif config.has_option("create_test", "COMPILER"):
        TEST_COMPILER = config.get("create_test", "COMPILER")
    elif config.has_option("main", "COMPILER"):
        TEST_COMPILER = config.get("main", "COMPILER")

    BaseTestCase.TEST_COMPILER = TEST_COMPILER

    if mpilib is not None:
        TEST_MPILIB = mpilib
    elif config.has_option("create_test", "MPILIB"):
        TEST_MPILIB = config.get("create_test", "MPILIB")
    elif config.has_option("main", "MPILIB"):
        TEST_MPILIB = config.get("main", "MPILIB")

    BaseTestCase.TEST_MPILIB = TEST_MPILIB

    if test_root is not None:
        TEST_ROOT = test_root
    elif config.has_option("create_test", "TEST_ROOT"):
        TEST_ROOT = config.get("create_test", "TEST_ROOT")
    else:
        TEST_ROOT = os.path.join(
            MACHINE.get_value("CIME_OUTPUT_ROOT"),
            "scripts_regression_test.%s" % CIME.utils.get_timestamp(),
        )

    BaseTestCase.TEST_ROOT = TEST_ROOT

    write_provenance_info(MACHINE, TEST_COMPILER, TEST_MPILIB, TEST_ROOT)

    atexit.register(functools.partial(cleanup, TEST_ROOT))
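# --- Added note (not part of the original source) ---
# The lookup order implemented above is: explicit argument, then the
# CIME_MACHINE environment variable (machine only), then the [create_test]
# section of the CIME config, then the [main] section, then probing the local
# host. For example (argument values hypothetical):
#
#   configure_tests(timeout=None, no_fortran_run=False, fast=False,
#                   no_batch=False, no_cmake=False, no_teardown=False,
#                   machine="docker", compiler="gnu", mpilib="openmpi",
#                   test_root=None)
#
# pins machine/compiler/mpilib explicitly, while TEST_ROOT falls back to
# $CIME_OUTPUT_ROOT/scripts_regression_test.<timestamp>.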
    print("May need to add cime/scripts to PYTHONPATH\n")
    raise ImportError(e)

try:
    import optimize_model
except ImportError as e:
    print("Error importing optimize_model")
    print("May need to add cime/tools/load_balancing_tool to PYTHONPATH\n")
    raise ImportError(e)

from CIME.utils import run_cmd_no_fail, get_full_test_name
from CIME.XML.machines import Machines
from CIME.XML import pes
import unittest, json, tempfile, sys, re, copy

SCRIPT_DIR = CIME.utils.get_scripts_root()
MACHINE = Machines()
CODE_DIR = os.path.join(SCRIPT_DIR, "..", "tools", "load_balancing_tool")
TEST_DIR = os.path.join(SCRIPT_DIR, "..", "tools", "load_balancing_tool",
                        "tests")
X_OPTIONS = """
STOP_N=1
"""
JSON_DICT = {
    "description": "Optimize using data available from original load "
                   "balancing tool. The original tool solved the problem "
                   "using a different model, so we do not expect exact "
                   "replication: (Original solution: NTASKS_ATM: 1006 "
                   "NTASKS_ICE: 889 NTASKS_LND: 117 NTASKS_OCN: 18 "
                   "TOTAL_COST: 28.749 s/mday)",
    "layout": "IceLndAtmOcn",
    "totaltasks": 1024,
    "ATM": {
        "ntasks": [32, 64, 128, 256, 512],
        "blocksize": 8,
        "nthrds": [1],
def query_cime(machine, param):
###############################################################################
    mach_obj = Machines(machine=machine)
    return mach_obj.get_value(param)
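# --- Added usage sketch (not part of the original source) ---
# query_cime resolves a single machine parameter from config_machines.xml;
# the machine and variable names here are illustrative:
#
#   output_root = query_cime("mymachine", "CIME_OUTPUT_ROOT")
#   batch_system = query_cime("mymachine", "BATCH_SYSTEM")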
    os.chdir(caseroot)

    non_local = case.get_value("NONLOCAL")

    models = case.get_values("COMP_CLASSES")
    mach = case.get_value("MACH")
    compiler = case.get_value("COMPILER")
    debug = case.get_value("DEBUG")
    mpilib = case.get_value("MPILIB")
    sysos = case.get_value("OS")
    comp_interface = case.get_value("COMP_INTERFACE")
    extra_machines_dir = case.get_value("EXTRA_MACHDIR")

    expect(mach is not None, "xml variable MACH is not set")

    mach_obj = Machines(machine=mach, extra_machines_dir=extra_machines_dir)

    # Check that $DIN_LOC_ROOT exists or can be created:
    if not non_local:
        din_loc_root = case.get_value("DIN_LOC_ROOT")
        testcase = case.get_value("TESTCASE")

        if not os.path.isdir(din_loc_root):
            try:
                os.makedirs(din_loc_root)
            except OSError as e:
                if e.errno == errno.EACCES:
                    logger.info("Invalid permissions to create {}".format(
                        din_loc_root))

        expect(
    # clean setup-generated files
    batch_script = get_batch_script_for_job(case.get_primary_job())
    files_to_clean = [batch_script, "env_mach_specific.xml",
                      "Macros.make", "Macros.cmake"]
    for file_to_clean in files_to_clean:
        if os.path.exists(file_to_clean) and not (keep and file_to_clean in keep):
            os.remove(file_to_clean)
            logger.info("Successfully cleaned {}".format(file_to_clean))

    if not test_mode:
        # rebuild the models (even on restart)
        case.set_value("BUILD_COMPLETE", False)

    # Cannot leave case in bad state (missing env_mach_specific.xml)
    if clean and not os.path.isfile("env_mach_specific.xml"):
        case.flush()
        configure(Machines(machine=mach, extra_machines_dir=extra_machines_dir),
                  caseroot, ["Makefile"], compiler, mpilib, debug,
                  comp_interface, sysos, noenv=True,
                  threaded=case.get_build_threaded(),
                  extra_machines_dir=extra_machines_dir)
        case.read_xml()

    if not clean:
        if not non_local:
            case.load_env()

        # creates the Macros.make, Depends.compiler, Depends.machine,
        # Depends.machine.compiler and env_mach_specific.xml if they
        # don't already exist.
        if not os.path.isfile("Macros.make") or not os.path.isfile("env_mach_specific.xml"):
            reread = not os.path.isfile("env_mach_specific.xml")
            if reread:
                case.flush()
            configure(Machines(machine=mach, extra_machines_dir=extra_machines_dir),
def _compare_baseline(self):
    with self._test_status:
        if int(self._case.get_value("RESUBMIT")) > 0:
            # This is here because the comparison is run for each submission
            # and we only want to compare once the whole run is finished. We
            # need to return a pass here to continue the submission process.
            self._test_status.set_status(CIME.test_status.BASELINE_PHASE,
                                         CIME.test_status.TEST_PASS_STATUS)
            return

        self._test_status.set_status(CIME.test_status.BASELINE_PHASE,
                                     CIME.test_status.TEST_FAIL_STATUS)

        run_dir = self._case.get_value("RUNDIR")
        case_name = self._case.get_value("CASE")
        base_dir = os.path.join(self._case.get_value("BASELINE_ROOT"),
                                self._case.get_value("BASECMP_CASE"))

        test_name = "{}".format(case_name.split('.')[-1])
        evv_config = {
            test_name: {
                "module": os.path.join(evv_lib_dir, "extensions", "ks.py"),
                "test-case": "Test",
                "test-dir": run_dir,
                "ref-case": "Baseline",
                "ref-dir": base_dir,
                "var-set": "default",
                "ninst": NINST,
                "critical": 13
            }
        }

        json_file = os.path.join(run_dir, '.'.join([case_name, 'json']))
        with open(json_file, 'w') as config_file:
            json.dump(evv_config, config_file, indent=4)

        evv_out_dir = os.path.join(run_dir, '.'.join([case_name, 'evv']))
        evv(['-e', json_file, '-o', evv_out_dir])

        with open(os.path.join(evv_out_dir, 'index.json')) as evv_f:
            evv_status = json.load(evv_f)

        comments = ""
        for evv_elem in evv_status['Data']['Elements']:
            if evv_elem['Type'] == 'ValSummary' \
                    and evv_elem['TableTitle'] == 'Kolmogorov-Smirnov test':
                comments = "; ".join("{}: {}".format(key, val)
                                     for key, val in
                                     evv_elem['Data'][test_name][''].items())
                if evv_elem['Data'][test_name]['']['Test status'].lower() == 'pass':
                    self._test_status.set_status(CIME.test_status.BASELINE_PHASE,
                                                 CIME.test_status.TEST_PASS_STATUS)
                break

        status = self._test_status.get_status(CIME.test_status.BASELINE_PHASE)
        mach_name = self._case.get_value("MACH")
        mach_obj = Machines(machine=mach_name)
        htmlroot = CIME.utils.get_htmlroot(mach_obj)
        urlroot = CIME.utils.get_urlroot(mach_obj)
        if htmlroot is not None:
            with CIME.utils.SharedArea():
                dir_util.copy_tree(evv_out_dir,
                                   os.path.join(htmlroot, 'evv', case_name),
                                   preserve_mode=False)
            if urlroot is None:
                urlroot = "[{}_URL]".format(mach_name.capitalize())
            viewing = "{}/evv/{}/index.html".format(urlroot, case_name)
        else:
            viewing = "{}\n" \
                      "    EVV viewing instructions can be found at: " \
                      "    https://github.com/E3SM-Project/E3SM/blob/master/cime/scripts/" \
                      "climate_reproducibility/README.md#test-passfail-and-extended-output" \
                      "".format(evv_out_dir)

        comments = "{} {} for test '{}'.\n" \
                   "    {}\n" \
                   "    EVV results can be viewed at:\n" \
                   "    {}".format(CIME.test_status.BASELINE_PHASE, status,
                                   test_name, comments, viewing)

        CIME.utils.append_testlog(comments, self._orig_caseroot)
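# --- Added note (not part of the original source) ---
# The evv invocation above is equivalent to the command line (paths shown
# schematically):
#
#   evv -e <RUNDIR>/<CASE>.json -o <RUNDIR>/<CASE>.evv
#
# The pass/fail decision is then read back from <CASE>.evv/index.json by
# scanning for the 'ValSummary' element titled 'Kolmogorov-Smirnov test'.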
def create_cdash_xml(results, cdash_build_name, cdash_project,
                     cdash_build_group, force_log_upload=False):
###############################################################################
    #
    # Create dart config file
    #
    current_time = time.time()

    utc_time_tuple = time.gmtime(current_time)
    cdash_timestamp = time.strftime("%H:%M:%S", utc_time_tuple)

    hostname = Machines().get_machine_name()
    if hostname is None:
        hostname = socket.gethostname().split(".")[0]
        logging.warning(
            "Could not convert hostname '{}' into an E3SM machine name".format(
                hostname))

    dart_config = """
SourceDirectory: {0}
BuildDirectory: {0}

# Site is something like machine.domain, i.e. pragmatic.crd
Site: {1}

# Build name is osname-revision-compiler, i.e. Linux-2.4.2-2smp-c++
BuildName: {2}

# Submission information
IsCDash: TRUE
CDashVersion:
QueryCDashVersion:
DropSite: my.cdash.org
DropLocation: /submit.php?project={3}
DropSiteUser:
DropSitePassword:
DropSiteMode:
DropMethod: https
TriggerSite:
ScpCommand: {4}

# Dashboard start time
NightlyStartTime: {5} UTC

UseLaunchers:
CurlOptions: CURLOPT_SSL_VERIFYPEER_OFF;CURLOPT_SSL_VERIFYHOST_OFF
""".format(
        os.getcwd(),
        hostname,
        cdash_build_name,
        cdash_project,
        find_executable("scp"),
        cdash_timestamp,
    )

    with open("DartConfiguration.tcl", "w") as dart_fd:
        dart_fd.write(dart_config)

    utc_time = time.strftime("%Y%m%d-%H%M", utc_time_tuple)
    os.makedirs(os.path.join("Testing", utc_time))

    # Make tag file
    with open("Testing/TAG", "w") as tag_fd:
        tag_fd.write("{}\n{}\n".format(utc_time, cdash_build_group))

    create_cdash_xml_fakes(results, cdash_build_name, cdash_build_group,
                           utc_time, current_time, hostname)

    create_cdash_upload_xml(
        results,
        cdash_build_name,
        cdash_build_group,
        utc_time,
        hostname,
        force_log_upload,
    )

    run_cmd_no_fail("ctest -VV -D NightlySubmit", verbose=True)
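# --- Added note (not part of the original source) ---
# Sketch of the resulting submission flow: this function writes
# DartConfiguration.tcl and Testing/TAG into the current directory, fabricates
# per-test XML via create_cdash_xml_fakes/create_cdash_upload_xml, and then
# `ctest -VV -D NightlySubmit` pushes everything to
# https://my.cdash.org/submit.php?project=<cdash_project>.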
def get_test_suite(suite, machine=None, compiler=None):
###############################################################################
    """
    Return a list of FULL test names for a suite.
    """
    expect(suite in _TEST_SUITES, "Unknown test suite: '{}'".format(suite))
    machobj = Machines(machine=machine)
    machine = machobj.get_machine_name()

    if (compiler is None):
        compiler = machobj.get_default_compiler()
    expect(machobj.is_valid_compiler(compiler),
           "Compiler {} not valid for machine {}".format(compiler, machine))

    inherits_from, _, tests_raw = _TEST_SUITES[suite]
    tests = []
    for item in tests_raw:
        test_mod = None
        if (isinstance(item, str)):
            test_name = item
        else:
            expect(isinstance(item, tuple),
                   "Bad item type for item '{}'".format(str(item)))
            expect(len(item) in [2, 3],
                   "Expected two or three items in item '{}'".format(str(item)))
            expect(isinstance(item[0], str),
                   "Expected string in first field of item '{}'".format(str(item)))
            expect(isinstance(item[1], str),
                   "Expected string in second field of item '{}'".format(str(item)))

            test_name = item[0]
            if (len(item) == 2):
                test_mod = item[1]
            else:
                expect(type(item[2]) in [str, tuple],
                       "Expected string or tuple for third field of item '{}'".format(str(item)))
                test_mod_machines = [item[2]] if isinstance(item[2], str) else item[2]
                if (machine in test_mod_machines):
                    test_mod = item[1]

        tests.append(CIME.utils.get_full_test_name(test_name,
                                                   machine=machine,
                                                   compiler=compiler,
                                                   testmod=test_mod))

    if (inherits_from is not None):
        inherits_from = [inherits_from] if isinstance(inherits_from, str) else inherits_from
        for inherits in inherits_from:
            inherited_tests = get_test_suite(inherits, machine, compiler)

            expect(len(set(tests) & set(inherited_tests)) == 0,
                   "Tests {} defined in multiple suites".format(
                       ", ".join(set(tests) & set(inherited_tests))))
            tests.extend(inherited_tests)

    return tests
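# --- Added usage sketch (not part of the original source) ---
# Suite, machine, and compiler names below are hypothetical and must exist in
# _TEST_SUITES / config_machines.xml:
#
#   tests = get_test_suite("e3sm_developer", machine="mymachine", compiler="gnu")
#   # -> ["ERS.f19_g16.A.mymachine_gnu", ...] (full test names, with a testmod
#   #    appended when the suite entry defines one)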
def _main():
    (
        output,
        build_dir,
        build_optimized,
        clean,
        cmake_args,
        compiler,
        enable_genf90,
        machine,
        machines_dir,
        make_j,
        use_mpi,
        mpilib,
        mpirun_command,
        test_spec_dir,
        ctest_args,
        use_openmp,
        xml_test_list,
        verbose,
        comp_interface,
    ) = parse_command_line(sys.argv)

    # =================================================
    # Find directory and file paths.
    # =================================================
    suite_specs = []

    # TODO: this violates cime policy of direct access to xml
    # should be moved to CIME/XML
    if xml_test_list is not None:
        test_xml_tree = ElementTree()
        test_xml_tree.parse(xml_test_list)
        known_paths = {
            "here": os.path.abspath(os.path.dirname(xml_test_list)),
        }
        suite_specs.extend(suites_from_xml(test_xml_tree, known_paths))

    if test_spec_dir is not None:
        suite_specs.append(
            TestSuiteSpec(
                "__command_line_test__",
                ["__command_line_test__"],
                [os.path.abspath(test_spec_dir)],
            ))

    if machines_dir is not None:
        machines_file = os.path.join(machines_dir, "config_machines.xml")
        machobj = Machines(infile=machines_file, machine=machine)
    else:
        machobj = Machines(machine=machine)

    # Create build directory if necessary.
    build_dir = os.path.abspath(build_dir)
    if not os.path.isdir(build_dir):
        os.mkdir(build_dir)

    # Switch to the build directory.
    os.chdir(build_dir)
    if clean:
        pwd_contents = os.listdir(os.getcwd())
        # Clear CMake cache.
        for file_ in pwd_contents:
            if (file_ in ("Macros.cmake", "env_mach_specific.xml")
                    or file_.startswith("Depends")
                    or file_.startswith(".env_mach_specific")):
                os.remove(file_)

    # =================================================
    # Functions to perform various stages of build.
    # =================================================
    if not use_mpi:
        mpilib = "mpi-serial"
    elif mpilib is None:
        mpilib = machobj.get_default_MPIlib()
        logger.info("Using mpilib: {}".format(mpilib))

    if compiler is None:
        compiler = machobj.get_default_compiler()
        logger.info("Compiler is {}".format(compiler))

    debug = not build_optimized
    os_ = machobj.get_value("OS")

    # Create the environment, and the Macros.cmake file
    #
    #
    configure(
        machobj,
        build_dir,
        ["CMake"],
        compiler,
        mpilib,
        debug,
        comp_interface,
        os_,
        unit_testing=True,
    )
    machspecific = EnvMachSpecific(build_dir, unit_testing=True)

    fake_case = FakeCase(compiler, mpilib, debug, comp_interface,
                         threading=use_openmp)
    machspecific.load_env(fake_case)

    cmake_args = (
        "{}-DOS={} -DMACH={} -DCOMPILER={} -DDEBUG={} -DMPILIB={} "
        "-Dcompile_threaded={} -DCASEROOT={}".format(
            "" if not cmake_args else "{} ".format(cmake_args),
            os_,
            machobj.get_machine_name(),
            compiler,
            stringify_bool(debug),
            mpilib,
            stringify_bool(use_openmp),
            build_dir))

    pfunit_path = find_pfunit(build_dir, cmake_args)

    os.environ["UNIT_TEST_HOST"] = socket.gethostname()
    if "NETCDF_PATH" in os.environ and not "NETCDF" in os.environ:
        # The CMake Netcdf find utility that we use (from pio2) seems to key
        # off of the environment variable NETCDF, but not NETCDF_PATH
        logger.info("Setting NETCDF environment variable: {}".format(
            os.environ["NETCDF_PATH"]))
        os.environ["NETCDF"] = os.environ["NETCDF_PATH"]

    if "NETCDFROOT" in os.environ and not "NETCDF" in os.environ:
        # The CMake Netcdf find utility that we use (from pio2) seems to key
        # off of the environment variable NETCDF, but not NETCDFROOT
        logger.info("Setting NETCDF environment variable: {}".format(
            os.environ["NETCDFROOT"]))
        os.environ["NETCDF"] = os.environ["NETCDFROOT"]

    if not use_mpi:
        mpirun_command = ""
    elif mpirun_command is None:
        mpi_attribs = {
            "compiler": compiler,
            "mpilib": mpilib,
            "threaded": use_openmp,
            "comp_interface": comp_interface,
            "unit_testing": True,
        }

        # We can get away with specifying case=None since we're using exe_only=True
        mpirun_command, _, _, _ = machspecific.get_mpirun(None, mpi_attribs,
                                                          None, exe_only=True)
        mpirun_command = machspecific.get_resolved_value(mpirun_command)
        logger.info("mpirun command is '{}'".format(mpirun_command))

    # =================================================
    # Run tests.
    # =================================================
    for spec in suite_specs:
        os.chdir(build_dir)
        if os.path.isdir(spec.name):
            if clean:
                rmtree(spec.name)

        if not os.path.isdir(spec.name):
            os.mkdir(spec.name)

        for label, directory in spec:
            os.chdir(os.path.join(build_dir, spec.name))
            if not os.path.isdir(label):
                os.mkdir(label)
            os.chdir(label)

            name = spec.name + "/" + label

            if not os.path.islink("Macros.cmake"):
                os.symlink(os.path.join(build_dir, "Macros.cmake"),
                           "Macros.cmake")
            use_mpiserial = not use_mpi
            cmake_stage(
                name,
                directory,
                build_optimized,
                use_mpiserial,
                mpirun_command,
                output,
                pfunit_path,
                verbose=verbose,
                enable_genf90=enable_genf90,
                cmake_args=cmake_args,
            )
            make_stage(name, output, make_j, clean=clean, verbose=verbose)

    for spec in suite_specs:
        os.chdir(os.path.join(build_dir, spec.name))
        for label, directory in spec:
            name = spec.name + "/" + label
            output.print_header("Running CTest tests for " + name + ".")

            ctest_command = ["ctest", "--output-on-failure"]
            if verbose:
                ctest_command.append("-VV")
            if ctest_args is not None:
                ctest_command.extend(ctest_args.split(" "))

            logger.info("Running '{}'".format(" ".join(ctest_command)))
            # Capture ctest output in a separate variable so we don't clobber
            # the `output` printer object used on later loop iterations.
            ctest_output = run_cmd_no_fail(" ".join(ctest_command),
                                           from_dir=label,
                                           combine_output=True)
            logger.info(ctest_output)
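# --- Added note (not part of the original source) ---
# The driver above proceeds in three stages per suite/label: symlink the shared
# Macros.cmake, run cmake_stage + make_stage under build_dir/<suite>/<label>,
# then run ctest there. A minimal invocation might look like (script name and
# flags as defined by parse_command_line, values hypothetical):
#
#   ./run_tests.py --build-dir ./unit_test_build --machine mymachine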
    mach = case.get_value("MACH")
    compiler = case.get_value("COMPILER")
    debug = case.get_value("DEBUG")
    mpilib = case.get_value("MPILIB")
    sysos = case.get_value("OS")
    comp_interface = case.get_value("COMP_INTERFACE")
    expect(mach is not None, "xml variable MACH is not set")

    # creates the Macros.make, Depends.compiler, Depends.machine,
    # Depends.machine.compiler and env_mach_specific.xml if they
    # don't already exist.
    if not os.path.isfile("Macros.make") or not os.path.isfile(
            "env_mach_specific.xml"):
        reread = not os.path.isfile("env_mach_specific.xml")
        if reread:
            case.flush()
        configure(Machines(machine=mach), caseroot, ["Makefile"],
                  compiler, mpilib, debug, comp_interface, sysos, noenv=True)
        if reread:
            case.read_xml()

    # Also write out Cmake macro file
    if not os.path.isfile("Macros.cmake"):
        configure(Machines(machine=mach), caseroot, ["CMake"], compiler,