def _main():
    """Entry point: build and run the Fortran unit tests.

    Parses the command line, collects the test-suite specs, prepares a
    build directory and environment, runs the CMake/make stages for each
    suite, and finally executes the suites through CTest.
    """
    output, build_dir, build_optimized, clean, \
        cmake_args, compiler, enable_genf90, machine, machines_dir, \
        make_j, use_mpi, mpilib, mpirun_command, test_spec_dir, ctest_args, \
        use_openmp, xml_test_list, verbose \
        = parse_command_line(sys.argv)

    #=================================================
    # Find directory and file paths.
    #=================================================
    suite_specs = []

    # TODO: this violates cime policy of direct access to xml
    # should be moved to CIME/XML
    if xml_test_list is not None:
        test_xml_tree = ElementTree()
        test_xml_tree.parse(xml_test_list)
        known_paths = {
            "here": os.path.abspath(os.path.dirname(xml_test_list)),
        }
        suite_specs.extend(suites_from_xml(test_xml_tree, known_paths))
    if test_spec_dir is not None:
        suite_specs.append(
            TestSuiteSpec("__command_line_test__",
                          ["__command_line_test__"],
                          [os.path.abspath(test_spec_dir)]))

    if machines_dir is not None:
        machines_file = os.path.join(machines_dir, "config_machines.xml")
        machobj = Machines(infile=machines_file, machine=machine)
    else:
        machobj = Machines(machine=machine)

    # Create the build directory if necessary, then work from it.
    build_dir = os.path.abspath(build_dir)
    if not os.path.isdir(build_dir):
        os.mkdir(build_dir)
    os.chdir(build_dir)

    #=================================================
    # Functions to perform various stages of build.
    #=================================================
    if not use_mpi:
        mpilib = "mpi-serial"
    elif mpilib is None:
        mpilib = machobj.get_default_MPIlib()
        logger.info("Using mpilib: {}".format(mpilib))

    if compiler is None:
        compiler = machobj.get_default_compiler()
        logger.info("Compiler is {}".format(compiler))

    compilerobj = Compilers(machobj, compiler=compiler, mpilib=mpilib)

    pfunit_path = find_pfunit(compilerobj, mpilib=mpilib, use_openmp=use_openmp)

    debug = not build_optimized
    os_ = machobj.get_value("OS")

    # Create the environment, and the Macros.cmake file
    configure(machobj, build_dir, ["CMake"], compiler, mpilib, debug, os_,
              unit_testing=True)
    machspecific = EnvMachSpecific(build_dir, unit_testing=True)

    fake_case = FakeCase(compiler, mpilib, debug)
    machspecific.load_env(fake_case)
    os.environ["OS"] = os_
    os.environ["COMPILER"] = compiler
    os.environ["DEBUG"] = stringify_bool(debug)
    os.environ["MPILIB"] = mpilib
    os.environ["compile_threaded"] = "true" if use_openmp else "false"
    os.environ["UNIT_TEST_HOST"] = socket.gethostname()

    if "NETCDF_PATH" in os.environ and not "NETCDF" in os.environ:
        # The CMake Netcdf find utility that we use (from pio2) seems to key off
        # of the environment variable NETCDF, but not NETCDF_PATH
        logger.info("Setting NETCDF environment variable: {}".format(os.environ["NETCDF_PATH"]))
        os.environ["NETCDF"] = os.environ["NETCDF_PATH"]

    if not use_mpi:
        mpirun_command = ""
    elif mpirun_command is None:
        mpi_attribs = {
            "compiler": compiler,
            "mpilib": mpilib,
            "threaded": use_openmp,
            "unit_testing": True
        }
        # We can get away with specifying case=None since we're using exe_only=True
        mpirun_command, _ = machspecific.get_mpirun(case=None,
                                                    attribs=mpi_attribs,
                                                    exe_only=True)
        mpirun_command = machspecific.get_resolved_value(mpirun_command)
        logger.info("mpirun command is '{}'".format(mpirun_command))

    #=================================================
    # Run tests.
    #=================================================
    # First pass: configure and build every suite.
    for spec in suite_specs:
        os.chdir(build_dir)
        if clean and os.path.isdir(spec.name):
            rmtree(spec.name)
        if not os.path.isdir(spec.name):
            os.mkdir(spec.name)
        for label, directory in spec:
            os.chdir(os.path.join(build_dir, spec.name))
            if not os.path.isdir(label):
                os.mkdir(label)
            os.chdir(label)

            name = spec.name + "/" + label

            # Each suite subdirectory shares the top-level Macros file.
            if not os.path.islink("Macros.cmake"):
                os.symlink(os.path.join(build_dir, "Macros.cmake"),
                           "Macros.cmake")
            use_mpiserial = not use_mpi
            cmake_stage(name, directory, build_optimized, use_mpiserial,
                        mpirun_command, output, pfunit_path, verbose=verbose,
                        enable_genf90=enable_genf90, cmake_args=cmake_args)
            make_stage(name, output, make_j, clean=clean, verbose=verbose)

    # Second pass: run the built suites through CTest.
    for spec in suite_specs:
        os.chdir(os.path.join(build_dir, spec.name))
        for label, directory in spec:
            name = spec.name + "/" + label
            output.print_header("Running CTest tests for " + name + ".")
            ctest_command = ["ctest", "--output-on-failure"]
            if verbose:
                ctest_command.append("-VV")
            if ctest_args is not None:
                ctest_command.extend(ctest_args.split(" "))
            run_cmd_no_fail(" ".join(ctest_command), from_dir=label,
                            arg_stdout=None, arg_stderr=subprocess.STDOUT)
def _main():
    """Entry point: build and run the Fortran unit tests.

    Parses the command line, collects the test-suite specs, prepares a
    build directory (optionally clearing stale CMake/machine caches),
    sets up the environment, builds each suite, and runs it via CTest.
    """
    output, build_dir, build_optimized, clean, \
        cmake_args, compiler, enable_genf90, machine, machines_dir, \
        make_j, use_mpi, mpilib, mpirun_command, test_spec_dir, ctest_args, \
        use_openmp, xml_test_list, verbose \
        = parse_command_line(sys.argv)

    #=================================================
    # Find directory and file paths.
    #=================================================
    suite_specs = []

    # TODO: this violates cime policy of direct access to xml
    # should be moved to CIME/XML
    if xml_test_list is not None:
        test_xml_tree = ElementTree()
        test_xml_tree.parse(xml_test_list)
        known_paths = {
            "here": os.path.abspath(os.path.dirname(xml_test_list)),
        }
        suite_specs.extend(suites_from_xml(test_xml_tree, known_paths))
    if test_spec_dir is not None:
        suite_specs.append(
            TestSuiteSpec("__command_line_test__",
                          ["__command_line_test__"],
                          [os.path.abspath(test_spec_dir)]))

    if machines_dir is not None:
        machines_file = os.path.join(machines_dir, "config_machines.xml")
        machobj = Machines(infile=machines_file, machine=machine)
    else:
        machobj = Machines(machine=machine)

    # Create the build directory if necessary, then work from it.
    build_dir = os.path.abspath(build_dir)
    if not os.path.isdir(build_dir):
        os.mkdir(build_dir)
    os.chdir(build_dir)

    if clean:
        pwd_contents = os.listdir(os.getcwd())
        # Clear CMake cache.
        for file_ in pwd_contents:
            if file_ in ("Macros.cmake", "env_mach_specific.xml") \
                    or file_.startswith('Depends') \
                    or file_.startswith(".env_mach_specific"):
                os.remove(file_)

    #=================================================
    # Functions to perform various stages of build.
    #=================================================
    if not use_mpi:
        mpilib = "mpi-serial"
    elif mpilib is None:
        mpilib = machobj.get_default_MPIlib()
        logger.info("Using mpilib: {}".format(mpilib))

    if compiler is None:
        compiler = machobj.get_default_compiler()
        logger.info("Compiler is {}".format(compiler))

    compilerobj = Compilers(machobj, compiler=compiler, mpilib=mpilib)

    pfunit_path = find_pfunit(compilerobj, mpilib=mpilib, use_openmp=use_openmp)

    debug = not build_optimized
    os_ = machobj.get_value("OS")

    # Create the environment, and the Macros.cmake file
    configure(machobj, build_dir, ["CMake"], compiler, mpilib, debug, os_,
              unit_testing=True)
    machspecific = EnvMachSpecific(build_dir, unit_testing=True)

    fake_case = FakeCase(compiler, mpilib, debug)
    machspecific.load_env(fake_case)
    os.environ["OS"] = os_
    os.environ["COMPILER"] = compiler
    os.environ["DEBUG"] = stringify_bool(debug)
    os.environ["MPILIB"] = mpilib
    os.environ["compile_threaded"] = "true" if use_openmp else "false"
    os.environ["UNIT_TEST_HOST"] = socket.gethostname()

    if "NETCDF_PATH" in os.environ and not "NETCDF" in os.environ:
        # The CMake Netcdf find utility that we use (from pio2) seems to key off
        # of the environment variable NETCDF, but not NETCDF_PATH
        logger.info("Setting NETCDF environment variable: {}".format(
            os.environ["NETCDF_PATH"]))
        os.environ["NETCDF"] = os.environ["NETCDF_PATH"]

    if not use_mpi:
        mpirun_command = ""
    elif mpirun_command is None:
        mpi_attribs = {
            "compiler": compiler,
            "mpilib": mpilib,
            "threaded": use_openmp,
            "unit_testing": True
        }
        # We can get away with specifying case=None since we're using exe_only=True
        mpirun_command, _ = machspecific.get_mpirun(None, mpi_attribs, None,
                                                    exe_only=True)
        mpirun_command = machspecific.get_resolved_value(mpirun_command)
        logger.info("mpirun command is '{}'".format(mpirun_command))

    #=================================================
    # Run tests.
    #=================================================
    # First pass: configure and build every suite.
    for spec in suite_specs:
        os.chdir(build_dir)
        if clean and os.path.isdir(spec.name):
            rmtree(spec.name)
        if not os.path.isdir(spec.name):
            os.mkdir(spec.name)
        for label, directory in spec:
            os.chdir(os.path.join(build_dir, spec.name))
            if not os.path.isdir(label):
                os.mkdir(label)
            os.chdir(label)

            name = spec.name + "/" + label

            # Each suite subdirectory shares the top-level Macros file.
            if not os.path.islink("Macros.cmake"):
                os.symlink(os.path.join(build_dir, "Macros.cmake"),
                           "Macros.cmake")
            use_mpiserial = not use_mpi
            cmake_stage(name, directory, build_optimized, use_mpiserial,
                        mpirun_command, output, pfunit_path, verbose=verbose,
                        enable_genf90=enable_genf90, cmake_args=cmake_args)
            make_stage(name, output, make_j, clean=clean, verbose=verbose)

    # Second pass: run the built suites through CTest.
    for spec in suite_specs:
        os.chdir(os.path.join(build_dir, spec.name))
        for label, directory in spec:
            name = spec.name + "/" + label
            output.print_header("Running CTest tests for " + name + ".")
            ctest_command = ["ctest", "--output-on-failure"]
            if verbose:
                ctest_command.append("-VV")
            if ctest_args is not None:
                ctest_command.extend(ctest_args.split(" "))
            run_cmd_no_fail(" ".join(ctest_command), from_dir=label,
                            arg_stdout=None, arg_stderr=subprocess.STDOUT)
def load_balancing_submit(compset, res, pesfile, mpilib, compiler, project,
                          machine, extra_options_file, test_id, force_purge,
                          test_root):
    ################################################################################
    """Create and submit the PFS timing cases used for load balancing.

    Reads candidate PE layouts from *pesfile*, creates one PFS test case
    per layout via TestScheduler, then applies each layout's NTASKS /
    NTHRDS / ROOTPE values to the corresponding case.
    """
    # Read in list of pes from given file
    if not os.access(pesfile, os.R_OK):
        logger.critical('ERROR: File %s not found', pesfile)
        raise SystemExit(1)

    logger.info('Reading XML file %s. Searching for pesize entries:', pesfile)
    try:
        pesobj = Pes(pesfile)
    except ParseError:
        logger.critical('ERROR: File %s not parseable', pesfile)
        raise SystemExit(1)

    # Collect the pesize attribute of every <pes> node; warn on missing
    # or duplicate entries but keep going (logged for the user to fix).
    pesize_list = []
    for node in pesobj.get_nodes('pes'):
        pesize = node.get('pesize')
        if not pesize:
            logger.critical('No pesize for pes node in file %s', pesfile)
        if pesize in pesize_list:
            logger.critical('pesize %s duplicated in file %s', pesize, pesfile)
        pesize_list.append(pesize)

    if not pesize_list:
        logger.critical('ERROR: No grid entries found in pes file %s', pesfile)
        raise SystemExit(1)

    # Fill in any unspecified machine/compiler/mpilib from machine defaults.
    machobj = Machines(machine=machine)
    if test_root is None:
        test_root = machobj.get_value("CIME_OUTPUT_ROOT")
    if machine is None:
        machine = machobj.get_machine_name()
    print("machine is {}".format(machine))
    if compiler is None:
        compiler = machobj.get_default_compiler()
    print("compiler is {}".format(compiler))
    if mpilib is None:
        mpilib = machobj.get_default_MPIlib({"compiler": compiler})

    # One PFS_I<n> test per candidate layout; refuse to clobber an
    # existing case directory unless --force-purge was given.
    test_names = []
    for i in xrange(len(pesize_list)):
        test_names.append(get_full_test_name("PFS_I{}".format(i),
                                             grid=res, compset=compset,
                                             machine=machine,
                                             compiler=compiler))
        casedir = os.path.join(test_root, test_names[-1] + "." + test_id)
        print("casedir is {}".format(casedir))
        if os.path.isdir(casedir):
            if force_purge:
                logger.info('Removing directory %s', casedir)
                shutil.rmtree(casedir)
            else:
                expect(False,
                       "casedir {} already exists, use the --force-purge option, --test-root or"
                       " --test-id options".format(casedir))

    tests = TestScheduler(test_names, no_setup=True,
                          compiler=compiler, machine_name=machine,
                          mpilib=mpilib, test_root=test_root,
                          test_id=test_id, project=project)
    success = tests.run_tests(wait=True)
    expect(success, "Error in creating cases")

    # Pop one layout per created case and push its values into the case XML.
    testnames = []
    for test in tests.get_testnames():
        testname = os.path.join(test_root, test + "." + test_id)
        testnames.append(testname)
        logger.info("test is {}".format(testname))
        with Case(testname) as case:
            pes_ntasks, pes_nthrds, pes_rootpe, _ = \
                pesobj.find_pes_layout('any', 'any', 'any',
                                       pesize_opts=pesize_list.pop(0))
            for key in pes_ntasks:
                case.set_value(key, pes_ntasks[key])
            for key in pes_nthrds:
                case.set_value(key, pes_nthrds[key])
            for key in pes_rootpe:
                case.set_value(key, pes_rootpe[key])
def configure(self, compset_name, grid_name, machine_name=None,
              project=None, pecount=None, compiler=None, mpilib=None,
              user_compset=False, pesfile=None,
              user_grid=False, gridfile=None, ninst=1, test=False,
              walltime=None, queue=None):
    """Configure this case: compset, grid, machine, PE layout and batch.

    Args:
        compset_name: long or alias name of the component set.
        grid_name: model grid (resolution) name or alias.
        machine_name: target machine; None means detect the default.
        project: accounting project id; None means look it up.
        pecount: PE-count override, "<tasks>x<threads>" or "<tasks>".
        compiler, mpilib: toolchain overrides; None means machine default.
        user_compset / pesfile / user_grid / gridfile: user-supplied
            compset / PE-layout / grid definitions.
        ninst: number of component instances.
        test: True when configuring a test case (suppresses prompts).
        walltime, queue: batch defaults passed to env_batch.
    """
    #--------------------------------------------
    # compset, pesfile, and compset components
    #--------------------------------------------
    self._set_compset_and_pesfile(compset_name, user_compset=user_compset,
                                  pesfile=pesfile)

    self._components = self.get_compset_components()
    #FIXME - if --user-compset is True then need to determine that
    #all of the compset settings are valid

    #--------------------------------------------
    # grid
    #--------------------------------------------
    if user_grid is True and gridfile is not None:
        self.set_value("GRIDS_SPEC_FILE", gridfile)
    grids = Grids(gridfile)

    gridinfo = grids.get_grid_info(name=grid_name, compset=self._compsetname)

    self._gridname = gridinfo["GRID"]
    for key, value in gridinfo.items():
        logger.debug("Set grid %s %s" % (key, value))
        self.set_lookup_value(key, value)

    #--------------------------------------------
    # component config data
    #--------------------------------------------
    self._get_component_config_data()

    self.get_compset_var_settings()

    #--------------------------------------------
    # machine
    #--------------------------------------------
    # set machine values in env_xxx files
    machobj = Machines(machine=machine_name)
    machine_name = machobj.get_machine_name()
    self.set_value("MACH", machine_name)
    nodenames = machobj.get_node_names()
    # Skip structural / toolchain nodes; those are handled separately below.
    nodenames = [x for x in nodenames if
                 '_system' not in x and '_variables' not in x and
                 'mpirun' not in x and
                 'COMPILER' not in x and 'MPILIB' not in x]

    for nodename in nodenames:
        value = machobj.get_value(nodename, resolved=False)
        type_str = self.get_type_info(nodename)
        if type_str is not None:
            logger.debug("machine nodname %s value %s" % (nodename, value))
            self.set_value(nodename, convert_to_type(value, type_str, nodename))

    if compiler is None:
        compiler = machobj.get_default_compiler()
    else:
        expect(machobj.is_valid_compiler(compiler),
               "compiler %s is not supported on machine %s" % (compiler, machine_name))
    self.set_value("COMPILER", compiler)

    if mpilib is None:
        mpilib = machobj.get_default_MPIlib({"compiler": compiler})
    else:
        expect(machobj.is_valid_MPIlib(mpilib, {"compiler": compiler}),
               "MPIlib %s is not supported on machine %s" % (mpilib, machine_name))
    self.set_value("MPILIB", mpilib)

    machdir = machobj.get_machines_dir()
    self.set_value("MACHDIR", machdir)

    # Create env_mach_specific settings from machine info.
    env_mach_specific_obj = self.get_env("mach_specific")
    env_mach_specific_obj.populate(machobj)
    self.schedule_rewrite(env_mach_specific_obj)

    #--------------------------------------------
    # pe payout
    #--------------------------------------------
    # pecount may be "<tasks>x<threads>" (match1) or just "<tasks>" (match2).
    match1 = re.match('([0-9]+)x([0-9]+)', "" if pecount is None else pecount)
    match2 = re.match('([0-9]+)', "" if pecount is None else pecount)
    pes_ntasks = {}
    pes_nthrds = {}
    pes_rootpe = {}
    if match1:
        opti_tasks = match1.group(1)
        opti_thrds = match1.group(2)
    elif match2:
        opti_tasks = match2.group(1)
        opti_thrds = 1

    other = {}
    if match1 or match2:
        # Apply the explicit count uniformly to every component class.
        for component_class in self._component_classes:
            if component_class == "DRV":
                component_class = "CPL"
            string = "NTASKS_" + component_class
            pes_ntasks[string] = opti_tasks
            string = "NTHRDS_" + component_class
            pes_nthrds[string] = opti_thrds
            string = "ROOTPE_" + component_class
            pes_rootpe[string] = 0
    else:
        pesobj = Pes(self._pesfile)

        pes_ntasks, pes_nthrds, pes_rootpe, other = pesobj.find_pes_layout(
            self._gridname, self._compsetname, machine_name,
            pesize_opts=pecount)

    mach_pes_obj = self.get_env("mach_pes")
    totaltasks = {}
    # Since other items may include PES_PER_NODE we need to do this first
    # we can get rid of this code when all of the perl is removed
    for key, value in other.items():
        self.set_value(key, value)
    pes_per_node = self.get_value("PES_PER_NODE")
    for key, value in pes_ntasks.items():
        totaltasks[key[-3:]] = int(value)
        mach_pes_obj.set_value(key, int(value), pes_per_node=pes_per_node)
    for key, value in pes_rootpe.items():
        totaltasks[key[-3:]] += int(value)
        mach_pes_obj.set_value(key, int(value), pes_per_node=pes_per_node)
    for key, value in pes_nthrds.items():
        totaltasks[key[-3:]] *= int(value)
        mach_pes_obj.set_value(key, int(value), pes_per_node=pes_per_node)

    maxval = 1
    if mpilib != "mpi-serial":
        for key, val in totaltasks.items():
            # Negative values mean "whole nodes"; convert to task counts.
            if val < 0:
                val = -1 * val * pes_per_node
            if val > maxval:
                maxval = val

    # Make sure that every component has been accounted for
    # set, nthrds and ntasks to 1 otherwise. Also set the ninst values here.
    for compclass in self._component_classes:
        if compclass == "DRV":
            continue
        key = "NINST_%s" % compclass
        mach_pes_obj.set_value(key, ninst)
        key = "NTASKS_%s" % compclass
        if key not in pes_ntasks.keys():
            mach_pes_obj.set_value(key, 1)
        key = "NTHRDS_%s" % compclass
        # BUGFIX: this previously tested and assigned `compclass` instead of
        # `key`, so the NTHRDS_* default of 1 was written under a bogus name.
        if key not in pes_nthrds.keys():
            mach_pes_obj.set_value(key, 1)

    # FIXME - this is a short term fix for dealing with the restriction that
    # CISM1 cannot run on multiple cores
    if "CISM1" in self._compsetname:
        mach_pes_obj.set_value("NTASKS_GLC", 1)
        mach_pes_obj.set_value("NTHRDS_GLC", 1)

    #--------------------------------------------
    # batch system
    #--------------------------------------------
    batch_system_type = machobj.get_value("BATCH_SYSTEM")
    batch = Batch(batch_system=batch_system_type, machine=machine_name)
    bjobs = batch.get_batch_jobs()
    env_batch = self.get_env("batch")
    env_batch.set_batch_system(batch, batch_system_type=batch_system_type)
    env_batch.create_job_groups(bjobs)
    env_batch.set_job_defaults(bjobs, pesize=maxval, walltime=walltime,
                               force_queue=queue)
    self.schedule_rewrite(env_batch)

    self.set_value("COMPSET", self._compsetname)

    self._set_pio_xml()
    logger.info(" Compset is: %s " % self._compsetname)
    logger.info(" Grid is: %s " % self._gridname)
    logger.info(" Components in compset are: %s " % self._components)

    # Set project id
    if project is None:
        project = get_project(machobj)
    if project is not None:
        self.set_value("PROJECT", project)
    elif machobj.get_value("PROJECT_REQUIRED"):
        expect(project is not None,
               "PROJECT_REQUIRED is true but no project found")

    # Overwriting an existing exeroot or rundir can cause problems
    exeroot = self.get_value("EXEROOT")
    rundir = self.get_value("RUNDIR")
    for wdir in (exeroot, rundir):
        logging.debug("wdir is %s" % wdir)
        if os.path.exists(wdir):
            expect(not test, "Directory %s already exists, aborting test" % wdir)
            response = raw_input("\nDirectory %s already exists, (r)eplace, (a)bort, or (u)se existing?" % wdir)
            if response.startswith("r"):
                shutil.rmtree(wdir)
            else:
                expect(response.startswith("u"), "Aborting by user request")

    # miscellaneous settings
    if self.get_value("RUN_TYPE") == 'hybrid':
        self.set_value("GET_REFCASE", True)

    # Turn on short term archiving as cesm default setting
    model = get_model()
    if model == "cesm" and not test:
        self.set_value("DOUT_S", True)
def configure(self, compset_name, grid_name, machine_name=None,
              project=None, pecount=None, compiler=None, mpilib=None,
              user_compset=False, pesfile=None,
              user_grid=False, gridfile=None, ninst=1, test=False,
              walltime=None, queue=None, output_root=None):
    """Configure this case: compset, grid, machine, PE layout and batch.

    Args:
        compset_name: long or alias name of the component set.
        grid_name: model grid (resolution) name or alias.
        machine_name: target machine; None means detect the default.
        project: accounting project id; None means look it up.
        pecount: PE-count override, "<tasks>x<threads>" or "<tasks>".
        compiler, mpilib: toolchain overrides; None means machine default.
        user_compset / pesfile / user_grid / gridfile: user-supplied
            compset / PE-layout / grid definitions.
        ninst: number of component instances.
        test: True when configuring a test case (suppresses prompts).
        walltime, queue: batch defaults passed to env_batch.
        output_root: override for CIME_OUTPUT_ROOT; None keeps the default.
    """
    #--------------------------------------------
    # compset, pesfile, and compset components
    #--------------------------------------------
    self._set_compset_and_pesfile(compset_name, user_compset=user_compset,
                                  pesfile=pesfile)

    self._components = self.get_compset_components()
    #FIXME - if --user-compset is True then need to determine that
    #all of the compset settings are valid

    #--------------------------------------------
    # grid
    #--------------------------------------------
    if user_grid is True and gridfile is not None:
        self.set_value("GRIDS_SPEC_FILE", gridfile)
    grids = Grids(gridfile)

    gridinfo = grids.get_grid_info(name=grid_name, compset=self._compsetname)

    self._gridname = gridinfo["GRID"]
    for key, value in gridinfo.items():
        logger.debug("Set grid %s %s" % (key, value))
        self.set_lookup_value(key, value)

    #--------------------------------------------
    # component config data
    #--------------------------------------------
    self._get_component_config_data()

    self.get_compset_var_settings()

    #--------------------------------------------
    # machine
    #--------------------------------------------
    # set machine values in env_xxx files
    machobj = Machines(machine=machine_name)
    machine_name = machobj.get_machine_name()
    self.set_value("MACH", machine_name)
    nodenames = machobj.get_node_names()
    # Skip structural / toolchain nodes; those are handled separately below.
    nodenames = [x for x in nodenames if
                 '_system' not in x and '_variables' not in x and
                 'mpirun' not in x and
                 'COMPILER' not in x and 'MPILIB' not in x]

    for nodename in nodenames:
        value = machobj.get_value(nodename, resolved=False)
        type_str = self.get_type_info(nodename)
        if type_str is not None:
            logger.debug("machine nodname %s value %s" % (nodename, value))
            self.set_value(nodename, convert_to_type(value, type_str, nodename))

    if compiler is None:
        compiler = machobj.get_default_compiler()
    else:
        expect(machobj.is_valid_compiler(compiler),
               "compiler %s is not supported on machine %s" % (compiler, machine_name))
    self.set_value("COMPILER", compiler)

    if mpilib is None:
        mpilib = machobj.get_default_MPIlib({"compiler": compiler})
    else:
        expect(machobj.is_valid_MPIlib(mpilib, {"compiler": compiler}),
               "MPIlib %s is not supported on machine %s" % (mpilib, machine_name))
    self.set_value("MPILIB", mpilib)

    machdir = machobj.get_machines_dir()
    self.set_value("MACHDIR", machdir)

    # Create env_mach_specific settings from machine info.
    env_mach_specific_obj = self.get_env("mach_specific")
    env_mach_specific_obj.populate(machobj)
    self.schedule_rewrite(env_mach_specific_obj)

    #--------------------------------------------
    # pe payout
    #--------------------------------------------
    # pecount may be "<tasks>x<threads>" (match1) or just "<tasks>" (match2).
    match1 = re.match('([0-9]+)x([0-9]+)', "" if pecount is None else pecount)
    match2 = re.match('([0-9]+)', "" if pecount is None else pecount)
    pes_ntasks = {}
    pes_nthrds = {}
    pes_rootpe = {}
    if match1:
        opti_tasks = match1.group(1)
        opti_thrds = match1.group(2)
    elif match2:
        opti_tasks = match2.group(1)
        opti_thrds = 1

    other = {}
    if match1 or match2:
        # Apply the explicit count uniformly to every component class.
        for component_class in self._component_classes:
            if component_class == "DRV":
                component_class = "CPL"
            string = "NTASKS_" + component_class
            pes_ntasks[string] = opti_tasks
            string = "NTHRDS_" + component_class
            pes_nthrds[string] = opti_thrds
            string = "ROOTPE_" + component_class
            pes_rootpe[string] = 0
    else:
        pesobj = Pes(self._pesfile)

        pes_ntasks, pes_nthrds, pes_rootpe, other = pesobj.find_pes_layout(
            self._gridname, self._compsetname, machine_name,
            pesize_opts=pecount)

    mach_pes_obj = self.get_env("mach_pes")
    totaltasks = {}
    # Since other items may include PES_PER_NODE we need to do this first
    # we can get rid of this code when all of the perl is removed
    for key, value in other.items():
        self.set_value(key, value)
    pes_per_node = self.get_value("PES_PER_NODE")
    for key, value in pes_ntasks.items():
        totaltasks[key[-3:]] = int(value)
        mach_pes_obj.set_value(key, int(value), pes_per_node=pes_per_node)
    for key, value in pes_rootpe.items():
        totaltasks[key[-3:]] += int(value)
        mach_pes_obj.set_value(key, int(value), pes_per_node=pes_per_node)
    for key, value in pes_nthrds.items():
        totaltasks[key[-3:]] *= int(value)
        mach_pes_obj.set_value(key, int(value), pes_per_node=pes_per_node)

    maxval = 1
    if mpilib != "mpi-serial":
        for key, val in totaltasks.items():
            # Negative values mean "whole nodes"; convert to task counts.
            if val < 0:
                val = -1 * val * pes_per_node
            if val > maxval:
                maxval = val

    # Make sure that every component has been accounted for
    # set, nthrds and ntasks to 1 otherwise. Also set the ninst values here.
    for compclass in self._component_classes:
        if compclass == "DRV":
            continue
        key = "NINST_%s" % compclass
        mach_pes_obj.set_value(key, ninst)
        key = "NTASKS_%s" % compclass
        if key not in pes_ntasks.keys():
            mach_pes_obj.set_value(key, 1)
        key = "NTHRDS_%s" % compclass
        # BUGFIX: this previously tested and assigned `compclass` instead of
        # `key`, so the NTHRDS_* default of 1 was written under a bogus name.
        if key not in pes_nthrds.keys():
            mach_pes_obj.set_value(key, 1)

    # FIXME - this is a short term fix for dealing with the restriction that
    # CISM1 cannot run on multiple cores
    if "CISM1" in self._compsetname:
        mach_pes_obj.set_value("NTASKS_GLC", 1)
        mach_pes_obj.set_value("NTHRDS_GLC", 1)

    #--------------------------------------------
    # batch system
    #--------------------------------------------
    batch_system_type = machobj.get_value("BATCH_SYSTEM")
    batch = Batch(batch_system=batch_system_type, machine=machine_name)
    bjobs = batch.get_batch_jobs()
    env_batch = self.get_env("batch")
    env_batch.set_batch_system(batch, batch_system_type=batch_system_type)
    env_batch.create_job_groups(bjobs)
    env_batch.set_job_defaults(bjobs, pesize=maxval, walltime=walltime,
                               force_queue=queue)
    self.schedule_rewrite(env_batch)

    self.set_value("COMPSET", self._compsetname)

    self._set_pio_xml()
    logger.info(" Compset is: %s " % self._compsetname)
    logger.info(" Grid is: %s " % self._gridname)
    logger.info(" Components in compset are: %s " % self._components)

    # Set project id
    if project is None:
        project = get_project(machobj)
    if project is not None:
        self.set_value("PROJECT", project)
    elif machobj.get_value("PROJECT_REQUIRED"):
        expect(project is not None,
               "PROJECT_REQUIRED is true but no project found")

    # Resolve the CIME_OUTPUT_ROOT variable, other than this
    # we don't want to resolve variables until we need them
    if output_root is None:
        output_root = self.get_value("CIME_OUTPUT_ROOT")
    self.set_value("CIME_OUTPUT_ROOT", output_root)

    # Overwriting an existing exeroot or rundir can cause problems
    exeroot = self.get_value("EXEROOT")
    rundir = self.get_value("RUNDIR")
    for wdir in (exeroot, rundir):
        logging.debug("wdir is %s" % wdir)
        if os.path.exists(wdir):
            expect(not test, "Directory %s already exists, aborting test" % wdir)
            response = raw_input("\nDirectory %s already exists, (r)eplace, (a)bort, or (u)se existing?" % wdir)
            if response.startswith("r"):
                shutil.rmtree(wdir)
            else:
                expect(response.startswith("u"), "Aborting by user request")

    # miscellaneous settings
    if self.get_value("RUN_TYPE") == 'hybrid':
        self.set_value("GET_REFCASE", True)

    # Turn on short term archiving as cesm default setting
    model = get_model()
    self.set_model_version(model)
    if model == "cesm" and not test:
        self.set_value("DOUT_S", True)
        self.set_value("TIMER_LEVEL", 4)
    if test:
        self.set_value("TEST", True)
    self.initialize_derived_attributes()
def load_balancing_submit(compset, res, pesfile, mpilib, compiler, project,
                          machine, extra_options_file, test_id, force_purge,
                          test_root):
    ################################################################################
    """Create and submit the PFS timing cases used for load balancing.

    Reads candidate PE layouts from *pesfile*, creates one PFS test case
    per layout via TestScheduler, applies each layout's NTASKS / NTHRDS /
    ROOTPE values (plus any key=value pairs from *extra_options_file*),
    then resubmits the cases to run the timing jobs.
    """
    # Read in list of pes from given file
    expect(os.access(pesfile, os.R_OK), 'ERROR: File %s not found', pesfile)

    logger.info('Reading XML file %s. Searching for pesize entries:', pesfile)
    try:
        pesobj = Pes(pesfile)
    except ParseError:
        expect(False, 'ERROR: File %s not parseable', pesfile)

    # Collect the pesize attribute of every grid/mach/pes node; warn on
    # missing or duplicate entries but keep going (logged for the user).
    pesize_list = []
    grid_nodes = pesobj.get_children("grid")
    for gnode in grid_nodes:
        mach_nodes = pesobj.get_children("mach", root=gnode)
        for mnode in mach_nodes:
            pes_nodes = pesobj.get_children("pes", root=mnode)
            for pnode in pes_nodes:
                pesize = pesobj.get(pnode, 'pesize')
                if not pesize:
                    logger.critical('No pesize for pes node in file %s', pesfile)
                if pesize in pesize_list:
                    logger.critical('pesize %s duplicated in file %s', pesize, pesfile)
                pesize_list.append(pesize)

    expect(pesize_list, 'ERROR: No grid entries found in pes file {}'.format(pesfile))

    # Fill in any unspecified machine/compiler/mpilib from machine defaults.
    machobj = Machines(machine=machine)
    if test_root is None:
        test_root = machobj.get_value("CIME_OUTPUT_ROOT")
    if machine is None:
        machine = machobj.get_machine_name()
    print("machine is {}".format(machine))
    if compiler is None:
        compiler = machobj.get_default_compiler()
    print("compiler is {}".format(compiler))
    if mpilib is None:
        mpilib = machobj.get_default_MPIlib({"compiler": compiler})

    # One PFS_I<n> test per candidate layout; refuse to clobber an
    # existing case directory unless --force-purge was given.
    test_names = []
    for i in xrange(len(pesize_list)):
        test_names.append(get_full_test_name("PFS_I{}".format(i), grid=res,
                                             compset=compset, machine=machine,
                                             compiler=compiler))
        casedir = os.path.join(test_root, test_names[-1] + "." + test_id)
        print("casedir is {}".format(casedir))
        if os.path.isdir(casedir):
            if force_purge:
                logger.info('Removing directory %s', casedir)
                shutil.rmtree(casedir)
            else:
                expect(False,
                       "casedir {} already exists, use the --force-purge option, --test-root or"
                       " --test-id options".format(casedir))

    tests = TestScheduler(test_names, no_setup=True,
                          compiler=compiler, machine_name=machine,
                          mpilib=mpilib, test_root=test_root,
                          test_id=test_id, project=project)
    success = tests.run_tests(wait=True)
    expect(success, "Error in creating cases")

    # Pop one layout per created case and push its values into the case XML.
    testnames = []
    for test in tests.get_testnames():
        testname = os.path.join(test_root, test + "." + test_id)
        testnames.append(testname)
        logger.info("test is {}".format(testname))
        with Case(testname) as case:
            pes_ntasks, pes_nthrds, pes_rootpe, _, _, _ = \
                pesobj.find_pes_layout('any', 'any', 'any',
                                       pesize_opts=pesize_list.pop(0))
            for key in pes_ntasks:
                case.set_value(key, pes_ntasks[key])
            for key in pes_nthrds:
                case.set_value(key, pes_nthrds[key])
            for key in pes_rootpe:
                case.set_value(key, pes_rootpe[key])

            if extra_options_file is not None:
                try:
                    # BUGFIX: use a context manager so the file handle is
                    # closed even if a set_value call raises mid-loop
                    # (previously extras.close() was skipped on error).
                    with open(extra_options_file, 'r') as extras:
                        for line in extras:
                            split = line.split('=')
                            if len(split) == 2:
                                # NOTE(review): split[1] keeps its trailing
                                # newline, as in the original — confirm the
                                # XML layer tolerates/strips it.
                                logger.info('setting %s=%s', split[0], split[1])
                                case.set_value(split[0], split[1])
                            else:
                                logger.debug('ignoring line in {}: {}'.format(
                                    extra_options_file, line))
                except IOError:
                    expect(False, "ERROR: Could not read file {}".format(extra_options_file))

    # Resubmit the now-configured cases to actually run the timing jobs.
    tests = TestScheduler(test_names, use_existing=True,
                          test_root=test_root, test_id=test_id)
    success = tests.run_tests(wait=False)
    expect(success, "Error in running cases")

    # need to fix
    logger.info('Timing jobs submitted. After jobs completed, run to optimize '
                'pe layout:\n load_balancing_solve --test-id {} --test-root {}'.
                format(test_id, test_root))
def configure(self, compset_name, grid_name, machine_name=None,
              project=None, pecount=None, compiler=None, mpilib=None,
              user_compset=False, pesfile=None,
              user_grid=False, gridfile=None, ninst=1, test=False):
    """Configure this case: compset, grid, machine, PE layout and batch.

    Args:
        compset_name: long or alias name of the component set.
        grid_name: model grid (resolution) name or alias.
        machine_name: target machine; None means detect the default.
        project: accounting project id; None means look it up.
        pecount: PE-size option forwarded to the PES-layout lookup.
        compiler, mpilib: toolchain overrides; None means machine default.
        user_compset / pesfile / user_grid / gridfile: user-supplied
            compset / PE-layout / grid definitions.
        ninst: number of component instances.
        test: True when configuring a test case (suppresses prompts).
    """
    #--------------------------------------------
    # compset, pesfile, and compset components
    #--------------------------------------------
    self._set_compset_and_pesfile(compset_name, user_compset=user_compset,
                                  pesfile=pesfile)

    self._components = self.get_compset_components()
    #FIXME - if --user-compset is True then need to determine that
    #all of the compset settings are valid

    #--------------------------------------------
    # grid
    #--------------------------------------------
    if user_grid is True and gridfile is not None:
        self.set_value("GRIDS_SPEC_FILE", gridfile)
    grids = Grids(gridfile)

    gridinfo = grids.get_grid_info(name=grid_name, compset=self._compsetname)

    self._gridname = gridinfo["GRID"]
    for key, value in gridinfo.items():
        logger.debug("Set grid %s %s" % (key, value))
        self.set_value(key, value)

    #--------------------------------------------
    # component config data
    #--------------------------------------------
    self._get_component_config_data()

    self.get_compset_var_settings()

    # Add the group and elements for the config_files.xml
    # (unused enumerate index removed)
    for config_file in self._component_config_files:
        self.set_value(config_file[0], config_file[1])

    #--------------------------------------------
    # machine
    #--------------------------------------------
    # set machine values in env_xxx files
    machobj = Machines(machine=machine_name)
    machine_name = machobj.get_machine_name()
    self.set_value("MACH", machine_name)
    nodenames = machobj.get_node_names()
    # Skip structural / toolchain nodes; those are handled separately below.
    nodenames = [x for x in nodenames if
                 '_system' not in x and '_variables' not in x and
                 'mpirun' not in x and
                 'COMPILER' not in x and 'MPILIB' not in x]

    for nodename in nodenames:
        value = machobj.get_value(nodename)
        type_str = self.get_type_info(nodename)
        if type_str is not None:
            self.set_value(nodename, convert_to_type(value, type_str, nodename))

    if compiler is None:
        compiler = machobj.get_default_compiler()
    else:
        expect(machobj.is_valid_compiler(compiler),
               "compiler %s is not supported on machine %s" % (compiler, machine_name))
    self.set_value("COMPILER", compiler)

    if mpilib is None:
        mpilib = machobj.get_default_MPIlib({"compiler": compiler})
    else:
        expect(machobj.is_valid_MPIlib(mpilib, {"compiler": compiler}),
               "MPIlib %s is not supported on machine %s" % (mpilib, machine_name))
    self.set_value("MPILIB", mpilib)

    machdir = machobj.get_machines_dir()
    self.set_value("MACHDIR", machdir)

    # Overwriting an existing exeroot or rundir can cause problems
    exeroot = self.get_value("EXEROOT")
    rundir = self.get_value("RUNDIR")
    for wdir in (exeroot, rundir):
        if os.path.exists(wdir):
            expect(not test, "Directory %s already exists, aborting test" % wdir)
            response = raw_input("\nDirectory %s already exists, (r)eplace, (a)bort, or (u)se existing?" % wdir)
            if response.startswith("r"):
                shutil.rmtree(wdir)
            else:
                expect(response.startswith("u"), "Aborting by user request")

    # the following go into the env_mach_specific file
    # (renamed from `vars`, which shadowed the builtin)
    section_names = ("module_system", "environment_variables", "mpirun")
    env_mach_specific_obj = self._get_env("mach_specific")
    for section in section_names:
        nodes = machobj.get_first_child_nodes(section)
        for node in nodes:
            env_mach_specific_obj.add_child(node)

    #--------------------------------------------
    # pe payout
    #--------------------------------------------
    pesobj = Pes(self._pesfile)

    #FIXME - add pesize_opts as optional argument below
    pes_ntasks, pes_nthrds, pes_rootpe = pesobj.find_pes_layout(
        self._gridname, self._compsetname, machine_name, pesize_opts=pecount)
    mach_pes_obj = self._get_env("mach_pes")
    totaltasks = {}
    for key, value in pes_ntasks.items():
        totaltasks[key[-3:]] = int(value)
        mach_pes_obj.set_value(key, int(value))
    for key, value in pes_rootpe.items():
        totaltasks[key[-3:]] += int(value)
        mach_pes_obj.set_value(key, int(value))
    for key, value in pes_nthrds.items():
        totaltasks[key[-3:]] *= int(value)
        mach_pes_obj.set_value(key, int(value))

    maxval = 1
    pes_per_node = mach_pes_obj.get_value("PES_PER_NODE")
    for key, val in totaltasks.items():
        # Negative values mean "whole nodes"; convert to task counts.
        if val < 0:
            val = -1 * val * pes_per_node
        if val > maxval:
            maxval = val

    # Make sure that every component has been accounted for
    # set, nthrds and ntasks to 1 otherwise. Also set the ninst values here.
    for compclass in self._component_classes:
        if compclass == "DRV":
            continue
        key = "NINST_%s" % compclass
        mach_pes_obj.set_value(key, ninst)
        key = "NTASKS_%s" % compclass
        if key not in pes_ntasks.keys():
            mach_pes_obj.set_value(key, 1)
        key = "NTHRDS_%s" % compclass
        # BUGFIX: this previously tested and assigned `compclass` instead of
        # `key`, so the NTHRDS_* default of 1 was written under a bogus name.
        if key not in pes_nthrds.keys():
            mach_pes_obj.set_value(key, 1)

    # FIXME - this is a short term fix for dealing with the restriction that
    # CISM1 cannot run on multiple cores
    if "CISM1" in self._compsetname:
        mach_pes_obj.set_value("NTASKS_GLC", 1)
        mach_pes_obj.set_value("NTHRDS_GLC", 1)

    #--------------------------------------------
    # batch system
    #--------------------------------------------
    batch_system_type = machobj.get_value("BATCH_SYSTEM")
    batch = Batch(batch_system=batch_system_type, machine=machine_name)
    bjobs = batch.get_batch_jobs()
    env_batch = self._get_env("batch")
    env_batch.set_batch_system(batch, batch_system_type=batch_system_type)
    env_batch.create_job_groups(bjobs)
    env_batch.set_job_defaults(bjobs, pesize=maxval)
    self._env_files_that_need_rewrite.add(env_batch)

    self.set_value("COMPSET", self._compsetname)

    self._set_pio_xml()
    logger.info(" Compset is: %s " % self._compsetname)
    logger.info(" Grid is: %s " % self._gridname)
    logger.info(" Components in compset are: %s " % self._components)

    # miscellaneous settings
    if self.get_value("RUN_TYPE") == 'hybrid':
        self.set_value("GET_REFCASE", True)

    # Set project id
    if project is None:
        project = get_project(machobj)
    if project is not None:
        self.set_value("PROJECT", project)
    elif machobj.get_value("PROJECT_REQUIRED"):
        expect(project is not None,
               "PROJECT_REQUIRED is true but no project found")