Esempio n. 1
0
    def get_value(self, name, attributes=None, resolved=True, subgroup=None):
        """
        Look up the value of a field from the config_machines.xml file.

        COMPILER and MPILIB are treated specially: when requested they fall
        back to the defaults from the COMPILERS/MPILIBS lists in the file.
        If nothing matches under the machine node, the generic XML lookup is
        tried; finally, when ``resolved`` is True, the environment is
        consulted for still-unresolved names.
        """
        expect(self.machine_node is not None, "Machine object has no machine defined")
        expect(subgroup is None, "This class does not support subgroups")

        # Dispatch the two special names to their default-value helpers.
        if name == "COMPILER":
            result = self.get_default_compiler()
        elif name == "MPILIB":
            result = self.get_default_MPIlib(attributes)
        else:
            match = self.get_optional_node(name, root=self.machine_node, attributes=attributes)
            result = None if match is None else match.text

        # Fall back to the generic XML lookup when nothing matched above.
        if result is None:
            result = GenericXML.get_value(self, name)

        if resolved:
            if result is not None:
                result = self.get_resolved_value(result)
            elif name in os.environ:
                result = os.environ[name]

        return result
Esempio n. 2
0
def case_test(case, testname=None):
    """
    Run the system test associated with a case.

    case     -- case object; must provide get_value
    testname -- name of the test to run; defaults to the case's TESTCASE value

    Returns the boolean result of the test's run() method. If the test
    object cannot be constructed, the RUN phase is marked FAIL in TestStatus
    and the exception is re-raised.
    """
    if testname is None:
        testname = case.get_value("TESTCASE")

    expect(testname is not None, "testname argument not resolved")
    # logging.warn is deprecated; logging.warning is the supported name.
    logging.warning("Running test for %s" % testname)

    _set_up_signal_handlers()

    try:
        # The following line can throw exceptions if the testname is
        # not found or the test constructor throws. We need to be
        # sure to leave TestStatus in the appropriate state if that
        # happens.
        test = find_system_test(testname, case)(case)
    except:
        caseroot = case.get_value("CASEROOT")
        with TestStatus(test_dir=caseroot) as ts:
            ts.set_status(RUN_PHASE, TEST_FAIL_STATUS, comments="failed to initialize")
        # Pass a string, not the raw exception object, to append_status
        # (matches the behavior of the newer variant of this function).
        append_status(str(sys.exc_info()[1]), sfile="TestStatus.log")
        raise

    success = test.run()

    return success
Esempio n. 3
0
    def update_shr_strdata_nml(self, config, stream, stream_path):
        """Updates values for the `shr_strdata_nml` namelist group.

        This should be done once per stream, and it shouldn't usually be called
        directly, since `create_stream_file` calls this method itself.
        """
        assert config['stream'] == stream, \
            "config stream is %s, but input stream is %s" % \
            (config['stream'], stream)

        # Sanity-check the stream's year range before recording it.
        start = int(self.get_default("strm_year_start", config))
        end = int(self.get_default("strm_year_end", config))
        align = int(self.get_default("strm_year_align", config))
        expect(end >= start,
               "Stream %s starts at year %d, but ends at earlier year %d." %
               (stream, start, end))

        # Record this stream in the streams list of the namelist group.
        entry = "%s %d %d %d" % (os.path.basename(stream_path), align, start, end)
        self._streams_namelists["streams"].append(entry)

        # Each per-stream variable must resolve to exactly one setting.
        for varname in self._streams_variables:
            setting = self.get_default(varname, config)
            expect(len(setting) == 1,
                   "Stream %s had multiple settings for variable %s." %
                   (stream, varname))
            self._streams_namelists[varname].append(setting[0])
Esempio n. 4
0
def check_all_input_data(case):
###############################################################################
    """
    Verify that all input data for the case is present locally (downloading
    as needed), and for non-startup runs prestage the reference-case
    (REFCASE) files into RUNDIR.
    """
    success = check_input_data(case=case, download=True)
    expect(success, "Failed to download input data")

    get_refcase  = case.get_value("GET_REFCASE")
    run_type     = case.get_value("RUN_TYPE")
    continue_run = case.get_value("CONTINUE_RUN")

    # We do not fully populate the inputdata directory on every
    # machine and do not expect every user to download the 3TB+ of
    # data in our inputdata repository. This code checks for the
    # existence of inputdata in the local inputdata directory and
    # attempts to download data from the server if it's needed and
    # missing.
    if get_refcase and run_type != "startup" and not continue_run:
        din_loc_root = case.get_value("DIN_LOC_ROOT")
        run_refdate  = case.get_value("RUN_REFDATE")
        run_refcase  = case.get_value("RUN_REFCASE")
        run_refdir   = case.get_value("RUN_REFDIR")
        rundir       = case.get_value("RUNDIR")

        refdir = os.path.join(din_loc_root, run_refdir, run_refcase, run_refdate)
        expect(os.path.isdir(refdir),
"""
*****************************************************************
prestage ERROR: $refdir is not on local disk
obtain this data from the svn input data repository
> mkdir -p %s
> cd %s
> cd ..
> svn export --force https://svn-ccsm-inputdata.cgd.ucar.edu/trunk/inputdata/%s
or set GET_REFCASE to FALSE in env_run.xml
and prestage the restart data to $RUNDIR manually
*****************************************************************""" % (refdir, refdir, refdir))

        logger.info(" - Prestaging REFCASE (%s) to %s" % (refdir, rundir))

        # prestage the reference case's files.

        if not os.path.exists(rundir):
            logger.debug("Creating run directory: %s" % rundir)
            os.makedirs(rundir)

        # Symlink every refcase file into the run directory, skipping files
        # that are already staged.
        for rcfile in glob.iglob(os.path.join(refdir, "*%s*" % run_refcase)):
            logger.debug("Staging file %s" % rcfile)
            destfile = os.path.join(rundir, os.path.basename(rcfile))
            if not os.path.exists(destfile):
                os.symlink(rcfile, destfile)

        # copy the refcases' rpointer files to the run directory
        for rpointerfile in glob.iglob(os.path.join(refdir, "*rpointer*")):
            logger.debug("Copy rpointer %s" % rpointerfile)
            shutil.copy(rpointerfile, rundir)

        # Alias legacy "cam2" restart files to their modern "cam" names.
        for cam2file in glob.iglob(os.path.join(rundir, "*.cam2.*")):
            camfile = cam2file.replace("cam2", "cam")
            # Guard: os.symlink raises if the destination already exists
            # (e.g. on a rerun of this function).
            if not os.path.exists(camfile):
                os.symlink(cam2file, camfile)
Esempio n. 5
0
 def _write_input_files(self, input_data_list):
     """Record the path of every input-data variable in `input_data_list`.

     Variables whose definition carries an `input_pathname` element are
     treated as input-data file paths; each value is written out as a
     "<variable> = <path>" line.
     """
     for group in self._namelist.get_group_names():
         for var in self._namelist.get_variable_names(group):
             pathname_kind = self._definition.get_node_element_info(var, "input_pathname")
             if pathname_kind is None:
                 # Not a path to an input data file; nothing to record.
                 continue
             # Every literal held by this variable names one input file.
             for literal in self._namelist.get_variable_value(group, var):
                 path = character_literal_to_string(literal)
                 if pathname_kind == 'abs':
                     # Absolute paths need no further mangling.
                     pass
                 elif pathname_kind.startswith('rel:'):
                     # The part past "rel" is the name of a variable that
                     # this variable specifies its path relative to.
                     base = self.get_value(pathname_kind[4:])
                     path = os.path.join(base, path)
                 else:
                     expect(False,
                            "Bad input_pathname value: %s." %
                            pathname_kind)
                 # Write to the input data list.
                 input_data_list.write("%s = %s\n" % (var, path))
Esempio n. 6
0
    def _create_caseroot_tools(self):
        """
        Populate the case root with tool symlinks and build support files.

        Creates symlinks in caseroot/ for the user-facing case scripts,
        symlinks in caseroot/Tools/ for utility scripts, writes a Macros
        file when configured to, and copies any machine/compiler Depends
        files into the case root.
        """
        machines_dir = os.path.abspath(self.get_value("MACHDIR"))
        toolsdir = os.path.join(self.get_value("CIMEROOT"),"scripts","Tools")
        # setup executable files in caseroot/
        exefiles = (os.path.join(toolsdir, "case.setup"),
                    os.path.join(toolsdir, "case.build"),
                    os.path.join(toolsdir, "case.submit"),
                    os.path.join(toolsdir, "preview_namelists"),
                    os.path.join(toolsdir, "check_input_data"),
                    os.path.join(toolsdir, "check_case"),
                    os.path.join(toolsdir, "archive_metadata.sh"),
                    os.path.join(toolsdir, "xmlchange"),
                    os.path.join(toolsdir, "xmlquery"))
        # Best effort: a failure to link is logged, not fatal.
        try:
            for exefile in exefiles:
                destfile = os.path.join(self._caseroot,os.path.basename(exefile))
                os.symlink(exefile, destfile)
        except Exception as e:
            logger.warning("FAILED to set up exefiles: %s" % str(e))

        # set up utility files in caseroot/Tools/
        toolfiles = (os.path.join(toolsdir, "check_lockedfiles"),
                     os.path.join(toolsdir, "lt_archive.sh"),
                     os.path.join(toolsdir, "getTiming"),
                     os.path.join(toolsdir, "save_provenance"),
                     os.path.join(machines_dir,"Makefile"),
                     os.path.join(machines_dir,"mkSrcfiles"),
                     os.path.join(machines_dir,"mkDepends"))

        for toolfile in toolfiles:
            destfile = os.path.join(self._caseroot,"Tools",os.path.basename(toolfile))
            # Unlike the exefiles above, a missing utility file is fatal.
            expect(os.path.isfile(toolfile)," File %s does not exist"%toolfile)
            try:
                os.symlink(toolfile, destfile)
            except Exception as e:
                logger.warning("FAILED to set up toolfiles: %s %s %s" % (str(e), toolfile, destfile))

        # Create Macros file.
        machine = self.get_value("MACH")
        files = Files()
        # Use config_build if the environment variable is set, or if there is no
        # config_compilers file.
        if os.getenv("CIME_USE_CONFIG_BUILD") == "TRUE" or \
           files.get_value("COMPILERS_SPEC_FILE") is None:
            build_file = files.get_value("BUILD_SPEC_FILE")
            machobj = Machines(machine=machine, files=files)
            macro_maker = Build(machobj)
            macros_path = os.path.join(self._caseroot, "Macros")
            with open(macros_path, "w") as macros_file:
                macro_maker.write_macros('Makefile', build_file, macros_file)

        # Copy any system or compiler Depends files to the case.
        # First the single-key files (Depends.<machine>, Depends.<compiler>),
        # then the combined Depends.<machine>.<compiler> variant.
        compiler = self.get_value("COMPILER")
        for dep in (machine, compiler):
            dfile = "Depends.%s"%dep
            if os.path.isfile(os.path.join(machines_dir,dfile)):
                shutil.copyfile(os.path.join(machines_dir,dfile), os.path.join(self._caseroot,dfile))
        dfile = "Depends.%s.%s"%(machine,compiler)
        if os.path.isfile(os.path.join(machines_dir,dfile)):
            shutil.copyfile(os.path.join(machines_dir,dfile), os.path.join(self._caseroot, dfile))
Esempio n. 7
0
def case_run(case):
###############################################################################
    """
    Set up the run, run the model (once per data-assimilation cycle), and
    perform the post-run steps. Returns True on success.
    """
    # Refuse to proceed unless invoked through the submit script, since
    # short-term archiving is only wired up in that path.
    # NOTE: the concatenated message parts carry explicit trailing spaces;
    # the original was missing one after "automatically.".
    run_with_submit = case.get_value("RUN_WITH_SUBMIT")
    expect(run_with_submit,
           "You are not calling the run script via the submit script. "
           "As a result, short-term archiving will not be called automatically. "
           "Please submit your run using the submit script like so:"
           " ./case.submit")

    data_assimilation = case.get_value("DATA_ASSIMILATION")
    data_assimilation_cycles = case.get_value("DATA_ASSIMILATION_CYCLES")
    data_assimilation_script = case.get_value("DATA_ASSIMILATION_SCRIPT")

    # set up the LID (log id, shared by all logs of this run via the env)
    lid = time.strftime("%y%m%d-%H%M%S")
    os.environ["LID"] = lid

    for _ in range(data_assimilation_cycles):
        preRunCheck(case)
        runModel(case)
        postRunCheck(case, lid)
        saveLogs(case, lid)       # Copy log files back to caseroot
        getTimings(case, lid)     # Run the getTiming script
        if data_assimilation:
            DoDataAssimilation(case, data_assimilation_script, lid)

    resubmitCheck(case)

    return True
Esempio n. 8
0
 def _write_input_files(self, input_data_list):
     """Write input data files to list.

     For every namelist variable whose definition carries an
     `input_pathname` element, each of its values is interpreted as an
     input-data file path and written to `input_data_list` as a
     "<variable> = <path>" line. Sentinel values and non-absolute paths
     for 'abs' variables are skipped.
     """
     for group_name in self._namelist.get_group_names():
         for variable_name in self._namelist.get_variable_names(group_name):
             input_pathname = self._definition.get_node_element_info(variable_name, "input_pathname")
             if input_pathname is not None:
                 # This is where we end up for all variables that are paths
                 # to input data files.
                 literals = self._namelist.get_variable_value(group_name, variable_name)
                 for literal in literals:
                     file_path = character_literal_to_string(literal)
                     # NOTE - these are hard-coded here and a better way is to make these extensible
                     # ('UNSET'/'idmap' act as placeholder values, not real files)
                     if file_path == 'UNSET' or file_path == 'idmap':
                         continue
                     if input_pathname == 'abs':
                         # No further mangling needed for absolute paths.
                         # At this point, there are overwrites that should be ignored
                         if not os.path.isabs(file_path):
                             continue
                         else:
                             pass
                     elif input_pathname.startswith('rel:'):
                         # The part past "rel" is the name of a variable that
                         # this variable specifies its path relative to.
                         root_var = input_pathname[4:]
                         root_dir = self.get_value(root_var)
                         file_path = os.path.join(root_dir, file_path)
                     else:
                         expect(False,
                                "Bad input_pathname value: {}.".format(input_pathname))
                     # Write to the input data list.
                     input_data_list.write("{} = {}\n".format(variable_name, file_path))
Esempio n. 9
0
    def get_resolved_value(self, raw_value):
        """
        A value in the xml file may contain references to other xml
        variables or to environment variables. These are refered to in
        the perl style with $name and $ENV{name}.

        Returns the input with all resolvable references substituted;
        returns None when raw_value is None. References to xml variables
        are resolved recursively, so a referenced value may itself contain
        references. Unresolvable $name references are left in place.

        >>> obj = GenericXML()
        >>> os.environ["FOO"] = "BAR"
        >>> os.environ["BAZ"] = "BARF"
        >>> obj.get_resolved_value("one $ENV{FOO} two $ENV{BAZ} three")
        'one BAR two BARF three'
        """
        logging.debug("raw_value %s" % raw_value)
        # $name -> another xml variable;  $ENV{name} -> environment variable
        reference_re = re.compile(r'\$(\w+)')
        env_ref_re   = re.compile(r'\$ENV\{(\w+)\}')
        item_data = raw_value

        if (item_data is None):
            return None

        # Environment references are substituted first, so the plain $name
        # pattern below never matches the leading "$ENV" of an env reference.
        for m in env_ref_re.finditer(item_data):
            logging.debug("look for "+item_data+ " in env")
            env_var = m.groups()[0]
            # A referenced env var must exist; abort otherwise.
            expect(env_var in os.environ, "Undefined env var '%s'" % env_var)
            item_data = item_data.replace(m.group(), os.environ[env_var])

        for m in reference_re.finditer(item_data):
            var = m.groups()[0]
            logging.debug("find: "+var)
            ref = self.get_value(var)
            if(ref is not None):
                logging.debug("resolve: "+ref)
                # Recurse: the referenced value may itself contain references.
                item_data = item_data.replace(m.group(), self.get_resolved_value(ref))

        return item_data
Esempio n. 10
0
def infer_machine_name_from_tests(testargs):
###############################################################################
    """
    Infer the single machine named by a list of test arguments, or None.

    >>> infer_machine_name_from_tests(["NCK.f19_g16_rx1.A.melvin_gnu"])
    'melvin'
    >>> infer_machine_name_from_tests(["NCK.f19_g16_rx1.A"])
    >>> infer_machine_name_from_tests(["NCK.f19_g16_rx1.A", "NCK.f19_g16_rx1.A.melvin_gnu"])
    'melvin'
    >>> infer_machine_name_from_tests(["NCK.f19_g16_rx1.A.melvin_gnu", "NCK.f19_g16_rx1.A.melvin_gnu"])
    'melvin'
    """
    e3sm_test_suites = get_test_suites()

    found = None
    for raw in testargs:
        # Strip whitespace and a single leading "^" marker before parsing.
        candidate = raw.strip()
        if candidate.startswith("^"):
            candidate = candidate[1:]

        if candidate in e3sm_test_suites:
            # Suite names carry no machine information.
            continue

        test_machine = parse_test_name(candidate)[4]
        if test_machine is None:
            continue

        if found is None:
            found = test_machine
        else:
            # All tests that name a machine must agree on it.
            expect(found == test_machine, "Must have consistent machine '%s' != '%s'" % (found, test_machine))

    return found
Esempio n. 11
0
def get_recommended_test_time(test_full_name):
###############################################################################
    """
    Return the smallest recommended wallclock time among all test suites
    containing this test, or None when no suite recommends one.

    >>> get_recommended_test_time("ERS.f19_g16_rx1.A.melvin_gnu")
    '0:10:00'

    >>> get_recommended_test_time("TESTRUNPASS_P69.f19_g16_rx1.A.melvin_gnu.testmod")
    '0:13:00'

    >>> get_recommended_test_time("PET_Ln20.ne30_ne30.FC5.sandiatoss3_intel.cam-outfrq9s")
    >>>
    """
    _, _, _, _, machine, compiler, _ = CIME.utils.parse_test_name(test_full_name)
    expect(machine is not None, "{} is not a full test name".format(test_full_name))

    best_time = None
    for suite in get_test_suites():
        suite_tests = get_test_suite(suite, machine=machine, compiler=compiler, skip_inherit=True)
        candidate   = get_test_data(suite)[1]
        if test_full_name not in suite_tests or candidate is None:
            continue
        # Keep the smallest recommended time seen so far.
        if best_time is None or convert_to_seconds(candidate) < convert_to_seconds(best_time):
            best_time = candidate

    return best_time
Esempio n. 12
0
File: ldsta.py Progetto: Katetc/cime
 def run_phase(self):
     """
     Run the case, then repeatedly invoke short-term archiving with
     advancing cutoff dates and verify that exactly the expected restart
     dates appear in the archive's rest/ directory.
     """
     archive_dir = self._case.get_value('DOUT_S_ROOT')
     # Start from a clean archive so stale files cannot mask failures.
     if os.path.isdir(archive_dir):
         shutil.rmtree(archive_dir)
     self.run_indv()
     # finished running, so all archive files should exist
     start_date = get_file_date(self._case.get_value('RUN_STARTDATE'))
     rest_dir = os.path.join(archive_dir, 'rest')
     delta_day = datetime.timedelta(1)
     current_date = start_date + delta_day
     # next_datecheck tracks the earliest date not yet verified as archived.
     next_datecheck = current_date
     days_left = self._case.get_value('STOP_N')
     final_date = start_date + delta_day * days_left
     while current_date < final_date:
         logger.info('Testing archiving with last date: {}'.format(current_date))
         current_date_str = '{:04}-{:02}-{:02}'.format(current_date.year,
                                                       current_date.month,
                                                       current_date.day)
         case_st_archive(self._case, last_date_str=current_date_str, copy_only=False)
         archive_dates = [get_file_date(fname)
                          for fname in glob.glob(os.path.join(rest_dir, '*'))]
         # Every date up to the cutoff must have been archived ...
         while next_datecheck <= current_date:
             expect(next_datecheck in archive_dates,
                    'Not all dates generated and/or archived: '
                    + '{} is missing'.format(next_datecheck))
             next_datecheck += delta_day
         # ... and nothing past the cutoff may appear in the archive.
         for date in archive_dates:
             expect(date <= current_date,
                    'Archived date greater than specified by last-date: '
                    + '{}'.format(date))
         # Advance the cutoff by a random 1-3 days (bounded by days_left).
         num_days = random.randint(1, min(3, days_left))
         days_left -= num_days
         current_date += num_days * delta_day
Esempio n. 13
0
def verify_chksum(input_data_root, rundir, filename, isdirectory):
    """
    For file in filename perform a chksum and compare the result to that stored in
    the local checksumfile, if isdirectory chksum all files in the directory of form *.*

    input_data_root -- root directory that relative file names are joined to
    rundir          -- run directory expected to contain the checksum file
    filename        -- file (or directory, see isdirectory) to verify
    isdirectory     -- if True, verify every *.* file under filename
    """
    # Compute the checksum-file path unconditionally: it is also used in the
    # warning message below, which previously raised NameError whenever
    # chksum_hash was already populated (hashfile was only assigned inside
    # the "if not chksum_hash" branch).
    hashfile = os.path.join(rundir, local_chksum_file)
    if not chksum_hash:
        if not os.path.isfile(hashfile):
            logger.warning("Failed to find or download file {}".format(hashfile))
            return

        # Cache the checksum file's contents into the module-level dict.
        with open(hashfile) as fd:
            for line in fd:
                fchksum, fname = line.split()
                if fname in chksum_hash:
                    expect(chksum_hash[fname] == fchksum, " Inconsistent hashes in chksum for file {}".format(fname))
                else:
                    chksum_hash[fname] = fchksum

    if isdirectory:
        filenames = glob.glob(os.path.join(filename, "*.*"))
    else:
        filenames = [filename]
    for fname in filenames:
        # Skip bare names with no directory component.
        if os.sep not in fname:
            continue
        chksum = md5(os.path.join(input_data_root, fname))
        if chksum_hash:
            if fname not in chksum_hash:
                logger.warning("Did not find hash for file {} in chksum file {}".format(filename, hashfile))
            else:
                expect(chksum == chksum_hash[fname],
                       "chksum mismatch for file {} expected {} found {}".
                       format(os.path.join(input_data_root, fname), chksum, chksum_hash[fname]))
Esempio n. 14
0
    def __init__(self, infile=None, files=None, machine=None):
        """
        initialize an object
        if a filename is provided it will be used,
        otherwise if a files object is provided it will be used
        otherwise create a files object from default values
        """

        self.machine_node = None
        self.machine = None
        self.machines_dir = None

        if infile is None:
            if files is None:
                files = Files()
            infile = files.get_value("MACHINES_SPEC_FILE", resolved=False)
            infile = files.get_resolved_value(infile)

        self.machines_dir = os.path.dirname(infile)

        GenericXML.__init__(self, infile)

        # Append the contents of $HOME/.cime/config_machines.xml if it exists
        # This could cause problems if node matches are repeated when only one is expected.
        # os.path.expanduser is used instead of os.environ.get("HOME") so this
        # does not crash with a TypeError when HOME is unset.
        local_config = os.path.join(os.path.expanduser("~"), ".cime", "config_machines.xml")
        logger.debug("Infile: %s" , local_config)
        if os.path.exists(local_config):
            GenericXML.read(self, local_config)

        if machine is None:
            machine = self.probe_machine_name()

        expect(machine is not None, "Could not initialize machine object")
        self.set_machine(machine)
Esempio n. 15
0
def build_libraries(exeroot, caseroot, cimeroot, libroot, mpilib, lid, machines_file):
###############################################################################
    """
    Build the shared CIME libraries (mct, gptl, pio, csm_share) under
    $SHAREDPATH by invoking each library's buildlib script.

    exeroot       -- directory the buildlib scripts are run from
    caseroot      -- case root passed to each buildlib script
    cimeroot      -- CIME root; source of mpi-serial headers when needed
    libroot       -- mpi-serial headers are copied into libroot/include
    mpilib        -- MPI library name; "mpi-serial" triggers the header copy
    lid           -- log id used to name the per-library build logs
    machines_file -- path whose directory contains the buildlib.* scripts

    Returns the list of build-log file paths. Aborts via expect() when any
    library build exits nonzero.
    """
    if (mpilib == "mpi-serial"):
        for header_to_copy in glob.glob(os.path.join(cimeroot, "externals/mct/mpi-serial/*.h")):
            shutil.copy(header_to_copy, os.path.join(libroot, "include"))

    # Ensure the shared lib/include directories exist before any build runs.
    sharedpath = os.environ["SHAREDPATH"]
    shared_lib = os.path.join(sharedpath, "lib")
    shared_inc = os.path.join(sharedpath, "include")
    for shared_item in [shared_lib, shared_inc]:
        if (not os.path.exists(shared_item)):
            os.makedirs(shared_item)

    libs = ["mct", "gptl", "pio", "csm_share"]
    logs = []

    for lib in libs:
        full_lib_path = os.path.join(sharedpath, lib)
        if (not os.path.exists(full_lib_path)):
            os.makedirs(full_lib_path)

        # Record the full build environment at the top of the log for
        # later debugging; the build output is appended below.
        file_build = os.path.join(sharedpath, "%s.bldlog.%s" % (lib, lid))
        with open(file_build, "w") as fd:
            fd.write("Current env:\n%s" % "\n".join(["  %s = %s" % (env, os.environ[env]) for env in sorted(os.environ)]))

        # Each library is built by its own buildlib.<lib> script.
        my_file = os.path.join(os.path.dirname(machines_file), "buildlib.%s" % lib)
        stat = run_cmd("%s %s %s >> %s 2>&1" %
                       (my_file, sharedpath, caseroot, file_build),
                       from_dir=exeroot,
                       ok_to_fail=True, verbose=True)[0]
        expect(stat == 0, "ERROR: buildlib.%s failed, cat %s" % (lib, file_build))
        logs.append(file_build)

    return logs
Esempio n. 16
0
def wait_for_tests_impl(test_paths, no_wait=False, check_throughput=False, check_memory=False, ignore_namelists=False, ignore_memleak=False):
###############################################################################
    """
    Spawn one watcher thread per test path, wait for them all to finish,
    and collect their results.

    Returns a dict mapping test name -> (test path, test status). Raises
    SystemExit when the same test name is reported with conflicting
    statuses from different paths.
    """
    results = Queue.Queue()

    threads = []
    for test_path in test_paths:
        t = threading.Thread(target=wait_for_test, args=(test_path, results, not no_wait, check_throughput, check_memory, ignore_namelists, ignore_memleak))
        t.daemon = True
        t.start()
        threads.append(t)

    # Join the threads we spawned rather than polling threading.active_count(),
    # which would hang forever (or exit too early) if any unrelated thread
    # happened to be running in this process.
    for t in threads:
        t.join()

    test_results = {}
    completed_test_paths = []
    while not results.empty():
        test_name, test_path, test_status = results.get()
        if test_name in test_results:
            prior_path, prior_status = test_results[test_name]
            if test_status == prior_status:
                # Same outcome twice is suspicious but tolerable.
                logging.warning("Test name '%s' was found in both '%s' and '%s'" %
                        (test_name, test_path, prior_path))
            else:
                raise SystemExit("Test name '%s' was found in both '%s' and '%s' with different results" %
                                 (test_name, test_path, prior_path))

        test_results[test_name] = (test_path, test_status)
        completed_test_paths.append(test_path)

    expect(set(test_paths) == set(completed_test_paths),
           "Missing results for test paths: %s" % (set(test_paths) - set(completed_test_paths)) )

    return test_results
Esempio n. 17
0
    def set_default_value(self, vid, attributes=None):
        """
        Set the value of an entry to the default value for that entry
        vid can be an xml node pointer or a string identifier of a node

        attributes -- optional dict used to select among multiple <value>
                      nodes; each dict value is treated as a regex matched
                      against the node's attribute text.

        Returns the text of the value that was set, or None when no
        matching entry or default value was found.
        """
        value = None
        if not isinstance(vid, str):
            # vid is already a node; recover its string id for messages.
            node = vid
            vid = node.attrib["id"]
        else:
            nodes = self.get_node("entry", {"id":vid})
            if nodes is None:
                return
            expect(len(nodes) == 1, "More than one match found for id " + vid)
            node = nodes[0]

        valnodes = self.get_node("value", root=node)
        # Guard on attributes: iterating with the default of None previously
        # raised "TypeError: argument of type 'NoneType' is not iterable".
        if valnodes is not None and attributes is not None:
            for valnode in valnodes:
                for att in valnode.attributes:
                    if att.key in attributes:
                        if re.search(attributes[att.key], att.text):
                            value = valnode.text
                            logger.info("id %s value %s" % (vid, valnode.text))

        # Fall back to the entry's declared default_value node.
        if value is None:
            value = self.get_node("default_value", root=node)
        if value is not None:
            node.set("value", value[0].text)
            return value[0].text
Esempio n. 18
0
def case_test(case, testname=None, reset=False):
    """
    Run (or reset) the system test associated with a case.

    case     -- case object; must provide get_value
    testname -- test to run; defaults to the case's TESTCASE value
    reset    -- if True, restore the test to its initial conditions for the
                RUN phase and return True without running

    Returns True/False for success/failure. A failure to construct the test
    object marks the RUN phase FAIL in TestStatus and returns False.
    """
    if testname is None:
        testname = case.get_value('TESTCASE')

    expect(testname is not None, "testname argument not resolved")
    logging.warning("Running test for {}".format(testname))

    _set_up_signal_handlers()

    try:
        # The following line can throw exceptions if the testname is
        # not found or the test constructor throws. We need to be
        # sure to leave TestStatus in the appropriate state if that
        # happens.
        test = find_system_test(testname, case)(case)
    except:
        caseroot = case.get_value("CASEROOT")
        with TestStatus(test_dir=caseroot) as ts:
            ts.set_status(RUN_PHASE, TEST_FAIL_STATUS, comments="failed to initialize")
        append_testlog(str(sys.exc_info()[1]))
        return False

    if reset:
        logger.info("Reset test to initial conditions and exit")
        # pylint: disable=protected-access
        test._resetup_case(RUN_PHASE)
        return True
    success = test.run()

    return success
Esempio n. 19
0
    def get_value(self, name, resolved=True):
        """
        Look up the value of a field from the config_machines.xml file.
        COMPILER and MPILIB fall back to the defaults from the COMPILERS
        and MPILIBS lists; anything else is looked up under the machine
        node, then via the generic XML lookup.
        """
        expect(self.machine is not None, "Machine object has no machine defined")

        value = None
        # Dispatch the two special names to their default-value helpers.
        if name == "COMPILER":
            value = self.get_default_compiler()
        elif name == "MPILIB":
            value = self.get_default_MPIlib()
        else:
            matches = self.get_node(name, root=self.machine)
            if matches:
                first = matches[0]
                expect(first is not None, "No match found for %s in machine %s" % (name, self.name))
                value = first.text

        # Fall back to the generic XML lookup when nothing matched above.
        if value is None:
            value = GenericXML.get_value(self, name)

        return self.get_resolved_value(value) if resolved else value
Esempio n. 20
0
File: case.py Progetto: ekluzek/cime
    def get_env(self, short_name):
        """Return the env_<short_name>.xml file object for this case.

        Aborts via expect() when no matching env file is present.
        """
        target = "env_%s.xml" % (short_name)
        for candidate in self._files:
            if os.path.basename(candidate.filename) == target:
                return candidate

        expect(False, "Could not find object for %s in case" % target)
Esempio n. 21
0
 def compare_xml(self, other, root=None, otherroot=None):
     """
     Compare this file's <entry> nodes against `other` and return a dict
     of differences: {id (or "id:attribs"): [value here, value in other]}.

     root/otherroot optionally restrict the comparison to subtrees; they
     must be supplied together. Aborts via expect() when an entry present
     here is missing from `other`.
     """
     xmldiffs = {}
     if root is not None:
         expect(otherroot is not None," inconsistant request")
     f1nodes = self.scan_children("entry", root=root)
     for node in f1nodes:
         vid = self.get(node, "id")
         logger.debug("Compare vid {}".format(vid))
         f2match = other.scan_optional_child("entry", attributes={"id":vid},root=otherroot)
         expect(f2match is not None,"Could not find {} in Locked file".format(vid))
         if node != f2match:
             # Simple case: the id resolves directly to a scalar value.
             f1val = self.get_value(vid, resolved=False)
             if f1val is not None:
                 f2val = other.get_value(vid, resolved=False)
                 if f1val != f2val:
                     xmldiffs[vid] = [f1val, f2val]
             elif hasattr(self, "_components"):
                 # Otherwise try the per-component variants of the id
                 # (e.g. "<vid>_<comp>").
                 # pylint: disable=no-member
                 for comp in self._components:
                     f1val = self.get_value("{}_{}".format(vid,comp), resolved=False)
                     if f1val is not None:
                         f2val = other.get_value("{}_{}".format(vid,comp), resolved=False)
                         if f1val != f2val:
                             xmldiffs[vid] = [f1val, f2val]
                     else:
                         # Last resort: compare the raw <value> children,
                         # matching them up by their attributes.
                         if node != f2match:
                             f1value_nodes = self.get_children("value", root=node)
                             for valnode in f1value_nodes:
                                 f2valnodes = other.get_children("value", root=f2match, attributes=self.attrib(valnode))
                                 for f2valnode in f2valnodes:
                                     if self.attrib(valnode) is None and self.attrib(f2valnode) is None or \
                                        self.attrib(f2valnode) == self.attrib(valnode):
                                         if other.get_resolved_value(self.text(f2valnode)) != self.get_resolved_value(self.text(valnode)):
                                             xmldiffs["{}:{}".format(vid, self.attrib(valnode))] = [self.text(valnode), self.text(f2valnode)]
     return xmldiffs
Esempio n. 22
0
def _parse_timing_files(timing_files):
    """
    Parse every file in the list for timing information and return a data dict.

    The result maps each component key to a dict holding the component
    'name' plus parallel lists 'cost', 'ntasks', 'nthrds' accumulated across
    all timing files. A duplicate ntasks sample for a key is logged and
    ignored (first value wins); an inconsistent component name aborts.
    """
    data = {}
    for timing_file in timing_files:
        timing = _read_timing_file(timing_file)
        logger.debug('ntasks: %s' % "; ".join([str(k) + ":" +
                                               str(timing[k]['ntasks'])
                                               for k in timing.keys()]))
        logger.debug('cost: %s' % "; ".join([str(k) + ":" +
                                             str(timing[k]['cost'])
                                             for k in timing.keys()]))
        for key in timing:
            if key not in data:
                data[key] = {'cost':[], 'ntasks':[], 'nthrds':[]}

            if timing[key]['ntasks'] in data[key]['ntasks']:
                # Keep the first sample seen for this ntasks count.
                logger.warning('WARNING: duplicate timing run data in %s '
                               'for %s ntasks=%d.', timing_file, key,
                               timing[key]['ntasks'])
                index = data[key]['ntasks'].index(timing[key]['ntasks'])
                logger.warning('Existing value: cost=%s. Ignoring new value: '
                               'cost=%s', data[key]['cost'][index],
                               timing[key]['cost'])
            elif 'name' in data[key] and data[key]['name'] != timing[key]['name']:
                # Typo fix in the error message: "inconsistant" -> "inconsistent".
                expect(False, "Timing files have inconsistent model components {} has {} vs {}"
                       .format(key, data[key]['name'], timing[key]['name']))
            else:
                data[key]['name'] = timing[key]['name']
                data[key]['cost'].append(timing[key]['cost'])
                data[key]['ntasks'].append(timing[key]['ntasks'])
                data[key]['nthrds'].append(timing[key]['nthrds'])
    return data
Esempio n. 23
0
def run_gmake(case, compclass, libroot, libname="", user_cppdefs=""):
###############################################################################
    """
    Invoke gmake's 'complib' target to build a component library.

    The archive is lib<libname>.a when libname is given, otherwise
    lib<compclass>.a, and is placed under libroot. Aborts on a nonzero
    gmake return code.
    """
    caseroot  = case.get_value("CASEROOT")
    casetools = case.get_value("CASETOOLS")
    gmake_j   = case.get_value("GMAKE_J")
    gmake     = case.get_value("GMAKE")
    mach      = case.get_value("MACH")

    # Pick the archive stem: explicit libname wins, else the component class.
    stem = libname if libname else compclass
    complib = os.path.join(libroot, "lib%s.a" % stem)

    makefile = os.path.join(casetools, "Makefile")
    macfile  = os.path.join(caseroot, "Macros.%s" % mach)

    # Assemble the command; the USER_CPPDEFS clause is appended only when set.
    cmd = "%s complib -j %d MODEL=%s COMPLIB=%s -f %s MACFILE=%s " \
        % (gmake, gmake_j, compclass, complib, makefile, macfile)
    if user_cppdefs:
        cmd += "USER_CPPDEFS=%s" % user_cppdefs

    rc, out, err = run_cmd(cmd, ok_to_fail=True)
    expect(rc == 0, "Command %s failed rc=%d\nout=%s\nerr=%s" % (cmd, rc, out, err))

    logger.info("Command %s completed with output %s\nerr %s" ,cmd, out, err)
Esempio n. 24
0
    def get_resolved_value(self, raw_value):
        """
        A value in the xml file may contain references to other xml
        variables or to environment variables. These are referred to in
        the perl style with $name and $ENV{name}.

        >>> obj = GenericXML()
        >>> os.environ["FOO"] = "BAR"
        >>> os.environ["BAZ"] = "BARF"
        >>> obj.get_resolved_value("one $ENV{FOO} two $ENV{BAZ} three")
        'one BAR two BARF three'
        >>> obj.get_resolved_value("2 + 3 - 1")
        '4'
        >>> obj.get_resolved_value("0001-01-01")
        '0001-01-01'
        """
        logger.debug("raw_value %s" % raw_value)
        reference_re = re.compile(r'\${?(\w+)}?')
        env_ref_re   = re.compile(r'\$ENV\{(\w+)\}')
        # Bug fix: the original class [+-/*] is a character *range* from '+'
        # to '/', which accidentally also matched ',' and '.'. Putting '-'
        # first makes it literal so only the four arithmetic operators match.
        math_re = re.compile(r'\s[-+/*]\s')
        item_data = raw_value

        if item_data is None:
            return None

        if type(item_data) is not str:
            return item_data

        # Resolve $ENV{NAME} references; an unset NAME is a hard error.
        for m in env_ref_re.finditer(item_data):
            logger.debug("look for %s in env" % item_data)
            env_var = m.groups()[0]
            expect(env_var in os.environ, "Undefined env var '%s'" % env_var)
            item_data = item_data.replace(m.group(), os.environ[env_var])

        # Resolve $NAME / ${NAME} references recursively via get_value,
        # then special-cased roots, then the process environment.
        for m in reference_re.finditer(item_data):
            var = m.groups()[0]
            logger.debug("find: %s" % var)
            ref = self.get_value(var)
            if ref is not None:
                logger.debug("resolve: " + str(ref))
                item_data = item_data.replace(m.group(), self.get_resolved_value(str(ref)))
            elif var == "CIMEROOT":
                cimeroot = get_cime_root()
                item_data = item_data.replace(m.group(), cimeroot)
            elif var == "SRCROOT":
                srcroot = os.path.join(get_cime_root(),"..")
                item_data = item_data.replace(m.group(), srcroot)
            elif var in os.environ:
                # this is a list of suppressed warnings (things normally expected to be resolved in env)
                if var not in ("USER",):
                    # Consistency fix: use the module logger like the rest of
                    # this method instead of the deprecated logging.warn.
                    logger.warning("Resolved from env: " + var)
                item_data = item_data.replace(m.group(), os.environ[var])

        # If the value looks like arithmetic, try to evaluate it.
        # NOTE(review): eval on config text is a code-execution risk if the
        # XML were untrusted; values here come from the model's own config files.
        if math_re.search(item_data):
            try:
                tmp = eval(item_data)
            except Exception:
                # Not actually arithmetic (e.g. a date string); keep raw text.
                # Narrowed from a bare except so KeyboardInterrupt propagates.
                tmp = item_data
            item_data = str(tmp)

        return item_data
Esempio n. 25
0
    def __init__(self, infile=None):
        """
        Initialize an object.

        With infile=None the object starts empty. If infile names a
        readable file it is parsed; otherwise a fresh XML tree is created
        whose <file> element id is the file's basename.
        """

        logger.debug("Initializing %s" , infile)
        self.tree = None

        # Idiom fix: compare to None with 'is', not '==' (PEP 8).
        if infile is None:
            # if file is not defined just return
            self.filename = None
            return

        if os.path.isfile(infile) and os.access(infile, os.R_OK):
            # If file is defined and exists, read it
            self.filename = infile
            self.read(infile)
        else:
            # if the file does not exist create a root xml element
            # and set its id to the file's basename

            logger.debug("File %s does not exist." , infile)
            expect("$" not in infile,"File path not fully resolved %s"%infile)

            self.filename = infile
            root = ET.Element("xml")
            self.root = ET.SubElement(root, "file")
            self.root.set("id", os.path.basename(infile))
            self.tree = ET.ElementTree(root)
Esempio n. 26
0
def check_all_input_data(self, protocol=None, address=None, input_data_root=None, data_list_dir="Buildconf",
                         download=True, chksum=False):
    """
    Read through all files of the form *.input_data_list in the data_list_dir
    directory. Each lists the input and boundary files needed by a model
    component. Confirm each is available under input_data_root and, if not,
    optionally download it from a server at address using protocol, performing
    a chksum on downloaded files.
    """
    common = dict(input_data_root=input_data_root, data_list_dir=data_list_dir, chksum=chksum)

    if protocol is not None and address is not None:
        # An explicit server was given: query (and possibly download from) it directly.
        success = self.check_input_data(protocol=protocol, address=address, download=download, **common)
    else:
        if chksum:
            _download_checksum_file(self.get_value("RUNDIR"))

        # First pass: see what is already available locally, no downloading.
        success = self.check_input_data(protocol=protocol, address=address, download=False, **common)
        if download and not success:
            if not chksum:
                _download_checksum_file(self.get_value("RUNDIR"))
            success = _downloadfromserver(self, input_data_root, data_list_dir)

    expect(not download or (download and success), "Could not find all inputdata on any server")
    self.stage_refcase(input_data_root=input_data_root, data_list_dir=data_list_dir)
    return success
Esempio n. 27
0
def _build_usernl_files(case, model, comp):
###############################################################################
    """
    Create user_nl_xxx files for a component; expects cwd is caseroot.

    For the coupler a single user_nl_cpl is copied; for other components one
    file per instance is created (user_nl_xxx_0001, ...) when NINST > 1.
    Existing files are never overwritten.
    """
    model = model.upper()
    model_file = case.get_value("CONFIG_%s_FILE" % model)
    model_dir = os.path.dirname(model_file)

    expect(os.path.isdir(model_dir),
           "cannot find cime_config directory %s for component %s" % (model_dir, comp))

    if comp == "cpl":
        if not os.path.exists("user_nl_cpl"):
            shutil.copy(os.path.join(model_dir, "user_nl_cpl"), ".")
    else:
        ninst = case.get_value("NINST_%s" % model)
        nlfile = "user_nl_%s" % comp
        model_nl = os.path.join(model_dir, nlfile)
        if os.path.exists(model_nl):
            if ninst > 1:
                # Py3 compatibility fix: xrange does not exist in Python 3;
                # range works in both (ninst is small, so py2 list cost is nil).
                for inst_counter in range(1, ninst+1):
                    case_nlfile = "%s_%04d" % (nlfile, inst_counter)
                    if not os.path.exists(case_nlfile):
                        shutil.copy(model_nl, case_nlfile)
            else:
                if not os.path.exists(nlfile):
                    shutil.copy(model_nl, nlfile)
Esempio n. 28
0
def build_xcpl_nml(case, caseroot, compname):
###############################################################################
    """
    Write the <compname>_in namelist file(s) for a dead (x) component into
    RUNDIR, one file per instance. The file records grid dimensions,
    decomposition type, and per-component extras (e.g. xrof flood flag).
    """
    compclasses = case.get_values("COMP_CLASSES")
    # Bug fix: the original iterated with 'compclass' itself, which left it
    # bound to the *last* class when nothing matched, so the expect() guard
    # below could never fire. Assign only on an actual match.
    compclass = None
    for candidate in compclasses:
        if case.get_value("COMP_%s"%candidate) == compname:
            compclass = candidate
            break
    expect(compclass is not None,
           "Could not identify compclass for compname %s"%compname)
    rundir = case.get_value("RUNDIR")
    ninst  = case.get_value("NINST_%s" % compclass.upper())
    nx     = case.get_value("%s_NX" % compclass.upper())
    ny     = case.get_value("%s_NY" % compclass.upper())
    if compname == "xrof":
        flood_mode = case.get_value('XROF_FLOOD_MODE')

    extras = []
    dtype = 1
    npes = 0
    length = 0

    if compname == "xatm":
        if ny == 1:
            dtype = 2
        extras = [["24",
                   "ncpl  number of communications w/coupler per dat"],
                  ["0.0",
                   "simul time proxy (secs): time between cpl comms"]]
    elif compname == "xglc" or compname == "xice":
        dtype = 2
    elif compname == "xlnd":
        dtype = 11
    elif compname == "xocn":
        dtype = 4
    elif compname == "xrof":
        dtype = 11
        if flood_mode == "ACTIVE":
            extras = [[".true.", "flood flag"]]
        else:
            extras = [[".false.", "flood flag"]]

    for i in range(1, ninst + 1):
        # If only 1 file, name is 'compclass_in'
        # otherwise files are 'compclass_in0001', 'compclass_in0002', etc
        if ninst == 1:
            filename = os.path.join(rundir, "%s_in" % compname)
        else:
            filename = os.path.join(rundir, "%s_in_%4.4d" % (compname, i))

        with open(filename, 'w') as infile:
            infile.write("%-20d ! i-direction global dimension\n" % nx)
            infile.write("%-20d ! j-direction global dimension\n" % ny)
            infile.write("%-20d ! decomp_type  1=1d-by-lat, 2=1d-by-lon,"
                         " 3=2d, 4=2d evensquare, 11=segmented\n" % dtype)
            infile.write("%-20d ! num of pes for i (type 3 only)\n" % npes)
            infile.write("%-20d ! length of segments (type 4 only)\n"
                         % length)
            for extra in extras:
                infile.write("%-20s ! %s\n" % (extra[0], extra[1]))
Esempio n. 29
0
    def add_default(self, name, value=None, ignore_abs_path=None):
        """Add a value for the specified variable to the namelist.

        An existing value in the namelist is preserved. Otherwise the
        `value` argument, if provided, is used. The defaults file is always
        consulted as well; null entries in any source are filled in from
        the next (the results are merged arrays).

        Raises an error if no value for the variable is found anywhere.
        """
        # pylint: disable=protected-access
        group = self._definition.get_group(name)

        # Tracks whether any source supplied a value, so we can error out
        # when nothing was found at all.
        found_any = False

        # Start from whatever the namelist already holds.
        merged = self._namelist.get_variable_value(group, name)

        # Merge in the caller-supplied value, if given.
        if value is not None:
            found_any = True
            # if compression were to occur, this is where it does
            merged = merge_literal_lists(self._to_namelist_literals(name, value), merged)

        # Merge in the value from the defaults file, if present.
        default = self.get_default(name, allow_none=True)
        if default is not None:
            found_any = True
            merged = merge_literal_lists(self._to_namelist_literals(name, default), merged)
        expect(found_any, "No default value found for {}.".format(name))

        # For character variables flagged as absolute-path inputs, prepend
        # the input data root directory to each filename entry.
        var_type, _, var_size = self._definition.split_type_string(name)
        if var_type == "character" and ignore_abs_path is None:
            if self._definition.get_input_pathname(name) == 'abs':
                merged = expand_literal_list(merged)
                for i, literal in enumerate(merged):
                    if literal == '':
                        continue
                    file_path = character_literal_to_string(literal)
                    # NOTE - these are hard-coded here and a better way is to make these extensible
                    if file_path in ('UNSET', 'unset', 'idmap', 'null'):
                        continue
                    file_path = self.set_abs_file_path(file_path)
                    if not os.path.exists(file_path):
                        logger.warning("File not found: {} = {}, will attempt to download in check_input_data phase".format(name, literal))
                    merged[i] = string_to_character_literal(file_path)
                merged = compress_literal_list(merged)

        # Commit the merged value to the namelist.
        self._namelist.set_variable_value(group, name, merged, var_size)
Esempio n. 30
0
def compare_files(gold_file, compare_file, case=None):
###############################################################################
    """
    Compare the contents of gold_file and compare_file via compare_data.

    Fix: open the files with context managers so the handles are closed
    promptly instead of being leaked until garbage collection.
    """
    expect(os.path.exists(gold_file), "File not found: %s" % gold_file)
    expect(os.path.exists(compare_file), "File not found: %s" % compare_file)

    with open(gold_file, "r") as gold_fd, open(compare_file, "r") as comp_fd:
        return compare_data(gold_fd.readlines(), comp_fd.readlines(), case)
Esempio n. 31
0
    def __init__(self):
        """
        initialize an object

        >>> files = Files()
        >>> files.get_value('CASEFILE_HEADERS',resolved=False)
        '$CIMEROOT/config/config_headers.xml'
        """
        cimeroot = get_cime_root()
        # Model-specific master list of configuration files.
        infile = os.path.join(cimeroot, "config", get_model(), "config_files.xml")
        expect(os.path.isfile(infile), "Could not find or open file %s" % infile)
        # Validate against the generic entry_id schema.
        schema = os.path.join(cimeroot, "config", "xml_schemas", "entry_id.xsd")
        EntryID.__init__(self, infile, schema=schema)
Esempio n. 32
0
    def _to_namelist_literals(self, name, value):
        """Transform a literal list as needed for `set_value`.

        This is the inverse of `_to_python_value`, except that many of the
        changes have potentially already been performed.
        """
        var_type, _, var_size, = self._definition.split_type_string(name)
        if var_size == 1 and not isinstance(value, list):
            value = [value]
        else:
            # Bug fix: work on a copy so the caller's list is not mutated
            # in place by the normalization below.
            value = list(value)
        for i, scalar in enumerate(value):
            if scalar is None:
                value[i] = ""
            elif var_type == 'character':
                expect(not isinstance(scalar, list), name)
                value[i] = self.quote_string(scalar)
        return compress_literal_list(value)
Esempio n. 33
0
    def get_valid_value_string(self, node, value, vid=None, ignore_type=False):
        """
        Convert value to its string form for node, checking the result
        against the node's valid-values list unless it is an unresolved
        $reference. With ignore_type the (string) value passes through.
        """
        valid_values = self._get_valid_values(node)

        # Pass-through mode: the caller asserts the value is already a string.
        if ignore_type:
            expect(isinstance(value, six.string_types),
                   "Value must be type string if ignore_type is true")
            return value

        type_str = self._get_type_info(node)
        str_value = convert_to_string(value, type_str, vid)

        # An unresolved $reference cannot be validated yet, so skip the check.
        if valid_values is not None and not str_value.startswith('$'):
            expect(str_value in valid_values,
                   "Did not find {} in valid values for {}: {}".format(
                       value, vid, valid_values))
        return str_value
Esempio n. 34
0
def handle_conflicts(is_merge=False, auto_conf=False):
###############################################################################
    """
    React to merge conflicts. With auto_conf, attempt automatic resolution
    of the easy conflicts and commit if everything was handled; otherwise
    (or when conflicts remain) abort and ask the user to resolve by hand
    and re-run with --resume.
    """
    banner = "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
    logging.info(banner)
    logging.info("There are conflicts, analyzing...")
    logging.info(banner)

    # Without auto_conf every conflict counts as unresolved.
    remaining_conflicts = handle_easy_conflicts(is_merge) if auto_conf else True
    if remaining_conflicts:
        expect(False, "There are merge conflicts. Please fix, commit, and re-run this tool with --resume")
    else:
        # Typo fix in log message: "resovled" -> "resolved".
        logging.info("All conflicts were automatically resolved, continuing")
        run_cmd_no_fail("git commit --no-edit")

    logging.info(banner)
    logging.info("Automatic conflict resolution complete")
    logging.info(banner)
Esempio n. 35
0
def check_case(self, skip_pnl=False):
    """
    Sanity-check a case before submission: verify locked files, build the
    namelists (unless skip_pnl), confirm all inputdata is available, and
    require that the model build has completed.
    """
    self.check_lockedfiles()
    if not skip_pnl:
        # Must be called before check_all_input_data
        self.create_namelists()

    logger.info(
        "Checking that inputdata is available as part of case submission")
    self.check_all_input_data()

    if self.get_value('COMP_WAV') == 'ww':
        # the ww3 buildnml has dependencies on inputdata so we must run it again
        self.create_namelists(component='WAV')

    expect(self.get_value("BUILD_COMPLETE"),
           "Build complete is "
           "not True please rebuild the model by calling case.build")
    logger.info("Check case OK")
Esempio n. 36
0
 def overwrite_existing_entries(self):
     """
     If two entry nodes share the same id, delete the first (older) one so
     the later entry takes precedence. More than two matches is an error.
     """
     for node in self.get_children("entry"):
         vid = self.get(node, "id")
         samenodes = self.get_nodes_by_id(vid)
         if len(samenodes) > 1:
             # Typo fix in the error message: "matchs" -> "matches".
             expect(
                 len(samenodes) == 2,
                 "Too many matches for id {} in file {}".format(vid, self.filename),
             )
             logger.debug("Overwriting node {}".format(vid))
             # Temporarily lift read-only protection to remove the node,
             # then restore the previous setting.
             read_only = self.read_only
             if read_only:
                 self.read_only = False
             self.remove_child(samenodes[0])
             self.read_only = read_only
Esempio n. 37
0
def check_code(files, num_procs=10, interactive=False):
    ###############################################################################
    """
    Check all python files in the given directory

    Returns True if all files had no problems
    """
    # Resolve user file arguments: a path that exists on disk is used
    # directly; otherwise the repo is searched for files ending the same way.
    files_to_check = []
    if not files:
        # No arguments: check every python file
        files_to_check = get_all_checkable_files()
    else:
        repo_files = run_cmd_no_fail('git ls-files',
                                     from_dir=get_cime_root(),
                                     verbose=False).splitlines()
        for filearg in files:
            if os.path.exists(filearg):
                files_to_check.append(os.path.abspath(filearg))
                continue
            # could have multiple matches
            matches = [rf for rf in repo_files if rf.endswith(filearg)]
            files_to_check.extend(matches)
            if not matches:
                logger.warning(
                    "Could not find file matching argument '%s'" % filearg)

    if "scripts/lib/six.py" in files_to_check:
        files_to_check.remove("scripts/lib/six.py")
        logger.info("Not checking contributed file six.py")

    expect(len(files_to_check) > 0, "No matching files found")

    # No point in using more threads than files
    num_procs = min(num_procs, len(files_to_check))

    pool = ThreadPool(num_procs)
    results = pool.map(lambda x: _run_pylint(x, interactive), files_to_check)
    pool.close()
    pool.join()
    return dict(results)
Esempio n. 38
0
def get_full_test_names(testargs, machine, compiler):
###############################################################################
    """
    Return full test names in the form:
    TESTCASE.GRID.COMPSET.MACHINE_COMPILER.TESTMODS
    Testmods are optional

    Testargs can be categories or test names and support the NOT symbol '^'

    >>> get_full_test_names(["cime_tiny"], "melvin", "gnu")
    ['ERS.f19_g16_rx1.A.melvin_gnu', 'NCK.f19_g16_rx1.A.melvin_gnu']

    >>> get_full_test_names(["cime_tiny", "PEA_P1_M.f45_g37_rx1.A"], "melvin", "gnu")
    ['ERS.f19_g16_rx1.A.melvin_gnu', 'NCK.f19_g16_rx1.A.melvin_gnu', 'PEA_P1_M.f45_g37_rx1.A.melvin_gnu']

    >>> get_full_test_names(['ERS.f19_g16_rx1.A', 'NCK.f19_g16_rx1.A', 'PEA_P1_M.f45_g37_rx1.A'], "melvin", "gnu")
    ['ERS.f19_g16_rx1.A.melvin_gnu', 'NCK.f19_g16_rx1.A.melvin_gnu', 'PEA_P1_M.f45_g37_rx1.A.melvin_gnu']

    >>> get_full_test_names(["cime_tiny", "^NCK.f19_g16_rx1.A"], "melvin", "gnu")
    ['ERS.f19_g16_rx1.A.melvin_gnu']
    """
    expect(machine is not None, "Must define a machine")
    expect(compiler is not None, "Must define a compiler")
    acme_test_suites = get_test_suites()

    tests_to_run = set()
    negations = set()

    # First pass: expand suites and collect '^'-prefixed negations.
    for testarg in testargs:
        # remove any whitespace in name
        testarg = testarg.strip()
        if testarg.startswith("^"):
            negations.add(testarg[1:])
        elif testarg in acme_test_suites:
            tests_to_run.update(get_test_suite(testarg, machine, compiler))
        else:
            tests_to_run.add(CIME.utils.get_full_test_name(testarg, machine=machine, compiler=compiler))

    # Second pass: remove the negated suites/tests.
    for negation in negations:
        if negation in acme_test_suites:
            tests_to_run -= set(get_test_suite(negation, machine, compiler))
        else:
            fullname = CIME.utils.get_full_test_name(negation, machine=machine, compiler=compiler)
            # discard() is a no-op when absent, matching the original's check.
            tests_to_run.discard(fullname)

    # Idiom fix: sorted() already returns a list; list(sorted(...)) was redundant.
    return sorted(tests_to_run)
Esempio n. 39
0
def _interpret_value(value_str, filename):
    ###############################################################################
    """
    Parse a namelist-style value string into a Python value: an OrderedDict
    for "'key -> value'" entries, a list for multiple comma-separated tokens,
    or a single scalar string. Multiplied tokens such as '3*1' are expanded
    so that "nmlvalue = 1,1,1" and "nmlvalue = 3*1" compare equal.
    """
    comma_re = re.compile(r'\s*,\s*')
    dict_re = re.compile(r"^'(\S+)\s*->\s*(\S+)'")

    value_str = _normalize_lists(value_str)

    tokens = [
        item.strip() for item in comma_re.split(value_str)
        if item.strip() != ""
    ]
    if "->" in value_str:
        # dict
        rv = OrderedDict()
        for token in tokens:
            m = dict_re.match(token)
            expect(
                m is not None,
                "In file '{}', Dict entry '{}' does not match expected format".
                format(filename, token))
            k, v = m.groups()
            rv[k] = _interpret_value(v, filename)

        return rv
    else:
        new_tokens = []
        for token in tokens:
            if "*" in token:
                try:
                    # the following ensure that the following to namelist settings trigger a match
                    # nmlvalue = 1,1,1 versus nmlvalue = 3*1
                    sub_tokens = [item.strip() for item in token.split("*")]
                    expect(
                        len(sub_tokens) == 2,
                        "Incorrect usage of multiplication in token '{}'".
                        format(token))
                    new_tokens.extend([sub_tokens[1]] * int(sub_tokens[0]))
                except Exception:
                    # User probably did not intend to use the * operator as a
                    # namelist multiplier. Narrowed from a bare except so that
                    # KeyboardInterrupt/SystemExit still propagate.
                    new_tokens.append(token)
            else:
                new_tokens.append(token)

        if "," in value_str or len(new_tokens) > 1:
            return new_tokens
        else:
            return new_tokens[0]
Esempio n. 40
0
    def run_phase(self):  # pylint: disable=arguments-differ
        ###########################################################################
        """
        Run both cases, then cprnc-compare the restart files written by the
        pause/resume machinery in case2 against case1's restarts for every
        PAUSE_ACTIVE component. Whether they should match is determined by
        DESP_MODE ("NOCHANGE" => must match).
        """
        self._activate_case2()
        should_match = (self._case.get_value("DESP_MODE") == "NOCHANGE")
        SystemTestsCompareTwo.run_phase(self, success_change=not should_match)
        # Look for expected coupler restart files
        logger = logging.getLogger(__name__)
        self._activate_case1()
        rundir1 = self._case.get_value("RUNDIR")
        self._activate_case2()
        rundir2 = self._case.get_value("RUNDIR")
        compare_ok = True
        multi_driver = self._case.get_value("MULTI_DRIVER")
        comps = self._case.get_values("COMP_CLASSES")
        for comp in comps:
            if not self._case.get_value("PAUSE_ACTIVE_{}".format(comp)):
                continue
            if comp == "CPL":
                if multi_driver:
                    ninst = self._case.get_value("NINST_MAX")
                else:
                    ninst = 1
            else:
                ninst = self._case.get_value("NINST_{}".format(comp))

            comp_name = self._case.get_value('COMP_{}'.format(comp))
            for index in range(1, ninst + 1):
                if ninst == 1:
                    rname = '*.{}.r.*'.format(comp_name)
                else:
                    rname = '*.{}_{:04d}.r.*'.format(comp_name, index)

                restart_files_1 = glob.glob(os.path.join(rundir1, rname))
                expect((len(restart_files_1) > 0),
                       "No case1 restart files for {}".format(comp))
                # case2's pauses should have written more restarts than case1.
                restart_files_2 = glob.glob(os.path.join(rundir2, rname))
                expect((len(restart_files_2) > len(restart_files_1)),
                       "No pause (restart) files found in case2 for {}".format(
                           comp))
                # Do cprnc of restart files.
                rfile1 = restart_files_1[len(restart_files_1) - 1]
                # rfile2 has to match rfile1 (same time string)
                parts = os.path.basename(rfile1).split(".")
                glob_str = "*.{}".format(".".join(parts[len(parts) - 4:]))
                restart_files_2 = glob.glob(os.path.join(rundir2, glob_str))
                # Bug fix: the message was never formatted -- glob_str was
                # being passed as a stray extra positional argument to expect().
                expect((len(restart_files_2) == 1),
                       "Missing case2 restart file, {}".format(glob_str))
                rfile2 = restart_files_2[0]
                ok = cprnc(comp, rfile1, rfile2, self._case, rundir2)[0]
                logger.warning("CPRNC result for {}: {}".format(
                    os.path.basename(rfile1), "PASS" if
                    (ok == should_match) else "FAIL"))
                compare_ok = compare_ok and (should_match == ok)

        expect(
            compare_ok, "Not all restart files {}".format(
                "matched" if should_match else "failed to match"))
Esempio n. 41
0
    def get_workflow_jobs(self, machine, workflowid="default"):
        """
        Return a list of jobs with the first element the name of the script
        and the second a dict of qualifiers for the job

        Follows "prepend"/"append" attributes to chain workflow_jobs nodes;
        MACH-specific runtime_parameters override the generic ones.
        """
        jobs = []
        bnodes = []
        findmore = True
        prepend = False
        # Collect the chain of workflow_jobs nodes in execution order.
        while findmore:
            bnode = self.get_optional_child("workflow_jobs", attributes={"id":workflowid})
            expect(bnode,"No workflow {} found in file {}".format(workflowid, self.filename))
            if prepend:
                bnodes = [bnode] + bnodes
            else:
                bnodes.append(bnode)
            prepend = False
            workflow_attribs = self.attrib(bnode)
            if "prepend" in workflow_attribs:
                workflowid = workflow_attribs["prepend"]
                prepend = True
            elif "append" in workflow_attribs:
                workflowid = workflow_attribs["append"]
            else:
                findmore = False
        for bnode in bnodes:
            for jnode in self.get_children(root=bnode):
                if self.name(jnode) == "job":
                    name = self.get(jnode, "name")
                    jdict = {}
                    for child in self.get_children(root=jnode):
                        if self.name(child) == "runtime_parameters":
                            attrib = self.attrib(child)
                            if attrib and attrib == {'MACH' : machine}:
                                # Machine-specific parameters always win.
                                for rtchild in self.get_children(root=child):
                                    jdict[self.name(rtchild)] = self.text(rtchild)
                            elif not attrib:
                                # Generic parameters only fill gaps.
                                for rtchild in self.get_children(root=child):
                                    if self.name(rtchild) not in jdict:
                                        jdict[self.name(rtchild)] = self.text(rtchild)

                        else:
                            jdict[self.name(child)] = self.text(child)

                    # Bug fix: append must be inside the "job" branch.
                    # Previously a non-job sibling node re-appended the
                    # previous job, or raised NameError if it came first.
                    jobs.append((name, jdict))

        return jobs
Esempio n. 42
0
def _main_func(options, work_dir):
###############################################################################

    """Construct grids html from an XML file.

    Reads the text output of `query_config --grids --long`, groups the lines
    by their "alias:" headers into all_grids, renders the jinja template, and
    writes the resulting html file. Returns 0 on success.
    """
    # Initialize variables for the html template
    all_grids = dict()
    model_version = options.version[0]

    # read in txtfile with the grids definitions
    txtfile = options.txtfile[0]
    expect((txtfile),
           'Cannot find query_config --grids --long output text file %s' %txtfile)

    # read the grids file looking for aliases and load the all_grids dict
    key = None
    with open(txtfile, 'r') as f:
        for line in f:
            if "alias:" in line:
                line = line.rstrip('\n')
                alias = line.split(':')
                alias.pop(0)
                key = alias[0].lstrip()
                all_grids[key] = ''
            elif key is not None:
                # Bug fix: lines before the first "alias:" previously raised
                # a NameError (key undefined); they are now skipped.
                all_grids[key] += line
    # The redundant f.close() after the with-block was removed: the context
    # manager already closes the file.

    # load up jinja template
    templateLoader = jinja2.FileSystemLoader( searchpath='{0}/templates'.format(work_dir) )
    templateEnv = jinja2.Environment( loader=templateLoader )

    # TODO - get the cesm_version for the CIME root
    tmplFile = 'griddef2html.tmpl'
    template = templateEnv.get_template( tmplFile )
    templateVars = { 'today'              : _now,
                     'model_version'      : model_version,
                     'all_grids'          : all_grids }

    # render the template
    grid_tmpl = template.render( templateVars )

    # write the output file
    with open( options.htmlfile[0], 'w') as html:
        html.write(grid_tmpl)

    return 0
Esempio n. 43
0
def handle_easy_conflicts(is_merge):
    ###############################################################################
    """Try to auto-resolve each git merge conflict; return the files left unresolved."""
    unresolved = run_cmd_no_fail(
        "git diff --name-only --diff-filter=U").splitlines()
    if not unresolved:
        expect(
            False,
            "Merge appears to have failed for reasons other than merge conflicts"
        )

    # Collect the paths that handle_easy_conflict could not deal with
    remaining = [path for path in unresolved
                 if not handle_easy_conflict(path, is_merge)]

    return remaining
Esempio n. 44
0
def wait_for_tests_impl(test_paths,
                        no_wait=False,
                        check_throughput=False,
                        check_memory=False,
                        ignore_namelists=False,
                        ignore_memleak=False,
                        no_run=False):
    ###############################################################################
    """Watch each test path in its own thread and collect all results.

    Returns a dict mapping test name -> (test_path, test_status, test_phase).
    Raises CIMEError when the same test name appears under two paths with
    different results; aborts via expect() if any path produced no result.
    """
    results = queue.Queue()

    # ROBUSTNESS: keep our own thread handles and join them, instead of
    # polling threading.active_count() in a sleep loop, which could hang
    # forever (or return too early) if any unrelated thread exists in
    # the process.
    watchers = []
    for test_path in test_paths:
        t = threading.Thread(target=wait_for_test,
                             args=(test_path, results, not no_wait,
                                   check_throughput, check_memory,
                                   ignore_namelists, ignore_memleak, no_run))
        t.daemon = True
        t.start()
        watchers.append(t)

    for t in watchers:
        t.join()

    test_results = {}
    completed_test_paths = []
    while not results.empty():
        test_name, test_path, test_status, test_phase = results.get()
        if test_name in test_results:
            prior_path, prior_status, _ = test_results[test_name]
            if test_status == prior_status:
                # Duplicate entry with the same outcome is only worth a warning
                logging.warning(
                    "Test name '{}' was found in both '{}' and '{}'".format(
                        test_name, test_path, prior_path))
            else:
                raise CIMEError(
                    "Test name '{}' was found in both '{}' and '{}' with different results"
                    .format(test_name, test_path, prior_path))

        expect(test_name is not None,
               "Failed to get test name for test_path: {}".format(test_path))
        test_results[test_name] = (test_path, test_status, test_phase)
        completed_test_paths.append(test_path)

    expect(
        set(test_paths) == set(completed_test_paths),
        "Missing results for test paths: {}".format(
            set(test_paths) - set(completed_test_paths)))
    return test_results
Esempio n. 45
0
def update_acme_tests(xml_file, categories, platform=None):
    ###############################################################################
    """Regenerate the ACME test entries in xml_file for each category.

    For every category, existing acme entries are removed via the
    manage_testlists tool and a freshly generated list is added.  When
    platform is given (as "machine,compiler") only that platform is used;
    otherwise all supported platforms found in the file are used.
    """
    # Retrieve all supported ACME platforms, killing the third entry (MPI lib)
    # for the moment.
    supported_platforms = [p[:2] for p in find_all_supported_platforms()]

    # Fish all of the existing machine/compiler combos out of the XML file.
    if (platform is not None):
        platforms = [tuple(platform.split(","))]
    else:
        platforms = find_all_platforms(xml_file)
        # Prune the non-supported platforms from our list.
        for p in platforms:
            if p not in supported_platforms:
                logging.info("pruning unsupported platform %s" % repr(p))
        platforms = [p for p in platforms if p in supported_platforms]

    manage_xml_entries = os.path.join(CIME.utils.get_cime_root(), "scripts",
                                      "manage_testlists")

    expect(
        os.path.isfile(manage_xml_entries),
        "Couldn't find manage_testlists, expected it to be here: '%s'" %
        manage_xml_entries)

    for category in categories:
        # Remove any existing acme test category from the file.
        if (platform is None):
            run_cmd_no_fail(
                "%s -model acme -component allactive -removetests -category %s"
                % (manage_xml_entries, category))
        else:
            run_cmd_no_fail(
                "%s -model acme -component allactive -removetests -category %s -machine %s -compiler %s"
                % (manage_xml_entries, category, platforms[0][0],
                   platforms[0][1]))

        # Generate a list of test entries corresponding to our suite at the top
        # of the file.
        new_test_file = generate_acme_test_entries(category, platforms)
        run_cmd_no_fail(
            "%s -model acme -component allactive -addlist -file %s -category %s"
            % (manage_xml_entries, new_test_file, category))
        os.unlink(new_test_file)

    # BUGFIX: the Python-2-only statement 'print "SUCCESS"' is a SyntaxError
    # under Python 3; the call form prints identically on both.
    print("SUCCESS")
Esempio n. 46
0
File: pre.py Progetto: lxu16/origin
    def run_phase(self):
        ###########################################################################
        """Run both PRE cases and compare coupler restart files.

        After the two-case run, finds restart files for every paused
        component in both run directories and cprnc-compares the latest
        case1 restart against its matching (same time string) case2
        restart.  When DESP_MODE == "NOCHANGE" the files must match;
        otherwise they must differ.  Fails via expect() on any mismatch
        with the expectation.
        """
        SystemTestsCompareTwo.run_phase(self)
        # Look for expected coupler restart files
        logger = logging.getLogger(__name__)
        self._activate_case1()
        rundir1 = self._case.get_value("RUNDIR")
        self._activate_case2()
        rundir2 = self._case.get_value("RUNDIR")
        should_match = (self._case.get_value("DESP_MODE") == "NOCHANGE")
        compare_ok = True
        pause_comps = self._case.get_value("PAUSE_COMPONENT_LIST")
        expect((pause_comps != 'none'),
               "Pause/Resume (PRE) test has no pause components")
        if pause_comps == 'all':
            pause_comps = self._case.get_values("COMP_CLASSES")
        else:
            pause_comps = pause_comps.split(':')

        for comp in pause_comps:
            comp_name = self._case.get_value('COMP_%s' % comp.upper())
            rname = '*.%s.r.*' % comp_name
            restart_files_1 = glob.glob(os.path.join(rundir1, rname))
            expect((len(restart_files_1) > 0),
                   "No case1 restart files for %s" % comp)
            restart_files_2 = glob.glob(os.path.join(rundir2, rname))
            expect((len(restart_files_2) > len(restart_files_1)),
                   "No pause (restart) files found in case2 for %s" % comp)
            # Do cprnc of restart files.
            rfile1 = restart_files_1[len(restart_files_1) - 1]
            # rfile2 has to match rfile1 (same time string)
            parts = os.path.basename(rfile1).split(".")
            glob_str = "*.%s" % ".".join(parts[len(parts) - 4:])
            restart_files_2 = glob.glob(os.path.join(rundir2, glob_str))
            # BUGFIX: glob_str was passed as a stray extra positional argument
            # to expect() instead of being %-formatted into the message, so
            # the error text would have contained a literal '%s'.
            expect((len(restart_files_2) == 1),
                   "Missing case2 restart file, %s" % glob_str)
            rfile2 = restart_files_2[0]
            ok, out = cprnc(comp, rfile1, rfile2, self._case, rundir2)  # pylint: disable=unused-variable
            logger.warning("CPRNC result for %s: %s" %
                           (os.path.basename(rfile1), "PASS" if
                            (ok == should_match) else "FAIL"))
            compare_ok = compare_ok and (should_match == ok)

        expect(
            compare_ok, "Not all restart files %s" %
            ("matched" if should_match else "failed to match"))
Esempio n. 47
0
    def __init__(self):
        """
        Create a TestRecord XML object.

        Only valid for the CESM model, since testreport populates the
        CESM test database.
        """
        expect(get_model() == "cesm",
               "testreport is only meant to populate the CESM test database.")
        self.root = None

        GenericXML.__init__(self,
                            root_name_override="testrecord",
                            read_only=False,
                            infile="TestRecord.xml")
Esempio n. 48
0
def get_test_suite(suite,
                   machine=None,
                   compiler=None,
                   skip_inherit=False,
                   skip_tests=None):
    ###############################################################################
    """
    Return a list of FULL test names for a suite.
    """
    expect(suite in get_test_suites(),
           "Unknown test suite: '{}'".format(suite))
    machobj = Machines(machine=machine)
    machine = machobj.get_machine_name()

    if compiler is None:
        compiler = machobj.get_default_compiler()
    expect(machobj.is_valid_compiler(compiler),
           "Compiler {} not valid for machine {}".format(compiler, machine))

    inherits_from, _, _, tests_raw = get_test_data(suite)
    tests = []
    for raw_name in tests_raw:
        expect(isinstance(raw_name, str),
               "Bad type of test {}, expected string".format(raw_name))

        pieces = raw_name.split(".")
        expect(len(pieces) in [3, 4], "Bad test name {}".format(raw_name))

        # A fourth dotted component is a testmods specifier
        if len(pieces) == 4:
            base_name = ".".join(pieces[:-1])
            mods = pieces[-1]
        else:
            base_name = raw_name
            mods = None

        if skip_tests and base_name in skip_tests:
            continue
        tests.append(
            CIME.utils.get_full_test_name(base_name,
                                          machine=machine,
                                          compiler=compiler,
                                          testmods_string=mods))

    if not skip_inherit:
        for parent in inherits_from:
            for inherited in get_test_suite(parent, machine, compiler):
                if inherited not in tests:
                    tests.append(inherited)

    return tests
Esempio n. 49
0
    def get_node(self, nodename, attributes=None, root=None, xpath=None):
        """
        Get an xml element matching nodename with optional attributes.

        Error unless exactly one match.
        """
        matches = self.get_nodes(nodename,
                                 attributes=attributes,
                                 root=root,
                                 xpath=xpath)
        # Exactly one node must match; anything else is a caller error
        expect(len(matches) == 1,
               "Incorrect number of matches, %d, for nodename '%s' and attrs '%s' in file '%s'"
               % (len(matches), nodename, attributes, self.filename))
        return matches[0]
Esempio n. 50
0
def query_grids(files, long_output, xml=False):
    """
    query all grids.
    """
    config_file = files.get_value("GRIDS_SPEC_FILE")
    expect(os.path.isfile(config_file),
           "Cannot find config_file {} on disk".format(config_file))

    grids = Grids(config_file)
    # Three mutually exclusive output modes: raw xml, long listing, short listing
    if xml:
        print("{}".format(grids.get_raw_record().decode("UTF-8")))
        return
    if long_output:
        grids.print_values(long_output=long_output)
        return
    grids.print_values()
Esempio n. 51
0
def case_lt_archive(case):
    ###############################################################################
    """Perform long-term archiving for a case.

    Requires that the run completed successfully and that short-term
    archiving has finished (both recorded in CaseStatus); otherwise
    aborts via expect().  Returns True on success.
    """
    caseroot = case.get_value("CASEROOT")

    # max number of threads needed by scripts
    os.environ["maxthrds"] = "1"

    # document start
    append_status("lt_archive starting", caseroot=caseroot, sfile="CaseStatus")

    # determine status of run and short term archiving
    runComplete = does_file_have_string(os.path.join(caseroot, "CaseStatus"),
                                        "Run SUCCESSFUL")
    staComplete = does_file_have_string(os.path.join(caseroot, "CaseStatus"),
                                        "st_archiving completed")

    # set up environment vars and call the lt_archive.sh script
    if runComplete and staComplete:
        os.environ["DOUT_S_ROOT"] = case.get_value("DOUT_S_ROOT")
        os.environ["DOUT_L_MSROOT"] = case.get_value("DOUT_L_MSROOT")
        os.environ["DOUT_L_HPSS_ACCNT"] = case.get_value("DOUT_L_HPSS_ACCNT")

        lid = time.strftime("%y%m%d-%H%M%S")
        lt_archive = LTArchive(case.get_value("MACH"))
        lt_archive_args = lt_archive.get_lt_archive_args()
        if lt_archive_args is None:
            lt_archive_args = " "

        # BUGFIX: the script path was built as "Toolslt_archive.sh" (the
        # directory separator was lost); join "Tools" and "lt_archive.sh"
        # as separate components.
        # NOTE(review): the args + "ltArchiveStatus." concatenation is kept
        # as-is; it appears to rely on get_lt_archive_args() supplying the
        # redirect operator — confirm against the machine configs.
        cmd = os.path.join(caseroot, "Tools", "lt_archive.sh") \
            + lt_archive_args + "ltArchiveStatus." + lid + " 2>&1"
        run_cmd_no_fail(cmd, from_dir=caseroot)
    else:
        # logger.warn is a deprecated alias of logger.warning
        logger.warning("runComplete %s staComplete %s" %
                       (runComplete, staComplete))
        expect(
            False,
            "lt_archive: run or st_archive is not yet complete or was not successful."
            "Unable to perform long term archive...")

    # document completion
    append_status("lt_archive completed",
                  caseroot=caseroot,
                  sfile="CaseStatus")

    return True
Esempio n. 52
0
def verify_chksum(input_data_root, rundir, filename, isdirectory):
    """
    For file in filename perform a chksum and compare the result to that stored in
    the local checksumfile, if isdirectory chksum all files in the directory of form *.*
    """
    hashfile = os.path.join(rundir, local_chksum_file)
    # Lazily populate the module-level chksum_hash cache from the hash file
    if not chksum_hash:
        if not os.path.isfile(hashfile):
            logger.warning(
                "Failed to find or download file {}".format(hashfile))
            return

        with open(hashfile) as fd:
            lines = fd.readlines()
            for line in lines:
                fchksum, fname = line.split()
                if fname in chksum_hash:
                    # Same file listed twice must carry the same hash
                    expect(
                        chksum_hash[fname] == fchksum,
                        " Inconsistent hashes in chksum for file {}".format(
                            fname),
                    )
                else:
                    chksum_hash[fname] = fchksum

    if isdirectory:
        filenames = glob.glob(os.path.join(filename, "*.*"))
    else:
        filenames = [filename]
    for fname in filenames:
        # hash entries are keyed by paths containing a separator; skip bare names
        if os.sep not in fname:
            continue
        chksum = md5(os.path.join(input_data_root, fname))
        if chksum_hash:
            if fname not in chksum_hash:
                # BUGFIX: report the specific file (fname), not the original
                # 'filename' argument, which may be a directory when
                # isdirectory is True.
                logger.warning(
                    "Did not find hash for file {} in chksum file {}".format(
                        fname, hashfile))
            else:
                expect(
                    chksum == chksum_hash[fname],
                    "chksum mismatch for file {} expected {} found {}".format(
                        os.path.join(input_data_root, fname), chksum,
                        chksum_hash[fname]),
                )
Esempio n. 53
0
    def set_batch_system(self, batch_system, machine=None):
        """
        Sets the batch system block in the Batch object
        """
        if machine is None:
            machine = self.machine
        # Only re-resolve the nodes when the batch system changes or was never set
        if self.batch_system != batch_system or self.batch_system_node is None:
            for candidate in self.get_nodes("batch_system", {"type" : batch_system}):
                mach_attr = candidate.get("MACH")
                if mach_attr is None:
                    # machine-independent batch_system block
                    self.batch_system_node = candidate
                elif mach_attr == machine:
                    # machine-specific overrides
                    self.machine = machine
                    self.machine_node = candidate

            expect(self.batch_system_node is not None, "No batch system '%s' found" % batch_system)

        return batch_system
Esempio n. 54
0
def get_test_data(suite):
    ###############################################################################
    """
    For a given suite, returns (inherit, time, share, tests)
    """
    raw_dict = _ALL_TESTS[suite]
    # Reject unknown keys up front
    for key in raw_dict.keys():
        expect(key in ["inherit", "time", "share", "tests"],
               "Unexpected test key '{}'".format(key))

    fields = (("inherit", tuple),
              ("time", str),
              ("share", bool),
              ("tests", tuple))
    return tuple(_get_key_data(raw_dict, name, typ) for name, typ in fields)
Esempio n. 55
0
def _signal_handler(signum, _):
    """Kill child processes and abort via expect() when a fatal signal arrives."""
    # Resolve the signal number to a human-readable name for the error message
    name = "Unknown"
    for candidate in _iter_signal_names():
        if getattr(signal, candidate) == signum:
            name = candidate

    # Terminate children
    for child_pid in find_proc_id(children_only=True):
        try:
            os.kill(child_pid, signal.SIGKILL)
        except OSError:
            # If the batch system killed the entire process group, these
            # processes might already be dying
            pass

    # Throw an exception so SystemTest infrastructure can handle this error
    expect(False, "Job killed due to receiving signal {:d} ({})".format(signum, name))
Esempio n. 56
0
    def __init__(self):
        """
        initialize an object

        >>> files = Files()
        >>> files.get_value('CASEFILE_HEADERS',resolved=False)
        '$CIMEROOT/config/config_headers.xml'
        """
        cimeroot = get_cime_root()
        infile = os.path.join(cimeroot, "config", get_model(), "config_files.xml")
        expect(os.path.isfile(infile), "Could not find or open file {}".format(infile))
        EntryID.__init__(self, infile,
                         schema=os.path.join(cimeroot, "config", "xml_schemas",
                                             "entry_id.xsd"))
        # .config_file.xml at the top level may overwrite COMP_ROOT_DIR_ nodes in config_files
        override = os.path.join(os.path.dirname(cimeroot), ".config_files.xml")
        if os.path.isfile(override):
            self.read(override)
            self.overwrite_existing_entries()
Esempio n. 57
0
 def get_compset_components(self):
     """Return the component names parsed from the COMPSET long name."""
     # If we are doing a create_clone, self._compsetname is not set yet
     compset = self.get_value("COMPSET")
     if compset is None:
         compset = self._compsetname
     expect(compset is not None, "ERROR: compset is not set")
     # the first element is always the date operator - skip it
     parts = compset.split('_')[1:]  # pylint: disable=maybe-no-member
     names = []
     for part in parts:
         # ignore the possible BGC or TEST modifier
         if part.startswith("BGC%") or part.startswith("TEST"):
             continue
         # drop any %-qualified option and trailing digits, e.g. CAM60%... -> cam
         comp = part.split('%')[0].lower()
         names.append(re.sub(r'[0-9]*', "", comp))
     return names
Esempio n. 58
0
def wait_for_tests_impl(test_paths,
                        no_wait=False,
                        check_throughput=False,
                        check_memory=False,
                        ignore_namelists=False,
                        ignore_memleak=False):
    ###############################################################################
    """Spawn one watcher thread per test path and gather their results.

    Returns a dict mapping test name -> (test_path, test_status).
    """
    result_queue = Queue.Queue()

    for path in test_paths:
        watcher = threading.Thread(target=wait_for_test,
                                   args=(path, result_queue, not no_wait,
                                         check_throughput, check_memory,
                                         ignore_namelists, ignore_memleak))
        watcher.daemon = True
        watcher.start()

    # Wait until only the main thread remains
    while threading.active_count() > 1:
        time.sleep(1)

    test_results = {}
    seen_paths = []
    while not result_queue.empty():
        test_name, test_path, test_status = result_queue.get()
        if test_name in test_results:
            prior_path, prior_status = test_results[test_name]
            if test_status == prior_status:
                # Same outcome from two paths is just a warning
                logging.warning(
                    "Test name '%s' was found in both '%s' and '%s'" %
                    (test_name, test_path, prior_path))
            else:
                raise SystemExit(
                    "Test name '%s' was found in both '%s' and '%s' with different results"
                    % (test_name, test_path, prior_path))

        test_results[test_name] = (test_path, test_status)
        seen_paths.append(test_path)

    expect(set(test_paths) == set(seen_paths),
           "Missing results for test paths: %s" %
           (set(test_paths) - set(seen_paths)))

    return test_results
Esempio n. 59
0
def get_recommended_test_time(test_full_name):
###############################################################################
    """
    >>> get_recommended_test_time("ERS.f19_g16_rx1.A.melvin_gnu")
    '0:10:00'

    >>> get_recommended_test_time("ERP_Ln9.ne30_ne30.FC5.melvin_gun.cam-outfrq9s")
    '0:45:00'

    >>> get_recommended_test_time("PET_Ln9.ne30_ne30.FC5.sandiatoss3_intel.cam-outfrq9s")
    '03:00:00'

    >>> get_recommended_test_time("PET_Ln20.ne30_ne30.FC5.sandiatoss3_intel.cam-outfrq9s")
    >>>
    """
    _, _, _, _, machine, compiler, _ = CIME.utils.parse_test_name(test_full_name)
    expect(machine is not None, "{} is not a full test name".format(test_full_name))

    best_time = None
    for suite in get_test_suites():
        _, rec_time, tests_raw = _ALL_TESTS[suite]
        for entry in tests_raw:
            # Entries are either bare names or (name, mod[, machines]) tuples
            test_mod = None
            if isinstance(entry, six.string_types):
                test_name = entry
            else:
                test_name = entry[0]
                if len(entry) == 2:
                    test_mod = entry[1]
                else:
                    mod_machines = ([entry[2]]
                                    if isinstance(entry[2], six.string_types)
                                    else entry[2])
                    if machine in mod_machines:
                        test_mod = entry[1]

            candidate = CIME.utils.get_full_test_name(
                test_name, machine=machine, compiler=compiler, testmod=test_mod)

            # Keep the smallest recommended time over all matching suites
            if candidate == test_full_name and rec_time is not None:
                if (best_time is None or
                        convert_to_seconds(rec_time) < convert_to_seconds(best_time)):
                    best_time = rec_time

    return best_time
Esempio n. 60
0
def make_fake_teststatus(path, testname, status, phase):
    """Write a TestStatus file in *path* for *testname*.

    Every core phase before *phase* is recorded as PASS, *phase* itself
    gets *status*, and the loop stops there (later phases stay unset).
    When *phase* is the RUN phase, every written entry carries a
    "time=42" comment (note the condition tests *phase*, not the loop
    variable).
    """
    expect(phase in test_status.CORE_PHASES, "Bad phase '%s'" % phase)
    with test_status.TestStatus(test_dir=path, test_name=testname) as ts:
        for core_phase in test_status.CORE_PHASES:
            if core_phase == phase:
                # Target phase: record the requested status, then stop
                ts.set_status(
                    core_phase,
                    status,
                    comments=("time=42"
                              if phase == test_status.RUN_PHASE else ""),
                )
                break
            else:
                # Phases preceding the target all PASS
                ts.set_status(
                    core_phase,
                    test_status.TEST_PASS_STATUS,
                    comments=("time=42"
                              if phase == test_status.RUN_PHASE else ""),
                )