Example 1
def download_if_in_repo(svn_loc, input_data_root, rel_path):
    """
    Return True if successfully downloaded
    """
    rel_path = rel_path.strip('/')
    full_url = os.path.join(svn_loc, rel_path)

    full_path = os.path.join(input_data_root, rel_path)
    logging.info("Trying to download file: '%s' to path '%s'" % (full_url, full_path))
    # Make sure local path exists, create if it does not
    if(not os.path.exists(os.path.dirname(full_path))):
        os.makedirs(os.path.dirname(full_path))

    stat, out, err = run_cmd("svn --non-interactive --trust-server-cert ls %s" % full_url, ok_to_fail=True)
    if (stat != 0):
        logging.warning("FAIL: SVN repo '%s' does not have file '%s'\nReason:%s\n%s\n" % (svn_loc, full_url, out, err))
        return False
    else:
        stat, output, errput = \
            run_cmd("svn --non-interactive --trust-server-cert export %s %s" % (full_url, full_path), ok_to_fail=True)
        if (stat != 0):
            logging.warning("svn export failed with output: %s and errput %s\n" % (output, errput))
            return False
        else:
            # Make sure it is group r/w
            os.chmod(full_path, 0o664)
            logging.info("SUCCESS\n")
            return True
Example 2
def _main_func(description):
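    # Fetch GEOS-5 DFPIT files for a single day, or for every day of the
    # month when a full month is requested; the two wget blocks below pull
    # the DFPITI3NVASM and DFPITI3NXASM collections for the same julian day.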
    date, fullmonth = parse_command_line(sys.argv, description)
    if fullmonth:
        fday = 1
        _, lday = monthrange(date.year, date.month)
    else:
        fday = date.day
        lday = date.day

    print("Getting data for range {} to {}".format(fday, lday))
    for day in range(fday, lday + 1):
        tdate = date.replace(day=day)
        jday = get_julian_day_of_year(tdate)
        print("Getting data for year {} julian date {}".format(
            date.year, jday))
        dataroot = "https://goldsfs1.gesdisc.eosdis.nasa.gov/data/GEOS5/DFPITI3NVASM.5.12.4/{}/{:03d}/.hidden/".format(
            date.year, jday)
        cmd = "wget -nc -np -r -nH --directory-prefix=/glade/scratch/jedwards/NASAdata/ -A'GEOS.*.V01.nc4' " + dataroot
        err, output, _ = run_cmd(cmd, combine_output=True, verbose=True)
        expect(
            err == 0,
            "Could not connect to repo via '{}'\nThis is most likely either a proxy, or network issue.\nOutput:\n{}"
            .format(cmd, output.encode('utf-8')))

        dataroot = "https://goldsfs1.gesdisc.eosdis.nasa.gov/data/GEOS5/DFPITI3NXASM.5.12.4/{}/{:03d}/.hidden/".format(
            date.year, jday)
        cmd = "wget -nc -np -r -nH --directory-prefix=/glade/scratch/jedwards/NASAdata/ -A'GEOS.*.V01.nc4' " + dataroot
        err, output, _ = run_cmd(cmd, combine_output=True, verbose=True)
        expect(
            err == 0,
            "Could not connect to repo via '{}'\nThis is most likely either a proxy, or network issue.\nOutput:\n{}"
            .format(cmd, output.encode('utf-8')))
Example 3
 def write_pe_template(self, pefilename, ntasks, nthrds, roots):
     from distutils.spawn import find_executable
     from xml.etree import ElementTree as ET
     from CIME.utils import run_cmd
     logger.info("Writing pe node info to %s", pefilename)
     root = ET.Element('config_pes')
     grid = ET.SubElement(root, 'grid')
     grid.set('name', 'any')
     mach = ET.SubElement(grid, 'mach')
     mach.set('name', 'any')
     pes = ET.SubElement(mach, 'pes')
     pes.set('compset', 'any')
     pes.set('pesize', '')
     ntasks_node = ET.SubElement(pes, 'ntasks')
     for k in ntasks:
         node = ET.SubElement(ntasks_node, 'ntasks_' + k)
         node.text = str(ntasks[k])
     nthrds_node = ET.SubElement(pes, 'nthrds')
     for k in nthrds:
         node = ET.SubElement(nthrds_node, 'nthrds_' + k)
         node.text = str(nthrds[k])
     rootpe_node = ET.SubElement(pes, 'rootpe')
     for k in roots:
         node = ET.SubElement(rootpe_node, 'rootpe_' + k)
         node.text = str(roots[k])
     xmllint = find_executable("xmllint")
     if xmllint is not None:
         run_cmd("%s --format --output %s -" % (xmllint, pefilename),
                 input_str=ET.tostring(root))
Example 4
 def write_pe_template(self, pefilename, ntasks, nthrds, roots):
     from distutils.spawn import find_executable
     from xml.etree import ElementTree as ET
     from CIME.utils import run_cmd
     logger.info("Writing pe node info to %s", pefilename)
     root = ET.Element('config_pes')
     grid = ET.SubElement(root, 'grid')
     grid.set('name', 'any')
     mach = ET.SubElement(grid, 'mach')
     mach.set('name', 'any')
     pes = ET.SubElement(mach, 'pes')
     pes.set('compset', 'any')
     pes.set('pesize', '')
     ntasks_node = ET.SubElement(pes, 'ntasks')
     for k in ntasks:
         node = ET.SubElement(ntasks_node, 'ntasks_' + k)
         node.text = str(ntasks[k])
     nthrds_node = ET.SubElement(pes, 'nthrds')
     for k in nthrds:
         node = ET.SubElement(nthrds_node, 'nthrds_' + k)
         node.text = str(nthrds[k])
     rootpe_node = ET.SubElement(pes, 'rootpe')
     for k in roots:
         node = ET.SubElement(rootpe_node, 'rootpe_' + k)
         node.text = str(roots[k])
     xmllint = find_executable("xmllint")
     if xmllint is not None:
         run_cmd("%s --format --output %s -" % (xmllint, pefilename),
                 input_str=ET.tostring(root))
Example 5
def _check_pelayouts_require_rebuild(case, models):
###############################################################################
    """
    Check whether a PE-layout change requires a clean build; expects cwd to be caseroot
    """
    locked_pes = "LockedFiles/env_mach_pes.xml"
    if os.path.exists(locked_pes):
        # Look to see if $comp_PE_CHANGE_REQUIRES_REBUILD is defined
        # for any component
        env_mach_pes_locked = EnvMachPes(infile=locked_pes)
        for comp in models:
            if case.get_value("%s_PE_CHANGE_REQUIRES_REBUILD" % comp):
                # Changing these values in env_mach_pes.xml will force
                # you to clean the corresponding component
                old_tasks   = env_mach_pes_locked.get_value("NTASKS_%s" % comp)
                old_threads = env_mach_pes_locked.get_value("NTHRDS_%s" % comp)
                old_inst    = env_mach_pes_locked.get_value("NINST_%s" % comp)

                new_tasks   = case.get_value("NTASKS_%s" % comp)
                new_threads = case.get_value("NTHRDS_%s" % comp)
                new_inst    = case.get_value("NINST_%s" % comp)

                if old_tasks != new_tasks or old_threads != new_threads or old_inst != new_inst:
                    logger.warn("%s pe change requires clean build" % comp)
                    cleanflag = comp.lower()
                    run_cmd("./case.build --clean %s" % cleanflag)

        os.remove(locked_pes)
Example 6
def bless_namelists(test_name, test_dir, report_only, force, baseline_name, baseline_root):
###############################################################################
    # Be aware that restart test will overwrite the original namelist files
    # with versions of the files that should not be blessed. This forces us to
    # re-run create_test.

    # Update namelist files
    logger.info("Test '{}' had namelist diff".format(test_name))
    if (not report_only and
        (force or six.moves.input("Update namelists (y/n)? ").upper() in ["Y", "YES"])):

        if baseline_name is None:
            stat, baseline_name, _ = run_cmd("./xmlquery --value BASELINE_NAME_CMP", from_dir=test_dir)
            if stat != 0 or not baseline_name:
                baseline_name = CIME.utils.get_current_branch(repo=CIME.utils.get_cime_root())

        if baseline_root is None:
            stat, baseline_root, _ = run_cmd("./xmlquery --value BASELINE_ROOT", from_dir=test_dir)
            if stat != 0 or not baseline_root:
                return False, "Could not determine baseline root"

        create_test_gen_args = " -g {} ".format(baseline_name if get_model() == "cesm" else " -g -b {} ".format(baseline_name))
        stat, out, _ = run_cmd("{}/create_test {} -n {} --baseline-root {} -o".format(get_scripts_root(), test_name, create_test_gen_args, baseline_root), combine_output=True)
        if stat != 0:
            return False, "Namelist regen failed: '{}'".format(out)
        else:
            return True, None
    else:
        return True, None
Example 7
def getTimings(case, lid):
###############################################################################
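    # Run the case's Tools/getTiming script for this lid, optionally archive
    # the run-directory timing files, and gzip the generated timing stats file.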

    check_timing = case.get_value("CHECK_TIMING")
    if check_timing:
        caseroot = case.get_value("CASEROOT")
        timingDir = os.path.join(caseroot, "timing")
        if not os.path.isdir(timingDir):
            os.makedirs(timingDir)

        logger.info("Running timing script %s " %(os.path.join(caseroot, "Tools", "getTiming")))
        cmd = "%s -lid %s " %(os.path.join(caseroot,"Tools","getTiming"), lid)
        run_cmd(cmd)

        # save the timing files if desired
        save_timing = case.get_value("SAVE_TIMING")
        if save_timing:
            rundir = case.get_value("RUNDIR")
            shutil.move(os.path.join(rundir,"timing"),
                        os.path.join(rundir,"timing."+lid))

        # compress relevant timing files
        logger.info( "gzipping timing stats.." )
        model = case.get_value("MODEL")
        timingfile = os.path.join(timingDir, model + "_timing_stats." + lid)
        with open(timingfile, 'rb') as f_in, gzip.open(timingfile + '.gz', 'wb') as f_out:
            shutil.copyfileobj(f_in, f_out)
        os.remove(timingfile)
        logger.info("Done with timings")
Example 8
    def write_pe_template(self, pefilename, ntasks, nthrds, roots):
        from distutils.spawn import find_executable
        from xml.etree import ElementTree as ET
        from CIME.utils import run_cmd

        logger.info("Writing pe node info to %s", pefilename)
        root = ET.Element("config_pes")
        grid = ET.SubElement(root, "grid")
        grid.set("name", "any")
        mach = ET.SubElement(grid, "mach")
        mach.set("name", "any")
        pes = ET.SubElement(mach, "pes")
        pes.set("compset", "any")
        pes.set("pesize", "")
        ntasks_node = ET.SubElement(pes, "ntasks")
        for k in ntasks:
            node = ET.SubElement(ntasks_node, "ntasks_" + k)
            node.text = str(ntasks[k])
        nthrds_node = ET.SubElement(pes, "nthrds")
        for k in nthrds:
            node = ET.SubElement(nthrds_node, "nthrds_" + k)
            node.text = str(nthrds[k])
        rootpe_node = ET.SubElement(pes, "rootpe")
        for k in roots:
            node = ET.SubElement(rootpe_node, "rootpe_" + k)
            node.text = str(roots[k])
        xmllint = find_executable("xmllint")
        if xmllint is not None:
            run_cmd(
                "%s --format --output %s -" % (xmllint, pefilename),
                input_str=ET.tostring(root),
            )
Example 9
def abort_merge():
    ###############################################################################
    new_merge_tag = get_merge_tag()
    pr_branch = get_branch_from_tag(new_merge_tag)
    delete_tag(new_merge_tag, remote=ESMCI_REMOTE_NAME)
    run_cmd_no_fail("git reset --hard origin/master", verbose=True)
    run_cmd_no_fail("git checkout master", verbose=True)
    run_cmd("git branch -D {}".format(pr_branch), verbose=True)
Example 10
def abort_merge():
###############################################################################
    new_merge_tag = get_merge_tag()
    pr_branch = get_branch_from_tag(new_merge_tag)
    delete_tag(new_merge_tag, remote=ESMCI_REMOTE_NAME)
    run_cmd_no_fail("git reset --hard origin/master", verbose=True)
    run_cmd_no_fail("git checkout master", verbose=True)
    run_cmd("git branch -D {}".format(pr_branch), verbose=True)
Example 11
def abort_split():
    ###############################################################################
    new_split_tag = get_split_tag()
    pr_branch = get_branch_from_tag(new_split_tag)
    delete_tag(new_split_tag)
    run_cmd_no_fail("git reset --hard origin/master", verbose=True)
    run_cmd_no_fail("git checkout master", verbose=True)
    run_cmd("git branch -D {}".format(pr_branch), verbose=True)
Example 12
def abort_split():
###############################################################################
    new_split_tag = get_split_tag()
    pr_branch = get_branch_from_tag(new_split_tag)
    delete_tag(new_split_tag)
    run_cmd_no_fail("git reset --hard origin/master", verbose=True)
    run_cmd_no_fail("git checkout master", verbose=True)
    run_cmd("git branch -D {}".format(pr_branch), verbose=True)
Example 13
    def _nlcomp_phase(self, test):
    ###########################################################################
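        # Namelist-comparison phase for one test: compare everything under
        # CaseDocs (plus user_nl files) against the baseline when comparing,
        # and copy those files into the baseline tree when generating.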
        test_dir       = self._get_test_dir(test)
        casedoc_dir    = os.path.join(test_dir, "CaseDocs")
        compare_nl     = os.path.join(CIME.utils.get_scripts_root(), "Tools", "compare_namelists")
        simple_compare = os.path.join(CIME.utils.get_scripts_root(), "Tools", "simple_compare")

        if self._compare:
            has_fails         = False
            baseline_dir      = os.path.join(self._baseline_root, self._baseline_cmp_name, test)
            baseline_casedocs = os.path.join(baseline_dir, "CaseDocs")

            # Start off by comparing everything in CaseDocs except a few arbitrary files (ugh!)
            # TODO: Namelist files should have consistent suffix
            all_items_to_compare = [item for item in glob.glob("%s/*" % casedoc_dir)\
                                    if "README" not in os.path.basename(item)\
                                    and not item.endswith("doc")\
                                    and not item.endswith("prescribed")\
                                    and not os.path.basename(item).startswith(".")] + \
                                    glob.glob("%s/*user_nl*" % test_dir)
            for item in all_items_to_compare:
                baseline_counterpart = os.path.join(baseline_casedocs \
                                                    if os.path.dirname(item).endswith("CaseDocs") \
                                                    else baseline_dir,os.path.basename(item))
                if not os.path.exists(baseline_counterpart):
                    self._log_output(test, "Missing baseline namelist '%s'" % baseline_counterpart)
                    has_fails = True
                else:
                    if compare_namelists.is_namelist_file(item):
                        rc, output, _  = run_cmd("%s %s %s -c %s 2>&1" %
                                                 (compare_nl, baseline_counterpart, item, test),
                                                 ok_to_fail=True)
                    else:
                        rc, output, _  = run_cmd("%s %s %s -c %s 2>&1" %
                                                 (simple_compare, baseline_counterpart, item, test),
                                                 ok_to_fail=True)

                    if rc != 0:
                        has_fails = True
                        self._log_output(test, output)

            if has_fails:
                self._test_has_nl_problem(test)

        if self._generate:
            baseline_dir      = os.path.join(self._baseline_root, self._baseline_gen_name, test)
            baseline_casedocs = os.path.join(baseline_dir, "CaseDocs")
            if not os.path.isdir(baseline_dir):
                os.makedirs(baseline_dir, stat.S_IRWXU | stat.S_IRWXG | stat.S_IXOTH | stat.S_IROTH)

            if os.path.isdir(baseline_casedocs):
                shutil.rmtree(baseline_casedocs)
            shutil.copytree(casedoc_dir, baseline_casedocs)
            for item in glob.glob(os.path.join(test_dir, "user_nl*")):
                shutil.copy2(item, baseline_dir)

        # Always mark as passed unless we hit exception
        return True
Example 14
    def _run_phase(self, test):
    ###########################################################################
        test_dir = self._get_test_dir(test)
        # wallclock is an optional field in the version 2.0 testlist.xml file
        # setting wallclock time close to the expected test time will help queue throughput
        if (test in self._test_data and "wallclock" in self._test_data[test]):
            run_cmd("./xmlchange JOB_WALLCLOCK_TIME=%s" % self._test_data[test]["wallclock"], from_dir=test_dir)

        return self._shell_cmd_for_phase(test, "./case.submit", RUN_PHASE, from_dir=test_dir)
Example 15
def clean(case, cleanlist=None):
###############################################################################
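    # Run the case Makefile's clean<item> targets for the selected component
    # classes, then reset the build-status XML settings so the next build
    # starts from a clean state.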

    clm_config_opts = case.get_value("CLM_CONFIG_OPTS")
    comp_lnd = case.get_value("COMP_LND")
    if cleanlist is None:
        cleanlist = case.get_value("COMP_CLASSES").split(',')
        cleanlist = [x.lower().replace('drv','cpl') for x in cleanlist]
        testcase        = case.get_value("TESTCASE")
        # we only want to clean clm here if it is clm4_0 otherwise remove
        # it from the cleanlist
        if testcase is not None and comp_lnd == "clm" and\
                clm_config_opts is not None and "lnd" in cleanlist and\
                "clm4_0" not in clm_config_opts:
                    cleanlist.remove('lnd')


    debug           = case.get_value("DEBUG")
    use_esmf_lib    = case.get_value("USE_ESMF_LIB")
    build_threaded  = case.get_value("BUILD_THREADED")
    gmake           = case.get_value("GMAKE")
    caseroot        = case.get_value("CASEROOT")
    casetools       = case.get_value("CASETOOLS")

    os.environ["DEBUG"]           = stringify_bool(debug)
    os.environ["USE_ESMF_LIB"]    = stringify_bool(use_esmf_lib)
    os.environ["BUILD_THREADED"]  = stringify_bool(build_threaded)
    os.environ["CASEROOT"]        = case.get_value("CASEROOT")
    os.environ["COMP_INTERFACE"]  = case.get_value("COMP_INTERFACE")
    os.environ["PIO_VERSION"]     = str(case.get_value("PIO_VERSION"))
    os.environ["CLM_CONFIG_OPTS"] = clm_config_opts  if clm_config_opts is not None else ""

    cmd = gmake + " -f " + casetools + "/Makefile"
    for item in cleanlist:
        cmd = cmd + " clean" + item
    logger.info("calling %s "%(cmd))
    run_cmd(cmd)

    # unlink Locked files directory
    file = os.path.join(caseroot,"LockedFiles/env_build.xml")
    if os.path.isfile(file):
        os.unlink(file)

    # reset following values in xml files
    case.set_value("SMP_BUILD",str(0))
    case.set_value("NINST_BUILD",str(0))
    case.set_value("BUILD_STATUS",str(0))
    case.set_value("BUILD_COMPLETE","FALSE")
    case.flush()

    # append message to CaseStatus
    msg = "cleanbuild %s "%" ".join(cleanlist)
    append_status(msg, caseroot=caseroot, sfile="CaseStatus")
Example 16
 def writexml(self, addlist, newfilename):
     root = ET.Element('config_pes')
     for a, b in addlist:
         if b is not None:
             root.append(ET.Element('REPLACE'))
             root.append(b.to_cime4())
             root.append(ET.Element('WITH'))
         if a is not None:
             root.append(a.to_cime4())
     xmllint = find_executable("xmllint")
     if xmllint is not None:
         run_cmd("%s --format --output %s -"%(xmllint, newfilename),
                 input_str=ET.tostring(root))
Example 17
def create_perturbed_init_file(original, perturb_file, outfile, weight):
    ncflint = "ncflint"
    if not os.path.isdir(os.path.dirname(outfile)):
        os.makedirs(os.path.dirname(outfile))
    safe_copy(original, outfile)
    if "BWHIST" in original:
        cmd = ncflint + " -A -v US,VS,T,Q,PS -w {},1.0 {} {} {}".format(
            weight, perturb_file, original, outfile)
    else:
        cmd = ncflint + " -O -C -v lat,lon,slat,slon,lev,ilev,hyai,hybi,hyam,hybm,US,VS,T,Q,PS -w {},1.0 {} {} {}".format(
            weight, perturb_file, original, outfile)
    run_cmd(cmd, verbose=True)
    os.rename(outfile, outfile.replace("-tmp.nc", "-00000.nc"))
Example 18
 def writexml(self, addlist, newfilename):
     root = ET.Element('config_pes')
     for a, b in addlist:
         if b is not None:
             root.append(ET.Element('REPLACE'))
             root.append(b.to_cime4())
             root.append(ET.Element('WITH'))
         if a is not None:
             root.append(a.to_cime4())
     xmllint = find_executable("xmllint")
     if xmllint is not None:
         run_cmd("%s --format --output %s -" % (xmllint, newfilename),
                 input_str=ET.tostring(root))
Example 19
 def writexml(self, addlist, newfilename):
     root = ET.Element("config_pes")
     for a, b in addlist:
         if b is not None:
             root.append(ET.Element("REPLACE"))
             root.append(b.to_cime5())
             root.append(ET.Element("WITH"))
         if a is not None:
             root.append(a.to_cime5())
     xmllint = find_executable("xmllint")
     if xmllint is not None:
         run_cmd(
             "{} --format --output {} -".format(xmllint, newfilename),
             input_str=ET.tostring(root),
         )
Example 20
def _run_git_cmd_recursively(cmd, srcroot, output):
    """ Runs a git command recursively

    Runs the git command in srcroot then runs it on each submodule.
    Then output from both commands is written to the output file.
    """
    rc1, output1, err1 = run_cmd("git {}".format(cmd), from_dir=srcroot)

    rc2, output2, err2 = run_cmd(
        "git submodule foreach --recursive \"git {}; echo\"".format(cmd),
        from_dir=srcroot)

    with open(output, "w") as fd:
        fd.write((output1 if rc1 == 0 else err1) + "\n\n")
        fd.write((output2 if rc2 == 0 else err2) + "\n")
Example 21
    def run_cmd_assert_result(self,
                              cmd,
                              from_dir=None,
                              expected_stat=0,
                              env=None,
                              verbose=False):
        from_dir = os.getcwd() if from_dir is None else from_dir
        stat, output, errput = utils.run_cmd(cmd,
                                             from_dir=from_dir,
                                             env=env,
                                             verbose=verbose)
        if expected_stat == 0:
            expectation = "SHOULD HAVE WORKED, INSTEAD GOT STAT %s" % stat
        else:
            expectation = "EXPECTED STAT %s, INSTEAD GOT STAT %s" % (
                expected_stat,
                stat,
            )
        msg = """
    COMMAND: %s
    FROM_DIR: %s
    %s
    OUTPUT: %s
    ERRPUT: %s
    """ % (
            cmd,
            from_dir,
            expectation,
            output,
            errput,
        )
        self.assertEqual(stat, expected_stat, msg=msg)

        return output
Example 22
def case_st_archive(case):
###############################################################################
    caseroot = case.get_value("CASEROOT")
    logger.info("st_archive starting")
    # do short-term archiving
    append_status("st_archiving starting",
                 caseroot=caseroot, sfile="CaseStatus")

    cmd = os.path.join(caseroot, "Tools/st_archive") + " >> stArchiveStatus 2>&1"
    rc, out, err = run_cmd(cmd, ok_to_fail=True)
    if rc != 0:
        append_status("st_archive failed: %s \nerr = %s"%(out,err),sfile="CaseStatus")
        return False

    append_status("st_archiving completed",
                 caseroot=caseroot, sfile="CaseStatus")
    logger.info("st_archive completed")

    # resubmit case if appropriate
    resubmit = case.get_value("RESUBMIT")
    if resubmit > 0:
        append_status("resubmitting from st_archive",
                      caseroot=caseroot, sfile="CaseStatus")
        logger.info("resubmitting from st_archive, resubmit=%d"%resubmit)
        submit(case, resubmit=True)

    return True
Example 23
def do_subtree_pull(squash=False):
###############################################################################
    stat = run_cmd("git subtree pull {} --prefix=cime {} master".format("--squash" if squash else "", ESMCI_REMOTE_NAME),
                   verbose=True)[0]
    if stat != 0:
        logging.info("There are merge conflicts. Please fix, commit, and re-run this tool with --resume")
        sys.exit(1)
Example 24
def bless_namelists(test_name, report_only, force, baseline_name,
                    baseline_root):
    ###############################################################################
    # Be aware that restart test will overwrite the original namelist files
    # with versions of the files that should not be blessed. This forces us to
    # re-run create_test.

    # Update namelist files
    logger.info("Test '{}' had namelist diff".format(test_name))
    if (not report_only
            and (force or six.moves.input("Update namelists (y/n)? ").upper()
                 in ["Y", "YES"])):

        create_test_gen_args = " -g {} ".format(baseline_name if get_model(
        ) == "cesm" else " -g -b {} ".format(baseline_name))
        stat, out, _ = run_cmd(
            "{}/create_test {} -n {} --baseline-root {} -o".format(
                get_scripts_root(), test_name, create_test_gen_args,
                baseline_root),
            combine_output=True)
        if stat != 0:
            return False, "Namelist regen failed: '{}'".format(out)
        else:
            return True, None
    else:
        return True, None
Example 25
def _save_build_provenance_cesm(case, lid): # pylint: disable=unused-argument
    version = case.get_value("MODEL_VERSION")
    # version has already been recorded
    srcroot = case.get_value("SRCROOT")
    manic = os.path.join("manage_externals","checkout_externals")
    manic_full_path = os.path.join(srcroot, manic)
    out = None
    if os.path.exists(manic_full_path):
        args = " --status --verbose --no-logging"
        stat, out, err = run_cmd(manic_full_path + args, from_dir=srcroot)
        errmsg = """Error gathering provenance information from manage_externals.

manage_externals error message:
{err}

manage_externals output:
{out}

To solve this, either:

(1) Find and fix the problem: From {srcroot}, try to get this command to work:
    {manic}{args}

(2) If you don't need provenance information, rebuild with --skip-provenance-check
""".format(out=indent_string(out, 4), err=indent_string(err, 4),
           srcroot=srcroot, manic=manic, args=args)
        expect(stat==0,errmsg)

    caseroot = case.get_value("CASEROOT")
    with open(os.path.join(caseroot, "CaseStatus"), "a") as fd:
        if version is not None and version != "unknown":
            fd.write("CESM version is {}\n".format(version))
        if out is not None:
            fd.write("{}\n".format(out))
Example 26
    def generate_baseline(self):
        """
        generate a new baseline case based on the current test
        """
        if self._runstatus != "PASS":
            append_status("Cannot generate baselines, test did not pass.\n", sfile="TestStatus.log")
            return

        newestcpllogfile = self._get_latest_cpl_log()
        baselineroot = self._case.get_value("BASELINE_ROOT")
        basegen_dir = os.path.join(baselineroot, self._case.get_value("BASEGEN_CASE"))
        for bdir in (baselineroot, basegen_dir):
            if not os.path.isdir(bdir):
                append_status("GFAIL %s baseline\n" % self._case.get_value("CASEBASEID"),
                             sfile="TestStatus")
                append_status("ERROR %s does not exist" % bdir, sfile="TestStatus.log")
                return -1
        compgen = os.path.join(self._case.get_value("SCRIPTSROOT"),"Tools",
                               "component_compgen_baseline.sh")
        compgen += " -baseline_dir "+basegen_dir
        compgen += " -test_dir "+self._case.get_value("RUNDIR")
        compgen += " -generate_tag "+self._case.get_value("BASELINE_NAME_GEN")
        compgen += " -testcase "+self._case.get_value("CASE")
        compgen += " -testcase_base "+self._case.get_value("CASEBASEID")
        rc, out, err = run_cmd(compgen, ok_to_fail=True)
        # copy latest cpl log to baseline
        # drop the date so that the name is generic
        shutil.copyfile(newestcpllogfile,
                        os.path.join(basegen_dir,"cpl.log.gz"))
        append_status(out,sfile="TestStatus")
        if rc != 0:
            append_status("Error in Baseline Generate: %s"%err,sfile="TestStatus.log")
Example 27
    def fileexists(self, rel_path):
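        # List the parent directory over GridFTP and report whether the
        # requested file appears in the listing.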
        stat,out,err = run_cmd("globus-url-copy -list {}".format(os.path.join(self._root_address, os.path.dirname(rel_path))+os.sep))
        if stat or os.path.basename(rel_path) not in out:
            logging.warning("FAIL: File {} not found.\nstat={} error={}".format(rel_path, stat, err))
            return False

        return True
Example 28
def _run_pylint(on_file, interactive):
###############################################################################
    pylint = find_executable("pylint")

    cmd_options = " --disable=I,C,R,logging-not-lazy,wildcard-import,unused-wildcard-import"
    cmd_options += ",fixme,broad-except,bare-except,eval-used,exec-used,global-statement"
    cmd_options += ",logging-format-interpolation,no-name-in-module"
    cimeroot = get_cime_root()

    if "scripts/Tools" in on_file:
        cmd_options +=",relative-import"

    # add init-hook option
    cmd_options += " --init-hook='sys.path.extend((\"%s\",\"%s\",\"%s\"))'"%\
        (os.path.join(cimeroot,"scripts","lib"),
         os.path.join(cimeroot,"scripts","Tools"),
         os.path.join(cimeroot,"scripts","fortran_unit_testing","python"))

    cmd = "%s %s %s" % (pylint, cmd_options, on_file)
    logger.debug("pylint command is %s"%cmd)
    stat, out, err = run_cmd(cmd, verbose=False, from_dir=cimeroot)
    if stat != 0:
        if interactive:
            logger.info("File %s has pylint problems, please fix\n    Use command: %s" % (on_file, cmd))
            logger.info(out + "\n" + err)
        return (on_file, out + "\n" + err)
    else:
        if interactive:
            logger.info("File %s has no pylint problems" % on_file)
        return (on_file, "")
Example 29
def _build_model_thread(
    config_dir,
    compclass,
    compname,
    caseroot,
    libroot,
    bldroot,
    incroot,
    file_build,
    thread_bad_results,
    smp,
    compiler,
):
    ###############################################################################
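    # Build a single component library: prefer a buildlib script from the
    # case's SourceMods, fall back to the one in config_dir, and capture all
    # build output in file_build.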
    logger.info("Building {} with output to {}".format(compclass, file_build))
    t1 = time.time()
    cmd = os.path.join(caseroot, "SourceMods", "src." + compname, "buildlib")
    if os.path.isfile(cmd):
        logger.warning(
            "WARNING: using local buildlib script for {}".format(compname))
    else:
        cmd = os.path.join(config_dir, "buildlib")
        expect(os.path.isfile(cmd),
               "Could not find buildlib for {}".format(compname))

    compile_cmd = "COMP_CLASS={compclass} COMP_NAME={compname} {cmd} {caseroot} {libroot} {bldroot} ".format(
        compclass=compclass,
        compname=compname,
        cmd=cmd,
        caseroot=caseroot,
        libroot=libroot,
        bldroot=bldroot,
    )
    if get_model() != "ufs":
        compile_cmd = "SMP={} {}".format(stringify_bool(smp), compile_cmd)

    if is_python_executable(cmd):
        logging_options = get_logging_options()
        if logging_options != "":
            compile_cmd = compile_cmd + logging_options

    with open(file_build, "w") as fd:
        stat = run_cmd(compile_cmd,
                       from_dir=bldroot,
                       arg_stdout=fd,
                       arg_stderr=subprocess.STDOUT)[0]

    if stat != 0:
        thread_bad_results.append(
            "BUILD FAIL: {}.buildlib failed, cat {}".format(
                compname, file_build))

    analyze_build_log(compclass, file_build, compiler)

    for mod_file in glob.glob(os.path.join(bldroot,
                                           "*_[Cc][Oo][Mm][Pp]_*.mod")):
        safe_copy(mod_file, incroot)

    t2 = time.time()
    logger.info("{} built in {:f} seconds".format(compname, (t2 - t1)))
Example 30
def do_subtree_pull(squash=False):
###############################################################################
    stat = run_cmd("git subtree pull {} --prefix=cime {} master".format("--squash" if squash else "", ESMCI_REMOTE_NAME),
                   verbose=True)[0]
    if stat != 0:
        logging.info("There are merge conflicts. Please fix, commit, and re-run this tool with --resume")
        sys.exit(1)
Example 31
def build_libraries(exeroot, caseroot, cimeroot, libroot, mpilib, lid, machines_file):
###############################################################################

    if (mpilib == "mpi-serial"):
        for header_to_copy in glob.glob(os.path.join(cimeroot, "externals/mct/mpi-serial/*.h")):
            shutil.copy(header_to_copy, os.path.join(libroot, "include"))

    sharedpath = os.environ["SHAREDPATH"]
    shared_lib = os.path.join(sharedpath, "lib")
    shared_inc = os.path.join(sharedpath, "include")
    for shared_item in [shared_lib, shared_inc]:
        if (not os.path.exists(shared_item)):
            os.makedirs(shared_item)

    libs = ["mct", "gptl", "pio", "csm_share"]
    logs = []

    for lib in libs:
        full_lib_path = os.path.join(sharedpath, lib)
        if (not os.path.exists(full_lib_path)):
            os.makedirs(full_lib_path)

        file_build = os.path.join(sharedpath, "%s.bldlog.%s" % (lib, lid))
        with open(file_build, "w") as fd:
            fd.write("Current env:\n%s" % "\n".join(["  %s = %s" % (env, os.environ[env]) for env in sorted(os.environ)]))

        my_file = os.path.join(os.path.dirname(machines_file), "buildlib.%s" % lib)
        stat = run_cmd("%s %s %s >> %s 2>&1" %
                       (my_file, sharedpath, caseroot, file_build),
                       from_dir=exeroot,
                       ok_to_fail=True, verbose=True)[0]
        expect(stat == 0, "ERROR: buildlib.%s failed, cat %s" % (lib, file_build))
        logs.append(file_build)

    return logs
Example 32
def _run_pylint(on_file, interactive):
    ###############################################################################
    pylint = find_executable("pylint")

    cmd_options = " --disable=I,C,R,logging-not-lazy,wildcard-import,unused-wildcard-import,fixme,broad-except,bare-except,eval-used,exec-used,global-statement"
    cimeroot = get_cime_root()

    if "scripts/Tools" in on_file:
        cmd_options += ",relative-import"

    # add init-hook option
    cmd_options += " --init-hook='sys.path.extend((\"%s\",\"%s\"))'"%\
        (os.path.join(cimeroot,"utils","python"),
         os.path.join(cimeroot,"scripts","Tools"))

    cmd = "%s %s %s" % (pylint, cmd_options, on_file)
    logger.debug("pylint command is %s" % cmd)
    stat, out, err = run_cmd(cmd, verbose=False, from_dir=cimeroot)
    if stat != 0:
        if interactive:
            logger.info(
                "File %s has pylint problems, please fix\n    Use command: %s"
                % (on_file, cmd))
            logger.info(out + "\n" + err)
        return (on_file, out + "\n" + err)
    else:
        if interactive:
            logger.info("File %s has no pylint problems" % on_file)
        return (on_file, "")
Example 33
def _build_model_thread(config_dir, compclass, compname, caseroot, libroot, bldroot, incroot, file_build,
                        thread_bad_results, smp, compiler):
###############################################################################
    logger.info("Building {} with output to {}".format(compclass, file_build))
    t1 = time.time()
    cmd = os.path.join(caseroot, "SourceMods", "src." + compname, "buildlib")
    if os.path.isfile(cmd):
        logger.warning("WARNING: using local buildlib script for {}".format(compname))
    else:
        cmd = os.path.join(config_dir, "buildlib")
        expect(os.path.isfile(cmd), "Could not find buildlib for {}".format(compname))

    with open(file_build, "w") as fd:
        stat = run_cmd("MODEL={} SMP={} {} {} {} {} "
                       .format(compclass, stringify_bool(smp), cmd, caseroot, libroot, bldroot),
                       from_dir=bldroot,  arg_stdout=fd,
                       arg_stderr=subprocess.STDOUT)[0]
    analyze_build_log(compclass, file_build, compiler)
    if (stat != 0):
        thread_bad_results.append("BUILD FAIL: {}.buildlib failed, cat {}".format(compname, file_build))

    for mod_file in glob.glob(os.path.join(bldroot, "*_[Cc][Oo][Mm][Pp]_*.mod")):
        safe_copy(mod_file, incroot)

    t2 = time.time()
    logger.info("{} built in {:f} seconds".format(compname, (t2 - t1)))
Example 34
def do_subtree_pull(squash=False, auto_conf=False):
    ###############################################################################
    stat = run_cmd("git subtree pull {} --prefix=cime {} master".format(
        "--squash" if squash else "", ESMCI_REMOTE_NAME),
                   verbose=True)[0]
    if stat != 0:
        handle_conflicts(is_merge=True, auto_conf=auto_conf)
Example 35
def _build_model_thread(config_dir, compclass, compname, caseroot, libroot, bldroot, incroot, file_build,
                        thread_bad_results, smp, compiler):
###############################################################################
    logger.info("Building {} with output to {}".format(compclass, file_build))
    t1 = time.time()
    cmd = os.path.join(caseroot, "SourceMods", "src." + compname, "buildlib")
    if os.path.isfile(cmd):
        logger.warning("WARNING: using local buildlib script for {}".format(compname))
    else:
        cmd = os.path.join(config_dir, "buildlib")
        expect(os.path.isfile(cmd), "Could not find buildlib for {}".format(compname))

    with open(file_build, "w") as fd:
        stat = run_cmd("MODEL={} SMP={} {} {} {} {} "
                       .format(compclass, stringify_bool(smp), cmd, caseroot, libroot, bldroot),
                       from_dir=bldroot,  arg_stdout=fd,
                       arg_stderr=subprocess.STDOUT)[0]
    analyze_build_log(compclass, file_build, compiler)
    if (stat != 0):
        thread_bad_results.append("BUILD FAIL: {}.buildlib failed, cat {}".format(compname, file_build))

    for mod_file in glob.glob(os.path.join(bldroot, "*_[Cc][Oo][Mm][Pp]_*.mod")):
        safe_copy(mod_file, incroot)

    t2 = time.time()
    logger.info("{} built in {:f} seconds".format(compname, (t2 - t1)))
Example 36
def run_gmake(case, compclass, libroot, libname="", user_cppdefs=""):
###############################################################################
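    # Invoke the CIME Makefile's 'complib' target to build lib<compclass>.a
    # (or lib<libname>.a when libname is given) for this case.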

    caseroot  = case.get_value("CASEROOT")
    casetools = case.get_value("CASETOOLS")
    gmake_j   = case.get_value("GMAKE_J")
    gmake     = case.get_value("GMAKE")
    mach      = case.get_value("MACH")

    complib = ""
    if libname:
        complib  = os.path.join(libroot, "lib%s.a" % libname)
    else:
        complib  = os.path.join(libroot, "lib%s.a" % compclass)

    makefile = os.path.join(casetools, "Makefile")
    macfile  = os.path.join(caseroot, "Macros.%s" % mach)

    if user_cppdefs:
        cmd = "%s complib -j %d MODEL=%s COMPLIB=%s -f %s MACFILE=%s USER_CPPDEFS=%s" \
            % (gmake, gmake_j, compclass, complib, makefile, macfile, user_cppdefs )
    else:
        cmd = "%s complib -j %d MODEL=%s COMPLIB=%s -f %s MACFILE=%s " \
            % (gmake, gmake_j, compclass, complib, makefile, macfile )

    rc, out, err = run_cmd(cmd, ok_to_fail=True)
    expect(rc == 0, "Command %s failed rc=%d\nout=%s\nerr=%s" % (cmd, rc, out, err))

    logger.info("Command %s completed with output %s\nerr %s" ,cmd, out, err)
Example 37
def apply_user_mods(caseroot, user_mods_path, ninst={}):
    '''
    Recursively apply user_mods to caseroot
    '''
    include_dirs = build_include_dirs_list(user_mods_path)
    for include_dir in include_dirs:
        for user_nl in glob.iglob(os.path.join(include_dir,"user_nl_*")):
            with open(os.path.join(include_dir, user_nl), "r") as fd:
                contents = fd.read()
            case_user_nl = user_nl.replace(include_dir, caseroot)
            comp = case_user_nl.split('_')[-1]
            if comp in ninst.keys():
                for comp_inst in range(1,ninst[comp]):
                    case_user_nl_inst = case_user_nl + "_%4.4d"%comp_inst
                    logger.info("Appending file %s"%case_user_nl_inst)
                    with open(case_user_nl_inst, "a") as fd:
                        fd.write(contents)
            else:
                logger.info("Appending file %s"%case_user_nl)
                with open(case_user_nl, "a") as fd:
                    fd.write(contents)
        for root, dirs, files in os.walk(include_dir,followlinks=True,topdown=False):
            if "src" in os.path.basename(root):
                for sfile in files:
                    source_mods = os.path.join(root,sfile)
                    case_source_mods = source_mods.replace(include_dir, caseroot)
                    if os.path.isfile(case_source_mods):
                        logger.warn("Refusing to overwrite existing SourceMods in %s"%case_source_mods)
                    else:
                        logger.info("Adding SourceMod to case %s"%case_source_mods)
                        try:
                            shutil.copyfile(source_mods, case_source_mods)
                        except:
                            expect(False, "Could not write file %s in caseroot %s"
                                   %(case_source_mods,caseroot))
        case_shell_commands = None
        shell_command_files = glob.glob(os.path.join(include_dir,"shell_commands")) +\
                               glob.glob(os.path.join(include_dir,"xmlchange_cmnds"))
        for shell_commands_file in shell_command_files:
            case_shell_commands = shell_commands_file.replace(include_dir, caseroot)
            with open(shell_commands_file,"r") as fd:
                new_shell_commands = fd.read().replace("xmlchange","xmlchange --force")
            with open(case_shell_commands, "a") as fd:
                fd.write(new_shell_commands)
    if case_shell_commands is not None:
        os.chmod(case_shell_commands, 0o777)
        run_cmd(case_shell_commands)
Example 38
    def getdirectory(self, rel_path, full_path):
        stat, _,err = run_cmd("globus-url-copy -v -r {}{} file://{}{}".format(os.path.join(self._root_address, rel_path), os.sep, full_path, os.sep))

        if (stat != 0):
            logging.warning("FAIL: GridFTP repo '{}' does not have directory '{}' error={}\n".
                            format(self._root_address,rel_path, err))
            return False
        return True
Example 39
 def _get_procs_needed(self, test, phase):
 ###########################################################################
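     # For a local (no-batch) run phase, ask the case for TOTALPES; every
     # other phase needs only a single process.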
     if (phase == RUN_PHASE and self._no_batch):
         test_dir = self._get_test_dir(test)
         out = run_cmd("./xmlquery TOTALPES -value", from_dir=test_dir)
         return int(out)
     else:
         return 1
Example 40
    def getdirectory(self, rel_path, full_path):
        stat, _,err = run_cmd("globus-url-copy -v -r {}{} file://{}{}".format(os.path.join(self._root_address, rel_path), os.sep, full_path, os.sep))

        if (stat != 0):
            logging.warning("FAIL: GridFTP repo '{}' does not have directory '{}' error={}\n".
                            format(self._root_address,rel_path, err))
            return False
        return True
Example 41
def merge_branch(branch, squash=False, auto_conf=False):
    ###############################################################################
    stat = run_cmd(
        "git merge {} -m 'Merge {branch}' -X rename-threshold=25 {branch}".
        format("--squash" if squash else "", branch=branch),
        verbose=True)[0]
    if stat != 0:
        handle_conflicts(auto_conf=auto_conf)
Example 42
    def test_e_xmlquery(self):
        # Set script and script path
        xmlquery = "./xmlquery"
        cls = self.__class__
        casedir = cls._testdirs[0]

        # Check for environment
        self.assertTrue(os.path.isdir(self.SCRIPT_DIR))
        self.assertTrue(os.path.isdir(self.TOOLS_DIR))
        self.assertTrue(os.path.isfile(os.path.join(casedir, xmlquery)))

        # Test command line options
        with Case(casedir, read_only=True) as case:
            STOP_N = case.get_value("STOP_N")
            COMP_CLASSES = case.get_values("COMP_CLASSES")
            BUILD_COMPLETE = case.get_value("BUILD_COMPLETE")
            cmd = xmlquery + " STOP_N --value"
            output = utils.run_cmd_no_fail(cmd, from_dir=casedir)
            self.assertTrue(output == str(STOP_N), msg="%s != %s" % (output, STOP_N))
            cmd = xmlquery + " BUILD_COMPLETE --value"
            output = utils.run_cmd_no_fail(cmd, from_dir=casedir)
            self.assertTrue(output == "TRUE", msg="%s != %s" % (output, BUILD_COMPLETE))
            # we expect DOCN_MODE to be undefined in this X compset
            # this test assures that we do not try to resolve this as a compvar
            cmd = xmlquery + " DOCN_MODE --value"
            _, output, error = utils.run_cmd(cmd, from_dir=casedir)
            self.assertTrue(
                error == "ERROR:  No results found for variable DOCN_MODE",
                msg="unexpected result for DOCN_MODE, output {}, error {}".format(
                    output, error
                ),
            )

            for comp in COMP_CLASSES:
                caseresult = case.get_value("NTASKS_%s" % comp)
                cmd = xmlquery + " NTASKS_%s --value" % comp
                output = utils.run_cmd_no_fail(cmd, from_dir=casedir)
                self.assertTrue(
                    output == str(caseresult), msg="%s != %s" % (output, caseresult)
                )
                cmd = xmlquery + " NTASKS --subgroup %s --value" % comp
                output = utils.run_cmd_no_fail(cmd, from_dir=casedir)
                self.assertTrue(
                    output == str(caseresult), msg="%s != %s" % (output, caseresult)
                )
            if self.MACHINE.has_batch_system():
                JOB_QUEUE = case.get_value("JOB_QUEUE", subgroup="case.run")
                cmd = xmlquery + " JOB_QUEUE --subgroup case.run --value"
                output = utils.run_cmd_no_fail(cmd, from_dir=casedir)
                self.assertTrue(
                    output == JOB_QUEUE, msg="%s != %s" % (output, JOB_QUEUE)
                )

            cmd = xmlquery + " --listall"
            utils.run_cmd_no_fail(cmd, from_dir=casedir)

        cls._do_teardown.append(cls._testroot)
Example 43
def merge_branch(branch):
###############################################################################
    stat = run_cmd("git merge -m 'Merge {}' -X rename-threshold=25 {}".format(branch, branch), verbose=True)[0]
    if stat != 0:
        logging.info("There are merge conflicts. Please fix, commit, and re-run this tool with --resume")
        logging.info("If the histories are unrelated, you may need to rebase the branch manually:")
        logging.info("git rebase $pr_branch --onto $merge_tag")
        logging.info("Then repeat the above merge command")
        sys.exit(1)
Example 44
def merge_branch(branch):
###############################################################################
    stat = run_cmd("git merge -m 'Merge {}' -X rename-threshold=25 {}".format(branch, branch), verbose=True)[0]
    if stat != 0:
        logging.info("There are merge conflicts. Please fix, commit, and re-run this tool with --resume")
        logging.info("If the histories are unrelated, you may need to rebase the branch manually:")
        logging.info("git rebase $pr_branch --onto $merge_tag")
        logging.info("Then repeat the above merge command")
        sys.exit(1)
Example 45
def runModel(case):
###############################################################################

    # Set OMP_NUM_THREADS
    tm = TaskMaker(case)
    num_threads = tm.thread_count
    os.environ["OMP_NUM_THREADS"] = str(num_threads)

    # Run the model
    logger.info("%s MODEL EXECUTION BEGINS HERE" %(time.strftime("%Y-%m-%d %H:%M:%S")))

    machine = Machines(machine=case.get_value("MACH"))
    cmd = machine.get_full_mpirun(tm, case, "case.run")
    cmd = case.get_resolved_value(cmd)

    logger.debug("run command is %s " %cmd)
    rundir = case.get_value("RUNDIR")
    run_cmd(cmd, from_dir=rundir)
    logger.info( "%s MODEL EXECUTION HAS FINISHED" %(time.strftime("%Y-%m-%d %H:%M:%S")))
Example 46
    def getfile(self, rel_path, full_path):
        stat, _, err = run_cmd("globus-url-copy -v {} file://{}".format(
            os.path.join(self._root_address, rel_path), full_path))

        if stat != 0:
            logging.warning(
                "FAIL: GridFTP repo '{}' does not have file '{}' error={}\n".
                format(self._root_address, rel_path, err))
            return False
        return True
Example 47
 def _component_compare_move(self, suffix):
     cmd = os.path.join(self._case.get_value("SCRIPTSROOT"), "Tools",
                        "component_compare_move.sh")
     rc, out, err = run_cmd("%s -rundir %s -testcase %s -suffix %s" %
                            (cmd, self._case.get_value('RUNDIR'), self._case.get_value('CASE'), suffix),
                            ok_to_fail=True)
     if rc == 0:
         append_status(out, sfile="TestStatus.log")
     else:
         append_status("Component_compare_test.sh failed out: %s\n\nerr: %s\n"%(out,err)
                       ,sfile="TestStatus.log")
Example 48
    def compare_baseline(self):
        """
        compare the current test output to a baseline result
        """
        if self._runstatus != "PASS":
            append_status("Cannot compare baselines, test did not pass.\n", sfile="TestStatus.log")
            return

        baselineroot = self._case.get_value("BASELINE_ROOT")
        basecmp_dir = os.path.join(baselineroot, self._case.get_value("BASECMP_CASE"))
        for bdir in (baselineroot, basecmp_dir):
            if not os.path.isdir(bdir):
                append_status("GFAIL %s baseline\n",self._case.get_value("CASEBASEID"),
                             sfile="TestStatus")
                append_status("ERROR %s does not exist"%bdir, sfile="TestStatus.log")
                return -1
        compgen = os.path.join(self._case.get_value("SCRIPTSROOT"),"Tools",
                               "component_compgen_baseline.sh")
        compgen += " -baseline_dir "+basecmp_dir
        compgen += " -test_dir "+self._case.get_value("RUNDIR")
        compgen += " -compare_tag "+self._case.get_value("BASELINE_NAME_CMP")
        compgen += " -testcase "+self._case.get_value("CASE")
        compgen += " -testcase_base "+self._case.get_value("CASEBASEID")
        rc, out, err = run_cmd(compgen, ok_to_fail=True)

        append_status(out.replace("compare","compare baseline", 1),sfile="TestStatus")
        if rc != 0:
            append_status("Error in Baseline compare: %s\n%s"%(out,err), sfile="TestStatus.log")

        # compare memory usage to baseline
        newestcpllogfile = self._get_latest_cpl_log()
        memlist = self._get_mem_usage(newestcpllogfile)
        baselog = os.path.join(basecmp_dir, "cpl.log.gz")
        if not os.path.isfile(baselog):
            # for backward compatibility
            baselog = os.path.join(basecmp_dir, "cpl.log")
        if len(memlist) > 3:
            blmem = self._get_mem_usage(baselog)[-1][1]
            curmem = memlist[-1][1]
            diff = (curmem-blmem)/blmem
            if(diff < 0.1):
                append_status("PASS  Memory usage baseline compare ",sfile="TestStatus")
            else:
                append_status("FAIL  Memory usage increase > 10% from baseline",sfile="TestStatus")
        # compare throughput to baseline
        current = self._get_throughput(newestcpllogfile)
        baseline = self._get_throughput(baselog)
        #comparing ypd so bigger is better
        if baseline is not None and current is not None:
            diff = (baseline - current)/baseline
            if(diff < 0.25):
                append_status("PASS  Throughput baseline compare ",sfile="TestStatus")
            else:
                append_status("FAIL  Throughput increase > 25% from baseline",sfile="TestStatus")
Example 49
def _build_model_thread(config_dir, caseroot, bldroot, compspec, file_build,
                        exeroot, model, comp, objdir, incroot, thread_bad_results):
###############################################################################
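    # Run the component's buildlib script, appending its output to file_build,
    # then copy any generated *_comp_*.mod files into incroot.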
    stat = run_cmd("%s/buildlib %s %s %s >> %s 2>&1" %
                   (config_dir, caseroot, bldroot, compspec, file_build),
                   from_dir=objdir, ok_to_fail=True,verbose=True)[0]
    if (stat != 0):
        thread_bad_results.append("ERROR: %s.buildlib failed, see %s" % (comp, file_build))

    for mod_file in glob.glob(os.path.join(objdir, "*_[Cc][Oo][Mm][Pp]_*.mod")):
        shutil.copy(mod_file, incroot)
Example 50
def download_if_in_repo(svn_loc, input_data_root, rel_path):
    """
    Return True if successfully downloaded
    """
    full_url = os.path.join(svn_loc, rel_path)
    full_path = os.path.join(input_data_root, rel_path)
    logging.info("Trying to download file: '%s' to path '%s'" % (full_url, full_path))

    stat = run_cmd("svn --non-interactive --trust-server-cert ls %s" % full_url, ok_to_fail=True)
    if (stat != 0):
        logging.warning("SVN repo '%s' does not have file '%s'" % (svn_loc, rel_path))
        return False
    else:
        stat, output, errput = \
            run_cmd("svn --non-interactive --trust-server-cert export %s %s" % (full_url, full_path))
        if (stat != 0):
            logging.warning("svn export failed with output: %s and errput %s" % (output, errput))
            return False
        else:
            return True
Example 51
def _run_pylint(all_files, interactive):
    ###############################################################################
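    # Run pylint once over all files with JSON output and collect the
    # error-level messages, keyed by file path.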
    pylint = find_executable("pylint")

    cmd_options = (
        " --disable=I,C,R,logging-not-lazy,wildcard-import,unused-wildcard-import"
    )
    cmd_options += (
        ",fixme,broad-except,bare-except,eval-used,exec-used,global-statement")
    cmd_options += ",logging-format-interpolation,no-name-in-module,arguments-renamed"
    cmd_options += " -j 0 -f json"
    cimeroot = get_cime_root()
    srcroot = get_src_root()

    # if "scripts/Tools" in on_file:
    #     cmd_options +=",relative-import"

    # add init-hook option
    cmd_options += ' --init-hook=\'sys.path.extend(("%s","%s","%s","%s"))\'' % (
        os.path.join(cimeroot, "CIME"),
        os.path.join(cimeroot, "CIME", "Tools"),
        os.path.join(cimeroot, "scripts", "fortran_unit_testing", "python"),
        os.path.join(srcroot, "components", "cmeps", "cime_config", "runseq"),
    )

    files = " ".join(all_files)
    cmd = "%s %s %s" % (pylint, cmd_options, files)
    logger.debug("pylint command is %s" % cmd)
    stat, out, err = run_cmd(cmd, verbose=False, from_dir=cimeroot)

    data = json.loads(out)

    result = {}

    for item in data:
        if item["type"] != "error":
            continue

        path = item["path"]
        message = item["message"]
        line = item["line"]

        if path in result:
            result[path].append(f"{message}:{line}")
        else:
            result[path] = [
                message,
            ]

    for k in result.keys():
        result[k] = "\n".join(set(result[k]))

    return result
Example 52
def build_libraries(case, exeroot, caseroot, cimeroot, libroot, mpilib, lid, machines_file):
###############################################################################
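    # Build the shared libraries (mct, gptl, pio, csm_share) under SHAREDPATH,
    # then build the clm4_5/clm5_0 library when the land component needs it;
    # returns the list of build log files.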

    if (mpilib == "mpi-serial"):
        for header_to_copy in glob.glob(os.path.join(cimeroot, "externals/mct/mpi-serial/*.h")):
            shutil.copy(header_to_copy, os.path.join(libroot, "include"))

    sharedpath = os.environ["SHAREDPATH"]
    shared_lib = os.path.join(sharedpath, "lib")
    shared_inc = os.path.join(sharedpath, "include")
    for shared_item in [shared_lib, shared_inc]:
        if (not os.path.exists(shared_item)):
            os.makedirs(shared_item)

    libs = ["mct", "gptl", "pio", "csm_share"]
    logs = []

    for lib in libs:
        full_lib_path = os.path.join(sharedpath, lib)
        if (not os.path.exists(full_lib_path)):
            os.makedirs(full_lib_path)

        file_build = os.path.join(sharedpath, "%s.bldlog.%s" % (lib, lid))
        with open(file_build, "w") as fd:
            fd.write("Current env:\n%s" % "\n".join(["  %s = %s" % (env, os.environ[env]) for env in sorted(os.environ)]))

        my_file = os.path.join(os.path.dirname(machines_file), "buildlib.%s" % lib)
        stat = run_cmd("%s %s %s >> %s 2>&1" %
                       (my_file, sharedpath, caseroot, file_build),
                       from_dir=exeroot,
                       ok_to_fail=True, verbose=True)[0]
        expect(stat == 0, "ERROR: buildlib.%s failed, cat %s" % (lib, file_build))
        logs.append(file_build)

    comp_lnd = case.get_value("COMP_LND")
    clm_config_opts = case.get_value("CLM_CONFIG_OPTS")
    if comp_lnd == "clm" and "clm4_0" not in clm_config_opts:
        logging.info("         - Building clm4_5/clm5_0 Library ")
        esmfdir = "esmf" if case.get_value("USE_ESMF_LIB") else "noesmf"
        sharedpath = os.environ["SHAREDPATH"]
        bldroot = os.path.join(sharedpath, case.get_value("COMP_INTERFACE"), esmfdir, "clm", "obj")
        libroot = os.path.join(sharedpath, case.get_value("COMP_INTERFACE"), esmfdir, "lib")
        incroot = os.path.join(sharedpath, "include")
        file_build = os.path.join(exeroot, "lnd.bldlog.%s" %  lid)
        config_lnd_dir = os.path.dirname(case.get_value("CONFIG_LND_FILE"))

        for ndir in [bldroot, libroot]:
            if (not os.path.isdir(ndir)):
                os.makedirs(ndir)

        _build_model_thread(config_lnd_dir, "lnd", caseroot, bldroot, libroot, incroot, file_build, logs)

    return logs
Example no. 53
def _build_model_thread(config_dir, compclass, compname, caseroot, libroot,
                        bldroot, incroot, file_build, thread_bad_results, smp,
                        compiler, case):
    ###############################################################################
    logger.info("Building {} with output to {}".format(compclass, file_build))
    t1 = time.time()
    cmd = os.path.join(caseroot, "SourceMods", "src." + compname, "buildlib")
    if os.path.isfile(cmd):
        logger.warning(
            "WARNING: using local buildlib script for {}".format(compname))
    else:
        cmd = os.path.join(config_dir, "buildlib")
        expect(os.path.isfile(cmd),
               "Could not find buildlib for {}".format(compname))

    # Add to this list as components are converted to python/cmake
    if compname in ["cam"] and get_model() == "e3sm":
        try:
            stat = 0
            run_sub_or_cmd(cmd, [caseroot, libroot, bldroot],
                           "buildlib", [bldroot, libroot, case],
                           logfile=file_build)
        except Exception:
            stat = 1

    else:
        with open(file_build, "w") as fd:
            stat = run_cmd("MODEL={} SMP={} {} {} {} {} ".format(
                compclass, stringify_bool(smp), cmd, caseroot, libroot,
                bldroot),
                           from_dir=bldroot,
                           arg_stdout=fd,
                           arg_stderr=subprocess.STDOUT)[0]

    analyze_build_log(compclass, file_build, compiler)

    if stat != 0:
        thread_bad_results.append(
            "BUILD FAIL: {}.buildlib failed, cat {}".format(
                compname, file_build))


    for mod_file in glob.glob(os.path.join(bldroot,
                                           "*_[Cc][Oo][Mm][Pp]_*.mod")):
        safe_copy(mod_file, incroot)

    t2 = time.time()
    logger.info("{} built in {:f} seconds".format(compname, (t2 - t1)))
Example no. 54
def _extract_times(zipfiles, target_file):

    contents = "Target Build_time\n"
    for zipfile in zipfiles:
        stat, output, _ = run_cmd("zgrep 'built in' {}".format(zipfile))
        if stat == 0:
            for line in output.splitlines():
                line = line.strip()
                if line:
                    items = line.split()
                    target, the_time = items[1], items[-2]
                    contents += "{} {}\n".format(target, the_time)

    with open(target_file, "w") as fd:
        fd.write(contents)
Example no. 55
def _extract_times(zipfiles, target_file):
    contents = "Target Build_time\n"
    total_build_time = 0.0
    for zipfile in zipfiles:
        stat, output, _ = utils.run_cmd("zgrep 'built in' {}".format(zipfile))
        if stat == 0:
            for line in output.splitlines():
                line = line.strip()
                if line:
                    items = line.split()
                    target, the_time = items[1], items[-2]
                    contents += "{} {}\n".format(target, the_time)

        stat, output, _ = utils.run_cmd(
            "zgrep -E '^real [0-9.]+$' {}".format(zipfile))
        if stat == 0:
            for line in output.splitlines():
                line = line.strip()
                if line:
                    total_build_time += float(line.split()[-1])

    with open(target_file, "w") as fd:
        fd.write(contents)
        fd.write("Total_Elapsed_Time {}".format(str(total_build_time)))
Example no. 56
def _is_test_working(prev_results, src_root, testing=False):
    # prev_results is (last_passing_commit, last_failing_commit): with no recorded
    # pass the test cannot have been working, and with no recorded failure it
    # cannot have been broken.
    if prev_results[0] is None:
        return False
    elif prev_results[1] is None:
        return True
    else:
        if not testing:
            stat, out, err = run_cmd("git merge-base --is-ancestor {}".format(" ".join(prev_results)), from_dir=src_root)
            expect(stat in [0, 1], "Unexpected status from ancestor check:\n{}\n{}".format(out, err))
        else:
            # Hack for testing
            stat = 0 if prev_results[0] < prev_results[1] else 1

        # stat == 0 tells us that pass is older than fail, so we must have failed, otherwise we passed
        return stat != 0
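A minimal sketch of the three branches, assuming prev_results is a (last_passing_commit, last_failing_commit) pair; the shas and source path are placeholders, and the last call uses the testing shortcut so no git repository is required:

print(_is_test_working(("abc123", None), "/path/to/src"))      # no recorded failure -> True
print(_is_test_working((None, "def456"), "/path/to/src"))      # no recorded pass    -> False
print(_is_test_working((1, 2), "/path/to/src", testing=True))  # pass older than fail -> False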
Example no. 57
def bless_namelists(test_name, report_only, force, baseline_name, baseline_root):
###############################################################################
    # Be aware that restart test will overwrite the original namelist files
    # with versions of the files that should not be blessed. This forces us to
    # re-run create_test.

    # Update namelist files
    print "Test '%s' had namelist diff" % test_name
    if (not report_only and
        (force or raw_input("Update namelists (y/n)? ").upper() in ["Y", "YES"])):
        create_test_gen_args = " -g %s " % baseline_name if get_model() == "cesm" else " -g -b %s " % baseline_name
        stat, _, err = run_cmd("%s/create_test %s -n %s --baseline-root %s -o" % (get_scripts_root(), test_name, create_test_gen_args, baseline_root))
        if stat != 0:
            return False, "Namelist regen failed: '%s'" % err
        else:
            return True, None
    else:
        return True, None
Example no. 58
def bless_namelists(test_name, report_only, force, baseline_name,
                    baseline_root):
    ###############################################################################
    # Be aware that restart test will overwrite the original namelist files
    # with versions of the files that should not be blessed. This forces us to
    # re-run create_test.

    # Update namelist files
    print "Test '%s' had a namelist diff" % test_name
    if (not report_only and
        (force
         or raw_input("Update namelists (y/n)? ").upper() in ["Y", "YES"])):
        stat, _, err = run_cmd(
            "create_test -n -g %s -b %s --baseline-root %s" %
            (test_name, baseline_name, baseline_root))
        if stat != 0:
            return False, "Namelist regen failed: '%s'" % err
        else:
            return True, None
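A small sketch of consuming the (success, reason) return value; the test name, baseline name, and baseline root are illustrative, and report_only=True keeps the call from prompting or regenerating anything:

success, reason = bless_namelists("ERS.f19_g16.B1850", report_only=True, force=False,
                                  baseline_name="main", baseline_root="/path/to/baselines")
if not success:
    print("bless failed: {}".format(reason))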