Example No. 1
def check_pelayouts_require_rebuild(self, models):
    """
    Check whether PE layout changes require a rebuild; expects cwd to be caseroot
    """
    locked_pes = os.path.join(LOCKED_DIR, "env_mach_pes.xml")
    if os.path.exists(locked_pes):
        # Look to see if $comp_PE_CHANGE_REQUIRES_REBUILD is defined
        # for any component
        env_mach_pes_locked = EnvMachPes(infile=locked_pes, components=self.get_values("COMP_CLASSES"))
        for comp in models:
            if self.get_value("{}_PE_CHANGE_REQUIRES_REBUILD".format(comp)):
                # Changing these values in env_mach_pes.xml will force
                # you to clean the corresponding component
                old_tasks   = env_mach_pes_locked.get_value("NTASKS_{}".format(comp))
                old_threads = env_mach_pes_locked.get_value("NTHRDS_{}".format(comp))
                old_inst    = env_mach_pes_locked.get_value("NINST_{}".format(comp))

                new_tasks   = self.get_value("NTASKS_{}".format(comp))
                new_threads = self.get_value("NTHRDS_{}".format(comp))
                new_inst    = self.get_value("NINST_{}".format(comp))

                if old_tasks != new_tasks or old_threads != new_threads or old_inst != new_inst:
                    logging.warning("{} pe change requires clean build {} {}".format(comp, old_tasks, new_tasks))
                    cleanflag = comp.lower()
                    run_cmd_no_fail("./case.build --clean {}".format(cleanflag))

        unlock_file("env_mach_pes.xml", self.get_value("CASEROOT"))
Example No. 2
def e3sm_cime_split(resume, squash=False):
###############################################################################
    if not resume:
        setup()

        old_split_tag = get_split_tag()

        try:
            new_split_tag = make_new_split_tag(old_split_tag)

            merge_tag = get_merge_tag()

            pr_branch = do_subtree_split(new_split_tag, merge_tag)

            run_cmd_no_fail("git checkout {}".format(pr_branch), verbose=True)
        except:
            # If unexpected failure happens, delete new split tag
            logging.info("Abandoning split due to unexpected failure")
            delete_tag(new_split_tag)
            raise

        # upstream merge, potential conflicts
        merge_branch("{}/master".format(ESMCI_REMOTE_NAME), squash=squash)

    else:
        old_split_tag, new_split_tag = get_split_tag(expected_num=2)
        logging.info("Resuming split with old tag {} and new tag {}".format(old_split_tag, new_split_tag))
        pr_branch = get_branch_from_tag(new_split_tag)

    run_cmd_no_fail("git push -u {} {}".format(ESMCI_REMOTE_NAME, pr_branch), verbose=True)
Example No. 3
    def test_xcase_submit(self):
        test_root = MACHINE.get_value("CIME_OUTPUT_ROOT")
        machine = MACHINE.get_machine_name()
        compiler = MACHINE.get_default_compiler()

        test_name = get_full_test_name("PFS_I0",grid="f19_g16", compset="X",
                                             machine=machine, compiler=compiler)
        expected_dir = os.path.join(test_root,
                                    "{}.test_lbt".format(test_name),
                                    "timing")
        if not os.path.isdir(expected_dir):
            with tempfile.NamedTemporaryFile('w+') as tfile, tempfile.NamedTemporaryFile('w+') as xfile:
                tfile.write(PES_XML)
                tfile.flush()
                xfile.write(X_OPTIONS)
                xfile.flush()
                cmd = "./load_balancing_submit.py --pesfile {} --res f19_g16 --compset X --test-id test_lbt  --extra-options-file {} --test-root {}".format(tfile.name, xfile.name, test_root)
                if MACHINE.has_batch_system():
                    sys.stdout.write("Jobs will be submitted to queue. Rerun "
                                     "load_balancing_test.py after jobs have "
                    "finished.")
                else:
                    cmd += " --force-purge"
                output = run_cmd_no_fail(cmd, from_dir=CODE_DIR)

                self.assertTrue(output.find("Timing jobs submitted") >= 0,
                                "Expected 'Timing jobs submitted' in output")

        if os.path.isdir(expected_dir):

            cmd = "./load_balancing_solve.py --total-tasks 32 --blocksize 1 --test-id test_lbt --print-models --test-root {} --layout IceLndAtmOcn".format(test_root)
            output = run_cmd_no_fail(cmd, from_dir=CODE_DIR)
            self.assertTrue(output.find("***ATM***") > 0,
                            "--print-models failed to print ATM data")
            self._check_solution(output, "NTASKS_ATM", 31)
Example No. 4
def e3sm_cime_merge(resume, squash=False):
###############################################################################
    if not resume:
        setup()

        old_merge_tag = get_merge_tag()

        try:
            new_merge_tag = make_new_merge_tag(old_merge_tag)

            pr_branch = make_pr_branch(get_branch_from_tag(new_merge_tag), "origin/master")
        except:
            logging.info("Abandoning merge due to unexpected failure")
            delete_tag(new_merge_tag, remote=ESMCI_REMOTE_NAME)
            raise

        # potential conflicts
        do_subtree_pull(squash=squash)

    else:
        old_merge_tag, new_merge_tag = get_merge_tag(expected_num=2)
        logging.info("Resuming merge with old tag {} and new tag {}".format(old_merge_tag, new_merge_tag))
        pr_branch = get_branch_from_tag(new_merge_tag)

    run_cmd_no_fail("git push -u origin {}".format(pr_branch))
Example No. 5
def cmake_stage(name, test_spec_dir, build_optimized, use_mpiserial, mpirun_command, output, pfunit_path,
                cmake_args=None, clean=False, verbose=False, enable_genf90=True, color=True):
    """Run cmake in the current working directory.

    Arguments:
    name - Name for output messages.
    test_spec_dir - Test specification directory to run CMake on.
    build_optimized (logical) - If True, we'll build in optimized rather than debug mode
    use_mpiserial (logical) - If True, we'll tell CMake to include mpi-serial for tests
                              that need it
    """
    # Clear CMake cache.
    if clean:
        pwd_contents = os.listdir(os.getcwd())
        if "CMakeCache.txt" in pwd_contents:
            os.remove("CMakeCache.txt")
        if "CMakeFiles" in pwd_contents:
            rmtree("CMakeFiles")

    if not os.path.isfile("CMakeCache.txt"):

        output.print_header("Running cmake for "+name+".")

        # This build_type only has limited uses, and should probably be removed,
        # but for now it's still needed
        if build_optimized:
            build_type = "CESM"
        else:
            build_type = "CESM_DEBUG"

        cmake_command = [
            "cmake",
            "-C Macros.cmake",
            test_spec_dir,
            "-DCIMEROOT="+_CIMEROOT,
            "-DCIME_CMAKE_MODULE_DIRECTORY="+os.path.abspath(os.path.join(_CIMEROOT,"src","CMake")),
            "-DCMAKE_BUILD_TYPE="+build_type,
            "-DPFUNIT_MPIRUN='"+mpirun_command+"'",
            "-DPFUNIT_PATH="+pfunit_path
            ]
        if use_mpiserial:
            cmake_command.append("-DUSE_MPI_SERIAL=ON")
        if verbose:
            cmake_command.append("-Wdev")

        if enable_genf90:
            cmake_command.append("-DENABLE_GENF90=ON")
            genf90_dir = os.path.join(
                _CIMEROOT,"src","externals","genf90"
                )
            cmake_command.append("-DCMAKE_PROGRAM_PATH="+genf90_dir)

        if not color:
            cmake_command.append("-DUSE_COLOR=OFF")

        if cmake_args is not None:
            cmake_command.extend(cmake_args.split(" "))

        run_cmd_no_fail(" ".join(cmake_command), verbose=True, arg_stdout=None, arg_stderr=subprocess.STDOUT)
Example No. 6
def abort_merge():
###############################################################################
    new_merge_tag = get_merge_tag()
    pr_branch = get_branch_from_tag(new_merge_tag)
    delete_tag(new_merge_tag, remote=ESMCI_REMOTE_NAME)
    run_cmd_no_fail("git reset --hard origin/master", verbose=True)
    run_cmd_no_fail("git checkout master", verbose=True)
    run_cmd("git branch -D {}".format(pr_branch), verbose=True)
Example No. 7
def abort_split():
###############################################################################
    new_split_tag = get_split_tag()
    pr_branch = get_branch_from_tag(new_split_tag)
    delete_tag(new_split_tag)
    run_cmd_no_fail("git reset --hard origin/master", verbose=True)
    run_cmd_no_fail("git checkout master", verbose=True)
    run_cmd("git branch -D {}".format(pr_branch), verbose=True)
Example No. 8
def handle_conflicts(is_merge=False, auto_conf=False):
###############################################################################
    logging.info("There are conflicts, analyzing...")
    remaining_conflicts = handle_easy_conflicts(is_merge) if auto_conf else True
    if remaining_conflicts:
        expect(False, "There are merge conflicts. Please fix, commit, and re-run this tool with --resume")
    else:
        logging.info("All conflicts were automatically resovled, continuing")
        run_cmd_no_fail("git commit --no-edit")
Example No. 9
def make_new_tag(prefix, old_tag, remote="origin", commit="HEAD"):
###############################################################################
    new_tag = "{}{}".format(prefix, get_timestamp(timestamp_format="%Y-%m-%d"))
    expect(old_tag != new_tag, "New tag must have different name than old tag")

    run_cmd_no_fail("git tag {} {}".format(new_tag, commit), verbose=True)
    run_cmd_no_fail("git push {} {}".format(remote, new_tag), verbose=True)

    return new_tag
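
A usage sketch for make_new_tag above, assuming tags are named prefix plus date as the get_timestamp call suggests; the prefix and tag value shown are illustrative.

# Illustrative calls; "merge/" and the tag value are made-up examples.
old_tag = get_tag("merge/")                # e.g. "merge/2019-05-01"
new_tag = make_new_tag("merge/", old_tag)  # tags HEAD and pushes to origin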
Example No. 10
 def test_read_and_write_json(self):
     "Solve from json file, writing to new json file, solve from new file"
     with tempfile.NamedTemporaryFile('w+') as jsonfile1, tempfile.NamedTemporaryFile('w+') as jsonfile2:
         json.dump(JSON_DICT, jsonfile1)
         jsonfile1.flush()
         cmd = "./load_balancing_solve.py --json-input %s --json-output %s" % (jsonfile1.name, jsonfile2.name)
         output = run_cmd_no_fail(cmd, from_dir=CODE_DIR)
         self._check_solution(output, "NTASKS_ATM", 992)
         cmd = "./load_balancing_solve.py --json-input %s" % jsonfile2.name
         output = run_cmd_no_fail(cmd, from_dir=CODE_DIR)
         self._check_solution(output, "NTASKS_ATM", 992)
Example No. 11
def archive_old_test_data(machine, mach_comp, test_id_root, scratch_root, test_root, old_test_archive, avoid_test_id):
###############################################################################

    # Remove old cs.status, cs.submit. I don't think there's any value to leaving these around
    # or archiving them
    for old_cs_file in glob.glob("{}/cs.*".format(scratch_root)):
        if avoid_test_id not in old_cs_file:
            logging.info("TEST ARCHIVER: Removing {}".format(old_cs_file))
            os.remove(old_cs_file)

    # Remove the old CTest XML, same reason as above
    if (os.path.isdir("Testing")):
        logging.info("TEST ARCHIVER: Removing {}".format(os.path.join(os.getcwd(), "Testing")))
        shutil.rmtree("Testing")

    if not os.path.exists(old_test_archive):
        os.mkdir(old_test_archive)

    # Archive old data by looking at old test cases
    for old_case in glob.glob("{}/*{}*{}*".format(test_root, mach_comp, test_id_root)):
        if avoid_test_id not in old_case:
            logging.info("TEST ARCHIVER: archiving case {}".format(old_case))
            exeroot, rundir, archdir = run_cmd_no_fail("./xmlquery EXEROOT RUNDIR DOUT_S_ROOT --value", from_dir=old_case).split(",")

            for the_dir, target_area in [(exeroot, "old_builds"), (rundir, "old_runs"), (archdir, "old_archives"), (old_case, "old_cases")]:
                if os.path.exists(the_dir):
                    logging.info("TEST ARCHIVER:   archiving {} to {}".format(the_dir, os.path.join(old_test_archive, target_area)))
                    if not os.path.exists(os.path.join(old_test_archive, target_area)):
                        os.mkdir(os.path.join(old_test_archive, target_area))

                    os.rename(the_dir, os.path.join(old_test_archive, target_area, os.path.basename(old_case)))

    # Check size of archive
    bytes_of_old_test_data = int(run_cmd_no_fail("du -sb {}".format(old_test_archive)).split()[0])
    bytes_allowed = machine.get_value("MAX_GB_OLD_TEST_DATA") * 1000000000
    if bytes_of_old_test_data > bytes_allowed:
        logging.info("TEST ARCHIVER: Too much test data, {}GB (actual) > {}GB (limit)".format(bytes_of_old_test_data / 1000000000, bytes_allowed / 1000000000))
        old_test_ids = scan_for_test_ids(old_test_archive, mach_comp, test_id_root)
        for old_test_id in sorted(old_test_ids):
            logging.info("TEST ARCHIVER:   Removing old data for test {}".format(old_test_id))
            for item in ["old_cases", "old_builds", "old_runs", "old_archives"]:
                for dir_to_rm in glob.glob("{}/{}/*{}*{}*".format(old_test_archive, item, mach_comp, old_test_id)):
                    logging.info("TEST ARCHIVER:     Removing {}".format(dir_to_rm))
                    shutil.rmtree(dir_to_rm)

            bytes_of_old_test_data = int(run_cmd_no_fail("du -sb {}".format(old_test_archive)).split()[0])
            if bytes_of_old_test_data < bytes_allowed:
                break

    else:
        logging.info("TEST ARCHIVER: Test data is with accepted bounds, {}GB (actual) < {}GB (limit)".format(bytes_of_old_test_data / 1000000000, bytes_allowed / 1000000000))
Example No. 12
 def writexml(self, addlist, newfilename):
     root = ET.Element('grid_data')
     domains = ET.SubElement(root,'domains')
     for a, b in addlist:
         if b is not None:
             domains.append(ET.Element('REPLACE'))
             domains.append(b.to_cime4())
             domains.append(ET.Element('WITH'))
         if a is not None:
             domains.append(a.to_cime4())
     xmllint = find_executable("xmllint")
     if xmllint is not None:
         run_cmd_no_fail("%s --format --output %s -"%(xmllint,newfilename),
                 input_str=ET.tostring(root))
Example No. 13
 def writexml(self, addlist, newfilename):
     root = ET.Element('gridmaps')
     gridmaps = ET.SubElement(root, 'gridmap')
     for a, b in addlist:
         if b is not None:
             gridmaps.append(ET.Element('REPLACE'))
             gridmaps.append(b.to_cime5())
             gridmaps.append(ET.Element('WITH'))
         if a is not None:
             gridmaps.append(a.to_cime5())
     xmllint = find_executable("xmllint")
     if xmllint is not None:
         run_cmd_no_fail("{} --format --output {} -".format(xmllint, newfilename),
                         input_str=ET.tostring(root))
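
Both writexml variants above pipe a serialized ElementTree document into xmllint for pretty-printing. Here is a self-contained sketch of that pattern using only the standard library plus an xmllint binary on PATH; shutil.which stands in for find_executable, and out.xml is an illustrative output name.

import shutil
import subprocess
import xml.etree.ElementTree as ET

root = ET.Element("grid_data")
ET.SubElement(root, "domains")

xmllint = shutil.which("xmllint")
if xmllint is not None:
    # xmllint reads the document from stdin ("-") and writes the
    # pretty-printed result to out.xml.
    subprocess.run([xmllint, "--format", "--output", "out.xml", "-"],
                   input=ET.tostring(root), check=True)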
Example No. 14
 def test_set_blocksize_atm(self):
     cmd = "./load_balancing_solve.py --timing-dir %s --total-tasks 64 --blocksize 2 --blocksize-atm 4 --layout IceLndAtmOcn" % os.path.join(TEST_DIR, "timing")
     output = run_cmd_no_fail(cmd, from_dir=CODE_DIR)
     self._check_solution(output, "NTASKS_ATM", 60)
     self._check_solution(output, "NBLOCKS_ATM", 15)
     self._check_solution(output, "NTASKS_OCN", 4)
     self._check_solution(output, "NBLOCKS_OCN", 2)
Example No. 15
def get_all_checkable_files():
###############################################################################
    cimeroot = get_cime_root()
    all_git_files = run_cmd_no_fail("git ls-files", from_dir=cimeroot, verbose=False).splitlines()

    files_to_test = [item for item in all_git_files
                     if ((item.endswith(".py") or is_python_executable(os.path.join(cimeroot, item))) and not _should_pylint_skip(item))]
    return files_to_test
Example No. 16
def touches_file(start_range, end_range, filepath, title, skip=None):
###############################################################################
    skip_str = "--grep={} --invert-grep".format(skip) if skip is not None else ""
    result = run_cmd_no_fail("git log {} {}..{} -- {}".format(skip_str, start_range, end_range, filepath))

    if result:
        logging.debug("  touched by {} within range {}..{} by commits\n{}".format(title, start_range, end_range, result))

    return result != ""
Example No. 17
def get_tag(prefix, expected_num=1):
###############################################################################
    tags = run_cmd_no_fail("git tag").split()
    tags = [tag for tag in tags if tag.startswith(prefix)]

    if expected_num == 1:
        return tags[-1]
    else:
        return tags[-expected_num:]
Example No. 18
def make_stage(name, output, make_j, clean=False, verbose=True):
    """Run make in the current working directory.

    Arguments:
    name - Name for output messages.
    make_j (int) - number of processes to use for make
    """
    output.print_header("Running make for "+name+".")

    if clean:
        run_cmd_no_fail("make clean")

    make_command = ["make","-j",str(make_j)]

    if verbose:
        make_command.append("VERBOSE=1")

    run_cmd_no_fail(" ".join(make_command), arg_stdout=None, arg_stderr=subprocess.STDOUT)
Example No. 19
def get_tag(prefix, expected_num=1):
###############################################################################
    tags = run_cmd_no_fail("git tag").split()
    tags = [tag for tag in tags if tag.startswith(prefix)]

    expect(len(tags) == expected_num, "Expected exactly {} {} tag, found {}".format(expected_num, prefix, ", ".join(tags)))

    if expected_num == 1:
        return tags[0]
    else:
        return tags
Example No. 20
def get_tag(prefix, expected_num=1):
###############################################################################
    tags = run_cmd_no_fail("git tag").split()
    tags = [tag for tag in tags if tag.startswith(prefix)]

    expect(len(tags) >= expected_num, "Did not see enough {} tags".format(prefix))

    if expected_num == 1:
        return tags[-1]
    else:
        return tags[-expected_num:]
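
A usage sketch for the get_tag variants above; the "split/" prefix is illustrative. With expected_num=2 the caller receives the two most recent matching tags, oldest first, which is how the resume paths in e3sm_cime_split and e3sm_cime_merge recover both the old and the new tag.

# Illustrative calls; the "split/" prefix is a made-up example.
latest_split_tag = get_tag("split/")
old_split_tag, new_split_tag = get_tag("split/", expected_num=2)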
Example No. 21
    def test_write_pes(self):
        with tempfile.NamedTemporaryFile('w+') as jsonfile1, tempfile.NamedTemporaryFile('w+') as pes_file:
            json.dump(JSON_DICT, jsonfile1)
            jsonfile1.flush()
            cmd = "./load_balancing_solve.py --json-input %s --pe-output %s" % (jsonfile1.name, pes_file.name)
            output = run_cmd_no_fail(cmd, from_dir=CODE_DIR)

            self.assertTrue(os.access(pes_file.name, os.R_OK), "pesfile %s not written" % pes_file.name)
            pesobj = CIME.XML.pes.Pes(pes_file.name)
        for node in pesobj.get_nodes('pes'):
            pesize = node.get('pesize')
            pes_ntasks, pes_nthrds, pes_rootpe, _ = \
               pesobj.find_pes_layout('any', 'any', 'any', pesize_opts=pesize)
Example No. 22
def handle_easy_conflicts(is_merge):
###############################################################################
    conflicting_files = run_cmd_no_fail("git diff --name-only --diff-filter=U").splitlines()
    if not conflicting_files:
        expect(False, "Merge appears to have failed for reasons other than merge conflicts")

    rv = []
    for conflicting_file in conflicting_files:
        able_to_handle = handle_easy_conflict(conflicting_file, is_merge)
        if not able_to_handle:
            rv.append(conflicting_file)

    return rv
Example No. 23
def create_cdash_xml_fakes(results, cdash_build_name, cdash_build_group, utc_time, current_time, hostname):
###############################################################################
    # We assume all cases were created from the same code repo
    first_result_case = os.path.dirname(list(results.items())[0][1][0])
    try:
        srcroot = run_cmd_no_fail("./xmlquery --value CIMEROOT", from_dir=first_result_case)
    except:
        # Use repo containing this script as last resort
        srcroot = CIME.utils.get_cime_root()

    git_commit = CIME.utils.get_current_commit(repo=srcroot)

    data_rel_path = os.path.join("Testing", utc_time)

    create_cdash_config_xml(results, cdash_build_name, cdash_build_group, utc_time, current_time, hostname, data_rel_path, git_commit)

    create_cdash_build_xml(results, cdash_build_name, cdash_build_group, utc_time, current_time, hostname, data_rel_path, git_commit)

    create_cdash_test_xml(results, cdash_build_name, cdash_build_group, utc_time, current_time, hostname, data_rel_path, git_commit)
Example No. 24
def check_code(files, num_procs=10, interactive=False):
###############################################################################
    """
    Check all python files in the given directory

    Returns True if all files had no problems
    """
    # Build the list of files to check: if a user-provided file argument is a
    # valid file, use it; otherwise, search the repo for a file with a similar name.
    files_to_check = []
    if files:
        repo_files = run_cmd_no_fail('git ls-files', from_dir=get_cime_root(), verbose=False).splitlines()
        for filearg in files:
            if os.path.exists(filearg):
                files_to_check.append(os.path.abspath(filearg))
            else:
                found = False
                for repo_file in repo_files:
                    if repo_file.endswith(filearg):
                        found = True
                        files_to_check.append(repo_file) # could have multiple matches

                if not found:
                    logger.warning("Could not find file matching argument '%s'" % filearg)
    else:
        # Check every python file
        files_to_check = get_all_checkable_files()

    if "scripts/lib/six.py" in files_to_check:
        files_to_check.remove("scripts/lib/six.py")
        logger.info("Not checking contributed file six.py")

    expect(len(files_to_check) > 0, "No matching files found")

    # No point in using more threads than files
    if len(files_to_check) < num_procs:
        num_procs = len(files_to_check)

    pool = ThreadPool(num_procs)
    results = pool.map(lambda x : _run_pylint(x, interactive), files_to_check)
    pool.close()
    pool.join()
    return dict(results)
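
A hedged usage sketch for check_code above, assuming _run_pylint returns a (filename, problems) pair so that the resulting dict maps each checked file to its pylint findings; the file name is a made-up example.

results = check_code(["case_setup.py"], num_procs=4)
all_clean = all(not problems for problems in results.values())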
Example No. 25
 def test_use_atm_lnd(self):
     "Solve layout atm_lnd from json file"
     with tempfile.NamedTemporaryFile('w+') as jsonfile1:
         atmlnd_dict = copy.deepcopy(JSON_DICT)
         # Fake data for ROF, CPL
         atmlnd_dict['ROF'] = {"ntasks" : [32,64,128,256],
                               "blocksize" : 8,
                               "nthrds" : [1],
                               "cost" : [8.0, 4.0, 2.0, 1.0]}
         atmlnd_dict['CPL'] = {"ntasks" : [32,64,128,256],
                               "blocksize" : 8,
                               "nthrds" : [1],
                               "cost" : [8.0, 4.0, 2.0, 1.0]}
         json.dump(atmlnd_dict, jsonfile1)
         jsonfile1.flush()
         cmd = "./load_balancing_solve.py --json-input %s --print-models --layout tests.atm_lnd.AtmLnd" % (jsonfile1.name)
         output = run_cmd_no_fail(cmd, from_dir=CODE_DIR)
         self._check_solution(output, "Natm", 976)
         self._check_solution(output, "NBatm", 976/8)
Example No. 26
def reset_file(version, srcpath, dstpath):
###############################################################################
    is_exe = os.access(dstpath, os.X_OK)
    os.remove(dstpath)
    try:
        run_cmd_no_fail("git show {}:{} > {}".format(version, srcpath, dstpath))
    except CIMEError:
        # If the above fails, then the file was deleted
        run_cmd_no_fail("git rm -f {}".format(dstpath))
    else:
        if is_exe:
            os.chmod(dstpath, os.stat(dstpath).st_mode | osstat.S_IXUSR | osstat.S_IXGRP | osstat.S_IXOTH)

        run_cmd_no_fail("git add {}".format(dstpath))
Example No. 27
    def test_write_pes(self):
        with tempfile.NamedTemporaryFile(
                "w+") as jsonfile1, tempfile.NamedTemporaryFile(
                    "w+") as pes_file:
            json.dump(JSON_DICT, jsonfile1)
            jsonfile1.flush()
            cmd = "./load_balancing_solve.py --json-input %s --pe-output %s" % (
                jsonfile1.name,
                pes_file.name,
            )
            output = run_cmd_no_fail(cmd, from_dir=CODE_DIR)

            self.assertTrue(
                os.access(pes_file.name, os.R_OK),
                "pesfile %s not written" % pes_file.name,
            )
            pesobj = CIME.XML.pes.Pes(pes_file.name)

        pes_ntasks, pes_nthrds, pes_rootpe, _, _, _ = pesobj.find_pes_layout(
            "any", "any", "any", "")
        self.assertTrue(pes_ntasks["NTASKS_ATM"] == 992)
Example No. 28
def check_code(files, num_procs=10, interactive=False):
###############################################################################
    """
    Check all python files in the given directory

    Returns True if all files had no problems
    """
    # Build the list of files to check: if a user-provided file argument is a
    # valid file, use it; otherwise, search the repo for a file with a similar name.
    repo_files = run_cmd_no_fail('git ls-files --full-name %s' % get_cime_root(), verbose=False).splitlines()
    files_to_check = []
    if files:
        for filearg in files:
            if os.path.exists(filearg):
                files_to_check.append(os.path.abspath(filearg))
            else:
                found = False
                for repo_file in repo_files:
                    if repo_file.endswith(filearg):
                        found = True
                        files_to_check.append(repo_file) # could have multiple matches

                if not found:
                    logger.warning("Could not find file matching argument '%s'" % filearg)
    else:
        # Check every python file
        files_to_check = get_all_checkable_files()

    expect(len(files_to_check) > 0, "No matching files found")

    # No point in using more threads than files
    if len(files_to_check) < num_procs:
        num_procs = len(files_to_check)

    pool = ThreadPool(num_procs)
    results = pool.map(lambda x : _run_pylint(x, interactive), files_to_check)
    pool.close()
    pool.join()
    return dict(results)
Example No. 29
    def test_run_restart_too_many_fails(self):
        if self.NO_FORTRAN_RUN:
            self.skipTest("Skipping fortran test")
        driver = utils.get_cime_default_driver()
        if driver == "mct":
            walltime = "00:15:00"
        else:
            walltime = "00:30:00"

        casedir = self._create_test(
            ["--walltime " + walltime, "NODEFAIL_P1.f09_g16.X"],
            test_id=self._baseline_name,
            env_changes="NODEFAIL_NUM_FAILS=5",
            run_errors=True,
        )
        rundir = utils.run_cmd_no_fail("./xmlquery RUNDIR --value",
                                       from_dir=casedir)
        fail_sentinel = os.path.join(rundir, "FAIL_SENTINEL")
        self.assertTrue(os.path.exists(fail_sentinel),
                        msg="Missing %s" % fail_sentinel)

        self.assertEqual(open(fail_sentinel, "r").read().count("FAIL"), 4)
Example No. 30
def update_acme_tests(xml_file, categories, platform=None):
    ###############################################################################
    # Retrieve all supported ACME platforms, killing the third entry (MPI lib)
    # for the moment.
    supported_platforms = [p[:2] for p in find_all_supported_platforms()]

    # Fish all of the existing machine/compiler combos out of the XML file.
    if (platform is not None):
        platforms = [tuple(platform.split(","))]
    else:
        platforms = find_all_platforms(xml_file)
        # Prune the non-supported platforms from our list.
        for p in platforms:
            if p not in supported_platforms:
                logging.info("pruning unsupported platform %s" % repr(p))
        platforms = [p for p in platforms if p in supported_platforms]

    manage_xml_entries = os.path.join(CIME.utils.get_cime_root(), "scripts",
                                      "manage_testlists")

    expect(
        os.path.isfile(manage_xml_entries),
        "Couldn't find manage_testlists, expected it to be here: '%s'" %
        manage_xml_entries)

    for category in categories:
        # Remove any existing acme test category from the file.
        if (platform is None):
            run_cmd_no_fail(
                "%s -model acme -component allactive -removetests -category %s"
                % (manage_xml_entries, category))
        else:
            run_cmd_no_fail(
                "%s -model acme -component allactive -removetests -category %s -machine %s -compiler %s"
                % (manage_xml_entries, category, platforms[0][0],
                   platforms[0][1]))

        # Generate a list of test entries corresponding to our suite at the top
        # of the file.
        new_test_file = generate_acme_test_entries(category, platforms)
        run_cmd_no_fail(
            "%s -model acme -component allactive -addlist -file %s -category %s"
            % (manage_xml_entries, new_test_file, category))
        os.unlink(new_test_file)

    print "SUCCESS"
Example No. 31
def reset_file(version, srcpath, dstpath):
    ###############################################################################
    is_exe = os.access(dstpath, os.X_OK)
    os.remove(dstpath)
    try:
        run_cmd_no_fail("git show {}:{} > {}".format(version, srcpath,
                                                     dstpath))
    except CIMEError:
        # If the above fails, then the file was deleted
        run_cmd_no_fail("git rm -f {}".format(dstpath))
    else:
        if is_exe:
            os.chmod(
                dstpath,
                os.stat(dstpath).st_mode | osstat.S_IXUSR | osstat.S_IXGRP
                | osstat.S_IXOTH)

        run_cmd_no_fail("git add {}".format(dstpath))
Example No. 32
    def test_cime_case(self):
        casedir = self._create_test(
            ["--no-build", "TESTRUNPASS_P1.f19_g16_rx1.A"],
            test_id=self._baseline_name)

        self.assertEqual(type(self.MACHINE.get_value("MAX_TASKS_PER_NODE")),
                         int)
        self.assertTrue(
            type(self.MACHINE.get_value("PROJECT_REQUIRED")) in
            [type(None), bool])

        with Case(casedir, read_only=False) as case:
            build_complete = case.get_value("BUILD_COMPLETE")
            self.assertFalse(
                build_complete,
                msg="Build complete had wrong value '%s'" % build_complete,
            )

            case.set_value("BUILD_COMPLETE", True)
            build_complete = case.get_value("BUILD_COMPLETE")
            self.assertTrue(
                build_complete,
                msg="Build complete had wrong value '%s'" % build_complete,
            )

            case.flush()

            build_complete = utils.run_cmd_no_fail(
                "./xmlquery BUILD_COMPLETE --value", from_dir=casedir)
            self.assertEqual(
                build_complete,
                "TRUE",
                msg="Build complete had wrong value '%s'" % build_complete,
            )

            # Test some test properties
            self.assertEqual(case.get_value("TESTCASE"), "TESTRUNPASS")
Example No. 33
 def test_use_atm_lnd(self):
     "Solve layout atm_lnd from json file"
     with tempfile.NamedTemporaryFile('w+') as jsonfile1:
         atmlnd_dict = copy.deepcopy(JSON_DICT)
         # Fake data for ROF, CPL
         atmlnd_dict['ROF'] = {
             "ntasks": [32, 64, 128, 256],
             "blocksize": 8,
             "nthrds": [1],
             "cost": [8.0, 4.0, 2.0, 1.0]
         }
         atmlnd_dict['CPL'] = {
             "ntasks": [32, 64, 128, 256],
             "blocksize": 8,
             "nthrds": [1],
             "cost": [8.0, 4.0, 2.0, 1.0]
         }
         json.dump(atmlnd_dict, jsonfile1)
         jsonfile1.flush()
         cmd = "./load_balancing_solve.py --json-input %s --print-models --layout tests.atm_lnd.AtmLnd" % (
             jsonfile1.name)
         output = run_cmd_no_fail(cmd, from_dir=CODE_DIR)
         self._check_solution(output, "Natm", 976)
         self._check_solution(output, "NBatm", 976 / 8)
Example No. 34
def update_acme_tests(xml_file, categories, platform=None):
###############################################################################
    # Retrieve all supported ACME platforms, killing the third entry (MPI lib)
    # for the moment.
    supported_platforms = [p[:2] for p in find_all_supported_platforms()]

    # Fish all of the existing machine/compiler combos out of the XML file.
    if (platform is not None):
        platforms = [tuple(platform.split(","))]
    else:
        platforms = find_all_platforms(xml_file)
        # Prune the non-supported platforms from our list.
        for p in platforms:
            if p not in supported_platforms:
                logging.info("pruning unsupported platform %s"%repr(p))
        platforms = [p for p in platforms if p in supported_platforms]

    manage_xml_entries = os.path.join(CIME.utils.get_cime_root(), "scripts", "manage_testlists")

    expect(os.path.isfile(manage_xml_entries),
           "Couldn't find manage_testlists, expected it to be here: '%s'" % manage_xml_entries)

    for category in categories:
        # Remove any existing acme test category from the file.
        if (platform is None):
            run_cmd_no_fail("%s -model acme -component allactive -removetests -category %s" % (manage_xml_entries, category))
        else:
            run_cmd_no_fail("%s -model acme -component allactive -removetests -category %s -machine %s -compiler %s"
                            % (manage_xml_entries, category, platforms[0][0], platforms[0][1]))

        # Generate a list of test entries corresponding to our suite at the top
        # of the file.
        new_test_file = generate_acme_test_entries(category, platforms)
        run_cmd_no_fail("%s -model acme -component allactive -addlist -file %s -category %s" %
                        (manage_xml_entries, new_test_file, category))
        os.unlink(new_test_file)

    print "SUCCESS"
Example No. 35
def reset_file(version, srcpath, dstpath):
    ###############################################################################
    os.remove(dstpath)
    run_cmd_no_fail("git show {}:{} > {}".format(version, srcpath, dstpath))
    run_cmd_no_fail("git add {}".format(dstpath))
Example No. 36
def _main():
    output, build_dir, build_optimized, clean,\
        cmake_args, compiler, enable_genf90, machine, machines_dir,\
        make_j, use_mpi, mpilib, mpirun_command, test_spec_dir, ctest_args,\
        use_openmp, xml_test_list, verbose \
        = parse_command_line(sys.argv)

    #=================================================
    # Find directory and file paths.
    #=================================================
    suite_specs = []
    # TODO: this violates cime policy of direct access to xml
    # should be moved to CIME/XML
    if xml_test_list is not None:
        test_xml_tree = ElementTree()
        test_xml_tree.parse(xml_test_list)
        known_paths = {
            "here": os.path.abspath(os.path.dirname(xml_test_list)),
            }
        suite_specs.extend(suites_from_xml(test_xml_tree, known_paths))
    if test_spec_dir is not None:
        suite_specs.append(
            TestSuiteSpec("__command_line_test__",
                          ["__command_line_test__"],
                          [os.path.abspath(test_spec_dir)])
            )


    if machines_dir is not None:
        machines_file = os.path.join(machines_dir, "config_machines.xml")
        machobj = Machines(infile=machines_file, machine=machine)
    else:
        machobj = Machines(machine=machine)

    # Create build directory if necessary.
    build_dir = os.path.abspath(build_dir)

    if not os.path.isdir(build_dir):
        os.mkdir(build_dir)

    # Switch to the build directory.
    os.chdir(build_dir)

    #=================================================
    # Functions to perform various stages of build.
    #=================================================

    if not use_mpi:
        mpilib = "mpi-serial"
    elif mpilib is None:
        mpilib = machobj.get_default_MPIlib()
        logger.info("Using mpilib: {}".format(mpilib))

    if compiler is None:
        compiler = machobj.get_default_compiler()
        logger.info("Compiler is {}".format(compiler))

    compilerobj = Compilers(machobj, compiler=compiler, mpilib=mpilib)

    pfunit_path = find_pfunit(compilerobj, mpilib=mpilib, use_openmp=use_openmp)

    debug = not build_optimized
    os_ = machobj.get_value("OS")

    # Create the environment, and the Macros.cmake file
    #
    #
    configure(machobj, build_dir, ["CMake"], compiler, mpilib, debug, os_,
              unit_testing=True)
    machspecific = EnvMachSpecific(build_dir, unit_testing=True)

    fake_case = FakeCase(compiler, mpilib, debug)
    machspecific.load_env(fake_case)
    os.environ["OS"] = os_
    os.environ["COMPILER"] = compiler
    os.environ["DEBUG"] = stringify_bool(debug)
    os.environ["MPILIB"] = mpilib
    if use_openmp:
        os.environ["compile_threaded"] = "true"
    else:
        os.environ["compile_threaded"] = "false"

    os.environ["UNIT_TEST_HOST"] = socket.gethostname()
    if "NETCDF_PATH" in os.environ and not "NETCDF" in os.environ:
        # The CMake Netcdf find utility that we use (from pio2) seems to key off
        # of the environment variable NETCDF, but not NETCDF_PATH
        logger.info("Setting NETCDF environment variable: {}".format(os.environ["NETCDF_PATH"]))
        os.environ["NETCDF"] = os.environ["NETCDF_PATH"]

    if not use_mpi:
        mpirun_command = ""
    elif mpirun_command is None:
        mpi_attribs = {
            "compiler" : compiler,
            "mpilib"   : mpilib,
            "threaded" : use_openmp,
            "unit_testing" : True
        }

        # We can get away with specifying case=None since we're using exe_only=True
        mpirun_command, _ = machspecific.get_mpirun(case=None, attribs=mpi_attribs, exe_only=True)
        mpirun_command = machspecific.get_resolved_value(mpirun_command)
        logger.info("mpirun command is '{}'".format(mpirun_command))

    #=================================================
    # Run tests.
    #=================================================

    for spec in suite_specs:
        os.chdir(build_dir)
        if os.path.isdir(spec.name):
            if clean:
                rmtree(spec.name)

        if not os.path.isdir(spec.name):
            os.mkdir(spec.name)

        for label, directory in spec:
            os.chdir(os.path.join(build_dir,spec.name))
            if not os.path.isdir(label):
                os.mkdir(label)

            os.chdir(label)

            name = spec.name+"/"+label

            if not os.path.islink("Macros.cmake"):
                os.symlink(os.path.join(build_dir,"Macros.cmake"), "Macros.cmake")
            use_mpiserial = not use_mpi
            cmake_stage(name, directory, build_optimized, use_mpiserial, mpirun_command, output, pfunit_path, verbose=verbose,
                        enable_genf90=enable_genf90, cmake_args=cmake_args)
            make_stage(name, output, make_j, clean=clean, verbose=verbose)


    for spec in suite_specs:
        os.chdir(os.path.join(build_dir,spec.name))
        for label, directory in spec:

            name = spec.name+"/"+label

            output.print_header("Running CTest tests for "+name+".")

            ctest_command = ["ctest", "--output-on-failure"]

            if verbose:
                ctest_command.append("-VV")

            if ctest_args is not None:
                ctest_command.extend(ctest_args.split(" "))

            run_cmd_no_fail(" ".join(ctest_command), from_dir=label, arg_stdout=None, arg_stderr=subprocess.STDOUT)
Example No. 37
def apply_user_mods(caseroot, user_mods_path, keepexe=None):
    '''
    Recursively apply user_mods to caseroot - this includes updating user_nl_xxx,
    updating SourceMods, and creating case shell_commands and xmlchange_cmnds files.

    First remove case shell_commands files if any already exist

    If this function is called multiple times, settings from later calls will
    take precedence over earlier calls, if there are conflicts.

    keepexe is an optional argument that is needed for cases where apply_user_mods is
    called from create_clone
    '''
    case_shell_command_files = [
        os.path.join(caseroot, "shell_commands"),
        os.path.join(caseroot, "xmlchange_cmnds")
    ]
    for shell_command_file in case_shell_command_files:
        if os.path.isfile(shell_command_file):
            os.remove(shell_command_file)

    include_dirs = build_include_dirs_list(user_mods_path)
    # If a user_mods dir 'foo' includes 'bar', the include_dirs list returned
    # from build_include_dirs has 'foo' before 'bar'. But with the below code,
    # directories that occur later in the list take precedence over the earlier
    # ones, and we want 'foo' to take precedence over 'bar' in this case (in
    # general: we want a given user_mods directory to take precedence over any
    # mods that it includes). So we reverse include_dirs to accomplish this.
    include_dirs.reverse()
    logger.debug("include_dirs are {}".format(include_dirs))
    for include_dir in include_dirs:
        # write user_nl_xxx file in caseroot
        for user_nl in glob.iglob(os.path.join(include_dir, "user_nl_*")):
            with open(os.path.join(include_dir, user_nl), "r") as fd:
                newcontents = fd.read()
            if len(newcontents) == 0:
                continue
            case_user_nl = user_nl.replace(include_dir, caseroot)
            # If the same variable is set twice in a user_nl file, the later one
            # takes precedence. So by appending the new contents, later entries
            # in the include_dirs list take precedence over earlier entries.
            with open(case_user_nl, "a") as fd:
                fd.write(newcontents)

        # update SourceMods in caseroot
        for root, _, files in os.walk(include_dir,
                                      followlinks=True,
                                      topdown=False):
            if "src" in os.path.basename(root):
                if keepexe is not None:
                    expect(
                        False,
                        "cannot have any source mods in {} if keepexe is an option"
                        .format(user_mods_path))
                for sfile in files:
                    source_mods = os.path.join(root, sfile)
                    case_source_mods = source_mods.replace(
                        include_dir, caseroot)
                    # We overwrite any existing SourceMods file so that later
                    # include_dirs take precedence over earlier ones
                    if os.path.isfile(case_source_mods):
                        logger.warning(
                            "WARNING: Overwriting existing SourceMods in {}".
                            format(case_source_mods))
                    else:
                        logger.info("Adding SourceMod to case {}".format(
                            case_source_mods))
                    try:
                        safe_copy(source_mods, case_source_mods)
                    except Exception:
                        expect(
                            False,
                            "Could not write file {} in caseroot {}".format(
                                case_source_mods, caseroot))

        # create xmlchange_cmnds and shell_commands in caseroot
        shell_command_files = glob.glob(os.path.join(include_dir,"shell_commands")) +\
                              glob.glob(os.path.join(include_dir,"xmlchange_cmnds"))
        for shell_commands_file in shell_command_files:
            case_shell_commands = shell_commands_file.replace(
                include_dir, caseroot)
            # add commands from both shell_commands and xmlchange_cmnds to
            # the same file (caseroot/shell_commands)
            case_shell_commands = case_shell_commands.replace(
                "xmlchange_cmnds", "shell_commands")
            # Note that use of xmlchange_cmnds has been deprecated and will soon
            # be removed altogether, so new tests should rely on shell_commands
            if shell_commands_file.endswith("xmlchange_cmnds"):
                logger.warning("xmlchange_cmnds is deprecated and will be removed " +\
                            "in a future release; please rename {} shell_commands".format(shell_commands_file))
            with open(shell_commands_file, "r") as fd:
                new_shell_commands = fd.read().replace("xmlchange",
                                                       "xmlchange --force")
            # By appending the new commands to the end, settings from later
            # include_dirs take precedence over earlier ones
            with open(case_shell_commands, "a") as fd:
                fd.write(new_shell_commands)

    for shell_command_file in case_shell_command_files:
        if os.path.isfile(shell_command_file):
            os.chmod(shell_command_file, 0o777)
            run_cmd_no_fail(shell_command_file, verbose=True)
Example No. 38
def get_last_merge(branch_name):
    ###############################################################################
    return run_cmd_no_fail(
        "git log --first-parent ORIG_HEAD --grep='{}' -1 --oneline".format(
            branch_name)).split()[0]
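
A hedged usage sketch for get_last_merge above; the branch name is illustrative. The return value is the abbreviated hash of the most recent first-parent commit reachable from ORIG_HEAD whose message mentions branch_name.

# Illustrative call; "master" stands in for whatever branch was merged.
last_merge = get_last_merge("master")
run_cmd_no_fail("git show --stat {}".format(last_merge))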
Example No. 39
        finally:
            os.remove(syslog_jobid_path)

    # If requested, spawn a mach_syslog process to monitor job progress
    sample_interval = case.get_value("SYSLOG_N")
    if sample_interval > 0:
        archive_checkpoints = os.path.join(full_timing_dir,
                                           "checkpoints.{}".format(lid))
        os.mkdir(archive_checkpoints)
        utils.touch("{}/e3sm.log.{}".format(rundir, lid))
        syslog_jobid = utils.run_cmd_no_fail(
            "./mach_syslog {si} {jobid} {lid} {rundir} {rundir}/timing/checkpoints {ac} >& /dev/null & echo $!"
            .format(
                si=sample_interval,
                jobid=job_id,
                lid=lid,
                rundir=rundir,
                ac=archive_checkpoints,
            ),
            from_dir=os.path.join(caseroot, "Tools"),
        )
        with open(os.path.join(rundir, "syslog_jobid.{}".format(job_id)),
                  "w") as fd:
            fd.write("{}\n".format(syslog_jobid))


def save_postrun_provenance(case, lid=None):
    with utils.SharedArea():
        lid = os.environ["LID"] if lid is None else lid

        if case.get_value("SAVE_TIMING"):
Example No. 40
def create_cdash_test_xml(results, cdash_build_name, cdash_build_group,
                          utc_time, current_time, hostname):
    ###############################################################################
    # We assume all cases were created from the same code repo
    first_result_case = os.path.dirname(next(iter(results.items()))[1][0])
    try:
        srcroot = run_cmd_no_fail("./xmlquery --value CIMEROOT",
                                  from_dir=first_result_case)
    except:
        # Use repo containing this script as last resort
        srcroot = CIME.utils.get_cime_root()

    git_commit = CIME.utils.get_current_commit(repo=srcroot)

    data_rel_path = os.path.join("Testing", utc_time)

    site_elem = xmlet.Element("Site")

    if ("JENKINS_START_TIME" in os.environ):
        time_info_str = "Total testing time: {:d} seconds".format(
            int(current_time) - int(os.environ["JENKINS_START_TIME"]))
    else:
        time_info_str = ""

    site_elem.attrib["BuildName"] = cdash_build_name
    site_elem.attrib["BuildStamp"] = "{}-{}".format(utc_time,
                                                    cdash_build_group)
    site_elem.attrib["Name"] = hostname
    site_elem.attrib["OSName"] = "Linux"
    site_elem.attrib["Hostname"] = hostname
    site_elem.attrib["OSVersion"] = "Commit: {}{}".format(
        git_commit, time_info_str)

    testing_elem = xmlet.SubElement(site_elem, "Testing")

    start_date_time_elem = xmlet.SubElement(testing_elem, "StartDateTime")
    start_date_time_elem.text = time.ctime(current_time)

    start_test_time_elem = xmlet.SubElement(testing_elem, "StartTestTime")
    start_test_time_elem.text = str(int(current_time))

    test_list_elem = xmlet.SubElement(testing_elem, "TestList")
    for test_name in sorted(results):
        test_elem = xmlet.SubElement(test_list_elem, "Test")
        test_elem.text = test_name

    for test_name in sorted(results):
        test_path, test_status = results[test_name]
        test_passed = test_status == TEST_PASS_STATUS
        test_norm_path = test_path if os.path.isdir(
            test_path) else os.path.dirname(test_path)

        full_test_elem = xmlet.SubElement(testing_elem, "Test")
        if (test_passed):
            full_test_elem.attrib["Status"] = "passed"
        elif (test_status == NAMELIST_FAIL_STATUS):
            full_test_elem.attrib["Status"] = "notrun"
        else:
            full_test_elem.attrib["Status"] = "failed"

        name_elem = xmlet.SubElement(full_test_elem, "Name")
        name_elem.text = test_name

        path_elem = xmlet.SubElement(full_test_elem, "Path")
        path_elem.text = test_norm_path

        full_name_elem = xmlet.SubElement(full_test_elem, "FullName")
        full_name_elem.text = test_name

        xmlet.SubElement(full_test_elem, "FullCommandLine")
        # text ?

        results_elem = xmlet.SubElement(full_test_elem, "Results")

        named_measurements = (("text/string", "Exit Code",
                               test_status), ("text/string", "Exit Value",
                                              "0" if test_passed else "1"),
                              ("numeric_double", "Execution Time",
                               str(get_test_time(test_norm_path))),
                              ("text/string", "Completion Status",
                               "Not Completed" if test_status
                               == TEST_PEND_STATUS else "Completed"),
                              ("text/string", "Command line", "create_test"))

        for type_attr, name_attr, value in named_measurements:
            named_measurement_elem = xmlet.SubElement(results_elem,
                                                      "NamedMeasurement")
            named_measurement_elem.attrib["type"] = type_attr
            named_measurement_elem.attrib["name"] = name_attr

            value_elem = xmlet.SubElement(named_measurement_elem, "Value")
            value_elem.text = value

        measurement_elem = xmlet.SubElement(results_elem, "Measurement")

        value_elem = xmlet.SubElement(measurement_elem, "Value")
        value_elem.text = get_test_output(test_norm_path)

    elapsed_time_elem = xmlet.SubElement(testing_elem, "ElapsedMinutes")
    elapsed_time_elem.text = "0"  # Skip for now

    etree = xmlet.ElementTree(site_elem)

    etree.write(os.path.join(data_rel_path, "Test.xml"))
Example No. 41
def do_subtree_split(new_split_tag, merge_tag):
###############################################################################
    subtree_branch = get_branch_from_tag(new_split_tag)
    run_cmd_no_fail("git subtree split --prefix=cime --onto={} -b {}".\
                        format(merge_tag, subtree_branch), verbose=True)
    return subtree_branch
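
A hedged usage sketch for do_subtree_split above; the tag names are made-up examples. git subtree split rewrites the history under the cime/ prefix onto merge_tag and leaves the result on a new branch named after the split tag.

pr_branch = do_subtree_split("split/2019-05-01", "merge/2019-04-24")
run_cmd_no_fail("git checkout {}".format(pr_branch), verbose=True)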
Example No. 42
def cmake_stage(name,
                test_spec_dir,
                build_optimized,
                use_mpiserial,
                mpirun_command,
                output,
                pfunit_path,
                cmake_args=None,
                clean=False,
                verbose=False,
                enable_genf90=True,
                color=True):
    """Run cmake in the current working directory.

    Arguments:
    name - Name for output messages.
    test_spec_dir - Test specification directory to run CMake on.
    build_optimized (logical) - If True, we'll build in optimized rather than debug mode
    use_mpiserial (logical) - If True, we'll tell CMake to include mpi-serial for tests
                              that need it
    """
    if clean:
        if os.path.isfile("CMakeCache.txt"):
            os.remove("CMakeCache.txt")
        if os.path.isdir("CMakeFiles"):
            rmtree("CMakeFiles")

    if not os.path.isfile("CMakeCache.txt"):

        output.print_header("Running cmake for " + name + ".")

        # This build_type only has limited uses, and should probably be removed,
        # but for now it's still needed
        if build_optimized:
            build_type = "CESM"
        else:
            build_type = "CESM_DEBUG"

        cmake_command = [
            "cmake", "-C Macros.cmake", test_spec_dir,
            "-DCIMEROOT=" + _CIMEROOT, "-DCIME_CMAKE_MODULE_DIRECTORY=" +
            os.path.abspath(os.path.join(_CIMEROOT, "src", "CMake")),
            "-DCMAKE_BUILD_TYPE=" + build_type,
            "-DPFUNIT_MPIRUN='" + mpirun_command + "'",
            "-DPFUNIT_PATH=" + pfunit_path
        ]
        if use_mpiserial:
            cmake_command.append("-DUSE_MPI_SERIAL=ON")
        if verbose:
            cmake_command.append("-Wdev")

        if enable_genf90:
            cmake_command.append("-DENABLE_GENF90=ON")
            genf90_dir = os.path.join(_CIMEROOT, "src", "externals", "genf90")
            cmake_command.append("-DCMAKE_PROGRAM_PATH=" + genf90_dir)

        if not color:
            cmake_command.append("-DUSE_COLOR=OFF")

        if cmake_args is not None:
            cmake_command.extend(cmake_args.split(" "))

        run_cmd_no_fail(" ".join(cmake_command),
                        verbose=True,
                        arg_stdout=None,
                        arg_stderr=subprocess.STDOUT)
Example No. 43
        logger.warning("{} cannot be created. Skipping archive of timing data and associated provenance.".format(full_timing_dir))
        return

    mach = case.get_value("MACH")
    compiler = case.get_value("COMPILER")

    # For some batch machines save queue info
    job_id = _get_batch_job_id_for_syslog(case)
    if job_id is not None:
        if mach == "theta":
            for cmd, filename in [("qstat -l --header JobID:JobName:User:Project:WallTime:QueuedTime:Score:RunTime:TimeRemaining:Nodes:State:Location:Mode:Command:Args:Procs:Queue:StartTime:attrs:Geometry", "qstatf"),
                                  ("qstat -lf %s" % job_id, "qstatf_jobid"),
                                  ("xtnodestat", "xtnodestat"),
                                  ("xtprocadmin", "xtprocadmin")]:
                filename = "%s.%s" % (filename, lid)
                run_cmd_no_fail(cmd, arg_stdout=filename, from_dir=full_timing_dir)
                gzip_existing_file(os.path.join(full_timing_dir, filename))
        elif mach in ["cori-haswell", "cori-knl"]:
            for cmd, filename in [("sinfo -a -l", "sinfol"), ("scontrol show jobid %s" % job_id, "sqsf_jobid"),
                                  # ("sqs -f", "sqsf"),
                                  ("squeue -o '%.10i %.15P %.20j %.10u %.7a %.2t %.6D %.8C %.10M %.10l %.20S %.20V'", "squeuef"),
                                  ("squeue -t R -o '%.10i %R'", "squeues")]:
                filename = "%s.%s" % (filename, lid)
                run_cmd_no_fail(cmd, arg_stdout=filename, from_dir=full_timing_dir)
                gzip_existing_file(os.path.join(full_timing_dir, filename))
        elif mach in ["anvil", "chrysalis", "compy"]:
            for cmd, filename in [("sinfo -l", "sinfol"), 
                                  ("squeue -o '%all' --job {}".format(job_id), "squeueall_jobid"),
                                  ("squeue -o '%.10i %.10P %.15u %.20a %.2t %.6D %.8C %.12M %.12l %.20S %.20V %j'", "squeuef"),
                                  ("squeue -t R -o '%.10i %R'", "squeues")]:
                filename = "%s.%s" % (filename, lid)
Example No. 44
    def test_bless_test_results(self):
        if self.NO_FORTRAN_RUN:
            self.skipTest("Skipping fortran test")
        # Test resubmit scenario if Machine has a batch system
        if self.MACHINE.has_batch_system():
            test_names = [
                "TESTRUNDIFFRESUBMIT_Mmpi-serial.f19_g16_rx1.A",
                "TESTRUNDIFF_Mmpi-serial.f19_g16_rx1.A",
            ]
        else:
            test_names = ["TESTRUNDIFF_P1.f19_g16_rx1.A"]

        # Generate some baselines
        for test_name in test_names:
            if utils.get_model() == "e3sm":
                genargs = ["-g", "-o", "-b", self._baseline_name, test_name]
                compargs = ["-c", "-b", self._baseline_name, test_name]
            else:
                genargs = [
                    "-g",
                    self._baseline_name,
                    "-o",
                    test_name,
                    "--baseline-root ",
                    self._baseline_area,
                ]
                compargs = [
                    "-c",
                    self._baseline_name,
                    test_name,
                    "--baseline-root ",
                    self._baseline_area,
                ]

            self._create_test(genargs)
            # Hist compare should pass
            self._create_test(compargs)
            # Change behavior
            os.environ["TESTRUNDIFF_ALTERNATE"] = "True"

            # Hist compare should now fail
            test_id = "%s-%s" % (self._baseline_name, utils.get_timestamp())
            self._create_test(compargs, test_id=test_id, run_errors=True)

            # compare_test_results should detect the fail
            cpr_cmd = "{}/compare_test_results --test-root {} -t {} ".format(
                self.TOOLS_DIR, self._testroot, test_id
            )
            output = self.run_cmd_assert_result(
                cpr_cmd, expected_stat=utils.TESTS_FAILED_ERR_CODE
            )

            # use regex
            expected_pattern = re.compile(r"FAIL %s[^\s]* BASELINE" % test_name)
            the_match = expected_pattern.search(output)
            self.assertNotEqual(
                the_match,
                None,
                msg="Cmd '%s' failed to display failed test %s in output:\n%s"
                % (cpr_cmd, test_name, output),
            )
            # Bless
            utils.run_cmd_no_fail(
                "{}/bless_test_results --test-root {} --hist-only --force -t {}".format(
                    self.TOOLS_DIR, self._testroot, test_id
                )
            )
            # Hist compare should now pass again
            self._create_test(compargs)
            self.verify_perms(self._baseline_area)
            if "TESTRUNDIFF_ALTERNATE" in os.environ:
                del os.environ["TESTRUNDIFF_ALTERNATE"]
Exemplo n.º 45
0
def archive_old_test_data(
    machine,
    mach_comp,
    test_id_root,
    scratch_root,
    test_root,
    old_test_archive,
    avoid_test_id,
):
    ###############################################################################

    gb_allowed = machine.get_value("MAX_GB_OLD_TEST_DATA")
    gb_allowed = 500 if gb_allowed is None else gb_allowed
    bytes_allowed = gb_allowed * 1000000000
    expect(
        bytes_allowed > 0,
        "Machine {} does not support test archiving".format(machine.get_machine_name()),
    )

    # Remove old cs.status, cs.submit. I don't think there's any value to leaving these around
    # or archiving them
    for old_cs_file in glob.glob("{}/cs.*".format(scratch_root)):
        if avoid_test_id not in old_cs_file:
            logging.info("TEST ARCHIVER: Removing {}".format(old_cs_file))
            os.remove(old_cs_file)

    # Remove the old CTest XML, same reason as above
    if os.path.isdir("Testing"):
        logging.info(
            "TEST ARCHIVER: Removing {}".format(os.path.join(os.getcwd(), "Testing"))
        )
        shutil.rmtree("Testing")

    if not os.path.exists(old_test_archive):
        os.mkdir(old_test_archive)

    # Archive old data by looking at old test cases
    for old_case in glob.glob("{}/*{}*{}*".format(test_root, mach_comp, test_id_root)):
        if avoid_test_id not in old_case:
            logging.info("TEST ARCHIVER: archiving case {}".format(old_case))
            exeroot, rundir, archdir = run_cmd_no_fail(
                "./xmlquery EXEROOT RUNDIR DOUT_S_ROOT --value", from_dir=old_case
            ).split(",")

            for the_dir, target_area in [
                (exeroot, "old_builds"),
                (rundir, "old_runs"),
                (archdir, "old_archives"),
                (old_case, "old_cases"),
            ]:
                if os.path.exists(the_dir):
                    start_time = time.time()
                    logging.info(
                        "TEST ARCHIVER:   archiving {} to {}".format(
                            the_dir, os.path.join(old_test_archive, target_area)
                        )
                    )
                    if not os.path.exists(os.path.join(old_test_archive, target_area)):
                        os.mkdir(os.path.join(old_test_archive, target_area))

                    old_case_name = os.path.basename(old_case)
                    with tarfile.open(
                        os.path.join(
                            old_test_archive,
                            target_area,
                            "{}.tar.gz".format(old_case_name),
                        ),
                        "w:gz",
                    ) as tfd:
                        tfd.add(the_dir, arcname=old_case_name)

                    shutil.rmtree(the_dir)

                    # Remove parent dir if it's empty
                    parent_dir = os.path.dirname(the_dir)
                    if not os.listdir(parent_dir) or os.listdir(parent_dir) == [
                        "case2_output_root"
                    ]:
                        shutil.rmtree(parent_dir)

                    end_time = time.time()
                    logging.info(
                        "TEST ARCHIVER:   archiving {} took {} seconds".format(
                            the_dir, int(end_time - start_time)
                        )
                    )

    # Check size of archive
    bytes_of_old_test_data = int(
        run_cmd_no_fail("du -sb {}".format(old_test_archive)).split()[0]
    )
    if bytes_of_old_test_data > bytes_allowed:
        logging.info(
            "TEST ARCHIVER: Too much test data, {}GB (actual) > {}GB (limit)".format(
                bytes_of_old_test_data / 1000000000, bytes_allowed / 1000000000
            )
        )
        old_test_ids = scan_for_test_ids(old_test_archive, mach_comp, test_id_root)
        for old_test_id in sorted(old_test_ids):
            logging.info(
                "TEST ARCHIVER:   Removing old data for test {}".format(old_test_id)
            )
            for item in ["old_cases", "old_builds", "old_runs", "old_archives"]:
                for dir_to_rm in glob.glob(
                    "{}/{}/*{}*{}*".format(
                        old_test_archive, item, mach_comp, old_test_id
                    )
                ):
                    logging.info("TEST ARCHIVER:     Removing {}".format(dir_to_rm))
                    if os.path.isdir(dir_to_rm):
                        shutil.rmtree(dir_to_rm)
                    else:
                        os.remove(dir_to_rm)

            bytes_of_old_test_data = int(
                run_cmd_no_fail("du -sb {}".format(old_test_archive)).split()[0]
            )
            if bytes_of_old_test_data < bytes_allowed:
                break

    else:
        logging.info(
            "TEST ARCHIVER: Test data is within accepted bounds, {}GB (actual) < {}GB (limit)".format(
                bytes_of_old_test_data / 1000000000, bytes_allowed / 1000000000
            )
        )
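
A hedged invocation sketch for the archiver above; every argument value is illustrative, and MACHINE stands in for a CIME Machines instance as used elsewhere in this listing:

# Hypothetical call from a Jenkins-style driver script (all values assumed).
archive_old_test_data(
    machine=MACHINE,
    mach_comp="melvin_gnu",
    test_id_root="jenkins",
    scratch_root="/scratch/acme",
    test_root="/scratch/acme/tests",
    old_test_archive="/scratch/acme/old_tests",
    avoid_test_id="jenkins_20240101",
)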
Exemplo n.º 46
0
    def test_rebless_namelist(self):
        # Generate some namelist baselines
        if self.NO_FORTRAN_RUN:
            self.skipTest("Skipping fortran test")
        test_to_change = "TESTRUNPASS_P1.f19_g16_rx1.A"
        if utils.get_model() == "e3sm":
            genargs = ["-g", "-o", "-b", self._baseline_name, "cime_test_only_pass"]
            compargs = ["-c", "-b", self._baseline_name, "cime_test_only_pass"]
        else:
            genargs = ["-g", self._baseline_name, "-o", "cime_test_only_pass"]
            compargs = ["-c", self._baseline_name, "cime_test_only_pass"]

        self._create_test(genargs)

        # Basic namelist compare
        test_id = "%s-%s" % (self._baseline_name, utils.get_timestamp())
        cases = self._create_test(compargs, test_id=test_id)
        casedir = self.get_casedir(test_to_change, cases)

        # Check standalone case.cmpgen_namelists
        self.run_cmd_assert_result("./case.cmpgen_namelists", from_dir=casedir)

        # compare_test_results should pass
        cpr_cmd = "{}/compare_test_results --test-root {} -n -t {} ".format(
            self.TOOLS_DIR, self._testroot, test_id
        )
        output = self.run_cmd_assert_result(cpr_cmd)

        # use regex
        expected_pattern = re.compile(r"PASS %s[^\s]* NLCOMP" % test_to_change)
        the_match = expected_pattern.search(output)
        msg = f"Cmd {cpr_cmd} failed to display passed test in output:\n{output}"
        self.assertNotEqual(
            the_match,
            None,
            msg=msg,
        )

        # Modify namelist
        fake_nl = """
 &fake_nml
   fake_item = 'fake'
   fake = .true.
/"""
        baseline_area = self._baseline_area
        baseline_glob = glob.glob(
            os.path.join(baseline_area, self._baseline_name, "TEST*")
        )
        self.assertEqual(
            len(baseline_glob),
            3,
            msg="Expected three matches, got:\n%s" % "\n".join(baseline_glob),
        )

        for baseline_dir in baseline_glob:
            nl_path = os.path.join(baseline_dir, "CaseDocs", "datm_in")
            self.assertTrue(os.path.isfile(nl_path), msg="Missing file %s" % nl_path)

            os.chmod(nl_path, stat.S_IRUSR | stat.S_IWUSR)
            with open(nl_path, "a") as nl_file:
                nl_file.write(fake_nl)

        # Basic namelist compare should now fail
        test_id = "%s-%s" % (self._baseline_name, utils.get_timestamp())
        self._create_test(compargs, test_id=test_id, run_errors=True)
        casedir = self.get_casedir(test_to_change, cases)

        # Unless namelists are explicitly ignored
        test_id2 = "%s-%s" % (self._baseline_name, utils.get_timestamp())
        self._create_test(compargs + ["--ignore-namelists"], test_id=test_id2)

        self.run_cmd_assert_result(
            "./case.cmpgen_namelists", from_dir=casedir, expected_stat=100
        )

        # preview namelists should work
        self.run_cmd_assert_result("./preview_namelists", from_dir=casedir)

        # This should still fail
        self.run_cmd_assert_result(
            "./case.cmpgen_namelists", from_dir=casedir, expected_stat=100
        )

        # compare_test_results should fail
        cpr_cmd = "{}/compare_test_results --test-root {} -n -t {} ".format(
            self.TOOLS_DIR, self._testroot, test_id
        )
        output = self.run_cmd_assert_result(
            cpr_cmd, expected_stat=utils.TESTS_FAILED_ERR_CODE
        )

        # use regex
        expected_pattern = re.compile(r"FAIL %s[^\s]* NLCOMP" % test_to_change)
        the_match = expected_pattern.search(output)
        self.assertNotEqual(
            the_match,
            None,
            msg="Cmd '%s' failed to display passed test in output:\n%s"
            % (cpr_cmd, output),
        )

        # Bless
        new_test_id = "%s-%s" % (self._baseline_name, utils.get_timestamp())
        utils.run_cmd_no_fail(
            "{}/bless_test_results --test-root {} -n --force -t {} --new-test-root={} --new-test-id={}".format(
                self.TOOLS_DIR, self._testroot, test_id, self._testroot, new_test_id
            )
        )

        # Basic namelist compare should now pass again
        self._create_test(compargs)

        self.verify_perms(self._baseline_area)
Exemplo n.º 47
0
def get_last_instance_of(branch_name, head):
    ###############################################################################
    return run_cmd_no_fail(
        "git log --first-parent {} --grep='{}' -1 --oneline".format(
            head, branch_name)).split()[0]
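
A minimal usage sketch, assuming a git checkout whose first-parent history contains commits mentioning the branch name; the names below are illustrative:

# Hypothetical usage: abbreviated hash of the most recent first-parent
# commit on HEAD whose message mentions "master".
last_hash = get_last_instance_of("master", "HEAD")
print("last instance: {}".format(last_hash))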
Exemplo n.º 48
0
def has_non_local_commits(filepath, non_local_path, local_tag):
    ###############################################################################
    most_recent = get_tag(local_tag)
    return run_cmd_no_fail("git diff MERGE_HEAD:{} {}:{}".format(
        non_local_path, most_recent, filepath)) != ""
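
A usage sketch, assuming an in-progress merge (MERGE_HEAD must exist) and the get_tag helper these split scripts define elsewhere; the paths and tag name are illustrative:

# Hypothetical usage during a subtree merge: True when the file differs
# between MERGE_HEAD and its counterpart at the most recent local tag.
if has_non_local_commits("src/drivers/mct/main/cime_driver.F90",
                         "driver-mct/main/cime_driver.F90",
                         "to-acme"):
    print("upstream has changes not yet present locally")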
Exemplo n.º 49
0
def create_cdash_upload_xml(results, cdash_build_name, cdash_build_group,
                            utc_time, hostname):
    ###############################################################################

    data_rel_path = os.path.join("Testing", utc_time)

    try:
        log_dir = "{}_logs".format(cdash_build_name)

        need_to_upload = False

        for test_name, test_data in results.items():
            test_path, test_status = test_data

            if (test_status not in [TEST_PASS_STATUS, NAMELIST_FAIL_STATUS]):
                ts = TestStatus(os.path.dirname(test_path))

                build_status = ts.get_status(MODEL_BUILD_PHASE)
                run_status = ts.get_status(RUN_PHASE)
                baseline_status = ts.get_status(BASELINE_PHASE)
                if (build_status == TEST_FAIL_STATUS
                        or run_status == TEST_FAIL_STATUS
                        or baseline_status == TEST_FAIL_STATUS):
                    param = "EXEROOT" if build_status == TEST_FAIL_STATUS else "RUNDIR"
                    log_src_dir = run_cmd_no_fail(
                        "./xmlquery {} --value".format(param),
                        from_dir=os.path.dirname(test_path))

                    log_dst_dir = os.path.join(
                        log_dir, "{}_{}_logs".format(test_name, param))
                    os.makedirs(log_dst_dir)
                    for log_file in glob.glob(
                            os.path.join(log_src_dir, "*log*")):
                        shutil.copy(log_file, log_dst_dir)
                    for log_file in glob.glob(
                            os.path.join(log_src_dir, "*.cprnc.out*")):
                        shutil.copy(log_file, log_dst_dir)

                    need_to_upload = True

        if (need_to_upload):

            tarball = "{}.tar.gz".format(log_dir)
            if (os.path.exists(tarball)):
                os.remove(tarball)

            run_cmd_no_fail("tar -cf - {} | gzip -c".format(log_dir),
                            arg_stdout=tarball)
            base64 = run_cmd_no_fail("base64 {}".format(tarball))

            xml_text = \
r"""<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="Dart/Source/Server/XSL/Build.xsl <file:///Dart/Source/Server/XSL/Build.xsl> "?>
<Site BuildName="{}" BuildStamp="{}-{}" Name="{}" Generator="ctest3.0.0">
<Upload>
<File filename="{}">
<Content encoding="base64">
{}
</Content>
</File>
</Upload>
</Site>
""".format(cdash_build_name, utc_time, cdash_build_group, hostname, os.path.abspath(tarball), base64)

            with open(os.path.join(data_rel_path, "Upload.xml"), "w") as fd:
                fd.write(xml_text)

    finally:
        if (os.path.isdir(log_dir)):
            shutil.rmtree(log_dir)
Exemplo n.º 50
0
def make_pr_branch(branch, branch_head):
###############################################################################
    run_cmd_no_fail("git checkout --no-track -b {} {}".format(branch, branch_head), verbose=True)

    return branch
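
A usage sketch; the branch names are illustrative, and ESMCI_REMOTE_NAME is assumed to be defined as in the setup() example further down:

# Hypothetical usage: stage a PR branch at the upstream master head.
pr_branch = make_pr_branch("cime-split-pr",
                           "{}/master".format(ESMCI_REMOTE_NAME))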
Exemplo n.º 51
0
def create_cdash_xml(results, cdash_build_name, cdash_project,
                     cdash_build_group):
    ###############################################################################

    #
    # Create dart config file
    #

    current_time = time.time()

    utc_time_tuple = time.gmtime(current_time)
    cdash_timestamp = time.strftime("%H:%M:%S", utc_time_tuple)

    hostname = Machines().get_machine_name()
    if (hostname is None):
        hostname = socket.gethostname().split(".")[0]
        logging.warning(
            "Could not convert hostname '{}' into an ACME machine name".format(
                hostname))

    dart_config = \
"""
SourceDirectory: {0}
BuildDirectory: {0}

# Site is something like machine.domain, i.e. pragmatic.crd
Site: {1}

# Build name is osname-revision-compiler, i.e. Linux-2.4.2-2smp-c++
BuildName: {2}

# Submission information
IsCDash: TRUE
CDashVersion:
QueryCDashVersion:
DropSite: my.cdash.org
DropLocation: /submit.php?project={3}
DropSiteUser:
DropSitePassword:
DropSiteMode:
DropMethod: http
TriggerSite:
ScpCommand: {4}

# Dashboard start time
NightlyStartTime: {5} UTC
""".format(os.getcwd(), hostname, cdash_build_name, cdash_project,
           distutils.spawn.find_executable("scp"), cdash_timestamp)

    with open("DartConfiguration.tcl", "w") as dart_fd:
        dart_fd.write(dart_config)

    utc_time = time.strftime('%Y%m%d-%H%M', utc_time_tuple)
    os.makedirs(os.path.join("Testing", utc_time))

    # Make tag file
    with open("Testing/TAG", "w") as tag_fd:
        tag_fd.write("{}\n{}\n".format(utc_time, cdash_build_group))

    create_cdash_test_xml(results, cdash_build_name, cdash_build_group,
                          utc_time, current_time, hostname)

    create_cdash_upload_xml(results, cdash_build_name, cdash_build_group,
                            utc_time, hostname)

    run_cmd_no_fail("ctest -VV -D NightlySubmit", verbose=True)
Exemplo n.º 52
0
def do_subtree_split(new_split_tag, merge_tag):
    ###############################################################################
    subtree_branch = get_branch_from_tag(new_split_tag)
    run_cmd_no_fail("git subtree split --prefix=cime --onto={} -b {}".\
                        format(merge_tag, subtree_branch), verbose=True)
    return subtree_branch
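
For orientation, "git subtree split --prefix=cime" rewrites the history of the cime/ subdirectory onto the given merge tag. A hedged sketch with illustrative tag names, assuming get_branch_from_tag as defined by these split scripts:

# Hypothetical usage: split the cime/ subtree onto the previous merge tag;
# the returned branch can then be pushed to the standalone repository.
pr_branch = do_subtree_split("cime-split-v2", "to-cime-v1")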
Exemplo n.º 53
0
                    "NTASKS_{}".format(comp))
                old_threads = env_mach_pes_locked.get_value(
                    "NTHRDS_{}".format(comp))
                old_inst = env_mach_pes_locked.get_value(
                    "NINST_{}".format(comp))

                new_tasks = case.get_value("NTASKS_{}".format(comp))
                new_threads = case.get_value("NTHRDS_{}".format(comp))
                new_inst = case.get_value("NINST_{}".format(comp))

                if old_tasks != new_tasks or old_threads != new_threads or old_inst != new_inst:
                    logging.warning(
                        "{} pe change requires clean build {} {}".format(
                            comp, old_tasks, new_tasks))
                    cleanflag = comp.lower()
                    run_cmd_no_fail(
                        "./case.build --clean {}".format(cleanflag))

        unlock_file("env_mach_pes.xml", case.get_value("CASEROOT"))


def check_lockedfiles(case):
    """
    Check that all lockedfiles match what's in case

    If caseroot is not specified, it is set to the current working directory
    """
    caseroot = case.get_value("CASEROOT")
    lockedfiles = glob.glob(os.path.join(caseroot, "LockedFiles", "*.xml"))
    for lfile in lockedfiles:
        fpart = os.path.basename(lfile)
        # ignore files used for tests such as env_mach_pes.ERP1.xml by looking for extra dots in the name
Exemplo n.º 54
0
def create_cdash_upload_xml(results, cdash_build_name, cdash_build_group,
                            utc_time, hostname, force_log_upload):
    ###############################################################################

    data_rel_path = os.path.join("Testing", utc_time)

    try:
        log_dir = "{}_logs".format(cdash_build_name)

        need_to_upload = False

        for test_name, test_data in results.items():
            test_path, test_status, _ = test_data

            if test_status != TEST_PASS_STATUS or force_log_upload:
                test_case_dir = os.path.dirname(test_path)

                case_dirs = [test_case_dir]
                case_base = os.path.basename(test_case_dir)
                test_case2_dir = os.path.join(test_case_dir, "case2",
                                              case_base)
                if os.path.exists(test_case2_dir):
                    case_dirs.append(test_case2_dir)

                for case_dir in case_dirs:
                    for param in ["EXEROOT", "RUNDIR", "CASEDIR"]:
                        if param == "CASEDIR":
                            log_src_dir = case_dir
                        else:
                            # it's possible that tests that failed very badly/early, and fake cases for testing
                            # will not be able to support xmlquery
                            try:
                                log_src_dir = run_cmd_no_fail(
                                    "./xmlquery {} --value".format(param),
                                    from_dir=case_dir,
                                )
                            except:
                                continue

                        log_dst_dir = os.path.join(
                            log_dir,
                            "{}{}_{}_logs".format(
                                test_name,
                                "" if case_dir == test_case_dir else ".case2",
                                param,
                            ),
                        )
                        os.makedirs(log_dst_dir)
                        for log_file in glob.glob(
                                os.path.join(log_src_dir, "*log*")):
                            if os.path.isdir(log_file):
                                shutil.copytree(
                                    log_file,
                                    os.path.join(log_dst_dir,
                                                 os.path.basename(log_file)),
                                )
                            else:
                                safe_copy(log_file, log_dst_dir)
                        for log_file in glob.glob(
                                os.path.join(log_src_dir, "*.cprnc.out*")):
                            safe_copy(log_file, log_dst_dir)

                need_to_upload = True

        if need_to_upload:

            tarball = "{}.tar.gz".format(log_dir)
            if os.path.exists(tarball):
                os.remove(tarball)

            run_cmd_no_fail("tar -cf - {} | gzip -c".format(log_dir),
                            arg_stdout=tarball)
            base64 = run_cmd_no_fail("base64 {}".format(tarball))

            xml_text = r"""<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="Dart/Source/Server/XSL/Build.xsl <file:///Dart/Source/Server/XSL/Build.xsl> "?>
<Site BuildName="{}" BuildStamp="{}-{}" Name="{}" Generator="ctest3.0.0">
<Upload>
<File filename="{}">
<Content encoding="base64">
{}
</Content>
</File>
</Upload>
</Site>
""".format(
                cdash_build_name,
                utc_time,
                cdash_build_group,
                hostname,
                os.path.abspath(tarball),
                base64,
            )

            with open(os.path.join(data_rel_path, "Upload.xml"), "w") as fd:
                fd.write(xml_text)

    finally:
        if os.path.isdir(log_dir):
            shutil.rmtree(log_dir)
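
The upload step above shells out to tar and base64; a pure-Python equivalent is sketched below for comparison only (a sketch, not what the script actually runs):

import base64
import io
import tarfile

# Sketch: build the gzipped tarball in memory and base64-encode it,
# replacing the "tar -cf - ... | gzip -c" and "base64" subprocesses.
buf = io.BytesIO()
with tarfile.open(fileobj=buf, mode="w:gz") as tfd:
    tfd.add(log_dir)  # log_dir as in the function above
encoded = base64.b64encode(buf.getvalue()).decode("ascii")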
Exemplo n.º 55
0
def apply_user_mods(caseroot, user_mods_path, ninst=None):
    '''
    Recursively apply user_mods to caseroot - this includes updating user_nl_xxx,
    updating SourceMods and creating case shell_commands and xmlchange_cmnds files

    First remove case shell_commands files if any already exist
    '''
    case_shell_command_files = [os.path.join(caseroot, "shell_commands"),
                                os.path.join(caseroot, "xmlchange_cmnds")]
    for shell_command_file in case_shell_command_files:
        if os.path.isfile(shell_command_file):
            os.remove(shell_command_file)

    include_dirs = build_include_dirs_list(user_mods_path)
    for include_dir in include_dirs:
        # write user_nl_xxx file in caseroot
        for user_nl in glob.iglob(os.path.join(include_dir,"user_nl_*")):
            with open(os.path.join(include_dir, user_nl), "r") as fd:
                newcontents = fd.read()
            if len(newcontents) == 0:
                continue
            case_user_nl = user_nl.replace(include_dir, caseroot)
            comp = case_user_nl.split('_')[-1]
            if ninst is not None and comp in ninst.keys() and ninst[comp] > 1:
                for comp_inst in range(1, ninst[comp]+1):
                    contents = newcontents
                    case_user_nl_inst = case_user_nl + "_%4.4d"%comp_inst
                    logger.info("Pre-pending file %s"%case_user_nl_inst)
                    if os.path.isfile(case_user_nl_inst):
                        with open(case_user_nl_inst, "r") as fd:
                            old_contents = fd.read()
                            if old_contents.find(contents) == -1:
                                contents = contents + old_contents
                    with open(case_user_nl_inst, "w") as fd:
                        fd.write(contents)
            else:
                contents = newcontents
                logger.info("Pre-pending file %s"%case_user_nl)
                if os.path.isfile(case_user_nl):
                    with open(case_user_nl, "r") as fd:
                        old_contents = fd.read()
                        if old_contents.find(contents) == -1:
                            contents = contents + old_contents
                with open(case_user_nl, "w") as fd:
                    fd.write(contents)

        # update SourceMods in caseroot
        for root, _, files in os.walk(include_dir,followlinks=True,topdown=False):
            if "src" in os.path.basename(root):
                for sfile in files:
                    source_mods = os.path.join(root,sfile)
                    case_source_mods = source_mods.replace(include_dir, caseroot)
                    if os.path.isfile(case_source_mods):
                        logger.warning("Refusing to overwrite existing SourceMods in %s"%case_source_mods)
                    else:
                        logger.info("Adding SourceMod to case %s"%case_source_mods)
                        try:
                            shutil.copyfile(source_mods, case_source_mods)
                        except:
                            expect(False, "Could not write file %s in caseroot %s"
                                   %(case_source_mods,caseroot))

        # create xmlchange_cmnds and shell_commands in caseroot
        shell_command_files = glob.glob(os.path.join(include_dir,"shell_commands")) +\
                              glob.glob(os.path.join(include_dir,"xmlchange_cmnds"))
        for shell_commands_file in shell_command_files:
            case_shell_commands = shell_commands_file.replace(include_dir, caseroot)
            with open(shell_commands_file,"r") as fd:
                new_shell_commands = fd.read().replace("xmlchange","xmlchange --force")
            with open(case_shell_commands, "a") as fd:
                fd.write(new_shell_commands)

    for shell_command_file in case_shell_command_files:
        if os.path.isfile(shell_command_file):
            os.chmod(shell_command_file, 0o777)
            run_cmd_no_fail(shell_command_file)
Exemplo n.º 56
0
def touches_file(start_range, end_range, filepath):
    ###############################################################################
    return run_cmd_no_fail("git log {}..{} {}".format(start_range, end_range,
                                                      filepath)) != ""
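
A usage sketch, assuming both refs exist in the current checkout; the helper returns True when any commit in the range modified the path (names illustrative):

# Hypothetical usage: did anything between the two tags touch this file?
if touches_file("cime-split-v1", "cime-split-v2", "scripts/create_test"):
    print("create_test changed in this range")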
Exemplo n.º 57
0
def _main():
    output, build_dir, build_optimized, clean,\
        cmake_args, compiler, enable_genf90, machine, machines_dir,\
        make_j, use_mpi, mpilib, mpirun_command, test_spec_dir, ctest_args,\
        use_openmp, xml_test_list, verbose \
        = parse_command_line(sys.argv)

    #=================================================
    # Find directory and file paths.
    #=================================================
    suite_specs = []
    # TODO: this violates cime policy of direct access to xml
    # should be moved to CIME/XML
    if xml_test_list is not None:
        test_xml_tree = ElementTree()
        test_xml_tree.parse(xml_test_list)
        known_paths = {
            "here": os.path.abspath(os.path.dirname(xml_test_list)),
        }
        suite_specs.extend(suites_from_xml(test_xml_tree, known_paths))
    if test_spec_dir is not None:
        suite_specs.append(
            TestSuiteSpec("__command_line_test__", ["__command_line_test__"],
                          [os.path.abspath(test_spec_dir)]))

    if machines_dir is not None:
        machines_file = os.path.join(machines_dir, "config_machines.xml")
        machobj = Machines(infile=machines_file, machine=machine)
    else:
        machobj = Machines(machine=machine)

    # Create build directory if necessary.
    build_dir = os.path.abspath(build_dir)

    if not os.path.isdir(build_dir):
        os.mkdir(build_dir)

    # Switch to the build directory.
    os.chdir(build_dir)
    if clean:
        pwd_contents = os.listdir(os.getcwd())
        # Clear CMake cache.
        for file_ in pwd_contents:
            if file_ in ("Macros.cmake", "env_mach_specific.xml") \
                    or file_.startswith('Depends') or file_.startswith(".env_mach_specific"):
                os.remove(file_)

    #=================================================
    # Functions to perform various stages of build.
    #=================================================

    if not use_mpi:
        mpilib = "mpi-serial"
    elif mpilib is None:
        mpilib = machobj.get_default_MPIlib()
        logger.info("Using mpilib: {}".format(mpilib))

    if compiler is None:
        compiler = machobj.get_default_compiler()
        logger.info("Compiler is {}".format(compiler))

    compilerobj = Compilers(machobj, compiler=compiler, mpilib=mpilib)

    pfunit_path = find_pfunit(compilerobj,
                              mpilib=mpilib,
                              use_openmp=use_openmp)

    debug = not build_optimized
    os_ = machobj.get_value("OS")

    # Create the environment, and the Macros.cmake file
    #
    #
    configure(machobj,
              build_dir, ["CMake"],
              compiler,
              mpilib,
              debug,
              os_,
              unit_testing=True)
    machspecific = EnvMachSpecific(build_dir, unit_testing=True)

    fake_case = FakeCase(compiler, mpilib, debug)
    machspecific.load_env(fake_case)
    os.environ["OS"] = os_
    os.environ["COMPILER"] = compiler
    os.environ["DEBUG"] = stringify_bool(debug)
    os.environ["MPILIB"] = mpilib
    if use_openmp:
        os.environ["compile_threaded"] = "true"
    else:
        os.environ["compile_threaded"] = "false"

    os.environ["UNIT_TEST_HOST"] = socket.gethostname()
    if "NETCDF_PATH" in os.environ and not "NETCDF" in os.environ:
        # The CMake Netcdf find utility that we use (from pio2) seems to key off
        # of the environment variable NETCDF, but not NETCDF_PATH
        logger.info("Setting NETCDF environment variable: {}".format(
            os.environ["NETCDF_PATH"]))
        os.environ["NETCDF"] = os.environ["NETCDF_PATH"]

    if not use_mpi:
        mpirun_command = ""
    elif mpirun_command is None:
        mpi_attribs = {
            "compiler": compiler,
            "mpilib": mpilib,
            "threaded": use_openmp,
            "unit_testing": True
        }

        # We can get away with specifying case=None since we're using exe_only=True
        mpirun_command, _ = machspecific.get_mpirun(None,
                                                    mpi_attribs,
                                                    None,
                                                    exe_only=True)
        mpirun_command = machspecific.get_resolved_value(mpirun_command)
        logger.info("mpirun command is '{}'".format(mpirun_command))


    #=================================================
    # Run tests.
    #=================================================

    for spec in suite_specs:
        os.chdir(build_dir)
        if os.path.isdir(spec.name):
            if clean:
                rmtree(spec.name)

        if not os.path.isdir(spec.name):
            os.mkdir(spec.name)

        for label, directory in spec:
            os.chdir(os.path.join(build_dir, spec.name))
            if not os.path.isdir(label):
                os.mkdir(label)

            os.chdir(label)

            name = spec.name + "/" + label

            if not os.path.islink("Macros.cmake"):
                os.symlink(os.path.join(build_dir, "Macros.cmake"),
                           "Macros.cmake")
            use_mpiserial = not use_mpi
            cmake_stage(name,
                        directory,
                        build_optimized,
                        use_mpiserial,
                        mpirun_command,
                        output,
                        pfunit_path,
                        verbose=verbose,
                        enable_genf90=enable_genf90,
                        cmake_args=cmake_args)
            make_stage(name, output, make_j, clean=clean, verbose=verbose)

    for spec in suite_specs:
        os.chdir(os.path.join(build_dir, spec.name))
        for label, directory in spec:

            name = spec.name + "/" + label

            output.print_header("Running CTest tests for " + name + ".")

            ctest_command = ["ctest", "--output-on-failure"]

            if verbose:
                ctest_command.append("-VV")

            if ctest_args is not None:
                ctest_command.extend(ctest_args.split(" "))

            run_cmd_no_fail(" ".join(ctest_command),
                            from_dir=label,
                            arg_stdout=None,
                            arg_stderr=subprocess.STDOUT)
Exemplo n.º 58
0
def setup():
    ###############################################################################
    run_cmd_no_fail("git config merge.renameLimit 999999")
    run_cmd_no_fail("git checkout master && git pull", verbose=True)

    remotes = run_cmd_no_fail("git remote")
    if ESMCI_REMOTE_NAME not in remotes:
        run_cmd_no_fail("git remote add {} {}".format(ESMCI_REMOTE_NAME,
                                                      ESMCI_URL),
                        verbose=True)

    run_cmd_no_fail("git fetch --prune {}".format(ESMCI_REMOTE_NAME),
                    verbose=True)
    run_cmd_no_fail("git fetch --prune {} --tags".format(ESMCI_REMOTE_NAME),
                    verbose=True)

    run_cmd_no_fail("git clean -fd", verbose=True)
Exemplo n.º 60
0
def delete_tag(tag, remote="origin"):
    ###############################################################################
    run_cmd_no_fail("git tag -d {}".format(tag), verbose=True)
    run_cmd_no_fail("git push {} :refs/tags/{}".format(remote, tag),
                    verbose=True)
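
A usage sketch; the tag name is illustrative, and ESMCI_REMOTE_NAME is assumed to be defined as in setup() above. Pushing the empty ref :refs/tags/<tag> is the standard git idiom for deleting a tag on the remote:

# Hypothetical usage: drop an abandoned split tag locally and upstream.
delete_tag("cime-split-abandoned", remote=ESMCI_REMOTE_NAME)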