Example #1
    def _create_test(self, extra_args, test_id=None, run_errors=False, env_changes=""):
        """
        Convenience wrapper around create_test. Returns a list of full paths to the
        created cases. If there are multiple cases, the order of the returned list is
        not guaranteed to match the order of the arguments.
        """
        # All-stub model tests are not supported in the nuopc driver, so exclude them
        driver = utils.get_cime_default_driver()
        if driver == "nuopc" and "cime_developer" in extra_args:
            extra_args.append(
                " ^SMS_Ln3.T42_T42.S ^PRE.f19_f19.ADESP_TEST ^PRE.f19_f19.ADESP ^DAE.ww3a.ADWAV"
            )

        test_id = (
            "{}-{}".format(self._baseline_name, utils.get_timestamp())
            if test_id is None
            else test_id
        )
        extra_args.append("-t {}".format(test_id))
        extra_args.append("--baseline-root {}".format(self._baseline_area))
        if self.NO_BATCH:
            extra_args.append("--no-batch")
        if self.TEST_COMPILER and (
            [extra_arg for extra_arg in extra_args if "--compiler" in extra_arg] == []
        ):
            extra_args.append("--compiler={}".format(self.TEST_COMPILER))
        if self.TEST_MPILIB and (
            [extra_arg for extra_arg in extra_args if "--mpilib" in extra_arg] == []
        ):
            extra_args.append("--mpilib={}".format(self.TEST_MPILIB))
        if [extra_arg for extra_arg in extra_args if "--machine" in extra_arg] == []:
            extra_args.append(f"--machine {self.MACHINE.get_machine_name()}")
        extra_args.append("--test-root={0} --output-root={0}".format(self._testroot))

        full_run = (
            set(extra_args)
            & set(["-n", "--namelist-only", "--no-setup", "--no-build", "--no-run"])
        ) == set()
        if full_run and not self.NO_BATCH:
            extra_args.append("--wait")

        expected_stat = 0 if not run_errors else utils.TESTS_FAILED_ERR_CODE

        output = self.run_cmd_assert_result(
            "{} {}/create_test {}".format(
                env_changes, self.SCRIPT_DIR, " ".join(extra_args)
            ),
            expected_stat=expected_stat,
        )
        cases = []
        for line in output.splitlines():
            if "Case dir:" in line:
                casedir = line.split()[-1]
                self.assertTrue(
                    os.path.isdir(casedir), msg="Missing casedir {}".format(casedir)
                )
                cases.append(casedir)

        self.assertTrue(len(cases) > 0, "create_test made no cases")

        return cases[0] if len(cases) == 1 else cases
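
All of these examples build unique test ids and LIDs from get_timestamp. A minimal sketch of such a helper, assuming it simply wraps time.strftime (the actual CIME implementation may differ in details):

import time

def get_timestamp(timestamp_format="%Y%m%d_%H%M%S"):
    # Render the current local time with the given strftime format,
    # e.g. "20240115_093042" with the default format.
    return time.strftime(timestamp_format)
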
Example #2
def post_build(case, logs, build_complete=False):
###############################################################################

    logdir = case.get_value("LOGDIR")

    #zip build logs to CASEROOT/logs
    if logdir:
        bldlogdir = os.path.join(logdir, "bld")
        if not os.path.exists(bldlogdir):
            os.makedirs(bldlogdir)

    for log in logs:
        with open(log, 'rb') as f_in:
            with gzip.open("{}.gz".format(log), 'wb') as f_out:
                shutil.copyfileobj(f_in, f_out)
        # bldlogdir is only defined when LOGDIR is set
        if logdir and "sharedlibroot" not in log:
            logger.info("Copying build log {} to {}".format(log, bldlogdir))
            shutil.copy("{}.gz".format(log), os.path.join(bldlogdir, "{}.gz".format(os.path.basename(log))))
        os.remove(log)

    if build_complete:

        # Set XML to indicate build complete
        case.set_value("BUILD_COMPLETE", True)
        case.set_value("BUILD_STATUS", 0)
        if "SMP_VALUE" in os.environ:
            case.set_value("SMP_BUILD", os.environ["SMP_VALUE"])
            case.flush()

        lock_file("env_build.xml")

        # must ensure there's an lid
        lid = os.environ["LID"] if "LID" in os.environ else get_timestamp("%y%m%d-%H%M%S")
        save_build_provenance(case, lid=lid)
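
post_build locks env_build.xml once the build is complete. A minimal sketch of lock_file, assuming it snapshots the file into a LockedFiles directory so later runs can detect out-of-band edits (the directory name and signature here are assumptions):

import os
import shutil

def lock_file(filename, caseroot="."):
    # Snapshot an env XML file into LockedFiles/; comparing against this
    # copy later reveals whether the file was edited outside of CIME.
    locked_dir = os.path.join(caseroot, "LockedFiles")
    if not os.path.exists(locked_dir):
        os.makedirs(locked_dir)
    shutil.copyfile(os.path.join(caseroot, filename),
                    os.path.join(locked_dir, filename))
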
Example #3
def make_new_tag(prefix, old_tag, remote="origin", commit="HEAD"):
###############################################################################
    new_tag = "{}{}".format(prefix, get_timestamp(timestamp_format="%Y-%m-%d"))
    expect(old_tag != new_tag, "New tag must have different name than old tag")

    run_cmd_no_fail("git tag {} {}".format(new_tag, commit), verbose=True)
    run_cmd_no_fail("git push {} {}".format(remote, new_tag), verbose=True)

    return new_tag
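
A hypothetical invocation, assuming a dated v-prefix tag scheme (the tag names here are illustrative):

# Tags HEAD as e.g. "v2024-01-15" and pushes it to origin; expect()
# aborts if today's tag would collide with the previous one.
new_tag = make_new_tag("v", old_tag="v2024-01-14")
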
Example #5
    def build_phase(self, sharedlib_only=False, model_only=False):
        if "TESTBUILDFAIL_PASS" in os.environ:
            TESTRUNPASS.build_phase(self, sharedlib_only, model_only)
        else:
            if (not sharedlib_only):
                blddir = self._case.get_value("EXEROOT")
                bldlog = os.path.join(blddir, "{}.bldlog.{}".format(get_model(), get_timestamp("%y%m%d-%H%M%S")))
                with open(bldlog, "w") as fd:
                    fd.write("BUILD FAIL: Intentional fail for testing infrastructure")

                expect(False, "BUILD FAIL: Intentional fail for testing infrastructure")
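
Several examples fail fast through expect. A minimal sketch, assuming it raises on a false condition (the real CIME helper raises its own error type and supports more options):

def expect(condition, error_msg):
    # Abort with error_msg when condition is false; stands in for
    # CIME.utils.expect in these sketches.
    if not condition:
        raise RuntimeError(error_msg)
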
Example #6
    def setUp(self):
        self._thread_error = None
        self._unset_proxy = self.setup_proxy()
        self._machine = self.MACHINE.get_machine_name()
        self._compiler = (self.MACHINE.get_default_compiler() if
                          self.TEST_COMPILER is None else self.TEST_COMPILER)
        self._baseline_name = "fake_testing_only_%s" % utils.get_timestamp()
        self._baseline_area = os.path.join(self.TEST_ROOT, "baselines")
        self._testroot = self.TEST_ROOT
        self._hasbatch = self.MACHINE.has_batch_system() and not self.NO_BATCH
        self._do_teardown = not self.NO_TEARDOWN
        self._root_dir = os.getcwd()
Example #7
    def test_jenkins_generic_job(self):
        # Generate fresh baselines so that this test is not impacted by
        # unresolved diffs
        self.simple_test(True, "-t cime_test_only_pass -g -b %s" % self._baseline_name)
        self.assert_num_leftovers("cime_test_only_pass")

        build_name = "jenkins_generic_job_pass_%s" % utils.get_timestamp()
        self.simple_test(
            True,
            "-t cime_test_only_pass -b %s" % self._baseline_name,
            build_name=build_name,
        )
        self.assert_num_leftovers(
            "cime_test_only_pass"
        )  # jenkins_generic_job should have automatically cleaned up leftovers from prior run
        self.assert_dashboard_has_build(build_name)

    def test_jenkins_generic_job_realistic_dash(self):
        # The actual quality of the cdash results for this test can only
        # be inspected manually

        # Generate fresh baselines so that this test is not impacted by
        # unresolved diffs
        self.simple_test(False,
                         "-t cime_test_all -g -b %s" % self._baseline_name)
        self.assert_num_leftovers("cime_test_all")

        # Should create a diff
        os.environ["TESTRUNDIFF_ALTERNATE"] = "True"

        # Should create a nml diff
        # Modify namelist
        fake_nl = """
 &fake_nml
   fake_item = 'fake'
   fake = .true.
/"""
        baseline_glob = glob.glob(
            os.path.join(self._baseline_area, self._baseline_name,
                         "TESTRUNPASS*"))
        self.assertEqual(
            len(baseline_glob),
            1,
            msg="Expected one match, got:\n%s" % "\n".join(baseline_glob),
        )

        for baseline_dir in baseline_glob:
            nl_path = os.path.join(baseline_dir, "CaseDocs", "datm_in")
            self.assertTrue(os.path.isfile(nl_path),
                            msg="Missing file %s" % nl_path)

            os.chmod(nl_path, stat.S_IRUSR | stat.S_IWUSR)
            with open(nl_path, "a") as nl_file:
                nl_file.write(fake_nl)

        build_name = "jenkins_generic_job_mixed_%s" % utils.get_timestamp()
        self.simple_test(False,
                         "-t cime_test_all -b %s" % self._baseline_name,
                         build_name=build_name)
        self.assert_num_leftovers(
            "cime_test_all"
        )  # jenkins_generic_job should have automatically cleaned up leftovers from prior run
        self.assert_dashboard_has_build(build_name)
Example #9
def post_build(case, logs, build_complete=False, save_build_provenance=True):
###############################################################################
    for log in logs:
        gzip_existing_file(log)

    if build_complete:
        # must ensure there's an lid
        lid = os.environ["LID"] if "LID" in os.environ else get_timestamp("%y%m%d-%H%M%S")
        if save_build_provenance:
            save_build_provenance_sub(case, lid=lid)
        # Set XML to indicate build complete
        case.set_value("BUILD_COMPLETE", True)
        case.set_value("BUILD_STATUS", 0)
        if "SMP_VALUE" in os.environ:
            case.set_value("SMP_BUILD", os.environ["SMP_VALUE"])
            case.flush()

        lock_file("env_build.xml")
    def test_jenkins_generic_job_kill(self):
        build_name = "jenkins_generic_job_kill_%s" % utils.get_timestamp()
        run_thread = threading.Thread(
            target=self.threaded_test,
            args=(False, " -t cime_test_only_slow_pass -b master", build_name),
        )
        run_thread.daemon = True
        run_thread.start()

        time.sleep(120)

        self.kill_subprocesses(sig=signal.SIGTERM)

        run_thread.join(timeout=30)

        self.assertFalse(run_thread.is_alive(),
                         msg="jenkins_generic_job should have finished")
        self.assertTrue(
            self._thread_error is None,
            msg="Thread had failure: %s" % self._thread_error,
        )
        self.assert_dashboard_has_build(build_name)
Example #11
    def test_bless_test_results(self):
        if self.NO_FORTRAN_RUN:
            self.skipTest("Skipping fortran test")
        # Test resubmit scenario if Machine has a batch system
        if self.MACHINE.has_batch_system():
            test_names = [
                "TESTRUNDIFFRESUBMIT_Mmpi-serial.f19_g16_rx1.A",
                "TESTRUNDIFF_Mmpi-serial.f19_g16_rx1.A",
            ]
        else:
            test_names = ["TESTRUNDIFF_P1.f19_g16_rx1.A"]

        # Generate some baselines
        for test_name in test_names:
            if utils.get_model() == "e3sm":
                genargs = ["-g", "-o", "-b", self._baseline_name, test_name]
                compargs = ["-c", "-b", self._baseline_name, test_name]
            else:
                genargs = [
                    "-g",
                    self._baseline_name,
                    "-o",
                    test_name,
                    "--baseline-root",
                    self._baseline_area,
                ]
                compargs = [
                    "-c",
                    self._baseline_name,
                    test_name,
                    "--baseline-root",
                    self._baseline_area,
                ]

            self._create_test(genargs)
            # Hist compare should pass
            self._create_test(compargs)
            # Change behavior
            os.environ["TESTRUNDIFF_ALTERNATE"] = "True"

            # Hist compare should now fail
            test_id = "%s-%s" % (self._baseline_name, utils.get_timestamp())
            self._create_test(compargs, test_id=test_id, run_errors=True)

            # compare_test_results should detect the fail
            cpr_cmd = "{}/compare_test_results --test-root {} -t {} ".format(
                self.TOOLS_DIR, self._testroot, test_id
            )
            output = self.run_cmd_assert_result(
                cpr_cmd, expected_stat=utils.TESTS_FAILED_ERR_CODE
            )

            # use regex
            expected_pattern = re.compile(r"FAIL %s[^\s]* BASELINE" % test_name)
            the_match = expected_pattern.search(output)
            self.assertNotEqual(
                the_match,
                None,
                msg="Cmd '%s' failed to display failed test %s in output:\n%s"
                % (cpr_cmd, test_name, output),
            )
            # Bless
            utils.run_cmd_no_fail(
                "{}/bless_test_results --test-root {} --hist-only --force -t {}".format(
                    self.TOOLS_DIR, self._testroot, test_id
                )
            )
            # Hist compare should now pass again
            self._create_test(compargs)
            self.verify_perms(self._baseline_area)
            if "TESTRUNDIFF_ALTERNATE" in os.environ:
                del os.environ["TESTRUNDIFF_ALTERNATE"]
Example #12
    def setUp(self):
        self._testroot = os.path.join(self.TEST_ROOT, "TestWaitForTests")
        self._timestamp = utils.get_timestamp()

        # basic tests
        self._testdir_all_pass = os.path.join(
            self._testroot, "scripts_regression_tests.testdir_all_pass")
        self._testdir_with_fail = os.path.join(
            self._testroot, "scripts_regression_tests.testdir_with_fail")
        self._testdir_unfinished = os.path.join(
            self._testroot, "scripts_regression_tests.testdir_unfinished")
        self._testdir_unfinished2 = os.path.join(
            self._testroot, "scripts_regression_tests.testdir_unfinished2")

        # live tests
        self._testdir_teststatus1 = os.path.join(
            self._testroot, "scripts_regression_tests.testdir_teststatus1")
        self._testdir_teststatus2 = os.path.join(
            self._testroot, "scripts_regression_tests.testdir_teststatus2")

        self._testdirs = [
            self._testdir_all_pass,
            self._testdir_with_fail,
            self._testdir_unfinished,
            self._testdir_unfinished2,
            self._testdir_teststatus1,
            self._testdir_teststatus2,
        ]
        basic_tests = self._testdirs[:self._testdirs.index(self._testdir_teststatus1)]

        for testdir in self._testdirs:
            if os.path.exists(testdir):
                shutil.rmtree(testdir)
            os.makedirs(testdir)

        for r in range(10):
            for testdir in basic_tests:
                os.makedirs(os.path.join(testdir, str(r)))
                test_utils.make_fake_teststatus(
                    os.path.join(testdir, str(r)),
                    "Test_%d" % r,
                    test_status.TEST_PASS_STATUS,
                    test_status.RUN_PHASE,
                )

        test_utils.make_fake_teststatus(
            os.path.join(self._testdir_with_fail, "5"),
            "Test_5",
            test_status.TEST_FAIL_STATUS,
            test_status.RUN_PHASE,
        )
        test_utils.make_fake_teststatus(
            os.path.join(self._testdir_unfinished, "5"),
            "Test_5",
            test_status.TEST_PEND_STATUS,
            test_status.RUN_PHASE,
        )
        test_utils.make_fake_teststatus(
            os.path.join(self._testdir_unfinished2, "5"),
            "Test_5",
            test_status.TEST_PASS_STATUS,
            test_status.SUBMIT_PHASE,
        )

        integration_tests = self._testdirs[len(basic_tests):]
        for integration_test in integration_tests:
            os.makedirs(os.path.join(integration_test, "0"))
            test_utils.make_fake_teststatus(
                os.path.join(integration_test, "0"),
                "Test_0",
                test_status.TEST_PASS_STATUS,
                test_status.CORE_PHASES[0],
            )

        # Set up proxy if possible
        self._unset_proxy = self.setup_proxy()

        self._thread_error = None
Example #13
    def test_rebless_namelist(self):
        # Generate some namelist baselines
        if self.NO_FORTRAN_RUN:
            self.skipTest("Skipping fortran test")
        test_to_change = "TESTRUNPASS_P1.f19_g16_rx1.A"
        if utils.get_model() == "e3sm":
            genargs = ["-g", "-o", "-b", self._baseline_name, "cime_test_only_pass"]
            compargs = ["-c", "-b", self._baseline_name, "cime_test_only_pass"]
        else:
            genargs = ["-g", self._baseline_name, "-o", "cime_test_only_pass"]
            compargs = ["-c", self._baseline_name, "cime_test_only_pass"]

        self._create_test(genargs)

        # Basic namelist compare
        test_id = "%s-%s" % (self._baseline_name, utils.get_timestamp())
        cases = self._create_test(compargs, test_id=test_id)
        casedir = self.get_casedir(test_to_change, cases)

        # Check standalone case.cmpgen_namelists
        self.run_cmd_assert_result("./case.cmpgen_namelists", from_dir=casedir)

        # compare_test_results should pass
        cpr_cmd = "{}/compare_test_results --test-root {} -n -t {} ".format(
            self.TOOLS_DIR, self._testroot, test_id
        )
        output = self.run_cmd_assert_result(cpr_cmd)

        # use regex
        expected_pattern = re.compile(r"PASS %s[^\s]* NLCOMP" % test_to_change)
        the_match = expected_pattern.search(output)
        msg = f"Cmd {cpr_cmd} failed to display passed test in output:\n{output}"
        self.assertNotEqual(
            the_match,
            None,
            msg=msg,
        )

        # Modify namelist
        fake_nl = """
 &fake_nml
   fake_item = 'fake'
   fake = .true.
/"""
        baseline_area = self._baseline_area
        baseline_glob = glob.glob(
            os.path.join(baseline_area, self._baseline_name, "TEST*")
        )
        self.assertEqual(
            len(baseline_glob),
            3,
            msg="Expected three matches, got:\n%s" % "\n".join(baseline_glob),
        )

        for baseline_dir in baseline_glob:
            nl_path = os.path.join(baseline_dir, "CaseDocs", "datm_in")
            self.assertTrue(os.path.isfile(nl_path), msg="Missing file %s" % nl_path)

            os.chmod(nl_path, stat.S_IRUSR | stat.S_IWUSR)
            with open(nl_path, "a") as nl_file:
                nl_file.write(fake_nl)

        # Basic namelist compare should now fail
        test_id = "%s-%s" % (self._baseline_name, utils.get_timestamp())
        self._create_test(compargs, test_id=test_id, run_errors=True)
        casedir = self.get_casedir(test_to_change, cases)

        # Unless namelists are explicitly ignored
        test_id2 = "%s-%s" % (self._baseline_name, utils.get_timestamp())
        self._create_test(compargs + ["--ignore-namelists"], test_id=test_id2)

        self.run_cmd_assert_result(
            "./case.cmpgen_namelists", from_dir=casedir, expected_stat=100
        )

        # preview namelists should work
        self.run_cmd_assert_result("./preview_namelists", from_dir=casedir)

        # This should still fail
        self.run_cmd_assert_result(
            "./case.cmpgen_namelists", from_dir=casedir, expected_stat=100
        )

        # compare_test_results should fail
        cpr_cmd = "{}/compare_test_results --test-root {} -n -t {} ".format(
            self.TOOLS_DIR, self._testroot, test_id
        )
        output = self.run_cmd_assert_result(
            cpr_cmd, expected_stat=utils.TESTS_FAILED_ERR_CODE
        )

        # use regex
        expected_pattern = re.compile(r"FAIL %s[^\s]* NLCOMP" % test_to_change)
        the_match = expected_pattern.search(output)
        self.assertNotEqual(
            the_match,
            None,
            msg="Cmd '%s' failed to display failed test in output:\n%s"
            % (cpr_cmd, output),
        )

        # Bless
        new_test_id = "%s-%s" % (self._baseline_name, utils.get_timestamp())
        utils.run_cmd_no_fail(
            "{}/bless_test_results --test-root {} -n --force -t {} --new-test-root={} --new-test-id={}".format(
                self.TOOLS_DIR, self._testroot, test_id, self._testroot, new_test_id
            )
        )

        # Basic namelist compare should now pass again
        self._create_test(compargs)

        self.verify_perms(self._baseline_area)
Example #14
    def _generate_baseline(self):
        """
        generate a new baseline case based on the current test
        """
        with self._test_status:
            # generate baseline

            # BEGIN: modified CIME.hist_utils.generate_baseline
            rundir = self._case.get_value("RUNDIR")
            basegen_dir = os.path.join(self._case.get_value("BASELINE_ROOT"),
                                       self._case.get_value("BASEGEN_CASE"))
            testcase = self._case.get_value("CASE")

            if not os.path.isdir(basegen_dir):
                os.makedirs(basegen_dir)

            if os.path.isdir(os.path.join(basegen_dir, testcase)):
                expect(False, " Cowardly refusing to overwrite existing baseline directory")

            comments = "Generating baselines into '{}'\n".format(basegen_dir)
            num_gen = 0

            model = 'cam'
            comments += "  generating for model '{}'\n".format(model)
            hists = _get_all_hist_files(testcase, model, rundir)
            logger.debug("mvk_hist_files: {}".format(hists))

            num_gen += len(hists)
            for hist in hists:
                basename = hist[hist.rfind(model):]
                baseline = os.path.join(basegen_dir, basename)
                if os.path.exists(baseline):
                    os.remove(baseline)

                shutil.copy(hist, baseline)
                comments += "    generating baseline '{}' from file {}\n".format(baseline, hist)

            newestcpllogfile = self._case.get_latest_cpl_log(coupler_log_path=self._case.get_value("LOGDIR"))
            if newestcpllogfile is None:
                logger.warning("No cpl.log file found in log directory {}".format(self._case.get_value("LOGDIR")))
            else:
                shutil.copyfile(newestcpllogfile,
                                os.path.join(basegen_dir, "cpl.log.gz"))

            expect(num_gen > 0, "Could not generate any hist files for case '{}', something is seriously wrong".format(
                os.path.join(rundir, testcase)))
            # make sure permissions are open in baseline directory
            for root, _, files in os.walk(basegen_dir):
                for name in files:
                    try:
                        os.chmod(os.path.join(root, name),
                                 stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP | stat.S_IROTH)
                    except OSError:
                        # We tried. Not worth hard failure here.
                        pass

            if get_model() == "e3sm":
                bless_log = os.path.join(basegen_dir, BLESS_LOG_NAME)
                with open(bless_log, "a") as fd:
                    fd.write("sha:{} date:{}\n".format(get_current_commit(repo=self._case.get_value("CIMEROOT")),
                                                       get_timestamp(timestamp_format="%Y-%m-%d_%H:%M:%S")))
            # END: modified CIME.hist_utils.generate_baseline

            append_testlog(comments)
            status = CIME.test_status.TEST_PASS_STATUS
            baseline_name = self._case.get_value("BASEGEN_CASE")
            self._test_status.set_status("{}".format(CIME.test_status.GENERATE_PHASE), status,
                                         comments=os.path.dirname(baseline_name))
            basegen_dir = os.path.join(self._case.get_value("BASELINE_ROOT"), self._case.get_value("BASEGEN_CASE"))
            # copy latest cpl log to baseline
            # drop the date so that the name is generic
            newestcpllogfiles = self._get_latest_cpl_logs()
            for cpllog in newestcpllogfiles:
                m = re.search(r"/(cpl.*.log).*.gz", cpllog)
                if m is not None:
                    # baselog is already an absolute path under basegen_dir
                    baselog = os.path.join(basegen_dir, m.group(1)) + ".gz"
                    shutil.copyfile(cpllog, baselog)
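
The comment above says the date is dropped so the baseline log name stays generic; a standalone check of that regex, assuming a typical coupler log name:

import re

cpllog = "/scratch/run/cpl.log.240115-093042.gz"
m = re.search(r"/(cpl.*.log).*.gz", cpllog)
# group(1) is the log name with the date suffix stripped
assert m is not None and m.group(1) == "cpl.log"
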
Example #15
def case_run(case, skip_pnl=False):
###############################################################################
    # Set up the run, run the model, do the postrun steps
    run_with_submit = case.get_value("RUN_WITH_SUBMIT")
    expect(run_with_submit,
           "You are not calling the run script via the submit script. "
           "As a result, short-term archiving will not be called automatically."
           "Please submit your run using the submit script like so:"
           " ./case.submit Time: {}".format(get_timestamp()))

    # Forces user to use case.submit if they re-submit
    if case.get_value("TESTCASE") is None:
        case.set_value("RUN_WITH_SUBMIT", False)

    prerun_script = case.get_value("PRERUN_SCRIPT")
    postrun_script = case.get_value("POSTRUN_SCRIPT")

    data_assimilation = case.get_value("DATA_ASSIMILATION")
    data_assimilation_cycles = case.get_value("DATA_ASSIMILATION_CYCLES")
    data_assimilation_script = case.get_value("DATA_ASSIMILATION_SCRIPT")

    # set up the LID
    lid = new_lid()

    save_prerun_provenance(case)

    for cycle in range(data_assimilation_cycles):
        # After the first DA cycle, runs are restart runs
        if cycle > 0:
            case.set_value("CONTINUE_RUN", "TRUE")
            lid = new_lid()

        if prerun_script:
            case.flush()
            do_external(prerun_script, case.get_value("CASEROOT"), case.get_value("RUNDIR"),
                        lid, prefix="prerun")
            case.read_xml()

        lid = run_model(case, lid, skip_pnl, da_cycle=cycle)
        save_logs(case, lid)       # Copy log files back to caseroot
        if case.get_value("CHECK_TIMING") or case.get_value("SAVE_TIMING"):
            get_timing(case, lid)     # Run the getTiming script

        if data_assimilation:
            case.flush()
            do_data_assimilation(data_assimilation_script, case.get_value("CASEROOT"), cycle, lid,
                                 case.get_value("RUNDIR"))
            case.read_xml()

        if postrun_script:
            case.flush()
            do_external(postrun_script, case.get_value("CASEROOT"), case.get_value("RUNDIR"),
                        lid, prefix="postrun")
            case.read_xml()

        save_postrun_provenance(case)

    logger.warning("check for resubmit")
    resubmit_check(case)

    return True
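
case_run refreshes the LID for every data-assimilation cycle. A minimal sketch of new_lid, assuming it stamps the current time and exports it so child scripts agree on the same id (the real helper may do more, e.g. add a random suffix under test):

import os
import time

def new_lid():
    # Generate a fresh log id and publish it via the environment.
    lid = time.strftime("%y%m%d-%H%M%S")
    os.environ["LID"] = lid
    return lid
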
Example #16
    def __init__(self, test_names, test_data=None,
                 no_run=False, no_build=False, no_setup=False, no_batch=None,
                 test_root=None, test_id=None,
                 machine_name=None, compiler=None,
                 baseline_root=None, baseline_cmp_name=None, baseline_gen_name=None,
                 clean=False, namelists_only=False,
                 project=None, parallel_jobs=None,
                 walltime=None, proc_pool=None,
                 use_existing=False, save_timing=False, queue=None,
                 allow_baseline_overwrite=False, output_root=None,
                 force_procs=None, force_threads=None, mpilib=None,
                 input_dir=None, pesfile=None, mail_user=None, mail_type=None, allow_pnl=False, non_local=False):
    ###########################################################################
        self._cime_root       = get_cime_root()
        self._cime_model      = get_model()
        self._cime_driver     = "mct"
        self._save_timing     = save_timing
        self._queue           = queue
        self._test_data       = {} if test_data is None else test_data # Format:  {test_name -> {data_name -> data}}
        self._mpilib          = mpilib  # allow override of default mpilib
        self._completed_tests = 0
        self._input_dir       = input_dir
        self._pesfile         = pesfile
        self._allow_baseline_overwrite = allow_baseline_overwrite
        self._allow_pnl       = allow_pnl
        self._non_local       = non_local

        self._mail_user = mail_user
        self._mail_type = mail_type

        self._machobj = Machines(machine=machine_name)

        self._model_build_cost = 4

        # If user is forcing procs or threads, re-write test names to reflect this.
        if force_procs or force_threads:
            test_names = _translate_test_names_for_new_pecount(test_names, force_procs, force_threads)

        self._no_setup = no_setup
        self._no_build = no_build or no_setup or namelists_only
        self._no_run   = no_run or self._no_build
        self._output_root = output_root
        # Figure out what project to use
        if project is None:
            self._project = get_project()
            if self._project is None:
                self._project = self._machobj.get_value("PROJECT")
        else:
            self._project = project

        # We will not use batch system if user asked for no_batch or if current
        # machine is not a batch machine
        self._no_batch = no_batch or not self._machobj.has_batch_system()
        expect(not (self._no_batch and self._queue is not None),
               "Does not make sense to request a queue without batch system")

        # Determine and resolve test_root
        if test_root is not None:
            self._test_root = test_root
        elif self._output_root is not None:
            self._test_root = self._output_root
        else:
            self._test_root = self._machobj.get_value("CIME_OUTPUT_ROOT")

        if self._project is not None:
            self._test_root = self._test_root.replace("$PROJECT", self._project)

        self._test_root = os.path.abspath(self._test_root)
        self._test_id   = test_id if test_id is not None else get_timestamp()

        self._compiler = self._machobj.get_default_compiler() if compiler is None else compiler

        self._clean          = clean
        self._namelists_only = namelists_only

        self._walltime = walltime

        if parallel_jobs is None:
            self._parallel_jobs = min(len(test_names),
                                      self._machobj.get_value("MAX_MPITASKS_PER_NODE"))
        else:
            self._parallel_jobs = parallel_jobs

        self._baseline_cmp_name = baseline_cmp_name # Implies comparison should be done if not None
        self._baseline_gen_name = baseline_gen_name # Implies generation should be done if not None

        # Compute baseline_root
        self._baseline_root = baseline_root if baseline_root is not None \
                              else self._machobj.get_value("BASELINE_ROOT")

        if self._project is not None:
            self._baseline_root = self._baseline_root.replace("$PROJECT", self._project)

        self._baseline_root = os.path.abspath(self._baseline_root)

        if baseline_cmp_name or baseline_gen_name:
            if self._baseline_cmp_name:
                full_baseline_dir = os.path.join(self._baseline_root, self._baseline_cmp_name)
                expect(os.path.isdir(full_baseline_dir),
                       "Missing baseline comparison directory {}".format(full_baseline_dir))

            # the following is to assure that the existing generate directory is not overwritten
            if self._baseline_gen_name:
                full_baseline_dir = os.path.join(self._baseline_root, self._baseline_gen_name)
                existing_baselines = []
                for test_name in test_names:
                    test_baseline = os.path.join(full_baseline_dir, test_name)
                    if os.path.isdir(test_baseline):
                        existing_baselines.append(test_baseline)

                expect(allow_baseline_overwrite or len(existing_baselines) == 0,
                       "Baseline directories already exists {}\n" \
                       "Use -o to avoid this error".format(existing_baselines))

        if self._cime_model == "e3sm":
            _order_tests_by_runtime(test_names, self._baseline_root)

        # This is the only data that multiple threads will simultaneously access.
        # Each test has its own value, and setting/retrieving items from a dict
        # is atomic, so this should be fine to use without a mutex.
        # name -> (phase, status)
        self._tests = OrderedDict()
        for test_name in test_names:
            self._tests[test_name] = (TEST_START, TEST_PASS_STATUS)

        # Oversubscribe by 1/4
        if proc_pool is None:
            pes = int(self._machobj.get_value("MAX_TASKS_PER_NODE"))
            self._proc_pool = int(pes * 1.25)
        else:
            self._proc_pool = int(proc_pool)

        self._procs_avail = self._proc_pool

        # Setup phases
        self._phases = list(PHASES)
        if self._no_setup:
            self._phases.remove(SETUP_PHASE)
        if self._no_build:
            self._phases.remove(SHAREDLIB_BUILD_PHASE)
            self._phases.remove(MODEL_BUILD_PHASE)
        if self._no_run:
            self._phases.remove(RUN_PHASE)

        if use_existing:
            for test in self._tests:
                with TestStatus(self._get_test_dir(test)) as ts:
                    for phase, status in ts:
                        if phase in CORE_PHASES:
                            if status in [TEST_PEND_STATUS, TEST_FAIL_STATUS]:
                                if status == TEST_FAIL_STATUS:
                                    # Important for potential subsequent waits
                                    ts.set_status(phase, TEST_PEND_STATUS)

                                # We need to pick up here
                                break

                            else:
                                if phase != SUBMIT_PHASE:
                                    # Somewhat subtle. Create_test considers submit/run to be the run phase,
                                    # so don't try to update test status for a passed submit phase
                                    self._update_test_status(test, phase, TEST_PEND_STATUS)
                                    self._update_test_status(test, phase, status)

                                    if phase == RUN_PHASE:
                                        logger.info("Test {} passed and will not be re-run".format(test))

                logger.info("Using existing test directory {}".format(self._get_test_dir(test)))
        else:
            # None of the test directories should already exist.
            for test in self._tests:
                expect(not os.path.exists(self._get_test_dir(test)),
                       "Cannot create new case in directory '{}', it already exists."
                       " Pick a different test-id".format(self._get_test_dir(test)))
                logger.info("Creating test directory {}".format(self._get_test_dir(test)))
Example #17
    def test_c_use_existing(self):
        tests = get_tests.get_full_test_names(
            [
                "TESTBUILDFAIL_P1.f19_g16_rx1.A",
                "TESTRUNFAIL_P1.f19_g16_rx1.A",
                "TESTRUNPASS_P1.f19_g16_rx1.A",
            ],
            self._machine,
            self._compiler,
        )
        test_id = "%s-%s" % (self._baseline_name, utils.get_timestamp())
        ct = test_scheduler.TestScheduler(
            tests,
            test_id=test_id,
            no_batch=self.NO_BATCH,
            test_root=self._testroot,
            output_root=self._testroot,
            compiler=self._compiler,
            mpilib=self.TEST_MPILIB,
            machine_name=self.MACHINE.get_machine_name(),
        )

        build_fail_test = [item for item in tests
                           if "TESTBUILDFAIL" in item][0]
        run_fail_test = [item for item in tests if "TESTRUNFAIL" in item][0]
        pass_test = [item for item in tests if "TESTRUNPASS" in item][0]

        log_lvl = logging.getLogger().getEffectiveLevel()
        logging.disable(logging.CRITICAL)
        try:
            ct.run_tests()
        finally:
            logging.getLogger().setLevel(log_lvl)

        test_statuses = glob.glob("%s/*%s/TestStatus" %
                                  (self._testroot, test_id))
        self.assertEqual(len(tests), len(test_statuses))

        self._wait_for_tests(test_id, expect_works=False)

        for x in test_statuses:
            casedir = os.path.dirname(x)
            ts = test_status.TestStatus(test_dir=casedir)
            test_name = ts.get_name()
            if test_name == build_fail_test:
                self.assert_test_status(
                    test_name,
                    ts,
                    test_status.MODEL_BUILD_PHASE,
                    test_status.TEST_FAIL_STATUS,
                )
                with test_status.TestStatus(test_dir=casedir) as ts:
                    ts.set_status(test_status.MODEL_BUILD_PHASE,
                                  test_status.TEST_PEND_STATUS)
            elif test_name == run_fail_test:
                self.assert_test_status(test_name, ts, test_status.RUN_PHASE,
                                        test_status.TEST_FAIL_STATUS)
                with test_status.TestStatus(test_dir=casedir) as ts:
                    ts.set_status(test_status.SUBMIT_PHASE,
                                  test_status.TEST_PEND_STATUS)
            else:
                self.assertTrue(test_name == pass_test)
                self.assert_test_status(
                    test_name,
                    ts,
                    test_status.MODEL_BUILD_PHASE,
                    test_status.TEST_PASS_STATUS,
                )
                self.assert_test_status(
                    test_name,
                    ts,
                    test_status.SUBMIT_PHASE,
                    test_status.TEST_PASS_STATUS,
                )
                self.assert_test_status(test_name, ts, test_status.RUN_PHASE,
                                        test_status.TEST_PASS_STATUS)

        os.environ["TESTBUILDFAIL_PASS"] = "******"
        os.environ["TESTRUNFAIL_PASS"] = "******"
        ct2 = test_scheduler.TestScheduler(
            tests,
            test_id=test_id,
            no_batch=self.NO_BATCH,
            use_existing=True,
            test_root=self._testroot,
            output_root=self._testroot,
            compiler=self._compiler,
            mpilib=self.TEST_MPILIB,
            machine_name=self.MACHINE.get_machine_name(),
        )

        log_lvl = logging.getLogger().getEffectiveLevel()
        logging.disable(logging.CRITICAL)
        try:
            ct2.run_tests()
        finally:
            logging.getLogger().setLevel(log_lvl)

        self._wait_for_tests(test_id)

        for x in test_statuses:
            ts = test_status.TestStatus(test_dir=os.path.dirname(x))
            test_name = ts.get_name()
            self.assert_test_status(
                test_name,
                ts,
                test_status.MODEL_BUILD_PHASE,
                test_status.TEST_PASS_STATUS,
            )
            self.assert_test_status(test_name, ts, test_status.SUBMIT_PHASE,
                                    test_status.TEST_PASS_STATUS)
            self.assert_test_status(test_name, ts, test_status.RUN_PHASE,
                                    test_status.TEST_PASS_STATUS)

        del os.environ["TESTBUILDFAIL_PASS"]
        del os.environ["TESTRUNFAIL_PASS"]

        # test that passed tests are not re-run

        ct2 = test_scheduler.TestScheduler(
            tests,
            test_id=test_id,
            no_batch=self.NO_BATCH,
            use_existing=True,
            test_root=self._testroot,
            output_root=self._testroot,
            compiler=self._compiler,
            mpilib=self.TEST_MPILIB,
            machine_name=self.MACHINE.get_machine_name(),
        )

        log_lvl = logging.getLogger().getEffectiveLevel()
        logging.disable(logging.CRITICAL)
        try:
            ct2.run_tests()
        finally:
            logging.getLogger().setLevel(log_lvl)

        self._wait_for_tests(test_id)

        for x in test_statuses:
            ts = test_status.TestStatus(test_dir=os.path.dirname(x))
            test_name = ts.get_name()
            self.assert_test_status(
                test_name,
                ts,
                test_status.MODEL_BUILD_PHASE,
                test_status.TEST_PASS_STATUS,
            )
            self.assert_test_status(test_name, ts, test_status.SUBMIT_PHASE,
                                    test_status.TEST_PASS_STATUS)
            self.assert_test_status(test_name, ts, test_status.RUN_PHASE,
                                    test_status.TEST_PASS_STATUS)
Example #18
def _case_build_impl(caseroot, case, sharedlib_only, model_only, buildlist,
                     save_build_provenance):
###############################################################################

    t1 = time.time()

    expect(not (sharedlib_only and model_only),
           "Contradiction: both sharedlib_only and model_only")
    logger.info("Building case in directory {}".format(caseroot))
    logger.info("sharedlib_only is {}".format(sharedlib_only))
    logger.info("model_only is {}".format(model_only))

    expect(os.path.isdir(caseroot), "'{}' is not a valid directory".format(caseroot))
    os.chdir(caseroot)

    expect(os.path.exists(get_batch_script_for_job(case.get_primary_job())),
           "ERROR: must invoke case.setup script before calling build script ")

    cimeroot = case.get_value("CIMEROOT")

    comp_classes = case.get_values("COMP_CLASSES")

    case.check_lockedfiles(skip="env_batch")

    # Retrieve relevant case data
    # This environment variable gets set for cesm Make and
    # needs to be unset before building again.
    if "MODEL" in os.environ:
        del os.environ["MODEL"]
    build_threaded      = case.get_build_threaded()
    casetools           = case.get_value("CASETOOLS")
    exeroot             = os.path.abspath(case.get_value("EXEROOT"))
    incroot             = os.path.abspath(case.get_value("INCROOT"))
    libroot             = os.path.abspath(case.get_value("LIBROOT"))
    sharedlibroot       = os.path.abspath(case.get_value("SHAREDLIBROOT"))
    multi_driver = case.get_value("MULTI_DRIVER")
    complist = []
    ninst = 1
    for comp_class in comp_classes:
        if comp_class == "CPL":
            config_dir = None
            if multi_driver:
                ninst = case.get_value("NINST_MAX")
        else:
            config_dir = os.path.dirname(case.get_value("CONFIG_{}_FILE".format(comp_class)))
            if multi_driver:
                ninst = 1
            else:
                ninst = case.get_value("NINST_{}".format(comp_class))

        comp = case.get_value("COMP_{}".format(comp_class))
        thrds =  case.get_value("NTHRDS_{}".format(comp_class))
        expect(ninst is not None,"Failed to get ninst for comp_class {}".format(comp_class))
        complist.append((comp_class.lower(), comp, thrds, ninst, config_dir ))
        os.environ["COMP_{}".format(comp_class)] = comp

    ocn_submodel        = case.get_value("OCN_SUBMODEL")
    profile_papi_enable = case.get_value("PROFILE_PAPI_ENABLE")
    compiler            = case.get_value("COMPILER")
    comp_interface      = case.get_value("COMP_INTERFACE")
    mpilib              = case.get_value("MPILIB")
    use_esmf_lib        = case.get_value("USE_ESMF_LIB")
    debug               = case.get_value("DEBUG")
    ninst_build         = case.get_value("NINST_BUILD")
    smp_value           = case.get_value("SMP_VALUE")
    clm_use_petsc       = case.get_value("CLM_USE_PETSC")
    cism_use_trilinos   = case.get_value("CISM_USE_TRILINOS")
    mali_use_albany     = case.get_value("MALI_USE_ALBANY")
    use_moab            = case.get_value("USE_MOAB")
    clm_config_opts     = case.get_value("CLM_CONFIG_OPTS")
    cam_config_opts     = case.get_value("CAM_CONFIG_OPTS")
    pio_config_opts     = case.get_value("PIO_CONFIG_OPTS")
    ninst_value         = case.get_value("NINST_VALUE")
    mach                = case.get_value("MACH")
    os_                 = case.get_value("OS")
    # Load some params into env
    os.environ["CIMEROOT"]             = cimeroot
    os.environ["CASETOOLS"]            = casetools
    os.environ["EXEROOT"]              = exeroot
    os.environ["INCROOT"]              = incroot
    os.environ["LIBROOT"]              = libroot
    os.environ["SHAREDLIBROOT"]        = sharedlibroot
    os.environ["CASEROOT"]             = caseroot
    os.environ["COMPILER"]             = compiler
    os.environ["COMP_INTERFACE"]       = comp_interface
    os.environ["NINST_VALUE"]          = str(ninst_value)
    os.environ["BUILD_THREADED"]       = stringify_bool(build_threaded)
    os.environ["MACH"]                 = mach
    os.environ["USE_ESMF_LIB"]         = stringify_bool(use_esmf_lib)
    os.environ["MPILIB"]               = mpilib
    os.environ["DEBUG"]                = stringify_bool(debug)
    os.environ["OS"]                   = os_
    os.environ["CLM_CONFIG_OPTS"]      = clm_config_opts     if clm_config_opts     is not None else ""
    os.environ["CAM_CONFIG_OPTS"]      = cam_config_opts     if cam_config_opts     is not None else ""
    os.environ["PIO_CONFIG_OPTS"]      = pio_config_opts     if pio_config_opts     is not None else ""
    os.environ["OCN_SUBMODEL"]         = ocn_submodel        if ocn_submodel        is not None else ""
    os.environ["PROFILE_PAPI_ENABLE"]  = stringify_bool(profile_papi_enable)
    os.environ["CLM_USE_PETSC"]        = stringify_bool(clm_use_petsc)
    os.environ["CISM_USE_TRILINOS"]    = stringify_bool(cism_use_trilinos)
    os.environ["MALI_USE_ALBANY"]      = stringify_bool(mali_use_albany)
    os.environ["USE_MOAB"]             = stringify_bool(use_moab)

    if get_model() == "e3sm" and mach == "titan" and compiler == "pgiacc":
        case.set_value("CAM_TARGET", "preqx_acc")

    # This is a timestamp for the build, not the same as the testid,
    # and this case may not be a test anyway. For a production
    # experiment there may be many builds of the same case.
    lid               = get_timestamp("%y%m%d-%H%M%S")
    os.environ["LID"] = lid

    # Set the overall USE_PETSC variable to TRUE if any of the
    # *_USE_PETSC variables are TRUE.
    # For now, there is just the one CLM_USE_PETSC variable, but in
    # the future there may be others -- so USE_PETSC will be true if
    # ANY of those are true.

    use_petsc = clm_use_petsc
    case.set_value("USE_PETSC", use_petsc)
    os.environ["USE_PETSC"] = stringify_bool(use_petsc)

    # Set the overall USE_TRILINOS variable to TRUE if any of the
    # *_USE_TRILINOS variables are TRUE.
    # For now, there is just the one CISM_USE_TRILINOS variable, but in
    # the future there may be others -- so USE_TRILINOS will be true if
    # ANY of those are true.

    use_trilinos = False if cism_use_trilinos is None else cism_use_trilinos
    case.set_value("USE_TRILINOS", use_trilinos)
    os.environ["USE_TRILINOS"] = stringify_bool(use_trilinos)

    # Set the overall USE_ALBANY variable to TRUE if any of the
    # *_USE_ALBANY variables are TRUE.
    # For now, there is just the one MALI_USE_ALBANY variable, but in
    # the future there may be others -- so USE_ALBANY will be true if
    # ANY of those are true.

    use_albany = stringify_bool(mali_use_albany)
    case.set_value("USE_ALBANY", use_albany)
    os.environ["USE_ALBANY"] = use_albany

    # Load modules
    case.load_env()

    sharedpath = _build_checks(case, build_threaded, comp_interface,
                               use_esmf_lib, debug, compiler, mpilib,
                               complist, ninst_build, smp_value, model_only, buildlist)

    t2 = time.time()
    logs = []

    if not model_only:
        logs = _build_libraries(case, exeroot, sharedpath, caseroot,
                                cimeroot, libroot, lid, compiler, buildlist, comp_interface)

    if not sharedlib_only:
        os.environ["INSTALL_SHAREDPATH"] = os.path.join(exeroot, sharedpath) # for MPAS makefile generators
        logs.extend(_build_model(build_threaded, exeroot, clm_config_opts, incroot, complist,
                                lid, caseroot, cimeroot, compiler, buildlist, comp_interface))

        if not buildlist:
            # in case component build scripts updated the xml files, update the case object
            case.read_xml()
            # Note, doing buildlists will never result in the system thinking the build is complete

    post_build(case, logs, build_complete=not (buildlist or sharedlib_only),
               save_build_provenance=save_build_provenance)

    t3 = time.time()

    if not sharedlib_only:
        logger.info("Time spent not building: {:f} sec".format(t2 - t1))
        logger.info("Time spent building: {:f} sec".format(t3 - t2))
        logger.info("MODEL BUILD HAS FINISHED SUCCESSFULLY")

    return True
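
The environment setup in _case_build_impl leans on stringify_bool. A minimal sketch, assuming it maps Python booleans (treating None as False) to the upper-case strings the Makefiles expect:

def stringify_bool(val):
    # "TRUE"/"FALSE" strings for consumption by XML and make; the real
    # CIME helper also validates that val is a bool.
    return "TRUE" if val else "FALSE"
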
Example #19
def do_data_assimilation(da_script, caseroot, cycle, lid, rundir):
###############################################################################
    expect(os.path.isfile(da_script), "Data Assimilation script {} not found".format(da_script))
    filename = "da.log.{}".format(lid)
    outfile = os.path.join(rundir, filename)
    run_sub_or_cmd(da_script, [caseroot, cycle], os.path.basename(da_script), [caseroot, cycle], logfile=outfile, combine_output=True)

###############################################################################
def case_run(case, skip_pnl=False):
###############################################################################
    # Set up the run, run the model, do the postrun steps
    run_with_submit = case.get_value("RUN_WITH_SUBMIT")
    expect(run_with_submit,
           "You are not calling the run script via the submit script. "
           "As a result, short-term archiving will not be called automatically."
           "Please submit your run using the submit script like so:"
           " ./case.submit Time: {}".format(get_timestamp()))

    # Forces user to use case.submit if they re-submit
    if case.get_value("TESTCASE") is None:
        case.set_value("RUN_WITH_SUBMIT", False)

    prerun_script = case.get_value("PRERUN_SCRIPT")
    postrun_script = case.get_value("POSTRUN_SCRIPT")

    data_assimilation = case.get_value("DATA_ASSIMILATION")
    data_assimilation_cycles = case.get_value("DATA_ASSIMILATION_CYCLES")
    data_assimilation_script = case.get_value("DATA_ASSIMILATION_SCRIPT")

    # set up the LID
    lid = new_lid()
Example #20
    def test_b_full(self):
        tests = get_tests.get_full_test_names(["cime_test_only"],
                                              self._machine, self._compiler)
        test_id = "%s-%s" % (self._baseline_name, utils.get_timestamp())
        ct = test_scheduler.TestScheduler(
            tests,
            test_id=test_id,
            no_batch=self.NO_BATCH,
            test_root=self._testroot,
            output_root=self._testroot,
            compiler=self._compiler,
            mpilib=self.TEST_MPILIB,
            machine_name=self.MACHINE.get_machine_name(),
        )

        build_fail_test = [item for item in tests
                           if "TESTBUILDFAIL_" in item][0]
        build_fail_exc_test = [
            item for item in tests if "TESTBUILDFAILEXC" in item
        ][0]
        run_fail_test = [item for item in tests if "TESTRUNFAIL_" in item][0]
        run_fail_exc_test = [
            item for item in tests if "TESTRUNFAILEXC" in item
        ][0]
        pass_test = [item for item in tests if "TESTRUNPASS" in item][0]
        test_diff_test = [item for item in tests if "TESTTESTDIFF" in item][0]
        mem_fail_test = [item for item in tests
                         if "TESTMEMLEAKFAIL" in item][0]
        mem_pass_test = [item for item in tests
                         if "TESTMEMLEAKPASS" in item][0]
        st_arch_fail_test = [
            item for item in tests if "TESTRUNSTARCFAIL" in item
        ][0]

        log_lvl = logging.getLogger().getEffectiveLevel()
        logging.disable(logging.CRITICAL)
        try:
            ct.run_tests()
        finally:
            logging.getLogger().setLevel(log_lvl)

        self._wait_for_tests(test_id, expect_works=False)

        test_statuses = glob.glob("%s/*%s/TestStatus" %
                                  (self._testroot, test_id))
        self.assertEqual(len(tests), len(test_statuses))

        for x in test_statuses:
            ts = test_status.TestStatus(test_dir=os.path.dirname(x))
            test_name = ts.get_name()
            log_files = glob.glob("%s/%s*%s/TestStatus.log" %
                                  (self._testroot, test_name, test_id))
            self.assertEqual(
                len(log_files),
                1,
                "Expected exactly one TestStatus.log file, found %d"
                % len(log_files),
            )
            log_file = log_files[0]
            if test_name == build_fail_test:

                self.assert_test_status(
                    test_name,
                    ts,
                    test_status.MODEL_BUILD_PHASE,
                    test_status.TEST_FAIL_STATUS,
                )
                with open(log_file, "r") as fd:
                    data = fd.read()
                self.assertTrue(
                    "Intentional fail for testing infrastructure" in data,
                    "Broken test did not report build error:\n%s" % data,
                )
            elif test_name == build_fail_exc_test:
                with open(log_file, "r") as fd:
                    data = fd.read()
                self.assert_test_status(
                    test_name,
                    ts,
                    test_status.SHAREDLIB_BUILD_PHASE,
                    test_status.TEST_FAIL_STATUS,
                )
                self.assertTrue(
                    "Exception from init" in data,
                    "Broken test did not report build error:\n%s" % data,
                )
            elif test_name == run_fail_test:
                self.assert_test_status(test_name, ts, test_status.RUN_PHASE,
                                        test_status.TEST_FAIL_STATUS)
            elif test_name == run_fail_exc_test:
                self.assert_test_status(test_name, ts, test_status.RUN_PHASE,
                                        test_status.TEST_FAIL_STATUS)
                with open(log_file, "r") as fd:
                    data = fd.read()
                self.assertTrue(
                    "Exception from run_phase" in data,
                    "Broken test did not report run error:\n%s" % data,
                )
            elif test_name == mem_fail_test:
                self.assert_test_status(
                    test_name,
                    ts,
                    test_status.MEMLEAK_PHASE,
                    test_status.TEST_FAIL_STATUS,
                )
                self.assert_test_status(test_name, ts, test_status.RUN_PHASE,
                                        test_status.TEST_PASS_STATUS)
            elif test_name == test_diff_test:
                self.assert_test_status(test_name, ts, "COMPARE_base_rest",
                                        test_status.TEST_FAIL_STATUS)
                self.assert_test_status(test_name, ts, test_status.RUN_PHASE,
                                        test_status.TEST_PASS_STATUS)
            elif test_name == st_arch_fail_test:
                self.assert_test_status(test_name, ts, test_status.RUN_PHASE,
                                        test_status.TEST_PASS_STATUS)
                self.assert_test_status(
                    test_name,
                    ts,
                    test_status.STARCHIVE_PHASE,
                    test_status.TEST_FAIL_STATUS,
                )
            else:
                self.assertTrue(test_name in [pass_test, mem_pass_test])
                self.assert_test_status(test_name, ts, test_status.RUN_PHASE,
                                        test_status.TEST_PASS_STATUS)
                if test_name == mem_pass_test:
                    self.assert_test_status(
                        test_name,
                        ts,
                        test_status.MEMLEAK_PHASE,
                        test_status.TEST_PASS_STATUS,
                    )
Example #21
def generate_baseline(case, baseline_dir=None, allow_baseline_overwrite=False):
    """
    copy the current test output to baseline result

    case - The case containing the hist files to be copied into baselines
    baseline_dir - Optionally, specify a specific baseline dir, otherwise it will be computed from case config
    allow_baseline_overwrite must be true to generate baselines to an existing directory.

    returns (SUCCESS, comments)
    """
    rundir   = case.get_value("RUNDIR")
    if baseline_dir is None:
        baselineroot = case.get_value("BASELINE_ROOT")
        basegen_dir = os.path.join(baselineroot, case.get_value("BASEGEN_CASE"))
    else:
        basegen_dir = baseline_dir
    testcase = case.get_value("CASE")

    if not os.path.isdir(basegen_dir):
        os.makedirs(basegen_dir)

    if (os.path.isdir(os.path.join(basegen_dir,testcase)) and
        not allow_baseline_overwrite):
        expect(False, " Cowardly refusing to overwrite existing baseline directory")

    comments = "Generating baselines into '{}'\n".format(basegen_dir)
    num_gen = 0
    for model in _iter_model_file_substrs(case):
        comments += "  generating for model '{}'\n".format(model)
        hists = _get_latest_hist_files(testcase, model, rundir)
        logger.debug("latest_files: {}".format(hists))
        num_gen += len(hists)
        for hist in hists:
            basename = hist[hist.rfind(model):]
            baseline = os.path.join(basegen_dir, basename)
            if os.path.exists(baseline):
                os.remove(baseline)

            shutil.copy(hist, baseline)
            comments += "    generating baseline '{}' from file {}\n".format(baseline, hist)

    # copy latest cpl log to baseline
    # drop the date so that the name is generic
    newestcpllogfile = case.get_latest_cpl_log(coupler_log_path=case.get_value("LOGDIR"))
    if newestcpllogfile is None:
        logger.warning("No cpl.log file found in log directory {}".format(case.get_value("LOGDIR")))
    else:
        shutil.copyfile(newestcpllogfile,
                        os.path.join(basegen_dir, "cpl.log.gz"))

    expect(num_gen > 0, "Could not generate any hist files for case '{}', something is seriously wrong".format(os.path.join(rundir, testcase)))
    # make sure permissions are open in baseline directory
    for root, _, files in os.walk(basegen_dir):
        for name in files:
            try:
                os.chmod(os.path.join(root,name), stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP | stat.S_IROTH)
            except OSError:
                # We tried. Not worth hard failure here.
                pass

    if get_model() == "e3sm":
        bless_log = os.path.join(basegen_dir, BLESS_LOG_NAME)
        with open(bless_log, "a") as fd:
            fd.write("sha:{} date:{}\n".format(get_current_commit(repo=case.get_value("CIMEROOT")),
                                               get_timestamp(timestamp_format="%Y-%m-%d_%H:%M:%S")))

    return True, comments
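A minimal usage sketch for generate_baseline, assuming CIME's Case context manager; the caseroot path is hypothetical:

from CIME.case import Case

# Open an existing case and regenerate its baselines, overwriting any
# baseline directory that is already present.
with Case("/path/to/caseroot") as case:
    success, comments = generate_baseline(case, allow_baseline_overwrite=True)
    print(comments)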
Example #22
    clm_use_petsc = case.get_value("CLM_USE_PETSC")
    cism_use_trilinos = case.get_value("CISM_USE_TRILINOS")
    mali_use_albany = case.get_value("MALI_USE_ALBANY")
    mach = case.get_value("MACH")

    # Load some params into env
    os.environ["BUILD_THREADED"] = stringify_bool(build_threaded)
    cime_model = get_model()

    if cime_model == "e3sm" and mach == "titan" and compiler == "pgiacc":
        case.set_value("CAM_TARGET", "preqx_acc")

    # This is a timestamp for the build, not the same as the testid,
    # and this case may not be a test anyway. For a production
    # experiment there may be many builds of the same case.
    lid = get_timestamp("%y%m%d-%H%M%S")
    os.environ["LID"] = lid

    # Set the overall USE_PETSC variable to TRUE if any of the
    # *_USE_PETSC variables are TRUE.
    # For now, there is just the one CLM_USE_PETSC variable, but in
    # the future there may be others -- so USE_PETSC will be true if
    # ANY of those are true.

    use_petsc = clm_use_petsc
    case.set_value("USE_PETSC", use_petsc)

    # Set the overall USE_TRILINOS variable to TRUE if any of the
    # *_USE_TRILINOS variables are TRUE.
    # For now, there is just the one CISM_USE_TRILINOS variable, but in
    # the future there may be others -- so USE_TRILINOS will be true if
    # ANY of those are true.

    use_trilinos = cism_use_trilinos
    case.set_value("USE_TRILINOS", use_trilinos)
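The comments above describe an OR-aggregation that today has only one input per variable. A hedged standalone sketch of the generalized form (mosart_use_petsc is a hypothetical future flag, not a real CIME variable):

# Values would come from case.get_value("CLM_USE_PETSC") etc.
clm_use_petsc = True
mosart_use_petsc = False  # hypothetical future component flag
use_petsc = any([clm_use_petsc, mosart_use_petsc])
print(use_petsc)  # -> True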
Example #23
    def _generate_baseline(self):
        """
        Generate a new baseline case based on the current test.
        """
        with self._test_status:
            # generate baseline

            # BEGIN: modified CIME.hist_utils.generate_baseline
            rundir = self._case.get_value("RUNDIR")
            basegen_dir = os.path.join(self._case.get_value("BASELINE_ROOT"),
                                       self._case.get_value("BASEGEN_CASE"))
            testcase = self._case.get_value("CASE")

            if not os.path.isdir(basegen_dir):
                os.makedirs(basegen_dir)

            if os.path.isdir(os.path.join(basegen_dir, testcase)):
                expect(
                    False,
                    " Cowardly refusing to overwrite existing baseline directory"
                )

            comments = "Generating baselines into '{}'\n".format(basegen_dir)
            num_gen = 0

            model = 'cam'
            comments += "  generating for model '{}'\n".format(model)
            hists = _get_all_hist_files(testcase, model, rundir)
            logger.debug("mvk_hist_files: {}".format(hists))

            num_gen += len(hists)
            for hist in hists:
                basename = hist[hist.rfind(model):]
                baseline = os.path.join(basegen_dir, basename)
                if os.path.exists(baseline):
                    os.remove(baseline)

                shutil.copy(hist, baseline)
                comments += "    generating baseline '{}' from file {}\n".format(
                    baseline, hist)

            newestcpllogfile = self._case.get_latest_cpl_log(
                coupler_log_path=self._case.get_value("LOGDIR"))
            if newestcpllogfile is None:
                logger.warning(
                    "No cpl.log file found in log directory {}".format(
                        self._case.get_value("LOGDIR")))
            else:
                shutil.copyfile(newestcpllogfile,
                                os.path.join(basegen_dir, "cpl.log.gz"))

            expect(
                num_gen > 0,
                "Could not generate any hist files for case '{}', something is seriously wrong"
                .format(os.path.join(rundir, testcase)))
            # make sure permissions are open in baseline directory
            for root, _, files in os.walk(basegen_dir):
                for name in files:
                    try:
                        os.chmod(
                            os.path.join(root, name),
                            stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP
                            | stat.S_IWGRP | stat.S_IROTH)
                    except OSError:
                        # We tried. Not worth hard failure here.
                        pass

            if get_model() == "e3sm":
                bless_log = os.path.join(basegen_dir, BLESS_LOG_NAME)
                with open(bless_log, "a") as fd:
                    fd.write("sha:{} date:{}\n".format(
                        get_current_commit(
                            repo=self._case.get_value("CIMEROOT")),
                        get_timestamp(timestamp_format="%Y-%m-%d_%H:%M:%S")))
            # END: modified CIME.hist_utils.generate_baseline

            append_testlog(comments)
            status = CIME.test_status.TEST_PASS_STATUS
            baseline_name = self._case.get_value("BASEGEN_CASE")
            self._test_status.set_status(
                CIME.test_status.GENERATE_PHASE,
                status,
                comments=os.path.dirname(baseline_name))
            basegen_dir = os.path.join(self._case.get_value("BASELINE_ROOT"),
                                       self._case.get_value("BASEGEN_CASE"))
            # copy latest cpl log to baseline
            # drop the date so that the name is generic
            newestcpllogfiles = self._get_latest_cpl_logs()
            for cpllog in newestcpllogfiles:
                m = re.search(r"/(cpl.*\.log).*\.gz", cpllog)
                if m is not None:
                    baselog = os.path.join(basegen_dir, m.group(1)) + ".gz"
                    # baselog already contains basegen_dir, so no second join
                    shutil.copyfile(cpllog, baselog)
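A quick illustration of the log-name regex above, which strips the dated suffix so the baseline log keeps a generic name; the path is made up:

import re

m = re.search(r"/(cpl.*\.log).*\.gz", "/scratch/run/cpl.log.230101-120000.gz")
print(m.group(1))  # -> "cpl.log", so the baseline file is named cpl.log.gz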
Example #24
    newestcpllogfile = case.get_latest_cpl_log(
        coupler_log_path=case.get_value("RUNDIR"))
    if newestcpllogfile is None:
        logger.warning("No cpl.log file found in directory {}".format(
            case.get_value("RUNDIR")))
    else:
        safe_copy(newestcpllogfile, os.path.join(basegen_dir, "cpl.log.gz"))

    expect(
        num_gen > 0,
        "Could not generate any hist files for case '{}', something is seriously wrong"
        .format(os.path.join(rundir, testcase)))
    # make sure permissions are open in baseline directory
    for root, _, files in os.walk(basegen_dir):
        for name in files:
            try:
                os.chmod(
                    os.path.join(root, name), stat.S_IRUSR | stat.S_IWUSR
                    | stat.S_IRGRP | stat.S_IWGRP | stat.S_IROTH)
            except OSError:
                # We tried. Not worth hard failure here.
                pass

    if get_model() == "e3sm":
        bless_log = os.path.join(basegen_dir, BLESS_LOG_NAME)
        with open(bless_log, "a") as fd:
            fd.write("sha:{} date:{}\n".format(
                get_current_commit(repo=case.get_value("CIMEROOT")),
                get_timestamp(timestamp_format="%Y-%m-%d_%H:%M:%S")))

    return True, comments
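For reference, the stat flags in the chmod loops above combine to mode 0o664, i.e. read/write for user and group, read-only for other:

import stat

# rw-rw-r--
mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP | stat.S_IROTH
assert mode == 0o664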
Example #25
def _generate_baseline_impl(case, baseline_dir=None, allow_baseline_overwrite=False):
    """
    Copy the current test output to the baseline results area.

    case - The case containing the hist files to be copied into baselines
    baseline_dir - Optionally, specify a specific baseline dir; otherwise it is computed from the case config
    allow_baseline_overwrite - Must be true to generate baselines into an existing directory.

    returns (SUCCESS, comments)
    """
    rundir   = case.get_value("RUNDIR")
    ref_case = case.get_value("RUN_REFCASE")
    if baseline_dir is None:
        baselineroot = case.get_value("BASELINE_ROOT")
        basegen_dir = os.path.join(baselineroot, case.get_value("BASEGEN_CASE"))
    else:
        basegen_dir = baseline_dir
    testcase = case.get_value("CASE")
    archive = case.get_env('archive')

    if not os.path.isdir(basegen_dir):
        os.makedirs(basegen_dir)

    if (os.path.isdir(os.path.join(basegen_dir,testcase)) and
        not allow_baseline_overwrite):
        expect(False, " Cowardly refusing to overwrite existing baseline directory")

    comments = "Generating baselines into '{}'\n".format(basegen_dir)
    num_gen = 0
    for model in _iter_model_file_substrs(case):
        comments += "  generating for model '{}'\n".format(model)
        if model == 'cpl':
            file_extensions = archive.get_hist_file_extensions(archive.get_entry('drv'))
        else:
            file_extensions = archive.get_hist_file_extensions(archive.get_entry(model))
        hists = _get_latest_hist_files(model, rundir, file_extensions, ref_case=ref_case)
        logger.debug("latest_files: {}".format(hists))
        num_gen += len(hists)
        for hist in hists:
            basename = hist[hist.rfind(model):]
            baseline = os.path.join(basegen_dir, basename)
            if os.path.exists(baseline):
                os.remove(baseline)

            safe_copy(hist, baseline, preserve_meta=False)
            comments += "    generating baseline '{}' from file {}\n".format(baseline, hist)

    # copy latest cpl log to baseline
    # drop the date so that the name is generic
    if case.get_value("COMP_INTERFACE") == "nuopc":
        cplname = "med"
    else:
        cplname = "cpl"

    newestcpllogfile = case.get_latest_cpl_log(coupler_log_path=case.get_value("RUNDIR"), cplname=cplname)
    if newestcpllogfile is None:
        logger.warning("No {}.log file found in directory {}".format(cplname,case.get_value("RUNDIR")))
    else:
        safe_copy(newestcpllogfile, os.path.join(basegen_dir, "{}.log.gz".format(cplname)), preserve_meta=False)

    testname = case.get_value("TESTCASE")
    expect(num_gen > 0 or testname == "PFS", "Could not generate any hist files for case '{}', something is seriously wrong".format(os.path.join(rundir, testcase)))

    if get_model() == "e3sm":
        bless_log = os.path.join(basegen_dir, BLESS_LOG_NAME)
        with open(bless_log, "a") as fd:
            fd.write("sha:{} date:{}\n".format(get_current_commit(repo=case.get_value("CIMEROOT")),
                                               get_timestamp(timestamp_format="%Y-%m-%d_%H:%M:%S")))

    return True, comments
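Finally, a quick illustration of the basename slicing shared by these examples: hist[hist.rfind(model):] keeps everything from the last occurrence of the model substring onward, stripping the run-directory prefix; the path is made up:

hist = "/scratch/run/mycase/cpl.hi.2000-01-01-00000.nc"
model = "cpl"
print(hist[hist.rfind(model):])  # -> "cpl.hi.2000-01-01-00000.nc"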