Example #1
    def run_tests(self, wait=False,
                  wait_check_throughput=False,
                  wait_check_memory=False,
                  wait_ignore_namelists=False,
                  wait_ignore_memleak=False):
    ###########################################################################
        """
        Main API for this class.

        Return True if all tests passed.
        """
        start_time = time.time()

        # Tell user what will be run
        logger.info( "RUNNING TESTS:")
        for test in self._tests:
            logger.info( "  {}".format(test))

        # Setup cs files
        self._setup_cs_files()

        GenericXML.DISABLE_CACHING = True
        self._producer()
        GenericXML.DISABLE_CACHING = False

        expect(threading.active_count() == 1, "Leftover threads?")

        wait_handles_report = False
        if not self._no_run and not self._no_batch:
            if wait:
                logger.info("Waiting for tests to finish")
                rv = wait_for_tests(glob.glob(os.path.join(self._test_root, "*{}/TestStatus".format(self._test_id))),
                                    check_throughput=wait_check_throughput,
                                    check_memory=wait_check_memory,
                                    ignore_namelists=wait_ignore_namelists,
                                    ignore_memleak=wait_ignore_memleak)
                wait_handles_report = True
            else:
                logger.info("Due to presence of batch system, create_test will exit before tests are complete.\n" \
                            "To force create_test to wait for full completion, use --wait")

        # Return True if all tests passed from our point of view
        if not wait_handles_report:
            logger.info( "At test-scheduler close, state is:")
            rv = True
            for test in self._tests:
                phase, status = self._get_test_data(test)

                # Give highest priority to fails in test scheduler
                if status not in [TEST_PASS_STATUS, TEST_PEND_STATUS]:
                    logger.info("{} {} (phase {})".format(status, test, phase))
                    rv = False

                else:
                    # Be cautious about telling the user that the test passed. This
                    # status should match what they would see on the dashboard. Our
                    # self._test_states does not include comparison fail information,
                    # so we need to parse test status.
                    ts = TestStatus(self._get_test_dir(test))
                    nlfail = ts.get_status(NAMELIST_PHASE) == TEST_FAIL_STATUS
                    ts_status = ts.get_overall_test_status(ignore_namelists=True, check_memory=False, check_throughput=False)

                    if ts_status not in [TEST_PASS_STATUS, TEST_PEND_STATUS]:
                        logger.info( "{} {} (phase {})".format(ts_status, test, phase))
                        rv = False
                    elif nlfail:
                        logger.info( "{} {} (but otherwise OK) {}".format(NAMELIST_FAIL_STATUS, test, phase))
                        rv = False
                    else:
                        logger.info("{} {} {}".format(status, test, phase))

                logger.info( "    Case dir: {}".format(self._get_test_dir(test)))

        logger.info( "test-scheduler took {} seconds".format(time.time() - start_time))

        return rv
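A minimal usage sketch for the variant above (the TestScheduler construction, its arguments, and the exit handling are assumptions, not part of the example):

    import sys

    # Hypothetical driver code; the constructor and its arguments are illustrative only.
    scheduler = TestScheduler(test_names, test_root=test_root, test_id=test_id)
    passed = scheduler.run_tests(wait=True,
                                 wait_check_throughput=True,
                                 wait_ignore_namelists=True)
    sys.exit(0 if passed else 1)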
Example #2
    def run_tests(self, wait=False):
        ###########################################################################
        """
        Main API for this class.

        Return True if all tests passed.
        """
        start_time = time.time()

        # Tell user what will be run
        logger.info("RUNNING TESTS:")
        for test in self._tests:
            logger.info("  %s" % test)

        self._producer()

        expect(threading.active_count() == 1, "Leftover threads?")

        # Setup cs files
        self._setup_cs_files()

        wait_handles_report = False
        if not self._no_run and not self._no_batch:
            if wait:
                logger.info("Waiting for tests to finish")
                rv = wait_for_tests(
                    glob.glob(
                        os.path.join(self._test_root,
                                     "*%s/TestStatus" % self._test_id)))
                wait_handles_report = True
            else:
                logger.info("Due to presence of batch system, create_test will exit before tests are complete.\n" \
                            "To force create_test to wait for full completion, use --wait")

        # Return True if all tests passed from our point of view
        if not wait_handles_report:
            logger.info("At test-scheduler close, state is:")
            rv = True
            for test in self._tests:
                phase, status = self._get_test_data(test)

                # Give highest priority to fails in test scheduler
                if status not in [TEST_PASS_STATUS, TEST_PEND_STATUS]:
                    logger.info("%s %s (phase %s)" % (status, test, phase))
                    rv = False

                else:
                    # Be cautious about telling the user that the test passed. This
                    # status should match what they would see on the dashboard. Our
                    # self._test_states does not include comparison fail information,
                    # so we need to parse test status.
                    ts = TestStatus(self._get_test_dir(test))
                    nlfail = ts.get_status(NAMELIST_PHASE) == TEST_FAIL_STATUS
                    ts_status = ts.get_overall_test_status(
                        ignore_namelists=True,
                        check_memory=False,
                        check_throughput=False)

                    if ts_status not in [TEST_PASS_STATUS, TEST_PEND_STATUS]:
                        logger.info("%s %s (phase %s)" % (status, test, phase))
                        rv = False
                    elif nlfail:
                        logger.info("%s %s (but otherwise OK) %s" %
                                    (NAMELIST_FAIL_STATUS, test, phase))
                        rv = False
                    else:
                        logger.info("%s %s %s" % (status, test, phase))

                logger.info("    Case dir: %s" % self._get_test_dir(test))

            logger.info("test-scheduler took %s seconds" %
                        (time.time() - start_time))

        return rv
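The glob in this variant collects every test's TestStatus file by matching the shared test id suffix on the case directory names. A standalone sketch of the same pattern, with hypothetical paths:

    import glob
    import os

    test_root = "/scratch/tests"     # hypothetical test root
    test_id = "20240101_120000"      # hypothetical shared test id
    # Matches e.g. /scratch/tests/SMS.f19_g16.A.20240101_120000/TestStatus
    status_files = glob.glob(os.path.join(test_root, "*%s/TestStatus" % test_id))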
Example #3
    def run_tests(self, wait=False,
                  wait_check_throughput=False,
                  wait_check_memory=False,
                  wait_ignore_namelists=False,
                  wait_ignore_memleak=False):
    ###########################################################################
        """
        Main API for this class.

        Return True if all tests passed.
        """
        start_time = time.time()

        # Tell user what will be run
        logger.info( "RUNNING TESTS:")
        for test in self._tests:
            logger.info( "  {}".format(test))

        # Setup cs files
        self._setup_cs_files()

        GenericXML.DISABLE_CACHING = True
        self._producer()
        GenericXML.DISABLE_CACHING = False

        expect(threading.active_count() == 1, "Leftover threads?")

        # Copy TestStatus files to baselines for tests that have already failed.
        if get_model() == "cesm":
            for test in self._tests:
                status = self._get_test_data(test)[1]
                if status not in [TEST_PASS_STATUS, TEST_PEND_STATUS] and self._baseline_gen_name:
                    basegen_case_fullpath = os.path.join(self._baseline_root, self._baseline_gen_name, test)
                    test_dir = self._get_test_dir(test)
                    generate_teststatus(test_dir, basegen_case_fullpath)

        wait_handles_report = False
        if not self._no_run and not self._no_batch:
            if wait:
                logger.info("Waiting for tests to finish")
                rv = wait_for_tests(glob.glob(os.path.join(self._test_root, "*{}/TestStatus".format(self._test_id))),
                                    check_throughput=wait_check_throughput,
                                    check_memory=wait_check_memory,
                                    ignore_namelists=wait_ignore_namelists,
                                    ignore_memleak=wait_ignore_memleak)
                wait_handles_report = True
            else:
                logger.info("Due to presence of batch system, create_test will exit before tests are complete.\n" \
                            "To force create_test to wait for full completion, use --wait")

        # Return True if all tests passed from our point of view
        if not wait_handles_report:
            logger.info( "At test-scheduler close, state is:")
            rv = True
            for test in self._tests:
                phase, status = self._get_test_data(test)

                # Give highest priority to fails in test scheduler
                if status not in [TEST_PASS_STATUS, TEST_PEND_STATUS]:
                    logger.info("{} {} (phase {})".format(status, test, phase))
                    rv = False

                else:
                    # Be cautious about telling the user that the test passed. This
                    # status should match what they would see on the dashboard. Our
                    # self._test_states does not include comparison fail information,
                    # so we need to parse test status.
                    ts = TestStatus(self._get_test_dir(test))
                    nlfail = ts.get_status(NAMELIST_PHASE) == TEST_FAIL_STATUS
                    ts_status = ts.get_overall_test_status(ignore_namelists=True, check_memory=False, check_throughput=False)
                    local_run = not self._no_run and self._no_batch

                    if ts_status not in [TEST_PASS_STATUS, TEST_PEND_STATUS]:
                        logger.info( "{} {} (phase {})".format(ts_status, test, phase))
                        rv = False
                    elif ts_status == TEST_PEND_STATUS and local_run:
                        logger.info( "{} {} (Some phases left in PEND)".format(TEST_FAIL_STATUS, test))
                        rv = False
                    elif nlfail:
                        logger.info( "{} {} (but otherwise OK) {}".format(NAMELIST_FAIL_STATUS, test, phase))
                        rv = False
                    else:
                        logger.info("{} {} {}".format(status, test, phase))

                logger.info( "    Case dir: {}".format(self._get_test_dir(test)))

        logger.info( "test-scheduler took {} seconds".format(time.time() - start_time))

        return rv
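The wait_* keyword arguments in these variants line up with the --wait behavior mentioned in the log message. A hedged sketch of how a command-line front end might pass them through; every flag name other than --wait, and the scheduler object itself, are assumptions:

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--wait", action="store_true",
                        help="Block until all submitted tests finish")
    # Hypothetical flag names that simply shadow the keyword arguments above.
    parser.add_argument("--wait-check-throughput", action="store_true")
    parser.add_argument("--wait-check-memory", action="store_true")
    parser.add_argument("--wait-ignore-namelists", action="store_true")
    parser.add_argument("--wait-ignore-memleak", action="store_true")
    args = parser.parse_args()

    passed = scheduler.run_tests(wait=args.wait,
                                 wait_check_throughput=args.wait_check_throughput,
                                 wait_check_memory=args.wait_check_memory,
                                 wait_ignore_namelists=args.wait_ignore_namelists,
                                 wait_ignore_memleak=args.wait_ignore_memleak)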