def create_test(self):
###############################################################################
    """
    Main API for this class.

    Return True if all tests passed.
    """
    start_time = time.time()

    # Tell user what will be run
    print "RUNNING TESTS:"
    for test_name in self._test_names:
        print " ", test_name

    # TODO - documentation

    self._producer()

    expect(threading.active_count() == 1, "Leftover threads?")

    # Setup cs files
    self._setup_cs_files()

    # Return True if all tests passed
    print "At create_test close, state is:"
    rv = True
    for idx, test_name in enumerate(self._test_names):
        phase, status = self._test_states[idx]
        if (status == TEST_PASS_STATUS and phase == RUN_PHASE):
            # Be cautious about telling the user that the test passed. This
            # status should match what they would see on the dashboard. Our
            # self._test_states does not include comparison fail information,
            # so we need to parse the test status file.
            test_status_file = os.path.join(self._get_test_dir(test_name),
                                            TEST_STATUS_FILENAME)
            status = wait_for_tests.interpret_status_file(test_status_file)[1]

        if (status not in [TEST_PASS_STATUS, TEST_PENDING_STATUS]):
            print "%s %s (phase %s)" % (status, test_name, phase)
            rv = False
        elif (test_name in self._tests_with_nl_problems):
            print "%s %s (but otherwise OK)" % (NAMELIST_FAIL_STATUS, test_name)
            rv = False
        else:
            print status, test_name, phase

        print "    Case dir: %s" % self._get_test_dir(test_name)

    print "create_test took", time.time() - start_time, "seconds"

    return rv
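# Usage sketch (hypothetical driver; the TestScheduler class name and its
# constructor arguments are assumptions for illustration, not part of this
# module). create_test() returns True only when every test passed, so it
# maps directly onto a process exit code:
#
#   scheduler = TestScheduler(test_names)      # hypothetical constructor
#   sys.exit(0 if scheduler.create_test() else 1)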
def system_test(self):
###############################################################################
    """
    Main API for this class.

    Return True if all tests passed.
    """
    start_time = time.time()

    # Tell user what will be run
    print "RUNNING TESTS:"
    for test in self._tests:
        print " ", test

    # TODO - documentation

    self._producer()

    expect(threading.active_count() == 1, "Leftover threads?")

    # Setup cs files
    self._setup_cs_files()

    # Return True if all tests passed
    print "At system_test close, state is:"
    rv = True
    for test in self._tests:
        phase, status, nl_fail = self._get_test_data(test)
        logging.debug("phase %s status %s" % (phase, status))
        if (status == TEST_PASS_STATUS and phase == RUN_PHASE):
            # Be cautious about telling the user that the test passed. This
            # status should match what they would see on the dashboard. Our
            # in-memory test data does not include comparison fail
            # information, so we need to parse the test status file.
            test_status_file = os.path.join(self._get_test_dir(test),
                                            TEST_STATUS_FILENAME)
            status = wait_for_tests.interpret_status_file(test_status_file)[1]

        if (status not in [TEST_PASS_STATUS, TEST_PENDING_STATUS]):
            print "%s %s (phase %s)" % (status, test, phase)
            rv = False
        elif (nl_fail):
            print "%s %s (but otherwise OK)" % (NAMELIST_FAIL_STATUS, test)
            rv = False
        else:
            print status, test, phase

        print "    Case dir: %s" % self._get_test_dir(test)

    print "system_test took", time.time() - start_time, "seconds"

    return rv
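# Usage sketch for system_test (hypothetical driver, mirroring the one after
# create_test above; the SystemTest class name and constructor are
# assumptions). Enabling debug logging makes the per-test
# "phase ... status ..." lines from the final loop visible:
#
#   logging.basicConfig(level=logging.DEBUG)
#   runner = SystemTest(tests)                 # hypothetical constructor
#   sys.exit(0 if runner.system_test() else 1)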