def finish_test(self, test):
    """
    This is run after each single test-method is finished, but the below
    logic will only be executed once the very last test-method from the
    original set of given unit-tests is completed.
    """
    if self.testsRun != len(self.test_cases):
        return
    if not self.class_stop_time:
        self.class_stop_time = datetime.datetime.now()

    num_tests = len(self.all_results)
    num_failures = len(self.failure_results)
    num_errors = len(self.error_results)
    num_skips = len(self.skipped_results)
    test_class, _ = utils.get_test_names(test)
    test_elapsed = self.class_stop_time - self.class_start_time

    log_level = "INFO"
    failure_tag = ""
    if num_failures or num_errors:
        log_level = "ERROR"
        failure_tag = "<<< FAILURE! "
    elif num_skips:
        log_level = "WARNING"

    utils.write_separator()
    print
    utils.write_separator()
    utils.write_log(
        log_level,
        "Tests run: %(num_tests)s, Failures: %(num_failures)s, "
        "Errors: %(num_errors)s, Skipped: %(num_skips)s, "
        "Time elapsed: %(test_elapsed)s s "
        "%(failure_tag)s- in %(test_class)s" % locals())
    utils.write_separator()

    def print_errors(test_class, err_list, kind):
        for result in err_list:
            test = result.test
            elapsed = result.elapsed
            test_method = test._testMethodName
            utils.write_log(
                "ERROR",
                "%(test_method)s(%(test_class)s) Time elapsed: "
                "%(elapsed)s s <<< %(kind)s!" % locals())
            err_frame = result.errObj[2].tb_next
            traceback.print_tb(err_frame, 1)
            print

    # write a leading newline if detailed error reports will be written
    if any(self.error_results) or any(self.failure_results):
        print
    print_errors(test_class, self.error_results, "ERROR")
    print_errors(test_class, self.failure_results, "FAILURE")
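# ---------------------------------------------------------------------------
# write_separator() and write_log() come from a local utils module that is
# not part of this snippet. A minimal sketch of what the code above assumes
# they do; the names carry a _sketch suffix because the real implementations
# may differ:

def _write_separator_sketch():
    # Maven-surefire-style divider line
    print "-" * 70

def _write_log_sketch(level, message=""):
    # e.g. "[INFO] Tests run: 4, Failures: 0, Errors: 0, Skipped: 0, ..."
    print "[%s] %s" % (level, message)
# ---------------------------------------------------------------------------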
def test_dbscan_helper(X, eps, min_samples, threshold, use_assert,
                       test_model):
    dbscan_imp1 = run_dbscan(X, eps, min_samples, model='sklearn')
    print()

    if test_model == 'cuml':
        X = pd2cudf(X)
    dbscan_imp2 = run_dbscan(X, eps, min_samples, model=test_model)
    print()

    for attr in ['labels_']:
        passed = array_equal(getattr(dbscan_imp1, attr),
                             getattr(dbscan_imp2, attr),
                             threshold, with_sign=True)
        message = 'compare dbscan: %s vs sklearn %s %s' % (
            test_model, attr, 'equal' if passed else 'NOT equal')
        print(message)
        write_log(message)
        if use_assert:
            assert passed, message
    print()
    del dbscan_imp1, dbscan_imp2, X
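# array_equal() is a local comparison helper (not numpy.array_equal). A
# minimal sketch of the semantics the tests above appear to rely on -- the
# real helper may define `threshold` differently, so treat this purely as
# an illustration: values must agree within `threshold`, and with_sign=False
# compares absolute values (useful where signs are arbitrary):
def _array_equal_sketch(a, b, threshold=1e-4, with_sign=True):
    import numpy as np
    a = np.asarray(a, dtype=np.float64).ravel()
    b = np.asarray(b, dtype=np.float64).ravel()
    if not with_sign:
        a, b = np.abs(a), np.abs(b)
    return a.shape == b.shape and np.allclose(a, b, atol=threshold)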
def test_pca_helper(X, n_components, svd_solver, whiten, random_state,
                    threshold, use_assert, test_model):
    pca_imp1 = run_pca(X, n_components, svd_solver, whiten, random_state,
                       model='sklearn')
    print()

    if test_model == 'cuml':
        X = pd2pygdf(X)
    elif test_model == 'h2o4gpu':
        X = np.array(X).astype(np.float32)
    pca_imp2 = run_pca(X, n_components, svd_solver, whiten, random_state,
                       model=test_model)
    print()

    for attr in ['singular_values_', 'components_', 'explained_variance_',
                 'explained_variance_ratio_', 'transformed_result']:
        # component signs (and hence the transformed data) may legitimately
        # differ between implementations, so compare those without sign
        with_sign = attr not in ['components_', 'transformed_result']
        passed = array_equal(getattr(pca_imp1, attr),
                             getattr(pca_imp2, attr),
                             threshold, with_sign=with_sign)
        message = 'compare pca: %s vs sklearn %s %s' % (
            test_model, attr, 'equal' if passed else 'NOT equal')
        print(message)
        write_log(message)
        if use_assert:
            assert passed, message
    print()
    del pca_imp1, pca_imp2, X
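def _example_pca_comparison():
    # Hypothetical usage sketch of test_pca_helper; the data shape and all
    # parameter values below are placeholders, not taken from the real
    # test suite.
    import numpy as np
    import pandas as pd
    X = pd.DataFrame(np.random.rand(1000, 20).astype(np.float32))
    test_pca_helper(X, n_components=2, svd_solver='full', whiten=False,
                    random_state=42, threshold=1e-3, use_assert=True,
                    test_model='cuml')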
def startTest(self, test):
    TestResultBase.startTest(self, test)
    # remember the original sys streams
    self.__sys_stdout = sys.stdout
    self.__sys_stderr = sys.stderr
    # just one buffer for both stdout and stderr
    self.outputBuffer = StringIO.StringIO()
    sys.stdout = OutputRedirector(self.streams + [self.outputBuffer])
    sys.stderr = OutputRedirector(self.streams + [self.outputBuffer])

    # now the real testing logic kicks in
    test_class, test_method = utils.get_test_names(test)
    if not self.class_start_time:
        self.class_start_time = datetime.datetime.now()
    self.test_start_time = datetime.datetime.now()

    utils.write_separator()
    utils.write_log("INFO",
                    "Running %(test_class)s.%(test_method)s" % locals())
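# OutputRedirector is not shown in this snippet; the code above assumes it
# fans every write out to all registered streams (console, the per-class
# report file, and the in-memory capture buffer). A minimal sketch under
# that assumption -- the project's real class may differ:
class _OutputRedirectorSketch(object):
    def __init__(self, streams):
        self.streams = streams

    def write(self, s):
        # duplicate each write to every registered stream
        for stream in self.streams:
            stream.write(s)

    def writelines(self, lines):
        for line in lines:
            self.write(line)

    def flush(self):
        for stream in self.streams:
            stream.flush()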
def run(self, test):
    "Run the given test case or test suite."
    self.runner_start_time = datetime.datetime.now()
    test_class_dict = {}

    def find_test_methods(test_decl):
        is_iterable = hasattr(test_decl, '__iter__')
        if is_iterable:
            for tests in test_decl:
                find_test_methods(tests)
        else:
            cls_nm = type(test_decl).__name__
            if not test_class_dict.get(cls_nm):
                test_class_dict[cls_nm] = list()
            test_class_dict[cls_nm].append(test_decl)

    # convert the given TestCase/TestSuite into a dictionary of test-classes
    find_test_methods(test)

    all_results = list()
    success_results = list()
    failure_results = list()
    error_results = list()
    skipped_results = list()

    utils.write_separator()
    utils.write_log("INFO", "T E S T S")
    for k, class_tests in test_class_dict.iteritems():
        class_suite = TestSuite(class_tests)
        reports_dir = os.path.join(os.path.dirname(__main__.__file__),
                                   "test-reports")
        if not os.path.exists(reports_dir):
            os.makedirs(reports_dir)
        with file(os.path.join(reports_dir, k + '.txt'), 'wb') as fp:
            # execute all tests in this test class
            class_result = TestResult([sys.stdout, fp], class_tests)
            class_suite(class_result)
            # get the test-results from this class and add them to the
            # summary lists
            all_results.extend(class_result.all_results)
            success_results.extend(class_result.success_results)
            failure_results.extend(class_result.failure_results)
            error_results.extend(class_result.error_results)
            skipped_results.extend(class_result.skipped_results)

    tests_success = not any(error_results) and not any(failure_results)
    tests_result = "SUCCESS" if tests_success else "FAILURE"
    self.runner_stop_time = datetime.datetime.now()

    # print final summary log after all tests are done running
    print
    utils.write_separator()
    utils.write_log("INFO", "TESTS RUN %(tests_result)s" % locals())
    utils.write_separator()
    utils.write_log("INFO")
    utils.write_log("INFO", "Results:")
    if not tests_success:
        utils.write_log("INFO")

    def print_summary_problems(err_list, kind):
        if any(err_list):
            utils.write_log("ERROR", kind + "s: ")
            for r in err_list:
                test_class, test_method = utils.get_test_names(r.test)
                err_message = r.errObj[1].message
                err_frame = r.errObj[2].tb_next
                err_lineno = err_frame.tb_lineno if err_frame else ""
                utils.write_log(
                    "ERROR",
                    "  %(test_class)s.%(test_method)s:%(err_lineno)s "
                    "%(err_message)s" % locals())

    print_summary_problems(failure_results, "Failure")
    print_summary_problems(error_results, "Error")

    num_success = len(success_results)
    num_failures = len(failure_results)
    num_errors = len(error_results)
    num_skips = len(skipped_results)
    utils.write_log("INFO")
    utils.write_log(
        "ERROR",
        "Tests run: %(num_success)s, Failures: %(num_failures)s, "
        "Errors: %(num_errors)s, Skipped: %(num_skips)s" % locals())
    utils.write_log("INFO")

    total_elapsed = self.runner_stop_time - self.runner_start_time
    utils.write_separator()
    utils.write_log("INFO", "Total time: %(total_elapsed)s s" % locals())
    utils.write_log("INFO", "Finished at: %s" % self.runner_stop_time)
    utils.write_separator()
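# Hypothetical end-to-end usage of this runner (the runner and test-case
# class names below are placeholders; only run() itself is shown in this
# snippet):
#
#     import unittest
#     suite = unittest.TestLoader().loadTestsFromTestCase(SomeTestCase)
#     MavenStyleRunner().run(suite)
#
# Each test class gets its own report file under ./test-reports/, similar
# in spirit to Maven's surefire reports.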
    else:
        raise NotImplementedError

    @timer
    def fit_(pca, X, model):
        pca.fit(X)
        return pca

    @timer
    def transform_(pca, X, model):
        return pca.transform(X)

    pca = fit_(pca, X, model=model)
    Xpca = transform_(pca, X, model=model)
    # stash the transformed data on the fitted model so the comparison
    # helper can treat it like any other attribute
    pca.transformed_result = Xpca
    return pca


if __name__ == '__main__':
    args = parse_args()
    write_log(args)
    test_pca_mortgage(data_source=args.data,
                      use_assert=args.use_assert,
                      nrows=args.nrows,
                      ncols=args.ncols,
                      quarters=args.quarters,
                      random_state=args.random_state,
                      test_model=args.test_model,
                      threshold=args.threshold)
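# The @timer decorator used by fit_/transform_ above is not shown in this
# snippet. A minimal sketch of what it is assumed to do -- time the wrapped
# call and report the elapsed seconds along with the model name (the real
# decorator may log differently):
def _timer_sketch(func):
    import functools
    import time

    @functools.wraps(func)
    def wrapped(*args, **kwargs):
        start = time.time()
        result = func(*args, **kwargs)
        print('%s (model=%s) took %.3f s' % (
            func.__name__, kwargs.get('model', '?'), time.time() - start))
        return result
    return wrapped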