def run(self):
    """Run a test in a loop until the thread signaller stops it."""
    while self.thread_signaller.running():
        test_result = unittest.TestResult()
        self.test.clearContext()
        self.test(test_result)
        if test_result.wasSuccessful():
            if recording():
                add_cycle_result('success')
            if self.color:
                trace(green_str('.'))
            else:
                trace('.')
        else:
            # An error (unexpected exception) takes precedence over a
            # plain assertion failure.
            if len(test_result.errors):
                if recording():
                    add_cycle_result('error')
                if self.color:
                    trace(red_str('E'))
                else:
                    trace('E')
            else:
                if recording():
                    add_cycle_result('failure')
                if self.color:
                    trace(red_str('F'))
                else:
                    trace('F')
            if self.debug:
                for test, error in test_result.errors:
                    trace("ERROR %s: %s" % (str(test), str(error)))
                for test, error in test_result.failures:
                    trace("FAILURE %s: %s" % (str(test), str(error)))
        thread_sleep(self.sleep_time)
def run(self):
    """Run a test in a loop, passing per-iteration feedback to an
    optional feedback handler."""
    while self.thread_signaller.running():
        test_result = unittest.TestResult()
        self.test.clearContext()
        self.test(test_result)
        feedback = {}
        if test_result.wasSuccessful():
            if recording():
                feedback['count'] = add_cycle_result('success')
            if self.color:
                trace(green_str('.'))
            else:
                trace('.')
            feedback['result'] = 'success'
        else:
            if len(test_result.errors):
                if recording():
                    feedback['count'] = add_cycle_result('error')
                if self.color:
                    trace(red_str('E'))
                else:
                    trace('E')
                feedback['result'] = 'error'
            else:
                if recording():
                    feedback['count'] = add_cycle_result('failure')
                if self.color:
                    trace(red_str('F'))
                else:
                    trace('F')
                feedback['result'] = 'failure'
            if self.debug:
                feedback['errors'] = test_result.errors
                feedback['failures'] = test_result.failures
                for test, error in test_result.errors:
                    trace("ERROR %s: %s" % (str(test), str(error)))
                for test, error in test_result.failures:
                    trace("FAILURE %s: %s" % (str(test), str(error)))
        if self.feedback is not None:
            self.feedback.test_done(feedback)
        thread_sleep(self.sleep_time)
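
# The feedback object above only needs to expose a test_done(feedback)
# method that receives the per-iteration dict. A minimal sketch of a
# compatible receiver, assuming nothing beyond what the loop passes in;
# the PrintingFeedback name and its tallying behavior are hypothetical.
class PrintingFeedback:
    """Hypothetical feedback receiver; only test_done() is required
    by the run loop above."""

    def __init__(self):
        self.totals = {'success': 0, 'error': 0, 'failure': 0}

    def test_done(self, feedback):
        # 'result' is always set; 'count' is present only while
        # recording, 'errors'/'failures' only in debug mode.
        result = feedback.get('result')
        if result in self.totals:
            self.totals[result] += 1
        for test, error in feedback.get('errors', []):
            print("error in %s: %s" % (test, error))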
@contextmanager  # from contextlib; needed for the 'with' usage described below
def record(self, aggregates, **metadata):
    """A context manager that logs a timing record to the results logger.

    The record contains the cycle, cvus, thread_id, suite_name,
    test_name, time, and duration of the contained block of code. If
    the bench is currently in startup mode (not all threads have begun
    executing), the record is marked with startup=True and is ignored
    by the default report builder.

    The context manager yields the `metadata` dictionary, and when the
    record is written, all contents of `metadata` are stored as
    children of the record element. The only `metadata` keys used by
    the report builder are 'result' and 'traceback'. If 'result' is not
    'Successful', the report builder considers the record to have
    failed. 'result' defaults to 'Successful' but can be changed by the
    test contents. If the executed code block raises an exception,
    'result' is set to 'Error', 'traceback' is set to the formatted
    traceback of the raised exception, and the exception is re-raised.

    `aggregates`: dict
        Maps top-level aggregation keys to their values. The top-level
        keys create sections in the built report and the values create
        subsections. Each section reports stats for all records with
        that aggregate key, and each subsection reports stats for all
        records with that aggregate value.

    `**metadata`:
        Any named arguments are stored as children of the record
        element in the results log XML.
    """
    info = {}
    info['cycle'] = str(self.cycle)
    info['cvus'] = str(self.cvus)
    info['thread_id'] = str(self.thread_id)
    info['suite_name'] = str(self.suite_name)
    info['test_name'] = str(self.test_name)
    start_time = time.time()
    try:
        metadata['result'] = 'Successful'
        yield metadata
    except KeyboardInterrupt:
        raise
    except:
        # Bare except (re-raised below) so that even non-Exception
        # errors are recorded before propagating.
        metadata['result'] = 'Error'
        metadata['traceback'] = ' '.join(
            traceback.format_exception(*sys.exc_info()))
        raise
    finally:
        if self.in_bench_mode and not recording():
            info['startup'] = True
        info['time'] = str(start_time)
        info['duration'] = str(time.time() - start_time)
        self.logger_results.record(info, metadata, aggregates)
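
# A sketch of typical usage from inside a test method, assuming record()
# is available on the test case as above. The 'page' aggregate key, the
# 'items' metadata, do_checkout(), and response.status are hypothetical
# names for illustration, not part of this API.
def test_checkout(self):
    # Time one operation and tag it so the report builder can
    # aggregate all 'page' records into one section.
    with self.record({'page': 'checkout'}, items='3') as metadata:
        response = self.do_checkout()  # hypothetical action under test
        if response.status != 200:
            # Any value other than 'Successful' marks this record
            # as failed in the built report.
            metadata['result'] = 'Failure'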
def grid_image(pointcloud, resolution, bbox):
    """Convert a point cloud into a gridded image.

    Parameters:
        pointcloud : np.array(shape=(n, 4))
            Points whose first two columns are x/y coordinates and
            whose fourth column holds the per-point values that are
            averaged per cell.
        resolution : float
            Grid cell size, in meters.
        bbox : BBox
            The bounding box of the point cloud; computed from the
            points when it is empty.

    Returns:
        (np.array(dtype='uint8'), np.array, np.array):
            The gridded image, followed by the x-axis and y-axis
            gridded coordinates of each point.
    """
    # Quantize x/y to integer grid indices, rounding to the nearest cell.
    xy = pointcloud.T[:2]
    xy = ((xy + resolution / 2) // resolution).astype(int)
    bbox = BBox(xy.min(axis=1), xy.max(axis=1)) if bbox.is_none() else bbox
    sz = bbox.mx + 1 - bbox.mn
    flatidx = np.ravel_multi_index(xy - bbox.mn[:, None], sz)
    with recording("np.bincount"):
        # Mean value per cell: weighted counts divided by occupancy
        # (clamped to 1 to avoid division by zero in empty cells).
        histo = np.bincount(flatidx, pointcloud[:, 3], sz.prod()) / np.maximum(
            1, np.bincount(flatidx, None, sz.prod()))
    return (histo.reshape(sz).astype('uint8'), *xy)
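
# BBox is not defined in this section. Below is a minimal sketch
# compatible with the calls grid_image makes (BBox(mn, mx), .is_none(),
# .mn, .mx), plus a synthetic usage example; the is_none() semantics
# are an assumption, and running the example also assumes the module's
# recording(...) timing helper is importable.
import numpy as np


class BBox:
    """Minimal bounding-box sketch matching grid_image's usage."""

    def __init__(self, mn=None, mx=None):
        self.mn = None if mn is None else np.asarray(mn)
        self.mx = None if mx is None else np.asarray(mx)

    def is_none(self):
        # Assumption: an "empty" box is one with no corners set.
        return self.mn is None or self.mx is None


# Synthetic usage: 1000 points in a 10 m x 10 m patch with values 0-255
# in the fourth column; 0.5 m cells give roughly a 21 x 21 image.
pts = np.random.rand(1000, 4)
pts[:, :2] *= 10.0
pts[:, 3] = np.random.randint(0, 256, size=1000)
image, gx, gy = grid_image(pts, resolution=0.5, bbox=BBox())
print(image.shape, image.dtype)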
def _logr(self, message, force=False):
    """Log a result."""
    if force or not self.in_bench_mode or recording():
        self.logger_result.info(message)