def test_process_timers(self):
    start_timestamp = time()
    expected_flushed_metrics_count = 2 + 5  # each timer has 5 separate metrics
    metrics = (Counter('user.jump', 2),
               Set('username', 'navdoon'),
               Timer('db.query', 300),
               Set('username', 'navdoon2'),
               Counter('user.jump', -1),
               Timer('db.query', 309),
               Timer('db.query', 303))
    queue_ = Queue()
    destination = StubDestination()
    destination.expected_count = expected_flushed_metrics_count
    processor = QueueProcessor(queue_)
    processor.set_destinations([destination])
    process_thread = Thread(target=processor.process)
    process_thread.start()
    processor.wait_until_processing(5)
    for metric in metrics:
        queue_.put(metric.to_request())
    destination.wait_until_expected_count_items(5)
    processor.shutdown()
    processor.wait_until_shutdown(5)
    self.assertEqual(expected_flushed_metrics_count, len(destination.metrics))
    metrics_dict = dict()
    for (name, value, timestamp) in destination.metrics:
        metrics_dict[name] = value
        self.assertGreaterEqual(timestamp, start_timestamp)
    self.assertEqual(metrics_dict['user.jump'], 1)
    self.assertEqual(metrics_dict['username'], 2)
    self.assertEqual(metrics_dict['db.query.count'], 3)
    self.assertEqual(metrics_dict['db.query.max'], 309)
    self.assertEqual(metrics_dict['db.query.min'], 300)
    self.assertEqual(metrics_dict['db.query.mean'], 304)
    self.assertEqual(metrics_dict['db.query.median'], 303)
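# --- Note: the tests in this section rely on a StubDestination helper whose
# definition is not shown here. Below is a minimal sketch of such a test
# double, inferred from how the tests use it; the flush() entry point and the
# Event-based synchronization are assumptions for illustration, not the
# project's actual implementation.
from threading import Event


class StubDestination(object):
    def __init__(self):
        self.metrics = []  # flushed (name, value, timestamp) tuples
        self.expected_count = 0
        self._expected_reached = Event()

    def flush(self, metrics):
        # record whatever the processor flushes, preserving order
        self.metrics.extend(metrics)
        if len(self.metrics) >= self.expected_count:
            self._expected_reached.set()

    def wait_until_expected_count_items(self, timeout=None):
        # block the calling test until expected_count metrics have arrived
        self._expected_reached.wait(timeout)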
def test_process(self):
    expected_flushed_metrics_count = 2
    metrics = (
        Counter('user.jump', 2),
        Set('username', 'navdoon'),
        Set('username', 'navdoon.test'),
        Counter('user.jump', 4),
        Set('username', 'navdoon'),
        Counter('user.jump', -1),
    )
    queue_ = Queue()
    destination = StubDestination()
    destination.expected_count = expected_flushed_metrics_count
    processor = QueueProcessor(queue_)
    processor.set_destinations([destination])
    processor.init_destinations()
    process_thread = Thread(target=processor.process)
    process_thread.start()
    processor.wait_until_processing(5)
    for metric in metrics:
        queue_.put(metric.to_request())
    destination.wait_until_expected_count_items(5)
    processor.shutdown()
    processor.wait_until_shutdown(5)
    self.assertEqual(expected_flushed_metrics_count, len(destination.metrics))
    self.assertEqual(('user.jump', 5), destination.metrics[0][:2])
    self.assertEqual(('username', 2), destination.metrics[1][:2])
def test_process_stops_on_stop_token_in_queue(self):
    token = 'STOP'
    expected_flushed_metrics_count = 2
    metrics = (
        Counter('user.login', 1),
        Set('username', 'navdoon'),
        Counter('user.login', 3),
        token,
        Counter('user.login', -1),
        Counter('user.logout', 1),
    )
    queue_ = Queue()
    destination = StubDestination()
    destination.expected_count = expected_flushed_metrics_count
    processor = QueueProcessor(queue_)
    processor.flush_interval = 2
    processor.stop_process_token = token
    processor.set_destinations([destination])
    process_thread = Thread(target=processor.process)
    process_thread.start()
    processor.wait_until_processing(5)
    for metric in metrics:
        if metric is token:
            # make sure one flush happens before the token is queued
            sleep(processor.flush_interval)
            request = metric
        else:
            request = metric.to_request()
        queue_.put(request)
    # make sure the processor has processed the queue
    processor.wait_until_shutdown(5)
    self.assertFalse(processor.is_processing())
    destination.wait_until_expected_count_items(10)
    self.assertEqual(expected_flushed_metrics_count, len(destination.metrics))
    self.assertEqual(('user.login', 4), destination.metrics[0][:2])
    self.assertEqual(('username', 1), destination.metrics[1][:2])
def test_set_the_queue(self):
    queue_ = Queue()
    processor = QueueProcessor(queue_)
    self.assertEqual(queue_, processor.queue)
    next_queue = Queue()
    processor.queue = next_queue
    self.assertEqual(next_queue, processor.queue)
def __init__(self, size):
    # type: (int) -> None
    LoggerMixIn.__init__(self)
    self._size = int(size)  # type: int
    self._threads = []  # type: List[Thread]
    self._queue = Queue()  # type: Queue
    self._queue_lock = RLock()  # type: RLock
    self._task_counter = 0  # type: int
    self._task_results = dict()  # type: Dict[int, Any]
    self._stop_event = Event()  # type: Event
    self.log_signature = "threadpool "  # type: str
def test_set_flush_interval_accepts_positive_numbers(self):
    processor = QueueProcessor(Queue())
    processor.flush_interval = 103
    self.assertEqual(103, processor.flush_interval)
    processor.flush_interval = 0.58
    self.assertEqual(0.58, processor.flush_interval)
    processor.flush_interval = '3.4'
    self.assertEqual(3.4, processor.flush_interval)
def test_clear_destinations(self):
    destination = StubDestination()
    queue_ = Queue()
    processor = QueueProcessor(queue_)
    processor.set_destinations([destination])
    self.assertEqual([destination], processor._destinations)
    processor.clear_destinations()
    self.assertEqual([], processor._destinations)
def test_get_set_queue(self):
    def set_queue(queue_):
        self.server.queue = queue_

    self.assertRaises(ValueError, set_queue, "not a queue")
    queue = Queue()
    self.server.queue = queue
    self.assertEqual(queue, self.server.queue)
def test_set_flush_interval_fails_on_not_positive_numbers(self):
    processor = QueueProcessor(Queue())

    def set_interval(value):
        processor.flush_interval = value

    self.assertRaises(ValueError, set_interval, 0)
    self.assertRaises(ValueError, set_interval, -10)
    self.assertRaises(ValueError, set_interval, "not a number")
def test_queue_can_only_change_when_not_processing(self):
    processor = QueueProcessor(Queue())
    orig_queue = processor.queue
    process_thread = Thread(target=processor.process)
    process_thread.start()
    processor.wait_until_processing(5)

    def set_queue(new_queue):
        processor.queue = new_queue

    new_queue = Queue()
    self.assertRaises(Exception, set_queue, new_queue)
    self.assertEqual(orig_queue, processor.queue)
    processor.shutdown()
    processor.wait_until_shutdown(5)
    processor.queue = new_queue
    self.assertEqual(new_queue, processor.queue)
def test_processor_does_not_accept_invalid_queue(self):
    self.assertRaises(ValueError, QueueProcessor, "not a queue")
    self.assertRaises(ValueError, QueueProcessor, 100)
    processor = QueueProcessor(Queue())

    def set_queue(value):
        processor.queue = value

    self.assertRaises(ValueError, set_queue, "not a queue")
    self.assertRaises(ValueError, set_queue, 100)
def test_destinations_can_change_when_queue_processor_is_running(self):
    processor = QueueProcessor(Queue())
    try:
        destinations = [StubDestination()]
        processor.set_destinations(destinations)
        processor_thread = Thread(target=processor.process)
        processor_thread.start()
        processor.wait_until_processing(10)
        self.assertTrue(processor.is_processing())
        new_destinations = [StubDestination()]
        processor.set_destinations(new_destinations)
        self.assertEqual(new_destinations, processor.get_destinations())
        self.assertTrue(processor.is_processing())
    finally:
        processor.shutdown()
def test_continues_processing_after_reload(self):
    metrics = (Counter('user.login', 1),
               Set('username', 'navdoon'),
               Counter('user.login', 3))
    queue_ = Queue()
    destination = StubDestination()
    destination.expected_count = 1
    processor = QueueProcessor(queue_)
    processor.flush_interval = 1
    processor.set_destinations([destination])
    process_thread = Thread(target=processor.process)
    process_thread.start()
    processor.wait_until_processing(5)
    expected_flushed_metrics2 = 2
    destination2 = StubDestination()
    destination2.expected_count = expected_flushed_metrics2
    processor.set_destinations([destination2])
    for metric in metrics:
        queue_.put(metric.to_request())
    destination.wait_until_expected_count_items(5)
    processor.shutdown()
    process_thread.join(5)
    self.assertFalse(processor.is_processing())
    self.assertGreaterEqual(len(destination.metrics), 1)
    self.assertLessEqual(queue_.qsize(), 2)
    for metric in metrics:
        queue_.put(metric.to_request())
    self.assertGreaterEqual(queue_.qsize(), len(metrics))
    resume_process_thread = Thread(target=processor.process)
    resume_process_thread.start()
    processor.wait_until_processing(5)
    self.assertTrue(processor.is_processing())
    self.assertEqual(processor.get_destinations(), [destination2])
    destination2.wait_until_expected_count_items(5)
    processor.shutdown()
    resume_process_thread.join(5)
    self.assertGreaterEqual(len(destination2.metrics), expected_flushed_metrics2)
def init_destinations(self):
    # type: () -> None
    self._log_debug("initializing destinations ...")
    # block waiters until the flush threads are (re)created; the original
    # called set() here, which would release waiters before teardown
    self._flush_threads_initialized.clear()
    self._stop_flush_threads()
    self._clear_flush_threads()
    self._log_debug("initializing {} destination threads ...".format(
        len(self._destinations)))
    for destination in self._destinations:
        queue_ = Queue()  # type: Queue
        flush_thread = Thread(
            target=self._flush_metrics_queue_to_destination,
            args=(queue_, destination))
        flush_thread.setDaemon(True)
        flush_thread.start()
        self._flush_queues.append(queue_)
        self._flush_threads.append(flush_thread)
    self._flush_threads_initialized.set()
    self._log_debug("initialized {} destination threads".format(
        len(self._flush_threads)))
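# --- Note: a hypothetical sketch of the flush worker that each thread
# started above would run; the real _flush_metrics_queue_to_destination
# body is not shown in this section. The Empty handling and the
# _should_stop_flushing() stop check are assumed names for illustration.
from queue import Empty


def _flush_metrics_queue_to_destination(self, queue_, destination):
    while not self._should_stop_flushing():  # assumed stop condition
        try:
            metrics = queue_.get(timeout=1)  # wait briefly for a batch
        except Empty:
            continue
        try:
            destination.flush(metrics)  # hand the batch downstream
        finally:
            queue_.task_done()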
def __init__(self):
    self._queue = Queue()  # type: Queue
def initialize(self):
    # type: () -> ThreadPool
    self._queue = Queue()
    self._create_worker_threads()
    self._start_worker_threads()
    return self
class ThreadPool(LoggerMixIn):
    def __init__(self, size):
        # type: (int) -> None
        LoggerMixIn.__init__(self)
        self._size = int(size)  # type: int
        self._threads = []  # type: List[Thread]
        self._queue = Queue()  # type: Queue
        self._queue_lock = RLock()  # type: RLock
        self._task_counter = 0  # type: int
        self._task_results = dict()  # type: Dict[int, Any]
        self._stop_event = Event()  # type: Event
        self.log_signature = "threadpool "  # type: str

    def __del__(self):
        # type: () -> None
        if not self._stop_event.is_set():
            self.stop()

    @property
    def size(self):
        # type: () -> int
        return self._size

    @property
    def threads(self):
        # type: () -> List[Thread]
        return self._threads

    def initialize(self):
        # type: () -> ThreadPool
        self._queue = Queue()
        self._create_worker_threads()
        self._start_worker_threads()
        return self

    def do(self, func, *args, **kwargs):
        # type: (Callable, *Any, **Any) -> int
        if self._stop_event.is_set():
            raise Exception("Task thread pool has stopped")
        with self._queue_lock:
            task_id = self._task_counter
            self._task_counter += 1
            self._handle_task(task_id, func, args, kwargs)
        return task_id

    def is_done(self):
        # type: () -> bool
        with self._queue_lock:
            is_done = self._queue.empty()
        return is_done

    def wait_until_done(self):
        # type: () -> ThreadPool
        self._queue.join()
        return self

    def stop(self, wait=True, timeout=None):
        # type: (bool, float) -> None
        self._stop_event.set()
        if wait:
            num_threads = len(self._threads)
            self._log_debug(
                "joining {} worker threads ...".format(num_threads))
            start_time = time()
            counter = 0
            for thread in self._threads:
                counter += 1
                self._log_debug("joining thread {} ...".format(counter))
                thread.join(timeout)
                if timeout is None:
                    continue
                elif time() - start_time > timeout:
                    raise Exception("Stopping thread pool timed out")
            self._log_debug("joined {} worker threads".format(num_threads))
        self._threads = []

    def get_result(self, task_id):
        # type: (int) -> Any
        if task_id not in self._task_results:
            raise ValueError(
                "No results found for task id '{}'".format(task_id))
        return self._task_results[task_id]

    def _handle_task(self, task_id, func, args, kwargs):
        # type: (int, Callable, Sequence[Any], Dict[Any, Any]) -> None
        self._queue.put((task_id, func, args, kwargs))

    def _create_worker_threads(self):
        # type: () -> None
        for i in range(self._size):
            worker = WorkerThread(self._queue, self._stop_event,
                                  self._task_results)
            self._threads.append(worker)

    def _start_worker_threads(self):
        # type: () -> ThreadPool
        for thread in self._threads:
            thread.start()
        self._stop_event.clear()
        return self
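# --- Note: a short usage sketch of the pool's public API, assuming the
# WorkerThread class (not shown here) pops (task_id, func, args, kwargs)
# tuples off the shared queue and stores each return value in the results
# dict keyed by task id.
pool = ThreadPool(4).initialize()
task_id = pool.do(pow, 2, 10)    # schedule pow(2, 10) on a worker
pool.wait_until_done()           # block until the task queue drains
print(pool.get_result(task_id))  # -> 1024
pool.stop()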
def test_set_destinations_when_not_started(self):
    processor = QueueProcessor(Queue())
    destinations = [StubDestination()]
    processor.set_destinations(destinations)
    self.assertEqual(destinations, processor._destinations)
    self.assertFalse(processor.is_processing())
def test_set_destination_fails_on_invalid_destination(self):
    processor = QueueProcessor(Queue())
    self.assertRaises(ValueError, processor.set_destinations,
                      "not a destination")