def test_thread_pool_join_futures_timeout(self):
    """Thread Pool TimeoutError is raised if join on long futures."""
    pool = ThreadPool(max_workers=1)
    for _ in range(2):
        pool.schedule(long_function)
    pool.close()
    self.assertRaises(TimeoutError, pool.join, 0.4)
    pool.stop()
    pool.join()
def test_thread_pool_join_futures_timeout(self):
    """Thread Pool TimeoutError is raised if join on long futures."""
    pool = ThreadPool()
    for _ in range(2):
        pool.schedule(long_function)
    pool.close()
    self.assertRaises(TimeoutError, pool.join, 0.4)
    pool.stop()
    pool.join()
def test_thread_pool_join_workers(self):
    """Thread Pool no worker is running after join."""
    pool = ThreadPool(max_workers=4)
    pool.schedule(function, args=[1])
    pool.stop()
    pool.join()
    self.assertEqual(len(pool._pool_manager.workers), 0)
def test_thread_pool_stop_stopped(self):
    """Thread Pool is stopped after stop."""
    pool = ThreadPool(max_workers=1)
    pool.schedule(function, args=[1])
    pool.stop()
    pool.join()
    self.assertFalse(pool.active)
def test_thread_pool_close_stopped(self):
    """Thread Pool is stopped after close."""
    pool = ThreadPool()
    pool.schedule(function, args=[1])
    pool.close()
    pool.join()
    self.assertFalse(pool.active)
def test_thread_pool_tasks_limit(self):
    """Thread Pool future limit is honored."""
    futures = []
    with ThreadPool(max_workers=1, max_tasks=2) as pool:
        for _ in range(0, 4):
            futures.append(pool.schedule(tid_function))
    self.assertEqual(len(set([t.result() for t in futures])), 2)
def test_thread_pool_single_future(self):
    """Thread Pool single future."""
    with ThreadPool(max_workers=1) as pool:
        future = pool.schedule(function, args=[1],
                               kwargs={'keyword_argument': 1})
        self.assertEqual(future.result(), 2)
def test_thread_pool_multiple_futures(self):
    """Thread Pool multiple futures."""
    futures = []
    with ThreadPool(max_workers=1) as pool:
        for _ in range(5):
            futures.append(pool.schedule(function, args=[1]))
    self.assertEqual(sum([t.result() for t in futures]), 5)
def test_thread_pool_error_callback(self):
    """Thread Pool errors are forwarded to callback."""
    with ThreadPool(max_workers=1) as pool:
        future = pool.schedule(error_function)
        future.add_done_callback(self.callback)
    self.event.wait()
    self.assertTrue(isinstance(self.exception, Exception))
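# The callback-based tests rely on a fixture (self.event, self.results,
# self.exception, self.callback) that is not shown here. A minimal sketch of what
# such a TestCase fixture might look like, inferred only from how the tests use it:
import threading
import unittest
from concurrent.futures import CancelledError


class PoolCallbackTestCase(unittest.TestCase):
    def setUp(self):
        self.event = threading.Event()
        self.results = None
        self.exception = None

    def callback(self, future):
        try:
            self.results = future.result()
        except (CancelledError, Exception) as error:
            # CancelledError is not an Exception subclass on Python 3.8+,
            # so it is listed explicitly.
            self.exception = error
        finally:
            self.event.set()  # unblocks self.event.wait() in the test body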
def test_thread_pool_different_thread(self):
    """Thread Pool multiple futures are handled by different threads."""
    futures = []
    with ThreadPool(max_workers=2) as pool:
        for _ in range(0, 5):
            futures.append(pool.schedule(tid_function))
    self.assertEqual(len(set([t.result() for t in futures])), 2)
def test_thread_pool_broken_initializer(self):
    """Thread Pool broken initializer is notified."""
    with self.assertRaises(RuntimeError):
        with ThreadPool(initializer=broken_initializer) as pool:
            pool.active
            time.sleep(0.3)
            pool.schedule(function)
def test_thread_pool_map_empty(self):
    """Thread Pool map no elements."""
    elements = []
    with ThreadPool() as pool:
        future = pool.map(function, elements)
        generator = future.result()
        self.assertEqual(list(generator), elements)
def test_thread_pool_map_one_chunk(self):
    """Thread Pool map chunksize 1."""
    elements = [1, 2, 3]
    with ThreadPool(max_workers=1) as pool:
        future = pool.map(function, elements, chunksize=1)
        generator = future.result()
        self.assertEqual(list(generator), elements)
def test_thread_pool_map_multi(self):
    """Thread Pool map multiple iterables."""
    expected = (2, 4)
    with ThreadPool(max_workers=1) as pool:
        future = pool.map(function, (1, 2, 3), (1, 2))
        generator = future.result()
        self.assertEqual(tuple(generator), expected)
def test_thread_pool_map_single(self):
    """Thread Pool map one element."""
    elements = [0]
    with ThreadPool(max_workers=1) as pool:
        future = pool.map(function, elements)
        generator = future.result()
        self.assertEqual(list(generator), elements)
def test_thread_pool_stop_stopped(self):
    """Thread Pool is stopped after stop."""
    pool = ThreadPool()
    pool.schedule(function, args=[1])
    pool.stop()
    pool.join()
    self.assertFalse(pool.active)
def test_thread_pool_close_stopped(self):
    """Thread Pool is stopped after close."""
    pool = ThreadPool(max_workers=1)
    pool.schedule(function, args=[1])
    pool.close()
    pool.join()
    self.assertFalse(pool.active)
def test_thread_pool_callback(self):
    """Thread Pool results are forwarded to the callback."""
    with ThreadPool(max_workers=1) as pool:
        future = pool.schedule(
            function, args=[1], kwargs={'keyword_argument': 1})
        future.add_done_callback(self.callback)
    self.event.wait()
    self.assertEqual(self.results, 2)
def test_thread_pool_stop_stopped_function(self):
    """Thread Pool is stopped in function."""
    with ThreadPool(max_workers=1) as pool:
        def function():
            pool.stop()

        pool.schedule(function)
    self.assertFalse(pool.active)
def test_thread_pool_cancel_callback(self):
    """Thread Pool FutureCancelled is forwarded to callback."""
    with ThreadPool(max_workers=1) as pool:
        pool.schedule(long_function)
        future = pool.schedule(long_function)
        future.add_done_callback(self.callback)
        future.cancel()
    self.event.wait()
    self.assertTrue(isinstance(self.exception, CancelledError))
def test_thread_pool_close_futures(self):
    """Thread Pool all futures are performed on close."""
    futures = []
    pool = ThreadPool(max_workers=1)
    for index in range(10):
        futures.append(pool.schedule(function, args=[index]))
    pool.close()
    pool.join()
    # A plain for loop instead of map(): map() is lazy in Python 3,
    # so the assertions would never actually run.
    for future in futures:
        self.assertTrue(future.done())
def test_thread_pool_stop_futures(self):
    """Thread Pool not all futures are performed on stop."""
    futures = []
    pool = ThreadPool(max_workers=1)
    for index in range(10):
        futures.append(pool.schedule(long_function, args=[index]))
    pool.stop()
    pool.join()
    self.assertTrue(len([t for t in futures if not t.done()]) > 0)
def test_thread_pool_exception_isolated(self):
    """Thread Pool an Exception does not affect other futures."""
    with ThreadPool(max_workers=1) as pool:
        future = pool.schedule(error_function)
        try:
            future.result()
        except Exception:
            pass
        future = pool.schedule(function, args=[1],
                               kwargs={'keyword_argument': 1})
        self.assertEqual(future.result(), 2)
def find_tlds(self):
    dom_list = [self.known_domain + '.' + tld for tld in self.tld_list]
    try:
        pool = ThreadPool(max_workers=self.max_workers,
                          max_tasks=self.max_tasks)
        results = pool.map(self.check_tld, dom_list, timeout=self.timeout)
        pool.close()
        pool.join()
        # `results` is the MapFuture returned by pool.map(); the per-domain
        # values come from iterating results.result().
        print(results)
    except Exception as e:
        print(repr(e))
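# For reference, a minimal sketch of how the MapFuture returned by pool.map() is
# typically consumed with a per-call timeout. The check_tld stand-in and the domain
# list below are hypothetical, not taken from the snippet above; only pebble's
# ThreadPool.map()/result() behaviour is assumed.
from concurrent.futures import TimeoutError

from pebble import ThreadPool


def check_tld(domain):
    # Hypothetical stand-in: pretend every domain "resolves".
    return domain


domains = ['example.com', 'example.org', 'example.net']

with ThreadPool(max_workers=4) as pool:
    future = pool.map(check_tld, domains, timeout=5)
    iterator = future.result()  # yields one result per input element, in order
    while True:
        try:
            print(next(iterator))
        except TimeoutError:
            print('a check_tld call exceeded the 5 second timeout')
        except StopIteration:
            break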
def test_thread_pool_stop_stopped_callback(self):
    """Thread Pool is stopped in callback."""
    with ThreadPool(max_workers=1) as pool:
        def stop_pool_callback(_):
            pool.stop()

        future = pool.schedule(function, args=[1])
        future.add_done_callback(stop_pool_callback)
        with self.assertRaises(RuntimeError):
            for index in range(10):
                time.sleep(0.1)
                pool.schedule(long_function, args=[index])
    self.assertFalse(pool.active)
def _processing_listener_handler_wrapper(self, job_id, job_type, job_queue, job_data):
    with self.r_conn.lock(self.redis_work_mutex_key_prefix + job_id,
                          timeout=self.redis_work_mutex_key_ttl):
        has_handler_lock = self.r_conn.exists(
            self.redis_handler_mutex_key_prefix + job_id)
        if not has_handler_lock:
            self.r_conn.setex(self.redis_handler_mutex_key_prefix + job_id,
                              self.redis_handler_mutex_key_ttl, job_id)
        else:
            return

    # "<prefix>:registration:codex:<name>" --> ["<prefix>:registration:codex", "<name>"]
    service_name = job_queue.rsplit(":", 1)[-1]
    job_info = self.get_registered_service_details(service_name)  # job registration info
    if not job_info:
        self.r_conn.delete(self.redis_handler_mutex_key_prefix + job_id)
        return

    logger.debug("Received new job to process with id %s by process %s",
                 job_id, os.getpid())
    with ThreadPool(max_workers=1) as executor:
        future = executor.schedule(self.handlers["default"],
                                   args=(job_id, job_type, job_info, job_data))
        try:
            future.result(timeout=self.handler_timeout)
            logger.debug("Job with id %s finished by process %s",
                         job_id, os.getpid())
        except concurrent.futures.TimeoutError:
            logger.warning("Timeout occurred in Job with id %s by process %s",
                           job_id, os.getpid())
            for t in executor._pool_manager.workers:  # type: Thread
                thread_id = t.ident
                if t.is_alive():
                    logger.warning(
                        "Attempting to kill thread with id %s timeout occurred in process %s",
                        thread_id, os.getpid())
                    is_killed = self._thread_killer(thread_id)
                    if is_killed:
                        logger.debug(
                            "Successfully killed thread with id %s in process %s",
                            thread_id, os.getpid())
        finally:
            self.r_conn.delete(self.redis_handler_mutex_key_prefix + job_id)
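# The snippet above calls a self._thread_killer(thread_id) helper whose implementation
# is not shown. A hypothetical sketch of such a helper, assuming it asks CPython to
# raise SystemExit asynchronously in the target thread via ctypes. Note this only
# takes effect when the thread next executes Python bytecode; it cannot interrupt a
# blocking C call.
import ctypes


def _thread_killer(thread_id):
    affected = ctypes.pythonapi.PyThreadState_SetAsyncExc(
        ctypes.c_long(thread_id), ctypes.py_object(SystemExit))
    if affected > 1:
        # More than one thread state was changed: undo and report failure.
        ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(thread_id), None)
        return False
    return affected == 1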
def test_thread_pool_close_futures(self):
    """Thread Pool all futures are performed on close."""
    futures = []
    pool = ThreadPool()
    for index in range(10):
        futures.append(pool.schedule(function, args=[index]))
    pool.close()
    pool.join()
    # A plain for loop instead of map(): map() is lazy in Python 3,
    # so the assertions would never actually run.
    for future in futures:
        self.assertTrue(future.done())
def test_thread_pool_stop_futures(self):
    """Thread Pool not all futures are performed on stop."""
    futures = []
    pool = ThreadPool()
    for index in range(10):
        futures.append(pool.schedule(long_function, args=[index]))
    pool.stop()
    pool.join()
    self.assertTrue(len([t for t in futures if not t.done()]) > 0)
def test_thread_pool_map_broken_pool(self):
    """Thread Pool Fork Broken Pool."""
    elements = [1, 2, 3]
    with ThreadPool(max_workers=1) as pool:
        future = pool.map(long_function, elements, timeout=1)
        generator = future.result()
        pool._context.state = ERROR
        while True:
            try:
                next(generator)
            except TimeoutError as error:
                self.assertFalse(pool.active)
                future.cancel()
                break
            except StopIteration:
                break
def test_thread_pool_map_error(self):
    """Thread Pool errors do not stop the iteration."""
    raised = None
    elements = [1, 'a', 3]
    with ThreadPool(max_workers=1) as pool:
        future = pool.map(function, elements)
        generator = future.result()
        while True:
            try:
                next(generator)
            except TypeError as error:
                raised = error
            except StopIteration:
                break
    self.assertTrue(isinstance(raised, TypeError))
def test_thread_pool_map_cancel(self):
    """Thread Pool cancel iteration."""
    with ThreadPool(max_workers=1) as pool:
        future = pool.map(long_function, range(5))
        generator = future.result()

        self.assertEqual(next(generator), 0)

        future.cancel()

        # either gets computed or it gets cancelled
        try:
            self.assertEqual(next(generator), 1)
        except CancelledError:
            pass

        for _ in range(3):
            with self.assertRaises(CancelledError):
                next(generator)
def get_new_ids(known_ids, max_ids=None):
    """Crawl extension ids available in Chrome store."""
    shard_urls = [
        shard_elem.text for shard_elem in get_inner_elems(
            requests.get(config.const_sitemap_url(), timeout=10).text)
    ]
    with ThreadPool(16) as pool:
        future = pool.map(process_shard, shard_urls, chunksize=1)
        iterator = future.result()
        returned_ids = 0
        while True:
            try:
                for extid in next(iterator):
                    if extid not in known_ids:
                        yield extid
                        returned_ids += 1
                        if max_ids is not None and returned_ids >= max_ids:
                            pool.stop()
                            return
            except StopIteration:
                return
def test_thread_pool_error(self):
    """Thread Pool errors are raised by future get."""
    with ThreadPool(max_workers=1) as pool:
        future = pool.schedule(error_function)
        with self.assertRaises(Exception):
            future.result()
def test_thread_pool_map_zero_chunk(self):
    """Thread Pool map chunksize 0."""
    with ThreadPool(max_workers=1) as pool:
        with self.assertRaises(ValueError):
            pool.map(function, [], chunksize=0)
def test_thread_pool_join_running(self):
    """Thread Pool RuntimeError is raised if active pool joined."""
    with ThreadPool(max_workers=1) as pool:
        pool.schedule(function, args=[1])
        self.assertRaises(RuntimeError, pool.join)