def test_join(self):
    """join times out while a worker is alive and succeeds after shutdown."""
    pool = executors.Executor(1)
    # The worker thread is still running, so a short join must time out.
    with self.assertRaises(futures.Timeout):
        pool.join(timeout=0.01)
    # No worker has exited yet; every stub future is still pending.
    for worker_stub in pool.stubs:
        self.assertFalse(worker_stub.future.is_completed())
    pool.shutdown()
    # After shutdown the worker exits and join completes within the timeout.
    pool.join(timeout=0.01)
    for worker_stub in pool.stubs:
        self.assertTrue(worker_stub.future.is_completed())
def test_shutdown_graceful(self):
    # Graceful shutdown drops no queued tasks; workers blocked inside a
    # running task keep running, and join times out until they finish.
    executor = executors.Executor(4)
    event1 = threading.Event()
    event2 = threading.Event()
    try:
        # The two func tasks plus the main thread meet at this barrier,
        # guaranteeing both are running on workers before we assert.
        start_barrier = threading.Barrier(3)

        def func():
            start_barrier.wait()
            event1.wait()

        f1 = executor.submit(func)
        f2 = executor.submit(func)
        f3 = executor.submit(event2.wait)
        start_barrier.wait()
        # No worker thread has exited; all stub futures are pending.
        for stub in executor.stubs:
            self.assertFalse(stub.future.is_completed())
        event2.set()
        # event2.wait returns True once the event is set.
        self.assertTrue(f3.get_result(timeout=1))
        with self.assertLogs(executors.__name__) as cm:
            items = executor.shutdown(graceful=True)
            # Two workers are still blocked on event1, so join cannot
            # finish within the timeout; the failed join is what emits
            # the "not join 2 executor" log line asserted below.
            with self.assertRaises(futures.Timeout):
                executor.join(timeout=0.001)
        self.assertEqual(len(cm.output), 1)
        self.assertRegex(cm.output[0], r'not join 2 executor')
        self.assertFalse(f1.is_completed())
        self.assertFalse(f2.is_completed())
        # Graceful shutdown returns no dropped queue items.
        self.assertEqual(items, [])
        # Of the four workers, the two idle ones exited on shutdown
        # (stub futures completed); the two running func are still
        # pending — hence a 2/2 split.
        counts = {True: 0, False: 0}
        for stub in executor.stubs:
            counts[stub.future.is_completed()] += 1
        self.assertEqual(counts, {True: 2, False: 2})
        event1.set()
        self.assertIsNone(f1.get_result(timeout=1))
        self.assertIsNone(f2.get_result(timeout=1))
        for stub in executor.stubs:
            self.assertTrue(stub.future.is_completed())
    finally:
        # Unblock everything so the executor can be torn down even if
        # an assertion above failed.
        event1.set()
        event2.set()
        executor.shutdown()
        executor.join()
def test_executor(self):
    """Exercise the basic submit/get_result lifecycle of an executor."""
    with executors.Executor(3) as pool:
        self.assertEqual(len(pool.stubs), 3)
        # A successful task propagates its return value.
        self.assertEqual(pool.submit(inc, 1).get_result(), 2)
        # A failing task re-raises its exception from get_result.
        failing = pool.submit(inc, 'x')
        with self.assertRaises(TypeError):
            failing.get_result()
    # Leaving the context closes the task queue: further submits fail.
    with self.assertRaises(queues.Closed):
        pool.submit(inc, 1)
    # Every worker thread has exited by now.
    for stub in pool.stubs:
        self.assertTrue(stub.future.is_completed())
def test_fifo(self):
    """Tasks submitted to a single worker run in submission order."""
    recorded = []
    barrier = threading.Barrier(2)
    submission_order = (0, 5, 2, 3, 4, 1)
    with executors.Executor(1) as pool:
        # Park the single worker on the barrier so that every append
        # task below is queued before any of them can run.
        pool.submit(barrier.wait)
        pending = [
            pool.submit(recorded.append, value)
            for value in submission_order
        ]
        barrier.wait()
        for future in pending:
            future.get_result()
    self.assertEqual(recorded, [0, 5, 2, 3, 4, 1])
def test_shutdown_not_graceful(self):
    # Non-graceful shutdown drops tasks still waiting in the queue but
    # does not interrupt tasks already running on a worker.
    executor = executors.Executor(2)
    event = threading.Event()
    try:
        # Both func tasks plus the main thread meet at this barrier,
        # guaranteeing both workers are busy before we assert.
        start_barrier = threading.Barrier(3)

        def func():
            start_barrier.wait()
            event.wait()

        # f1 and f2 occupy both workers; f3 stays queued.
        f1 = executor.submit(func)
        f2 = executor.submit(func)
        f3 = executor.submit(event.wait)
        start_barrier.wait()
        for stub in executor.stubs:
            self.assertFalse(stub.future.is_completed())
        with self.assertLogs(executors.__name__) as cm:
            # Non-graceful shutdown returns the dropped queue items and
            # logs how many tasks it dropped (the queued f3).
            items = executor.shutdown(graceful=False)
        self.assertEqual(len(cm.output), 1)
        self.assertRegex(cm.output[0], r'drop 1 tasks')
        self.assertFalse(f1.is_completed())
        self.assertFalse(f2.is_completed())
        self.assertFalse(f3.is_completed())
        # Each dropped item still references its (never-run) future.
        self.assertEqual([m.future for m in items], [f3])
        event.set()
        self.assertIsNone(f1.get_result(timeout=1))
        self.assertIsNone(f2.get_result(timeout=1))
        for stub in executor.stubs:
            self.assertTrue(stub.future.is_completed())
    finally:
        # Unblock the workers so teardown succeeds even on failure.
        event.set()
        executor.shutdown()
        executor.join()
def __init__(
    self,
    *,
    executor=None,
    num_pools=0,
    num_connections_per_pool=0,
):
    """Create an HTTP client session.

    executor: executor that runs the requests; when omitted, a
        daemonic one is created internally.
    num_pools: number of connection pools (requests'
        ``pool_connections``); 0 keeps the requests default.
    num_connections_per_pool: max connections per pool (requests'
        ``pool_maxsize``); 0 keeps the requests default.
    """
    # If you do not provide an executor, I will just make one for
    # myself, but to save you the effort to shut down the executor,
    # I will also make it daemonic.  This is mostly fine since if
    # the process is exiting, you probably do not care much about
    # unfinished HTTP requests in the executor (if it is not fine,
    # you may always provide an executor to me, and properly shut it
    # down on process exit).
    self._executor = executor or executors.Executor(daemon=True)
    self._session = requests.Session()
    adapter_kwargs = {}
    if num_pools > 0:
        adapter_kwargs['pool_connections'] = num_pools
    if num_connections_per_pool > 0:
        # BUG FIX: this previously assigned num_pools here, silently
        # ignoring the num_connections_per_pool argument.
        adapter_kwargs['pool_maxsize'] = num_connections_per_pool
    if adapter_kwargs:
        LOG.info(
            'config session: num_pools=%d num_connections_per_pool=%d',
            num_pools, num_connections_per_pool,
        )
        # Mount one adapter per scheme so both share the pool config.
        self._session.mount(
            'https://', requests.adapters.HTTPAdapter(**adapter_kwargs))
        self._session.mount(
            'http://', requests.adapters.HTTPAdapter(**adapter_kwargs))
    # Make all connections share one SSL context to reduce memory
    # footprint.
    (self._session.get_adapter('https://').poolmanager
        .connection_pool_kw['ssl_context']) = self._SSL_CONTEXT
def test_del_not_resurrecting(self):
    """Dropping the last reference must not resurrect the executor."""

    def make_executor():
        return executors.Executor(1)

    tests.assert_del_not_resurrecting(self, make_executor)
def setUpClass(cls):
    # Share one single-threaded executor across all tests in this
    # class.  NOTE(review): other call sites construct the executor
    # positionally as Executor(1); confirm the keyword is still named
    # max_executors in the executors module.
    cls.executor = executors.Executor(max_executors=1)