def __init__(self, server_node, iface_cls, config):
    """Initialize a connection pool from a "host:port" node string.

    Pool size, timeouts and the serializer flag are read from the config
    section named after this module; the semaphore/queue primitives are
    picked by the module-level ASYNC_TAG flag (gevent vs. threading).
    """
    self._section_name = utils.get_module(__name__)
    self._logger = logging.getLogger(__name__)
    # Split once; [0]/[1] keeps the original behavior for odd inputs
    # (extra colons are ignored, a missing colon raises IndexError).
    node_parts = server_node.split(":")
    self._host = node_parts[0]
    self._port = int(node_parts[1])
    self._iface_cls = iface_cls
    self._get_conn_timeout = config.getint(
        self._section_name, "pool_timeout",
        default=settings.DEFAULT_POOL_TIMEOUT)
    # request_timeout is configured in seconds; the socket layer wants ms.
    self._socket_timeout = config.getint(
        self._section_name, "request_timeout",
        default=settings.DEFAULT_REQUEST_TIMEOUT) * 1000
    self._size = config.getint(
        self._section_name, "pool_size",
        default=settings.DEFAULT_POOL_SIZE)
    self._c_module_serialize = config.getboolean(
        self._section_name, "c_module_serialize",
        default=settings.USE_C_MODULE_SERIALIZE)
    self._closed = False
    # Both branches bind the same two names; the pool wiring below is shared.
    if ASYNC_TAG:
        from gevent.lock import BoundedSemaphore
        from gevent import queue as Queue
    else:
        from threading import BoundedSemaphore
        import Queue  # Python 2 stdlib queue module
    self._semaphore = BoundedSemaphore(self._size)
    self._connection_queue = Queue.LifoQueue(self._size)
    self._QueueEmpty = Queue.Empty
def __init__(self, host, port, iface_cls, size=DEFAULT_POOL_SIZE, asyn=False,
             network_timeout=DEFAULT_NETWORK_TIMEOUT):
    """Initialize a client pool for ``host:port``.

    ``asyn=True`` selects gevent primitives (with a fallback for
    gevent < 1.0); otherwise plain threading/queue is used.
    """
    self.host = host
    self.port = port
    self.iface_cls = iface_cls
    self.network_timeout = network_timeout
    self.size = size
    self._closed = False
    self._async = asyn

    if not self._async:
        # Synchronous mode: plain stdlib primitives.
        import threading
        import queue
        self._semaphore = threading.BoundedSemaphore(size)
        self._connection_queue = queue.LifoQueue(size)
        self._QueueEmpty = queue.Empty
        return

    import gevent.queue
    try:
        from gevent import lock as glock
    except ImportError:
        # gevent < 1.0 shipped its semaphores under gevent.coros
        from gevent import coros as glock
    self._semaphore = glock.BoundedSemaphore(size)
    self._connection_queue = gevent.queue.LifoQueue(size)
    self._QueueEmpty = gevent.queue.Empty
def async_get(url=None, urls=None, num_threads=10, cb=None, post=False,
              depth=False, **kwargs):
    """Download URLs concurrently with a gevent worker pool.

    ``depth=True`` uses a FIFO queue, otherwise LIFO.  ``cb(D, url, html)``
    may return further URLs to enqueue.  Blocks until every queued URL has
    been fetched and no worker remains active.
    """
    import gevent
    from gevent import monkey, queue, event, pool
    monkey.patch_all()

    def dispatch():
        """Coordinate downloading in greenlet threads.

        When the worker pool fills up the dispatcher blocks on spawn().
        If the job queue is empty and no workers are active the pool is
        drained and the loop ends.
        """
        while True:
            # Reap dead greenlets so free_count() reflects reality.
            for greenlet in list(fetchers):
                if greenlet.dead:
                    fetchers.discard(greenlet)
            try:
                target = pending.get_nowait()
            except queue.Empty:
                if fetchers.free_count() == fetchers.size:
                    # No urls remaining and no workers left: shut down.
                    fetchers.join()
                    return True
                # Active workers may still enqueue more; wait for one to end.
                fetcher_done.wait()
                fetcher_done.clear()
            else:
                # Spawn a worker for this url (blocks while the pool is full).
                fetchers.spawn(fetch, target)

    def fetch(target):
        # Fetch one page, then let the callback enqueue follow-up URLs.
        html = (D.post if post else D.get)(target, **kwargs)
        if cb:
            for follow_up in (cb(D, target, html) or []):
                pending.put(follow_up)
        fetcher_done.set()
        raise gevent.GreenletExit('success')

    # Incoming queue of urls to download.
    pending = queue.Queue() if depth else queue.LifoQueue()
    urls = urls or []
    if url:
        urls.append(url)  # NOTE: deliberately mutates a caller-supplied list
    for target in urls:
        pending.put(target)

    # Async pool with at most this many workers; `fetchers` avoids shadowing
    # the gevent.pool module.
    fetchers = pool.Pool(num_threads)
    fetcher_done = event.Event()
    D = Download(**kwargs)
    gevent.spawn(dispatch).join()
def __init__(self, addresses, size=5):
    """Initialize a socket pool spread over ``addresses``.

    Keeps at most ``size`` sockets in a LIFO queue and tracks unhealthy
    addresses in a semaphore-guarded blacklist.
    """
    self.log = logging.getLogger(__name__)
    self._addresses = addresses
    self.size = size
    self._semaphore = lock.BoundedSemaphore(size)
    self._socket_queue = queue.LifoQueue(size)
    self.connection_timeout = DEFAULT_CONNECTION_TIMEOUT
    self.network_timeout = DEFAULT_NETWORK_TIMEOUT
    self._blacklist = []
    # Single-slot semaphore serializes blacklist mutation.
    self._bl_semaphore = lock.BoundedSemaphore(1)
def __init__(self, connection_class=redis.Connection, max_connections=None,
             **connection_kwargs):
    """Eagerly build ``max_connections`` connections into a LIFO pool.

    Every connection is created up front with ``connection_kwargs`` and
    pushed onto the available queue; ``_in_use_connections`` starts empty.
    """
    self.pid = os.getpid()
    self.connection_class = connection_class
    self.connection_kwargs = connection_kwargs
    self.max_connections = max_connections or 1000  # anything more is useless
    self._connections = []
    self._available_connections = Queue.LifoQueue()
    self._in_use_connections = set()
    # range() instead of Python-2-only xrange(): identical iteration here,
    # and the count is bounded so materializing it on Py2 is harmless.
    for _ in range(self.max_connections):
        connection = self.connection_class(**self.connection_kwargs)
        self._connections.append(connection)
        self._available_connections.put(connection)
def __init__(
    self,
    endpoint: config.EndpointConfiguration,
    size: int = 10,
    max_age: int = 120,
    timeout: float = 1,
    max_connection_attempts: int = 3,
    protocol_factory: TProtocolFactory = _DEFAULT_PROTOCOL_FACTORY,
):
    """Set up a protocol-connection pool of ``size`` lazy slots.

    The LIFO pool is seeded with ``None`` placeholders: each represents a
    slot in which a real connection can be created on demand later.
    """
    self.endpoint = endpoint
    self.size = size
    self.max_age = max_age
    self.timeout = timeout
    self.protocol_factory = protocol_factory
    self.retry_policy = RetryPolicy.new(attempts=max_connection_attempts)
    self.pool: ProtocolPool = queue.LifoQueue()
    for _slot in range(size):
        self.pool.put(None)
def make_put_interrupt(queue):
    """Build a GenericGetTestCase subclass whose wait() blocks on a full put."""

    class TestPutInterrupt(GenericGetTestCase):
        Timeout = Full

        def wait(self, timeout):
            # Fill the queue first so the final put() has to block.
            while not queue.full():
                queue.put(1)
            return queue.put(2, timeout=timeout)

    TestPutInterrupt.__name__ += '_' + queue.__class__.__name__
    return TestPutInterrupt


# Generate one test case per queue flavor and expose it at module level
# so the test runner can discover it by name.
for obj in [
    queue.Queue(1),
    queue.JoinableQueue(1),
    queue.LifoQueue(1),
    queue.PriorityQueue(1),
    queue.Channel(),
]:
    klass = make_put_interrupt(obj)
    globals()[klass.__name__] = klass
del klass, obj

# Remove the abstract base so the runner doesn't collect it directly.
del GenericGetTestCase

if __name__ == '__main__':
    main()
# NOTE(review): the two statements below conclude a class-generation loop
# whose header is outside the visible content (it bound `klass` and
# `queue_type`); confirm their indentation against the full file.
globals()[klass.__name__] = klass
del klass, queue_type


def make_put_interrupt(queue):
    """Build a GenericGetTestCase subclass whose wait() blocks on a full put."""

    class TestPutInterrupt(GenericGetTestCase):
        Timeout = Full

        def wait(self, timeout):
            # Fill the queue first so the final put() has to block.
            while not queue.full():
                queue.put(1)
            return queue.put(2, timeout=timeout)

    TestPutInterrupt.__name__ += '_' + queue.__class__.__name__
    return TestPutInterrupt


# Generate one test case per queue flavor and expose it at module level
# so the test runner can discover it by name.
for obj in [
    queue.Queue(1),
    queue.JoinableQueue(1),
    queue.LifoQueue(1),
    queue.PriorityQueue(1),
    queue.Channel(),
]:
    klass = make_put_interrupt(obj)
    globals()[klass.__name__] = klass
del klass, obj

# Remove the abstract base so the runner doesn't collect it directly.
del GenericGetTestCase

if __name__ == '__main__':
    main()