def get_next(self):
    """
    Get the next URI.

    Only return the next URI if we have waited enough, i.e. the
    politeness delay for the host has expired and the global
    crawl-rate delay has passed.

    :returns: the crawl-URI wrapper for the next URI to fetch.
    :raises Empty: if no URI is due for crawling yet.
    """
    # keep the heap topped up from the backing store
    if self._heap.qsize() < self._heap_min_size:
        self._update_heap()
    if time.time() >= self._next_possible_crawl:
        (next_date, next_uri) = self._heap.get_nowait()
        now = datetime.now(self._timezone)
        # next_date is stored as a UTC timestamp; convert it into the
        # configured timezone before comparing against local "now"
        localized_next_date = self._timezone.fromutc(
            datetime.utcfromtimestamp(next_date))
        if now < localized_next_date:
            # reschedule the uri for crawling
            self._heap.put_nowait((next_date, next_uri))
            raise Empty()
        # enforce the minimum delay between two successive crawls
        self._next_possible_crawl = time.time() + self._min_delay
        return self._crawluri_from_uri(next_uri)
    raise Empty()
def test_get_empty(self, fake_queue):
    """get() keeps retrying through Empty results until a value arrives."""
    # two Empty hits followed by the real item
    fake_queue().get.side_effect = SideEffect([Empty(), Empty(), 123])

    result = RequestQueue(Mock()).get()

    # three underlying reads: two raised Empty, the third succeeded
    self.assertEqual(fake_queue().get.call_count, 3)
    self.assertEqual(result, 123)
def test_get_empty(self, fake_queue):
    """get() retries past Empty results while the cancel flag is unset."""
    canceled = Mock()
    canceled.is_set.return_value = False
    # two Empty hits followed by the real item
    fake_queue().get.side_effect = SideEffect([Empty(), Empty(), 123])

    result = RequestQueue(canceled, Mock()).get()

    # three underlying reads: two raised Empty, the third succeeded
    self.assertEqual(fake_queue().get.call_count, 3)
    self.assertEqual(result, 123)
def get(self, block=True, timeout=None):
    """Pop the next item, optionally blocking until one is available.

    :param block: wait for an item when the queue is empty.
    :param timeout: maximum time in seconds to wait when blocking.
    :raises Empty: if the queue is drained and no more items are
        expected (``self._count`` exhausted), or nothing arrived in
        time.
    """
    # BUG FIX: the original acquired the condition manually and raised
    # Empty() on the first early-exit path WITHOUT releasing it,
    # leaking the lock.  A ``with`` block guarantees release on every
    # exit, including exceptions.
    with self._cond:
        if self.empty():
            if not self._count:
                # no outstanding items -- nothing will ever arrive
                raise Empty()
            elif block:
                self._cond.wait(timeout)
                # re-check after waking: still nothing and no items
                # pending means we timed out for good
                if self.empty() and not self._count:
                    raise Empty()
    return Queue.get(self, block, timeout)
def _brpop_read(self, **options): try: try: dest__item = self.client.parse_response("BRPOP", **options) except self.connection.connection_errors: raise Empty() if dest__item: dest, item = dest__item return deserialize(item), dest else: raise Empty() finally: self._in_poll = False
def get(self, block=True, timeout=None):
    """Return the oldest buffered message, receiving a fresh one from
    the message receiver when the buffer is empty and *block* is true.

    :raises Empty: if the buffer is empty and *block* is false.
    """
    if not self.empty():
        return self._queue.pop(0)
    if not block:
        raise Empty()
    # blocking receive of the next message into the buffer
    self.put(self._message_receiver.receive())
    return self._queue.pop(0)
def get(self):
    """Return the result of a single call of the flow.

    If the flow was called with a single argument -- as in
    flow({'foo': 0, 'bar': 1}) -- that single argument is returned
    (with the side-effects of the flow preserved).

    If the flow was called with multiple arguments -- as in
    flow({'foo': 0}, {'bar': 1}) -- a tuple with those arguments is
    returned (with the side-effects of the flow preserved).

    :raises Empty: once the flow is closed and fully drained.
    """
    # serve from the locally cached batch first
    if self.__resultbatch:
        return self.__oneortuple(self.__resultbatch.pop())
    # Else fetch new data from the queue
    retryingafterclose = False
    while True:
        try:
            tmp = self.__queues[-1].get(True, 0.1)
            self.__queues[-1].task_done()
            # reverse so repeated pop() yields results in order
            tmp.reverse()
            self.__resultbatch = tmp
            return self.__oneortuple(self.__resultbatch.pop())
        except Empty:
            # See explanation in _flowprocess
            if not self.__closed[-1].value:
                # producer still running -- keep polling
                continue
            elif not retryingafterclose:
                # closed, but poll once more to drain a possible
                # last batch that raced with the close flag
                retryingafterclose = True
                continue
            else:
                raise Empty()
def _get(self, queue, **kwargs):
    """Pop the next buffered task for *queue* and return its decoded
    payload.

    Refills the local buffer from the remote taskqueue service when it
    runs dry.

    :raises Empty: if the service has no tasks for *queue*.
    """
    _debug_fun('_get', (queue, ), kwargs)
    # refill the local buffer on demand
    if not self.buffers[queue]:
        self._buffer_tasks(queue)
    if not self.buffers[queue]:
        raise Empty()
    task = self.buffers[queue].pop()
    # task bodies arrive base64-encoded JSON
    payload = json.loads(base64.b64decode(task['payloadBase64']))
    if queue in self._noack_queues:
        # For consumers which don't want to ack we
        # optimistically delete from the queue.
        self.service.tasks().delete(
            project=self._get_project('delete'),
            taskqueue=queue,
            task=task['id'],
        ).execute()
    else:
        # For consumers that do ACK then we need to keep track
        # of the queue name and task id so we can delete it later.
        payload['properties']['delivery_info'].update({
            'taskqueue_task_id': task['id'],
            'taskqueue_queue': queue
        })
    return payload
def _get(self, queue):
    """Pop the next message from *queue*, scanning each priority
    sub-list in order.

    :raises Empty: if every priority sub-list is empty.
    """
    with self.conn_or_acquire() as client:
        for priority in PRIORITY_STEPS:
            payload = client.rpop(self._q_for_pri(queue, priority))
            if payload:
                return loads(payload)
        # every priority step came back empty
        raise Empty()
def get(self, block=True, timeout=None):
    """Retrieve the task from the first available bucket.

    Available as in, there is an item in the queue
    and you can consume tokens from it.

    :raises Empty: if no item is available (non-blocking) or the
        wait timed out.
    """
    time_start = time()
    # NOTE: a timeout of 0 is treated like "no timeout" here because
    # of the truthiness test -- callers rely on passing None/positive
    did_timeout = lambda: timeout and time() - time_start > timeout
    with self.not_empty:
        while True:
            try:
                remaining_time, item = self._get()
            except Empty:
                if not block or did_timeout():
                    raise
                # wait for a producer to signal, then retry
                self.not_empty.wait(timeout)
                continue
            if remaining_time:
                # an item exists but its bucket has no tokens yet
                if not block or did_timeout():
                    raise Empty()
                # sleep until tokens are due (capped by timeout)
                sleep(min(remaining_time, timeout or 1))
            else:
                return item
def get(self, block=True, timeout=DEFAULT_TIME_OUT):
    """
    Return an item whose key is of the highest priority.

    :raises Empty: if no back queue currently holds an item.
    """
    # How fetcher interacts with back queue:
    # Repeat:
    #   (i)  extract current root q of the heap (q is a back queue)
    #   (ii) fetch URL u at head of q ...
    # until we empty the q we get. (i.e.: u was the last URL in q)
    # When we have emptied a back queue q, Repeat:
    #   (i)  pull URLs u from front queues and
    #   (ii) add u to its corresponding back queue ...
    # until we get a u whose host does not have a back queue.
    # Then put u in q and create heap entry for it.
    self._lock.acquire()
    # BUG FIX: the original released the lock manually on each exit
    # path, but both selector.get() and que.get() can raise Empty,
    # which left the lock held forever.  try/finally guarantees the
    # release on every path.
    try:
        if(len(self._map) == 0):
            self._transfer()
        if(self._backQselector.empty()):
            # rebuild the selector heap from the known hosts
            for(p, k) in self._map.iteritems():
                self._backQselector.put(HeapNode(self._priorityFunc(p), p))
        if(self._backQselector.empty()):
            raise Empty()
        key = self._backQselector.get(block, timeout).getValue()  # may raise Empty
        que = self._backQ[self._map[key]]
        item = que.get(block, timeout)
        if(que.empty()):
            # back queue drained: drop its mapping and refill from
            # the front queues
            self._map.pop(self._keyFunc(item))
            self._transfer()
        return item
    finally:
        self._lock.release()
class Channel(virtual.Channel):
    """MongoDB virtual-transport channel with fanout support."""

    _client = None
    supports_fanout = True
    # queue name -> fanout registration (shared, class-level registry)
    _fanout_queues = {}

    def __init__(self, *vargs, **kwargs):
        super_ = super(Channel, self)
        super_.__init__(*vargs, **kwargs)
        # per-fanout-queue tailable cursors and read counters
        self._queue_cursors = {}
        self._queue_readcounts = {}

    def _new_queue(self, queue, **kwargs):
        # queues are created lazily on first use
        pass

    def _get(self, queue):
        """Remove and return the next decoded message on *queue*.

        :raises Empty: if no message is waiting on *queue*.
        """
        try:
            if queue in self._fanout_queues:
                # fanout queues are consumed via a tailable cursor
                msg = self._queue_cursors[queue].next()
                self._queue_readcounts[queue] += 1
                return loads(msg['payload'])
            else:
                msg = self.client.command(
                    'findandmodify', 'messages',
                    query={'queue': queue},
                    sort={'_id': pymongo.ASCENDING}, remove=True,
                )
                # BUG FIX: the original discarded `msg` here and fell
                # off the end returning None, losing the message that
                # findandmodify had just removed from the collection.
                return loads(msg['value']['payload'])
        # BUG FIX: `except errors.OperationFailure, exc` is Python-2-only
        # syntax; `as` is compatible with Python 2.6+ and 3.
        except errors.OperationFailure as exc:
            if 'No matching object found' in exc.args[0]:
                raise Empty()
            raise
        except StopIteration:
            # tailable cursor exhausted -- no fanout message pending
            raise Empty()
def varnish_object_stream_prepare(obj):
    """Open a GET request for *obj* through varnish and return a
    ``(response, connection)`` pair ready for streaming.

    HTTP connections are pooled on a function attribute; entries older
    than 3 seconds are considered stale and replaced.

    :raises cloudfiles.errors.ResponseError: on a non-2xx response.
    """
    obj._name_check()
    host, uri = varnish_rewrite(obj)
    headers = {
        'User-Agent': "swiftrepl",
        'If-Cached': obj.etag
    }
    # lazily create the connection pool on first call
    try:
        varnish_object_stream_prepare.queue
    except AttributeError:
        varnish_object_stream_prepare.queue = LifoQueue(maxsize=256)
    try:
        connection, t = varnish_object_stream_prepare.queue.get(False)
        if time.time() - t > 3:
            # BUG FIX: close the stale connection before discarding it;
            # the original simply dropped it, leaking the socket.
            connection.close()
            raise Empty()
    except Empty:
        connection = httplib.HTTPConnection(host, port=80, timeout=10)
    connection.request('GET', uri, None, headers)
    response = connection.getresponse()
    if response.status < 200 or response.status > 299:
        # drain the body so the connection can be safely reused
        buff = response.read()
        try:
            varnish_object_stream_prepare.queue.put(
                (connection, time.time()), False)
        except Full:
            # BUG FIX: `del connection` only dropped the local name;
            # close() actually releases the socket.
            connection.close()
        raise cloudfiles.errors.ResponseError(response.status,
                                              response.reason)
    return response, connection
def _emulated(self, timeout=None): """Get the next message avaiable in the queue. :returns: The message and the name of the queue it came from as a tuple. :raises Empty: If there are no more items in any of the queues. """ # A set of queues we've already tried. tried = set() while True: # Get the next queue in the cycle, and try to get an item off it. try: queue = self.cycle.next() except StopIteration: raise Empty("No queues registered") try: item = queue.get() except Empty: # raises Empty when we've tried all of them. tried.add(queue.name) if tried == self.all: raise else: return item, queue.name
def get_next_task(self):
    """Return the next task for this Actor to run.

    Implementing classes or mix-ins should override this method; the
    default implementation makes the Actor idle by always raising
    :exc:`Empty`.

    :raises Empty: always -- no next task is available.
    """
    raise Empty()
def get(self, block=True, timeout=None):
    """get an item out of the queue

    .. note::
        if `block` is ``True`` (the default) and the queue is
        :meth:`empty`, this method will block the current coroutine
        until something has been :meth:`put`.

    :param block:
        whether to block if there is no data yet available (default
        ``True``)
    :type block: bool

    :param timeout:
        the maximum time in seconds to block waiting for data. with the
        default of ``None``, can wait indefinitely. this is unused if
        `block` is ``False``.
    :type timeout: int, float or None

    :raises: :class:`Empty` if there is no data in the queue and block
        is ``False``, or `timeout` expires

    :returns: something that was previously :meth:`put` in the queue
    """
    if not self._data:
        if not block:
            raise Empty()
        current = compat.getcurrent()
        # absolute wake deadline (None == wait forever)
        waketime = None if timeout is None else time.time() + timeout
        if timeout is not None:
            scheduler.schedule_at(waketime, current)
        self._waiters.append((current, waketime))
        # yield to the scheduler; resumed either by a put() or by the
        # timer registered above
        scheduler.state.mainloop.switch()
        if timeout is not None:
            # _remove_timer is falsy when the timer already fired,
            # i.e. we woke because the timeout expired, not a put()
            if not scheduler._remove_timer(waketime, current):
                self._waiters.remove((current, waketime))
                raise Empty()
    # taking an item may unblock a producer waiting on a full queue
    if self.full() and self._waiters:
        scheduler.schedule(self._waiters.popleft()[0])
    return self._get()
def get(self, timeout=None):
    """Poll for one event and dispatch it.

    At most one event is handled per call; the rest stay queued in
    the poller.

    :raises Empty: if the poll produced no events within *timeout*.
    """
    self.on_poll_start()
    for fileno, event in self.poller.poll(timeout) or []:
        return self.handle_event(fileno, event)
    raise Empty()
def _get(self, queue, timeout=None): try: return loads(self.client.get(queue, timeout)) except socket.error, exc: if exc.errno == errno.EAGAIN and timeout != 0: raise Empty() else: raise
def _get(self, queue): result = self._query(queue, limit=1) if not result: raise Empty() item = result.rows[0].value self.client.delete(item) return loads(item['payload'])
def deleteMin(self):
    """Remove and return the minimum item of the heap.

    :raises Empty: if the priority queue holds no items.
    """
    if not self._data:
        raise Empty('The Priority Queue is empty.')
    # move the root to the back so a plain pop() removes it
    tail = len(self._data) - 1
    self._swap(0, tail)
    minimum = self._data.pop()
    # restore the heap invariant from the new root downwards
    self._bubbleDown(0)
    return minimum
def _brpop_read(self, **options): try: try: dest__item = self.client.parse_response( self.client.connection, "BRPOP", **options) except self.connection_errors: # if there's a ConnectionError, disconnect so the next # iteration will reconnect automatically. self.client.connection.disconnect() raise Empty() if dest__item: dest, item = dest__item return loads(item), dest else: raise Empty() finally: self._in_poll = False
def _get(self, queue): try: msg = self.client.database.command("findandmodify", "messages", query={"queue": queue}, remove=True) except errors.OperationFailure, exc: if "No matching object found" in exc.args[0]: raise Empty() raise
def get(self, queue):
    """Remove and return the payload of the next message on *queue*.

    :raises Empty: if *queue* has no messages.
    """
    command = self.client.database.command
    try:
        reply = command("findandmodify", "messages",
                        query={"queue": queue},
                        remove=True)
    except OperationFailure:
        # findandmodify fails when there is no matching document
        raise Empty("Empty queue")
    return reply["value"]["payload"]
def effect():
    # Side-effect stub for a Mock: closes over the enclosing test's
    # ``calls`` and ``remaining`` one-element lists (used as mutable
    # cells).
    try:
        # first call: behave as if the queue were empty
        if not calls[0]:
            raise Empty()
        # later calls: hand out the remaining budget once, then zero it
        rem = remaining[0]
        remaining[0] = 0
        return rem, Mock()
    finally:
        # count every invocation, including the one that raised
        calls[0] += 1
def parse_response(self, connection, type, **options):
    """Test stub: replay the last command captured by the fake socket.

    Only BRPOP is emulated; the captured command must match *type*.

    :raises Empty: when BRPOP yields nothing, or for any other command.
    """
    cmd, queues = self.connection._sock.data.pop()
    assert cmd == type
    # the captured command has been consumed
    self.connection._sock.data = []
    if type == "BRPOP":
        reply = self.brpop(queues, 0.001)
        if reply:
            return reply
    raise Empty()
def _wait(self, timeout, ttl): while timeout > 0: try: return self.q.get(timeout=min(timeout, ttl)) except Empty: pass timeout -= ttl ttl = self._expire() raise Empty()
def _get(self, queue, timeout=None):
    """Get next message from `queue`."""
    DEBUG and pr('>>> Channel._get: %s' % queue)
    q = self._get_queue(queue)
    message = q.read()
    if not message:
        raise Empty()
    # delete up front: once read, the message counts as consumed
    q.delete_message(message)
    return deserialize(message.get_body())
def get_nowait(self):
    """Remove and return an item from the queue without blocking.

    Exceptions:
        Queue.Empty: if an item is not immediately available.
    """
    item = self._pop()
    if item is None:
        # _pop signals "nothing available" with None
        raise Empty()
    return item
def _get(self, queue): try: msg = self.client.database.command("findandmodify", "messages", query={"queue": queue}, remove=True) except errors.OperationFailure: raise Empty() return deserialize(msg["value"]["payload"])
def test_handle_next_queue_empty(self):
    """An Empty read is swallowed and retried; the next error
    propagates out of the handler."""
    watcher_queue = Mock()
    # first read raises Empty (retried), second raises RuntimeError
    watcher_queue.get.side_effect = iter([
        Empty(),
        RuntimeError()
    ])
    self.driver._watcher_queue = watcher_queue
    self.assertRaises(RuntimeError,
                      self.driver._handle_next_watcher_event,
                      False)