def connect(self):
    """Perform the outbound connect handshake and install the event filter.

    Raises ConnectError if the connect exchange fails, times out, or the
    event filter is rejected by the server.
    """
    super(OutboundEventSocket, self).connect()
    # Spin up the event handler for this client/session first so the
    # connect response can be dispatched to us.
    self.start_event_handler()
    # Bound the whole connect exchange by the transport's connect timeout.
    connect_timer = Timeout(self.transport.get_connect_timeout())
    connect_timer.start()
    try:
        connect_response = self._protocol_send("connect")
        if not connect_response.is_success():
            self.disconnect()
            raise ConnectError("Error while connecting")
    except Timeout:
        self.disconnect()
        raise ConnectError("Timeout connecting")
    finally:
        connect_timer.cancel()
    # The connect response doubles as the channel event: remember it and
    # the channel's unique id, then mark ourselves connected.
    self._channel = connect_response
    self._uuid = connect_response.get_header("Unique-ID")
    self.connected = True
    # Subscribe to the configured events, failing the connect if refused.
    if self._filter:
        subscribe = self.eventjson if self._is_eventjson else self.eventplain
        filter_response = subscribe(self._filter)
        if not filter_response.is_success():
            self.disconnect()
            raise ConnectError("Event filter failure")
def run_node_shutdown(self):
    """Signal a random fraction of the running nodes to shut down.

    Keeps picking nodes at random until ``shutdown_fraction`` of the
    currently running nodes have either acknowledged the shutdown signal
    (moved from ``running_nodes`` to ``shutdown_nodes``) or failed
    (recorded in ``failed_nodes``). Outcomes are collected as
    ``TextTestResult`` entries in ``results`` / ``failed_results``.
    """
    results = []
    failed_results = []
    shutdown_count = math.floor(len(self.running_nodes) * self.shutdown_fraction)
    self.failed_nodes = set()
    while len(self.shutdown_nodes) + len(self.failed_nodes) < shutdown_count:
        address = random.choice(self.running_nodes)
        # Each shutdown request is bounded by shutdown_timeout; the timer
        # raises TestTimeout() in this greenlet when it expires.
        timeout = Timeout(self.shutdown_timeout, TestTimeout())
        timeout.start()
        try:
            TestClient(address).shutdown()
            results.append(TextTestResult(self.get_node_name(address), 'Shutdown signal sent.'))
            self.running_nodes.remove(address)
            self.shutdown_nodes.append(address)
            l.info('Shutdown node ' + address)
        except TestTimeout:
            self.failed_nodes.add(address)
            failed_results.append(TextTestResult(self.get_node_name(address), 'Node timed-out when signaled to shutdown.'))
            l.exception('Shutdown node ' + address)
        except KeyValueError as error:
            self.failed_nodes.add(address)
            failed_results.append(TextTestResult(self.get_node_name(address), 'Node returned error when signaled to shutdown. ' + str(error)))
            l.exception('Shutdown node ' + address)
        except Exception as error:
            self.failed_nodes.add(address)
            failed_results.append(TextTestResult(self.get_node_name(address), 'Node does not want to shutdown. ' + str(error)))
            l.exception('Shutdown node ' + address)
        finally:
            # BUG FIX: the timer was never cancelled, so after a fast
            # (successful or failed) request it stayed armed and could
            # fire a stray TestTimeout later in this greenlet.
            timeout.cancel()
def _handle_request(self, listener_name, sock, addr):
    # Serve thrift RPC requests on a single client connection.
    # Wraps the accepted socket in a thrift transport/protocol pair and
    # dispatches messages in a loop until the connection ends.
    client = TFileObjectTransport(sock.makefile())
    itrans = self.tfactory.getTransport(client)
    otrans = self.tfactory.getTransport(client)
    iprot = self.pfactory.getProtocol(itrans)
    oprot = self.pfactory.getProtocol(otrans)
    # NOTE(review): this definition appears truncated in this view — the
    # outer `try:` has no visible except/finally clauses (transport cleanup
    # presumably lives past the end of this excerpt). Also, `timeout_con`
    # is started per-request but never cancelled in the visible code; if
    # the handlers outside this view don't cancel it, an expired timer can
    # fire during a later request — confirm against the full file.
    try:
        while True:
            (name, type, seqid) = iprot.readMessageBegin()
            request_start = time.time()
            try:
                # Per-request processing deadline; raises Timeout (the
                # class is passed as its own exception) when exceeded.
                timeout_con = Timeout(self.cfg.timeout, Timeout)
                timeout_con.start()
                if name not in self.wsgi._processMap:
                    # Unknown RPC name: drain the args struct and reply
                    # with a thrift UNKNOWN_METHOD exception.
                    iprot.skip(TType.STRUCT)
                    iprot.readMessageEnd()
                    x = TApplicationException(
                        TApplicationException.UNKNOWN_METHOD,
                        "Unknown function %s" % (name))
                    oprot.writeMessageBegin(
                        name, TMessageType.EXCEPTION, seqid)
                    x.write(oprot)
                    oprot.writeMessageEnd()
                    oprot.trans.flush()
                    raise ThriftFuncNotFound
                else:
                    self.wsgi._processMap[name](self.wsgi, seqid, iprot, oprot)
            except ThriftFuncNotFound, ex:
                # Already answered the client above; just log it.
                self.log.error("Unknown function %s" % (name))
                self.log.access(
                    addr, name, "FUNC_NOT_FOUND", time.time() - request_start)
            except Timeout, ex:
                self.log.error("A greenlet process timeout.")
                self.log.access(
                    addr, name, "TIMEOUT", time.time() - request_start)
def __call__(self, *args):
    """Invoke the wrapped function, bounding its run time by ``self.timeout``."""
    guard = Timeout(self.timeout)
    guard.start()
    try:
        # The finally clause runs before the return propagates, so the
        # timer is always disarmed.
        return self.function(*args)
    finally:
        guard.cancel()
def _get_instances(cluster):
    """Collect the user's instances from *cluster* into ``instancesall``.

    Clusters that error out or exceed RAPI_TIMEOUT are recorded in
    ``bad_clusters`` instead of raising.
    """
    guard = Timeout(RAPI_TIMEOUT)
    guard.start()
    try:
        instancesall.extend(cluster.get_user_instances(request.user))
    except (GanetiApiError, Timeout):
        # Treat both API errors and timeouts as a bad cluster.
        bad_clusters.append(cluster)
    finally:
        guard.cancel()
def _get_instances(cluster):
    """Collect *cluster*'s instances into ``instances``.

    Errors and RAPI timeouts are silently ignored (best effort).
    """
    guard = Timeout(RAPI_TIMEOUT)
    guard.start()
    try:
        instances.extend(cluster.get_instances())
    except (GanetiApiError, Timeout):
        # Best effort: skip clusters that fail or time out.
        pass
    finally:
        guard.cancel()
def join(self, timeout=None):
    """Wait until the greenlet finishes or *timeout* expires.
    Return ``None`` regardless.
    """
    if self.ready():
        return
    else:
        # Park this greenlet: register our switch as a link so the target
        # wakes us when it finishes, then yield to the hub.
        switch = getcurrent().switch
        self.rawlink(switch)
        try:
            t = Timeout.start_new(timeout)
            try:
                result = self.parent.switch()
                assert result is self, 'Invalid switch into Greenlet.join(): %r' % (result, )
            finally:
                t.cancel()
        except Timeout as ex:
            self.unlink(switch)
            # Only swallow our own timer; a foreign Timeout propagates.
            if ex is not t:
                raise
            if PY3:
                # Drop the traceback so the timer doesn't keep frames alive.
                ex.__traceback__ = None
        except:
            # Unlink on any other error so the dead link can't be switched into.
            self.unlink(switch)
            raise
def stop(self, timeout=0):
    """Shutdown the server."""
    # 1. Stop accepting: close every listening socket.
    for sock in self.listeners:
        sock.close()
    self.socket = []
    #2. Set "keep-alive" connections to "close"
    # TODO
    #3a. set low timeout (min(1s, timeout or 1)) on events belonging to connection (to kill long-polling connections
    # TODO
    #3. Wait until every connection is closed or timeout expires
    if self._requests:
        timer = Timeout.start_new(timeout)
        try:
            try:
                self._no_connections_event.wait(timeout=timeout)
            except Timeout, ex:
                # Only swallow our own timer; re-raise a foreign Timeout.
                if timer is not ex:
                    raise
        finally:
            timer.cancel()
    #4. forcefull close all the connections
    # TODO
    #5. free http instance
    self.http = None
    #6. notify event created in serve_forever()
    self._stopped_event.set()
def acquire(self, blocking=True, timeout=None):
    # Acquire the semaphore. Returns True on success; False when
    # non-blocking with no capacity, or when *timeout* expires.
    if self.counter > 0:
        # Fast path: capacity available.
        self.counter -= 1
        return True
    elif not blocking:
        return False
    else:
        # Slow path: link our switch so release() can wake us, then
        # yield to the hub until notified or timed out.
        switch = getcurrent().switch
        self.rawlink(switch)
        try:
            timer = Timeout.start_new(timeout)
            try:
                try:
                    result = self.hub.switch()
                    assert result is self, 'Invalid switch into Semaphore.acquire(): %r' % ( result, )
                except Timeout:
                    ex = sys.exc_info()[1]
                    # Our own timer expiring means acquisition failed.
                    if ex is timer:
                        return False
                    # A different Timeout belongs to the caller: propagate.
                    raise
            finally:
                timer.cancel()
        finally:
            self.unlink(switch)
        # We were woken by release(); consume the slot handed to us.
        self.counter -= 1
        assert self.counter >= 0
        return True
def connect(self, address):
    """
    Connect to *address*.

    .. versionchanged:: 20.6.0
        If the host part of the address includes an IPv6 scope ID,
        it will be used instead of ignored, if the platform supplies
        :func:`socket.inet_pton`.
    """
    if self.timeout == 0.0:
        # Non-blocking socket: defer straight to the OS semantics.
        return self._sock.connect(address)
    address = _socketcommon._resolve_addr(self._sock, address)
    # Dummy timer when self.timeout is None; otherwise raises
    # socket.timeout('timed out') in this greenlet on expiry.
    timer = Timeout._start_new_or_dummy(self.timeout, timeout('timed out'))
    try:
        while 1:
            # Surface any pending socket-level error before retrying.
            err = self._sock.getsockopt(SOL_SOCKET, SO_ERROR)
            if err:
                raise error(err, strerror(err))
            result = self._sock.connect_ex(address)
            if not result or result == EISCONN:
                break
            if (result in (EWOULDBLOCK, EINPROGRESS, EALREADY)) or (result == EINVAL and is_windows):
                # Connect in progress: block this greenlet until writable.
                self._wait(self._write_event)
            else:
                raise error(result, strerror(result))
    finally:
        timer.close()
def put(self, item, block=True, timeout=None):
    # Channel put: hand *item* directly to a waiting getter, or block
    # until one arrives (raising Full on timeout / when non-blocking).
    if self.hub is getcurrent():
        # Running in the hub itself: we cannot block. Deliver directly
        # if a getter is waiting, otherwise fail immediately.
        if self.getters:
            getter = self.getters.popleft()
            getter.switch(item)
            return
        raise Full
    if not block:
        # Zero timeout makes the timer fire immediately with Full.
        timeout = 0
    waiter = Waiter()
    # Note: `item` is rebound to the (value, waiter) pair queued on putters.
    item = (item, waiter)
    self.putters.append(item)
    timeout = Timeout.start_new(timeout, Full)
    try:
        if self.getters:
            self._schedule_unlock()
        result = waiter.get()
        assert result is waiter, "Invalid switch into Channel.put: %r" % ( result, )
    except:
        # Timed out or errored: withdraw our queued entry.
        self._discard(item)
        raise
    finally:
        timeout.cancel()
def wait(self, timeout=None):
    """
    Block until the internal flag is true.

    If the internal flag is true on entry, return immediately. Otherwise,
    block until another thread (greenlet) calls :meth:`set` to set the flag to true,
    or until the optional timeout occurs.

    When the *timeout* argument is present and not ``None``, it should be a
    floating point number specifying a timeout for the operation in seconds
    (or fractions thereof).

    :return: The value of the internal flag (``True`` or ``False``).
       (If no timeout was given, the only possible return value is ``True``.)
    """
    if self._flag:
        return self._flag
    # Link our switch so set() can wake us, then yield to the hub.
    switch = getcurrent().switch
    self.rawlink(switch)
    try:
        timer = Timeout._start_new_or_dummy(timeout)
        try:
            try:
                result = self.hub.switch()
                if result is not self:
                    raise InvalidSwitchError('Invalid switch into Event.wait(): %r' % (result, ))
            except Timeout as ex:
                # Only swallow our own timer; a foreign Timeout propagates.
                if ex is not timer:
                    raise
        finally:
            timer.cancel()
    finally:
        self.unlink(switch)
    # May still be False if we woke via timeout rather than set().
    return self._flag
def connect(self, address):
    # Cooperative connect: retry connect_ex() while yielding to the hub
    # until connected, errored, or the socket timeout expires.
    if self.timeout == 0.0:
        # Non-blocking socket: use the raw OS behavior.
        return _socket.socket.connect(self._sock, address)
    address = _socketcommon._resolve_addr(self._sock, address)
    # Dummy context when timeout is None; otherwise raises
    # socket.timeout("timed out") in this greenlet on expiry.
    with Timeout._start_new_or_dummy(self.timeout, timeout("timed out")):
        while True:
            # Surface any pending socket-level error before retrying.
            err = self.getsockopt(SOL_SOCKET, SO_ERROR)
            if err:
                raise error(err, strerror(err))
            result = _socket.socket.connect_ex(self._sock, address)
            if not result or result == EISCONN:
                break
            elif (result in (EWOULDBLOCK, EINPROGRESS, EALREADY)) or (result == EINVAL and is_windows):
                # Connect in progress: block this greenlet until writable.
                self._wait(self._write_event)
            else:
                if (isinstance(address, tuple) and address[0] == 'fe80::1'
                        and result == EHOSTUNREACH):
                    # On Python 3.7 on mac, we see EHOSTUNREACH
                    # returned for this link-local address, but it really is
                    # supposed to be ECONNREFUSED according to the standard library
                    # tests (test_socket.NetworkConnectionNoServer.test_create_connection)
                    # (On previous versions, that code passed the '127.0.0.1' IPv4 address, so
                    # ipv6 link locals were never a factor; 3.7 passes 'localhost'.)
                    # It is something of a mystery how the stdlib socket code doesn't
                    # produce EHOSTUNREACH---I (JAM) can't see how socketmodule.c would avoid
                    # that. The normal connect just calls connect_ex much like we do.
                    result = ECONNREFUSED
                raise error(result, strerror(result))
def put(self, item, block=True, timeout=None):
    # Channel put: hand *item* directly to a waiting getter, or block
    # until one arrives (raising Full on timeout / when non-blocking).
    if self.hub is getcurrent():
        # Running in the hub: cannot block; deliver directly or fail.
        if self.getters:
            getter = self.getters.popleft()
            getter.switch(item)
            return
        raise Full
    if not block:
        # Zero timeout makes the timer fire immediately with Full.
        timeout = 0
    waiter = Waiter()
    # Note: `item` is rebound to the (value, waiter) pair queued on putters.
    item = (item, waiter)
    self.putters.append(item)
    timeout = Timeout.start_new(timeout, Full)
    try:
        if self.getters:
            self._schedule_unlock()
        result = waiter.get()
        assert result is waiter, "Invalid switch into Channel.put: %r" % (result, )
    except:
        # Timed out or errored: withdraw our queued entry.
        self._discard(item)
        raise
    finally:
        timeout.cancel()
def connect(self, address):
    # Cooperative connect with explicit name resolution: retry
    # connect_ex() while yielding to the hub until connected or errored.
    if self.timeout == 0.0:
        # Non-blocking socket: use raw OS behavior.
        return self._sock.connect(address)
    sock = self._sock
    if isinstance(address, tuple):
        # Resolve host/port to a concrete sockaddr matching this socket.
        r = getaddrinfo(address[0], address[1], sock.family, sock.type, sock.proto)
        address = r[0][-1]
    if self.timeout is not None:
        timer = Timeout.start_new(self.timeout, timeout('timed out'))
    else:
        timer = None
    try:
        while True:
            # Surface any pending socket-level error before retrying.
            err = sock.getsockopt(SOL_SOCKET, SO_ERROR)
            if err:
                raise error(err, strerror(err))
            result = sock.connect_ex(address)
            if not result or result == EISCONN:
                break
            elif (result in (EWOULDBLOCK, EINPROGRESS, EALREADY)) or (result == EINVAL and is_windows):
                # Connect in progress: block this greenlet until writable.
                self._wait(self._write_event)
            else:
                raise error(result, strerror(result))
    finally:
        if timer is not None:
            timer.cancel()
def _do_wait(self, timeout):
    """
    Wait for up to *timeout* seconds to expire. If timeout
    elapses, return the exception. Otherwise, return None.
    Raises timeout if a different timer expires.
    """
    # Link our switch so a notifier can wake us, then yield to the hub.
    switch = getcurrent().switch
    self.rawlink(switch)
    try:
        # As a tiny efficiency optimization, avoid allocating a timer
        # if not needed.
        timer = Timeout.start_new(timeout) if timeout is not None else None
        try:
            try:
                result = get_hub().switch()
                assert result is self, 'Invalid switch into Semaphore.wait/acquire(): %r' % (result, )
            except Timeout as ex:
                # A foreign Timeout belongs to the caller: propagate it.
                if ex is not timer:
                    raise
                # Our own timer expired: signal that via the return value.
                return ex
        finally:
            if timer is not None:
                timer.cancel()
    finally:
        self.unlink(switch)
def _wait_core(self, timeout, catch=Timeout):
    # The core of the wait implementation, handling
    # switching and linking. If *catch* is set to (),
    # a timeout that elapses will be allowed to be raised.
    # Returns a true value if the wait succeeded without timing out.
    switch = getcurrent().switch # pylint:disable=undefined-variable
    self.rawlink(switch)
    try:
        # Context manager form: the timer is cancelled on exit and a
        # dummy is used when timeout is None.
        with Timeout._start_new_or_dummy(timeout) as timer:
            try:
                if self.hub is None:
                    # Lazily bind the hub on first wait.
                    self.hub = get_hub()
                result = self.hub.switch()
                if result is not self: # pragma: no cover
                    raise InvalidSwitchError(
                        'Invalid switch into Event.wait(): %r' % (result, ))
                return True
            except catch as ex:
                # A foreign timer's exception propagates to the caller.
                if ex is not timer:
                    raise
                # test_set_and_clear and test_timeout in test_threading
                # rely on the exact return values, not just truthish-ness
                return False
    finally:
        self.unlink(switch)
def peek(self, block=True, timeout=None):
    """Return an item from the queue without removing it.

    If *block* is true, wait up to *timeout* seconds for an item to
    arrive; raise :class:`Empty` on timeout or when non-blocking with
    an empty queue.
    """
    if self.qsize():
        if self.putters:
            self._schedule_unlock()
        return self._peek()
    elif not block and get_hub() is getcurrent():
        # special case to make peek(False) runnable in the mainloop
        # greenlet there are no items in the queue; try to fix the
        # situation by unlocking putters
        while self.putters:
            putter = self.putters.pop()
            if putter:
                putter.switch(putter)
        if self.qsize():
            return self._peek()
        raise Empty
    elif block:
        waiter = Waiter()
        timeout = Timeout.start_new(timeout, Empty)
        try:
            self.getters.add(waiter)
            if self.putters:
                self._schedule_unlock()
            result = waiter.get()
            # BUG FIX: the message previously said "Queue.put"; this is
            # peek() (compare the sibling implementation of peek()).
            assert result is waiter, "Invalid switch into Queue.peek: %r" % (result,)
            return self._peek()
        finally:
            self.getters.discard(waiter)
            timeout.cancel()
    else:
        raise Empty
def wait(self, timeout=None):
    """Block until the instance is ready.

    If this instance already holds a value / an exception, return immediatelly.
    Otherwise, block until another thread calls :meth:`set` or :meth:`set_exception` or
    until the optional timeout occurs.

    When the *timeout* argument is present and not ``None``, it should be a
    floating point number specifying a timeout for the operation in seconds
    (or fractions thereof).

    This method always returns ``None`` regardless of the reason it returns.
    To find out out what happened, use :meth:`ready` and :meth:`successful` methods
    or :attr:`value` and :attr:`exception` properties.
    """
    if self._exception is not _NONE:
        # Already resolved (value or exception set): nothing to wait for.
        return
    else:
        # Link our switch so set()/set_exception() can wake us, then
        # yield to the hub until notified or timed out.
        switch = getcurrent().switch
        self.rawlink(switch)
        try:
            timer = Timeout.start_new(timeout)
            try:
                result = get_hub().switch()
                assert result is self, 'Invalid switch into AsyncResult.wait(): %r' % (result, )
            finally:
                timer.cancel()
        except Timeout, exc:
            self.unlink(switch)
            # Only swallow our own timer; a foreign Timeout propagates.
            if exc is not timer:
                raise
        except:
        # NOTE(review): this definition is truncated in this view — the
        # final bare `except:` has no body here. The full file presumably
        # unlinks and re-raises (as the sibling wait() at L21 does);
        # confirm against the original source.
def get(self, block=True, timeout=None):
    """Return the stored value or raise the exception.

    If this instance already holds a value / an exception, return / raise it immediatelly.
    Otherwise, block until another greenlet calls :meth:`set` or :meth:`set_exception` or
    until the optional timeout occurs.

    When the *timeout* argument is present and not ``None``, it should be a
    floating point number specifying a timeout for the operation in seconds
    (or fractions thereof).
    """
    if self._exception is not _NONE:
        # Resolved: _exception is None on success, the exception otherwise.
        if self._exception is None:
            return self.value
        raise self._exception
    elif block:
        # Link our switch so set()/set_exception() can wake us, then
        # yield to the hub until notified or the timer fires.
        switch = getcurrent().switch
        self.rawlink(switch)
        try:
            timer = Timeout.start_new(timeout)
            try:
                result = self.hub.switch()
                assert result is self, 'Invalid switch into AsyncResult.get(): %r' % (result, )
            finally:
                timer.cancel()
        except:
            # On timeout (or any error) drop the link and let it propagate;
            # the timer's Timeout is the documented timeout signal.
            self.unlink(switch)
            raise
        if self._exception is None:
            return self.value
        raise self._exception
    else:
        # Non-blocking and unresolved.
        raise Timeout
def wait(self, timeout=None):
    """Block until the instance is ready.

    If this instance already holds a value / an exception, return immediatelly.
    Otherwise, block until another thread calls :meth:`set` or :meth:`set_exception` or
    until the optional timeout occurs.

    When the *timeout* argument is present and not ``None``, it should be a
    floating point number specifying a timeout for the operation in seconds
    (or fractions thereof).

    Return :attr:`value`.
    """
    if self._exception is not _NONE:
        # Already resolved; no need to block.
        return self.value
    else:
        # Link our switch so set()/set_exception() can wake us, then
        # yield to the hub until notified or timed out.
        switch = getcurrent().switch
        self.rawlink(switch)
        try:
            timer = Timeout.start_new(timeout)
            try:
                result = self.hub.switch()
                assert result is self, 'Invalid switch into AsyncResult.wait(): %r' % (result, )
            finally:
                timer.cancel()
        except Timeout as exc:
            self.unlink(switch)
            # Only swallow our own timer; a foreign Timeout propagates.
            if exc is not timer:
                raise
        except:
            self.unlink(switch)
            raise
        # not calling unlink() in non-exception case, because if switch()
        # finished normally, link was already removed in _notify_links
        return self.value
def select(rlist, wlist, xlist, timeout=None):
    """An implementation of :meth:`select.select` that blocks only the current greenlet.

    Note: *xlist* is ignored.
    """
    watchers = []
    # NOTE(review): `timeout` is rebound to a started Timeout *object* and
    # later passed as `event.wait(timeout=timeout)`, where a number of
    # seconds would normally be expected — confirm this is intentional in
    # the gevent version this was taken from.
    timeout = Timeout.start_new(timeout)
    loop = get_hub().loop
    io = loop.io
    MAXPRI = loop.MAXPRI
    result = SelectResult()
    try:
        try:
            # Register a max-priority read watcher per read fd...
            for readfd in rlist:
                watcher = io(get_fileno(readfd), 1)
                watcher.priority = MAXPRI
                watcher.start(result.add_read, readfd)
                watchers.append(watcher)
            # ...and a write watcher per write fd.
            for writefd in wlist:
                watcher = io(get_fileno(writefd), 2)
                watcher.priority = MAXPRI
                watcher.start(result.add_write, writefd)
                watchers.append(watcher)
        except IOError:
            # Mirror select.select(): surface fd problems as `error`.
            ex = sys.exc_info()[1]
            raise error(*ex.args)
        result.event.wait(timeout=timeout)
        return result.read, result.write, []
    finally:
        # Always tear down every watcher and the timer.
        for watcher in watchers:
            watcher.stop()
        timeout.cancel()
def acquire(self, blocking=True, timeout=None):
    # Acquire the semaphore. Returns True on success; False when
    # non-blocking with no capacity, or when *timeout* expires.
    if self.counter > 0:
        # Fast path: capacity available.
        self.counter -= 1
        return True
    elif not blocking:
        return False
    else:
        # Slow path: link our switch so release() can wake us, then
        # yield to the hub until notified or timed out.
        switch = getcurrent().switch
        self.rawlink(switch)
        try:
            timer = Timeout.start_new(timeout)
            try:
                try:
                    result = self.hub.switch()
                    assert result is self, "Invalid switch into Semaphore.acquire(): %r" % (result,)
                except Timeout:
                    ex = sys.exc_info()[1]
                    # Our own timer expiring means acquisition failed.
                    if ex is timer:
                        return False
                    # A different Timeout belongs to the caller: propagate.
                    raise
            finally:
                timer.cancel()
        finally:
            self.unlink(switch)
        # Woken by release(); consume the slot handed to us.
        self.counter -= 1
        assert self.counter >= 0
        return True
def wait(self, timeout=None):
    """Block until the internal flag is true.

    If the internal flag is true on entry, return immediately. Otherwise,
    block until another thread calls :meth:`set` to set the flag to true,
    or until the optional timeout occurs.

    When the *timeout* argument is present and not ``None``, it should be a
    floating point number specifying a timeout for the operation in seconds
    (or fractions thereof).

    Return the value of the internal flag (``True`` or ``False``).
    """
    if self._flag:
        return self._flag
    else:
        # Link our switch so set() can wake us, then yield to the hub.
        switch = getcurrent().switch
        self.rawlink(switch)
        try:
            timer = Timeout.start_new(timeout)
            try:
                try:
                    result = self.hub.switch()
                    assert result is self, 'Invalid switch into Event.wait(): %r' % (result, )
                except Timeout as ex:
                    # Only swallow our own timer; a foreign Timeout propagates.
                    if ex is not timer:
                        raise
            finally:
                timer.cancel()
        finally:
            self.unlink(switch)
        # May still be False if we woke via timeout rather than set().
        return self._flag
def kill(self, exception=GreenletExit, block=True, timeout=None):
    """
    Kill all greenlets being tracked by this group.
    """
    # Dummy timer when timeout is None; otherwise bounds the whole kill.
    timer = Timeout._start_new_or_dummy(timeout)
    try:
        try:
            while self.greenlets:
                for greenlet in list(self.greenlets):
                    if greenlet not in self.dying:
                        try:
                            kill = greenlet.kill
                        except AttributeError:
                            # Not a Greenlet subclass; use the module helper.
                            _kill(greenlet, exception)
                        else:
                            # Fire-and-forget per greenlet; we join below.
                            kill(exception, block=False)
                        self.dying.add(greenlet)
                if not block:
                    break
                # Wait for this batch to die; loop in case new greenlets
                # were added to the group meanwhile.
                joinall(self.greenlets)
        except Timeout as ex:
            # Only swallow our own timer; a foreign Timeout propagates.
            if ex is not timer:
                raise
    finally:
        timer.cancel()
def join(self, timeout=None):
    """
    join(timeout=None) -> None

    Wait until the greenlet finishes or *timeout* expires.
    Return ``None`` regardless.
    """
    if self.ready():
        return
    # Link our switch so the target wakes us when it finishes, then
    # yield to the hub (via our parent).
    switch = getcurrent().switch # pylint:disable=undefined-variable
    self.rawlink(switch)
    try:
        # Dummy timer when timeout is None.
        t = Timeout._start_new_or_dummy(timeout)
        try:
            result = self.parent.switch()
            if result is not self:
                raise InvalidSwitchError('Invalid switch into Greenlet.join(): %r' % (result, ))
        finally:
            t.cancel()
    except Timeout as ex:
        self.unlink(switch)
        # Only swallow our own timer; a foreign Timeout propagates.
        if ex is not t:
            raise
    except:
        self.unlink(switch)
        raise
def _wait_auth_request(self):
    '''
    Waits until auth/request event is received.
    '''
    # Bound the wait by the transport's connect timeout; on expiry we
    # surface a ConnectError rather than a bare Timeout.
    auth_timer = Timeout(self.transport.get_connect_timeout())
    auth_timer.start()
    try:
        # The _auth_request handler wakes this async result once the
        # auth/request event arrives; we simply block on it here.
        return self._wait_auth_event.get()
    except Timeout:
        raise ConnectError("Timeout waiting auth/request")
    finally:
        auth_timer.cancel()
def joinall(greenlets, timeout=None, raise_error=False, count=None):
    # Wait for up to *count* of *greenlets* to finish (all by default),
    # optionally re-raising the first failure, bounded by *timeout*.
    from gevent.queue import Queue
    queue = Queue()
    put = queue.put
    if count is None:
        count = len(greenlets)
    # Note: `timeout` is rebound to a started Timeout object.
    timeout = Timeout.start_new(timeout)
    try:
        try:
            # Each finishing greenlet pushes itself onto the queue.
            for greenlet in greenlets:
                greenlet.rawlink(put)
            if raise_error:
                for _ in xrange(count):
                    greenlet = queue.get()
                    if not greenlet.successful():
                        raise greenlet.exception
            else:
                for _ in xrange(count):
                    queue.get()
        except:
            # Swallow only our own timer's Timeout; anything else propagates.
            if sys.exc_info()[1] is not timeout:
                raise
        finally:
            # Always remove the links so `put` is never called later.
            for greenlet in greenlets:
                greenlet.unlink(put)
    finally:
        timeout.cancel()
def wait(io, timeout=None, timeout_exc=_NONE):
    """
    Block the current greenlet until *io* is ready.

    If *timeout* is non-negative, then *timeout_exc* is raised after
    *timeout* second has passed. By default *timeout_exc* is
    ``socket.timeout('timed out')``.

    If :func:`cancel_wait` is called on *io* by another greenlet,
    raise an exception in this blocking greenlet
    (``socket.error(EBADF, 'File descriptor was closed in another greenlet')``
    by default).

    :param io: A libev watcher, most commonly an IO watcher obtained from
        :meth:`gevent.core.loop.io`
    :keyword timeout_exc: The exception to raise if the timeout expires.
        By default, a :class:`socket.timeout` exception is raised.
        If you pass a value for this keyword, it is interpreted as for
        :class:`gevent.timeout.Timeout`.
    """
    # A watcher can serve only one blocking greenlet at a time.
    if io.callback is not None:
        raise ConcurrentObjectUseError('This socket is already used by another greenlet: %r' % (io.callback, ))
    if timeout is not None:
        # Resolve the sentinel default, then rebind `timeout` to a
        # started Timeout object that raises timeout_exc on expiry.
        timeout_exc = timeout_exc if timeout_exc is not _NONE else _timeout_error('timed out')
        timeout = Timeout.start_new(timeout, timeout_exc)
    try:
        return get_hub().wait(io)
    finally:
        if timeout is not None:
            timeout.cancel()
def __get_or_peek(self, method, block, timeout):
    # Internal helper method. The `method` should be either
    # self._get when called from self.get() or self._peek when
    # called from self.peek(). Call this after the initial check
    # to see if there are items in the queue.
    if self.hub is getcurrent():
        # special case to make get_nowait() or peek_nowait() runnable in the mainloop greenlet
        # there are no items in the queue; try to fix the situation by unlocking putters
        while self.putters:
            # Note: get() used popleft(), peek used pop(); popleft
            # is almost certainly correct.
            self.putters.popleft().put_and_switch()
        if self.qsize():
            return method()
        raise Empty()
    if not block:
        # We can't block, we're not the hub, and we have nothing
        # to return. No choice...
        raise Empty()
    # Block: enqueue a waiter and yield until a putter hands us control
    # or the timer raises Empty.
    waiter = Waiter()
    timeout = Timeout._start_new_or_dummy(timeout, Empty)
    try:
        self.getters.append(waiter)
        if self.putters:
            self._schedule_unlock()
        result = waiter.get()
        if result is not waiter:
            raise InvalidSwitchError('Invalid switch into Queue.get: %r' % (result, ))
        return method()
    finally:
        timeout.cancel()
        # Remove our waiter whether we succeeded or timed out.
        _safe_remove(self.getters, waiter)
def peek(self, block=True, timeout=None):
    """Return an item from the queue without removing it.

    If optional args *block* is true and *timeout* is ``None`` (the default),
    block if necessary until an item is available. If *timeout* is a positive number,
    it blocks at most *timeout* seconds and raises the :class:`Empty` exception
    if no item was available within that time. Otherwise (*block* is false), return
    an item if one is immediately available, else raise the :class:`Empty` exception
    (*timeout* is ignored in that case).
    """
    if self.qsize():
        return self._peek()
    elif self.hub is getcurrent():
        # special case to make peek(False) runnable in the mainloop greenlet
        # there are no items in the queue; try to fix the situation by unlocking putters
        while self.putters:
            self.putters.pop().put_and_switch()
        if self.qsize():
            return self._peek()
        raise Empty
    elif block:
        # Enqueue a waiter and yield until a putter wakes us or the
        # timer raises Empty.
        waiter = Waiter()
        timeout = Timeout.start_new(timeout, Empty)
        try:
            self.getters.add(waiter)
            if self.putters:
                self._schedule_unlock()
            result = waiter.get()
            assert result is waiter, 'Invalid switch into Queue.peek: %r' % (result, )
            return self._peek()
        finally:
            # Remove our waiter whether we succeeded or timed out.
            self.getters.discard(waiter)
            timeout.cancel()
    else:
        raise Empty
def _wait_auth_request(self):
    '''
    Waits until auth/request event is received.
    '''
    # Bound the wait by the transport's connect timeout; on expiry we
    # surface a ConnectError rather than a bare Timeout.
    auth_timer = Timeout(self.transport.get_connect_timeout())
    auth_timer.start()
    try:
        # The _authRequest handler in EventSocket pushes the auth/request
        # event onto the response queue; we simply block on it here.
        return self._response_queue.get()
    except Timeout:
        raise ConnectError("Timeout waiting auth/request")
    finally:
        auth_timer.cancel()
def _wait_core(self, timeout, catch=Timeout):
    # The core of the wait implementation, handling
    # switching and linking. If *catch* is set to (),
    # a timeout that elapses will be allowed to be raised.
    # Returns a true value if the wait succeeded without timing out.
    switch = getcurrent().switch
    self.rawlink(switch)
    try:
        # Dummy timer when timeout is None.
        timer = Timeout._start_new_or_dummy(timeout)
        try:
            try:
                result = self.hub.switch()
                if result is not self: # pragma: no cover
                    raise InvalidSwitchError('Invalid switch into Event.wait(): %r' % (result, ))
                return True
            except catch as ex:
                # A foreign timer's exception propagates to the caller.
                if ex is not timer:
                    raise
                # test_set_and_clear and test_timeout in test_threading
                # rely on the exact return values, not just truthish-ness
                return False
        finally:
            timer.cancel()
    finally:
        self.unlink(switch)
def put(self, item, block=True, timeout=None):
    # Channel put: hand *item* directly to a waiting getter, or block
    # until one arrives (raising Full on timeout / when non-blocking).
    if self.hub is getcurrent():
        # Running in the hub: cannot block; deliver directly or fail.
        if self.getters:
            getter = self.getters.popleft()
            getter.switch(item)
            return
        raise Full
    if not block:
        # Zero timeout makes the timer fire immediately with Full.
        timeout = 0
    waiter = Waiter()
    # Note: `item` is rebound to the (value, waiter) pair queued on putters.
    item = (item, waiter)
    self.putters.append(item)
    timeout = Timeout._start_new_or_dummy(timeout, Full)
    try:
        if self.getters:
            self._schedule_unlock()
        result = waiter.get()
        if result is not waiter:
            raise InvalidSwitchError("Invalid switch into Channel.put: %r" % (result, ))
    except:
        # Timed out or errored: withdraw our queued entry.
        _safe_remove(self.putters, item)
        raise
    finally:
        timeout.cancel()
def test_channel_withdraw(
    raiden_network, number_of_nodes, token_addresses, deposit, network_wait, retry_timeout
):
    """ Withdraw funds after a mediated transfer."""
    alice_app, bob_app = raiden_network
    token_address = token_addresses[0]
    token_network_address = views.get_token_network_address_by_token_address(
        views.state_from_app(alice_app), alice_app.raiden.default_registry.address, token_address
    )
    assert token_network_address
    token_proxy = bob_app.raiden.proxy_manager.token(token_address)
    bob_initial_balance = token_proxy.balance_of(bob_app.raiden.address)
    # Intercept Bob's messages so we can wait for the Unlock below.
    message_handler = WaitForMessage()
    bob_app.raiden.message_handler = message_handler
    alice_to_bob_amount = 10
    identifier = 1
    target = bob_app.raiden.address
    secret = sha3(target)
    payment_status = alice_app.raiden.start_mediated_transfer_with_secret(
        token_network_address=token_network_address,
        amount=alice_to_bob_amount,
        target=target,
        identifier=identifier,
        secret=secret,
    )
    wait_for_unlock = bob_app.raiden.message_handler.wait_for_message(
        Unlock, {"payment_identifier": identifier}
    )
    # Bound the wait for the transfer to settle; scale by network size.
    timeout = network_wait * number_of_nodes
    with Timeout(seconds=timeout):
        wait_for_unlock.get()
        msg = (
            f"transfer from {to_checksum_address(alice_app.raiden.address)} "
            f"to {to_checksum_address(bob_app.raiden.address)} failed."
        )
        assert payment_status.payment_done.get(), msg
    # Withdraw Bob's original deposit plus the amount just received.
    total_withdraw = deposit + alice_to_bob_amount
    bob_alice_channel_state = get_channelstate(bob_app, alice_app, token_network_address)
    bob_app.raiden.withdraw(
        canonical_identifier=bob_alice_channel_state.canonical_identifier,
        total_withdraw=total_withdraw,
    )
    waiting.wait_for_withdraw_complete(
        raiden=bob_app.raiden,
        canonical_identifier=bob_alice_channel_state.canonical_identifier,
        total_withdraw=total_withdraw,
        retry_timeout=retry_timeout,
    )
    # The on-chain token balance must reflect the withdrawn total.
    bob_balance_after_withdraw = token_proxy.balance_of(bob_app.raiden.address)
    assert bob_initial_balance + total_withdraw == bob_balance_after_withdraw
def poll_all_received_events(self):
    """ This will be triggered once for each `echo_node_alarm_callback`.
    It polls all channels for `EventTransferReceivedSuccess` events,
    adds all new events to the `self.received_transfers` queue and
    respawns `self.echo_node_worker`, if it died. """
    locked = False
    try:
        # Bound the whole poll; the lock is released in `finally` only if
        # it was actually acquired before a possible timeout.
        with Timeout(10):
            locked = self.lock.acquire(blocking=False)
            if not locked:
                # Another poll is still in progress; skip this round.
                return
            else:
                channels = self.api.get_channel_list(
                    registry_address=self.api.raiden.default_registry.address,
                    token_address=self.token_address,
                )
                received_transfers = list()
                for channel_state in channels:
                    channel_events = self.api.get_channel_events(
                        channel_state.token_network_identifier,
                        channel_state.identifier,
                        self.last_poll_block,
                    )
                    received_transfers.extend([
                        event for event in channel_events
                        if event['event'] == 'EventTransferReceivedSuccess'
                    ])
                for event in received_transfers:
                    transfer = event.copy()
                    transfer.pop('block_number')
                    self.received_transfers.put(transfer)
                # set last_poll_block after events are enqueued (timeout safe)
                if received_transfers:
                    self.last_poll_block = max(
                        event['block_number']
                        for event in received_transfers)
                # increase last_poll_block if the blockchain proceeded
                delta_blocks = self.api.raiden.get_block_number() - self.last_poll_block
                if delta_blocks > 1:
                    self.last_poll_block += 1
                if not self.echo_worker_greenlet.started:
                    log.debug(
                        'restarting echo_worker_greenlet',
                        dead=self.echo_worker_greenlet.dead,
                        successful=self.echo_worker_greenlet.successful(),
                        exception=self.echo_worker_greenlet.exception,
                    )
                    self.echo_worker_greenlet = gevent.spawn(self.echo_worker)
    except Timeout:
        log.info('timeout while polling for events')
    finally:
        if locked:
            self.lock.release()
def test_web_rtc_message_sync(matrix_transports):
    # End-to-end check that messages queued on transport0 arrive at
    # transport1 over the WebRTC channel.
    transport0, transport1 = matrix_transports
    transport1_messages = set()
    raiden_service0 = MockRaidenService()
    raiden_service1 = MockRaidenService()

    def mock_handle_web_rtc_messages(message_data, partner_address):
        # Record every parsed message so the final wait can count them.
        messages = validate_and_parse_message(message_data, partner_address)
        transport1_messages.update(messages)

    # set mock function to make sure messages are sent via web rtc
    transport1._web_rtc_manager._handle_message_callback = mock_handle_web_rtc_messages
    transport0.start(raiden_service0, [], None)
    transport1.start(raiden_service1, [], None)
    transport0.immediate_health_check_for(transport1._raiden_service.address)
    transport1.immediate_health_check_for(transport0._raiden_service.address)
    with Timeout(TIMEOUT_WEB_RTC_CONNECTION):
        # wait until web rtc connection is ready
        while not transport0._web_rtc_manager.has_ready_channel(raiden_service1.address):
            gevent.sleep(1)
        while not transport1._web_rtc_manager.has_ready_channel(raiden_service0.address):
            gevent.sleep(1)
    queue_identifier = QueueIdentifier(
        recipient=transport1._raiden_service.address,
        canonical_identifier=factories.UNIT_CANONICAL_ID,
    )
    raiden0_queues = views.get_all_messagequeues(views.state_from_raiden(raiden_service0))
    raiden0_queues[queue_identifier] = []
    # Send five signed Processed messages through transport0.
    for i in range(5):
        message = Processed(message_identifier=MessageID(i), signature=EMPTY_SIGNATURE)
        raiden0_queues[queue_identifier].append(message)
        transport0._raiden_service.sign(message)
        transport0.send_async([MessagesQueue(queue_identifier, [message])])
    with Timeout(TIMEOUT_MESSAGE_RECEIVE):
        # Wait until all five messages were observed by the callback.
        while not len(transport1_messages) == 5:
            gevent.sleep(0.1)
def code():
    # Execute *sql* with *values* on the thread-local DB connection and
    # return (rows, rowcount). `sql`, `values`, and `charset` are closure
    # variables from the enclosing scope.
    if _local.db_conn.charset != charset:
        # Keep the connection's charset in sync with the requested one.
        _local.db_conn.set_charset(charset)
    with _local.db_conn.cursor() as cursor:
        # Bound only the query execution by the configured query timeout.
        # NOTE(review): indentation is ambiguous in the original source —
        # the fetch may have been intended inside the Timeout block too;
        # confirm which statements the timeout should cover.
        with Timeout(db_config.query_timeout):
            cursor.execute(sql, values)
        return cursor.fetchall(), cursor.rowcount
def get(self, block=True, timeout=None):
    """
    get(block=True, timeout=None) -> object

    Return the result the greenlet has returned or re-raise the
    exception it has raised.

    If block is ``False``, raise :class:`gevent.Timeout` if the
    greenlet is still alive. If block is ``True``, unschedule the
    current greenlet until the result is available or the timeout
    expires. In the latter cases, :class:`gevent.Timeout` is raised.
    """
    if self.ready():
        if self.successful():
            return self.value
        self._raise_exception()
    if not block:
        raise Timeout()
    # Link our switch so the target wakes us when it finishes, then
    # yield to the hub (via our parent).
    switch = getcurrent().switch # pylint:disable=undefined-variable
    self.rawlink(switch)
    try:
        t = Timeout._start_new_or_dummy(timeout)
        try:
            result = self.parent.switch()
            if result is not self:
                raise InvalidSwitchError(
                    'Invalid switch into Greenlet.get(): %r' % (result, ))
        finally:
            t.cancel()
    except:
        # unlinking in 'except' instead of finally is an optimization:
        # if switch occurred normally then link was already removed in _notify_links
        # and there's no need to touch the links set.
        # Note, however, that if "Invalid switch" assert was removed and invalid switch
        # did happen, the link would remain, causing another invalid switch later in this greenlet.
        self.unlink(switch)
        raise
    if self.ready():
        if self.successful():
            return self.value
        self._raise_exception()
def server_is_alive(self):
    """Return True when a TCP connection to the remote host succeeds within 1s."""
    probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        # Cap the connection attempt at one second.
        with Timeout(1.0):
            probe.connect((self.remote.hostname, self.remote.port))
    except (socket.error, Timeout):
        # Refused, unreachable, or too slow — treat all as "not alive".
        return False
    finally:
        probe.close()
    return True
def callRemoteForResult(self, _name, *args, **kw):
    """Perform a remote call and block until the result arrives.

    @param _name: name of the remote method to invoke
    @param args: positional arguments forwarded to the remote method
    @param kw: keyword arguments forwarded to the remote method
    @return: whatever the remote method produced
    @raise gevent.Timeout: if no result arrives within ``self.timeout`` seconds
    """
    _key, result = AsyncResultFactory().createAsyncResult()
    self.broker._sendMessage(_key, _name, args, kw)
    # AsyncResult.get() expects a number of seconds and manages its own
    # timer internally; the previous code wrapped the value in a fresh,
    # unstarted Timeout object and relied on gevent internals to start it.
    return result.get(timeout=self.timeout)
def poll_all_received_events(self):
    """ This will be triggered once for each `echo_node_alarm_callback`.
    It polls all channels for `EventPaymentReceivedSuccess` events,
    adds all new events to the `self.received_transfers` queue and
    respawns `self.echo_node_worker`, if it died. """
    locked = False
    try:
        # Bound the whole poll; the finally below only releases the lock
        # if we actually acquired it.
        with Timeout(10):
            # Non-blocking acquire: if another poll is in progress, skip
            # this round instead of queueing up behind it.
            locked = self.lock.acquire(blocking=False)
            if not locked:
                return
            else:
                received_transfers = self.api.get_payment_history_for_token(
                    self.token_address,
                    from_block=self.last_poll_block,
                )
                # received transfer is a tuple of (block_number, event)
                # NOTE(review): exact-type match (not isinstance) — presumably
                # deliberate to exclude subclasses; confirm.
                received_transfers = [
                    (block_number, event)
                    for block_number, event in received_transfers
                    if type(event) == EventPaymentReceivedSuccess
                ]
                for _, event in received_transfers:
                    # Deep-copied so queued events are independent of the
                    # originals — presumably to guard against later mutation.
                    transfer = copy.deepcopy(event)
                    self.received_transfers.put(transfer)
                # set last_poll_block after events are enqueued (timeout safe)
                if received_transfers:
                    self.last_poll_block = max(
                        block_number
                        for block_number, _ in received_transfers
                    )
                # increase last_poll_block if the blockchain proceeded
                delta_blocks = self.api.raiden.get_block_number() - self.last_poll_block
                if delta_blocks > 1:
                    self.last_poll_block += 1
                # Respawn the worker greenlet if it is no longer running.
                if not self.echo_worker_greenlet.started:
                    log.debug(
                        'restarting echo_worker_greenlet',
                        dead=self.echo_worker_greenlet.dead,
                        successful=self.echo_worker_greenlet.successful(),
                        exception=self.echo_worker_greenlet.exception,
                    )
                    self.echo_worker_greenlet = gevent.spawn(self.echo_worker)
    except Timeout:
        log.info('timeout while polling for events')
    finally:
        if locked:
            self.lock.release()
def purge(self, queue):
    """Drain every item currently held in *queue*.

    @TODO could end up in a race with an infinite producer
    """
    assert queue in self._queues
    target = self._queues[queue]
    # Give up after five seconds in case a producer keeps refilling it.
    with Timeout(5):
        while not target.empty():
            target.get_nowait()
def join(self, timeout=None, raise_error=False): timeout = Timeout.start_new(timeout) try: try: while self.greenlets: joinall(self.greenlets, raise_error=raise_error) except Timeout, ex: if ex is not timeout: raise finally: timeout.cancel()
def get(self, block=True, timeout=None):
    """Return the stored value or raise the exception.

    If this instance already holds a value or an exception, return or raise it
    immediatelly. Otherwise, block until another greenlet calls :meth:`set` or
    :meth:`set_exception` or until the optional timeout occurs.

    When the *timeout* argument is present and not ``None``, it should be a
    floating point number specifying a timeout for the operation in seconds
    (or fractions thereof).

    :keyword bool block: If set to ``False`` and this instance is not ready,
        immediately raise a :class:`Timeout` exception.
    """
    # Fast path: a value or exception was already set.
    if self._value is not _NONE:
        return self._value
    if self._exc_info:
        return self._raise_exception()

    if not block:
        # Not ready and not blocking, so immediately timeout
        raise Timeout()

    # Register this greenlet to be switched back in by set()/set_exception().
    switch = getcurrent().switch
    self.rawlink(switch)
    try:
        # _start_new_or_dummy returns an inert timer when timeout is None.
        timer = Timeout._start_new_or_dummy(timeout)
        try:
            result = self.hub.switch()
            if result is not self:
                raise InvalidSwitchError('Invalid switch into AsyncResult.get(): %r' % (result, ))
        finally:
            timer.cancel()
    except:
        # Drop our switch callback before propagating (including when our
        # own Timeout fired); on the normal path the link was consumed.
        self.unlink(switch)
        raise
    # by definition we are now ready
    return self.get(block=False)
def connect(self):
    """Connect the outbound event socket and apply the configured event filter.

    Starts the event-handler greenlet first, then issues the ``connect``
    command under a timeout.  On any failure the session is disconnected
    before ConnectError is raised, so the event handler started above does
    not keep running against a dead session (this matches the other
    OutboundEventSocket.connect variant in this file).

    @raise ConnectError: on connect failure, connect timeout, or event
        filter failure.
    """
    super(OutboundEventSocket, self).connect()
    # Starts event handler for this client/session.
    self.start_event_handler()

    # Sends connect and sets timeout while connecting.
    timer = Timeout(self.transport.get_connect_timeout())
    timer.start()
    try:
        connect_response = self._protocol_send("connect")
        if not connect_response.is_success():
            # Tear down the session (and its event handler) before failing.
            self.disconnect()
            raise ConnectError("Error while connecting")
    except Timeout:
        self.disconnect()
        raise ConnectError("Timeout connecting")
    finally:
        timer.cancel()

    # Sets channel and channel unique id from this event
    self._channel = connect_response
    self._uuid = connect_response.get_header("Unique-ID")

    # Set connected flag to True
    self.connected = True

    # Sets event filter or raises ConnectError
    if self._filter:
        if self._is_eventjson:
            self.trace("using eventjson")
            filter_response = self.eventjson(self._filter)
        else:
            self.trace("using eventplain")
            filter_response = self.eventplain(self._filter)
        if not filter_response.is_success():
            self.disconnect()
            raise ConnectError("Event filter failure")
def kill(self, exception=GreenletExit, block=False, timeout=None):
    """Kill every greenlet in the pool by raising *exception* into it.

    @param exception: exception class raised inside each greenlet.
    @param block: when True, wait until all greenlets are dead.
    @param timeout: optional upper bound (seconds) on the wait.
    """
    timer = Timeout.start_new(timeout)
    try:
        while self.greenlets:
            # Iterate over a snapshot: greenlet.kill() may switch to the
            # hub, during which finished greenlets are discarded from
            # self.greenlets — mutating the set mid-iteration would raise
            # "Set changed size during iteration".
            for greenlet in list(self.greenlets):
                if greenlet not in self.dying:
                    greenlet.kill(exception)
                    self.dying.add(greenlet)
            if not block:
                break
            joinall(self.greenlets)
    finally:
        timer.cancel()
def download(url): data = '' retries = 0 timeout = 30.0 while not data and retries < 10: if retries > 0: print "retry %d ..." % retries with Timeout(timeout, False): data = urllib.urlopen(url).read().decode('cp1251', 'ignore') retries += 1 timeout *= 1.1 return data
def test(): result = [] start = time.time() print 'test start: %s' % start try: with Timeout(2, TimeOutException) as timeout: for t in TaskPool.imap_unordered(sub_task, xrange(10)): print t, time.time() result.append(t) except TimeOutException, e: print '*************time out*************'
def killall(greenlets, exception=GreenletExit, block=False, timeout=None):
    """Kill all *greenlets* by raising *exception* in each one.

    Blocking mode waits (optionally bounded by *timeout*) until every
    greenlet has actually died; otherwise the kill is merely scheduled.
    """
    if not block:
        # Fire-and-forget: schedule the kill on the event loop and return.
        core.active_event(_killall, greenlets, exception)
        return
    waiter = Waiter()
    core.active_event(_killall3, greenlets, exception, waiter)
    t = Timeout.start_new(timeout)
    try:
        alive = waiter.wait()
        if alive:
            joinall(alive, raise_error=False)
    finally:
        t.cancel()
def test_timeout(seconds, default): timeout = Timeout.start_new(seconds) try: try: return gsleep(5) except Timeout as t: # if sys.exc_info()[1] is timeout: if t is timeout: print 'timeout instance sys.exc_info()[1] is timout: %s' % (sys.exc_info()[1] is timeout) return default raise # not my timeout finally: print 'test_timeout: cancel timeout' timeout.cancel()
def _transfer_expired(
    initiator_app: App,
    target_app: App,
    token_address: TokenAddress,
    amount: PaymentAmount,
    identifier: PaymentID,
    timeout: Optional[float] = None,
) -> SecretHash:
    """Start a mediated transfer whose secret is never revealed and wait
    until the resulting lock expires on the target.

    Returns the secrethash of the expired transfer and asserts that the
    payment ultimately failed.
    """
    assert identifier is not None, "The identifier must be provided"
    assert isinstance(target_app.raiden.message_handler, WaitForMessage)

    # This timeout has to be larger then the lock expiration. The lock
    # expiration unit is block numbers, and its value is defined relative to
    # the node's reveal timeout configuration. For the integration tests the
    # reveal timeout is chosen proportionally to the number of nodes, 90
    # seconds is a rough default that should work with the standard
    # configuration.
    if timeout is None:
        timeout = 90

    secret, secrethash = make_secret_with_hash()
    # Registered before the transfer starts, so the LockExpired message
    # cannot race past the waiter.
    wait_for_remove_expired_lock = target_app.raiden.message_handler.wait_for_message(
        LockExpired, {"secrethash": secrethash}
    )

    token_network_registry_address = initiator_app.raiden.default_registry.address
    token_network_address = views.get_token_network_address_by_token_address(
        chain_state=views.state_from_app(initiator_app),
        token_network_registry_address=token_network_registry_address,
        token_address=token_address,
    )
    assert token_network_address

    payment_status = initiator_app.raiden.start_mediated_transfer_with_secret(
        token_network_address=token_network_address,
        amount=amount,
        target=TargetAddress(target_app.raiden.address),
        identifier=identifier,
        secret=secret,
        secrethash=secrethash,
    )

    # Wait for the lock to expire on the target, then confirm the payment
    # was reported as failed on the initiator.
    with Timeout(seconds=timeout):
        wait_for_remove_expired_lock.get()
        msg = (
            f"transfer from {to_checksum_address(initiator_app.raiden.address)} "
            f"to {to_checksum_address(target_app.raiden.address)} did not expire."
        )
        assert payment_status.payment_done.get() is False, msg

    return secrethash
def _get_instances(cluster):
    """Collect *cluster*'s instances for the current user, recording the
    cluster as bad on API error or RAPI timeout."""
    rapi_timer = Timeout(RAPI_TIMEOUT)
    rapi_timer.start()
    try:
        user_instances = cluster.get_user_instances(request.user)
        instances.extend(user_instances)
    except (GanetiApiError, Timeout):
        bad_clusters.append(cluster)
    finally:
        rapi_timer.cancel()
def _handle_request(self, listener_name, sock, addr):
    """Serve thrift calls on *sock* until an unknown function is requested
    or a call times out.

    NOTE(review): the outer ``try`` has no visible except/finally in this
    view — the function appears truncated here; confirm against the full
    file before relying on its cleanup behavior.
    """
    client = TFileObjectTransport(sock.makefile())
    itrans = self.tfactory.getTransport(client)
    otrans = self.tfactory.getTransport(client)
    iprot = self.pfactory.getProtocol(itrans)
    oprot = self.pfactory.getProtocol(otrans)
    try:
        while True:
            # Blocks until the client sends the next call header.
            # ('type' shadows the builtin; kept as-is.)
            (name, type, seqid) = iprot.readMessageBegin()
            request_start = time.time()
            try:
                # Bound each dispatched call with the configured timeout;
                # Timeout itself is used as the exception to raise.
                timeout_con = Timeout(self.cfg.timeout, Timeout)
                timeout_con.start()
                if name not in self.wsgi._processMap:
                    # Unknown method: drain the args struct, report the
                    # error back to the client, then bail out of the loop.
                    iprot.skip(TType.STRUCT)
                    iprot.readMessageEnd()
                    x = TApplicationException(TApplicationException.UNKNOWN_METHOD, "Unknown function %s" % (name))
                    oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid)
                    x.write(oprot)
                    oprot.writeMessageEnd()
                    oprot.trans.flush()
                    raise ThriftFuncNotFound
                else:
                    self.wsgi._processMap[name](self.wsgi, seqid, iprot, oprot)
            except ThriftFuncNotFound, ex:
                self.log.error("Unknown function %s" % (name))
                self.log.access(addr, name, "FUNC_NOT_FOUND", time.time() - request_start)
                break
            except Timeout, ex:
                self.log.error("A greenlet process timeout.")
                self.log.access(addr, name, "TIMEOUT", time.time() - request_start)
                break
def put(self, item, block=True, timeout=None):
    """Put an item into the queue.

    If optional arg *block* is true and *timeout* is ``None`` (the default),
    block if necessary until a free slot is available. If *timeout* is a
    positive number, it blocks at most *timeout* seconds and raises the
    :class:`Full` exception if no free slot was available within that time.
    Otherwise (*block* is false), put an item on the queue if a free slot is
    immediately available, else raise the :class:`Full` exception (*timeout*
    is ignored in that case).
    """
    if self.maxsize is None or self.qsize() < self.maxsize:
        # there's a free slot, put an item right away
        self._put(item)
        if self.getters:
            self._schedule_unlock()
    elif self.hub is getcurrent():
        # We're in the mainloop, so we cannot wait; we can switch to other greenlets though.
        # Check if possible to get a free slot in the queue.
        while self.getters and self.qsize() and self.qsize() >= self.maxsize:
            getter = self.getters.popleft()
            getter.switch(getter)
        if self.qsize() < self.maxsize:
            self._put(item)
            return
        raise Full
    elif block:
        # Queue is full: park this greenlet on an ItemWaiter until a getter
        # frees a slot or the timer fires.
        waiter = ItemWaiter(item, self)
        self.putters.append(waiter)
        # Only start a real timer when a timeout was requested.
        timeout = Timeout.start_new(timeout, Full) if timeout is not None else None
        try:
            if self.getters:
                self._schedule_unlock()
            result = waiter.get()
            if result is not waiter:
                raise InvalidSwitchError("Invalid switch into Queue.put: %r" % (result, ))
        finally:
            if timeout is not None:
                timeout.cancel()
            try:
                self.putters.remove(waiter)
            except ValueError:
                pass # removed by unlock
    else:
        raise Full
def wait(io, timeout=None, timeout_exc=timeout('timed out')):
    """Block the current greenlet until *io* is ready.

    If *timeout* is non-negative, then *timeout_exc* is raised after
    *timeout* second has passed. By default *timeout_exc* is
    ``socket.timeout('timed out')``.

    If :func:`cancel_wait` is called, raise ``socket.error(EBADF, 'File
    descriptor was closed in another greenlet')``.
    """
    assert io.callback is None, 'This socket is already used by another greenlet: %r' % (io.callback, )
    # Use a distinct name for the timer instead of rebinding 'timeout'.
    timer = None
    if timeout is not None:
        timer = Timeout.start_new(timeout, timeout_exc)
    try:
        return get_hub().wait(io)
    finally:
        if timer is not None:
            timer.cancel()
def killall(greenlets, exception=GreenletExit, block=True, timeout=None):
    """Raise *exception* in every greenlet of *greenlets*; when *block* is
    true, wait (bounded by *timeout*) until they are all dead."""
    if not greenlets:
        return
    loop = greenlets[0].loop
    if not block:
        # Non-blocking: just schedule the kills on the event loop.
        loop.run_callback(_killall, greenlets, exception)
        return
    waiter = Waiter()
    loop.run_callback(_killall3, greenlets, exception, waiter)
    timer = Timeout.start_new(timeout)
    try:
        survivors = waiter.get()
        if survivors:
            joinall(survivors, raise_error=False)
    finally:
        timer.cancel()
def put(self, item, block=True, timeout=None):
    """Put an item into the queue.

    If optional arg *block* is true and *timeout* is ``None`` (the default),
    block if necessary until a free slot is available. If *timeout* is a
    positive number, it blocks at most *timeout* seconds and raises the
    :class:`Full` exception if no free slot was available within that time.
    Otherwise (*block* is false), put an item on the queue if a free slot is
    immediately available, else raise the :class:`Full` exception (*timeout*
    is ignored in that case).
    """
    if self.maxsize is None or self.qsize() < self.maxsize:
        # there's a free slot, put an item right away
        self._put(item)
        if self.getters:
            self._schedule_unlock()
    elif not block and get_hub() is getcurrent():
        # we're in the mainloop, so we cannot wait; we can switch() to other greenlets though
        # find a getter and deliver an item to it
        while self.getters and self.qsize() and self.qsize() >= self.maxsize:
            getter = self.getters.pop()
            getter.switch(getter)
        if self.qsize() < self.maxsize:
            self._put(item)
            return
        raise Full
    elif block:
        # Park on an ItemWaiter until a getter makes room or the timer fires.
        waiter = ItemWaiter(item)
        self.putters.add(waiter)
        # NOTE(review): start_new is called even when timeout is None —
        # presumably that yields a timer that never fires; confirm against
        # this gevent version.
        timeout = Timeout.start_new(timeout, Full)
        try:
            if self.getters:
                self._schedule_unlock()
            result = waiter.get()
            assert result is waiter, "Invalid switch into Queue.put: %r" % (result, )
            # The getter clears waiter.item when it consumed it directly;
            # otherwise store the item ourselves now that a slot is free.
            if waiter.item is not _NONE:
                self._put(item)
        finally:
            timeout.cancel()
            self.putters.discard(waiter)
    else:
        raise Full