Example #1
    def __init__(self, discovery, udpsocket, throttle_policy, config):
        # these values are initialized by the start method
        self.queueids_to_queues: typing.Dict
        self.raiden: RaidenService

        self.discovery = discovery
        self.config = config

        self.retry_interval = config['retry_interval']
        self.retries_before_backoff = config['retries_before_backoff']
        self.nat_keepalive_retries = config['nat_keepalive_retries']
        self.nat_keepalive_timeout = config['nat_keepalive_timeout']
        self.nat_invitation_timeout = config['nat_invitation_timeout']

        self.event_stop = RaidenGreenletEvent()

        self.greenlets = list()
        self.addresses_events = dict()

        self.messageids_to_asyncresults = dict()

        # Maps the addresses to a dict with the latest nonce (using a dict
        # because python integers are immutable)
        self.nodeaddresses_to_nonces = dict()

        cache = cachetools.TTLCache(
            maxsize=50,
            ttl=CACHE_TTL,
        )
        cache_wrapper = cachetools.cached(cache=cache)
        self.get_host_port = cache_wrapper(discovery.get)

        self.throttle_policy = throttle_policy
        self.server = DatagramServer(udpsocket, handle=self._receive)
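
The last few lines of this constructor wrap the discovery lookup in a TTL cache so repeated `get_host_port` calls do not hit discovery on every send. A minimal, self-contained sketch of the same cachetools pattern; the lookup function and the `CACHE_TTL` value below are illustrative stand-ins:

import cachetools

CACHE_TTL = 60  # seconds; stand-in for the module-level constant used above

def discovery_get(address):
    # stand-in for discovery.get(address): resolve an address to (host, port)
    print('resolving', address)
    return ('127.0.0.1', 38647)

cache = cachetools.TTLCache(maxsize=50, ttl=CACHE_TTL)
get_host_port = cachetools.cached(cache=cache)(discovery_get)

get_host_port(b'\x01' * 20)  # first call performs the lookup
get_host_port(b'\x01' * 20)  # served from the cache until the TTL expires
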
Example #2
    def start_health_check(self, recipient):
        """ Starts a task for healthchecking `recipient` if there is not
        one yet.
        """
        if recipient not in self.addresses_events:
            ping_nonce = self.nodeaddresses_to_nonces.setdefault(
                recipient,
                {'nonce': 0},  # HACK: Allows the task to mutate the object
            )

            events = healthcheck.HealthEvents(
                event_healthy=RaidenGreenletEvent(),
                event_unhealthy=RaidenGreenletEvent(),
            )

            self.addresses_events[recipient] = events

            greenlet_healthcheck = gevent.spawn(
                healthcheck.healthcheck,
                self,
                recipient,
                self.event_stop,
                events.event_healthy,
                events.event_unhealthy,
                self.nat_keepalive_retries,
                self.nat_keepalive_timeout,
                self.nat_invitation_timeout,
                ping_nonce,
            )
            greenlet_healthcheck.name = f'Healthcheck for {pex(recipient)}'
            self.greenlets.append(greenlet_healthcheck)
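
The `{'nonce': 0}` value stored in `nodeaddresses_to_nonces` exists only so the spawned healthcheck task can mutate a counter shared with the transport; a bare integer could not be updated in place. A small sketch of that sharing pattern with a plain gevent greenlet (names are illustrative):

import gevent

nodeaddresses_to_nonces = {}

def healthcheck_task(ping_nonce):
    # the task increments the shared counter for every ping it sends
    for _ in range(3):
        ping_nonce['nonce'] += 1
        gevent.sleep(0)

recipient = b'\x02' * 20
ping_nonce = nodeaddresses_to_nonces.setdefault(recipient, {'nonce': 0})

gevent.spawn(healthcheck_task, ping_nonce).join()

# the caller sees the updated counter through the same dict object
assert nodeaddresses_to_nonces[recipient]['nonce'] == 3
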
Example #3
    def __init__(
            self,
            chain: BlockChainService,
            query_start_block: typing.BlockNumber,
            default_registry: TokenNetworkRegistry,
            default_secret_registry: SecretRegistry,
            private_key_bin,
            transport,
            config,
            discovery=None,
    ):
        if not isinstance(private_key_bin, bytes) or len(private_key_bin) != 32:
            raise ValueError('invalid private_key')

        self.tokennetworkids_to_connectionmanagers = dict()
        self.identifier_to_results = defaultdict(list)

        self.chain: BlockChainService = chain
        self.default_registry = default_registry
        self.query_start_block = query_start_block
        self.default_secret_registry = default_secret_registry
        self.config = config
        self.privkey = private_key_bin
        self.address = privatekey_to_address(private_key_bin)
        self.discovery = discovery

        self.private_key = PrivateKey(private_key_bin)
        self.pubkey = self.private_key.public_key.format(compressed=False)
        self.transport = transport

        self.blockchain_events = BlockchainEvents()
        self.alarm = AlarmTask(chain)
        self.shutdown_timeout = config['shutdown_timeout']
        self.stop_event = RaidenGreenletEvent()
        self.start_event = RaidenGreenletEvent()
        self.chain.client.inject_stop_event(self.stop_event)

        self.wal = None

        self.database_path = config['database_path']
        if self.database_path != ':memory:':
            database_dir = os.path.dirname(config['database_path'])
            os.makedirs(database_dir, exist_ok=True)

            self.database_dir = database_dir
            # Prevent concurrent access to the same db
            self.lock_file = os.path.join(self.database_dir, '.lock')
            self.db_lock = filelock.FileLock(self.lock_file)
        else:
            self.database_path = ':memory:'
            self.database_dir = None
            self.lock_file = None
            self.serialization_file = None
            self.db_lock = None

        self.event_poll_lock = gevent.lock.Semaphore()
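
The `.lock` file created next to the database guards against two Raiden instances opening the same datadir; `start_async` later acquires it with a zero timeout so a second instance fails fast. A short sketch of that filelock usage (the directory path is illustrative):

import os
import filelock

database_dir = '/tmp/raiden-example-node'  # illustrative path
os.makedirs(database_dir, exist_ok=True)

lock_file = os.path.join(database_dir, '.lock')
db_lock = filelock.FileLock(lock_file)

# timeout=0 raises filelock.Timeout immediately if another process holds the lock
db_lock.acquire(timeout=0)
try:
    assert db_lock.is_locked
    # ... open the sqlite database and run the node ...
finally:
    db_lock.release()
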
Example #4
def event_first_of(*events: _AbstractLinkable) -> Event:
    """ Waits until one of `events` is set.

    The returned event is /not/ cleared when any of the `events` is cleared,
    so it must not be reused if the clearing behavior is relied upon.
    """
    first_finished = RaidenGreenletEvent()

    if not all(isinstance(e, _AbstractLinkable) for e in events):
        raise ValueError('all events must be linkable')

    for event in events:
        event.rawlink_safe(lambda _: first_finished.set())

    return first_finished
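
`event_first_of` combines several waitable events into one: each source event gets a raw link callback that sets a fresh result event. The same pattern expressed with stock gevent primitives looks like the sketch below; plain `gevent.event.Event` exposes `rawlink` rather than the `rawlink_safe` wrapper used above:

import gevent
from gevent.event import Event

def first_of(*events):
    # wait until any of `events` is set; the returned event is never cleared
    first_finished = Event()
    for event in events:
        event.rawlink(lambda _: first_finished.set())
    return first_finished

stop = Event()
healthy = Event()

combined = first_of(stop, healthy)
gevent.spawn_later(0.1, healthy.set)

combined.wait()  # returns as soon as either source event is set
assert combined.is_set()
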
Example #5
    def __init__(self, api, token_address):
        assert isinstance(api, RaidenAPI)
        self.ready = RaidenGreenletEvent()

        self.api = api
        self.token_address = token_address

        existing_channels = self.api.get_channel_list(
            api.raiden.default_registry.address,
            self.token_address,
        )

        open_channels = [
            channel_state
            for channel_state in existing_channels
            if channel.get_status(channel_state) == CHANNEL_STATE_OPENED
        ]

        if len(open_channels) == 0:
            token = self.api.raiden.chain.token(self.token_address)
            if not token.balance_of(self.api.raiden.address) > 0:
                raise ValueError('not enough funds for echo node %s for token %s' % (
                    pex(self.api.raiden.address),
                    pex(self.token_address),
                ))
            self.api.token_network_connect(
                self.api.raiden.default_registry.address,
                self.token_address,
                token.balance_of(self.api.raiden.address),
                initial_channel_target=10,
                joinable_funds_target=.5,
            )

        self.last_poll_block = self.api.raiden.get_block_number()
        self.received_transfers = Queue()
        self.stop_signal = None  # used to signal REMOVE_CALLBACK and stop echo_workers
        self.greenlets = list()
        self.lock = BoundedSemaphore()
        self.seen_transfers = deque(list(), TRANSFER_MEMORY)
        self.num_handled_transfers = 0
        self.lottery_pool = Queue()
        # register ourselves with the raiden alarm task
        self.api.raiden.alarm.register_callback(self.echo_node_alarm_callback)
        self.echo_worker_greenlet = gevent.spawn(self.echo_worker)
        log.info('Echo node started')
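
`seen_transfers` above is a bounded deque: once `TRANSFER_MEMORY` identifiers have been recorded, the oldest entry is dropped automatically, which keeps duplicate detection cheap without unbounded growth. A tiny sketch of the idiom (the `TRANSFER_MEMORY` value here is illustrative):

from collections import deque

TRANSFER_MEMORY = 4096  # illustrative; a module-level constant in the original

seen_transfers = deque([], TRANSFER_MEMORY)

def handle_transfer(transfer_id):
    if transfer_id in seen_transfers:
        return  # duplicate, already echoed
    seen_transfers.append(transfer_id)
    # ... echo the transfer back to the sender ...

handle_transfer(1)
handle_transfer(1)  # ignored: still within the memory window
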
Example #6
class Miner(RaidenGreenlet):
    def __init__(self, web3, mine_sleep=1):
        super().__init__()
        self.web3 = web3
        self.mine_sleep = mine_sleep
        self.stop = RaidenGreenletEvent()

    def _run(self):
        while not self.stop.is_set():
            #  tester miner sleeps for 1 sec by default, which is the same
            #  period as tester geth is using
            #  (see: raiden/tests/utils/geth.py:geth_generate_poa_genesis())
            self.web3.testing.mine(1)
            gevent.sleep(self.mine_sleep)
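
A hedged usage sketch for the `Miner` greenlet, assuming a Web3 instance backed by eth-tester (the `web3[tester]` extra) so that `web3.testing.mine()` is available, as the comment in `_run` implies; `Miner` and `RaidenGreenletEvent` come from the snippet above and `RaidenGreenlet` is assumed to behave like `gevent.Greenlet`:

import gevent
from web3 import Web3, EthereumTesterProvider

web3 = Web3(EthereumTesterProvider())  # in-memory test chain, requires eth-tester

miner = Miner(web3, mine_sleep=1)
miner.start()

gevent.sleep(3)  # a few blocks get mined in the background

miner.stop.set()  # _run checks this event on every iteration
miner.join()
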
Example #7
    def _run_app(self):
        from raiden.ui.console import Console
        from raiden.api.python import RaidenAPI

        # this catches exceptions raised when waiting for the stalecheck to complete
        try:
            app_ = run_app(**self._options)
        except EthNodeCommunicationError:
            print(
                '\n'
                'Could not contact the ethereum node through JSON-RPC.\n'
                'Please make sure that JSON-RPC is enabled for these interfaces:\n'
                '\n'
                '    eth_*, net_*, web3_*\n'
                '\n'
                'geth: https://github.com/ethereum/go-ethereum/wiki/Management-APIs\n',
            )
            sys.exit(1)

        domain_list = []
        if self._options['rpccorsdomain']:
            if ',' in self._options['rpccorsdomain']:
                for domain in self._options['rpccorsdomain'].split(','):
                    domain_list.append(str(domain))
            else:
                domain_list.append(str(self._options['rpccorsdomain']))

        self._raiden_api = RaidenAPI(app_.raiden)

        api_server = None
        if self._options['rpc']:
            rest_api = RestAPI(self._raiden_api)
            api_server = APIServer(
                rest_api,
                cors_domain_list=domain_list,
                web_ui=self._options['web_ui'],
                eth_rpc_endpoint=self._options['eth_rpc_endpoint'],
            )
            (api_host, api_port) = split_endpoint(self._options['api_address'])

            try:
                api_server.start(api_host, api_port)
            except APIServerPortInUseError:
                print(
                    'ERROR: API Address %s:%s is in use. '
                    'Use --api-address <host:port> to specify port to listen on.'
                    % (api_host, api_port), )
                sys.exit(1)

            print(
                'The Raiden API RPC server is now running at http://{}:{}/.\n\n'
                'See the Raiden documentation for all available endpoints at\n'
                'http://raiden-network.readthedocs.io/en/stable/rest_api.html'.format(
                    api_host,
                    api_port,
                ),
            )

        if self._options['console']:
            console = Console(app_)
            console.start()

        # spawn a greenlet to handle the version checking
        gevent.spawn(check_version)
        # spawn a greenlet to handle the gas reserve check
        gevent.spawn(check_gas_reserve, app_.raiden)

        self._startup_hook()

        # wait for interrupt
        event = RaidenGreenletEvent()
        gevent.signal(signal.SIGQUIT, event.set)
        gevent.signal(signal.SIGTERM, event.set)
        gevent.signal(signal.SIGINT, event.set)

        try:
            event.wait()
            print('Signal received. Shutting down ...')
        except RaidenError as ex:
            click.secho(f'FATAL: {ex}', fg='red')
        except Exception as ex:
            with NamedTemporaryFile(
                    'w',
                    prefix=f'raiden-exception-{datetime.utcnow():%Y-%m-%dT%H-%M}',
                    suffix='.txt',
                    delete=False,
            ) as traceback_file:
                traceback.print_exc(file=traceback_file)
                click.secho(
                    f'FATAL: An unexpected exception occurred. '
                    f'A traceback has been written to {traceback_file.name}\n'
                    f'{ex}',
                    fg='red',
                )
        finally:
            self._shutdown_hook()
            if api_server:
                api_server.stop()

        return app_
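
The tail of `_run_app` is a common gevent idiom: park the main greenlet on an event and let signal handlers set it, so all shutdown logic runs in one place. A standalone sketch of that wait-for-signal pattern; newer gevent releases spell the helper `gevent.signal_handler`, while the snippet above uses the older callable form `gevent.signal(...)`:

import signal
import gevent
from gevent.event import Event

stop_event = Event()

# register the same handler for the usual termination signals (POSIX)
for signum in (signal.SIGQUIT, signal.SIGTERM, signal.SIGINT):
    gevent.signal_handler(signum, stop_event.set)

print('running; send SIGINT/SIGTERM to stop')
stop_event.wait()  # the main greenlet sleeps here until a signal arrives
print('signal received, shutting down')
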
Example #8
class UDPTransport:
    UDP_MAX_MESSAGE_SIZE = 1200

    def __init__(self, discovery, udpsocket, throttle_policy, config):
        # these values are initialized by the start method
        self.queueids_to_queues: typing.Dict
        self.raiden: RaidenService

        self.discovery = discovery
        self.config = config

        self.retry_interval = config['retry_interval']
        self.retries_before_backoff = config['retries_before_backoff']
        self.nat_keepalive_retries = config['nat_keepalive_retries']
        self.nat_keepalive_timeout = config['nat_keepalive_timeout']
        self.nat_invitation_timeout = config['nat_invitation_timeout']

        self.event_stop = RaidenGreenletEvent()

        self.greenlets = list()
        self.addresses_events = dict()

        self.messageids_to_asyncresults = dict()

        # Maps the addresses to a dict with the latest nonce (using a dict
        # because python integers are immutable)
        self.nodeaddresses_to_nonces = dict()

        cache = cachetools.TTLCache(
            maxsize=50,
            ttl=CACHE_TTL,
        )
        cache_wrapper = cachetools.cached(cache=cache)
        self.get_host_port = cache_wrapper(discovery.get)

        self.throttle_policy = throttle_policy
        self.server = DatagramServer(udpsocket, handle=self._receive)

    def start(
        self,
        raiden: RaidenService,
        queueids_to_queues: typing.Dict[typing.QueueIdentifier,
                                        typing.List[Event]],
    ):
        self.raiden = raiden
        self.queueids_to_queues = dict()

        # server.stop() clears the handle. Since this may be a restart the
        # handle must always be set
        self.server.set_handle(self._receive)

        for queue_identifier, queue in queueids_to_queues.items():
            encoded_queue = list()

            for sendevent in queue:
                message = message_from_sendevent(sendevent, raiden.address)
                raiden.sign(message)
                encoded = message.encode()

                encoded_queue.append((encoded, sendevent.message_identifier))

            self.init_queue_for(queue_identifier, encoded_queue)

        self.server.start()

    def stop_and_wait(self):
        # Stop handling incoming packets, but don't close the socket. The
        # socket can only be safely closed after all outgoing tasks are stopped
        self.server.stop_accepting()

        # Stop processing the outgoing queues
        self.event_stop.set()
        gevent.wait(self.greenlets)

        # All outgoing tasks are stopped. Now it's safe to close the socket. At
        # this point there might be some incoming message being processed,
        # keeping the socket open is not useful for these.
        self.server.stop()

        # Calling `.close()` on a gevent socket doesn't actually close the underlying os socket
        # so we do that ourselves here.
        # See: https://github.com/gevent/gevent/blob/master/src/gevent/_socket2.py#L208
        # and: https://groups.google.com/forum/#!msg/gevent/Ro8lRra3nH0/ZENgEXrr6M0J
        try:
            self.server._socket.close()  # pylint: disable=protected-access
        except socket.error:
            pass

        # Set all the pending results to False
        for async_result in self.messageids_to_asyncresults.values():
            async_result.set(False)

    def get_health_events(self, recipient):
        """ Starts a healthcheck task for `recipient` and returns a
        HealthEvents with locks to react on its current state.
        """
        if recipient not in self.addresses_events:
            self.start_health_check(recipient)

        return self.addresses_events[recipient]

    def start_health_check(self, recipient):
        """ Starts a task for healthchecking `recipient` if there is not
        one yet.
        """
        if recipient not in self.addresses_events:
            ping_nonce = self.nodeaddresses_to_nonces.setdefault(
                recipient,
                {'nonce': 0},  # HACK: Allows the task to mutate the object
            )

            events = healthcheck.HealthEvents(
                event_healthy=RaidenGreenletEvent(),
                event_unhealthy=RaidenGreenletEvent(),
            )

            self.addresses_events[recipient] = events

            greenlet_healthcheck = gevent.spawn(
                healthcheck.healthcheck,
                self,
                recipient,
                self.event_stop,
                events.event_healthy,
                events.event_unhealthy,
                self.nat_keepalive_retries,
                self.nat_keepalive_timeout,
                self.nat_invitation_timeout,
                ping_nonce,
            )
            greenlet_healthcheck.name = f'Healthcheck for {pex(recipient)}'
            self.greenlets.append(greenlet_healthcheck)

    def init_queue_for(
        self,
        queue_identifier: typing.QueueIdentifier,
        items: typing.List[QueueItem_T],
    ) -> Queue_T:
        """ Create the queue identified by the queue_identifier
        and initialize it with `items`.
        """
        recipient = queue_identifier.recipient
        queue = self.queueids_to_queues.get(queue_identifier)
        assert queue is None

        queue = NotifyingQueue(items=items)
        self.queueids_to_queues[queue_identifier] = queue

        events = self.get_health_events(recipient)

        greenlet_queue = gevent.spawn(
            single_queue_send,
            self,
            recipient,
            queue,
            self.event_stop,
            events.event_healthy,
            events.event_unhealthy,
            self.retries_before_backoff,
            self.retry_interval,
            self.retry_interval * 10,
        )

        if queue_identifier.channel_identifier == CHANNEL_IDENTIFIER_GLOBAL_QUEUE:
            greenlet_queue.name = f'Queue for {pex(recipient)} - global'
        else:
            greenlet_queue.name = (
                f'Queue for {pex(recipient)} - {queue_identifier.channel_identifier}'
            )

        self.greenlets.append(greenlet_queue)

        log.debug(
            'new queue created for',
            node=pex(self.raiden.address),
            queue_identifier=queue_identifier,
        )

        return queue

    def get_queue_for(
        self,
        queue_identifier: typing.QueueIdentifier,
    ) -> Queue_T:
        """ Return the queue identified by the given queue identifier.

        If the queue doesn't exist it will be instantiated.
        """
        queue = self.queueids_to_queues.get(queue_identifier)

        if queue is None:
            items = ()
            queue = self.init_queue_for(queue_identifier, items)

        return queue

    def send_async(
        self,
        queue_identifier: typing.QueueIdentifier,
        message: 'Message',
    ):
        """ Send a new ordered message to recipient.

        Messages that use the same `queue_identifier` are ordered.
        """
        recipient = queue_identifier.recipient
        if not is_binary_address(recipient):
            raise ValueError('Invalid address {}'.format(pex(recipient)))

        # These are not protocol messages, but transport specific messages
        if isinstance(message, (Delivered, Ping, Pong)):
            raise ValueError('Do not use send for {} messages'.format(
                message.__class__.__name__))

        messagedata = message.encode()
        if len(messagedata) > self.UDP_MAX_MESSAGE_SIZE:
            raise ValueError(
                'message size exceeds the maximum {}'.format(
                    self.UDP_MAX_MESSAGE_SIZE), )

        # message identifiers must be unique
        message_id = message.message_identifier

        # ignore duplicates
        if message_id not in self.messageids_to_asyncresults:
            self.messageids_to_asyncresults[message_id] = RaidenAsyncResult()

            queue = self.get_queue_for(queue_identifier)
            queue.put((messagedata, message_id))

            log.debug(
                'MESSAGE QUEUED',
                node=pex(self.raiden.address),
                queue_identifier=queue_identifier,
                message=message,
            )

    def maybe_send(self, recipient: typing.Address, message: Message):
        """ Send message to recipient if the transport is running. """

        if not is_binary_address(recipient):
            raise InvalidAddress('Invalid address {}'.format(pex(recipient)))

        messagedata = message.encode()
        host_port = self.get_host_port(recipient)

        self.maybe_sendraw(host_port, messagedata)

    def maybe_sendraw_with_result(
        self,
        recipient: typing.Address,
        messagedata: bytes,
        message_id: typing.MessageID,
    ) -> AsyncResult:
        """ Send message to recipient if the transport is running.

        Returns:
            An AsyncResult that will be set once the message is delivered. As
            long as the message has not been acknowledged with a Delivered
            message the function will return the same AsyncResult.
        """
        async_result = self.messageids_to_asyncresults.get(message_id)
        if async_result is None:
            async_result = RaidenAsyncResult()
            self.messageids_to_asyncresults[message_id] = async_result

        host_port = self.get_host_port(recipient)
        self.maybe_sendraw(host_port, messagedata)

        return async_result

    def maybe_sendraw(self, host_port: typing.Tuple[int, int],
                      messagedata: bytes):
        """ Send message to recipient if the transport is running. """

        # Don't sleep if timeout is zero, otherwise a context-switch is done
        # and the message is delayed, increasing its latency
        sleep_timeout = self.throttle_policy.consume(1)
        if sleep_timeout:
            gevent.sleep(sleep_timeout)

        # Check the udp socket is still available before trying to send the
        # message. There must be *no context-switches after this test*.
        if hasattr(self.server, 'socket'):
            self.server.sendto(
                messagedata,
                host_port,
            )

    def _receive(self, data, host_port):  # pylint: disable=unused-argument
        try:
            self.receive(data)
        except RaidenShuttingDown:  # For a clean shutdown
            return

    def receive(self, messagedata: bytes):
        """ Handle an UDP packet. """
        # pylint: disable=unidiomatic-typecheck

        if len(messagedata) > self.UDP_MAX_MESSAGE_SIZE:
            log.error(
                'INVALID MESSAGE: Packet larger than maximum size',
                node=pex(self.raiden.address),
                message=hexlify(messagedata),
                length=len(messagedata),
            )
            return

        message = decode(messagedata)

        if type(message) == Pong:
            self.receive_pong(message)
        elif type(message) == Ping:
            self.receive_ping(message)
        elif type(message) == Delivered:
            self.receive_delivered(message)
        elif message is not None:
            self.receive_message(message)
        else:
            log.error(
                'INVALID MESSAGE: Unknown cmdid',
                node=pex(self.raiden.address),
                message=hexlify(messagedata),
            )

    def receive_message(self, message: Message):
        """ Handle a Raiden protocol message.

        The protocol requires durability of the messages. The UDP transport
        relies on the node's WAL for durability. The message will be converted
        to a state change, saved to the WAL, and *processed* before the
        durability is confirmed, which is a stronger property than what is
        required of any transport.
        """
        # pylint: disable=unidiomatic-typecheck

        if on_message(self.raiden, message):

            # Sending Delivered after the message is decoded and *processed*
            # gives a stronger guarantee than what is required from a
            # transport.
            #
            # Alternatives are, from weakest to strongest options:
            # - Just save it on disk and asynchronously process the messages
            # - Decode it, save to the WAL, and asynchronously process the
            #   state change
            # - Decode it, save to the WAL, and process it (the current
            #   implementation)
            delivered_message = Delivered(message.message_identifier)
            self.raiden.sign(delivered_message)

            self.maybe_send(
                message.sender,
                delivered_message,
            )

    def receive_delivered(self, delivered: Delivered):
        """ Handle a Delivered message.

        The Delivered message is how the UDP transport guarantees persistence
        by the partner node. The message itself is not part of the raiden
        protocol, but this transport relies on it to provide the required
        delivery properties.
        """
        processed = ReceiveDelivered(delivered.delivered_message_identifier)
        self.raiden.handle_state_change(processed)

        message_id = delivered.delivered_message_identifier
        async_result = self.raiden.transport.messageids_to_asyncresults.get(
            message_id)

        # clear the async result, otherwise we have a memory leak
        if async_result is not None:
            del self.messageids_to_asyncresults[message_id]
            async_result.set()

    # Pings and Pongs are used to check the health status of another node. They
    # are /not/ part of the raiden protocol, only part of the UDP transport,
    # therefore these messages are not forwarded to the message handler.
    def receive_ping(self, ping: Ping):
        """ Handle a Ping message by answering with a Pong. """

        log.debug(
            'PING RECEIVED',
            node=pex(self.raiden.address),
            message_id=ping.nonce,
            message=ping,
            sender=pex(ping.sender),
        )

        pong = Pong(ping.nonce)
        self.raiden.sign(pong)

        try:
            self.maybe_send(ping.sender, pong)
        except (InvalidAddress, UnknownAddress) as e:
            log.debug("Couldn't send the `Delivered` message", e=e)

    def receive_pong(self, pong: Pong):
        """ Handles a Pong message. """

        message_id = ('ping', pong.nonce, pong.sender)
        async_result = self.messageids_to_asyncresults.get(message_id)

        if async_result is not None:
            log.debug(
                'PONG RECEIVED',
                node=pex(self.raiden.address),
                sender=pex(pong.sender),
                message_id=pong.nonce,
            )

            async_result.set(True)

    def get_ping(self, nonce: int) -> bytes:
        """ Returns a signed and encoded Ping message.

        Note: Ping messages don't have an enforced ordering, so a Ping message
        with a higher nonce may be acknowledged first.
        """
        message = Ping(nonce)
        self.raiden.sign(message)
        message_data = message.encode()

        return message_data

    def set_node_network_state(self, node_address: typing.Address, node_state):
        state_change = ActionChangeNodeNetworkState(node_address, node_state)
        self.raiden.handle_state_change(state_change)
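
The reliability of this transport hinges on `messageids_to_asyncresults`: `send_async` registers an `AsyncResult` per message identifier, the retry loop keeps resending until it is set, and `receive_delivered` sets and drops it once the partner acknowledges. A stripped-down sketch of that acknowledgement bookkeeping with plain gevent objects (message ids and timings are placeholders):

import gevent
from gevent.event import AsyncResult

messageids_to_asyncresults = {}

def send_async(message_id):
    # register the pending message once; duplicate sends reuse the same result
    if message_id not in messageids_to_asyncresults:
        messageids_to_asyncresults[message_id] = AsyncResult()
    return messageids_to_asyncresults[message_id]

def receive_delivered(message_id):
    # acknowledgement from the partner: resolve and drop the pending entry
    async_result = messageids_to_asyncresults.pop(message_id, None)
    if async_result is not None:
        async_result.set(True)

pending = send_async(message_id=1)
gevent.spawn_later(0.1, receive_delivered, 1)

assert pending.get(timeout=1) is True  # unblocks once the Delivered arrives
assert 1 not in messageids_to_asyncresults
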
Example #9
def run(
    privatekey,
    registry_contract_address,
    secret_registry_contract_address,
    discovery_contract_address,
    listen_address,
    structlog,
    logfile,
    scenario,
    stage_prefix,
):  # pylint: disable=unused-argument

    # TODO: only enable structlog on "initiators"
    structlog.configure(structlog, log_file=logfile)

    (listen_host, listen_port) = split_endpoint(listen_address)

    config = App.DEFAULT_CONFIG.copy()
    config['transport']['udp']['host'] = listen_host
    config['transport']['udp']['port'] = listen_port
    config['privatekey_hex'] = privatekey

    privatekey_bin = decode_hex(privatekey)

    web3 = Web3(HTTPProvider('http://127.0.0.1:8545'))
    rpc_client = JSONRPCClient(web3, privatekey_bin)

    blockchain_service = BlockChainService(privatekey_bin, rpc_client)

    discovery = ContractDiscovery(
        blockchain_service,
        decode_hex(discovery_contract_address),
    )

    registry = blockchain_service.token_network_registry(
        registry_contract_address, )

    secret_registry = blockchain_service.secret_registry(
        secret_registry_contract_address, )

    throttle_policy = TokenBucket(
        config['protocol']['throttle_capacity'],
        config['protocol']['throttle_fill_rate'],
    )

    transport = UDPTransport(
        discovery=discovery,
        udpsocket=gevent.server._udp_socket((listen_host, listen_port)),
        throttle_policy=throttle_policy,
        config=config['transport']['udp'],
    )

    raiden_event_handler = RaidenEventHandler()

    app = App(
        config=config,
        chain=blockchain_service,
        query_start_block=0,
        default_registry=registry,
        default_secret_registry=secret_registry,
        transport=transport,
        raiden_event_handler=raiden_event_handler,
        discovery=discovery,
    )
    app.start()

    app.discovery.register(
        app.raiden.address,
        listen_host,
        listen_port,
    )

    from_block = 0
    app.raiden.install_all_blockchain_filters(
        app.raiden.default_registry,
        app.raiden.default_secret_registry,
        from_block,
    )

    if scenario:
        script = json.load(scenario)

        tools = ConsoleTools(
            app.raiden,
            app.discovery,
            app.config['settle_timeout'],
        )

        transfers_by_peer = {}

        tokens = script['tokens']
        token_address = None
        peer = None
        our_node = hexlify(app.raiden.address).decode()
        log.warning('our address is {}'.format(our_node))
        for token in tokens:
            # skip tokens that we're not part of
            nodes = token['channels']
            if our_node not in nodes:
                continue

            partner_nodes = [node for node in nodes if node != our_node]

            # allow for prefunded tokens
            if 'token_address' in token:
                token_address = token['token_address']
            else:
                token_address = tools.create_token(registry_contract_address)

            transfers_with_amount = token['transfers_with_amount']

            # FIXME: in order to do bidirectional channels, only one side
            # (i.e. only token['channels'][0]) should
            # open; others should join by calling
            # raiden.api.deposit, AFTER the channel came alive!

            # NOTE: leaving unidirectional for now because it most
            #       probably will get to higher throughput

            log.warning('Waiting for all nodes to come online')

            api = RaidenAPI(app.raiden)

            for node in partner_nodes:
                api.start_health_check_for(node)

            while True:
                all_reachable = all(
                    api.get_node_network_state(node) == NODE_NETWORK_REACHABLE
                    for node in partner_nodes)

                if all_reachable:
                    break

                gevent.sleep(5)

            log.warning('All nodes are online')

            if our_node != nodes[-1]:
                our_index = nodes.index(our_node)
                peer = nodes[our_index + 1]

                tools.token_network_register(
                    app.raiden.default_registry.address, token_address)
                amount = transfers_with_amount[nodes[-1]]

                while True:
                    try:
                        app.discovery.get(decode_hex(peer))
                        break
                    except KeyError:
                        log.warning(
                            'Error: peer {} not found in discovery'.format(
                                peer))
                        time.sleep(random.randrange(30))

                while True:
                    try:
                        log.warning('Opening channel with {} for {}'.format(
                            peer, token_address))
                        api.channel_open(app.raiden.default_registry.address,
                                         token_address, peer)
                        break
                    except KeyError:
                        log.warning(
                            'Error: could not open channel with {}'.format(
                                peer))
                        time.sleep(random.randrange(30))

                while True:
                    try:
                        log.warning('Funding channel with {} for {}'.format(
                            peer, token_address))
                        api.channel_deposit(
                            app.raiden.default_registry.address,
                            token_address,
                            peer,
                            amount,
                        )
                        break
                    except Exception:
                        log.warning(
                            'Error: could not deposit {} for {}'.format(
                                amount, peer))
                        time.sleep(random.randrange(30))

                if our_index == 0:
                    last_node = nodes[-1]
                    transfers_by_peer[last_node] = int(amount)

        if stage_prefix is not None:
            open('{}.stage1'.format(stage_prefix), 'a').close()
            log.warning('Done with initialization, waiting to continue...')
            event = RaidenGreenletEvent()
            gevent.signal(signal.SIGUSR2, event.set)
            event.wait()

        transfer_results = {'total_time': 0, 'timestamps': []}

        def transfer(token_address, amount_per_transfer, total_transfers, peer,
                     is_async):
            def transfer_():
                log.warning('Making {} transfers to {}'.format(
                    total_transfers, peer))
                initial_time = time.time()
                times = [0] * total_transfers
                for index in range(total_transfers):
                    RaidenAPI(app.raiden).transfer(
                        app.raiden.default_registry.address,
                        decode_hex(token_address),
                        amount_per_transfer,
                        peer,
                    )
                    times[index] = time.time()

                transfer_results['total_time'] = time.time() - initial_time
                transfer_results['timestamps'] = times

                log.warning('Making {} transfers took {}'.format(
                    total_transfers, transfer_results['total_time']))
                log.warning('Times: {}'.format(times))

            if is_async:
                return gevent.spawn(transfer_)
            else:
                transfer_()

        # If sending to multiple targets, do it asynchronously, otherwise
        # keep it simple and just send to the single target on my thread.
        if len(transfers_by_peer) > 1:
            greenlets = []
            for peer_, amount in transfers_by_peer.items():
                greenlet = transfer(token_address, 1, amount, peer_, True)
                if greenlet is not None:
                    greenlets.append(greenlet)

            gevent.joinall(greenlets)

        elif len(transfers_by_peer) == 1:
            for peer_, amount in transfers_by_peer.items():
                transfer(token_address, 1, amount, peer_, False)

        log.warning('Waiting for termination')

        open('{}.stage2'.format(stage_prefix), 'a').close()
        log.warning('Waiting for transfers to finish, will write results...')
        event = RaidenGreenletEvent()
        gevent.signal(signal.SIGUSR2, event.set)
        event.wait()

        open('{}.stage3'.format(stage_prefix), 'a').close()
        event = RaidenGreenletEvent()
        gevent.signal(signal.SIGQUIT, event.set)
        gevent.signal(signal.SIGTERM, event.set)
        gevent.signal(signal.SIGINT, event.set)
        event.wait()

    else:
        log.warning('No scenario file supplied, doing nothing!')

        open('{}.stage2'.format(stage_prefix), 'a').close()
        event = RaidenGreenletEvent()
        gevent.signal(signal.SIGQUIT, event.set)
        gevent.signal(signal.SIGTERM, event.set)
        gevent.signal(signal.SIGINT, event.set)
        event.wait()

    app.stop()
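
The `throttle_policy` passed to `UDPTransport` is a token bucket: `maybe_sendraw` (Example #8) calls `consume(1)` before every send and sleeps for whatever delay is returned. A from-scratch sketch of a bucket exposing that interface; the capacity and fill rate mirror the config keys used above, but the implementation details here are illustrative rather than Raiden's exact code:

import time

class SimpleTokenBucket:
    """consume(tokens) returns the seconds to wait before sending."""

    def __init__(self, capacity, fill_rate):
        self.capacity = float(capacity)    # maximum burst size in tokens
        self.fill_rate = float(fill_rate)  # tokens replenished per second
        self.tokens = self.capacity
        self.timestamp = time.monotonic()

    def consume(self, tokens):
        now = time.monotonic()
        # refill proportionally to the elapsed time, capped at capacity
        elapsed = now - self.timestamp
        self.tokens = min(self.capacity, self.tokens + elapsed * self.fill_rate)
        self.timestamp = now
        self.tokens -= tokens
        if self.tokens < 0:
            return -self.tokens / self.fill_rate  # wait until enough tokens refill
        return 0

throttle_policy = SimpleTokenBucket(capacity=10, fill_rate=10)
print(throttle_policy.consume(1))  # 0.0 while the bucket still has tokens
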
Example #10
    def __init__(self, web3, mine_sleep=1):
        super().__init__()
        self.web3 = web3
        self.mine_sleep = mine_sleep
        self.stop = RaidenGreenletEvent()
Example #11
class RaidenService:
    """ A Raiden node. """

    def __init__(
            self,
            chain: BlockChainService,
            query_start_block: typing.BlockNumber,
            default_registry: TokenNetworkRegistry,
            default_secret_registry: SecretRegistry,
            private_key_bin,
            transport,
            config,
            discovery=None,
    ):
        if not isinstance(private_key_bin, bytes) or len(private_key_bin) != 32:
            raise ValueError('invalid private_key')

        self.tokennetworkids_to_connectionmanagers = dict()
        self.identifier_to_results: typing.Dict[
            typing.PaymentIdentifier,
            RaidenAsyncResult,
        ] = dict()

        self.chain: BlockChainService = chain
        self.default_registry = default_registry
        self.query_start_block = query_start_block
        self.default_secret_registry = default_secret_registry
        self.config = config
        self.privkey = private_key_bin
        self.address = privatekey_to_address(private_key_bin)
        self.discovery = discovery

        self.private_key = PrivateKey(private_key_bin)
        self.pubkey = self.private_key.public_key.format(compressed=False)
        self.transport = transport

        self.blockchain_events = BlockchainEvents()
        self.alarm = AlarmTask(chain)
        self.shutdown_timeout = config['shutdown_timeout']
        self.stop_event = RaidenGreenletEvent()
        self.start_event = RaidenGreenletEvent()
        self.chain.client.inject_stop_event(self.stop_event)

        self.wal = None
        self.snapshot_group = 0

        # This flag will be used to prevent the service from processing
        # state changes events until we know that pending transactions
        # have been dispatched.
        self.dispatch_events_lock = Semaphore(1)

        self.database_path = config['database_path']
        if self.database_path != ':memory:':
            database_dir = os.path.dirname(config['database_path'])
            os.makedirs(database_dir, exist_ok=True)

            self.database_dir = database_dir
            # Prevent concurrent access to the same db
            self.lock_file = os.path.join(self.database_dir, '.lock')
            self.db_lock = filelock.FileLock(self.lock_file)
        else:
            self.database_path = ':memory:'
            self.database_dir = None
            self.lock_file = None
            self.serialization_file = None
            self.db_lock = None

        self.event_poll_lock = gevent.lock.Semaphore()

    def start_async(self) -> RaidenGreenletEvent:
        """ Start the node asynchronously. """
        self.start_event.clear()
        self.stop_event.clear()

        if self.database_dir is not None:
            self.db_lock.acquire(timeout=0)
            assert self.db_lock.is_locked

        # start the registration early to speed up the start
        if self.config['transport_type'] == 'udp':
            endpoint_registration_greenlet = gevent.spawn(
                self.discovery.register,
                self.address,
                self.config['transport']['udp']['external_ip'],
                self.config['transport']['udp']['external_port'],
            )

        # The database may be :memory:
        storage = sqlite.SQLiteStorage(self.database_path, serialize.PickleSerializer())
        self.wal = wal.restore_from_latest_snapshot(
            node.state_transition,
            storage,
        )

        if self.wal.state_manager.current_state is None:
            log.debug('No recoverable state available, created initial state')
            block_number = self.chain.block_number()

            state_change = ActionInitChain(
                random.Random(),
                block_number,
                self.chain.node_address,
                self.chain.network_id,
            )
            self.wal.log_and_dispatch(state_change, block_number)
            payment_network = PaymentNetworkState(
                self.default_registry.address,
                [],  # empty list of token network states as it's the node's startup
            )
            state_change = ContractReceiveNewPaymentNetwork(
                constants.NULL_ADDRESS,
                payment_network,
            )
            self.handle_state_change(state_change)

            # On first run Raiden needs to fetch all events for the payment
            # network, to reconstruct all token network graphs and find opened
            # channels
            last_log_block_number = 0
        else:
            # The `Block` state change is dispatched only after all the events
            # for that given block have been processed, filters can be safely
            # installed starting from this position without losing events.
            last_log_block_number = views.block_number(self.wal.state_manager.current_state)
            log.debug('Restored state from WAL', last_restored_block=last_log_block_number)

        # Restore the current snapshot group
        self.snapshot_group = last_log_block_number // SNAPSHOT_BLOCK_COUNT

        # Install the filters using the correct from_block value, otherwise
        # blockchain logs can be lost.
        self.install_all_blockchain_filters(
            self.default_registry,
            self.default_secret_registry,
            last_log_block_number,
        )

        # Complete the first_run of the alarm task and synchronize with the
        # blockchain since the last run.
        #
        # Notes about setup order:
        # - The filters must be polled after the node state has been primed,
        #   otherwise the state changes won't have effect.
        # - The alarm must complete its first run before the transport is started,
        #   to avoid rejecting messages for unknown channels.
        self.alarm.register_callback(self._callback_new_block)

        self.alarm.first_run()

        chain_state = views.state_from_raiden(self)
        # Dispatch pending transactions
        pending_transactions = views.get_pending_transactions(
            chain_state,
        )
        log.debug(
            'Processing pending transactions',
            num_pending_transactions=len(pending_transactions),
        )
        with self.dispatch_events_lock:
            for transaction in pending_transactions:
                on_raiden_event(self, transaction)

        self.alarm.start()

        queueids_to_queues = views.get_all_messagequeues(chain_state)
        self.transport.start(self, queueids_to_queues)

        # Health check needs the transport layer
        self.start_neighbours_healthcheck()

        if self.config['transport_type'] == 'udp':
            def set_start_on_registration(_):
                self.start_event.set()

            endpoint_registration_greenlet.link_safe(set_start_on_registration)
        else:
            self.start_event.set()

        return self.start_event

    def start(self):
        """ Start the node. """
        self.start_async().wait()

    def start_neighbours_healthcheck(self):
        for neighbour in views.all_neighbour_nodes(self.wal.state_manager.current_state):
            if neighbour != ConnectionManager.BOOTSTRAP_ADDR:
                self.start_health_check_for(neighbour)

    def stop(self):
        """ Stop the node. """
        # Needs to come before any greenlets joining
        self.stop_event.set()
        self.transport.stop_and_wait()
        self.alarm.stop_async()

        wait_for = [self.alarm]
        wait_for.extend(getattr(self.transport, 'greenlets', []))
        # We need a timeout to prevent an endless loop from trying to
        # contact the disconnected client
        gevent.wait(wait_for, timeout=self.shutdown_timeout)

        # Filters must be uninstalled after the alarm task has stopped. Since
        # the events are polled by an alarm task callback, if the filters are
        # uninstalled before the alarm task is fully stopped the callback
        # `poll_blockchain_events` will fail.
        #
        # We need a timeout to prevent an endless loop from trying to
        # contact the disconnected client
        try:
            with gevent.Timeout(self.shutdown_timeout):
                self.blockchain_events.uninstall_all_event_listeners()
        except (gevent.timeout.Timeout, RaidenShuttingDown):
            pass

        self.blockchain_events.reset()

        if self.db_lock is not None:
            self.db_lock.release()

    def __repr__(self):
        return '<{} {}>'.format(self.__class__.__name__, pex(self.address))

    def get_block_number(self):
        return views.block_number(self.wal.state_manager.current_state)

    def handle_state_change(self, state_change, block_number=None):
        log.debug('STATE CHANGE', node=pex(self.address), state_change=state_change)

        if block_number is None:
            block_number = self.get_block_number()

        # Take a snapshot every SNAPSHOT_BLOCK_COUNT blocks.
        # TODO: Gather more data about storage requirements
        # and update the value to specify how often a snapshot
        # should be taken.
        new_snapshot_group = block_number // SNAPSHOT_BLOCK_COUNT
        if new_snapshot_group > self.snapshot_group:
            log.debug(f'Storing snapshot at block: {block_number}')
            self.wal.snapshot()
            self.snapshot_group = new_snapshot_group

        event_list = self.wal.log_and_dispatch(state_change, block_number)

        if self.dispatch_events_lock.locked():
            return []

        for event in event_list:
            log.debug('RAIDEN EVENT', node=pex(self.address), raiden_event=event)

            on_raiden_event(self, event)

        return event_list

    def set_node_network_state(self, node_address, network_state):
        state_change = ActionChangeNodeNetworkState(node_address, network_state)
        self.wal.log_and_dispatch(state_change, self.get_block_number())

    def start_health_check_for(self, node_address):
        self.transport.start_health_check(node_address)

    def _callback_new_block(self, current_block_number, chain_id):
        """Called once a new block is detected by the alarm task.

        Note:
            This should be called only once per block, otherwise there will be
            duplicated `Block` state changes in the log.

            Therefore this method should be called only once a new block is
            mined with the appropriate block_number argument from the
            AlarmTask.
        """
        # Raiden relies on blockchain events to update its off-chain state,
        # therefore some APIs /used/ to forcefully poll for events.
        #
        # This was done for APIs which have on-chain side-effects, e.g.
        # opening a channel, where polling the event is required to update
        # the off-chain state and provide a consistent view to the caller,
        # e.g. the channel exists after the API call returns.
        #
        # That pattern introduced a race, because the events are returned only
        # once per filter, and this method would be called concurrently by the
        # API and the AlarmTask. The following lock is necessary, to ensure the
        # expected side-effects are properly applied (introduced by the commit
        # 3686b3275ff7c0b669a6d5e2b34109c3bdf1921d)
        with self.event_poll_lock:
            for event in self.blockchain_events.poll_blockchain_events(current_block_number):
                # These state changes will be processed with a block_number
                # which is /larger/ than the ChainState's block_number.
                on_blockchain_event(self, event, current_block_number, chain_id)

            # On restart the Raiden node will re-create the filters with the
            # ethereum node. These filters will have the from_block set to the
            # value of the latest Block state change. To avoid missing events
            # the Block state change is dispatched only after all of the events
            # have been processed.
            #
            # This means on some corner cases a few events may be applied
            # twice, this will happen if the node crashed and some events have
            # been processed but the Block state change has not been
            # dispatched.
            state_change = Block(current_block_number)
            self.handle_state_change(state_change, current_block_number)

    def sign(self, message):
        """ Sign message inplace. """
        if not isinstance(message, SignedMessage):
            raise ValueError('{} is not signable.'.format(repr(message)))

        message.sign(self.private_key)

    def install_all_blockchain_filters(
            self,
            token_network_registry_proxy: TokenNetworkRegistry,
            secret_registry_proxy: SecretRegistry,
            from_block: typing.BlockNumber,
    ):
        with self.event_poll_lock:
            node_state = views.state_from_raiden(self)
            channels = views.list_all_channelstate(node_state)
            token_networks = views.get_token_network_identifiers(
                node_state,
                token_network_registry_proxy.address,
            )

            self.blockchain_events.add_token_network_registry_listener(
                token_network_registry_proxy,
                from_block,
            )
            self.blockchain_events.add_secret_registry_listener(
                secret_registry_proxy,
                from_block,
            )

            for token_network in token_networks:
                token_network_proxy = self.chain.token_network(token_network)
                self.blockchain_events.add_token_network_listener(
                    token_network_proxy,
                    from_block,
                )

            for channel_state in channels:
                channel_proxy = self.chain.payment_channel(
                    channel_state.token_network_identifier,
                    channel_state.identifier,
                )
                self.blockchain_events.add_payment_channel_listener(
                    channel_proxy,
                    from_block,
                )

    def connection_manager_for_token_network(self, token_network_identifier):
        if not is_binary_address(token_network_identifier):
            raise InvalidAddress('token address is not valid.')

        known_token_networks = views.get_token_network_identifiers(
            views.state_from_raiden(self),
            self.default_registry.address,
        )

        if token_network_identifier not in known_token_networks:
            raise InvalidAddress('token is not registered.')

        manager = self.tokennetworkids_to_connectionmanagers.get(token_network_identifier)

        if manager is None:
            manager = ConnectionManager(self, token_network_identifier)
            self.tokennetworkids_to_connectionmanagers[token_network_identifier] = manager

        return manager

    def leave_all_token_networks(self):
        state_change = ActionLeaveAllNetworks()
        self.wal.log_and_dispatch(state_change, self.get_block_number())

    def close_and_settle(self):
        log.info('raiden will close and settle all channels now')

        self.leave_all_token_networks()

        connection_managers = [cm for cm in self.tokennetworkids_to_connectionmanagers.values()]

        if connection_managers:
            waiting.wait_for_settle_all_channels(
                self,
                self.alarm.sleep_time,
            )

    def mediated_transfer_async(
            self,
            token_network_identifier,
            amount,
            target,
            identifier,
    ):
        """ Transfer `amount` between this node and `target`.

        This method will start an asynchronous transfer, the transfer might fail
        or succeed depending on a couple of factors:

            - Existence of a path that can be used, through the usage of direct
              or intermediary channels.
            - Network speed, making the transfer sufficiently fast so it doesn't
              expire.
        """

        async_result = self.start_mediated_transfer(
            token_network_identifier,
            amount,
            target,
            identifier,
        )

        return async_result

    def direct_transfer_async(self, token_network_identifier, amount, target, identifier):
        """ Do a direct transfer with target.

        Direct transfers are non cancellable and non expirable, since these
        transfers are a signed balance proof with the transferred amount
        incremented.

        Because the transfer is non cancellable, there is a level of trust with
        the target. After the message is sent the target is effectively paid
        and then it is not possible to revert.

        The async result will be set to False iff there is no direct channel
        with the target or the payer does not have enough balance to complete
        the transfer. Otherwise, because the transfer is non expirable, the
        async result *will never be set to False*; if the message is sent it
        will hang until the target node acknowledges the message.

        This transfer should be used as an optimization, since only two packets
        are required to complete the transfer (from the payer's perspective),
        whereas the mediated transfer requires 6 messages.
        """

        self.start_health_check_for(target)

        if identifier is None:
            identifier = create_default_identifier()

        direct_transfer = ActionTransferDirect(
            token_network_identifier,
            target,
            identifier,
            amount,
        )

        self.handle_state_change(direct_transfer)

    def start_mediated_transfer(
            self,
            token_network_identifier,
            amount,
            target,
            identifier,
    ):

        self.start_health_check_for(target)

        if identifier is None:
            identifier = create_default_identifier()

        if identifier in self.identifier_to_results:
            return self.identifier_to_results[identifier]

        async_result = RaidenAsyncResult()
        self.identifier_to_results[identifier] = async_result

        secret = random_secret()
        init_initiator_statechange = initiator_init(
            self,
            identifier,
            amount,
            secret,
            token_network_identifier,
            target,
        )

        # Dispatch the state change even if there are no routes to create the
        # wal entry.
        self.handle_state_change(init_initiator_statechange)

        return async_result

    def mediate_mediated_transfer(self, transfer: LockedTransfer):
        init_mediator_statechange = mediator_init(self, transfer)
        self.handle_state_change(init_mediator_statechange)

    def target_mediated_transfer(self, transfer: LockedTransfer):
        self.start_health_check_for(transfer.initiator)
        init_target_statechange = target_init(transfer)
        self.handle_state_change(init_target_statechange)
Example #12
class EchoNode:

    def __init__(self, api, token_address):
        assert isinstance(api, RaidenAPI)
        self.ready = RaidenGreenletEvent()

        self.api = api
        self.token_address = token_address

        existing_channels = self.api.get_channel_list(
            api.raiden.default_registry.address,
            self.token_address,
        )

        open_channels = [
            channel_state
            for channel_state in existing_channels
            if channel.get_status(channel_state) == CHANNEL_STATE_OPENED
        ]

        if len(open_channels) == 0:
            token = self.api.raiden.chain.token(self.token_address)
            if not token.balance_of(self.api.raiden.address) > 0:
                raise ValueError('not enough funds for echo node %s for token %s' % (
                    pex(self.api.raiden.address),
                    pex(self.token_address),
                ))
            self.api.token_network_connect(
                self.api.raiden.default_registry.address,
                self.token_address,
                token.balance_of(self.api.raiden.address),
                initial_channel_target=10,
                joinable_funds_target=.5,
            )

        self.last_poll_block = self.api.raiden.get_block_number()
        self.received_transfers = Queue()
        self.stop_signal = None  # used to signal REMOVE_CALLBACK and stop echo_workers
        self.greenlets = list()
        self.lock = BoundedSemaphore()
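        # Bounded deque: only the most recent TRANSFER_MEMORY transfers are kept
        # for duplicate detection in echo_worker.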
        self.seen_transfers = deque(maxlen=TRANSFER_MEMORY)
        self.num_handled_transfers = 0
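        # transfers with the lucky amount 7 wait here until the lottery pays out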
        self.lottery_pool = Queue()
        # register ourselves with the raiden alarm task
        self.api.raiden.alarm.register_callback(self.echo_node_alarm_callback)
        self.echo_worker_greenlet = gevent.spawn(self.echo_worker)
        log.info('Echo node started')

    def echo_node_alarm_callback(self, block_number, chain_id):
        """ This can be registered with the raiden AlarmTask.
        If `EchoNode.stop()` is called, it will give the return signal to be removed from
        the AlarmTask callbacks.
        """
        if not self.ready.is_set():
            self.ready.set()
        log.debug('echo_node callback', block_number=block_number)
        if self.stop_signal is not None:
            return REMOVE_CALLBACK
        else:
            self.greenlets.append(gevent.spawn(self.poll_all_received_events))
            return True

    def poll_all_received_events(self):
        """ This will be triggered once for each `echo_node_alarm_callback`.
        It polls all channels for `EventPaymentReceivedSuccess` events,
        adds all new events to the `self.received_transfers` queue and
        respawns the `echo_worker` greenlet if it died. """

        locked = False
        try:
            with Timeout(10):
                locked = self.lock.acquire(blocking=False)
                if not locked:
                    return
                else:
                    received_transfers = self.api.get_payment_history_for_token(
                        self.token_address,
                        from_block=self.last_poll_block,
                    )

                    # received transfer is a tuple of (block_number, event)
                    received_transfers = [
                        (block_number, event)
                        for block_number, event in received_transfers
                        if type(event) == EventPaymentReceivedSuccess
                    ]
                    for _, event in received_transfers:
                        transfer = copy.deepcopy(event)
                        self.received_transfers.put(transfer)

                    # set last_poll_block after events are enqueued (timeout safe)
                    if received_transfers:
                        self.last_poll_block = max(
                            block_number
                            for block_number, _ in received_transfers
                        )

                    # increase last_poll_block if the blockchain proceeded
                    delta_blocks = self.api.raiden.get_block_number() - self.last_poll_block
                    if delta_blocks > 1:
                        self.last_poll_block += 1

                    if not self.echo_worker_greenlet.started:
                        log.debug(
                            'restarting echo_worker_greenlet',
                            dead=self.echo_worker_greenlet.dead,
                            successful=self.echo_worker_greenlet.successful(),
                            exception=self.echo_worker_greenlet.exception,
                        )
                        self.echo_worker_greenlet = gevent.spawn(self.echo_worker)
        except Timeout:
            log.info('timeout while polling for events')
        finally:
            if locked:
                self.lock.release()

    def echo_worker(self):
        """ The `echo_worker` works through the `self.received_transfers` queue and spawns
        `self.on_transfer` greenlets for all not-yet-seen transfers. """
        log.debug('echo worker', qsize=self.received_transfers.qsize())
        while self.stop_signal is None:
            if self.received_transfers.qsize() > 0:
                transfer = self.received_transfers.get()
                if transfer in self.seen_transfers:
                    log.debug(
                        'duplicate transfer ignored',
                        initiator=pex(transfer.initiator),
                        amount=transfer.amount,
                        identifier=transfer.identifier,
                    )
                else:
                    self.seen_transfers.append(transfer)
                    self.greenlets.append(gevent.spawn(self.on_transfer, transfer))
            else:
                gevent.sleep(0.5)

    def on_transfer(self, transfer):
        """ This handles the echo logic, as described in
        https://github.com/raiden-network/raiden/issues/651:

            - for transfers with an amount that satisfies `amount % 3 == 0`, it sends a transfer
            with an amount of `amount - 1` back to the initiator
            - for transfers with a "lucky number" amount `amount == 7` it does not send anything
            back immediately -- after having received "lucky number transfers" from 7 different
            addresses it sends a transfer with `amount = 49` to one of those 7 lucky addresses,
            chosen at random
            - repeated entries from an address that already holds a lottery ticket receive the
            current pool size as the `echo_amount`
            - for all other transfers it sends a transfer with the same `amount` back to the
            initiator """
        echo_amount = 0
        if transfer.amount % 3 == 0:
            log.info(
                'ECHO amount - 1',
                initiator=pex(transfer.initiator),
                amount=transfer.amount,
                identifier=transfer.identifier,
            )
            echo_amount = transfer.amount - 1

        elif transfer.amount == 7:
            log.info(
                'ECHO lucky number draw',
                initiator=pex(transfer.initiator),
                amount=transfer.amount,
                identifier=transfer.identifier,
                poolsize=self.lottery_pool.qsize(),
            )

            # obtain a local copy of the pool
            pool = self.lottery_pool.copy()
            tickets = [pool.get() for _ in range(pool.qsize())]
            assert pool.empty()
            del pool

            if any(ticket.initiator == transfer.initiator for ticket in tickets):
                assert transfer not in tickets
                log.debug(
                    'duplicate lottery entry',
                    initiator=pex(transfer.initiator),
                    identifier=transfer.identifier,
                    poolsize=len(tickets),
                )
                # signal the poolsize to the participant
                echo_amount = len(tickets)

            # payout: the pool already holds 6 tickets, this transfer is the 7th lucky entry
            elif len(tickets) == 6:
                log.info('payout!')
                # reset the pool
                assert self.lottery_pool.qsize() == 6
                self.lottery_pool = Queue()
                # add new participant
                tickets.append(transfer)
                # choose the winner
                transfer = random.choice(tickets)
                echo_amount = 49
            else:
                self.lottery_pool.put(transfer)

        else:
            log.debug(
                'echo transfer received',
                initiator=pex(transfer.initiator),
                amount=transfer.amount,
                identifier=transfer.identifier,
            )
            echo_amount = transfer.amount

        if echo_amount:
            log.debug(
                'sending echo transfer',
                target=pex(transfer.initiator),
                amount=echo_amount,
                orig_identifier=transfer.identifier,
                echo_identifier=transfer.identifier + echo_amount,
                token_address=pex(self.token_address),
                num_handled_transfers=self.num_handled_transfers + 1,
            )

            self.api.transfer(
                self.api.raiden.default_registry.address,
                self.token_address,
                echo_amount,
                transfer.initiator,
                identifier=transfer.identifier + echo_amount,
            )
        self.num_handled_transfers += 1

    def stop(self):
        self.stop_signal = True
        self.greenlets.append(self.echo_worker_greenlet)
        gevent.wait(self.greenlets)
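
The echo rules from the `on_transfer` docstring can be condensed into a small pure function. The sketch below is illustrative only: `pool_size` stands in for `self.lottery_pool.qsize()`, `is_duplicate_entry` for the check against already stored tickets, and no queue or API calls are made.

def echo_amount_for(amount, pool_size, is_duplicate_entry):
    """Illustrative sketch of EchoNode.on_transfer's decision rules, not library code."""
    if amount % 3 == 0:
        # multiples of three are echoed back minus one
        return amount - 1
    if amount == 7:
        if is_duplicate_entry:
            # an address that already holds a ticket gets the pool size signaled back
            return pool_size
        if pool_size == 6:
            # the 7th distinct lucky entry triggers the payout of 49
            # (sent to a randomly chosen ticket holder, not necessarily this sender)
            return 49
        # the ticket joins the pool and nothing is echoed yet
        return 0
    # everything else is echoed back unchanged
    return amount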