Ejemplo n.º 1
0
    def __init__(self, chain, private_key_bin, transport, discovery, config):
        # pylint: disable=too-many-arguments

        if not isinstance(private_key_bin,
                          bytes) or len(private_key_bin) != 32:
            raise ValueError('invalid private_key')

        private_key = PrivateKey(
            private_key_bin,
            ctx=GLOBAL_CTX,
            raw=True,
        )
        pubkey = private_key.pubkey.serialize(compressed=False)

        self.registries = list()
        self.managers_by_asset_address = dict()
        self.managers_by_address = dict()

        self.chain = chain
        self.config = config
        self.privkey = private_key_bin
        self.pubkey = pubkey
        self.private_key = private_key
        self.address = privatekey_to_address(private_key_bin)
        self.protocol = RaidenProtocol(transport, discovery, self)
        transport.protocol = self.protocol

        message_handler = RaidenMessageHandler(self)
        event_handler = RaidenEventHandler(self)

        alarm = AlarmTask(chain)
        # ignore the blocknumber
        alarm.register_callback(
            lambda _: event_handler.poll_all_event_listeners())
        alarm.start()

        self._blocknumber = alarm.last_block_number
        alarm.register_callback(self.set_block_number)

        if config['max_unresponsive_time'] > 0:
            self.healthcheck = HealthcheckTask(self, config['send_ping_time'],
                                               config['max_unresponsive_time'])
            self.healthcheck.start()
        else:
            self.healthcheck = None

        self.api = RaidenAPI(self)
        self.alarm = alarm
        self.event_handler = event_handler
        self.message_handler = message_handler
        self.start_event_listener = event_handler.start_event_listener

        self.on_message = message_handler.on_message
        self.on_event = event_handler.on_event
Ejemplo n.º 2
0
    def __init__(self, chain, private_key_bin, transport, discovery, config):
        # pylint: disable=too-many-arguments

        if not isinstance(private_key_bin, bytes) or len(private_key_bin) != 32:
            raise ValueError('invalid private_key')

        private_key = PrivateKey(
            private_key_bin,
            ctx=GLOBAL_CTX,
            raw=True,
        )
        pubkey = private_key.pubkey.serialize(compressed=False)

        self.registries = list()
        self.managers_by_asset_address = dict()
        self.managers_by_address = dict()
        self.event_listeners = list()

        self.chain = chain
        self.config = config
        self.privkey = private_key_bin
        self.pubkey = pubkey
        self.private_key = private_key
        self.address = privatekey_to_address(private_key_bin)
        self.protocol = RaidenProtocol(transport, discovery, self)
        transport.protocol = self.protocol

        message_handler = RaidenMessageHandler(self)
        event_handler = RaidenEventHandler(self)

        alarm = AlarmTask(chain)
        alarm.start()
        if config['max_unresponsive_time'] > 0:
            self.healthcheck = HealthcheckTask(
                self,
                config['send_ping_time'],
                config['max_unresponsive_time']
            )
            self.healthcheck.start()
        else:
            self.healthcheck = None

        self.api = RaidenAPI(self)
        self.alarm = alarm
        self.event_handler = event_handler
        self.message_handler = message_handler

        self.on_message = message_handler.on_message
        self.on_event = event_handler.on_event
Ejemplo n.º 3
0
    def __init__(self, chain, default_registry, private_key_bin, transport,
                 discovery, config):
        if not isinstance(private_key_bin,
                          bytes) or len(private_key_bin) != 32:
            raise ValueError('invalid private_key')

        invalid_timeout = (
            config['settle_timeout'] < NETTINGCHANNEL_SETTLE_TIMEOUT_MIN
            or config['settle_timeout'] > NETTINGCHANNEL_SETTLE_TIMEOUT_MAX)
        if invalid_timeout:
            raise ValueError('settle_timeout must be in range [{}, {}]'.format(
                NETTINGCHANNEL_SETTLE_TIMEOUT_MIN,
                NETTINGCHANNEL_SETTLE_TIMEOUT_MAX))

        self.token_to_channelgraph = dict()
        self.tokens_to_connectionmanagers = dict()
        self.manager_to_token = dict()
        self.swapkey_to_tokenswap = dict()
        self.swapkey_to_greenlettask = dict()

        self.identifier_to_statemanagers = defaultdict(list)
        self.identifier_to_results = defaultdict(list)

        # This is a map from a hashlock to a list of channels, the same
        # hashlock can be used in more than one token (for tokenswaps), a
        # channel should be removed from this list only when the lock is
        # released/withdrawn but not when the secret is registered.
        self.token_to_hashlock_to_channels = defaultdict(
            lambda: defaultdict(list))

        self.chain = chain
        self.default_registry = default_registry
        self.config = config
        self.privkey = private_key_bin
        self.address = privatekey_to_address(private_key_bin)

        endpoint_registration_event = gevent.spawn(
            discovery.register,
            self.address,
            config['external_ip'],
            config['external_port'],
        )
        endpoint_registration_event.link_exception(
            endpoint_registry_exception_handler)

        self.private_key = PrivateKey(private_key_bin)
        self.pubkey = self.private_key.public_key.format(compressed=False)
        self.protocol = RaidenProtocol(
            transport,
            discovery,
            self,
            config['protocol']['retry_interval'],
            config['protocol']['retries_before_backoff'],
            config['protocol']['nat_keepalive_retries'],
            config['protocol']['nat_keepalive_timeout'],
            config['protocol']['nat_invitation_timeout'],
        )

        # TODO: remove this cyclic dependency
        transport.protocol = self.protocol

        self.message_handler = RaidenMessageHandler(self)
        self.state_machine_event_handler = StateMachineEventHandler(self)
        self.blockchain_events = BlockchainEvents()
        self.greenlet_task_dispatcher = GreenletTasksDispatcher()
        self.on_message = self.message_handler.on_message
        self.alarm = AlarmTask(chain)
        self.shutdown_timeout = config['shutdown_timeout']
        self._block_number = None
        self.stop_event = Event()
        self.start_event = Event()
        self.chain.client.inject_stop_event(self.stop_event)

        self.transaction_log = StateChangeLog(
            storage_instance=StateChangeLogSQLiteBackend(
                database_path=config['database_path']))
        self.wal = None

        self.database_path = config['database_path']
        if self.database_path != ':memory:':
            self.database_dir = os.path.dirname(self.database_path)
            self.lock_file = os.path.join(self.database_dir, '.lock')
            self.snapshot_dir = os.path.join(self.database_dir, 'snapshots')
            self.serialization_file = os.path.join(self.snapshot_dir,
                                                   'data.pickle')

            if not os.path.exists(self.snapshot_dir):
                os.makedirs(self.snapshot_dir)

            # Prevent concurrent acces to the same db
            self.db_lock = filelock.FileLock(self.lock_file)
        else:
            self.database_dir = None
            self.lock_file = None
            self.snapshot_dir = None
            self.serialization_file = None
            self.db_lock = None

        # If the endpoint registration fails the node will quit, this must
        # finish before starting the protocol
        endpoint_registration_event.join()

        self.start()
Ejemplo n.º 4
0
class RaidenService:
    """ A Raiden node. """
    def __init__(self, chain, default_registry, private_key_bin, transport,
                 discovery, config):
        if not isinstance(private_key_bin,
                          bytes) or len(private_key_bin) != 32:
            raise ValueError('invalid private_key')

        invalid_timeout = (
            config['settle_timeout'] < NETTINGCHANNEL_SETTLE_TIMEOUT_MIN
            or config['settle_timeout'] > NETTINGCHANNEL_SETTLE_TIMEOUT_MAX)
        if invalid_timeout:
            raise ValueError('settle_timeout must be in range [{}, {}]'.format(
                NETTINGCHANNEL_SETTLE_TIMEOUT_MIN,
                NETTINGCHANNEL_SETTLE_TIMEOUT_MAX))

        self.token_to_channelgraph = dict()
        self.tokens_to_connectionmanagers = dict()
        self.manager_to_token = dict()
        self.swapkey_to_tokenswap = dict()
        self.swapkey_to_greenlettask = dict()

        self.identifier_to_statemanagers = defaultdict(list)
        self.identifier_to_results = defaultdict(list)

        # This is a map from a hashlock to a list of channels, the same
        # hashlock can be used in more than one token (for tokenswaps), a
        # channel should be removed from this list only when the lock is
        # released/withdrawn but not when the secret is registered.
        self.token_to_hashlock_to_channels = defaultdict(
            lambda: defaultdict(list))

        self.chain = chain
        self.default_registry = default_registry
        self.config = config
        self.privkey = private_key_bin
        self.address = privatekey_to_address(private_key_bin)

        endpoint_registration_event = gevent.spawn(
            discovery.register,
            self.address,
            config['external_ip'],
            config['external_port'],
        )
        endpoint_registration_event.link_exception(
            endpoint_registry_exception_handler)

        self.private_key = PrivateKey(private_key_bin)
        self.pubkey = self.private_key.public_key.format(compressed=False)
        self.protocol = RaidenProtocol(
            transport,
            discovery,
            self,
            config['protocol']['retry_interval'],
            config['protocol']['retries_before_backoff'],
            config['protocol']['nat_keepalive_retries'],
            config['protocol']['nat_keepalive_timeout'],
            config['protocol']['nat_invitation_timeout'],
        )

        # TODO: remove this cyclic dependency
        transport.protocol = self.protocol

        self.message_handler = RaidenMessageHandler(self)
        self.state_machine_event_handler = StateMachineEventHandler(self)
        self.blockchain_events = BlockchainEvents()
        self.greenlet_task_dispatcher = GreenletTasksDispatcher()
        self.on_message = self.message_handler.on_message
        self.alarm = AlarmTask(chain)
        self.shutdown_timeout = config['shutdown_timeout']
        self._block_number = None
        self.stop_event = Event()
        self.start_event = Event()
        self.chain.client.inject_stop_event(self.stop_event)

        self.transaction_log = StateChangeLog(
            storage_instance=StateChangeLogSQLiteBackend(
                database_path=config['database_path']))
        self.wal = None

        self.database_path = config['database_path']
        if self.database_path != ':memory:':
            self.database_dir = os.path.dirname(self.database_path)
            self.lock_file = os.path.join(self.database_dir, '.lock')
            self.snapshot_dir = os.path.join(self.database_dir, 'snapshots')
            self.serialization_file = os.path.join(self.snapshot_dir,
                                                   'data.pickle')

            if not os.path.exists(self.snapshot_dir):
                os.makedirs(self.snapshot_dir)

            # Prevent concurrent acces to the same db
            self.db_lock = filelock.FileLock(self.lock_file)
        else:
            self.database_dir = None
            self.lock_file = None
            self.snapshot_dir = None
            self.serialization_file = None
            self.db_lock = None

        # If the endpoint registration fails the node will quit, this must
        # finish before starting the protocol
        endpoint_registration_event.join()

        self.start()

    def start(self):
        """ Start the node. """
        # XXX Should this really be here? Or will start() never be called again
        # after stop() in the lifetime of Raiden apart from the tests? This is
        # at least at the moment prompted by tests/integration/test_transer.py
        if self.stop_event and self.stop_event.is_set():
            self.stop_event.clear()

        self.alarm.start()

        # Prime the block number cache and set the callbacks
        self._block_number = self.alarm.last_block_number
        self.alarm.register_callback(self.poll_blockchain_events)
        self.alarm.register_callback(self.set_block_number)

        # Registry registration must start *after* the alarm task, this avoid
        # corner cases were the registry is queried in block A, a new block B
        # is mined, and the alarm starts polling at block C.
        self.register_registry(self.default_registry.address)

        # Restore from snapshot must come after registering the registry as we
        # need to know the registered tokens to populate `token_to_channelgraph`
        if self.database_dir is not None:
            self.db_lock.acquire(timeout=0)
            assert self.db_lock.is_locked
            self.restore_from_snapshots()

        # The database may be :memory:
        storage = sqlite.SQLiteStorage(self.database_path,
                                       serialize.PickleSerializer())
        self.wal = wal.WriteAheadLog(node.state_transition, storage)

        # Start the protocol after the registry is queried to avoid warning
        # about unknown channels.
        self.protocol.start()

        # Health check needs the protocol layer
        self.start_neighbours_healthcheck()

        self.start_event.set()

    def start_neighbours_healthcheck(self):
        for graph in self.token_to_channelgraph.values():
            for neighbour in graph.get_neighbours():
                if neighbour != ConnectionManager.BOOTSTRAP_ADDR:
                    self.start_health_check_for(neighbour)

    def stop(self):
        """ Stop the node. """
        # Needs to come before any greenlets joining
        self.stop_event.set()
        self.protocol.stop_and_wait()
        self.alarm.stop_async()

        wait_for = [self.alarm]
        wait_for.extend(self.protocol.greenlets)
        wait_for.extend(self.greenlet_task_dispatcher.stop())
        # We need a timeout to prevent an endless loop from trying to
        # contact the disconnected client
        gevent.wait(wait_for, timeout=self.shutdown_timeout)

        # Filters must be uninstalled after the alarm task has stopped. Since
        # the events are polled by an alarm task callback, if the filters are
        # uninstalled before the alarm task is fully stopped the callback
        # `poll_blockchain_events` will fail.
        #
        # We need a timeout to prevent an endless loop from trying to
        # contact the disconnected client
        try:
            with gevent.Timeout(self.shutdown_timeout):
                self.blockchain_events.uninstall_all_event_listeners()
        except (gevent.timeout.Timeout, RaidenShuttingDown):
            pass

        # save the state after all tasks are done
        if self.serialization_file:
            save_snapshot(self.serialization_file, self)

        if self.db_lock is not None:
            self.db_lock.release()

    def __repr__(self):
        return '<{} {}>'.format(self.__class__.__name__, pex(self.address))

    def restore_from_snapshots(self):
        data = load_snapshot(self.serialization_file)
        data_exists_and_is_recent = (data is not None
                                     and 'registry_address' in data
                                     and data['registry_address']
                                     == ROPSTEN_REGISTRY_ADDRESS)

        if data_exists_and_is_recent:
            first_channel = True
            for channel in data['channels']:
                try:
                    self.restore_channel(channel)
                    first_channel = False
                except AddressWithoutCode as e:
                    log.warn(
                        'Channel without code while restoring. Must have been '
                        'already settled while we were offline.',
                        error=str(e))
                except AttributeError as e:
                    if first_channel:
                        log.warn(
                            'AttributeError during channel restoring. If code has changed'
                            ' then this is fine. If not then please report a bug.',
                            error=str(e))
                        break
                    else:
                        raise

            for restored_queue in data['queues']:
                self.restore_queue(restored_queue)

            self.protocol.receivedhashes_to_acks = data[
                'receivedhashes_to_acks']
            self.protocol.nodeaddresses_to_nonces = data[
                'nodeaddresses_to_nonces']

            self.restore_transfer_states(data['transfers'])

    def set_block_number(self, block_number):
        state_change = Block(block_number)
        self.state_machine_event_handler.log_and_dispatch_to_all_tasks(
            state_change)

        for graph in self.token_to_channelgraph.values():
            for channel in graph.address_to_channel.values():
                channel.state_transition(state_change)

        # To avoid races, only update the internal cache after all the state
        # tasks have been updated.
        self._block_number = block_number

    def handle_state_change(self, state_change, block_number=None):
        is_logging = log.isEnabledFor(logging.DEBUG)

        if is_logging:
            log.debug('STATE CHANGE',
                      node=pex(self.address),
                      state_change=state_change)

        if block_number is None:
            block_number = self.get_block_number()

        event_list = self.wal.log_and_dispatch(state_change, block_number)

        for event in event_list:
            if is_logging:
                log.debug('EVENT', node=pex(self.address), event=event)

            on_raiden_event(self, event)

        return event_list

    def set_node_network_state(self, node_address, network_state):
        for graph in self.token_to_channelgraph.values():
            channel = graph.partneraddress_to_channel.get(node_address)

            if channel:
                channel.network_state = network_state

    def start_health_check_for(self, node_address):
        self.protocol.start_health_check(node_address)

    def get_block_number(self):
        return self._block_number

    def poll_blockchain_events(self, current_block=None):
        # pylint: disable=unused-argument
        on_statechange = self.state_machine_event_handler.on_blockchain_statechange

        for state_change in self.blockchain_events.poll_state_change(
                self._block_number):
            on_statechange(state_change)

    def find_channel_by_address(self, netting_channel_address_bin):
        for graph in self.token_to_channelgraph.values():
            channel = graph.address_to_channel.get(netting_channel_address_bin)

            if channel is not None:
                return channel

        raise ValueError('unknown channel {}'.format(
            encode_hex(netting_channel_address_bin)))

    def sign(self, message):
        """ Sign message inplace. """
        if not isinstance(message, SignedMessage):
            raise ValueError('{} is not signable.'.format(repr(message)))

        message.sign(self.private_key, self.address)

    def send_async(self, recipient, message):
        """ Send `message` to `recipient` using the raiden protocol.

        The protocol will take care of resending the message on a given
        interval until an Acknowledgment is received or a given number of
        tries.
        """

        if not isaddress(recipient):
            raise ValueError('recipient is not a valid address.')

        if recipient == self.address:
            raise ValueError('programming error, sending message to itself')

        return self.protocol.send_async(recipient, message)

    def send_and_wait(self, recipient, message, timeout):
        """ Send `message` to `recipient` and wait for the response or `timeout`.

        Args:
            recipient (address): The address of the node that will receive the
                message.
            message: The transfer message.
            timeout (float): How long should we wait for a response from `recipient`.

        Returns:
            None: If the wait timed out
            object: The result from the event
        """
        if not isaddress(recipient):
            raise ValueError('recipient is not a valid address.')

        self.protocol.send_and_wait(recipient, message, timeout)

    def register_secret(self, secret: bytes):
        """ Register the secret with any channel that has a hashlock on it.

        This must search through all channels registered for a given hashlock
        and ignoring the tokens. Useful for refund transfer, split transfer,
        and token swaps.

        Raises:
            TypeError: If secret is unicode data.
        """
        if not isinstance(secret, bytes):
            raise TypeError('secret must be bytes')

        hashlock = sha3(secret)
        revealsecret_message = RevealSecret(secret)
        self.sign(revealsecret_message)

        for hash_channel in self.token_to_hashlock_to_channels.values():
            for channel in hash_channel[hashlock]:
                channel.register_secret(secret)

                # The protocol ignores duplicated messages.
                self.send_async(
                    channel.partner_state.address,
                    revealsecret_message,
                )

    def register_channel_for_hashlock(self, token_address, channel, hashlock):
        channels_registered = self.token_to_hashlock_to_channels[
            token_address][hashlock]

        if channel not in channels_registered:
            channels_registered.append(channel)

    def handle_secret(  # pylint: disable=too-many-arguments
            self, identifier, token_address, secret, partner_secret_message,
            hashlock):
        """ Unlock/Witdraws locks, register the secret, and send Secret
        messages as necessary.

        This function will:
            - Unlock the locks created by this node and send a Secret message to
            the corresponding partner so that she can withdraw the token.
            - Withdraw the lock from sender.
            - Register the secret for the locks received and reveal the secret
            to the senders


        Note:
            The channel needs to be registered with
            `raiden.register_channel_for_hashlock`.
        """
        # handling the secret needs to:
        # - unlock the token for all `forward_channel` (the current one
        #   and the ones that failed with a refund)
        # - send a message to each of the forward nodes allowing them
        #   to withdraw the token
        # - register the secret for the `originating_channel` so that a
        #   proof can be made, if necessary
        # - reveal the secret to the `sender` node (otherwise we
        #   cannot withdraw the token)
        channels_list = self.token_to_hashlock_to_channels[token_address][
            hashlock]
        channels_to_remove = list()

        revealsecret_message = RevealSecret(secret)
        self.sign(revealsecret_message)

        messages_to_send = []
        for channel in channels_list:
            # unlock a pending lock
            if channel.our_state.is_known(hashlock):
                secret = channel.create_secret(identifier, secret)
                self.sign(secret)

                channel.register_transfer(
                    self.get_block_number(),
                    secret,
                )

                messages_to_send.append((
                    channel.partner_state.address,
                    secret,
                ))

                channels_to_remove.append(channel)

            # withdraw a pending lock
            elif channel.partner_state.is_known(hashlock):
                if partner_secret_message:
                    is_balance_proof = (partner_secret_message.sender
                                        == channel.partner_state.address
                                        and partner_secret_message.channel
                                        == channel.channel_address)

                    if is_balance_proof:
                        channel.register_transfer(
                            self.get_block_number(),
                            partner_secret_message,
                        )
                        channels_to_remove.append(channel)
                    else:
                        channel.register_secret(secret)
                        messages_to_send.append((
                            channel.partner_state.address,
                            revealsecret_message,
                        ))
                else:
                    channel.register_secret(secret)
                    messages_to_send.append((
                        channel.partner_state.address,
                        revealsecret_message,
                    ))

            else:
                log.error(
                    'Channel is registered for a given lock but the lock is not contained in it.'
                )

        for channel in channels_to_remove:
            channels_list.remove(channel)

        if not channels_list:
            del self.token_to_hashlock_to_channels[token_address][hashlock]

        # send the messages last to avoid races
        for recipient, message in messages_to_send:
            self.send_async(
                recipient,
                message,
            )

    def get_channel_details(self, token_address, netting_channel):
        channel_details = netting_channel.detail()
        our_state = ChannelEndState(
            channel_details['our_address'],
            channel_details['our_balance'],
            None,
            EMPTY_MERKLE_TREE,
        )
        partner_state = ChannelEndState(
            channel_details['partner_address'],
            channel_details['partner_balance'],
            None,
            EMPTY_MERKLE_TREE,
        )

        def register_channel_for_hashlock(channel, hashlock):
            self.register_channel_for_hashlock(
                token_address,
                channel,
                hashlock,
            )

        channel_address = netting_channel.address
        reveal_timeout = self.config['reveal_timeout']
        settle_timeout = channel_details['settle_timeout']

        external_state = ChannelExternalState(
            register_channel_for_hashlock,
            netting_channel,
        )

        channel_detail = ChannelDetails(
            channel_address,
            our_state,
            partner_state,
            external_state,
            reveal_timeout,
            settle_timeout,
        )

        return channel_detail

    def restore_channel(self, serialized_channel):
        token_address = serialized_channel.token_address

        netting_channel = self.chain.netting_channel(
            serialized_channel.channel_address, )

        # restoring balances from the blockchain since the serialized
        # value could be falling behind.
        channel_details = netting_channel.detail()

        # our_address is checked by detail
        assert channel_details[
            'partner_address'] == serialized_channel.partner_address

        if serialized_channel.our_leaves:
            our_layers = compute_layers(serialized_channel.our_leaves)
            our_tree = MerkleTreeState(our_layers)
        else:
            our_tree = EMPTY_MERKLE_TREE

        our_state = ChannelEndState(
            channel_details['our_address'],
            channel_details['our_balance'],
            serialized_channel.our_balance_proof,
            our_tree,
        )

        if serialized_channel.partner_leaves:
            partner_layers = compute_layers(serialized_channel.partner_leaves)
            partner_tree = MerkleTreeState(partner_layers)
        else:
            partner_tree = EMPTY_MERKLE_TREE

        partner_state = ChannelEndState(
            channel_details['partner_address'],
            channel_details['partner_balance'],
            serialized_channel.partner_balance_proof,
            partner_tree,
        )

        def register_channel_for_hashlock(channel, hashlock):
            self.register_channel_for_hashlock(
                token_address,
                channel,
                hashlock,
            )

        external_state = ChannelExternalState(
            register_channel_for_hashlock,
            netting_channel,
        )
        details = ChannelDetails(
            serialized_channel.channel_address,
            our_state,
            partner_state,
            external_state,
            serialized_channel.reveal_timeout,
            channel_details['settle_timeout'],
        )

        graph = self.token_to_channelgraph[token_address]
        graph.add_channel(details)
        channel = graph.address_to_channel.get(
            serialized_channel.channel_address, )

        channel.our_state.balance_proof = serialized_channel.our_balance_proof
        channel.partner_state.balance_proof = serialized_channel.partner_balance_proof

    def restore_queue(self, serialized_queue):
        receiver_address = serialized_queue['receiver_address']
        token_address = serialized_queue['token_address']

        queue = self.protocol.get_channel_queue(
            receiver_address,
            token_address,
        )

        for messagedata in serialized_queue['messages']:
            queue.put(messagedata)

    def restore_transfer_states(self, transfer_states):
        self.identifier_to_statemanagers = transfer_states

    def register_registry(self, registry_address):
        proxies = get_relevant_proxies(
            self.chain,
            self.address,
            registry_address,
        )

        # Install the filters first to avoid missing changes, as a consequence
        # some events might be applied twice.
        self.blockchain_events.add_proxies_listeners(proxies)

        for manager in proxies.channel_managers:
            token_address = manager.token_address()
            manager_address = manager.address

            channels_detail = list()
            netting_channels = proxies.channelmanager_nettingchannels[
                manager_address]
            for channel in netting_channels:
                detail = self.get_channel_details(token_address, channel)
                channels_detail.append(detail)

            edge_list = manager.channels_addresses()
            graph = ChannelGraph(
                self.address,
                manager_address,
                token_address,
                edge_list,
                channels_detail,
            )

            self.manager_to_token[manager_address] = token_address
            self.token_to_channelgraph[token_address] = graph

            self.tokens_to_connectionmanagers[
                token_address] = ConnectionManager(self, token_address, graph)

    def channel_manager_is_registered(self, manager_address):
        return manager_address in self.manager_to_token

    def register_channel_manager(self, manager_address):
        manager = self.default_registry.manager(manager_address)
        netting_channels = [
            self.chain.netting_channel(channel_address) for channel_address in
            manager.channels_by_participant(self.address)
        ]

        # Install the filters first to avoid missing changes, as a consequence
        # some events might be applied twice.
        self.blockchain_events.add_channel_manager_listener(manager)
        for channel in netting_channels:
            self.blockchain_events.add_netting_channel_listener(channel)

        token_address = manager.token_address()
        edge_list = manager.channels_addresses()
        channels_detail = [
            self.get_channel_details(token_address, channel)
            for channel in netting_channels
        ]

        graph = ChannelGraph(
            self.address,
            manager_address,
            token_address,
            edge_list,
            channels_detail,
        )

        self.manager_to_token[manager_address] = token_address
        self.token_to_channelgraph[token_address] = graph

        self.tokens_to_connectionmanagers[token_address] = ConnectionManager(
            self, token_address, graph)

    def register_netting_channel(self, token_address, channel_address):
        netting_channel = self.chain.netting_channel(channel_address)
        self.blockchain_events.add_netting_channel_listener(netting_channel)

        detail = self.get_channel_details(token_address, netting_channel)
        graph = self.token_to_channelgraph[token_address]
        graph.add_channel(detail)

    def connection_manager_for_token(self, token_address):
        if not isaddress(token_address):
            raise InvalidAddress('token address is not valid.')
        if token_address in self.tokens_to_connectionmanagers.keys():
            manager = self.tokens_to_connectionmanagers[token_address]
        else:
            raise InvalidAddress('token is not registered.')
        return manager

    def leave_all_token_networks_async(self):
        leave_results = []
        for token_address in self.token_to_channelgraph:
            try:
                connection_manager = self.connection_manager_for_token(
                    token_address)
                leave_results.append(connection_manager.leave_async())
            except InvalidAddress:
                pass
        combined_result = AsyncResult()
        gevent.spawn(gevent.wait, leave_results).link(combined_result)
        return combined_result

    def close_and_settle(self):
        log.info('raiden will close and settle all channels now')

        connection_managers = [
            self.connection_manager_for_token(token_address)
            for token_address in self.token_to_channelgraph
        ]

        def blocks_to_wait():
            return max(connection_manager.min_settle_blocks
                       for connection_manager in connection_managers)

        all_channels = list(
            itertools.chain.from_iterable([
                connection_manager.open_channels
                for connection_manager in connection_managers
            ]))

        leaving_greenlet = self.leave_all_token_networks_async()
        # using the un-cached block number here
        last_block = self.chain.block_number()

        earliest_settlement = last_block + blocks_to_wait()

        # TODO: estimate and set a `timeout` parameter in seconds
        # based on connection_manager.min_settle_blocks and an average
        # blocktime from the past

        current_block = last_block
        while current_block < earliest_settlement:
            gevent.sleep(self.alarm.wait_time)
            last_block = self.chain.block_number()
            if last_block != current_block:
                current_block = last_block
                avg_block_time = self.chain.estimate_blocktime()
                wait_blocks_left = blocks_to_wait()
                not_settled = sum(
                    1 for channel in all_channels
                    if not channel.state == CHANNEL_STATE_SETTLED)
                if not_settled == 0:
                    log.debug('nothing left to settle')
                    break
                log.info(
                    'waiting at least %s more blocks (~%s sec) for settlement'
                    '(%s channels not yet settled)' %
                    (wait_blocks_left, wait_blocks_left * avg_block_time,
                     not_settled))

            leaving_greenlet.wait(timeout=blocks_to_wait() *
                                  self.chain.estimate_blocktime() * 1.5)

        if any(channel.state != CHANNEL_STATE_SETTLED
               for channel in all_channels):
            log.error('Some channels were not settled!',
                      channels=[
                          pex(channel.channel_address)
                          for channel in all_channels
                          if channel.state != CHANNEL_STATE_SETTLED
                      ])

    def mediated_transfer_async(self, token_address, amount, target,
                                identifier):
        """ Transfer `amount` between this node and `target`.

        This method will start an asyncronous transfer, the transfer might fail
        or succeed depending on a couple of factors:

            - Existence of a path that can be used, through the usage of direct
              or intermediary channels.
            - Network speed, making the transfer sufficiently fast so it doesn't
              expire.
        """

        async_result = self.start_mediated_transfer(
            token_address,
            amount,
            identifier,
            target,
        )

        return async_result

    def direct_transfer_async(self, token_address, amount, target, identifier):
        """ Do a direct tranfer with target.

        Direct transfers are non cancellable and non expirable, since these
        transfers are a signed balance proof with the transferred amount
        incremented.

        Because the transfer is non cancellable, there is a level of trust with
        the target. After the message is sent the target is effectively paid
        and then it is not possible to revert.

        The async result will be set to False iff there is no direct channel
        with the target or the payer does not have balance to complete the
        transfer, otherwise because the transfer is non expirable the async
        result *will never be set to False* and if the message is sent it will
        hang until the target node acknowledge the message.

        This transfer should be used as an optimization, since only two packets
        are required to complete the transfer (from the payer's perspective),
        whereas the mediated transfer requires 6 messages.
        """
        graph = self.token_to_channelgraph[token_address]
        direct_channel = graph.partneraddress_to_channel.get(target)

        direct_channel_with_capacity = (direct_channel
                                        and direct_channel.can_transfer and
                                        amount <= direct_channel.distributable)

        if direct_channel_with_capacity:
            direct_transfer = direct_channel.create_directtransfer(
                amount, identifier)
            self.sign(direct_transfer)
            direct_channel.register_transfer(
                self.get_block_number(),
                direct_transfer,
            )

            direct_transfer_state_change = ActionTransferDirect(
                identifier,
                amount,
                token_address,
                direct_channel.partner_state.address,
            )
            # TODO: add the transfer sent event
            state_change_id = self.transaction_log.log(
                direct_transfer_state_change)

            # TODO: This should be set once the direct transfer is acknowledged
            transfer_success = EventTransferSentSuccess(
                identifier,
                amount,
                target,
            )
            self.transaction_log.log_events(state_change_id,
                                            [transfer_success],
                                            self.get_block_number())

            async_result = self.protocol.send_async(
                direct_channel.partner_state.address,
                direct_transfer,
            )

        else:
            async_result = AsyncResult()
            async_result.set(False)

        return async_result

    def start_mediated_transfer(self, token_address, amount, identifier,
                                target):
        # pylint: disable=too-many-locals

        async_result = AsyncResult()
        graph = self.token_to_channelgraph[token_address]

        available_routes = get_best_routes(
            graph,
            self.protocol.nodeaddresses_networkstatuses,
            self.address,
            target,
            amount,
            None,
        )

        if not available_routes:
            async_result.set(False)
            return async_result

        self.protocol.start_health_check(target)

        if identifier is None:
            identifier = create_default_identifier()

        route_state = RoutesState(available_routes)
        our_address = self.address
        block_number = self.get_block_number()

        transfer_state = LockedTransferState(
            identifier=identifier,
            amount=amount,
            token=token_address,
            initiator=self.address,
            target=target,
            expiration=None,
            hashlock=None,
            secret=None,
        )

        # Issue #489
        #
        # Raiden may fail after a state change using the random generator is
        # handled but right before the snapshot is taken. If that happens on
        # the next initialization when raiden is recovering and applying the
        # pending state changes a new secret will be generated and the
        # resulting events won't match, this breaks the architecture model,
        # since it's assumed the re-execution of a state change will always
        # produce the same events.
        #
        # TODO: Removed the secret generator from the InitiatorState and add
        # the secret into all state changes that require one, this way the
        # secret will be serialized with the state change and the recovery will
        # use the same /random/ secret.
        random_generator = RandomSecretGenerator()

        init_initiator = ActionInitInitiator(
            our_address=our_address,
            transfer=transfer_state,
            routes=route_state,
            random_generator=random_generator,
            block_number=block_number,
        )

        state_manager = StateManager(initiator.state_transition, None)
        self.state_machine_event_handler.log_and_dispatch(
            state_manager, init_initiator)

        # TODO: implement the network timeout raiden.config['msg_timeout'] and
        # cancel the current transfer if it hapens (issue #374)
        self.identifier_to_statemanagers[identifier].append(state_manager)
        self.identifier_to_results[identifier].append(async_result)

        return async_result

    def mediate_mediated_transfer(self, message):
        # pylint: disable=too-many-locals
        identifier = message.identifier
        amount = message.lock.amount
        target = message.target
        token = message.token
        graph = self.token_to_channelgraph[token]

        available_routes = get_best_routes(
            graph,
            self.protocol.nodeaddresses_networkstatuses,
            self.address,
            target,
            amount,
            message.sender,
        )

        from_channel = graph.partneraddress_to_channel[message.sender]
        from_route = channel_to_routestate(from_channel, message.sender)

        our_address = self.address
        from_transfer = lockedtransfer_from_message(message)
        route_state = RoutesState(available_routes)
        block_number = self.get_block_number()

        init_mediator = ActionInitMediator(
            our_address,
            from_transfer,
            route_state,
            from_route,
            block_number,
        )

        state_manager = StateManager(mediator.state_transition, None)

        self.state_machine_event_handler.log_and_dispatch(
            state_manager, init_mediator)

        self.identifier_to_statemanagers[identifier].append(state_manager)

    def target_mediated_transfer(self, message):
        graph = self.token_to_channelgraph[message.token]
        from_channel = graph.partneraddress_to_channel[message.sender]
        from_route = channel_to_routestate(from_channel, message.sender)

        from_transfer = lockedtransfer_from_message(message)
        our_address = self.address
        block_number = self.get_block_number()

        init_target = ActionInitTarget(
            our_address,
            from_route,
            from_transfer,
            block_number,
        )

        state_manager = StateManager(target_task.state_transition, None)
        self.state_machine_event_handler.log_and_dispatch(
            state_manager, init_target)

        identifier = message.identifier
        self.identifier_to_statemanagers[identifier].append(state_manager)

    def direct_transfer_async2(self, token_address, amount, target,
                               identifier):
        """ Do a direct transfer with target.

        Direct transfers are non cancellable and non expirable, since these
        transfers are a signed balance proof with the transferred amount
        incremented.

        Because the transfer is non cancellable, there is a level of trust with
        the target. After the message is sent the target is effectively paid
        and then it is not possible to revert.

        The async result will be set to False iff there is no direct channel
        with the target or the payer does not have balance to complete the
        transfer, otherwise because the transfer is non expirable the async
        result *will never be set to False* and if the message is sent it will
        hang until the target node acknowledge the message.

        This transfer should be used as an optimization, since only two packets
        are required to complete the transfer (from the payers perspective),
        whereas the mediated transfer requires 6 messages.
        """

        self.protocol.start_health_check(target)

        if identifier is None:
            identifier = create_default_identifier()

        direct_transfer = ActionTransferDirect2(
            target,
            identifier,
            amount,
        )

        registry_address = self.default_registry.address
        state_change = ActionForTokenNetwork(
            registry_address,
            token_address,
            direct_transfer,
        )

        self.handle_state_change(state_change)

    def start_mediated_transfer2(self, token_address, amount, identifier,
                                 target):
        self.protocol.start_health_check(target)

        if identifier is None:
            identifier = create_default_identifier()

        assert identifier not in self.identifier_to_results

        async_result = AsyncResult()
        self.identifier_to_results[identifier].append(async_result)

        secret = random_secret()
        init_initiator_statechange = initiator_init(
            self,
            identifier,
            amount,
            secret,
            token_address,
            target,
        )

        # TODO: implement the network timeout raiden.config['msg_timeout'] and
        # cancel the current transfer if it happens (issue #374)
        #
        # Dispatch the state change even if there are no routes to create the
        # wal entry.
        self.handle_state_change(init_initiator_statechange)

        return async_result

    def mediate_mediated_transfer2(self, transfer):
        init_mediator_statechange = mediator_init(self, transfer)
        self.handle_state_change(init_mediator_statechange)

    def target_mediated_transfer2(self, transfer):
        init_target_statechange = target_init(self, transfer)
        self.handle_state_change(init_target_statechange)
Ejemplo n.º 5
0
    def __init__(self, chain, private_key_bin, transport, discovery, config):
        if not isinstance(private_key_bin, bytes) or len(private_key_bin) != 32:
            raise ValueError('invalid private_key')

        if config['settle_timeout'] < NETTINGCHANNEL_SETTLE_TIMEOUT_MIN:
            raise ValueError('settle_timeout must be larger-or-equal to {}'.format(
                NETTINGCHANNEL_SETTLE_TIMEOUT_MIN
            ))

        # create lock file only if SQLite backend is file, to prevent two instances
        #  from accesing a single db
        if config['database_path'] != ':memory:':
            self.db_lock = filelock.FileLock(path.dirname(config['database_path']) + "/.lock")
            self.db_lock.acquire(timeout=0)
            assert self.db_lock.is_locked
        else:
            self.db_lock = None

        private_key = PrivateKey(private_key_bin)
        pubkey = private_key.public_key.format(compressed=False)
        protocol = RaidenProtocol(
            transport,
            discovery,
            self,
            config['protocol']['retry_interval'],
            config['protocol']['retries_before_backoff'],
            config['protocol']['nat_keepalive_retries'],
            config['protocol']['nat_keepalive_timeout'],
            config['protocol']['nat_invitation_timeout'],
        )
        transport.protocol = protocol

        self.token_to_channelgraph = dict()
        self.manager_to_token = dict()
        self.swapkey_to_tokenswap = dict()
        self.swapkey_to_greenlettask = dict()

        self.identifier_to_statemanagers = defaultdict(list)
        self.identifier_to_results = defaultdict(list)

        # This is a map from a hashlock to a list of channels, the same
        # hashlock can be used in more than one token (for tokenswaps), a
        # channel should be removed from this list only when the lock is
        # released/withdrawn but not when the secret is registered.
        self.token_to_hashlock_to_channels = defaultdict(lambda: defaultdict(list))

        self.chain = chain
        self.config = config
        self.privkey = private_key_bin
        self.pubkey = pubkey
        self.private_key = private_key
        self.address = privatekey_to_address(private_key_bin)
        self.protocol = protocol

        message_handler = RaidenMessageHandler(self)
        state_machine_event_handler = StateMachineEventHandler(self)
        pyethapp_blockchain_events = PyethappBlockchainEvents()
        greenlet_task_dispatcher = GreenletTasksDispatcher()

        alarm = AlarmTask(chain)

        # prime the block number cache and set the callbacks
        self._blocknumber = alarm.last_block_number
        alarm.register_callback(self.poll_blockchain_events)
        alarm.register_callback(self.set_block_number)

        self.transaction_log = StateChangeLog(
            storage_instance=StateChangeLogSQLiteBackend(
                database_path=config['database_path']
            )
        )

        registry_event = gevent.spawn(
            discovery.register,
            self.address,
            config['external_ip'],
            config['external_port'],
        )

        self.alarm = alarm
        self.message_handler = message_handler
        self.state_machine_event_handler = state_machine_event_handler
        self.pyethapp_blockchain_events = pyethapp_blockchain_events
        self.greenlet_task_dispatcher = greenlet_task_dispatcher

        self.on_message = message_handler.on_message

        self.tokens_to_connectionmanagers = dict()

        self.serialization_file = None
        self.protocol.start()
        alarm.start()
        if config['database_path'] != ':memory:':
            snapshot_dir = os.path.join(
                path.dirname(self.config['database_path']),
                'snapshots'
            )
            if not os.path.exists(snapshot_dir):
                os.makedirs(
                    snapshot_dir
                )

            self.serialization_file = path.join(
                snapshot_dir,
                'data.pickle',
            )

            self.register_registry(self.chain.default_registry.address)
            self.restore_from_snapshots()

            registry_event.join()
    def __init__(self, chain, default_registry, private_key_bin, transport, discovery, config):
        if not isinstance(private_key_bin, bytes) or len(private_key_bin) != 32:
            raise ValueError('invalid private_key')

        invalid_timeout = (
            config['settle_timeout'] < NETTINGCHANNEL_SETTLE_TIMEOUT_MIN or
            config['settle_timeout'] > NETTINGCHANNEL_SETTLE_TIMEOUT_MAX
        )
        if invalid_timeout:
            raise ValueError('settle_timeout must be in range [{}, {}]'.format(
                NETTINGCHANNEL_SETTLE_TIMEOUT_MIN, NETTINGCHANNEL_SETTLE_TIMEOUT_MAX
            ))

        self.token_to_channelgraph = dict()
        self.tokens_to_connectionmanagers = dict()
        self.manager_to_token = dict()
        self.swapkey_to_tokenswap = dict()
        self.swapkey_to_greenlettask = dict()

        self.identifier_to_statemanagers = defaultdict(list)
        self.identifier_to_results = defaultdict(list)

        # This is a map from a hashlock to a list of channels, the same
        # hashlock can be used in more than one token (for tokenswaps), a
        # channel should be removed from this list only when the lock is
        # released/withdrawn but not when the secret is registered.
        self.token_to_hashlock_to_channels = defaultdict(lambda: defaultdict(list))

        self.chain = chain
        self.default_registry = default_registry
        self.config = config
        self.privkey = private_key_bin
        self.address = privatekey_to_address(private_key_bin)

        endpoint_registration_event = gevent.spawn(
            discovery.register,
            self.address,
            config['external_ip'],
            config['external_port'],
        )
        endpoint_registration_event.link_exception(endpoint_registry_exception_handler)

        self.private_key = PrivateKey(private_key_bin)
        self.pubkey = self.private_key.public_key.format(compressed=False)
        self.protocol = RaidenProtocol(
            transport,
            discovery,
            self,
            config['protocol']['retry_interval'],
            config['protocol']['retries_before_backoff'],
            config['protocol']['nat_keepalive_retries'],
            config['protocol']['nat_keepalive_timeout'],
            config['protocol']['nat_invitation_timeout'],
        )

        # TODO: remove this cyclic dependency
        transport.protocol = self.protocol

        self.message_handler = RaidenMessageHandler(self)
        self.state_machine_event_handler = StateMachineEventHandler(self)
        self.blockchain_events = BlockchainEvents()
        self.greenlet_task_dispatcher = GreenletTasksDispatcher()
        self.on_message = self.message_handler.on_message
        self.alarm = AlarmTask(chain)
        self.shutdown_timeout = config['shutdown_timeout']
        self._block_number = None
        self.stop_event = Event()
        self.start_event = Event()
        self.chain.client.inject_stop_event(self.stop_event)

        self.transaction_log = StateChangeLog(
            storage_instance=StateChangeLogSQLiteBackend(
                database_path=config['database_path']
            )
        )

        if config['database_path'] != ':memory:':
            self.database_dir = os.path.dirname(config['database_path'])
            self.lock_file = os.path.join(self.database_dir, '.lock')
            self.snapshot_dir = os.path.join(self.database_dir, 'snapshots')
            self.serialization_file = os.path.join(self.snapshot_dir, 'data.pickle')

            if not os.path.exists(self.snapshot_dir):
                os.makedirs(self.snapshot_dir)

            # Prevent concurrent acces to the same db
            self.db_lock = filelock.FileLock(self.lock_file)
        else:
            self.database_dir = None
            self.lock_file = None
            self.snapshot_dir = None
            self.serialization_file = None
            self.db_lock = None

        # If the endpoint registration fails the node will quit, this must
        # finish before starting the protocol
        endpoint_registration_event.join()

        self.start()
class RaidenService:
    """ A Raiden node. """
    # pylint: disable=too-many-instance-attributes,too-many-public-methods

    def __init__(self, chain, default_registry, private_key_bin, transport, discovery, config):
        if not isinstance(private_key_bin, bytes) or len(private_key_bin) != 32:
            raise ValueError('invalid private_key')

        invalid_timeout = (
            config['settle_timeout'] < NETTINGCHANNEL_SETTLE_TIMEOUT_MIN or
            config['settle_timeout'] > NETTINGCHANNEL_SETTLE_TIMEOUT_MAX
        )
        if invalid_timeout:
            raise ValueError('settle_timeout must be in range [{}, {}]'.format(
                NETTINGCHANNEL_SETTLE_TIMEOUT_MIN, NETTINGCHANNEL_SETTLE_TIMEOUT_MAX
            ))

        self.token_to_channelgraph = dict()
        self.tokens_to_connectionmanagers = dict()
        self.manager_to_token = dict()
        self.swapkey_to_tokenswap = dict()
        self.swapkey_to_greenlettask = dict()

        self.identifier_to_statemanagers = defaultdict(list)
        self.identifier_to_results = defaultdict(list)

        # This is a map from a hashlock to a list of channels, the same
        # hashlock can be used in more than one token (for tokenswaps), a
        # channel should be removed from this list only when the lock is
        # released/withdrawn but not when the secret is registered.
        self.token_to_hashlock_to_channels = defaultdict(lambda: defaultdict(list))

        self.chain = chain
        self.default_registry = default_registry
        self.config = config
        self.privkey = private_key_bin
        self.address = privatekey_to_address(private_key_bin)

        endpoint_registration_event = gevent.spawn(
            discovery.register,
            self.address,
            config['external_ip'],
            config['external_port'],
        )
        endpoint_registration_event.link_exception(endpoint_registry_exception_handler)

        self.private_key = PrivateKey(private_key_bin)
        self.pubkey = self.private_key.public_key.format(compressed=False)
        self.protocol = RaidenProtocol(
            transport,
            discovery,
            self,
            config['protocol']['retry_interval'],
            config['protocol']['retries_before_backoff'],
            config['protocol']['nat_keepalive_retries'],
            config['protocol']['nat_keepalive_timeout'],
            config['protocol']['nat_invitation_timeout'],
        )

        # TODO: remove this cyclic dependency
        transport.protocol = self.protocol

        self.message_handler = RaidenMessageHandler(self)
        self.state_machine_event_handler = StateMachineEventHandler(self)
        self.blockchain_events = BlockchainEvents()
        self.greenlet_task_dispatcher = GreenletTasksDispatcher()
        self.on_message = self.message_handler.on_message
        self.alarm = AlarmTask(chain)
        self.shutdown_timeout = config['shutdown_timeout']
        self._block_number = None
        self.stop_event = Event()
        self.start_event = Event()
        self.chain.client.inject_stop_event(self.stop_event)

        self.transaction_log = StateChangeLog(
            storage_instance=StateChangeLogSQLiteBackend(
                database_path=config['database_path']
            )
        )

        if config['database_path'] != ':memory:':
            self.database_dir = os.path.dirname(config['database_path'])
            self.lock_file = os.path.join(self.database_dir, '.lock')
            self.snapshot_dir = os.path.join(self.database_dir, 'snapshots')
            self.serialization_file = os.path.join(self.snapshot_dir, 'data.pickle')

            if not os.path.exists(self.snapshot_dir):
                os.makedirs(self.snapshot_dir)

            # Prevent concurrent acces to the same db
            self.db_lock = filelock.FileLock(self.lock_file)
        else:
            self.database_dir = None
            self.lock_file = None
            self.snapshot_dir = None
            self.serialization_file = None
            self.db_lock = None

        # If the endpoint registration fails the node will quit, this must
        # finish before starting the protocol
        endpoint_registration_event.join()

        self.start()

    def start(self):
        """ Start the node. """
        # XXX Should this really be here? Or will start() never be called again
        # after stop() in the lifetime of Raiden apart from the tests? This is
        # at least at the moment prompted by tests/integration/test_transer.py
        if self.stop_event and self.stop_event.is_set():
            self.stop_event.clear()

        self.alarm.start()

        # Prime the block number cache and set the callbacks
        self._block_number = self.alarm.last_block_number
        self.alarm.register_callback(self.poll_blockchain_events)
        self.alarm.register_callback(self.set_block_number)

        # Registry registration must start *after* the alarm task, this avoid
        # corner cases were the registry is queried in block A, a new block B
        # is mined, and the alarm starts polling at block C.
        self.register_registry(self.default_registry.address)

        # Restore from snapshot must come after registering the registry as we
        # need to know the registered tokens to populate `token_to_channelgraph`
        if self.database_dir is not None:
            self.db_lock.acquire(timeout=0)
            assert self.db_lock.is_locked
            self.restore_from_snapshots()

        # Start the protocol after the registry is queried to avoid warning
        # about unknown channels.
        self.protocol.start()

        # Health check needs the protocol layer
        self.start_neighbours_healthcheck()

        self.start_event.set()

    def start_neighbours_healthcheck(self):
        for graph in self.token_to_channelgraph.values():
            for neighbour in graph.get_neighbours():
                if neighbour != ConnectionManager.BOOTSTRAP_ADDR:
                    self.start_health_check_for(neighbour)

    def stop(self):
        """ Stop the node. """
        # Needs to come before any greenlets joining
        self.stop_event.set()
        self.protocol.stop_and_wait()
        self.alarm.stop_async()

        wait_for = [self.alarm]
        wait_for.extend(self.protocol.greenlets)
        wait_for.extend(self.greenlet_task_dispatcher.stop())
        # We need a timeout to prevent an endless loop from trying to
        # contact the disconnected client
        gevent.wait(wait_for, timeout=self.shutdown_timeout)

        # Filters must be uninstalled after the alarm task has stopped. Since
        # the events are polled by an alarm task callback, if the filters are
        # uninstalled before the alarm task is fully stopped the callback
        # `poll_blockchain_events` will fail.
        #
        # We need a timeout to prevent an endless loop from trying to
        # contact the disconnected client
        try:
            with gevent.Timeout(self.shutdown_timeout):
                self.blockchain_events.uninstall_all_event_listeners()
        except (gevent.timeout.Timeout, RaidenShuttingDown):
            pass

        # save the state after all tasks are done
        if self.serialization_file:
            save_snapshot(self.serialization_file, self)

        if self.db_lock is not None:
            self.db_lock.release()

    def __repr__(self):
        return '<{} {}>'.format(self.__class__.__name__, pex(self.address))

    def restore_from_snapshots(self):
        data = load_snapshot(self.serialization_file)
        data_exists_and_is_recent = (
            data is not None and
            'registry_address' in data and
            data['registry_address'] == ROPSTEN_REGISTRY_ADDRESS
        )

        if data_exists_and_is_recent:
            first_channel = True
            for channel in data['channels']:
                try:
                    self.restore_channel(channel)
                    first_channel = False
                except AddressWithoutCode as e:
                    log.warn(
                        'Channel without code while restoring. Must have been '
                        'already settled while we were offline.',
                        error=str(e)
                    )
                except AttributeError as e:
                    if first_channel:
                        log.warn(
                            'AttributeError during channel restoring. If code has changed'
                            ' then this is fine. If not then please report a bug.',
                            error=str(e)
                        )
                        break
                    else:
                        raise

            for restored_queue in data['queues']:
                self.restore_queue(restored_queue)

            self.protocol.receivedhashes_to_acks = data['receivedhashes_to_acks']
            self.protocol.nodeaddresses_to_nonces = data['nodeaddresses_to_nonces']

            self.restore_transfer_states(data['transfers'])

    def set_block_number(self, block_number):
        state_change = Block(block_number)
        self.state_machine_event_handler.log_and_dispatch_to_all_tasks(state_change)

        for graph in self.token_to_channelgraph.values():
            for channel in graph.address_to_channel.values():
                channel.state_transition(state_change)

        # To avoid races, only update the internal cache after all the state
        # tasks have been updated.
        self._block_number = block_number

    def set_node_network_state(self, node_address, network_state):
        for graph in self.token_to_channelgraph.values():
            channel = graph.partneraddress_to_channel.get(node_address)

            if channel:
                channel.network_state = network_state

    def start_health_check_for(self, node_address):
        self.protocol.start_health_check(node_address)

    def get_block_number(self):
        return self._block_number

    def poll_blockchain_events(self, current_block=None):
        # pylint: disable=unused-argument
        on_statechange = self.state_machine_event_handler.on_blockchain_statechange

        for state_change in self.blockchain_events.poll_state_change(self._block_number):
            on_statechange(state_change)

    def find_channel_by_address(self, netting_channel_address_bin):
        for graph in self.token_to_channelgraph.values():
            channel = graph.address_to_channel.get(netting_channel_address_bin)

            if channel is not None:
                return channel

        raise ValueError('unknown channel {}'.format(encode_hex(netting_channel_address_bin)))

    def sign(self, message):
        """ Sign message inplace. """
        if not isinstance(message, SignedMessage):
            raise ValueError('{} is not signable.'.format(repr(message)))

        message.sign(self.private_key, self.address)

    def send_async(self, recipient, message):
        """ Send `message` to `recipient` using the raiden protocol.

        The protocol will take care of resending the message on a given
        interval until an Acknowledgment is received or a given number of
        tries.
        """

        if not isaddress(recipient):
            raise ValueError('recipient is not a valid address.')

        if recipient == self.address:
            raise ValueError('programming error, sending message to itself')

        return self.protocol.send_async(recipient, message)

    def send_and_wait(self, recipient, message, timeout):
        """ Send `message` to `recipient` and wait for the response or `timeout`.

        Args:
            recipient (address): The address of the node that will receive the
                message.
            message: The transfer message.
            timeout (float): How long should we wait for a response from `recipient`.

        Returns:
            None: If the wait timed out
            object: The result from the event
        """
        if not isaddress(recipient):
            raise ValueError('recipient is not a valid address.')

        self.protocol.send_and_wait(recipient, message, timeout)

    def register_secret(self, secret: bytes):
        """ Register the secret with any channel that has a hashlock on it.

        This must search through all channels registered for a given hashlock
        and ignoring the tokens. Useful for refund transfer, split transfer,
        and token swaps.

        Raises:
            TypeError: If secret is unicode data.
        """
        if not isinstance(secret, bytes):
            raise TypeError('secret must be bytes')

        hashlock = sha3(secret)
        revealsecret_message = RevealSecret(secret)
        self.sign(revealsecret_message)

        for hash_channel in self.token_to_hashlock_to_channels.values():
            for channel in hash_channel[hashlock]:
                channel.register_secret(secret)

                # The protocol ignores duplicated messages.
                self.send_async(
                    channel.partner_state.address,
                    revealsecret_message,
                )

    def register_channel_for_hashlock(self, token_address, channel, hashlock):
        channels_registered = self.token_to_hashlock_to_channels[token_address][hashlock]

        if channel not in channels_registered:
            channels_registered.append(channel)

    def handle_secret(  # pylint: disable=too-many-arguments
            self,
            identifier,
            token_address,
            secret,
            partner_secret_message,
            hashlock):
        """ Unlock/Witdraws locks, register the secret, and send Secret
        messages as necessary.

        This function will:
            - Unlock the locks created by this node and send a Secret message to
            the corresponding partner so that she can withdraw the token.
            - Withdraw the lock from sender.
            - Register the secret for the locks received and reveal the secret
            to the senders


        Note:
            The channel needs to be registered with
            `raiden.register_channel_for_hashlock`.
        """
        # handling the secret needs to:
        # - unlock the token for all `forward_channel` (the current one
        #   and the ones that failed with a refund)
        # - send a message to each of the forward nodes allowing them
        #   to withdraw the token
        # - register the secret for the `originating_channel` so that a
        #   proof can be made, if necessary
        # - reveal the secret to the `sender` node (otherwise we
        #   cannot withdraw the token)
        channels_list = self.token_to_hashlock_to_channels[token_address][hashlock]
        channels_to_remove = list()

        revealsecret_message = RevealSecret(secret)
        self.sign(revealsecret_message)

        messages_to_send = []
        for channel in channels_list:
            # unlock a pending lock
            if channel.our_state.is_known(hashlock):
                secret = channel.create_secret(identifier, secret)
                self.sign(secret)

                channel.register_transfer(
                    self.get_block_number(),
                    secret,
                )

                messages_to_send.append((
                    channel.partner_state.address,
                    secret,
                ))

                channels_to_remove.append(channel)

            # withdraw a pending lock
            elif channel.partner_state.is_known(hashlock):
                if partner_secret_message:
                    is_balance_proof = (
                        partner_secret_message.sender == channel.partner_state.address and
                        partner_secret_message.channel == channel.channel_address
                    )

                    if is_balance_proof:
                        channel.register_transfer(
                            self.get_block_number(),
                            partner_secret_message,
                        )
                        channels_to_remove.append(channel)
                    else:
                        channel.register_secret(secret)
                        messages_to_send.append((
                            channel.partner_state.address,
                            revealsecret_message,
                        ))
                else:
                    channel.register_secret(secret)
                    messages_to_send.append((
                        channel.partner_state.address,
                        revealsecret_message,
                    ))

            else:
                log.error(
                    'Channel is registered for a given lock but the lock is not contained in it.'
                )

        for channel in channels_to_remove:
            channels_list.remove(channel)

        if not channels_list:
            del self.token_to_hashlock_to_channels[token_address][hashlock]

        # send the messages last to avoid races
        for recipient, message in messages_to_send:
            self.send_async(
                recipient,
                message,
            )

    def get_channel_details(self, token_address, netting_channel):
        channel_details = netting_channel.detail()
        our_state = ChannelEndState(
            channel_details['our_address'],
            channel_details['our_balance'],
            None,
            EMPTY_MERKLE_TREE,
        )
        partner_state = ChannelEndState(
            channel_details['partner_address'],
            channel_details['partner_balance'],
            None,
            EMPTY_MERKLE_TREE,
        )

        def register_channel_for_hashlock(channel, hashlock):
            self.register_channel_for_hashlock(
                token_address,
                channel,
                hashlock,
            )

        channel_address = netting_channel.address
        reveal_timeout = self.config['reveal_timeout']
        settle_timeout = channel_details['settle_timeout']

        external_state = ChannelExternalState(
            register_channel_for_hashlock,
            netting_channel,
        )

        channel_detail = ChannelDetails(
            channel_address,
            our_state,
            partner_state,
            external_state,
            reveal_timeout,
            settle_timeout,
        )

        return channel_detail

    def restore_channel(self, serialized_channel):
        token_address = serialized_channel.token_address

        netting_channel = self.chain.netting_channel(
            serialized_channel.channel_address,
        )

        # restoring balances from the blockchain since the serialized
        # value could be falling behind.
        channel_details = netting_channel.detail()

        # our_address is checked by detail
        assert channel_details['partner_address'] == serialized_channel.partner_address

        if serialized_channel.our_leaves:
            our_layers = compute_layers(serialized_channel.our_leaves)
            our_tree = MerkleTreeState(our_layers)
        else:
            our_tree = EMPTY_MERKLE_TREE

        our_state = ChannelEndState(
            channel_details['our_address'],
            channel_details['our_balance'],
            serialized_channel.our_balance_proof,
            our_tree,
        )

        if serialized_channel.partner_leaves:
            partner_layers = compute_layers(serialized_channel.partner_leaves)
            partner_tree = MerkleTreeState(partner_layers)
        else:
            partner_tree = EMPTY_MERKLE_TREE

        partner_state = ChannelEndState(
            channel_details['partner_address'],
            channel_details['partner_balance'],
            serialized_channel.partner_balance_proof,
            partner_tree,
        )

        def register_channel_for_hashlock(channel, hashlock):
            self.register_channel_for_hashlock(
                token_address,
                channel,
                hashlock,
            )

        external_state = ChannelExternalState(
            register_channel_for_hashlock,
            netting_channel,
        )
        details = ChannelDetails(
            serialized_channel.channel_address,
            our_state,
            partner_state,
            external_state,
            serialized_channel.reveal_timeout,
            channel_details['settle_timeout'],
        )

        graph = self.token_to_channelgraph[token_address]
        graph.add_channel(details)
        channel = graph.address_to_channel.get(
            serialized_channel.channel_address,
        )

        channel.our_state.balance_proof = serialized_channel.our_balance_proof
        channel.partner_state.balance_proof = serialized_channel.partner_balance_proof

    def restore_queue(self, serialized_queue):
        receiver_address = serialized_queue['receiver_address']
        token_address = serialized_queue['token_address']

        queue = self.protocol.get_channel_queue(
            receiver_address,
            token_address,
        )

        for messagedata in serialized_queue['messages']:
            queue.put(messagedata)

    def restore_transfer_states(self, transfer_states):
        self.identifier_to_statemanagers = transfer_states

    def register_registry(self, registry_address):
        proxies = get_relevant_proxies(
            self.chain,
            self.address,
            registry_address,
        )

        # Install the filters first to avoid missing changes, as a consequence
        # some events might be applied twice.
        self.blockchain_events.add_proxies_listeners(proxies)

        for manager in proxies.channel_managers:
            token_address = manager.token_address()
            manager_address = manager.address

            channels_detail = list()
            netting_channels = proxies.channelmanager_nettingchannels[manager_address]
            for channel in netting_channels:
                detail = self.get_channel_details(token_address, channel)
                channels_detail.append(detail)

            edge_list = manager.channels_addresses()
            graph = ChannelGraph(
                self.address,
                manager_address,
                token_address,
                edge_list,
                channels_detail,
            )

            self.manager_to_token[manager_address] = token_address
            self.token_to_channelgraph[token_address] = graph

            self.tokens_to_connectionmanagers[token_address] = ConnectionManager(
                self,
                token_address,
                graph
            )

    def channel_manager_is_registered(self, manager_address):
        return manager_address in self.manager_to_token

    def register_channel_manager(self, manager_address):
        manager = self.default_registry.manager(manager_address)
        netting_channels = [
            self.chain.netting_channel(channel_address)
            for channel_address in manager.channels_by_participant(self.address)
        ]

        # Install the filters first to avoid missing changes, as a consequence
        # some events might be applied twice.
        self.blockchain_events.add_channel_manager_listener(manager)
        for channel in netting_channels:
            self.blockchain_events.add_netting_channel_listener(channel)

        token_address = manager.token_address()
        edge_list = manager.channels_addresses()
        channels_detail = [
            self.get_channel_details(token_address, channel)
            for channel in netting_channels
        ]

        graph = ChannelGraph(
            self.address,
            manager_address,
            token_address,
            edge_list,
            channels_detail,
        )

        self.manager_to_token[manager_address] = token_address
        self.token_to_channelgraph[token_address] = graph

        self.tokens_to_connectionmanagers[token_address] = ConnectionManager(
            self,
            token_address,
            graph
        )

    def register_netting_channel(self, token_address, channel_address):
        netting_channel = self.chain.netting_channel(channel_address)
        self.blockchain_events.add_netting_channel_listener(netting_channel)

        detail = self.get_channel_details(token_address, netting_channel)
        graph = self.token_to_channelgraph[token_address]
        graph.add_channel(detail)

    def connection_manager_for_token(self, token_address):
        if not isaddress(token_address):
            raise InvalidAddress('token address is not valid.')
        if token_address in self.tokens_to_connectionmanagers.keys():
            manager = self.tokens_to_connectionmanagers[token_address]
        else:
            raise InvalidAddress('token is not registered.')
        return manager

    def leave_all_token_networks_async(self):
        leave_results = []
        for token_address in self.token_to_channelgraph.keys():
            try:
                connection_manager = self.connection_manager_for_token(token_address)
                leave_results.append(connection_manager.leave_async())
            except InvalidAddress:
                pass
        combined_result = AsyncResult()
        gevent.spawn(gevent.wait, leave_results).link(combined_result)
        return combined_result

    def close_and_settle(self):
        log.info('raiden will close and settle all channels now')

        connection_managers = [
            self.connection_manager_for_token(token_address) for
            token_address in self.token_to_channelgraph
        ]

        def blocks_to_wait():
            return max(
                connection_manager.min_settle_blocks
                for connection_manager in connection_managers
            )

        all_channels = list(
            itertools.chain.from_iterable(
                [connection_manager.open_channels for connection_manager in connection_managers]
            )
        )

        leaving_greenlet = self.leave_all_token_networks_async()
        # using the un-cached block number here
        last_block = self.chain.block_number()

        earliest_settlement = last_block + blocks_to_wait()

        # TODO: estimate and set a `timeout` parameter in seconds
        # based on connection_manager.min_settle_blocks and an average
        # blocktime from the past

        current_block = last_block
        while current_block < earliest_settlement:
            gevent.sleep(self.alarm.wait_time)
            last_block = self.chain.block_number()
            if last_block != current_block:
                current_block = last_block
                avg_block_time = self.chain.estimate_blocktime()
                wait_blocks_left = blocks_to_wait()
                not_settled = sum(
                    1 for channel in all_channels
                    if not channel.state == CHANNEL_STATE_SETTLED
                )
                if not_settled == 0:
                    log.debug('nothing left to settle')
                    break
                log.info(
                    'waiting at least %s more blocks (~%s sec) for settlement'
                    '(%s channels not yet settled)' % (
                        wait_blocks_left,
                        wait_blocks_left * avg_block_time,
                        not_settled
                    )
                )

            leaving_greenlet.wait(timeout=blocks_to_wait() * self.chain.estimate_blocktime() * 1.5)

        if any(channel.state != CHANNEL_STATE_SETTLED for channel in all_channels):
            log.error(
                'Some channels were not settled!',
                channels=[
                    pex(channel.channel_address) for channel in all_channels
                    if channel.state != CHANNEL_STATE_SETTLED
                ]
            )

    def mediated_transfer_async(self, token_address, amount, target, identifier):
        """ Transfer `amount` between this node and `target`.

        This method will start an asyncronous transfer, the transfer might fail
        or succeed depending on a couple of factors:

            - Existence of a path that can be used, through the usage of direct
              or intermediary channels.
            - Network speed, making the transfer sufficiently fast so it doesn't
              expire.
        """

        async_result = self.start_mediated_transfer(
            token_address,
            amount,
            identifier,
            target,
        )

        return async_result

    def direct_transfer_async(self, token_address, amount, target, identifier):
        """ Do a direct tranfer with target.

        Direct transfers are non cancellable and non expirable, since these
        transfers are a signed balance proof with the transferred amount
        incremented.

        Because the transfer is non cancellable, there is a level of trust with
        the target. After the message is sent the target is effectively paid
        and then it is not possible to revert.

        The async result will be set to False iff there is no direct channel
        with the target or the payer does not have balance to complete the
        transfer, otherwise because the transfer is non expirable the async
        result *will never be set to False* and if the message is sent it will
        hang until the target node acknowledge the message.

        This transfer should be used as an optimization, since only two packets
        are required to complete the transfer (from the payer's perspective),
        whereas the mediated transfer requires 6 messages.
        """
        graph = self.token_to_channelgraph[token_address]
        direct_channel = graph.partneraddress_to_channel.get(target)

        direct_channel_with_capacity = (
            direct_channel and
            direct_channel.can_transfer and
            amount <= direct_channel.distributable
        )

        if direct_channel_with_capacity:
            direct_transfer = direct_channel.create_directtransfer(amount, identifier)
            self.sign(direct_transfer)
            direct_channel.register_transfer(
                self.get_block_number(),
                direct_transfer,
            )

            direct_transfer_state_change = ActionTransferDirect(
                identifier,
                amount,
                token_address,
                direct_channel.partner_state.address,
            )
            # TODO: add the transfer sent event
            state_change_id = self.transaction_log.log(direct_transfer_state_change)

            # TODO: This should be set once the direct transfer is acknowledged
            transfer_success = EventTransferSentSuccess(
                identifier,
                amount,
                target,
            )
            self.transaction_log.log_events(
                state_change_id,
                [transfer_success],
                self.get_block_number()
            )

            async_result = self.protocol.send_async(
                direct_channel.partner_state.address,
                direct_transfer,
            )

        else:
            async_result = AsyncResult()
            async_result.set(False)

        return async_result

    def start_mediated_transfer(self, token_address, amount, identifier, target):
        # pylint: disable=too-many-locals

        async_result = AsyncResult()
        graph = self.token_to_channelgraph[token_address]

        available_routes = get_best_routes(
            graph,
            self.protocol.nodeaddresses_networkstatuses,
            self.address,
            target,
            amount,
            None,
        )

        if not available_routes:
            async_result.set(False)
            return async_result

        self.protocol.start_health_check(target)

        if identifier is None:
            identifier = create_default_identifier()

        route_state = RoutesState(available_routes)
        our_address = self.address
        block_number = self.get_block_number()

        transfer_state = LockedTransferState(
            identifier=identifier,
            amount=amount,
            token=token_address,
            initiator=self.address,
            target=target,
            expiration=None,
            hashlock=None,
            secret=None,
        )

        # Issue #489
        #
        # Raiden may fail after a state change using the random generator is
        # handled but right before the snapshot is taken. If that happens on
        # the next initialization when raiden is recovering and applying the
        # pending state changes a new secret will be generated and the
        # resulting events won't match, this breaks the architecture model,
        # since it's assumed the re-execution of a state change will always
        # produce the same events.
        #
        # TODO: Removed the secret generator from the InitiatorState and add
        # the secret into all state changes that require one, this way the
        # secret will be serialized with the state change and the recovery will
        # use the same /random/ secret.
        random_generator = RandomSecretGenerator()

        init_initiator = ActionInitInitiator(
            our_address=our_address,
            transfer=transfer_state,
            routes=route_state,
            random_generator=random_generator,
            block_number=block_number,
        )

        state_manager = StateManager(initiator.state_transition, None)
        self.state_machine_event_handler.log_and_dispatch(state_manager, init_initiator)

        # TODO: implement the network timeout raiden.config['msg_timeout'] and
        # cancel the current transfer if it hapens (issue #374)
        self.identifier_to_statemanagers[identifier].append(state_manager)
        self.identifier_to_results[identifier].append(async_result)

        return async_result

    def mediate_mediated_transfer(self, message):
        # pylint: disable=too-many-locals
        identifier = message.identifier
        amount = message.lock.amount
        target = message.target
        token = message.token
        graph = self.token_to_channelgraph[token]

        available_routes = get_best_routes(
            graph,
            self.protocol.nodeaddresses_networkstatuses,
            self.address,
            target,
            amount,
            message.sender,
        )

        from_channel = graph.partneraddress_to_channel[message.sender]
        from_route = channel_to_routestate(from_channel, message.sender)

        our_address = self.address
        from_transfer = lockedtransfer_from_message(message)
        route_state = RoutesState(available_routes)
        block_number = self.get_block_number()

        init_mediator = ActionInitMediator(
            our_address,
            from_transfer,
            route_state,
            from_route,
            block_number,
        )

        state_manager = StateManager(mediator.state_transition, None)

        self.state_machine_event_handler.log_and_dispatch(state_manager, init_mediator)

        self.identifier_to_statemanagers[identifier].append(state_manager)

    def target_mediated_transfer(self, message):
        graph = self.token_to_channelgraph[message.token]
        from_channel = graph.partneraddress_to_channel[message.sender]
        from_route = channel_to_routestate(from_channel, message.sender)

        from_transfer = lockedtransfer_from_message(message)
        our_address = self.address
        block_number = self.get_block_number()

        init_target = ActionInitTarget(
            our_address,
            from_route,
            from_transfer,
            block_number,
        )

        state_manager = StateManager(target_task.state_transition, None)
        self.state_machine_event_handler.log_and_dispatch(state_manager, init_target)

        identifier = message.identifier
        self.identifier_to_statemanagers[identifier].append(state_manager)
Ejemplo n.º 8
0
class RaidenService(object):  # pylint: disable=too-many-instance-attributes
    """ A Raiden node. """
    def __init__(self, chain, private_key_bin, transport, discovery, config):
        # pylint: disable=too-many-arguments

        if not isinstance(private_key_bin,
                          bytes) or len(private_key_bin) != 32:
            raise ValueError('invalid private_key')

        private_key = PrivateKey(
            private_key_bin,
            ctx=GLOBAL_CTX,
            raw=True,
        )
        pubkey = private_key.pubkey.serialize(compressed=False)

        self.registries = list()
        self.managers_by_asset_address = dict()
        self.managers_by_address = dict()

        self.chain = chain
        self.config = config
        self.privkey = private_key_bin
        self.pubkey = pubkey
        self.private_key = private_key
        self.address = privatekey_to_address(private_key_bin)
        self.protocol = RaidenProtocol(transport, discovery, self)
        transport.protocol = self.protocol

        message_handler = RaidenMessageHandler(self)
        event_handler = RaidenEventHandler(self)

        alarm = AlarmTask(chain)
        # ignore the blocknumber
        alarm.register_callback(
            lambda _: event_handler.poll_all_event_listeners())
        alarm.start()

        self._blocknumber = alarm.last_block_number
        alarm.register_callback(self.set_block_number)

        if config['max_unresponsive_time'] > 0:
            self.healthcheck = HealthcheckTask(self, config['send_ping_time'],
                                               config['max_unresponsive_time'])
            self.healthcheck.start()
        else:
            self.healthcheck = None

        self.api = RaidenAPI(self)
        self.alarm = alarm
        self.event_handler = event_handler
        self.message_handler = message_handler
        self.start_event_listener = event_handler.start_event_listener

        self.on_message = message_handler.on_message
        self.on_event = event_handler.on_event

    def __repr__(self):
        return '<{} {}>'.format(self.__class__.__name__, pex(self.address))

    def set_block_number(self, blocknumber):
        self._blocknumber = blocknumber

    def get_block_number(self):
        return self._blocknumber

    def get_manager_by_asset_address(self, asset_address_bin):
        """ Return the manager for the given `asset_address_bin`.  """
        try:
            return self.managers_by_asset_address[asset_address_bin]
        except KeyError:
            raise UnknownAssetAddress(asset_address_bin)

    def get_manager_by_address(self, manager_address_bin):
        return self.managers_by_address[manager_address_bin]

    def find_channel_by_address(self, netting_channel_address_bin):
        for manager in self.managers_by_address.itervalues():
            channel = manager.address_channel.get(netting_channel_address_bin)

            if channel is not None:
                return channel

        raise ValueError('unknown channel {}'.format(
            encode_hex(netting_channel_address_bin)))

    def sign(self, message):
        """ Sign message inplace. """
        if not isinstance(message, SignedMessage):
            raise ValueError('{} is not signable.'.format(repr(message)))

        message.sign(self.private_key, self.address)

    def send(self, *args):
        raise NotImplementedError('use send_and_wait or send_async')

    def send_async(self, recipient, message):
        """ Send `message` to `recipient` using the raiden protocol.

        The protocol will take care of resending the message on a given
        interval until an Acknowledgment is received or a given number of
        tries.
        """

        if not isaddress(recipient):
            raise ValueError('recipient is not a valid address.')

        if recipient == self.address:
            raise ValueError('programming error, sending message to itself')

        return self.protocol.send_async(recipient, message)

    def send_and_wait(self, recipient, message, timeout):
        """ Send `message` to `recipient` and wait for the response or `timeout`.

        Args:
            recipient (address): The address of the node that will receive the
                message.
            message: The transfer message.
            timeout (float): How long should we wait for a response from `recipient`.

        Returns:
            None: If the wait timed out
            object: The result from the event
        """
        if not isaddress(recipient):
            raise ValueError('recipient is not a valid address.')

        self.protocol.send_and_wait(recipient, message, timeout)

    # api design regarding locks:
    # - `register_secret` method was added because secret registration can be a
    #   cross asset operation
    # - unlocking a lock is not a cross asset operation, for this reason it's
    #   only available in the asset manager

    def register_secret(self, secret):
        """ Register the secret with any channel that has a hashlock on it.

        This must search through all channels registered for a given hashlock
        and ignoring the assets. Useful for refund transfer, split transfer,
        and exchanges.
        """
        for asset_manager in self.managers_by_asset_address.values():
            try:
                asset_manager.register_secret(secret)
            except:  # pylint: disable=bare-except
                # Only channels that care about the given secret can be
                # registered and channels that have claimed the lock must
                # be removed, so an exception should not happen at this
                # point, nevertheless handle it because we dont want a
                # error in a channel to mess the state from others.
                log.error('programming error')

    def message_for_task(self, message, hashlock):
        """ Sends the message to the corresponding task.

        The corresponding task is found by matching the hashlock.

        Return:
            Nothing if a corresponding task is found,raise Exception otherwise
        """
        found = False
        for asset_manager in self.managers_by_asset_address.values():
            task = asset_manager.transfermanager.transfertasks.get(hashlock)

            if task is not None:
                task.on_response(message)
                found = True

        if not found:
            # Log a warning and don't process further
            if log.isEnabledFor(logging.WARN):
                log.warn(
                    'Received %s hashlock message from unknown channel.'
                    'Sender: %s',
                    message.__class__.__name__,
                    pex(message.sender),
                )
            raise UnknownAddress

    def register_registry(self, registry):
        """ Register the registry and intialize all the related assets and
        channels.
        """
        translator = ContractTranslator(REGISTRY_ABI)

        assetadded = registry.assetadded_filter()

        all_manager_addresses = registry.manager_addresses()

        self.start_event_listener(
            'Registry {}'.format(pex(registry.address)),
            assetadded,
            translator,
        )

        self.registries.append(registry)

        for manager_address in all_manager_addresses:
            channel_manager = self.chain.manager(manager_address)
            self.register_channel_manager(channel_manager)

    def register_channel_manager(self, channel_manager):
        """ Discover and register the channels for the given asset. """
        translator = ContractTranslator(CHANNEL_MANAGER_ABI)

        # To avoid missing changes, first create the filter, call the
        # contract and then start polling.
        channelnew = channel_manager.channelnew_filter()

        all_netting_contracts = channel_manager.channels_by_participant(
            self.address)

        self.start_event_listener(
            'ChannelManager {}'.format(pex(channel_manager.address)),
            channelnew,
            translator,
        )

        asset_address_bin = channel_manager.asset_address()
        channel_manager_address_bin = channel_manager.address
        edges = channel_manager.channels_addresses()
        channel_graph = ChannelGraph(edges)

        asset_manager = AssetManager(
            self,
            asset_address_bin,
            channel_manager_address_bin,
            channel_graph,
        )
        self.managers_by_asset_address[asset_address_bin] = asset_manager
        self.managers_by_address[channel_manager_address_bin] = asset_manager

        for netting_contract_address in all_netting_contracts:
            asset_manager.register_channel_by_address(
                netting_contract_address,
                self.config['reveal_timeout'],
            )

    def stop(self):
        for asset_manager in self.managers_by_asset_address.itervalues():
            for task in asset_manager.transfermanager.transfertasks.itervalues(
            ):
                task.kill()

        wait_for = [self.alarm]
        self.alarm.stop_async()
        if self.healthcheck is not None:
            self.healthcheck.stop_async()
            wait_for.append(self.healthcheck)
        self.protocol.stop_async()

        wait_for.extend(self.protocol.address_greenlet.itervalues())

        for asset_manager in self.managers_by_asset_address.itervalues():
            wait_for.extend(
                asset_manager.transfermanager.transfertasks.itervalues())

        self.event_handler.uninstall_listeners()
        gevent.wait(wait_for)
Ejemplo n.º 9
0
    def __init__(self, chain, private_key_bin, transport, discovery, config):
        if not isinstance(private_key_bin, bytes) or len(private_key_bin) != 32:
            raise ValueError('invalid private_key')

        private_key = PrivateKey(private_key_bin)
        pubkey = private_key.public_key.format(compressed=False)

        self.channelgraphs = dict()
        self.manager_token = dict()
        self.swapkeys_tokenswaps = dict()
        self.swapkeys_greenlettasks = dict()

        self.identifier_statemanager = defaultdict(list)
        self.identifier_result = defaultdict(list)

        # This is a map from a hashlock to a list of channels, the same
        # hashlock can be used in more than one token (for tokenswaps), a
        # channel should be removed from this list only when the lock is
        # released/withdrawn but not when the secret is registered.
        self.tokens_hashlocks_channels = defaultdict(lambda: defaultdict(list))

        self.chain = chain
        self.config = config
        self.privkey = private_key_bin
        self.pubkey = pubkey
        self.private_key = private_key
        self.address = privatekey_to_address(private_key_bin)
        self.protocol = RaidenProtocol(transport, discovery, self)
        transport.protocol = self.protocol

        message_handler = RaidenMessageHandler(self)
        state_machine_event_handler = StateMachineEventHandler(self)
        pyethapp_blockchain_events = PyethappBlockchainEvents()
        greenlet_task_dispatcher = GreenletTasksDispatcher()

        alarm = AlarmTask(chain)
        # ignore the blocknumber
        alarm.register_callback(self.poll_blockchain_events)
        alarm.start()

        self._blocknumber = alarm.last_block_number
        alarm.register_callback(self.set_block_number)

        if config['max_unresponsive_time'] > 0:
            self.healthcheck = HealthcheckTask(
                self,
                config['send_ping_time'],
                config['max_unresponsive_time']
            )
            self.healthcheck.start()
        else:
            self.healthcheck = None

        self.api = RaidenAPI(self)
        self.alarm = alarm
        self.message_handler = message_handler
        self.state_machine_event_handler = state_machine_event_handler
        self.pyethapp_blockchain_events = pyethapp_blockchain_events
        self.greenlet_task_dispatcher = greenlet_task_dispatcher

        self.on_message = message_handler.on_message

        self.tokens_connectionmanagers = dict()  # token_address: ConnectionManager
Ejemplo n.º 10
0
class RaidenService(object):
    """ A Raiden node. """
    # pylint: disable=too-many-instance-attributes,too-many-public-methods

    def __init__(self, chain, private_key_bin, transport, discovery, config):
        if not isinstance(private_key_bin, bytes) or len(private_key_bin) != 32:
            raise ValueError('invalid private_key')

        private_key = PrivateKey(private_key_bin)
        pubkey = private_key.public_key.format(compressed=False)

        self.channelgraphs = dict()
        self.manager_token = dict()
        self.swapkeys_tokenswaps = dict()
        self.swapkeys_greenlettasks = dict()

        self.identifier_statemanager = defaultdict(list)
        self.identifier_result = defaultdict(list)

        # This is a map from a hashlock to a list of channels, the same
        # hashlock can be used in more than one token (for tokenswaps), a
        # channel should be removed from this list only when the lock is
        # released/withdrawn but not when the secret is registered.
        self.tokens_hashlocks_channels = defaultdict(lambda: defaultdict(list))

        self.chain = chain
        self.config = config
        self.privkey = private_key_bin
        self.pubkey = pubkey
        self.private_key = private_key
        self.address = privatekey_to_address(private_key_bin)
        self.protocol = RaidenProtocol(transport, discovery, self)
        transport.protocol = self.protocol

        message_handler = RaidenMessageHandler(self)
        state_machine_event_handler = StateMachineEventHandler(self)
        pyethapp_blockchain_events = PyethappBlockchainEvents()
        greenlet_task_dispatcher = GreenletTasksDispatcher()

        alarm = AlarmTask(chain)
        # ignore the blocknumber
        alarm.register_callback(self.poll_blockchain_events)
        alarm.start()

        self._blocknumber = alarm.last_block_number
        alarm.register_callback(self.set_block_number)

        if config['max_unresponsive_time'] > 0:
            self.healthcheck = HealthcheckTask(
                self,
                config['send_ping_time'],
                config['max_unresponsive_time']
            )
            self.healthcheck.start()
        else:
            self.healthcheck = None

        self.api = RaidenAPI(self)
        self.alarm = alarm
        self.message_handler = message_handler
        self.state_machine_event_handler = state_machine_event_handler
        self.pyethapp_blockchain_events = pyethapp_blockchain_events
        self.greenlet_task_dispatcher = greenlet_task_dispatcher

        self.on_message = message_handler.on_message

        self.tokens_connectionmanagers = dict()  # token_address: ConnectionManager

    def __repr__(self):
        return '<{} {}>'.format(self.__class__.__name__, pex(self.address))

    def set_block_number(self, blocknumber):
        self._blocknumber = blocknumber

        state_change = Block(blocknumber)
        self.state_machine_event_handler.dispatch_to_all_tasks(state_change)

        for graph in self.channelgraphs.itervalues():
            for channel in graph.address_channel.itervalues():
                channel.state_transition(state_change)

    def get_block_number(self):
        return self._blocknumber

    def poll_blockchain_events(self, block_number):  # pylint: disable=unused-argument
        on_statechange = self.state_machine_event_handler.on_blockchain_statechange

        for state_change in self.pyethapp_blockchain_events.poll_state_change():
            on_statechange(state_change)

    def find_channel_by_address(self, netting_channel_address_bin):
        for graph in self.channelgraphs.itervalues():
            channel = graph.address_channel.get(netting_channel_address_bin)

            if channel is not None:
                return channel

        raise ValueError('unknown channel {}'.format(encode_hex(netting_channel_address_bin)))

    def sign(self, message):
        """ Sign message inplace. """
        if not isinstance(message, SignedMessage):
            raise ValueError('{} is not signable.'.format(repr(message)))

        message.sign(self.private_key, self.address)

    def send(self, *args):
        raise NotImplementedError('use send_and_wait or send_async')

    def send_async(self, recipient, message):
        """ Send `message` to `recipient` using the raiden protocol.

        The protocol will take care of resending the message on a given
        interval until an Acknowledgment is received or a given number of
        tries.
        """

        if not isaddress(recipient):
            raise ValueError('recipient is not a valid address.')

        if recipient == self.address:
            raise ValueError('programming error, sending message to itself')

        return self.protocol.send_async(recipient, message)

    def send_and_wait(self, recipient, message, timeout):
        """ Send `message` to `recipient` and wait for the response or `timeout`.

        Args:
            recipient (address): The address of the node that will receive the
                message.
            message: The transfer message.
            timeout (float): How long should we wait for a response from `recipient`.

        Returns:
            None: If the wait timed out
            object: The result from the event
        """
        if not isaddress(recipient):
            raise ValueError('recipient is not a valid address.')

        self.protocol.send_and_wait(recipient, message, timeout)

    def register_secret(self, secret):
        """ Register the secret with any channel that has a hashlock on it.

        This must search through all channels registered for a given hashlock
        and ignoring the tokens. Useful for refund transfer, split transfer,
        and token swaps.
        """
        hashlock = sha3(secret)
        revealsecret_message = RevealSecret(secret)
        self.sign(revealsecret_message)

        for hash_channel in self.tokens_hashlocks_channels.itervalues():
            for channel in hash_channel[hashlock]:
                try:
                    channel.register_secret(secret)

                    # This will potentially be executed multiple times and could suffer
                    # from amplification, the protocol will ignore messages that were
                    # already registered and send it only until a first Ack is
                    # received.
                    self.send_async(
                        channel.partner_state.address,
                        revealsecret_message,
                    )
                except:  # pylint: disable=bare-except
                    # Only channels that care about the given secret can be
                    # registered and channels that have claimed the lock must
                    # be removed, so an exception should not happen at this
                    # point, nevertheless handle it because we dont want an
                    # error in a channel to mess the state from others.
                    log.error('programming error')

    def register_channel_for_hashlock(self, token_address, channel, hashlock):
        channels_registered = self.tokens_hashlocks_channels[token_address][hashlock]

        if channel not in channels_registered:
            channels_registered.append(channel)

    def handle_secret(  # pylint: disable=too-many-arguments
            self,
            identifier,
            token_address,
            secret,
            partner_secret_message,
            hashlock):
        """ Unlock/Witdraws locks, register the secret, and send Secret
        messages as necessary.

        This function will:
            - Unlock the locks created by this node and send a Secret message to
            the corresponding partner so that she can withdraw the token.
            - Withdraw the lock from sender.
            - Register the secret for the locks received and reveal the secret
            to the senders

        Note:
            The channel needs to be registered with
            `raiden.register_channel_for_hashlock`.
        """
        # handling the secret needs to:
        # - unlock the token for all `forward_channel` (the current one
        #   and the ones that failed with a refund)
        # - send a message to each of the forward nodes allowing them
        #   to withdraw the token
        # - register the secret for the `originating_channel` so that a
        #   proof can be made, if necessary
        # - reveal the secret to the `sender` node (otherwise we
        #   cannot withdraw the token)
        channels_list = self.tokens_hashlocks_channels[token_address][hashlock]
        channels_to_remove = list()

        # Dont use the partner_secret_message.token since it might not match
        # the current token manager
        our_secret_message = Secret(
            identifier,
            secret,
            token_address,
        )
        self.sign(our_secret_message)

        revealsecret_message = RevealSecret(secret)
        self.sign(revealsecret_message)

        for channel in channels_list:
            # unlock a sent lock
            if channel.partner_state.balance_proof.is_unclaimed(hashlock):
                channel.release_lock(secret)
                self.send_async(
                    channel.partner_state.address,
                    our_secret_message,
                )
                channels_to_remove.append(channel)

            # withdraw a pending lock
            if channel.our_state.balance_proof.is_unclaimed(hashlock):
                if partner_secret_message:
                    matching_sender = (
                        partner_secret_message.sender == channel.partner_state.address
                    )
                    matching_token = partner_secret_message.token == channel.token_address

                    if matching_sender and matching_token:
                        channel.withdraw_lock(secret)
                        channels_to_remove.append(channel)
                    else:
                        channel.register_secret(secret)
                        self.send_async(
                            channel.partner_state.address,
                            revealsecret_message,
                        )
                else:
                    channel.register_secret(secret)
                    self.send_async(
                        channel.partner_state.address,
                        revealsecret_message,
                    )

        for channel in channels_to_remove:
            channels_list.remove(channel)

        if len(channels_list) == 0:
            del self.tokens_hashlocks_channels[token_address][hashlock]

    def get_channel_details(self, token_address, netting_channel):
        channel_details = netting_channel.detail(self.address)
        our_state = ChannelEndState(
            channel_details['our_address'],
            channel_details['our_balance'],
            netting_channel.opened(),
        )
        partner_state = ChannelEndState(
            channel_details['partner_address'],
            channel_details['partner_balance'],
            netting_channel.opened(),
        )

        def register_channel_for_hashlock(channel, hashlock):
            self.register_channel_for_hashlock(
                token_address,
                channel,
                hashlock,
            )

        channel_address = netting_channel.address
        reveal_timeout = self.config['reveal_timeout']
        settle_timeout = channel_details['settle_timeout']

        external_state = ChannelExternalState(
            register_channel_for_hashlock,
            netting_channel,
        )

        channel_detail = ChannelDetails(
            channel_address,
            our_state,
            partner_state,
            external_state,
            reveal_timeout,
            settle_timeout,
        )

        return channel_detail

    def register_registry(self, registry_address):
        proxies = get_relevant_proxies(
            self.chain,
            self.address,
            registry_address,
        )

        # Install the filters first to avoid missing changes, as a consequence
        # some events might be applied twice.
        self.pyethapp_blockchain_events.add_proxies_listeners(proxies)

        block_number = self.get_block_number()

        for manager in proxies.channel_managers:
            token_address = manager.token_address()
            manager_address = manager.address

            channels_detail = list()
            netting_channels = proxies.channelmanager_nettingchannels[manager_address]
            for channel in netting_channels:
                detail = self.get_channel_details(token_address, channel)
                channels_detail.append(detail)

            edge_list = manager.channels_addresses()
            graph = ChannelGraph(
                self.address,
                manager_address,
                token_address,
                edge_list,
                channels_detail,
                block_number,
            )

            self.manager_token[manager_address] = token_address
            self.channelgraphs[token_address] = graph

            self.tokens_connectionmanagers[token_address] = ConnectionManager(
                self,
                token_address,
                graph
            )

    def register_channel_manager(self, manager_address):
        manager = self.chain.manager(manager_address)
        netting_channels = [
            self.chain.netting_channel(channel_address)
            for channel_address in manager.channels_by_participant(self.address)
        ]

        # Install the filters first to avoid missing changes, as a consequence
        # some events might be applied twice.
        self.pyethapp_blockchain_events.add_channel_manager_listener(manager)
        for channel in netting_channels:
            self.pyethapp_blockchain_events.add_netting_channel_listener(channel)

        token_address = manager.token_address()
        edge_list = manager.channels_addresses()
        channels_detail = [
            self.get_channel_details(token_address, channel)
            for channel in netting_channels
        ]

        block_number = self.get_block_number()
        graph = ChannelGraph(
            self.address,
            manager_address,
            token_address,
            edge_list,
            channels_detail,
            block_number,
        )

        self.manager_token[manager_address] = token_address
        self.channelgraphs[token_address] = graph

        self.tokens_connectionmanagers[token_address] = ConnectionManager(
            self,
            token_address,
            graph
        )

    def register_netting_channel(self, token_address, channel_address):
        netting_channel = self.chain.netting_channel(channel_address)
        self.pyethapp_blockchain_events.add_netting_channel_listener(netting_channel)

        block_number = self.get_block_number()
        detail = self.get_channel_details(token_address, netting_channel)
        graph = self.channelgraphs[token_address]
        graph.add_channel(detail, block_number)

    def connection_manager_for_token(self, token_address):
        if not isaddress(token_address):
            raise InvalidAddress('token address is not valid.')
        if token_address in self.tokens_connectionmanagers.keys():
            manager = self.tokens_connectionmanagers[token_address]
        else:
            raise InvalidAddress('token is not registered.')
        return manager

    def stop(self):
        wait_for = [self.alarm]

        wait_for.extend(self.greenlet_task_dispatcher.stop())

        self.alarm.stop_async()
        if self.healthcheck is not None:
            self.healthcheck.stop_async()
            wait_for.append(self.healthcheck)
        self.protocol.stop_async()

        wait_for.extend(self.protocol.address_greenlet.itervalues())

        self.pyethapp_blockchain_events.uninstall_all_event_listeners()
        gevent.wait(wait_for)

    def transfer_async(self, token_address, amount, target, identifier=None):
        """ Transfer `amount` between this node and `target`.

        This method will start an asyncronous transfer, the transfer might fail
        or succeed depending on a couple of factors:

            - Existence of a path that can be used, through the usage of direct
              or intermediary channels.
            - Network speed, making the transfer sufficiently fast so it doesn't
              timeout.
        """
        graph = self.channelgraphs[token_address]

        if identifier is None:
            identifier = create_default_identifier(self.address, token_address, target)

        direct_channel = graph.partneraddress_channel.get(target)
        if direct_channel:
            async_result = self._direct_or_mediated_transfer(
                token_address,
                amount,
                identifier,
                direct_channel,
            )
            return async_result

        else:
            async_result = self._mediated_transfer(
                token_address,
                amount,
                identifier,
                target,
            )

            return async_result

    def _direct_or_mediated_transfer(self, token_address, amount, identifier, direct_channel):
        """ Check the direct channel and if possible use it, otherwise start a
        mediated transfer.
        """

        if not direct_channel.isopen:
            log.info(
                'DIRECT CHANNEL %s > %s is closed',
                pex(direct_channel.our_state.address),
                pex(direct_channel.partner_state.address),
            )

            async_result = self._mediated_transfer(
                token_address,
                amount,
                identifier,
                direct_channel.partner_state.address,
            )
            return async_result

        elif amount > direct_channel.distributable:
            log.info(
                'DIRECT CHANNEL %s > %s doesnt have enough funds [%s]',
                pex(direct_channel.our_state.address),
                pex(direct_channel.partner_state.address),
                amount,
            )

            async_result = self._mediated_transfer(
                token_address,
                amount,
                identifier,
                direct_channel.partner_state.address,
            )
            return async_result

        else:
            direct_transfer = direct_channel.create_directtransfer(amount, identifier)
            self.sign(direct_transfer)
            direct_channel.register_transfer(direct_transfer)

            async_result = self.protocol.send_async(
                direct_channel.partner_state.address,
                direct_transfer,
            )
            return async_result

    def _mediated_transfer(self, token_address, amount, identifier, target):
        return self.start_mediated_transfer(token_address, amount, identifier, target)

    def start_mediated_transfer(self, token_address, amount, identifier, target):
        # pylint: disable=too-many-locals
        graph = self.channelgraphs[token_address]
        routes = graph.get_best_routes(
            self.address,
            target,
            amount,
            lock_timeout=None,
        )

        available_routes = [
            route
            for route in map(route_to_routestate, routes)
            if route.state == CHANNEL_STATE_OPENED
        ]

        identifier = create_default_identifier(self.address, token_address, target)
        route_state = RoutesState(available_routes)
        our_address = self.address
        block_number = self.get_block_number()

        transfer_state = LockedTransferState(
            identifier=identifier,
            amount=amount,
            token=token_address,
            initiator=self.address,
            target=target,
            expiration=None,
            hashlock=None,
            secret=None,
        )

        # Issue #489
        #
        # Raiden may fail after a state change using the random generator is
        # handled but right before the snapshot is taken. If that happens on
        # the next initialization when raiden is recovering and applying the
        # pending state changes a new secret will be generated and the
        # resulting events won't match, this breaks the architecture model,
        # since it's assumed the re-execution of a state change will always
        # produce the same events.
        #
        # TODO: Removed the secret generator from the InitiatorState and add
        # the secret into all state changes that require one, this way the
        # secret will be serialized with the state change and the recovery will
        # use the same /random/ secret.
        random_generator = RandomSecretGenerator()

        init_initiator = ActionInitInitiator(
            our_address=our_address,
            transfer=transfer_state,
            routes=route_state,
            random_generator=random_generator,
            block_number=block_number,
        )

        state_manager = StateManager(initiator.state_transition, None)
        all_events = state_manager.dispatch(init_initiator)

        for event in all_events:
            self.state_machine_event_handler.on_event(event)

        async_result = AsyncResult()

        # TODO: implement the network timeout raiden.config['msg_timeout'] and
        # cancel the current transfer if it hapens (issue #374)
        self.identifier_statemanager[identifier].append(state_manager)
        self.identifier_result[identifier].append(async_result)

        return async_result

    def mediate_mediated_transfer(self, message):
        # pylint: disable=too-many-locals
        identifier = message.identifier
        amount = message.lock.amount
        target = message.target
        token = message.token
        graph = self.channelgraphs[token]
        routes = graph.get_best_routes(
            self.address,
            target,
            amount,
            lock_timeout=None,
        )

        available_routes = [
            route
            for route in map(route_to_routestate, routes)
            if route.state == CHANNEL_STATE_OPENED
        ]

        from_channel = graph.partneraddress_channel[message.sender]
        from_route = channel_to_routestate(from_channel, message.sender)

        our_address = self.address
        from_transfer = lockedtransfer_from_message(message)
        route_state = RoutesState(available_routes)
        block_number = self.get_block_number()

        init_mediator = ActionInitMediator(
            our_address,
            from_transfer,
            route_state,
            from_route,
            block_number,
        )

        state_manager = StateManager(mediator.state_transition, None)
        all_events = state_manager.dispatch(init_mediator)

        for event in all_events:
            self.state_machine_event_handler.on_event(event)

        self.identifier_statemanager[identifier].append(state_manager)

    def target_mediated_transfer(self, message):
        graph = self.channelgraphs[message.token]
        from_channel = graph.partneraddress_channel[message.sender]
        from_route = channel_to_routestate(from_channel, message.sender)

        from_transfer = lockedtransfer_from_message(message)
        our_address = self.address
        block_number = self.get_block_number()

        init_target = ActionInitTarget(
            our_address,
            from_route,
            from_transfer,
            block_number,
        )

        state_manager = StateManager(target_task.state_transition, None)
        all_events = state_manager.dispatch(init_target)

        for event in all_events:
            self.state_machine_event_handler.on_event(event)

        identifier = message.identifier
        self.identifier_statemanager[identifier].append(state_manager)
Ejemplo n.º 11
0
    def __init__(self, chain, default_registry, private_key_bin, transport,
                 discovery, config):
        if not isinstance(private_key_bin,
                          bytes) or len(private_key_bin) != 32:
            raise ValueError('invalid private_key')

        invalid_timeout = (
            config['settle_timeout'] < NETTINGCHANNEL_SETTLE_TIMEOUT_MIN
            or config['settle_timeout'] > NETTINGCHANNEL_SETTLE_TIMEOUT_MAX)
        if invalid_timeout:
            raise ValueError('settle_timeout must be in range [{}, {}]'.format(
                NETTINGCHANNEL_SETTLE_TIMEOUT_MIN,
                NETTINGCHANNEL_SETTLE_TIMEOUT_MAX))

        self.tokens_to_connectionmanagers = dict()
        self.identifier_to_results = defaultdict(list)

        # This is a map from a hashlock to a list of channels, the same
        # hashlock can be used in more than one token (for tokenswaps), a
        # channel should be removed from this list only when the lock is
        # released/withdrawn but not when the secret is registered.
        self.token_to_hashlock_to_channels = defaultdict(
            lambda: defaultdict(list))

        self.chain = chain
        self.default_registry = default_registry
        self.config = config
        self.privkey = private_key_bin
        self.address = privatekey_to_address(private_key_bin)

        endpoint_registration_event = gevent.spawn(
            discovery.register,
            self.address,
            config['external_ip'],
            config['external_port'],
        )
        endpoint_registration_event.link_exception(
            endpoint_registry_exception_handler)

        self.private_key = PrivateKey(private_key_bin)
        self.pubkey = self.private_key.public_key.format(compressed=False)
        self.protocol = RaidenProtocol(
            transport,
            discovery,
            self,
            config['protocol']['retry_interval'],
            config['protocol']['retries_before_backoff'],
            config['protocol']['nat_keepalive_retries'],
            config['protocol']['nat_keepalive_timeout'],
            config['protocol']['nat_invitation_timeout'],
        )

        # TODO: remove this cyclic dependency
        transport.protocol = self.protocol

        self.blockchain_events = BlockchainEvents()
        self.alarm = AlarmTask(chain)
        self.shutdown_timeout = config['shutdown_timeout']
        self._block_number = None
        self.stop_event = Event()
        self.start_event = Event()
        self.chain.client.inject_stop_event(self.stop_event)

        self.wal = None

        self.database_path = config['database_path']
        if self.database_path != ':memory:':
            database_dir = os.path.dirname(config['database_path'])
            os.makedirs(database_dir, exist_ok=True)

            self.database_dir = database_dir
            # Prevent concurrent acces to the same db
            self.lock_file = os.path.join(self.database_dir, '.lock')
            self.db_lock = filelock.FileLock(self.lock_file)
        else:
            self.database_path = ':memory:'
            self.database_dir = None
            self.lock_file = None
            self.serialization_file = None
            self.db_lock = None

        # If the endpoint registration fails the node will quit, this must
        # finish before starting the protocol
        endpoint_registration_event.join()

        # Lock used to serialize calls to `poll_blockchain_events`, this is
        # important to give a consistent view of the node state.
        self.event_poll_lock = gevent.lock.Semaphore()

        self.start()
Ejemplo n.º 12
0
class RaidenService:
    """ A Raiden node. """
    def __init__(self, chain, default_registry, private_key_bin, transport,
                 discovery, config):
        if not isinstance(private_key_bin,
                          bytes) or len(private_key_bin) != 32:
            raise ValueError('invalid private_key')

        invalid_timeout = (
            config['settle_timeout'] < NETTINGCHANNEL_SETTLE_TIMEOUT_MIN
            or config['settle_timeout'] > NETTINGCHANNEL_SETTLE_TIMEOUT_MAX)
        if invalid_timeout:
            raise ValueError('settle_timeout must be in range [{}, {}]'.format(
                NETTINGCHANNEL_SETTLE_TIMEOUT_MIN,
                NETTINGCHANNEL_SETTLE_TIMEOUT_MAX))

        self.tokens_to_connectionmanagers = dict()
        self.identifier_to_results = defaultdict(list)

        # This is a map from a hashlock to a list of channels, the same
        # hashlock can be used in more than one token (for tokenswaps), a
        # channel should be removed from this list only when the lock is
        # released/withdrawn but not when the secret is registered.
        self.token_to_hashlock_to_channels = defaultdict(
            lambda: defaultdict(list))

        self.chain = chain
        self.default_registry = default_registry
        self.config = config
        self.privkey = private_key_bin
        self.address = privatekey_to_address(private_key_bin)

        endpoint_registration_event = gevent.spawn(
            discovery.register,
            self.address,
            config['external_ip'],
            config['external_port'],
        )
        endpoint_registration_event.link_exception(
            endpoint_registry_exception_handler)

        self.private_key = PrivateKey(private_key_bin)
        self.pubkey = self.private_key.public_key.format(compressed=False)
        self.protocol = RaidenProtocol(
            transport,
            discovery,
            self,
            config['protocol']['retry_interval'],
            config['protocol']['retries_before_backoff'],
            config['protocol']['nat_keepalive_retries'],
            config['protocol']['nat_keepalive_timeout'],
            config['protocol']['nat_invitation_timeout'],
        )

        # TODO: remove this cyclic dependency
        transport.protocol = self.protocol

        self.blockchain_events = BlockchainEvents()
        self.alarm = AlarmTask(chain)
        self.shutdown_timeout = config['shutdown_timeout']
        self._block_number = None
        self.stop_event = Event()
        self.start_event = Event()
        self.chain.client.inject_stop_event(self.stop_event)

        self.wal = None

        self.database_path = config['database_path']
        if self.database_path != ':memory:':
            database_dir = os.path.dirname(config['database_path'])
            os.makedirs(database_dir, exist_ok=True)

            self.database_dir = database_dir
            # Prevent concurrent acces to the same db
            self.lock_file = os.path.join(self.database_dir, '.lock')
            self.db_lock = filelock.FileLock(self.lock_file)
        else:
            self.database_path = ':memory:'
            self.database_dir = None
            self.lock_file = None
            self.serialization_file = None
            self.db_lock = None

        # If the endpoint registration fails the node will quit, this must
        # finish before starting the protocol
        endpoint_registration_event.join()

        # Lock used to serialize calls to `poll_blockchain_events`, this is
        # important to give a consistent view of the node state.
        self.event_poll_lock = gevent.lock.Semaphore()

        self.start()

    def start(self):
        """ Start the node. """
        # XXX Should this really be here? Or will start() never be called again
        # after stop() in the lifetime of Raiden apart from the tests? This is
        # at least at the moment prompted by tests/integration/test_transer.py
        if self.stop_event and self.stop_event.is_set():
            self.stop_event.clear()

        if self.database_dir is not None:
            self.db_lock.acquire(timeout=0)
            assert self.db_lock.is_locked

        # The database may be :memory:
        storage = sqlite.SQLiteStorage(self.database_path,
                                       serialize.PickleSerializer())
        self.wal, unapplied_events = wal.restore_from_latest_snapshot(
            node.state_transition,
            storage,
        )

        # First run, initialize the basic state
        if self.wal.state_manager.current_state is None:
            block_number = self.chain.block_number()
            first_run = True

            state_change = ActionInitNode(block_number)
            self.wal.log_and_dispatch(state_change, block_number)

        # The alarm task must be started after the snapshot is loaded or the
        # state is primed, the callbacks assume the node is initialized.
        self.alarm.start()
        self.alarm.register_callback(self.poll_blockchain_events)
        self.alarm.register_callback(self.set_block_number)
        self._block_number = self.chain.block_number()

        # Registry registration must start *after* the alarm task. This
        # avoids corner cases where the registry is queried in block A, a new
        # block B is mined, and the alarm starts polling at block C.
        if first_run:
            self.register_payment_network(self.default_registry.address)

        # Start the protocol after the registry is queried to avoid warning
        # about unknown channels.
        self.protocol.start()

        # Health check needs the protocol layer
        self.start_neighbours_healthcheck()

        self.start_event.set()

        for event in unapplied_events:
            on_raiden_event(self, event)

    def start_neighbours_healthcheck(self):
        for neighbour in views.all_neighbour_nodes(
                self.wal.state_manager.current_state):
            if neighbour != ConnectionManager.BOOTSTRAP_ADDR:
                self.start_health_check_for(neighbour)

    def stop(self):
        """ Stop the node. """
        # Needs to come before any greenlets joining
        self.stop_event.set()
        self.protocol.stop_and_wait()
        self.alarm.stop_async()

        wait_for = [self.alarm]
        wait_for.extend(self.protocol.greenlets)
        # We need a timeout to prevent an endless loop from trying to
        # contact the disconnected client
        gevent.wait(wait_for, timeout=self.shutdown_timeout)

        # Filters must be uninstalled after the alarm task has stopped. Since
        # the events are polled by an alarm task callback, if the filters are
        # uninstalled before the alarm task is fully stopped the callback
        # `poll_blockchain_events` will fail.
        #
        # We need a timeout to prevent an endless loop from trying to
        # contact the disconnected client
        try:
            with gevent.Timeout(self.shutdown_timeout):
                self.blockchain_events.uninstall_all_event_listeners()
        except (gevent.timeout.Timeout, RaidenShuttingDown):
            pass

        if self.db_lock is not None:
            self.db_lock.release()

    def __repr__(self):
        return '<{} {}>'.format(self.__class__.__name__, pex(self.address))

    def set_block_number(self, block_number):
        state_change = Block(block_number)
        self.handle_state_change(state_change, block_number)

        # To avoid races, only update the internal cache after all the state
        # tasks have been updated.
        self._block_number = block_number

    def handle_state_change(self, state_change, block_number=None):
        is_logging = log.isEnabledFor(logging.DEBUG)

        if is_logging:
            log.debug('STATE CHANGE',
                      node=pex(self.address),
                      state_change=state_change)

        if block_number is None:
            block_number = self.get_block_number()

        event_list = self.wal.log_and_dispatch(state_change, block_number)

        for event in event_list:
            if is_logging:
                log.debug('EVENT', node=pex(self.address), event=event)

            on_raiden_event(self, event)

        return event_list

    def set_node_network_state(self, node_address, network_state):
        state_change = ActionChangeNodeNetworkState(node_address,
                                                    network_state)
        self.wal.log_and_dispatch(state_change, self.get_block_number())

    def start_health_check_for(self, node_address):
        self.protocol.start_health_check(node_address)

    def get_block_number(self):
        return views.block_number(self.wal.state_manager.current_state)

    def poll_blockchain_events(self, current_block=None):  # pylint: disable=unused-argument
        with self.event_poll_lock:
            for event in self.blockchain_events.poll_blockchain_events():
                on_blockchain_event(self, event)

    def sign(self, message):
        """ Sign message inplace. """
        if not isinstance(message, SignedMessage):
            raise ValueError('{} is not signable.'.format(repr(message)))

        message.sign(self.private_key, self.address)

    def send_async(self, recipient, message):
        """ Send `message` to `recipient` using the raiden protocol.

        The protocol will take care of resending the message on a given
        interval until an Acknowledgment is received or a given number of
        tries.
        """

        if not isaddress(recipient):
            raise ValueError('recipient is not a valid address.')

        if recipient == self.address:
            raise ValueError('programming error, sending message to itself')

        return self.protocol.send_async(recipient, message)

    def send_and_wait(self, recipient, message, timeout):
        """ Send `message` to `recipient` and wait for the response or `timeout`.

        Args:
            recipient (address): The address of the node that will receive the
                message.
            message: The transfer message.
            timeout (float): How long should we wait for a response from `recipient`.

        Returns:
            None: If the wait timed out
            object: The result from the event
        """
        if not isaddress(recipient):
            raise ValueError('recipient is not a valid address.')

        self.protocol.send_and_wait(recipient, message, timeout)

    def register_payment_network(self, registry_address):
        proxies = get_relevant_proxies(
            self.chain,
            self.address,
            registry_address,
        )

        # Install the filters first to avoid missing changes, as a consequence
        # some events might be applied twice.
        self.blockchain_events.add_proxies_listeners(proxies)

        token_network_list = list()
        for manager in proxies.channel_managers:
            manager_address = manager.address
            netting_channel_proxies = proxies.channelmanager_nettingchannels[
                manager_address]
            network = get_token_network_state_from_proxies(
                self, manager, netting_channel_proxies)
            token_network_list.append(network)

        payment_network = PaymentNetworkState(
            registry_address,
            token_network_list,
        )

        state_change = ContractReceiveNewPaymentNetwork(payment_network)
        self.handle_state_change(state_change)

    def connection_manager_for_token(self, token_address):
        if not isaddress(token_address):
            raise InvalidAddress('token address is not valid.')

        registry_address = self.default_registry.address
        known_token_networks = views.get_token_network_addresses_for(
            self.wal.state_manager.current_state,
            registry_address,
        )

        if token_address not in known_token_networks:
            raise InvalidAddress('token is not registered.')

        manager = self.tokens_to_connectionmanagers.get(token_address)

        if manager is None:
            manager = ConnectionManager(self, token_address)
            self.tokens_to_connectionmanagers[token_address] = manager

        return manager

    def leave_all_token_networks(self):
        state_change = ActionLeaveAllNetworks()
        self.wal.log_and_dispatch(state_change, self.get_block_number())

    def close_and_settle(self):
        log.info('raiden will close and settle all channels now')

        self.leave_all_token_networks()

        connection_managers = [
            self.tokens_to_connectionmanagers[token_address]
            for token_address in self.tokens_to_connectionmanagers
        ]

        if connection_managers:
            waiting.wait_for_settle_all_channels(
                self,
                self.alarm.wait_time,
            )

    def mediated_transfer_async(self, token_address, amount, target,
                                identifier):
        """ Transfer `amount` between this node and `target`.

        This method will start an asyncronous transfer, the transfer might fail
        or succeed depending on a couple of factors:

            - Existence of a path that can be used, through the usage of direct
              or intermediary channels.
            - Network speed, making the transfer sufficiently fast so it doesn't
              expire.
        """

        async_result = self.start_mediated_transfer(
            token_address,
            amount,
            identifier,
            target,
        )

        return async_result

    def direct_transfer_async(self, token_address, amount, target, identifier):
        """ Do a direct transfer with target.

        Direct transfers are non cancellable and non expirable, since these
        transfers are a signed balance proof with the transferred amount
        incremented.

        Because the transfer is non cancellable, there is a level of trust with
        the target. After the message is sent the target is effectively paid
        and then it is not possible to revert.

        The async result will be set to False iff there is no direct channel
        with the target or the payer does not have balance to complete the
        transfer, otherwise because the transfer is non expirable the async
        result *will never be set to False* and if the message is sent it will
        hang until the target node acknowledge the message.

        This transfer should be used as an optimization, since only two packets
        are required to complete the transfer (from the payers perspective),
        whereas the mediated transfer requires 6 messages.
        """

        self.protocol.start_health_check(target)

        if identifier is None:
            identifier = create_default_identifier()

        registry_address = self.default_registry.address
        direct_transfer = ActionTransferDirect(
            registry_address,
            token_address,
            target,
            identifier,
            amount,
        )

        self.handle_state_change(direct_transfer)

    def start_mediated_transfer(self, token_address, amount, identifier,
                                target):
        self.protocol.start_health_check(target)

        if identifier is None:
            identifier = create_default_identifier()

        assert identifier not in self.identifier_to_results

        async_result = AsyncResult()
        self.identifier_to_results[identifier].append(async_result)

        secret = random_secret()
        init_initiator_statechange = initiator_init(
            self,
            identifier,
            amount,
            secret,
            token_address,
            target,
        )

        # TODO: implement the network timeout raiden.config['msg_timeout'] and
        # cancel the current transfer if it happens (issue #374)
        #
        # Dispatch the state change even if there are no routes to create the
        # wal entry.
        self.handle_state_change(init_initiator_statechange)

        return async_result

    def mediate_mediated_transfer(self, transfer):
        init_mediator_statechange = mediator_init(self, transfer)
        self.handle_state_change(init_mediator_statechange)

    def target_mediated_transfer(self, transfer):
        init_target_statechange = target_init(self, transfer)
        self.handle_state_change(init_target_statechange)
Ejemplo n.º 13
0
    def __init__(self, chain, private_key_bin, transport, discovery, config):
        # pylint: disable=too-many-arguments

        if not isinstance(private_key_bin,
                          bytes) or len(private_key_bin) != 32:
            raise ValueError('invalid private_key')

        private_key = PrivateKey(
            private_key_bin,
            ctx=GLOBAL_CTX,
            raw=True,
        )
        pubkey = private_key.pubkey.serialize(compressed=False)

        self.registries = list()
        self.managers_by_token_address = dict()
        self.managers_by_address = dict()

        self.chain = chain
        self.config = config
        self.privkey = private_key_bin
        self.pubkey = pubkey
        self.private_key = private_key
        self.address = privatekey_to_address(private_key_bin)
        self.protocol = RaidenProtocol(transport, discovery, self)
        transport.protocol = self.protocol

        message_handler = RaidenMessageHandler(self)
        event_handler = RaidenEventHandler(self)

        alarm = AlarmTask(chain)
        # ignore the blocknumber
        alarm.register_callback(
            lambda _: event_handler.poll_all_event_listeners())
        alarm.start()

        self._blocknumber = alarm.last_block_number
        alarm.register_callback(self.set_block_number)

        if config['max_unresponsive_time'] > 0:
            self.healthcheck = HealthcheckTask(self, config['send_ping_time'],
                                               config['max_unresponsive_time'])
            self.healthcheck.start()
        else:
            self.healthcheck = None

        self.api = RaidenAPI(self)
        self.alarm = alarm
        self.event_handler = event_handler
        self.message_handler = message_handler
        self.start_event_listener = event_handler.start_event_listener

        self.on_message = message_handler.on_message
        self.on_event = event_handler.on_event

        self.api_event_manager = APIEventManager(self)

        # Adding events to Queue should be stopped if connection closed by client,
        # and a max-size of the Queue should be introduced!
        # At the moment, as soon as the callbacks are registered, the Queue will fill indefinately,
        # if no client polls the Events from the Queue
        self.api_event_manager.register_callbacks()
Ejemplo n.º 14
0
class RaidenService(object):  # pylint: disable=too-many-instance-attributes
    """ A Raiden node. """

    def __init__(self, chain, private_key_bin, transport, discovery, config):
        # pylint: disable=too-many-arguments

        if not isinstance(private_key_bin, bytes) or len(private_key_bin) != 32:
            raise ValueError('invalid private_key')

        private_key = PrivateKey(
            private_key_bin,
            ctx=GLOBAL_CTX,
            raw=True,
        )
        pubkey = private_key.pubkey.serialize(compressed=False)

        self.registries = list()
        self.managers_by_asset_address = dict()
        self.managers_by_address = dict()
        self.event_listeners = list()

        self.chain = chain
        self.config = config
        self.privkey = private_key_bin
        self.pubkey = pubkey
        self.private_key = private_key
        self.address = privatekey_to_address(private_key_bin)
        self.protocol = RaidenProtocol(transport, discovery, self)
        transport.protocol = self.protocol

        message_handler = RaidenMessageHandler(self)
        event_handler = RaidenEventHandler(self)

        alarm = AlarmTask(chain)
        alarm.start()
        if config['max_unresponsive_time'] > 0:
            self.healthcheck = HealthcheckTask(
                self,
                config['send_ping_time'],
                config['max_unresponsive_time']
            )
            self.healthcheck.start()
        else:
            self.healthcheck = None

        self.api = RaidenAPI(self)
        self.alarm = alarm
        self.event_handler = event_handler
        self.message_handler = message_handler

        self.on_message = message_handler.on_message
        self.on_event = event_handler.on_event

    def __repr__(self):
        return '<{} {}>'.format(self.__class__.__name__, pex(self.address))

    def get_manager_by_asset_address(self, asset_address_bin):
        """ Return the manager for the given `asset_address_bin`.  """
        return self.managers_by_asset_address[asset_address_bin]

    def get_manager_by_address(self, manager_address_bin):
        return self.managers_by_address[manager_address_bin]

    def find_channel_by_address(self, netting_channel_address_bin):
        for manager in self.managers_by_address.itervalues():
            channel = manager.address_channel.get(netting_channel_address_bin)

            if channel is not None:
                return channel

        raise ValueError('unknown channel {}'.format(encode_hex(netting_channel_address_bin)))

    def sign(self, message):
        """ Sign message inplace. """
        if not isinstance(message, SignedMessage):
            raise ValueError('{} is not signable.'.format(repr(message)))

        message.sign(self.private_key, self.address)

    def send(self, *args):
        raise NotImplementedError('use send_and_wait or send_async')

    def send_async(self, recipient, message):
        """ Send `message` to `recipient` using the raiden protocol.

        The protocol will take care of resending the message on a given
        interval until an Acknowledgment is received or a given number of
        tries.
        """

        if not isaddress(recipient):
            raise ValueError('recipient is not a valid address.')

        if recipient == self.address:
            raise ValueError('programming error, sending message to itself')

        return self.protocol.send_async(recipient, message)

    def send_and_wait(self, recipient, message, timeout):
        """ Send `message` to `recipient` and wait for the response or `timeout`.

        Args:
            recipient (address): The address of the node that will receive the
                message.
            message: The transfer message.
            timeout (float): How long should we wait for a response from `recipient`.

        Returns:
            None: If the wait timed out
            object: The result from the event
        """
        if not isaddress(recipient):
            raise ValueError('recipient is not a valid address.')

        self.protocol.send_and_wait(recipient, message, timeout)

    # api design regarding locks:
    # - `register_secret` method was added because secret registration can be a
    #   cross asset operation
    # - unlocking a lock is not a cross asset operation, for this reason it's
    #   only available in the asset manager

    def register_secret(self, secret):
        """ Register the secret with any channel that has a hashlock on it.

        This must search through all channels registered for a given hashlock
        and ignoring the assets. Useful for refund transfer, split transfer,
        and exchanges.
        """
        for asset_manager in self.managers_by_asset_address.values():
            try:
                asset_manager.register_secret(secret)
            except:  # pylint: disable=bare-except
                # Only channels that care about the given secret can be
                # registered and channels that have claimed the lock must
                # be removed, so an excpetion should not happen at this
                # point, nevertheless handle it because we dont want a
                # error in a channel to mess the state from others.
                log.error('programming error')

    def message_for_task(self, message, hashlock):
        """ Sends the message to the corresponding task.

        The corresponding task is found by matching the hashlock.

        Return:
            bool: True if a corresponding task is found, False otherwise.
        """
        # allow multiple managers to register for the hashlock (used for exchanges)
        found = 0

        for asset_manager in self.managers_by_asset_address.values():
            task = asset_manager.transfermanager.transfertasks.get(hashlock)

            if task is not None:
                task.on_response(message)
                found += 1

        return found

    def register_registry(self, registry):
        """ Register the registry and intialize all the related assets and
        channels.
        """
        translator = ContractTranslator(REGISTRY_ABI)

        assetadded = registry.assetadded_filter()

        all_manager_addresses = registry.manager_addresses()

        task_name = 'Registry {}'.format(pex(registry.address))
        asset_listener = LogListenerTask(
            task_name,
            assetadded,
            self.on_event,
            translator,
        )
        asset_listener.start()
        self.event_listeners.append(asset_listener)

        self.registries.append(registry)

        for manager_address in all_manager_addresses:
            channel_manager = self.chain.manager(manager_address)
            self.register_channel_manager(channel_manager)

    def register_channel_manager(self, channel_manager):
        """ Discover and register the channels for the given asset. """
        translator = ContractTranslator(CHANNEL_MANAGER_ABI)

        # To avoid missing changes, first create the filter, call the
        # contract and then start polling.
        channelnew = channel_manager.channelnew_filter()

        all_netting_contracts = channel_manager.channels_by_participant(self.address)

        task_name = 'ChannelManager {}'.format(pex(channel_manager.address))
        channel_listener = LogListenerTask(
            task_name,
            channelnew,
            self.on_event,
            translator,
        )
        channel_listener.start()
        self.event_listeners.append(channel_listener)

        asset_address_bin = channel_manager.asset_address()
        channel_manager_address_bin = channel_manager.address
        edges = channel_manager.channels_addresses()
        channel_graph = ChannelGraph(edges)

        asset_manager = AssetManager(
            self,
            asset_address_bin,
            channel_manager_address_bin,
            channel_graph,
        )
        self.managers_by_asset_address[asset_address_bin] = asset_manager
        self.managers_by_address[channel_manager_address_bin] = asset_manager

        for netting_contract_address in all_netting_contracts:
            asset_manager.register_channel_by_address(
                netting_contract_address,
                self.config['reveal_timeout'],
            )

    def stop(self):
        for listener in self.event_listeners:
            listener.stop_async()
            self.chain.uninstall_filter(listener.filter_.filter_id_raw)

        for asset_manager in self.managers_by_asset_address.itervalues():
            for task in asset_manager.transfermanager.transfertasks.itervalues():
                task.kill()

        wait_for = [self.alarm]
        self.alarm.stop_async()
        if self.healthcheck is not None:
            self.healthcheck.stop_async()
            wait_for.append(self.healthcheck)
        self.protocol.stop_async()

        wait_for.extend(self.event_listeners)
        wait_for.extend(self.protocol.address_greenlet.itervalues())

        for asset_manager in self.managers_by_asset_address.itervalues():
            wait_for.extend(asset_manager.transfermanager.transfertasks.itervalues())

        gevent.wait(wait_for)
Ejemplo n.º 15
0
    def __init__(self, chain, private_key_bin, transport, discovery, config):
        if not isinstance(private_key_bin, bytes) or len(private_key_bin) != 32:
            raise ValueError('invalid private_key')

        if config['settle_timeout'] < NETTINGCHANNEL_SETTLE_TIMEOUT_MIN:
            raise ValueError('settle_timeout must be larger-or-equal to {}'.format(
                NETTINGCHANNEL_SETTLE_TIMEOUT_MIN
            ))

        private_key = PrivateKey(private_key_bin)
        pubkey = private_key.public_key.format(compressed=False)
        protocol = RaidenProtocol(
            transport,
            discovery,
            self,
            config['protocol']['retry_interval'],
            config['protocol']['retries_before_backoff'],
            config['protocol']['nat_keepalive_retries'],
            config['protocol']['nat_keepalive_timeout'],
            config['protocol']['nat_invitation_timeout'],
        )
        transport.protocol = protocol

        self.channelgraphs = dict()
        self.manager_token = dict()
        self.swapkeys_tokenswaps = dict()
        self.swapkeys_greenlettasks = dict()

        self.identifier_to_statemanagers = defaultdict(list)
        self.identifier_to_results = defaultdict(list)

        # This is a map from a hashlock to a list of channels, the same
        # hashlock can be used in more than one token (for tokenswaps), a
        # channel should be removed from this list only when the lock is
        # released/withdrawn but not when the secret is registered.
        self.tokens_hashlocks_channels = defaultdict(lambda: defaultdict(list))

        self.chain = chain
        self.config = config
        self.privkey = private_key_bin
        self.pubkey = pubkey
        self.private_key = private_key
        self.address = privatekey_to_address(private_key_bin)
        self.protocol = protocol

        message_handler = RaidenMessageHandler(self)
        state_machine_event_handler = StateMachineEventHandler(self)
        pyethapp_blockchain_events = PyethappBlockchainEvents()
        greenlet_task_dispatcher = GreenletTasksDispatcher()

        alarm = AlarmTask(chain)

        # prime the block number cache and set the callbacks
        self._blocknumber = alarm.last_block_number
        alarm.register_callback(lambda _: self.poll_blockchain_events())
        alarm.register_callback(self.set_block_number)

        alarm.start()

        self.transaction_log = StateChangeLog(
            storage_instance=StateChangeLogSQLiteBackend(
                database_path=config['database_path']
            )
        )

        self.channels_serialization_path = None
        self.channels_queue_path = None
        if config['database_path'] != ':memory:':
            self.channels_serialization_path = path.join(
                path.dirname(self.config['database_path']),
                'channels.pickle',
            )

            self.channels_queue_path = path.join(
                path.dirname(self.config['database_path']),
                'queues.pickle',
            )

            if path.exists(self.channels_serialization_path):
                serialized_channels = list()

                with open(self.channels_serialization_path, 'r') as handler:
                    try:
                        while True:
                            serialized_channels.append(pickle.load(handler))
                    except EOFError:
                        pass

                for channel in serialized_channels:
                    self.restore_channel(channel)

            if path.exists(self.channels_queue_path):
                with open(self.channels_queue_path, 'r') as handler:
                    channel_state = pickle.load(handler)

                for restored_queue in channel_state['channel_queues']:
                    self.restore_queue(restored_queue)

                self.protocol.receivedhashes_acks = channel_state['receivedhashes_acks']
                self.protocol.nodeaddresses_nonces = channel_state['nodeaddresses_nonces']

        self.alarm = alarm
        self.message_handler = message_handler
        self.state_machine_event_handler = state_machine_event_handler
        self.pyethapp_blockchain_events = pyethapp_blockchain_events
        self.greenlet_task_dispatcher = greenlet_task_dispatcher

        self.on_message = message_handler.on_message

        self.tokens_connectionmanagers = dict()  # token_address: ConnectionManager