class RaidenService(object):
    """ A Raiden node.

    Ties together the blockchain proxies, the transport protocol, the alarm
    task that polls for new blocks/events, and the per-token channel graphs
    and transfer state machines.
    """
    # pylint: disable=too-many-instance-attributes,too-many-public-methods

    def __init__(self, chain, private_key_bin, transport, discovery, config):
        # The raw 32-byte secp256k1 private key is required; everything else
        # (public key, address) is derived from it.
        if not isinstance(private_key_bin, bytes) or len(private_key_bin) != 32:
            raise ValueError('invalid private_key')

        private_key = PrivateKey(private_key_bin)
        pubkey = private_key.public_key.format(compressed=False)

        # token_address -> ChannelGraph
        self.channelgraphs = dict()
        # channel manager address -> token address
        self.manager_token = dict()
        self.swapkeys_tokenswaps = dict()
        self.swapkeys_greenlettasks = dict()
        # transfer identifier -> list of StateManager / AsyncResult, one entry
        # per transfer started under that identifier
        self.identifier_statemanager = defaultdict(list)
        self.identifier_result = defaultdict(list)

        # This is a map from a hashlock to a list of channels, the same
        # hashlock can be used in more than one token (for tokenswaps), a
        # channel should be removed from this list only when the lock is
        # released/withdrawn but not when the secret is registered.
        self.tokens_hashlocks_channels = defaultdict(lambda: defaultdict(list))

        self.chain = chain
        self.config = config
        self.privkey = private_key_bin
        self.pubkey = pubkey
        self.private_key = private_key
        self.address = privatekey_to_address(private_key_bin)
        self.protocol = RaidenProtocol(transport, discovery, self)
        transport.protocol = self.protocol

        message_handler = RaidenMessageHandler(self)
        state_machine_event_handler = StateMachineEventHandler(self)
        pyethapp_blockchain_events = PyethappBlockchainEvents()
        greenlet_task_dispatcher = GreenletTasksDispatcher()

        alarm = AlarmTask(chain)
        # ignore the blocknumber
        alarm.register_callback(self.poll_blockchain_events)
        alarm.start()

        self._blocknumber = alarm.last_block_number
        alarm.register_callback(self.set_block_number)

        if config['max_unresponsive_time'] > 0:
            self.healthcheck = HealthcheckTask(
                self,
                config['send_ping_time'],
                config['max_unresponsive_time']
            )
            self.healthcheck.start()
        else:
            # Health checking is disabled entirely when the timeout is <= 0.
            self.healthcheck = None

        self.api = RaidenAPI(self)
        self.alarm = alarm
        self.message_handler = message_handler
        self.state_machine_event_handler = state_machine_event_handler
        self.pyethapp_blockchain_events = pyethapp_blockchain_events
        self.greenlet_task_dispatcher = greenlet_task_dispatcher

        self.on_message = message_handler.on_message

        self.tokens_connectionmanagers = dict()  # token_address: ConnectionManager

    def __repr__(self):
        return '<{} {}>'.format(self.__class__.__name__, pex(self.address))

    def set_block_number(self, blocknumber):
        """ Alarm callback: record the new block number and dispatch a Block
        state change to every running task and every known channel.
        """
        self._blocknumber = blocknumber

        state_change = Block(blocknumber)
        self.state_machine_event_handler.dispatch_to_all_tasks(state_change)

        for graph in self.channelgraphs.itervalues():
            for channel in graph.address_channel.itervalues():
                channel.state_transition(state_change)

    def get_block_number(self):
        """ Return the latest block number seen by the alarm task. """
        return self._blocknumber

    def poll_blockchain_events(self, block_number):  # pylint: disable=unused-argument
        """ Alarm callback: drain pending blockchain state changes and feed
        them into the state machine event handler.
        """
        on_statechange = self.state_machine_event_handler.on_blockchain_statechange

        for state_change in self.pyethapp_blockchain_events.poll_state_change():
            on_statechange(state_change)

    def find_channel_by_address(self, netting_channel_address_bin):
        """ Search every token's channel graph for the channel backed by the
        given netting channel contract address.

        Raises:
            ValueError: If no channel with that address is known.
        """
        for graph in self.channelgraphs.itervalues():
            channel = graph.address_channel.get(netting_channel_address_bin)

            if channel is not None:
                return channel

        raise ValueError('unknown channel {}'.format(encode_hex(netting_channel_address_bin)))

    def sign(self, message):
        """ Sign message inplace. """
        if not isinstance(message, SignedMessage):
            raise ValueError('{} is not signable.'.format(repr(message)))

        message.sign(self.private_key, self.address)

    def send(self, *args):
        # Deliberately unimplemented: callers must choose an explicit
        # delivery semantic.
        raise NotImplementedError('use send_and_wait or send_async')

    def send_async(self, recipient, message):
        """ Send `message` to `recipient` using the raiden protocol.

        The protocol will take care of resending the message on a given
        interval until an Acknowledgment is received or a given number of
        tries.
        """
        if not isaddress(recipient):
            raise ValueError('recipient is not a valid address.')

        if recipient == self.address:
            raise ValueError('programming error, sending message to itself')

        return self.protocol.send_async(recipient, message)

    def send_and_wait(self, recipient, message, timeout):
        """ Send `message` to `recipient` and wait for the response or `timeout`.

        Args:
            recipient (address): The address of the node that will receive the
                message.
            message: The transfer message.
            timeout (float): How long should we wait for a response from `recipient`.

        Returns:
            None: If the wait timed out
            object: The result from the event
        """
        if not isaddress(recipient):
            raise ValueError('recipient is not a valid address.')

        self.protocol.send_and_wait(recipient, message, timeout)

    def register_secret(self, secret):
        """ Register the secret with any channel that has a hashlock on it.

        This must search through all channels registered for a given hashlock
        and ignoring the tokens. Useful for refund transfer, split transfer,
        and token swaps.
        """
        hashlock = sha3(secret)
        revealsecret_message = RevealSecret(secret)
        self.sign(revealsecret_message)

        for hash_channel in self.tokens_hashlocks_channels.itervalues():
            for channel in hash_channel[hashlock]:
                try:
                    channel.register_secret(secret)

                    # This will potentially be executed multiple times and could suffer
                    # from amplification, the protocol will ignore messages that were
                    # already registered and send it only until a first Ack is
                    # received.
                    self.send_async(
                        channel.partner_state.address,
                        revealsecret_message,
                    )
                except:  # pylint: disable=bare-except
                    # Only channels that care about the given secret can be
                    # registered and channels that have claimed the lock must
                    # be removed, so an exception should not happen at this
                    # point, nevertheless handle it because we dont want an
                    # error in a channel to mess the state from others.
                    log.error('programming error')

    def register_channel_for_hashlock(self, token_address, channel, hashlock):
        """ Associate `channel` with `hashlock` so that `register_secret` and
        `handle_secret` can later find it. Idempotent per channel.
        """
        channels_registered = self.tokens_hashlocks_channels[token_address][hashlock]

        if channel not in channels_registered:
            channels_registered.append(channel)

    def handle_secret(  # pylint: disable=too-many-arguments
            self,
            identifier,
            token_address,
            secret,
            partner_secret_message,
            hashlock):
        """ Unlock/Withdraw locks, register the secret, and send Secret
        messages as necessary.

        This function will:
            - Unlock the locks created by this node and send a Secret message to
            the corresponding partner so that she can withdraw the token.
            - Withdraw the lock from sender.
            - Register the secret for the locks received and reveal the secret
            to the senders

        Note:
            The channel needs to be registered with
            `raiden.register_channel_for_hashlock`.
        """
        # handling the secret needs to:
        # - unlock the token for all `forward_channel` (the current one
        #   and the ones that failed with a refund)
        # - send a message to each of the forward nodes allowing them
        #   to withdraw the token
        # - register the secret for the `originating_channel` so that a
        #   proof can be made, if necessary
        # - reveal the secret to the `sender` node (otherwise we
        #   cannot withdraw the token)
        channels_list = self.tokens_hashlocks_channels[token_address][hashlock]
        channels_to_remove = list()

        # Dont use the partner_secret_message.token since it might not match
        # the current token manager
        our_secret_message = Secret(
            identifier,
            secret,
            token_address,
        )
        self.sign(our_secret_message)

        revealsecret_message = RevealSecret(secret)
        self.sign(revealsecret_message)

        for channel in channels_list:
            # unlock a sent lock
            if channel.partner_state.balance_proof.is_unclaimed(hashlock):
                channel.release_lock(secret)
                self.send_async(
                    channel.partner_state.address,
                    our_secret_message,
                )
                channels_to_remove.append(channel)

            # withdraw a pending lock
            if channel.our_state.balance_proof.is_unclaimed(hashlock):
                if partner_secret_message:
                    matching_sender = (
                        partner_secret_message.sender == channel.partner_state.address
                    )
                    matching_token = partner_secret_message.token == channel.token_address

                    if matching_sender and matching_token:
                        # The partner already proved knowledge of the secret,
                        # the lock can be withdrawn without revealing.
                        channel.withdraw_lock(secret)
                        channels_to_remove.append(channel)
                    else:
                        channel.register_secret(secret)
                        self.send_async(
                            channel.partner_state.address,
                            revealsecret_message,
                        )
                else:
                    channel.register_secret(secret)
                    self.send_async(
                        channel.partner_state.address,
                        revealsecret_message,
                    )

        for channel in channels_to_remove:
            channels_list.remove(channel)

        # Drop the hashlock entry once no channel cares about it anymore.
        if len(channels_list) == 0:
            del self.tokens_hashlocks_channels[token_address][hashlock]

    def get_channel_details(self, token_address, netting_channel):
        """ Build a ChannelDetails snapshot for the given on-chain netting
        channel, querying the contract for balances and timeouts.
        """
        channel_details = netting_channel.detail(self.address)
        our_state = ChannelEndState(
            channel_details['our_address'],
            channel_details['our_balance'],
            netting_channel.opened(),
        )
        partner_state = ChannelEndState(
            channel_details['partner_address'],
            channel_details['partner_balance'],
            netting_channel.opened(),
        )

        # Closure that pre-binds the token address, used as the
        # hashlock-registration callback of the external state.
        def register_channel_for_hashlock(channel, hashlock):
            self.register_channel_for_hashlock(
                token_address,
                channel,
                hashlock,
            )

        channel_address = netting_channel.address
        reveal_timeout = self.config['reveal_timeout']
        settle_timeout = channel_details['settle_timeout']

        external_state = ChannelExternalState(
            register_channel_for_hashlock,
            netting_channel,
        )

        channel_detail = ChannelDetails(
            channel_address,
            our_state,
            partner_state,
            external_state,
            reveal_timeout,
            settle_timeout,
        )

        return channel_detail

    def register_registry(self, registry_address):
        """ Register the registry and initialize the channel graphs for every
        token/channel-manager it knows about.
        """
        proxies = get_relevant_proxies(
            self.chain,
            self.address,
            registry_address,
        )

        # Install the filters first to avoid missing changes, as a consequence
        # some events might be applied twice.
        self.pyethapp_blockchain_events.add_proxies_listeners(proxies)

        block_number = self.get_block_number()

        for manager in proxies.channel_managers:
            token_address = manager.token_address()
            manager_address = manager.address

            channels_detail = list()
            netting_channels = proxies.channelmanager_nettingchannels[manager_address]
            for channel in netting_channels:
                detail = self.get_channel_details(token_address, channel)
                channels_detail.append(detail)

            edge_list = manager.channels_addresses()
            graph = ChannelGraph(
                self.address,
                manager_address,
                token_address,
                edge_list,
                channels_detail,
                block_number,
            )

            self.manager_token[manager_address] = token_address
            self.channelgraphs[token_address] = graph

            self.tokens_connectionmanagers[token_address] = ConnectionManager(
                self, token_address, graph
            )

    def register_channel_manager(self, manager_address):
        """ Register a single channel manager and all of this node's netting
        channels under it, building the token's channel graph.
        """
        manager = self.chain.manager(manager_address)
        netting_channels = [
            self.chain.netting_channel(channel_address)
            for channel_address in manager.channels_by_participant(self.address)
        ]

        # Install the filters first to avoid missing changes, as a consequence
        # some events might be applied twice.
        self.pyethapp_blockchain_events.add_channel_manager_listener(manager)
        for channel in netting_channels:
            self.pyethapp_blockchain_events.add_netting_channel_listener(channel)

        token_address = manager.token_address()
        edge_list = manager.channels_addresses()
        channels_detail = [
            self.get_channel_details(token_address, channel)
            for channel in netting_channels
        ]

        block_number = self.get_block_number()
        graph = ChannelGraph(
            self.address,
            manager_address,
            token_address,
            edge_list,
            channels_detail,
            block_number,
        )

        self.manager_token[manager_address] = token_address
        self.channelgraphs[token_address] = graph

        self.tokens_connectionmanagers[token_address] = ConnectionManager(
            self, token_address, graph
        )

    def register_netting_channel(self, token_address, channel_address):
        """ Add a newly opened netting channel to the token's channel graph
        and start listening for its events.
        """
        netting_channel = self.chain.netting_channel(channel_address)
        self.pyethapp_blockchain_events.add_netting_channel_listener(netting_channel)

        block_number = self.get_block_number()
        detail = self.get_channel_details(token_address, netting_channel)
        graph = self.channelgraphs[token_address]
        graph.add_channel(detail, block_number)

    def connection_manager_for_token(self, token_address):
        """ Return the ConnectionManager for `token_address`.

        Raises:
            InvalidAddress: If the address is malformed or the token is not
                registered.
        """
        if not isaddress(token_address):
            raise InvalidAddress('token address is not valid.')
        if token_address in self.tokens_connectionmanagers.keys():
            manager = self.tokens_connectionmanagers[token_address]
        else:
            raise InvalidAddress('token is not registered.')
        return manager

    def stop(self):
        """ Stop the node: halt all background tasks/greenlets, uninstall the
        blockchain event listeners, and block until everything has finished.
        """
        wait_for = [self.alarm]
        wait_for.extend(self.greenlet_task_dispatcher.stop())

        self.alarm.stop_async()
        if self.healthcheck is not None:
            self.healthcheck.stop_async()
            wait_for.append(self.healthcheck)
        self.protocol.stop_async()

        wait_for.extend(self.protocol.address_greenlet.itervalues())

        self.pyethapp_blockchain_events.uninstall_all_event_listeners()
        gevent.wait(wait_for)

    def transfer_async(self, token_address, amount, target, identifier=None):
        """ Transfer `amount` between this node and `target`.

        This method will start an asynchronous transfer, the transfer might fail
        or succeed depending on a couple of factors:
            - Existence of a path that can be used, through the usage of direct
            or intermediary channels.
            - Network speed, making the transfer sufficiently fast so it doesn't
            timeout.
        """
        graph = self.channelgraphs[token_address]

        if identifier is None:
            identifier = create_default_identifier(self.address, token_address, target)

        direct_channel = graph.partneraddress_channel.get(target)

        if direct_channel:
            async_result = self._direct_or_mediated_transfer(
                token_address,
                amount,
                identifier,
                direct_channel,
            )
            return async_result

        else:
            async_result = self._mediated_transfer(
                token_address,
                amount,
                identifier,
                target,
            )

            return async_result

    def _direct_or_mediated_transfer(self, token_address, amount, identifier, direct_channel):
        """ Check the direct channel and if possible use it, otherwise start a
        mediated transfer.
        """

        if not direct_channel.isopen:
            log.info(
                'DIRECT CHANNEL %s > %s is closed',
                pex(direct_channel.our_state.address),
                pex(direct_channel.partner_state.address),
            )

            async_result = self._mediated_transfer(
                token_address,
                amount,
                identifier,
                direct_channel.partner_state.address,
            )
            return async_result

        elif amount > direct_channel.distributable:
            log.info(
                'DIRECT CHANNEL %s > %s doesnt have enough funds [%s]',
                pex(direct_channel.our_state.address),
                pex(direct_channel.partner_state.address),
                amount,
            )

            async_result = self._mediated_transfer(
                token_address,
                amount,
                identifier,
                direct_channel.partner_state.address,
            )
            return async_result

        else:
            direct_transfer = direct_channel.create_directtransfer(amount, identifier)
            self.sign(direct_transfer)
            direct_channel.register_transfer(direct_transfer)

            async_result = self.protocol.send_async(
                direct_channel.partner_state.address,
                direct_transfer,
            )
            return async_result

    def _mediated_transfer(self, token_address, amount, identifier, target):
        # Thin indirection kept so callers have a private entry point.
        return self.start_mediated_transfer(token_address, amount, identifier, target)

    def start_mediated_transfer(self, token_address, amount, identifier, target):
        """ Initiate a mediated transfer of `amount` of `token_address` to
        `target`, returning an AsyncResult that resolves with the outcome.
        """
        # pylint: disable=too-many-locals
        graph = self.channelgraphs[token_address]

        routes = graph.get_best_routes(
            self.address,
            target,
            amount,
            lock_timeout=None,
        )

        available_routes = [
            route
            for route in map(route_to_routestate, routes)
            if route.state == CHANNEL_STATE_OPENED
        ]

        # NOTE(review): the caller-supplied `identifier` is unconditionally
        # overwritten here, so the parameter is effectively ignored — confirm
        # whether this is intentional.
        identifier = create_default_identifier(self.address, token_address, target)
        route_state = RoutesState(available_routes)
        our_address = self.address
        block_number = self.get_block_number()

        transfer_state = LockedTransferState(
            identifier=identifier,
            amount=amount,
            token=token_address,
            initiator=self.address,
            target=target,
            expiration=None,
            hashlock=None,
            secret=None,
        )

        # Issue #489
        #
        # Raiden may fail after a state change using the random generator is
        # handled but right before the snapshot is taken. If that happens on
        # the next initialization when raiden is recovering and applying the
        # pending state changes a new secret will be generated and the
        # resulting events won't match, this breaks the architecture model,
        # since it's assumed the re-execution of a state change will always
        # produce the same events.
        #
        # TODO: Removed the secret generator from the InitiatorState and add
        # the secret into all state changes that require one, this way the
        # secret will be serialized with the state change and the recovery will
        # use the same /random/ secret.
        random_generator = RandomSecretGenerator()
        init_initiator = ActionInitInitiator(
            our_address=our_address,
            transfer=transfer_state,
            routes=route_state,
            random_generator=random_generator,
            block_number=block_number,
        )

        state_manager = StateManager(initiator.state_transition, None)
        all_events = state_manager.dispatch(init_initiator)

        for event in all_events:
            self.state_machine_event_handler.on_event(event)

        async_result = AsyncResult()

        # TODO: implement the network timeout raiden.config['msg_timeout'] and
        # cancel the current transfer if it hapens (issue #374)
        self.identifier_statemanager[identifier].append(state_manager)
        self.identifier_result[identifier].append(async_result)

        return async_result

    def mediate_mediated_transfer(self, message):
        """ Act as a mediator for an incoming MediatedTransfer `message`:
        pick forwarding routes and start a mediator state machine.
        """
        # pylint: disable=too-many-locals
        identifier = message.identifier
        amount = message.lock.amount
        target = message.target
        token = message.token
        graph = self.channelgraphs[token]

        routes = graph.get_best_routes(
            self.address,
            target,
            amount,
            lock_timeout=None,
        )

        available_routes = [
            route
            for route in map(route_to_routestate, routes)
            if route.state == CHANNEL_STATE_OPENED
        ]

        from_channel = graph.partneraddress_channel[message.sender]
        from_route = channel_to_routestate(from_channel, message.sender)

        our_address = self.address
        from_transfer = lockedtransfer_from_message(message)
        route_state = RoutesState(available_routes)
        block_number = self.get_block_number()

        init_mediator = ActionInitMediator(
            our_address,
            from_transfer,
            route_state,
            from_route,
            block_number,
        )

        state_manager = StateManager(mediator.state_transition, None)
        all_events = state_manager.dispatch(init_mediator)

        for event in all_events:
            self.state_machine_event_handler.on_event(event)

        self.identifier_statemanager[identifier].append(state_manager)

    def target_mediated_transfer(self, message):
        """ Handle a MediatedTransfer `message` addressed to this node:
        start a target state machine for it.
        """
        graph = self.channelgraphs[message.token]
        from_channel = graph.partneraddress_channel[message.sender]
        from_route = channel_to_routestate(from_channel, message.sender)

        from_transfer = lockedtransfer_from_message(message)
        our_address = self.address
        block_number = self.get_block_number()

        init_target = ActionInitTarget(
            our_address,
            from_route,
            from_transfer,
            block_number,
        )

        state_manager = StateManager(target_task.state_transition, None)
        all_events = state_manager.dispatch(init_target)

        for event in all_events:
            self.state_machine_event_handler.on_event(event)

        identifier = message.identifier
        self.identifier_statemanager[identifier].append(state_manager)
class RaidenService(object):  # pylint: disable=too-many-instance-attributes
    """ A Raiden node.

    Earlier variant built around per-asset AssetManager objects and a
    RaidenEventHandler that owns the blockchain event listeners.
    """

    def __init__(self, chain, private_key_bin, transport, discovery, config):  # pylint: disable=too-many-arguments
        # The raw 32-byte secp256k1 private key is required; public key and
        # node address are derived from it.
        if not isinstance(private_key_bin, bytes) or len(private_key_bin) != 32:
            raise ValueError('invalid private_key')

        private_key = PrivateKey(
            private_key_bin,
            ctx=GLOBAL_CTX,
            raw=True,
        )
        pubkey = private_key.pubkey.serialize(compressed=False)

        self.registries = list()
        # asset address -> AssetManager
        self.managers_by_asset_address = dict()
        # channel manager address -> AssetManager
        self.managers_by_address = dict()

        self.chain = chain
        self.config = config
        self.privkey = private_key_bin
        self.pubkey = pubkey
        self.private_key = private_key
        self.address = privatekey_to_address(private_key_bin)
        self.protocol = RaidenProtocol(transport, discovery, self)
        transport.protocol = self.protocol

        message_handler = RaidenMessageHandler(self)
        event_handler = RaidenEventHandler(self)

        alarm = AlarmTask(chain)
        # ignore the blocknumber
        alarm.register_callback(
            lambda _: event_handler.poll_all_event_listeners())
        alarm.start()

        self._blocknumber = alarm.last_block_number
        alarm.register_callback(self.set_block_number)

        if config['max_unresponsive_time'] > 0:
            self.healthcheck = HealthcheckTask(self,
                                              config['send_ping_time'],
                                              config['max_unresponsive_time'])
            self.healthcheck.start()
        else:
            # Health checking is disabled entirely when the timeout is <= 0.
            self.healthcheck = None

        self.api = RaidenAPI(self)
        self.alarm = alarm
        self.event_handler = event_handler
        self.message_handler = message_handler

        self.start_event_listener = event_handler.start_event_listener

        self.on_message = message_handler.on_message
        self.on_event = event_handler.on_event

    def __repr__(self):
        return '<{} {}>'.format(self.__class__.__name__, pex(self.address))

    def set_block_number(self, blocknumber):
        """ Alarm callback: record the latest block number. """
        self._blocknumber = blocknumber

    def get_block_number(self):
        """ Return the latest block number seen by the alarm task. """
        return self._blocknumber

    def get_manager_by_asset_address(self, asset_address_bin):
        """ Return the manager for the given `asset_address_bin`.

        Raises:
            UnknownAssetAddress: If no manager is registered for the asset.
        """
        try:
            return self.managers_by_asset_address[asset_address_bin]
        except KeyError:
            raise UnknownAssetAddress(asset_address_bin)

    def get_manager_by_address(self, manager_address_bin):
        # Raises KeyError if the channel manager address is unknown.
        return self.managers_by_address[manager_address_bin]

    def find_channel_by_address(self, netting_channel_address_bin):
        """ Search all asset managers for the channel backed by the given
        netting channel contract address.

        Raises:
            ValueError: If no channel with that address is known.
        """
        for manager in self.managers_by_address.itervalues():
            channel = manager.address_channel.get(netting_channel_address_bin)

            if channel is not None:
                return channel

        raise ValueError('unknown channel {}'.format(
            encode_hex(netting_channel_address_bin)))

    def sign(self, message):
        """ Sign message inplace. """
        if not isinstance(message, SignedMessage):
            raise ValueError('{} is not signable.'.format(repr(message)))

        message.sign(self.private_key, self.address)

    def send(self, *args):
        # Deliberately unimplemented: callers must choose an explicit
        # delivery semantic.
        raise NotImplementedError('use send_and_wait or send_async')

    def send_async(self, recipient, message):
        """ Send `message` to `recipient` using the raiden protocol.

        The protocol will take care of resending the message on a given
        interval until an Acknowledgment is received or a given number of
        tries.
        """
        if not isaddress(recipient):
            raise ValueError('recipient is not a valid address.')

        if recipient == self.address:
            raise ValueError('programming error, sending message to itself')

        return self.protocol.send_async(recipient, message)

    def send_and_wait(self, recipient, message, timeout):
        """ Send `message` to `recipient` and wait for the response or `timeout`.

        Args:
            recipient (address): The address of the node that will receive the
                message.
            message: The transfer message.
            timeout (float): How long should we wait for a response from `recipient`.

        Returns:
            None: If the wait timed out
            object: The result from the event
        """
        if not isaddress(recipient):
            raise ValueError('recipient is not a valid address.')

        self.protocol.send_and_wait(recipient, message, timeout)

    # api design regarding locks:
    # - `register_secret` method was added because secret registration can be a
    #   cross asset operation
    # - unlocking a lock is not a cross asset operation, for this reason it's
    #   only available in the asset manager

    def register_secret(self, secret):
        """ Register the secret with any channel that has a hashlock on it.

        This must search through all channels registered for a given hashlock
        and ignoring the assets. Useful for refund transfer, split transfer,
        and exchanges.
        """
        for asset_manager in self.managers_by_asset_address.values():
            try:
                asset_manager.register_secret(secret)
            except:  # pylint: disable=bare-except
                # Only channels that care about the given secret can be
                # registered and channels that have claimed the lock must
                # be removed, so an exception should not happen at this
                # point, nevertheless handle it because we dont want a
                # error in a channel to mess the state from others.
                log.error('programming error')

    def message_for_task(self, message, hashlock):
        """ Sends the message to the corresponding task.

        The corresponding task is found by matching the hashlock.

        Return:
            Nothing if a corresponding task is found, raise Exception otherwise
        """
        found = False
        for asset_manager in self.managers_by_asset_address.values():
            task = asset_manager.transfermanager.transfertasks.get(hashlock)

            if task is not None:
                task.on_response(message)
                found = True

        if not found:
            # Log a warning and don't process further
            if log.isEnabledFor(logging.WARN):
                log.warn(
                    'Received %s hashlock message from unknown channel.'
                    'Sender: %s',
                    message.__class__.__name__,
                    pex(message.sender),
                )
            raise UnknownAddress

    def register_registry(self, registry):
        """ Register the registry and intialize all the related assets and
        channels.
        """
        translator = ContractTranslator(REGISTRY_ABI)

        assetadded = registry.assetadded_filter()

        all_manager_addresses = registry.manager_addresses()

        self.start_event_listener(
            'Registry {}'.format(pex(registry.address)),
            assetadded,
            translator,
        )

        self.registries.append(registry)

        for manager_address in all_manager_addresses:
            channel_manager = self.chain.manager(manager_address)
            self.register_channel_manager(channel_manager)

    def register_channel_manager(self, channel_manager):
        """ Discover and register the channels for the given asset. """
        translator = ContractTranslator(CHANNEL_MANAGER_ABI)

        # To avoid missing changes, first create the filter, call the
        # contract and then start polling.
        channelnew = channel_manager.channelnew_filter()

        all_netting_contracts = channel_manager.channels_by_participant(
            self.address)

        self.start_event_listener(
            'ChannelManager {}'.format(pex(channel_manager.address)),
            channelnew,
            translator,
        )

        asset_address_bin = channel_manager.asset_address()
        channel_manager_address_bin = channel_manager.address
        edges = channel_manager.channels_addresses()
        channel_graph = ChannelGraph(edges)

        asset_manager = AssetManager(
            self,
            asset_address_bin,
            channel_manager_address_bin,
            channel_graph,
        )
        self.managers_by_asset_address[asset_address_bin] = asset_manager
        self.managers_by_address[channel_manager_address_bin] = asset_manager

        for netting_contract_address in all_netting_contracts:
            asset_manager.register_channel_by_address(
                netting_contract_address,
                self.config['reveal_timeout'],
            )

    def stop(self):
        """ Stop the node: kill transfer tasks, stop the background tasks,
        uninstall listeners, and block until everything has finished.
        """
        for asset_manager in self.managers_by_asset_address.itervalues():
            for task in asset_manager.transfermanager.transfertasks.itervalues():
                task.kill()

        wait_for = [self.alarm]
        self.alarm.stop_async()
        if self.healthcheck is not None:
            self.healthcheck.stop_async()
            wait_for.append(self.healthcheck)
        self.protocol.stop_async()

        wait_for.extend(self.protocol.address_greenlet.itervalues())

        for asset_manager in self.managers_by_asset_address.itervalues():
            wait_for.extend(
                asset_manager.transfermanager.transfertasks.itervalues())

        self.event_handler.uninstall_listeners()
        gevent.wait(wait_for)
class RaidenService(object):  # pylint: disable=too-many-instance-attributes
    """ A Raiden node.

    Oldest variant in this file: blockchain events are consumed by explicit
    LogListenerTask greenlets tracked in `self.event_listeners`.
    """

    def __init__(self, chain, private_key_bin, transport, discovery, config):  # pylint: disable=too-many-arguments
        # The raw 32-byte secp256k1 private key is required; public key and
        # node address are derived from it.
        if not isinstance(private_key_bin, bytes) or len(private_key_bin) != 32:
            raise ValueError('invalid private_key')

        private_key = PrivateKey(
            private_key_bin,
            ctx=GLOBAL_CTX,
            raw=True,
        )
        pubkey = private_key.pubkey.serialize(compressed=False)

        self.registries = list()
        # asset address -> AssetManager
        self.managers_by_asset_address = dict()
        # channel manager address -> AssetManager
        self.managers_by_address = dict()
        # running LogListenerTask greenlets, one per installed filter
        self.event_listeners = list()

        self.chain = chain
        self.config = config
        self.privkey = private_key_bin
        self.pubkey = pubkey
        self.private_key = private_key
        self.address = privatekey_to_address(private_key_bin)
        self.protocol = RaidenProtocol(transport, discovery, self)
        transport.protocol = self.protocol

        message_handler = RaidenMessageHandler(self)
        event_handler = RaidenEventHandler(self)

        alarm = AlarmTask(chain)
        alarm.start()

        if config['max_unresponsive_time'] > 0:
            self.healthcheck = HealthcheckTask(
                self,
                config['send_ping_time'],
                config['max_unresponsive_time']
            )
            self.healthcheck.start()
        else:
            # Health checking is disabled entirely when the timeout is <= 0.
            self.healthcheck = None

        self.api = RaidenAPI(self)
        self.alarm = alarm
        self.event_handler = event_handler
        self.message_handler = message_handler

        self.on_message = message_handler.on_message
        self.on_event = event_handler.on_event

    def __repr__(self):
        return '<{} {}>'.format(self.__class__.__name__, pex(self.address))

    def get_manager_by_asset_address(self, asset_address_bin):
        """ Return the manager for the given `asset_address_bin`.

        Raises KeyError if the asset is not registered.
        """
        return self.managers_by_asset_address[asset_address_bin]

    def get_manager_by_address(self, manager_address_bin):
        # Raises KeyError if the channel manager address is unknown.
        return self.managers_by_address[manager_address_bin]

    def find_channel_by_address(self, netting_channel_address_bin):
        """ Search all asset managers for the channel backed by the given
        netting channel contract address.

        Raises:
            ValueError: If no channel with that address is known.
        """
        for manager in self.managers_by_address.itervalues():
            channel = manager.address_channel.get(netting_channel_address_bin)

            if channel is not None:
                return channel

        raise ValueError('unknown channel {}'.format(encode_hex(netting_channel_address_bin)))

    def sign(self, message):
        """ Sign message inplace. """
        if not isinstance(message, SignedMessage):
            raise ValueError('{} is not signable.'.format(repr(message)))

        message.sign(self.private_key, self.address)

    def send(self, *args):
        # Deliberately unimplemented: callers must choose an explicit
        # delivery semantic.
        raise NotImplementedError('use send_and_wait or send_async')

    def send_async(self, recipient, message):
        """ Send `message` to `recipient` using the raiden protocol.

        The protocol will take care of resending the message on a given
        interval until an Acknowledgment is received or a given number of
        tries.
        """
        if not isaddress(recipient):
            raise ValueError('recipient is not a valid address.')

        if recipient == self.address:
            raise ValueError('programming error, sending message to itself')

        return self.protocol.send_async(recipient, message)

    def send_and_wait(self, recipient, message, timeout):
        """ Send `message` to `recipient` and wait for the response or `timeout`.

        Args:
            recipient (address): The address of the node that will receive the
                message.
            message: The transfer message.
            timeout (float): How long should we wait for a response from `recipient`.

        Returns:
            None: If the wait timed out
            object: The result from the event
        """
        if not isaddress(recipient):
            raise ValueError('recipient is not a valid address.')

        self.protocol.send_and_wait(recipient, message, timeout)

    # api design regarding locks:
    # - `register_secret` method was added because secret registration can be a
    #   cross asset operation
    # - unlocking a lock is not a cross asset operation, for this reason it's
    #   only available in the asset manager

    def register_secret(self, secret):
        """ Register the secret with any channel that has a hashlock on it.

        This must search through all channels registered for a given hashlock
        and ignoring the assets. Useful for refund transfer, split transfer,
        and exchanges.
        """
        for asset_manager in self.managers_by_asset_address.values():
            try:
                asset_manager.register_secret(secret)
            except:  # pylint: disable=bare-except
                # Only channels that care about the given secret can be
                # registered and channels that have claimed the lock must
                # be removed, so an exception should not happen at this
                # point, nevertheless handle it because we dont want a
                # error in a channel to mess the state from others.
                log.error('programming error')

    def message_for_task(self, message, hashlock):
        """ Sends the message to the corresponding task.

        The corresponding task is found by matching the hashlock.

        Return:
            int: the number of tasks that handled the message (0 when no
            matching task was found).
        """
        # allow multiple managers to register for the hashlock (used for exchanges)
        found = 0
        for asset_manager in self.managers_by_asset_address.values():
            task = asset_manager.transfermanager.transfertasks.get(hashlock)

            if task is not None:
                task.on_response(message)
                found += 1

        return found

    def register_registry(self, registry):
        """ Register the registry and intialize all the related assets and
        channels.
        """
        translator = ContractTranslator(REGISTRY_ABI)

        assetadded = registry.assetadded_filter()

        all_manager_addresses = registry.manager_addresses()

        task_name = 'Registry {}'.format(pex(registry.address))
        asset_listener = LogListenerTask(
            task_name,
            assetadded,
            self.on_event,
            translator,
        )
        asset_listener.start()
        self.event_listeners.append(asset_listener)

        self.registries.append(registry)

        for manager_address in all_manager_addresses:
            channel_manager = self.chain.manager(manager_address)
            self.register_channel_manager(channel_manager)

    def register_channel_manager(self, channel_manager):
        """ Discover and register the channels for the given asset. """
        translator = ContractTranslator(CHANNEL_MANAGER_ABI)

        # To avoid missing changes, first create the filter, call the
        # contract and then start polling.
        channelnew = channel_manager.channelnew_filter()

        all_netting_contracts = channel_manager.channels_by_participant(self.address)

        task_name = 'ChannelManager {}'.format(pex(channel_manager.address))
        channel_listener = LogListenerTask(
            task_name,
            channelnew,
            self.on_event,
            translator,
        )
        channel_listener.start()
        self.event_listeners.append(channel_listener)

        asset_address_bin = channel_manager.asset_address()
        channel_manager_address_bin = channel_manager.address
        edges = channel_manager.channels_addresses()
        channel_graph = ChannelGraph(edges)

        asset_manager = AssetManager(
            self,
            asset_address_bin,
            channel_manager_address_bin,
            channel_graph,
        )
        self.managers_by_asset_address[asset_address_bin] = asset_manager
        self.managers_by_address[channel_manager_address_bin] = asset_manager

        for netting_contract_address in all_netting_contracts:
            asset_manager.register_channel_by_address(
                netting_contract_address,
                self.config['reveal_timeout'],
            )

    def stop(self):
        """ Stop the node: stop the event listeners and uninstall their
        filters, kill transfer tasks, stop background tasks, and block until
        everything has finished.
        """
        for listener in self.event_listeners:
            listener.stop_async()
            self.chain.uninstall_filter(listener.filter_.filter_id_raw)

        for asset_manager in self.managers_by_asset_address.itervalues():
            for task in asset_manager.transfermanager.transfertasks.itervalues():
                task.kill()

        wait_for = [self.alarm]
        self.alarm.stop_async()
        if self.healthcheck is not None:
            self.healthcheck.stop_async()
            wait_for.append(self.healthcheck)
        self.protocol.stop_async()

        wait_for.extend(self.event_listeners)
        wait_for.extend(self.protocol.address_greenlet.itervalues())

        for asset_manager in self.managers_by_asset_address.itervalues():
            wait_for.extend(asset_manager.transfermanager.transfertasks.itervalues())

        gevent.wait(wait_for)