def run(
        privatekey,
        registry_contract_address,
        secret_registry_contract_address,
        discovery_contract_address,
        listen_address,
        structlog,
        logfile,
        scenario,
        stage_prefix,
):  # pylint: disable=unused-argument
    """ Start a Raiden node, optionally drive a JSON scenario against it.

    Without a ``scenario`` file the node simply runs until signalled.  With
    one, it opens/funds channels per the scenario's ``tokens`` entries and
    performs the listed transfers, writing ``<stage_prefix>.stageN`` marker
    files and waiting on SIGUSR2 between stages so an external harness can
    coordinate multiple nodes.
    """
    # TODO: only enabled structlog on "initiators"
    # NOTE(review): `structlog` here is a *parameter* shadowing the module
    # name, and it is passed to its own configure() — confirm the intended
    # call signature with the caller.
    structlog.configure(structlog, log_file=logfile)

    (listen_host, listen_port) = split_endpoint(listen_address)

    config = App.DEFAULT_CONFIG.copy()
    config['host'] = listen_host
    config['port'] = listen_port
    config['privatekey_hex'] = privatekey

    privatekey_bin = decode_hex(privatekey)

    # The eth node endpoint is hard-coded to the local default JSON-RPC port.
    rpc_client = JSONRPCClient(
        '127.0.0.1',
        8545,
        privatekey_bin,
    )

    blockchain_service = BlockChainService(
        privatekey_bin,
        rpc_client,
        GAS_PRICE,
    )

    discovery = ContractDiscovery(
        blockchain_service,
        decode_hex(discovery_contract_address),
    )

    registry = blockchain_service.registry(
        registry_contract_address,
    )

    secret_registry = blockchain_service.secret_registry(
        secret_registry_contract_address,
    )

    throttle_policy = TokenBucket(
        config['protocol']['throttle_capacity'],
        config['protocol']['throttle_fill_rate'],
    )

    transport = UDPTransport(
        discovery,
        server._udp_socket((listen_host, listen_port)),
        throttle_policy,
        config['protocol'],
        dict(),
    )

    app = App(
        config,
        blockchain_service,
        registry,
        secret_registry,
        transport,
        discovery,
    )

    # Make this node discoverable by its peers before the scenario starts.
    app.discovery.register(
        app.raiden.address,
        listen_host,
        listen_port,
    )

    app.raiden.install_payment_network_filters(app.raiden.default_registry.address)

    if scenario:
        script = json.load(scenario)

        tools = ConsoleTools(
            app.raiden,
            app.discovery,
            app.config['settle_timeout'],
            app.config['reveal_timeout'],
        )

        transfers_by_peer = {}

        tokens = script['tokens']
        token_address = None
        peer = None
        our_node = hexlify(app.raiden.address)
        log.warning('our address is {}'.format(our_node))
        for token in tokens:
            # skip tokens that we're not part of
            nodes = token['channels']
            if our_node not in nodes:
                continue

            partner_nodes = [
                node
                for node in nodes
                if node != our_node
            ]

            # allow for prefunded tokens
            if 'token_address' in token:
                token_address = token['token_address']
            else:
                token_address = tools.create_token(registry_contract_address)

            transfers_with_amount = token['transfers_with_amount']

            # FIXME: in order to do bidirectional channels, only one side
            # (i.e. only token['channels'][0]) should
            # open; others should join by calling
            # raiden.api.deposit, AFTER the channel came alive!

            # NOTE: leaving unidirectional for now because it most
            # probably will get to higher throughput

            log.warning('Waiting for all nodes to come online')

            api = RaidenAPI(app.raiden)

            for node in partner_nodes:
                api.start_health_check_for(node)

            # Poll until every scenario partner is reachable over the network.
            while True:
                all_reachable = all(
                    api.get_node_network_state(node) == NODE_NETWORK_REACHABLE
                    for node in partner_nodes
                )

                if all_reachable:
                    break

                gevent.sleep(5)

            log.warning('All nodes are online')

            if our_node != nodes[-1]:
                # We are not the chain's last node: open and fund a channel
                # towards our immediate successor in the node list.
                our_index = nodes.index(our_node)
                peer = nodes[our_index + 1]

                tools.token_network_register(app.raiden.default_registry.address, token_address)
                amount = transfers_with_amount[nodes[-1]]

                # Each step below retries forever with a random backoff,
                # since peers may still be starting up.
                while True:
                    try:
                        app.discovery.get(peer.decode('hex'))
                        break
                    except KeyError:
                        log.warning('Error: peer {} not found in discovery'.format(peer))
                        time.sleep(random.randrange(30))

                while True:
                    try:
                        log.warning('Opening channel with {} for {}'.format(peer, token_address))
                        api.channel_open(app.raiden.default_registry.address, token_address, peer)
                        break
                    except KeyError:
                        log.warning('Error: could not open channel with {}'.format(peer))
                        time.sleep(random.randrange(30))

                while True:
                    try:
                        log.warning('Funding channel with {} for {}'.format(peer, token_address))
                        api.channel_deposit(
                            app.raiden.default_registry.address,
                            token_address,
                            peer,
                            amount,
                        )
                        break
                    except Exception:
                        log.warning('Error: could not deposit {} for {}'.format(amount, peer))
                        time.sleep(random.randrange(30))

                if our_index == 0:
                    last_node = nodes[-1]
                    transfers_by_peer[last_node] = int(amount)
            else:
                # Last node in the chain: our peer is the second-to-last one.
                peer = nodes[-2]

        if stage_prefix is not None:
            # Stage 1 marker: setup finished; block until SIGUSR2 lets the
            # harness start the transfer phase.
            open('{}.stage1'.format(stage_prefix), 'a').close()
            log.warning('Done with initialization, waiting to continue...')
            event = gevent.event.Event()
            gevent.signal(signal.SIGUSR2, event.set)
            event.wait()

        transfer_results = {'total_time': 0, 'timestamps': []}

        def transfer(token_address, amount_per_transfer, total_transfers, peer, is_async):
            # Run `total_transfers` transfers to `peer`, recording wall-clock
            # timing into the enclosing `transfer_results` dict.
            def transfer_():
                log.warning('Making {} transfers to {}'.format(total_transfers, peer))
                initial_time = time.time()
                times = [0] * total_transfers
                for index in range(total_transfers):
                    RaidenAPI(app.raiden).transfer(
                        app.raiden.default_registry.address,
                        token_address.decode('hex'),
                        amount_per_transfer,
                        peer,
                    )
                    times[index] = time.time()

                transfer_results['total_time'] = time.time() - initial_time
                transfer_results['timestamps'] = times

                log.warning('Making {} transfers took {}'.format(
                    total_transfers, transfer_results['total_time']))
                log.warning('Times: {}'.format(times))

            if is_async:
                return gevent.spawn(transfer_)
            else:
                transfer_()

        # If sending to multiple targets, do it asynchronously, otherwise
        # keep it simple and just send to the single target on my thread.
        if len(transfers_by_peer) > 1:
            greenlets = []
            for peer_, amount in transfers_by_peer.items():
                greenlet = transfer(token_address, 1, amount, peer_, True)
                if greenlet is not None:
                    greenlets.append(greenlet)

            gevent.joinall(greenlets)

        elif len(transfers_by_peer) == 1:
            for peer_, amount in transfers_by_peer.items():
                transfer(token_address, 1, amount, peer_, False)

        log.warning('Waiting for termination')

        # NOTE(review): unlike stage1 above, these stage files are written
        # without checking `stage_prefix is not None` — with no prefix this
        # creates files literally named 'None.stageN'; confirm intended.
        open('{}.stage2'.format(stage_prefix), 'a').close()
        log.warning('Waiting for transfers to finish, will write results...')
        event = gevent.event.Event()
        gevent.signal(signal.SIGUSR2, event.set)
        event.wait()

        open('{}.stage3'.format(stage_prefix), 'a').close()
        event = gevent.event.Event()
        gevent.signal(signal.SIGQUIT, event.set)
        gevent.signal(signal.SIGTERM, event.set)
        gevent.signal(signal.SIGINT, event.set)
        event.wait()

    else:
        log.warning('No scenario file supplied, doing nothing!')

        open('{}.stage2'.format(stage_prefix), 'a').close()
        event = gevent.event.Event()
        gevent.signal(signal.SIGQUIT, event.set)
        gevent.signal(signal.SIGTERM, event.set)
        gevent.signal(signal.SIGINT, event.set)
        event.wait()

    app.stop()
def tps_run(
        host,
        port,
        config,
        privatekey,
        rpc_server,
        registry_address,
        secret_registry_address,
        token_address,
        transfer_amount,
        parallel):
    # pylint: disable=too-many-locals,too-many-arguments
    """ Run a transfers-per-second measurement node.

    Loads the node list from the YAML file at `config`, registers every
    listed node with an in-memory discovery, exits if this node's
    `host`/`port` are not in the list, then spawns `parallel` greenlets
    each doing random transfers of `transfer_amount` of `token_address`.
    Blocks until SIGQUIT/SIGTERM/SIGINT, then stops the app.
    """
    ourprivkey, _ = hostport_to_privkeyaddr(host, port)

    # Parse the eth RPC endpoint into its own names so the listen
    # `host`/`port` are not clobbered — they are still needed below for the
    # discovery self-check and the UDP socket.
    rpc_connection = rpc_server.split(':')
    rpc_host, rpc_port = (rpc_connection[0], int(rpc_connection[1]))

    with codecs.open(config, encoding='utf8') as handler:
        # safe_load: a config file must not be able to construct arbitrary
        # python objects (yaml.load without an explicit Loader is unsafe
        # and deprecated).
        config = yaml.safe_load(handler)

    config['host'] = host
    config['port'] = port
    config['privkey'] = ourprivkey

    rpc_client = JSONRPCClient(
        rpc_host,
        rpc_port,
        privatekey,
    )
    blockchain_service = BlockChainService(privatekey, rpc_client)

    discovery = Discovery()
    found_ouraddress = False
    for node in config['nodes']:
        _, address = hostport_to_privkeyaddr(node['host'], node['port'])

        discovery.register(address, node['host'], node['port'])

        if host == node['host'] and str(port) == node['port']:
            found_ouraddress = True

    if not found_ouraddress:
        print('We are not registered in the configuration file')
        sys.exit(1)

    throttle_policy = TokenBucket(
        config['protocol']['throttle_capacity'],
        config['protocol']['throttle_fill_rate'],
    )

    transport = UDPTransport(
        discovery,
        server._udp_socket((host, port)),  # pylint: disable=protected-access
        throttle_policy,
        config['protocol'],
    )

    app = App(
        config=config,
        chain=blockchain_service,
        query_start_block=0,
        default_registry=registry_address,
        default_secret_registry=secret_registry_address,
        transport=transport,
        discovery=discovery,
    )

    for _ in range(parallel):
        gevent.spawn(random_transfer, app, token_address, transfer_amount)

    # wait for interrupt
    event = gevent.event.Event()
    gevent.signal(signal.SIGQUIT, event.set)
    gevent.signal(signal.SIGTERM, event.set)
    gevent.signal(signal.SIGINT, event.set)
    event.wait()

    app.stop()
def app(
        address,
        keystore_path,
        gas_price,
        eth_rpc_endpoint,
        registry_contract_address,
        discovery_contract_address,
        listen_address,
        rpccorsdomain,
        mapped_socket,
        logging,
        logfile,
        log_json,
        max_unresponsive_time,
        send_ping_time,
        api_address,
        rpc,
        sync_check,
        console,
        password_file,
        web_ui,
        datadir,
        eth_client_communication,
        nat,
):
    # pylint: disable=too-many-locals,too-many-branches,too-many-statements,unused-argument
    """ Build and return a fully configured (but not yet started) Raiden App.

    Unlocks the account, assembles the node configuration from the CLI
    options, connects to the eth node at `eth_rpc_endpoint`, sets up the
    database path under `datadir` and wires up discovery, registry and the
    UDP transport.

    Raises:
        RuntimeError: if no `mapped_socket` was provided.
    """
    from raiden.app import App
    from raiden.network.blockchain_service import BlockChainService

    if not mapped_socket:
        raise RuntimeError('Missing socket')

    address_hex = address_encoder(address) if address else None
    address_hex, privatekey_bin = prompt_account(address_hex, keystore_path, password_file)
    address = address_decoder(address_hex)

    (listen_host, listen_port) = split_endpoint(listen_address)
    (api_host, api_port) = split_endpoint(api_address)

    if datadir is None:
        datadir = os.path.join(os.path.expanduser('~'), '.raiden')

    config = App.DEFAULT_CONFIG.copy()
    config['host'] = listen_host
    config['port'] = listen_port
    config['console'] = console
    config['rpc'] = rpc
    config['web_ui'] = rpc and web_ui
    config['api_host'] = api_host
    config['api_port'] = api_port

    config['socket'] = mapped_socket.socket
    config['external_ip'] = mapped_socket.external_ip
    config['external_port'] = mapped_socket.external_port

    config['protocol']['nat_keepalive_retries'] = DEFAULT_NAT_KEEPALIVE_RETRIES
    timeout = max_unresponsive_time / DEFAULT_NAT_KEEPALIVE_RETRIES
    config['protocol']['nat_keepalive_timeout'] = timeout

    privatekey_hex = hexlify(privatekey_bin)
    config['privatekey_hex'] = privatekey_hex

    endpoint = eth_rpc_endpoint

    # Fallback to default port if only an IP address is given
    rpc_port = 8545
    if eth_rpc_endpoint.startswith('http://'):
        endpoint = eth_rpc_endpoint[len('http://'):]
        rpc_port = 80
    elif eth_rpc_endpoint.startswith('https://'):
        endpoint = eth_rpc_endpoint[len('https://'):]
        rpc_port = 443

    if ':' not in endpoint:  # no port was given in url
        rpc_host = endpoint
    else:
        rpc_host, rpc_port = split_endpoint(endpoint)

    rpc_client = JSONRPCClient(
        rpc_host,
        rpc_port,
        privatekey_bin,
        gas_price,
    )

    blockchain_service = BlockChainService(
        privatekey_bin,
        rpc_client,
        gas_price,
    )

    # this assumes the eth node is already online
    check_json_rpc(rpc_client)
    check_discovery_registration_gas(blockchain_service, address)

    net_id = int(blockchain_service.client.rpccall_with_retry('net_version'))
    if sync_check:
        check_synced(net_id, blockchain_service)

    database_path = os.path.join(datadir, 'netid_%s' % net_id, address_hex[:8], 'log.db')
    config['database_path'] = database_path
    print('You are connected to the {} network and the DB path is: {}'.format(
        ID_TO_NETWORKNAME[net_id],
        database_path,
    ))

    discovery = ContractDiscovery(
        blockchain_service.node_address,
        blockchain_service.discovery(discovery_contract_address),
    )

    # Create the registry proxy once (it was previously constructed twice).
    registry = blockchain_service.registry(registry_contract_address)

    throttle_policy = TokenBucket(
        config['protocol']['throttle_capacity'],
        config['protocol']['throttle_fill_rate'],
    )

    transport = UDPTransport(
        discovery,
        mapped_socket.socket,
        throttle_policy,
        config['protocol'],
    )

    raiden_app = App(
        config,
        blockchain_service,
        registry,
        discovery,
        transport,
    )

    return raiden_app
    def __init__(self, chain, default_registry, private_key_bin, transport, discovery, config):
        """ Initialize the node.

        Args:
            chain: blockchain service used to query/submit on-chain state.
            default_registry: registry proxy used as the node's default.
            private_key_bin: the node's private key, exactly 32 raw bytes.
            transport: lower-level transport; gains a cyclic `.protocol` ref.
            discovery: endpoint discovery; this node registers itself with it.
            config: node configuration dict (settle_timeout, external_ip/port,
                protocol timings, database_path, shutdown_timeout, ...).

        Raises:
            ValueError: on a malformed private key or a settle_timeout
                outside [NETTINGCHANNEL_SETTLE_TIMEOUT_MIN, _MAX].
        """
        if not isinstance(private_key_bin, bytes) or len(private_key_bin) != 32:
            raise ValueError('invalid private_key')

        invalid_timeout = (
            config['settle_timeout'] < NETTINGCHANNEL_SETTLE_TIMEOUT_MIN or
            config['settle_timeout'] > NETTINGCHANNEL_SETTLE_TIMEOUT_MAX
        )
        if invalid_timeout:
            raise ValueError('settle_timeout must be in range [{}, {}]'.format(
                NETTINGCHANNEL_SETTLE_TIMEOUT_MIN, NETTINGCHANNEL_SETTLE_TIMEOUT_MAX
            ))

        self.tokens_to_connectionmanagers = dict()
        self.identifier_to_results = defaultdict(list)

        # This is a map from a secrethash to a list of channels, the same
        # secrethash can be used in more than one token (for tokenswaps), a
        # channel should be removed from this list only when the lock is
        # released/withdrawn but not when the secret is registered.
        self.token_to_secrethash_to_channels = defaultdict(lambda: defaultdict(list))

        self.chain = chain
        self.default_registry = default_registry
        self.config = config
        self.privkey = private_key_bin
        self.address = privatekey_to_address(private_key_bin)

        # Endpoint registration runs concurrently with the rest of the
        # constructor; it is joined below before the protocol starts.
        endpoint_registration_event = gevent.spawn(
            discovery.register,
            self.address,
            config['external_ip'],
            config['external_port'],
        )
        endpoint_registration_event.link_exception(endpoint_registry_exception_handler)

        self.private_key = PrivateKey(private_key_bin)
        self.pubkey = self.private_key.public_key.format(compressed=False)
        self.protocol = UDPTransport(
            transport,
            discovery,
            self,
            config['protocol']['retry_interval'],
            config['protocol']['retries_before_backoff'],
            config['protocol']['nat_keepalive_retries'],
            config['protocol']['nat_keepalive_timeout'],
            config['protocol']['nat_invitation_timeout'],
        )

        # TODO: remove this cyclic dependency
        transport.protocol = self.protocol

        self.blockchain_events = BlockchainEvents()
        self.alarm = AlarmTask(chain)
        self.shutdown_timeout = config['shutdown_timeout']
        self._block_number = None

        self.stop_event = Event()
        self.start_event = Event()
        self.chain.client.inject_stop_event(self.stop_event)

        self.wal = None

        self.database_path = config['database_path']
        if self.database_path != ':memory:':
            database_dir = os.path.dirname(config['database_path'])
            os.makedirs(database_dir, exist_ok=True)

            self.database_dir = database_dir
            # Prevent concurrent acces to the same db
            self.lock_file = os.path.join(self.database_dir, '.lock')
            self.db_lock = filelock.FileLock(self.lock_file)
        else:
            self.database_path = ':memory:'
            self.database_dir = None
            self.lock_file = None
            self.serialization_file = None
            self.db_lock = None

        # If the endpoint registration fails the node will quit, this must
        # finish before starting the protocol
        endpoint_registration_event.join()

        # Lock used to serialize calls to `poll_blockchain_events`, this is
        # important to give a consistent view of the node state.
        self.event_poll_lock = gevent.lock.Semaphore()

        self.start()
class RaidenService:
    """ A Raiden node. """

    def __init__(self, chain, default_registry, private_key_bin, transport, discovery, config):
        """ Initialize and immediately start the node (see ``start``).

        Raises:
            ValueError: on a malformed private key or a settle_timeout
                outside [NETTINGCHANNEL_SETTLE_TIMEOUT_MIN, _MAX].
        """
        if not isinstance(private_key_bin, bytes) or len(private_key_bin) != 32:
            raise ValueError('invalid private_key')

        invalid_timeout = (
            config['settle_timeout'] < NETTINGCHANNEL_SETTLE_TIMEOUT_MIN or
            config['settle_timeout'] > NETTINGCHANNEL_SETTLE_TIMEOUT_MAX
        )
        if invalid_timeout:
            raise ValueError('settle_timeout must be in range [{}, {}]'.format(
                NETTINGCHANNEL_SETTLE_TIMEOUT_MIN, NETTINGCHANNEL_SETTLE_TIMEOUT_MAX
            ))

        self.tokens_to_connectionmanagers = dict()
        self.identifier_to_results = defaultdict(list)

        # This is a map from a secrethash to a list of channels, the same
        # secrethash can be used in more than one token (for tokenswaps), a
        # channel should be removed from this list only when the lock is
        # released/withdrawn but not when the secret is registered.
        self.token_to_secrethash_to_channels = defaultdict(lambda: defaultdict(list))

        self.chain = chain
        self.default_registry = default_registry
        self.config = config
        self.privkey = private_key_bin
        self.address = privatekey_to_address(private_key_bin)

        # Endpoint registration runs concurrently with the rest of the
        # constructor; it is joined below before the protocol starts.
        endpoint_registration_event = gevent.spawn(
            discovery.register,
            self.address,
            config['external_ip'],
            config['external_port'],
        )
        endpoint_registration_event.link_exception(endpoint_registry_exception_handler)

        self.private_key = PrivateKey(private_key_bin)
        self.pubkey = self.private_key.public_key.format(compressed=False)
        self.protocol = UDPTransport(
            transport,
            discovery,
            self,
            config['protocol']['retry_interval'],
            config['protocol']['retries_before_backoff'],
            config['protocol']['nat_keepalive_retries'],
            config['protocol']['nat_keepalive_timeout'],
            config['protocol']['nat_invitation_timeout'],
        )

        # TODO: remove this cyclic dependency
        transport.protocol = self.protocol

        self.blockchain_events = BlockchainEvents()
        self.alarm = AlarmTask(chain)
        self.shutdown_timeout = config['shutdown_timeout']
        self._block_number = None

        self.stop_event = Event()
        self.start_event = Event()
        self.chain.client.inject_stop_event(self.stop_event)

        self.wal = None

        self.database_path = config['database_path']
        if self.database_path != ':memory:':
            database_dir = os.path.dirname(config['database_path'])
            os.makedirs(database_dir, exist_ok=True)

            self.database_dir = database_dir
            # Prevent concurrent acces to the same db
            self.lock_file = os.path.join(self.database_dir, '.lock')
            self.db_lock = filelock.FileLock(self.lock_file)
        else:
            self.database_path = ':memory:'
            self.database_dir = None
            self.lock_file = None
            self.serialization_file = None
            self.db_lock = None

        # If the endpoint registration fails the node will quit, this must
        # finish before starting the protocol
        endpoint_registration_event.join()

        # Lock used to serialize calls to `poll_blockchain_events`, this is
        # important to give a consistent view of the node state.
        self.event_poll_lock = gevent.lock.Semaphore()

        self.start()

    def start(self):
        """ Start the node. """
        # XXX Should this really be here? Or will start() never be called again
        # after stop() in the lifetime of Raiden apart from the tests? This is
        # at least at the moment prompted by tests/integration/test_transer.py
        if self.stop_event and self.stop_event.is_set():
            self.stop_event.clear()

        if self.database_dir is not None:
            self.db_lock.acquire(timeout=0)
            assert self.db_lock.is_locked

        # The database may be :memory:
        storage = sqlite.SQLiteStorage(self.database_path, serialize.PickleSerializer())

        self.wal, unapplied_events = wal.restore_from_latest_snapshot(
            node.state_transition,
            storage,
        )

        last_log_block_number = None
        # First run, initialize the basic state
        if self.wal.state_manager.current_state is None:
            block_number = self.chain.block_number()

            state_change = ActionInitNode(
                random.Random(),
                block_number,
            )
            self.wal.log_and_dispatch(state_change, block_number)
        else:
            # Get the last known block number after reapplying all the state changes from the log
            last_log_block_number = views.block_number(self.wal.state_manager.current_state)

        # The alarm task must be started after the snapshot is loaded or the
        # state is primed, the callbacks assume the node is initialized.
        self.alarm.start()
        self.alarm.register_callback(self.poll_blockchain_events)
        self.alarm.register_callback(self.set_block_number)
        self._block_number = self.chain.block_number()

        # Registry registration must start *after* the alarm task. This
        # avoids corner cases where the registry is queried in block A, a new
        # block B is mined, and the alarm starts polling at block C.

        # If last_log_block_number is None, the wal.state_manager.current_state was
        # None in the log, meaning we don't have any events we care about, so just
        # read the latest state from the network
        self.register_payment_network(self.default_registry.address, last_log_block_number)

        # Start the protocol after the registry is queried to avoid warning
        # about unknown channels.
        self.protocol.start()

        # Health check needs the protocol layer
        self.start_neighbours_healthcheck()

        self.start_event.set()

        # Replay events restored from the WAL that were not applied yet.
        for event in unapplied_events:
            on_raiden_event(self, event)

    def start_neighbours_healthcheck(self):
        """ Begin health-checking every known neighbour except bootstrap. """
        for neighbour in views.all_neighbour_nodes(self.wal.state_manager.current_state):
            if neighbour != ConnectionManager.BOOTSTRAP_ADDR:
                self.start_health_check_for(neighbour)

    def stop(self):
        """ Stop the node. """
        # Needs to come before any greenlets joining
        self.stop_event.set()
        self.protocol.stop_and_wait()
        self.alarm.stop_async()

        wait_for = [self.alarm]
        wait_for.extend(self.protocol.greenlets)

        # We need a timeout to prevent an endless loop from trying to
        # contact the disconnected client
        gevent.wait(wait_for, timeout=self.shutdown_timeout)

        # Filters must be uninstalled after the alarm task has stopped. Since
        # the events are polled by an alarm task callback, if the filters are
        # uninstalled before the alarm task is fully stopped the callback
        # `poll_blockchain_events` will fail.
        #
        # We need a timeout to prevent an endless loop from trying to
        # contact the disconnected client
        try:
            with gevent.Timeout(self.shutdown_timeout):
                self.blockchain_events.uninstall_all_event_listeners()
        except (gevent.timeout.Timeout, RaidenShuttingDown):
            pass

        if self.db_lock is not None:
            self.db_lock.release()

    def __repr__(self):
        return '<{} {}>'.format(self.__class__.__name__, pex(self.address))

    def set_block_number(self, block_number):
        """ Dispatch a Block state change for the new `block_number`. """
        state_change = Block(block_number)
        self.handle_state_change(state_change, block_number)

        # To avoid races, only update the internal cache after all the state
        # tasks have been updated.
        self._block_number = block_number

    def handle_state_change(self, state_change, block_number=None):
        """ Log and dispatch `state_change` through the WAL, then run every
        resulting raiden event. Returns the list of produced events.
        """
        is_logging = log.isEnabledFor(logging.DEBUG)

        if is_logging:
            log.debug('STATE CHANGE', node=pex(self.address), state_change=state_change)

        if block_number is None:
            block_number = self.get_block_number()

        event_list = self.wal.log_and_dispatch(state_change, block_number)

        for event in event_list:
            if is_logging:
                log.debug('EVENT', node=pex(self.address), event=event)

            on_raiden_event(self, event)

        return event_list

    def set_node_network_state(self, node_address, network_state):
        """ Record the reachability of `node_address` in the node state. """
        state_change = ActionChangeNodeNetworkState(node_address, network_state)
        self.wal.log_and_dispatch(state_change, self.get_block_number())

    def start_health_check_for(self, node_address):
        """ Delegate health checking of `node_address` to the protocol. """
        self.protocol.start_health_check(node_address)

    def get_block_number(self):
        """ Return the block number recorded in the current WAL state. """
        return views.block_number(self.wal.state_manager.current_state)

    def poll_blockchain_events(self, current_block=None):  # pylint: disable=unused-argument
        """ Poll and handle pending blockchain events.

        Serialized with `event_poll_lock` to keep a consistent node state.
        """
        with self.event_poll_lock:
            for event in self.blockchain_events.poll_blockchain_events():
                on_blockchain_event(self, event)

    def sign(self, message):
        """ Sign message inplace. """
        if not isinstance(message, SignedMessage):
            raise ValueError('{} is not signable.'.format(repr(message)))

        message.sign(self.private_key, self.address)

    def register_payment_network(self, registry_address, from_block=None):
        """ Install event listeners for `registry_address` and build the
        corresponding PaymentNetworkState from the on-chain proxies.
        """
        proxies = get_relevant_proxies(
            self.chain,
            self.address,
            registry_address,
        )

        # Install the filters first to avoid missing changes, as a consequence
        # some events might be applied twice.
        self.blockchain_events.add_proxies_listeners(proxies, from_block)

        token_network_list = list()
        for manager in proxies.channel_managers:
            manager_address = manager.address
            netting_channel_proxies = proxies.channelmanager_nettingchannels[manager_address]
            network = get_token_network_state_from_proxies(self, manager, netting_channel_proxies)
            token_network_list.append(network)

        payment_network = PaymentNetworkState(
            registry_address,
            token_network_list,
        )

        state_change = ContractReceiveNewPaymentNetwork(payment_network)
        self.handle_state_change(state_change)

    def connection_manager_for_token(self, registry_address, token_address):
        """ Return (creating it on first use) the ConnectionManager for
        `token_address`.

        Raises:
            InvalidAddress: if the token address is malformed or the token
                is not registered in `registry_address`.
        """
        if not isaddress(token_address):
            raise InvalidAddress('token address is not valid.')

        known_token_networks = views.get_token_network_addresses_for(
            self.wal.state_manager.current_state,
            registry_address,
        )

        if token_address not in known_token_networks:
            raise InvalidAddress('token is not registered.')

        manager = self.tokens_to_connectionmanagers.get(token_address)

        if manager is None:
            manager = ConnectionManager(self, registry_address, token_address)
            self.tokens_to_connectionmanagers[token_address] = manager

        return manager

    def leave_all_token_networks(self):
        """ Dispatch a state change asking to leave every token network. """
        state_change = ActionLeaveAllNetworks()
        self.wal.log_and_dispatch(state_change, self.get_block_number())

    def close_and_settle(self):
        """ Leave all token networks and wait for every channel to settle. """
        log.info('raiden will close and settle all channels now')

        self.leave_all_token_networks()

        connection_managers = [
            self.tokens_to_connectionmanagers[token_address]
            for token_address in self.tokens_to_connectionmanagers
        ]

        if connection_managers:
            waiting.wait_for_settle_all_channels(
                self,
                self.alarm.wait_time,
            )

    def mediated_transfer_async(
            self,
            registry_address,
            token_address,
            amount,
            target,
            identifier,
    ):
        """ Transfer `amount` between this node and `target`.

        This method will start an asyncronous transfer, the transfer might fail
        or succeed depending on a couple of factors:

        - Existence of a path that can be used, through the usage of direct
          or intermediary channels.
        - Network speed, making the transfer sufficiently fast so it doesn't
          expire.
        """
        async_result = self.start_mediated_transfer(
            registry_address,
            token_address,
            amount,
            target,
            identifier,
        )

        return async_result

    def direct_transfer_async(self, registry_address, token_address, amount, target, identifier):
        """ Do a direct transfer with target.

        Direct transfers are non cancellable and non expirable, since these
        transfers are a signed balance proof with the transferred amount
        incremented.

        Because the transfer is non cancellable, there is a level of trust with
        the target. After the message is sent the target is effectively paid
        and then it is not possible to revert.

        The async result will be set to False iff there is no direct channel
        with the target or the payer does not have balance to complete the
        transfer, otherwise because the transfer is non expirable the async
        result *will never be set to False* and if the message is sent it will
        hang until the target node acknowledge the message.

        This transfer should be used as an optimization, since only two packets
        are required to complete the transfer (from the payers perspective),
        whereas the mediated transfer requires 6 messages.
        """
        self.protocol.start_health_check(target)

        if identifier is None:
            identifier = create_default_identifier()

        direct_transfer = ActionTransferDirect(
            registry_address,
            token_address,
            target,
            identifier,
            amount,
        )

        self.handle_state_change(direct_transfer)

    def start_mediated_transfer(
            self,
            registry_address,
            token_address,
            amount,
            target,
            identifier,
    ):
        """ Dispatch the initiator state change for a mediated transfer and
        return the AsyncResult tracking it.
        """
        self.protocol.start_health_check(target)

        if identifier is None:
            identifier = create_default_identifier()

        # NOTE(review): assert is stripped under `python -O`; duplicate
        # identifiers would then go undetected.
        assert identifier not in self.identifier_to_results

        async_result = AsyncResult()
        self.identifier_to_results[identifier].append(async_result)

        secret = random_secret()
        init_initiator_statechange = initiator_init(
            self,
            identifier,
            amount,
            secret,
            registry_address,
            token_address,
            target,
        )

        # TODO: implement the network timeout raiden.config['msg_timeout'] and
        # cancel the current transfer if it happens (issue #374)
        #
        # Dispatch the state change even if there are no routes to create the
        # wal entry.
        self.handle_state_change(init_initiator_statechange)

        return async_result

    def mediate_mediated_transfer(self, transfer: LockedTransfer):
        """ Act as a mediator for the incoming locked `transfer`. """
        init_mediator_statechange = mediator_init(self, transfer)
        self.handle_state_change(init_mediator_statechange)

    def target_mediated_transfer(self, transfer: LockedTransfer):
        """ Act as the target of the incoming locked `transfer`. """
        init_target_statechange = target_init(self, transfer)
        self.handle_state_change(init_target_statechange)
def create_apps(
        blockchain_services,
        endpoint_discovery_services,
        registry_address,
        raiden_udp_ports,
        reveal_timeout,
        settle_timeout,
        database_paths,
        retry_interval,
        retries_before_backoff,
        throttle_capacity,
        throttle_fill_rate,
        nat_invitation_timeout,
        nat_keepalive_retries,
        nat_keepalive_timeout,
):
    """ Create the apps."""
    # pylint: disable=too-many-locals
    apps = []

    paired_services = zip(blockchain_services, endpoint_discovery_services)
    for index, (chain, endpoint_discovery) in enumerate(paired_services):
        udp_port = raiden_udp_ports[index]
        node_privkey = chain.private_key
        node_address = privatekey_to_address(node_privkey)
        node_host = '127.0.0.1'

        # Register the node's endpoint before the app itself is built.
        endpoint_discovery.register(node_address, node_host, udp_port)

        protocol_options = {
            'retry_interval': retry_interval,
            'retries_before_backoff': retries_before_backoff,
            'throttle_capacity': throttle_capacity,
            'throttle_fill_rate': throttle_fill_rate,
            'nat_invitation_timeout': nat_invitation_timeout,
            'nat_keepalive_retries': nat_keepalive_retries,
            'nat_keepalive_timeout': nat_keepalive_timeout,
        }
        node_config = {
            'host': node_host,
            'port': udp_port,
            'external_ip': node_host,
            'external_port': udp_port,
            'privatekey_hex': hexlify(node_privkey),
            'reveal_timeout': reveal_timeout,
            'settle_timeout': settle_timeout,
            'database_path': database_paths[index],
            'protocol': protocol_options,
            'rpc': True,
            'console': False,
        }

        # Overlay the per-node settings on top of the defaults.
        merged_config = App.DEFAULT_CONFIG.copy()
        merged_config.update(node_config)

        registry = chain.registry(registry_address)
        rate_limiter = TokenBucket(
            protocol_options['throttle_capacity'],
            protocol_options['throttle_fill_rate'],
        )
        udp_transport = UDPTransport(
            endpoint_discovery,
            server._udp_socket((node_host, udp_port)),  # pylint: disable=protected-access
            rate_limiter,
            protocol_options,
        )

        apps.append(App(
            merged_config,
            chain,
            registry,
            endpoint_discovery,
            udp_transport,
        ))

    return apps