def after_new_deposit_join_network(
    raiden: "RaidenService", state_change: ContractReceiveChannelDeposit
) -> None:
    """React to a confirmed on-chain deposit made by this node.

    If the deposit belongs to another participant it is ignored. The
    "balance was zero" trigger for re-joining the channel is currently
    disabled (see TODO), so the join branch never runs.
    """
    deposit = state_change.deposit_transaction

    # Deposits made by the channel partner are not our concern here.
    if raiden.address != deposit.participant_address:
        return

    # TODO: preserve the old state of the channel
    # previous_balance = previous_channel_state.our_state.contract_balance
    # balance_was_zero = previous_balance == 0
    balance_was_zero = False

    if balance_was_zero:
        manager = raiden.connection_manager_for_token_network(
            state_change.canonical_identifier.token_network_address
        )
        greenlet = spawn_named(
            "cm-join_channel",
            manager.join_channel,
            raiden.address,
            deposit.contract_balance,
        )
        raiden.add_pending_greenlet(greenlet)
def spawn_retry(self) -> Optional[Greenlet]:
    """Ensure at most one retry greenlet is running at a time.

    Returns the greenlet if a new one was spawned, otherwise None.
    """
    if self._retry_greenlet:
        # A retry is already in flight; do not start a second one.
        return None
    self._retry_greenlet = spawn_named("cm-retry_connect", self.retry_connect)
    return self._retry_greenlet
def after_new_route_join_network(
    raiden: "RaidenService", channelnew: ContractReceiveRouteNew
) -> None:
    """A new node joined the network; check whether we should open
    additional channels with it.
    """
    manager = raiden.connection_manager_for_token_network(
        channelnew.token_network_address
    )
    greenlet = spawn_named("cm-retry_connect", manager.retry_connect)
    raiden.add_pending_greenlet(greenlet)
def sort_servers_closest(
    servers: Sequence[str],
    max_timeout: float = 3.0,
    samples_per_server: int = 3,
    sample_delay: float = 0.125,
) -> Dict[str, float]:
    """Sort HTTP servers by measured round-trip time.

    Params:
        servers: sequence of http server urls
    Returns:
        mapping of url -> rtt in seconds, ordered by rtt, excluding
        failed and excessively slow servers (possibly empty)

    The default timeout was chosen after measuring the long tail of the
    development matrix servers. Under no stress, servers will have a
    very long tail of up to 2.5 seconds (measured 15/01/2020), which can
    lead to failure during startup if the timeout is too low. A larger
    timeout keeps network hiccups from failing the Raiden startup.
    """
    schemes = {urlparse(url).scheme for url in servers}
    if not schemes <= {"http", "https"}:
        raise TransportError("Invalid server urls")

    # One sampling greenlet per server, all probing concurrently.
    probes = {
        spawn_named(
            "get_average_http_response_time",
            get_average_http_response_time,
            url=server_url,
            samples=samples_per_server,
            sample_delay=sample_delay,
        )
        for server_url in servers
    }

    # Upper bound on the whole measurement phase.
    total_timeout = samples_per_server * (max_timeout + sample_delay)

    results = []
    for finished in gevent.iwait(probes, timeout=total_timeout):
        measurement = finished.get()
        if measurement is not None:
            results.append(measurement)

    # Stop any probes that did not complete within the deadline.
    gevent.killall(probes)

    if not results:
        raise TransportError(
            f"No Matrix server available with good latency, requests takes more "
            f"than {max_timeout} seconds."
        )

    server_url_to_rtt = dict(sorted(results, key=itemgetter(1)))
    log.debug("Available Matrix homeservers", servers=server_url_to_rtt)
    return server_url_to_rtt
def run_services(options: Dict[str, Any]) -> None:
    """Start the Raiden app plus its watcher tasks and block until a
    termination signal arrives or any task exits.

    Installs SIGQUIT/SIGTERM/SIGINT handlers that resolve ``stop_event``.
    On shutdown every helper greenlet is killed, the Raiden service is
    stopped, and everything is joined within the configured timeout.
    """
    if options["config_file"]:
        log.debug("Using config file", config_file=options["config_file"])

    app = run_app(**options)

    # Auxiliary greenlets that run alongside the Raiden service; any of
    # them exiting triggers a full shutdown (see the link() calls below).
    gevent_tasks: List[gevent.Greenlet] = list()

    if options["console"]:
        from raiden.ui.console import Console

        console = Console(app)
        console.start()

        gevent_tasks.append(console)

    gevent_tasks.append(spawn_named("check_version", check_version, get_system_spec()["raiden"]))
    gevent_tasks.append(spawn_named("check_gas_reserve", check_gas_reserve, app.raiden))
    gevent_tasks.append(
        spawn_named(
            "check_network_id",
            check_network_id,
            app.raiden.rpc_client.chain_id,
            app.raiden.rpc_client.web3,
        )
    )

    # The RDN deposit watcher is only useful when a paid service
    # (pathfinding or monitoring) is configured.
    spawn_user_deposit_task = app.user_deposit and (
        options["pathfinding_service_address"] or options["enable_monitoring"]
    )
    if spawn_user_deposit_task:
        gevent_tasks.append(
            spawn_named("check_rdn_deposits", check_rdn_deposits, app.raiden, app.user_deposit)
        )

    # Resolved either with the received signal or, via link(), with the
    # result of the first task to exit.
    stop_event: AsyncResult[Optional[signal.Signals]]  # pylint: disable=no-member
    stop_event = AsyncResult()

    def sig_set(sig: int, _frame: Any = None) -> None:
        stop_event.set(signal.Signals(sig))  # pylint: disable=no-member

    gevent.signal.signal(signal.SIGQUIT, sig_set)  # pylint: disable=no-member
    gevent.signal.signal(signal.SIGTERM, sig_set)  # pylint: disable=no-member
    gevent.signal.signal(signal.SIGINT, sig_set)  # pylint: disable=no-member

    # The SIGPIPE handler should not be installed. It is handled by the python
    # runtime, and an exception will be raised at the call site that triggered
    # the error.
    #
    # The default SIGPIPE handler set by the libc will terminate the process
    # [4]. However, the CPython interpreter changes the handler to IGN [3].
    # This allows for error reporting by the system calls that write to files.
    # Because of this, calling `send` to a closed socket will return an `EPIPE`
    # error [2], the error is then converted to an exception [5,6].
    #
    # 1 - https://github.com/python/cpython/blob/3.8/Modules/socketmodule.c#L4088
    # 2 - http://man7.org/linux/man-pages/man2/send.2.html
    # 3 - https://github.com/python/cpython/blob/3.8/Python/pylifecycle.c#L2306-L2307
    # 4 - https://www.gnu.org/software/libc/manual/html_node/Operation-Error-Signals.html
    # 5 - https://github.com/python/cpython/blob/3.8/Modules/socketmodule.c#L836-L838
    # 6 - https://github.com/python/cpython/blob/3.8/Modules/socketmodule.c#L627-L628
    # 7 - https://docs.python.org/3/library/signal.html#note-on-sigpipe
    #
    # gevent.signal.signal(signal.SIGPIPE, sig_set)  # pylint: disable=no-member

    # quit if any task exits, successfully or not
    app.raiden.greenlet.link(stop_event)
    for task in gevent_tasks:
        task.link(stop_event)

    try:
        signal_received = stop_event.get()
        if signal_received:
            print("\r", end="")  # Reset cursor to overwrite a possibly printed "^C"
            log.info("Signal received. Shutting down.", signal=signal_received)
    finally:
        # Kill helpers first, then stop the main service, then wait for
        # everything to actually terminate (bounded by shutdown_timeout).
        for task in gevent_tasks:
            task.kill()

        app.raiden.stop()

        gevent.joinall(
            set(gevent_tasks + [app.raiden]), app.config.shutdown_timeout, raise_error=True
        )

        app.stop()
def _open_channels(self) -> bool:
    """Open channels until there are `self.initial_channel_target`
    channels open. Do nothing if there are enough channels open already.

    Note:
        - This method must be called with the lock held.
    Return:
        - False if no channels could be opened
    """
    chain_state = views.state_from_raiden(self.raiden)
    open_channels = views.get_channelstate_open(
        chain_state=chain_state,
        token_network_registry_address=self.registry_address,
        token_address=self.token_address,
    )
    # The bootstrap address is not a real peer; never count it.
    open_channels = [
        state
        for state in open_channels
        if state.partner_state.address != self.BOOTSTRAP_ADDR
    ]
    funded = [
        state
        for state in open_channels
        if state.our_state.contract_balance >= self._initial_funding_per_partner
    ]
    nonfunded = [state for state in open_channels if state not in funded]
    new_partners = self._find_new_partners()

    # Target already met: nothing to do.
    if len(funded) >= self.initial_channel_target:
        return False
    # Nothing left to top up and nobody new to connect to: the network
    # is smaller than our target.
    if not nonfunded and not new_partners:
        return False

    wanted = self.initial_channel_target - len(funded)

    # Prefer funding existing underfunded channels, then open channels
    # with new partners, until the target of funded channels is met.
    candidates = [state.partner_state.address for state in nonfunded] + new_partners

    # Filter the candidates by excluding offline addresses.
    join_partners: List[Address] = []
    for candidate in candidates:
        if len(join_partners) == wanted:
            break
        reachability = self.raiden.transport.force_check_address_reachability(candidate)
        if reachability == AddressReachability.REACHABLE:
            join_partners.append(candidate)

    log.debug(
        "Spawning greenlets to join partners",
        node=to_checksum_address(self.raiden.address),
        num_greenlets=len(join_partners),
    )
    greenlets = {
        spawn_named(
            f"cm-join_partner-{to_checksum_address(partner)}",
            self._join_partner,
            partner,
        )
        for partner in join_partners
    }
    gevent.joinall(greenlets, raise_error=True)
    return True
def main(  # pylint: disable=too-many-arguments,too-many-locals
    private_key: PrivateKey,
    state_db: str,
    web3: Web3,
    contracts: Dict[str, Contract],
    start_block: BlockNumber,
    host: str,
    port: int,
    min_reward: int,
    confirmations: BlockTimeout,
    operator: str,
    info_message: str,
    debug_shell: bool,
    accept_disclaimer: bool,
) -> int:
    """The Monitoring service for the Raiden Network."""
    log.info("Starting Raiden Monitoring Service")
    click.secho(MS_DISCLAIMER, fg="yellow")
    # Unless the disclaimer was pre-accepted via flag, require an
    # interactive confirmation (aborts the process on "no").
    if not accept_disclaimer:
        click.confirm(CONFIRMATION_OF_UNDERSTANDING, abort=True)

    log.info("Using RPC endpoint", rpc_url=get_web3_provider_info(web3))
    hex_addresses = {
        name: to_checksum_address(contract.address) for name, contract in contracts.items()
    }
    log.info("Contract information", addresses=hex_addresses, start_block=start_block)

    # Initialized to None so the finally-block can tell how far startup got.
    task = None
    api = None
    try:
        service = MonitoringService(
            web3=web3,
            private_key=private_key,
            contracts=contracts,
            sync_start_block=start_block,
            required_confirmations=confirmations,
            poll_interval=DEFAULT_POLL_INTERVALL,
            db_filename=state_db,
            min_reward=min_reward,
        )

        if debug_shell:
            # Drop into an interactive shell instead of running the
            # service; the finally-block still performs cleanup.
            import IPython

            IPython.embed()
            return 0

        # Run the monitoring service in the background...
        task = spawn_named("MonitoringService", service.start)

        # ...and serve the API in the foreground.
        log.debug("Starting API")
        api = MSApi(monitoring_service=service, operator=operator, info_message=info_message)
        api.run(host=host, port=port)

        task.get()
    finally:
        log.info("Stopping Monitoring Service...")
        if api:
            api.stop()
        if task:
            task.kill()
            # Wait for the killed greenlet to actually finish.
            task.get()

    return 0