def _sync(self, timeout_ms: int, latency_ms: int) -> None:
    """ Reimplements MatrixClient._sync """
    log.debug(
        "Sync called",
        node=node_address_from_userid(self.user_id),
        user_id=self.user_id,
        sync_iteration=self.sync_iteration,
        sync_filter_id=self._sync_filter_id,
        last_sync_time=self.last_sync,
    )

    time_before_sync = time.monotonic()
    time_since_last_sync_in_seconds = time_before_sync - self.last_sync

    # If it takes longer than `timeout_ms + latency_ms` to call `_sync`
    # again, we throw an exception. The exception is only thrown when in
    # development mode.
    timeout_in_seconds = (timeout_ms + latency_ms) // 1_000
    timeout_reached = (
        time_since_last_sync_in_seconds >= timeout_in_seconds
        and self.environment == Environment.DEVELOPMENT
    )
    # The second sync is the first full sync and can be slow. This is
    # acceptable, we only want to know if we fail to sync quickly
    # afterwards.
    # As the runtime is evaluated in the subsequent run, we only run this
    # after the second iteration is finished.
    if timeout_reached:
        if IDLE:
            IDLE.log()

        raise MatrixSyncMaxTimeoutReached(
            f"Time between syncs exceeded timeout: "
            f"{time_since_last_sync_in_seconds}s > {timeout_in_seconds}s. {IDLE}"
        )

    log.debug(
        "Calling api.sync",
        node=node_address_from_userid(self.user_id),
        user_id=self.user_id,
        sync_iteration=self.sync_iteration,
        time_since_last_sync_in_seconds=time_since_last_sync_in_seconds,
    )
    self.last_sync = time_before_sync
    response = self.api.sync(
        since=self.sync_token, timeout_ms=timeout_ms, filter=self._sync_filter_id
    )
    time_after_sync = time.monotonic()

    log.debug(
        "api.sync returned",
        node=node_address_from_userid(self.user_id),
        user_id=self.user_id,
        sync_iteration=self.sync_iteration,
        time_after_sync=time_after_sync,
        time_taken=time_after_sync - time_before_sync,
    )

    if response:
        token = uuid4()
        log.debug(
            "Sync returned",
            node=node_address_from_userid(self.user_id),
            token=token,
            elapsed=time_after_sync - time_before_sync,
            current_user=self.user_id,
            presence_events_qty=len(response["presence"]["events"]),
            to_device_events_qty=len(response["to_device"]["events"]),
            rooms_invites_qty=len(response["rooms"]["invite"]),
            rooms_leaves_qty=len(response["rooms"]["leave"]),
            rooms_joined_member_count=sum(
                room["summary"].get("m.joined_member_count", 0)
                for room in response["rooms"]["join"].values()
            ),
            rooms_invited_member_count=sum(
                room["summary"].get("m.invited_member_count", 0)
                for room in response["rooms"]["join"].values()
            ),
            rooms_join_state_qty=sum(
                len(room["state"]) for room in response["rooms"]["join"].values()
            ),
            rooms_join_timeline_events_qty=sum(
                len(room["timeline"]["events"])
                for room in response["rooms"]["join"].values()
            ),
            rooms_join_state_events_qty=sum(
                len(room["state"]["events"])
                for room in response["rooms"]["join"].values()
            ),
            rooms_join_ephemeral_events_qty=sum(
                len(room["ephemeral"]["events"])
                for room in response["rooms"]["join"].values()
            ),
            rooms_join_account_data_events_qty=sum(
                len(room["account_data"]["events"])
                for room in response["rooms"]["join"].values()
            ),
        )

        # Updating the sync token should only be done after the response is
        # saved in the queue, otherwise the data can be lost in a stop/start.
        self.response_queue.put((token, response, datetime.now()))
        self.sync_token = response["next_batch"]
        self.sync_progress.set_synced(token)
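
# The following is a minimal, self-contained sketch (not part of the Raiden
# codebase) of the fail-fast check performed at the top of `_sync` above: the
# wall-clock time since the previous sync is compared against
# `(timeout_ms + latency_ms)` converted to whole seconds, and only in
# development mode is exceeding it treated as fatal. The function name and the
# example values below are purely illustrative assumptions.
import time


def _example_sync_deadline_exceeded(
    last_sync: float, timeout_ms: int, latency_ms: int, is_development: bool
) -> bool:
    elapsed_seconds = time.monotonic() - last_sync
    timeout_in_seconds = (timeout_ms + latency_ms) // 1_000
    return is_development and elapsed_seconds >= timeout_in_seconds


# For example, with timeout_ms=30_000 and latency_ms=5_000 the node is expected
# to call `_sync` again within 35 seconds; a longer gap raises
# MatrixSyncMaxTimeoutReached when running in development mode.
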
def run(ctx: Context, **kwargs: Any) -> None:
    # pylint: disable=too-many-locals,too-many-branches,too-many-statements

    if kwargs["config_file"]:
        apply_config_file(run, kwargs, ctx)

    configure_logging(
        kwargs["log_config"],
        log_json=kwargs["log_json"],
        log_file=kwargs["log_file"],
        disable_debug_logfile=kwargs["disable_debug_logfile"],
        debug_log_file_path=kwargs["debug_logfile_path"],
    )

    flamegraph = kwargs.pop("flamegraph", None)
    switch_tracing = kwargs.pop("switch_tracing", None)
    profiler = None
    switch_monitor = None

    enable_gevent_monitoring_signal()

    if flamegraph:  # pragma: no cover
        windows_not_supported("flame graph")
        from raiden.utils.profiling.sampler import FlameGraphCollector, TraceSampler

        os.makedirs(flamegraph, exist_ok=True)
        now = datetime.datetime.now().isoformat()
        address = to_checksum_address(kwargs["address"])
        stack_path = os.path.join(flamegraph, f"{address}_{now}_stack.data")
        stack_stream = open(stack_path, "w")
        flame = FlameGraphCollector(stack_stream)
        profiler = TraceSampler(flame)

    if switch_tracing is True:  # pragma: no cover
        windows_not_supported("switch tracing")
        from raiden.utils.profiling.greenlets import SwitchMonitoring

        switch_monitor = SwitchMonitoring()

    if kwargs["environment_type"] == Environment.DEVELOPMENT:
        IDLE.enable()

    memory_logger = None
    log_memory_usage_interval = kwargs.pop("log_memory_usage_interval", 0)
    if log_memory_usage_interval > 0:  # pragma: no cover
        windows_not_supported("memory usage logging")
        from raiden.utils.profiling.memory import MemoryLogger

        memory_logger = MemoryLogger(log_memory_usage_interval)
        memory_logger.start()

    if ctx.invoked_subcommand is not None:
        # Pass parsed args on to subcommands.
        ctx.obj = kwargs
        return

    raiden_version = get_system_spec()["raiden"]
    click.secho(f"Welcome to Raiden, version {raiden_version}!", fg="green")
    click.secho(
        textwrap.dedent("""\
            +------------------------------------------------------------------------+
            | This is a Beta version of experimental open source software released   |
            | as a test version under an MIT license and may contain errors and/or   |
            | bugs. No guarantee or representation whatsoever is made regarding its  |
            | suitability (or its use) for any purpose or regarding its compliance   |
            | with any applicable laws and regulations. Use of the software is at    |
            | your own risk and discretion and by using the software you warrant and |
            | represent that you have read this disclaimer, understand its contents, |
            | assume all risk related thereto and hereby release, waive, discharge   |
            | and covenant not to hold liable Brainbot Labs Establishment or any of  |
            | its officers, employees or affiliates from and for any direct or       |
            | indirect damage resulting from the software or the use thereof.        |
            | Such to the extent as permissible by applicable laws and regulations.  |
            |                                                                        |
            | Privacy warning: Please be aware, that by using the Raiden Client,     |
            | among others your Ethereum address, channels, channel deposits,        |
            | settlements and the Ethereum address of your channel counterparty will |
            | be stored on the Ethereum chain, i.e. on servers of Ethereum node      |
            | operators and ergo are to a certain extent publicly available. The     |
            | same might also be stored on systems of parties running Raiden nodes   |
            | connected to the same token network. Data present in the Ethereum      |
            | chain is very unlikely to be able to be changed, removed or deleted    |
            | from the public arena.                                                 |
            |                                                                        |
            | Also be aware, that data on individual Raiden token transfers will be  |
            | made available via the Matrix protocol to the recipient,               |
            | intermediating nodes of a specific transfer as well as to the Matrix   |
            | server operators, see Raiden Transport Specification.                  |
            +------------------------------------------------------------------------+"""),
        fg="yellow",
    )

    if not kwargs["accept_disclaimer"]:
        click.confirm(
            "\nHave you read, understood and hereby accept the above "
            "disclaimer and privacy warning?",
            abort=True,
        )

    # Name used in the exception handlers below; always run this lookup so the
    # kwargs are known to contain the key with the correct name.
    name_or_id = ID_TO_CHAINNAME.get(kwargs["chain_id"], kwargs["chain_id"])

    # TODO:
    # - Ask for confirmation to quit if there are any locked transfers that
    #   did not time out.
    try:
        run_services(kwargs)
    except KeyboardInterrupt:
        # The user requested a shutdown. Assume that if the exception
        # propagated all the way to the top level, everything was shut down
        # properly.
        #
        # Notes about edge cases:
        # - It could happen that the exception was handled somewhere else in
        #   the code and did not reach the top level. Ideally that should
        #   result in an exit with a non-zero code, but currently there is no
        #   way to detect that.
        # - Just because the exception reached main, it doesn't mean that all
        #   services were properly cleaned up. Ideally at this stage we should
        #   run extra code to verify the state of the main services, and if
        #   any of them is not properly shut down, exit with a non-zero code.
        pass
    except (ReplacementTransactionUnderpriced, EthereumNonceTooLow) as ex:
        click.secho(
            f"{ex}. Please make sure that this Raiden node is the "
            f"only user of the selected account",
            fg="red",
        )
        sys.exit(ReturnCode.ETH_ACCOUNT_ERROR)
    except (ConnectionError, ConnectTimeout, RequestsConnectionError, ReadTimeoutError):
        print(COMMUNICATION_ERROR.format(kwargs["eth_rpc_endpoint"]))
        sys.exit(ReturnCode.GENERIC_COMMUNICATION_ERROR)
    except EthNodeInterfaceError as e:
        click.secho(str(e), fg="red")
        sys.exit(ReturnCode.ETH_INTERFACE_ERROR)
    except RaidenUnrecoverableError as ex:
        write_stack_trace(ex)
        sys.exit(ReturnCode.FATAL)
    except APIServerPortInUseError as ex:
        click.secho(
            f"ERROR: API Address {ex} is in use. Use --api-address <host:port> "
            f"to specify a different port.",
            fg="red",
        )
        sys.exit(ReturnCode.PORT_ALREADY_IN_USE)
    except (KeystoreAuthenticationError, KeystoreFileNotFound) as e:
        click.secho(str(e), fg="red")
        sys.exit(ReturnCode.ETH_ACCOUNT_ERROR)
    except ConfigurationError as e:
        click.secho(str(e), fg="red")
        sys.exit(ReturnCode.RAIDEN_CONFIGURATION_ERROR)
    except ContractCodeMismatch as e:
        click.secho(
            f"{e}. This may happen if Raiden is configured to use an "
            f"unsupported version of the contracts.",
            fg="red",
        )
        sys.exit(ReturnCode.SMART_CONTRACTS_CONFIGURATION_ERROR)
    except AddressWithoutCode as e:
        click.secho(
            f"{e}. This may happen if an external ERC20 smart contract "
            f"selfdestructed, or if the address is misconfigured. Make sure "
            f"the used address is not a normal account but a smart contract, "
            f"and that it is deployed to {name_or_id}.",
            fg="red",
        )
        sys.exit(ReturnCode.SMART_CONTRACTS_CONFIGURATION_ERROR)
    except filelock.Timeout:
        click.secho(
            f"FATAL: Another Raiden instance already running for account "
            f"{to_checksum_address(kwargs['address'])} on network id {name_or_id}",
            fg="red",
        )
        sys.exit(ReturnCode.RAIDEN_CONFIGURATION_ERROR)
    except Exception as ex:
        write_stack_trace(ex)
        sys.exit(ReturnCode.FATAL)
    finally:  # pragma: no cover
        # The teardown order is important because of side effects: both the
        # switch_monitor and the profiler may use the tracing API, so for the
        # teardown code to work correctly it has to run in the reverse order
        # of the initialization.
        if switch_monitor is not None:
            switch_monitor.stop()
        if memory_logger is not None:
            memory_logger.stop()
        if profiler is not None:
            profiler.stop()
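
# The following is a minimal, self-contained sketch (not part of the Raiden
# codebase) of the teardown rule stated in the `finally` block above:
# components are stopped in the reverse order of their initialization, because
# later components may rely on facilities (such as the tracing API) set up by
# earlier ones. `contextlib.ExitStack` expresses exactly that LIFO ordering;
# the `_Component` class and the names used here are hypothetical stand-ins.
from contextlib import ExitStack


class _Component:
    def __init__(self, name: str) -> None:
        self.name = name
        print(f"start {name}")

    def stop(self) -> None:
        print(f"stop {self.name}")


def _example_lifo_teardown() -> None:
    with ExitStack() as stack:
        for name in ("first", "second", "third"):
            stack.callback(_Component(name).stop)
        # ... the main work would run here ...
    # Callbacks run in reverse registration order on exit, printing
    # "stop third", then "stop second", then "stop first".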