def test_scheduled_events(ms_database: Database):
    """Scheduled events are deduplicated on upsert and filtered by trigger time."""
    # The scheduled_event table has a foreign key on token_network, so one
    # network row must exist first.
    network = TokenNetworkAddress(bytes([1] * 20))
    ms_database.conn.execute(
        "INSERT INTO token_network (address, settle_timeout) VALUES (?, ?)",
        [to_checksum_address(network), DEFAULT_TOKEN_NETWORK_SETTLE_TIMEOUT],
    )

    def make_event(timestamp: int) -> ScheduledEvent:
        # Identical payload for both events; only the trigger time differs.
        return ScheduledEvent(
            trigger_timestamp=timestamp,
            event=ActionMonitoringTriggeredEvent(
                token_network_address=network,
                channel_identifier=ChannelID(1),
                non_closing_participant=Address(bytes([1] * 20)),
            ),
        )

    def counts():
        # Number of due events at each of the three probe timestamps.
        return [len(ms_database.get_scheduled_events(ts)) for ts in (22 * 15, 23 * 15, 24 * 15)]

    first = make_event(23 * 15)
    second = make_event(24 * 15)

    assert ms_database.scheduled_event_count() == 0
    ms_database.upsert_scheduled_event(event=first)
    assert ms_database.scheduled_event_count() == 1

    ms_database.upsert_scheduled_event(second)
    assert ms_database.scheduled_event_count() == 2
    assert counts() == [0, 1, 2]

    # Upserting an identical event again must not create a duplicate row.
    ms_database.upsert_scheduled_event(first)
    assert ms_database.scheduled_event_count() == 2
    assert counts() == [0, 1, 2]

    ms_database.remove_scheduled_event(second)
    assert counts() == [0, 1, 1]
def test_scheduled_events(ms_database: Database):
    """Scheduled events are deduplicated on upsert and filtered by trigger block."""
    # The scheduled_event table references token_network via foreign key.
    network = TokenNetworkAddress(bytes([1] * 20))
    ms_database.conn.execute(
        "INSERT INTO token_network(address) VALUES (?)",
        [to_checksum_address(network)],
    )

    def make_event(block: int) -> ScheduledEvent:
        # Identical payload for both events; only the trigger block differs.
        return ScheduledEvent(
            trigger_block_number=BlockNumber(block),
            event=ActionMonitoringTriggeredEvent(
                token_network_address=network,
                channel_identifier=ChannelID(1),
                non_closing_participant=Address(bytes([1] * 20)),
            ),
        )

    def counts():
        # Number of due events at each of the three probe blocks.
        return [
            len(ms_database.get_scheduled_events(BlockNumber(block))) for block in (22, 23, 24)
        ]

    first = make_event(23)
    second = make_event(24)

    assert ms_database.scheduled_event_count() == 0
    ms_database.upsert_scheduled_event(event=first)
    assert ms_database.scheduled_event_count() == 1

    ms_database.upsert_scheduled_event(second)
    assert ms_database.scheduled_event_count() == 2
    assert counts() == [0, 1, 2]

    # Upserting an identical event again must not create a duplicate row.
    ms_database.upsert_scheduled_event(first)
    assert ms_database.scheduled_event_count() == 2
    assert counts() == [0, 1, 2]

    ms_database.remove_scheduled_event(second)
    assert counts() == [0, 1, 1]
def test_trigger_scheduled_events(monitoring_service: MonitoringService):
    """A due scheduled event is consumed by ``_trigger_scheduled_events``."""
    monitoring_service.context.required_confirmations = 5
    create_default_token_network(monitoring_service.context)

    triggered_event = ActionMonitoringTriggeredEvent(
        token_network_address=DEFAULT_TOKEN_NETWORK_ADDRESS,
        channel_identifier=make_channel_identifier(),
        non_closing_participant=make_address(),
    )
    # An event scheduled for "now" is immediately due.
    due_at = Timestamp(get_posix_utc_time_now())

    def scheduled_count() -> int:
        return len(monitoring_service.database.get_scheduled_events(due_at))

    assert scheduled_count() == 0
    monitoring_service.context.database.upsert_scheduled_event(
        ScheduledEvent(trigger_timestamp=due_at, event=triggered_event)
    )
    assert scheduled_count() == 1

    # Processing due events must remove them from the database.
    monitoring_service._trigger_scheduled_events()  # pylint: disable=protected-access
    assert scheduled_count() == 0
def test_scheduled_events(ms_database):
    """Scheduled events are deduplicated on upsert and filtered by trigger block."""
    # The scheduled_event table references token_network via foreign key.
    ms_database.conn.execute(
        "INSERT INTO token_network(address) VALUES (?)",
        ['a'],
    )

    def make_event(block):
        # Identical payload for both events; only the trigger block differs.
        return ScheduledEvent(
            trigger_block_number=block,
            event=ActionMonitoringTriggeredEvent(
                token_network_address='a',
                channel_identifier=1,
                non_closing_participant='b',
            ),
        )

    def counts():
        # Number of due events at each of the three probe blocks.
        return [len(ms_database.get_scheduled_events(block)) for block in (22, 23, 24)]

    first = make_event(23)
    second = make_event(24)

    assert ms_database.scheduled_event_count() == 0
    ms_database.upsert_scheduled_event(event=first)
    assert ms_database.scheduled_event_count() == 1

    ms_database.upsert_scheduled_event(event=second)
    assert ms_database.scheduled_event_count() == 2
    assert counts() == [0, 1, 2]

    # Upserting an identical event again must not create a duplicate row.
    ms_database.upsert_scheduled_event(event=first)
    assert ms_database.scheduled_event_count() == 2
    assert counts() == [0, 1, 2]

    ms_database.remove_scheduled_event(second)
    assert counts() == [0, 1, 1]
def create_scheduled_event(row: sqlite3.Row) -> ScheduledEvent:
    """Reconstruct a ``ScheduledEvent`` from its database row."""
    # The event_type column stores an id that maps back to the event class.
    make_sub_event = EVENT_ID_TYPE_MAP[row["event_type"]]
    inner_event = make_sub_event(
        row["token_network_address"],
        row["channel_identifier"],
        row["non_closing_participant"],
    )
    return ScheduledEvent(
        trigger_block_number=row["trigger_block_number"],
        event=inner_event,
    )
def create_scheduled_event(row: sqlite3.Row) -> ScheduledEvent:
    """Reconstruct a ``ScheduledEvent`` from its database row."""
    # The event_type column stores an id that maps back to the event class.
    make_sub_event = EVENT_ID_TYPE_MAP[row["event_type"]]
    # The address is stored checksummed; convert back to canonical bytes.
    inner_event = make_sub_event(
        TokenNetworkAddress(to_canonical_address(row["token_network_address"])),
        row["channel_identifier"],
        row["non_closing_participant"],
    )
    return ScheduledEvent(
        trigger_timestamp=row["trigger_timestamp"],
        event=inner_event,
    )
def test_trigger_scheduled_events(monitoring_service: MonitoringService):
    """A due scheduled event is consumed by ``_trigger_scheduled_events``."""
    monitoring_service.context.required_confirmations = 5
    create_default_token_network(monitoring_service.context)

    triggered_event = ActionMonitoringTriggeredEvent(
        token_network_address=DEFAULT_TOKEN_NETWORK_ADDRESS,
        channel_identifier=make_channel_identifier(),
        non_closing_participant=make_address(),
    )

    # Trigger the event on a currently unconfirmed block
    trigger_block = BlockNumber(monitoring_service.context.latest_confirmed_block + 1)

    def scheduled_count() -> int:
        return len(monitoring_service.database.get_scheduled_events(trigger_block))

    assert scheduled_count() == 0
    monitoring_service.context.database.upsert_scheduled_event(
        ScheduledEvent(trigger_block_number=trigger_block, event=triggered_event)
    )
    assert scheduled_count() == 1

    # Now run `_trigger_scheduled_events` and see if the event is removed
    monitoring_service._trigger_scheduled_events()  # pylint: disable=protected-access
    assert scheduled_count() == 0
def action_monitoring_triggered_event_handler(event: Event, context: Context) -> None:
    """Try to submit the `monitor` transaction for the channel in `event`.

    Looks up the stored MonitorRequest and channel, validates them, and — if
    our state is newer than the on-chain one and the reward/deposit checks
    pass — sends the `monitor` call on-chain. When the user's deposit is
    currently insufficient, the event is rescheduled for the next block
    instead of being dropped.
    """
    assert isinstance(event, ActionMonitoringTriggeredEvent)
    log.info("Triggering channel monitoring")

    monitor_request = context.database.get_monitor_request(
        token_network_address=event.token_network_address,
        channel_id=event.channel_identifier,
        non_closing_signer=event.non_closing_participant,
    )
    if monitor_request is None:
        log.error(
            "MonitorRequest cannot be found",
            token_network_address=event.token_network_address,
            channel_id=event.channel_identifier,
        )
        metrics.get_metrics_for_label(metrics.ERRORS_LOGGED, metrics.ErrorCategory.STATE).inc()
        return

    channel = context.database.get_channel(
        token_network_address=monitor_request.token_network_address,
        channel_id=monitor_request.channel_identifier,
    )
    if channel is None:
        log.error("Channel cannot be found", monitor_request=monitor_request)
        metrics.get_metrics_for_label(metrics.ERRORS_LOGGED, metrics.ErrorCategory.STATE).inc()
        return

    if not _is_mr_valid(monitor_request, channel):
        log.error(
            "MonitorRequest lost its validity",
            monitor_request=monitor_request,
            channel=channel,
        )
        metrics.get_metrics_for_label(metrics.ERRORS_LOGGED, metrics.ErrorCategory.PROTOCOL).inc()
        return

    # Skip if the chain already knows a state at least as recent as ours
    # (another MS may have submitted it first).
    last_onchain_nonce = 0
    if channel.update_status:
        last_onchain_nonce = channel.update_status.nonce

    if monitor_request.nonce <= last_onchain_nonce:
        log.info(
            "Another MS submitted the last known channel state",
            monitor_request=monitor_request,
        )
        return

    latest_block = context.web3.eth.blockNumber
    last_confirmed_block = context.latest_confirmed_block
    user_address = monitor_request.non_closing_signer
    # Deposit is evaluated over the confirmed..latest block range
    # ("pessimistic" per the helper's name — presumably the minimum over that
    # span so unconfirmed deposits are not relied on; TODO confirm).
    user_deposit = get_pessimistic_udc_balance(
        udc=context.user_deposit_contract,
        address=user_address,
        from_block=last_confirmed_block,
        to_block=latest_block,
    )

    if monitor_request.reward_amount < context.min_reward:
        log.info(
            "Monitor request not executed due to insufficient reward amount",
            monitor_request=monitor_request,
            min_reward=context.min_reward,
        )
        return

    # Require a security margin on top of the reward; if the deposit does not
    # cover it yet, retry on the next confirmed block.
    if user_deposit < monitor_request.reward_amount * UDC_SECURITY_MARGIN_FACTOR_MS:
        log.debug(
            "User deposit is insufficient -> try monitoring again later",
            monitor_request=monitor_request,
            min_reward=context.min_reward,
        )
        context.database.upsert_scheduled_event(
            ScheduledEvent(trigger_block_number=BlockNumber(last_confirmed_block + 1), event=event)
        )
        return

    assert (
        channel.monitor_tx_hash is None
    ), "This MS already monitored this channel. Should be impossible."

    try:
        # Attackers might be able to construct MRs that make this fail.
        # Since we execute a gas estimation before doing the `transact`,
        # the gas estimation will fail before any gas is used.
        # If we stop doing a gas estimation, a `call` has to be done before
        # the `transact` to prevent attackers from wasting the MS's gas.
        tx_hash = TransactionHash(
            bytes(
                context.monitoring_service_contract.functions.monitor(
                    monitor_request.signer,
                    monitor_request.non_closing_signer,
                    monitor_request.balance_hash,
                    monitor_request.nonce,
                    monitor_request.additional_hash,
                    monitor_request.closing_signature,
                    monitor_request.non_closing_signature,
                    monitor_request.reward_amount,
                    monitor_request.token_network_address,
                    monitor_request.reward_proof_signature,
                ).transact({"from": context.ms_state.address})
            )
        )
    except Exception as exc:  # pylint: disable=broad-except
        # Log the earliest block at which monitoring would be allowed, to help
        # diagnose "too early" failures.
        first_allowed = BlockNumber(
            _first_allowed_block_to_monitor(event.token_network_address, channel, context)
        )
        failed_at = context.web3.eth.blockNumber
        log.error(
            "Sending tx failed",
            exc_info=True,
            err=exc,
            first_allowed=first_allowed,
            failed_at=failed_at,
        )
        metrics.get_metrics_for_label(metrics.ERRORS_LOGGED, metrics.ErrorCategory.BLOCKCHAIN).inc()
        return

    log.info(
        "Sent transaction calling `monitor` for channel",
        token_network_address=channel.token_network_address,
        channel_identifier=channel.identifier,
        transaction_hash=encode_hex(tx_hash),
    )
    assert tx_hash is not None

    # sqlite connection used as context manager: both writes commit (or roll
    # back) together.
    with context.database.conn:
        # Add tx hash to list of waiting transactions
        context.database.add_waiting_transaction(tx_hash)
        channel.monitor_tx_hash = tx_hash
        context.database.upsert_channel(channel)
def monitor_new_balance_proof_event_handler(event: Event, context: Context) -> None:
    """Record an on-chain NewBalanceProof and schedule a reward claim if it is ours.

    Creates or updates the channel's ``update_status`` with the nonce and
    sender from the event. When the update was sent by this monitoring
    service, schedules an ``ActionClaimRewardTriggeredEvent`` for the block
    at which `claimReward` becomes callable.
    """
    assert isinstance(event, ReceiveMonitoringNewBalanceProofEvent)

    channel = context.database.get_channel(event.token_network_address, event.channel_identifier)
    if channel is None:
        log.error(
            "Channel not in database",
            token_network_address=event.token_network_address,
            identifier=event.channel_identifier,
        )
        metrics.get_metrics_for_label(metrics.ERRORS_LOGGED, metrics.ErrorCategory.STATE).inc()
        return

    log.info(
        "Received MSC NewBalanceProof event",
        token_network_address=event.token_network_address,
        identifier=event.channel_identifier,
        evt=event,
    )

    # check for known monitor calls and update accordingly
    update_status = channel.update_status
    if update_status is None:
        log.info(
            "Creating channel update state",
            token_network_address=channel.token_network_address,
            channel_identifier=channel.identifier,
            new_nonce=event.nonce,
            new_sender=event.ms_address,
        )
        channel.update_status = OnChainUpdateStatus(
            update_sender_address=event.ms_address, nonce=event.nonce
        )
        context.database.upsert_channel(channel)
    else:
        # nonce not bigger, should never happen as it is checked in the contract
        if event.nonce < update_status.nonce:
            log.error(
                "MSC NewBalanceProof nonce smaller than the known one, ignoring.",
                know_nonce=update_status.nonce,
                received_nonce=event.nonce,
            )
            metrics.get_metrics_for_label(
                metrics.ERRORS_LOGGED, metrics.ErrorCategory.PROTOCOL
            ).inc()
            return

        log.info(
            "Updating channel update state",
            token_network_address=channel.token_network_address,
            channel_identifier=channel.identifier,
            new_nonce=event.nonce,
            new_sender=event.ms_address,
        )
        # update channel status
        update_status.nonce = event.nonce
        update_status.update_sender_address = event.ms_address
        context.database.upsert_channel(channel)

    # check if this was our update, if so schedule the call of
    # `claimReward` it will be checked there that our update was the latest one
    if event.ms_address == context.ms_state.address:
        assert channel.closing_block is not None, "closing_block not set"

        # Transactions go into the next mined block, so we could trigger one block
        # before the `claim` call is allowed to succeed to include it in the
        # first possible block.
        # Unfortunately, parity does the gas estimation on the current block
        # instead of the next one, so we have to wait for the first allowed
        # block to be finished to send the transaction successfully on parity.
        trigger_block = BlockNumber(channel.closing_block + channel.settle_timeout + 1)

        # trigger the claim reward action by an event
        triggered_event = ActionClaimRewardTriggeredEvent(
            token_network_address=channel.token_network_address,
            channel_identifier=channel.identifier,
            non_closing_participant=event.raiden_node_address,
        )
        log.info(
            "Received event for own update, triggering reward claim",
            token_network_address=event.token_network_address,
            identifier=channel.identifier,
            scheduled_event=triggered_event,
            trigger_block=trigger_block,
            closing_block=channel.closing_block,
            settle_timeout=channel.settle_timeout,
        )

        # Add scheduled event if it does not exist yet
        # If the event is already scheduled (e.g. after a restart) the DB takes care that
        # it is only stored once
        context.database.upsert_scheduled_event(
            ScheduledEvent(trigger_block_number=trigger_block, event=triggered_event)
        )
def channel_closed_event_handler(event: Event, context: Context) -> None:
    """Handle a confirmed ChannelClosed event.

    Marks the channel as closed in the database. If the settle period is
    still running, schedules an ``ActionMonitoringTriggeredEvent`` for the
    non-closing participant so a monitor request can be submitted in time.
    """
    assert isinstance(event, ReceiveChannelClosedEvent)

    channel = context.database.get_channel(event.token_network_address, event.channel_identifier)
    if channel is None:
        log.error(
            "Channel not in database",
            token_network_address=event.token_network_address,
            identifier=event.channel_identifier,
        )
        metrics.get_metrics_for_label(metrics.ERRORS_LOGGED, metrics.ErrorCategory.STATE).inc()
        return

    # Check if the settle timeout is already over.
    # This is important when starting up the MS.
    settle_period_end_block = event.block_number + channel.settle_timeout
    settle_period_over = settle_period_end_block < context.latest_confirmed_block
    if not settle_period_over:
        # Trigger the monitoring action event handler, this will check if a
        # valid MR is available.
        # This enables the client to send a late MR
        # also see https://github.com/raiden-network/raiden-services/issues/29
        if channel.participant1 == event.closing_participant:
            non_closing_participant = channel.participant2
        else:
            non_closing_participant = channel.participant1

        # Transactions go into the next mined block, so we could trigger one block
        # before the `monitor` call is allowed to succeed to include it in the
        # first possible block.
        # Unfortunately, parity does the gas estimation on the current block
        # instead of the next one, so we have to wait for the first allowed
        # block to be finished to send the transaction successfully on parity.
        trigger_block = BlockNumber(
            _first_allowed_block_to_monitor(event.token_network_address, channel, context)
        )
        triggered_event = ActionMonitoringTriggeredEvent(
            token_network_address=channel.token_network_address,
            channel_identifier=channel.identifier,
            non_closing_participant=non_closing_participant,
        )

        log.info(
            "Channel closed, triggering monitoring check",
            token_network_address=event.token_network_address,
            identifier=channel.identifier,
            scheduled_event=triggered_event,
            trigger_block=trigger_block,
        )

        # Add scheduled event if it does not exist yet. If the event is already
        # scheduled (e.g. after a restart) the DB takes care that it is only
        # stored once.
        context.database.upsert_scheduled_event(
            ScheduledEvent(trigger_block_number=trigger_block, event=triggered_event)
        )
    else:
        log.warning(
            "Settle period timeout is in the past, skipping",
            token_network_address=event.token_network_address,
            identifier=channel.identifier,
            settle_period_end_block=settle_period_end_block,
            latest_committed_block=context.latest_committed_block,
            latest_confirmed_block=context.latest_confirmed_block,
        )

    # Record the closing in the database in any case.
    channel.state = ChannelState.CLOSED
    channel.closing_block = event.block_number
    channel.closing_participant = event.closing_participant
    context.database.upsert_channel(channel)
def monitor_new_balance_proof_event_handler(event: Event, context: Context) -> None:
    """Record an on-chain NewBalanceProof and schedule a reward claim if it is ours.

    Creates or updates the channel's ``update_status`` with the nonce and
    sender from the event. When the update was sent by this monitoring
    service, schedules an ``ActionClaimRewardTriggeredEvent`` for the block
    at which `claimReward` becomes callable.
    """
    assert isinstance(event, ReceiveMonitoringNewBalanceProofEvent)

    channel = context.db.get_channel(event.token_network_address, event.channel_identifier)
    if channel is None:
        log.error(
            "Channel not in database",
            token_network_address=event.token_network_address,
            identifier=event.channel_identifier,
        )
        return

    log.info(
        "Received MSC NewBalanceProof event",
        token_network_address=event.token_network_address,
        identifier=event.channel_identifier,
        evt=event,
    )

    # check for known monitor calls and update accordingly
    update_status = channel.update_status
    if update_status is None:
        log.info(
            "Creating channel update state",
            token_network_address=channel.token_network_address,
            channel_identifier=channel.identifier,
            new_nonce=event.nonce,
            new_sender=event.ms_address,
        )
        channel.update_status = OnChainUpdateStatus(
            update_sender_address=event.ms_address, nonce=event.nonce
        )
        context.db.upsert_channel(channel)
    else:
        # nonce not bigger, should never happen as it is checked in the contract
        if event.nonce < update_status.nonce:
            log.error(
                "MSC NewBalanceProof nonce smaller than the known one, ignoring.",
                know_nonce=update_status.nonce,
                received_nonce=event.nonce,
            )
            return

        log.info(
            "Updating channel update state",
            token_network_address=channel.token_network_address,
            channel_identifier=channel.identifier,
            new_nonce=event.nonce,
            new_sender=event.ms_address,
        )
        # update channel status
        update_status.nonce = event.nonce
        update_status.update_sender_address = event.ms_address
        context.db.upsert_channel(channel)

    # check if this was our update, if so schedule the call
    # of `claimReward`
    # it will be checked there that our update was the latest one
    if event.ms_address == context.ms_state.address:
        assert channel.closing_block is not None, "closing_block not set"
        trigger_block = BlockNumber(channel.closing_block + channel.settle_timeout)

        # trigger the claim reward action by an event.
        # Fix: use a distinct local name instead of rebinding the `event`
        # parameter — the old shadowing only worked because the right-hand
        # side (which reads `event.raiden_node_address`) evaluates before the
        # assignment, which was fragile and confusing.
        triggered_event = ActionClaimRewardTriggeredEvent(
            token_network_address=channel.token_network_address,
            channel_identifier=channel.identifier,
            non_closing_participant=event.raiden_node_address,
        )

        # Add scheduled event if it does not exist yet
        # If the event is already scheduled (e.g. after a restart) the DB takes care that
        # it is only stored once
        context.db.upsert_scheduled_event(
            ScheduledEvent(trigger_block_number=trigger_block, event=cast(Event, triggered_event))
        )
def channel_closed_event_handler(event: Event, context: Context) -> None:
    """Handle a ChannelClosed event.

    Marks the channel as closed in the database. If the settle period is
    still running, schedules an ``ActionMonitoringTriggeredEvent`` for the
    non-closing participant one block before the first allowed `monitor`
    block.
    """
    assert isinstance(event, ReceiveChannelClosedEvent)

    channel = context.db.get_channel(event.token_network_address, event.channel_identifier)
    if channel is None:
        log.error(
            "Channel not in database",
            token_network_address=event.token_network_address,
            identifier=event.channel_identifier,
        )
        return

    # Check if the settle timeout is already over.
    # This is important when starting up the MS.
    settle_period_end_block = event.block_number + channel.settle_timeout
    settle_period_over = settle_period_end_block < context.last_known_block
    if not settle_period_over:
        # Trigger the monitoring action event handler, this will check if a
        # valid MR is available.
        # This enables the client to send a late MR
        # also see https://github.com/raiden-network/raiden-services/issues/29
        if channel.participant1 == event.closing_participant:
            non_closing_participant = channel.participant2
        else:
            non_closing_participant = channel.participant1

        # Transactions go into the next mined block, so trigger one block
        # before the `monitor` call is allowed to succeed to include it in the
        # first possible block.
        trigger_block = BlockNumber(
            _first_allowed_block_to_monitor(event.token_network_address, channel, context) - 1
        )
        triggered_event = ActionMonitoringTriggeredEvent(
            token_network_address=channel.token_network_address,
            channel_identifier=channel.identifier,
            non_closing_participant=non_closing_participant,
        )

        log.info(
            "Channel closed, triggering monitoring check",
            token_network_address=event.token_network_address,
            identifier=channel.identifier,
            scheduled_event=triggered_event,
            trigger_block=trigger_block,
        )

        # Add scheduled event if it does not exist yet. If the event is already
        # scheduled (e.g. after a restart) the DB takes care that it is only
        # stored once.
        context.db.upsert_scheduled_event(
            ScheduledEvent(trigger_block_number=trigger_block, event=cast(Event, triggered_event))
        )
    else:
        log.warning(
            "Settle period timeout is in the past, skipping",
            token_network_address=event.token_network_address,
            identifier=channel.identifier,
            settle_period_end_block=settle_period_end_block,
            known_block=context.last_known_block,
        )

    # Record the closing in the database in any case.
    channel.state = ChannelState.CLOSED
    channel.closing_block = event.block_number
    channel.closing_participant = event.closing_participant
    context.db.upsert_channel(channel)
def channel_closed_event_handler(event: Event, context: Context) -> None:
    """Handle a ChannelClosed event.

    Marks the channel as closed in the database. If the settle period is
    still running, schedules an ``ActionMonitoringTriggeredEvent`` for the
    non-closing participant after a fraction of the settle timeout has
    passed (per ``RATIO_OF_SETTLE_TIMEOUT_BEFORE_MONITOR``).
    """
    assert isinstance(event, ReceiveChannelClosedEvent)

    channel = context.db.get_channel(event.token_network_address, event.channel_identifier)
    if channel is None:
        log.error(
            "Channel not in database",
            token_network_address=event.token_network_address,
            identifier=event.channel_identifier,
        )
        return

    # check if the settle timeout is already over
    # this is important when starting up the MS
    settle_period_end_block = event.block_number + channel.settle_timeout
    settle_period_over = settle_period_end_block < context.last_known_block
    if not settle_period_over:
        # trigger the monitoring action event handler, this will check if a
        # valid MR is available.
        # This enables the client to send a late MR
        # also see https://github.com/raiden-network/raiden-services/issues/29
        if channel.participant1 == event.closing_participant:
            non_closing_participant = channel.participant2
        else:
            non_closing_participant = channel.participant1

        # Wait a configured fraction of the settle timeout before monitoring,
        # leaving the first part of the settle period to the client.
        client_update_period: int = round(
            channel.settle_timeout * RATIO_OF_SETTLE_TIMEOUT_BEFORE_MONITOR
        )
        trigger_block = BlockNumber(event.block_number + client_update_period)
        triggered_event = ActionMonitoringTriggeredEvent(
            token_network_address=channel.token_network_address,
            channel_identifier=channel.identifier,
            non_closing_participant=non_closing_participant,
        )

        log.info(
            "Channel closed, triggering monitoring check",
            token_network_address=event.token_network_address,
            identifier=channel.identifier,
            scheduled_event=triggered_event,
            trigger_block=trigger_block,
        )

        # Add scheduled event if it does not exist yet
        # If the event is already scheduled (e.g. after a restart) the DB takes care that
        # it is only stored once
        context.db.upsert_scheduled_event(
            ScheduledEvent(trigger_block_number=trigger_block, event=cast(Event, triggered_event))
        )
    else:
        log.warning(
            "Settle period timeout is in the past, skipping",
            token_network_address=event.token_network_address,
            identifier=channel.identifier,
            settle_period_end_block=settle_period_end_block,
            known_block=context.last_known_block,
        )

    # Record the closing in the database in any case.
    channel.state = ChannelState.CLOSED
    channel.closing_block = event.block_number
    channel.closing_participant = event.closing_participant
    context.db.upsert_channel(channel)