Example #1
def test_scheduled_events(ms_database: Database):
    # Add token network used as foreign key
    token_network_address = TokenNetworkAddress(bytes([1] * 20))
    ms_database.conn.execute(
        "INSERT INTO token_network (address, settle_timeout) VALUES (?, ?)",
        [
            to_checksum_address(token_network_address),
            DEFAULT_TOKEN_NETWORK_SETTLE_TIMEOUT
        ],
    )

    event1 = ScheduledEvent(
        trigger_timestamp=23 * 15,
        event=ActionMonitoringTriggeredEvent(
            token_network_address=token_network_address,
            channel_identifier=ChannelID(1),
            non_closing_participant=Address(bytes([1] * 20)),
        ),
    )

    assert ms_database.scheduled_event_count() == 0
    ms_database.upsert_scheduled_event(event=event1)
    assert ms_database.scheduled_event_count() == 1

    event2 = ScheduledEvent(
        trigger_timestamp=24 * 15,
        event=ActionMonitoringTriggeredEvent(
            token_network_address=token_network_address,
            channel_identifier=ChannelID(1),
            non_closing_participant=Address(bytes([1] * 20)),
        ),
    )

    ms_database.upsert_scheduled_event(event2)
    assert ms_database.scheduled_event_count() == 2

    assert len(ms_database.get_scheduled_events(22 * 15)) == 0
    assert len(ms_database.get_scheduled_events(23 * 15)) == 1
    assert len(ms_database.get_scheduled_events(24 * 15)) == 2

    ms_database.upsert_scheduled_event(event1)
    assert ms_database.scheduled_event_count() == 2

    assert len(ms_database.get_scheduled_events(22 * 15)) == 0
    assert len(ms_database.get_scheduled_events(23 * 15)) == 1
    assert len(ms_database.get_scheduled_events(24 * 15)) == 2

    ms_database.remove_scheduled_event(event2)
    assert len(ms_database.get_scheduled_events(22 * 15)) == 0
    assert len(ms_database.get_scheduled_events(23 * 15)) == 1
    assert len(ms_database.get_scheduled_events(24 * 15)) == 1
Example #2
def test_scheduled_events(ms_database: Database):
    # Add token network used as foreign key
    token_network_address = TokenNetworkAddress(bytes([1] * 20))
    ms_database.conn.execute(
        "INSERT INTO token_network(address) VALUES (?)",
        [to_checksum_address(token_network_address)],
    )

    event1 = ScheduledEvent(
        trigger_block_number=BlockNumber(23),
        event=ActionMonitoringTriggeredEvent(
            token_network_address=token_network_address,
            channel_identifier=ChannelID(1),
            non_closing_participant=Address(bytes([1] * 20)),
        ),
    )

    assert ms_database.scheduled_event_count() == 0
    ms_database.upsert_scheduled_event(event=event1)
    assert ms_database.scheduled_event_count() == 1

    event2 = ScheduledEvent(
        trigger_block_number=BlockNumber(24),
        event=ActionMonitoringTriggeredEvent(
            token_network_address=token_network_address,
            channel_identifier=ChannelID(1),
            non_closing_participant=Address(bytes([1] * 20)),
        ),
    )

    ms_database.upsert_scheduled_event(event2)
    assert ms_database.scheduled_event_count() == 2

    assert len(ms_database.get_scheduled_events(BlockNumber(22))) == 0
    assert len(ms_database.get_scheduled_events(BlockNumber(23))) == 1
    assert len(ms_database.get_scheduled_events(BlockNumber(24))) == 2

    ms_database.upsert_scheduled_event(event1)
    assert ms_database.scheduled_event_count() == 2

    assert len(ms_database.get_scheduled_events(BlockNumber(22))) == 0
    assert len(ms_database.get_scheduled_events(BlockNumber(23))) == 1
    assert len(ms_database.get_scheduled_events(BlockNumber(24))) == 2

    ms_database.remove_scheduled_event(event2)
    assert len(ms_database.get_scheduled_events(BlockNumber(22))) == 0
    assert len(ms_database.get_scheduled_events(BlockNumber(23))) == 1
    assert len(ms_database.get_scheduled_events(BlockNumber(24))) == 1
Example #3
def test_action_monitoring_triggered_event_handler_without_sufficient_balance_doesnt_trigger_monitor_call(  # noqa
        context: Context, ):
    """ Tests that `monitor` is not called when user has insufficient balance in user deposit contract

    Also a test for https://github.com/raiden-network/raiden-services/issues/29 , as the MR
    is sent after the channel has been closed.
    """
    context = setup_state_with_closed_channel(context)

    context.db.upsert_monitor_request(
        get_signed_monitor_request(nonce=Nonce(6),
                                   reward_amount=TokenAmount(10)))

    trigger_event = ActionMonitoringTriggeredEvent(
        token_network_address=DEFAULT_TOKEN_NETWORK_ADDRESS,
        channel_identifier=DEFAULT_CHANNEL_IDENTIFIER,
        non_closing_participant=DEFAULT_PARTICIPANT2,
    )

    channel = context.db.get_channel(trigger_event.token_network_address,
                                     trigger_event.channel_identifier)
    assert channel
    assert channel.closing_tx_hash is None

    context.user_deposit_contract.functions.effectiveBalance(
        DEFAULT_PARTICIPANT2).call.return_value = 0
    action_monitoring_triggered_event_handler(trigger_event, context)

    # check that the monitor call has not been made
    assert context.monitoring_service_contract.functions.monitor.called is False
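
The effectiveBalance(...).call.return_value pattern above works because context.user_deposit_contract is a unittest.mock.Mock in these tests. A self-contained illustration of the same pattern; the contract and address values here are placeholders:

from unittest.mock import Mock

user_deposit_contract = Mock()
user_deposit_contract.functions.effectiveBalance("0xabc").call.return_value = 100

# Calling the mocked contract function returns the configured balance. Note
# that a plain Mock ignores call arguments, so any address hits the same
# child mock and therefore the same return_value.
assert user_deposit_contract.functions.effectiveBalance("0xdef").call() == 100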
Example #4
def test_action_monitoring_triggered_event_handler_with_insufficient_reward_amount_does_not_trigger_monitor_call(  # noqa
        context: Context, ):
    """ Tests that `monitor` is not called when the ActionMonitoringTriggeredEvent is triggered but
    the monitor request shows an insufficient reward amount
    """
    context = setup_state_with_closed_channel(context)

    context.db.upsert_monitor_request(
        get_signed_monitor_request(nonce=Nonce(6),
                                   reward_amount=TokenAmount(0)))

    trigger_event = ActionMonitoringTriggeredEvent(
        token_network_address=DEFAULT_TOKEN_NETWORK_ADDRESS,
        channel_identifier=DEFAULT_CHANNEL_IDENTIFIER,
        non_closing_participant=DEFAULT_PARTICIPANT2,
    )

    channel = context.db.get_channel(trigger_event.token_network_address,
                                     trigger_event.channel_identifier)
    assert channel
    assert channel.closing_tx_hash is None

    context.user_deposit_contract.functions.effectiveBalance(
        DEFAULT_PARTICIPANT2).call.return_value = 21
    action_monitoring_triggered_event_handler(trigger_event, context)

    # check that the monitor call has not been made
    assert context.monitoring_service_contract.functions.monitor.called is False
Example #5
def test_action_monitoring_rescheduling_when_user_lacks_funds(context: Context):
    reward_amount = TokenAmount(10)
    context = setup_state_with_closed_channel(context)
    context.db.upsert_monitor_request(
        create_signed_monitor_request(nonce=Nonce(6), reward_amount=reward_amount)
    )

    event = ActionMonitoringTriggeredEvent(
        token_network_address=DEFAULT_TOKEN_NETWORK_ADDRESS,
        channel_identifier=DEFAULT_CHANNEL_IDENTIFIER,
        non_closing_participant=DEFAULT_PARTICIPANT2,
    )
    scheduled_events_before = context.db.get_scheduled_events(max_trigger_block=BlockNumber(10000))

    # Try to call monitor when the user has insufficient funds
    with patch("monitoring_service.handlers.get_pessimistic_udc_balance", Mock(return_value=0)):
        action_monitoring_triggered_event_handler(event, context)
    assert not context.monitoring_service_contract.functions.monitor.called

    # Now the event must have been rescheduled
    # TODO: check that the event is rescheduled to trigger at the right block
    scheduled_events_after = context.db.get_scheduled_events(max_trigger_block=BlockNumber(10000))
    new_events = set(scheduled_events_after) - set(scheduled_events_before)
    assert len(new_events) == 1
    assert new_events.pop().event == event

    # With sufficient funds it must succeed
    with patch(
        "monitoring_service.handlers.get_pessimistic_udc_balance",
        Mock(return_value=reward_amount * UDC_SECURITY_MARGIN_FACTOR_MS),
    ):
        action_monitoring_triggered_event_handler(event, context)
    assert context.monitoring_service_contract.functions.monitor.called
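
The two patch(...) branches above pivot on a security-margin comparison. A tiny, self-contained sketch of that check; the real UDC_SECURITY_MARGIN_FACTOR_MS constant lives in raiden-services, so the value below is an assumption for illustration only.

UDC_SECURITY_MARGIN_FACTOR_MS = 2.0  # assumed value, not the real constant

def has_sufficient_funds(udc_balance: int, reward_amount: int) -> bool:
    # The service should only call `monitor` when the user's deposit covers
    # the reward with a safety margin; otherwise the event is rescheduled.
    return udc_balance >= reward_amount * UDC_SECURITY_MARGIN_FACTOR_MS

assert not has_sufficient_funds(udc_balance=0, reward_amount=10)  # reschedule
assert has_sufficient_funds(udc_balance=20, reward_amount=10)     # monitor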
Example #6
def test_trigger_scheduled_events(monitoring_service: MonitoringService):
    monitoring_service.context.required_confirmations = 5

    create_default_token_network(monitoring_service.context)

    triggered_event = ActionMonitoringTriggeredEvent(
        token_network_address=DEFAULT_TOKEN_NETWORK_ADDRESS,
        channel_identifier=make_channel_identifier(),
        non_closing_participant=make_address(),
    )

    trigger_timestamp = Timestamp(get_posix_utc_time_now())

    assert len(
        monitoring_service.database.get_scheduled_events(
            trigger_timestamp)) == 0
    monitoring_service.context.database.upsert_scheduled_event(
        ScheduledEvent(trigger_timestamp=trigger_timestamp,
                       event=triggered_event))
    assert len(
        monitoring_service.database.get_scheduled_events(
            trigger_timestamp)) == 1

    # Now run `_trigger_scheduled_events` and see if the event is removed
    monitoring_service._trigger_scheduled_events()  # pylint: disable=protected-access
    assert len(
        monitoring_service.database.get_scheduled_events(
            trigger_timestamp)) == 0
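
Examples #1 and #6 use the newer timestamp-based scheduling API, while the other variants in this section schedule by block number. The 23 * 15 values in Example #1 appear to convert block numbers at an assumed 15 seconds per block; a tiny sketch of that (assumed) convention:

def block_to_timestamp(block_number: int, seconds_per_block: int = 15) -> int:
    # 15 s per block approximates Ethereum mainnet block times; it is a test
    # convention here, not a protocol guarantee.
    return block_number * seconds_per_block

assert block_to_timestamp(23) == 23 * 15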
Example #7
def test_scheduled_events(ms_database):
    # Add token network used as foreign key
    ms_database.conn.execute(
        "INSERT INTO token_network(address) VALUES (?)",
        ['a'],
    )

    event1 = ScheduledEvent(
        trigger_block_number=23,
        event=ActionMonitoringTriggeredEvent(
            token_network_address='a',
            channel_identifier=1,
            non_closing_participant='b',
        ),
    )

    assert ms_database.scheduled_event_count() == 0
    ms_database.upsert_scheduled_event(event=event1)
    assert ms_database.scheduled_event_count() == 1

    event2 = ScheduledEvent(
        trigger_block_number=24,
        event=ActionMonitoringTriggeredEvent(
            token_network_address='a',
            channel_identifier=1,
            non_closing_participant='b',
        ),
    )

    ms_database.upsert_scheduled_event(event=event2)
    assert ms_database.scheduled_event_count() == 2

    assert len(ms_database.get_scheduled_events(22)) == 0
    assert len(ms_database.get_scheduled_events(23)) == 1
    assert len(ms_database.get_scheduled_events(24)) == 2

    ms_database.upsert_scheduled_event(event=event1)
    assert ms_database.scheduled_event_count() == 2

    assert len(ms_database.get_scheduled_events(22)) == 0
    assert len(ms_database.get_scheduled_events(23)) == 1
    assert len(ms_database.get_scheduled_events(24)) == 2

    ms_database.remove_scheduled_event(event2)
    assert len(ms_database.get_scheduled_events(22)) == 0
    assert len(ms_database.get_scheduled_events(23)) == 1
    assert len(ms_database.get_scheduled_events(24)) == 1
Example #8
    def assert_mr_is_ignored(mr):
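        # Note: nested helper extracted from a larger test; `context` is
        # assumed to come from the enclosing test function's scope.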
        context.db.upsert_monitor_request(mr)

        event = ActionMonitoringTriggeredEvent(
            token_network_address=DEFAULT_TOKEN_NETWORK_ADDRESS,
            channel_identifier=DEFAULT_CHANNEL_IDENTIFIER,
            non_closing_participant=DEFAULT_PARTICIPANT2,
        )

        action_monitoring_triggered_event_handler(event, context)
        assert not context.monitoring_service_contract.functions.monitor.called
Example #9
def test_action_monitoring_triggered_event_handler_does_not_trigger_monitor_call_when_nonce_to_small(  # noqa
        context: Context, ):
    context = setup_state_with_closed_channel(context)

    event3 = ReceiveMonitoringNewBalanceProofEvent(
        token_network_address=DEFAULT_TOKEN_NETWORK_ADDRESS,
        channel_identifier=DEFAULT_CHANNEL_IDENTIFIER,
        reward_amount=TokenAmount(1),
        nonce=Nonce(5),
        ms_address=Address('C'),
        raiden_node_address=DEFAULT_PARTICIPANT2,
        block_number=BlockNumber(23),
    )

    channel = context.db.get_channel(event3.token_network_address,
                                     event3.channel_identifier)
    assert channel
    assert channel.update_status is None

    monitor_new_balance_proof_event_handler(event3, context)

    # add an MR to the DB, with a nonce smaller than in event3
    context.db.upsert_monitor_request(
        get_signed_monitor_request(nonce=Nonce(4)))

    event4 = ActionMonitoringTriggeredEvent(
        token_network_address=DEFAULT_TOKEN_NETWORK_ADDRESS,
        channel_identifier=DEFAULT_CHANNEL_IDENTIFIER,
        non_closing_participant=DEFAULT_PARTICIPANT2,
    )

    channel = context.db.get_channel(event4.token_network_address,
                                     event4.channel_identifier)
    assert channel
    assert channel.update_status is not None
    assert channel.closing_tx_hash is None

    action_monitoring_triggered_event_handler(event4, context)

    assert context.db.channel_count() == 1
    assert channel
    assert channel.closing_tx_hash is None
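
The test above hinges on a nonce comparison: the stored MR (nonce 4) is older than the balance proof already reported on-chain (nonce 5), so `monitor` must not be called. A hedged sketch of that guard; the function and parameter names are illustrative, not the real handler API.

def should_monitor(mr_nonce: int, on_chain_nonce: int) -> bool:
    # Submitting an MR whose nonce does not beat the on-chain update would be
    # rejected (or pointless), so the handler skips it.
    return mr_nonce > on_chain_nonce

assert not should_monitor(mr_nonce=4, on_chain_nonce=5)  # MR too old: skip
assert should_monitor(mr_nonce=6, on_chain_nonce=5)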
Example #10
def test_mr_available_before_channel_triggers_monitor_call(context: Context, ):
    """ Tests that the MR is read from the DB, even if it is supplied before the channel was opened.

    See https://github.com/raiden-network/raiden-services/issues/26
    """

    # add MR to DB
    context.db.upsert_monitor_request(get_signed_monitor_request())

    context = setup_state_with_closed_channel(context)

    event = ActionMonitoringTriggeredEvent(
        token_network_address=DEFAULT_TOKEN_NETWORK_ADDRESS,
        channel_identifier=DEFAULT_CHANNEL_IDENTIFIER,
        non_closing_participant=DEFAULT_PARTICIPANT2,
    )

    context.user_deposit_contract.functions.effectiveBalance(
        DEFAULT_PARTICIPANT2).call.return_value = 100
    action_monitoring_triggered_event_handler(event, context)

    # check that the monitor call has been done
    assert context.monitoring_service_contract.functions.monitor.called is True
Example #11
def test_trigger_scheduled_events(monitoring_service: MonitoringService):
    monitoring_service.context.required_confirmations = 5

    create_default_token_network(monitoring_service.context)

    triggered_event = ActionMonitoringTriggeredEvent(
        token_network_address=DEFAULT_TOKEN_NETWORK_ADDRESS,
        channel_identifier=make_channel_identifier(),
        non_closing_participant=make_address(),
    )

    current_confirmed_block = monitoring_service.context.latest_confirmed_block
    # Trigger the event on a currently unconfirmed block
    trigger_block = BlockNumber(current_confirmed_block + 1)

    assert len(monitoring_service.database.get_scheduled_events(trigger_block)) == 0
    monitoring_service.context.database.upsert_scheduled_event(
        ScheduledEvent(trigger_block_number=trigger_block, event=triggered_event)
    )
    assert len(monitoring_service.database.get_scheduled_events(trigger_block)) == 1

    # Now run `_trigger_scheduled_events` and see if the event is removed
    monitoring_service._trigger_scheduled_events()  # pylint: disable=protected-access
    assert len(monitoring_service.database.get_scheduled_events(trigger_block)) == 0
Example #12
def channel_closed_event_handler(event: Event, context: Context) -> None:
    assert isinstance(event, ReceiveChannelClosedEvent)
    channel = context.database.get_channel(event.token_network_address,
                                           event.channel_identifier)

    if channel is None:
        log.error(
            "Channel not in database",
            token_network_address=event.token_network_address,
            identifier=event.channel_identifier,
        )
        metrics.get_metrics_for_label(metrics.ERRORS_LOGGED,
                                      metrics.ErrorCategory.STATE).inc()
        return

    # Check if the settle timeout is already over.
    # This is important when starting up the MS.
    settle_period_end_block = event.block_number + channel.settle_timeout
    settle_period_over = settle_period_end_block < context.latest_confirmed_block
    if not settle_period_over:
        # Trigger the monitoring action event handler; this will check whether
        # a valid MR is available. This enables the client to send a late MR.
        # See also https://github.com/raiden-network/raiden-services/issues/29
        if channel.participant1 == event.closing_participant:
            non_closing_participant = channel.participant2
        else:
            non_closing_participant = channel.participant1

        # Transactions go into the next mined block, so we could trigger one block
        # before the `monitor` call is allowed to succeed, to get it into the
        # first possible block.
        # Unfortunately, parity does the gas estimation on the current block
        # instead of the next one, so we have to wait until the first allowed
        # block has finished before the transaction can be sent successfully on parity.
        trigger_block = BlockNumber(
            _first_allowed_block_to_monitor(event.token_network_address,
                                            channel, context))

        triggered_event = ActionMonitoringTriggeredEvent(
            token_network_address=channel.token_network_address,
            channel_identifier=channel.identifier,
            non_closing_participant=non_closing_participant,
        )

        log.info(
            "Channel closed, triggering monitoring check",
            token_network_address=event.token_network_address,
            identifier=channel.identifier,
            scheduled_event=triggered_event,
            trigger_block=trigger_block,
        )

        # Add the scheduled event if it does not exist yet. If the event is
        # already scheduled (e.g. after a restart), the DB takes care that it
        # is only stored once.
        context.database.upsert_scheduled_event(
            ScheduledEvent(trigger_block_number=trigger_block,
                           event=triggered_event))
    else:
        log.warning(
            "Settle period timeout is in the past, skipping",
            token_network_address=event.token_network_address,
            identifier=channel.identifier,
            settle_period_end_block=settle_period_end_block,
            latest_committed_block=context.latest_committed_block,
            latest_confirmed_block=context.latest_confirmed_block,
        )

    channel.state = ChannelState.CLOSED
    channel.closing_block = event.block_number
    channel.closing_participant = event.closing_participant
    context.database.upsert_channel(channel)
Example #13
def test_crash(tmpdir, get_accounts, get_private_key, mockchain):  # pylint: disable=too-many-locals
    """ Process blocks and compare results with/without crash

    Somewhat meaningful crash handling is simulated by not including the
    UpdatedHeadBlockEvent in every block.
    """
    channel_identifier = ChannelID(3)
    c1, c2 = get_accounts(2)
    token_network_address = TokenNetworkAddress(
        to_canonical_address(get_random_address()))
    balance_proof = HashedBalanceProof(
        nonce=Nonce(1),
        transferred_amount=TokenAmount(2),
        priv_key=get_private_key(c1),
        channel_identifier=channel_identifier,
        token_network_address=token_network_address,
        chain_id=ChainID(1),
        additional_hash="0x%064x" % 0,
        locked_amount=0,
        locksroot=encode_hex(LOCKSROOT_OF_NO_LOCKS),
    )
    monitor_request = balance_proof.get_monitor_request(
        get_private_key(c2),
        reward_amount=TokenAmount(0),
        msc_address=TEST_MSC_ADDRESS)

    events = [
        [
            ReceiveChannelOpenedEvent(
                token_network_address=token_network_address,
                channel_identifier=channel_identifier,
                participant1=c1,
                participant2=c2,
                settle_timeout=20,
                block_number=BlockNumber(0),
            )
        ],
        [UpdatedHeadBlockEvent(BlockNumber(1))],
        [
            ActionMonitoringTriggeredEvent(
                token_network_address=token_network_address,
                channel_identifier=channel_identifier,
                non_closing_participant=c2,
            )
        ],
        [UpdatedHeadBlockEvent(BlockNumber(3))],
    ]
    mockchain(events)

    server_private_key = get_random_privkey()

    contracts = {
        CONTRACT_TOKEN_NETWORK_REGISTRY: ContractMock(),
        CONTRACT_MONITORING_SERVICE: ContractMock(),
        CONTRACT_USER_DEPOSIT: ContractMock(),
        CONTRACT_SERVICE_REGISTRY: ContractMock(),
    }

    def new_ms(filename):
        ms = MonitoringService(
            web3=Web3Mock(),
            private_key=server_private_key,
            contracts=contracts,
            db_filename=os.path.join(tmpdir, filename),
        )
        msc = Mock()
        ms.context.monitoring_service_contract = msc
        ms.monitor_mock = msc.functions.monitor.return_value.transact  # type: ignore
        ms.monitor_mock.return_value = bytes(0)  # type: ignore
        return ms

    # initialize both monitoring services
    stable_ms = new_ms("stable.db")
    crashy_ms = new_ms("crashy.db")
    for ms in [stable_ms, crashy_ms]:
        ms.database.conn.execute(
            "INSERT INTO token_network(address) VALUES (?)",
            [to_checksum_address(token_network_address)],
        )
        ms.context.ms_state.blockchain_state.token_network_addresses = [
            token_network_address
        ]
        ms.database.upsert_monitor_request(monitor_request)
        ms.database.conn.commit()

    # process each block and compare results between crashy and stable ms
    for to_block in range(len(events)):
        crashy_ms = new_ms("crashy.db")  # new instance to simulate crash
        stable_ms.monitor_mock.reset_mock()  # clear calls from last block
        result_state: List[dict] = []
        for ms in [stable_ms, crashy_ms]:
            ms._process_new_blocks(to_block)  # pylint: disable=protected-access
            result_state.append(
                dict(
                    blockchain_state=ms.context.ms_state.blockchain_state,
                    db_dump=list(ms.database.conn.iterdump()),
                    monitor_calls=ms.monitor_mock.mock_calls,
                ))

        # both instances should have the same state after processing
        for stable_state, crashy_state in zip(result_state[0].values(),
                                              result_state[1].values()):
            # do asserts for each key separately to get better error messages
            assert stable_state == crashy_state
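
The crash test compares full database dumps between the stable and crashy instances. sqlite3's Connection.iterdump() yields the SQL statements needed to recreate the database, so equal dumps imply equal persisted state. A minimal, self-contained demonstration:

import sqlite3

a, b = sqlite3.connect(":memory:"), sqlite3.connect(":memory:")
for conn in (a, b):
    conn.execute("CREATE TABLE t (x INTEGER)")
    conn.execute("INSERT INTO t VALUES (1)")

# Identical contents produce identical dumps, which is what the crash test
# asserts for the stable and crashy monitoring services.
assert list(a.iterdump()) == list(b.iterdump())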
Example #14
def test_crash(
    monitoring_service: MonitoringService,  # adds stake in ServiceRegistry
    web3,
    contracts_manager,
    server_private_key,
    token_network_registry_contract,
    monitoring_service_contract,
    user_deposit_contract,
    tmpdir,
    generate_raiden_clients,
    token_network,
):
    """ Process blocks and compare results with/without crash

    Somewhat meaningful crash handling is simulated by not including the
    UpdatedHeadBlockEvent in every block.
    """
    channel_identifier = ChannelID(3)
    c1, c2 = generate_raiden_clients(2)
    monitor_request = c2.get_monitor_request(
        balance_proof=c1.get_balance_proof(
            channel_id=channel_identifier,
            nonce=1,
            additional_hash='0x11',
            transferred_amount=2,
            locked_amount=0,
            locksroot='0x00',
        ),
        reward_amount=0,
    )

    events = [
        [
            ReceiveChannelOpenedEvent(
                token_network_address=token_network.address,
                channel_identifier=channel_identifier,
                participant1=c1.address,
                participant2=c2.address,
                settle_timeout=20,
                block_number=BlockNumber(0),
            )
        ],
        [UpdatedHeadBlockEvent(BlockNumber(1))],
        [
            ActionMonitoringTriggeredEvent(
                token_network_address=token_network.address,
                channel_identifier=channel_identifier,
                non_closing_participant=c2.address,
            )
        ],
        [UpdatedHeadBlockEvent(BlockNumber(3))],
    ]

    def new_ms(filename):
        ms = MonitoringService(
            web3=web3,
            private_key=server_private_key,
            contracts={
                CONTRACT_TOKEN_NETWORK_REGISTRY:
                token_network_registry_contract,
                CONTRACT_MONITORING_SERVICE: monitoring_service_contract,
                CONTRACT_USER_DEPOSIT: user_deposit_contract,
            },
            db_filename=os.path.join(tmpdir, filename),
        )
        ms.bcl = MockBlockchainListener(events)  # type: ignore
        msc = Mock()
        ms.context.monitoring_service_contract = msc
        ms.monitor_mock = msc.functions.monitor.return_value.transact  # type: ignore
        ms.monitor_mock.return_value = bytes(0)  # type: ignore
        return ms

    # initialize both monitoring services
    stable_ms = new_ms('stable.db')
    crashy_ms = new_ms('crashy.db')
    for ms in [stable_ms, crashy_ms]:
        ms.database.conn.execute(
            "INSERT INTO token_network(address) VALUES (?)",
            [token_network.address])
        ms.context.ms_state.blockchain_state.token_network_addresses = [
            token_network.address
        ]
        ms.database.upsert_monitor_request(monitor_request)
        ms.database.conn.commit()

    # process each block and compare results between crashy and stable ms
    for to_block in range(len(events)):
        crashy_ms = new_ms('crashy.db')  # new instance to simulate crash
        stable_ms.monitor_mock.reset_mock()  # clear calls from last block
        result_state: List[dict] = []
        for ms in [stable_ms, crashy_ms]:
            ms._process_new_blocks(to_block)
            result_state.append(
                dict(
                    blockchain_state=ms.context.ms_state.blockchain_state,
                    db_dump=list(ms.database.conn.iterdump()),
                    monitor_calls=ms.monitor_mock.mock_calls,
                ))

        # both instances should have the same state after processing
        for stable_state, crashy_state in zip(result_state[0].values(),
                                              result_state[1].values()):
            # do asserts for each key separately to get better error messages
            assert stable_state == crashy_state
Example #15
def channel_closed_event_handler(event: Event, context: Context) -> None:
    assert isinstance(event, ReceiveChannelClosedEvent)
    channel = context.db.get_channel(event.token_network_address,
                                     event.channel_identifier)

    if channel is None:
        log.error(
            "Channel not in database",
            token_network_address=event.token_network_address,
            identifier=event.channel_identifier,
        )
        return

    # Check if the settle timeout is already over.
    # This is important when starting up the MS.
    settle_period_end_block = event.block_number + channel.settle_timeout
    settle_period_over = settle_period_end_block < context.last_known_block
    if not settle_period_over:
        # Trigger the monitoring action event handler; this will check whether
        # a valid MR is available. This enables the client to send a late MR.
        # See also https://github.com/raiden-network/raiden-services/issues/29
        if channel.participant1 == event.closing_participant:
            non_closing_participant = channel.participant2
        else:
            non_closing_participant = channel.participant1

        # Transactions go into the next mined block, so trigger one block
        # before the `monitor` call is allowed to succeed, to get it into the
        # first possible block.
        trigger_block = BlockNumber(
            _first_allowed_block_to_monitor(event.token_network_address,
                                            channel, context) - 1)

        triggered_event = ActionMonitoringTriggeredEvent(
            token_network_address=channel.token_network_address,
            channel_identifier=channel.identifier,
            non_closing_participant=non_closing_participant,
        )

        log.info(
            "Channel closed, triggering monitoring check",
            token_network_address=event.token_network_address,
            identifier=channel.identifier,
            scheduled_event=triggered_event,
            trigger_block=trigger_block,
        )

        # Add the scheduled event if it does not exist yet. If the event is
        # already scheduled (e.g. after a restart), the DB takes care that it
        # is only stored once.
        context.db.upsert_scheduled_event(
            ScheduledEvent(trigger_block_number=trigger_block,
                           event=cast(Event, triggered_event)))
    else:
        log.warning(
            "Settle period timeout is in the past, skipping",
            token_network_address=event.token_network_address,
            identifier=channel.identifier,
            settle_period_end_block=settle_period_end_block,
            known_block=context.last_known_block,
        )

    channel.state = ChannelState.CLOSED
    channel.closing_block = event.block_number
    channel.closing_participant = event.closing_participant
    context.db.upsert_channel(channel)
Example #16
def channel_closed_event_handler(event: Event, context: Context) -> None:
    assert isinstance(event, ReceiveChannelClosedEvent)
    channel = context.db.get_channel(event.token_network_address,
                                     event.channel_identifier)

    if channel is None:
        log.error(
            "Channel not in database",
            token_network_address=event.token_network_address,
            identifier=event.channel_identifier,
        )
        return

    # check if the settle timeout is already over
    # this is important when starting up the MS
    settle_period_end_block = event.block_number + channel.settle_timeout
    settle_period_over = settle_period_end_block < context.last_known_block
    if not settle_period_over:
        # trigger the monitoring action event handler; this will check whether
        # a valid MR is available. This enables the client to send a late MR.
        # See also https://github.com/raiden-network/raiden-services/issues/29
        if channel.participant1 == event.closing_participant:
            non_closing_participant = channel.participant2
        else:
            non_closing_participant = channel.participant1

        client_update_period: int = round(
            channel.settle_timeout * RATIO_OF_SETTLE_TIMEOUT_BEFORE_MONITOR)
        trigger_block = BlockNumber(event.block_number + client_update_period)

        triggered_event = ActionMonitoringTriggeredEvent(
            token_network_address=channel.token_network_address,
            channel_identifier=channel.identifier,
            non_closing_participant=non_closing_participant,
        )

        log.info(
            "Channel closed, triggering monitoring check",
            token_network_address=event.token_network_address,
            identifier=channel.identifier,
            scheduled_event=triggered_event,
            trigger_block=trigger_block,
        )

        # Add the scheduled event if it does not exist yet.
        # If the event is already scheduled (e.g. after a restart), the DB
        # takes care that it is only stored once.
        context.db.upsert_scheduled_event(
            ScheduledEvent(trigger_block_number=trigger_block,
                           event=cast(Event, triggered_event)))
    else:
        log.warning(
            "Settle period timeout is in the past, skipping",
            token_network_address=event.token_network_address,
            identifier=channel.identifier,
            settle_period_end_block=settle_period_end_block,
            known_block=context.last_known_block,
        )

    channel.state = ChannelState.CLOSED
    channel.closing_block = event.block_number
    channel.closing_participant = event.closing_participant
    context.db.upsert_channel(channel)
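
Examples #12 and #15 delegate the trigger-block computation to _first_allowed_block_to_monitor, while Example #16 still computes it inline from RATIO_OF_SETTLE_TIMEOUT_BEFORE_MONITOR. A hedged sketch of that inline computation; the ratio value below is an assumption for illustration, the real constant lives in raiden-services.

RATIO_OF_SETTLE_TIMEOUT_BEFORE_MONITOR = 0.8  # assumed value, not the real constant

def first_allowed_block_to_monitor(closing_block: int, settle_timeout: int) -> int:
    # Wait a fixed fraction of the settle timeout after the channel close so
    # the client gets a chance to submit its own balance proof update first.
    client_update_period = round(settle_timeout * RATIO_OF_SETTLE_TIMEOUT_BEFORE_MONITOR)
    return closing_block + client_update_period

# With a settle timeout of 20 blocks (as in the crash tests) and a close at
# block 100, monitoring would first be attempted at block 116.
assert first_allowed_block_to_monitor(100, 20) == 116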