Example #1
@dataclass
class Channel:
    # pylint: disable=too-many-instance-attributes
    token_network_address: TokenNetworkAddress = field(
        metadata={"marshmallow_field": ChecksumAddress(required=True)})
    channel_id: ChannelID
    participant1: Address = field(
        metadata={"marshmallow_field": ChecksumAddress(required=True)})
    participant2: Address = field(
        metadata={"marshmallow_field": ChecksumAddress(required=True)})
    fee_schedule1: FeeSchedule = field(default_factory=FeeSchedule)
    fee_schedule2: FeeSchedule = field(default_factory=FeeSchedule)

    # Set by PFSCapacityUpdate
    capacity1: TokenAmount = TokenAmount(0)
    capacity2: TokenAmount = TokenAmount(0)
    update_nonce1: Nonce = Nonce(0)
    update_nonce2: Nonce = Nonce(0)
    reveal_timeout1: BlockTimeout = DEFAULT_REVEAL_TIMEOUT
    reveal_timeout2: BlockTimeout = DEFAULT_REVEAL_TIMEOUT

    Schema: ClassVar[Type[marshmallow.Schema]]

    @property
    def views(self) -> Tuple["ChannelView", "ChannelView"]:
        return ChannelView(channel=self), ChannelView(channel=self,
                                                      reverse=True)
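For orientation, a minimal usage sketch under the assumption that the dataclass above can be instantiated directly; the addresses and IDs below are placeholders, not values taken from the fixtures used elsewhere in this listing.

# Hypothetical usage sketch: build a Channel and take its two directed views,
# one per participant direction.
channel = Channel(
    token_network_address=TokenNetworkAddress(b"1" * 20),
    channel_id=ChannelID(1),
    participant1=Address(bytes([1] * 20)),
    participant2=Address(bytes([2] * 20)),
)
view_1_to_2, view_2_to_1 = channel.views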
def test_action_claim_reward_triggered_event_handler_without_update_state_doesnt_trigger_claim_call(  # noqa
    context: Context,
):
    """Tests that `claimReward` is called when the ActionMonitoringTriggeredEvent is triggered and
    user has sufficient balance in user deposit contract
    """
    context = setup_state_with_closed_channel(context)

    context.database.upsert_monitor_request(
        create_signed_monitor_request(nonce=Nonce(6), reward_amount=TokenAmount(0))
    )

    trigger_event = ActionClaimRewardTriggeredEvent(
        token_network_address=DEFAULT_TOKEN_NETWORK_ADDRESS,
        channel_identifier=DEFAULT_CHANNEL_IDENTIFIER,
        non_closing_participant=DEFAULT_PARTICIPANT2,
    )

    channel = context.database.get_channel(
        trigger_event.token_network_address, trigger_event.channel_identifier
    )
    assert channel
    assert channel.claim_tx_hash is None

    # Set update state
    channel.update_status = OnChainUpdateStatus(
        update_sender_address=Address(bytes([1] * 20)), nonce=Nonce(6)
    )
    context.database.upsert_channel(channel)

    action_claim_reward_triggered_event_handler(trigger_event, context)

    # check that the claim call has not been made
    assert context.monitoring_service_contract.functions.claimReward.called is False
def get_capacity_update_message(  # pylint: disable=too-many-arguments
    updating_participant: Address,
    other_participant: Address,
    chain_id=ChainID(61),
    channel_identifier=DEFAULT_CHANNEL_ID,
    token_network_address: TokenNetworkAddress = DEFAULT_TOKEN_NETWORK_ADDRESS,
    updating_nonce=Nonce(1),
    other_nonce=Nonce(0),
    updating_capacity=TA(90),
    other_capacity=TA(110),
    reveal_timeout: BlockTimeout = BlockTimeout(2),
    privkey_signer: bytes = PRIVATE_KEY_1,
) -> PFSCapacityUpdate:
    updatepfs_message = PFSCapacityUpdate(
        canonical_identifier=CanonicalIdentifier(
            chain_identifier=chain_id,
            channel_identifier=channel_identifier,
            token_network_address=token_network_address,
        ),
        updating_participant=updating_participant,
        other_participant=other_participant,
        updating_nonce=updating_nonce,
        other_nonce=other_nonce,
        updating_capacity=updating_capacity,
        other_capacity=other_capacity,
        reveal_timeout=reveal_timeout,
        signature=EMPTY_SIGNATURE,
    )

    updatepfs_message.sign(LocalSigner(privkey_signer))

    return updatepfs_message
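A minimal usage sketch, assuming the helpers visible elsewhere in this listing (make_address, TA, EMPTY_SIGNATURE) are in scope:

# Hypothetical usage: sign a PFSCapacityUpdate between two fresh test addresses,
# keeping the defaults above (chain ID 61, DEFAULT_CHANNEL_ID, nonces 1/0).
capacity_update = get_capacity_update_message(
    updating_participant=make_address(),
    other_participant=make_address(),
    updating_capacity=TA(100),
)
assert capacity_update.signature != EMPTY_SIGNATURE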
Example #4
def test_waiting_messages(pathfinding_service_mock):
    participant1_privkey, participant1 = make_privkey_address()
    token_network_address = TokenNetworkAddress(b"1" * 20)
    channel_id = ChannelID(1)

    # register token network internally
    database = pathfinding_service_mock.database
    database.conn.execute(
        "INSERT INTO token_network(address) VALUES (?)",
        [to_checksum_address(token_network_address)],
    )

    fee_update = PFSFeeUpdate(
        canonical_identifier=CanonicalIdentifier(
            chain_identifier=ChainID(61),
            token_network_address=token_network_address,
            channel_identifier=channel_id,
        ),
        updating_participant=participant1,
        fee_schedule=FeeScheduleState(),
        timestamp=datetime.utcnow(),
        signature=EMPTY_SIGNATURE,
    )
    fee_update.sign(LocalSigner(participant1_privkey))

    capacity_update = PFSCapacityUpdate(
        canonical_identifier=CanonicalIdentifier(
            chain_identifier=ChainID(61),
            token_network_address=token_network_address,
            channel_identifier=channel_id,
        ),
        updating_participant=make_address(),
        other_participant=make_address(),
        updating_nonce=Nonce(1),
        other_nonce=Nonce(1),
        updating_capacity=TokenAmount(100),
        other_capacity=TokenAmount(111),
        reveal_timeout=BlockTimeout(50),
        signature=EMPTY_SIGNATURE,
    )
    capacity_update.sign(LocalSigner(participant1_privkey))

    for message in (fee_update, capacity_update):
        database.insert_waiting_message(message)

        recovered_messages = list(
            database.pop_waiting_messages(
                token_network_address=token_network_address,
                channel_id=channel_id))
        assert len(recovered_messages) == 1
        assert message == recovered_messages[0]

        recovered_messages2 = list(
            database.pop_waiting_messages(
                token_network_address=token_network_address,
                channel_id=channel_id))
        assert len(recovered_messages2) == 0
def test_channel_bp_updated_event_handler_lower_nonce_than_expected(context: Context):
    metrics_state = save_metrics_state(metrics.REGISTRY)
    context = setup_state_with_closed_channel(context)

    event_bp = ReceiveNonClosingBalanceProofUpdatedEvent(
        token_network_address=DEFAULT_TOKEN_NETWORK_ADDRESS,
        channel_identifier=DEFAULT_CHANNEL_IDENTIFIER,
        closing_participant=DEFAULT_PARTICIPANT2,
        nonce=Nonce(1),
        block_number=BlockNumber(23),
    )

    channel = context.database.get_channel(
        event_bp.token_network_address, event_bp.channel_identifier
    )
    assert context.database.channel_count() == 1
    assert channel
    assert channel.update_status is None

    non_closing_balance_proof_updated_event_handler(event_bp, context)
    # send the same message twice to trigger the non-increasing-nonce error
    non_closing_balance_proof_updated_event_handler(event_bp, context)

    assert (
        metrics_state.get_delta(
            "events_log_errors_total", labels=metrics.ErrorCategory.PROTOCOL.to_label_dict()
        )
        == 1.0
    )
def test_action_monitoring_triggered_event_handler_with_insufficient_reward_amount_does_not_trigger_monitor_call(  # noqa
    context: Context,
):
    """Tests that `monitor` is not called when the ActionMonitoringTriggeredEvent is triggered but
    the monitor request shows an insufficient reward amount
    """
    context = setup_state_with_closed_channel(context)

    context.database.upsert_monitor_request(
        create_signed_monitor_request(nonce=Nonce(6), reward_amount=TokenAmount(0))
    )

    trigger_event = ActionMonitoringTriggeredEvent(
        token_network_address=DEFAULT_TOKEN_NETWORK_ADDRESS,
        channel_identifier=DEFAULT_CHANNEL_IDENTIFIER,
        non_closing_participant=DEFAULT_PARTICIPANT2,
    )

    channel = context.database.get_channel(
        trigger_event.token_network_address, trigger_event.channel_identifier
    )
    assert channel
    assert channel.monitor_tx_hash is None

    context.user_deposit_contract.functions.effectiveBalance(
        DEFAULT_PARTICIPANT2
    ).call.return_value = 21
    action_monitoring_triggered_event_handler(trigger_event, context)

    # check that the monitor call has not been made
    assert context.monitoring_service_contract.functions.monitor.called is False
def test_channel_bp_updated_event_handler_channel_not_in_database(context: Context):
    metrics_state = save_metrics_state(metrics.REGISTRY)
    # only set up the token network, without any channels
    create_default_token_network(context)

    event_bp = ReceiveNonClosingBalanceProofUpdatedEvent(
        token_network_address=DEFAULT_TOKEN_NETWORK_ADDRESS,
        channel_identifier=DEFAULT_CHANNEL_IDENTIFIER,
        closing_participant=DEFAULT_PARTICIPANT2,
        nonce=Nonce(2),
        block_number=BlockNumber(23),
    )

    channel = context.database.get_channel(
        event_bp.token_network_address, event_bp.channel_identifier
    )
    assert channel is None
    assert context.database.channel_count() == 0

    non_closing_balance_proof_updated_event_handler(event_bp, context)

    assert (
        metrics_state.get_delta(
            "events_log_errors_total", labels=metrics.ErrorCategory.STATE.to_label_dict()
        )
        == 1.0
    )
def test_action_monitoring_triggered_event_handler_without_sufficient_balance_doesnt_trigger_monitor_call(  # noqa
    context: Context,
):
    """Tests that `monitor` is not called when user has insufficient balance in user deposit contract

    Also a test for https://github.com/raiden-network/raiden-services/issues/29 , as the MR
    is sent after the channel has been closed.
    """
    context = setup_state_with_closed_channel(context)

    context.database.upsert_monitor_request(
        create_signed_monitor_request(nonce=Nonce(6), reward_amount=TokenAmount(10))
    )

    trigger_event = ActionMonitoringTriggeredEvent(
        token_network_address=DEFAULT_TOKEN_NETWORK_ADDRESS,
        channel_identifier=DEFAULT_CHANNEL_IDENTIFIER,
        non_closing_participant=DEFAULT_PARTICIPANT2,
    )

    channel = context.database.get_channel(
        trigger_event.token_network_address, trigger_event.channel_identifier
    )
    assert channel
    assert channel.monitor_tx_hash is None

    context.user_deposit_contract.functions.effectiveBalance(
        DEFAULT_PARTICIPANT2
    ).call.return_value = 0
    action_monitoring_triggered_event_handler(trigger_event, context)

    # check that the monitor call has not been made
    assert context.monitoring_service_contract.functions.monitor.called is False
Example #9
def create_signed_monitor_request(
    chain_id: ChainID = TEST_CHAIN_ID,
    nonce: Nonce = Nonce(5),
    reward_amount: TokenAmount = DEFAULT_REWARD_AMOUNT,
    closing_privkey: PrivateKey = DEFAULT_PRIVATE_KEY1,
    nonclosing_privkey: PrivateKey = DEFAULT_PRIVATE_KEY2,
) -> MonitorRequest:
    bp = HashedBalanceProof(
        channel_identifier=DEFAULT_CHANNEL_IDENTIFIER,
        token_network_address=DEFAULT_TOKEN_NETWORK_ADDRESS,
        chain_id=chain_id,
        balance_hash="",
        nonce=nonce,
        additional_hash="",
        priv_key=closing_privkey,
    )
    monitor_request = bp.get_monitor_request(privkey=nonclosing_privkey,
                                             reward_amount=reward_amount,
                                             msc_address=TEST_MSC_ADDRESS)

    # Some signature correctness checks
    assert monitor_request.signer == private_key_to_address(closing_privkey)
    assert monitor_request.non_closing_signer == private_key_to_address(
        nonclosing_privkey)
    assert monitor_request.reward_proof_signer == private_key_to_address(
        nonclosing_privkey)

    return monitor_request
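A short sketch showing how this factory is typically consumed by the handler tests above (the `context` fixture is assumed, as in those tests):

# Hypothetical usage: persist a monitor request with a non-zero reward so that a later
# ActionMonitoringTriggeredEvent has something to act on.
monitor_request = create_signed_monitor_request(nonce=Nonce(6), reward_amount=TokenAmount(10))
context.database.upsert_monitor_request(monitor_request)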
Example #10
def test_channel_bp_updated_event_handler_sets_update_status_if_not_set(context: Context):
    context = setup_state_with_closed_channel(context)

    event_bp = ReceiveNonClosingBalanceProofUpdatedEvent(
        token_network_address=DEFAULT_TOKEN_NETWORK_ADDRESS,
        channel_identifier=DEFAULT_CHANNEL_IDENTIFIER,
        closing_participant=DEFAULT_PARTICIPANT2,
        nonce=Nonce(2),
        block_number=BlockNumber(23),
    )

    channel = context.database.get_channel(
        event_bp.token_network_address, event_bp.channel_identifier
    )
    assert channel
    assert channel.update_status is None

    non_closing_balance_proof_updated_event_handler(event_bp, context)

    assert context.database.channel_count() == 1
    channel = context.database.get_channel(
        event_bp.token_network_address, event_bp.channel_identifier
    )
    assert channel
    assert channel.update_status is not None
    assert channel.update_status.nonce == 2
    assert channel.update_status.update_sender_address == DEFAULT_PARTICIPANT1

    event_bp2 = ReceiveNonClosingBalanceProofUpdatedEvent(
        token_network_address=DEFAULT_TOKEN_NETWORK_ADDRESS,
        channel_identifier=DEFAULT_CHANNEL_IDENTIFIER,
        closing_participant=DEFAULT_PARTICIPANT2,
        nonce=Nonce(5),
        block_number=BlockNumber(53),
    )

    non_closing_balance_proof_updated_event_handler(event_bp2, context)

    assert context.database.channel_count() == 1
    channel = context.database.get_channel(
        event_bp.token_network_address, event_bp.channel_identifier
    )
    assert channel
    assert channel.update_status is not None
    assert channel.update_status.nonce == 5
    assert channel.update_status.update_sender_address == DEFAULT_PARTICIPANT1
Example #11
    def update_capacity(
        self,
        capacity: TokenAmount,
        nonce: Nonce = Nonce(0),
        reveal_timeout: Optional[BlockTimeout] = None,
    ) -> None:
        self.update_nonce = nonce
        self.capacity = capacity
        if reveal_timeout is not None:
            self.reveal_timeout = reveal_timeout
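A minimal sketch of calling this method, assuming `view` is one of the ChannelView objects returned by Channel.views in Example #1 and that the values mirror a received capacity update:

# Hypothetical usage: apply the values carried by a capacity update to one channel direction.
view.update_capacity(
    capacity=TokenAmount(90),
    nonce=Nonce(1),
    reveal_timeout=BlockTimeout(50),
)
assert view.capacity == TokenAmount(90)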
Example #12
def test_action_monitoring_triggered_event_handler_does_not_trigger_monitor_call_when_nonce_to_small(  # noqa
    context: Context,
):
    context = setup_state_with_closed_channel(context)

    event3 = ReceiveMonitoringNewBalanceProofEvent(
        token_network_address=DEFAULT_TOKEN_NETWORK_ADDRESS,
        channel_identifier=DEFAULT_CHANNEL_IDENTIFIER,
        reward_amount=TokenAmount(1),
        nonce=Nonce(5),
        ms_address=Address(bytes([3] * 20)),
        raiden_node_address=DEFAULT_PARTICIPANT2,
        block_number=BlockNumber(23),
    )

    channel = context.database.get_channel(event3.token_network_address, event3.channel_identifier)
    assert channel
    assert channel.update_status is None

    monitor_new_balance_proof_event_handler(event3, context)

    # add MR to DB with a nonce smaller than the one in event3
    context.database.upsert_monitor_request(create_signed_monitor_request(nonce=Nonce(4)))

    event4 = ActionMonitoringTriggeredEvent(
        token_network_address=DEFAULT_TOKEN_NETWORK_ADDRESS,
        channel_identifier=DEFAULT_CHANNEL_IDENTIFIER,
        non_closing_participant=DEFAULT_PARTICIPANT2,
    )

    channel = context.database.get_channel(event4.token_network_address, event4.channel_identifier)
    assert channel
    assert channel.update_status is not None
    assert channel.monitor_tx_hash is None

    action_monitoring_triggered_event_handler(event4, context)

    assert context.database.channel_count() == 1
    assert channel
    assert channel.monitor_tx_hash is None
Example #13
def test_monitor_new_balance_proof_event_handler_idempotency(context: Context):
    context = setup_state_with_closed_channel(context)

    new_balance_event = ReceiveMonitoringNewBalanceProofEvent(
        token_network_address=DEFAULT_TOKEN_NETWORK_ADDRESS,
        channel_identifier=DEFAULT_CHANNEL_IDENTIFIER,
        reward_amount=TokenAmount(1),
        nonce=Nonce(2),
        ms_address=Address(context.ms_state.address),
        raiden_node_address=DEFAULT_PARTICIPANT2,
        block_number=BlockNumber(23),
    )

    channel = context.database.get_channel(
        new_balance_event.token_network_address, new_balance_event.channel_identifier
    )
    assert channel
    assert channel.update_status is None

    monitor_new_balance_proof_event_handler(new_balance_event, context)

    assert context.database.scheduled_event_count() == 1
    assert context.database.channel_count() == 1
    channel = context.database.get_channel(
        new_balance_event.token_network_address, new_balance_event.channel_identifier
    )
    assert channel
    assert channel.update_status is not None
    assert channel.update_status.nonce == 2
    assert channel.update_status.update_sender_address == context.ms_state.address

    monitor_new_balance_proof_event_handler(new_balance_event, context)

    assert context.database.scheduled_event_count() == 1
    assert context.database.channel_count() == 1
    channel = context.database.get_channel(
        new_balance_event.token_network_address, new_balance_event.channel_identifier
    )
    assert channel
    assert channel.update_status is not None
    assert channel.update_status.nonce == 2
    assert channel.update_status.update_sender_address == context.ms_state.address
Example #14
def request_monitoring_message(token_network, get_accounts,
                               get_private_key) -> RequestMonitoring:
    c1, c2 = get_accounts(2)

    balance_proof_c2 = HashedBalanceProof(
        channel_identifier=ChannelID(1),
        token_network_address=TokenNetworkAddress(
            to_canonical_address(token_network.address)),
        chain_id=ChainID(61),
        nonce=Nonce(2),
        additional_hash="0x%064x" % 0,
        transferred_amount=TokenAmount(1),
        locked_amount=TokenAmount(0),
        locksroot=encode_hex(LOCKSROOT_OF_NO_LOCKS),
        priv_key=get_private_key(c2),
    )

    return balance_proof_c2.get_request_monitoring(
        privkey=get_private_key(c1),
        reward_amount=TokenAmount(1),
        monitoring_service_contract_address=MonitoringServiceAddress(
            bytes([11] * 20)),
    )
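For context, a sketch of how this fixture's return value is consumed further down in this listing (request_collector is assumed to be the fixture used in the e2e tests):

# Hypothetical usage inside a test that requests both fixtures:
# def test_something(request_collector, request_monitoring_message): ...
request_collector.on_monitor_request(request_monitoring_message)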
Example #15
def test_action_monitoring_rescheduling_when_user_lacks_funds(context: Context):
    reward_amount = TokenAmount(10)
    context = setup_state_with_closed_channel(context)
    context.database.upsert_monitor_request(
        create_signed_monitor_request(nonce=Nonce(6), reward_amount=reward_amount)
    )

    event = ActionMonitoringTriggeredEvent(
        token_network_address=DEFAULT_TOKEN_NETWORK_ADDRESS,
        channel_identifier=DEFAULT_CHANNEL_IDENTIFIER,
        non_closing_participant=DEFAULT_PARTICIPANT2,
    )
    scheduled_events_before = context.database.get_scheduled_events(
        max_trigger_timestamp=Timestamp(get_posix_utc_time_now())
    )

    # Try to call monitor when the user has insufficient funds
    with patch("monitoring_service.handlers.get_pessimistic_udc_balance", Mock(return_value=0)):
        action_monitoring_triggered_event_handler(event, context)
    assert not context.monitoring_service_contract.functions.monitor.called

    # Now the event must have been rescheduled
    # TODO: check that the event is rescheduled to trigger at the right block
    scheduled_events_after = context.database.get_scheduled_events(
        max_trigger_timestamp=Timestamp(get_posix_utc_time_now())
    )
    new_events = set(scheduled_events_after) - set(scheduled_events_before)
    assert len(new_events) == 1
    assert new_events.pop().event == event

    # With sufficient funds it must succeed
    with patch(
        "monitoring_service.handlers.get_pessimistic_udc_balance",
        Mock(return_value=reward_amount * UDC_SECURITY_MARGIN_FACTOR_MS),
    ):
        action_monitoring_triggered_event_handler(event, context)
    assert context.monitoring_service_contract.functions.monitor.called
Example #16
    def f(
        chain_id: ChainID = TEST_CHAIN_ID,
        amount: TokenAmount = TokenAmount(50),
        nonce: Nonce = Nonce(1),
        channel_id: ChannelID = ChannelID(1),
    ) -> RequestMonitoring:
        balance_proof = HashedBalanceProof(
            channel_identifier=channel_id,
            token_network_address=TokenNetworkAddress(b"1" * 20),
            chain_id=chain_id,
            nonce=nonce,
            additional_hash="",
            balance_hash=encode_hex(bytes([amount])),
            priv_key=PrivateKey(get_random_privkey()),
        )
        request_monitoring = balance_proof.get_request_monitoring(
            privkey=non_closing_privkey,
            reward_amount=TokenAmount(55),
            monitoring_service_contract_address=TEST_MSC_ADDRESS,
        )

        # usually not a property of RequestMonitoring, but added for convenience in these tests
        request_monitoring.non_closing_signer = to_checksum_address(non_closing_address)
        return request_monitoring
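A quick sketch of using the factory returned by the enclosing fixture (names assumed); only the parameters shown in the signature above are used:

# Hypothetical usage: build two RequestMonitoring messages that differ only in nonce,
# e.g. to test that a newer balance proof supersedes an older one.
older_request = f(nonce=Nonce(1), amount=TokenAmount(10))
newer_request = f(nonce=Nonce(2), amount=TokenAmount(20))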
Example #17
def test_pfs_with_mocked_client(  # pylint: disable=too-many-arguments
    web3,
    token_network_registry_contract,
    channel_descriptions_case_1: List,
    get_accounts,
    user_deposit_contract,
    token_network,
    custom_token,
    create_channel,
    get_private_key,
):  # pylint: disable=too-many-locals
    """Instantiates some MockClients and the PathfindingService.

    Mocks blockchain events to setup a token network with a given topology, specified in
    the channel_description fixture. Tests all PFS methods w.r.t. to that topology
    """
    clients = get_accounts(7)
    token_network_address = TokenNetworkAddress(
        to_canonical_address(token_network.address))

    with patch("pathfinding_service.service.MatrixListener", new=Mock):
        pfs = PathfindingService(
            web3=web3,
            contracts={
                CONTRACT_TOKEN_NETWORK_REGISTRY:
                token_network_registry_contract,
                CONTRACT_USER_DEPOSIT: user_deposit_contract,
            },
            required_confirmations=BlockTimeout(1),
            db_filename=":memory:",
            poll_interval=0.1,
            sync_start_block=BlockNumber(0),
            private_key=PrivateKey(
                decode_hex(
                    "3a1076bf45ab87712ad64ccb3b10217737f7faacbf2872e88fdd9a537d8fe266"
                )),
        )

    # the greenlet needs to be started and context-switched to
    pfs.start()
    pfs.updated.wait(timeout=5)

    # there should be one token network registered
    assert len(pfs.token_networks) == 1

    token_network_model = pfs.token_networks[token_network_address]
    graph = token_network_model.G
    channel_identifiers = []
    for (
            p1_index,
            p1_capacity,
            _p1_fee,
            _p1_reveal_timeout,
            _p1_reachability,
            p2_index,
            p2_capacity,
            _p2_fee,
            _p2_reveal_timeout,
            _p2_reachability,
    ) in channel_descriptions_case_1:
        # order is important here because we check order later
        channel_id = create_channel(clients[p1_index], clients[p2_index])[0]
        channel_identifiers.append(channel_id)

        for address, partner_address, amount in [
            (clients[p1_index], clients[p2_index], p1_capacity),
            (clients[p2_index], clients[p1_index], p2_capacity),
        ]:
            if amount == 0:
                continue
            custom_token.functions.mint(amount).transact({"from": address})
            custom_token.functions.approve(token_network.address,
                                           amount).transact({"from": address})
            token_network.functions.setTotalDeposit(
                channel_id, address, amount,
                partner_address).transact({"from": address})

    web3.testing.mine(1)  # 1 confirmation block
    pfs.updated.wait(timeout=5)

    # there should be as many open channels as described
    assert len(token_network_model.channel_id_to_addresses.keys()) == len(
        channel_descriptions_case_1)

    # check that the reveal timeouts got registered for both channel views
    for index in range(len(channel_descriptions_case_1)):
        channel_identifier = channel_identifiers[index]
        p1_address, p2_address = token_network_model.channel_id_to_addresses[
            channel_identifier]
        view1: ChannelView = graph[p1_address][p2_address]["view"]
        view2: ChannelView = graph[p2_address][p1_address]["view"]
        assert view1.reveal_timeout == DEFAULT_REVEAL_TIMEOUT
        assert view2.reveal_timeout == DEFAULT_REVEAL_TIMEOUT

    # now close all channels
    for (
            index,
        (
            p1_index,
            _p1_capacity,
            _p1_fee,
            _p1_reveal_timeout,
            _p1_reachability,
            p2_index,
            _p2_capacity,
            _p2_fee,
            _p2_reveal_timeout,
            _p2_reachability,
        ),
    ) in enumerate(channel_descriptions_case_1):
        channel_id = channel_identifiers[index]
        balance_proof = HashedBalanceProof(
            nonce=Nonce(1),
            transferred_amount=0,
            priv_key=get_private_key(clients[p2_index]),
            channel_identifier=channel_id,
            token_network_address=TokenNetworkAddress(
                to_canonical_address(token_network.address)),
            chain_id=TEST_CHAIN_ID,
            additional_hash="0x%064x" % 0,
            locked_amount=0,
            locksroot=encode_hex(LOCKSROOT_OF_NO_LOCKS),
        )
        token_network.functions.closeChannel(
            channel_id,
            clients[p2_index],
            clients[p1_index],
            balance_proof.balance_hash,
            balance_proof.nonce,
            balance_proof.additional_hash,
            balance_proof.signature,
            balance_proof.get_counter_signature(
                get_private_key(clients[p1_index])),
        ).transact({
            "from": clients[p1_index],
            "gas": 200_000
        })

    web3.testing.mine(1)  # 1 confirmation block
    pfs.updated.wait(timeout=5)

    # there should be no channels
    assert len(token_network_model.channel_id_to_addresses.keys()) == 0
    pfs.stop()
Example #18
def test_e2e(  # pylint: disable=too-many-arguments,too-many-locals
    web3,
    monitoring_service_contract,
    user_deposit_contract,
    service_registry,
    monitoring_service: MonitoringService,
    request_collector: RequestCollector,
    deposit_to_udc,
    create_channel,
    token_network,
    get_accounts,
    get_private_key,
):
    """Test complete message lifecycle
    1) client opens channel & submits monitoring request
    2) other client closes channel
    3) MS registers channelClose event
    4) MS calls monitoring contract update
    5) wait for channel settle
    6) MS claims the reward
    """
    query = create_ms_contract_events_query(web3, monitoring_service_contract.address)
    initial_balance = user_deposit_contract.functions.balances(monitoring_service.address).call()
    c1, c2 = get_accounts(2)

    # add deposit for c1
    node_deposit = 10
    deposit_to_udc(c1, node_deposit)

    assert service_registry.functions.hasValidRegistration(monitoring_service.address).call()

    # open a channel; each client's transfer is represented by a balance proof below
    channel_id = create_channel(c1, c2)[0]

    shared_bp_args = dict(
        channel_identifier=channel_id,
        token_network_address=decode_hex(token_network.address),
        chain_id=monitoring_service.chain_id,
        additional_hash="0x%064x" % 0,
        locked_amount=TokenAmount(0),
        locksroot=encode_hex(LOCKSROOT_OF_NO_LOCKS),
    )
    transferred_c1 = 5
    balance_proof_c1 = HashedBalanceProof(
        nonce=Nonce(1),
        transferred_amount=transferred_c1,
        priv_key=get_private_key(c1),
        **shared_bp_args,
    )
    transferred_c2 = 6
    balance_proof_c2 = HashedBalanceProof(
        nonce=Nonce(2),
        transferred_amount=transferred_c2,
        priv_key=get_private_key(c2),
        **shared_bp_args,
    )

    ms_greenlet = gevent.spawn(monitoring_service.start)

    # wait here to give the MS some time to react
    gevent.sleep(0.01)
    assert len(monitoring_service.context.database.get_token_network_addresses()) > 0

    # c1 asks MS to monitor the channel
    reward_amount = TokenAmount(1)
    request_monitoring = balance_proof_c2.get_request_monitoring(
        privkey=get_private_key(c1),
        reward_amount=reward_amount,
        monitoring_service_contract_address=MonitoringServiceAddress(
            to_canonical_address(monitoring_service_contract.address)
        ),
    )
    request_collector.on_monitor_request(request_monitoring)

    # c2 closes the channel
    token_network.functions.closeChannel(
        channel_id,
        c1,
        c2,
        balance_proof_c1.balance_hash,
        balance_proof_c1.nonce,
        balance_proof_c1.additional_hash,
        balance_proof_c1.signature,
        balance_proof_c1.get_counter_signature(get_private_key(c2)),
    ).transact({"from": c2})
    # Wait until the MS reacts, which it does after giving the client some time
    # to update the channel itself.

    timestamp_of_closing_block = Timestamp(web3.eth.get_block("latest").timestamp)
    settle_timeout = int(token_network.functions.settle_timeout().call())
    settleable_after = Timestamp(timestamp_of_closing_block + settle_timeout)

    web3.testing.timeTravel(settleable_after - 1)
    monitoring_service.get_timestamp_now = lambda: settleable_after - 1

    # Now give the monitoring service a chance to submit the missing BP
    gevent.sleep(0.01)
    assert [e.event for e in query()] == [MonitoringServiceEvent.NEW_BALANCE_PROOF_RECEIVED]

    # wait for settle timeout
    web3.testing.timeTravel(settleable_after + 1)
    monitoring_service.get_timestamp_now = lambda: settleable_after + 1

    # Let the MS claim its reward
    gevent.sleep(0.01)
    assert [e.event for e in query()] == [
        MonitoringServiceEvent.NEW_BALANCE_PROOF_RECEIVED,
        MonitoringServiceEvent.REWARD_CLAIMED,
    ]

    final_balance = user_deposit_contract.functions.balances(monitoring_service.address).call()
    assert final_balance == (initial_balance + reward_amount)

    ms_greenlet.kill()
Example #19
def test_first_allowed_monitoring(
    web3: Web3,
    monitoring_service_contract,
    service_registry,
    monitoring_service: MonitoringService,
    request_collector: RequestCollector,
    deposit_to_udc,
    create_channel,
    token_network,
    get_accounts,
    get_private_key,
):
    # pylint: disable=too-many-arguments,too-many-locals,protected-access
    query = create_ms_contract_events_query(web3, monitoring_service_contract.address)
    c1, c2 = get_accounts(2)

    # add deposit for c1
    node_deposit = 10
    deposit_to_udc(c1, node_deposit)

    assert service_registry.functions.hasValidRegistration(monitoring_service.address).call()

    # open a channel; each client's transfer is represented by a balance proof below
    channel_id = create_channel(c1, c2)[0]

    shared_bp_args = dict(
        channel_identifier=channel_id,
        token_network_address=decode_hex(token_network.address),
        chain_id=monitoring_service.chain_id,
        additional_hash="0x%064x" % 0,
        locked_amount=TokenAmount(0),
        locksroot=encode_hex(LOCKSROOT_OF_NO_LOCKS),
    )
    transferred_c1 = 5
    balance_proof_c1 = HashedBalanceProof(
        nonce=Nonce(1),
        transferred_amount=transferred_c1,
        priv_key=get_private_key(c1),
        **shared_bp_args,
    )
    transferred_c2 = 6
    balance_proof_c2 = HashedBalanceProof(
        nonce=Nonce(2),
        transferred_amount=transferred_c2,
        priv_key=get_private_key(c2),
        **shared_bp_args,
    )
    monitoring_service._process_new_blocks(web3.eth.block_number)
    assert len(monitoring_service.context.database.get_token_network_addresses()) > 0

    # c1 asks MS to monitor the channel
    reward_amount = TokenAmount(1)
    request_monitoring = balance_proof_c2.get_request_monitoring(
        privkey=get_private_key(c1),
        reward_amount=reward_amount,
        monitoring_service_contract_address=MonitoringServiceAddress(
            to_canonical_address(monitoring_service_contract.address)
        ),
    )
    request_collector.on_monitor_request(request_monitoring)

    # c2 closes the channel
    token_network.functions.closeChannel(
        channel_id,
        c1,
        c2,
        balance_proof_c1.balance_hash,
        balance_proof_c1.nonce,
        balance_proof_c1.additional_hash,
        balance_proof_c1.signature,
        balance_proof_c1.get_counter_signature(get_private_key(c2)),
    ).transact({"from": c2})

    monitoring_service._process_new_blocks(web3.eth.block_number)

    timestamp_of_closing_block = Timestamp(web3.eth.get_block("latest").timestamp)  # type: ignore
    settle_timeout = int(token_network.functions.settle_timeout().call())
    settleable_after = Timestamp(timestamp_of_closing_block + settle_timeout)

    triggered_events = monitoring_service.database.get_scheduled_events(
        max_trigger_timestamp=settleable_after
    )

    assert len(triggered_events) == 1

    monitor_trigger = triggered_events[0]
    channel = monitoring_service.database.get_channel(
        token_network_address=TokenNetworkAddress(to_canonical_address(token_network.address)),
        channel_id=channel_id,
    )
    assert channel

    # Calling monitor too early must fail. To test this, we call a few seconds
    # before the trigger timestamp.
    web3.testing.timeTravel(monitor_trigger.trigger_timestamp - 5)  # type: ignore

    with pytest.raises(TransactionTooEarlyException):
        handle_event(monitor_trigger.event, monitoring_service.context)
    assert [e.event for e in query()] == []

    # If our `monitor` call fails, we won't try again. Force a retry in this
    # test by clearing monitor_tx_hash.
    channel.monitor_tx_hash = None
    monitoring_service.database.upsert_channel(channel)

    # Now we can try again. The first try mined a new block, so now we're one
    # block further and `monitor` should succeed.
    web3.testing.timeTravel(monitor_trigger.trigger_timestamp)  # type: ignore
    handle_event(monitor_trigger.event, monitoring_service.context)
    assert [e.event for e in query()] == [MonitoringServiceEvent.NEW_BALANCE_PROOF_RECEIVED]
Example #20
def test_reschedule_too_early_events(
    web3: Web3,
    monitoring_service_contract,
    monitoring_service: MonitoringService,
    request_collector: RequestCollector,
    deposit_to_udc,
    create_channel,
    token_network,
    get_accounts,
    get_private_key,
):
    # pylint: disable=too-many-arguments,too-many-locals,protected-access
    c1, c2 = get_accounts(2)

    # add deposit for c1
    node_deposit = 10
    deposit_to_udc(c1, node_deposit)

    # open a channel; each client's transfer is represented by a balance proof below
    channel_id = create_channel(c1, c2)[0]

    shared_bp_args = dict(
        channel_identifier=channel_id,
        token_network_address=decode_hex(token_network.address),
        chain_id=monitoring_service.chain_id,
        additional_hash="0x%064x" % 0,
        locked_amount=TokenAmount(0),
        locksroot=encode_hex(LOCKSROOT_OF_NO_LOCKS),
    )
    transferred_c1 = 5
    balance_proof_c1 = HashedBalanceProof(
        nonce=Nonce(1),
        transferred_amount=transferred_c1,
        priv_key=get_private_key(c1),
        **shared_bp_args,
    )
    transferred_c2 = 6
    balance_proof_c2 = HashedBalanceProof(
        nonce=Nonce(2),
        transferred_amount=transferred_c2,
        priv_key=get_private_key(c2),
        **shared_bp_args,
    )
    monitoring_service._process_new_blocks(web3.eth.block_number)
    assert len(monitoring_service.context.database.get_token_network_addresses()) > 0

    # c1 asks MS to monitor the channel
    reward_amount = TokenAmount(1)
    request_monitoring = balance_proof_c2.get_request_monitoring(
        privkey=get_private_key(c1),
        reward_amount=reward_amount,
        monitoring_service_contract_address=MonitoringServiceAddress(
            to_canonical_address(monitoring_service_contract.address)
        ),
    )
    request_collector.on_monitor_request(request_monitoring)

    # c2 closes the channel
    token_network.functions.closeChannel(
        channel_id,
        c1,
        c2,
        balance_proof_c1.balance_hash,
        balance_proof_c1.nonce,
        balance_proof_c1.additional_hash,
        balance_proof_c1.signature,
        balance_proof_c1.get_counter_signature(get_private_key(c2)),
    ).transact({"from": c2})

    monitoring_service._process_new_blocks(web3.eth.block_number)

    timestamp_of_closing_block = Timestamp(web3.eth.get_block("latest").timestamp)  # type: ignore
    settle_timeout = int(token_network.functions.settle_timeout().call())
    settleable_after = Timestamp(timestamp_of_closing_block + settle_timeout)

    scheduled_events = monitoring_service.database.get_scheduled_events(
        max_trigger_timestamp=settleable_after
    )

    channel = monitoring_service.database.get_channel(
        token_network_address=TokenNetworkAddress(to_canonical_address(token_network.address)),
        channel_id=channel_id,
    )

    monitor_trigger = _first_allowed_timestamp_to_monitor(
        scheduled_events[0].event.token_network_address, channel, monitoring_service.context
    )

    assert len(scheduled_events) == 1
    first_trigger_timestamp = scheduled_events[0].trigger_timestamp
    assert first_trigger_timestamp == monitor_trigger

    # Calling monitor too early must fail
    monitoring_service.get_timestamp_now = lambda: settleable_after
    monitoring_service._trigger_scheduled_events()  # pylint: disable=protected-access
    assert monitoring_service.try_scheduled_events_after == pytest.approx(settleable_after, 100)

    # Failed event is still scheduled, since it was too early for it to succeed
    scheduled_events = monitoring_service.database.get_scheduled_events(settleable_after)
    assert len(scheduled_events) == 1
    # ...and it should be blocked from retrying for a while.
    assert (
        monitoring_service.try_scheduled_events_after
        == monitoring_service.get_timestamp_now() + MAX_SCHEDULED_EVENTS_RETRY_FREQUENCY
    )

    # Now it could be executed, but won't be, due to MAX_SCHEDULED_EVENTS_RETRY_FREQUENCY
    web3.testing.timeTravel(settleable_after - 1)  # type: ignore
    monitoring_service._trigger_scheduled_events()  # pylint: disable=protected-access
    assert len(monitoring_service.database.get_scheduled_events(settleable_after)) == 1

    # Check that it would succeed if it weren't for MAX_SCHEDULED_EVENTS_RETRY_FREQUENCY
    monitoring_service.try_scheduled_events_after = monitoring_service.get_timestamp_now() - 1
    monitoring_service._trigger_scheduled_events()  # pylint: disable=protected-access
    assert len(monitoring_service.database.get_scheduled_events(settleable_after)) == 0
Example #21
def test_crash(
    tmpdir, get_accounts, get_private_key, mockchain
):  # pylint: disable=too-many-locals
    """Process blocks and compare results with/without crash

    Somewhat meaningful crash handling is simulated by not including the
    UpdatedHeadBlockEvent in every block.
    """
    channel_identifier = ChannelID(3)
    c1, c2 = get_accounts(2)
    token_network_address = TokenNetworkAddress(to_canonical_address(get_random_address()))
    balance_proof = HashedBalanceProof(
        nonce=Nonce(1),
        transferred_amount=TokenAmount(2),
        priv_key=get_private_key(c1),
        channel_identifier=channel_identifier,
        token_network_address=token_network_address,
        chain_id=ChainID(61),
        additional_hash="0x%064x" % 0,
        locked_amount=0,
        locksroot=encode_hex(LOCKSROOT_OF_NO_LOCKS),
    )
    monitor_request = balance_proof.get_monitor_request(
        get_private_key(c2), reward_amount=TokenAmount(0), msc_address=TEST_MSC_ADDRESS
    )

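    # Four blocks worth of events. The UpdatedHeadBlockEvent "commit" marker is only
    # present for blocks 1 and 3, so a service that crashes in between must re-process
    # the uncommitted blocks and still end up in the same state as the stable instance.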
    events = [
        [
            ReceiveChannelOpenedEvent(
                token_network_address=token_network_address,
                channel_identifier=channel_identifier,
                participant1=c1,
                participant2=c2,
                block_number=BlockNumber(0),
            )
        ],
        [UpdatedHeadBlockEvent(BlockNumber(1))],
        [
            ActionMonitoringTriggeredEvent(
                token_network_address=token_network_address,
                channel_identifier=channel_identifier,
                non_closing_participant=c2,
            )
        ],
        [UpdatedHeadBlockEvent(BlockNumber(3))],
    ]
    mockchain(events)

    server_private_key = PrivateKey(get_random_privkey())

    contracts = {
        CONTRACT_TOKEN_NETWORK_REGISTRY: ContractMock(),
        CONTRACT_MONITORING_SERVICE: ContractMock(),
        CONTRACT_USER_DEPOSIT: ContractMock(),
        CONTRACT_SERVICE_REGISTRY: ContractMock(),
    }

    def new_ms(filename):
        ms = MonitoringService(
            web3=Web3Mock(),
            private_key=server_private_key,
            contracts=contracts,
            db_filename=os.path.join(tmpdir, filename),
            poll_interval=0,
            required_confirmations=BlockTimeout(0),
            sync_start_block=BlockNumber(0),
        )
        msc = Mock()
        ms.context.monitoring_service_contract = msc
        ms.monitor_mock = msc.functions.monitor.return_value.transact
        ms.monitor_mock.return_value = bytes(0)
        return ms

    # initialize both monitoring services
    stable_ms = new_ms("stable.db")
    crashy_ms = new_ms("crashy.db")
    for ms in [stable_ms, crashy_ms]:
        # mock database time to make results reproducible
        ms.database.conn.create_function("CURRENT_TIMESTAMP", 1, lambda: "2000-01-01")

        ms.database.conn.execute(
            "INSERT INTO token_network (address, settle_timeout) VALUES (?, ?)",
            [to_checksum_address(token_network_address), DEFAULT_TOKEN_NETWORK_SETTLE_TIMEOUT],
        )
        ms.context.ms_state.blockchain_state.token_network_addresses = [token_network_address]
        ms.database.upsert_monitor_request(monitor_request)
        ms.database.conn.commit()

    # process each block and compare results between crashy and stable ms
    for to_block in range(len(events)):
        crashy_ms = new_ms("crashy.db")  # new instance to simulate crash
        stable_ms.monitor_mock.reset_mock()  # clear calls from last block
        result_state: List[dict] = []
        for ms in [stable_ms, crashy_ms]:
            ms._process_new_blocks(BlockNumber(to_block))  # pylint: disable=protected-access
            result_state.append(
                dict(
                    blockchain_state=ms.context.ms_state.blockchain_state,
                    db_dump=list(ms.database.conn.iterdump()),
                    monitor_calls=ms.monitor_mock.mock_calls,
                )
            )

        # both instances should have the same state after processing
        for stable_state, crashy_state in zip(result_state[0].values(), result_state[1].values()):
            if isinstance(stable_state, BlockchainState):
                assert stable_state.chain_id == crashy_state.chain_id
                assert (
                    stable_state.token_network_registry_address
                    == crashy_state.token_network_registry_address
                )
                assert stable_state.latest_committed_block == crashy_state.latest_committed_block
                assert (
                    stable_state.monitor_contract_address == crashy_state.monitor_contract_address
                )
                # Do not compare `current_event_filter_interval`, this is allowed to be different
            else:
                assert stable_state == crashy_state
Example #22
    def populate_token_network(
        token_network: TokenNetwork,
        reachability_state: SimpleReachabilityContainer,
        addresses: List[Address],
        channel_descriptions: List,
    ):
        for (
            channel_id,
            (
                p1_index,
                p1_capacity,
                _p1_fee,
                p1_reveal_timeout,
                p1_reachability,
                p2_index,
                p2_capacity,
                _p2_fee,
                p2_reveal_timeout,
                p2_reachability,
            ),
        ) in enumerate(channel_descriptions):
            participant1 = addresses[p1_index]
            participant2 = addresses[p2_index]
            token_network.handle_channel_opened_event(
                channel_identifier=ChannelID(channel_id),
                participant1=participant1,
                participant2=participant2,
            )

            token_network.handle_channel_balance_update_message(
                PFSCapacityUpdate(
                    canonical_identifier=CanonicalIdentifier(
                        chain_identifier=ChainID(61),
                        channel_identifier=ChannelID(channel_id),
                        token_network_address=TokenNetworkAddress(token_network.address),
                    ),
                    updating_participant=addresses[p1_index],
                    other_participant=addresses[p2_index],
                    updating_nonce=Nonce(1),
                    other_nonce=Nonce(1),
                    updating_capacity=p1_capacity,
                    other_capacity=p2_capacity,
                    reveal_timeout=p1_reveal_timeout,
                    signature=EMPTY_SIGNATURE,
                ),
                updating_capacity_partner=TokenAmount(0),
                other_capacity_partner=TokenAmount(0),
            )
            token_network.handle_channel_balance_update_message(
                PFSCapacityUpdate(
                    canonical_identifier=CanonicalIdentifier(
                        chain_identifier=ChainID(61),
                        channel_identifier=ChannelID(channel_id),
                        token_network_address=TokenNetworkAddress(token_network.address),
                    ),
                    updating_participant=addresses[p2_index],
                    other_participant=addresses[p1_index],
                    updating_nonce=Nonce(2),
                    other_nonce=Nonce(1),
                    updating_capacity=p2_capacity,
                    other_capacity=p1_capacity,
                    reveal_timeout=p2_reveal_timeout,
                    signature=EMPTY_SIGNATURE,
                ),
                updating_capacity_partner=TokenAmount(p1_capacity),
                other_capacity_partner=TokenAmount(p2_capacity),
            )

            # Update presence state according to scenario
            reachability_state.reachabilities[participant1] = p1_reachability
            reachability_state.reachabilities[participant2] = p2_reachability
Example #23
def populate_token_network_random(
    token_network_model: TokenNetwork, private_keys: List[str]
) -> None:
    number_of_channels = 300
    # seed for pseudo-randomness from a config constant, which changes from time to time
    random.seed(number_of_channels)

    for channel_id_int in range(number_of_channels):
        channel_id = ChannelID(channel_id_int)

        private_key1, private_key2 = random.sample(private_keys, 2)
        address1 = private_key_to_address(private_key1)
        address2 = private_key_to_address(private_key2)
        token_network_model.handle_channel_opened_event(
            channel_identifier=channel_id,
            participant1=address1,
            participant2=address2,
        )

        # deposit to channels
        deposit1 = TokenAmount(random.randint(0, 1000))
        deposit2 = TokenAmount(random.randint(0, 1000))
        address1, address2 = token_network_model.channel_id_to_addresses[channel_id]
        token_network_model.handle_channel_balance_update_message(
            PFSCapacityUpdate(
                canonical_identifier=CanonicalIdentifier(
                    chain_identifier=ChainID(61),
                    channel_identifier=channel_id,
                    token_network_address=TokenNetworkAddress(token_network_model.address),
                ),
                updating_participant=address1,
                other_participant=address2,
                updating_nonce=Nonce(1),
                other_nonce=Nonce(1),
                updating_capacity=deposit1,
                other_capacity=deposit2,
                reveal_timeout=BlockTimeout(2),
                signature=EMPTY_SIGNATURE,
            ),
            updating_capacity_partner=TokenAmount(0),
            other_capacity_partner=TokenAmount(0),
        )
        token_network_model.handle_channel_balance_update_message(
            PFSCapacityUpdate(
                canonical_identifier=CanonicalIdentifier(
                    chain_identifier=ChainID(61),
                    channel_identifier=channel_id,
                    token_network_address=TokenNetworkAddress(token_network_model.address),
                ),
                updating_participant=address2,
                other_participant=address1,
                updating_nonce=Nonce(2),
                other_nonce=Nonce(1),
                updating_capacity=deposit2,
                other_capacity=deposit1,
                reveal_timeout=BlockTimeout(2),
                signature=EMPTY_SIGNATURE,
            ),
            updating_capacity_partner=TokenAmount(deposit1),
            other_capacity_partner=TokenAmount(deposit2),
        )
Example #24
def test_monitor_new_balance_proof_event_handler_sets_update_status(context: Context):
    context = setup_state_with_closed_channel(context)

    new_balance_event = ReceiveMonitoringNewBalanceProofEvent(
        token_network_address=DEFAULT_TOKEN_NETWORK_ADDRESS,
        channel_identifier=DEFAULT_CHANNEL_IDENTIFIER,
        reward_amount=TokenAmount(1),
        nonce=Nonce(2),
        ms_address=Address(bytes([4] * 20)),
        raiden_node_address=DEFAULT_PARTICIPANT2,
        block_number=BlockNumber(62),
    )

    channel = context.database.get_channel(
        new_balance_event.token_network_address, new_balance_event.channel_identifier
    )
    assert channel
    assert channel.update_status is None
    assert get_scheduled_claim_event(context.database) is None

    monitor_new_balance_proof_event_handler(new_balance_event, context)

    assert context.database.channel_count() == 1
    channel = context.database.get_channel(
        new_balance_event.token_network_address, new_balance_event.channel_identifier
    )
    assert channel
    assert channel.update_status is not None
    assert channel.update_status.nonce == 2
    assert channel.update_status.update_sender_address == bytes([4] * 20)

    # closing block * avg. time per block + token network settle timeout
    expected_trigger_timestamp = 52 * 15 + context.database.get_token_network_settle_timeout(
        channel.token_network_address
    )

    scheduled_claim_event = get_scheduled_claim_event(context.database)
    assert scheduled_claim_event is not None
    assert scheduled_claim_event.trigger_timestamp == expected_trigger_timestamp

    new_balance_event2 = ReceiveMonitoringNewBalanceProofEvent(
        token_network_address=DEFAULT_TOKEN_NETWORK_ADDRESS,
        channel_identifier=DEFAULT_CHANNEL_IDENTIFIER,
        reward_amount=TokenAmount(1),
        nonce=Nonce(5),
        ms_address=Address(bytes([4] * 20)),
        raiden_node_address=DEFAULT_PARTICIPANT2,
        block_number=BlockNumber(63),
    )

    monitor_new_balance_proof_event_handler(new_balance_event2, context)

    assert context.database.channel_count() == 1
    channel = context.database.get_channel(
        new_balance_event.token_network_address, new_balance_event.channel_identifier
    )
    assert channel
    assert channel.update_status is not None
    assert channel.update_status.nonce == 5
    assert channel.update_status.update_sender_address == bytes([4] * 20)

    scheduled_claim_event = get_scheduled_claim_event(context.database)
    assert scheduled_claim_event is not None
    assert scheduled_claim_event.trigger_timestamp == expected_trigger_timestamp