def test_save_and_load_token_networks(pathfinding_service_mock_empty):
    """A token network built from events survives a database round-trip intact."""
    service = pathfinding_service_mock_empty

    network_address = TokenNetworkAddress(bytes([2] * 20))
    received_events = (
        ReceiveTokenNetworkCreatedEvent(
            token_address=TokenAddress(bytes([1] * 20)),
            token_network_address=network_address,
            settle_timeout=DEFAULT_TOKEN_NETWORK_SETTLE_TIMEOUT,
            block_number=BlockNumber(1),
        ),
        ReceiveChannelOpenedEvent(
            token_network_address=network_address,
            channel_identifier=ChannelID(1),
            participant1=Address(bytes([3] * 20)),
            participant2=Address(bytes([4] * 20)),
            block_number=BlockNumber(2),
        ),
    )
    for received in received_events:
        service.handle_event(received)
    assert len(service.token_networks) == 1

    reloaded_networks = service._load_token_networks()  # pylint: disable=protected-access
    assert len(reloaded_networks) == 1

    # The reloaded network must mirror the in-memory one.
    original_network = next(iter(service.token_networks.values()))
    reloaded_network = next(iter(reloaded_networks.values()))
    assert reloaded_network.address == original_network.address
    assert reloaded_network.channel_id_to_addresses == original_network.channel_id_to_addresses
    assert reloaded_network.G.nodes == original_network.G.nodes
def test_channel_closed_event_handler_closes_existing_channel(context: Context):
    """Closing an open channel marks it CLOSED and schedules one monitoring action."""
    context = setup_state_with_open_channel(context)
    head_block = get_posix_utc_time_now() // 15

    close_kwargs = dict(
        token_network_address=DEFAULT_TOKEN_NETWORK_ADDRESS,
        channel_identifier=DEFAULT_CHANNEL_IDENTIFIER,
        closing_participant=DEFAULT_PARTICIPANT2,
    )

    channel_closed_event_handler(
        ReceiveChannelClosedEvent(block_number=BlockNumber(head_block + 1), **close_kwargs),
        context,
    )

    assert context.database.channel_count() == 1
    assert_channel_state(context, ChannelState.CLOSED)

    # A second close event at an older block must not change anything.
    channel_closed_event_handler(
        ReceiveChannelClosedEvent(block_number=BlockNumber(head_block), **close_kwargs),
        context,
    )

    # ActionMonitoringTriggeredEvent has been triggered
    assert context.database.scheduled_event_count() == 1

    assert context.database.channel_count() == 1
    assert_channel_state(context, ChannelState.CLOSED)
# Example 3
def test_limit_inclusivity_in_query_blockchain_events(
        web3: Web3, wait_for_blocks, token_network_registry_contract):
    """Both ``from_block`` and ``to_block`` bounds of event queries are inclusive."""
    query = create_tnr_contract_events_query(
        web3, token_network_registry_contract.address)

    # A new token network has been registered by the `token_network_registry_contract` fixture
    events = query()
    assert len(events) == 1
    event = events[0]
    assert event["event"] == EVENT_TOKEN_NETWORK_CREATED
    registry_event_block = BlockNumber(event["blockNumber"])

    # test to_block is inclusive
    events = query_blockchain_events(
        web3=web3,
        contract_addresses=[token_network_registry_contract.address],
        from_block=BlockNumber(0),
        to_block=BlockNumber(registry_event_block - 1),
    )
    assert len(events) == 0

    events = query_blockchain_events(
        web3=web3,
        contract_addresses=[token_network_registry_contract.address],
        from_block=BlockNumber(0),
        to_block=registry_event_block,
    )
    assert len(events) == 1

    # mine some more blocks
    wait_for_blocks(5)
    current_block_number = web3.eth.block_number
    assert current_block_number > registry_event_block

    # test from_block is inclusive
    events = query_blockchain_events(
        web3=web3,
        contract_addresses=[token_network_registry_contract.address],
        from_block=BlockNumber(registry_event_block + 1),
        to_block=current_block_number,
    )
    assert len(events) == 0

    events = query_blockchain_events(
        web3=web3,
        contract_addresses=[token_network_registry_contract.address],
        from_block=registry_event_block,
        to_block=current_block_number,
    )
    assert len(events) == 1

    # test that querying just one block works
    events = query_blockchain_events(
        web3=web3,
        contract_addresses=[token_network_registry_contract.address],
        from_block=registry_event_block,
        to_block=registry_event_block,
    )
    assert len(events) == 1
def get_contract_addresses_and_start_block(
    chain_id: ChainID,
    contracts: List[str],
    address_overwrites: Dict[str, Address],
    development_environment: ContractDevEnvironment = ContractDevEnvironment.DEMO,
    contracts_version: Optional[str] = None,
) -> Tuple[Dict[str, Address], BlockNumber]:
    """Returns contract addresses and start query block for a given chain and contracts version.

    The default contracts can be overwritten by the additional parameters.

    Args:
        chain_id: The chain id to look for deployed contracts.
        contracts: The list of contracts which should be considered
        address_overwrites: Dict of addresses which should be used instead of
            the ones in the requested deployment.
        development_environment: Which contract deployment environment to read
            the deployment data from (DEMO by default).
        contracts_version: The version of the contracts to use.

    Returns:
        A tuple ``(addresses, start_block)``: the contract address mapping and
        the block to start querying from. The start block is zero when any
        address is overwritten (the deployment block is unknown then),
        otherwise the earliest deployment block among the given contracts.
    """
    contract_data = get_contracts_deployment_info(
        chain_id=chain_id,
        version=contracts_version,
        development_environment=development_environment,
    )
    # Without deployment data we cannot resolve any addresses; abort.
    if not contract_data:
        log.error(
            "No deployed contracts were found at the default registry",
            contracts_version=contracts_version,
        )
        sys.exit(1)

    # Get deployed addresses for those contracts which have no overwrites
    addresses = {
        c: (
            address_overwrites.get(c)
            or to_canonical_address(contract_data["contracts"][c]["address"])
        )
        for c in contracts
    }

    # Set start block to zero if any contract addresses are overwritten
    if any(address_overwrites.values()):
        start_block = BlockNumber(0)
    else:
        start_block = BlockNumber(
            max(0, min(contract_data["contracts"][c]["block_number"] for c in contracts))
        )

    return addresses, start_block
# Example 5
    def __init__(
        self,
        filename: str,
        chain_id: ChainID,
        pfs_address: Address,
        sync_start_block: BlockNumber = BlockNumber(0),
        allow_create: bool = False,
        enable_tracing: bool = False,
        **contract_addresses: Address,
    ):
        """Open (or create) the PFS database and initialize its base state.

        Args:
            filename: Path of the SQLite database file (e.g. ":memory:").
            chain_id: Chain id the stored state belongs to; passed to ``_setup``.
            pfs_address: Address of this pathfinding service; stored on the
                instance and registered as the ``receiver`` during setup.
            sync_start_block: First block to sync events from on a fresh DB.
            allow_create: Forwarded to the base class; allow creating the file.
            enable_tracing: Forwarded to the base class to enable SQL tracing.
            **contract_addresses: Deployed contract addresses keyed by contract
                name, forwarded to ``_setup``.
        """
        super().__init__(filename,
                         allow_create=allow_create,
                         enable_tracing=enable_tracing)
        self.pfs_address = pfs_address

        # Keep the journal around and skip inode updates.
        # References:
        # https://sqlite.org/atomiccommit.html#_persistent_rollback_journals
        # https://sqlite.org/pragma.html#pragma_journal_mode
        with self._cursor() as cursor:
            cursor.execute("PRAGMA journal_mode=PERSIST")

        self._setup(
            chain_id=chain_id,
            receiver=pfs_address,
            sync_start_block=sync_start_block,
            **contract_addresses,
        )
# Example 6
    def _run(self) -> None:  # pylint: disable=method-hidden
        """Main service greenlet: start the matrix listener, then poll for blocks.

        Exits the process when the broadcasting system is unreachable; raises
        when the listener does not finish startup within MATRIX_START_TIMEOUT.
        Afterwards loops until ``_is_running`` is set, processing confirmed
        blocks each iteration.
        """
        try:
            self.matrix_listener.start()
        except (Timeout, ConnectionError) as exc:
            log.critical("Could not connect to broadcasting system.", exc=exc)
            sys.exit(1)

        # Link the listener greenlet to our startup event — presumably so a
        # crashed listener also unblocks the wait below; verify against
        # MatrixListener's lifecycle.
        self.matrix_listener.link(self.startup_finished)
        try:
            self.matrix_listener.startup_finished.get(
                timeout=MATRIX_START_TIMEOUT)
        except Timeout:
            raise Exception("MatrixListener did not start in time.")
        self.startup_finished.set()

        log.info(
            "Listening to token network registry",
            registry_address=self.registry_address,
            start_block=self.database.get_latest_committed_block(),
        )
        while not self._is_running.is_set():
            # Only process blocks that already have the required confirmations.
            self._process_new_blocks(
                BlockNumber(self.web3.eth.block_number -
                            self.required_confirmations))

            # Let tests waiting for this event know that we're done with processing
            self.updated.set()
            self.updated.clear()

            # Sleep, then collect errors from greenlets
            gevent.sleep(self._poll_interval)
            gevent.joinall({self.matrix_listener}, timeout=0, raise_error=True)
# Example 7
def pathfinding_service_mock_empty() -> Generator[PathfindingService, None, None]:
    """Yield a PathfindingService backed by mocks, with no token networks.

    web3, the user deposit contract and the MatrixListener are all mocked; the
    database is in-memory. ``stop()`` is guaranteed to run even when the
    consuming test raises, so the service does not leak between tests.
    """
    with patch("pathfinding_service.service.MatrixListener", new=Mock):
        web3_mock = Web3Mock()

        # UDC mock: fixed effective balance and token address.
        mock_udc = Mock(address=bytes([8] * 20))
        mock_udc.functions.effectiveBalance.return_value.call.return_value = 10000
        mock_udc.functions.token.return_value.call.return_value = to_checksum_address(
            bytes([7] * 20)
        )
        pathfinding_service = PathfindingService(
            web3=web3_mock,
            contracts={
                CONTRACT_TOKEN_NETWORK_REGISTRY: Mock(address=bytes([9] * 20)),
                CONTRACT_USER_DEPOSIT: mock_udc,
            },
            sync_start_block=BlockNumber(0),
            required_confirmations=BlockTimeout(0),
            poll_interval=0,
            private_key=PrivateKey(
                decode_hex("3a1076bf45ab87712ad64ccb3b10217737f7faacbf2872e88fdd9a537d8fe266")
            ),
            db_filename=":memory:",
        )

        try:
            yield pathfinding_service
        finally:
            # Bugfix: the original called stop() after a bare yield, so cleanup
            # was skipped whenever the consuming test raised.
            pathfinding_service.stop()
# Example 8
def monitoring_service(  # pylint: disable=too-many-arguments
    ms_address,
    web3: Web3,
    monitoring_service_contract,
    user_deposit_contract,
    token_network_registry_contract,
    ms_database: Database,
    get_private_key,
    service_registry,
):
    """Create a MonitoringService wired to the test contracts.

    Confirmation count and poll interval are minimized so tests run fast, and
    the database is shared with the request collector (see comment below).
    """
    ms = MonitoringService(
        web3=web3,
        private_key=get_private_key(ms_address),
        contracts={
            CONTRACT_TOKEN_NETWORK_REGISTRY: token_network_registry_contract,
            CONTRACT_MONITORING_SERVICE: monitoring_service_contract,
            CONTRACT_USER_DEPOSIT: user_deposit_contract,
            CONTRACT_SERVICE_REGISTRY: service_registry,
        },
        sync_start_block=BlockNumber(0),
        required_confirmations=BlockTimeout(0),  # for faster tests
        poll_interval=0.01,  # for faster tests
        db_filename=":memory:",
    )
    # We need a shared db between MS and RC so the MS can use MR saved by the RC
    ms.context.database = ms_database
    ms.database = ms_database
    ms.chain_id = TEST_CHAIN_ID  # workaround for https://github.com/ethereum/web3.py/issues/1677
    return ms
def test_channel_bp_updated_event_handler_channel_not_in_database(context: Context):
    """A balance-proof update for an unknown channel is counted as a state error."""
    metrics_state = save_metrics_state(metrics.REGISTRY)
    # only setup the token network without channels
    create_default_token_network(context)

    balance_proof_event = ReceiveNonClosingBalanceProofUpdatedEvent(
        token_network_address=DEFAULT_TOKEN_NETWORK_ADDRESS,
        channel_identifier=DEFAULT_CHANNEL_IDENTIFIER,
        closing_participant=DEFAULT_PARTICIPANT2,
        nonce=Nonce(2),
        block_number=BlockNumber(23),
    )

    # Sanity: the channel the event refers to must not exist yet.
    assert (
        context.database.get_channel(
            balance_proof_event.token_network_address,
            balance_proof_event.channel_identifier,
        )
        is None
    )
    assert context.database.channel_count() == 0

    non_closing_balance_proof_updated_event_handler(balance_proof_event, context)

    state_error_delta = metrics_state.get_delta(
        "events_log_errors_total", labels=metrics.ErrorCategory.STATE.to_label_dict()
    )
    assert state_error_delta == 1.0
def test_channel_bp_updated_event_handler_lower_nonce_than_expected(context: Context):
    """Replaying a balance proof with a non-increasing nonce counts a protocol error."""
    metrics_state = save_metrics_state(metrics.REGISTRY)
    context = setup_state_with_closed_channel(context)

    balance_proof_event = ReceiveNonClosingBalanceProofUpdatedEvent(
        token_network_address=DEFAULT_TOKEN_NETWORK_ADDRESS,
        channel_identifier=DEFAULT_CHANNEL_IDENTIFIER,
        closing_participant=DEFAULT_PARTICIPANT2,
        nonce=Nonce(1),
        block_number=BlockNumber(23),
    )

    stored_channel = context.database.get_channel(
        balance_proof_event.token_network_address, balance_proof_event.channel_identifier
    )
    assert context.database.channel_count() == 1
    assert stored_channel
    assert stored_channel.update_status is None

    # send twice the same message to trigger the non-increasing nonce
    for _ in range(2):
        non_closing_balance_proof_updated_event_handler(balance_proof_event, context)

    protocol_error_delta = metrics_state.get_delta(
        "events_log_errors_total", labels=metrics.ErrorCategory.PROTOCOL.to_label_dict()
    )
    assert protocol_error_delta == 1.0
def test_prometheus_event_handling_raise_exception(pathfinding_service_mock_empty):
    """An exception from a handler is re-raised AND counted in the exception metric."""
    metrics_state = save_metrics_state(metrics.REGISTRY)
    pfs = pathfinding_service_mock_empty

    event = ReceiveTokenNetworkCreatedEvent(
        token_address=TokenAddress(bytes([1] * 20)),
        token_network_address=TokenNetworkAddress(bytes([2] * 20)),
        settle_timeout=DEFAULT_TOKEN_NETWORK_SETTLE_TIMEOUT,
        block_number=BlockNumber(1),
    )

    pfs.handle_token_network_created = Mock(side_effect=KeyError())

    with pytest.raises(KeyError):
        pfs.handle_event(event)

    # The exceptions raised in the wrapped part of the prometheus logging
    # will not be handled anywhere at the moment.
    # Force an exception and test correct logging of it anyways,
    # since at some point higher in the call stack we could catch exceptions.
    #
    # Bugfix: this assertion used to sit INSIDE the `pytest.raises` block after
    # the raising call, so it was never executed. It must run after the block.
    assert (
        metrics_state.get_delta(
            "events_exceptions_total",
            labels={"event_type": "ReceiveTokenNetworkCreatedEvent"},
        )
        == 1.0
    )
# Example 12
def test_get_blockchain_events_returns_early_for_invalid_interval(
        web3: Web3, token_network_registry_contract: Contract):
    """When from_block lies beyond to_block, no events are fetched at all."""
    registry_address = to_canonical_address(token_network_registry_contract.address)
    chain_state = BlockchainState(
        chain_id=ChainID(1),
        token_network_registry_address=registry_address,
        latest_committed_block=BlockNumber(4),
    )

    # from_block (10) > to_block (5): the interval is empty.
    events = get_blockchain_events(
        web3=web3,
        token_network_addresses=[],
        chain_state=chain_state,
        from_block=BlockNumber(10),
        to_block=BlockNumber(5),
    )

    assert not events
# Example 13
 def query_callback():
     """Query all events for `contract_address` from genesis up to the current head."""
     return query_blockchain_events(
         web3=web3,
         contract_addresses=[contract_address],
         from_block=BlockNumber(0),
         to_block=web3.eth.block_number,
     )
def test_token_channel_coop_settled(pathfinding_service_mock, token_network_model):
    """Settle events only remove channels for known networks and channel ids."""
    setup_channel(pathfinding_service_mock, token_network_model)

    def assert_counts(expected_channels):
        assert len(pathfinding_service_mock.token_networks) == 1
        assert len(token_network_model.channel_id_to_addresses) == expected_channels

    # Test invalid token network address
    settle_event = ReceiveChannelSettledEvent(
        token_network_address=make_token_network_address(),
        channel_identifier=ChannelID(1),
        block_number=BlockNumber(2),
    )
    pathfinding_service_mock.handle_event(settle_event)
    assert_counts(expected_channels=1)

    # Test proper token network address
    settle_event = dataclasses.replace(
        settle_event, token_network_address=token_network_model.address
    )
    pathfinding_service_mock.handle_event(settle_event)
    assert_counts(expected_channels=0)

    # Test non-existent channel
    settle_event = dataclasses.replace(settle_event, channel_identifier=ChannelID(123))
    pathfinding_service_mock.handle_event(settle_event)
    assert_counts(expected_channels=0)
def test_channel_closed_event_handler_ignores_existing_channel_after_timeout(context: Context):
    """A close event far in the past still closes the channel but schedules nothing."""
    context = setup_state_with_open_channel(context)
    # Move the chain head far beyond the closing block.
    context.web3.eth.block_number = BlockNumber(200)

    close_event = ReceiveChannelClosedEvent(
        token_network_address=DEFAULT_TOKEN_NETWORK_ADDRESS,
        channel_identifier=DEFAULT_CHANNEL_IDENTIFIER,
        closing_participant=DEFAULT_PARTICIPANT2,
        block_number=BlockNumber(52),
    )
    channel_closed_event_handler(close_event, context)

    # no ActionMonitoringTriggeredEvent has been triggered
    assert context.database.scheduled_event_count() == 0
    assert context.database.channel_count() == 1
    assert_channel_state(context, ChannelState.CLOSED)
def test_channel_bp_updated_event_handler_sets_update_status_if_not_set(context: Context):
    """Balance-proof updates create and then advance the channel's update status.

    The first update sets ``update_status`` (nonce 2); a later update with a
    higher nonce overwrites it (nonce 5). The sender is DEFAULT_PARTICIPANT1
    in both cases. Local helpers remove the duplicated event construction and
    channel re-reads of the original.
    """
    context = setup_state_with_closed_channel(context)

    def make_event(nonce, block_number):
        # All events target the default channel; only nonce/block vary.
        return ReceiveNonClosingBalanceProofUpdatedEvent(
            token_network_address=DEFAULT_TOKEN_NETWORK_ADDRESS,
            channel_identifier=DEFAULT_CHANNEL_IDENTIFIER,
            closing_participant=DEFAULT_PARTICIPANT2,
            nonce=Nonce(nonce),
            block_number=BlockNumber(block_number),
        )

    def get_default_channel():
        # Re-read the channel from the database to observe handler effects.
        return context.database.get_channel(
            DEFAULT_TOKEN_NETWORK_ADDRESS, DEFAULT_CHANNEL_IDENTIFIER
        )

    channel = get_default_channel()
    assert channel
    assert channel.update_status is None

    non_closing_balance_proof_updated_event_handler(make_event(nonce=2, block_number=23), context)

    assert context.database.channel_count() == 1
    channel = get_default_channel()
    assert channel
    assert channel.update_status is not None
    assert channel.update_status.nonce == 2
    assert channel.update_status.update_sender_address == DEFAULT_PARTICIPANT1

    non_closing_balance_proof_updated_event_handler(make_event(nonce=5, block_number=53), context)

    assert context.database.channel_count() == 1
    channel = get_default_channel()
    assert channel
    assert channel.update_status is not None
    assert channel.update_status.nonce == 5
    assert channel.update_status.update_sender_address == DEFAULT_PARTICIPANT1
def test_prometheus_event_handling_no_exceptions(pathfinding_service_mock_empty):
    """Successful event handling records processing time and zero exceptions."""
    metrics_state = save_metrics_state(metrics.REGISTRY)
    pfs = pathfinding_service_mock_empty

    network_address = TokenNetworkAddress(bytes([2] * 20))
    events = [
        ReceiveTokenNetworkCreatedEvent(
            token_address=TokenAddress(bytes([1] * 20)),
            token_network_address=network_address,
            settle_timeout=DEFAULT_TOKEN_NETWORK_SETTLE_TIMEOUT,
            block_number=BlockNumber(1),
        ),
        ReceiveChannelOpenedEvent(
            token_network_address=network_address,
            channel_identifier=ChannelID(1),
            participant1=Address(bytes([3] * 20)),
            participant2=Address(bytes([4] * 20)),
            block_number=BlockNumber(2),
        ),
    ]
    for event in events:
        pfs.handle_event(event)
        event_labels = {"event_type": event.__class__.__name__}

        # check that we have non-zero processing time for the events we created
        duration = metrics_state.get_delta(
            "events_processing_duration_seconds_sum", labels=event_labels
        )
        assert duration > 0.0

        # there should be no exception raised
        exceptions = metrics_state.get_delta("events_exceptions_total", labels=event_labels)
        assert exceptions == 0.0
def setup_channel(pathfinding_service_mock, token_network_model):
    """Open channel 1 between PARTICIPANT1 and PARTICIPANT2 in the given network."""
    open_event = ReceiveChannelOpenedEvent(
        token_network_address=token_network_model.address,
        channel_identifier=ChannelID(1),
        participant1=PARTICIPANT1,
        participant2=PARTICIPANT2,
        block_number=BlockNumber(1),
    )
    # Sanity checks before the event is applied.
    assert len(pathfinding_service_mock.token_networks) == 1
    assert not token_network_model.channel_id_to_addresses
    pathfinding_service_mock.handle_event(open_event)
 def new_service(filename):
     """Create a PathfindingService backed by a DB file named `filename` under `tmpdir`."""
     service = PathfindingService(
         web3=Web3Mock(),
         private_key=server_private_key,
         contracts=contracts,
         sync_start_block=BlockNumber(0),
         required_confirmations=BlockTimeout(0),
         poll_interval=0,
         db_filename=os.path.join(tmpdir, filename),
     )
     return service
def test_channel_settled_event_handler_leaves_existing_channel(context: Context):
    """A settle event for an unknown channel id does not touch the stored channel."""
    context = setup_state_with_closed_channel(context)

    # Channel id 4 does not match the stored default channel.
    unknown_channel_settle = ReceiveChannelSettledEvent(
        token_network_address=DEFAULT_TOKEN_NETWORK_ADDRESS,
        channel_identifier=ChannelID(4),
        block_number=BlockNumber(52),
    )
    channel_settled_event_handler(unknown_channel_settle, context)

    assert context.database.channel_count() == 1
    assert_channel_state(context, ChannelState.CLOSED)
# Example 21
def test_get_blockchain_events_adaptive_reduces_block_interval_after_timeout(
        web3: Web3, token_network_registry_contract: Contract):
    """A ReadTimeout during querying shrinks the filter interval by a factor of five."""
    registry_address = to_canonical_address(token_network_registry_contract.address)
    chain_state = BlockchainState(
        chain_id=ChainID(1),
        token_network_registry_address=registry_address,
        latest_committed_block=BlockNumber(4),
    )
    assert chain_state.current_event_filter_interval == DEFAULT_FILTER_INTERVAL

    with patch("raiden_libs.blockchain.get_blockchain_events",
               side_effect=ReadTimeout):
        _ = get_blockchain_events_adaptive(
            web3=web3,
            token_network_addresses=[],
            blockchain_state=chain_state,
            latest_confirmed_block=BlockNumber(1),
        )
        assert chain_state.current_event_filter_interval == DEFAULT_FILTER_INTERVAL // 5
def test_logging_processor():
    """format_to_hex converts bytes addresses to checksum form, recursively.

    Covers three input shapes: a bare address, an event dataclass, and a
    message with a nested CanonicalIdentifier.
    """
    # test if our logging processor changes bytes to checksum addresses
    # even if bytes-addresses are entangled into events
    logger = Mock()
    log_method = Mock()

    # Case 1: a bare bytes address in the event dict.
    address = TokenAddress(b"\x7f[\xf6\xc9To\xa8\x185w\xe4\x9f\x15\xbc\xef@mr\xd5\xd9")
    address_log = format_to_hex(
        _logger=logger, _log_method=log_method, event_dict=dict(address=address)
    )
    assert to_checksum_address(address) == address_log["address"]

    # Case 2: addresses nested inside an event dataclass.
    address2 = Address(b"\x7f[\xf6\xc9To\xa8\x185w\xe4\x9f\x15\xbc\xef@mr\xd5\xd1")
    event = ReceiveTokenNetworkCreatedEvent(
        token_address=address,
        token_network_address=TokenNetworkAddress(address2),
        settle_timeout=DEFAULT_TOKEN_NETWORK_SETTLE_TIMEOUT,
        block_number=BlockNumber(1),
    )
    event_log = format_to_hex(_logger=logger, _log_method=log_method, event_dict=dict(event=event))
    assert (  # pylint: disable=unsubscriptable-object
        to_checksum_address(address) == event_log["event"]["token_address"]
    )
    assert (  # pylint: disable=unsubscriptable-object
        to_checksum_address(address2) == event_log["event"]["token_network_address"]
    )
    assert (  # pylint: disable=unsubscriptable-object
        event_log["event"]["type_name"] == "ReceiveTokenNetworkCreatedEvent"
    )

    # Case 3: an address nested two levels deep inside a message.
    message = PFSFeeUpdate(
        canonical_identifier=CanonicalIdentifier(
            chain_identifier=ChainID(61),
            token_network_address=TokenNetworkAddress(address),
            channel_identifier=ChannelID(1),
        ),
        updating_participant=PARTICIPANT1,
        fee_schedule=FeeScheduleState(),
        timestamp=datetime.utcnow(),
        signature=EMPTY_SIGNATURE,
    )
    message_log = format_to_hex(
        _logger=logger, _log_method=log_method, event_dict=dict(message=message)
    )
    assert (  # pylint: disable=unsubscriptable-object
        to_checksum_address(address)
        == message_log["message"]["canonical_identifier"]["token_network_address"]
    )
    assert (  # pylint: disable=unsubscriptable-object
        message_log["message"]["type_name"] == "PFSFeeUpdate"
    )
# Example 23
def create_channel(
        update_status: Optional[OnChainUpdateStatus] = None) -> Channel:
    """Build a Channel between the default participants with random state/closing block."""
    random_state = random.choice(list(ChannelState))
    random_closing_block = BlockNumber(random.randint(0, UINT256_MAX))
    return Channel(
        token_network_address=DEFAULT_TOKEN_NETWORK_ADDRESS,
        identifier=DEFAULT_CHANNEL_IDENTIFIER,
        participant1=DEFAULT_PARTICIPANT1,
        participant2=DEFAULT_PARTICIPANT2,
        state=random_state,
        closing_block=random_closing_block,
        closing_participant=DEFAULT_PARTICIPANT1,
        monitor_tx_hash=make_transaction_hash(),
        claim_tx_hash=make_transaction_hash(),
        update_status=update_status,
    )
# Example 24
def get_pessimistic_udc_balance(udc: Contract, address: Address,
                                from_block: BlockNumber,
                                to_block: BlockNumber) -> TokenAmount:
    """Get the effective UDC balance using the block with the lowest result.

    Blocks between the latest confirmed block and the latest block should be
    considered. For performance reasons, only the bounds of that range are
    checked. This is acceptable, since the effectiveBalance calculation already
    guards against withdraws by the user.
    """
    balances = []
    for block in (from_block, to_block):
        balances.append(
            udc.functions.effectiveBalance(address).call(
                block_identifier=BlockNumber(block)
            )
        )
    return min(balances)
# Example 25
 def new_ms(filename):
     """Create a MonitoringService with a mocked monitoring contract.

     The returned service exposes ``monitor_mock`` — the mock standing in for
     the contract's ``monitor(...).transact`` call — so tests can inspect it.
     """
     ms = MonitoringService(
         web3=Web3Mock(),
         private_key=server_private_key,
         contracts=contracts,
         db_filename=os.path.join(tmpdir, filename),
         poll_interval=0,
         required_confirmations=BlockTimeout(0),
         sync_start_block=BlockNumber(0),
     )
     msc = Mock()
     ms.context.monitoring_service_contract = msc
     ms.monitor_mock = msc.functions.monitor.return_value.transact
     ms.monitor_mock.return_value = bytes(0)
     return ms
def test_channel_opened_event_handler_adds_channel(context: Context):
    """An open event for a known token network stores one channel in state OPENED."""
    create_default_token_network(context)
    open_event = ReceiveChannelOpenedEvent(
        token_network_address=DEFAULT_TOKEN_NETWORK_ADDRESS,
        channel_identifier=DEFAULT_CHANNEL_IDENTIFIER,
        participant1=DEFAULT_PARTICIPANT1,
        participant2=DEFAULT_PARTICIPANT2,
        block_number=BlockNumber(42),
    )
    assert context.database.channel_count() == 0

    channel_opened_event_handler(open_event, context)

    assert context.database.channel_count() == 1
    assert_channel_state(context, ChannelState.OPENED)
def test_channel_closed_event_handler_trigger_action_monitor_event_without_monitor_request(
    context: Context,
):
    """Closing a channel schedules monitoring even when no monitor request exists."""
    context = setup_state_with_open_channel(context)
    head_block = get_posix_utc_time_now() // 15

    close_event = ReceiveChannelClosedEvent(
        token_network_address=DEFAULT_TOKEN_NETWORK_ADDRESS,
        channel_identifier=DEFAULT_CHANNEL_IDENTIFIER,
        closing_participant=DEFAULT_PARTICIPANT2,
        block_number=BlockNumber(head_block + 1),
    )
    channel_closed_event_handler(close_event, context)

    assert context.database.scheduled_event_count() == 1
def test_token_network_created_handlers_add_network(context: Context):
    """The created-event handler registers the network exactly once (idempotent)."""
    created_event = ReceiveTokenNetworkCreatedEvent(
        token_address=DEFAULT_TOKEN_ADDRESS,
        token_network_address=DEFAULT_TOKEN_NETWORK_ADDRESS,
        settle_timeout=DEFAULT_TOKEN_NETWORK_SETTLE_TIMEOUT,
        block_number=BlockNumber(12),
    )

    assert not context.database.get_token_network_addresses()

    token_network_created_handler(created_event, context)
    assert len(context.database.get_token_network_addresses()) == 1

    # Applying the same event again must not add a second entry.
    token_network_created_handler(created_event, context)
    assert len(context.database.get_token_network_addresses()) == 1
def setup_state_with_open_channel(context: Context) -> Context:
    """Create the default token network and open the default channel in it.

    Also stubs ``web3.eth.get_block`` so block timestamps are deterministic
    at 15 seconds per block, which the close handlers rely on for their
    time-based decisions.
    """
    create_default_token_network(context)
    event = ReceiveChannelOpenedEvent(
        token_network_address=DEFAULT_TOKEN_NETWORK_ADDRESS,
        channel_identifier=DEFAULT_CHANNEL_IDENTIFIER,
        participant1=DEFAULT_PARTICIPANT1,
        participant2=DEFAULT_PARTICIPANT2,
        block_number=BlockNumber(42),
    )
    assert context.database.channel_count() == 0

    channel_opened_event_handler(event, context)
    # Fake 15-second block times: "latest" maps to the current head's
    # timestamp, any numeric block x maps to x * 15.
    context.web3.eth.get_block = lambda x: Mock(
        timestamp=context.web3.eth.block_number * 15 if x == "latest" else x * 15
    )

    return context
def test_monitor_reward_claimed_event_handler(context: Context, log):
    """Reward claims are attributed to "us" or "them" based on the MS address."""
    metrics_state = save_metrics_state(metrics.REGISTRY)

    context = setup_state_with_closed_channel(context)

    def claims_delta(metric_name, who):
        return metrics_state.get_delta(metric_name, labels=who.to_label_dict())

    # A claim with our own MS address counts towards "us".
    claim_event = ReceiveMonitoringRewardClaimedEvent(
        ms_address=context.ms_state.address,
        amount=TokenAmount(1),
        reward_identifier="REWARD",
        block_number=BlockNumber(23),
    )
    monitor_reward_claim_event_handler(claim_event, context)

    assert claims_delta("economics_reward_claims_successful_total", metrics.Who.US) == 1.0
    assert claims_delta("economics_reward_claims_token_total", metrics.Who.US) == 1.0

    assert log.has("Successfully claimed reward")

    # The same claim from a foreign MS address counts towards "them".
    foreign_claim = dataclasses.replace(claim_event, ms_address=Address(bytes([3] * 20)))
    monitor_reward_claim_event_handler(foreign_claim, context)

    assert claims_delta("economics_reward_claims_successful_total", metrics.Who.THEY) == 1.0
    assert claims_delta("economics_reward_claims_token_total", metrics.Who.THEY) == 1.0

    assert log.has("Another MS claimed reward")