def test_prometheus_event_handling_raise_exception(pathfinding_service_mock_empty):
    metrics_state = save_metrics_state(metrics.REGISTRY)
    pfs = pathfinding_service_mock_empty

    event = ReceiveTokenNetworkCreatedEvent(
        token_address=TokenAddress(bytes([1] * 20)),
        token_network_address=TokenNetworkAddress(bytes([2] * 20)),
        settle_timeout=DEFAULT_TOKEN_NETWORK_SETTLE_TIMEOUT,
        block_number=BlockNumber(1),
    )

    pfs.handle_token_network_created = Mock(side_effect=KeyError())

    with pytest.raises(KeyError):
        pfs.handle_event(event)

    # The exceptions raised in the wrapped part of the prometheus logging
    # are not handled anywhere at the moment. Force an exception and test
    # correct logging of it anyway, since at some point higher in the call
    # stack we could catch exceptions. Note that the assertion must live
    # outside the `with` block: code after the raising call inside
    # `pytest.raises` would never execute.
    assert (
        metrics_state.get_delta(
            "events_exceptions_total",
            labels={"event_type": "ReceiveTokenNetworkCreatedEvent"},
        )
        == 1.0
    )
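

# For context, a minimal sketch of the kind of metrics wrapper the tests in
# this file exercise, written against the real prometheus_client API. The
# helper name `collect_event_metrics` and its structure are assumptions for
# illustration, not the service's actual implementation; only the metric
# names come from the assertions above.
from contextlib import contextmanager

from prometheus_client import Counter, Histogram

EVENTS_EXCEPTIONS = Counter(
    "events_exceptions_total",
    "Number of exceptions raised while handling an event",
    ["event_type"],
)
EVENTS_PROCESSING_DURATION = Histogram(
    "events_processing_duration_seconds",
    "Time spent handling an event",
    ["event_type"],
)


@contextmanager
def collect_event_metrics(event):  # hypothetical helper
    event_type = event.__class__.__name__
    # count_exceptions() increments the counter and re-raises, which is why
    # the test above still sees the KeyError escape handle_event().
    with EVENTS_EXCEPTIONS.labels(event_type=event_type).count_exceptions():
        with EVENTS_PROCESSING_DURATION.labels(event_type=event_type).time():
            yield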


def test_save_and_load_token_networks(pathfinding_service_mock_empty):
    pfs = pathfinding_service_mock_empty

    token_address = TokenAddress(bytes([1] * 20))
    token_network_address = TokenNetworkAddress(bytes([2] * 20))
    channel_id = ChannelID(1)
    p1 = Address(bytes([3] * 20))
    p2 = Address(bytes([4] * 20))
    events = [
        ReceiveTokenNetworkCreatedEvent(
            token_address=token_address,
            token_network_address=token_network_address,
            settle_timeout=DEFAULT_TOKEN_NETWORK_SETTLE_TIMEOUT,
            block_number=BlockNumber(1),
        ),
        ReceiveChannelOpenedEvent(
            token_network_address=token_network_address,
            channel_identifier=channel_id,
            participant1=p1,
            participant2=p2,
            block_number=BlockNumber(2),
        ),
    ]
    for event in events:
        pfs.handle_event(event)
    assert len(pfs.token_networks) == 1

    loaded_networks = pfs._load_token_networks()  # pylint: disable=protected-access
    assert len(loaded_networks) == 1

    orig = list(pfs.token_networks.values())[0]
    loaded = list(loaded_networks.values())[0]
    assert loaded.address == orig.address
    assert loaded.channel_id_to_addresses == orig.channel_id_to_addresses
    assert loaded.G.nodes == orig.G.nodes


def test_logging_processor():
    # Test that our logging processor converts bytes addresses to checksum
    # addresses, even when they are nested inside events.
    logger = Mock()
    log_method = Mock()

    address = TokenAddress(b"\x7f[\xf6\xc9To\xa8\x185w\xe4\x9f\x15\xbc\xef@mr\xd5\xd9")
    address_log = format_to_hex(
        _logger=logger, _log_method=log_method, event_dict=dict(address=address)
    )
    assert to_checksum_address(address) == address_log["address"]

    address2 = Address(b"\x7f[\xf6\xc9To\xa8\x185w\xe4\x9f\x15\xbc\xef@mr\xd5\xd1")
    event = ReceiveTokenNetworkCreatedEvent(
        token_address=address,
        token_network_address=TokenNetworkAddress(address2),
        settle_timeout=DEFAULT_TOKEN_NETWORK_SETTLE_TIMEOUT,
        block_number=BlockNumber(1),
    )
    event_log = format_to_hex(_logger=logger, _log_method=log_method, event_dict=dict(event=event))
    assert (  # pylint: disable=unsubscriptable-object
        to_checksum_address(address) == event_log["event"]["token_address"]
    )
    assert (  # pylint: disable=unsubscriptable-object
        to_checksum_address(address2) == event_log["event"]["token_network_address"]
    )
    assert (  # pylint: disable=unsubscriptable-object
        event_log["event"]["type_name"] == "ReceiveTokenNetworkCreatedEvent"
    )

    message = PFSFeeUpdate(
        canonical_identifier=CanonicalIdentifier(
            chain_identifier=ChainID(61),
            token_network_address=TokenNetworkAddress(address),
            channel_identifier=ChannelID(1),
        ),
        updating_participant=PARTICIPANT1,
        fee_schedule=FeeScheduleState(),
        timestamp=datetime.utcnow(),
        signature=EMPTY_SIGNATURE,
    )
    message_log = format_to_hex(
        _logger=logger, _log_method=log_method, event_dict=dict(message=message)
    )
    assert (  # pylint: disable=unsubscriptable-object
        to_checksum_address(address)
        == message_log["message"]["canonical_identifier"]["token_network_address"]
    )
    assert (  # pylint: disable=unsubscriptable-object
        message_log["message"]["type_name"] == "PFSFeeUpdate"
    )
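

# For context, a hedged sketch of what a structlog-style processor like
# `format_to_hex` has to do to satisfy the assertions above: walk the event
# dict recursively, replace 20-byte binary addresses with their checksummed
# hex form, and expose each dataclass's class name under "type_name". This is
# an illustrative assumption, not the actual processor.
import dataclasses

from eth_utils import to_checksum_address


def _checksummed(value):
    if isinstance(value, bytes) and len(value) == 20:
        # Treat any 20-byte value as an address; the real processor may be
        # stricter about which values it converts.
        return to_checksum_address(value)
    if dataclasses.is_dataclass(value):
        converted = {
            field.name: _checksummed(getattr(value, field.name))
            for field in dataclasses.fields(value)
        }
        converted["type_name"] = value.__class__.__name__
        return converted
    return value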


def test_prometheus_event_handling_no_exceptions(pathfinding_service_mock_empty):
    metrics_state = save_metrics_state(metrics.REGISTRY)
    pfs = pathfinding_service_mock_empty

    token_address = TokenAddress(bytes([1] * 20))
    token_network_address = TokenNetworkAddress(bytes([2] * 20))
    channel_id = ChannelID(1)
    p1 = Address(bytes([3] * 20))
    p2 = Address(bytes([4] * 20))
    events = [
        ReceiveTokenNetworkCreatedEvent(
            token_address=token_address,
            token_network_address=token_network_address,
            settle_timeout=DEFAULT_TOKEN_NETWORK_SETTLE_TIMEOUT,
            block_number=BlockNumber(1),
        ),
        ReceiveChannelOpenedEvent(
            token_network_address=token_network_address,
            channel_identifier=channel_id,
            participant1=p1,
            participant2=p2,
            block_number=BlockNumber(2),
        ),
    ]
    for event in events:
        pfs.handle_event(event)

        # check that we have non-zero processing time for the events we created
        assert (
            metrics_state.get_delta(
                "events_processing_duration_seconds_sum",
                labels={"event_type": event.__class__.__name__},
            )
            > 0.0
        )
        # there should be no exception raised
        assert (
            metrics_state.get_delta(
                "events_exceptions_total", labels={"event_type": event.__class__.__name__}
            )
            == 0.0
        )


def test_token_network_created(pathfinding_service_mock):
    token_address = TokenAddress(bytes([1] * 20))
    token_network_address = TokenNetworkAddress(bytes([2] * 20))
    network_event = ReceiveTokenNetworkCreatedEvent(
        token_address=token_address,
        token_network_address=token_network_address,
        settle_timeout=DEFAULT_TOKEN_NETWORK_SETTLE_TIMEOUT,
        block_number=BlockNumber(1),
    )

    assert not pathfinding_service_mock.follows_token_network(token_network_address)
    assert len(pathfinding_service_mock.token_networks) == 1

    pathfinding_service_mock.handle_event(network_event)
    assert pathfinding_service_mock.follows_token_network(token_network_address)
    assert len(pathfinding_service_mock.token_networks) == 2

    # Test idempotency
    pathfinding_service_mock.handle_event(network_event)
    assert pathfinding_service_mock.follows_token_network(token_network_address)
    assert len(pathfinding_service_mock.token_networks) == 2
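

# The idempotency asserted above follows naturally if the service keys token
# networks by address and ignores creation events for networks it already
# follows. A standalone sketch under that assumption (not the actual
# PathfindingService implementation):
class TokenNetworkTracker:
    def __init__(self):
        self.token_networks = {}

    def follows_token_network(self, address):
        return address in self.token_networks

    def handle_token_network_created(self, event):
        # Handling the same creation event twice must not add a second entry.
        self.token_networks.setdefault(event.token_network_address, object())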


def test_crash(tmpdir, mockchain):  # pylint: disable=too-many-locals
    """Process blocks and compare results with/without crash

    Somewhat meaningful crash handling is simulated by not including the
    UpdatedHeadBlockEvent in every block.
    """
    token_address = TokenAddress(bytes([1] * 20))
    token_network_address = TokenNetworkAddress(bytes([2] * 20))
    channel_id = ChannelID(1)
    p1 = Address(bytes([3] * 20))
    p2 = Address(bytes([4] * 20))
    events = [
        [
            ReceiveTokenNetworkCreatedEvent(
                token_address=token_address,
                token_network_address=token_network_address,
                settle_timeout=DEFAULT_TOKEN_NETWORK_SETTLE_TIMEOUT,
                block_number=BlockNumber(1),
            )
        ],
        [UpdatedHeadBlockEvent(BlockNumber(2))],
        [
            ReceiveChannelOpenedEvent(
                token_network_address=token_network_address,
                channel_identifier=channel_id,
                participant1=p1,
                participant2=p2,
                block_number=BlockNumber(3),
            )
        ],
        [UpdatedHeadBlockEvent(BlockNumber(4))],
    ]
    mockchain(events)

    server_private_key = PrivateKey(get_random_privkey())
    contracts = {
        CONTRACT_TOKEN_NETWORK_REGISTRY: ContractMock(),
        CONTRACT_USER_DEPOSIT: ContractMock(),
    }

    def new_service(filename):
        service = PathfindingService(
            web3=Web3Mock(),
            private_key=server_private_key,
            contracts=contracts,
            sync_start_block=BlockNumber(0),
            required_confirmations=BlockTimeout(0),
            poll_interval=0,
            db_filename=os.path.join(tmpdir, filename),
        )
        return service

    # initialize stable service
    stable_service = new_service("stable.db")

    # process each block and compare results between crashy and stable service
    for to_block in range(len(events)):
        crashy_service = new_service("crashy.db")  # new instance to simulate crash
        result_state: List[dict] = []
        for service in [stable_service, crashy_service]:
            service._process_new_blocks(BlockNumber(to_block))  # pylint: disable=protected-access
            result_state.append(dict(db_dump=list(service.database.conn.iterdump())))

        # both instances should have the same state after processing
        for stable_state, crashy_state in zip(result_state[0].values(), result_state[1].values()):
            if isinstance(stable_state, BlockchainState):
                assert stable_state.chain_id == crashy_state.chain_id
                assert (
                    stable_state.token_network_registry_address
                    == crashy_state.token_network_registry_address
                )
                assert stable_state.latest_committed_block == crashy_state.latest_committed_block
                assert (
                    stable_state.monitor_contract_address == crashy_state.monitor_contract_address
                )
                # Do not compare `current_event_filter_interval`, this is allowed to be different
            else:
                assert stable_state == crashy_state

        crashy_service.database.conn.close()  # close the db connection so we can access it again
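

# Why omitting the UpdatedHeadBlockEvent simulates a crash: the service
# persists its sync position only when that event is handled, so a restarted
# instance replays every event after `latest_committed_block` and must arrive
# at the same state. A sketch of that commit pattern (names are assumptions,
# not the actual PathfindingService code):
class SyncPosition:
    def __init__(self):
        self.latest_committed_block = BlockNumber(0)

    def resume_block(self):
        # After a crash, resume right after the last committed block. All
        # events since then are replayed, so handlers must be idempotent.
        return BlockNumber(self.latest_committed_block + 1)

    def handle_updated_head_block(self, event):
        # The UpdatedHeadBlockEvent is the commit point.
        self.latest_committed_block = event.head_block_number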


def get_blockchain_events(
    web3: Web3,
    token_network_addresses: List[TokenNetworkAddress],
    chain_state: BlockchainState,
    from_block: BlockNumber,
    to_block: BlockNumber,
) -> List[Event]:
    # Check if the current block was already processed
    if from_block > to_block:
        return []

    log.info(
        "Querying new block(s)",
        from_block=from_block,
        to_block=to_block,
        # When `to_block` == `from_block` we query one block, so add one
        num_blocks=to_block - from_block + 1,
    )

    # first check for new token networks and add to state
    registry_events = query_blockchain_events(
        web3=web3,
        contract_addresses=[chain_state.token_network_registry_address],
        from_block=from_block,
        to_block=to_block,
    )

    events: List[Event] = []
    for event_dict in registry_events:
        token_network_address = TokenNetworkAddress(
            to_canonical_address(event_dict["args"]["token_network_address"])
        )
        events.append(
            ReceiveTokenNetworkCreatedEvent(
                token_network_address=token_network_address,
                token_address=TokenAddress(
                    to_canonical_address(event_dict["args"]["token_address"])
                ),
                settle_timeout=event_dict["args"]["settle_timeout"],
                block_number=event_dict["blockNumber"],
            )
        )
        token_network_addresses.append(token_network_address)

    # then check all token networks
    network_events = query_blockchain_events(
        web3=web3,
        contract_addresses=token_network_addresses,  # type: ignore
        from_block=from_block,
        to_block=to_block,
    )

    for event_dict in network_events:
        event = parse_token_network_event(event_dict)
        if event:
            events.append(event)

    # Get events from the monitoring service contract; this only queries the
    # chain if the monitor contract address is set in chain_state.
    monitoring_events = get_monitoring_blockchain_events(
        web3=web3,
        monitor_contract_address=chain_state.monitor_contract_address,
        from_block=from_block,
        to_block=to_block,
    )
    events.extend(monitoring_events)

    # commit new block number
    events.append(UpdatedHeadBlockEvent(head_block_number=to_block))

    return events
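

# A hedged usage sketch of the poll loop around get_blockchain_events: fetch
# everything between the last committed block and the latest confirmed one,
# then handle the events in order so the trailing UpdatedHeadBlockEvent
# commits the new head block last. The `service` object and its attributes
# are assumptions for illustration.
def process_new_blocks(service, latest_confirmed_block):
    from_block = BlockNumber(service.blockchain_state.latest_committed_block + 1)
    # Note: get_blockchain_events appends newly created token networks to the
    # list passed in, so the service should hand over its own mutable list.
    events = get_blockchain_events(
        web3=service.web3,
        token_network_addresses=service.token_network_addresses,
        chain_state=service.blockchain_state,
        from_block=from_block,
        to_block=latest_confirmed_block,
    )
    for event in events:
        service.handle_event(event)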


from eth_utils import decode_hex
from raiden.utils.typing import (
    ChainID,
    ChannelID,
    Nonce,
    TokenAddress,
    TokenAmount,
    TokenNetworkAddress,
)

from monitoring_service.states import (
    Channel,
    HashedBalanceProof,
    MonitorRequest,
    OnChainUpdateStatus,
)
from raiden_contracts.constants import ChannelState
from raiden_contracts.utils.type_aliases import PrivateKey
from raiden_libs.utils import private_key_to_address
from tests.constants import TEST_CHAIN_ID, TEST_MSC_ADDRESS

DEFAULT_TOKEN_NETWORK_ADDRESS = TokenNetworkAddress(bytes([1] * 20))
DEFAULT_TOKEN_ADDRESS = TokenAddress(bytes([9] * 20))
DEFAULT_CHANNEL_IDENTIFIER = ChannelID(3)
DEFAULT_PRIVATE_KEY1 = PrivateKey(decode_hex("0x" + "1" * 64))
DEFAULT_PRIVATE_KEY2 = PrivateKey(decode_hex("0x" + "2" * 64))
DEFAULT_PARTICIPANT1 = private_key_to_address(DEFAULT_PRIVATE_KEY1)
DEFAULT_PARTICIPANT2 = private_key_to_address(DEFAULT_PRIVATE_KEY2)
DEFAULT_PRIVATE_KEY_OTHER = PrivateKey(decode_hex("0x" + "3" * 64))
DEFAULT_PARTICIPANT_OTHER = private_key_to_address(DEFAULT_PRIVATE_KEY_OTHER)
DEFAULT_REWARD_AMOUNT = TokenAmount(1)
DEFAULT_SETTLE_TIMEOUT = 100 * 15  # 100 blocks at ~15 seconds per block, in seconds


def create_signed_monitor_request(
    chain_id: ChainID = TEST_CHAIN_ID,
    nonce: Nonce = Nonce(5),
    reward_amount: TokenAmount = DEFAULT_REWARD_AMOUNT,