Example #1
def test_waiting_messages(pathfinding_service_mock):
    participant1_privkey, participant1 = make_privkey_address()
    token_network_address = TokenNetworkAddress(b"1" * 20)
    channel_id = ChannelID(1)

    # register token network internally
    database = pathfinding_service_mock.database
    database.conn.execute(
        "INSERT INTO token_network(address) VALUES (?)",
        [to_checksum_address(token_network_address)],
    )

    fee_update = PFSFeeUpdate(
        canonical_identifier=CanonicalIdentifier(
            chain_identifier=ChainID(61),
            token_network_address=token_network_address,
            channel_identifier=channel_id,
        ),
        updating_participant=participant1,
        fee_schedule=FeeScheduleState(),
        timestamp=datetime.utcnow(),
        signature=EMPTY_SIGNATURE,
    )
    fee_update.sign(LocalSigner(participant1_privkey))

    capacity_update = PFSCapacityUpdate(
        canonical_identifier=CanonicalIdentifier(
            chain_identifier=ChainID(61),
            token_network_address=token_network_address,
            channel_identifier=channel_id,
        ),
        updating_participant=make_address(),
        other_participant=make_address(),
        updating_nonce=Nonce(1),
        other_nonce=Nonce(1),
        updating_capacity=TokenAmount(100),
        other_capacity=TokenAmount(111),
        reveal_timeout=BlockTimeout(50),
        signature=EMPTY_SIGNATURE,
    )
    capacity_update.sign(LocalSigner(participant1_privkey))
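    # note: signed with participant1's key even though the updating participant is a random
    # address; this test only exercises storing and popping the waiting messages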

    for message in (fee_update, capacity_update):
        database.insert_waiting_message(message)

        recovered_messages = list(
            database.pop_waiting_messages(
                token_network_address=token_network_address,
                channel_id=channel_id))
        assert len(recovered_messages) == 1
        assert message == recovered_messages[0]

        recovered_messages2 = list(
            database.pop_waiting_messages(
                token_network_address=token_network_address,
                channel_id=channel_id))
        assert len(recovered_messages2) == 0
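The `order` argument of the next test is supplied by pytest parametrization that is not shown in this snippet; a minimal sketch, assuming the two order values used in the test body, could look like this:

import pytest

@pytest.fixture(params=["normal", "fee_update_before_channel_open"])
def order(request):
    # run the test once per message ordering
    return request.param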
def test_update_fee(order, pathfinding_service_mock, token_network_model):
    metrics_state = save_metrics_state(metrics.REGISTRY)

    pathfinding_service_mock.database.insert(
        "token_network", dict(address=token_network_model.address)
    )
    if order == "normal":
        setup_channel(pathfinding_service_mock, token_network_model)
        exception_expected = False
    else:
        exception_expected = True

    fee_schedule = FeeScheduleState(
        flat=FeeAmount(1),
        proportional=ProportionalFeeAmount(int(0.1e9)),
        imbalance_penalty=[(TokenAmount(0), FeeAmount(0)), (TokenAmount(10), FeeAmount(10))],
    )
    fee_update = PFSFeeUpdate(
        canonical_identifier=CanonicalIdentifier(
            chain_identifier=ChainID(61),
            token_network_address=token_network_model.address,
            channel_identifier=ChannelID(1),
        ),
        updating_participant=PARTICIPANT1,
        fee_schedule=fee_schedule,
        timestamp=datetime.utcnow(),
        signature=EMPTY_SIGNATURE,
    )
    fee_update.sign(LocalSigner(PARTICIPANT1_PRIVKEY))
    pathfinding_service_mock.handle_message(fee_update)

    # Check that the metrics recorded the processing of the message
    assert (
        metrics_state.get_delta(
            "messages_processing_duration_seconds_sum",
            labels={"message_type": "PFSFeeUpdate"},
        )
        > 0.0
    )
    assert metrics_state.get_delta(
        "messages_exceptions_total", labels={"message_type": "PFSFeeUpdate"}
    ) == float(exception_expected)

    if order == "fee_update_before_channel_open":
        setup_channel(pathfinding_service_mock, token_network_model)

    cv = token_network_model.G[PARTICIPANT1][PARTICIPANT2]["view"]
    for key in ("flat", "proportional", "imbalance_penalty"):
        assert getattr(cv.fee_schedule_sender, key) == getattr(fee_schedule, key)
def test_action_claim_reward_triggered_event_handler_without_update_state_doesnt_trigger_claim_call(  # noqa
    context: Context,
):
    """Tests that `claimReward` is called when the ActionMonitoringTriggeredEvent is triggered and
    user has sufficient balance in user deposit contract
    """
    context = setup_state_with_closed_channel(context)

    context.database.upsert_monitor_request(
        create_signed_monitor_request(nonce=Nonce(6), reward_amount=TokenAmount(0))
    )

    trigger_event = ActionClaimRewardTriggeredEvent(
        token_network_address=DEFAULT_TOKEN_NETWORK_ADDRESS,
        channel_identifier=DEFAULT_CHANNEL_IDENTIFIER,
        non_closing_participant=DEFAULT_PARTICIPANT2,
    )

    channel = context.database.get_channel(
        trigger_event.token_network_address, trigger_event.channel_identifier
    )
    assert channel
    assert channel.claim_tx_hash is None

    # Set update state
    channel.update_status = OnChainUpdateStatus(
        update_sender_address=Address(bytes([1] * 20)), nonce=Nonce(6)
    )
    context.database.upsert_channel(channel)

    action_claim_reward_triggered_event_handler(trigger_event, context)

    # check that the claim reward call has not been made
    assert context.monitoring_service_contract.functions.claimReward.called is False
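The `.functions.claimReward.called` assertion above only works if the monitoring service contract in the test context is a mock object; a minimal sketch of that assumption using unittest.mock:

from unittest.mock import Mock

# Hypothetical illustration: attribute access on a Mock auto-creates child mocks,
# and calling one of them records the call.
contract = Mock()
contract.functions.claimReward(b"reward-id").transact()
assert contract.functions.claimReward.called is True

# An untouched mock reports `called is False`, which is what the test above relies on.
untouched = Mock()
assert untouched.functions.claimReward.called is False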
Example #4
def test_get_ious_via_debug_endpoint(
    api_sut_with_debug: PFSApi, api_url: str, addresses: List[Address]
):
    hex_addrs = [to_checksum_address(addr) for addr in addresses]
    iou = IOU(
        sender=addresses[0],
        receiver=addresses[4],
        amount=TokenAmount(111),
        claimable_until=7619644,
        signature=Signature(
            decode_hex("118a93e9fd0a3a1c3d6edbad194b5c9d95715c754881d80e23e985793b1e13de")
        ),
        claimed=False,
        chain_id=ChainID(61),
        one_to_n_address=api_sut_with_debug.one_to_n_address,
    )
    api_sut_with_debug.pathfinding_service.database.upsert_iou(iou)

    # now there must be an iou debug endpoint for a request of a sender in the database
    url_iou_debug = api_url + f"/v1/_debug/ious/{hex_addrs[0]}"
    response_debug = requests.get(url_iou_debug)
    assert response_debug.status_code == 200
    response_iou = response_debug.json()
    assert response_iou == {"sender": hex_addrs[0], "amount": 111, "claimable_until": 7619644}

    # a request for a sender that has no IOU in the database returns an empty result
    url_iou_debug = api_url + f"/v1/_debug/ious/{hex_addrs[1]}"
    response_debug = requests.get(url_iou_debug)
    assert response_debug.status_code == 200
    ious = response_debug.json()
    assert ious == {}
def test_action_monitoring_triggered_event_handler_without_sufficient_balance_doesnt_trigger_monitor_call(  # noqa
    context: Context,
):
    """Tests that `monitor` is not called when user has insufficient balance in user deposit contract

    Also a test for https://github.com/raiden-network/raiden-services/issues/29 , as the MR
    is sent after the channel has been closed.
    """
    context = setup_state_with_closed_channel(context)

    context.database.upsert_monitor_request(
        create_signed_monitor_request(nonce=Nonce(6), reward_amount=TokenAmount(10))
    )

    trigger_event = ActionMonitoringTriggeredEvent(
        token_network_address=DEFAULT_TOKEN_NETWORK_ADDRESS,
        channel_identifier=DEFAULT_CHANNEL_IDENTIFIER,
        non_closing_participant=DEFAULT_PARTICIPANT2,
    )

    channel = context.database.get_channel(
        trigger_event.token_network_address, trigger_event.channel_identifier
    )
    assert channel
    assert channel.monitor_tx_hash is None

    context.user_deposit_contract.functions.effectiveBalance(
        DEFAULT_PARTICIPANT2
    ).call.return_value = 0
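    # the mocked UDC reports zero effective balance for the non-closing participant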
    action_monitoring_triggered_event_handler(trigger_event, context)

    # check that the monitor call has not been made
    assert context.monitoring_service_contract.functions.monitor.called is False
def test_action_monitoring_triggered_event_handler_with_insufficient_reward_amount_does_not_trigger_monitor_call(  # noqa
    context: Context,
):
    """Tests that `monitor` is not called when the ActionMonitoringTriggeredEvent is triggered but
    the monitor request shows an insufficient reward amount
    """
    context = setup_state_with_closed_channel(context)

    context.database.upsert_monitor_request(
        create_signed_monitor_request(nonce=Nonce(6), reward_amount=TokenAmount(0))
    )

    trigger_event = ActionMonitoringTriggeredEvent(
        token_network_address=DEFAULT_TOKEN_NETWORK_ADDRESS,
        channel_identifier=DEFAULT_CHANNEL_IDENTIFIER,
        non_closing_participant=DEFAULT_PARTICIPANT2,
    )

    channel = context.database.get_channel(
        trigger_event.token_network_address, trigger_event.channel_identifier
    )
    assert channel
    assert channel.monitor_tx_hash is None

    context.user_deposit_contract.functions.effectiveBalance(
        DEFAULT_PARTICIPANT2
    ).call.return_value = 21
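    # per the docstring, the zero reward amount in the monitor request is what prevents the call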
    action_monitoring_triggered_event_handler(trigger_event, context)

    # check that the monitor call has not been made
    assert context.monitoring_service_contract.functions.monitor.called is False
def test_metrics_iou(  # pylint: disable=too-many-locals
    pathfinding_service_web3_mock: PathfindingService,
    one_to_n_contract,
    web3: Web3,
    deposit_to_udc,
    get_accounts,
    get_private_key,
):
    pfs = pathfinding_service_web3_mock

    metrics_state = save_metrics_state(metrics.REGISTRY)
    # Prepare test data
    account = [decode_hex(acc) for acc in get_accounts(1)][0]
    local_signer = LocalSigner(private_key=get_private_key(account))
    iou = IOU(
        sender=account,
        receiver=pfs.address,
        amount=TokenAmount(100),
        claimable_until=web3.eth.get_block("latest").timestamp + 100,  # type: ignore
        signature=Signature(bytes([1] * 64)),
        chain_id=ChainID(61),
        one_to_n_address=to_canonical_address(one_to_n_contract.address),
        claimed=False,
    )
    iou.signature = Signature(local_signer.sign(iou.packed_data()))
    pfs.database.upsert_iou(iou)
    deposit_to_udc(iou.sender, 300)

    # Claim IOUs
    skipped, failures = claim_ious(
        ious=[iou],
        claim_cost_rdn=TokenAmount(100),
        one_to_n_contract=one_to_n_contract,
        web3=web3,
        database=pfs.database,
    )
    assert (skipped, failures) == (0, 0)

    assert (
        metrics_state.get_delta(
            "economics_iou_claims_total", labels=metrics.IouStatus.SUCCESSFUL.to_label_dict()
        )
        == 1.0
    )
    assert (
        metrics_state.get_delta(
            "economics_iou_claims_token_total", labels=metrics.IouStatus.SUCCESSFUL.to_label_dict()
        )
        == 100.0
    )
    def test_payment(iou, service_fee=TokenAmount(1)):
        # IOU check reads the block number from here, so it has to be up to date
        pfs.blockchain_state.latest_committed_block = web3.eth.block_number
        pathfinding_service.api.process_payment(
            iou=iou,
            pathfinding_service=pfs,
            service_fee=service_fee,
            one_to_n_address=to_canonical_address(one_to_n_contract.address),
        )
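`save_metrics_state` and the returned object's `get_delta` are helpers from the surrounding test suite and are not shown in these snippets; a rough sketch of how such a delta helper could be built on top of prometheus_client (an assumption, not the project's actual implementation):

from typing import Dict, Tuple

from prometheus_client import CollectorRegistry


class MetricsStateSketch:
    """Snapshot a registry and report per-sample deltas later."""

    def __init__(self, registry: CollectorRegistry) -> None:
        self.registry = registry
        self.snapshot = self._collect()

    def _collect(self) -> Dict[Tuple, float]:
        # flatten every sample into {(sample name, sorted label items): value}
        return {
            (sample.name, tuple(sorted(sample.labels.items()))): sample.value
            for metric in self.registry.collect()
            for sample in metric.samples
        }

    def get_delta(self, name: str, labels: Dict[str, str]) -> float:
        key = (name, tuple(sorted(labels.items())))
        return self._collect().get(key, 0.0) - self.snapshot.get(key, 0.0)


def save_metrics_state(registry: CollectorRegistry) -> MetricsStateSketch:
    return MetricsStateSketch(registry)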
Example #9
def request_monitoring_message(token_network, get_accounts,
                               get_private_key) -> RequestMonitoring:
    c1, c2 = get_accounts(2)

    balance_proof_c2 = HashedBalanceProof(
        channel_identifier=ChannelID(1),
        token_network_address=TokenNetworkAddress(
            to_canonical_address(token_network.address)),
        chain_id=ChainID(61),
        nonce=Nonce(2),
        additional_hash="0x%064x" % 0,
        transferred_amount=TokenAmount(1),
        locked_amount=TokenAmount(0),
        locksroot=encode_hex(LOCKSROOT_OF_NO_LOCKS),
        priv_key=get_private_key(c2),
    )

    return balance_proof_c2.get_request_monitoring(
        privkey=get_private_key(c1),
        reward_amount=TokenAmount(1),
        monitoring_service_contract_address=MonitoringServiceAddress(
            bytes([11] * 20)),
    )
Example #10
    def f(
        chain_id: ChainID = TEST_CHAIN_ID,
        amount: TokenAmount = TokenAmount(50),
        nonce: Nonce = Nonce(1),
        channel_id: ChannelID = ChannelID(1),
    ) -> RequestMonitoring:
        balance_proof = HashedBalanceProof(
            channel_identifier=channel_id,
            token_network_address=TokenNetworkAddress(b"1" * 20),
            chain_id=chain_id,
            nonce=nonce,
            additional_hash="",
            balance_hash=encode_hex(bytes([amount])),
            priv_key=PrivateKey(get_random_privkey()),
        )
        request_monitoring = balance_proof.get_request_monitoring(
            privkey=non_closing_privkey,
            reward_amount=TokenAmount(55),
            monitoring_service_contract_address=TEST_MSC_ADDRESS,
        )

        # usually not a property of RequestMonitoring, but added for convenience in these tests
        request_monitoring.non_closing_signer = to_checksum_address(non_closing_address)
        return request_monitoring
def test_monitor_reward_claimed_event_handler(context: Context, log):
    metrics_state = save_metrics_state(metrics.REGISTRY)

    context = setup_state_with_closed_channel(context)

    claim_event = ReceiveMonitoringRewardClaimedEvent(
        ms_address=context.ms_state.address,
        amount=TokenAmount(1),
        reward_identifier="REWARD",
        block_number=BlockNumber(23),
    )

    monitor_reward_claim_event_handler(claim_event, context)

    assert (
        metrics_state.get_delta(
            "economics_reward_claims_successful_total", labels=metrics.Who.US.to_label_dict()
        )
        == 1.0
    )
    assert (
        metrics_state.get_delta(
            "economics_reward_claims_token_total", labels=metrics.Who.US.to_label_dict()
        )
        == 1.0
    )

    assert log.has("Successfully claimed reward")

    claim_event = dataclasses.replace(claim_event, ms_address=Address(bytes([3] * 20)))
    monitor_reward_claim_event_handler(claim_event, context)

    assert (
        metrics_state.get_delta(
            "economics_reward_claims_successful_total", labels=metrics.Who.THEY.to_label_dict()
        )
        == 1.0
    )
    assert (
        metrics_state.get_delta(
            "economics_reward_claims_token_total", labels=metrics.Who.THEY.to_label_dict()
        )
        == 1.0
    )

    assert log.has("Another MS claimed reward")
def test_monitor_new_balance_proof_event_handler_idempotency(context: Context):
    context = setup_state_with_closed_channel(context)

    new_balance_event = ReceiveMonitoringNewBalanceProofEvent(
        token_network_address=DEFAULT_TOKEN_NETWORK_ADDRESS,
        channel_identifier=DEFAULT_CHANNEL_IDENTIFIER,
        reward_amount=TokenAmount(1),
        nonce=Nonce(2),
        ms_address=Address(context.ms_state.address),
        raiden_node_address=DEFAULT_PARTICIPANT2,
        block_number=BlockNumber(23),
    )

    channel = context.database.get_channel(
        new_balance_event.token_network_address, new_balance_event.channel_identifier
    )
    assert channel
    assert channel.update_status is None

    monitor_new_balance_proof_event_handler(new_balance_event, context)

    assert context.database.scheduled_event_count() == 1
    assert context.database.channel_count() == 1
    channel = context.database.get_channel(
        new_balance_event.token_network_address, new_balance_event.channel_identifier
    )
    assert channel
    assert channel.update_status is not None
    assert channel.update_status.nonce == 2
    assert channel.update_status.update_sender_address == context.ms_state.address

    monitor_new_balance_proof_event_handler(new_balance_event, context)

    assert context.database.scheduled_event_count() == 1
    assert context.database.channel_count() == 1
    channel = context.database.get_channel(
        new_balance_event.token_network_address, new_balance_event.channel_identifier
    )
    assert channel
    assert channel.update_status is not None
    assert channel.update_status.nonce == 2
    assert channel.update_status.update_sender_address == context.ms_state.address
def test_action_monitoring_triggered_event_handler_does_not_trigger_monitor_call_when_nonce_to_small(  # noqa
    context: Context,
):
    context = setup_state_with_closed_channel(context)

    event3 = ReceiveMonitoringNewBalanceProofEvent(
        token_network_address=DEFAULT_TOKEN_NETWORK_ADDRESS,
        channel_identifier=DEFAULT_CHANNEL_IDENTIFIER,
        reward_amount=TokenAmount(1),
        nonce=Nonce(5),
        ms_address=Address(bytes([3] * 20)),
        raiden_node_address=DEFAULT_PARTICIPANT2,
        block_number=BlockNumber(23),
    )

    channel = context.database.get_channel(event3.token_network_address, event3.channel_identifier)
    assert channel
    assert channel.update_status is None

    monitor_new_balance_proof_event_handler(event3, context)

    # add MR to DB, with nonce being smaller than in event3
    context.database.upsert_monitor_request(create_signed_monitor_request(nonce=Nonce(4)))

    event4 = ActionMonitoringTriggeredEvent(
        token_network_address=DEFAULT_TOKEN_NETWORK_ADDRESS,
        channel_identifier=DEFAULT_CHANNEL_IDENTIFIER,
        non_closing_participant=DEFAULT_PARTICIPANT2,
    )

    channel = context.database.get_channel(event4.token_network_address, event4.channel_identifier)
    assert channel
    assert channel.update_status is not None
    assert channel.monitor_tx_hash is None

    action_monitoring_triggered_event_handler(event4, context)

    assert context.database.channel_count() == 1
    assert channel
    assert channel.monitor_tx_hash is None
def test_action_monitoring_rescheduling_when_user_lacks_funds(context: Context):
    reward_amount = TokenAmount(10)
    context = setup_state_with_closed_channel(context)
    context.database.upsert_monitor_request(
        create_signed_monitor_request(nonce=Nonce(6), reward_amount=reward_amount)
    )

    event = ActionMonitoringTriggeredEvent(
        token_network_address=DEFAULT_TOKEN_NETWORK_ADDRESS,
        channel_identifier=DEFAULT_CHANNEL_IDENTIFIER,
        non_closing_participant=DEFAULT_PARTICIPANT2,
    )
    scheduled_events_before = context.database.get_scheduled_events(
        max_trigger_timestamp=Timestamp(get_posix_utc_time_now())
    )

    # Try to call monitor when the user has insufficient funds
    with patch("monitoring_service.handlers.get_pessimistic_udc_balance", Mock(return_value=0)):
        action_monitoring_triggered_event_handler(event, context)
    assert not context.monitoring_service_contract.functions.monitor.called

    # Now the event must have been rescheduled
    # TODO: check that the event is rescheduled to trigger at the right block
    scheduled_events_after = context.database.get_scheduled_events(
        max_trigger_timestamp=Timestamp(get_posix_utc_time_now())
    )
    new_events = set(scheduled_events_after) - set(scheduled_events_before)
    assert len(new_events) == 1
    assert new_events.pop().event == event

    # With sufficient funds it must succeed
    with patch(
        "monitoring_service.handlers.get_pessimistic_udc_balance",
        Mock(return_value=reward_amount * UDC_SECURITY_MARGIN_FACTOR_MS),
    ):
        action_monitoring_triggered_event_handler(event, context)
    assert context.monitoring_service_contract.functions.monitor.called
Example #15
def populate_token_network_random(
    token_network_model: TokenNetwork, private_keys: List[str]
) -> None:
    number_of_channels = 300
    # seed pseudo-randomness with a config constant (which changes from time to time)
    random.seed(number_of_channels)

    for channel_id_int in range(number_of_channels):
        channel_id = ChannelID(channel_id_int)

        private_key1, private_key2 = random.sample(private_keys, 2)
        address1 = private_key_to_address(private_key1)
        address2 = private_key_to_address(private_key2)
        token_network_model.handle_channel_opened_event(
            channel_identifier=channel_id,
            participant1=address1,
            participant2=address2,
        )

        # deposit to channels
        deposit1 = TokenAmount(random.randint(0, 1000))
        deposit2 = TokenAmount(random.randint(0, 1000))
        address1, address2 = token_network_model.channel_id_to_addresses[channel_id]
        token_network_model.handle_channel_balance_update_message(
            PFSCapacityUpdate(
                canonical_identifier=CanonicalIdentifier(
                    chain_identifier=ChainID(61),
                    channel_identifier=channel_id,
                    token_network_address=TokenNetworkAddress(token_network_model.address),
                ),
                updating_participant=address1,
                other_participant=address2,
                updating_nonce=Nonce(1),
                other_nonce=Nonce(1),
                updating_capacity=deposit1,
                other_capacity=deposit2,
                reveal_timeout=BlockTimeout(2),
                signature=EMPTY_SIGNATURE,
            ),
            updating_capacity_partner=TokenAmount(0),
            other_capacity_partner=TokenAmount(0),
        )
        token_network_model.handle_channel_balance_update_message(
            PFSCapacityUpdate(
                canonical_identifier=CanonicalIdentifier(
                    chain_identifier=ChainID(61),
                    channel_identifier=channel_id,
                    token_network_address=TokenNetworkAddress(token_network_model.address),
                ),
                updating_participant=address2,
                other_participant=address1,
                updating_nonce=Nonce(2),
                other_nonce=Nonce(1),
                updating_capacity=deposit2,
                other_capacity=deposit1,
                reveal_timeout=BlockTimeout(2),
                signature=EMPTY_SIGNATURE,
            ),
            updating_capacity_partner=TokenAmount(deposit1),
            other_capacity_partner=TokenAmount(deposit2),
        )
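A possible invocation of this helper (assumed usage; `get_random_privkey` is the same utility used in the other snippets):

private_keys = [get_random_privkey() for _ in range(10)]
populate_token_network_random(token_network_model, private_keys)

# the model now contains the 300 randomly funded channels between the 10 addresses
assert len(token_network_model.channel_id_to_addresses) == 300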
def test_monitor_new_balance_proof_event_handler_sets_update_status(context: Context):
    context = setup_state_with_closed_channel(context)

    new_balance_event = ReceiveMonitoringNewBalanceProofEvent(
        token_network_address=DEFAULT_TOKEN_NETWORK_ADDRESS,
        channel_identifier=DEFAULT_CHANNEL_IDENTIFIER,
        reward_amount=TokenAmount(1),
        nonce=Nonce(2),
        ms_address=Address(bytes([4] * 20)),
        raiden_node_address=DEFAULT_PARTICIPANT2,
        block_number=BlockNumber(62),
    )

    channel = context.database.get_channel(
        new_balance_event.token_network_address, new_balance_event.channel_identifier
    )
    assert channel
    assert channel.update_status is None
    assert get_scheduled_claim_event(context.database) is None

    monitor_new_balance_proof_event_handler(new_balance_event, context)

    assert context.database.channel_count() == 1
    channel = context.database.get_channel(
        new_balance_event.token_network_address, new_balance_event.channel_identifier
    )
    assert channel
    assert channel.update_status is not None
    assert channel.update_status.nonce == 2
    assert channel.update_status.update_sender_address == bytes([4] * 20)

    # closing block * avg. time per block + token network settle timeout
    expected_trigger_timestamp = 52 * 15 + context.database.get_token_network_settle_timeout(
        channel.token_network_address
    )

    scheduled_claim_event = get_scheduled_claim_event(context.database)
    assert scheduled_claim_event is not None
    assert scheduled_claim_event.trigger_timestamp == expected_trigger_timestamp

    new_balance_event2 = ReceiveMonitoringNewBalanceProofEvent(
        token_network_address=DEFAULT_TOKEN_NETWORK_ADDRESS,
        channel_identifier=DEFAULT_CHANNEL_IDENTIFIER,
        reward_amount=TokenAmount(1),
        nonce=Nonce(5),
        ms_address=Address(bytes([4] * 20)),
        raiden_node_address=DEFAULT_PARTICIPANT2,
        block_number=BlockNumber(63),
    )

    monitor_new_balance_proof_event_handler(new_balance_event2, context)

    assert context.database.channel_count() == 1
    channel = context.database.get_channel(
        new_balance_event.token_network_address, new_balance_event.channel_identifier
    )
    assert channel
    assert channel.update_status is not None
    assert channel.update_status.nonce == 5
    assert channel.update_status.update_sender_address == bytes([4] * 20)

    scheduled_claim_event = get_scheduled_claim_event(context.database)
    assert scheduled_claim_event is not None
    assert scheduled_claim_event.trigger_timestamp == expected_trigger_timestamp
Example #17
def test_first_allowed_monitoring(
    web3: Web3,
    monitoring_service_contract,
    service_registry,
    monitoring_service: MonitoringService,
    request_collector: RequestCollector,
    deposit_to_udc,
    create_channel,
    token_network,
    get_accounts,
    get_private_key,
):
    # pylint: disable=too-many-arguments,too-many-locals,protected-access
    query = create_ms_contract_events_query(web3, monitoring_service_contract.address)
    c1, c2 = get_accounts(2)

    # add deposit for c1
    node_deposit = 10
    deposit_to_udc(c1, node_deposit)

    assert service_registry.functions.hasValidRegistration(monitoring_service.address).call()

    # each client does a transfer
    channel_id = create_channel(c1, c2)[0]

    shared_bp_args = dict(
        channel_identifier=channel_id,
        token_network_address=decode_hex(token_network.address),
        chain_id=monitoring_service.chain_id,
        additional_hash="0x%064x" % 0,
        locked_amount=TokenAmount(0),
        locksroot=encode_hex(LOCKSROOT_OF_NO_LOCKS),
    )
    transferred_c1 = 5
    balance_proof_c1 = HashedBalanceProof(
        nonce=Nonce(1),
        transferred_amount=transferred_c1,
        priv_key=get_private_key(c1),
        **shared_bp_args,
    )
    transferred_c2 = 6
    balance_proof_c2 = HashedBalanceProof(
        nonce=Nonce(2),
        transferred_amount=transferred_c2,
        priv_key=get_private_key(c2),
        **shared_bp_args,
    )
    monitoring_service._process_new_blocks(web3.eth.block_number)
    assert len(monitoring_service.context.database.get_token_network_addresses()) > 0

    # c1 asks MS to monitor the channel
    reward_amount = TokenAmount(1)
    request_monitoring = balance_proof_c2.get_request_monitoring(
        privkey=get_private_key(c1),
        reward_amount=reward_amount,
        monitoring_service_contract_address=MonitoringServiceAddress(
            to_canonical_address(monitoring_service_contract.address)
        ),
    )
    request_collector.on_monitor_request(request_monitoring)

    # c2 closes the channel
    token_network.functions.closeChannel(
        channel_id,
        c1,
        c2,
        balance_proof_c1.balance_hash,
        balance_proof_c1.nonce,
        balance_proof_c1.additional_hash,
        balance_proof_c1.signature,
        balance_proof_c1.get_counter_signature(get_private_key(c2)),
    ).transact({"from": c2})

    monitoring_service._process_new_blocks(web3.eth.block_number)

    timestamp_of_closing_block = Timestamp(web3.eth.get_block("latest").timestamp)  # type: ignore
    settle_timeout = int(token_network.functions.settle_timeout().call())
    settleable_after = Timestamp(timestamp_of_closing_block + settle_timeout)

    triggered_events = monitoring_service.database.get_scheduled_events(
        max_trigger_timestamp=settleable_after
    )

    assert len(triggered_events) == 1

    monitor_trigger = triggered_events[0]
    channel = monitoring_service.database.get_channel(
        token_network_address=TokenNetworkAddress(to_canonical_address(token_network.address)),
        channel_id=channel_id,
    )
    assert channel

    # Calling monitor too early must fail. To test this, we call a few seconds
    # before the trigger timestamp.
    web3.testing.timeTravel(monitor_trigger.trigger_timestamp - 5)  # type: ignore

    with pytest.raises(TransactionTooEarlyException):
        handle_event(monitor_trigger.event, monitoring_service.context)
    assert [e.event for e in query()] == []

    # If our `monitor` call fails, we won't try again. Force a retry in this
    # test by clearing monitor_tx_hash.
    channel.monitor_tx_hash = None
    monitoring_service.database.upsert_channel(channel)

    # Now we can try again. The first try mined a new block, so now we're one
    # block further and `monitor` should succeed.
    web3.testing.timeTravel(monitor_trigger.trigger_timestamp)  # type: ignore
    handle_event(monitor_trigger.event, monitoring_service.context)
    assert [e.event for e in query()] == [MonitoringServiceEvent.NEW_BALANCE_PROOF_RECEIVED]
Example #18
def test_process_payment(
    pathfinding_service_web3_mock,
    deposit_to_udc,
    create_account,
    get_private_key,
    make_iou,
    one_to_n_contract,
    web3,
):
    pfs = pathfinding_service_web3_mock
    service_fee = TokenAmount(1)
    sender = create_account()
    privkey = get_private_key(sender)
    deposit_to_udc(sender, round(1 * UDC_SECURITY_MARGIN_FACTOR_PFS))
    web3.testing.mine(pathfinding_service_web3_mock.required_confirmations)
    one_to_n_address = to_canonical_address(one_to_n_contract.address)

    def process_payment(*args, **kwargs):
        # IOU check reads the block number from here, so it has to be up to date
        pfs.blockchain_state.latest_committed_block = web3.eth.block_number
        pathfinding_service.api.process_payment(*args, **kwargs)

    # Make payment
    iou = make_iou(privkey, pfs.address, amount=1)
    pfs.blockchain_state.latest_committed_block = web3.eth.block_number
    process_payment(iou, pfs, service_fee, one_to_n_address)

    # The same payment can't be reused
    with pytest.raises(exceptions.InsufficientServicePayment):
        process_payment(iou, pfs, service_fee, one_to_n_address)

    # Increasing the amount would make the payment work again, if we had enough
    # deposit. But we set the deposit one token too low.
    deposit_to_udc(sender, round(2 * UDC_SECURITY_MARGIN_FACTOR_PFS) - 1)
    iou = make_iou(privkey, pfs.address, amount=2)
    pfs.blockchain_state.latest_committed_block = web3.eth.block_number
    with pytest.raises(exceptions.DepositTooLow) as tb:
        process_payment(iou, pfs, service_fee, one_to_n_address)
    assert tb.value.error_details["required_deposit"] == 2 * UDC_SECURITY_MARGIN_FACTOR_PFS
    assert tb.value.error_details["seen_deposit"] == 1 * UDC_SECURITY_MARGIN_FACTOR_PFS
    assert tb.value.error_details["block_number"] == web3.eth.block_number

    # With the higher amount and enough deposit, it works again!
    deposit_to_udc(sender, round(2 * UDC_SECURITY_MARGIN_FACTOR_PFS))
    web3.testing.mine(pathfinding_service_web3_mock.required_confirmations)
    iou = make_iou(privkey, pfs.address, amount=2)
    pfs.blockchain_state.latest_committed_block = web3.eth.block_number
    process_payment(iou, pfs, service_fee, one_to_n_address)

    # Make sure the client does not create new sessions unnecessarily
    iou = make_iou(privkey, pfs.address, claimable_until=20000 * 15)
    with pytest.raises(exceptions.UseThisIOU):
        process_payment(iou, pfs, service_fee, one_to_n_address)

    # Complain if the IOU has been claimed
    iou = make_iou(privkey, pfs.address, amount=3)
    pfs.database.conn.execute("UPDATE iou SET claimed=1")
    with pytest.raises(exceptions.IOUAlreadyClaimed):
        process_payment(iou, pfs, service_fee, one_to_n_address)
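The `DepositTooLow` details above suggest that the required deposit is the IOU amount scaled by `UDC_SECURITY_MARGIN_FACTOR_PFS`; a tiny illustrative check of that relationship (the margin value below is made up, not the real constant):

def has_sufficient_deposit(iou_amount: int, udc_deposit: int, margin: float) -> bool:
    # mirrors required_deposit == amount * UDC_SECURITY_MARGIN_FACTOR_PFS from the test above
    return udc_deposit >= iou_amount * margin

MARGIN = 2.0  # hypothetical value, for illustration only
assert not has_sufficient_deposit(2, round(2 * MARGIN) - 1, MARGIN)  # one token too low
assert has_sufficient_deposit(2, round(2 * MARGIN), MARGIN)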
Example #19
)
from raiden_contracts.constants import ChannelState
from raiden_contracts.utils.type_aliases import PrivateKey
from raiden_libs.utils import private_key_to_address
from tests.constants import TEST_CHAIN_ID, TEST_MSC_ADDRESS

DEFAULT_TOKEN_NETWORK_ADDRESS = TokenNetworkAddress(bytes([1] * 20))
DEFAULT_TOKEN_ADDRESS = TokenAddress(bytes([9] * 20))
DEFAULT_CHANNEL_IDENTIFIER = ChannelID(3)
DEFAULT_PRIVATE_KEY1 = PrivateKey(decode_hex("0x" + "1" * 64))
DEFAULT_PRIVATE_KEY2 = PrivateKey(decode_hex("0x" + "2" * 64))
DEFAULT_PARTICIPANT1 = private_key_to_address(DEFAULT_PRIVATE_KEY1)
DEFAULT_PARTICIPANT2 = private_key_to_address(DEFAULT_PRIVATE_KEY2)
DEFAULT_PRIVATE_KEY_OTHER = PrivateKey(decode_hex("0x" + "3" * 64))
DEFAULT_PARTICIPANT_OTHER = private_key_to_address(DEFAULT_PRIVATE_KEY_OTHER)
DEFAULT_REWARD_AMOUNT = TokenAmount(1)
DEFAULT_SETTLE_TIMEOUT = 100 * 15  # time in seconds


def create_signed_monitor_request(
    chain_id: ChainID = TEST_CHAIN_ID,
    nonce: Nonce = Nonce(5),
    reward_amount: TokenAmount = DEFAULT_REWARD_AMOUNT,
    closing_privkey: PrivateKey = DEFAULT_PRIVATE_KEY1,
    nonclosing_privkey: PrivateKey = DEFAULT_PRIVATE_KEY2,
) -> MonitorRequest:
    bp = HashedBalanceProof(
        channel_identifier=DEFAULT_CHANNEL_IDENTIFIER,
        token_network_address=DEFAULT_TOKEN_NETWORK_ADDRESS,
        chain_id=chain_id,
        balance_hash="",
Example #20
def test_crash(
    tmpdir, get_accounts, get_private_key, mockchain
):  # pylint: disable=too-many-locals
    """Process blocks and compare results with/without crash

    A somewhat meaningful crash handling is simulated by not including the
    UpdatedHeadBlockEvent in every block.
    """
    channel_identifier = ChannelID(3)
    c1, c2 = get_accounts(2)
    token_network_address = TokenNetworkAddress(to_canonical_address(get_random_address()))
    balance_proof = HashedBalanceProof(
        nonce=Nonce(1),
        transferred_amount=TokenAmount(2),
        priv_key=get_private_key(c1),
        channel_identifier=channel_identifier,
        token_network_address=token_network_address,
        chain_id=ChainID(61),
        additional_hash="0x%064x" % 0,
        locked_amount=0,
        locksroot=encode_hex(LOCKSROOT_OF_NO_LOCKS),
    )
    monitor_request = balance_proof.get_monitor_request(
        get_private_key(c2), reward_amount=TokenAmount(0), msc_address=TEST_MSC_ADDRESS
    )

    events = [
        [
            ReceiveChannelOpenedEvent(
                token_network_address=token_network_address,
                channel_identifier=channel_identifier,
                participant1=c1,
                participant2=c2,
                block_number=BlockNumber(0),
            )
        ],
        [UpdatedHeadBlockEvent(BlockNumber(1))],
        [
            ActionMonitoringTriggeredEvent(
                token_network_address=token_network_address,
                channel_identifier=channel_identifier,
                non_closing_participant=c2,
            )
        ],
        [UpdatedHeadBlockEvent(BlockNumber(3))],
    ]
    mockchain(events)

    server_private_key = PrivateKey(get_random_privkey())

    contracts = {
        CONTRACT_TOKEN_NETWORK_REGISTRY: ContractMock(),
        CONTRACT_MONITORING_SERVICE: ContractMock(),
        CONTRACT_USER_DEPOSIT: ContractMock(),
        CONTRACT_SERVICE_REGISTRY: ContractMock(),
    }

    def new_ms(filename):
        ms = MonitoringService(
            web3=Web3Mock(),
            private_key=server_private_key,
            contracts=contracts,
            db_filename=os.path.join(tmpdir, filename),
            poll_interval=0,
            required_confirmations=BlockTimeout(0),
            sync_start_block=BlockNumber(0),
        )
        msc = Mock()
        ms.context.monitoring_service_contract = msc
        ms.monitor_mock = msc.functions.monitor.return_value.transact
        ms.monitor_mock.return_value = bytes(0)
        return ms

    # initialize both monitoring services
    stable_ms = new_ms("stable.db")
    crashy_ms = new_ms("crashy.db")
    for ms in [stable_ms, crashy_ms]:
        # mock database time to make results reproducible
        ms.database.conn.create_function("CURRENT_TIMESTAMP", 1, lambda: "2000-01-01")

        ms.database.conn.execute(
            "INSERT INTO token_network (address, settle_timeout) VALUES (?, ?)",
            [to_checksum_address(token_network_address), DEFAULT_TOKEN_NETWORK_SETTLE_TIMEOUT],
        )
        ms.context.ms_state.blockchain_state.token_network_addresses = [token_network_address]
        ms.database.upsert_monitor_request(monitor_request)
        ms.database.conn.commit()

    # process each block and compare results between crashy and stable ms
    for to_block in range(len(events)):
        crashy_ms = new_ms("crashy.db")  # new instance to simulate crash
        stable_ms.monitor_mock.reset_mock()  # clear calls from last block
        result_state: List[dict] = []
        for ms in [stable_ms, crashy_ms]:
            ms._process_new_blocks(BlockNumber(to_block))  # pylint: disable=protected-access
            result_state.append(
                dict(
                    blockchain_state=ms.context.ms_state.blockchain_state,
                    db_dump=list(ms.database.conn.iterdump()),
                    monitor_calls=ms.monitor_mock.mock_calls,
                )
            )

        # both instances should have the same state after processing
        for stable_state, crashy_state in zip(result_state[0].values(), result_state[1].values()):
            if isinstance(stable_state, BlockchainState):
                assert stable_state.chain_id == crashy_state.chain_id
                assert (
                    stable_state.token_network_registry_address
                    == crashy_state.token_network_registry_address
                )
                assert stable_state.latest_committed_block == crashy_state.latest_committed_block
                assert (
                    stable_state.monitor_contract_address == crashy_state.monitor_contract_address
                )
                # Do not compare `current_event_filter_interval`, this is allowed to be different
            else:
                assert stable_state == crashy_state
Example #21
def test_get_paths_validation(
    api_sut: PFSApi,
    api_url: str,
    addresses: List[Address],
    token_network_model: TokenNetwork,
    make_iou: Callable,
):
    initiator_address = to_checksum_address(addresses[0])
    target_address = to_checksum_address(addresses[1])
    url = api_url + "/v1/" + to_checksum_address(token_network_model.address) + "/paths"
    default_params = {"from": initiator_address, "to": target_address, "value": 5, "max_paths": 3}

    def request_path_with(status_code=400, **kwargs):
        params = default_params.copy()
        params.update(kwargs)
        response = requests.post(url, json=params)
        assert response.status_code == status_code, response.json()
        return response

    response = requests.post(url)
    assert response.status_code == 400
    assert response.json()["errors"].startswith("JSON payload expected")

    for address in ["notanaddress", to_normalized_address(initiator_address)]:
        response = request_path_with(**{"from": address})
        assert response.json()["error_code"] == exceptions.InvalidRequest.error_code
        assert "from" in response.json()["error_details"]

        response = request_path_with(to=address)
        assert response.json()["error_code"] == exceptions.InvalidRequest.error_code
        assert "to" in response.json()["error_details"]

    response = request_path_with(value=-10)
    assert response.json()["error_code"] == exceptions.InvalidRequest.error_code
    assert "value" in response.json()["error_details"]

    response = request_path_with(max_paths=-1)
    assert response.json()["error_code"] == exceptions.InvalidRequest.error_code
    assert "max_paths" in response.json()["error_details"]

    # successful request without payment
    request_path_with(status_code=200)

    # Exemplary test for payment errors. Different errors are serialized the
    # same way in the rest API. Checking for specific errors is tested in
    # payment_tests.
    api_sut.service_fee = TokenAmount(1)
    response = request_path_with()
    assert response.json()["error_code"] == exceptions.MissingIOU.error_code

    # prepare iou for payment tests
    iou = make_iou(
        PrivateKey(get_random_privkey()),
        api_sut.pathfinding_service.address,
        one_to_n_address=api_sut.one_to_n_address,
    )
    good_iou_dict = iou.Schema().dump(iou)

    # malformed iou
    bad_iou_dict = good_iou_dict.copy()
    del bad_iou_dict["amount"]
    response = request_path_with(iou=bad_iou_dict)
    assert response.json()["error_code"] == exceptions.InvalidRequest.error_code

    # malformed iou
    bad_iou_dict = {
        "amount": {"_hex": "0x64"},
        "chain_id": {"_hex": "0x05"},
        "claimable_until": {"_hex": "0x188cba"},
        "one_to_n_address": "0x0000000000000000000000000000000000000000",
        "receiver": "0x94DEe8e391410A9ebbA791B187df2d993212c849",
        "sender": "0x2046F7341f15D0211ca1EBeFb19d029c4Bc4c4e7",
        "signature": (
            "0x0c3066e6a954d660028695f96dfe88cabaf0bc8a385e51781ac4d21003d0b6cd7a8b2"
            "a1134115845655d1a509061f48459cd401565b5df7845c913ed329cd2351b"
        ),
    }
    response = request_path_with(iou=bad_iou_dict)
    assert response.json()["error_code"] == exceptions.InvalidRequest.error_code

    # bad signature
    bad_iou_dict = good_iou_dict.copy()
    bad_iou_dict["signature"] = "0x" + "1" * 130
    response = request_path_with(iou=bad_iou_dict)
    assert response.json()["error_code"] == exceptions.InvalidSignature.error_code

    # with successful payment
    request_path_with(iou=good_iou_dict, status_code=200)
def test_claim_fees(  # pylint: disable=too-many-locals
    pathfinding_service_web3_mock: PathfindingService,
    one_to_n_contract,
    web3: Web3,
    deposit_to_udc,
    get_accounts,
    get_private_key,
):
    pfs = pathfinding_service_web3_mock

    # Prepare test data
    accounts = [decode_hex(acc) for acc in get_accounts(7)]
    iou_inputs: List[dict] = [
        dict(sender=accounts[0], amount=100, deposit=200),
        dict(sender=accounts[1], amount=200, deposit=100),
        dict(sender=accounts[2], amount=102, deposit=0),  # insufficient deposit
        dict(sender=accounts[3], amount=103, deposit=99),  # insufficient deposit
        dict(sender=accounts[4], amount=104, claimed=True),  # already claimed
        dict(sender=accounts[4], amount=99),  # too low amount
        dict(sender=accounts[5], claimable_until=100 * 15, amount=104),  # does not expire, yet
        dict(
            sender=accounts[6],
            claimable_until=web3.eth.get_block(web3.eth.block_number - 1).timestamp,  # type: ignore
            amount=104,
        ),  # already expired
    ]

    # Create IOUs from `iou_inputs`
    ious: List[IOU] = []
    for iou_dict in iou_inputs:
        local_signer = LocalSigner(
            private_key=get_private_key(iou_dict["sender"]))
        iou = IOU(
            sender=iou_dict["sender"],
            receiver=pfs.address,
            amount=TokenAmount(iou_dict["amount"]),
            claimable_until=iou_dict.get(
                "claimable_until",
                web3.eth.get_block("latest").timestamp + 100  # type: ignore
            ),
            signature=Signature(bytes([1] * 64)),  # dummy, replaced below
            chain_id=ChainID(61),
            one_to_n_address=to_canonical_address(one_to_n_contract.address),
            claimed=iou_dict.get("claimed", False),
        )
        iou.signature = Signature(local_signer.sign(iou.packed_data()))
        ious.append(iou)
        pfs.database.upsert_iou(iou)
        if iou_dict.get("deposit", 0) > 0:
            deposit_to_udc(iou.sender, iou_dict["deposit"])

    # Check if the right IOUs are considered to be claimable
    expected_claimable = ious[:4]
    timestamp_now = web3.eth.get_block("latest").timestamp  # type: ignore

    claimable_ious = list(
        get_claimable_ious(
            database=pfs.database,
            claimable_until_after=timestamp_now,
            claimable_until_before=timestamp_now + 10000,  # TODO: use proper boundaries
            claim_cost_rdn=TokenAmount(100),
        )
    )
    assert claimable_ious == expected_claimable

    # Claim IOUs
    skipped, failures = claim_ious(
        ious=claimable_ious,
        claim_cost_rdn=TokenAmount(100),
        one_to_n_contract=one_to_n_contract,
        web3=web3,
        database=pfs.database,
    )
    assert (skipped, failures) == (2, 0)

    # Those IOUs which have enough deposit should be marked as claimed
    # * in the blockchain
    # * in the database
    # All other IOUs must not be changed.
    claimable_with_enough_deposit = ious[:2]
    for iou in ious:
        expected_claimed = iou in claimable_with_enough_deposit

        iou_in_db = pfs.database.get_iou(sender=iou.sender,
                                         claimable_until=iou.claimable_until)
        assert iou_in_db
        assert iou_in_db.claimed == expected_claimed

        is_settled = bool(
            one_to_n_contract.functions.settled_sessions(
                iou.session_id).call())
        assert is_settled == expected_claimed
Example #23
def test_process_payment_errors(
    pathfinding_service_web3_mock,
    web3,
    deposit_to_udc,
    create_account,
    get_private_key,
    make_iou,
    one_to_n_contract,
):
    pfs = pathfinding_service_web3_mock
    sender = create_account()
    privkey = get_private_key(sender)

    def test_payment(iou, service_fee=TokenAmount(1)):
        # IOU check reads the block number from here, so it has to be up to date
        pfs.blockchain_state.latest_committed_block = web3.eth.block_number
        pathfinding_service.api.process_payment(
            iou=iou,
            pathfinding_service=pfs,
            service_fee=service_fee,
            one_to_n_address=to_canonical_address(one_to_n_contract.address),
        )

    # expires too early
    iou = make_iou(
        privkey, pfs.address, claimable_until=web3.eth.get_block("latest").timestamp + 5 * 15
    )
    with pytest.raises(exceptions.IOUExpiredTooEarly):
        test_payment(iou)

    # it fails if there is no deposit in the UDC
    iou = make_iou(privkey, pfs.address)
    with pytest.raises(exceptions.DepositTooLow):
        test_payment(iou)

    # adding deposit does not help immediately
    deposit_to_udc(sender, 10)
    with pytest.raises(exceptions.DepositTooLow):
        test_payment(iou)

    # must succeed after deposit is confirmed
    web3.testing.mine(pathfinding_service_web3_mock.required_confirmations)
    test_payment(iou)

    # wrong recipient
    iou = make_iou(privkey, Address(bytes([6] * 20)))
    with pytest.raises(exceptions.WrongIOURecipient):
        test_payment(iou)

    # wrong chain_id
    iou = make_iou(privkey, pfs.address, chain_id=2)
    with pytest.raises(exceptions.UnsupportedChainID):
        test_payment(iou)

    # wrong one_to_n_address
    iou = make_iou(privkey, pfs.address, one_to_n_address=bytes([1] * 20))
    with pytest.raises(exceptions.WrongOneToNAddress):
        test_payment(iou)

    # payment too low
    iou = make_iou(privkey, pfs.address)
    with pytest.raises(exceptions.InsufficientServicePayment):
        test_payment(iou, service_fee=TokenAmount(2))
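`make_iou` is a fixture from the surrounding test suite; a rough sketch of what such a helper might do, reusing the IOU construction and signing pattern from the other snippets (the defaults below are placeholders, not the real fixture's values):

def make_iou_sketch(privkey, receiver, amount=1, **kwargs):
    iou = IOU(
        sender=private_key_to_address(privkey),
        receiver=receiver,
        amount=TokenAmount(amount),
        claimable_until=kwargs.get("claimable_until", 10_000 * 15),
        chain_id=kwargs.get("chain_id", ChainID(61)),
        one_to_n_address=kwargs.get("one_to_n_address", bytes([7] * 20)),
        signature=Signature(bytes([1] * 64)),  # dummy, replaced below
        claimed=False,
    )
    iou.signature = Signature(LocalSigner(privkey).sign(iou.packed_data()))
    return iou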
Example #24
    def populate_token_network(
        token_network: TokenNetwork,
        reachability_state: SimpleReachabilityContainer,
        addresses: List[Address],
        channel_descriptions: List,
    ):
        for (
            channel_id,
            (
                p1_index,
                p1_capacity,
                _p1_fee,
                p1_reveal_timeout,
                p1_reachability,
                p2_index,
                p2_capacity,
                _p2_fee,
                p2_reveal_timeout,
                p2_reachability,
            ),
        ) in enumerate(channel_descriptions):
            participant1 = addresses[p1_index]
            participant2 = addresses[p2_index]
            token_network.handle_channel_opened_event(
                channel_identifier=ChannelID(channel_id),
                participant1=participant1,
                participant2=participant2,
            )

            token_network.handle_channel_balance_update_message(
                PFSCapacityUpdate(
                    canonical_identifier=CanonicalIdentifier(
                        chain_identifier=ChainID(61),
                        channel_identifier=ChannelID(channel_id),
                        token_network_address=TokenNetworkAddress(token_network.address),
                    ),
                    updating_participant=addresses[p1_index],
                    other_participant=addresses[p2_index],
                    updating_nonce=Nonce(1),
                    other_nonce=Nonce(1),
                    updating_capacity=p1_capacity,
                    other_capacity=p2_capacity,
                    reveal_timeout=p1_reveal_timeout,
                    signature=EMPTY_SIGNATURE,
                ),
                updating_capacity_partner=TokenAmount(0),
                other_capacity_partner=TokenAmount(0),
            )
            token_network.handle_channel_balance_update_message(
                PFSCapacityUpdate(
                    canonical_identifier=CanonicalIdentifier(
                        chain_identifier=ChainID(61),
                        channel_identifier=ChannelID(channel_id),
                        token_network_address=TokenNetworkAddress(token_network.address),
                    ),
                    updating_participant=addresses[p2_index],
                    other_participant=addresses[p1_index],
                    updating_nonce=Nonce(2),
                    other_nonce=Nonce(1),
                    updating_capacity=p2_capacity,
                    other_capacity=p1_capacity,
                    reveal_timeout=p2_reveal_timeout,
                    signature=EMPTY_SIGNATURE,
                ),
                updating_capacity_partner=TokenAmount(p1_capacity),
                other_capacity_partner=TokenAmount(p2_capacity),
            )

            # Update presence state according to scenario
            reachability_state.reachabilities[participant1] = p1_reachability
            reachability_state.reachabilities[participant2] = p2_reachability
Example #25
def test_reschedule_too_early_events(
    web3: Web3,
    monitoring_service_contract,
    monitoring_service: MonitoringService,
    request_collector: RequestCollector,
    deposit_to_udc,
    create_channel,
    token_network,
    get_accounts,
    get_private_key,
):
    # pylint: disable=too-many-arguments,too-many-locals,protected-access
    c1, c2 = get_accounts(2)

    # add deposit for c1
    node_deposit = 10
    deposit_to_udc(c1, node_deposit)

    # each client does a transfer
    channel_id = create_channel(c1, c2)[0]

    shared_bp_args = dict(
        channel_identifier=channel_id,
        token_network_address=decode_hex(token_network.address),
        chain_id=monitoring_service.chain_id,
        additional_hash="0x%064x" % 0,
        locked_amount=TokenAmount(0),
        locksroot=encode_hex(LOCKSROOT_OF_NO_LOCKS),
    )
    transferred_c1 = 5
    balance_proof_c1 = HashedBalanceProof(
        nonce=Nonce(1),
        transferred_amount=transferred_c1,
        priv_key=get_private_key(c1),
        **shared_bp_args,
    )
    transferred_c2 = 6
    balance_proof_c2 = HashedBalanceProof(
        nonce=Nonce(2),
        transferred_amount=transferred_c2,
        priv_key=get_private_key(c2),
        **shared_bp_args,
    )
    monitoring_service._process_new_blocks(web3.eth.block_number)
    assert len(monitoring_service.context.database.get_token_network_addresses()) > 0

    # c1 asks MS to monitor the channel
    reward_amount = TokenAmount(1)
    request_monitoring = balance_proof_c2.get_request_monitoring(
        privkey=get_private_key(c1),
        reward_amount=reward_amount,
        monitoring_service_contract_address=MonitoringServiceAddress(
            to_canonical_address(monitoring_service_contract.address)
        ),
    )
    request_collector.on_monitor_request(request_monitoring)

    # c2 closes the channel
    token_network.functions.closeChannel(
        channel_id,
        c1,
        c2,
        balance_proof_c1.balance_hash,
        balance_proof_c1.nonce,
        balance_proof_c1.additional_hash,
        balance_proof_c1.signature,
        balance_proof_c1.get_counter_signature(get_private_key(c2)),
    ).transact({"from": c2})

    monitoring_service._process_new_blocks(web3.eth.block_number)

    timestamp_of_closing_block = Timestamp(web3.eth.get_block("latest").timestamp)  # type: ignore
    settle_timeout = int(token_network.functions.settle_timeout().call())
    settleable_after = Timestamp(timestamp_of_closing_block + settle_timeout)

    scheduled_events = monitoring_service.database.get_scheduled_events(
        max_trigger_timestamp=settleable_after
    )

    channel = monitoring_service.database.get_channel(
        token_network_address=TokenNetworkAddress(to_canonical_address(token_network.address)),
        channel_id=channel_id,
    )

    monitor_trigger = _first_allowed_timestamp_to_monitor(
        scheduled_events[0].event.token_network_address, channel, monitoring_service.context
    )

    assert len(scheduled_events) == 1
    first_trigger_timestamp = scheduled_events[0].trigger_timestamp
    assert first_trigger_timestamp == monitor_trigger

    # Calling monitor too early must fail
    monitoring_service.get_timestamp_now = lambda: settleable_after
    monitoring_service._trigger_scheduled_events()  # pylint: disable=protected-access
    assert monitoring_service.try_scheduled_events_after == pytest.approx(settleable_after, 100)

    # Failed event is still scheduled, since it was too early for it to succeed
    scheduled_events = monitoring_service.database.get_scheduled_events(settleable_after)
    assert len(scheduled_events) == 1
    # ...and it should be blocked from retrying for a while.
    assert (
        monitoring_service.try_scheduled_events_after
        == monitoring_service.get_timestamp_now() + MAX_SCHEDULED_EVENTS_RETRY_FREQUENCY
    )

    # Now it could be executed, but won't due to MAX_SCHEDULED_EVENTS_RETRY_FREQUENCY
    web3.testing.timeTravel(settleable_after - 1)  # type: ignore
    monitoring_service._trigger_scheduled_events()  # pylint: disable=protected-access
    assert len(monitoring_service.database.get_scheduled_events(settleable_after)) == 1

    # Check that it does succeed if it weren't for MAX_SCHEDULED_EVENTS_RETRY_FREQUENCY
    monitoring_service.try_scheduled_events_after = monitoring_service.get_timestamp_now() - 1
    monitoring_service._trigger_scheduled_events()  # pylint: disable=protected-access
    assert len(monitoring_service.database.get_scheduled_events(settleable_after)) == 0
Example #26
def test_edge_weight(addresses):
    # pylint: disable=assigning-non-slot
    channel_id = ChannelID(1)
    participant1 = addresses[0]
    participant2 = addresses[1]
    capacity = TokenAmount(int(20 * 1e18))
    capacity_partner = TokenAmount(int(10 * 1e18))
    channel = Channel(
        token_network_address=TokenNetworkAddress(bytes([1])),
        channel_id=channel_id,
        participant1=participant1,
        participant2=participant2,
        capacity1=capacity,
        capacity2=capacity_partner,
    )
    view, view_partner = channel.views
    amount = PaymentAmount(int(1e18))  # one RDN

    # no penalty
    assert (TokenNetwork.edge_weight(visited={},
                                     view=view,
                                     view_from_partner=view_partner,
                                     amount=amount,
                                     fee_penalty=0) == 1)

    # channel already used in a previous route
    assert (TokenNetwork.edge_weight(
        visited={channel_id: 2},
        view=view,
        view_from_partner=view_partner,
        amount=amount,
        fee_penalty=0,
    ) == 3)

    # absolute fee
    view.fee_schedule_sender.flat = FeeAmount(int(0.03e18))
    assert (TokenNetwork.edge_weight(
        visited={},
        view=view,
        view_from_partner=view_partner,
        amount=amount,
        fee_penalty=100,
    ) == 4)

    # relative fee
    view.fee_schedule_sender.flat = FeeAmount(0)
    view.fee_schedule_sender.proportional = ProportionalFeeAmount(int(0.01e6))
    assert (TokenNetwork.edge_weight(
        visited={},
        view=view,
        view_from_partner=view_partner,
        amount=amount,
        fee_penalty=100,
    ) == 2)

    # partner has not enough capacity for refund (no_refund_weight) -> edge weight +1
    view_partner.capacity = TokenAmount(0)
    assert (TokenNetwork.edge_weight(
        visited={},
        view=view,
        view_from_partner=view_partner,
        amount=amount,
        fee_penalty=100,
    ) == 3)
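The asserted weights above follow a simple pattern; a sketch of the formula implied by these assertions (not necessarily the actual `TokenNetwork.edge_weight` implementation):

def implied_edge_weight(
    times_visited: int, fee: int, amount: int, fee_penalty: float, partner_can_refund: bool
) -> float:
    # 1 base weight, +1 per previous use of the channel, plus the fee expressed as a
    # fraction of the payment amount scaled by fee_penalty, +1 if the partner cannot refund
    weight = 1 + times_visited + (fee / amount) * fee_penalty
    if not partner_can_refund:
        weight += 1
    return weight


# reproduces the assertions above
assert implied_edge_weight(0, 0, int(1e18), 0, True) == 1
assert implied_edge_weight(2, 0, int(1e18), 0, True) == 3
assert implied_edge_weight(0, int(0.03e18), int(1e18), 100, True) == 4
assert implied_edge_weight(0, int(0.01e18), int(1e18), 100, True) == 2
assert implied_edge_weight(0, int(0.01e18), int(1e18), 100, False) == 3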
Example #27
def test_e2e(  # pylint: disable=too-many-arguments,too-many-locals
    web3,
    monitoring_service_contract,
    user_deposit_contract,
    service_registry,
    monitoring_service: MonitoringService,
    request_collector: RequestCollector,
    deposit_to_udc,
    create_channel,
    token_network,
    get_accounts,
    get_private_key,
):
    """Test complete message lifecycle
    1) client opens channel & submits monitoring request
    2) other client closes channel
    3) MS registers channelClose event
    4) MS calls monitoring contract update
    5) wait for channel settle
    6) MS claims the reward
    """
    query = create_ms_contract_events_query(web3, monitoring_service_contract.address)
    initial_balance = user_deposit_contract.functions.balances(monitoring_service.address).call()
    c1, c2 = get_accounts(2)

    # add deposit for c1
    node_deposit = 10
    deposit_to_udc(c1, node_deposit)

    assert service_registry.functions.hasValidRegistration(monitoring_service.address).call()

    # each client does a transfer
    channel_id = create_channel(c1, c2)[0]

    shared_bp_args = dict(
        channel_identifier=channel_id,
        token_network_address=decode_hex(token_network.address),
        chain_id=monitoring_service.chain_id,
        additional_hash="0x%064x" % 0,
        locked_amount=TokenAmount(0),
        locksroot=encode_hex(LOCKSROOT_OF_NO_LOCKS),
    )
    transferred_c1 = 5
    balance_proof_c1 = HashedBalanceProof(
        nonce=Nonce(1),
        transferred_amount=transferred_c1,
        priv_key=get_private_key(c1),
        **shared_bp_args,
    )
    transferred_c2 = 6
    balance_proof_c2 = HashedBalanceProof(
        nonce=Nonce(2),
        transferred_amount=transferred_c2,
        priv_key=get_private_key(c2),
        **shared_bp_args,
    )

    ms_greenlet = gevent.spawn(monitoring_service.start)

    # need to wait here till the MS has some time to react
    gevent.sleep(0.01)
    assert len(monitoring_service.context.database.get_token_network_addresses()) > 0

    # c1 asks MS to monitor the channel
    reward_amount = TokenAmount(1)
    request_monitoring = balance_proof_c2.get_request_monitoring(
        privkey=get_private_key(c1),
        reward_amount=reward_amount,
        monitoring_service_contract_address=MonitoringServiceAddress(
            to_canonical_address(monitoring_service_contract.address)
        ),
    )
    request_collector.on_monitor_request(request_monitoring)

    # c2 closes the channel
    token_network.functions.closeChannel(
        channel_id,
        c1,
        c2,
        balance_proof_c1.balance_hash,
        balance_proof_c1.nonce,
        balance_proof_c1.additional_hash,
        balance_proof_c1.signature,
        balance_proof_c1.get_counter_signature(get_private_key(c2)),
    ).transact({"from": c2})
    # Wait until the MS reacts, which it does after giving the client some time
    # to update the channel itself.

    timestamp_of_closing_block = Timestamp(web3.eth.get_block("latest").timestamp)
    settle_timeout = int(token_network.functions.settle_timeout().call())
    settleable_after = Timestamp(timestamp_of_closing_block + settle_timeout)

    web3.testing.timeTravel(settleable_after - 1)
    monitoring_service.get_timestamp_now = lambda: settleable_after - 1

    # Now give the monitoring service a chance to submit the missing BP
    gevent.sleep(0.01)
    assert [e.event for e in query()] == [MonitoringServiceEvent.NEW_BALANCE_PROOF_RECEIVED]

    # wait for settle timeout
    web3.testing.timeTravel(settleable_after + 1)
    monitoring_service.get_timestamp_now = lambda: settleable_after + 1

    # Let the MS claim its reward
    gevent.sleep(0.01)
    assert [e.event for e in query()] == [
        MonitoringServiceEvent.NEW_BALANCE_PROOF_RECEIVED,
        MonitoringServiceEvent.REWARD_CLAIMED,
    ]

    final_balance = user_deposit_contract.functions.balances(monitoring_service.address).call()
    assert final_balance == (initial_balance + reward_amount)

    ms_greenlet.kill()