Example #1
def create_signed_monitor_request(
    nonce: Nonce = Nonce(5),
    reward_amount: TokenAmount = DEFAULT_REWARD_AMOUNT,
    closing_privkey: str = DEFAULT_PRIVATE_KEY1,
    nonclosing_privkey: str = DEFAULT_PRIVATE_KEY2,
) -> MonitorRequest:
    bp = HashedBalanceProof(
        channel_identifier=DEFAULT_CHANNEL_IDENTIFIER,
        token_network_address=DEFAULT_TOKEN_NETWORK_ADDRESS,
        chain_id=ChainID(1),
        balance_hash="",
        nonce=nonce,
        additional_hash="",
        priv_key=closing_privkey,
    )
    monitor_request = bp.get_monitor_request(privkey=nonclosing_privkey,
                                             reward_amount=reward_amount,
                                             msc_address=TEST_MSC_ADDRESS)

    # Some signature correctness checks
    assert monitor_request.signer == private_key_to_address(closing_privkey)
    assert monitor_request.non_closing_signer == private_key_to_address(
        nonclosing_privkey)
    assert monitor_request.reward_proof_signer == private_key_to_address(
        nonclosing_privkey)

    return monitor_request
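For reference, a minimal usage sketch of the helper above, assuming the DEFAULT_* constants and private_key_to_address from the same test module are in scope:

monitor_request = create_signed_monitor_request(nonce=Nonce(7))
# The closing key signs the balance proof; the non-closing key signs the monitor request.
assert monitor_request.signer == private_key_to_address(DEFAULT_PRIVATE_KEY1)
assert monitor_request.non_closing_signer == private_key_to_address(DEFAULT_PRIVATE_KEY2)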
Example #2
    def f(
            chain_id: ChainID = ChainID(1),
            amount: TokenAmount = TokenAmount(50),
            nonce: Nonce = Nonce(1),
            channel_id: ChannelID = ChannelID(1),
    ) -> RequestMonitoring:
        balance_proof = HashedBalanceProof(
            channel_identifier=channel_id,
            token_network_address=TokenNetworkAddress(b"1" * 20),
            chain_id=chain_id,
            nonce=nonce,
            additional_hash="",
            balance_hash=encode_hex(bytes([amount])),
            priv_key=get_random_privkey(),
        )
        request_monitoring = balance_proof.get_request_monitoring(
            privkey=non_closing_privkey,
            reward_amount=TokenAmount(55),
            monitoring_service_contract_address=TEST_MSC_ADDRESS,
        )

        # usually not a property of RequestMonitoring, but added for convenience in these tests
        request_monitoring.non_closing_signer = to_checksum_address(  # type: ignore
            non_closing_address)
        return request_monitoring
Example #3
    def f():
        contract_address = get_random_address()
        channel_identifier = get_random_identifier()

        balance_hash_data = '%d' % random.randint(0, UINT64_MAX)
        additional_hash_data = '%d' % random.randint(0, UINT64_MAX)

        balance_hash = encode_hex((balance_hash_data.encode()))
        nonce = random.randint(0, UINT64_MAX)
        additional_hash = encode_hex(keccak(additional_hash_data.encode()))
        chain_id = 1

        privkey = get_random_privkey()
        privkey_non_closing = get_random_privkey()

        bp = HashedBalanceProof(  # type: ignore
            channel_identifier=channel_identifier,
            token_network_address=contract_address,
            chain_id=chain_id,
            balance_hash=balance_hash,
            nonce=nonce,
            additional_hash=additional_hash,
            priv_key=privkey,
        )
        monitor_request = UnsignedMonitorRequest.from_balance_proof(
            bp, reward_amount=TokenAmount(0)).sign(privkey_non_closing)
        return monitor_request, privkey, privkey_non_closing
Example #4
def request_monitoring_message(token_network, get_accounts,
                               get_private_key) -> RequestMonitoring:
    c1, c2 = get_accounts(2)

    balance_proof_c2 = HashedBalanceProof(
        token_network_address=token_network.address,
        channel_identifier=ChannelID(1),
        chain_id=ChainID(1),
        nonce=Nonce(2),
        additional_hash="0x%064x" % 0,
        transferred_amount=TokenAmount(1),
        locked_amount=TokenAmount(0),
        locksroot=encode_hex(EMPTY_LOCKSROOT),
        priv_key=get_private_key(c2),
    )

    return balance_proof_c2.get_request_monitoring(
        privkey=get_private_key(c1), reward_amount=TokenAmount(1))
Example #5
def request_monitoring_message(token_network, get_accounts, get_private_key) -> RequestMonitoring:
    c1, c2 = get_accounts(2)

    balance_proof_c2 = HashedBalanceProof(
        channel_identifier=ChannelID(1),
        token_network_address=TokenNetworkAddress(to_canonical_address(token_network.address)),
        chain_id=ChainID(61),
        nonce=Nonce(2),
        additional_hash="0x%064x" % 0,
        transferred_amount=TokenAmount(1),
        locked_amount=TokenAmount(0),
        locksroot=encode_hex(LOCKSROOT_OF_NO_LOCKS),
        priv_key=get_private_key(c2),
    )

    return balance_proof_c2.get_request_monitoring(
        privkey=get_private_key(c1),
        reward_amount=TokenAmount(1),
        monitoring_service_contract_address=MonitoringServiceAddress(bytes([11] * 20)),
    )
Example #6
def get_signed_monitor_request(
    nonce: int = 5,
    reward_amount: TokenAmount = DEFAULT_REWARD_AMOUNT,
    closing_privkey: str = DEFAULT_PRIVATE_KEY1,
    nonclosing_privkey: str = DEFAULT_PRIVATE_KEY2,
) -> MonitorRequest:
    bp = HashedBalanceProof(  # type: ignore
        channel_identifier=DEFAULT_CHANNEL_IDENTIFIER,
        token_network_address=DEFAULT_TOKEN_NETWORK_ADDRESS,
        chain_id=1,
        balance_hash='',
        nonce=nonce,
        additional_hash='',
        priv_key=closing_privkey,
    )
    monitor_request = UnsignedMonitorRequest.from_balance_proof(
        bp, reward_amount=reward_amount).sign(nonclosing_privkey)
    return monitor_request
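This helper builds the same kind of signed MonitorRequest as Example #1, but goes through UnsignedMonitorRequest.from_balance_proof(...).sign(...) instead of bp.get_monitor_request(...). A hedged usage sketch, mirroring the signer assertions from Example #1 and assuming the same DEFAULT_* constants:

mr = get_signed_monitor_request(nonce=3)
assert mr.signer == private_key_to_address(DEFAULT_PRIVATE_KEY1)  # closing key
assert mr.non_closing_signer == private_key_to_address(DEFAULT_PRIVATE_KEY2)  # non-closing key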
Example #7
 def update_transfer(self, partner_address: Address, balance_proof: HashedBalanceProof):
     """Given a valid signed balance proof, this method calls `updateNonClosingBalanceProof`
     for an open channel
     """
     local_signer = LocalSigner(decode_hex(self.privkey))
     serialized = balance_proof.serialize_bin(msg_type=MessageTypeId.BALANCE_PROOF_UPDATE)
     non_closing_data = serialized + decode_hex(balance_proof.signature)
     non_closing_signature = encode_hex(local_signer.sign(non_closing_data))
     self.contract.functions.updateNonClosingBalanceProof(
         self.partner_to_channel_id[partner_address],
         partner_address,
         self.address,
         balance_proof.balance_hash,
         balance_proof.nonce,
         balance_proof.additional_hash,
         balance_proof.signature,
         non_closing_signature,
     ).transact({'from': self.address})
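The counter-signature above follows a simple recipe: serialize the balance proof with the BALANCE_PROOF_UPDATE message type, append the closing party's signature, and sign that concatenation with the non-closing party's key. A minimal sketch of just this step (my_privkey is a hypothetical stand-in for self.privkey):

signer = LocalSigner(decode_hex(my_privkey))  # my_privkey: hypothetical non-closing key
update_data = balance_proof.serialize_bin(msg_type=MessageTypeId.BALANCE_PROOF_UPDATE)
non_closing_signature = encode_hex(signer.sign(update_data + decode_hex(balance_proof.signature)))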
Example #8
 def get_balance_proof(
     self, partner_address: Address = None, channel_id: int = None, **kwargs
 ) -> HashedBalanceProof:
     """Get a signed balance proof for an open channel.
     Parameters:
         partner_address - address of a partner the node has channel open with
         channel_id - used if `partner_address` is None
         **kwargs - arguments to HashedBalanceProof constructor
     """
     if partner_address is not None:
         assert channel_id is None
         channel_id = self.partner_to_channel_id[partner_address]
     bp = HashedBalanceProof(  # type: ignore  # workaround: mypy complains about priv_key
         channel_identifier=channel_id,
         token_network_address=self.contract.address,
         chain_id=self.chain_id,
         priv_key=self.privkey,
         **kwargs,
     )
     return bp
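A hypothetical usage sketch; node and partner are illustrative names, and the keyword arguments are forwarded to the HashedBalanceProof constructor as described in the docstring:

bp = node.get_balance_proof(  # node: hypothetical mock client instance
    partner_address=partner,  # partner: hypothetical channel partner address
    nonce=Nonce(1),
    transferred_amount=TokenAmount(5),
    locked_amount=TokenAmount(0),
    locksroot=encode_hex(LOCKSROOT_OF_NO_LOCKS),
    additional_hash="0x%064x" % 0,
)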
Example #9
def test_pfs_with_mocked_client(  # pylint: disable=too-many-arguments
    web3,
    token_network_registry_contract,
    channel_descriptions_case_1: List,
    get_accounts,
    wait_for_blocks,
    user_deposit_contract,
    token_network,
    custom_token,
    create_channel,
    get_private_key,
):  # pylint: disable=too-many-locals
    """ Instantiates some MockClients and the PathfindingService.

    Mocks blockchain events to set up a token network with a given topology, specified in
    the channel_descriptions_case_1 fixture. Tests all PFS methods w.r.t. that topology.
    """
    clients = get_accounts(7)
    token_network_address = decode_hex(token_network.address)

    with patch("pathfinding_service.service.MatrixListener", new=Mock):
        pfs = PathfindingService(
            web3=web3,
            contracts={
                CONTRACT_TOKEN_NETWORK_REGISTRY: token_network_registry_contract,
                CONTRACT_USER_DEPOSIT: user_deposit_contract,
            },
            required_confirmations=1,
            db_filename=":memory:",
            poll_interval=0.1,
            sync_start_block=BlockNumber(0),
            private_key="3a1076bf45ab87712ad64ccb3b10217737f7faacbf2872e88fdd9a537d8fe266",
        )

    # greenlet needs to be started and context switched to
    pfs.start()
    wait_for_blocks(1)
    gevent.sleep(0.1)

    # there should be one token network registered
    assert len(pfs.token_networks) == 1

    token_network_model = pfs.token_networks[token_network_address]
    graph = token_network_model.G
    channel_identifiers = []
    for (
        p1_index,
        p1_deposit,
        _p1_capacity,
        _p1_fee,
        _p1_reveal_timeout,
        _p1_reachability,
        p2_index,
        p2_deposit,
        _p2_capacity,
        _p2_fee,
        _p2_reveal_timeout,
        _p2_reachability,
        _settle_timeout,
    ) in channel_descriptions_case_1:
        # order is important here because we check order later
        channel_id = create_channel(clients[p1_index], clients[p2_index])[0]
        channel_identifiers.append(channel_id)

        for address, partner_address, amount in [
            (clients[p1_index], clients[p2_index], p1_deposit),
            (clients[p2_index], clients[p1_index], p2_deposit),
        ]:
            custom_token.functions.mint(amount).transact({"from": address})
            custom_token.functions.approve(token_network.address, amount).transact(
                {"from": address}
            )
            token_network.functions.setTotalDeposit(
                channel_id, address, amount, partner_address
            ).transact({"from": address})
        gevent.sleep()
    wait_for_blocks(1)
    gevent.sleep(0.1)

    # there should be as many open channels as described
    assert len(token_network_model.channel_id_to_addresses.keys()) == len(
        channel_descriptions_case_1
    )

    # check that deposits, settle_timeout and transfers got registered
    for (
        index,
        (
            _p1_index,
            p1_deposit,
            _p1_capacity,
            _p1_fee,
            _p1_reveal_timeout,
            _p1_reachability,
            _p2_index,
            p2_deposit,
            _p2_capacity,
            _p2_fee,
            _p2_reveal_timeout,
            _p2_reachability,
            _settle_timeout,
        ),
    ) in enumerate(channel_descriptions_case_1):
        channel_identifier = channel_identifiers[index]
        p1_address, p2_address = token_network_model.channel_id_to_addresses[channel_identifier]
        view1: ChannelView = graph[p1_address][p2_address]["view"]
        view2: ChannelView = graph[p2_address][p1_address]["view"]
        assert view1.deposit == p1_deposit
        assert view2.deposit == p2_deposit
        assert view1.settle_timeout == TEST_SETTLE_TIMEOUT_MIN
        assert view2.settle_timeout == TEST_SETTLE_TIMEOUT_MIN
        assert view1.reveal_timeout == DEFAULT_REVEAL_TIMEOUT
        assert view2.reveal_timeout == DEFAULT_REVEAL_TIMEOUT

    # now close all channels
    for (
        index,
        (
            p1_index,
            _p1_deposit,
            _p1_capacity,
            _p1_fee,
            _p1_reveal_timeout,
            _p1_reachability,
            p2_index,
            _p2_deposit,
            _p2_capacity,
            _p2_fee,
            _p2_reveal_timeout,
            _p2_reachability,
            _settle_timeout,
        ),
    ) in enumerate(channel_descriptions_case_1):
        channel_id = channel_identifiers[index]
        balance_proof = HashedBalanceProof(
            nonce=Nonce(1),
            transferred_amount=0,
            priv_key=get_private_key(clients[p2_index]),
            channel_identifier=channel_id,
            token_network_address=token_network.address,
            chain_id=ChainID(1),
            additional_hash="0x%064x" % 0,
            locked_amount=0,
            locksroot=encode_hex(LOCKSROOT_OF_NO_LOCKS),
        )
        token_network.functions.closeChannel(
            channel_id,
            clients[p2_index],
            balance_proof.balance_hash,
            balance_proof.nonce,
            balance_proof.additional_hash,
            balance_proof.signature,
        ).transact({"from": clients[p1_index], "gas": 200_000})

    wait_for_blocks(1)
    gevent.sleep(0.1)

    # there should be no channels
    assert len(token_network_model.channel_id_to_addresses.keys()) == 0
    pfs.stop()
Example #10
def test_crash(tmpdir, get_accounts, get_private_key, mockchain):  # pylint: disable=too-many-locals
    """ Process blocks and compare results with/without crash

    A somewhat meaningful crash handling is simulated by not including the
    UpdatedHeadBlockEvent in every block.
    """
    channel_identifier = ChannelID(3)
    c1, c2 = get_accounts(2)
    token_network_address = TokenNetworkAddress(
        to_canonical_address(get_random_address()))
    balance_proof = HashedBalanceProof(
        nonce=Nonce(1),
        transferred_amount=TokenAmount(2),
        priv_key=get_private_key(c1),
        channel_identifier=channel_identifier,
        token_network_address=token_network_address,
        chain_id=ChainID(1),
        additional_hash="0x%064x" % 0,
        locked_amount=0,
        locksroot=encode_hex(LOCKSROOT_OF_NO_LOCKS),
    )
    monitor_request = balance_proof.get_monitor_request(
        get_private_key(c2),
        reward_amount=TokenAmount(0),
        msc_address=TEST_MSC_ADDRESS)

    events = [
        [
            ReceiveChannelOpenedEvent(
                token_network_address=token_network_address,
                channel_identifier=channel_identifier,
                participant1=c1,
                participant2=c2,
                settle_timeout=20,
                block_number=BlockNumber(0),
            )
        ],
        [UpdatedHeadBlockEvent(BlockNumber(1))],
        [
            ActionMonitoringTriggeredEvent(
                token_network_address=token_network_address,
                channel_identifier=channel_identifier,
                non_closing_participant=c2,
            )
        ],
        [UpdatedHeadBlockEvent(BlockNumber(3))],
    ]
    mockchain(events)

    server_private_key = get_random_privkey()

    contracts = {
        CONTRACT_TOKEN_NETWORK_REGISTRY: ContractMock(),
        CONTRACT_MONITORING_SERVICE: ContractMock(),
        CONTRACT_USER_DEPOSIT: ContractMock(),
        CONTRACT_SERVICE_REGISTRY: ContractMock(),
    }

    def new_ms(filename):
        ms = MonitoringService(
            web3=Web3Mock(),
            private_key=server_private_key,
            contracts=contracts,
            db_filename=os.path.join(tmpdir, filename),
        )
        msc = Mock()
        ms.context.monitoring_service_contract = msc
        ms.monitor_mock = msc.functions.monitor.return_value.transact  # type: ignore
        ms.monitor_mock.return_value = bytes(0)  # type: ignore
        return ms

    # initialize both monitoring services
    stable_ms = new_ms("stable.db")
    crashy_ms = new_ms("crashy.db")
    for ms in [stable_ms, crashy_ms]:
        ms.database.conn.execute(
            "INSERT INTO token_network(address) VALUES (?)",
            [to_checksum_address(token_network_address)],
        )
        ms.context.ms_state.blockchain_state.token_network_addresses = [
            token_network_address
        ]
        ms.database.upsert_monitor_request(monitor_request)
        ms.database.conn.commit()

    # process each block and compare results between crashy and stable ms
    for to_block in range(len(events)):
        crashy_ms = new_ms("crashy.db")  # new instance to simulate crash
        stable_ms.monitor_mock.reset_mock()  # clear calls from last block
        result_state: List[dict] = []
        for ms in [stable_ms, crashy_ms]:
            ms._process_new_blocks(to_block)  # pylint: disable=protected-access
            result_state.append(
                dict(
                    blockchain_state=ms.context.ms_state.blockchain_state,
                    db_dump=list(ms.database.conn.iterdump()),
                    monitor_calls=ms.monitor_mock.mock_calls,
                ))

        # both instances should have the same state after processing
        for stable_state, crashy_state in zip(result_state[0].values(),
                                              result_state[1].values()):
            # do asserts for each key separately to get better error messages
            assert stable_state == crashy_state
Example #11
def test_e2e(  # pylint: disable=too-many-arguments,too-many-locals
    web3,
    monitoring_service_contract,
    user_deposit_contract,
    wait_for_blocks,
    service_registry,
    monitoring_service: MonitoringService,
    request_collector: RequestCollector,
    contracts_manager,
    deposit_to_udc,
    create_channel,
    token_network,
    get_accounts,
    get_private_key,
):
    """Test complete message lifecycle
        1) client opens channel & submits monitoring request
        2) other client closes channel
        3) MS registers channelClose event
        4) MS calls monitoring contract update
        5) wait for channel settle
        6) MS claims the reward
    """
    query = create_ms_contract_events_query(
        web3, contracts_manager, monitoring_service_contract.address)
    initial_balance = user_deposit_contract.functions.balances(
        monitoring_service.address).call()
    c1, c2 = get_accounts(2)

    # add deposit for c1
    node_deposit = 10
    deposit_to_udc(c1, node_deposit)

    deposit = service_registry.functions.deposits(
        monitoring_service.address).call()
    assert deposit > 0

    # each client does a transfer
    channel_id = create_channel(
        c1, c2,
        settle_timeout=5)[0]  # TODO: reduce settle_timeout to speed up test

    shared_bp_args = dict(
        channel_identifier=channel_id,
        token_network_address=token_network.address,
        chain_id=ChainID(1),
        additional_hash="0x%064x" % 0,
        locked_amount=TokenAmount(0),
        locksroot=encode_hex(EMPTY_LOCKSROOT),
    )
    transferred_c1 = 5
    balance_proof_c1 = HashedBalanceProof(nonce=Nonce(1),
                                          transferred_amount=transferred_c1,
                                          priv_key=get_private_key(c1),
                                          **shared_bp_args)
    transferred_c2 = 6
    balance_proof_c2 = HashedBalanceProof(nonce=Nonce(2),
                                          transferred_amount=transferred_c2,
                                          priv_key=get_private_key(c2),
                                          **shared_bp_args)

    ms_greenlet = gevent.spawn(monitoring_service.start, gevent.sleep)

    # need to wait here till the MS has some time to react
    gevent.sleep()

    assert monitoring_service.context.ms_state.blockchain_state.token_network_addresses

    # c1 asks MS to monitor the channel
    reward_amount = TokenAmount(1)
    request_monitoring = balance_proof_c2.get_request_monitoring(
        get_private_key(c1), reward_amount)
    request_collector.on_monitor_request(request_monitoring)

    # c2 closes the channel
    token_network.functions.closeChannel(
        channel_id,
        c1,
        balance_proof_c1.balance_hash,
        balance_proof_c1.nonce,
        balance_proof_c1.additional_hash,
        balance_proof_c1.signature,
    ).transact({"from": c2})
    # Wait until the MS reacts, which it does after giving the client some time
    # to update the channel itself.
    wait_for_blocks(3)  # 1 block for close + 2 blocks (30% of the 5 block settle timeout)
    # Now give the monitoring service a chance to submit the missing BP
    gevent.sleep(0.1)

    assert [e.event for e in query()
            ] == [MonitoringServiceEvent.NEW_BALANCE_PROOF_RECEIVED]

    # wait for settle timeout
    wait_for_blocks(
        2)  # timeout is 5, but we've already waited 3 blocks before

    token_network.functions.settleChannel(
        channel_id,
        c1,  # participant_B
        transferred_c1,  # participant_B_transferred_amount
        0,  # participant_B_locked_amount
        EMPTY_LOCKSROOT,  # participant_B_locksroot
        c2,  # participant_A
        transferred_c2,  # participant_A_transferred_amount
        0,  # participant_A_locked_amount
        EMPTY_LOCKSROOT,  # participant_A_locksroot
    ).transact()

    # Wait until the ChannelSettled is confirmed
    # Let the MS claim its reward
    gevent.sleep(0.1)
    assert [e.event for e in query()] == [
        MonitoringServiceEvent.NEW_BALANCE_PROOF_RECEIVED,
        MonitoringServiceEvent.REWARD_CLAIMED,
    ]

    final_balance = user_deposit_contract.functions.balances(
        monitoring_service.address).call()
    assert final_balance == (initial_balance + reward_amount)

    ms_greenlet.kill()
Example #12
def test_first_allowed_monitoring(
    web3: Web3,
    monitoring_service_contract,
    wait_for_blocks,
    service_registry,
    monitoring_service: MonitoringService,
    request_collector: RequestCollector,
    deposit_to_udc,
    create_channel,
    token_network,
    get_accounts,
    get_private_key,
):
    # pylint: disable=too-many-arguments,too-many-locals,protected-access
    query = create_ms_contract_events_query(web3, monitoring_service_contract.address)
    c1, c2 = get_accounts(2)

    # add deposit for c1
    node_deposit = 10
    deposit_to_udc(c1, node_deposit)

    assert service_registry.functions.hasValidRegistration(monitoring_service.address).call()

    # each client does a transfer
    channel_id = create_channel(c1, c2, settle_timeout=10)[0]

    shared_bp_args = dict(
        channel_identifier=channel_id,
        token_network_address=decode_hex(token_network.address),
        chain_id=monitoring_service.chain_id,
        additional_hash="0x%064x" % 0,
        locked_amount=TokenAmount(0),
        locksroot=encode_hex(LOCKSROOT_OF_NO_LOCKS),
    )
    transferred_c1 = 5
    balance_proof_c1 = HashedBalanceProof(
        nonce=Nonce(1),
        transferred_amount=transferred_c1,
        priv_key=get_private_key(c1),
        **shared_bp_args,
    )
    transferred_c2 = 6
    balance_proof_c2 = HashedBalanceProof(
        nonce=Nonce(2),
        transferred_amount=transferred_c2,
        priv_key=get_private_key(c2),
        **shared_bp_args,
    )
    monitoring_service._process_new_blocks(web3.eth.blockNumber)
    assert len(monitoring_service.context.database.get_token_network_addresses()) > 0

    # c1 asks MS to monitor the channel
    reward_amount = TokenAmount(1)
    request_monitoring = balance_proof_c2.get_request_monitoring(
        privkey=get_private_key(c1),
        reward_amount=reward_amount,
        monitoring_service_contract_address=MonitoringServiceAddress(
            to_canonical_address(monitoring_service_contract.address)
        ),
    )
    request_collector.on_monitor_request(request_monitoring)

    # c2 closes the channel
    token_network.functions.closeChannel(
        channel_id,
        c1,
        c2,
        balance_proof_c1.balance_hash,
        balance_proof_c1.nonce,
        balance_proof_c1.additional_hash,
        balance_proof_c1.signature,
        balance_proof_c1.get_counter_signature(get_private_key(c2)),
    ).transact({"from": c2})

    monitoring_service._process_new_blocks(web3.eth.blockNumber)
    triggered_events = monitoring_service.database.get_scheduled_events(
        max_trigger_block=BlockNumber(web3.eth.blockNumber + 10)
    )
    assert len(triggered_events) == 1

    monitor_trigger = triggered_events[0]
    channel = monitoring_service.database.get_channel(
        token_network_address=TokenNetworkAddress(to_canonical_address(token_network.address)),
        channel_id=channel_id,
    )
    assert channel

    # Calling monitor too early must fail. To test this, we call it two blocks
    # before the trigger block.
    # This should be only one block before, but we trigger one block too late
    # to work around parity's gas estimation. See
    # https://github.com/raiden-network/raiden-services/pull/728
    wait_for_blocks(monitor_trigger.trigger_block_number - web3.eth.blockNumber - 2)
    handle_event(monitor_trigger.event, monitoring_service.context)
    assert [e.event for e in query()] == []

    # If our `monitor` call fails, we won't try again. Force a retry in this
    # test by clearing monitor_tx_hash.
    channel.monitor_tx_hash = None
    monitoring_service.database.upsert_channel(channel)

    # Now we can try again. The first try mined a new block, so now we're one
    # block further and `monitor` should succeed.
    handle_event(monitor_trigger.event, monitoring_service.context)
    assert [e.event for e in query()] == [MonitoringServiceEvent.NEW_BALANCE_PROOF_RECEIVED]
Example #13
def test_e2e(  # pylint: disable=too-many-arguments,too-many-locals
    web3,
    monitoring_service_contract,
    user_deposit_contract,
    wait_for_blocks,
    service_registry,
    monitoring_service: MonitoringService,
    request_collector: RequestCollector,
    deposit_to_udc,
    create_channel,
    token_network,
    get_accounts,
    get_private_key,
):
    """Test complete message lifecycle
    1) client opens channel & submits monitoring request
    2) other client closes channel
    3) MS registers channelClose event
    4) MS calls monitoring contract update
    5) wait for channel settle
    6) MS claims the reward
    """
    query = create_ms_contract_events_query(web3, monitoring_service_contract.address)
    initial_balance = user_deposit_contract.functions.balances(monitoring_service.address).call()
    c1, c2 = get_accounts(2)

    # add deposit for c1
    node_deposit = 10
    deposit_to_udc(c1, node_deposit)

    assert service_registry.functions.hasValidRegistration(monitoring_service.address).call()

    # each client does a transfer
    channel_id = create_channel(c1, c2, settle_timeout=5)[0]

    shared_bp_args = dict(
        channel_identifier=channel_id,
        token_network_address=decode_hex(token_network.address),
        chain_id=monitoring_service.chain_id,
        additional_hash="0x%064x" % 0,
        locked_amount=TokenAmount(0),
        locksroot=encode_hex(LOCKSROOT_OF_NO_LOCKS),
    )
    transferred_c1 = 5
    balance_proof_c1 = HashedBalanceProof(
        nonce=Nonce(1),
        transferred_amount=transferred_c1,
        priv_key=get_private_key(c1),
        **shared_bp_args,
    )
    transferred_c2 = 6
    balance_proof_c2 = HashedBalanceProof(
        nonce=Nonce(2),
        transferred_amount=transferred_c2,
        priv_key=get_private_key(c2),
        **shared_bp_args,
    )

    ms_greenlet = gevent.spawn(monitoring_service.start)

    # need to wait here till the MS has some time to react
    gevent.sleep(0.01)
    assert len(monitoring_service.context.database.get_token_network_addresses()) > 0

    # c1 asks MS to monitor the channel
    reward_amount = TokenAmount(1)
    request_monitoring = balance_proof_c2.get_request_monitoring(
        privkey=get_private_key(c1),
        reward_amount=reward_amount,
        monitoring_service_contract_address=MonitoringServiceAddress(
            to_canonical_address(monitoring_service_contract.address)
        ),
    )
    request_collector.on_monitor_request(request_monitoring)

    # c2 closes the channel
    token_network.functions.closeChannel(
        channel_id,
        c1,
        c2,
        balance_proof_c1.balance_hash,
        balance_proof_c1.nonce,
        balance_proof_c1.additional_hash,
        balance_proof_c1.signature,
        balance_proof_c1.get_counter_signature(get_private_key(c2)),
    ).transact({"from": c2})
    # Wait until the MS reacts, which it does after giving the client some time
    # to update the channel itself.

    wait_for_blocks(2)  # 1 block for close + 1 block for triggering the event
    # Now give the monitoring service a chance to submit the missing BP
    gevent.sleep(0.01)
    assert [e.event for e in query()] == [MonitoringServiceEvent.NEW_BALANCE_PROOF_RECEIVED]

    # wait for settle timeout
    # timeout is 5, but we've already waited 3 blocks before. Additionally, one block is
    # added to work around parity running gas estimation on the current block instead of the next.
    wait_for_blocks(3)

    # Let the MS claim its reward
    gevent.sleep(0.01)
    assert [e.event for e in query()] == [
        MonitoringServiceEvent.NEW_BALANCE_PROOF_RECEIVED,
        MonitoringServiceEvent.REWARD_CLAIMED,
    ]

    final_balance = user_deposit_contract.functions.balances(monitoring_service.address).call()
    assert final_balance == (initial_balance + reward_amount)

    ms_greenlet.kill()
Example #14
def test_first_allowed_monitoring(
    web3: Web3,
    monitoring_service_contract,
    service_registry,
    monitoring_service: MonitoringService,
    request_collector: RequestCollector,
    deposit_to_udc,
    create_channel,
    token_network,
    get_accounts,
    get_private_key,
):
    # pylint: disable=too-many-arguments,too-many-locals,protected-access
    query = create_ms_contract_events_query(web3, monitoring_service_contract.address)
    c1, c2 = get_accounts(2)

    # add deposit for c1
    node_deposit = 10
    deposit_to_udc(c1, node_deposit)

    assert service_registry.functions.hasValidRegistration(monitoring_service.address).call()

    # each client does a transfer
    channel_id = create_channel(c1, c2)[0]

    shared_bp_args = dict(
        channel_identifier=channel_id,
        token_network_address=decode_hex(token_network.address),
        chain_id=monitoring_service.chain_id,
        additional_hash="0x%064x" % 0,
        locked_amount=TokenAmount(0),
        locksroot=encode_hex(LOCKSROOT_OF_NO_LOCKS),
    )
    transferred_c1 = 5
    balance_proof_c1 = HashedBalanceProof(
        nonce=Nonce(1),
        transferred_amount=transferred_c1,
        priv_key=get_private_key(c1),
        **shared_bp_args,
    )
    transferred_c2 = 6
    balance_proof_c2 = HashedBalanceProof(
        nonce=Nonce(2),
        transferred_amount=transferred_c2,
        priv_key=get_private_key(c2),
        **shared_bp_args,
    )
    monitoring_service._process_new_blocks(web3.eth.block_number)
    assert len(monitoring_service.context.database.get_token_network_addresses()) > 0

    # c1 asks MS to monitor the channel
    reward_amount = TokenAmount(1)
    request_monitoring = balance_proof_c2.get_request_monitoring(
        privkey=get_private_key(c1),
        reward_amount=reward_amount,
        monitoring_service_contract_address=MonitoringServiceAddress(
            to_canonical_address(monitoring_service_contract.address)
        ),
    )
    request_collector.on_monitor_request(request_monitoring)

    # c2 closes the channel
    token_network.functions.closeChannel(
        channel_id,
        c1,
        c2,
        balance_proof_c1.balance_hash,
        balance_proof_c1.nonce,
        balance_proof_c1.additional_hash,
        balance_proof_c1.signature,
        balance_proof_c1.get_counter_signature(get_private_key(c2)),
    ).transact({"from": c2})

    monitoring_service._process_new_blocks(web3.eth.block_number)

    timestamp_of_closing_block = Timestamp(web3.eth.get_block("latest").timestamp)  # type: ignore
    settle_timeout = int(token_network.functions.settle_timeout().call())
    settleable_after = Timestamp(timestamp_of_closing_block + settle_timeout)

    triggered_events = monitoring_service.database.get_scheduled_events(
        max_trigger_timestamp=settleable_after
    )

    assert len(triggered_events) == 1

    monitor_trigger = triggered_events[0]
    channel = monitoring_service.database.get_channel(
        token_network_address=TokenNetworkAddress(to_canonical_address(token_network.address)),
        channel_id=channel_id,
    )
    assert channel

    # Calling monitor too early must fail. To test this, we call a few seconds
    # before the trigger timestamp.
    web3.testing.timeTravel(monitor_trigger.trigger_timestamp - 5)  # type: ignore

    with pytest.raises(TransactionTooEarlyException):
        handle_event(monitor_trigger.event, monitoring_service.context)
    assert [e.event for e in query()] == []

    # If our `monitor` call fails, we won't try again. Force a retry in this
    # test by clearing monitor_tx_hash.
    channel.monitor_tx_hash = None
    monitoring_service.database.upsert_channel(channel)

    # Now we can try again. After time traveling to the trigger timestamp,
    # `monitor` should succeed.
    web3.testing.timeTravel(monitor_trigger.trigger_timestamp)  # type: ignore
    handle_event(monitor_trigger.event, monitoring_service.context)
    assert [e.event for e in query()] == [MonitoringServiceEvent.NEW_BALANCE_PROOF_RECEIVED]
Example #15
def test_reschedule_too_early_events(
    web3: Web3,
    monitoring_service_contract,
    monitoring_service: MonitoringService,
    request_collector: RequestCollector,
    deposit_to_udc,
    create_channel,
    token_network,
    get_accounts,
    get_private_key,
):
    # pylint: disable=too-many-arguments,too-many-locals,protected-access
    c1, c2 = get_accounts(2)

    # add deposit for c1
    node_deposit = 10
    deposit_to_udc(c1, node_deposit)

    # each client does a transfer
    channel_id = create_channel(c1, c2)[0]

    shared_bp_args = dict(
        channel_identifier=channel_id,
        token_network_address=decode_hex(token_network.address),
        chain_id=monitoring_service.chain_id,
        additional_hash="0x%064x" % 0,
        locked_amount=TokenAmount(0),
        locksroot=encode_hex(LOCKSROOT_OF_NO_LOCKS),
    )
    transferred_c1 = 5
    balance_proof_c1 = HashedBalanceProof(
        nonce=Nonce(1),
        transferred_amount=transferred_c1,
        priv_key=get_private_key(c1),
        **shared_bp_args,
    )
    transferred_c2 = 6
    balance_proof_c2 = HashedBalanceProof(
        nonce=Nonce(2),
        transferred_amount=transferred_c2,
        priv_key=get_private_key(c2),
        **shared_bp_args,
    )
    monitoring_service._process_new_blocks(web3.eth.block_number)
    assert len(monitoring_service.context.database.get_token_network_addresses()) > 0

    # c1 asks MS to monitor the channel
    reward_amount = TokenAmount(1)
    request_monitoring = balance_proof_c2.get_request_monitoring(
        privkey=get_private_key(c1),
        reward_amount=reward_amount,
        monitoring_service_contract_address=MonitoringServiceAddress(
            to_canonical_address(monitoring_service_contract.address)
        ),
    )
    request_collector.on_monitor_request(request_monitoring)

    # c2 closes the channel
    token_network.functions.closeChannel(
        channel_id,
        c1,
        c2,
        balance_proof_c1.balance_hash,
        balance_proof_c1.nonce,
        balance_proof_c1.additional_hash,
        balance_proof_c1.signature,
        balance_proof_c1.get_counter_signature(get_private_key(c2)),
    ).transact({"from": c2})

    monitoring_service._process_new_blocks(web3.eth.block_number)

    timestamp_of_closing_block = Timestamp(web3.eth.get_block("latest").timestamp)  # type: ignore
    settle_timeout = int(token_network.functions.settle_timeout().call())
    settleable_after = Timestamp(timestamp_of_closing_block + settle_timeout)

    scheduled_events = monitoring_service.database.get_scheduled_events(
        max_trigger_timestamp=settleable_after
    )

    channel = monitoring_service.database.get_channel(
        token_network_address=TokenNetworkAddress(to_canonical_address(token_network.address)),
        channel_id=channel_id,
    )

    monitor_trigger = _first_allowed_timestamp_to_monitor(
        scheduled_events[0].event.token_network_address, channel, monitoring_service.context
    )

    assert len(scheduled_events) == 1
    first_trigger_timestamp = scheduled_events[0].trigger_timestamp
    assert first_trigger_timestamp == monitor_trigger

    # Calling monitor too early must fail
    monitoring_service.get_timestamp_now = lambda: settleable_after
    monitoring_service._trigger_scheduled_events()  # pylint: disable=protected-access
    assert monitoring_service.try_scheduled_events_after == pytest.approx(settleable_after, 100)

    # Failed event is still scheduled, since it was too early for it to succeed
    scheduled_events = monitoring_service.database.get_scheduled_events(settleable_after)
    assert len(scheduled_events) == 1
    # ...and it should be blocked from retrying for a while.
    assert (
        monitoring_service.try_scheduled_events_after
        == monitoring_service.get_timestamp_now() + MAX_SCHEDULED_EVENTS_RETRY_FREQUENCY
    )

    # Now it could be executed, but won't due to MAX_SCHEDULED_EVENTS_RETRY_FREQUENCY
    web3.testing.timeTravel(settleable_after - 1)  # type: ignore
    monitoring_service._trigger_scheduled_events()  # pylint: disable=protected-access
    assert len(monitoring_service.database.get_scheduled_events(settleable_after)) == 1

    # Check that it does succeed if it weren't for MAX_SCHEDULED_EVENTS_RETRY_FREQUENCY
    monitoring_service.try_scheduled_events_after = monitoring_service.get_timestamp_now() - 1
    monitoring_service._trigger_scheduled_events()  # pylint: disable=protected-access
    assert len(monitoring_service.database.get_scheduled_events(settleable_after)) == 0
Example #16
def test_first_allowed_monitoring(
    web3,
    monitoring_service_contract,
    wait_for_blocks,
    service_registry,
    monitoring_service: MonitoringService,
    request_collector: RequestCollector,
    contracts_manager,
    deposit_to_udc,
    create_channel,
    token_network,
    get_accounts,
    get_private_key,
):
    # pylint: disable=too-many-arguments,too-many-locals,protected-access
    query = create_ms_contract_events_query(
        web3, contracts_manager, monitoring_service_contract.address)
    ms_address_hex = to_checksum_address(monitoring_service.address)
    c1, c2 = get_accounts(2)

    # add deposit for c1
    node_deposit = 10
    deposit_to_udc(c1, node_deposit)

    deposit = service_registry.functions.deposits(ms_address_hex).call()
    assert deposit > 0

    # each client does a transfer
    channel_id = create_channel(c1, c2, settle_timeout=10)[0]

    shared_bp_args = dict(
        channel_identifier=channel_id,
        token_network_address=decode_hex(token_network.address),
        chain_id=ChainID(1),
        additional_hash="0x%064x" % 0,
        locked_amount=TokenAmount(0),
        locksroot=encode_hex(EMPTY_LOCKSROOT),
    )
    transferred_c1 = 5
    balance_proof_c1 = HashedBalanceProof(nonce=Nonce(1),
                                          transferred_amount=transferred_c1,
                                          priv_key=get_private_key(c1),
                                          **shared_bp_args)
    transferred_c2 = 6
    balance_proof_c2 = HashedBalanceProof(nonce=Nonce(2),
                                          transferred_amount=transferred_c2,
                                          priv_key=get_private_key(c2),
                                          **shared_bp_args)
    monitoring_service._process_new_blocks(web3.eth.blockNumber)
    assert monitoring_service.context.ms_state.blockchain_state.token_network_addresses

    # c1 asks MS to monitor the channel
    reward_amount = TokenAmount(1)
    request_monitoring = balance_proof_c2.get_request_monitoring(
        get_private_key(c1), reward_amount,
        monitoring_service_contract.address)
    request_collector.on_monitor_request(request_monitoring)

    # c2 closes the channel
    token_network.functions.closeChannel(
        channel_id,
        c1,
        balance_proof_c1.balance_hash,
        balance_proof_c1.nonce,
        balance_proof_c1.additional_hash,
        balance_proof_c1.signature,
    ).transact({"from": c2})

    monitoring_service._process_new_blocks(web3.eth.blockNumber)
    triggered_events = monitoring_service.database.get_scheduled_events(
        max_trigger_block=web3.eth.blockNumber + 10)
    assert len(triggered_events) == 1

    monitor_trigger = triggered_events[0]
    channel = monitoring_service.database.get_channel(
        token_network_address=decode_hex(token_network.address),
        channel_id=channel_id)
    assert channel

    # Calling monitor too early must fail. To test this, we call it one block
    # before the trigger block.
    wait_for_blocks(monitor_trigger.trigger_block_number -
                    web3.eth.blockNumber - 1)
    handle_event(monitor_trigger.event, monitoring_service.context)
    assert [e.event for e in query()] == []

    # If our `monitor` call fails, we won't try again. Force a retry in this
    # test by clearing closing_tx_hash.
    channel.closing_tx_hash = None
    monitoring_service.database.upsert_channel(channel)

    # Now we can try again. The first try mined a new block, so now we're one
    # block further and `monitor` should succeed.
    handle_event(monitor_trigger.event, monitoring_service.context)
    assert [e.event for e in query()
            ] == [MonitoringServiceEvent.NEW_BALANCE_PROOF_RECEIVED]