def main(  # pylint: disable=too-many-arguments
    private_key: str,
    state_db: str,
    web3: Web3,
    contracts: Dict[str, Contract],
    start_block: BlockNumber,
    confirmations: BlockNumber,
    min_reward: int,
    debug_shell: bool,
) -> int:
    """ The Monitoring service for the Raiden Network. """
    log.info("Starting Raiden Monitoring Service")
    # Wire the service to the chain, the deployed contracts and the state db.
    ms = MonitoringService(
        web3=web3,
        private_key=private_key,
        contracts=contracts,
        sync_start_block=start_block,
        required_confirmations=confirmations,
        db_filename=state_db,
        min_reward=min_reward,
    )
    if debug_shell:
        # Drop into an interactive shell instead of running the service;
        # IPython is imported lazily so it is only required in debug mode.
        import IPython

        IPython.embed()
        return 0
    # Blocks until the service is stopped.
    ms.start()
    return 0
def monitoring_service(  # pylint: disable=too-many-arguments
    ms_address,
    web3: Web3,
    monitoring_service_contract,
    user_deposit_contract,
    token_network_registry_contract,
    ms_database: Database,
    get_private_key,
    service_registry,
):
    """Fixture: a MonitoringService wired to the test contracts.

    Starts syncing from block 0 with zero confirmations and a short poll
    interval so tests run fast, and shares its database with the request
    collector fixture.
    """
    ms = MonitoringService(
        web3=web3,
        private_key=get_private_key(ms_address),
        contracts={
            CONTRACT_TOKEN_NETWORK_REGISTRY: token_network_registry_contract,
            CONTRACT_MONITORING_SERVICE: monitoring_service_contract,
            CONTRACT_USER_DEPOSIT: user_deposit_contract,
            CONTRACT_SERVICE_REGISTRY: service_registry,
        },
        sync_start_block=BlockNumber(0),
        required_confirmations=BlockTimeout(0),  # for faster tests
        poll_interval=0.01,  # for faster tests
        db_filename=":memory:",
    )
    # We need a shared db between MS and RC so the MS can use MR saved by the RC
    ms.context.database = ms_database
    ms.database = ms_database
    return ms
def test_trigger_scheduled_events(monitoring_service: MonitoringService):
    """A scheduled event that is due now is executed and removed by the trigger run."""
    monitoring_service.context.required_confirmations = 5
    create_default_token_network(monitoring_service.context)

    event = ActionMonitoringTriggeredEvent(
        token_network_address=DEFAULT_TOKEN_NETWORK_ADDRESS,
        channel_identifier=make_channel_identifier(),
        non_closing_participant=make_address(),
    )
    now = Timestamp(get_posix_utc_time_now())
    database = monitoring_service.database

    # Nothing is scheduled yet
    assert len(database.get_scheduled_events(now)) == 0

    monitoring_service.context.database.upsert_scheduled_event(
        ScheduledEvent(trigger_timestamp=now, event=event)
    )
    assert len(database.get_scheduled_events(now)) == 1

    # Running the trigger must consume the due event
    monitoring_service._trigger_scheduled_events()  # pylint: disable=protected-access
    assert len(database.get_scheduled_events(now)) == 0
def new_ms(filename):
    """Build a MonitoringService backed by a mocked web3 and a mocked MS contract."""
    service = MonitoringService(
        web3=Web3Mock(),
        private_key=server_private_key,
        contracts=contracts,
        db_filename=os.path.join(tmpdir, filename),
    )
    # Replace the real monitoring service contract with a mock and expose the
    # `monitor(...).transact` call so tests can inspect/override it.
    contract_mock = Mock()
    service.context.monitoring_service_contract = contract_mock
    service.monitor_mock = contract_mock.functions.monitor.return_value.transact  # type: ignore
    service.monitor_mock.return_value = bytes(0)  # type: ignore
    return service
def test_purge_old_monitor_requests(
    ms_database: Database,
    build_request_monitoring,
    request_collector,
    monitoring_service: MonitoringService,
):
    """Old MRs for unknown channels are purged; recent or known-channel MRs survive."""
    # We'll test the purge on MRs for three different channels
    req_mons = [
        build_request_monitoring(channel_id=1),
        build_request_monitoring(channel_id=2),
        build_request_monitoring(channel_id=3),
    ]
    for req_mon in req_mons:
        request_collector.on_monitor_request(req_mon)

    # Channel 1 exists in the db
    token_network_address = req_mons[0].balance_proof.token_network_address
    ms_database.conn.execute(
        "INSERT INTO token_network VALUES (?, ?)",
        [
            to_checksum_address(token_network_address),
            DEFAULT_TOKEN_NETWORK_SETTLE_TIMEOUT
        ],
    )
    ms_database.upsert_channel(
        Channel(
            identifier=ChannelID(1),
            token_network_address=token_network_address,
            participant1=Address(b"1" * 20),
            participant2=Address(b"2" * 20),
        ))

    # The request for channel 2 is recent (default), but the one for channel 3
    # has been added 16 minutes ago.
    saved_at = (datetime.utcnow() - timedelta(minutes=16)).timestamp()
    ms_database.conn.execute(
        """
        UPDATE monitor_request
        SET saved_at = ?
        WHERE channel_identifier = ?
        """,
        [saved_at, hex256(3)],
    )

    monitoring_service._purge_old_monitor_requests()  # pylint: disable=protected-access

    # Only the MR for the expired, unknown channel 3 must be gone.
    remaining_mrs = ms_database.conn.execute(
        """
        SELECT channel_identifier, waiting_for_channel
        FROM monitor_request
        ORDER BY channel_identifier
        """
    ).fetchall()
    assert [tuple(mr) for mr in remaining_mrs] == [(1, False), (2, True)]
def new_ms(filename):
    """Build a fast-polling MonitoringService on a mocked web3 with a mocked contract."""
    service = MonitoringService(
        web3=Web3Mock(),
        private_key=server_private_key,
        contracts=contracts,
        db_filename=os.path.join(tmpdir, filename),
        poll_interval=0,
        required_confirmations=BlockTimeout(0),
        sync_start_block=BlockNumber(0),
    )
    # Swap in a mock monitoring-service contract and expose its
    # `monitor(...).transact` hook for assertions in tests.
    contract_mock = Mock()
    service.context.monitoring_service_contract = contract_mock
    service.monitor_mock = contract_mock.functions.monitor.return_value.transact
    service.monitor_mock.return_value = bytes(0)
    return service
def test_check_pending_transactions(
    web3: Web3,
    wait_for_blocks: Callable[[int], None],
    monitoring_service: MonitoringService
):
    """Waiting transactions are only removed once their receipt has enough confirmations."""
    monitoring_service.context.required_confirmations = 3
    monitoring_service.database.add_waiting_transaction(waiting_tx_hash=make_transaction_hash())

    # Both failed (0) and successful (1) receipts must be handled the same way.
    for tx_status in (0, 1):
        tx_receipt = {"blockNumber": web3.eth.blockNumber, "status": tx_status}
        with patch.object(
            web3.eth, "getTransactionReceipt", Mock(return_value=tx_receipt)
        ), patch.object(monitoring_service.database, "remove_waiting_transaction") as remove_mock:
            # The tx gets removed only after 3 confirmation blocks have passed.
            for should_call in (False, False, False, True):
                monitoring_service._check_pending_transactions()  # pylint: disable=protected-access # noqa
                assert remove_mock.called == should_call
                wait_for_blocks(1)
def monitoring_service_mock() -> Generator[MonitoringService, None, None]:
    """Fixture: a MonitoringService built entirely on mocks (no real chain access).

    The UDC mock reports a fixed effective balance of 10000 and a fixed token
    address; all other contracts are plain mocks with distinct addresses.
    """
    web3_mock = Web3Mock()
    mock_udc = Mock(address=bytes([8] * 20))
    mock_udc.functions.effectiveBalance.return_value.call.return_value = 10000
    mock_udc.functions.token.return_value.call.return_value = to_checksum_address(
        bytes([7] * 20))
    ms = MonitoringService(
        web3=web3_mock,
        # Well-known throwaway test key, never used on a real network.
        private_key=PrivateKey(
            decode_hex(
                "3a1076bf45ab87712ad64ccb3b10217737f7faacbf2872e88fdd9a537d8fe266"
            )),
        db_filename=":memory:",
        contracts={
            CONTRACT_TOKEN_NETWORK_REGISTRY: Mock(address=bytes([9] * 20)),
            CONTRACT_USER_DEPOSIT: mock_udc,
            CONTRACT_MONITORING_SERVICE: Mock(address=bytes([1] * 20)),
            CONTRACT_SERVICE_REGISTRY: Mock(address=bytes([2] * 20)),
        },
        sync_start_block=BlockNumber(0),
        required_confirmations=BlockTimeout(0),
        poll_interval=0,
    )
    yield ms
def new_ms(filename):
    """Build a MonitoringService against the test contracts, with mocked listener/contract."""
    service = MonitoringService(
        web3=web3,
        contract_manager=contracts_manager,
        private_key=server_private_key,
        registry_address=token_network_registry_contract.address,
        monitor_contract_address=monitoring_service_contract.address,
        db_filename=os.path.join(tmpdir, filename),
        user_deposit_contract_address=user_deposit_contract.address,
    )
    # Feed canned events instead of a real blockchain listener.
    service.bcl = MockBlockchainListener(events)  # type: ignore
    # Mock out the MS contract so `monitor(...).transact` can be inspected.
    contract_mock = Mock()
    service.context.monitoring_service_contract = contract_mock
    service.monitor_mock = contract_mock.functions.monitor.return_value.transact  # type:ignore
    service.monitor_mock.return_value = bytes(0)  # type:ignore
    return service
def new_ms(filename):
    """Build a MonitoringService from the deployed test contracts with mocked I/O."""
    service = MonitoringService(
        web3=web3,
        private_key=server_private_key,
        contracts={
            CONTRACT_TOKEN_NETWORK_REGISTRY: token_network_registry_contract,
            CONTRACT_MONITORING_SERVICE: monitoring_service_contract,
            CONTRACT_USER_DEPOSIT: user_deposit_contract,
        },
        db_filename=os.path.join(tmpdir, filename),
    )
    # Replace the blockchain listener with one replaying prepared events.
    service.bcl = MockBlockchainListener(events)  # type: ignore
    # Mock the MS contract and expose the `monitor(...).transact` hook.
    contract_mock = Mock()
    service.context.monitoring_service_contract = contract_mock
    service.monitor_mock = contract_mock.functions.monitor.return_value.transact  # type: ignore
    service.monitor_mock.return_value = bytes(0)  # type: ignore
    return service
def main(  # pylint: disable=too-many-arguments
    private_key: str,
    state_db: str,
    web3: Web3,
    contracts: Dict[str, Contract],
    start_block: BlockNumber,
    confirmations: BlockNumber,
    min_reward: int,
    debug_shell: bool,
) -> int:
    """ The Monitoring service for the Raiden Network. """
    log.info("Starting Raiden Monitoring Service")
    log.info("Web3 client", node_address=web3.providers[0].endpoint_uri)
    # Log all contract addresses in checksum form for operator visibility.
    hex_addresses = {
        name: to_checksum_address(contract.address)
        for name, contract in contracts.items()
    }
    log.info("Contract information", addresses=hex_addresses, start_block=start_block)
    ms = MonitoringService(
        web3=web3,
        private_key=private_key,
        contracts=contracts,
        sync_start_block=start_block,
        required_confirmations=confirmations,
        db_filename=state_db,
        min_reward=min_reward,
    )
    if debug_shell:
        # Interactive debugging session instead of running the service.
        import IPython

        IPython.embed()
        return 0
    # Blocks until the service is stopped.
    ms.start()
    return 0
def test_trigger_scheduled_events(monitoring_service: MonitoringService):
    """An event scheduled on an unconfirmed block is consumed by the trigger run."""
    monitoring_service.context.required_confirmations = 5
    create_default_token_network(monitoring_service.context)

    event = ActionMonitoringTriggeredEvent(
        token_network_address=DEFAULT_TOKEN_NETWORK_ADDRESS,
        channel_identifier=make_channel_identifier(),
        non_closing_participant=make_address(),
    )

    current_confirmed_block = monitoring_service.context.latest_confirmed_block
    # Trigger the event on a currently unconfirmed block
    trigger_block = BlockNumber(current_confirmed_block + 1)
    database = monitoring_service.database

    assert len(database.get_scheduled_events(trigger_block)) == 0
    monitoring_service.context.database.upsert_scheduled_event(
        ScheduledEvent(trigger_block_number=trigger_block, event=event)
    )
    assert len(database.get_scheduled_events(trigger_block)) == 1

    # The trigger run must remove the stored event again
    monitoring_service._trigger_scheduled_events()  # pylint: disable=protected-access
    assert len(database.get_scheduled_events(trigger_block)) == 0
def main(
    private_key: str,
    state_db: str,
    web3: Web3,
    contracts: Dict[str, Contract],
    start_block: BlockNumber,
    confirmations: BlockNumber,
    min_reward: int,
) -> int:
    """ The Monitoring service for the Raiden Network. """
    log.info("Starting Raiden Monitoring Service")
    # Assemble the service from the chain connection, deployed contracts and
    # the persistent state database, then run it until stopped.
    monitoring_service = MonitoringService(
        web3=web3,
        private_key=private_key,
        contracts=contracts,
        sync_start_block=start_block,
        required_confirmations=confirmations,
        db_filename=state_db,
        min_reward=min_reward,
    )
    monitoring_service.start()
    return 0
def monitoring_service(
    server_private_key,
    web3,
    monitoring_service_contract,
    user_deposit_contract,
    token_network_registry_contract,
    send_funds,
    contracts_manager: ContractManager,
    service_registry,
    custom_token,
    ms_database,
):
    """Fixture: a MonitoringService registered in the ServiceRegistry.

    Mints and deposits tokens to register the MS address, then builds the
    service with fast test settings and shares its database with the
    request collector.
    """
    # register MS in ServiceRegistry
    ms_address = private_key_to_address(server_private_key)
    send_funds(ms_address)
    deposit = 10  # any amount is sufficient for registration, right now
    custom_token.functions.mint(deposit).transact({'from': ms_address})
    custom_token.functions.approve(
        service_registry.address,
        deposit,
    ).transact({'from': ms_address})
    service_registry.functions.deposit(deposit).transact({'from': ms_address})

    ms = MonitoringService(
        web3=web3,
        contract_manager=contracts_manager,
        private_key=server_private_key,
        registry_address=token_network_registry_contract.address,
        monitor_contract_address=monitoring_service_contract.address,
        user_deposit_contract_address=user_deposit_contract.address,
        required_confirmations=1,  # for faster tests
        poll_interval=0.01,  # for faster tests
        db_filename=':memory:',
    )
    # We need a shared db between MS and RC so the MS can use MR saved by the RC
    ms.context.db = ms_database
    return ms
def main(
    keystore_file: str,
    password: str,
    eth_rpc: str,
    registry_address: Address,
    monitor_contract_address: Address,
    user_deposit_contract_address: Address,
    start_block: int,
    confirmations: int,
    log_level: str,
    state_db: str,
    min_reward: int,
) -> None:
    # CLI entry point: decrypt the keystore, resolve contract addresses for
    # the connected chain and run the monitoring service.
    setup_logging(log_level)

    # Decrypt the operator's private key; abort on a wrong password.
    with open(keystore_file, 'r') as keystore:
        try:
            private_key = Account.decrypt(
                keyfile_json=json.load(keystore),
                password=password,
            )
        except ValueError as error:
            log.critical(
                'Could not decode keyfile with given password. Please try again.',
                reason=str(error),
            )
            sys.exit(1)

    provider = HTTPProvider(eth_rpc)
    web3 = Web3(provider)
    contract_manager = ContractManager(contracts_precompiled_path())

    # Resolve contract addresses and sync start block, either from the given
    # overrides or from the deployment data for this chain id.
    contract_infos = get_contract_addresses_and_start_block(
        chain_id=int(web3.net.version),
        contracts_version=None,
        token_network_registry_address=registry_address,
        monitor_contract_address=monitor_contract_address,
        user_deposit_contract_address=user_deposit_contract_address,
        start_block=start_block,
    )
    if contract_infos is None:
        log.critical(
            'Could not find correct contracts to use. Please check your configuration'
        )
        sys.exit(1)
    else:
        log.info(
            'Contract information',
            registry_address=contract_infos[CONTRACT_TOKEN_NETWORK_REGISTRY],
            monitor_contract_address=contract_infos[
                CONTRACT_MONITORING_SERVICE],
            user_deposit_contract_address=contract_infos[
                CONTRACT_USER_DEPOSIT],
            sync_start_block=contract_infos[START_BLOCK_ID],
        )

    ms = MonitoringService(
        web3=web3,
        contract_manager=contract_manager,
        private_key=private_key,
        registry_address=contract_infos[CONTRACT_TOKEN_NETWORK_REGISTRY],
        monitor_contract_address=contract_infos[CONTRACT_MONITORING_SERVICE],
        user_deposit_contract_address=contract_infos[CONTRACT_USER_DEPOSIT],
        sync_start_block=contract_infos[START_BLOCK_ID],
        required_confirmations=confirmations,
        db_filename=state_db,
        min_reward=min_reward,
    )
    # Blocks until the service is stopped.
    ms.start()
def test_first_allowed_monitoring(
    web3: Web3,
    monitoring_service_contract,
    wait_for_blocks,
    service_registry,
    monitoring_service: MonitoringService,
    request_collector: RequestCollector,
    deposit_to_udc,
    create_channel,
    token_network,
    get_accounts,
    get_private_key,
):
    """`monitor` must fail before the trigger block and succeed once it is reached."""
    # pylint: disable=too-many-arguments,too-many-locals,protected-access
    query = create_ms_contract_events_query(web3, monitoring_service_contract.address)
    c1, c2 = get_accounts(2)

    # add deposit for c1
    node_deposit = 10
    deposit_to_udc(c1, node_deposit)

    assert service_registry.functions.hasValidRegistration(monitoring_service.address).call()

    # each client does a transfer
    channel_id = create_channel(c1, c2, settle_timeout=10)[0]
    shared_bp_args = dict(
        channel_identifier=channel_id,
        token_network_address=decode_hex(token_network.address),
        chain_id=monitoring_service.chain_id,
        additional_hash="0x%064x" % 0,
        locked_amount=TokenAmount(0),
        locksroot=encode_hex(LOCKSROOT_OF_NO_LOCKS),
    )
    transferred_c1 = 5
    balance_proof_c1 = HashedBalanceProof(
        nonce=Nonce(1),
        transferred_amount=transferred_c1,
        priv_key=get_private_key(c1),
        **shared_bp_args,
    )
    transferred_c2 = 6
    balance_proof_c2 = HashedBalanceProof(
        nonce=Nonce(2),
        transferred_amount=transferred_c2,
        priv_key=get_private_key(c2),
        **shared_bp_args,
    )
    monitoring_service._process_new_blocks(web3.eth.blockNumber)
    assert len(monitoring_service.context.database.get_token_network_addresses()) > 0

    # c1 asks MS to monitor the channel
    reward_amount = TokenAmount(1)
    request_monitoring = balance_proof_c2.get_request_monitoring(
        privkey=get_private_key(c1),
        reward_amount=reward_amount,
        monitoring_service_contract_address=MonitoringServiceAddress(
            to_canonical_address(monitoring_service_contract.address)
        ),
    )
    request_collector.on_monitor_request(request_monitoring)

    # c2 closes the channel
    token_network.functions.closeChannel(
        channel_id,
        c1,
        c2,
        balance_proof_c1.balance_hash,
        balance_proof_c1.nonce,
        balance_proof_c1.additional_hash,
        balance_proof_c1.signature,
        balance_proof_c1.get_counter_signature(get_private_key(c2)),
    ).transact({"from": c2})
    monitoring_service._process_new_blocks(web3.eth.blockNumber)

    # Closing the channel must have scheduled exactly one monitoring event.
    triggered_events = monitoring_service.database.get_scheduled_events(
        max_trigger_block=BlockNumber(web3.eth.blockNumber + 10)
    )
    assert len(triggered_events) == 1
    monitor_trigger = triggered_events[0]
    channel = monitoring_service.database.get_channel(
        token_network_address=TokenNetworkAddress(to_canonical_address(token_network.address)),
        channel_id=channel_id,
    )
    assert channel

    # Calling monitor too early must fail. To test this, we call it two block
    # before the trigger block.
    # This should be only one block before, but we trigger one block too late
    # to work around parity's gas estimation. See
    # https://github.com/raiden-network/raiden-services/pull/728
    wait_for_blocks(monitor_trigger.trigger_block_number - web3.eth.blockNumber - 2)
    handle_event(monitor_trigger.event, monitoring_service.context)
    assert [e.event for e in query()] == []

    # If our `monitor` call fails, we won't try again. Force a retry in this
    # test by clearing monitor_tx_hash.
    channel.monitor_tx_hash = None
    monitoring_service.database.upsert_channel(channel)

    # Now we can try again. The first try mined a new block, so now we're one
    # block further and `monitor` should succeed.
    handle_event(monitor_trigger.event, monitoring_service.context)
    assert [e.event for e in query()] == [MonitoringServiceEvent.NEW_BALANCE_PROOF_RECEIVED]
def test_first_allowed_monitoring(
    web3: Web3,
    monitoring_service_contract,
    service_registry,
    monitoring_service: MonitoringService,
    request_collector: RequestCollector,
    deposit_to_udc,
    create_channel,
    token_network,
    get_accounts,
    get_private_key,
):
    """`monitor` must raise before the trigger timestamp and succeed after it."""
    # pylint: disable=too-many-arguments,too-many-locals,protected-access
    query = create_ms_contract_events_query(web3, monitoring_service_contract.address)
    c1, c2 = get_accounts(2)

    # add deposit for c1
    node_deposit = 10
    deposit_to_udc(c1, node_deposit)

    assert service_registry.functions.hasValidRegistration(monitoring_service.address).call()

    # each client does a transfer
    channel_id = create_channel(c1, c2)[0]
    shared_bp_args = dict(
        channel_identifier=channel_id,
        token_network_address=decode_hex(token_network.address),
        chain_id=monitoring_service.chain_id,
        additional_hash="0x%064x" % 0,
        locked_amount=TokenAmount(0),
        locksroot=encode_hex(LOCKSROOT_OF_NO_LOCKS),
    )
    transferred_c1 = 5
    balance_proof_c1 = HashedBalanceProof(
        nonce=Nonce(1),
        transferred_amount=transferred_c1,
        priv_key=get_private_key(c1),
        **shared_bp_args,
    )
    transferred_c2 = 6
    balance_proof_c2 = HashedBalanceProof(
        nonce=Nonce(2),
        transferred_amount=transferred_c2,
        priv_key=get_private_key(c2),
        **shared_bp_args,
    )
    monitoring_service._process_new_blocks(web3.eth.block_number)
    assert len(monitoring_service.context.database.get_token_network_addresses()) > 0

    # c1 asks MS to monitor the channel
    reward_amount = TokenAmount(1)
    request_monitoring = balance_proof_c2.get_request_monitoring(
        privkey=get_private_key(c1),
        reward_amount=reward_amount,
        monitoring_service_contract_address=MonitoringServiceAddress(
            to_canonical_address(monitoring_service_contract.address)
        ),
    )
    request_collector.on_monitor_request(request_monitoring)

    # c2 closes the channel
    token_network.functions.closeChannel(
        channel_id,
        c1,
        c2,
        balance_proof_c1.balance_hash,
        balance_proof_c1.nonce,
        balance_proof_c1.additional_hash,
        balance_proof_c1.signature,
        balance_proof_c1.get_counter_signature(get_private_key(c2)),
    ).transact({"from": c2})
    monitoring_service._process_new_blocks(web3.eth.block_number)

    # The channel becomes settleable `settle_timeout` seconds after closing.
    timestamp_of_closing_block = Timestamp(web3.eth.get_block("latest").timestamp)  # type: ignore
    settle_timeout = int(token_network.functions.settle_timeout().call())
    settleable_after = Timestamp(timestamp_of_closing_block + settle_timeout)

    triggered_events = monitoring_service.database.get_scheduled_events(
        max_trigger_timestamp=settleable_after
    )
    assert len(triggered_events) == 1
    monitor_trigger = triggered_events[0]
    channel = monitoring_service.database.get_channel(
        token_network_address=TokenNetworkAddress(to_canonical_address(token_network.address)),
        channel_id=channel_id,
    )
    assert channel

    # Calling monitor too early must fail. To test this, we call a few seconds
    # before the trigger timestamp.
    web3.testing.timeTravel(monitor_trigger.trigger_timestamp - 5)  # type: ignore
    with pytest.raises(TransactionTooEarlyException):
        handle_event(monitor_trigger.event, monitoring_service.context)
    assert [e.event for e in query()] == []

    # If our `monitor` call fails, we won't try again. Force a retry in this
    # test by clearing monitor_tx_hash.
    channel.monitor_tx_hash = None
    monitoring_service.database.upsert_channel(channel)

    # Now we can try again. The first try mined a new block, so now we're one
    # block further and `monitor` should succeed.
    web3.testing.timeTravel(monitor_trigger.trigger_timestamp)  # type: ignore
    handle_event(monitor_trigger.event, monitoring_service.context)
    assert [e.event for e in query()] == [MonitoringServiceEvent.NEW_BALANCE_PROOF_RECEIVED]
def test_reschedule_too_early_events(
    web3: Web3,
    monitoring_service_contract,
    monitoring_service: MonitoringService,
    request_collector: RequestCollector,
    deposit_to_udc,
    create_channel,
    token_network,
    get_accounts,
    get_private_key,
):
    """Too-early monitor events stay scheduled and retries are rate limited."""
    # pylint: disable=too-many-arguments,too-many-locals,protected-access
    c1, c2 = get_accounts(2)

    # add deposit for c1
    node_deposit = 10
    deposit_to_udc(c1, node_deposit)

    # each client does a transfer
    channel_id = create_channel(c1, c2)[0]
    shared_bp_args = dict(
        channel_identifier=channel_id,
        token_network_address=decode_hex(token_network.address),
        chain_id=monitoring_service.chain_id,
        additional_hash="0x%064x" % 0,
        locked_amount=TokenAmount(0),
        locksroot=encode_hex(LOCKSROOT_OF_NO_LOCKS),
    )
    transferred_c1 = 5
    balance_proof_c1 = HashedBalanceProof(
        nonce=Nonce(1),
        transferred_amount=transferred_c1,
        priv_key=get_private_key(c1),
        **shared_bp_args,
    )
    transferred_c2 = 6
    balance_proof_c2 = HashedBalanceProof(
        nonce=Nonce(2),
        transferred_amount=transferred_c2,
        priv_key=get_private_key(c2),
        **shared_bp_args,
    )
    monitoring_service._process_new_blocks(web3.eth.block_number)
    assert len(monitoring_service.context.database.get_token_network_addresses()) > 0

    # c1 asks MS to monitor the channel
    reward_amount = TokenAmount(1)
    request_monitoring = balance_proof_c2.get_request_monitoring(
        privkey=get_private_key(c1),
        reward_amount=reward_amount,
        monitoring_service_contract_address=MonitoringServiceAddress(
            to_canonical_address(monitoring_service_contract.address)
        ),
    )
    request_collector.on_monitor_request(request_monitoring)

    # c2 closes the channel
    token_network.functions.closeChannel(
        channel_id,
        c1,
        c2,
        balance_proof_c1.balance_hash,
        balance_proof_c1.nonce,
        balance_proof_c1.additional_hash,
        balance_proof_c1.signature,
        balance_proof_c1.get_counter_signature(get_private_key(c2)),
    ).transact({"from": c2})
    monitoring_service._process_new_blocks(web3.eth.block_number)

    # The channel becomes settleable `settle_timeout` seconds after closing.
    timestamp_of_closing_block = Timestamp(web3.eth.get_block("latest").timestamp)  # type: ignore
    settle_timeout = int(token_network.functions.settle_timeout().call())
    settleable_after = Timestamp(timestamp_of_closing_block + settle_timeout)

    scheduled_events = monitoring_service.database.get_scheduled_events(
        max_trigger_timestamp=settleable_after
    )
    channel = monitoring_service.database.get_channel(
        token_network_address=TokenNetworkAddress(to_canonical_address(token_network.address)),
        channel_id=channel_id,
    )

    monitor_trigger = _first_allowed_timestamp_to_monitor(
        scheduled_events[0].event.token_network_address, channel, monitoring_service.context
    )

    assert len(scheduled_events) == 1
    first_trigger_timestamp = scheduled_events[0].trigger_timestamp
    assert first_trigger_timestamp == monitor_trigger

    # Calling monitor too early must fail
    monitoring_service.get_timestamp_now = lambda: settleable_after
    monitoring_service._trigger_scheduled_events()  # pylint: disable=protected-access
    assert monitoring_service.try_scheduled_events_after == pytest.approx(settleable_after, 100)

    # Failed event is still scheduled, since it was too early for it to succeed
    scheduled_events = monitoring_service.database.get_scheduled_events(settleable_after)
    assert len(scheduled_events) == 1

    # ...and it should be blocked from retrying for a while.
    assert (
        monitoring_service.try_scheduled_events_after
        == monitoring_service.get_timestamp_now() + MAX_SCHEDULED_EVENTS_RETRY_FREQUENCY
    )

    # Now it could be executed, but won't due to MAX_SCHEDULED_EVENTS_RETRY_FREQUENCY
    web3.testing.timeTravel(settleable_after - 1)  # type: ignore
    monitoring_service._trigger_scheduled_events()  # pylint: disable=protected-access
    assert len(monitoring_service.database.get_scheduled_events(settleable_after)) == 1

    # Check that is does succeed if it wasn't for MAX_SCHEDULED_EVENTS_RETRY_FREQUENCY
    monitoring_service.try_scheduled_events_after = monitoring_service.get_timestamp_now() - 1
    monitoring_service._trigger_scheduled_events()  # pylint: disable=protected-access
    assert len(monitoring_service.database.get_scheduled_events(settleable_after)) == 0
def test_first_allowed_monitoring(
    web3,
    monitoring_service_contract,
    wait_for_blocks,
    service_registry,
    monitoring_service: MonitoringService,
    request_collector: RequestCollector,
    contracts_manager,
    deposit_to_udc,
    create_channel,
    token_network,
    get_accounts,
    get_private_key,
):
    """`monitor` must fail one block before the trigger block and succeed after."""
    # pylint: disable=too-many-arguments,too-many-locals,protected-access
    query = create_ms_contract_events_query(
        web3, contracts_manager, monitoring_service_contract.address)
    ms_address_hex = to_checksum_address(monitoring_service.address)
    c1, c2 = get_accounts(2)

    # add deposit for c1
    node_deposit = 10
    deposit_to_udc(c1, node_deposit)

    # The MS must have a deposit in the service registry to be eligible.
    deposit = service_registry.functions.deposits(ms_address_hex).call()
    assert deposit > 0

    # each client does a transfer
    channel_id = create_channel(c1, c2, settle_timeout=10)[0]
    shared_bp_args = dict(
        channel_identifier=channel_id,
        token_network_address=decode_hex(token_network.address),
        chain_id=ChainID(1),
        additional_hash="0x%064x" % 0,
        locked_amount=TokenAmount(0),
        locksroot=encode_hex(EMPTY_LOCKSROOT),
    )
    transferred_c1 = 5
    balance_proof_c1 = HashedBalanceProof(nonce=Nonce(1),
                                          transferred_amount=transferred_c1,
                                          priv_key=get_private_key(c1),
                                          **shared_bp_args)
    transferred_c2 = 6
    balance_proof_c2 = HashedBalanceProof(nonce=Nonce(2),
                                          transferred_amount=transferred_c2,
                                          priv_key=get_private_key(c2),
                                          **shared_bp_args)
    monitoring_service._process_new_blocks(web3.eth.blockNumber)
    assert monitoring_service.context.ms_state.blockchain_state.token_network_addresses

    # c1 asks MS to monitor the channel
    reward_amount = TokenAmount(1)
    request_monitoring = balance_proof_c2.get_request_monitoring(
        get_private_key(c1), reward_amount, monitoring_service_contract.address)
    request_collector.on_monitor_request(request_monitoring)

    # c2 closes the channel
    token_network.functions.closeChannel(
        channel_id,
        c1,
        balance_proof_c1.balance_hash,
        balance_proof_c1.nonce,
        balance_proof_c1.additional_hash,
        balance_proof_c1.signature,
    ).transact({"from": c2})
    monitoring_service._process_new_blocks(web3.eth.blockNumber)

    # Closing must have scheduled exactly one monitoring event.
    triggered_events = monitoring_service.database.get_scheduled_events(
        max_trigger_block=web3.eth.blockNumber + 10)
    assert len(triggered_events) == 1
    monitor_trigger = triggered_events[0]
    channel = monitoring_service.database.get_channel(
        token_network_address=decode_hex(token_network.address),
        channel_id=channel_id)
    assert channel

    # Calling monitor too early must fail. To test this, we call it one block
    # before the trigger block.
    wait_for_blocks(monitor_trigger.trigger_block_number - web3.eth.blockNumber - 1)
    handle_event(monitor_trigger.event, monitoring_service.context)
    assert [e.event for e in query()] == []

    # If our `monitor` call fails, we won't try again. Force a retry in this
    # test by clearing closing_tx_hash.
    channel.closing_tx_hash = None
    monitoring_service.database.upsert_channel(channel)

    # Now we can try again. The first try mined a new block, so now we're one
    # block further and `monitor` should succeed.
    handle_event(monitor_trigger.event, monitoring_service.context)
    assert [e.event for e in query()
            ] == [MonitoringServiceEvent.NEW_BALANCE_PROOF_RECEIVED]
def test_e2e(  # pylint: disable=too-many-arguments,too-many-locals
    web3,
    monitoring_service_contract,
    user_deposit_contract,
    service_registry,
    monitoring_service: MonitoringService,
    request_collector: RequestCollector,
    deposit_to_udc,
    create_channel,
    token_network,
    get_accounts,
    get_private_key,
):
    """Test complete message lifecycle
    1) client opens channel & submits monitoring request
    2) other client closes channel
    3) MS registers channelClose event
    4) MS calls monitoring contract update
    5) wait for channel settle
    6) MS claims the reward
    """
    query = create_ms_contract_events_query(web3, monitoring_service_contract.address)
    initial_balance = user_deposit_contract.functions.balances(monitoring_service.address).call()
    c1, c2 = get_accounts(2)

    # add deposit for c1
    node_deposit = 10
    deposit_to_udc(c1, node_deposit)

    assert service_registry.functions.hasValidRegistration(monitoring_service.address).call()

    # each client does a transfer
    channel_id = create_channel(c1, c2)[0]
    shared_bp_args = dict(
        channel_identifier=channel_id,
        token_network_address=decode_hex(token_network.address),
        chain_id=monitoring_service.chain_id,
        additional_hash="0x%064x" % 0,
        locked_amount=TokenAmount(0),
        locksroot=encode_hex(LOCKSROOT_OF_NO_LOCKS),
    )
    transferred_c1 = 5
    balance_proof_c1 = HashedBalanceProof(
        nonce=Nonce(1),
        transferred_amount=transferred_c1,
        priv_key=get_private_key(c1),
        **shared_bp_args,
    )
    transferred_c2 = 6
    balance_proof_c2 = HashedBalanceProof(
        nonce=Nonce(2),
        transferred_amount=transferred_c2,
        priv_key=get_private_key(c2),
        **shared_bp_args,
    )

    # Run the MS in the background for this test.
    ms_greenlet = gevent.spawn(monitoring_service.start)

    # need to wait here till the MS has some time to react
    gevent.sleep(0.01)
    assert len(monitoring_service.context.database.get_token_network_addresses()) > 0

    # c1 asks MS to monitor the channel
    reward_amount = TokenAmount(1)
    request_monitoring = balance_proof_c2.get_request_monitoring(
        privkey=get_private_key(c1),
        reward_amount=reward_amount,
        monitoring_service_contract_address=MonitoringServiceAddress(
            to_canonical_address(monitoring_service_contract.address)
        ),
    )
    request_collector.on_monitor_request(request_monitoring)

    # c2 closes the channel
    token_network.functions.closeChannel(
        channel_id,
        c1,
        c2,
        balance_proof_c1.balance_hash,
        balance_proof_c1.nonce,
        balance_proof_c1.additional_hash,
        balance_proof_c1.signature,
        balance_proof_c1.get_counter_signature(get_private_key(c2)),
    ).transact({"from": c2})

    # Wait until the MS reacts, which it does after giving the client some time
    # to update the channel itself.
    timestamp_of_closing_block = Timestamp(web3.eth.get_block("latest").timestamp)
    settle_timeout = int(token_network.functions.settle_timeout().call())
    settleable_after = Timestamp(timestamp_of_closing_block + settle_timeout)
    web3.testing.timeTravel(settleable_after - 1)
    monitoring_service.get_timestamp_now = lambda: settleable_after - 1

    # Now give the monitoring service a chance to submit the missing BP
    gevent.sleep(0.01)
    assert [e.event for e in query()] == [MonitoringServiceEvent.NEW_BALANCE_PROOF_RECEIVED]

    # wait for settle timeout
    web3.testing.timeTravel(settleable_after + 1)
    monitoring_service.get_timestamp_now = lambda: settleable_after + 1

    # Let the MS claim its reward
    gevent.sleep(0.01)
    assert [e.event for e in query()] == [
        MonitoringServiceEvent.NEW_BALANCE_PROOF_RECEIVED,
        MonitoringServiceEvent.REWARD_CLAIMED,
    ]

    # The reward must have been credited to the MS's UDC balance.
    final_balance = user_deposit_contract.functions.balances(monitoring_service.address).call()
    assert final_balance == (initial_balance + reward_amount)

    ms_greenlet.kill()
def main(  # pylint: disable=too-many-arguments,too-many-locals
    private_key: PrivateKey,
    state_db: str,
    web3: Web3,
    contracts: Dict[str, Contract],
    start_block: BlockNumber,
    host: str,
    port: int,
    min_reward: int,
    confirmations: BlockTimeout,
    operator: str,
    info_message: str,
    debug_shell: bool,
    accept_disclaimer: bool,
) -> int:
    """The Monitoring service for the Raiden Network."""
    log.info("Starting Raiden Monitoring Service")
    click.secho(MS_DISCLAIMER, fg="yellow")
    if not accept_disclaimer:
        click.confirm(CONFIRMATION_OF_UNDERSTANDING, abort=True)

    if not confirmations:
        # Pick a chain-appropriate default; Arbitrum chains need none.
        chain_id = ChainID(web3.eth.chain_id)
        confirmations = (BlockTimeout(0) if "arbitrum" in ID_TO_CHAINNAME.get(
            chain_id, "") else DEFAULT_NUMBER_OF_BLOCK_CONFIRMATIONS)
        log.info("Setting number of confirmation blocks", confirmations=confirmations)

    log.info("Using RPC endpoint", rpc_url=get_web3_provider_info(web3))
    hex_addresses = {
        name: to_checksum_address(contract.address)
        for name, contract in contracts.items()
    }
    log.info("Contract information", addresses=hex_addresses, start_block=start_block)

    task = None
    api = None
    try:
        service = MonitoringService(
            web3=web3,
            private_key=private_key,
            contracts=contracts,
            sync_start_block=start_block,
            required_confirmations=confirmations,
            poll_interval=DEFAULT_POLL_INTERVALL,
            db_filename=state_db,
            min_reward=min_reward,
        )

        if debug_shell:
            # Interactive debugging session instead of running the service.
            import IPython

            IPython.embed()
            return 0

        # Run the service in the background and serve the API in the foreground.
        task = spawn_named("MonitoringService", service.start)

        log.debug("Starting API")
        api = MSApi(monitoring_service=service, operator=operator, info_message=info_message)
        api.run(host=host, port=port)

        task.get()
    finally:
        # Ensure API and service shut down cleanly on any exit path.
        log.info("Stopping Monitoring Service...")

        if api:
            api.stop()

        if task:
            task.kill()
            task.get()

    return 0