def request_collector(ms_address, ms_database, get_private_key):
    """Yield a running RequestCollector for tests.

    The Matrix listener is patched out so no real transport is created.
    The collector is stopped and joined after the consuming test finishes.
    """
    private_key = get_private_key(ms_address)
    with patch("request_collector.server.MatrixListener"):
        collector = RequestCollector(private_key=private_key, state_db=ms_database)
        collector.start()
        yield collector
        collector.stop()
        collector.join()
def main(private_key: str, state_db: str) -> int:
    """The request collector for the monitoring service.

    Opens the shared state database and blocks, collecting monitoring
    requests until interrupted. Returns 0 on exit.
    """
    log.info("Starting Raiden Monitoring Request Collector")

    shared_db = SharedDatabase(state_db)
    collector = RequestCollector(private_key=private_key, state_db=shared_db)
    collector.listen_forever()

    print('Exiting...')
    return 0
def main(private_key: PrivateKey, state_db: str, matrix_server: List[str], accept_disclaimer: bool) -> int:
    """The request collector for the monitoring service.

    Shows the MS disclaimer (asking for confirmation unless pre-accepted),
    verifies that the monitoring service's database exists, then starts the
    collector and blocks until interrupted. Returns 0 on clean exit.
    """
    log.info("Starting Raiden Monitoring Request Collector")

    # The disclaimer must be acknowledged before anything else runs.
    click.secho(MS_DISCLAIMER, fg="yellow")
    if not accept_disclaimer:
        click.confirm(CONFIRMATION_OF_UNDERSTANDING, abort=True)

    # The collector piggybacks on the monitoring service's database; a
    # missing file means the MS has never run (":memory:" is exempt).
    missing_db = state_db != ":memory:" and not os.path.exists(state_db)
    if missing_db:
        log.error(
            "Database file from monitoring service not found. Is the monitoring service running?",
            expected_db_path=state_db,
        )
        sys.exit(1)

    shared_db = SharedDatabase(state_db)
    collector = RequestCollector(
        private_key=private_key, state_db=shared_db, matrix_servers=matrix_server
    )
    collector.start()
    collector.listen_forever()

    print("Exiting...")
    return 0
def request_collector(
    server_private_key,
    ms_database,
    web3,
    monitoring_service_contract,
    token_network_registry_contract,
    send_funds,
    contracts_manager: ContractManager,
):
    """Yield a running RequestCollector wired to the test database.

    The Matrix listener is patched out so no transport is started; the
    collector is stopped and joined when the test is done with it.
    """
    with patch('request_collector.server.MatrixListener'):
        collector = RequestCollector(private_key=server_private_key, state_db=ms_database)
        collector.start()
        yield collector
        collector.stop()
        collector.join()
def main(
    monitoring_channel: str,
    matrix_homeserver: str,
    matrix_username: str,
    matrix_password: str,
    state_db: str,
    log_level: str,
    log_config: TextIO,
):
    """Console script for request_collector.

    Logging can be quickly set by specifying a global log level or in a
    detailed way by using a log configuration file.

    See
    https://docs.python.org/3.7/library/logging.config.html#logging-config-dictschema
    for a detailed description of the format.
    """
    # NOTE(review): this assert contradicts the docstring's promise of log
    # config file support (and is stripped under -O) — confirm intent.
    assert log_config is None
    setup_logging(log_level, log_config)

    log.info("Starting Raiden Monitoring Request Collector")

    transport = MatrixTransport(
        matrix_homeserver,
        matrix_username,
        matrix_password,
        monitoring_channel,
    )
    database = StateDBSqlite(state_db)

    service = None
    try:
        service = RequestCollector(
            state_db=database,
            transport=transport,
        )

        service.run()
    except (KeyboardInterrupt, SystemExit):
        print('Exiting...')
    finally:
        # Fixed copy-paste: this is the request collector, not the
        # pathfinding service.
        log.info('Stopping Request Collector...')
        if service:
            service.stop()

    return 0
def main(
    keystore_file: str,
    password: str,
    state_db: str,
    log_level: str,
):
    """Console script for request_collector.

    Logging can be quickly set by specifying a global log level or in a
    detailed way by using a log configuration file.

    See
    https://docs.python.org/3.7/library/logging.config.html#logging-config-dictschema
    for a detailed description of the format.
    """
    setup_logging(log_level)

    # Unlock the keystore; a wrong password raises ValueError.
    with open(keystore_file, 'r') as keystore:
        keyfile_data = json.load(keystore)
        try:
            private_key = Account.decrypt(
                keyfile_json=keyfile_data,
                password=password,
            )
        except ValueError:
            log.critical(
                'Could not decode keyfile with given password. Please try again.'
            )
            sys.exit(1)

    log.info("Starting Raiden Monitoring Request Collector")

    shared_db = SharedDatabase(state_db)
    collector = RequestCollector(
        private_key=encode_hex(private_key),
        state_db=shared_db,
    )
    collector.listen_forever()

    print('Exiting...')
    return 0
def request_collector(
    server_private_key,
    blockchain,
    dummy_transport,
    state_db_sqlite,
    web3,
    monitoring_service_contract,
    token_network_registry_contract,
    send_funds,
    contracts_manager: ContractManager,
):
    """Yield a started RequestCollector using the dummy transport.

    The collector is stopped (but not joined) once the test is done.
    """
    collector = RequestCollector(
        state_db=state_db_sqlite,
        transport=dummy_transport,
    )
    collector.start()
    yield collector
    collector.stop()
def main(private_key: str, state_db: str) -> int:
    """The request collector for the monitoring service.

    Verifies the monitoring service's database file exists, then starts
    the collector and blocks until interrupted. Returns 0 on exit.
    """
    log.info("Starting Raiden Monitoring Request Collector")

    # The shared DB is created by the monitoring service; without it there
    # is nothing to collect into (":memory:" is exempt from the check).
    db_missing = state_db != ":memory:" and not os.path.exists(state_db)
    if db_missing:
        log.error(
            "Database file from monitoring service not found. Is the monitoring service running?",
            expected_db_path=state_db,
        )
        sys.exit(1)

    shared_db = SharedDatabase(state_db)
    collector = RequestCollector(private_key=private_key, state_db=shared_db)
    collector.start()
    collector.listen_forever()

    print("Exiting...")
    return 0
def test_e2e(
    web3,
    generate_raiden_clients,
    monitoring_service_contract,
    user_deposit_contract,
    wait_for_blocks,
    custom_token,
    service_registry,
    monitoring_service: MonitoringService,
    request_collector: RequestCollector,
    contracts_manager,
):
    """Test complete message lifecycle
        1) client opens channel & submits monitoring request
        2) other client closes channel
        3) MS registers channelClose event
        4) MS calls monitoring contract update
        5) wait for channel settle
        6) MS claims the reward
    """
    # Event query over the MS contract; re-run it to observe new events.
    query = create_ms_contract_events_query(
        web3, contracts_manager, monitoring_service_contract.address)
    # Snapshot of the MS's UDC balance, compared at the end to verify the reward.
    initial_balance = user_deposit_contract.functions.balances(
        monitoring_service.address).call()
    c1, c2 = generate_raiden_clients(2)

    # add deposit for c1 (approve + deposit into the user deposit contract)
    node_deposit = 10
    custom_token.functions.approve(user_deposit_contract.address, node_deposit).transact({"from": c1.address})
    user_deposit_contract.functions.deposit(c1.address, node_deposit).transact(
        {"from": c1.address})

    # The MS must have registered a deposit to be eligible for rewards.
    deposit = service_registry.functions.deposits(
        monitoring_service.address).call()
    assert deposit > 0

    # each client does a transfer
    c1.open_channel(c2.address)
    transferred_c1 = 5
    balance_proof_c1 = c1.get_balance_proof(
        c2.address,
        nonce=1,
        transferred_amount=transferred_c1,
        locked_amount=0,
        locksroot="0x%064x" % 0,
        additional_hash="0x%064x" % 0,
    )
    transferred_c2 = 6
    balance_proof_c2 = c2.get_balance_proof(
        c1.address,
        nonce=2,
        transferred_amount=transferred_c2,
        locked_amount=0,
        locksroot="0x%064x" % 0,
        additional_hash="0x%064x" % 0,
    )

    # Run the MS event loop in a background greenlet.
    ms_greenlet = gevent.spawn(monitoring_service.start, gevent.sleep)

    # need to wait here till the MS has some time to react
    gevent.sleep()
    assert monitoring_service.context.ms_state.blockchain_state.token_network_addresses

    # c1 asks MS to monitor the channel
    reward_amount = 1
    request_monitoring = c1.get_request_monitoring(balance_proof_c2, reward_amount)
    request_collector.on_monitor_request(request_monitoring)

    # c2 closes the channel
    c2.close_channel(c1.address, balance_proof_c1)
    # Wait until the MS reacts, which it does after giving the client some time
    # to update the channel itself.
    wait_for_blocks(5)  # 30% of 15 blocks
    # Now give the monitoring service a chance to submit the missing BP
    gevent.sleep(0.1)

    assert [e.event for e in query()
            ] == [MonitoringServiceEvent.NEW_BALANCE_PROOF_RECEIVED.value]

    # wait for settle timeout
    wait_for_blocks(15)
    c2.settle_channel(
        c1.address,
        (transferred_c2, transferred_c1),
        (0, 0),  # locked_amount
        ("0x%064x" % 0, "0x%064x" % 0),  # locksroot
    )
    # Wait until the ChannelSettled is confirmed
    # Let the MS claim its reward
    gevent.sleep(0.1)
    assert [e.event for e in query()] == [
        MonitoringServiceEvent.NEW_BALANCE_PROOF_RECEIVED.value,
        MonitoringServiceEvent.REWARD_CLAIMED.value,
    ]

    # The reward must have been credited to the MS's UDC balance.
    final_balance = user_deposit_contract.functions.balances(
        monitoring_service.address).call()
    assert final_balance == (initial_balance + reward_amount)

    ms_greenlet.kill()
def test_e2e(  # pylint: disable=too-many-arguments,too-many-locals
    web3,
    monitoring_service_contract,
    user_deposit_contract,
    wait_for_blocks,
    service_registry,
    monitoring_service: MonitoringService,
    request_collector: RequestCollector,
    contracts_manager,
    deposit_to_udc,
    create_channel,
    token_network,
    get_accounts,
    get_private_key,
):
    """Test complete message lifecycle
        1) client opens channel & submits monitoring request
        2) other client closes channel
        3) MS registers channelClose event
        4) MS calls monitoring contract update
        5) wait for channel settle
        6) MS claims the reward
    """
    # Event query over the MS contract; re-run it to observe new events.
    query = create_ms_contract_events_query(
        web3, contracts_manager, monitoring_service_contract.address)
    # Snapshot of the MS's UDC balance, compared at the end to verify the reward.
    initial_balance = user_deposit_contract.functions.balances(
        monitoring_service.address).call()
    c1, c2 = get_accounts(2)

    # add deposit for c1
    node_deposit = 10
    deposit_to_udc(c1, node_deposit)

    # The MS must have registered a deposit to be eligible for rewards.
    deposit = service_registry.functions.deposits(
        monitoring_service.address).call()
    assert deposit > 0

    # each client does a transfer
    channel_id = create_channel(
        c1, c2, settle_timeout=5)[0]  # TODO: reduce settle_timeout to speed up test
    # Arguments shared by both balance proofs; only nonce/amount/signer differ.
    shared_bp_args = dict(
        channel_identifier=channel_id,
        token_network_address=token_network.address,
        chain_id=ChainID(1),
        additional_hash="0x%064x" % 0,
        locked_amount=TokenAmount(0),
        locksroot=encode_hex(EMPTY_LOCKSROOT),
    )
    transferred_c1 = 5
    balance_proof_c1 = HashedBalanceProof(nonce=Nonce(1),
                                          transferred_amount=transferred_c1,
                                          priv_key=get_private_key(c1),
                                          **shared_bp_args)
    transferred_c2 = 6
    balance_proof_c2 = HashedBalanceProof(nonce=Nonce(2),
                                          transferred_amount=transferred_c2,
                                          priv_key=get_private_key(c2),
                                          **shared_bp_args)

    # Run the MS event loop in a background greenlet.
    ms_greenlet = gevent.spawn(monitoring_service.start, gevent.sleep)

    # need to wait here till the MS has some time to react
    gevent.sleep()
    assert monitoring_service.context.ms_state.blockchain_state.token_network_addresses

    # c1 asks MS to monitor the channel
    reward_amount = TokenAmount(1)
    request_monitoring = balance_proof_c2.get_request_monitoring(
        get_private_key(c1), reward_amount)
    request_collector.on_monitor_request(request_monitoring)

    # c2 closes the channel
    token_network.functions.closeChannel(
        channel_id,
        c1,
        balance_proof_c1.balance_hash,
        balance_proof_c1.nonce,
        balance_proof_c1.additional_hash,
        balance_proof_c1.signature,
    ).transact({"from": c2})

    # Wait until the MS reacts, which it does after giving the client some time
    # to update the channel itself.
    wait_for_blocks(3)  # 1 block for close + 30% of 5 blocks = 2
    # Now give the monitoring service a chance to submit the missing BP
    gevent.sleep(0.1)

    assert [e.event for e in query()
            ] == [MonitoringServiceEvent.NEW_BALANCE_PROOF_RECEIVED]

    # wait for settle timeout
    wait_for_blocks(
        2)  # timeout is 5, but we've already waited 3 blocks before

    token_network.functions.settleChannel(
        channel_id,
        c1,  # participant_B
        transferred_c1,  # participant_B_transferred_amount
        0,  # participant_B_locked_amount
        EMPTY_LOCKSROOT,  # participant_B_locksroot
        c2,  # participant_A
        transferred_c2,  # participant_A_transferred_amount
        0,  # participant_A_locked_amount
        EMPTY_LOCKSROOT,  # participant_A_locksroot
    ).transact()

    # Wait until the ChannelSettled is confirmed
    # Let the MS claim its reward
    gevent.sleep(0.1)
    assert [e.event for e in query()] == [
        MonitoringServiceEvent.NEW_BALANCE_PROOF_RECEIVED,
        MonitoringServiceEvent.REWARD_CLAIMED,
    ]

    # The reward must have been credited to the MS's UDC balance.
    final_balance = user_deposit_contract.functions.balances(
        monitoring_service.address).call()
    assert final_balance == (initial_balance + reward_amount)

    ms_greenlet.kill()
def test_first_allowed_monitoring(
    web3: Web3,
    monitoring_service_contract,
    wait_for_blocks,
    service_registry,
    monitoring_service: MonitoringService,
    request_collector: RequestCollector,
    deposit_to_udc,
    create_channel,
    token_network,
    get_accounts,
    get_private_key,
):  # pylint: disable=too-many-arguments,too-many-locals,protected-access
    """Verify the MS does not call `monitor` before its scheduled trigger block."""
    # Event query over the MS contract; re-run it to observe new events.
    query = create_ms_contract_events_query(web3, monitoring_service_contract.address)
    c1, c2 = get_accounts(2)

    # add deposit for c1
    node_deposit = 10
    deposit_to_udc(c1, node_deposit)

    assert service_registry.functions.hasValidRegistration(monitoring_service.address).call()

    # each client does a transfer
    channel_id = create_channel(c1, c2, settle_timeout=10)[0]
    # Arguments shared by both balance proofs; only nonce/amount/signer differ.
    shared_bp_args = dict(
        channel_identifier=channel_id,
        token_network_address=decode_hex(token_network.address),
        chain_id=monitoring_service.chain_id,
        additional_hash="0x%064x" % 0,
        locked_amount=TokenAmount(0),
        locksroot=encode_hex(LOCKSROOT_OF_NO_LOCKS),
    )
    transferred_c1 = 5
    balance_proof_c1 = HashedBalanceProof(
        nonce=Nonce(1),
        transferred_amount=transferred_c1,
        priv_key=get_private_key(c1),
        **shared_bp_args,
    )
    transferred_c2 = 6
    balance_proof_c2 = HashedBalanceProof(
        nonce=Nonce(2),
        transferred_amount=transferred_c2,
        priv_key=get_private_key(c2),
        **shared_bp_args,
    )

    # Process chain state synchronously instead of running the MS loop.
    monitoring_service._process_new_blocks(web3.eth.blockNumber)
    assert len(monitoring_service.context.database.get_token_network_addresses()) > 0

    # c1 asks MS to monitor the channel
    reward_amount = TokenAmount(1)
    request_monitoring = balance_proof_c2.get_request_monitoring(
        privkey=get_private_key(c1),
        reward_amount=reward_amount,
        monitoring_service_contract_address=MonitoringServiceAddress(
            to_canonical_address(monitoring_service_contract.address)
        ),
    )
    request_collector.on_monitor_request(request_monitoring)

    # c2 closes the channel
    token_network.functions.closeChannel(
        channel_id,
        c1,
        c2,
        balance_proof_c1.balance_hash,
        balance_proof_c1.nonce,
        balance_proof_c1.additional_hash,
        balance_proof_c1.signature,
        balance_proof_c1.get_counter_signature(get_private_key(c2)),
    ).transact({"from": c2})
    monitoring_service._process_new_blocks(web3.eth.blockNumber)

    # The close must have scheduled exactly one monitoring event.
    triggered_events = monitoring_service.database.get_scheduled_events(
        max_trigger_block=BlockNumber(web3.eth.blockNumber + 10)
    )
    assert len(triggered_events) == 1
    monitor_trigger = triggered_events[0]
    channel = monitoring_service.database.get_channel(
        token_network_address=TokenNetworkAddress(to_canonical_address(token_network.address)),
        channel_id=channel_id,
    )
    assert channel

    # Calling monitor too early must fail. To test this, we call it two block
    # before the trigger block.
    # This should be only one block before, but we trigger one block too late
    # to work around parity's gas estimation. See
    # https://github.com/raiden-network/raiden-services/pull/728
    wait_for_blocks(monitor_trigger.trigger_block_number - web3.eth.blockNumber - 2)
    handle_event(monitor_trigger.event, monitoring_service.context)
    assert [e.event for e in query()] == []

    # If our `monitor` call fails, we won't try again. Force a retry in this
    # test by clearing monitor_tx_hash.
    channel.monitor_tx_hash = None
    monitoring_service.database.upsert_channel(channel)

    # Now we can try again. The first try mined a new block, so now we're one
    # block further and `monitor` should succeed.
    handle_event(monitor_trigger.event, monitoring_service.context)
    assert [e.event for e in query()] == [MonitoringServiceEvent.NEW_BALANCE_PROOF_RECEIVED]
def test_e2e(  # pylint: disable=too-many-arguments,too-many-locals
    web3,
    monitoring_service_contract,
    user_deposit_contract,
    wait_for_blocks,
    service_registry,
    monitoring_service: MonitoringService,
    request_collector: RequestCollector,
    deposit_to_udc,
    create_channel,
    token_network,
    get_accounts,
    get_private_key,
):
    """Test complete message lifecycle
        1) client opens channel & submits monitoring request
        2) other client closes channel
        3) MS registers channelClose event
        4) MS calls monitoring contract update
        5) wait for channel settle
        6) MS claims the reward
    """
    # Event query over the MS contract; re-run it to observe new events.
    query = create_ms_contract_events_query(web3, monitoring_service_contract.address)
    # Snapshot of the MS's UDC balance, compared at the end to verify the reward.
    initial_balance = user_deposit_contract.functions.balances(monitoring_service.address).call()
    c1, c2 = get_accounts(2)

    # add deposit for c1
    node_deposit = 10
    deposit_to_udc(c1, node_deposit)

    assert service_registry.functions.hasValidRegistration(monitoring_service.address).call()

    # each client does a transfer
    channel_id = create_channel(c1, c2, settle_timeout=5)[0]
    # Arguments shared by both balance proofs; only nonce/amount/signer differ.
    shared_bp_args = dict(
        channel_identifier=channel_id,
        token_network_address=decode_hex(token_network.address),
        chain_id=monitoring_service.chain_id,
        additional_hash="0x%064x" % 0,
        locked_amount=TokenAmount(0),
        locksroot=encode_hex(LOCKSROOT_OF_NO_LOCKS),
    )
    transferred_c1 = 5
    balance_proof_c1 = HashedBalanceProof(
        nonce=Nonce(1),
        transferred_amount=transferred_c1,
        priv_key=get_private_key(c1),
        **shared_bp_args,
    )
    transferred_c2 = 6
    balance_proof_c2 = HashedBalanceProof(
        nonce=Nonce(2),
        transferred_amount=transferred_c2,
        priv_key=get_private_key(c2),
        **shared_bp_args,
    )

    # Run the MS event loop in a background greenlet.
    ms_greenlet = gevent.spawn(monitoring_service.start)

    # need to wait here till the MS has some time to react
    gevent.sleep(0.01)
    assert len(monitoring_service.context.database.get_token_network_addresses()) > 0

    # c1 asks MS to monitor the channel
    reward_amount = TokenAmount(1)
    request_monitoring = balance_proof_c2.get_request_monitoring(
        privkey=get_private_key(c1),
        reward_amount=reward_amount,
        monitoring_service_contract_address=MonitoringServiceAddress(
            to_canonical_address(monitoring_service_contract.address)
        ),
    )
    request_collector.on_monitor_request(request_monitoring)

    # c2 closes the channel
    token_network.functions.closeChannel(
        channel_id,
        c1,
        c2,
        balance_proof_c1.balance_hash,
        balance_proof_c1.nonce,
        balance_proof_c1.additional_hash,
        balance_proof_c1.signature,
        balance_proof_c1.get_counter_signature(get_private_key(c2)),
    ).transact({"from": c2})

    # Wait until the MS reacts, which it does after giving the client some time
    # to update the channel itself.
    wait_for_blocks(2)  # 1 block for close + 1 block for triggering the event
    # Now give the monitoring service a chance to submit the missing BP
    gevent.sleep(0.01)

    assert [e.event for e in query()] == [MonitoringServiceEvent.NEW_BALANCE_PROOF_RECEIVED]

    # wait for settle timeout
    # timeout is 5, but we've already waited 3 blocks before. Additionally one block is
    # added to handle parity running gas estimation on current instead of next.
    wait_for_blocks(3)

    # Let the MS claim its reward
    gevent.sleep(0.01)
    assert [e.event for e in query()] == [
        MonitoringServiceEvent.NEW_BALANCE_PROOF_RECEIVED,
        MonitoringServiceEvent.REWARD_CLAIMED,
    ]

    # The reward must have been credited to the MS's UDC balance.
    final_balance = user_deposit_contract.functions.balances(monitoring_service.address).call()
    assert final_balance == (initial_balance + reward_amount)

    ms_greenlet.kill()
def test_first_allowed_monitoring(
    web3: Web3,
    monitoring_service_contract,
    service_registry,
    monitoring_service: MonitoringService,
    request_collector: RequestCollector,
    deposit_to_udc,
    create_channel,
    token_network,
    get_accounts,
    get_private_key,
):  # pylint: disable=too-many-arguments,too-many-locals,protected-access
    """Verify the MS does not call `monitor` before its scheduled trigger timestamp."""
    # Event query over the MS contract; re-run it to observe new events.
    query = create_ms_contract_events_query(web3, monitoring_service_contract.address)
    c1, c2 = get_accounts(2)

    # add deposit for c1
    node_deposit = 10
    deposit_to_udc(c1, node_deposit)

    assert service_registry.functions.hasValidRegistration(monitoring_service.address).call()

    # each client does a transfer
    channel_id = create_channel(c1, c2)[0]
    # Arguments shared by both balance proofs; only nonce/amount/signer differ.
    shared_bp_args = dict(
        channel_identifier=channel_id,
        token_network_address=decode_hex(token_network.address),
        chain_id=monitoring_service.chain_id,
        additional_hash="0x%064x" % 0,
        locked_amount=TokenAmount(0),
        locksroot=encode_hex(LOCKSROOT_OF_NO_LOCKS),
    )
    transferred_c1 = 5
    balance_proof_c1 = HashedBalanceProof(
        nonce=Nonce(1),
        transferred_amount=transferred_c1,
        priv_key=get_private_key(c1),
        **shared_bp_args,
    )
    transferred_c2 = 6
    balance_proof_c2 = HashedBalanceProof(
        nonce=Nonce(2),
        transferred_amount=transferred_c2,
        priv_key=get_private_key(c2),
        **shared_bp_args,
    )

    # Process chain state synchronously instead of running the MS loop.
    monitoring_service._process_new_blocks(web3.eth.block_number)
    assert len(monitoring_service.context.database.get_token_network_addresses()) > 0

    # c1 asks MS to monitor the channel
    reward_amount = TokenAmount(1)
    request_monitoring = balance_proof_c2.get_request_monitoring(
        privkey=get_private_key(c1),
        reward_amount=reward_amount,
        monitoring_service_contract_address=MonitoringServiceAddress(
            to_canonical_address(monitoring_service_contract.address)
        ),
    )
    request_collector.on_monitor_request(request_monitoring)

    # c2 closes the channel
    token_network.functions.closeChannel(
        channel_id,
        c1,
        c2,
        balance_proof_c1.balance_hash,
        balance_proof_c1.nonce,
        balance_proof_c1.additional_hash,
        balance_proof_c1.signature,
        balance_proof_c1.get_counter_signature(get_private_key(c2)),
    ).transact({"from": c2})
    monitoring_service._process_new_blocks(web3.eth.block_number)

    # The channel becomes settleable one settle_timeout after the closing block.
    timestamp_of_closing_block = Timestamp(web3.eth.get_block("latest").timestamp)  # type: ignore
    settle_timeout = int(token_network.functions.settle_timeout().call())
    settleable_after = Timestamp(timestamp_of_closing_block + settle_timeout)

    # The close must have scheduled exactly one monitoring event.
    triggered_events = monitoring_service.database.get_scheduled_events(
        max_trigger_timestamp=settleable_after
    )
    assert len(triggered_events) == 1
    monitor_trigger = triggered_events[0]
    channel = monitoring_service.database.get_channel(
        token_network_address=TokenNetworkAddress(to_canonical_address(token_network.address)),
        channel_id=channel_id,
    )
    assert channel

    # Calling monitor too early must fail. To test this, we call a few seconds
    # before the trigger timestamp.
    web3.testing.timeTravel(monitor_trigger.trigger_timestamp - 5)  # type: ignore
    with pytest.raises(TransactionTooEarlyException):
        handle_event(monitor_trigger.event, monitoring_service.context)
    assert [e.event for e in query()] == []

    # If our `monitor` call fails, we won't try again. Force a retry in this
    # test by clearing monitor_tx_hash.
    channel.monitor_tx_hash = None
    monitoring_service.database.upsert_channel(channel)

    # Now we can try again. The first try mined a new block, so now we're one
    # block further and `monitor` should succeed.
    web3.testing.timeTravel(monitor_trigger.trigger_timestamp)  # type: ignore
    handle_event(monitor_trigger.event, monitoring_service.context)
    assert [e.event for e in query()] == [MonitoringServiceEvent.NEW_BALANCE_PROOF_RECEIVED]
def test_reschedule_too_early_events(
    web3: Web3,
    monitoring_service_contract,
    monitoring_service: MonitoringService,
    request_collector: RequestCollector,
    deposit_to_udc,
    create_channel,
    token_network,
    get_accounts,
    get_private_key,
):  # pylint: disable=too-many-arguments,too-many-locals,protected-access
    """Verify that a too-early `monitor` attempt stays scheduled and retries
    are throttled by MAX_SCHEDULED_EVENTS_RETRY_FREQUENCY."""
    c1, c2 = get_accounts(2)

    # add deposit for c1
    node_deposit = 10
    deposit_to_udc(c1, node_deposit)

    # each client does a transfer
    channel_id = create_channel(c1, c2)[0]
    # Arguments shared by both balance proofs; only nonce/amount/signer differ.
    shared_bp_args = dict(
        channel_identifier=channel_id,
        token_network_address=decode_hex(token_network.address),
        chain_id=monitoring_service.chain_id,
        additional_hash="0x%064x" % 0,
        locked_amount=TokenAmount(0),
        locksroot=encode_hex(LOCKSROOT_OF_NO_LOCKS),
    )
    transferred_c1 = 5
    balance_proof_c1 = HashedBalanceProof(
        nonce=Nonce(1),
        transferred_amount=transferred_c1,
        priv_key=get_private_key(c1),
        **shared_bp_args,
    )
    transferred_c2 = 6
    balance_proof_c2 = HashedBalanceProof(
        nonce=Nonce(2),
        transferred_amount=transferred_c2,
        priv_key=get_private_key(c2),
        **shared_bp_args,
    )

    # Process chain state synchronously instead of running the MS loop.
    monitoring_service._process_new_blocks(web3.eth.block_number)
    assert len(monitoring_service.context.database.get_token_network_addresses()) > 0

    # c1 asks MS to monitor the channel
    reward_amount = TokenAmount(1)
    request_monitoring = balance_proof_c2.get_request_monitoring(
        privkey=get_private_key(c1),
        reward_amount=reward_amount,
        monitoring_service_contract_address=MonitoringServiceAddress(
            to_canonical_address(monitoring_service_contract.address)
        ),
    )
    request_collector.on_monitor_request(request_monitoring)

    # c2 closes the channel
    token_network.functions.closeChannel(
        channel_id,
        c1,
        c2,
        balance_proof_c1.balance_hash,
        balance_proof_c1.nonce,
        balance_proof_c1.additional_hash,
        balance_proof_c1.signature,
        balance_proof_c1.get_counter_signature(get_private_key(c2)),
    ).transact({"from": c2})
    monitoring_service._process_new_blocks(web3.eth.block_number)

    # The channel becomes settleable one settle_timeout after the closing block.
    timestamp_of_closing_block = Timestamp(web3.eth.get_block("latest").timestamp)  # type: ignore
    settle_timeout = int(token_network.functions.settle_timeout().call())
    settleable_after = Timestamp(timestamp_of_closing_block + settle_timeout)

    scheduled_events = monitoring_service.database.get_scheduled_events(
        max_trigger_timestamp=settleable_after
    )

    channel = monitoring_service.database.get_channel(
        token_network_address=TokenNetworkAddress(to_canonical_address(token_network.address)),
        channel_id=channel_id,
    )

    # The scheduled trigger must match the first allowed monitoring timestamp.
    monitor_trigger = _first_allowed_timestamp_to_monitor(
        scheduled_events[0].event.token_network_address, channel, monitoring_service.context
    )

    assert len(scheduled_events) == 1
    first_trigger_timestamp = scheduled_events[0].trigger_timestamp
    assert first_trigger_timestamp == monitor_trigger

    # Calling monitor too early must fail
    monitoring_service.get_timestamp_now = lambda: settleable_after
    monitoring_service._trigger_scheduled_events()  # pylint: disable=protected-access
    assert monitoring_service.try_scheduled_events_after == pytest.approx(settleable_after, 100)

    # Failed event is still scheduled, since it was too early for it to succeed
    scheduled_events = monitoring_service.database.get_scheduled_events(settleable_after)
    assert len(scheduled_events) == 1

    # ...and it should be blocked from retrying for a while.
    assert (
        monitoring_service.try_scheduled_events_after
        == monitoring_service.get_timestamp_now() + MAX_SCHEDULED_EVENTS_RETRY_FREQUENCY
    )

    # Now it could be executed, but won't due to MAX_SCHEDULED_EVENTS_RETRY_FREQUENCY
    web3.testing.timeTravel(settleable_after - 1)  # type: ignore
    monitoring_service._trigger_scheduled_events()  # pylint: disable=protected-access
    assert len(monitoring_service.database.get_scheduled_events(settleable_after)) == 1

    # Check that is does succeed if it wasn't for MAX_SCHEDULED_EVENTS_RETRY_FREQUENCY
    monitoring_service.try_scheduled_events_after = monitoring_service.get_timestamp_now() - 1
    monitoring_service._trigger_scheduled_events()  # pylint: disable=protected-access
    assert len(monitoring_service.database.get_scheduled_events(settleable_after)) == 0
def test_first_allowed_monitoring(
    web3,
    monitoring_service_contract,
    wait_for_blocks,
    service_registry,
    monitoring_service: MonitoringService,
    request_collector: RequestCollector,
    contracts_manager,
    deposit_to_udc,
    create_channel,
    token_network,
    get_accounts,
    get_private_key,
):  # pylint: disable=too-many-arguments,too-many-locals,protected-access
    """Verify the MS does not call `monitor` before its scheduled trigger block."""
    # Event query over the MS contract; re-run it to observe new events.
    query = create_ms_contract_events_query(
        web3, contracts_manager, monitoring_service_contract.address)
    ms_address_hex = to_checksum_address(monitoring_service.address)
    c1, c2 = get_accounts(2)

    # add deposit for c1
    node_deposit = 10
    deposit_to_udc(c1, node_deposit)

    # The MS must have registered a deposit to be eligible for rewards.
    deposit = service_registry.functions.deposits(ms_address_hex).call()
    assert deposit > 0

    # each client does a transfer
    channel_id = create_channel(c1, c2, settle_timeout=10)[0]
    # Arguments shared by both balance proofs; only nonce/amount/signer differ.
    shared_bp_args = dict(
        channel_identifier=channel_id,
        token_network_address=decode_hex(token_network.address),
        chain_id=ChainID(1),
        additional_hash="0x%064x" % 0,
        locked_amount=TokenAmount(0),
        locksroot=encode_hex(EMPTY_LOCKSROOT),
    )
    transferred_c1 = 5
    balance_proof_c1 = HashedBalanceProof(nonce=Nonce(1),
                                          transferred_amount=transferred_c1,
                                          priv_key=get_private_key(c1),
                                          **shared_bp_args)
    transferred_c2 = 6
    balance_proof_c2 = HashedBalanceProof(nonce=Nonce(2),
                                          transferred_amount=transferred_c2,
                                          priv_key=get_private_key(c2),
                                          **shared_bp_args)

    # Process chain state synchronously instead of running the MS loop.
    monitoring_service._process_new_blocks(web3.eth.blockNumber)
    assert monitoring_service.context.ms_state.blockchain_state.token_network_addresses

    # c1 asks MS to monitor the channel
    reward_amount = TokenAmount(1)
    request_monitoring = balance_proof_c2.get_request_monitoring(
        get_private_key(c1), reward_amount, monitoring_service_contract.address)
    request_collector.on_monitor_request(request_monitoring)

    # c2 closes the channel
    token_network.functions.closeChannel(
        channel_id,
        c1,
        balance_proof_c1.balance_hash,
        balance_proof_c1.nonce,
        balance_proof_c1.additional_hash,
        balance_proof_c1.signature,
    ).transact({"from": c2})
    monitoring_service._process_new_blocks(web3.eth.blockNumber)

    # The close must have scheduled exactly one monitoring event.
    triggered_events = monitoring_service.database.get_scheduled_events(
        max_trigger_block=web3.eth.blockNumber + 10)
    assert len(triggered_events) == 1
    monitor_trigger = triggered_events[0]
    channel = monitoring_service.database.get_channel(
        token_network_address=decode_hex(token_network.address),
        channel_id=channel_id)
    assert channel

    # Calling monitor too early must fail. To test this, we call it one block
    # before the trigger block.
    wait_for_blocks(monitor_trigger.trigger_block_number - web3.eth.blockNumber - 1)
    handle_event(monitor_trigger.event, monitoring_service.context)
    assert [e.event for e in query()] == []

    # If our `monitor` call fails, we won't try again. Force a retry in this
    # test by clearing closing_tx_hash.
    channel.closing_tx_hash = None
    monitoring_service.database.upsert_channel(channel)

    # Now we can try again. The first try mined a new block, so now we're one
    # block further and `monitor` should succeed.
    handle_event(monitor_trigger.event, monitoring_service.context)
    assert [e.event for e in query()
            ] == [MonitoringServiceEvent.NEW_BALANCE_PROOF_RECEIVED]