def test_stats_endpoint(
    api_sut_with_debug: PFSApi, api_url: str, token_network_model: TokenNetwork
):
    database = api_sut_with_debug.pathfinding_service.database
    default_path = [Address(b"1" * 20), Address(b"2" * 20), Address(b"3" * 20)]
    feedback_token = FeedbackToken(token_network_model.address)
    estimated_fee = FeeAmount(0)

    def check_response(num_all: int, num_only_feedback: int, num_only_success: int) -> None:
        url = api_url + "/v1/_debug/stats"
        response = requests.get(url)

        assert response.status_code == 200

        data = response.json()
        assert data["total_calculated_routes"] == num_all
        assert data["total_feedback_received"] == num_only_feedback
        assert data["total_successful_routes"] == num_only_success

    database.prepare_feedback(feedback_token, default_path, estimated_fee)
    check_response(1, 0, 0)

    database.update_feedback(feedback_token, default_path, False)
    check_response(1, 1, 0)

    default_path2 = default_path[1:]
    feedback_token2 = FeedbackToken(token_network_model.address)
    database.prepare_feedback(feedback_token2, default_path2, estimated_fee)
    check_response(2, 1, 0)

    database.update_feedback(feedback_token2, default_path2, True)
    check_response(2, 2, 1)
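# For reference, the assertions in check_response pin down the shape of the
# debug endpoint's JSON response; after the final step above it would be:
#
#     GET /v1/_debug/stats
#     {"total_calculated_routes": 2,
#      "total_feedback_received": 2,
#      "total_successful_routes": 1}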
def test_save_and_load_token_networks(pathfinding_service_mock_empty):
    pfs = pathfinding_service_mock_empty

    token_address = TokenAddress(bytes([1] * 20))
    token_network_address = TokenNetworkAddress(bytes([2] * 20))
    channel_id = ChannelID(1)
    p1 = Address(bytes([3] * 20))
    p2 = Address(bytes([4] * 20))
    events = [
        ReceiveTokenNetworkCreatedEvent(
            token_address=token_address,
            token_network_address=token_network_address,
            settle_timeout=DEFAULT_TOKEN_NETWORK_SETTLE_TIMEOUT,
            block_number=BlockNumber(1),
        ),
        ReceiveChannelOpenedEvent(
            token_network_address=token_network_address,
            channel_identifier=channel_id,
            participant1=p1,
            participant2=p2,
            block_number=BlockNumber(2),
        ),
    ]
    for event in events:
        pfs.handle_event(event)
    assert len(pfs.token_networks) == 1

    loaded_networks = pfs._load_token_networks()  # pylint: disable=protected-access
    assert len(loaded_networks) == 1

    orig = list(pfs.token_networks.values())[0]
    loaded = list(loaded_networks.values())[0]
    assert loaded.address == orig.address
    assert loaded.channel_id_to_addresses == orig.channel_id_to_addresses
    assert loaded.G.nodes == orig.G.nodes
def test_ignore_mr_for_closed_channel(
    request_collector, build_request_monitoring, ms_database, closing_block
):
    """MRs that come in >=10 blocks after the channel has been closed must be ignored."""
    request_monitoring = build_request_monitoring()
    ms_database.conn.execute("UPDATE blockchain SET latest_committed_block = ?", [100])
    ms_database.conn.execute(
        "INSERT INTO token_network(address) VALUES (?)",
        [to_checksum_address(request_monitoring.balance_proof.token_network_address)],
    )
    ms_database.upsert_channel(
        Channel(
            identifier=request_monitoring.balance_proof.channel_identifier,
            token_network_address=request_monitoring.balance_proof.token_network_address,
            participant1=Address(b"1" * 20),
            participant2=Address(b"2" * 20),
            closing_block=closing_block if closing_block else None,
        )
    )
    request_collector.on_monitor_request(request_monitoring)

    # When the channel is not closed, or the closing happened less than 10
    # blocks before the current block (100), the MR must be saved.
    expected_mrs = 0 if closing_block == 100 - CHANNEL_CLOSE_MARGIN else 1
    assert ms_database.monitor_request_count() == expected_mrs
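# Worked example of the margin check above: with CHANNEL_CLOSE_MARGIN == 10
# and the current block at 100, an MR for a channel closed at block 90
# arrives exactly 10 blocks after the close and is dropped, while one for a
# channel closed at, say, block 95 (or for a channel that is still open) is
# saved.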
def ms_database() -> Database:
    return Database(
        filename=":memory:",
        chain_id=TEST_CHAIN_ID,
        msc_address=TEST_MSC_ADDRESS,
        registry_address=Address(bytes([3] * 20)),
        receiver=Address(bytes([4] * 20)),
    )
def test_scheduled_events(ms_database: Database):
    # Add token network used as foreign key
    token_network_address = TokenNetworkAddress(bytes([1] * 20))
    ms_database.conn.execute(
        "INSERT INTO token_network (address, settle_timeout) VALUES (?, ?)",
        [to_checksum_address(token_network_address), DEFAULT_TOKEN_NETWORK_SETTLE_TIMEOUT],
    )

    # Trigger timestamps correspond to blocks 23/24 at the 15 s average block
    # time assumed throughout these tests
    event1 = ScheduledEvent(
        trigger_timestamp=23 * 15,
        event=ActionMonitoringTriggeredEvent(
            token_network_address=token_network_address,
            channel_identifier=ChannelID(1),
            non_closing_participant=Address(bytes([1] * 20)),
        ),
    )

    assert ms_database.scheduled_event_count() == 0
    ms_database.upsert_scheduled_event(event=event1)
    assert ms_database.scheduled_event_count() == 1

    event2 = ScheduledEvent(
        trigger_timestamp=24 * 15,
        event=ActionMonitoringTriggeredEvent(
            token_network_address=token_network_address,
            channel_identifier=ChannelID(1),
            non_closing_participant=Address(bytes([1] * 20)),
        ),
    )

    ms_database.upsert_scheduled_event(event2)
    assert ms_database.scheduled_event_count() == 2

    assert len(ms_database.get_scheduled_events(22 * 15)) == 0
    assert len(ms_database.get_scheduled_events(23 * 15)) == 1
    assert len(ms_database.get_scheduled_events(24 * 15)) == 2

    # Upserting the same event again must not create a duplicate
    ms_database.upsert_scheduled_event(event1)
    assert ms_database.scheduled_event_count() == 2
    assert len(ms_database.get_scheduled_events(22 * 15)) == 0
    assert len(ms_database.get_scheduled_events(23 * 15)) == 1
    assert len(ms_database.get_scheduled_events(24 * 15)) == 2

    ms_database.remove_scheduled_event(event2)
    assert len(ms_database.get_scheduled_events(22 * 15)) == 0
    assert len(ms_database.get_scheduled_events(23 * 15)) == 1
    assert len(ms_database.get_scheduled_events(24 * 15)) == 1
def test_purge_old_monitor_requests(
    ms_database: Database,
    build_request_monitoring,
    request_collector,
    monitoring_service: MonitoringService,
):
    # We'll test the purge on MRs for three different channels
    req_mons = [
        build_request_monitoring(channel_id=1),
        build_request_monitoring(channel_id=2),
        build_request_monitoring(channel_id=3),
    ]
    for req_mon in req_mons:
        request_collector.on_monitor_request(req_mon)

    # Channel 1 exists in the db
    token_network_address = req_mons[0].balance_proof.token_network_address
    ms_database.conn.execute(
        "INSERT INTO token_network VALUES (?, ?)",
        [to_checksum_address(token_network_address), DEFAULT_TOKEN_NETWORK_SETTLE_TIMEOUT],
    )
    ms_database.upsert_channel(
        Channel(
            identifier=ChannelID(1),
            token_network_address=token_network_address,
            participant1=Address(b"1" * 20),
            participant2=Address(b"2" * 20),
        )
    )

    # The request for channel 2 is recent (default), but the one for channel 3
    # was added 16 minutes ago.
    saved_at = (datetime.utcnow() - timedelta(minutes=16)).timestamp()
    ms_database.conn.execute(
        """
        UPDATE monitor_request
        SET saved_at = ?
        WHERE channel_identifier = ?
        """,
        [saved_at, hex256(3)],
    )

    monitoring_service._purge_old_monitor_requests()  # pylint: disable=protected-access
    remaining_mrs = ms_database.conn.execute(
        """
        SELECT channel_identifier, waiting_for_channel
        FROM monitor_request ORDER BY channel_identifier
        """
    ).fetchall()
    assert [tuple(mr) for mr in remaining_mrs] == [(1, False), (2, True)]
def test_action_claim_reward_triggered_event_handler_without_update_state_doesnt_trigger_claim_call(  # noqa
    context: Context,
):
    """Tests that `claimReward` is not called when the
    ActionClaimRewardTriggeredEvent is triggered but the stored monitor
    request promises no reward.
    """
    context = setup_state_with_closed_channel(context)

    context.database.upsert_monitor_request(
        create_signed_monitor_request(nonce=Nonce(6), reward_amount=TokenAmount(0))
    )

    trigger_event = ActionClaimRewardTriggeredEvent(
        token_network_address=DEFAULT_TOKEN_NETWORK_ADDRESS,
        channel_identifier=DEFAULT_CHANNEL_IDENTIFIER,
        non_closing_participant=DEFAULT_PARTICIPANT2,
    )

    channel = context.database.get_channel(
        trigger_event.token_network_address, trigger_event.channel_identifier
    )
    assert channel
    assert channel.claim_tx_hash is None

    # Set update state
    channel.update_status = OnChainUpdateStatus(
        update_sender_address=Address(bytes([1] * 20)), nonce=Nonce(6)
    )
    context.database.upsert_channel(channel)

    action_claim_reward_triggered_event_handler(trigger_event, context)

    # check that the claim call has not been made
    assert context.monitoring_service_contract.functions.claimReward.called is False
def test_insert_feedback_token(pathfinding_service_mock):
    token_network_address = TokenNetworkAddress(b"1" * 20)
    route = [Address(b"2" * 20), Address(b"3" * 20)]
    estimated_fee = -123
    token = FeedbackToken(token_network_address=token_network_address)
    database = pathfinding_service_mock.database

    database.prepare_feedback(token=token, route=route, estimated_fee=estimated_fee)

    # Test round-trip
    stored = database.get_feedback_token(
        token_id=token.uuid, token_network_address=token_network_address, route=route
    )
    assert stored == token

    # Test different UUID
    stored = database.get_feedback_token(
        token_id=uuid4(), token_network_address=token_network_address, route=route
    )
    assert stored is None

    # Test different token network address
    token_network_address_wrong = TokenNetworkAddress(b"9" * 20)
    stored = database.get_feedback_token(
        token_id=uuid4(), token_network_address=token_network_address_wrong, route=route
    )
    assert stored is None

    # Test different route
    route_wrong = [Address(b"2" * 20), Address(b"3" * 20), Address(b"4" * 20)]
    stored = database.get_feedback_token(
        token_id=uuid4(), token_network_address=token_network_address, route=route_wrong
    )
    assert stored is None

    # Test empty route
    stored = database.get_feedback_token(
        token_id=uuid4(), token_network_address=token_network_address, route=[]
    )
    assert stored is None
def a(int_addr) -> Address:  # pylint: disable=invalid-name
    """Create an address from an int with a short representation.

    This is helpful in tests because
    * Address creation is concise
    * You can easily match `a(1)` in your test with `a1` in your test output
    """
    return Address(PrettyBytes([0] * 19 + [int_addr]))
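# Illustration: `a(1)` is the 20-byte address b"\x00" * 19 + b"\x01", wrapped
# in PrettyBytes so that it prints in the short `a1`-style form mentioned in
# the docstring.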
def test_prometheus_event_handling_no_exceptions(pathfinding_service_mock_empty):
    metrics_state = save_metrics_state(metrics.REGISTRY)
    pfs = pathfinding_service_mock_empty

    token_address = TokenAddress(bytes([1] * 20))
    token_network_address = TokenNetworkAddress(bytes([2] * 20))
    channel_id = ChannelID(1)
    p1 = Address(bytes([3] * 20))
    p2 = Address(bytes([4] * 20))

    events = [
        ReceiveTokenNetworkCreatedEvent(
            token_address=token_address,
            token_network_address=token_network_address,
            settle_timeout=DEFAULT_TOKEN_NETWORK_SETTLE_TIMEOUT,
            block_number=BlockNumber(1),
        ),
        ReceiveChannelOpenedEvent(
            token_network_address=token_network_address,
            channel_identifier=channel_id,
            participant1=p1,
            participant2=p2,
            block_number=BlockNumber(2),
        ),
    ]

    for event in events:
        pfs.handle_event(event)

        # check that we have non-zero processing time for the events we created
        assert (
            metrics_state.get_delta(
                "events_processing_duration_seconds_sum",
                labels={"event_type": event.__class__.__name__},
            )
            > 0.0
        )
        # there should be no exception raised
        assert (
            metrics_state.get_delta(
                "events_exceptions_total", labels={"event_type": event.__class__.__name__}
            )
            == 0.0
        )
def test_feedback(pathfinding_service_mock):
    token_network_address = TokenNetworkAddress(b"1" * 20)
    route = [Address(b"2" * 20), Address(b"3" * 20)]
    other_route = [Address(b"2" * 20), Address(b"4" * 20)]
    estimated_fee = 0
    token = FeedbackToken(token_network_address=token_network_address)
    other_token = FeedbackToken(token_network_address=token_network_address)
    database = pathfinding_service_mock.database

    # Without a prepared token, no feedback can exist
    assert not db_has_feedback_for(database=database, token=token, route=route)
    assert not db_has_feedback_for(database=database, token=token, route=other_route)
    assert not db_has_feedback_for(database=database, token=other_token, route=route)

    # Preparing feedback does not count as received feedback yet
    database.prepare_feedback(token=token, route=route, estimated_fee=estimated_fee)
    assert not db_has_feedback_for(database=database, token=token, route=route)
    assert not db_has_feedback_for(database=database, token=other_token, route=route)
    assert not db_has_feedback_for(database=database, token=token, route=other_route)

    # Updating registers feedback for exactly this token and route
    rowcount = database.update_feedback(token=token, route=route, successful=True)
    assert rowcount == 1
    assert db_has_feedback_for(database=database, token=token, route=route)
    assert not db_has_feedback_for(database=database, token=other_token, route=route)
    assert not db_has_feedback_for(database=database, token=token, route=other_route)

    # A second update must not match any rows
    rowcount = database.update_feedback(token=token, route=route, successful=True)
    assert rowcount == 0
def test_rate_limiter():
    limiter = RateLimiter(allowed_bytes=100, reset_interval=timedelta(seconds=0.1))
    sender = Address(b"1" * 20)

    # The sender may use up the full byte allowance...
    for _ in range(50):
        assert limiter.check_and_count(sender=sender, added_bytes=2)
    # ...but no more than that
    assert not limiter.check_and_count(sender=sender, added_bytes=2)

    # Resetting before the interval has passed has no effect
    limiter.reset_if_it_is_time()
    assert not limiter.check_and_count(sender=sender, added_bytes=2)

    # After the interval, the allowance is restored
    time.sleep(0.1)
    limiter.reset_if_it_is_time()
    assert limiter.check_and_count(sender=sender, added_bytes=2)
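# The block below is a minimal sketch of a limiter satisfying the API
# exercised in test_rate_limiter (check_and_count / reset_if_it_is_time).
# It is NOT the real raiden_libs.matrix.RateLimiter, whose internals may
# differ; it only illustrates the contract the test pins down: a per-sender
# byte budget that is refilled no more often than `reset_interval`.
from collections import defaultdict
from datetime import datetime, timedelta
from typing import DefaultDict


class SketchRateLimiter:
    def __init__(self, allowed_bytes: int, reset_interval: timedelta) -> None:
        self.allowed_bytes = allowed_bytes
        self.reset_interval = reset_interval
        self.last_reset = datetime.utcnow()
        self.bytes_by_sender: DefaultDict[bytes, int] = defaultdict(int)

    def check_and_count(self, sender: bytes, added_bytes: int) -> bool:
        # Charge the message against the sender's budget; refuse it once the
        # budget would be exceeded.
        new_total = self.bytes_by_sender[sender] + added_bytes
        if new_total > self.allowed_bytes:
            return False
        self.bytes_by_sender[sender] = new_total
        return True

    def reset_if_it_is_time(self) -> None:
        # Budgets are only cleared once a full interval has elapsed, so an
        # early call (as in the test above) is a no-op.
        now = datetime.utcnow()
        if now - self.last_reset >= self.reset_interval:
            self.bytes_by_sender.clear()
            self.last_reset = now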
def test_logging_processor():
    # test if our logging processor changes bytes to checksum addresses
    # even if bytes-addresses are entangled into events
    logger = Mock()
    log_method = Mock()

    address = TokenAddress(b"\x7f[\xf6\xc9To\xa8\x185w\xe4\x9f\x15\xbc\xef@mr\xd5\xd9")

    address_log = format_to_hex(
        _logger=logger, _log_method=log_method, event_dict=dict(address=address)
    )
    assert to_checksum_address(address) == address_log["address"]

    address2 = Address(b"\x7f[\xf6\xc9To\xa8\x185w\xe4\x9f\x15\xbc\xef@mr\xd5\xd1")
    event = ReceiveTokenNetworkCreatedEvent(
        token_address=address,
        token_network_address=TokenNetworkAddress(address2),
        settle_timeout=DEFAULT_TOKEN_NETWORK_SETTLE_TIMEOUT,
        block_number=BlockNumber(1),
    )

    event_log = format_to_hex(_logger=logger, _log_method=log_method, event_dict=dict(event=event))
    assert (  # pylint: disable=unsubscriptable-object
        to_checksum_address(address) == event_log["event"]["token_address"]
    )
    assert (  # pylint: disable=unsubscriptable-object
        to_checksum_address(address2) == event_log["event"]["token_network_address"]
    )
    assert (  # pylint: disable=unsubscriptable-object
        event_log["event"]["type_name"] == "ReceiveTokenNetworkCreatedEvent"
    )

    message = PFSFeeUpdate(
        canonical_identifier=CanonicalIdentifier(
            chain_identifier=ChainID(61),
            token_network_address=TokenNetworkAddress(address),
            channel_identifier=ChannelID(1),
        ),
        updating_participant=PARTICIPANT1,
        fee_schedule=FeeScheduleState(),
        timestamp=datetime.utcnow(),
        signature=EMPTY_SIGNATURE,
    )
    message_log = format_to_hex(
        _logger=logger, _log_method=log_method, event_dict=dict(message=message)
    )
    assert (  # pylint: disable=unsubscriptable-object
        to_checksum_address(address)
        == message_log["message"]["canonical_identifier"]["token_network_address"]
    )
    assert (  # pylint: disable=unsubscriptable-object
        message_log["message"]["type_name"] == "PFSFeeUpdate"
    )
def test_contract_info_overwrite_defaults():
    address1 = Address(bytes([1] * 20))
    address2 = Address(bytes([2] * 20))
    address3 = Address(bytes([3] * 20))
    infos, start_block = get_contract_addresses_and_start_block(
        chain_id=DEFAULT_CHAIN_ID,
        contracts_version=DEFAULT_VERSION,
        contracts=[
            CONTRACT_TOKEN_NETWORK_REGISTRY,
            CONTRACT_MONITORING_SERVICE,
            CONTRACT_USER_DEPOSIT,
        ],
        address_overwrites={
            CONTRACT_TOKEN_NETWORK_REGISTRY: address1,
            CONTRACT_MONITORING_SERVICE: address2,
            CONTRACT_USER_DEPOSIT: address3,
        },
    )
    assert infos is not None
    assert infos[CONTRACT_TOKEN_NETWORK_REGISTRY] == address1
    assert infos[CONTRACT_MONITORING_SERVICE] == address2
    assert infos[CONTRACT_USER_DEPOSIT] == address3
    assert start_block == 0
def test_monitor_reward_claimed_event_handler(context: Context, log):
    metrics_state = save_metrics_state(metrics.REGISTRY)
    context = setup_state_with_closed_channel(context)

    claim_event = ReceiveMonitoringRewardClaimedEvent(
        ms_address=context.ms_state.address,
        amount=TokenAmount(1),
        reward_identifier="REWARD",
        block_number=BlockNumber(23),
    )

    monitor_reward_claim_event_handler(claim_event, context)

    assert (
        metrics_state.get_delta(
            "economics_reward_claims_successful_total", labels=metrics.Who.US.to_label_dict()
        )
        == 1.0
    )
    assert (
        metrics_state.get_delta(
            "economics_reward_claims_token_total", labels=metrics.Who.US.to_label_dict()
        )
        == 1.0
    )

    assert log.has("Successfully claimed reward")

    claim_event = dataclasses.replace(claim_event, ms_address=Address(bytes([3] * 20)))
    monitor_reward_claim_event_handler(claim_event, context)

    assert (
        metrics_state.get_delta(
            "economics_reward_claims_successful_total", labels=metrics.Who.THEY.to_label_dict()
        )
        == 1.0
    )
    assert (
        metrics_state.get_delta(
            "economics_reward_claims_token_total", labels=metrics.Who.THEY.to_label_dict()
        )
        == 1.0
    )

    assert log.has("Another MS claimed reward")
def api_sut_with_debug(
    pathfinding_service_mock,
    reachability_state: SimpleReachabilityContainer,
    free_port: int,
    populate_token_network_case_1,  # pylint: disable=unused-argument
) -> Iterator[PFSApi]:
    pathfinding_service_mock.matrix_listener.user_manager = reachability_state
    api = PFSApi(
        pathfinding_service=pathfinding_service_mock,
        one_to_n_address=Address(bytes([1] * 20)),
        debug_mode=True,
        operator="",
        info_message="",
    )
    api.run(host=DEFAULT_API_HOST, port=free_port)
    yield api
    api.stop()
def __init__(
    self,
    filename: str,
    chain_id: ChainID,
    msc_address: MonitoringServiceAddress,
    registry_address: Address,
    receiver: Address,
    sync_start_block: BlockNumber = BlockNumber(0),
) -> None:
    super().__init__(filename, allow_create=True)
    self._setup(
        chain_id=chain_id,
        monitor_contract_address=Address(msc_address),
        token_network_registry_address=registry_address,
        receiver=receiver,
        sync_start_block=sync_start_block,
    )
def test_save_and_load_channel(ms_database: Database):
    ms_database.conn.execute(
        "INSERT INTO token_network (address, settle_timeout) VALUES (?, ?)",
        [to_checksum_address(DEFAULT_TOKEN_NETWORK_ADDRESS), DEFAULT_TOKEN_NETWORK_SETTLE_TIMEOUT],
    )
    for update_status in [
        None,
        OnChainUpdateStatus(
            update_sender_address=Address(bytes([1] * 20)),
            nonce=random.randint(0, UINT256_MAX),
        ),
    ]:
        channel = create_channel(update_status)
        ms_database.upsert_channel(channel)
        loaded_channel = ms_database.get_channel(
            token_network_address=channel.token_network_address, channel_id=channel.identifier
        )
        assert loaded_channel == channel
def test_monitor_new_balance_proof_event_handler_idempotency(context: Context):
    context = setup_state_with_closed_channel(context)

    new_balance_event = ReceiveMonitoringNewBalanceProofEvent(
        token_network_address=DEFAULT_TOKEN_NETWORK_ADDRESS,
        channel_identifier=DEFAULT_CHANNEL_IDENTIFIER,
        reward_amount=TokenAmount(1),
        nonce=Nonce(2),
        ms_address=Address(context.ms_state.address),
        raiden_node_address=DEFAULT_PARTICIPANT2,
        block_number=BlockNumber(23),
    )

    channel = context.database.get_channel(
        new_balance_event.token_network_address, new_balance_event.channel_identifier
    )
    assert channel
    assert channel.update_status is None

    monitor_new_balance_proof_event_handler(new_balance_event, context)

    assert context.database.scheduled_event_count() == 1
    assert context.database.channel_count() == 1
    channel = context.database.get_channel(
        new_balance_event.token_network_address, new_balance_event.channel_identifier
    )
    assert channel
    assert channel.update_status is not None
    assert channel.update_status.nonce == 2
    assert channel.update_status.update_sender_address == context.ms_state.address

    # Handling the same event a second time must not change the stored state
    monitor_new_balance_proof_event_handler(new_balance_event, context)

    assert context.database.scheduled_event_count() == 1
    assert context.database.channel_count() == 1
    channel = context.database.get_channel(
        new_balance_event.token_network_address, new_balance_event.channel_identifier
    )
    assert channel
    assert channel.update_status is not None
    assert channel.update_status.nonce == 2
    assert channel.update_status.update_sender_address == context.ms_state.address
def api_sut(
    pathfinding_service_mock: PathfindingService,
    reachability_state: SimpleReachabilityContainer,
    free_port: int,
    populate_token_network_case_1,  # pylint: disable=unused-argument
) -> Iterator[PFSApi]:
    pathfinding_service_mock.matrix_listener.user_manager = reachability_state
    pathfinding_service_mock.web3.eth.get_block = lambda x: Mock(
        timestamp=pathfinding_service_mock.blockchain_state.latest_committed_block * 15
        if x == "latest"
        else x * 15
    )
    api = PFSApi(
        pathfinding_service=pathfinding_service_mock,
        one_to_n_address=Address(bytes([1] * 20)),
        operator="",
    )
    api.run(host=DEFAULT_API_HOST, port=free_port)
    yield api
    api.stop()
def test_action_monitoring_triggered_event_handler_does_not_trigger_monitor_call_when_nonce_to_small(  # noqa
    context: Context,
):
    context = setup_state_with_closed_channel(context)

    event3 = ReceiveMonitoringNewBalanceProofEvent(
        token_network_address=DEFAULT_TOKEN_NETWORK_ADDRESS,
        channel_identifier=DEFAULT_CHANNEL_IDENTIFIER,
        reward_amount=TokenAmount(1),
        nonce=Nonce(5),
        ms_address=Address(bytes([3] * 20)),
        raiden_node_address=DEFAULT_PARTICIPANT2,
        block_number=BlockNumber(23),
    )

    channel = context.database.get_channel(event3.token_network_address, event3.channel_identifier)
    assert channel
    assert channel.update_status is None

    monitor_new_balance_proof_event_handler(event3, context)

    # add MR to DB, with nonce being smaller than in event3
    context.database.upsert_monitor_request(create_signed_monitor_request(nonce=Nonce(4)))

    event4 = ActionMonitoringTriggeredEvent(
        token_network_address=DEFAULT_TOKEN_NETWORK_ADDRESS,
        channel_identifier=DEFAULT_CHANNEL_IDENTIFIER,
        non_closing_participant=DEFAULT_PARTICIPANT2,
    )

    channel = context.database.get_channel(event4.token_network_address, event4.channel_identifier)
    assert channel
    assert channel.update_status is not None
    assert channel.monitor_tx_hash is None

    action_monitoring_triggered_event_handler(event4, context)

    assert context.database.channel_count() == 1
    assert channel
    assert channel.monitor_tx_hash is None
def _setup(
    self,
    chain_id: ChainID,
    receiver: Address,
    sync_start_block: BlockNumber,
    **contract_addresses: Address,
) -> None:
    """Make sure that the db is initialized and matches the given settings"""
    assert chain_id >= 0

    hex_addresses: Dict[str, str] = {
        con: to_checksum_address(addr) for con, addr in contract_addresses.items()
    }

    with self._cursor() as cursor:
        initialized = cursor.execute(
            "SELECT name FROM sqlite_master WHERE type='table' AND name='blockchain'"
        ).fetchone()

    settings = dict(
        chain_id=chain_id, receiver=to_checksum_address(receiver), **hex_addresses
    )
    if initialized:
        self._check_settings(settings, hex_addresses)
    else:
        # create db schema
        with open(self.schema_filename, encoding="utf-8") as schema_file:
            with self._cursor() as cursor:
                cursor.executescript(schema_file.read())
        update_stmt = "UPDATE blockchain SET {}".format(
            ",".join(
                f"{key} = :{key}"
                for key in ["chain_id", "receiver", "latest_committed_block"]
                + list(hex_addresses)
            )
        )
        with self._cursor() as cursor:
            cursor.execute(
                update_stmt, dict(latest_committed_block=sync_start_block, **settings)
            )
def test_process_payment_errors(
    pathfinding_service_web3_mock,
    web3,
    deposit_to_udc,
    create_account,
    get_private_key,
    make_iou,
    one_to_n_contract,
):
    pfs = pathfinding_service_web3_mock
    sender = create_account()
    privkey = get_private_key(sender)

    def test_payment(iou, service_fee=TokenAmount(1)):
        # IOU check reads the block number from here, so it has to be up to date
        pfs.blockchain_state.latest_committed_block = web3.eth.block_number
        pathfinding_service.api.process_payment(
            iou=iou,
            pathfinding_service=pfs,
            service_fee=service_fee,
            one_to_n_address=to_canonical_address(one_to_n_contract.address),
        )

    # expires too early
    iou = make_iou(
        privkey, pfs.address, claimable_until=web3.eth.get_block("latest").timestamp + 5 * 15
    )
    with pytest.raises(exceptions.IOUExpiredTooEarly):
        test_payment(iou)

    # it fails if there is no deposit in the UDC
    iou = make_iou(privkey, pfs.address)
    with pytest.raises(exceptions.DepositTooLow):
        test_payment(iou)

    # adding a deposit does not help immediately
    deposit_to_udc(sender, 10)
    with pytest.raises(exceptions.DepositTooLow):
        test_payment(iou)

    # must succeed after the deposit is confirmed
    web3.testing.mine(pathfinding_service_web3_mock.required_confirmations)
    test_payment(iou)

    # wrong recipient
    iou = make_iou(privkey, Address(bytes([6] * 20)))
    with pytest.raises(exceptions.WrongIOURecipient):
        test_payment(iou)

    # wrong chain_id
    iou = make_iou(privkey, pfs.address, chain_id=2)
    with pytest.raises(exceptions.UnsupportedChainID):
        test_payment(iou)

    # wrong one_to_n_address
    iou = make_iou(privkey, pfs.address, one_to_n_address=bytes([1] * 20))
    with pytest.raises(exceptions.WrongOneToNAddress):
        test_payment(iou)

    # payment too low
    iou = make_iou(privkey, pfs.address)
    with pytest.raises(exceptions.InsufficientServicePayment):
        test_payment(iou, service_fee=TokenAmount(2))
def test_crash(tmpdir, mockchain):  # pylint: disable=too-many-locals
    """Process blocks and compare results with/without crash

    A somewhat meaningful crash handling is simulated by not including the
    UpdatedHeadBlockEvent in every block.
    """
    token_address = TokenAddress(bytes([1] * 20))
    token_network_address = TokenNetworkAddress(bytes([2] * 20))
    channel_id = ChannelID(1)
    p1 = Address(bytes([3] * 20))
    p2 = Address(bytes([4] * 20))
    events = [
        [
            ReceiveTokenNetworkCreatedEvent(
                token_address=token_address,
                token_network_address=token_network_address,
                settle_timeout=DEFAULT_TOKEN_NETWORK_SETTLE_TIMEOUT,
                block_number=BlockNumber(1),
            )
        ],
        [UpdatedHeadBlockEvent(BlockNumber(2))],
        [
            ReceiveChannelOpenedEvent(
                token_network_address=token_network_address,
                channel_identifier=channel_id,
                participant1=p1,
                participant2=p2,
                block_number=BlockNumber(3),
            )
        ],
        [UpdatedHeadBlockEvent(BlockNumber(4))],
    ]
    mockchain(events)

    server_private_key = PrivateKey(get_random_privkey())
    contracts = {
        CONTRACT_TOKEN_NETWORK_REGISTRY: ContractMock(),
        CONTRACT_USER_DEPOSIT: ContractMock(),
    }

    def new_service(filename):
        service = PathfindingService(
            web3=Web3Mock(),
            private_key=server_private_key,
            contracts=contracts,
            sync_start_block=BlockNumber(0),
            required_confirmations=BlockTimeout(0),
            poll_interval=0,
            db_filename=os.path.join(tmpdir, filename),
        )
        return service

    # initialize stable service
    stable_service = new_service("stable.db")

    # process each block and compare results between crashy and stable service
    for to_block in range(len(events)):
        crashy_service = new_service("crashy.db")  # new instance to simulate crash
        result_state: List[dict] = []
        for service in [stable_service, crashy_service]:
            service._process_new_blocks(BlockNumber(to_block))  # pylint: disable=protected-access
            result_state.append(dict(db_dump=list(service.database.conn.iterdump())))

        # both instances should have the same state after processing
        for stable_state, crashy_state in zip(
            result_state[0].values(), result_state[1].values()
        ):
            if isinstance(stable_state, BlockchainState):
                assert stable_state.chain_id == crashy_state.chain_id
                assert (
                    stable_state.token_network_registry_address
                    == crashy_state.token_network_registry_address
                )
                assert stable_state.latest_committed_block == crashy_state.latest_committed_block
                assert (
                    stable_state.monitor_contract_address == crashy_state.monitor_contract_address
                )
                # Do not compare `current_event_filter_interval`, this is allowed to be different
            else:
                assert stable_state == crashy_state

        crashy_service.database.conn.close()  # close the db connection so we can access it again
"type": "m.presence", "content": {"presence": presence.value}, } self._presence_callback(event, next(self._presence_update_ids)) class NonValidatingUserAddressManager(UserAddressManager): @staticmethod def _validate_userid_signature(user: User) -> Optional[Address]: match = USERID_RE.match(user.user_id) if not match: return None return to_canonical_address(match.group(1)) ADDR1 = Address(b"\x11" * 20) ADDR2 = Address(b'""""""""""""""""""""') INVALID_USER_ID = "bla:bla" USER0_ID = "@0x0000000000000000000000000000000000000000:server1" USER1_S1_ID = "@0x1111111111111111111111111111111111111111:server1" USER1_S2_ID = "@0x1111111111111111111111111111111111111111:server2" USER2_S1_ID = "@0x2222222222222222222222222222222222222222:server1" USER2_S2_ID = "@0x2222222222222222222222222222222222222222:server2" USER1_S1 = User(api=None, user_id=USER1_S1_ID) USER1_S2 = User(api=None, user_id=USER1_S2_ID) USER2_S1 = User(api=None, user_id=USER2_S1_ID) USER2_S2 = User(api=None, user_id=USER2_S2_ID) @pytest.fixture def user_directory_content():
from raiden_contracts.tests.utils import LOCKSROOT_OF_NO_LOCKS, deepcopy
from raiden_libs.matrix import (
    ClientManager,
    MatrixListener,
    RateLimiter,
    deserialize_messages,
    matrix_http_retry_delay,
)
from raiden_libs.user_address import MultiClientUserAddressManager
from tests.pathfinding.test_fee_updates import (
    PRIVATE_KEY_1,
    PRIVATE_KEY_1_ADDRESS,
    get_fee_update_message,
)

INVALID_PEER_ADDRESS = Address(to_canonical_address("0x" + "1" * 40))


@pytest.fixture
def request_monitoring_message(token_network, get_accounts, get_private_key) -> RequestMonitoring:
    c1, c2 = get_accounts(2)
    balance_proof_c2 = HashedBalanceProof(
        channel_identifier=ChannelID(1),
        token_network_address=TokenNetworkAddress(to_canonical_address(token_network.address)),
        chain_id=ChainID(61),
        nonce=Nonce(2),
        additional_hash="0x%064x" % 0,
        transferred_amount=TokenAmount(1),
def public_key_to_address(public_key: PublicKey) -> Address:
    """Converts a public key to an Ethereum address."""
    key_bytes = public_key.format(compressed=False)
    return Address(keccak(key_bytes[1:])[-20:])
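# Usage sketch for the helper above. It assumes coincurve's key types, which
# is what the `format(compressed=False)` call implies; the derivation itself
# is the standard Ethereum one: keccak256 of the 64-byte uncompressed public
# key (0x04 prefix stripped), last 20 bytes.
#
#     from coincurve import PrivateKey
#
#     private_key = PrivateKey(b"\x01" * 32)  # example key, not a real secret
#     address = public_key_to_address(private_key.public_key)
#     assert len(address) == 20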
def test_monitor_new_balance_proof_event_handler_sets_update_status(context: Context):
    context = setup_state_with_closed_channel(context)

    new_balance_event = ReceiveMonitoringNewBalanceProofEvent(
        token_network_address=DEFAULT_TOKEN_NETWORK_ADDRESS,
        channel_identifier=DEFAULT_CHANNEL_IDENTIFIER,
        reward_amount=TokenAmount(1),
        nonce=Nonce(2),
        ms_address=Address(bytes([4] * 20)),
        raiden_node_address=DEFAULT_PARTICIPANT2,
        block_number=BlockNumber(62),
    )

    channel = context.database.get_channel(
        new_balance_event.token_network_address, new_balance_event.channel_identifier
    )
    assert channel
    assert channel.update_status is None
    assert get_scheduled_claim_event(context.database) is None

    monitor_new_balance_proof_event_handler(new_balance_event, context)

    assert context.database.channel_count() == 1
    channel = context.database.get_channel(
        new_balance_event.token_network_address, new_balance_event.channel_identifier
    )
    assert channel
    assert channel.update_status is not None
    assert channel.update_status.nonce == 2
    assert channel.update_status.update_sender_address == bytes([4] * 20)

    # closing block * avg. time per block + token network settle timeout
    # (the channel was closed at block 52, hence the 52 * 15 below)
    expected_trigger_timestamp = 52 * 15 + context.database.get_token_network_settle_timeout(
        channel.token_network_address
    )

    scheduled_claim_event = get_scheduled_claim_event(context.database)
    assert scheduled_claim_event is not None
    assert scheduled_claim_event.trigger_timestamp == expected_trigger_timestamp

    new_balance_event2 = ReceiveMonitoringNewBalanceProofEvent(
        token_network_address=DEFAULT_TOKEN_NETWORK_ADDRESS,
        channel_identifier=DEFAULT_CHANNEL_IDENTIFIER,
        reward_amount=TokenAmount(1),
        nonce=Nonce(5),
        ms_address=Address(bytes([4] * 20)),
        raiden_node_address=DEFAULT_PARTICIPANT2,
        block_number=BlockNumber(63),
    )
    monitor_new_balance_proof_event_handler(new_balance_event2, context)

    assert context.database.channel_count() == 1
    channel = context.database.get_channel(
        new_balance_event.token_network_address, new_balance_event.channel_identifier
    )
    assert channel
    assert channel.update_status is not None
    assert channel.update_status.nonce == 5
    assert channel.update_status.update_sender_address == bytes([4] * 20)

    scheduled_claim_event = get_scheduled_claim_event(context.database)
    assert scheduled_claim_event is not None
    assert scheduled_claim_event.trigger_timestamp == expected_trigger_timestamp
    ReceiveChannelClosedEvent,
    ReceiveChannelOpenedEvent,
    ReceiveChannelSettledEvent,
    ReceiveTokenNetworkCreatedEvent,
    UpdatedHeadBlockEvent,
)
from raiden_libs.logging import format_to_hex
from raiden_libs.states import BlockchainState
from raiden_libs.utils import to_checksum_address
from tests.constants import DEFAULT_TOKEN_NETWORK_SETTLE_TIMEOUT
from tests.utils import save_metrics_state

from ..libs.mocks.web3 import ContractMock, Web3Mock

PARTICIPANT1_PRIVKEY, PARTICIPANT1 = make_privkey_address()
PARTICIPANT2 = Address(bytes([2] * 20))