def _calculate_fees(self) -> None:
    """Calculate mediation fees backwards along this path.

    Walks the path from target to initiator (hence ``reversed``), adding
    each mediator's outgoing (sender) and incoming (receiver) fee to the
    running total and recording the combined per-hop fee in ``self.fees``.
    If a hop's fee is undefined, the path is marked invalid via
    ``self._is_valid`` and the calculation stops.
    """
    total = PaymentWithFeeAmount(self.value)
    for prev_node, mediator, next_node in reversed(list(window(self.nodes, 3))):
        try:
            view_in: ChannelView = self.G[prev_node][mediator]["view"]
            view_out: ChannelView = self.G[mediator][next_node]["view"]

            fee_out = view_out.backwards_fee_sender(
                balance=Balance(view_out.capacity), amount=total
            )
            total += fee_out  # type: ignore

            fee_in = view_in.backwards_fee_receiver(
                balance=Balance(view_in.capacity), amount=total
            )
            total += fee_in  # type: ignore

            self.fees.append(FeeAmount(fee_in + fee_out))
        except UndefinedMediationFee:
            log.warning(
                "Invalid fee calculation",
                amount=total,
                view_out=view_out,
                view_in=view_in,
                fee_schedule_sender=view_out.fee_schedule_sender,
                fee_schedule_receiver=view_in.fee_schedule_receiver,
            )
            self._is_valid = False
            # BUG FIX: once one hop's fee is undefined, `total` is wrong for
            # every remaining hop -- stop instead of appending bogus fees.
            break
def edge_weight(
    visited: Dict[ChannelID, float],
    view: ChannelView,
    view_from_partner: ChannelView,
    amount: PaymentAmount,
    fee_penalty: float,
) -> float:
    """Return the routing weight of a channel edge.

    The weight combines a base cost of 1, a diversity penalty for channels
    already used by other routes, an estimated fee term, and a penalty for
    channels whose partner lacks capacity for a refund transfer.  Returns
    ``float("inf")`` when the mediation fee is undefined for this amount.
    """
    weight = 1 + visited.get(view.channel_id, 0)

    # Fees for initiator and target are included here. This promotes routes
    # that are nice to the initiator's and target's capacities, but it's
    # inconsistent with the estimated total fee.
    try:
        payment = PaymentWithFeeAmount(amount)
        fee_out = view.backwards_fee_sender(
            balance=Balance(view.capacity), amount=payment
        )
        fee_in = view.backwards_fee_receiver(
            balance=Balance(view.capacity), amount=payment
        )
    except UndefinedMediationFee:
        return float("inf")
    weight += (fee_in + fee_out) / 1e18 * fee_penalty

    # Penalize edges where the partner might not be able to refund the
    # payment (10% safety margin on top of the amount).
    if view_from_partner.capacity < int(float(amount) * 1.1):
        weight += 1

    return weight
def test_channel_deposit(raiden_chain, deposit, retry_timeout, token_addresses):
    """Open a channel between two nodes and deposit from both sides,
    waiting until each participant observes the on-chain state."""
    app0, app1 = raiden_chain
    token_address = token_addresses[0]
    registry_address = app0.raiden.default_registry.address

    token_network_address = views.get_token_network_address_by_token_address(
        views.state_from_app(app0), app0.raiden.default_registry.address, token_address
    )
    assert token_network_address

    channel0 = views.get_channelstate_by_token_network_and_partner(
        views.state_from_app(app0), token_network_address, app1.raiden.address
    )
    # BUG FIX: the second lookup must be made from app1's point of view with
    # app0 as the partner -- the original queried app0's state twice, making
    # the channel1 assertion a duplicate of channel0's.
    channel1 = views.get_channelstate_by_token_network_and_partner(
        views.state_from_app(app1), token_network_address, app0.raiden.address
    )
    assert channel0 is None
    assert channel1 is None

    RaidenAPI(app0.raiden).channel_open(registry_address, token_address, app1.raiden.address)

    timeout_seconds = 15
    exception = RuntimeError(f"Did not see the channels open within {timeout_seconds} seconds")
    with gevent.Timeout(seconds=timeout_seconds, exception=exception):
        wait_both_channel_open(app0, app1, registry_address, token_address, retry_timeout)

    # Channel is open but still unfunded on both sides.
    assert_synced_channel_state(token_network_address, app0, Balance(0), [], app1, Balance(0), [])

    RaidenAPI(app0.raiden).set_total_channel_deposit(
        registry_address, token_address, app1.raiden.address, deposit
    )

    exception = RuntimeError(f"Did not see the channel deposit within {timeout_seconds} seconds")
    with gevent.Timeout(seconds=timeout_seconds, exception=exception):
        waiting.wait_single_channel_deposit(
            app0, app1, registry_address, token_address, deposit, retry_timeout
        )

    assert_synced_channel_state(token_network_address, app0, deposit, [], app1, Balance(0), [])

    RaidenAPI(app1.raiden).set_total_channel_deposit(
        registry_address, token_address, app0.raiden.address, deposit
    )

    with gevent.Timeout(seconds=timeout_seconds, exception=exception):
        waiting.wait_single_channel_deposit(
            app1, app0, registry_address, token_address, deposit, retry_timeout
        )

    assert_synced_channel_state(token_network_address, app0, deposit, [], app1, deposit, [])
def _calculate_fees(self) -> Optional[List[FeeAmount]]:
    """Calculates fees backwards for this path.

    Walks the path from target to initiator, computing for each mediator
    the amount it must receive so that ``self.value`` arrives at the
    target, and collecting the per-hop fees.

    Returns ``None``, if the fee calculation cannot be done.
    """
    total = PaymentWithFeeAmount(self.value)
    fees: List[FeeAmount] = []
    for prev_node, mediator, next_node in reversed(list(window(self.nodes, 3))):
        # Incoming and outgoing channel views for the current mediator.
        view_in: ChannelView = self.G[prev_node][mediator]["view"]
        view_out: ChannelView = self.G[mediator][next_node]["view"]

        log.debug(
            "Fee calculation",
            amount=total,
            view_out=view_out,
            view_in=view_in,
            amount_without_fees=total,
            balance_in=view_in.capacity_partner,
            balance_out=view_out.capacity,
            schedule_in=view_in.fee_schedule_receiver,
            schedule_out=view_out.fee_schedule_sender,
            receivable_amount=view_in.capacity,
        )

        amount_with_fees = get_amount_with_fees(
            amount_without_fees=total,
            balance_in=Balance(view_in.capacity_partner),
            balance_out=Balance(view_out.capacity),
            schedule_in=view_in.fee_schedule_receiver,
            schedule_out=view_out.fee_schedule_sender,
            receivable_amount=view_in.capacity,
        )
        if amount_with_fees is None:
            # A mediator's fee schedule is undefined for this amount --
            # the whole path is unusable.
            log.warning(
                "Invalid path because of invalid fee calculation",
                amount=total,
                view_out=view_out,
                view_in=view_in,
                amount_without_fees=total,
                balance_in=view_in.capacity_partner,
                balance_out=view_out.capacity,
                schedule_in=view_in.fee_schedule_receiver,
                schedule_out=view_out.fee_schedule_sender,
                receivable_amount=view_in.capacity,
            )
            return None

        fee = PaymentWithFeeAmount(amount_with_fees - total)
        total += fee  # type: ignore
        fees.append(FeeAmount(fee))

    # The hop to the target does not incur mediation fees
    fees.append(FeeAmount(0))

    return fees
def contractreceivechannelnew_from_event(
    new_channel_details: NewChannelDetails,
    channel_config: ChannelConfig,
    event: DecodedEvent,
) -> ContractReceiveChannelNew:
    """Build a ``ContractReceiveChannelNew`` state change from a decoded
    channel-opened blockchain event."""
    args = event.event_data["args"]
    block_number = event.block_number

    # Both ends start with a zero on-chain balance; deposits arrive as
    # separate events.
    our_state = NettingChannelEndState(new_channel_details.our_address, Balance(0))
    partner_state = NettingChannelEndState(new_channel_details.partner_address, Balance(0))

    # If the node was offline for a long period, the channel may have been
    # closed already, if that is the case during initialization the node will
    # process the other events and update the channel's state
    close_transaction: Optional[TransactionExecutionStatus] = None
    settle_transaction: Optional[TransactionExecutionStatus] = None

    channel_state = NettingChannelState(
        canonical_identifier=CanonicalIdentifier(
            chain_identifier=new_channel_details.chain_id,
            token_network_address=TokenNetworkAddress(event.originating_contract),
            channel_identifier=args["channel_identifier"],
        ),
        token_address=new_channel_details.token_address,
        token_network_registry_address=new_channel_details.token_network_registry_address,
        reveal_timeout=channel_config.reveal_timeout,
        settle_timeout=args["settle_timeout"],
        fee_schedule=channel_config.fee_schedule,
        our_state=our_state,
        partner_state=partner_state,
        open_transaction=TransactionExecutionStatus(
            None, block_number, TransactionExecutionStatus.SUCCESS
        ),
        close_transaction=close_transaction,
        settle_transaction=settle_transaction,
    )

    return ContractReceiveChannelNew(
        channel_state=channel_state,
        transaction_hash=event.transaction_hash,
        block_number=block_number,
        block_hash=event.block_hash,
    )
def test_imbalance_penalty():
    r"""Test an imbalance penalty by moving back and forth

    The imbalance fee looks like

    20 |         /
       |        /
    10 |\.     /
       |  \.  /
     0 |    \/
       ---------------
       0    50     100

    For each input, we first assume the channel is used to forward tokens to
    a payee, which moves the capacity from x1 to x2. The we assume the same
    amount is mediated in the opposite direction (moving from x2 to x1) and
    check that the calculated fee is the same as before just with the
    opposite sign.
    """
    v_schedule = FeeScheduleState(
        imbalance_penalty=[
            (TokenAmount(0), FeeAmount(10)),
            (TokenAmount(50), FeeAmount(0)),
            (TokenAmount(100), FeeAmount(20)),
        ]
    )

    cases = [
        # (x1, amount, expected_fee_payee, expected_fee_payer)
        (0, 50, -6, 10),
        (50, 50, 12, -20),
        (0, 10, -2, 2),
        (10, 10, -2, 2),
        (0, 20, -5, 4),
        (40, 15, 0, 0),
    ]
    for x1, amount, expected_fee_payee, expected_fee_payer in cases:
        x2 = x1 + amount
        fee_payee = v_schedule.fee_payee(
            balance=Balance(100 - x1), amount=PaymentWithFeeAmount(amount)
        )
        assert fee_payee == FeeAmount(expected_fee_payee)
        fee_payer = v_schedule.fee_payer(
            balance=Balance(100 - x2), amount=PaymentWithFeeAmount(amount)
        )
        assert fee_payer == FeeAmount(expected_fee_payer)

    # Outside the curve's domain the fee is undefined.
    with pytest.raises(UndefinedMediationFee):
        v_schedule.fee_payee(balance=Balance(0), amount=PaymentWithFeeAmount(1))
    with pytest.raises(UndefinedMediationFee):
        v_schedule.fee_payer(balance=Balance(100), amount=PaymentWithFeeAmount(1))
def test_basic_fee():
    """Check payer fees for flat, proportional, and combined schedules."""
    flat_schedule = FeeScheduleState(flat=FeeAmount(2))
    assert flat_schedule.fee_payer(PaymentWithFeeAmount(10), balance=Balance(0)) == FeeAmount(2)

    # 1% proportional fee, rounded by the schedule implementation.
    prop_schedule = FeeScheduleState(proportional=ProportionalFeeAmount(int(0.01e6)))
    for amount, expected in ((40, 0), (60, 1), (1000, 10)):
        fee = prop_schedule.fee_payer(PaymentWithFeeAmount(amount), balance=Balance(0))
        assert fee == FeeAmount(expected)

    # Flat and proportional components add up.
    combined_schedule = FeeScheduleState(
        flat=FeeAmount(2), proportional=ProportionalFeeAmount(int(0.01e6))
    )
    assert combined_schedule.fee_payer(
        PaymentWithFeeAmount(60), balance=Balance(0)
    ) == FeeAmount(3)
def test_fee_capping():
    r"""Test the capping when one section of the fee function crossed from the
    positive into negative fees. Here, our fee curve looks like:

        Fee
        |
      5 +
        |\
        | \
      0 +--+-----+-> incoming_amount
        | 25\    100
        |    \
        |     \
        |      \
        |       \
    -15 +        \
        0

    When capping it, we need to insert the intersection point of (25, 0) into
    our piecewise linear function before capping all y values to zero.
    Otherwise we would just interpolate between (0, 5) and (100, 0).
    """
    imbalance_curve = [
        (TokenAmount(0), FeeAmount(0)),
        (TokenAmount(100), FeeAmount(20)),
    ]
    schedule = FeeScheduleState(imbalance_penalty=imbalance_curve, flat=FeeAmount(5))

    fee_func = FeeScheduleState.mediation_fee_func(
        schedule_in=FeeScheduleState(),
        schedule_out=schedule,
        balance_in=Balance(0),
        balance_out=Balance(100),
        receivable=TokenAmount(100),
        amount_with_fees=PaymentWithFeeAmount(5),
        cap_fees=True,
    )

    # Past the zero crossing the raw fee would be negative and must be capped.
    assert fee_func(30) == 0  # 5 - 6, capped
    # Before the crossing, the uncapped value is returned.
    assert fee_func(20) == 5 - 4
def edge_weight( visited: Dict[ChannelID, float], view: ChannelView, view_from_partner: ChannelView, amount: PaymentAmount, fee_penalty: float, ) -> float: diversity_weight = visited.get(view.channel_id, 0) # Fees for initiator and target are included here. This promotes routes # that are nice to the initiator's and target's capacities, but it's # inconsistent with the estimated total fee. # Enable fee apping for both fee schedules schedule_in = copy(view.fee_schedule_receiver) schedule_in.cap_fees = True schedule_out = copy(view.fee_schedule_sender) schedule_out.cap_fees = True amount_with_fees = get_amount_with_fees( amount_without_fees=PaymentWithFeeAmount(amount), balance_in=Balance(view.capacity), balance_out=Balance(view.capacity), schedule_in=schedule_in, schedule_out=schedule_out, receivable_amount=view.capacity, ) if amount_with_fees is None: return float("inf") fee = FeeAmount(amount_with_fees - amount) fee_weight = fee / 1e18 * fee_penalty no_refund_weight = 0 if view_from_partner.capacity < int(float(amount) * 1.1): no_refund_weight = 1 return 1 + diversity_weight + fee_weight + no_refund_weight
def _calculate_fees(self) -> None:
    """Calculate mediation fees backwards for this path, in place.

    Walks the path from target to initiator, computing for each mediator
    the amount it must receive so that ``self.value`` arrives at the
    target.  Per-hop fees are appended to ``self.fees``; if any hop's fee
    cannot be computed the path is marked invalid via ``self._is_valid``
    and the calculation stops.
    """
    total = PaymentWithFeeAmount(self.value)
    for prev_node, mediator, next_node in reversed(list(window(self.nodes, 3))):
        # Incoming and outgoing channel views for the current mediator.
        view_in: ChannelView = self.G[prev_node][mediator]["view"]
        view_out: ChannelView = self.G[mediator][next_node]["view"]

        amount_with_fees = get_amount_with_fees(
            amount_without_fees=total,
            balance_in=Balance(view_in.capacity_partner),
            balance_out=Balance(view_out.capacity),
            schedule_in=view_in.fee_schedule_receiver,
            schedule_out=view_out.fee_schedule_sender,
            receivable_amount=view_in.capacity,
        )
        if amount_with_fees is None:
            log.warning(
                "Invalid fee calculation",
                amount=total,
                view_out=view_out,
                view_in=view_in,
                amount_without_fees=total,
                balance_in=view_in.capacity_partner,
                balance_out=view_out.capacity,
                schedule_in=view_in.fee_schedule_receiver,
                schedule_out=view_out.fee_schedule_sender,
                receivable_amount=view_in.capacity,
            )
            self._is_valid = False
            # `total` would be wrong for the remaining hops, so stop here.
            break

        fee = PaymentWithFeeAmount(amount_with_fees - total)
        total += fee  # type: ignore
        self.fees.append(FeeAmount(fee))
def test_payment_channel_proxy_basics(
    token_network_registry_address: TokenNetworkRegistryAddress,
    token_network_proxy: TokenNetwork,
    token_proxy: Token,
    chain_id: ChainID,
    private_keys: List[PrivateKey],
    web3: Web3,
    contract_manager: ContractManager,
    reveal_timeout: BlockTimeout,
) -> None:
    """Exercise the full open -> deposit -> close -> settle life cycle of a
    payment channel through the proxy layer, checking the emitted events and
    the error behavior of repeated or illegal calls."""
    token_network_address = token_network_proxy.address
    partner = privatekey_to_address(private_keys[0])

    rpc_client = JSONRPCClient(web3, private_keys[1])
    proxy_manager = ProxyManager(
        rpc_client=rpc_client,
        contract_manager=contract_manager,
        metadata=ProxyManagerMetadata(
            token_network_registry_deployed_at=GENESIS_BLOCK_NUMBER,
            filters_start_at=GENESIS_BLOCK_NUMBER,
        ),
    )
    token_network_proxy = proxy_manager.token_network(
        address=token_network_address, block_identifier=BLOCK_ID_LATEST
    )
    start_block = web3.eth.blockNumber

    channel_details = token_network_proxy.new_netting_channel(
        partner=partner,
        settle_timeout=TEST_SETTLE_TIMEOUT_MIN,
        given_block_identifier=BLOCK_ID_LATEST,
    )
    channel_identifier = channel_details.channel_identifier
    assert channel_identifier is not None

    channel_state = NettingChannelState(
        canonical_identifier=CanonicalIdentifier(
            chain_identifier=chain_id,
            token_network_address=token_network_address,
            channel_identifier=channel_identifier,
        ),
        token_address=token_network_proxy.token_address(),
        token_network_registry_address=token_network_registry_address,
        reveal_timeout=reveal_timeout,
        settle_timeout=BlockTimeout(TEST_SETTLE_TIMEOUT_MIN),
        fee_schedule=FeeScheduleState(),
        our_state=NettingChannelEndState(
            address=token_network_proxy.client.address, contract_balance=Balance(0)
        ),
        partner_state=NettingChannelEndState(address=partner, contract_balance=Balance(0)),
        open_transaction=SuccessfulTransactionState(finished_block_number=BlockNumber(0)),
    )
    channel_proxy_1 = proxy_manager.payment_channel(
        channel_state=channel_state, block_identifier=BLOCK_ID_LATEST
    )

    assert channel_proxy_1.channel_identifier == channel_identifier
    assert channel_proxy_1.opened(BLOCK_ID_LATEST) is True

    # Test deposit
    initial_token_balance = 100
    token_proxy.transfer(rpc_client.address, TokenAmount(initial_token_balance))
    assert token_proxy.balance_of(rpc_client.address) == initial_token_balance
    assert token_proxy.balance_of(partner) == 0
    channel_proxy_1.approve_and_set_total_deposit(
        total_deposit=TokenAmount(10), block_identifier=BLOCK_ID_LATEST
    )

    # ChannelOpened, ChannelNewDeposit
    channel_events = get_all_netting_channel_events(
        proxy_manager=proxy_manager,
        token_network_address=token_network_address,
        netting_channel_identifier=channel_proxy_1.channel_identifier,
        contract_manager=contract_manager,
        from_block=start_block,
        to_block=web3.eth.blockNumber,
    )
    assert len(channel_events) == 2

    # Close the channel with an empty balance proof (no transfers happened).
    block_before_close = web3.eth.blockNumber
    empty_balance_proof = BalanceProof(
        channel_identifier=channel_proxy_1.channel_identifier,
        token_network_address=token_network_address,
        balance_hash=EMPTY_BALANCE_HASH,
        nonce=0,
        chain_id=chain_id,
        transferred_amount=TokenAmount(0),
    )
    closing_data = (
        empty_balance_proof.serialize_bin(msg_type=MessageTypeId.BALANCE_PROOF) + EMPTY_SIGNATURE
    )
    channel_proxy_1.close(
        nonce=Nonce(0),
        balance_hash=EMPTY_BALANCE_HASH,
        additional_hash=EMPTY_MESSAGE_HASH,
        non_closing_signature=EMPTY_SIGNATURE,
        closing_signature=LocalSigner(private_keys[1]).sign(data=closing_data),
        block_identifier=BLOCK_ID_LATEST,
    )
    assert channel_proxy_1.closed(BLOCK_ID_LATEST) is True

    # ChannelOpened, ChannelNewDeposit, ChannelClosed
    channel_events = get_all_netting_channel_events(
        proxy_manager=proxy_manager,
        token_network_address=token_network_address,
        netting_channel_identifier=channel_proxy_1.channel_identifier,
        contract_manager=contract_manager,
        from_block=start_block,
        to_block=web3.eth.blockNumber,
    )
    assert len(channel_events) == 3

    # check the settlement timeouts again
    assert channel_proxy_1.settle_timeout() == TEST_SETTLE_TIMEOUT_MIN

    # update transfer -- we need to wait on +1 since we use the latest block on parity for
    # estimate gas and at the time the latest block is the settle timeout block.
    # More info: https://github.com/raiden-network/raiden/pull/3699#discussion_r270477227
    rpc_client.wait_until_block(
        target_block_number=BlockNumber(rpc_client.block_number() + TEST_SETTLE_TIMEOUT_MIN + 1)
    )

    transaction_hash = channel_proxy_1.settle(
        transferred_amount=TokenAmount(0),
        locked_amount=LockedAmount(0),
        locksroot=LOCKSROOT_OF_NO_LOCKS,
        partner_transferred_amount=TokenAmount(0),
        partner_locked_amount=LockedAmount(0),
        partner_locksroot=LOCKSROOT_OF_NO_LOCKS,
        block_identifier=BLOCK_ID_LATEST,
    )
    assert is_tx_hash_bytes(transaction_hash)
    assert channel_proxy_1.settled(BLOCK_ID_LATEST) is True

    # ChannelOpened, ChannelNewDeposit, ChannelClosed, ChannelSettled
    channel_events = get_all_netting_channel_events(
        proxy_manager=proxy_manager,
        token_network_address=token_network_address,
        netting_channel_identifier=channel_proxy_1.channel_identifier,
        contract_manager=contract_manager,
        from_block=start_block,
        to_block=web3.eth.blockNumber,
    )
    assert len(channel_events) == 4

    # Open a second channel with the same partner after settlement.
    channel_details = token_network_proxy.new_netting_channel(
        partner=partner,
        settle_timeout=TEST_SETTLE_TIMEOUT_MIN,
        given_block_identifier=BLOCK_ID_LATEST,
    )
    new_channel_identifier = channel_details.channel_identifier
    assert new_channel_identifier is not None

    channel_state = NettingChannelState(
        canonical_identifier=CanonicalIdentifier(
            chain_identifier=chain_id,
            token_network_address=token_network_address,
            channel_identifier=new_channel_identifier,
        ),
        token_address=token_network_proxy.token_address(),
        token_network_registry_address=token_network_registry_address,
        reveal_timeout=reveal_timeout,
        settle_timeout=BlockTimeout(TEST_SETTLE_TIMEOUT_MIN),
        fee_schedule=FeeScheduleState(),
        our_state=NettingChannelEndState(
            address=token_network_proxy.client.address, contract_balance=Balance(0)
        ),
        partner_state=NettingChannelEndState(address=partner, contract_balance=Balance(0)),
        open_transaction=SuccessfulTransactionState(finished_block_number=BlockNumber(0)),
    )
    channel_proxy_2 = proxy_manager.payment_channel(
        channel_state=channel_state, block_identifier=BLOCK_ID_LATEST
    )

    assert channel_proxy_2.channel_identifier == new_channel_identifier
    assert channel_proxy_2.opened(BLOCK_ID_LATEST) is True

    msg = "The channel was already closed, the second call must fail"
    with pytest.raises(RaidenRecoverableError):
        channel_proxy_1.close(
            nonce=Nonce(0),
            balance_hash=EMPTY_BALANCE_HASH,
            additional_hash=EMPTY_MESSAGE_HASH,
            non_closing_signature=EMPTY_SIGNATURE,
            closing_signature=LocalSigner(private_keys[1]).sign(data=closing_data),
            block_identifier=block_before_close,
        )
        pytest.fail(msg)

    msg = "The channel is not open at latest, this must raise"
    with pytest.raises(RaidenUnrecoverableError):
        channel_proxy_1.close(
            nonce=Nonce(0),
            balance_hash=EMPTY_BALANCE_HASH,
            additional_hash=EMPTY_MESSAGE_HASH,
            non_closing_signature=EMPTY_SIGNATURE,
            closing_signature=LocalSigner(private_keys[1]).sign(data=closing_data),
            block_identifier=BLOCK_ID_LATEST,
        )
        pytest.fail(msg)

    msg = (
        "The channel was not opened at the provided block (latest). "
        "This call should never have been attempted."
    )
    with pytest.raises(BrokenPreconditionError):
        channel_proxy_1.approve_and_set_total_deposit(
            total_deposit=TokenAmount(20), block_identifier=BLOCK_ID_LATEST
        )
        pytest.fail(msg)
def run_smoketest(print_step: StepPrinter, setup: RaidenTestSetup) -> None:
    """Start a Raiden node against the prepared test setup, open and fund a
    channel, and verify the resulting state both through the internal views
    and through the REST API.  The node is always stopped on exit."""
    print_step("Starting Raiden")
    app = None
    try:
        app = run_raiden_service(**setup.args)
        raiden_api = app.raiden_api
        assert raiden_api is not None  # for mypy
        partner_address = Address(b"1" * 20)

        block = BlockNumber(app.get_block_number() + DEFAULT_NUMBER_OF_BLOCK_CONFIRMATIONS)
        # Proxies now use the confirmed block hash to query the chain for
        # prerequisite checks. Wait a bit here to make sure that the confirmed
        # block hash contains the deployed token network or else things break
        wait_for_block(raiden=app, block_number=block, retry_timeout=1.0)

        raiden_api.channel_open(
            registry_address=TokenNetworkRegistryAddress(
                setup.contract_addresses[CONTRACT_TOKEN_NETWORK_REGISTRY]
            ),
            token_address=TokenAddress(to_canonical_address(setup.token.address)),
            partner_address=partner_address,
        )
        raiden_api.set_total_channel_deposit(
            registry_address=TokenNetworkRegistryAddress(
                setup.contract_addresses[CONTRACT_TOKEN_NETWORK_REGISTRY]
            ),
            token_address=TokenAddress(to_canonical_address(setup.token.address)),
            partner_address=partner_address,
            total_deposit=TEST_DEPOSIT_AMOUNT,
        )
        token_addresses = [to_checksum_address(setup.token.address)]  # type: ignore

        print_step("Running smoketest")

        raiden_service = app
        # The deployed token must show up as a token-added event on the
        # registry.
        token_network_added_events = raiden_service.default_registry.filter_token_added_events()
        events_token_addresses = [
            event["args"]["token_address"] for event in token_network_added_events
        ]
        assert events_token_addresses == token_addresses

        token_networks = views.get_token_identifiers(
            views.state_from_raiden(raiden_service), raiden_service.default_registry.address
        )
        assert len(token_networks) == 1

        channel_state = views.get_channelstate_for(
            chain_state=views.state_from_raiden(raiden_service),
            token_network_registry_address=raiden_service.default_registry.address,
            token_address=token_networks[0],
            partner_address=partner_address,
        )
        assert channel_state

        # The whole deposit must be distributable since no transfers happened.
        distributable = channel.get_distributable(
            channel_state.our_state, channel_state.partner_state
        )
        assert distributable == TEST_DEPOSIT_AMOUNT
        assert Balance(distributable) == channel_state.our_state.contract_balance
        assert channel.get_status(channel_state) == ChannelState.STATE_OPENED

        # Cross-check the same state through the REST API.
        port_number = raiden_service.config.rest_api.port
        response = requests.get(f"http://localhost:{port_number}/api/v1/channels")
        assert response.status_code == HTTPStatus.OK

        response_json = json.loads(response.content)
        assert response_json[0]["partner_address"] == to_checksum_address(partner_address)
        assert response_json[0]["state"] == "opened"
        assert int(response_json[0]["balance"]) > 0
    finally:
        # Always shut the node down, even if an assertion above failed.
        if app is not None:
            app.stop()
            app.greenlet.get()
def test_send_queued_messages_after_restart(  # pylint: disable=unused-argument
    raiden_network: List[App],
    deposit: TokenAmount,
    token_addresses: List[TokenAddress],
    network_wait: float,
):
    """Test re-sending of undelivered messages on node restart"""
    app0, app1 = raiden_network
    token_address = token_addresses[0]
    chain_state = views.state_from_app(app0)
    token_network_registry_address = app0.raiden.default_registry.address
    token_network_address = views.get_token_network_address_by_token_address(
        chain_state, token_network_registry_address, token_address
    )
    assert token_network_address

    number_of_transfers = 7
    amount_per_transfer = PaymentAmount(1)
    total_transferred_amount = TokenAmount(amount_per_transfer * number_of_transfers)

    # Make sure none of the transfers will be sent before the restart
    transfers = []
    for secret_seed in range(number_of_transfers):
        secret = make_secret(secret_seed)
        secrethash = sha256_secrethash(secret)
        transfers.append((create_default_identifier(), amount_per_transfer, secret, secrethash))

        assert isinstance(app0.raiden.raiden_event_handler, HoldRaidenEventHandler)  # for mypy
        # Hold the outgoing LockedTransfer for this secrethash so the
        # transfer stays queued until after the restart.
        app0.raiden.raiden_event_handler.hold(
            SendLockedTransfer, {"transfer": {"lock": {"secrethash": secrethash}}}
        )

    for identifier, amount, secret, _ in transfers:
        app0.raiden.mediated_transfer_async(
            token_network_address=token_network_address,
            amount=amount,
            target=TargetAddress(app1.raiden.address),
            identifier=identifier,
            secret=secret,
        )

    app0.stop()

    # Restart the app. The pending transfers must be processed.
    new_transport = MatrixTransport(
        config=app0.raiden.config.transport, environment=app0.raiden.config.environment_type
    )
    raiden_event_handler = RaidenEventHandler()
    message_handler = MessageHandler()
    app0_restart = App(
        config=app0.config,
        rpc_client=app0.raiden.rpc_client,
        proxy_manager=app0.raiden.proxy_manager,
        query_start_block=BlockNumber(0),
        default_registry=app0.raiden.default_registry,
        default_secret_registry=app0.raiden.default_secret_registry,
        default_service_registry=app0.raiden.default_service_registry,
        default_one_to_n_address=app0.raiden.default_one_to_n_address,
        default_msc_address=app0.raiden.default_msc_address,
        transport=new_transport,
        raiden_event_handler=raiden_event_handler,
        message_handler=message_handler,
        routing_mode=RoutingMode.PRIVATE,
    )
    del app0
    app0_restart.start()

    # XXX: There is no synchronization among the app and the test, so it is
    # possible between `start` and the check below that some of the transfers
    # have completed, making it flaky.
    #
    # Make sure the transfers are in the queue and fail otherwise.
    chain_state = views.state_from_raiden(app0_restart.raiden)
    for _, _, _, secrethash in transfers:
        msg = "The secrethashes of the pending transfers must be in the queue after a restart."
        assert secrethash in chain_state.payment_mapping.secrethashes_to_task, msg

    with watch_for_unlock_failures(*raiden_network):
        exception = RuntimeError("Timeout while waiting for balance update for app0")
        with gevent.Timeout(20, exception=exception):
            waiting.wait_for_payment_balance(
                raiden=app0_restart.raiden,
                token_network_registry_address=token_network_registry_address,
                token_address=token_address,
                partner_address=app1.raiden.address,
                target_address=app1.raiden.address,
                target_balance=total_transferred_amount,
                retry_timeout=network_wait,
            )
        exception = RuntimeError("Timeout while waiting for balance update for app1")
        with gevent.Timeout(20, exception=exception):
            waiting.wait_for_payment_balance(
                raiden=app1.raiden,
                token_network_registry_address=token_network_registry_address,
                token_address=token_address,
                partner_address=app0_restart.raiden.address,
                target_address=app1.raiden.address,
                target_balance=total_transferred_amount,
                retry_timeout=network_wait,
            )

    # All queued transfers must have completed after the restart.
    assert_synced_channel_state(
        token_network_address,
        app0_restart,
        Balance(deposit - total_transferred_amount),
        [],
        app1,
        Balance(deposit + total_transferred_amount),
        [],
    )
    new_transport.stop()
def test_lock_expiry(
    raiden_network: List[App], token_addresses: List[TokenAddress], deposit: TokenAmount
) -> None:
    """Test lock expiry and removal."""
    alice_app, bob_app = raiden_network
    token_address = token_addresses[0]
    token_network_address = views.get_token_network_address_by_token_address(
        views.state_from_app(alice_app), alice_app.raiden.default_registry.address, token_address
    )
    assert token_network_address

    hold_event_handler = bob_app.raiden.raiden_event_handler
    wait_message_handler = bob_app.raiden.message_handler
    msg = "hold event handler necessary to control messages"
    assert isinstance(hold_event_handler, HoldRaidenEventHandler), msg
    assert isinstance(wait_message_handler, WaitForMessage), msg

    token_network = views.get_token_network_by_address(
        views.state_from_app(alice_app), token_network_address
    )
    assert token_network

    channel_state = get_channelstate(alice_app, bob_app, token_network_address)
    channel_identifier = channel_state.identifier

    assert (
        channel_identifier
        in token_network.partneraddresses_to_channelidentifiers[bob_app.raiden.address]
    )

    alice_to_bob_amount = PaymentAmount(10)
    identifier = factories.make_payment_id()
    target = TargetAddress(bob_app.raiden.address)
    transfer_1_secret = factories.make_secret(0)
    transfer_1_secrethash = sha256_secrethash(transfer_1_secret)
    transfer_2_secret = factories.make_secret(1)
    transfer_2_secrethash = sha256_secrethash(transfer_2_secret)

    # Hold Bob's SecretRequest so transfer 1 stalls and its lock expires.
    hold_event_handler.hold_secretrequest_for(secrethash=transfer_1_secrethash)
    transfer1_received = wait_message_handler.wait_for_message(
        LockedTransfer, {"lock": {"secrethash": transfer_1_secrethash}}
    )
    transfer2_received = wait_message_handler.wait_for_message(
        LockedTransfer, {"lock": {"secrethash": transfer_2_secrethash}}
    )
    remove_expired_lock_received = wait_message_handler.wait_for_message(
        LockExpired, {"secrethash": transfer_1_secrethash}
    )

    alice_app.raiden.start_mediated_transfer_with_secret(
        token_network_address=token_network_address,
        amount=alice_to_bob_amount,
        target=target,
        identifier=identifier,
        secret=transfer_1_secret,
    )
    transfer1_received.wait()

    alice_bob_channel_state = get_channelstate(alice_app, bob_app, token_network_address)
    lock = channel.get_lock(alice_bob_channel_state.our_state, transfer_1_secrethash)
    assert lock

    # This is the current state of the protocol:
    #
    #    A -> B LockedTransfer
    #    B -> A SecretRequest
    #    - protocol didn't continue
    assert_synced_channel_state(
        token_network_address, alice_app, Balance(deposit), [lock], bob_app, Balance(deposit), []
    )

    # Verify lock is registered in both channel states
    alice_channel_state = get_channelstate(alice_app, bob_app, token_network_address)
    assert transfer_1_secrethash in alice_channel_state.our_state.secrethashes_to_lockedlocks

    bob_channel_state = get_channelstate(bob_app, alice_app, token_network_address)
    assert transfer_1_secrethash in bob_channel_state.partner_state.secrethashes_to_lockedlocks

    alice_chain_state = views.state_from_raiden(alice_app.raiden)
    assert transfer_1_secrethash in alice_chain_state.payment_mapping.secrethashes_to_task

    # After expiry, the lock must be removed everywhere.
    remove_expired_lock_received.wait()

    alice_channel_state = get_channelstate(alice_app, bob_app, token_network_address)
    assert transfer_1_secrethash not in alice_channel_state.our_state.secrethashes_to_lockedlocks

    # Verify Bob received the message and processed the LockExpired message
    bob_channel_state = get_channelstate(bob_app, alice_app, token_network_address)
    assert transfer_1_secrethash not in bob_channel_state.partner_state.secrethashes_to_lockedlocks

    alice_chain_state = views.state_from_raiden(alice_app.raiden)
    assert transfer_1_secrethash not in alice_chain_state.payment_mapping.secrethashes_to_task

    # Make another transfer
    alice_to_bob_amount = PaymentAmount(10)
    identifier = factories.make_payment_id()

    hold_event_handler.hold_secretrequest_for(secrethash=transfer_2_secrethash)

    alice_app.raiden.start_mediated_transfer_with_secret(
        token_network_address=token_network_address,
        amount=alice_to_bob_amount,
        target=target,
        identifier=identifier,
        secret=transfer_2_secret,
    )
    transfer2_received.wait()

    # Make sure the other transfer still exists
    alice_chain_state = views.state_from_raiden(alice_app.raiden)
    assert transfer_2_secrethash in alice_chain_state.payment_mapping.secrethashes_to_task

    bob_channel_state = get_channelstate(bob_app, alice_app, token_network_address)
    assert transfer_2_secrethash in bob_channel_state.partner_state.secrethashes_to_lockedlocks
def test_query_events(
    raiden_chain,
    token_addresses,
    deposit,
    settle_timeout,
    retry_timeout,
    contract_manager,
    blockchain_type,
):
    """Open, fund, close, and settle a channel, querying the blockchain
    event filters at each step and checking that the expected events
    (and only those) are returned."""
    app0, app1 = raiden_chain  # pylint: disable=unbalanced-tuple-unpacking
    registry_address = app0.raiden.default_registry.address
    token_address = token_addresses[0]

    token_network_address = app0.raiden.default_registry.get_token_network(
        token_address, BLOCK_ID_LATEST
    )
    assert token_network_address
    manager0 = app0.raiden.proxy_manager.token_network(token_network_address, BLOCK_ID_LATEST)

    channelcount0 = views.total_token_network_channels(
        views.state_from_app(app0), registry_address, token_address
    )

    events = get_token_network_registry_events(
        proxy_manager=app0.raiden.proxy_manager,
        token_network_registry_address=registry_address,
        contract_manager=contract_manager,
        events=ALL_EVENTS,
    )

    assert must_have_event(
        events,
        {
            "event": EVENT_TOKEN_NETWORK_CREATED,
            "args": {
                "token_network_address": to_checksum_address(manager0.address),
                "token_address": to_checksum_address(token_address),
            },
        },
    )

    if blockchain_type == "geth":
        # FIXME: This is apparently meant to verify that querying nonexisting blocks
        # returns an empty list, which is not true for parity.
        events = get_token_network_registry_events(
            proxy_manager=app0.raiden.proxy_manager,
            token_network_registry_address=app0.raiden.default_registry.address,
            contract_manager=contract_manager,
            events=ALL_EVENTS,
            from_block=BlockNumber(999999998),
            to_block=BlockNumber(999999999),
        )
        assert not events

    RaidenAPI(app0.raiden).channel_open(registry_address, token_address, app1.raiden.address)

    wait_both_channel_open(app0, app1, registry_address, token_address, retry_timeout)

    events = get_token_network_events(
        proxy_manager=app0.raiden.proxy_manager,
        token_network_address=manager0.address,
        contract_manager=contract_manager,
        events=ALL_EVENTS,
    )

    _event = must_have_event(
        events,
        {
            "event": ChannelEvent.OPENED,
            "args": {
                "participant1": to_checksum_address(app0.raiden.address),
                "participant2": to_checksum_address(app1.raiden.address),
                "settle_timeout": settle_timeout,
            },
        },
    )
    assert _event
    channel_id = _event["args"]["channel_identifier"]

    if blockchain_type == "geth":
        # see above
        events = get_token_network_events(
            proxy_manager=app0.raiden.proxy_manager,
            token_network_address=manager0.address,
            contract_manager=contract_manager,
            events=ALL_EVENTS,
            from_block=BlockNumber(999999998),
            to_block=BlockNumber(999999999),
        )
        assert not events

    # channel is created but not opened and without funds
    channelcount1 = views.total_token_network_channels(
        views.state_from_app(app0), registry_address, token_address
    )
    assert channelcount0 + 1 == channelcount1

    assert_synced_channel_state(token_network_address, app0, Balance(0), [], app1, Balance(0), [])

    RaidenAPI(app0.raiden).set_total_channel_deposit(
        registry_address, token_address, app1.raiden.address, deposit
    )

    all_netting_channel_events = get_all_netting_channel_events(
        proxy_manager=app0.raiden.proxy_manager,
        token_network_address=token_network_address,
        netting_channel_identifier=channel_id,
        contract_manager=app0.raiden.contract_manager,
    )

    deposit_events = get_netting_channel_deposit_events(
        proxy_manager=app0.raiden.proxy_manager,
        token_network_address=token_network_address,
        netting_channel_identifier=channel_id,
        contract_manager=contract_manager,
    )

    total_deposit_event = {
        "event": ChannelEvent.DEPOSIT,
        "args": {
            "participant": to_checksum_address(app0.raiden.address),
            "total_deposit": deposit,
            "channel_identifier": channel_id,
        },
    }
    assert must_have_event(deposit_events, total_deposit_event)
    assert must_have_event(all_netting_channel_events, total_deposit_event)

    RaidenAPI(app0.raiden).channel_close(registry_address, token_address, app1.raiden.address)

    all_netting_channel_events = get_all_netting_channel_events(
        proxy_manager=app0.raiden.proxy_manager,
        token_network_address=token_network_address,
        netting_channel_identifier=channel_id,
        contract_manager=app0.raiden.contract_manager,
    )

    closed_events = get_netting_channel_closed_events(
        proxy_manager=app0.raiden.proxy_manager,
        token_network_address=token_network_address,
        netting_channel_identifier=channel_id,
        contract_manager=contract_manager,
    )

    closed_event = {
        "event": ChannelEvent.CLOSED,
        "args": {
            "channel_identifier": channel_id,
            "closing_participant": to_checksum_address(app0.raiden.address),
        },
    }
    assert must_have_event(closed_events, closed_event)
    assert must_have_event(all_netting_channel_events, closed_event)

    # Wait past the settlement window (+5 blocks of slack) so the channel
    # can be settled.
    settle_expiration = app0.raiden.rpc_client.block_number() + settle_timeout + 5
    app0.raiden.proxy_manager.client.wait_until_block(target_block_number=settle_expiration)

    all_netting_channel_events = get_all_netting_channel_events(
        proxy_manager=app0.raiden.proxy_manager,
        token_network_address=token_network_address,
        netting_channel_identifier=channel_id,
        contract_manager=app0.raiden.contract_manager,
    )

    settled_events = get_netting_channel_settled_events(
        proxy_manager=app0.raiden.proxy_manager,
        token_network_address=token_network_address,
        netting_channel_identifier=channel_id,
        contract_manager=contract_manager,
    )

    settled_event = {"event": ChannelEvent.SETTLED, "args": {"channel_identifier": channel_id}}
    assert must_have_event(settled_events, settled_event)
    assert must_have_event(all_netting_channel_events, settled_event)
def test_batch_unlock(
    raiden_network: List[App],
    token_addresses: List[TokenAddress],
    secret_registry_address: SecretRegistryAddress,
    deposit: TokenAmount,
) -> None:
    """Tests that batch unlock is properly called.

    This test will start a single incomplete transfer, the secret will be
    revealed *on-chain*. The node that receives the tokens has to call unlock,
    the node that doesn't gain anything does nothing.
    """
    alice_app, bob_app = raiden_network
    alice_address = alice_app.raiden.address
    bob_address = bob_app.raiden.address

    token_network_registry_address = alice_app.raiden.default_registry.address
    token_address = token_addresses[0]
    token_network_address = views.get_token_network_address_by_token_address(
        views.state_from_app(alice_app), token_network_registry_address, token_address)
    assert token_network_address

    # Bob's event handler must be the holding variant so the SecretRequest
    # can be intercepted below and the off-chain unlock prevented.
    hold_event_handler = bob_app.raiden.raiden_event_handler
    assert isinstance(hold_event_handler, HoldRaidenEventHandler)

    # Take a snapshot early on
    alice_app.raiden.snapshot()

    canonical_identifier = get_channelstate(
        alice_app, bob_app, token_network_address).canonical_identifier

    # Both ends must agree the channel exists before the transfer starts.
    assert is_channel_registered(alice_app, bob_app, canonical_identifier)
    assert is_channel_registered(bob_app, alice_app, canonical_identifier)

    # On-chain token balances, used for the final settlement assertions.
    token_proxy = alice_app.raiden.proxy_manager.token(token_address)
    alice_initial_balance = token_proxy.balance_of(alice_app.raiden.address)
    bob_initial_balance = token_proxy.balance_of(bob_app.raiden.address)

    # Take snapshot before transfer
    alice_app.raiden.snapshot()

    alice_to_bob_amount = 10
    identifier = 1
    secret = Secret(sha3(bob_address))
    secrethash = sha256_secrethash(secret)

    # Hold Bob's SecretRequest so the protocol stalls after the LockedTransfer.
    secret_request_event = hold_event_handler.hold_secretrequest_for(
        secrethash=secrethash)

    alice_app.raiden.start_mediated_transfer_with_secret(
        token_network_address=token_network_address,
        amount=PaymentAmount(alice_to_bob_amount),
        target=TargetAddress(bob_address),
        identifier=PaymentID(identifier),
        secret=secret,
    )

    secret_request_event.get()  # wait for the messages to be exchanged

    alice_bob_channel_state = get_channelstate(alice_app, bob_app, token_network_address)
    lock = channel.get_lock(alice_bob_channel_state.our_state, secrethash)
    assert lock

    # This is the current state of the protocol:
    #
    #    A -> B LockedTransfer
    #    B -> A SecretRequest
    #    - protocol didn't continue
    assert_synced_channel_state(
        token_network_address, alice_app, Balance(deposit), [lock], bob_app, Balance(deposit), [])

    # Test WAL restore to return the latest channel state
    alice_app.raiden.snapshot()

    our_balance_proof = alice_bob_channel_state.our_state.balance_proof
    restored_channel_state = channel_state_until_state_change(
        raiden=alice_app.raiden,
        canonical_identifier=alice_bob_channel_state.canonical_identifier,
        state_change_identifier=HIGH_STATECHANGE_ULID,
    )
    assert restored_channel_state
    our_restored_balance_proof = restored_channel_state.our_state.balance_proof
    assert our_balance_proof == our_restored_balance_proof

    # Close the channel before revealing the secret off-chain. This will leave
    # a pending lock in the channel which has to be unlocked on-chain.
    #
    # The token network will emit a ChannelClose event, this will be polled by
    # both apps and each must start a task for calling settle.
    RaidenAPI(bob_app.raiden).channel_close(
        token_network_registry_address, token_address, alice_app.raiden.address)

    # The secret has to be registered manually because Bob never learned the
    # secret. The test is holding the SecretRequest to ensure the off-chain
    # unlock will not happen and the channel is closed with a pending lock.
    #
    # Alternatives would be to hold the unlock messages, or to stop and restart
    # the apps after the channel is closed.
    secret_registry_proxy = alice_app.raiden.proxy_manager.secret_registry(
        secret_registry_address)
    secret_registry_proxy.register_secret(secret=secret)

    msg = (
        "The lock must still be part of the node state for the test to proceed, "
        "otherwise there is not unlock to be done.")
    assert lock, msg

    msg = (
        "The secret must be registered before the lock expires, in order for "
        "the unlock to happen on-chain. Otherwise the test will fail on the "
        "expected balances.")
    assert lock.expiration > alice_app.raiden.get_block_number(), msg
    assert lock.secrethash == sha256_secrethash(secret)

    waiting.wait_for_settle(
        alice_app.raiden,
        token_network_registry_address,
        token_address,
        [alice_bob_channel_state.identifier],
        alice_app.raiden.alarm.sleep_time,
    )

    msg = "The channel_state must not have been cleared, one of the ends has pending locks to do."
    assert is_channel_registered(alice_app, bob_app, canonical_identifier), msg
    assert is_channel_registered(bob_app, alice_app, canonical_identifier), msg

    msg = (
        "Timeout while waiting for the unlock to be mined. This may happen if "
        "transaction is rejected, not mined, or the node's alarm task is "
        "not running.")
    with gevent.Timeout(seconds=30, exception=AssertionError(msg)):
        # Wait for both nodes (Bob and Alice) to see the on-chain unlock
        wait_for_batch_unlock(
            app=alice_app,
            token_network_address=token_network_address,
            receiver=bob_address,
            sender=alice_address,
        )
        wait_for_batch_unlock(
            app=bob_app,
            token_network_address=token_network_address,
            receiver=bob_address,
            sender=alice_address,
        )

    msg = (
        "The nodes have done the unlock, and both ends have seen it, now the "
        "channel must be cleared")
    assert not is_channel_registered(alice_app, bob_app, canonical_identifier), msg
    assert not is_channel_registered(bob_app, alice_app, canonical_identifier), msg

    # After batch unlock Alice recovers her deposit minus the transferred
    # amount, and Bob gains it, since the secret was registered on-chain.
    alice_new_balance = alice_initial_balance + deposit - alice_to_bob_amount
    bob_new_balance = bob_initial_balance + deposit + alice_to_bob_amount

    msg = "Unexpected end balance after channel settlement with batch unlock."
    assert token_proxy.balance_of(
        alice_app.raiden.address) == alice_new_balance, msg
    assert token_proxy.balance_of(
        bob_app.raiden.address) == bob_new_balance, msg
def fee_receiver(self, amount: PaymentAmount) -> FeeAmount:
    """Return the mediation fee for this channel when receiving the given amount"""
    channel_balance = Balance(self.capacity)
    return self.fee_schedule_receiver.fee(amount, channel_balance)
def make_balance() -> Balance:
    """Return a Balance picked uniformly at random from the full uint256 range."""
    raw_value = random.randint(0, UINT256_MAX)
    return Balance(raw_value)
def test_imbalance_penalty():
    r"""Test an imbalance penalty by moving back and forth

    The imbalance fee looks like

        20 |         /
           |        /
        10 |\.     /
           |  \.  /
         0 |    \/
           ---------------
           0    50     100

    For each input, we first assume the channel is used to forward tokens to a
    payee, which moves the capacity from x1 to x2. The we assume the same
    amount is mediated in the opposite direction (moving from x2 to x1) and
    check that the calculated fee is the same as before just with the opposite
    sign.
    """
    schedule = FeeScheduleState(
        imbalance_penalty=[
            (TokenAmount(0), FeeAmount(10)),
            (TokenAmount(50), FeeAmount(0)),
            (TokenAmount(100), FeeAmount(20)),
        ]
    )
    mirrored_schedule = FeeScheduleState(
        imbalance_penalty=[
            (TokenAmount(0), FeeAmount(20)),
            (TokenAmount(50), FeeAmount(0)),
            (TokenAmount(100), FeeAmount(10)),
        ]
    )

    def check_fee(result, transfer_amount, expected_fee):
        # A case with expected_fee=None means mediation must be impossible.
        if expected_fee is None:
            assert result is None
        else:
            assert result is not None
            assert result - transfer_amount == FeeAmount(expected_fee)

    cases = [
        # (cap_fees, x1, amount, expected_fee_in, expected_fee_out)
        # Uncapped fees
        (False, 0, 50, -8, -10),
        (False, 50, 30, 20, 12),
        (False, 0, 10, -2, -2),
        (False, 10, 10, -2, -2),
        (False, 0, 20, -3, -4),
        (False, 40, 15, 0, 0),
        (False, 50, 31, None, 12),
        (False, 100, 1, None, None),
        # Capped fees
        (True, 0, 50, 0, 0),
        (True, 50, 30, 20, 12),
        (True, 0, 10, 0, 0),
        (True, 10, 10, 0, 0),
        (True, 0, 20, 0, 0),
        (True, 40, 15, 0, 0),
    ]

    for capped, start_capacity, transfer_amount, fee_in_expected, fee_out_expected in cases:
        # Forward direction: capacity moves from start_capacity towards 100.
        schedule.cap_fees = capped
        forward_result = get_amount_with_fees(
            amount_without_fees=PaymentWithFeeAmount(transfer_amount),
            balance_in=Balance(start_capacity),
            balance_out=Balance(100),
            schedule_in=schedule,
            schedule_out=FeeScheduleState(cap_fees=capped),
            receivable_amount=TokenAmount(100 - start_capacity),
        )
        check_fee(forward_result, transfer_amount, fee_in_expected)

        # Opposite direction with the mirrored schedule: the fee must show up
        # with the opposite sign.
        mirrored_schedule.cap_fees = capped
        backward_result = get_amount_with_fees(
            amount_without_fees=PaymentWithFeeAmount(transfer_amount),
            balance_in=Balance(0),
            balance_out=Balance(100 - start_capacity),
            schedule_in=FeeScheduleState(cap_fees=capped),
            schedule_out=mirrored_schedule,
            receivable_amount=TokenAmount(100),
        )
        check_fee(backward_result, transfer_amount, fee_out_expected)