Code example #1
def test_routing_mocked_pfs_happy_path_with_updated_iou(
        happy_path_fixture, one_to_n_address, our_address):
    addresses, chain_state, _, response, token_network_state = happy_path_fixture
    _, address2, _, address4 = addresses

    iou = make_iou(
        pfs_config=PFS_CONFIG,
        our_address=factories.UNIT_TRANSFER_SENDER,
        one_to_n_address=one_to_n_address,
        privkey=PRIVKEY,
        block_number=BlockNumber(10),
        chain_id=ChainID(5),
        offered_fee=TokenAmount(1),
    )
    last_iou = copy(iou)

    with patch.object(session, "post", return_value=response) as patched:
        routes, feedback_token = get_best_routes_with_iou_request_mocked(
            chain_state=chain_state,
            token_network_state=token_network_state,
            one_to_n_address=one_to_n_address,
            from_address=our_address,
            to_address=address4,
            amount=50,
            iou_json_data=dict(last_iou=last_iou.as_json()),
        )

    assert_checksum_address_in_url(patched.call_args[0][0])

    assert routes[0].next_hop_address == address2
    assert feedback_token == DEFAULT_FEEDBACK_TOKEN

    # Check for iou arguments in request payload
    payload = patched.call_args[1]["json"]
    pfs_config = CONFIG["pfs_config"]
    old_amount = last_iou.amount
    assert old_amount < payload["iou"]["amount"] <= pfs_config.maximum_fee + old_amount
    assert payload["iou"]["expiration_block"] == last_iou.expiration_block
    assert payload["iou"]["sender"] == to_checksum_address(last_iou.sender)
    assert payload["iou"]["receiver"] == to_checksum_address(last_iou.receiver)
    assert "signature" in payload["iou"]
Code example #2
File: checks.py Project: sekmet/raiden
def check_ethereum_confirmed_block_is_not_pruned(
    jsonrpc_client: JSONRPCClient, secret_registry: SecretRegistry, confirmation_blocks: int
) -> None:
    """Checks the Ethereum client is not pruning data too aggressively, because
    in some circunstances it is necessary for a node to fetch additional data
    from the smart contract.
    """
    unconfirmed_block_number = jsonrpc_client.block_number()

    # This is a small error margin. It is possible during normal operation for:
    #
    # - AlarmTask sees a new block and calls RaidenService._callback_new_block
    # - The service gets the current latest block number and computes the
    #   confirmed block number.
    # - The service fetches every filter, this can take a while.
    # - While the above is happening, it is possible for a few more blocks to
    #   be mined.
    # - The decode function is called, and tries to access what it thinks is
    #   the latest_confirmed_block, but in reality it is a few blocks older.
    #
    # The value below is the expected drift, which allows the decode function
    # mentioned above to work properly.
    maximum_delay_to_process_a_block = 2

    minimum_available_history = confirmation_blocks + maximum_delay_to_process_a_block
    target_confirmed_block = BlockNumber(unconfirmed_block_number - minimum_available_history)

    try:
        # Using the secret registry is arbitrary, any proxy with an `eth_call`
        # would work here.
        secret_registry.get_secret_registration_block_by_secrethash(
            EMPTY_SECRETHASH, block_identifier=target_confirmed_block
        )
    except ValueError:
        # If this exception is raised the Ethereum node is too aggressive with
        # the block pruning.
        raise RaidenError(
            f"The ethereum client does not have the necessary data available. "
            f"The client can not operate because the prunning strategy is too "
            f"agressive. Please make sure that at very minimum "
            f"{minimum_available_history} blocks of history are available."
        )
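
To make the drift arithmetic above concrete, here is a minimal sketch with assumed numbers; in the real check the values come from the client and the node's configuration:

# Assumed values, for illustration only.
unconfirmed_block_number = 1_000        # latest block reported by the client
confirmation_blocks = 5                 # the node's confirmation setting
maximum_delay_to_process_a_block = 2    # drift margin explained above

minimum_available_history = confirmation_blocks + maximum_delay_to_process_a_block  # 7
target_confirmed_block = unconfirmed_block_number - minimum_available_history       # 993

# The check issues an eth_call pinned to block 993; if the client has pruned
# that state, the call raises ValueError and the node refuses to start.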
Code example #3
def test_monitor_new_balance_proof_event_handler_idempotency(context: Context):
    context = setup_state_with_closed_channel(context)

    new_balance_event = ReceiveMonitoringNewBalanceProofEvent(
        token_network_address=DEFAULT_TOKEN_NETWORK_ADDRESS,
        channel_identifier=DEFAULT_CHANNEL_IDENTIFIER,
        reward_amount=TokenAmount(1),
        nonce=Nonce(2),
        ms_address=Address(bytes([3] * 20)),
        raiden_node_address=DEFAULT_PARTICIPANT2,
        block_number=BlockNumber(23),
    )

    channel = context.database.get_channel(
        new_balance_event.token_network_address,
        new_balance_event.channel_identifier)
    assert channel
    assert channel.update_status is None

    monitor_new_balance_proof_event_handler(new_balance_event, context)

    assert context.database.scheduled_event_count() == 1
    assert context.database.channel_count() == 1
    channel = context.database.get_channel(
        new_balance_event.token_network_address,
        new_balance_event.channel_identifier)
    assert channel
    assert channel.update_status is not None
    assert channel.update_status.nonce == 2
    assert channel.update_status.update_sender_address == bytes([3] * 20)

    monitor_new_balance_proof_event_handler(new_balance_event, context)

    assert context.database.scheduled_event_count() == 1
    assert context.database.channel_count() == 1
    channel = context.database.get_channel(
        new_balance_event.token_network_address,
        new_balance_event.channel_identifier)
    assert channel
    assert channel.update_status is not None
    assert channel.update_status.nonce == 2
    assert channel.update_status.update_sender_address == bytes([3] * 20)
Code example #4
File: service.py Project: karlb/raiden-services
    def __init__(  # pylint: disable=too-many-arguments
        self,
        web3: Web3,
        private_key: str,
        db_filename: str,
        contracts: Dict[str, Contract],
        sync_start_block: BlockNumber = BlockNumber(0),
        required_confirmations: int = DEFAULT_REQUIRED_CONFIRMATIONS,
        poll_interval: float = 1,
        min_reward: int = 0,
    ):
        self.web3 = web3
        self.private_key = private_key
        self.address = private_key_to_address(private_key)
        self.required_confirmations = required_confirmations
        self.poll_interval = poll_interval

        web3.middleware_stack.add(construct_sign_and_send_raw_middleware(private_key))

        monitoring_contract = contracts[CONTRACT_MONITORING_SERVICE]
        user_deposit_contract = contracts[CONTRACT_USER_DEPOSIT]

        chain_id = ChainID(int(web3.net.version))
        self.database = Database(
            filename=db_filename,
            chain_id=chain_id,
            registry_address=contracts[CONTRACT_TOKEN_NETWORK_REGISTRY].address,
            receiver=self.address,
            msc_address=monitoring_contract.address,
            sync_start_block=sync_start_block,
        )
        ms_state = self.database.load_state()

        self.context = Context(
            ms_state=ms_state,
            db=self.database,
            w3=self.web3,
            last_known_block=0,
            monitoring_service_contract=monitoring_contract,
            user_deposit_contract=user_deposit_contract,
            min_reward=min_reward,
        )
Code example #5
def test_action_monitoring_triggered_event_handler_does_not_trigger_monitor_call_when_nonce_to_small(  # noqa
        context: Context, ):
    context = setup_state_with_closed_channel(context)

    event3 = ReceiveMonitoringNewBalanceProofEvent(
        token_network_address=DEFAULT_TOKEN_NETWORK_ADDRESS,
        channel_identifier=DEFAULT_CHANNEL_IDENTIFIER,
        reward_amount=TokenAmount(1),
        nonce=Nonce(5),
        ms_address=Address(bytes([3] * 20)),
        raiden_node_address=DEFAULT_PARTICIPANT2,
        block_number=BlockNumber(23),
    )

    channel = context.database.get_channel(event3.token_network_address,
                                           event3.channel_identifier)
    assert channel
    assert channel.update_status is None

    monitor_new_balance_proof_event_handler(event3, context)

    # add MR to DB, with nonce being smaller than in event3
    context.database.upsert_monitor_request(
        create_signed_monitor_request(nonce=Nonce(4)))

    event4 = ActionMonitoringTriggeredEvent(
        token_network_address=DEFAULT_TOKEN_NETWORK_ADDRESS,
        channel_identifier=DEFAULT_CHANNEL_IDENTIFIER,
        non_closing_participant=DEFAULT_PARTICIPANT2,
    )

    channel = context.database.get_channel(event4.token_network_address,
                                           event4.channel_identifier)
    assert channel
    assert channel.update_status is not None
    assert channel.monitor_tx_hash is None

    action_monitoring_triggered_event_handler(event4, context)

    assert context.database.channel_count() == 1
    assert channel
    assert channel.monitor_tx_hash is None
Code example #6
    def __init__(  # pylint: disable=too-many-arguments
        self,
        web3: Web3,
        contracts: Dict[str, Contract],
        private_key: str,
        db_filename: str,
        sync_start_block: BlockNumber = BlockNumber(0),
        required_confirmations: int = 8,
        poll_interval: float = 10,
    ):
        super().__init__()

        self.web3 = web3
        self.registry_address = contracts[CONTRACT_TOKEN_NETWORK_REGISTRY].address
        self.user_deposit_contract = contracts[CONTRACT_USER_DEPOSIT]
        self.chain_id = ChainID(int(web3.net.version))
        self.address = private_key_to_address(private_key)
        self._required_confirmations = required_confirmations
        self._poll_interval = poll_interval
        self._is_running = gevent.event.Event()

        self.database = PFSDatabase(
            filename=db_filename,
            pfs_address=self.address,
            sync_start_block=sync_start_block,
            token_network_registry_address=self.registry_address,
            chain_id=self.chain_id,
            user_deposit_contract_address=self.user_deposit_contract.address,
            allow_create=True,
        )
        self.token_networks = self._load_token_networks()

        try:
            self.matrix_listener = MatrixListener(
                private_key=private_key,
                chain_id=self.chain_id,
                callback=self.handle_message,
                service_room_suffix=PATH_FINDING_BROADCASTING_ROOM,
            )
        except ConnectionError as exc:
            log.critical("Could not connect to broadcasting system.", exc=exc)
            sys.exit(1)
Code example #7
    def make_mediated_transfer_state_change(
            transfer_amount: int, allocated_fee_amount: FeeAmount,
            channel_capacity: TokenAmount) -> TransitionResult:
        transfer = factories.replace(factories.UNIT_TRANSFER_DESCRIPTION,
                                     amount=transfer_amount)
        channel_set = factories.make_channel_set_from_amounts(
            [channel_capacity])
        mediating_channel = channel_set.channels[0]
        prng = random.Random()

        nodeaddresses_to_networkstates = {
            mediating_channel.partner_state.address: NetworkState.REACHABLE
        }

        addresses_to_channel = {
            (
                UNIT_TOKEN_NETWORK_ADDRESS,
                mediating_channel.partner_state.address,
            ): mediating_channel
        }

        routes = [[
            factories.UNIT_OUR_ADDRESS,
            mediating_channel.partner_state.address,
            factories.UNIT_TRANSFER_TARGET,
        ]]

        init_action = factories.initiator_make_init_action(
            channels=channel_set,
            routes=routes,
            transfer=transfer,
            estimated_fee=allocated_fee_amount,
        )
        return initiator_manager.handle_init(
            payment_state=None,
            state_change=init_action,
            addresses_to_channel=addresses_to_channel,
            nodeaddresses_to_networkstates=nodeaddresses_to_networkstates,
            pseudo_random_generator=prng,
            block_number=BlockNumber(1),
        )
Code example #8
def test_channel_closed_event_handler_idempotency(context: Context):
    context = setup_state_with_open_channel(context)
    context.last_known_block = 60

    event = ReceiveChannelClosedEvent(
        token_network_address=DEFAULT_TOKEN_NETWORK_ADDRESS,
        channel_identifier=DEFAULT_CHANNEL_IDENTIFIER,
        closing_participant=DEFAULT_PARTICIPANT2,
        block_number=BlockNumber(52),
    )
    channel_closed_event_handler(event, context)

    # ActionMonitoringTriggeredEvent has been triggered
    assert context.db.scheduled_event_count() == 1
    assert context.db.channel_count() == 1
    assert_channel_state(context, ChannelState.CLOSED)

    # run handler again, check idempotency
    channel_closed_event_handler(event, context)
    assert context.db.scheduled_event_count() == 1
Code example #9
def test_token_network_created(pathfinding_service_mock):
    token_address = Address(bytes([1] * 20))
    token_network_address = TokenNetworkAddress(bytes([2] * 20))
    network_event = ReceiveTokenNetworkCreatedEvent(
        token_address=token_address,
        token_network_address=token_network_address,
        block_number=BlockNumber(1),
    )

    assert not pathfinding_service_mock.follows_token_network(token_network_address)
    assert len(pathfinding_service_mock.token_networks) == 1

    pathfinding_service_mock.handle_event(network_event)
    assert pathfinding_service_mock.follows_token_network(token_network_address)
    assert len(pathfinding_service_mock.token_networks) == 2

    # Test idempotency
    pathfinding_service_mock.handle_event(network_event)
    assert pathfinding_service_mock.follows_token_network(token_network_address)
    assert len(pathfinding_service_mock.token_networks) == 2
Code example #10
def get_proxy_manager(client: JSONRPCClient,
                      deploy: DeployedContracts) -> ProxyManager:
    contract_manager = ContractManager(
        contracts_precompiled_path(RAIDEN_CONTRACT_VERSION))

    assert "contracts" in deploy, deploy
    token_network_deployment_details = deploy["contracts"][
        CONTRACT_TOKEN_NETWORK_REGISTRY]
    deployed_at = token_network_deployment_details["block_number"]
    token_network_registry_deployed_at = BlockNumber(deployed_at)

    return ProxyManager(
        client,
        contract_manager,
        ProxyManagerMetadata(
            token_network_registry_deployed_at=token_network_registry_deployed_at,
            filters_start_at=token_network_registry_deployed_at,
        ),
    )
Code example #11
def restart_app(app: App) -> App:
    new_transport = MatrixTransport(app.raiden.config["transport"]["matrix"])
    app = App(
        config=app.config,
        rpc_client=app.raiden.rpc_client,
        proxy_manager=app.raiden.proxy_manager,
        query_start_block=BlockNumber(0),
        default_one_to_n_address=app.raiden.default_one_to_n_address,
        default_registry=app.raiden.default_registry,
        default_secret_registry=app.raiden.default_secret_registry,
        default_service_registry=app.raiden.default_service_registry,
        default_msc_address=app.raiden.default_msc_address,
        transport=new_transport,
        raiden_event_handler=RaidenEventHandler(),
        message_handler=MessageHandler(),
        routing_mode=RoutingMode.PRIVATE,
    )

    app.start()

    return app
Code example #12
def test_channel_closed_event_handler_channel_not_in_database(context: Context):
    metrics_state = save_metrics_state(metrics.REGISTRY)
    # only setup the token network without channels
    create_default_token_network(context)

    event = ReceiveChannelClosedEvent(
        token_network_address=DEFAULT_TOKEN_NETWORK_ADDRESS,
        channel_identifier=ChannelID(4),
        closing_participant=DEFAULT_PARTICIPANT2,
        block_number=BlockNumber(52),
    )
    assert context.database.channel_count() == 0
    channel_closed_event_handler(event, context)
    assert context.database.channel_count() == 0

    assert (
        metrics_state.get_delta(
            "events_log_errors_total", labels=metrics.ErrorCategory.STATE.to_label_dict()
        )
        == 1.0
    )
Code example #13
def test_token_channel_closed(pathfinding_service_mock, token_network_model):
    setup_channel(pathfinding_service_mock, token_network_model)

    # Test invalid token network address
    close_event = ReceiveChannelClosedEvent(
        token_network_address=TokenNetworkAddress("0x" + "0" * 40),
        channel_identifier=ChannelID(1),
        closing_participant=PARTICIPANT1,
        block_number=BlockNumber(2),
    )

    pathfinding_service_mock.handle_event(close_event)
    assert len(pathfinding_service_mock.token_networks) == 1
    assert len(token_network_model.channel_id_to_addresses) == 1

    # Test proper token network address
    close_event.token_network_address = token_network_model.address

    pathfinding_service_mock.handle_event(close_event)
    assert len(pathfinding_service_mock.token_networks) == 1
    assert len(token_network_model.channel_id_to_addresses) == 0
Code example #14
    def _plan_withdraw_check_result(
        self, transaction_sent: Optional[TransactionSent], amount_to_plan_withdraw: TokenAmount
    ) -> Optional[TransactionMined]:
        if transaction_sent is None:
            failed_at = self.client.get_block(BLOCK_ID_LATEST)
            failed_at_blocknumber = failed_at["number"]

            self.client.check_for_insufficient_eth(
                transaction_name="planWithdraw",
                transaction_executed=False,
                required_gas=self.gas_measurements["UserDeposit.planWithdraw"],
                block_identifier=failed_at_blocknumber,
            )
            raise RaidenRecoverableError(
                "Plan withdraw transaction failed to be sent for an unknown reason."
            )

        transaction_mined = self.client.poll_transaction(transaction_sent)

        if not was_transaction_successfully_mined(transaction_mined):
            if amount_to_plan_withdraw <= 0:
                raise RaidenRecoverableError(
                    f"Planned withdraw amount was <= 0: {amount_to_plan_withdraw}."
                )

            failed_at_blocknumber = BlockNumber(transaction_mined.receipt["blockNumber"])

            current_balance = self.get_total_deposit(
                address=self.node_address, block_identifier=failed_at_blocknumber
            )

            if current_balance < amount_to_plan_withdraw:
                raise RaidenRecoverableError(
                    f"Couldn't plan withdraw because planned amount "
                    f"{amount_to_plan_withdraw} exceeded current balance of {current_balance}."
                )

            raise RaidenRecoverableError("Plan withdraw failed for an unknown reason.")

        return transaction_mined
Code example #15
def handle_block(
    target_state: TargetTransferState,
    channel_state: NettingChannelState,
    block_number: BlockNumber,
):
    """ After Raiden learns about a new block this function must be called to
    handle expiration of the hash time lock.
    """
    transfer = target_state.transfer
    events = list()
    lock = transfer.lock

    secret_known = channel.is_secret_known(
        channel_state.partner_state,
        lock.secrethash,
    )
    lock_has_expired, _ = channel.is_lock_expired(
        end_state=channel_state.our_state,
        lock=lock,
        block_number=block_number,
        lock_expiration_threshold=BlockNumber(
            lock.expiration + DEFAULT_NUMBER_OF_BLOCK_CONFIRMATIONS
        ),
    )

    if lock_has_expired and target_state.state != TargetTransferState.EXPIRED:
        failed = EventUnlockClaimFailed(
            identifier=transfer.payment_identifier,
            secrethash=transfer.lock.secrethash,
            reason='lock expired',
        )
        target_state.state = TargetTransferState.EXPIRED
        events = [failed]
    elif secret_known:
        events = events_for_onchain_secretreveal(
            target_state,
            channel_state,
            block_number,
        )

    return TransitionResult(target_state, events)
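
As the docstring says, this handler has to run once per new block so that expired locks are detected. A hedged sketch of how a caller might drive it; the target_state and channel_state objects are assumed to already exist (in Raiden they are produced by the state-machine dispatch, not built by hand):

# Sketch only: feed consecutive block numbers into the handler and keep the
# updated state returned in the TransitionResult.
for new_block in (BlockNumber(101), BlockNumber(102)):
    transition = handle_block(
        target_state=target_state,      # existing TargetTransferState
        channel_state=channel_state,    # the channel carrying the lock
        block_number=new_block,
    )
    target_state = transition.new_state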
Code example #16
def test_token_channel_new_deposit(pathfinding_service_mock,
                                   token_network_model):
    setup_channel(pathfinding_service_mock, token_network_model)

    deposit_event = ReceiveChannelNewDepositEvent(
        token_network_address=token_network_model.address,
        channel_identifier=ChannelID(1),
        participant_address=PARTICIPANT1,
        total_deposit=TokenAmount(123),
        block_number=BlockNumber(2),
    )

    pathfinding_service_mock.handle_event(deposit_event)
    assert len(pathfinding_service_mock.token_networks) == 1
    assert len(token_network_model.channel_id_to_addresses) == 1

    # Test invalid token network address
    deposit_event.token_network_address = TokenNetworkAddress("0x" + "0" * 40)

    pathfinding_service_mock.handle_event(deposit_event)
    assert len(pathfinding_service_mock.token_networks) == 1
    assert len(token_network_model.channel_id_to_addresses) == 1
Code example #17
File: smartcontracts.py Project: sekmet/raiden
def deploy_one_to_n(
    user_deposit_deploy_result: Callable[[], UserDeposit],
    service_registry_deploy_result: Callable[[], ServiceRegistry],
    deploy_client: JSONRPCClient,
    contract_manager: ContractManager,
    proxy_manager: ProxyManager,
    chain_id: ChainID,
) -> OneToN:
    user_deposit_proxy = user_deposit_deploy_result()
    service_registry_proxy = service_registry_deploy_result()
    contract, receipt = deploy_client.deploy_single_contract(
        contract_name=CONTRACT_ONE_TO_N,
        contract=contract_manager.get_contract(CONTRACT_ONE_TO_N),
        constructor_parameters=[
            user_deposit_proxy.address,
            chain_id,
            service_registry_proxy.address,
        ],
    )
    return proxy_manager.one_to_n(
        OneToNAddress(to_canonical_address(contract.address)),
        BlockNumber(receipt["blockNumber"]))
Code example #18
File: transfer.py Project: sekmet/raiden
def block_offset_timeout(
    raiden: RaidenService,
    error_message: Optional[str] = None,
    offset: Optional[BlockOffset] = None,
    safety_margin: int = 5,
) -> BlockTimeout:
    """
    Returns a BlockTimeout that will fire after a number of blocks. Usually created
    at the same time as a set of transfers to wait until their expiration.
    """
    expiration = BlockNumber(raiden.get_block_number() +
                             (offset or raiden.config.settle_timeout) +
                             safety_margin)
    exception = RuntimeError(
        error_message
        or "Events were not completed in the required number of blocks.")
    return BlockTimeout(
        raiden=raiden,
        exception_to_throw=exception,
        block_number=expiration,
        retry_timeout=DEFAULT_RETRY_TIMEOUT,
    )
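
A hedged usage sketch, assuming the returned BlockTimeout behaves like a gevent timeout and can be used as a context manager (the raiden service and the transfer helpers are placeholders, not part of the snippet above):

# Sketch only: abort the wait if the transfers do not complete within
# settle_timeout + safety_margin blocks.
timeout = block_offset_timeout(raiden, "Transfers did not complete in time.")
with timeout:
    run_transfers()        # placeholder for the transfers being waited on
    wait_for_unlocks()     # placeholder for the expected follow-up events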
Code example #19
def pathfinding_service_web3_mock(
    web3: Web3,
    user_deposit_contract: Contract,
    get_private_key: Callable,
    create_service_account: Callable,
) -> Generator[PathfindingService, None, None]:
    pfs_address = to_canonical_address(create_service_account())
    with patch("pathfinding_service.service.MatrixListener", new=Mock):
        pathfinding_service = PathfindingService(
            web3=web3,
            contracts={
                CONTRACT_TOKEN_NETWORK_REGISTRY: Mock(address="0x" + "9" * 40),
                CONTRACT_USER_DEPOSIT: user_deposit_contract,
            },
            sync_start_block=BlockNumber(0),
            required_confirmations=BlockTimeout(1),
            poll_interval=0,
            private_key=get_private_key(pfs_address),
            db_filename=":memory:",
        )

        yield pathfinding_service
Code example #20
def test_register_secret_batch_with_pruned_block(
    secret_registry_proxy: SecretRegistry,
    web3: Web3,
    private_keys: List[PrivateKey],
    contract_manager: ContractManager,
) -> None:
    """Test secret registration with a pruned given block."""
    c1_client = JSONRPCClient(web3, private_keys[1])
    c1_proxy_manager = ProxyManager(
        rpc_client=c1_client,
        contract_manager=contract_manager,
        metadata=ProxyManagerMetadata(
            token_network_registry_deployed_at=GENESIS_BLOCK_NUMBER,
            filters_start_at=GENESIS_BLOCK_NUMBER,
        ),
    )
    # Now wait until this block becomes pruned
    pruned_number = c1_proxy_manager.client.block_number()
    c1_proxy_manager.wait_until_block(
        target_block_number=BlockNumber(pruned_number + STATE_PRUNING_AFTER_BLOCKS)
    )
    secret_registry_batch_happy_path(web3, secret_registry_proxy)
Code example #21
File: pathfinding.py Project: christianbrb/raiden
def make_iou(
    pfs_config: PFSConfig,
    our_address: Address,
    one_to_n_address: OneToNAddress,
    privkey: bytes,
    block_number: BlockNumber,
    chain_id: ChainID,
    offered_fee: TokenAmount,
) -> IOU:
    expiration = BlockNumber(block_number + pfs_config.iou_timeout)

    iou = IOU(
        sender=our_address,
        receiver=pfs_config.info.payment_address,
        one_to_n_address=one_to_n_address,
        amount=offered_fee,
        expiration_block=expiration,
        chain_id=chain_id,
    )
    iou.sign(privkey)

    return iou
Code example #22
def test_initialize_wal_throws_when_lock_is_taken(raiden_network: List[RaidenService]):
    """Raiden must throw a proper exception when the filelock of the DB is already taken.

    Test for https://github.com/raiden-network/raiden/issues/6079
    """
    app0, _ = raiden_network

    # Start a second app, which should throw an exception, as the lock is already taken
    app0_2 = RaidenService(
        config=app0.config,
        rpc_client=app0.rpc_client,
        proxy_manager=app0.proxy_manager,
        query_start_block=BlockNumber(0),
        raiden_bundle=RaidenBundle(app0.default_registry, app0.default_secret_registry),
        services_bundle=app0.default_services_bundle,
        transport=app0.transport,
        raiden_event_handler=RaidenEventHandler(),
        message_handler=MessageHandler(),
        routing_mode=RoutingMode.PRIVATE,
    )
    with pytest.raises(RaidenUnrecoverableError):
        app0_2.start()
Code example #23
def test_token_channel_opened(pathfinding_service_mock, token_network_model):
    setup_channel(pathfinding_service_mock, token_network_model)
    assert len(pathfinding_service_mock.token_networks) == 1
    assert len(token_network_model.channel_id_to_addresses) == 1

    # Test invalid token network address
    channel_event = ReceiveChannelOpenedEvent(
        token_network_address=TokenNetworkAddress(bytes([2] * 20)),
        channel_identifier=ChannelID(1),
        participant1=PARTICIPANT1,
        participant2=PARTICIPANT2,
        settle_timeout=20,
        block_number=BlockNumber(1),
    )

    pathfinding_service_mock.handle_event(channel_event)
    assert len(pathfinding_service_mock.token_networks) == 1
    assert len(token_network_model.channel_id_to_addresses) == 1

    # Check that presence of these addresses is followed
    pathfinding_service_mock.matrix_listener.follow_address_presence.assert_has_calls(
        [call(PARTICIPANT1, refresh=True),
         call(PARTICIPANT2, refresh=True)])
Code example #24
def test_prometheus_event_handling_raise_exception(
        pathfinding_service_mock_empty):
    metrics_state = save_metrics_state(metrics.REGISTRY)
    pfs = pathfinding_service_mock_empty

    event = ReceiveTokenNetworkCreatedEvent(
        token_address=TokenAddress(bytes([1] * 20)),
        token_network_address=TokenNetworkAddress(bytes([2] * 20)),
        block_number=BlockNumber(1),
    )

    pfs.handle_token_network_created = Mock(side_effect=KeyError())

    # The exceptions raised in the wrapped part of the prometheus logging
    # will not be handled anywhere at the moment.
    # Force an exception and test correct logging of it anyway,
    # since at some point higher in the call stack we could catch exceptions.
    with pytest.raises(KeyError):
        pfs.handle_event(event)

    assert (metrics_state.get_delta(
        "events_exceptions_total",
        labels={"event_type": "ReceiveTokenNetworkCreatedEvent"},
    ) == 1.0)
Code example #25
    def plan_withdraw(self, amount: TokenAmount,
                      given_block_identifier: BlockIdentifier) -> BlockNumber:
        """ Announce that you plan to withdraw tokens from the UserDeposit contract

        Returns the block number at which the withdraw is ready.
        """
        self._plan_withdraw_preconditions(amount, given_block_identifier)

        # Simplify our lives by disallowing concurrent plan_withdraw / withdraw calls
        with self._withdraw_lock:
            estimated_transaction = self.client.estimate_gas(
                self.proxy, "planWithdraw", {}, amount)
            transaction_sent = None
            if estimated_transaction is not None:
                transaction_sent = self.client.transact(estimated_transaction)
            transaction_mined = self._plan_withdraw_check_result(
                transaction_sent=transaction_sent,
                amount_to_plan_withdraw=amount)

        assert transaction_mined is not None, "_plan_withdraw_check_result returned None"

        return BlockNumber(transaction_mined.receipt["blockNumber"] +
                           self.get_withdraw_delay())
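
A hedged usage sketch of the returned value; the user_deposit proxy instance and the amount are assumptions for illustration:

# Sketch only: plan a withdraw and remember the block at which it matures.
withdraw_ready_at = user_deposit.plan_withdraw(
    amount=TokenAmount(100),
    given_block_identifier=BLOCK_ID_LATEST,
)
# Once the chain reaches withdraw_ready_at, the actual withdraw can be sent.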
Code example #26
def _add_blockhash_to_state_changes(storage: SQLiteStorage,
                                    cache: BlockHashCache) -> None:
    """Adds blockhash to ContractReceiveXXX and ActionInitChain state changes"""

    batch_size = 50
    batch_query = storage.batch_query_state_changes(
        batch_size=batch_size,
        filters=[
            ("_type", "raiden.transfer.state_change.ContractReceive%"),
            ("_type", "raiden.transfer.state_change.ActionInitChain"),
        ],
        logical_and=False,
    )
    for state_changes_batch in batch_query:
        # Gather query records to pass to gevent pool imap to have concurrent RPC calls
        query_records = []
        for state_change in state_changes_batch:
            data = json.loads(state_change.data)
            assert "block_hash" not in data, "v18 state changes cant contain blockhash"
            record = BlockQueryAndUpdateRecord(
                block_number=BlockNumber(int(data["block_number"])),
                data=data,
                state_change_identifier=state_change.state_change_identifier,
                cache=cache,
            )
            query_records.append(record)

        # Now perform the queries in parallel with gevent.Pool.imap and gather the
        # updated tuple entries that will update the DB
        updated_state_changes = []
        pool_generator = Pool(batch_size).imap(
            _query_blocknumber_and_update_statechange_data, query_records)
        for entry in pool_generator:
            updated_state_changes.append(entry)

        # Finally update the DB with a batched executemany()
        storage.update_state_changes(updated_state_changes)
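
The batching pattern above (collect records, fan the lookups out over a gevent pool, then write the results back in one batch) can be shown with a minimal self-contained sketch; the fetch function here is a stand-in for the per-state-change RPC call:

from gevent.pool import Pool

def fetch_block_hash(record):
    # Stand-in for the blockhash lookup done per state change above.
    return (record, f"hash-of-block-{record}")

records = list(range(50))
results = list(Pool(50).imap(fetch_block_hash, records))
# imap preserves input order, so `results` can be written back with a single
# batched executemany(), mirroring storage.update_state_changes above.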
Code example #27
    def start(self,
              wait_function: Callable = time.sleep,
              check_account_gas_reserve: bool = True) -> None:
        if not self.service_registry.functions.hasValidRegistration(
                self.address).call():
            log.error("No valid registration in ServiceRegistry",
                      address=self.address)
            sys.exit(1)

        last_gas_check_block = 0
        while True:
            last_confirmed_block = self.context.latest_confirmed_block

            # check gas reserve
            do_gas_reserve_check = (
                check_account_gas_reserve and last_confirmed_block >=
                last_gas_check_block + DEFAULT_GAS_CHECK_BLOCKS)
            if do_gas_reserve_check:
                check_gas_reserve(self.web3, self.private_key)
                last_gas_check_block = last_confirmed_block

            max_query_interval_end_block = (
                self.context.ms_state.blockchain_state.latest_commited_block +
                MAX_FILTER_INTERVAL)
            # Limit the max number of blocks that is processed per iteration
            last_block = BlockNumber(
                min(last_confirmed_block, max_query_interval_end_block))

            self._process_new_blocks(last_block)
            self._check_pending_transactions()
            self._purge_old_monitor_requests()

            try:
                wait_function(self.poll_interval)
            except KeyboardInterrupt:
                log.info("Shutting down")
                sys.exit(0)
Code example #28
    def __init__(
        self,
        filename: str,
        chain_id: ChainID,
        pfs_address: Address,
        sync_start_block: BlockNumber = BlockNumber(0),
        allow_create: bool = False,
        **contract_addresses: Address,
    ):
        super().__init__(filename, allow_create=allow_create)
        self.pfs_address = pfs_address

        # Keep the journal around and skip inode updates.
        # References:
        # https://sqlite.org/atomiccommit.html#_persistent_rollback_journals
        # https://sqlite.org/pragma.html#pragma_journal_mode
        self.conn.execute("PRAGMA journal_mode=PERSIST")

        self._setup(
            chain_id=chain_id,
            receiver=pfs_address,
            sync_start_block=sync_start_block,
            **contract_addresses,
        )
Code example #29
def test_channel_bp_updated_event_handler_invalid_closing_participant(
        context: Context):
    metrics_state = save_metrics_state(metrics.REGISTRY)
    context = setup_state_with_closed_channel(context)

    event_bp = ReceiveNonClosingBalanceProofUpdatedEvent(
        token_network_address=DEFAULT_TOKEN_NETWORK_ADDRESS,
        channel_identifier=DEFAULT_CHANNEL_IDENTIFIER,
        closing_participant=DEFAULT_PARTICIPANT_OTHER,
        nonce=Nonce(2),
        block_number=BlockNumber(23),
    )

    channel = context.database.get_channel(event_bp.token_network_address,
                                           event_bp.channel_identifier)
    assert context.database.channel_count() == 1
    assert channel
    assert channel.update_status is None

    non_closing_balance_proof_updated_event_handler(event_bp, context)

    assert (metrics_state.get_delta(
        "events_log_errors_total",
        labels=metrics.ErrorCategory.PROTOCOL.to_label_dict()) == 1.0)
Code example #30
def test_channel_bp_updated_event_handler_channel_not_in_database(
        context: Context):
    metrics_state = save_metrics_state(metrics.REGISTRY)
    # only setup the token network without channels
    create_default_token_network(context)

    event_bp = ReceiveNonClosingBalanceProofUpdatedEvent(
        token_network_address=DEFAULT_TOKEN_NETWORK_ADDRESS,
        channel_identifier=DEFAULT_CHANNEL_IDENTIFIER,
        closing_participant=DEFAULT_PARTICIPANT2,
        nonce=Nonce(2),
        block_number=BlockNumber(23),
    )

    channel = context.database.get_channel(event_bp.token_network_address,
                                           event_bp.channel_identifier)
    assert channel is None
    assert context.database.channel_count() == 0

    non_closing_balance_proof_updated_event_handler(event_bp, context)

    assert (metrics_state.get_delta(
        "events_log_errors_total",
        labels=metrics.ErrorCategory.STATE.to_label_dict()) == 1.0)