def handle_state_change(self, state_change: StateChange):
        assert self.wal

        log.debug(
            'State change',
            node=pex(self.address),
            state_change=_redact_secret(serialize.JSONSerializer.serialize(state_change)),
        )

        old_state = views.state_from_raiden(self)

        event_list = self.wal.log_and_dispatch(state_change)

        current_state = views.state_from_raiden(self)
        for balance_proof in views.detect_balance_proof_change(old_state, current_state):
            event_list.append(EventNewBalanceProofReceived(balance_proof))

        if self.dispatch_events_lock.locked():
            return []

        for event in event_list:
            log.debug(
                'Raiden event',
                node=pex(self.address),
                raiden_event=_redact_secret(serialize.JSONSerializer.serialize(event)),
            )

            try:
                self.raiden_event_handler.on_raiden_event(
                    raiden=self,
                    event=event,
                )
            except RaidenRecoverableError as e:
                log.error(str(e))
            except InvalidDBData:
                raise
            except RaidenUnrecoverableError as e:
                log_unrecoverable = (
                    self.config['environment_type'] == Environment.PRODUCTION and
                    not self.config['unrecoverable_error_should_crash']
                )
                if log_unrecoverable:
                    log.error(str(e))
                else:
                    raise

        # Take a snapshot every SNAPSHOT_STATE_CHANGES_COUNT state changes.
        # TODO: Gather more data about storage requirements and update this
        # value to specify how often a snapshot should be taken.
        new_snapshot_group = self.wal.storage.count_state_changes() // SNAPSHOT_STATE_CHANGES_COUNT
        if new_snapshot_group > self.snapshot_group:
            log.debug('Storing snapshot', snapshot_id=new_snapshot_group)
            self.wal.snapshot()
            self.snapshot_group = new_snapshot_group

        return event_list
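The snapshot cadence above relies only on integer division: a snapshot is stored the first time the total state change count crosses into a new group of SNAPSHOT_STATE_CHANGES_COUNT. A small illustration of that check; the constant's value here is an assumption made for the example:

# Illustration of the snapshot-group check used in handle_state_change above.
SNAPSHOT_STATE_CHANGES_COUNT = 500  # assumed value for this example only

def should_snapshot(state_changes_count: int, current_group: int) -> bool:
    """Return True when the count has crossed into a new snapshot group."""
    return state_changes_count // SNAPSHOT_STATE_CHANGES_COUNT > current_group

assert should_snapshot(499, 0) is False   # still inside group 0
assert should_snapshot(500, 0) is True    # first state change of group 1
assert should_snapshot(999, 1) is False   # group 1 not finished yet
assert should_snapshot(1000, 1) is True   # group 2 reached, snapshot again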
Example #2
    def _initialize_monitoring_services_queue(self, chain_state: ChainState):
        """Send the monitoring requests for all current balance proofs.

        Note:
            The node must always send the *received* balance proof to the
            monitoring service, *before* sending its own locked transfer
            forward. If the monitoring service is updated after, then the
            following can happen:

            For a transfer A-B-C where this node is B

            - B receives T1 from A and processes it
            - B forwards its T2 to C
            * B crashes (the monitoring service is not updated)

            For the above scenario, the monitoring service would not have the
            latest balance proof received by B from A available with the lock
            for T1, but C would. If the channel B-C is closed and B does not
            come back online in time, the funds for the lock of T1 can be lost.

            During restarts the rationale from above has to be replicated,
            because the initialization code *is not* the same as the event
            handler: the balance proof updates must be done prior to the
            processing of the message queues.
        """
        msg = (
            'Transport was started before the monitoring service queue was updated. '
            f'This can lead to safety issues. node:{self!r}'
        )
        assert not self.transport, msg

        msg = (
            f'The node state was not yet recovered, cannot read balance proofs. node:{self!r}'
        )
        assert self.wal, msg

        current_balance_proofs = views.detect_balance_proof_change(
            old_state=ChainState(
                pseudo_random_generator=chain_state.pseudo_random_generator,
                block_number=GENESIS_BLOCK_NUMBER,
                block_hash=constants.EMPTY_HASH,
                our_address=chain_state.our_address,
                chain_id=chain_state.chain_id,
            ),
            current_state=chain_state,
        )
        for balance_proof in current_balance_proofs:
            update_services_from_balance_proof(self, chain_state,
                                               balance_proof)
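The genesis-state baseline used above generalizes: diffing the current chain state against a pristine ChainState at the genesis block makes detect_balance_proof_change yield every balance proof the node currently knows about. A hypothetical helper extracting that idiom, reusing the same names as the snippet above:

def all_current_balance_proofs(chain_state: ChainState) -> list:
    """Hypothetical helper: collect every balance proof known in chain_state."""
    baseline = ChainState(
        pseudo_random_generator=chain_state.pseudo_random_generator,
        block_number=GENESIS_BLOCK_NUMBER,
        block_hash=constants.EMPTY_HASH,
        our_address=chain_state.our_address,
        chain_id=chain_state.chain_id,
    )
    return list(views.detect_balance_proof_change(
        old_state=baseline,
        current_state=chain_state,
    ))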
Example #3
    def handle_state_change(self, state_change: StateChange) -> List[Greenlet]:
        """ Dispatch the state change and return the processing threads.

        Use this for error reporting: failures in the returned greenlets
        should be re-raised using `gevent.joinall` with `raise_error=True`.
        """
        assert self.wal, f'WAL not restored. node:{self!r}'
        log.debug(
            'State change',
            node=pex(self.address),
            state_change=_redact_secret(
                serialize.JSONSerializer.serialize(state_change)),
        )

        old_state = views.state_from_raiden(self)

        raiden_event_list = self.wal.log_and_dispatch(state_change)

        current_state = views.state_from_raiden(self)
        for changed_balance_proof in views.detect_balance_proof_change(
                old_state, current_state):
            update_services_from_balance_proof(self, current_state,
                                               changed_balance_proof)

        log.debug(
            'Raiden events',
            node=pex(self.address),
            raiden_events=[
                _redact_secret(serialize.JSONSerializer.serialize(event))
                for event in raiden_event_list
            ],
        )

        greenlets: List[Greenlet] = list()
        if self.ready_to_process_events:
            for raiden_event in raiden_event_list:
                greenlets.append(self.handle_event(raiden_event=raiden_event))

            state_changes_count = self.wal.storage.count_state_changes()
            new_snapshot_group = (state_changes_count //
                                  SNAPSHOT_STATE_CHANGES_COUNT)
            if new_snapshot_group > self.snapshot_group:
                log.debug('Storing snapshot', snapshot_id=new_snapshot_group)
                self.wal.snapshot()
                self.snapshot_group = new_snapshot_group

        return greenlets
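As the docstring notes, errors inside the returned greenlets only surface once they are joined. A minimal usage sketch; the raiden object and the state change are placeholders:

import gevent

# Hypothetical caller: dispatch a state change and re-raise any failure
# from the event-handling greenlets, as the docstring recommends.
greenlets = raiden.handle_state_change(state_change)
gevent.joinall(greenlets, raise_error=True)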
def diff(old, new):
    # List the balance proofs that changed between two chain states.
    return list(detect_balance_proof_change(old, new))
def test_detect_balance_proof_change():
    prng = random.Random()

    block_hash = make_block_hash()
    our_address = make_address()
    empty_chain = ChainState(
        pseudo_random_generator=prng,
        block_number=1,
        block_hash=block_hash,
        our_address=our_address,
        chain_id=3,
    )

    assert empty(detect_balance_proof_change(empty_chain,
                                             empty_chain)), MSG_NO_CHANGE
    assert empty(
        detect_balance_proof_change(empty_chain,
                                    deepcopy(empty_chain))), MSG_NO_CHANGE

    token_network_registry_address = make_address()
    chain_with_registry_no_bp = deepcopy(empty_chain)
    chain_with_registry_no_bp.identifiers_to_tokennetworkregistries[
        token_network_registry_address] = TokenNetworkRegistryState(
            token_network_registry_address, [])

    assert empty(
        detect_balance_proof_change(empty_chain,
                                    chain_with_registry_no_bp)), MSG_NO_CHANGE
    assert empty(
        detect_balance_proof_change(
            chain_with_registry_no_bp,
            deepcopy(chain_with_registry_no_bp))), MSG_NO_CHANGE

    token_network_address = make_address()
    token_address = make_address()

    chain_with_token_network_no_bp = deepcopy(chain_with_registry_no_bp)
    chain_with_token_network_no_bp.identifiers_to_tokennetworkregistries[
        token_network_registry_address].tokennetworkaddresses_to_tokennetworks[
            token_network_address] = TokenNetworkState(
                address=token_network_address,
                token_address=token_address,
                network_graph=TokenNetworkGraphState(token_network_address),
            )
    assert empty(
        detect_balance_proof_change(
            empty_chain, chain_with_token_network_no_bp)), MSG_NO_CHANGE
    assert empty(
        detect_balance_proof_change(
            chain_with_registry_no_bp,
            chain_with_token_network_no_bp)), MSG_NO_CHANGE
    assert empty(
        detect_balance_proof_change(
            chain_with_token_network_no_bp,
            deepcopy(chain_with_token_network_no_bp))), MSG_NO_CHANGE

    partner_address = make_address()
    canonical_identifier = make_canonical_identifier()
    channel_no_bp = NettingChannelState(
        canonical_identifier=canonical_identifier,
        token_address=token_address,
        token_network_registry_address=token_network_registry_address,
        reveal_timeout=1,
        settle_timeout=2,
        our_state=NettingChannelEndState(address=our_address,
                                         contract_balance=1),
        partner_state=NettingChannelEndState(address=partner_address,
                                             contract_balance=0),
        open_transaction=TransactionExecutionStatus(result="success"),
        settle_transaction=None,
        update_transaction=None,
        close_transaction=None,
        fee_schedule=FeeScheduleState(),
    )

    chain_with_channel_no_bp = deepcopy(chain_with_token_network_no_bp)
    chain_with_channel_no_bp.identifiers_to_tokennetworkregistries[
        token_network_registry_address].tokennetworkaddresses_to_tokennetworks[
            token_network_address].channelidentifiers_to_channels[
                canonical_identifier.channel_identifier] = channel_no_bp

    assert empty(
        detect_balance_proof_change(empty_chain,
                                    chain_with_channel_no_bp)), MSG_NO_CHANGE
    assert empty(
        detect_balance_proof_change(chain_with_registry_no_bp,
                                    chain_with_channel_no_bp)), MSG_NO_CHANGE
    assert empty(
        detect_balance_proof_change(chain_with_token_network_no_bp,
                                    chain_with_channel_no_bp)), MSG_NO_CHANGE
    assert empty(
        detect_balance_proof_change(
            chain_with_channel_no_bp,
            deepcopy(chain_with_channel_no_bp))), MSG_NO_CHANGE

    channel_with_sent_bp = deepcopy(channel_no_bp)
    channel_with_sent_bp.our_state.balance_proof = create(
        BalanceProofUnsignedState)

    chain_with_sent_bp = deepcopy(chain_with_token_network_no_bp)
    chain_with_sent_bp.identifiers_to_tokennetworkregistries[
        token_network_registry_address].tokennetworkaddresses_to_tokennetworks[
            token_network_address].channelidentifiers_to_channels[
                canonical_identifier.channel_identifier] = channel_with_sent_bp

    assert not empty(
        detect_balance_proof_change(
            empty_chain,
            chain_with_sent_bp)), MSG_BALANCE_PROOF_SHOULD_BE_DETECTED
    assert not empty(
        detect_balance_proof_change(
            chain_with_registry_no_bp,
            chain_with_sent_bp)), MSG_BALANCE_PROOF_SHOULD_BE_DETECTED
    assert not empty(
        detect_balance_proof_change(
            chain_with_token_network_no_bp,
            chain_with_sent_bp)), MSG_BALANCE_PROOF_SHOULD_BE_DETECTED
    assert not empty(
        detect_balance_proof_change(
            chain_with_channel_no_bp,
            chain_with_sent_bp)), MSG_BALANCE_PROOF_SHOULD_BE_DETECTED
    assert empty(
        detect_balance_proof_change(
            chain_with_sent_bp, deepcopy(chain_with_sent_bp))), MSG_NO_CHANGE

    channel_with_received_bp = deepcopy(channel_no_bp)
    channel_with_received_bp.partner_state.balance_proof = create(
        BalanceProofUnsignedState)

    chain_with_received_bp = deepcopy(chain_with_token_network_no_bp)
    chain_with_received_bp.identifiers_to_tokennetworkregistries[
        token_network_registry_address].tokennetworkaddresses_to_tokennetworks[
            token_network_address].channelidentifiers_to_channels[
                canonical_identifier.channel_identifier] = channel_with_received_bp

    # asserting with `chain_with_received_bp` and `chain_with_sent_bp`
    # doesn't make sense, because one of the balance proofs would have to
    # disappear (which is a bug)
    assert not empty(
        detect_balance_proof_change(
            empty_chain,
            chain_with_received_bp)), MSG_BALANCE_PROOF_SHOULD_BE_DETECTED
    assert not empty(
        detect_balance_proof_change(
            chain_with_registry_no_bp,
            chain_with_received_bp)), MSG_BALANCE_PROOF_SHOULD_BE_DETECTED
    assert not empty(
        detect_balance_proof_change(
            chain_with_token_network_no_bp,
            chain_with_received_bp)), MSG_BALANCE_PROOF_SHOULD_BE_DETECTED
    assert not empty(
        detect_balance_proof_change(
            chain_with_channel_no_bp,
            chain_with_received_bp)), MSG_BALANCE_PROOF_SHOULD_BE_DETECTED
    assert empty(
        detect_balance_proof_change(
            chain_with_received_bp,
            deepcopy(chain_with_received_bp))), MSG_NO_CHANGE

    chain_with_sent_and_received_bp = deepcopy(chain_with_channel_no_bp)
    ta_to_tn = chain_with_sent_and_received_bp.identifiers_to_tokennetworkregistries
    channel_with_sent_and_received_bp = (
        ta_to_tn[token_network_registry_address].
        tokennetworkaddresses_to_tokennetworks[token_network_address].
        channelidentifiers_to_channels[canonical_identifier.channel_identifier]
    )
    channel_with_sent_and_received_bp.partner_state.balance_proof = deepcopy(
        channel_with_received_bp.partner_state.balance_proof)
    channel_with_sent_and_received_bp.our_state.balance_proof = deepcopy(
        channel_with_sent_bp.our_state.balance_proof)

    assert not empty(
        detect_balance_proof_change(empty_chain,
                                    chain_with_sent_and_received_bp)
    ), MSG_BALANCE_PROOF_SHOULD_BE_DETECTED
    assert not empty(
        detect_balance_proof_change(chain_with_registry_no_bp,
                                    chain_with_sent_and_received_bp)
    ), MSG_BALANCE_PROOF_SHOULD_BE_DETECTED
    assert not empty(
        detect_balance_proof_change(chain_with_token_network_no_bp,
                                    chain_with_sent_and_received_bp)
    ), MSG_BALANCE_PROOF_SHOULD_BE_DETECTED
    assert not empty(
        detect_balance_proof_change(chain_with_channel_no_bp,
                                    chain_with_sent_and_received_bp)
    ), MSG_BALANCE_PROOF_SHOULD_BE_DETECTED
    assert not empty(
        detect_balance_proof_change(chain_with_received_bp,
                                    chain_with_sent_and_received_bp)
    ), MSG_BALANCE_PROOF_SHOULD_BE_DETECTED
    assert not empty(
        detect_balance_proof_change(chain_with_sent_bp,
                                    chain_with_sent_and_received_bp)
    ), MSG_BALANCE_PROOF_SHOULD_BE_DETECTED
    assert empty(
        detect_balance_proof_change(
            chain_with_sent_and_received_bp,
            deepcopy(chain_with_sent_and_received_bp))), MSG_NO_CHANGE
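The test assumes a few helpers that are not part of the snippet: the empty() predicate, the two assertion messages, and the factories (make_address, make_block_hash, make_canonical_identifier, create) from raiden.tests.utils.factories. The pure-Python pieces can plausibly be defined as follows; the exact message strings are assumptions:

# Assumed definitions for the helpers used by the test above. The factory
# functions are provided by raiden.tests.utils.factories and are not redefined.
MSG_NO_CHANGE = 'No balance proof change should have been detected'
MSG_BALANCE_PROOF_SHOULD_BE_DETECTED = 'The changed balance proof should have been detected'

def empty(iterator) -> bool:
    """True if the iterator yields no element."""
    return len(list(iterator)) == 0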
Example #6
def test_detect_balance_proof_chain_handles_attribute_error(chain_state):
    chain_state.identifiers_to_tokennetworkregistries["123"] = None
    changes_iterator = detect_balance_proof_change(old_state=object(),
                                                   current_state=chain_state)
    assert len(list(changes_iterator)) == 0
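Taken together, the snippets pin down the contract of detect_balance_proof_change: walk the registries, token networks and channels of the current state, yield any balance proof that was not present in the old state, and tolerate old or current states that lack the expected attributes entirely (as the test above requires). A condensed sketch of that contract, not the actual Raiden implementation:

def _channel_of(chain_state, registry_address, token_network_address, channel_id):
    """Look up a channel, returning None when any level of the mapping is missing."""
    try:
        registry = chain_state.identifiers_to_tokennetworkregistries[registry_address]
        token_network = registry.tokennetworkaddresses_to_tokennetworks[token_network_address]
        return token_network.channelidentifiers_to_channels[channel_id]
    except (AttributeError, KeyError, TypeError):
        return None

def detect_balance_proof_change_sketch(old_state, current_state):
    """Yield every balance proof in current_state that differs from old_state."""
    registries = getattr(current_state, 'identifiers_to_tokennetworkregistries', {})
    for registry_address, registry in registries.items():
        token_networks = getattr(registry, 'tokennetworkaddresses_to_tokennetworks', {})
        for tn_address, token_network in token_networks.items():
            channels = token_network.channelidentifiers_to_channels
            for channel_id, channel in channels.items():
                old_channel = _channel_of(old_state, registry_address, tn_address, channel_id)
                old_our = old_channel.our_state.balance_proof if old_channel else None
                old_partner = old_channel.partner_state.balance_proof if old_channel else None
                our_bp = channel.our_state.balance_proof
                partner_bp = channel.partner_state.balance_proof
                if our_bp is not None and our_bp != old_our:
                    yield our_bp
                if partner_bp is not None and partner_bp != old_partner:
                    yield partner_bp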
Example #7
    def start(self):
        """ Start the node synchronously. Raises directly if anything went wrong on startup """
        if not self.stop_event.ready():
            raise RuntimeError(f'{self!r} already started')
        self.stop_event.clear()
        self.greenlets = list()

        if self.database_dir is not None:
            self.db_lock.acquire(timeout=0)
            assert self.db_lock.is_locked

        # start the registration early to speed up the start
        if self.config['transport_type'] == 'udp':
            endpoint_registration_greenlet = gevent.spawn(
                self.discovery.register,
                self.address,
                self.config['transport']['udp']['external_ip'],
                self.config['transport']['udp']['external_port'],
            )

        self.maybe_upgrade_db()

        storage = sqlite.SerializedSQLiteStorage(
            database_path=self.database_path,
            serializer=serialize.JSONSerializer(),
        )
        storage.log_run()
        self.wal = wal.restore_to_state_change(
            transition_function=node.state_transition,
            storage=storage,
            state_change_identifier='latest',
        )

        if self.wal.state_manager.current_state is None:
            log.debug(
                'No recoverable state available, created initial state',
                node=pex(self.address),
            )
            # On first run Raiden needs to fetch all events for the payment
            # network, to reconstruct all token network graphs and find opened
            # channels
            last_log_block_number = self.query_start_block
            last_log_block_hash = self.chain.client.blockhash_from_blocknumber(
                last_log_block_number,
            )

            state_change = ActionInitChain(
                pseudo_random_generator=random.Random(),
                block_number=last_log_block_number,
                block_hash=last_log_block_hash,
                our_address=self.chain.node_address,
                chain_id=self.chain.network_id,
            )
            self.handle_and_track_state_change(state_change)

            payment_network = PaymentNetworkState(
                self.default_registry.address,
                [],  # empty list of token network states as it's the node's startup
            )
            state_change = ContractReceiveNewPaymentNetwork(
                transaction_hash=constants.EMPTY_HASH,
                payment_network=payment_network,
                block_number=last_log_block_number,
                block_hash=last_log_block_hash,
            )
            self.handle_and_track_state_change(state_change)
        else:
            # The `Block` state change is dispatched only after all the events
            # for that given block have been processed, filters can be safely
            # installed starting from this position without losing events.
            last_log_block_number = views.block_number(
                self.wal.state_manager.current_state)
            log.debug(
                'Restored state from WAL',
                last_restored_block=last_log_block_number,
                node=pex(self.address),
            )

            known_networks = views.get_payment_network_identifiers(
                views.state_from_raiden(self))
            if known_networks and self.default_registry.address not in known_networks:
                configured_registry = pex(self.default_registry.address)
                known_registries = lpex(known_networks)
                raise RuntimeError(
                    f'Token network address mismatch.\n'
                    f'Raiden is configured to use the smart contract '
                    f'{configured_registry}, which conflicts with the current known '
                    f'smart contracts {known_registries}')

        # Restore the current snapshot group
        state_change_qty = self.wal.storage.count_state_changes()
        self.snapshot_group = state_change_qty // SNAPSHOT_STATE_CHANGES_COUNT

        # Install the filters using the correct from_block value, otherwise
        # blockchain logs can be lost.
        self.install_all_blockchain_filters(
            self.default_registry,
            self.default_secret_registry,
            last_log_block_number,
        )

        # Complete the first_run of the alarm task and synchronize with the
        # blockchain since the last run.
        #
        # Notes about setup order:
        # - The filters must be polled after the node state has been primed,
        # otherwise the state changes won't have effect.
        # - The alarm must complete its first run before the transport is started,
        #   to reject messages for closed/settled channels.
        self.alarm.register_callback(self._callback_new_block)
        with self.dispatch_events_lock:
            self.alarm.first_run(last_log_block_number)

        chain_state = views.state_from_raiden(self)
        self._initialize_transactions_queues(chain_state)
        self._initialize_whitelists(chain_state)
        self._initialize_payment_statuses(chain_state)
        # Send the queued messages before starting the transport. This avoids
        # a race where, if the transport were started first, actions triggered
        # by it could enqueue new messages ahead of these older ones.
        self._initialize_messages_queues(chain_state)

        # before we start the transport, we need to request monitoring for all current
        # balance proofs.
        current_balance_proofs = views.detect_balance_proof_change(
            State(),
            chain_state,
        )
        for balance_proof in current_balance_proofs:
            update_monitoring_service_from_balance_proof(self, balance_proof)

        # The transport must never be started before the alarm task's
        # `first_run()` has completed, because it is this method that
        # synchronizes the node with the blockchain, including the channels'
        # state (if a channel was closed on-chain, new messages must be
        # rejected, which will not happen if the node is not synchronized).
        self.transport.start(
            raiden_service=self,
            message_handler=self.message_handler,
            prev_auth_data=chain_state.last_transport_authdata,
        )

        # First run has been called above!
        self.alarm.start()

        # exceptions on these subtasks should crash the app and bubble up
        self.alarm.link_exception(self.on_error)
        self.transport.link_exception(self.on_error)

        # Health check needs the transport layer
        self.start_neighbours_healthcheck(chain_state)

        if self.config['transport_type'] == 'udp':
            # Re-raise if an exception occurred during endpoint registration
            endpoint_registration_greenlet.get()

        log.debug('Raiden Service started', node=pex(self.address))
        super().start()
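Note how the `with self.dispatch_events_lock:` block around `alarm.first_run()` cooperates with the first handle_state_change example on this page: while the lock is held, event dispatching returns early, so replaying the blocks missed while offline only rebuilds state without re-triggering side effects. A stripped-down sketch of that idiom; the class and its _apply/_dispatch helpers are placeholders, not Raiden APIs:

import gevent.lock

class EventSuppressingDispatcher:
    """Illustration of the dispatch_events_lock pattern used above."""

    def __init__(self):
        self.dispatch_events_lock = gevent.lock.Semaphore()

    def _apply(self, state_change):
        # Placeholder for the real state transition; returns the events to dispatch.
        return []

    def _dispatch(self, event):
        # Placeholder for the real Raiden event handler.
        pass

    def handle_state_change(self, state_change):
        events = self._apply(state_change)
        if self.dispatch_events_lock.locked():
            # State is updated, but side effects are suppressed during replay.
            return []
        for event in events:
            self._dispatch(event)
        return events

    def replay_missed_blocks(self, blocks):
        # Equivalent of wrapping `self.alarm.first_run(...)` in the lock above.
        with self.dispatch_events_lock:
            for block in blocks:
                self.handle_state_change(block)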