Example no. 1
def test_events_loaded_from_storage_should_deserialize(tmp_path):
    filename = Path(f"{tmp_path}/v{RAIDEN_DB_VERSION}_log.db")
    storage = SerializedSQLiteStorage(filename, serializer=JSONSerializer())

    # Satisfy the foreign-key constraint for state change ID
    ids = storage.write_state_changes([
        Block(
            block_number=BlockNumber(1),
            gas_limit=BlockGasLimit(1),
            block_hash=factories.make_block_hash(),
        )
    ])

    canonical_identifier = factories.make_canonical_identifier()
    recipient = factories.make_address()
    participant = factories.make_address()
    event = SendWithdrawRequest(
        recipient=recipient,
        canonical_identifier=canonical_identifier,
        message_identifier=factories.make_message_identifier(),
        total_withdraw=WithdrawAmount(1),
        participant=participant,
        expiration=BlockExpiration(10),
        nonce=Nonce(15),
    )
    storage.write_events([(ids[0], event)])

    stored_events = storage.get_events()
    assert stored_events[0] == event
Example no. 2
def test_write_read_log() -> None:
    wal = new_wal(state_transition_noop)

    block_number = BlockNumber(1337)
    block_hash = make_block_hash()
    block = Block(block_number=block_number,
                  gas_limit=BlockGasLimit(1),
                  block_hash=block_hash)
    unlocked_amount = TokenAmount(10)
    returned_amount = TokenAmount(5)
    participant = make_address()
    partner = make_address()
    locksroot = make_locksroot()
    contract_receive_unlock = ContractReceiveChannelBatchUnlock(
        transaction_hash=make_transaction_hash(),
        canonical_identifier=make_canonical_identifier(
            token_network_address=make_address()),
        receiver=participant,
        sender=partner,
        locksroot=locksroot,
        unlocked_amount=unlocked_amount,
        returned_tokens=returned_amount,
        block_number=block_number,
        block_hash=block_hash,
    )

    state_changes1 = wal.storage.get_statechanges_by_range(
        RANGE_ALL_STATE_CHANGES)
    count1 = len(state_changes1)

    dispatch(wal, [block])

    state_changes2 = wal.storage.get_statechanges_by_range(
        RANGE_ALL_STATE_CHANGES)
    count2 = len(state_changes2)
    assert count1 + 1 == count2

    dispatch(wal, [contract_receive_unlock])

    state_changes3 = wal.storage.get_statechanges_by_range(
        RANGE_ALL_STATE_CHANGES)
    count3 = len(state_changes3)
    assert count2 + 1 == count3

    result1, result2 = state_changes3[-2:]
    assert isinstance(result1, Block)
    assert result1.block_number == block_number

    assert isinstance(result2, ContractReceiveChannelBatchUnlock)
    assert result2.receiver == participant
    assert result2.sender == partner
    assert result2.locksroot == locksroot
    assert result2.unlocked_amount == unlocked_amount
    assert result2.returned_tokens == returned_amount

    # Make sure a state snapshot can only be written for an existing state change id
    with pytest.raises(sqlite3.IntegrityError):
        wal.storage.write_state_snapshot(State(), StateChangeID(make_ulid()),
                                         1)
Example no. 3
def test_get_snapshot_before_state_change() -> None:
    wal = new_wal(state_transtion_acc)

    block1 = Block(block_number=BlockNumber(5),
                   gas_limit=BlockGasLimit(1),
                   block_hash=make_block_hash())
    dispatch(wal, [block1])
    wal.snapshot(1)

    block2 = Block(block_number=BlockNumber(7),
                   gas_limit=BlockGasLimit(1),
                   block_hash=make_block_hash())
    dispatch(wal, [block2])
    wal.snapshot(2)

    block3 = Block(block_number=BlockNumber(8),
                   gas_limit=BlockGasLimit(1),
                   block_hash=make_block_hash())
    dispatch(wal, [block3])
    wal.snapshot(3)

    snapshot = wal.storage.get_snapshot_before_state_change(
        HIGH_STATECHANGE_ULID)
    assert snapshot and snapshot.data == AccState([block1, block2, block3])
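
The state_transtion_acc helper and AccState are not part of this listing. Under the assumption that the helper simply accumulates every dispatched state change, which is what the final assertion relies on, a hypothetical sketch could look like the following; the field name and the TransitionResult usage are assumptions, not the real helper:

from dataclasses import dataclass, field
from typing import List

from raiden.transfer.architecture import TransitionResult


# Hypothetical sketch only, not the helper actually used by the test above.
@dataclass
class AccState:
    state_changes: List[Block] = field(default_factory=list)


def state_transtion_acc(state, state_change):
    state = state or AccState()
    state.state_changes.append(state_change)  # accumulate every state change
    return TransitionResult(state, [])  # no events are produced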
Example no. 4
    def new_blocks(self, number):
        for _ in range(number):
            block_state_change = Block(
                block_number=BlockNumber(self.block_number + 1),
                gas_limit=BlockGasLimit(1),
                block_hash=make_block_hash(),
            )
            for client in self.address_to_client.values():
                events = list()
                result = node.state_transition(client.chain_state,
                                               block_state_change)
                events.extend(result.events)
            # TODO assert on events

            self.block_number += 1
Example no. 5
def test_upgrade_manager_restores_backup(tmp_path, monkeypatch):
    db_path = tmp_path / Path("v17_log.db")

    old_db_filename = tmp_path / Path("v16_log.db")

    with patch("raiden.storage.sqlite.RAIDEN_DB_VERSION",
               new=16), SQLiteStorage(str(old_db_filename)) as storage:
        state_change = Block(
            block_number=BlockNumber(0),
            gas_limit=BlockGasLimit(1000),
            block_hash=factories.make_block_hash(),
        )
        block_data = JSONSerializer.serialize(state_change)
        storage.write_state_changes(state_changes=[block_data])
        storage.update_version()

    upgrade_functions = [UpgradeRecord(from_version=16, function=Mock())]

    upgrade_functions[0].function.return_value = 17

    web3, _ = create_fake_web3_for_block_hash(number_of_blocks=1)
    with monkeypatch.context() as m:
        m.setattr(raiden.utils.upgrades, "UPGRADES_LIST", upgrade_functions)
        m.setattr(raiden.utils.upgrades, "RAIDEN_DB_VERSION", 19)
        UpgradeManager(db_filename=db_path, web3=web3).run()

    # Once restored, the state changes written above should be
    # in the restored database
    with SQLiteStorage(str(db_path)) as storage:
        state_change_record = storage.get_latest_state_change_by_data_field(
            FilteredDBQuery(
                filters=[{
                    "_type": "raiden.transfer.state_change.Block"
                }],
                main_operator=Operator.NONE,
                inner_operator=Operator.NONE,
            ))
        assert state_change_record.data is not None
Example no. 6
def test_get_event_with_balance_proof():
    """ All events which contain a balance proof must be found by when
    querying the database.
    """
    serializer = JSONSerializer()
    storage = SerializedSQLiteStorage(":memory:", serializer)
    counter = itertools.count(1)
    partner_address = factories.make_address()

    balance_proof = make_balance_proof_from_counter(counter)
    lock_expired = SendLockExpired(
        recipient=partner_address,
        message_identifier=MessageID(next(counter)),
        balance_proof=balance_proof,
        secrethash=factories.make_secret_hash(next(counter)),
        canonical_identifier=balance_proof.canonical_identifier,
    )
    locked_transfer = SendLockedTransfer(
        recipient=partner_address,
        message_identifier=MessageID(next(counter)),
        transfer=make_transfer_from_counter(counter),
        canonical_identifier=factories.make_canonical_identifier(),
    )
    send_balance_proof = SendBalanceProof(
        recipient=partner_address,
        message_identifier=MessageID(next(counter)),
        payment_identifier=factories.make_payment_id(),
        token_address=factories.make_token_address(),
        secret=factories.make_secret(next(counter)),
        balance_proof=make_balance_proof_from_counter(counter),
        canonical_identifier=factories.make_canonical_identifier(),
    )

    refund_transfer = SendRefundTransfer(
        recipient=partner_address,
        message_identifier=MessageID(next(counter)),
        transfer=make_transfer_from_counter(counter),
        canonical_identifier=factories.make_canonical_identifier(),
    )

    events_balanceproofs = [
        (lock_expired, lock_expired.balance_proof),
        (locked_transfer, locked_transfer.balance_proof),
        (send_balance_proof, send_balance_proof.balance_proof),
        (refund_transfer, refund_transfer.transfer.balance_proof),
    ]

    state_change = Block(BlockNumber(1), BlockGasLimit(1),
                         factories.make_block_hash())
    for event, _ in events_balanceproofs:
        state_change_identifiers = storage.write_state_changes([state_change])
        storage.write_events(events=[(state_change_identifiers[0], event)])

    for event, balance_proof in events_balanceproofs:
        event_record = get_event_with_balance_proof_by_balance_hash(
            storage=storage,
            canonical_identifier=balance_proof.canonical_identifier,
            balance_hash=balance_proof.balance_hash,
            recipient=partner_address,
        )
        assert event_record
        assert event_record.data == event

        event_record = get_event_with_balance_proof_by_locksroot(
            storage=storage,
            canonical_identifier=balance_proof.canonical_identifier,
            recipient=event.recipient,
            locksroot=balance_proof.locksroot,
        )
        assert event_record
        assert event_record.data == event

        # Check that the balance proof attribute can be accessed for all events.
        # Issue https://github.com/raiden-network/raiden/issues/3179
        assert event_record.data.balance_proof == event.balance_proof

    storage.close()
Example no. 7
    def fetch_logs_in_batch(self, target_block_number: BlockNumber) -> Optional[PollResult]:
        """Poll the smart contract events for a limited number of blocks to
        avoid read timeouts (issue #3558).

        The block ``target_block_number`` will not be reached if it is more than
        ``self.block_batch_size_adjuster.batch_size`` blocks away. To ensure the
        target is reached keep calling ``fetch_logs_in_batch`` until
        ``PollResult.polled_block_number`` is the same as ``target_block_number``.

        This function makes sure that the block range for the queries is not
        too big; this is necessary because it may take a long time for an
        Ethereum node to process the request, which would result in read
        timeouts (issue #3558).

        The block batch size is adjusted dynamically based on the request
        processing duration (see ``_query_and_track()``, issue #5538).
        If the request times out the batch size is decreased and ``None``
        is returned.
        If the batch size falls below the lower threshold an exception is raised
        by the ``BlockBatchSizeAdjuster``.

        This will also group the queries as an optimization for a healthy node
        (issue #4872). This is enforced by the design of the data structures:
        all the events for all the registered addresses are always fetched
        together.
        """
        # The target block has already been reached; raise an exception since
        # the caller is breaking the contract of the API
        if target_block_number <= self.last_fetched_block:
            raise ValueError(
                f"target {target_block_number} is in the past, the block has "
                f"been fetched already. Current {self.last_fetched_block}"
            )

        # As of Geth 1.9.5 there is no relational database nor an index of
        # blooms. Geth always does a linear search proportional to the number
        # of blocks in the query.
        #
        # As of Parity 2.5.8 the client has no relational database. The
        # blockchain events are indexed through a hierarchy of bloom filters
        # three levels deep, and each level has its own `.dbd` file.
        #
        # The bottom layer is composed of every block's logs bloom, as defined
        # in the yellow paper, where each entry position matches the
        # originating block number. The top and mid layers are just an
        # optimization; in these layers each entry is composed of 16 bloom
        # filters from the layer below.
        #
        # Each pair (`address`, `topic`) of a query is used to create one bloom
        # filter. These blooms are then used to find candidate blocks through
        # the bloom index, and those blocks are loaded and their logs filtered.
        #
        # Based on the `fromBlock` the index files are seeked to the correct
        # position. The search always starts at the top level; if the query
        # bloom is not contained in the index, the search goes to the next
        # entry at the top level and skips all the mid and lower indexes. The
        # same procedure is done for the mid level. If there is a match at the
        # lower level, then we may have a hit. Because the bloom index is the
        # same as the block number, this information is used to recover the
        # block hash.
        #
        # Each of the blocks that correspond to the hashes from the previous
        # step are then loaded, including the receipts with the logs. The
        # matching logs are then returned as results to the query.
        #
        # Additional notes for Parity:
        #
        # - Every operation on the bloom database uses an exclusive lock, so
        # concurrent requests are not very useful.
        # - The path explained above is only used if the queries are done with
        # block numbers. Queries for block hashes will not use the index; this
        # seems necessary because there is only one index for the canonical
        # chain, and queries with block hashes seem to support uncle
        # blocks/reorgs.
        # - When an address is being queried for all the logs, it is better not
        # to specify any topics, especially when multiple addresses are being
        # queried.
        # - The batching interface doesn't do any internal optimizations, so in
        # effect it is the same as sending multiple requests one after the
        # other. The only benefit is saving the requests' round-trip time.

        with self._filters_lock:
            # Skip the last fetched block; since the ranges are inclusive, the
            # same block would otherwise be fetched twice, which could result
            # in duplicate events.
            from_block = BlockNumber(self.last_fetched_block + 1)

            # Limit the range of blocks fetched; this limits the size of the
            # scan done by the target node. The batch size is adjusted below
            # depending on the response time of the node.
            to_block = BlockNumber(
                min(from_block + self.block_batch_size_adjuster.batch_size, target_block_number)
            )

            # Sending a single request for all the smart contract addresses
            # is the core optimization here. Because both Geth and Parity
            # will do a linear search per request, in some shape or form,
            # sending only one request will result in only one linear
            # search.
            #
            # This optimization has a few benefits:
            #
            # - There will be only one request for all the smart contracts,
            # reducing traffic from Raiden to the Ethereum client, which is
            # important if the client is remote or a hosted service like
            # Infura.
            # - The request will be faster for large ranges (this is an
            # implementation detail that happens to be true for both
            # clients; the rationale is to reduce the number of loops that
            # go through lots of elements).

            try:
                decoded_result, max_request_duration = self._query_and_track(from_block, to_block)
            except EthGetLogsTimeout:
                # The request timed out - this typically means the node wasn't able to process
                # the requested batch size fast enough.
                # Decrease the batch size and let the higher layer retry.
                log.debug("Timeout while fetching blocks, decreasing batch size")
                self.block_batch_size_adjuster.decrease()
                return None

            can_use_bigger_batches = (
                target_block_number - from_block > self.block_batch_size_adjuster.batch_size
            )
            # Adjust block batch size depending on request duration.
            # To reduce oscillation, the batch size is kept constant for request durations
            # between ``ETH_GET_LOGS_THRESHOLD_FAST`` and ``ETH_GET_LOGS_THRESHOLD_SLOW``.
            if max_request_duration < ETH_GET_LOGS_THRESHOLD_FAST:
                # The request was fast, increase batch size
                if can_use_bigger_batches:
                    # But only if we actually need bigger batches. This prevents the batch
                    # size from ballooning towards the maximum after the initial sync is done,
                    # since then typically only one block is fetched at a time, which is
                    # usually fast.
                    self.block_batch_size_adjuster.increase()
            elif max_request_duration > ETH_GET_LOGS_THRESHOLD_SLOW:
                # The request is taking longer than the 'slow' threshold - decrease
                # the batch size
                self.block_batch_size_adjuster.decrease()

            latest_confirmed_block = self.web3.eth.getBlock(to_block)

            self.last_fetched_block = to_block

            return PollResult(
                polled_block_number=to_block,
                polled_block_hash=BlockHash(bytes(latest_confirmed_block["hash"])),
                polled_block_gas_limit=BlockGasLimit(latest_confirmed_block["gasLimit"]),
                events=decoded_result,
            )
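
The docstring above describes the calling contract: keep invoking fetch_logs_in_batch until the target block has been fetched, and retry when None is returned after a timeout (the batch size has already been decreased at that point). A minimal caller sketch, assuming fetcher is an instance of the class this method belongs to and target is the latest confirmed block number:

def poll_until_target(fetcher, target: BlockNumber) -> List[DecodedEvent]:
    # Sketch of the calling pattern described in the docstring; `fetcher` and
    # `target` are assumed names, not identifiers from the original code.
    events: List[DecodedEvent] = []
    while fetcher.last_fetched_block < target:
        result = fetcher.fetch_logs_in_batch(target)
        if result is None:
            # The request timed out and the batch size was decreased;
            # retry with the smaller batch.
            continue
        events.extend(result.events)
    return events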
Example no. 8
@dataclass(frozen=True)
class PollResult:
    """Result of a poll request. The block number is provided so that the
    caller can confirm it in its storage.
    """

    polled_block_number: BlockNumber
    polled_block_hash: BlockHash
    polled_block_gas_limit: BlockGasLimit
    events: List[DecodedEvent]


ZERO_POLL_RESULT = PollResult(
    polled_block_number=GENESIS_BLOCK_NUMBER,
    polled_block_hash=EMPTY_HASH,
    polled_block_gas_limit=BlockGasLimit(0),
    events=[],
)


def verify_block_number(number: BlockSpecification, argname: str) -> None:
    if isinstance(number, int) and (number < 0 or number > UINT64_MAX):
        raise InvalidBlockNumberInput(
            "Provided block number {} for {} is invalid. Has to be in the range "
            "of [0, UINT64_MAX]".format(number, argname))


def get_contract_events(
    proxy_manager: ProxyManager,
    abi: ABI,
    contract_address: Address,