Code example #1
def _get_partition_indices(encoded_sizes: Sequence[int],
                           max_payload_size: int) -> Iterable[int]:
    cumulative_sizes = accumulate(operator.add, encoded_sizes)
    offset = 0
    yield 0
    for idx, (last_size,
              size) in enumerate(sliding_window(2, cons(0, cumulative_sizes))):
        if size - offset > max_payload_size:
            offset = last_size
            yield idx
    yield len(encoded_sizes)
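
A hedged usage sketch for the generator above. It assumes the excerpt's missing imports (`operator`, `Sequence`/`Iterable` from `typing`, and `accumulate`/`cons`/`sliding_window` from `toolz` or `eth_utils.toolz`) are in place; the sizes and payload limit are made up for illustration.

import operator
from toolz import accumulate

encoded_sizes = [100, 200, 150, 300, 50]   # hypothetical element sizes
max_payload_size = 400                     # hypothetical payload limit

# The running totals that drive the partitioning: 100, 300, 450, 750, 800.
print(list(accumulate(operator.add, encoded_sizes)))

# The generator yields the boundaries 0, 2, 3, 5, i.e. the slices
# [0:2], [2:3] and [3:5], each of which fits within 400 bytes.
print(list(_get_partition_indices(encoded_sizes, max_payload_size)))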
Code example #2
File: protocol.py  Project: wschwab/trinity
def get_cmd_offsets(protocol_types: Sequence[Type[ProtocolAPI]]) -> Tuple[int, ...]:
    """
    Computes the `command_id_offsets` for each protocol.  The first offset is
    always P2P_PROTOCOL_COMMAND_LENGTH since the first protocol always begins
    after the base `p2p` protocol.  Each subsequent offset adds the
    `cmd_length` of every preceding protocol to that base.
    """
    return tuple(accumulate(
        lambda prev_offset, protocol_class: prev_offset + protocol_class.cmd_length,
        protocol_types,
        P2P_PROTOCOL_COMMAND_LENGTH,
    ))[:-1]  # discard the final accumulated value, which would be the offset just past the last protocol
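
Seen in isolation, the `accumulate` call with an initial value yields the seed first and then each running total. A minimal sketch with made-up command lengths (real values come from each protocol class's `cmd_length`; the base length of 16 is assumed here purely for illustration):

from toolz import accumulate

P2P_PROTOCOL_COMMAND_LENGTH = 16   # assumed base length, for illustration only
cmd_lengths = (8, 17, 5)           # hypothetical cmd_length of three protocols

offsets = tuple(accumulate(
    lambda prev_offset, cmd_length: prev_offset + cmd_length,
    cmd_lengths,
    P2P_PROTOCOL_COMMAND_LENGTH,
))
# The seed comes out first, then each running total: (16, 24, 41, 46).
# Dropping the last element, as `[:-1]` does above, leaves exactly one
# starting offset per protocol: (16, 24, 41).
print(offsets[:-1])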
Code example #3
    def serialize(self, value: TSerializable) -> bytes:
        self._validate_serializable(value)

        if not len(value):
            return b""

        pairs = self._get_item_sedes_pairs(value)  # slow
        element_sedes = tuple(sedes for element, sedes in pairs)

        has_fixed_size_section_length_cache = hasattr(
            value, "_fixed_size_section_length_cache")
        if has_fixed_size_section_length_cache:
            if value._fixed_size_section_length_cache is None:
                fixed_size_section_length = _compute_fixed_size_section_length(
                    element_sedes)
                value._fixed_size_section_length_cache = fixed_size_section_length
            else:
                fixed_size_section_length = value._fixed_size_section_length_cache
        else:
            fixed_size_section_length = _compute_fixed_size_section_length(
                element_sedes)

        variable_size_section_parts = tuple(
            sedes.serialize(item)  # slow
            for item, sedes in pairs if not sedes.is_fixed_sized)

        if variable_size_section_parts:
            offsets = tuple(
                accumulate(
                    operator.add,
                    map(len, variable_size_section_parts[:-1]),
                    fixed_size_section_length,
                ))
        else:
            offsets = ()

        offsets_iter = iter(offsets)

        fixed_size_section_parts = tuple(
            sedes.serialize(item)  # slow
            if sedes.is_fixed_sized else encode_offset(next(offsets_iter))
            for item, sedes in pairs)

        try:
            next(offsets_iter)
        except StopIteration:
            pass
        else:
            raise SerializationError(
                "Did not consume all offsets while serializing value")

        return b"".join(
            concatv(fixed_size_section_parts, variable_size_section_parts))
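
The `accumulate` call is the interesting part of the offset bookkeeping: each variable-size part starts where the previous one ended, seeded with the length of the fixed-size section. A standalone sketch with made-up byte strings rather than real sedes objects:

import operator
from toolz import accumulate

fixed_size_section_length = 12                          # e.g. three 4-byte offsets
variable_size_section_parts = (b"aaaa", b"bb", b"cccccc")

offsets = tuple(accumulate(
    operator.add,
    map(len, variable_size_section_parts[:-1]),
    fixed_size_section_length,
))
# The seed is the first offset, then each part's length is added on top:
# (12, 16, 18).  The last part's length never matters, hence the [:-1].
print(offsets)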
Code example #4
File: test_header_db.py  Project: marcgarreau/py-evm
def test_headerdb_get_score_for_non_genesis_headers(headerdb, genesis_header):
    headerdb.persist_header(genesis_header)

    headers = mk_header_chain(genesis_header, length=10)
    difficulties = tuple(h.difficulty for h in headers)
    scores = tuple(accumulate(operator.add, difficulties, genesis_header.difficulty))

    headerdb.persist_header_chain(headers)

    for header, expected_score in zip(headers, scores[1:]):
        actual_score = headerdb.get_score(header.hash)
        assert actual_score == expected_score
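
A standalone sketch of the score arithmetic with made-up difficulties: the genesis difficulty seeds the accumulation, so `scores[0]` is the genesis score and `scores[1:]` gives one cumulative score per non-genesis header.

import operator
from toolz import accumulate

genesis_difficulty = 131072          # made-up value for illustration
difficulties = (1000, 1100, 1200)    # one per non-genesis header

scores = tuple(accumulate(operator.add, difficulties, genesis_difficulty))
# (131072, 132072, 133172, 134372)
print(scores[1:])                    # expected scores for the three headers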
Code example #5
def test_ephemeral_db_fuzz(capacity):
    center_id = 0
    keys = tuple(b'key/%d' % idx for idx in range(50))
    values = tuple('0' * idx for idx in range(1, 51))
    items = list(zip(keys, values))
    lookup = dict(items)
    sorted_items = list(
        sorted(
            items,
            key=lambda item: compute_distance(center_id,
                                              content_key_to_node_id(item[0])),
        ))
    cumulative_sizes = tuple(
        accumulate(operator.add, (len(item[1]) for item in sorted_items)))
    cutoff_index = bisect.bisect_left(cumulative_sizes, capacity)
    remaining_capacity = capacity - cumulative_sizes[cutoff_index - 1]

    sorted_keys = tuple(key for key, value in sorted_items)
    expected_keys = sorted_keys[:cutoff_index]
    expected_evicted_keys = sorted_keys[cutoff_index:]

    random.shuffle(items)
    db = EphemeralDB(
        capacity=capacity,
        distance_fn=lambda key: compute_distance(center_id,
                                                 content_key_to_node_id(key)),
    )

    for key, value in items:
        db.set(key, value)

    for key in expected_keys:
        assert db.has(key)
    for key in expected_evicted_keys:
        # keys whose values are small enough to fit in the remaining capacity
        # may not have been evicted, so skip them.
        if len(lookup[key]) <= remaining_capacity:
            continue
        assert not db.has(key)
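
The `accumulate`/`bisect` pairing above is what decides the cutoff: the running total of value sizes, taken in distance order, is searched for the point where capacity runs out. A hedged, standalone sketch with made-up sizes:

import bisect
import operator
from toolz import accumulate

capacity = 10
value_sizes = (3, 4, 2, 6, 1)   # hypothetical sizes, ordered by distance

cumulative_sizes = tuple(accumulate(operator.add, value_sizes))
# (3, 7, 9, 15, 16)
cutoff_index = bisect.bisect_left(cumulative_sizes, capacity)
# cumulative_sizes[2] == 9 < 10 <= cumulative_sizes[3], so cutoff_index == 3:
# the three closest items fit; the rest are eviction candidates.
remaining_capacity = capacity - cumulative_sizes[cutoff_index - 1]
print(cutoff_index, remaining_capacity)   # 3 1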
Code example #6
File: test_header_db.py  Project: marcgarreau/py-evm
def assert_is_canonical_chain(headerdb, headers):
    if not headers:
        return

    # verify that the HEAD is correctly set.
    head = headerdb.get_canonical_head()
    assert_headers_eq(head, headers[-1])

    # verify that each header is set as the canonical block.
    for header in headers:
        canonical = headerdb.get_canonical_block_header_by_number(header.block_number)
        assert canonical == header

    # verify that the cumulative difficulty scores are correctly set.
    base_header = headerdb.get_block_header_by_hash(headers[0].parent_hash)

    difficulties = tuple(h.difficulty for h in headers)
    scores = tuple(accumulate(operator.add, difficulties, base_header.difficulty))

    for header, expected_score in zip(headers, scores[1:]):
        actual_score = headerdb.get_score(header.hash)
        assert actual_score == expected_score
Code example #7
def deserialize_paths(data: bytes) -> Tuple[TreePath, ...]:
    # Parse the remaining data as the encoded paths.  The `[1:]` drops the
    # extra leading element that `accumulate` yields: its initial value (the
    # empty path) comes out before the first chunk is folded in.
    return tuple(accumulate(deserialize_path, partition_leb128(data), ()))[1:]
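
Here `accumulate` acts as a fold that keeps every intermediate result: each LEB128 chunk is decoded relative to the previously decoded path, starting from the empty path. A toy sketch of the same pattern (the real `deserialize_path` and `partition_leb128` are project-specific, so a stand-in binop and chunks are used):

from toolz import accumulate

def extend_path(previous_path, chunk):
    # Toy stand-in for deserialize_path: derive the next "path" from the
    # previous one plus the newly decoded chunk.
    return previous_path + (chunk,)

chunks = (b"\x01", b"\x02", b"\x03")   # stand-in for partition_leb128(data)

paths = tuple(accumulate(extend_path, chunks, ()))[1:]
# Without the [1:], the initial empty path () would be the first element:
# ((b'\x01',), (b'\x01', b'\x02'), (b'\x01', b'\x02', b'\x03'))
print(paths)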