Example No. 1
    def _get_nodes_for_exploration(self) -> Iterator[Tuple[NodeID, int]]:
        candidates = self._get_ordered_candidates()
        candidate_triplets = sliding_window(
            3, caboose(cons(None, candidates), None))

        for left_id, node_id, right_id in candidate_triplets:
            # Filter out nodes that have already been queried
            if node_id in self.queried:
                continue
            elif node_id in self.in_flight:
                continue

            # By looking at the two closest *sibling* nodes we can determine
            # how much of their routing table we need to query.  We consider
            # the maximum logarithmic distance to either neighbor which
            # guarantees that we look up the region of the network that this
            # node knows the most about, but avoid querying buckets for which
            # other nodes are going to have a more complete view.
            if left_id is None:
                left_distance = 256
            else:
                left_distance = compute_log_distance(node_id, left_id)

            if right_id is None:
                right_distance = 256
            else:
                right_distance = compute_log_distance(node_id, right_id)

            # We use the maximum distance to ensure that we cover every part of
            # the address space.
            yield node_id, max(left_distance, right_distance)
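The `cons`/`caboose` calls above pad the ordered candidates with `None` at the head and tail, so every candidate becomes the middle element of exactly one triplet. A minimal sketch of that windowing, using plain list concatenation and made-up node IDs:

from toolz import sliding_window

candidates = ["node-a", "node-b", "node-c"]

# None stands in for a missing left or right neighbor at the edges.
triplets = list(sliding_window(3, [None] + candidates + [None]))
assert triplets == [
    (None, "node-a", "node-b"),
    ("node-a", "node-b", "node-c"),
    ("node-b", "node-c", None),
]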
Example No. 2
def initialize_scenario(
    validator_set_contract: Contract,
    transition_heights: Optional[Sequence[int]] = None
) -> Tuple[List[ValidatorDefinitionRange], List[Union[Type[Contract],
                                                      Contract]]]:
    _transition_heights: List[Union[int, None]] = (
        list(transition_heights) if transition_heights is not None else [])

    w3 = validator_set_contract.web3

    validator_definition_ranges = []
    contracts: List[Union[Type[Contract], Contract]] = []
    for enter_height, leave_height in sliding_window(
            2, _transition_heights + [None]):
        deployment_tx_hash = validator_set_contract.constructor().transact()
        deployment_receipt: TxReceipt = w3.eth.waitForTransactionReceipt(
            deployment_tx_hash)
        contract = w3.eth.contract(
            address=deployment_receipt["contractAddress"],
            abi=validator_set_contract.abi,
        )
        contracts.append(contract)

        validator_definition_ranges.append(
            ValidatorDefinitionRange(
                enter_height=enter_height,
                leave_height=leave_height,
                is_contract=True,
                contract_address=contracts[-1].address,
                validators=None,
            ))

    return validator_definition_ranges, contracts
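The trailing `+ [None]` sentinel is what lets the final range's `leave_height` stay open-ended: `sliding_window(2, ...)` pairs each enter height with the next one, and the last pair ends in `None`. A quick sketch with made-up heights:

from toolz import sliding_window

heights = [100, 200]
assert list(sliding_window(2, heights + [None])) == [(100, 200), (200, None)]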
Example No. 3
def _introduce_collisions(all_attestations_by_index, block_producer, state,
                          config):
    """
    Find some attestations for later epochs for the validators
    that are currently attesting in each source of attestation.
    """
    collisions = (all_attestations_by_index[0], )
    for src, dst in sliding_window(2, all_attestations_by_index):
        if not src:
            # src can be empty at low validator count
            collisions += (dst, )
            continue
        src_index = random.choice(list(src.keys()))
        src_val = src[src_index]
        src_slot, _ = src_val
        src_epoch = compute_epoch_at_slot(src_slot, config.SLOTS_PER_EPOCH)
        dst_epoch = src_epoch + 1

        collision = _find_collision(
            state,
            config,
            validator_index=src_index,
            epoch=dst_epoch,
            block_producer=block_producer,
        )
        collisions += (merge(dst, collision), )
    return collisions
Example No. 4
    async def _find_newest_matching_skeleton_header(self, peer: TChainPeer) -> BlockHeader:
        start_num = await self.wait(self._launch_strategy.get_starting_block_number())

        # after returning this header, we request the next gap, and prefer that one header
        # is new to us, which may be the next header in this mini-skeleton. (hence the -1 below)
        skip = MAX_HEADERS_FETCH - 1
        skeleton_launch_headers = await self._fetch_headers_from(peer, start_num, skip=skip)

        if len(skeleton_launch_headers) == 0:
            raise ValidationError(
                f"{peer} gave 0 headers when seeking common skeleton ancestors from {start_num}"
            )

        # check the first returned value
        first = skeleton_launch_headers[0]

        first_is_present = await self.wait(self._db.coro_header_exists(first.hash))

        if not first_is_present:
            await self._log_ancester_failure(peer, first)
            raise ValidationError(f"No common ancestor with {peer}, who started with {first}")
        elif len(skeleton_launch_headers) == 1:
            return skeleton_launch_headers[0]
        else:
            for parent, child in sliding_window(2, skeleton_launch_headers):
                is_present = await self.wait(self._db.coro_header_exists(child.hash))
                if not is_present:
                    return parent
            else:
                # All headers are present, probably the canonical head updated recently
                # Return the newest one
                return skeleton_launch_headers[-1]
Example No. 5
    def validate_chain(cls,
                       root: BlockHeader,
                       descendants: Tuple[BlockHeader, ...],
                       seal_check_random_sample_rate: int = 1) -> None:
        """
        Validate that all of the descendants are valid, given that the root header is valid.

        By default, check the seal validity (Proof-of-Work on Ethereum 1.x mainnet) of all headers.
        This can be expensive. Instead, check a random sample of seals using
        seal_check_random_sample_rate.
        """

        all_indices = range(len(descendants))
        if seal_check_random_sample_rate == 1:
            indices_to_check_seal = set(all_indices)
        else:
            sample_size = len(all_indices) // seal_check_random_sample_rate
            indices_to_check_seal = set(random.sample(all_indices,
                                                      sample_size))

        header_pairs = sliding_window(2, concatv([root], descendants))

        for index, (parent, child) in enumerate(header_pairs):
            if child.parent_hash != parent.hash:
                raise ValidationError(
                    "Invalid header chain; {} has parent {}, but expected {}".
                    format(child, child.parent_hash, parent.hash))
            should_check_seal = index in indices_to_check_seal
            vm_class = cls.get_vm_class_for_block_number(child.block_number)
            vm_class.validate_header(child,
                                     parent,
                                     check_seal=should_check_seal)
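`concatv([root], descendants)` re-attaches the trusted root so each descendant gets paired with its parent by the sliding window. A tiny sketch of that pairing using bare `(hash, parent_hash)` tuples instead of real headers:

from toolz import concatv, sliding_window

root = ("a", "genesis")
descendants = (("b", "a"), ("c", "b"))

pairs = list(sliding_window(2, concatv([root], descendants)))
assert pairs == [(("a", "genesis"), ("b", "a")), (("b", "a"), ("c", "b"))]
# Mirroring the check above: each child's parent_hash equals its parent's hash.
assert all(child[1] == parent[0] for parent, child in pairs)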
Example No. 6
    def validate_chain(
            self,
            root: BlockHeaderAPI,
            descendants: Tuple[BlockHeaderAPI, ...],
            seal_check_random_sample_rate: int = 1) -> None:

        all_indices = range(len(descendants))
        if seal_check_random_sample_rate == 1:
            indices_to_check_seal = set(all_indices)
        elif seal_check_random_sample_rate == 0:
            indices_to_check_seal = set()
        else:
            sample_size = len(all_indices) // seal_check_random_sample_rate
            indices_to_check_seal = set(random.sample(all_indices, sample_size))

        header_pairs = sliding_window(2, concatv([root], descendants))

        for index, (parent, child) in enumerate(header_pairs):
            if child.parent_hash != parent.hash:
                raise ValidationError(
                    f"Invalid header chain; {child} has parent {encode_hex(child.parent_hash)},"
                    f" but expected {encode_hex(parent.hash)}"
                )
            should_check_seal = index in indices_to_check_seal
            vm = self.get_vm(child)
            try:
                vm.validate_header(child, parent)
            except ValidationError as exc:
                raise ValidationError(
                    f"{child} is not a valid child of {parent}: {exc}"
                ) from exc

            if should_check_seal:
                vm.validate_seal(child)
Example No. 7
    def _get_blocks(self,
                    start_block: BaseBeaconBlock,
                    max_blocks: int) -> Iterable[BaseBeaconBlock]:
        if max_blocks < 0:
            raise Exception("Invariant: max blocks cannot be negative")

        if max_blocks == 0:
            return

        yield start_block

        blocks_generator = cons(start_block, (
            self.db.get_canonical_block_by_slot(slot)
            for slot in itertools.count(start_block.slot + 1)
        ))
        max_blocks_generator = take(max_blocks, blocks_generator)

        try:
            # ensure only a connected chain is returned (breaks might occur if the start block is
            # not part of the canonical chain or if the canonical chain changes during execution)
            for parent, child in sliding_window(2, max_blocks_generator):
                if child.parent_root == parent.hash:
                    yield child
                else:
                    break
        except BlockNotFound:
            return
Example No. 8
def validate_validator_definition_order(
        validator_definition_ranges: Sequence[ValidatorDefinitionRange]
) -> None:
    for current_range, next_range in sliding_window(
            2, validator_definition_ranges):
        if current_range.leave_height != next_range.enter_height:
            raise ValueError("Missing validator definition range")
Example No. 9
    def _persist_header_chain(
        cls, db: BaseDB, headers: Iterable[BlockHeader]
    ) -> Tuple[Tuple[BlockHeader, ...], Tuple[BlockHeader, ...]]:
        headers_iterator = iter(headers)

        try:
            first_header = first(headers_iterator)
        except StopIteration:
            return tuple(), tuple()

        is_genesis = first_header.parent_hash == GENESIS_PARENT_HASH
        if not is_genesis and not cls._header_exists(db,
                                                     first_header.parent_hash):
            raise ParentNotFound(
                "Cannot persist block header ({}) with unknown parent ({})".
                format(encode_hex(first_header.hash),
                       encode_hex(first_header.parent_hash)))

        if is_genesis:
            score = 0
        else:
            score = cls._get_score(db, first_header.parent_hash)

        curr_chain_head = first_header
        db.set(
            curr_chain_head.hash,
            rlp.encode(curr_chain_head),
        )
        score = cls._set_hash_scores_to_db(db, curr_chain_head, score)

        orig_headers_seq = concat([(first_header, ), headers_iterator])
        for parent, child in sliding_window(2, orig_headers_seq):
            if parent.hash != child.parent_hash:
                raise ValidationError(
                    "Non-contiguous chain. Expected {} to have {} as parent but was {}"
                    .format(
                        encode_hex(child.hash),
                        encode_hex(parent.hash),
                        encode_hex(child.parent_hash),
                    ))

            curr_chain_head = child
            db.set(
                curr_chain_head.hash,
                rlp.encode(curr_chain_head),
            )

            score = cls._set_hash_scores_to_db(db, curr_chain_head, score)

        try:
            previous_canonical_head = cls._get_canonical_head(db).hash
            head_score = cls._get_score(db, previous_canonical_head)
        except CanonicalHeadNotFound:
            return cls._set_as_canonical_chain_head(db, curr_chain_head.hash)

        if score > head_score:
            return cls._set_as_canonical_chain_head(db, curr_chain_head.hash)

        return tuple(), tuple()
Example No. 10
    def _persist_header_chain(
            cls,
            db: DatabaseAPI,
            headers: Iterable[BlockHeaderAPI],
            genesis_parent_hash: Hash32,
    ) -> Tuple[Tuple[BlockHeaderAPI, ...], Tuple[BlockHeaderAPI, ...]]:
        headers_iterator = iter(headers)

        try:
            first_header = first(headers_iterator)
        except StopIteration:
            return tuple(), tuple()

        is_genesis = first_header.parent_hash == genesis_parent_hash
        if not is_genesis and not cls._header_exists(db, first_header.parent_hash):
            raise ParentNotFound(
                f"Cannot persist block header ({encode_hex(first_header.hash)}) "
                f"with unknown parent ({encode_hex(first_header.parent_hash)})"
            )

        if is_genesis:
            score = 0
        else:
            score = cls._get_score(db, first_header.parent_hash)

        curr_chain_head = first_header
        db.set(
            curr_chain_head.hash,
            rlp.encode(curr_chain_head),
        )
        score = cls._set_hash_scores_to_db(db, curr_chain_head, score)

        orig_headers_seq = concat([(first_header,), headers_iterator])
        for parent, child in sliding_window(2, orig_headers_seq):
            if parent.hash != child.parent_hash:
                raise ValidationError(
                    f"Non-contiguous chain. Expected {encode_hex(child.hash)} "
                    f"to have {encode_hex(parent.hash)} as parent "
                    f"but was {encode_hex(child.parent_hash)}"
                )

            curr_chain_head = child
            db.set(
                curr_chain_head.hash,
                rlp.encode(curr_chain_head),
            )

            score = cls._set_hash_scores_to_db(db, curr_chain_head, score)

        try:
            previous_canonical_head = cls._get_canonical_head_hash(db)
            head_score = cls._get_score(db, previous_canonical_head)
        except CanonicalHeadNotFound:
            return cls._set_as_canonical_chain_head(db, curr_chain_head, genesis_parent_hash)

        if score > head_score:
            return cls._set_as_canonical_chain_head(db, curr_chain_head, genesis_parent_hash)

        return tuple(), tuple()
Example No. 11
def _find_breakpoints(*values: int) -> Iterable[int]:
    yield 0
    for index, (left, right) in enumerate(sliding_window(2, values), 1):
        if left + 1 == right:
            continue
        else:
            yield index
    yield len(values)
Example No. 12
def _load_state_machines(
    sm_configuration: StateMachineConfiguration
) -> Iterable[Tuple[Container[int], MedallaStateMachineFast]]:
    sm_configuration += ((FAR_FUTURE_SLOT, None), )
    for (first_fork, second_fork) in toolz.sliding_window(2, sm_configuration):
        valid_range = range(first_fork[0], second_fork[0])
        valid_sm = first_fork[1]()
        yield (valid_range, valid_sm)
Example No. 13
def _load_state_machines(
    sm_configuration: StateMachineConfiguration, chain_db: BaseBeaconChainDB
) -> Iterable[Tuple[Container[int], "BaseBeaconStateMachine"]]:
    sm_configuration += ((FAR_FUTURE_SLOT, None),)
    for (first_fork, second_fork) in toolz.sliding_window(2, sm_configuration):
        valid_range = range(first_fork[0], second_fork[0])
        valid_sm = first_fork[1](chain_db)
        yield (valid_range, valid_sm)
Example No. 14
def test_enr_partitioning(sizes, max_size):
    partitions = partition_enr_indices_by_size(sizes, max_size)
    indices = [index for partition in partitions for index in partition]
    dropped_indices = tuple(index for index, size in enumerate(sizes)
                            if size > max_size)

    assert len(indices) == len(set(indices))
    assert set(indices) == set(range(len(sizes))) - set(dropped_indices)
    assert all(index1 < index2
               for index1, index2 in sliding_window(2, indices))

    partitioned_sizes = tuple(
        tuple(sizes[index] for index in partition) for partition in partitions)
    assert all(sum(partition) <= max_size for partition in partitioned_sizes)
    assert all(
        sum(partition) + next_partition[0] > max_size
        for partition, next_partition in sliding_window(2, partitioned_sizes))
Example No. 15
def ensure_branch(block_dicts):
    """make sure we really have a branch, i.e. each block is the parent block
    of the following block

    raises ValueError if block_dicts is not a branch.
    """
    for parent, child in sliding_window(2, block_dicts):
        if child.parentHash != parent.hash:
            raise ValueError("Given branch is not connected")
Example No. 16
def partition_advertisements(
    advertisements: Sequence[Advertisement],
    max_payload_size: int,
) -> Iterable[Tuple[Advertisement, ...]]:
    encoded_sizes = tuple(
        len(advertisement.content_key) + ADVERTISEMENT_FIXED_SIZE
        for advertisement in advertisements)
    partition_indices = _get_partition_indices(encoded_sizes, max_payload_size)
    for left, right in sliding_window(2, partition_indices):
        yield tuple(advertisements[left:right])
Example No. 17
def _get_partition_indices(encoded_sizes: Sequence[int],
                           max_payload_size: int) -> Iterable[int]:
    cumulative_sizes = accumulate(operator.add, encoded_sizes)
    offset = 0
    yield 0
    for idx, (last_size,
              size) in enumerate(sliding_window(2, cons(0, cumulative_sizes))):
        if size - offset > max_payload_size:
            offset = last_size
            yield idx
    yield len(encoded_sizes)
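A worked example makes the offset bookkeeping clearer. Assuming `_get_partition_indices` above is in scope, with made-up element sizes and a payload limit of 7, the yielded indices mark where each partition starts, plus one index past the end:

# Cumulative sizes of (3, 4, 2, 5) are (3, 7, 9, 14); a break is emitted
# whenever the running total since the last break would exceed 7.
assert tuple(_get_partition_indices((3, 4, 2, 5), max_payload_size=7)) == (0, 2, 4)
# Slicing with adjacent index pairs gives partitions (3, 4) and (2, 5).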
Example No. 18
def first_nonconsecutive_header(headers: Sequence[BlockHeader]) -> int:
    """
    :return: index of first child that does not match parent header, or a number
        past the end if all are consecutive
    """
    for index, (parent, child) in enumerate(sliding_window(2, headers)):
        if child.parent_hash != parent.hash:
            return index + 1

    # return an index off the end to indicate that all headers are consecutive
    return len(headers)
Example No. 19
    def _validate_sequence(self, blocks: Tuple[BaseBeaconBlock, ...]) -> None:
        # workaround for https://github.com/pytoolz/cytoolz/issues/123#issuecomment-432905716
        if not blocks:
            return

        for parent, child in sliding_window(2, blocks):
            # check that the received blocks form a sequence of descendants connected by parent
            # hashes, starting with the oldest ancestor
            if child.parent_root != parent.signing_root:
                raise ValidationError(
                    "Returned blocks are not a connected branch")
Example No. 20
def _extract_integer_ranges(*values: int) -> Iterable[Tuple[int, int]]:
    """
    Take a sequence of integers which is expected to be ordered and return the
    most concise definition of the sequence in terms of integer ranges.

    - fn(1, 2, 3) -> ((1, 3),)
    - fn(1, 2, 3, 7, 8, 9) -> ((1, 3), (7, 9))
    - fn(1, 7, 8, 9) -> ((1, 1), (7, 9))
    """
    for left, right in sliding_window(2, _find_breakpoints(*values)):
        chunk = values[left:right]
        yield chunk[0], chunk[-1]
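Assuming `_find_breakpoints` from Example No. 11 is in scope, the two helpers compose exactly as the docstring's examples describe:

values = (1, 2, 3, 7, 8, 9)
assert tuple(_find_breakpoints(*values)) == (0, 3, 6)
assert tuple(_extract_integer_ranges(*values)) == ((1, 3), (7, 9))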
Example No. 21
def _validate_gap_invariants(gaps):
    # 1. gaps are sorted
    for low, high in gaps[0]:
        assert high >= low, gaps

    # 2. gaps are not overlapping
    for low_range, high_range in sliding_window(2, gaps[0]):
        # the top of the low range must not be sequential with the bottom of the high range
        assert low_range[1] + 1 < high_range[0], gaps

    # 3. final gap does not overlap with the tail
    if len(gaps[0]):
        final_gap_range = gaps[0][-1]
        assert final_gap_range[1] + 1 < gaps[1], gaps
Example No. 22
    def deserialize_variable_size_parts(self,
                                        offset_pairs: Tuple[Tuple[int, TSedes], ...],
                                        stream: IO[bytes]) -> Iterable[Any]:
        offsets, fields = zip(*offset_pairs)

        *head_fields, last_field = fields
        for sedes, (left_offset, right_offset) in zip(head_fields, sliding_window(2, offsets)):
            field_length = right_offset - left_offset
            field_data = read_exact(field_length, stream)
            yield sedes.deserialize(field_data)

        # simply reading to the end of the current stream gives us all of the final element data
        final_field_data = stream.read()
        yield last_field.deserialize(final_field_data)
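The offset arithmetic is easiest to see with concrete numbers: adjacent offsets differ by one field's byte length, and the last field simply runs to the end of the stream. A small sketch with hypothetical offsets:

from toolz import sliding_window

offsets = (8, 12, 20)
lengths = [right - left for left, right in sliding_window(2, offsets)]
assert lengths == [4, 8]
# The final field's length is whatever bytes remain after offset 20.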
Example No. 23
def filter_overlapping_paths(*paths: TreePath) -> Iterable[TreePath]:
    """
    Filter out any paths that are a prefix of another path.
    """
    if not paths:
        return
    sorted_paths = sorted(paths)
    for left, right in sliding_window(2, sorted_paths):
        if right[:len(left)] == left:
            continue
        else:
            yield left

    # Because of the use of `sliding_window` we need to manually yield the last
    # path
    yield sorted_paths[-1]
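A short check of the prefix-filtering behavior, assuming `filter_overlapping_paths` above is in scope and paths are plain tuples:

paths = ((0,), (0, 1), (1, 0))
# (0,) is a prefix of (0, 1), so it is dropped; the rest come out in sorted order.
assert tuple(filter_overlapping_paths(*paths)) == ((0, 1), (1, 0))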
Example No. 24
def merklize_elements(elements: Sequence[ProofElement]) -> Hash32:
    """
    Given a set of `ProofElement` objects, compute the `hash_tree_root`.

    This also verifies that the proof is both "well-formed" and "minimal".
    """
    elements_by_depth = groupby(operator.attrgetter("depth"), elements)
    max_depth = max(elements_by_depth.keys())

    for depth in range(max_depth, 0, -1):
        try:
            elements_at_depth = sorted(elements_by_depth.pop(depth))
        except KeyError:
            continue

        # Verify that all of the paths at this level are unique
        paths = set(el.path for el in elements_at_depth)
        if len(paths) != len(elements_at_depth):
            raise BrokenTree(
                f"Duplicate paths detected: depth={depth}  elements={elements_at_depth}"
            )

        sibling_pairs = tuple(
            (left, right)
            for left, right in sliding_window(2, elements_at_depth)
            if left.path[:-1] == right.path[:-1])

        # Check to see if any of the elements didn't have a sibling which
        # indicates either a missing sibling, or a duplicate node.
        orphans = set(elements_at_depth).difference(
            itertools.chain(*sibling_pairs))
        if orphans:
            raise BrokenTree(
                f"Orphaned tree elements: dept={depth} orphans={orphans}")

        parents = tuple(
            ProofElement(path=left.path[:-1],
                         value=hash_eth2(left.value + right.value))
            for left, right in sibling_pairs)

        if not elements_by_depth and len(parents) == 1:
            return parents[0].value
        else:
            elements_by_depth.setdefault(depth - 1, [])
            elements_by_depth[depth - 1].extend(parents)
    else:
        raise BrokenTree("Unable to fully collapse tree within 32 rounds")
Example No. 25
def test_enr_partitioning(num_enr_records, max_payload_size):
    enrs = ENRFactory.create_batch(num_enr_records)
    batches = partition_enrs(enrs, max_payload_size)

    assert sum(len(batch) for batch in batches) == len(enrs)
    assert set(itertools.chain(*batches)) == set(enrs)

    for batch in batches:
        encoded_batch = rlp.encode(batch,
                                   sedes=rlp.sedes.CountableList(ENRSedes))
        assert len(encoded_batch) <= max_payload_size

    for batch, next_batch in sliding_window(2, batches):
        overfull_batch = tuple(batch) + (next_batch[0], )
        encoded_batch = rlp.encode(overfull_batch,
                                   sedes=rlp.sedes.CountableList(ENRSedes))
        assert len(encoded_batch) > max_payload_size
Example No. 26
    def _deserialize_stream_to_tuple(
            self, stream: IO[bytes]) -> Iterable[TDeserialized]:
        if self.element_sedes.is_fixed_sized:
            element_size = self.element_sedes.get_fixed_size()
            data = stream.read()
            if len(data) % element_size != 0:
                raise DeserializationError(
                    f"Invalid max_length. List is comprised of a fixed size sedes "
                    f"but total serialized data is not an even multiple of the "
                    f"element size. data max_length: {len(data)}  element size: "
                    f"{element_size}")
            for start_idx in range(0, len(data), element_size):
                segment = data[start_idx:start_idx + element_size]
                yield self.element_sedes.deserialize(segment)
        else:
            stream_zero_loc = stream.tell()
            try:
                first_offset = s_decode_offset(stream)
            except DeserializationError:
                if stream.tell() == stream_zero_loc:
                    # Empty list
                    return
                else:
                    raise

            num_remaining_offset_bytes = first_offset - stream.tell()
            if num_remaining_offset_bytes % OFFSET_SIZE != 0:
                raise DeserializationError(
                    f"Offset bytes was not a multiple of {OFFSET_SIZE}.  Got "
                    f"{num_remaining_offset_bytes}")

            num_remaining_offsets = num_remaining_offset_bytes // OFFSET_SIZE
            tail_offsets = tuple(
                s_decode_offset(stream) for _ in range(num_remaining_offsets))

            offsets = tuple(cons(first_offset, tail_offsets))

            for left_offset, right_offset in sliding_window(2, offsets):
                element_length = right_offset - left_offset
                element_data = read_exact(element_length, stream)
                yield self.element_sedes.deserialize(element_data)

            # simply reading to the end of the current stream gives us all of the final element data
            final_element_data = stream.read()
            yield self.element_sedes.deserialize(final_element_data)
Example No. 27
def _compute_gas_price(probabilities: Sequence[Probability],
                       desired_probability: float) -> Wei:
    """
    Given a sorted range of ``Probability`` named-tuples returns a gas price
    computed based on where the ``desired_probability`` would fall within the
    range.

    :param probabilities: An iterable of `Probability` named-tuples sorted in reverse order.
    :param desired_probability: A floating-point representation of the desired
        probability. (e.g. ``85% -> 0.85``)
    """
    first = probabilities[0]
    last = probabilities[-1]

    if desired_probability >= first.prob:
        return Wei(int(first.gas_price))
    elif desired_probability <= last.prob:
        return Wei(int(last.gas_price))

    for left, right in sliding_window(2, probabilities):
        if desired_probability < right.prob:
            continue
        elif desired_probability > left.prob:
            # This code block should never be reachable as it would indicate
            # that we already passed by the probability window in which our
            # `desired_probability` is located.
            raise Exception('Invariant')

        adj_prob = desired_probability - right.prob
        window_size = left.prob - right.prob
        position = adj_prob / window_size
        gas_window_size = left.gas_price - right.gas_price
        gas_price = int(math.ceil(right.gas_price +
                                  gas_window_size * position))
        return Wei(gas_price)
    else:
        # The initial `if/else` clause in this function handles the case where
        # the `desired_probability` is either above or below the min/max
        # probability found in the `probabilities`.
        #
        # With these two cases handled, the only way this code block should be
        # reachable would be if the `probabilities` were not sorted correctly.
        # Otherwise, the `desired_probability` **must** fall between two of the
        # values in the `probabilities`.
        raise Exception('Invariant')
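A worked sketch of the interpolation, using a stand-in `Probability` named-tuple (field names assumed for illustration) and made-up numbers, with `_compute_gas_price` above in scope:

from collections import namedtuple

# Hypothetical stand-in for the Probability named-tuple used above.
Probability = namedtuple("Probability", ["gas_price", "prob"])

probabilities = [
    Probability(gas_price=10, prob=0.99),
    Probability(gas_price=5, prob=0.50),
    Probability(gas_price=1, prob=0.01),
]

# 0.85 falls in the (0.99, 0.50) window:
#   position = (0.85 - 0.50) / (0.99 - 0.50) ~= 0.71
#   gas_price = ceil(5 + (10 - 5) * 0.71) = 9
assert _compute_gas_price(probabilities, 0.85) == 9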
Example No. 28
def test_advertisement_partitioning(content_keys):
    advertisements = tuple(
        AdvertisementFactory(
            content_key=content_key, signature=keys.Signature(b"\x00" * 65)
        )
        for content_key in content_keys
    )
    batches = partition_advertisements(advertisements, 512)

    for batch in batches:
        ssz_payload = tuple(ad.to_sedes_payload() for ad in batch)
        encoded = ssz.encode(ssz_payload, sedes=AdvertiseSedes)
        assert len(encoded) <= 512

    for left, right in sliding_window(2, batches):
        ssz_payload = tuple(ad.to_sedes_payload() for ad in left + (right[0],))
        encoded = ssz.encode(ssz_payload, sedes=AdvertiseSedes)
        assert len(encoded) > 512
Example No. 29
def get_validator_definition_ranges(
        validator_definition: Dict) -> Sequence[ValidatorDefinitionRange]:
    validate_validator_definition(validator_definition)

    sorted_definition = sorted(
        # Lambda tuple destructuring has been removed from Python 3 (https://www.python.org/dev/peps/pep-3113/) :-(
        validator_definition["multi"].items(),
        key=lambda item: int(item[0]),
    )

    result = []

    # Iterate over all configurations. Add an extra empty item for the sliding window to slide to the very end.
    # Alternatively we'd have to do some additional processing which would further complicate the code
    for (range_height, range_config), (next_range_height, _) in sliding_window(
            2, chain(sorted_definition, [[None, None]])):
        [(config_type, config_data)] = range_config.items()
        validators: Optional[List[bytes]] = None
        if config_type == "list":
            is_contract = False
            validators = [
                to_canonical_address(validator_address)
                for validator_address in config_data
            ]
            contract_address = None
        elif config_type in ["contract", "safeContract"]:
            is_contract = True
            validators = None
            contract_address = to_canonical_address(config_data)
        else:
            assert False, "Unreachable. Invalid config type."

        result.append(
            ValidatorDefinitionRange(
                enter_height=int(range_height),
                leave_height=int(next_range_height)
                if next_range_height is not None else None,
                is_contract=is_contract,
                validators=validators,
                contract_address=contract_address,
            ))

    return result
Example No. 30
    def _deserialize_stream_to_tuple(
            self, stream: IO[bytes]) -> Iterable[TDeserializedElement]:
        if self.element_sedes.is_fixed_sized:
            element_size = self.element_sedes.get_fixed_size()
            for _ in range(self.length):
                element_data = read_exact(element_size, stream)
                yield self.element_sedes.deserialize(element_data)
        else:
            offsets = tuple(
                s_decode_offset(stream) for _ in range(self.length))

            for left_offset, right_offset in sliding_window(2, offsets):
                element_length = right_offset - left_offset
                element_data = read_exact(element_length, stream)
                yield self.element_sedes.deserialize(element_data)

            # simply reading to the end of the current stream gives us all of the final element data
            final_element_data = stream.read()
            yield self.element_sedes.deserialize(final_element_data)