The examples below are real-world uses of first() (from toolz/cytoolz, often imported via eth_utils.toolz), which returns the first element of an iterable and raises StopIteration when the iterable is empty.

Code example #1
File: explorer.py  Project: njgheorghita/ddht
    async def run(self) -> None:
        self._start_at = trio.current_time()

        self.manager.run_task(self._source_initial_nodes)

        for worker_id in range(self._concurrency):
            self.manager.run_daemon_task(self._worker, worker_id)

        async with self._send_channel:
            self._ready.set()

            # First wait for the RFN to be complete.
            await self._exploration_seeded.wait()

            while self.manager.is_running:
                # TODO: stop-gap to ensure we don't deadlock
                with trio.move_on_after(60) as scope:
                    async with self._condition:

                        try:
                            first(self._get_nodes_for_exploration())
                        except StopIteration:
                            if not self.in_flight:
                                break

                        await self._condition.wait()

                if scope.cancelled_caught:
                    self.logger.error("Deadlocked")

        self.logger.debug("%s[final]: %s", self, self.get_stats())
        self.manager.cancel()
Code example #2
File: list.py  Project: cburgdorf/py-ssz
def _validate_emptiness(value: Iterable[TSerializable]) -> None:
    try:
        first(value)
    except StopIteration:
        pass
    else:
        raise SerializationError("Can only serialize empty Iterables")
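This emptiness check works because first() consumes at most one element and raises StopIteration on an empty iterable. A minimal self-contained sketch of the same idiom, assuming only the toolz package:

from typing import Iterable

from toolz import first  # the projects above typically get this via eth_utils.toolz

def is_empty(value: Iterable) -> bool:
    # first() consumes at most one element; on an empty iterable the
    # underlying next() call raises StopIteration.
    try:
        first(value)
    except StopIteration:
        return True
    else:
        return False

assert is_empty(())
assert not is_empty([1, 2, 3])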
Code example #3
def test_laziness():
    def crash_after_first_val():
        yield 1
        raise Exception("oops, iterated past first value")

    repeated_use = CachedIterable(crash_after_first_val())
    assert first(repeated_use) == 1
    assert first(repeated_use) == 1
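The test passes because first() pulls exactly one element and stops; the generator is never advanced as far as the raise statement. A sketch of that laziness property alone, without the project's CachedIterable wrapper:

from toolz import first

def crash_after_first_val():
    yield 1
    raise RuntimeError("iterated past first value")

gen = crash_after_first_val()
assert first(gen) == 1  # a single next() call; the RuntimeError is never reached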
Code example #4
def test_randao_processing_validates_randao_reveal(sample_beacon_block_params,
                                                   sample_beacon_state_params,
                                                   sample_fork_params, keymap,
                                                   config):
    proposer_pubkey, proposer_privkey = first(keymap.items())
    state = SerenityBeaconState(**sample_beacon_state_params).copy(
        validator_registry=tuple(
            mock_validator_record(proposer_pubkey)
            for _ in range(config.TARGET_COMMITTEE_SIZE)),
        validator_balances=(config.MAX_DEPOSIT_AMOUNT, ) *
        config.TARGET_COMMITTEE_SIZE,
        latest_randao_mixes=tuple(
            ZERO_HASH32 for _ in range(config.LATEST_RANDAO_MIXES_LENGTH)),
    )

    epoch = state.current_epoch(config.EPOCH_LENGTH)
    slot = epoch * config.EPOCH_LENGTH
    message = (epoch + 1).to_bytes(32, byteorder="big")
    fork = Fork(**sample_fork_params)
    domain = get_domain(fork, slot, SignatureDomain.DOMAIN_RANDAO)
    randao_reveal = bls.sign(message, proposer_privkey, domain)

    block = SerenityBeaconBlock(**sample_beacon_block_params).copy(
        randao_reveal=randao_reveal, )

    with pytest.raises(ValidationError):
        process_randao(state, block, config)
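first(keymap.items()) above takes the first (pubkey, privkey) pair from the fixture's insertion-ordered mapping, equivalent to next(iter(keymap.items())). A sketch with a stand-in mapping:

from toolz import first

keymap = {b"pub-1": b"priv-1", b"pub-2": b"priv-2"}  # stand-in for the keymap fixture
pubkey, privkey = first(keymap.items())
assert (pubkey, privkey) == (b"pub-1", b"priv-1")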
Code example #5
File: asyncio_utils.py  Project: gengmoqi/trinity
async def wait_first(tasks: Sequence[asyncio.Task[Any]]) -> None:
    """
    Wait for the first of the given tasks to complete, then cancel all others.

    If the completed task raised an exception, re-raise it.

    If the task running us is cancelled, all tasks will be cancelled.
    """
    for task in tasks:
        if not isinstance(task, asyncio.Task):
            raise ValueError(f"{task} is not an asyncio.Task")

    try:
        done, pending = await asyncio.wait(tasks,
                                           return_when=asyncio.FIRST_COMPLETED)
    except asyncio.CancelledError:
        await cancel_tasks(tasks)
        raise
    else:
        if pending:
            await cancel_tasks(cast(Set['asyncio.Task[Any]'], pending))
        if len(done) != 1:
            raise Exception(
                "Invariant: asyncio.wait() returned more than one task even "
                "though we used return_when=asyncio.FIRST_COMPLETED: %s", done)
        done_task = first(done)
        if done_task.exception():
            raise done_task.exception()
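first(done) is the idiomatic way to pull "the" element out of a one-element set, which supports no indexing; it is only deterministic because the invariant check above guarantees len(done) == 1. A sketch:

from toolz import first

done = {"finished-task"}  # e.g. the `done` set returned by asyncio.wait()
assert len(done) == 1     # sets are unordered, so first() is only safe on singletons
assert first(done) == "finished-task"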
Code example #6
File: block_processing.py  Project: berand/trinity
def process_eth1_data(state: BeaconState,
                      block: BaseBeaconBlock) -> BeaconState:
    try:
        vote_index, original_vote = first(
            (index, eth1_data_vote)
            for index, eth1_data_vote in enumerate(state.eth1_data_votes)
            if block.eth1_data == eth1_data_vote.eth1_data
        )
    except StopIteration:
        new_vote = Eth1DataVote(
            eth1_data=block.eth1_data,
            vote_count=1,
        )
        state = state.copy(
            eth1_data_votes=state.eth1_data_votes + (new_vote,)
        )
    else:
        updated_vote = original_vote.copy(
            vote_count=original_vote.vote_count + 1
        )
        state = state.copy(
            eth1_data_votes=update_tuple_item(state.eth1_data_votes, vote_index, updated_vote)
        )

    return state
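Here first() doubles as a find-first-match: the generator expression yields (index, vote) pairs, and first() returns the first hit or raises StopIteration, which the except branch turns into the "no existing vote" case. A self-contained sketch of the same pattern:

from toolz import first

votes = ["0xaa", "0xbb", "0xcc"]
target = "0xbb"

try:
    vote_index, original_vote = first(
        (index, vote)
        for index, vote in enumerate(votes)
        if vote == target
    )
except StopIteration:
    vote_index, original_vote = None, None  # no match: register a new vote instead

assert (vote_index, original_vote) == (1, "0xbb")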
Code example #7
    async def _enforce_total_size(self) -> None:
        if self.max_size is None:
            raise Exception("Invalid")

        while self.manager.is_running:
            while self.manager.is_running:
                await trio.lowlevel.checkpoint()
                total_size = self.content_storage.total_size()

                if total_size == 0:
                    break

                furthest_key = first(
                    self.content_storage.iter_furthest(
                        self._network.local_node_id))
                # TODO: we can actually read the size from the database so this
                # should probably be a new
                # `ContentStorageAPI.get_content_size(...)`.
                furthest_content = self.content_storage.get_content(
                    furthest_key)
                if total_size - len(furthest_content) <= self.max_size:
                    break

                self.logger.debug("Purging: content_key=%s",
                                  furthest_key.hex())
                self.content_storage.delete_content(furthest_key)

            await trio.sleep(30)
Code example #8
File: hashing.py  Project: kclowes/eth-account
def get_array_dimensions(data):
    """
    Given an array type data item, check that it is an array and return the dimensions as a tuple.

    Ex: get_array_dimensions([[1, 2, 3], [4, 5, 6]]) returns (2, 3)
    """
    depths_and_dimensions = get_depths_and_dimensions(data, 0)
    # re-form as a dictionary with `depth` as key, and all of the dimensions found at that depth.
    grouped_by_depth = {
        depth: tuple(dimension for depth, dimension in group)
        for depth, group in groupby(depths_and_dimensions, itemgetter(0))
    }

    # validate that there is only one dimension for any given depth.
    invalid_depths_dimensions = tuple(
        (depth, dimensions) for depth, dimensions in grouped_by_depth.items()
        if len(set(dimensions)) != 1)
    if invalid_depths_dimensions:
        raise ValidationError('\n'.join([
            "Depth {0} of array data has more than one dimensions: {1}".format(
                depth, dimensions)
            for depth, dimensions in invalid_depths_dimensions
        ]))

    dimensions = tuple(
        toolz.first(set(dimensions))
        for depth, dimensions in sorted(grouped_by_depth.items()))

    return dimensions
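After the validation guarantees each depth maps to exactly one distinct dimension, toolz.first(set(dimensions)) simply collapses the duplicates to that single value:

import toolz

dimensions_at_depth = (3, 3)  # both sublists at this depth had length 3
assert toolz.first(set(dimensions_at_depth)) == 3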
Code example #9
    def last_checkpoint(self) -> JournalDBCheckpoint:
        """
        Returns the latest checkpoint
        """
        # last() was iterating through all values, so first(reversed()) gives a 12.5x speedup
        # Interestingly, an attempt to cache this value caused a slowdown.
        return first(reversed(self._journal_data.keys()))
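first(reversed(...)) reads the newest key immediately rather than walking the whole journal. A sketch of why this works, assuming an insertion-ordered dict (reversed() is supported on plain dict views from Python 3.8):

from toolz import first

journal_data = {10: "a", 11: "b", 12: "c"}  # checkpoints in insertion order
# reversed() on a dict view starts from the most recently inserted key, so
# first() returns it without iterating through the older entries.
assert first(reversed(journal_data.keys())) == 12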
Code example #10
File: network.py  Project: pipermerriam/ddht
    async def _ping_oldest_routing_table_entry(self) -> None:
        await self._routing_table_ready.wait()

        while self.manager.is_running:
            # Here we preserve the lazy iteration while still checking that the
            # iterable is not empty before passing it into `min` below which
            # throws an ambiguous `ValueError` otherwise if the iterable is
            # empty.
            nodes_iter = self.routing_table.iter_all_random()
            try:
                first_node_id = first(nodes_iter)
            except StopIteration:
                await trio.sleep(ROUTING_TABLE_KEEP_ALIVE)
                continue
            else:
                least_recently_ponged_node_id = min(
                    cons(first_node_id, nodes_iter),
                    key=lambda node_id: self._last_pong_at.get(node_id, 0),
                )

            too_old_at = trio.current_time() - ROUTING_TABLE_KEEP_ALIVE
            try:
                last_pong_at = self._last_pong_at[
                    least_recently_ponged_node_id]
            except KeyError:
                pass
            else:
                if last_pong_at > too_old_at:
                    await trio.sleep(last_pong_at - too_old_at)
                    continue

            did_bond = await self.bond(least_recently_ponged_node_id)
            if not did_bond:
                self.routing_table.remove(least_recently_ponged_node_id)
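cons(first_node_id, nodes_iter) is a peek-and-reattach: first() proves the iterator is non-empty (raising StopIteration instead of letting min() raise an ambiguous ValueError), and cons() pushes the consumed element back so min() still sees the full sequence. A minimal sketch, assuming toolz:

from toolz import cons, first

def min_or_none(iterable, key):
    it = iter(iterable)
    try:
        head = first(it)  # peek: StopIteration here means the iterable was empty
    except StopIteration:
        return None
    return min(cons(head, it), key=key)  # re-attach the peeked element

assert min_or_none([3, 1, 2], key=lambda x: x) == 1
assert min_or_none([], key=lambda x: x) is None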
Code example #11
async def wait_first(futures: Sequence[asyncio.Future[None]]) -> None:
    """
    Wait for the first of the given futures to complete, then cancel all others.

    If the completed future raised an exception, re-raise it.

    If the task running us is cancelled, all futures will be cancelled.
    """
    for future in futures:
        if not isinstance(future, asyncio.Future):
            raise ValueError(f"{future} is not an asyncio.Future")

    try:
        done, pending = await asyncio.wait(futures, return_when=asyncio.FIRST_COMPLETED)
    except asyncio.CancelledError:
        await cancel_futures(futures)
        raise
    else:
        if pending:
            await cancel_futures(pending)
        if len(done) != 1:
            raise Exception(
                "Invariant: asyncio.wait() returned more than one future even "
                "though we used return_when=asyncio.FIRST_COMPLETED: %s", done)
        done_future = first(done)
        if done_future.exception():
            raise done_future.exception()
Code example #12
def test_randao_processing_validates_randao_reveal(
    sample_beacon_block_params,
    sample_beacon_block_body_params,
    sample_beacon_state_params,
    sample_fork_params,
    keymap,
    config,
):
    proposer_pubkey, proposer_privkey = first(keymap.items())
    state = SerenityBeaconState.create(**sample_beacon_state_params).mset(
        "validators",
        tuple(
            create_mock_validator(proposer_pubkey, config)
            for _ in range(config.TARGET_COMMITTEE_SIZE)),
        "balances",
        (config.MAX_EFFECTIVE_BALANCE, ) * config.TARGET_COMMITTEE_SIZE,
        "randao_mixes",
        tuple(ZERO_HASH32 for _ in range(config.EPOCHS_PER_HISTORICAL_VECTOR)),
    )

    epoch = state.current_epoch(config.SLOTS_PER_EPOCH)
    message_hash = (epoch + 1).to_bytes(32, byteorder="little")
    domain = get_domain(state, SignatureDomain.DOMAIN_RANDAO,
                        config.SLOTS_PER_EPOCH)
    randao_reveal = bls.sign(message_hash, proposer_privkey, domain)

    block_body = BeaconBlockBody.create(**sample_beacon_block_body_params).set(
        "randao_reveal", randao_reveal)

    block = SerenityBeaconBlock.create(**sample_beacon_block_params).set(
        "body", block_body)

    with pytest.raises(ValidationError):
        process_randao(state, block, config)
Code example #13
def test_randao_processing(sample_beacon_block_params,
                           sample_beacon_state_params, sample_fork_params,
                           keymap, config):
    proposer_pubkey, proposer_privkey = first(keymap.items())
    state = SerenityBeaconState(**sample_beacon_state_params).copy(
        validator_registry=tuple(
            mock_validator_record(proposer_pubkey)
            for _ in range(config.TARGET_COMMITTEE_SIZE)),
        validator_balances=(config.MAX_DEPOSIT_AMOUNT, ) *
        config.TARGET_COMMITTEE_SIZE,
        latest_randao_mixes=tuple(
            ZERO_HASH32 for _ in range(config.LATEST_RANDAO_MIXES_LENGTH)),
    )

    epoch = state.current_epoch(config.EPOCH_LENGTH)
    slot = epoch * config.EPOCH_LENGTH
    message = epoch.to_bytes(32, byteorder="big")
    fork = Fork(**sample_fork_params)
    domain = get_domain(fork, slot, SignatureDomain.DOMAIN_RANDAO)
    randao_reveal = bls.sign(message, proposer_privkey, domain)

    block = SerenityBeaconBlock(**sample_beacon_block_params).copy(
        randao_reveal=randao_reveal, )

    new_state = process_randao(state, block, config)

    updated_index = epoch % config.LATEST_RANDAO_MIXES_LENGTH
    original_mixes = state.latest_randao_mixes
    updated_mixes = new_state.latest_randao_mixes

    assert all(
        updated == original if index != updated_index else updated != original
        for index, (updated,
                    original) in enumerate(zip(updated_mixes, original_mixes)))
Code example #14
File: header.py  Project: ArtObr/indy-scp
    def _persist_header_chain(
            cls,
            db: DatabaseAPI,
            headers: Iterable[BlockHeaderAPI],
            genesis_parent_hash: Hash32,
    ) -> Tuple[Tuple[BlockHeaderAPI, ...], Tuple[BlockHeaderAPI, ...]]:
        headers_iterator = iter(headers)

        try:
            first_header = first(headers_iterator)
        except StopIteration:
            return tuple(), tuple()

        is_genesis = first_header.parent_hash == genesis_parent_hash
        if not is_genesis and not cls._header_exists(db, first_header.parent_hash):
            raise ParentNotFound(
                f"Cannot persist block header ({encode_hex(first_header.hash)}) "
                f"with unknown parent ({encode_hex(first_header.parent_hash)})"
            )

        if is_genesis:
            score = 0
        else:
            score = cls._get_score(db, first_header.parent_hash)

        curr_chain_head = first_header
        db.set(
            curr_chain_head.hash,
            rlp.encode(curr_chain_head),
        )
        score = cls._set_hash_scores_to_db(db, curr_chain_head, score)

        orig_headers_seq = concat([(first_header,), headers_iterator])
        for parent, child in sliding_window(2, orig_headers_seq):
            if parent.hash != child.parent_hash:
                raise ValidationError(
                    f"Non-contiguous chain. Expected {encode_hex(child.hash)} "
                    f"to have {encode_hex(parent.hash)} as parent "
                    f"but was {encode_hex(child.parent_hash)}"
                )

            curr_chain_head = child
            db.set(
                curr_chain_head.hash,
                rlp.encode(curr_chain_head),
            )

            score = cls._set_hash_scores_to_db(db, curr_chain_head, score)

        try:
            previous_canonical_head = cls._get_canonical_head_hash(db)
            head_score = cls._get_score(db, previous_canonical_head)
        except CanonicalHeadNotFound:
            return cls._set_as_canonical_chain_head(db, curr_chain_head, genesis_parent_hash)

        if score > head_score:
            return cls._set_as_canonical_chain_head(db, curr_chain_head, genesis_parent_hash)

        return tuple(), tuple()
Code example #15
    def _persist_header_chain(
        cls, db: BaseDB, headers: Iterable[BlockHeader]
    ) -> Tuple[Tuple[BlockHeader, ...], Tuple[BlockHeader, ...]]:
        headers_iterator = iter(headers)

        try:
            first_header = first(headers_iterator)
        except StopIteration:
            return tuple(), tuple()

        is_genesis = first_header.parent_hash == GENESIS_PARENT_HASH
        if not is_genesis and not cls._header_exists(db,
                                                     first_header.parent_hash):
            raise ParentNotFound(
                "Cannot persist block header ({}) with unknown parent ({})".
                format(encode_hex(first_header.hash),
                       encode_hex(first_header.parent_hash)))

        if is_genesis:
            score = 0
        else:
            score = cls._get_score(db, first_header.parent_hash)

        curr_chain_head = first_header
        db.set(
            curr_chain_head.hash,
            rlp.encode(curr_chain_head),
        )
        score = cls._set_hash_scores_to_db(db, curr_chain_head, score)

        orig_headers_seq = concat([(first_header, ), headers_iterator])
        for parent, child in sliding_window(2, orig_headers_seq):
            if parent.hash != child.parent_hash:
                raise ValidationError(
                    "Non-contiguous chain. Expected {} to have {} as parent but was {}"
                    .format(
                        encode_hex(child.hash),
                        encode_hex(parent.hash),
                        encode_hex(child.parent_hash),
                    ))

            curr_chain_head = child
            db.set(
                curr_chain_head.hash,
                rlp.encode(curr_chain_head),
            )

            score = cls._set_hash_scores_to_db(db, curr_chain_head, score)

        try:
            previous_canonical_head = cls._get_canonical_head(db).hash
            head_score = cls._get_score(db, previous_canonical_head)
        except CanonicalHeadNotFound:
            return cls._set_as_canonical_chain_head(db, curr_chain_head.hash)

        if score > head_score:
            return cls._set_as_canonical_chain_head(db, curr_chain_head.hash)

        return tuple(), tuple()
Code example #16
    def content_radius(self) -> int:
        if self.is_full:
            furthest_key = first(
                self.content_storage.iter_furthest(
                    self._network.local_node_id))
            content_id = content_key_to_content_id(furthest_key)
            return compute_content_distance(self._network.local_node_id,
                                            content_id)
        else:
            return 2**256 - 1
Code example #17
    def _fetch_single_query(self, query: str, args: Tuple[Any, ...] = ()) -> Any:
        with self._conn:
            cursor = self._conn.execute(query, args).fetchall()
        if len(cursor) > 1:
            raise Exception(
                f"Invalid db state. More than one result found for query: {query}."
            )
        if not cursor:
            return None
        return first(cursor)
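Despite the variable name, `cursor` here holds the list of rows from fetchall(); first(cursor) unwraps the single row, and callers such as __delitem__ in the next example apply first() again to unwrap the row's single column. A sketch against an in-memory sqlite3 database:

import sqlite3

from toolz import first

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE cache (key TEXT)")
conn.execute("INSERT INTO cache VALUES ('abc')")

rows = conn.execute("SELECT key FROM cache WHERE key=?;", ("abc",)).fetchall()
row = first(rows)           # the single matching row (a one-element tuple)
assert first(row) == "abc"  # the single column of that row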
Code example #18
    def __delitem__(self, key: TKey) -> None:
        serialized_key = self._key_encoder(key)
        result = self._fetch_single_query(
            "SELECT key FROM cache WHERE key=?;", (serialized_key,),
        )

        if not result:
            raise KeyError(key)

        node_key = first(result)

        # delete key from cache
        self._execute(
            "DELETE FROM cache WHERE key=?;", (node_key,),
        )

        # update any nrefs/prefs in cache
        nref_result = self._fetch_single_query(
            "SELECT key FROM cache WHERE nref=?;", (node_key,),
        )
        pref_result = self._fetch_single_query(
            "SELECT key FROM cache WHERE pref=?;", (node_key,),
        )

        if nref_result and pref_result:
            nref_key = first(nref_result)
            pref_key = first(pref_result)
            self._execute(
                "UPDATE cache SET nref=? WHERE key=?;", (pref_key, nref_key),
            )
            self._execute(
                "UPDATE cache SET pref=? WHERE key=?;", (nref_key, pref_key),
            )
        elif nref_result:
            self._execute(
                "UPDATE cache SET nref=? WHERE key=?;", (None, first(nref_result)),
            )
        elif pref_result:
            self._execute(
                "UPDATE cache SET pref=? WHERE key=?;", (None, first(pref_result)),
            )
Code example #19
File: test_lmd_ghost.py  Project: voith/trinity
def _attach_committees_to_block_tree(state, block_tree, committees_by_slot,
                                     config, forking_asymmetry):
    for level, committees in zip(_iter_block_tree_by_slot(block_tree),
                                 committees_by_slot):
        block_count = len(level)
        partitions = partition(block_count, committees)
        for block, committee in zip(_iter_block_level_by_block(level),
                                    partitions):
            if forking_asymmetry:
                if random.choice([True, False]):
                    # random drop out
                    continue
            _attach_committee_to_block(block, first(committee))
Code example #20
def test_randao_processing(sample_beacon_block_params,
                           sample_beacon_block_body_params,
                           sample_beacon_state_params,
                           keymap,
                           config):
    proposer_pubkey, proposer_privkey = first(keymap.items())
    state = SerenityBeaconState(**sample_beacon_state_params).copy(
        validators=tuple(
            create_mock_validator(proposer_pubkey, config)
            for _ in range(config.TARGET_COMMITTEE_SIZE)
        ),
        balances=(config.MAX_EFFECTIVE_BALANCE,) * config.TARGET_COMMITTEE_SIZE,

        randao_mixes=tuple(
            ZERO_HASH32
            for _ in range(config.EPOCHS_PER_HISTORICAL_VECTOR)
        ),
    )

    epoch = state.current_epoch(config.SLOTS_PER_EPOCH)
    slot = get_epoch_start_slot(epoch, config.SLOTS_PER_EPOCH)

    randao_reveal = _generate_randao_reveal(
        privkey=proposer_privkey,
        slot=slot,
        state=state,
        config=config,
    )

    block_body = BeaconBlockBody(**sample_beacon_block_body_params).copy(
        randao_reveal=randao_reveal,
    )

    block = SerenityBeaconBlock(**sample_beacon_block_params).copy(
        body=block_body,
    )

    new_state = process_randao(state, block, config)

    updated_index = epoch % config.EPOCHS_PER_HISTORICAL_VECTOR
    original_mixes = state.randao_mixes
    updated_mixes = new_state.randao_mixes

    assert all(
        updated == original if index != updated_index else updated != original
        for index, (updated, original) in enumerate(zip(updated_mixes, original_mixes))
    )
Code example #21
def test_randao_processing(sample_beacon_block_params,
                           sample_beacon_block_body_params,
                           sample_beacon_state_params,
                           keymap,
                           config):
    proposer_pubkey, proposer_privkey = first(keymap.items())
    state = SerenityBeaconState(**sample_beacon_state_params).copy(
        validator_registry=tuple(
            mock_validator_record(proposer_pubkey, config)
            for _ in range(config.TARGET_COMMITTEE_SIZE)
        ),
        validator_balances=(config.MAX_DEPOSIT_AMOUNT,) * config.TARGET_COMMITTEE_SIZE,

        latest_randao_mixes=tuple(
            ZERO_HASH32
            for _ in range(config.LATEST_RANDAO_MIXES_LENGTH)
        ),
    )

    epoch = state.current_epoch(config.SLOTS_PER_EPOCH)
    slot = get_epoch_start_slot(epoch, config.SLOTS_PER_EPOCH)

    randao_reveal = _generate_randao_reveal(
        privkey=proposer_privkey,
        slot=slot,
        fork=state.fork,
        config=config,
    )

    block_body = BeaconBlockBody(**sample_beacon_block_body_params).copy(
        randao_reveal=randao_reveal,
    )

    block = SerenityBeaconBlock(**sample_beacon_block_params).copy(
        body=block_body,
    )

    new_state = process_randao(state, block, config)

    updated_index = epoch % config.LATEST_RANDAO_MIXES_LENGTH
    original_mixes = state.latest_randao_mixes
    updated_mixes = new_state.latest_randao_mixes

    assert all(
        updated == original if index != updated_index else updated != original
        for index, (updated, original) in enumerate(zip(updated_mixes, original_mixes))
    )
Code example #22
async def wait_first(tasks: Sequence[asyncio.Task[Any]],
                     max_wait_after_cancellation: float) -> None:
    """
    Wait for the first of the given tasks to complete, then cancel all others.

    If the completed task raised an exception, that is re-raised.

    If the task running us is cancelled, all tasks will be cancelled, in no specific order.

    If we get an exception from any of the cancelled tasks, they are re-raised as a
    trio.MultiError, which will include the exception from the completed task (if any) in their
    context.

    If the cancelled tasks don't return in max_wait_after_cancellation seconds, a TimeoutError
    will be raised.
    """
    for task in tasks:
        if not isinstance(task, asyncio.Task):
            raise ValueError(f"{task} is not an asyncio.Task")

    logger = get_logger('p2p.asyncio_utils.wait_first')
    async with cancel_pending_tasks(*tasks,
                                    timeout=max_wait_after_cancellation):
        try:
            done, pending = await asyncio.wait(
                tasks, return_when=asyncio.FIRST_COMPLETED)
        except (KeyboardInterrupt, asyncio.CancelledError) as err:
            logger.debug("Got %r waiting for %s, cancelling them all", err,
                         tasks)
            raise
        except BaseException:
            logger.exception(
                "Unexpected error waiting for %s, cancelling them all", tasks)
            raise
        else:
            logger.debug("Task %s finished, cancelling pending ones: %s", done,
                         pending)
            if len(done) != 1:
                raise Exception(
                    "Invariant: asyncio.wait() returned more than one task even "
                    "though we used return_when=asyncio.FIRST_COMPLETED: %s",
                    done)
            done_task = first(done)
            if done_task.exception():
                raise done_task.exception()
Code example #23
    def _get_or_create_advertisement(self, content_key: ContentKey,
                                     hash_tree_root: Hash32) -> Advertisement:
        try:
            advertisement = first(
                self._local_advertisement_db.query(
                    content_key=content_key,
                    node_id=self._network.local_node_id,
                    hash_tree_root=hash_tree_root,
                ))
        except StopIteration:
            advertisement = Advertisement.create(
                content_key=content_key,
                hash_tree_root=hash_tree_root,
                private_key=self._network.client.local_private_key,
            )
            self._local_advertisement_db.add(advertisement)

        return advertisement  # type: ignore
Code example #24
    async def select_sync_peer(self) -> BCCPeer:
        if len(self.peer_pool) == 0:
            raise ValidationError("Not connected to anyone")

        # choose the peer with the highest head slot
        peers = cast(Iterable[BCCPeer],
                     self.peer_pool.connected_nodes.values())
        sorted_peers = sorted(peers,
                              key=operator.attrgetter("head_slot"),
                              reverse=True)
        best_peer = first(sorted_peers)

        finalized_head = await self.chain_db.coro_get_finalized_head(
            BeaconBlock)
        if best_peer.head_slot <= finalized_head.slot:
            raise ValidationError("No peer that is ahead of us")

        return best_peer
Code example #25
    def advertisement_radius(self) -> int:
        if self.max_advertisement_count is None:
            return MAX_RADIUS

        advertisement_count = self.advertisement_db.count()

        if advertisement_count < self.max_advertisement_count:
            return MAX_RADIUS

        try:
            furthest_advertisement = first(
                self.advertisement_db.furthest(self._network.local_node_id))
        except StopIteration:
            return MAX_RADIUS
        else:
            return compute_content_distance(
                self._network.local_node_id,
                furthest_advertisement.content_id,
            )
Code example #26
    async def select_sync_peer(self) -> BCCPeer:
        if len(self.peer_pool) == 0:
            raise ValidationError("Not connected to anyone")

        # choose the peer with the highest head slot
        peers = cast(Iterable[BCCPeer], self.peer_pool.connected_nodes.values())
        sorted_peers = sorted(peers, key=operator.attrgetter("head_slot"), reverse=True)
        best_peer = first(sorted_peers)

        try:
            finalized_head = await self.chain_db.coro_get_finalized_head(BeaconBlock)
        # TODO(ralexstokes) look at better way to handle once we have fork choice in place
        except FinalizedHeadNotFound:
            return best_peer

        if best_peer.head_slot <= finalized_head.slot:
            raise ValidationError("No peer that is ahead of us")

        return best_peer
Code example #27
File: explorer.py  Project: njgheorghita/ddht
    async def _worker(self, worker_id: int) -> None:
        """
        Work through the unqueried nodes to explore each of their neighborhoods
        in the network.
        """
        for round in itertools.count():
            async with self._condition:
                try:
                    node_id, radius = first(self._get_nodes_for_exploration())
                except StopIteration:
                    await self._condition.wait()
                    continue

            with self._mark_in_flight(node_id):
                self.queried.add(node_id)

                # Some of the node ids may have come from our routing table.
                # These won't be present in the `received_node_ids` so we
                # detect this here and send them over the channel.
                if node_id not in self.seen:
                    enr = self._network.enr_db.get_enr(node_id)
                    self.seen.add(node_id)

                    try:
                        await self._send_channel.send(enr)
                    except (trio.BrokenResourceError,
                            trio.ClosedResourceError):
                        # In the event that the exploration exits early before
                        # the lookup has completed we can end up operating on a
                        # closed channel.
                        return

                await self._explore(node_id, radius)

            # we need to trigger the condition here so that our "done" check
            # will wake up once we query our last node and see that there are
            # no more nodes in flight or left to query.
            async with self._condition:
                self._condition.notify_all()
Code example #28
def pack(serialized_values: Sequence[bytes]) -> Tuple[Hash32, ...]:
    if len(serialized_values) == 0:
        return (EMPTY_CHUNK, )

    item_size = len(serialized_values[0])
    items_per_chunk = get_items_per_chunk(item_size)

    number_of_items = len(serialized_values)
    number_of_chunks = (number_of_items +
                        (items_per_chunk - 1)) // items_per_chunk

    chunk_partitions = partition(items_per_chunk, serialized_values, pad=b"")
    chunks_unpadded = (b"".join(chunk_partition)
                       for chunk_partition in chunk_partitions)

    full_chunks = tuple(
        Hash32(chunk) for chunk in take(number_of_chunks - 1, chunks_unpadded))
    last_chunk = first(chunks_unpadded)
    if len(tuple(chunks_unpadded)) > 0:
        raise Exception("Invariant: all chunks have been taken")

    return full_chunks + (Hash32(last_chunk.ljust(CHUNK_SIZE, b"\x00")), )
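take() and first() cooperate here because they drain the same generator: take(number_of_chunks - 1, ...) consumes all but the last chunk, first() pops the final one, and the trailing check asserts nothing is left. A sketch of those mechanics, assuming toolz:

from toolz import first, take

chunks_unpadded = (chunk for chunk in (b"aa", b"bb", b"cc"))

full_chunks = tuple(take(2, chunks_unpadded))  # drains all but the last chunk
last_chunk = first(chunks_unpadded)            # pops the final chunk
assert full_chunks == (b"aa", b"bb")
assert last_chunk == b"cc"
assert tuple(chunks_unpadded) == ()            # the shared generator is exhausted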
Code example #29
def _update_eth1_vote_if_exists(state: BeaconState,
                                config: Eth2Config) -> BeaconState:
    """
    This function searches the 'pending' Eth1 data votes in ``state`` to find one Eth1 data vote
    containing majority support.

    If such a vote is found, update the ``state`` entry for the latest vote.
    Regardless of the existence of such a vote, clear the 'pending' storage.
    """

    latest_eth1_data = state.latest_eth1_data

    try:
        majority_vote = first(
            filter(_is_majority_vote(config), state.eth1_data_votes))
        latest_eth1_data = majority_vote.eth1_data
    except StopIteration:
        pass

    return state.copy(
        latest_eth1_data=latest_eth1_data,
        eth1_data_votes=(),
    )
Code example #30
def get_test_name(filler: Dict[str, Any]) -> str:
    assert len(filler) == 1
    return first(filler)
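Iterating a mapping yields its keys, so with the length asserted to be 1, first(filler) is simply the test's single top-level key. A sketch with a hypothetical filler payload:

from toolz import first

filler = {"add11_d0g0v0_Frontier": {"env": {}, "pre": {}}}  # hypothetical filler
assert len(filler) == 1
assert first(filler) == "add11_d0g0v0_Frontier"  # iterating a dict yields its keys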