Code example #1
    async def do_run(self, event_bus: EndpointAPI) -> None:
        proxy_peer_pool = ETHProxyPeerPool(event_bus, TO_NETWORKING_BROADCAST_CONFIG)
        async with background_trio_service(proxy_peer_pool):
            service = NewBlockService(event_bus, proxy_peer_pool)
            async with background_trio_service(service) as manager:
                await manager.wait_finished()
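
For orientation, here is a minimal self-contained sketch of the pattern every example on this page shares: define a `Service` with an async `run` method, start it with `background_trio_service`, and interact with the returned manager. The `Ticker` service is hypothetical, not taken from the snippet above.

import trio
from async_service import Service, background_trio_service


class Ticker(Service):
    # hypothetical example service: prints three ticks, then finishes
    async def run(self) -> None:
        for i in range(3):
            print("tick", i)
            await trio.sleep(0.01)


async def main() -> None:
    # run the service in the background; the manager exposes lifecycle
    # helpers such as cancel() and wait_finished()
    async with background_trio_service(Ticker()) as manager:
        await manager.wait_finished()

trio.run(main)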
Code example #2
async def test_client_symmetric_connect():
    alice = ClientFactory()
    bob = ClientFactory()

    logger.info('ALICE: %s', alice.local_node)
    logger.info('BOB: %s', bob.local_node)

    async with background_trio_service(alice):
        # this triggers the creation of an outbound session but the packet
        # never gets to bob because his client isn't listening yet.
        await alice.send_ping(bob.local_node)

        # let the event loop run through a few checkpoints to ensure that the
        # packet has been sent.
        for _ in range(20):
            await trio.lowlevel.checkpoint()

        async with alice.events.handshake_complete.subscribe() as alice_handshake_complete:
            async with bob.events.handshake_complete.subscribe() as bob_handshake_complete:
                async with background_trio_service(bob):
                    await bob.send_ping(alice.local_node)

                    with trio.fail_after(1):
                        bob_session = await alice_handshake_complete.receive()
                        alice_session = await bob_handshake_complete.receive()

                    assert not bob_session.is_initiator
                    assert alice_session.is_initiator
Code example #3
File: full.py, Project: big-c-note/trinity-eth2
    async def _run_host(
            self,
            task_status: TaskStatus[None] = trio.TASK_STATUS_IGNORED) -> None:
        host = self._host
        listen_maddr = self._p2p_maddr
        try:
            # NOTE the following code relies on knowledge of the internals here...
            # We ideally want better encapsulation but it looks like it will
            # involve separating out the ``Service`` abstraction from the
            # various ``libp2p`` abstractions...
            async with host.run(listen_addrs=(listen_maddr, )):
                task_status.started()
                self.logger.info("peer listening at %s", listen_maddr)
                async with background_trio_service(host._gossiper.pubsub):
                    async with background_trio_service(
                            host._gossiper.gossipsub):
                        await self._connect_preferred_nodes()
                        await self._connect_bootstrap_nodes()

                        # NOTE: need to connect *some* peers first before
                        # subscribing to gossip
                        # FIXME: can likely move this inside ``host``.
                        await host.subscribe_gossip_channels()

                        await self._handle_gossip()

                        await host.unsubscribe_gossip_channels()
        except Exception as e:
            # TODO: likely want to catch exceptions in a more granular way
            # and restart the host...
            self.logger.exception(e)
Code example #4
    async def do_run(self, event_bus: EndpointAPI) -> None:
        if self._boot_info.args.enable_metrics:
            metrics_service = metrics_service_from_args(self._boot_info.args)
        else:
            metrics_service = NOOP_METRICS_SERVICE
        proxy_peer_pool = ETHProxyPeerPool(event_bus, TO_NETWORKING_BROADCAST_CONFIG)
        async with background_trio_service(proxy_peer_pool):
            async with background_trio_service(metrics_service):
                service = NewBlockService(
                    event_bus, proxy_peer_pool, metrics_service.registry, self._boot_info)
                async with background_trio_service(service) as manager:
                    await manager.wait_finished()
Code example #5
File: conftest.py, Project: matt783/alexandria
async def alice_and_bob_clients():
    alice = ClientFactory()
    bob = ClientFactory()

    logger.info('ALICE: %s', alice.local_node)
    logger.info('BOB: %s', bob.local_node)

    async with alice.events.listening.subscribe() as alice_listening:
        async with bob.events.listening.subscribe() as bob_listening:
            async with background_trio_service(bob), background_trio_service(
                    alice):
                await alice_listening.receive()
                await bob_listening.receive()

                yield alice, bob
Code example #6
async def test_trio_service_manager_run_task_can_still_cancel_after_run_finishes():
    task_event = trio.Event()
    service_finished = trio.Event()

    @as_service
    async def RunTaskService(manager):
        async def task_fn():
            # this will never complete
            await task_event.wait()

        manager.run_task(task_fn)
    # the task is set to run in the background, but then the service exits.
    # We want to be sure that the task is allowed to run to completion
    # unless explicitly cancelled.
        service_finished.set()

    async with background_trio_service(RunTaskService()) as manager:
        with trio.fail_after(0.01):
            await service_finished.wait()

        # show that the service hangs waiting for the task to complete.
        with trio.move_on_after(0.01) as cancel_scope:
            await manager.wait_finished()
        assert cancel_scope.cancelled_caught is True

        # trigger cancellation and see that the service actually stops
        manager.cancel()
        with trio.fail_after(0.01):
            await manager.wait_finished()
Code example #7
async def test_sub_service_cancelled_when_parent_stops():
    ready_cancel = trio.Event()

    # This test runs a service that runs a sub-service that sleeps forever. When the parent exits,
    # the sub-service should be cancelled as well.
    @as_service
    async def WaitForeverService(manager):
        ready_cancel.set()
        await manager.wait_finished()

    sub_manager = TrioManager(WaitForeverService())

    @as_service
    async def ServiceTest(manager):
        async def run_sub():
            await sub_manager.run()

        manager.run_task(run_sub)
        await manager.wait_finished()

    s = ServiceTest()
    async with background_trio_service(s) as manager:
        await ready_cancel.wait()

    assert not manager.is_running
    assert manager.is_cancelled
    assert manager.is_finished

    assert not sub_manager.is_running
    assert not sub_manager.is_cancelled
    assert sub_manager.is_finished
Code example #8
    async def recursive_find_content(
        self, content_key: ContentKey,
    ) -> AsyncIterator[trio.abc.ReceiveChannel[bytes]]:
        seeker = Seeker(self, content_key)

        async with background_trio_service(seeker):
            yield seeker.content_receive
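
The method above yields from inside the `background_trio_service` block, which only works when the method is wrapped as an async context manager (the `AsyncIterator` return annotation suggests `contextlib.asynccontextmanager`, though the decorator is not shown in the snippet). A self-contained sketch of that pattern, with a hypothetical background producer standing in for `Seeker`:

import trio
from contextlib import asynccontextmanager
from typing import AsyncIterator


@asynccontextmanager
async def open_counter() -> AsyncIterator[trio.abc.ReceiveChannel[int]]:
    # hypothetical stand-in for Seeker: run a producer in the background
    # and hand its receive channel to the caller for the `with` body
    send_channel, receive_channel = trio.open_memory_channel(0)

    async def produce() -> None:
        async with send_channel:
            for i in range(3):
                await send_channel.send(i)

    async with trio.open_nursery() as nursery:
        nursery.start_soon(produce)
        try:
            yield receive_channel
        finally:
            nursery.cancel_scope.cancel()


async def main() -> None:
    async with open_counter() as receive_channel:
        async for value in receive_channel:
            print(value)

trio.run(main)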
Code example #9
    async def do_run(self, event_bus: EndpointAPI) -> None:
        boot_info = self._boot_info
        metrics_service = metrics_service_from_args(boot_info.args)

        # types ignored due to https://github.com/ethereum/async-service/issues/5
        system_metrics_collector = collect_process_metrics(  # type: ignore
            metrics_service.registry,
            frequency_seconds=boot_info.args.metrics_system_collector_frequency,
        )

        # types ignored due to https://github.com/ethereum/async-service/issues/5
        blockchain_metrics_collector = collect_blockchain_metrics(  # type: ignore
            boot_info,
            event_bus,
            metrics_service.registry,
            frequency_seconds=boot_info.args.metrics_blockchain_collector_frequency,
        )

        services_to_exit = (
            metrics_service,
            system_metrics_collector,
            blockchain_metrics_collector,
        )

        async with contextlib.AsyncExitStack() as stack:
            managers = tuple([
                await stack.enter_async_context(background_trio_service(service))
                for service in services_to_exit
            ])
            await managers[0].wait_finished()
Code example #10
async def test_packet_decoder_error():
    datagram_send_channel, datagram_receive_channel = trio.open_memory_channel(1)
    packet_send_channel, packet_receive_channel = trio.open_memory_channel(1)

    service = PacketDecoder(datagram_receive_channel, packet_send_channel)
    async with background_trio_service(service):
        # send invalid packet
        await datagram_send_channel.send(
            InboundDatagram(
                datagram=b"not a valid packet",
                sender_endpoint=EndpointFactory(),
            ))

        # send valid packet
        packet = AuthTagPacketFactory()
        sender_endpoint = EndpointFactory()
        await datagram_send_channel.send(
            InboundDatagram(
                datagram=packet.to_wire_bytes(),
                sender_endpoint=sender_endpoint,
            ))

        # ignore the invalid one, only receive the valid one
        with trio.fail_after(0.5):
            inbound_packet = await packet_receive_channel.receive()

        assert inbound_packet.packet == packet
        assert inbound_packet.sender_endpoint.ip_address == sender_endpoint.ip_address
        assert inbound_packet.sender_endpoint.port == sender_endpoint.port
Code example #11
    async def do_run(cls, boot_info: BootInfo, event_bus: EndpointAPI) -> None:

        metrics_service = MetricsService(
            influx_server=boot_info.args.metrics_influx_server,
            influx_user=boot_info.args.metrics_influx_user,
            influx_password=boot_info.args.metrics_influx_password,
            influx_database=boot_info.args.metrics_influx_database,
            host=boot_info.args.metrics_host,
            reporting_frequency=boot_info.args.metrics_reporting_frequency,
        )

        # types ignored due to https://github.com/ethereum/async-service/issues/5
        system_metrics_collector = collect_process_metrics(  # type: ignore
            metrics_service.registry,
            frequency_seconds=boot_info.args.metrics_system_collector_frequency
        )

        services_to_exit = (metrics_service, system_metrics_collector,)

        async with AsyncExitStack() as stack:
            managers = tuple([
                await stack.enter_async_context(background_trio_service(service))
                for service in services_to_exit
            ])
            await managers[0].wait_finished()
Code example #12
async def test_content_provider_restricts_max_chunks(
    alice,
    bob,
    alice_alexandria_network,
    bob_alexandria_client,
):
    content = ContentFactory(length=1024 * 10)
    content_key = b"test-content-key"
    content_storage = MemoryContentStorage({content_key: content})
    proof = compute_proof(content, sedes=content_sedes)

    content_provider = ContentProvider(bob_alexandria_client,
                                       (content_storage, ),
                                       max_chunks_per_request=16)
    async with background_trio_service(content_provider):
        # this ensures that the subscription is in place.
        await content_provider.ready()

        with trio.fail_after(2):
            proof = await alice_alexandria_network.get_content_proof(
                bob.node_id,
                hash_tree_root=proof.get_hash_tree_root(),
                content_key=content_key,
                start_chunk_index=0,
                max_chunks=100,
                endpoint=bob.endpoint,
            )
            validate_proof(proof)
            num_leaf_elements = len(
                tuple(element for element in proof.elements
                      if len(element.path) == proof.path_bit_length))
            assert num_leaf_elements == 16
Code example #13
async def test_content_provider_serves_advertisements(
    alice,
    bob,
    alice_alexandria_network,
    bob_alexandria_client,
):
    content_key = b"test-content-key"
    advertisement_db = AdvertisementDatabase(sqlite3.connect(":memory:"))
    advertisements = tuple(
        AdvertisementFactory(content_key=content_key) for _ in range(5))
    for advertisement in advertisements:
        advertisement_db.add(advertisement)

    advertisement_provider = AdvertisementProvider(
        bob_alexandria_client,
        (advertisement_db, ),
    )
    async with background_trio_service(advertisement_provider):
        # this ensures that the subscription is in place.
        await advertisement_provider.ready()

        with trio.fail_after(2):
            result = await alice_alexandria_network.locate(
                bob.node_id,
                content_key=content_key,
            )
            assert set(result) == set(advertisements)
Code example #14
async def test_trio_manager_stats_does_not_count_main_run_method():
    ready = trio.Event()

    class StatsTest(Service):
        async def run(self):
            self.manager.run_task(trio.sleep_forever)
            ready.set()

    async with background_trio_service(StatsTest()) as manager:
        with trio.fail_after(1):
            await ready.wait()

        # we need to yield to the event loop a few times to allow the various
        # tasks to schedule themselves and get running.
        for _ in range(10):
            await trio.lowlevel.checkpoint()

        assert manager.stats.tasks.total_count == 1
        assert manager.stats.tasks.finished_count == 0
        assert manager.stats.tasks.pending_count == 1

    # now check after exiting
    assert manager.stats.tasks.total_count == 1
    assert manager.stats.tasks.finished_count == 1
    assert manager.stats.tasks.pending_count == 0
Code example #15
async def test_app_null_start_fixed_end_with_head_tracking(session, w3):
    await _load_block(session, w3, 0)
    await _load_block(session, w3, 1)
    await _load_block(session, w3, 2)

    await _load_block(session, w3, 8, is_detatched=True)
    await _load_block(session, w3, 9)

    app = Application(
        w3=w3,
        session=session,
        start_block=None,
        end_block=CHAIN_HEIGHT + 2,
        concurrency=1,
        ipc_path=None,
    )
    with trio.fail_after(10):
        async with background_trio_service(app) as manager:
            await trio.sleep(0.5)
            for _ in range(2):
                await trio.sleep(0.05)
                w3.testing.mine()
            await manager.wait_finished()

    _verify_integrity(session, 0, CHAIN_HEIGHT + 2)
    _verify_empty(session, CHAIN_HEIGHT + 2, None)
Code example #16
async def test_trio_service_external_api_raises_when_cancelled():
    service = ExternalAPIService()

    async with background_trio_service(service) as manager:
        with pytest.raises(LifecycleError):
            async with trio.open_nursery() as nursery:
                # an event to ensure that we are indeed within the body of the function
                is_within_fn = trio.Event()
                trigger_return = trio.Event()

                nursery.start_soon(service.get_7, trigger_return, is_within_fn)

                # ensure we're within the body of the task.
                await is_within_fn.wait()

                # now cancel the service and trigger the return of the function.
                manager.cancel()

                # exiting the context block here will cause the background task
                # to complete and should raise the exception

        # A direct call should also fail.  This *should* be hitting the early
        # return mechanism.
        with pytest.raises(LifecycleError):
            assert await service.get_7()
Code example #17
async def rpc_node(ipc_path, peers):
    bootnodes = tuple(peer.client.local_node for peer in peers)
    alice = ApplicationFactory(bootnodes=bootnodes)
    async with background_trio_service(alice):
        json_rpc_server = RPCServer(
            ipc_path=ipc_path,
            client=alice.client,
            network=alice.network,
            kademlia=alice.kademlia,
            routing_table=alice.routing_table,
        )
        async with background_trio_service(json_rpc_server):
            await json_rpc_server.wait_serving()
            for _ in range(32):
                await trio.lowlevel.checkpoint()
            yield alice
Code example #18
async def test_content_provider_serves_large_content(
    alice,
    bob,
    alice_alexandria_network,
    bob_alexandria_client,
):
    content = ContentFactory(length=1024 * 10)
    content_key = b"test-content-key"
    content_storage = MemoryContentStorage({content_key: content})
    proof = compute_proof(content, sedes=content_sedes)

    content_provider = ContentProvider(bob_alexandria_client,
                                       (content_storage, ))
    async with background_trio_service(content_provider):
        async with alice_alexandria_network.client.subscribe(
                ContentMessage) as subscription:
            # this ensures that the subscription is in place.
            await content_provider.ready()

            with trio.fail_after(2):
                content_retrieval_ctx = alice_alexandria_network.retrieve_content(
                    content_key,
                    proof.get_hash_tree_root(),
                )
                async with content_retrieval_ctx as content_retrieval:
                    await content_retrieval.node_queue.add(bob.node_id)
                    result = await content_retrieval.wait_content_proof()

            validate_proof(result)
            result_data = result.get_proven_data()
            assert result_data[0:len(content)] == content

            response = await subscription.receive()
            assert response.message.payload.is_proof is True
Code example #19
File: test_loading.py, Project: marcgarreau/cthaeh
async def test_head_loader_without_reorgs(session):
    send_channel, receive_channel = trio.open_memory_channel(0)
    loader = HeadLoader(session, receive_channel)

    header_9 = HeaderFactory(block_number=9)
    block_10 = mk_block(parent=header_9, topic_bundles=(mk_topic_bundle(1),))
    block_11 = mk_block(parent=block_10.header, topic_bundles=(mk_topic_bundle(2, 1),))
    block_12 = mk_block(parent=block_11.header, topic_bundles=(mk_topic_bundle(3, 4),))
    block_13 = mk_block(parent=block_12.header, topic_bundles=(mk_topic_bundle(1, 2),))

    packet_0 = HeadBlockPacket(blocks=(block_10,))
    packet_1 = HeadBlockPacket(blocks=(block_11, block_12))
    packet_2 = HeadBlockPacket(blocks=(block_13,))

    assert session.query(Header).scalar() is None

    async with background_trio_service(loader) as manager:
        async with send_channel:
            await send_channel.send(packet_0)
            await send_channel.send(packet_1)
            await send_channel.send(packet_2)

        with trio.fail_after(2):
            await manager.wait_finished()

    front = session.query(Header).filter(Header.hash == block_10.header.hash).one()
    assert front.hash == block_10.header.hash
    assert front.is_detatched

    head = session.query(Header).filter(Header.hash == block_13.header.hash).one()
    assert head.hash == block_13.header.hash
    assert not head.is_detatched
Code example #20
File: commands.py, Project: pipermerriam/cthaeh
async def do_main(args: argparse.Namespace) -> None:
    # Establish database connections
    engine = _get_engine(args)
    Session.configure(bind=engine)  # type: ignore
    session = Session()

    # Ensure database schema is present
    if args.database_url == MEMORY_DB:
        Base.metadata.create_all(engine)

    start_block = args.start_block
    end_block = args.end_block

    from web3.auto.ipc import w3

    if args.disable_jsonrpc:
        ipc_path = None
    elif args.ipc_path:
        ipc_path = args.ipc_path
    else:
        ipc_path = get_xdg_cthaeh_root() / "jsonrpc.ipc"

    app = Application(
        w3,
        session,
        start_block=start_block,
        end_block=end_block,
        concurrency=args.concurrency,
        ipc_path=ipc_path,
    )

    logger.info("Started main process (pid=%d)", os.getpid())
    async with background_trio_service(app) as manager:
        await manager.wait_finished()
Code example #21
    async def network(
        self,
        network: Optional[NetworkAPI] = None,
        bootnodes: Collection[ENRAPI] = (),
        max_advertisement_count: int = 32,
    ) -> AsyncIterator[AlexandriaNetworkAPI]:
        network_context: AsyncContextManager[NetworkAPI]

        if network is None:
            network_context = self.node.network()
        else:
            # unclear why the typing isn't working for `asyncnullcontext`
            network_context = asyncnullcontext(network)  # type: ignore

        async with self._lock.acquire("AlexandriaNode.network(...)"):
            async with network_context as network:
                alexandria_network = AlexandriaNetwork(
                    network=network,
                    bootnodes=bootnodes,
                    commons_content_storage=self.commons_content_storage,
                    pinned_content_storage=self.pinned_content_storage,
                    local_advertisement_db=self.local_advertisement_db,
                    broadcast_log=self.broadcast_log,
                    remote_advertisement_db=self.remote_advertisement_db,
                    max_advertisement_count=max_advertisement_count,
                )
                async with background_trio_service(alexandria_network):
                    await alexandria_network.ready()
                    yield alexandria_network
Code example #22
async def test_radius_tracker_tracks_via_pong(
    alice, bob, alice_alexandria_network, bob_alexandria_client
):
    radius_tracker = RadiusTracker(alice_alexandria_network)

    async with bob_alexandria_client.subscribe(PingMessage) as subscription:
        async with trio.open_nursery() as nursery:
            did_respond = trio.Event()

            async def _respond():
                request = await subscription.receive()
                await bob_alexandria_client.send_pong(
                    request.sender_node_id,
                    request.sender_endpoint,
                    enr_seq=bob.enr.sequence_number,
                    advertisement_radius=1234,
                    request_id=request.request_id,
                )
                did_respond.set()

            nursery.start_soon(_respond)

            async with background_trio_service(radius_tracker):
                await radius_tracker.ready()

                await alice_alexandria_network.ping(bob.node_id)

                with trio.fail_after(2):
                    advertisement_radius = await radius_tracker.get_advertisement_radius(
                        bob.node_id,
                    )

                    assert advertisement_radius == 1234

                    await did_respond.wait()
Code example #23
async def test_trio_service_task_cancellation_dag_order():
    # all of the assertions happen within the body of the service.
    service = TrioDAGServiceTest()
    assert service.all_checks_passed is False
    async with background_trio_service(service):
        await service.ready_cancel()
    assert service.all_checks_passed is True
Code example #24
    async def _run_envelope_and_dispatcher_services(self) -> None:
        """
        Ensure that in the task hierarchy the envelope encode will be shut down
        *after* the dispatcher.

        run()
          |
          ---EnvelopeEncoder
                |
                ---EnvelopeDecoder
                      |
                      ---Dispatcher
        """
        async with background_trio_service(self.envelope_encoder):
            async with background_trio_service(self.envelope_decoder):
                async with background_trio_service(self.dispatcher):
                    await self.manager.wait_finished()
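
A runnable sketch of the ordering guarantee the docstring describes, using hypothetical `Named` services instead of the real encoder, decoder, and dispatcher: exiting nested `background_trio_service` blocks cancels the innermost service first, so each inner service shuts down before the one wrapping it.

import trio
from async_service import Service, background_trio_service


class Named(Service):
    # hypothetical service that reports when it is shut down
    def __init__(self, name: str) -> None:
        self.name = name

    async def run(self) -> None:
        try:
            await self.manager.wait_finished()
        finally:
            print(self.name, "shut down")


async def main() -> None:
    async with background_trio_service(Named("encoder")):
        async with background_trio_service(Named("decoder")):
            async with background_trio_service(Named("dispatcher")):
                await trio.sleep(0.01)
    # prints "dispatcher", then "decoder", then "encoder"

trio.run(main)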
Code example #25
File: services.py, Project: root-servers/trinity
async def run_background_trio_services(services: Sequence[ServiceAPI]) -> None:
    async with contextlib.AsyncExitStack() as stack:
        managers = tuple([
            await stack.enter_async_context(background_trio_service(service))
            for service in services
        ])
        # If any of the services terminate, we do so as well.
        await wait_first_trio([manager.wait_finished for manager in managers])
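
A usage sketch for the helper above, with two hypothetical services: because `wait_first_trio` returns as soon as any service finishes, the short-lived service ending tears the long-lived one down as the exit stack unwinds.

import trio
from async_service import Service


class ShortLived(Service):
    # hypothetical service that finishes almost immediately
    async def run(self) -> None:
        await trio.sleep(0.01)


class LongLived(Service):
    # hypothetical service that would run forever if not torn down
    async def run(self) -> None:
        await trio.sleep_forever()


trio.run(run_background_trio_services, [LongLived(), ShortLived()])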
Code example #26
async def message_dispatcher(node_db, incoming_message_channels, outgoing_message_channels):
    message_dispatcher = MessageDispatcher(
        node_db=node_db,
        incoming_message_receive_channel=incoming_message_channels[1],
        outgoing_message_send_channel=outgoing_message_channels[0],
    )
    async with background_trio_service(message_dispatcher):
        yield message_dispatcher
Code example #27
async def peers(ipc_path):
    bootnodes = []
    peer_a = ApplicationFactory()
    async with background_trio_service(peer_a):
        bootnodes.append(peer_a.client.local_node)
        peer_b = ApplicationFactory(bootnodes=bootnodes)
        async with background_trio_service(peer_b):
            bootnodes.append(peer_b.client.local_node)
            peer_c = ApplicationFactory()
            async with background_trio_service(peer_c):
                bootnodes.append(peer_c.client.local_node)
                peer_d = ApplicationFactory(bootnodes=bootnodes)
                async with background_trio_service(peer_d):
                    bootnodes.append(peer_d.client.local_node)
                    peer_e = ApplicationFactory(bootnodes=bootnodes)
                    async with background_trio_service(peer_e):
                        yield (peer_a, peer_b, peer_c, peer_d, peer_e)
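
The five levels of nesting above can be flattened with `contextlib.AsyncExitStack`, the same pattern example #25 uses. A hedged rewrite sketch with the same semantics, assuming the `ApplicationFactory` from the original fixture:

import contextlib


async def peers(ipc_path):
    async with contextlib.AsyncExitStack() as stack:
        bootnodes = []
        started = []
        # peers a and c start without bootnodes; b, d, and e receive
        # every node started so far, mirroring the nested version above
        for use_bootnodes in (False, True, False, True, True):
            if use_bootnodes:
                peer = ApplicationFactory(bootnodes=list(bootnodes))
            else:
                peer = ApplicationFactory()
            await stack.enter_async_context(background_trio_service(peer))
            bootnodes.append(peer.client.local_node)
            started.append(peer)
        yield tuple(started)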
Code example #28
async def test_logs_handling(
    w3,
    deposit_contract,
    tester,
    num_blocks_confirmed,
    polling_period,
    endpoint_server,
    func_do_deposit,
    start_block_number,
):
    amount_0 = func_do_deposit()
    amount_1 = func_do_deposit()
    m = Eth1Monitor(
        w3=w3,
        deposit_contract_address=deposit_contract.address,
        deposit_contract_abi=deposit_contract.abi,
        num_blocks_confirmed=num_blocks_confirmed,
        polling_period=polling_period,
        start_block_number=start_block_number,
        event_bus=endpoint_server,
        base_db=AtomicDBFactory(),
    )
    async with background_trio_service(m):
        # Test: logs emitted prior to starting `Eth1Monitor` can still be queried.
        await wait_all_tasks_blocked()
        assert m.total_deposit_count == 0
        assert m.highest_processed_block_number == 0

        tester.mine_blocks(num_blocks_confirmed - 1)
        # Test: only single deposit is processed.
        #      `num_blocks_confirmed`
        #       |-----------------|
        # [x] -> [x] -> [ ] -> [ ]
        await trio.sleep(polling_period)
        await wait_all_tasks_blocked()
        assert (m.total_deposit_count == 1
                and m._db.get_deposit_data(0).amount == amount_0)

        tester.mine_blocks(1)
        # Test: both deposits are processed.
        #             `num_blocks_confirmed`
        #              |-----------------|
        # [x] -> [x] -> [ ] -> [ ] -> [ ]
        await trio.sleep(polling_period)
        await wait_all_tasks_blocked()
        assert (m.total_deposit_count == 2
                and m._db.get_deposit_data(1).amount == amount_1)
        # Test: a new log can be queried after the transaction is included in a block
        #   and `num_blocks_confirmed` blocks are mined.
        #                                         `num_blocks_confirmed`
        #                                          |-----------------|
        # [x] -> [x] -> [ ] -> [ ] -> [ ] -> [x] -> [ ] -> [ ] -> [ ]
        amount_2 = func_do_deposit()
        tester.mine_blocks(num_blocks_confirmed)
        await trio.sleep(polling_period)
        await wait_all_tasks_blocked()
        assert (m.total_deposit_count == 3
                and m._db.get_deposit_data(2).amount == amount_2)
Code example #29
async def message_dispatcher(enr_db, inbound_message_channels,
                             outbound_message_channels):
    message_dispatcher = MessageDispatcher(
        enr_db=enr_db,
        inbound_message_receive_channel=inbound_message_channels[1],
        outbound_message_send_channel=outbound_message_channels[0],
    )
    async with background_trio_service(message_dispatcher):
        yield message_dispatcher
Code example #30
    async def create_batch_with_gossipsub(
        cls,
        number: int,
        *,
        cache_size: int = None,
        strict_signing: bool = False,
        protocols: Sequence[TProtocol] = None,
        degree: int = GOSSIPSUB_PARAMS.degree,
        degree_low: int = GOSSIPSUB_PARAMS.degree_low,
        degree_high: int = GOSSIPSUB_PARAMS.degree_high,
        time_to_live: int = GOSSIPSUB_PARAMS.time_to_live,
        gossip_window: int = GOSSIPSUB_PARAMS.gossip_window,
        gossip_history: int = GOSSIPSUB_PARAMS.gossip_history,
        heartbeat_interval: float = GOSSIPSUB_PARAMS.heartbeat_interval,
        heartbeat_initial_delay: float = GOSSIPSUB_PARAMS.heartbeat_initial_delay,
        security_protocol: TProtocol = None,
        muxer_opt: TMuxerOptions = None,
        msg_id_constructor: Callable[[rpc_pb2.Message],
                                     bytes] = get_peer_and_seqno_msg_id,
    ) -> AsyncIterator[Tuple[Pubsub, ...]]:
        if protocols is not None:
            gossipsubs = GossipsubFactory.create_batch(
                number,
                protocols=protocols,
                degree=degree,
                degree_low=degree_low,
                degree_high=degree_high,
                time_to_live=time_to_live,
                gossip_window=gossip_window,
                heartbeat_interval=heartbeat_interval,
            )
        else:
            gossipsubs = GossipsubFactory.create_batch(
                number,
                degree=degree,
                degree_low=degree_low,
                degree_high=degree_high,
                time_to_live=time_to_live,
                gossip_window=gossip_window,
                heartbeat_interval=heartbeat_interval,
            )

        async with cls._create_batch_with_router(
                number,
                gossipsubs,
                cache_size,
                strict_signing,
                security_protocol=security_protocol,
                muxer_opt=muxer_opt,
                msg_id_constructor=msg_id_constructor,
        ) as pubsubs:
            async with AsyncExitStack() as stack:
                for router in gossipsubs:
                    await stack.enter_async_context(
                        background_trio_service(router))
                yield pubsubs