Example 1
async def pubsubs(num_hosts, security_protocol, is_gossipsub, is_pubsub_signing_strict):
    # Enter the factory's async context manager so the fixture yields the
    # pubsub batch itself rather than the unentered context manager.
    if is_gossipsub:
        async with PubsubFactory.create_batch_with_gossipsub(
            num_hosts,
            security_protocol=security_protocol,
            strict_signing=is_pubsub_signing_strict,
        ) as pubsubs:
            yield pubsubs
    else:
        async with PubsubFactory.create_batch_with_floodsub(
            num_hosts, security_protocol, strict_signing=is_pubsub_signing_strict
        ) as pubsubs:
            yield pubsubs
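Presumably this generator is registered as a pytest fixture (e.g. decorated with `@pytest.fixture` under pytest-trio; the decorator is not shown in this excerpt). A hypothetical test consuming it would then receive the batch directly:

import pytest

@pytest.mark.trio
async def test_uses_pubsubs(pubsubs):
    # `pubsubs` is the batch yielded inside the factory's context manager;
    # the parametrizing fixtures (num_hosts, security_protocol, ...) are
    # assumed to be defined elsewhere in the suite's conftest.
    assert all(pubsub.host is not None for pubsub in pubsubs)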
Example 2
async def test_subscribe_and_publish_full_channel():
    async with PubsubFactory.create_batch_with_floodsub(1) as pubsubs_fsub:
        pubsub = pubsubs_fsub[0]

        extra_data_0 = b"extra_data_0"
        extra_data_1 = b"extra_data_1"

        # Test: the subscription channel has size `SUBSCRIPTION_CHANNEL_SIZE`.
        #   When the channel is full, newly received messages are dropped.
        #   Once the channel has an empty slot, it can receive new messages again.

        # Assume `SUBSCRIPTION_CHANNEL_SIZE` is smaller than `2**(4*8)`.
        list_data = [
            i.to_bytes(4, "big") for i in range(SUBSCRIPTION_CHANNEL_SIZE)
        ]
        # Expect `extra_data_0` is dropped and `extra_data_1` is appended.
        expected_list_data = list_data + [extra_data_1]

        subscription = await pubsub.subscribe(TESTING_TOPIC)
        for data in list_data:
            await pubsub.publish(TESTING_TOPIC, data)

        # Publish `extra_data_0` which should be dropped since the channel is already full.
        await pubsub.publish(TESTING_TOPIC, extra_data_0)
        # Consume a message so that an empty slot opens up in the channel.
        assert (await subscription.get()).data == expected_list_data.pop(0)
        # Publish `extra_data_1` which should be appended to the channel.
        await pubsub.publish(TESTING_TOPIC, extra_data_1)

        for expected_data in expected_list_data:
            assert (await subscription.get()).data == expected_data
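The drop-when-full behavior exercised above maps directly onto trio's bounded memory channels. A minimal standalone sketch of the same semantics (plain trio, assuming the subscription channel behaves like a bounded memory channel whose writer drops instead of blocking):

import trio

def demo_bounded_channel():
    # Buffer holds 2 messages, standing in for `SUBSCRIPTION_CHANNEL_SIZE`.
    send, receive = trio.open_memory_channel(2)
    send.send_nowait(b"m0")
    send.send_nowait(b"m1")
    try:
        send.send_nowait(b"extra_data_0")  # buffer full
    except trio.WouldBlock:
        pass  # pubsub drops the message rather than blocking
    assert receive.receive_nowait() == b"m0"  # consuming frees one slot
    send.send_nowait(b"extra_data_1")  # accepted again

demo_bounded_channel()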
Example 3
async def test_handle_subscription():
    async with PubsubFactory.create_batch_with_floodsub(1) as pubsubs_fsub:
        assert len(pubsubs_fsub[0].peer_topics) == 0
        sub_msg_0 = rpc_pb2.RPC.SubOpts(subscribe=True, topicid=TESTING_TOPIC)
        peer_ids = [IDFactory() for _ in range(2)]
        # Test: One peer is subscribed
        pubsubs_fsub[0].handle_subscription(peer_ids[0], sub_msg_0)
        assert len(pubsubs_fsub[0].peer_topics) == 1
        assert TESTING_TOPIC in pubsubs_fsub[0].peer_topics
        assert len(pubsubs_fsub[0].peer_topics[TESTING_TOPIC]) == 1
        assert peer_ids[0] in pubsubs_fsub[0].peer_topics[TESTING_TOPIC]
        # Test: Another peer is subscribed
        pubsubs_fsub[0].handle_subscription(peer_ids[1], sub_msg_0)
        assert len(pubsubs_fsub[0].peer_topics) == 1
        assert len(pubsubs_fsub[0].peer_topics[TESTING_TOPIC]) == 2
        assert peer_ids[1] in pubsubs_fsub[0].peer_topics[TESTING_TOPIC]
        # Test: Subscribe to another topic
        another_topic = "ANOTHER_TOPIC"
        sub_msg_1 = rpc_pb2.RPC.SubOpts(subscribe=True, topicid=another_topic)
        pubsubs_fsub[0].handle_subscription(peer_ids[0], sub_msg_1)
        assert len(pubsubs_fsub[0].peer_topics) == 2
        assert another_topic in pubsubs_fsub[0].peer_topics
        assert peer_ids[0] in pubsubs_fsub[0].peer_topics[another_topic]
        # Test: unsubscribe
        unsub_msg = rpc_pb2.RPC.SubOpts(subscribe=False, topicid=TESTING_TOPIC)
        pubsubs_fsub[0].handle_subscription(peer_ids[0], unsub_msg)
        assert peer_ids[0] not in pubsubs_fsub[0].peer_topics[TESTING_TOPIC]
Example 4
async def test_re_subscribe():
    async with PubsubFactory.create_batch_with_floodsub(1) as pubsubs_fsub:
        await pubsubs_fsub[0].subscribe(TESTING_TOPIC)
        assert TESTING_TOPIC in pubsubs_fsub[0].topic_ids

        await pubsubs_fsub[0].subscribe(TESTING_TOPIC)
        assert TESTING_TOPIC in pubsubs_fsub[0].topic_ids
Example 5
async def test_dense():
    async with PubsubFactory.create_batch_with_gossipsub(10) as pubsubs_gsub:
        hosts = [pubsub.host for pubsub in pubsubs_gsub]
        num_msgs = 5

        # All pubsubs subscribe to "foobar"
        queues = [await pubsub.subscribe("foobar") for pubsub in pubsubs_gsub]

        # Densely connect libp2p hosts in a random way
        await dense_connect(hosts)

        # Wait 2 seconds for heartbeat to allow mesh to connect
        await trio.sleep(2)

        for i in range(num_msgs):
            msg_content = b"foo " + i.to_bytes(1, "big")

            # randomly pick a message origin
            origin_idx = random.randint(0, len(hosts) - 1)

            # publish from the randomly chosen host
            await pubsubs_gsub[origin_idx].publish("foobar", msg_content)

            await trio.sleep(0.5)
            # Assert that all blocking queues receive the message
            for queue in queues:
                msg = await queue.get()
                assert msg.data == msg_content
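`dense_connect` is a helper from the test suite whose implementation is not shown here; the rough idea, as a hypothetical sketch reusing the `connect` helper that appears in later examples:

import random

async def dense_connect_sketch(hosts, num_neighbors=10):
    # Connect every host to a random sample of the others so the topology
    # is well connected before gossipsub's heartbeat builds the mesh.
    for i, host in enumerate(hosts):
        others = hosts[:i] + hosts[i + 1:]
        for neighbor in random.sample(others, min(num_neighbors, len(others))):
            await connect(host, neighbor)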
Example 6
async def test_subscribe_and_publish():
    async with PubsubFactory.create_batch_with_floodsub(1) as pubsubs_fsub:
        pubsub = pubsubs_fsub[0]

        list_data = [b"d0", b"d1"]
        event_receive_data_started = trio.Event()

        async def publish_data(topic):
            await event_receive_data_started.wait()
            for data in list_data:
                await pubsub.publish(topic, data)

        async def receive_data(topic):
            i = 0
            event_receive_data_started.set()
            assert topic not in pubsub.topic_ids
            subscription = await pubsub.subscribe(topic)
            async with subscription:
                assert topic in pubsub.topic_ids
                async for msg in subscription:
                    assert msg.data == list_data[i]
                    i += 1
                    if i == len(list_data):
                        break
            assert topic not in pubsub.topic_ids

        async with trio.open_nursery() as nursery:
            nursery.start_soon(receive_data, TESTING_TOPIC)
            nursery.start_soon(publish_data, TESTING_TOPIC)
Example 7
def _make_pubsubs(hosts, pubsub_routers, cache_size):
    if len(pubsub_routers) != len(hosts):
        raise ValueError(
            f"length of pubsub_routers={len(pubsub_routers)} should equal "
            f"the length of hosts={len(hosts)}"
        )
    return tuple(
        PubsubFactory(host=host, router=router, cache_size=cache_size)
        for host, router in zip(hosts, pubsub_routers))
Example 8
def pubsubs(num_hosts, hosts, is_gossipsub):
    if is_gossipsub:
        routers = GossipsubFactory.create_batch(num_hosts, **GOSSIPSUB_PARAMS._asdict())
    else:
        routers = FloodsubFactory.create_batch(num_hosts)
    _pubsubs = tuple(
        PubsubFactory(host=host, router=router) for host, router in zip(hosts, routers)
    )
    yield _pubsubs
Example 9
async def test_join():
    async with PubsubFactory.create_batch_with_gossipsub(
            4, degree=4, degree_low=3, degree_high=5) as pubsubs_gsub:
        gossipsubs = [pubsub.router for pubsub in pubsubs_gsub]
        hosts = [pubsub.host for pubsub in pubsubs_gsub]
        hosts_indices = list(range(len(pubsubs_gsub)))

        topic = "test_join"
        central_node_index = 0
        # Remove index of central host from the indices
        hosts_indices.remove(central_node_index)
        num_subscribed_peer = 2
        subscribed_peer_indices = random.sample(hosts_indices,
                                                num_subscribed_peer)

        # All pubsubs except the central node's subscribe to the topic
        for i in subscribed_peer_indices:
            await pubsubs_gsub[i].subscribe(topic)

        # Connect central host to all other hosts
        await one_to_all_connect(hosts, central_node_index)

        # Wait 2 seconds for heartbeat to allow mesh to connect
        await trio.sleep(2)

        # The central node publishes to the topic so that the topic
        # is added to the central node's fanout
        await pubsubs_gsub[central_node_index].publish(topic, b"data")

        # Check that the gossipsub of central node has fanout for the topic
        assert topic in gossipsubs[central_node_index].fanout
        # Check that the gossipsub of central node does not have a mesh for the topic
        assert topic not in gossipsubs[central_node_index].mesh

        # Central node subscribes the topic
        await pubsubs_gsub[central_node_index].subscribe(topic)

        await trio.sleep(2)

        # Check that the gossipsub of central node no longer has fanout for the topic
        assert topic not in gossipsubs[central_node_index].fanout

        for i in hosts_indices:
            if i in subscribed_peer_indices:
                assert hosts[i].get_id() in gossipsubs[central_node_index].mesh[topic]
                assert hosts[central_node_index].get_id() in gossipsubs[i].mesh[topic]
            else:
                assert hosts[i].get_id() not in gossipsubs[central_node_index].mesh[topic]
                assert topic not in gossipsubs[i].mesh
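`one_to_all_connect` is likewise a test helper; a plausible sketch of what it does, again reusing the `connect` helper:

async def one_to_all_connect_sketch(hosts, central_index):
    # Dial every other host from the central one, producing a star topology.
    for i, host in enumerate(hosts):
        if i != central_index:
            await connect(hosts[central_index], host)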
Example 10
async def test_strict_signing_failed_validation(monkeypatch):
    async with PubsubFactory.create_batch_with_floodsub(
            2, strict_signing=True) as pubsubs_fsub:
        msg = make_pubsub_msg(
            origin_id=pubsubs_fsub[0].my_id,
            topic_ids=[TESTING_TOPIC],
            data=TESTING_DATA,
            seqno=b"\x00" * 8,
        )
        priv_key = pubsubs_fsub[0].sign_key
        signature = priv_key.sign(PUBSUB_SIGNING_PREFIX.encode() +
                                  msg.SerializeToString())

        event = trio.Event()

        def _is_msg_seen(msg):
            return False

        # Use router publish to check whether `push_msg` succeeds.
        async def router_publish(*args, **kwargs):
            await trio.lowlevel.checkpoint()
            # The event is only set if `push_msg` succeeds.
            event.set()

        monkeypatch.setattr(pubsubs_fsub[0], "_is_msg_seen", _is_msg_seen)
        monkeypatch.setattr(pubsubs_fsub[0].router, "publish", router_publish)

        # Test: no signature attached in `msg`
        await pubsubs_fsub[0].push_msg(pubsubs_fsub[0].my_id, msg)
        await trio.sleep(0.01)
        assert not event.is_set()

        # Test: `msg.key` does not match `msg.from_id`
        msg.key = pubsubs_fsub[1].host.get_public_key().serialize()
        msg.signature = signature
        await pubsubs_fsub[0].push_msg(pubsubs_fsub[0].my_id, msg)
        await trio.sleep(0.01)
        assert not event.is_set()

        # Test: invalid signature
        msg.key = pubsubs_fsub[0].host.get_public_key().serialize()
        msg.signature = b"\x12" * 100
        await pubsubs_fsub[0].push_msg(pubsubs_fsub[0].my_id, msg)
        await trio.sleep(0.01)
        assert not event.is_set()

        # Finally, assert that a valid signature indeed passes validation
        msg.key = pubsubs_fsub[0].host.get_public_key().serialize()
        msg.signature = signature
        await pubsubs_fsub[0].push_msg(pubsubs_fsub[0].my_id, msg)
        await trio.sleep(0.01)
        assert event.is_set()
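For context, the verification `push_msg` is expected to perform plausibly mirrors the signing step at the top of the test: check the signature over `PUBSUB_SIGNING_PREFIX` plus the message serialized without its `signature` and `key` fields. A hedged sketch, assuming the public key object exposes a `verify(data, signature)` method:

def verify_msg_signature_sketch(msg, pubkey):
    # Copy the message and clear the fields that are not covered by the
    # signature before re-serializing (an assumption about the scheme).
    unsigned = type(msg)()
    unsigned.CopyFrom(msg)
    unsigned.ClearField("signature")
    unsigned.ClearField("key")
    payload = PUBSUB_SIGNING_PREFIX.encode() + unsigned.SerializeToString()
    return pubkey.verify(payload, msg.signature)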
Example 11
async def test_mesh_heartbeat(initial_mesh_peer_count, monkeypatch):
    async with PubsubFactory.create_batch_with_gossipsub(
        1, heartbeat_initial_delay=100
    ) as pubsubs_gsub:
        # It's difficult to set up the initial peer-subscription state directly.
        # Ideally we would start with a mesh peer count below the router's `degree`
        # so we can test that `mesh_heartbeat` returns the correct peers to GRAFT.
        # The problem is that we cannot arrange for peers to be subscribed to the
        # topic without also being mesh peers, and those subscribed non-mesh peers
        # are exactly the GRAFT candidates. So we monkeypatch both the peer
        # subscriptions and our mesh peers.
        total_peer_count = 14
        topic = "TEST_MESH_HEARTBEAT"

        fake_peer_ids = [IDFactory() for _ in range(total_peer_count)]
        peer_protocol = {peer_id: PROTOCOL_ID for peer_id in fake_peer_ids}
        monkeypatch.setattr(pubsubs_gsub[0].router, "peer_protocol", peer_protocol)

        peer_topics = {topic: set(fake_peer_ids)}
        # Monkeypatch the peer subscriptions
        monkeypatch.setattr(pubsubs_gsub[0], "peer_topics", peer_topics)

        mesh_peer_indices = random.sample(
            range(total_peer_count), initial_mesh_peer_count
        )
        mesh_peers = [fake_peer_ids[i] for i in mesh_peer_indices]
        router_mesh = {topic: set(mesh_peers)}
        # Monkeypatch our mesh peers
        monkeypatch.setattr(pubsubs_gsub[0].router, "mesh", router_mesh)

        peers_to_graft, peers_to_prune = pubsubs_gsub[0].router.mesh_heartbeat()
        if initial_mesh_peer_count > pubsubs_gsub[0].router.degree:
            # If the number of initial mesh peers exceeds `degree`,
            # we should PRUNE mesh peers
            assert len(peers_to_graft) == 0
            assert (
                len(peers_to_prune)
                == initial_mesh_peer_count - pubsubs_gsub[0].router.degree
            )
            for peer in peers_to_prune:
                assert peer in mesh_peers
        elif initial_mesh_peer_count < pubsubs_gsub[0].router.degree:
            # If the number of initial mesh peers is below `degree`,
            # we should GRAFT more peers
            assert len(peers_to_prune) == 0
            assert (
                len(peers_to_graft)
                == pubsubs_gsub[0].router.degree - initial_mesh_peer_count
            )
            for peer in peers_to_graft:
                assert peer not in mesh_peers
        else:
            assert len(peers_to_prune) == 0 and len(peers_to_graft) == 0
Example 12
async def test_peers_subscribe():
    async with PubsubFactory.create_batch_with_floodsub(2) as pubsubs_fsub:
        await connect(pubsubs_fsub[0].host, pubsubs_fsub[1].host)
        await pubsubs_fsub[0].subscribe(TESTING_TOPIC)
        # Yield to let 0 notify 1
        await trio.sleep(1)
        assert pubsubs_fsub[0].my_id in pubsubs_fsub[1].peer_topics[TESTING_TOPIC]
        await pubsubs_fsub[0].unsubscribe(TESTING_TOPIC)
        # Yield to let 0 notify 1
        await trio.sleep(1)
        assert pubsubs_fsub[0].my_id not in pubsubs_fsub[1].peer_topics[TESTING_TOPIC]
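The `connect` helper used throughout these tests is not shown either; the usual py-libp2p pattern is to build the peer's `PeerInfo` and dial it. A hedged sketch (the import path and signatures are assumptions):

from libp2p.peer.peerinfo import PeerInfo

async def connect_sketch(host_a, host_b):
    # Build host_b's peer info from its ID and listen addresses, then dial it.
    peer_info = PeerInfo(host_b.get_id(), host_b.get_addrs())
    await host_a.connect(peer_info)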
Example 13
async def test_strict_signing():
    async with PubsubFactory.create_batch_with_floodsub(
            2, strict_signing=True) as pubsubs_fsub:
        await connect(pubsubs_fsub[0].host, pubsubs_fsub[1].host)
        await pubsubs_fsub[0].subscribe(TESTING_TOPIC)
        await pubsubs_fsub[1].subscribe(TESTING_TOPIC)
        await trio.sleep(1)

        await pubsubs_fsub[0].publish(TESTING_TOPIC, TESTING_DATA)
        await trio.sleep(1)

        assert len(pubsubs_fsub[0].seen_messages) == 1
        assert len(pubsubs_fsub[1].seen_messages) == 1
Example 14
async def test_fanout():
    async with PubsubFactory.create_batch_with_gossipsub(10) as pubsubs_gsub:
        hosts = [pubsub.host for pubsub in pubsubs_gsub]
        num_msgs = 5

        # All pubsubs subscribe to "foobar" except for `pubsubs_gsub[0]`
        subs = [
            await pubsub.subscribe("foobar") for pubsub in pubsubs_gsub[1:]
        ]

        # Densely connect libp2p hosts in a random way
        await dense_connect(hosts)

        # Wait 2 seconds for heartbeat to allow mesh to connect
        await trio.sleep(2)

        topic = "foobar"
        # Send messages with origin not subscribed
        for i in range(num_msgs):
            msg_content = b"foo " + i.to_bytes(1, "big")

            # Fix the message origin at node 0, which is not subscribed to "foobar"
            origin_idx = 0

            # Publish from the chosen origin
            await pubsubs_gsub[origin_idx].publish(topic, msg_content)

            await trio.sleep(0.5)
            # Assert that all blocking queues receive the message
            for sub in subs:
                msg = await sub.get()
                assert msg.data == msg_content

        # Subscribe message origin
        subs.insert(0, await pubsubs_gsub[0].subscribe(topic))

        # Send messages again
        for i in range(num_msgs):
            msg_content = b"bar " + i.to_bytes(1, "big")

            # Keep the message origin at node 0, which is now subscribed
            origin_idx = 0

            # Publish from the chosen origin
            await pubsubs_gsub[origin_idx].publish(topic, msg_content)

            await trio.sleep(0.5)
            # Assert that all blocking queues receive the message
            for sub in subs:
                msg = await sub.get()
                assert msg.data == msg_content
Example 15
    async def create(cls, number: int) -> AsyncIterator[Tuple["DummyAccountNode", ...]]:
        """
        Create `number` DummyAccountNodes, each with an attached libp2p host,
        floodsub, and pubsub instance.

        We use `create` because it serves as a factory function and allows us
        to use async/await, unlike `__init__`.
        """
        async with PubsubFactory.create_batch_with_floodsub(number) as pubsubs:
            async with AsyncExitStack() as stack:
                dummy_account_nodes = tuple(cls(pubsub) for pubsub in pubsubs)
                for node in dummy_account_nodes:
                    await stack.enter_async_context(background_trio_service(node))
                yield dummy_account_nodes
Example 16
    async def create(cls) -> "DummyAccountNode":
        """
        Create a new DummyAccountNode and attach a libp2p node, a floodsub, and
        a pubsub instance to this new node.

        We use `create` because it serves as a factory function and allows us
        to use async/await, unlike `__init__`.
        """

        pubsub = PubsubFactory(router=FloodsubFactory())
        await pubsub.host.get_network().listen(LISTEN_MADDR)
        return cls(libp2p_node=pubsub.host,
                   pubsub=pubsub,
                   floodsub=pubsub.router)
Example 17
async def test_set_and_remove_topic_validator():
    async with PubsubFactory.create_batch_with_floodsub(1) as pubsubs_fsub:
        is_sync_validator_called = False

        def sync_validator(peer_id, msg):
            nonlocal is_sync_validator_called
            is_sync_validator_called = True

        is_async_validator_called = False

        async def async_validator(peer_id, msg):
            nonlocal is_async_validator_called
            is_async_validator_called = True
            await trio.lowlevel.checkpoint()

        topic = "TEST_VALIDATOR"

        assert topic not in pubsubs_fsub[0].topic_validators

        # Register sync validator
        pubsubs_fsub[0].set_topic_validator(topic, sync_validator, False)

        assert topic in pubsubs_fsub[0].topic_validators
        topic_validator = pubsubs_fsub[0].topic_validators[topic]
        assert not topic_validator.is_async

        # Validate with sync validator
        topic_validator.validator(peer_id=IDFactory(), msg="msg")

        assert is_sync_validator_called
        assert not is_async_validator_called

        # Register async validator
        pubsubs_fsub[0].set_topic_validator(topic, async_validator, True)

        is_sync_validator_called = False
        assert topic in pubsubs_fsub[0].topic_validators
        topic_validator = pubsubs_fsub[0].topic_validators[topic]
        assert topic_validator.is_async

        # Validate with async validator
        await topic_validator.validator(peer_id=IDFactory(), msg="msg")

        assert is_async_validator_called
        assert not is_sync_validator_called

        # Remove validator
        pubsubs_fsub[0].remove_topic_validator(topic)
        assert topic not in pubsubs_fsub[0].topic_validators
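The `topic_validators` entries inspected above expose `.validator` and `.is_async`; a plausible definition (an assumption, not necessarily the library's exact one) is a small named tuple:

from typing import Callable, NamedTuple

class TopicValidator(NamedTuple):
    validator: Callable  # sync or async callable taking (peer_id, msg)
    is_async: bool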
Example 18
async def test_leave():
    async with PubsubFactory.create_batch_with_gossipsub(1) as pubsubs_gsub:
        gossipsub = pubsubs_gsub[0].router
        topic = "test_leave"

        assert topic not in gossipsub.mesh

        await gossipsub.join(topic)
        assert topic in gossipsub.mesh

        await gossipsub.leave(topic)
        assert topic not in gossipsub.mesh

        # Test re-leave
        await gossipsub.leave(topic)
Example 19
async def test_re_unsubscribe():
    async with PubsubFactory.create_batch_with_floodsub(1) as pubsubs_fsub:
        # Unsubscribe from topic we didn't even subscribe to
        assert "NOT_MY_TOPIC" not in pubsubs_fsub[0].topic_ids
        await pubsubs_fsub[0].unsubscribe("NOT_MY_TOPIC")
        assert "NOT_MY_TOPIC" not in pubsubs_fsub[0].topic_ids

        await pubsubs_fsub[0].subscribe(TESTING_TOPIC)
        assert TESTING_TOPIC in pubsubs_fsub[0].topic_ids

        await pubsubs_fsub[0].unsubscribe(TESTING_TOPIC)
        assert TESTING_TOPIC not in pubsubs_fsub[0].topic_ids

        await pubsubs_fsub[0].unsubscribe(TESTING_TOPIC)
        assert TESTING_TOPIC not in pubsubs_fsub[0].topic_ids
Example 20
async def test_handle_graft(monkeypatch):
    async with PubsubFactory.create_batch_with_gossipsub(2) as pubsubs_gsub:
        gossipsubs = tuple(pubsub.router for pubsub in pubsubs_gsub)

        index_alice = 0
        id_alice = pubsubs_gsub[index_alice].my_id
        index_bob = 1
        id_bob = pubsubs_gsub[index_bob].my_id
        await connect(pubsubs_gsub[index_alice].host,
                      pubsubs_gsub[index_bob].host)

        # Wait 2 seconds for heartbeat to allow mesh to connect
        await trio.sleep(2)

        topic = "test_handle_graft"
        # Only alice subscribes to the topic
        await gossipsubs[index_alice].join(topic)

        # Monkey patch bob's `emit_prune` function so we can
        # check if it is called in `handle_graft`
        event_emit_prune = trio.Event()

        async def emit_prune(topic, sender_peer_id):
            event_emit_prune.set()
            await trio.lowlevel.checkpoint()

        monkeypatch.setattr(gossipsubs[index_bob], "emit_prune", emit_prune)

        # Check that alice is bob's peer but not his mesh peer
        assert gossipsubs[index_bob].peer_protocol[id_alice] == PROTOCOL_ID
        assert topic not in gossipsubs[index_bob].mesh

        await gossipsubs[index_alice].emit_graft(topic, id_bob)

        # Check that `emit_prune` is called
        await event_emit_prune.wait()

        # Check that bob is alice's peer but not her mesh peer
        assert topic in gossipsubs[index_alice].mesh
        assert id_bob not in gossipsubs[index_alice].mesh[topic]
        assert gossipsubs[index_alice].peer_protocol[id_bob] == PROTOCOL_ID

        await gossipsubs[index_bob].emit_graft(topic, id_alice)

        await trio.sleep(1)

        # Check that bob is now alice's mesh peer
        assert id_bob in gossipsubs[index_alice].mesh[topic]
Example 21
async def test_gossip_propagation():
    async with PubsubFactory.create_batch_with_gossipsub(
        2, degree=1, degree_low=0, degree_high=2, gossip_window=50, gossip_history=100
    ) as pubsubs_gsub:
        topic = "foo"
        queue_0 = await pubsubs_gsub[0].subscribe(topic)

        # Node 0 publishes to the topic
        msg_content = b"foo_msg"

        await pubsubs_gsub[0].publish(topic, msg_content)

        await trio.sleep(0.5)
        # Assert that the subscription queue receives the message
        msg = await queue_0.get()
        assert msg.data == msg_content
Example 22
async def test_message_all_peers(monkeypatch, security_protocol):
    async with PubsubFactory.create_batch_with_floodsub(
            1, security_protocol=security_protocol
    ) as pubsubs_fsub, net_stream_pair_factory(
            security_protocol=security_protocol) as stream_pair:
        peer_id = IDFactory()
        mock_peers = {peer_id: stream_pair[0]}
        with monkeypatch.context() as m:
            m.setattr(pubsubs_fsub[0], "peers", mock_peers)

            empty_rpc = rpc_pb2.RPC()
            empty_rpc_bytes = empty_rpc.SerializeToString()
            empty_rpc_bytes_len_prefixed = encode_varint_prefixed(
                empty_rpc_bytes)
            await pubsubs_fsub[0].message_all_peers(empty_rpc_bytes)
            assert (await stream_pair[1].read(MAX_READ_LEN)
                    ) == empty_rpc_bytes_len_prefixed
Example 23
async def test_get_hello_packet():
    async with PubsubFactory.create_batch_with_floodsub(1) as pubsubs_fsub:

        def _get_hello_packet_topic_ids():
            packet = pubsubs_fsub[0].get_hello_packet()
            return tuple(sub.topicid for sub in packet.subscriptions)

        # Test: No subscription, so there should not be any topic ids in the hello packet.
        assert len(_get_hello_packet_topic_ids()) == 0

        # Test: After subscriptions, topic ids should be in the hello packet.
        topic_ids = ["t", "o", "p", "i", "c"]
        for topic in topic_ids:
            await pubsubs_fsub[0].subscribe(topic)
        topic_ids_in_hello = _get_hello_packet_topic_ids()
        for topic in topic_ids:
            assert topic in topic_ids_in_hello
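Judging by how the test reads `packet.subscriptions`, the hello packet is plausibly one `RPC` message carrying a `SubOpts` announcement per subscribed topic. A hedged sketch of building such a packet:

def get_hello_packet_sketch(topic_ids):
    # Build an RPC carrying a subscribe announcement for every topic.
    packet = rpc_pb2.RPC()
    for topic in topic_ids:
        packet.subscriptions.add(subscribe=True, topicid=topic)
    return packet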
Example 24
async def test_validate_msg(is_topic_1_val_passed, is_topic_2_val_passed):
    async with PubsubFactory.create_batch_with_floodsub(1) as pubsubs_fsub:

        def passed_sync_validator(peer_id, msg):
            return True

        def failed_sync_validator(peer_id, msg):
            return False

        async def passed_async_validator(peer_id, msg):
            await trio.lowlevel.checkpoint()
            return True

        async def failed_async_validator(peer_id, msg):
            await trio.lowlevel.checkpoint()
            return False

        topic_1 = "TEST_SYNC_VALIDATOR"
        topic_2 = "TEST_ASYNC_VALIDATOR"

        if is_topic_1_val_passed:
            pubsubs_fsub[0].set_topic_validator(topic_1, passed_sync_validator,
                                                False)
        else:
            pubsubs_fsub[0].set_topic_validator(topic_1, failed_sync_validator,
                                                False)

        if is_topic_2_val_passed:
            pubsubs_fsub[0].set_topic_validator(topic_2,
                                                passed_async_validator, True)
        else:
            pubsubs_fsub[0].set_topic_validator(topic_2,
                                                failed_async_validator, True)

        msg = make_pubsub_msg(
            origin_id=pubsubs_fsub[0].my_id,
            topic_ids=[topic_1, topic_2],
            data=b"1234",
            seqno=b"\x00" * 8,
        )

        if is_topic_1_val_passed and is_topic_2_val_passed:
            await pubsubs_fsub[0].validate_msg(pubsubs_fsub[0].my_id, msg)
        else:
            with pytest.raises(ValidationError):
                await pubsubs_fsub[0].validate_msg(pubsubs_fsub[0].my_id, msg)
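An illustrative sketch of the validation flow this test relies on (not the library's exact code): run every matching topic validator, awaiting the async ones, and raise `ValidationError` on the first failure:

async def validate_msg_sketch(pubsub, msg_forwarder, msg):
    for topic_validator in pubsub.get_msg_validators(msg):
        if topic_validator.is_async:
            is_valid = await topic_validator.validator(msg_forwarder, msg)
        else:
            is_valid = topic_validator.validator(msg_forwarder, msg)
        if not is_valid:
            raise ValidationError(f"msg failed validation: {msg}")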
Example 25
async def test_get_msg_validators():
    async with PubsubFactory.create_batch_with_floodsub(1) as pubsubs_fsub:
        times_sync_validator_called = 0

        def sync_validator(peer_id, msg):
            nonlocal times_sync_validator_called
            times_sync_validator_called += 1

        times_async_validator_called = 0

        async def async_validator(peer_id, msg):
            nonlocal times_async_validator_called
            times_async_validator_called += 1
            await trio.lowlevel.checkpoint()

        topic_1 = "TEST_VALIDATOR_1"
        topic_2 = "TEST_VALIDATOR_2"
        topic_3 = "TEST_VALIDATOR_3"

        # Register sync validator for topic 1 and 2
        pubsubs_fsub[0].set_topic_validator(topic_1, sync_validator, False)
        pubsubs_fsub[0].set_topic_validator(topic_2, sync_validator, False)

        # Register async validator for topic 3
        pubsubs_fsub[0].set_topic_validator(topic_3, async_validator, True)

        msg = make_pubsub_msg(
            origin_id=pubsubs_fsub[0].my_id,
            topic_ids=[topic_1, topic_2, topic_3],
            data=b"1234",
            seqno=b"\x00" * 8,
        )

        topic_validators = pubsubs_fsub[0].get_msg_validators(msg)
        for topic_validator in topic_validators:
            if topic_validator.is_async:
                await topic_validator.validator(peer_id=IDFactory(), msg="msg")
            else:
                topic_validator.validator(peer_id=IDFactory(), msg="msg")

        assert times_sync_validator_called == 2
        assert times_async_validator_called == 1
Example 26
async def test_handle_talk():
    async with PubsubFactory.create_batch_with_floodsub(1) as pubsubs_fsub:
        sub = await pubsubs_fsub[0].subscribe(TESTING_TOPIC)
        msg_0 = make_pubsub_msg(
            origin_id=pubsubs_fsub[0].my_id,
            topic_ids=[TESTING_TOPIC],
            data=b"1234",
            seqno=b"\x00" * 8,
        )
        pubsubs_fsub[0].notify_subscriptions(msg_0)
        msg_1 = make_pubsub_msg(
            origin_id=pubsubs_fsub[0].my_id,
            topic_ids=["NOT_SUBSCRIBED"],
            data=b"1234",
            seqno=b"\x11" * 8,
        )
        pubsubs_fsub[0].notify_subscriptions(msg_1)
        assert len(pubsubs_fsub[0].topic_ids) == 1
        assert sub == pubsubs_fsub[0].subscribed_topics_receive[TESTING_TOPIC]
        assert (await sub.get()) == msg_0
Example 27
async def test_simple_two_nodes():
    async with PubsubFactory.create_batch_with_floodsub(2) as pubsubs_fsub:
        topic = "my_topic"
        data = b"some data"

        await connect(pubsubs_fsub[0].host, pubsubs_fsub[1].host)
        await trio.sleep(0.25)

        sub_b = await pubsubs_fsub[1].subscribe(topic)
        # Sleep to let node_a learn of node_b's subscription
        await trio.sleep(0.25)

        await pubsubs_fsub[0].publish(topic, data)

        res_b = await sub_b.get()

        # Check that the msg received by node_b is the same
        # as the message sent by node_a
        assert ID(res_b.from_id) == pubsubs_fsub[0].host.get_id()
        assert res_b.data == data
        assert res_b.topicIDs == [topic]
Example 28
async def test_handle_prune():
    async with PubsubFactory.create_batch_with_gossipsub(
        2, heartbeat_interval=3
    ) as pubsubs_gsub:
        gossipsubs = tuple(pubsub.router for pubsub in pubsubs_gsub)

        index_alice = 0
        id_alice = pubsubs_gsub[index_alice].my_id
        index_bob = 1
        id_bob = pubsubs_gsub[index_bob].my_id

        topic = "test_handle_prune"
        for pubsub in pubsubs_gsub:
            await pubsub.subscribe(topic)

        await connect(pubsubs_gsub[index_alice].host, pubsubs_gsub[index_bob].host)

        # Wait for heartbeat to allow mesh to connect
        await trio.sleep(1)

        # Check that they are each other's mesh peer
        assert id_alice in gossipsubs[index_bob].mesh[topic]
        assert id_bob in gossipsubs[index_alice].mesh[topic]

        # alice emits a prune message to bob; alice should be removed
        # from bob's mesh peers
        await gossipsubs[index_alice].emit_prune(topic, id_bob)
        # `emit_prune` does not remove bob from alice's mesh peers
        assert id_bob in gossipsubs[index_alice].mesh[topic]

        # NOTE: We increase `heartbeat_interval` to 3 seconds so that bob will not
        # add alice back to his mesh after heartbeat.
        # Wait for bob to `handle_prune`
        await trio.sleep(0.1)

        # Check that alice is no longer bob's mesh peer
        assert id_alice not in gossipsubs[index_bob].mesh[topic]
Example 29
async def test_lru_cache_two_nodes():
    # Two nodes with a cache_size of 4.

    # Mock `get_msg_id` so it is easier to manipulate `msg_id` via `data`.
    def get_msg_id(msg):
        # Originally it is `(msg.seqno, msg.from_id)`
        return (msg.data, msg.from_id)

    async with PubsubFactory.create_batch_with_floodsub(
            2, cache_size=4, msg_id_constructor=get_msg_id) as pubsubs_fsub:
        # `node_a` sends the following messages to `node_b`:
        message_indices = [1, 1, 2, 1, 3, 1, 4, 1, 5, 1]
        # `node_b` should only receive the following:
        expected_received_indices = [1, 2, 3, 4, 5, 1]

        topic = "my_topic"

        await connect(pubsubs_fsub[0].host, pubsubs_fsub[1].host)
        await trio.sleep(0.25)

        sub_b = await pubsubs_fsub[1].subscribe(topic)
        await trio.sleep(0.25)

        def _make_testing_data(i: int) -> bytes:
            num_int_bytes = 4
            if i >= 2**(num_int_bytes * 8):
                raise ValueError("integer is too large to be serialized")
            return b"data" + i.to_bytes(num_int_bytes, "big")

        for index in message_indices:
            await pubsubs_fsub[0].publish(topic, _make_testing_data(index))
        await trio.sleep(0.25)

        for index in expected_received_indices:
            res_b = await sub_b.get()
            assert res_b.data == _make_testing_data(index)
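Why `expected_received_indices` ends with another 1: with `cache_size=4`, deduplication plausibly acts like a FIFO cache of the last four message ids, so message 1 is suppressed until ids 2-5 evict it. A standalone sketch of that policy (an assumption, not py-libp2p's implementation):

from collections import OrderedDict

class SeenCacheSketch:
    def __init__(self, size):
        self.size = size
        self._cache = OrderedDict()

    def seen_then_add(self, msg_id):
        # Return True if already seen; otherwise record the id, evicting
        # the oldest entry once the cache exceeds `size`.
        if msg_id in self._cache:
            return True
        self._cache[msg_id] = None
        if len(self._cache) > self.size:
            self._cache.popitem(last=False)
        return False

cache = SeenCacheSketch(4)
received = [i for i in [1, 1, 2, 1, 3, 1, 4, 1, 5, 1] if not cache.seen_then_add(i)]
assert received == [1, 2, 3, 4, 5, 1]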
Example 30
async def test_publish_push_msg_is_called(monkeypatch):
    msg_forwarders = []
    msgs = []

    async def push_msg(msg_forwarder, msg):
        msg_forwarders.append(msg_forwarder)
        msgs.append(msg)
        await trio.lowlevel.checkpoint()

    async with PubsubFactory.create_batch_with_floodsub(1) as pubsubs_fsub:
        with monkeypatch.context() as m:
            m.setattr(pubsubs_fsub[0], "push_msg", push_msg)

            await pubsubs_fsub[0].publish(TESTING_TOPIC, TESTING_DATA)
            await pubsubs_fsub[0].publish(TESTING_TOPIC, TESTING_DATA)

            assert (
                len(msgs) == 2
            ), "`push_msg` should be called every time `publish` is called"
            assert msg_forwarders[0] == msg_forwarders[1] == pubsubs_fsub[0].my_id
            assert msgs[0].seqno != msgs[1].seqno, "`seqno` should be different every time"