Example 1
File: node.py Project: onyb/trinity
    def __init__(self,
                 key_pair: KeyPair,
                 listen_ip: str,
                 listen_port: int,
                 chain: BaseBeaconChain,
                 event_bus: EndpointAPI,
                 security_protocol_ops: Dict[TProtocol,
                                             BaseSecureTransport] = None,
                 muxer_protocol_ops: Dict[TProtocol, IMuxedConn] = None,
                 gossipsub_params: Optional[GossipsubParams] = None,
                 cancel_token: CancelToken = None,
                 bootstrap_nodes: Tuple[Multiaddr, ...] = (),
                 preferred_nodes: Tuple[Multiaddr, ...] = (),
                 subnets: Optional[Set[SubnetId]] = None) -> None:
        super().__init__(cancel_token)
        self.listen_ip = listen_ip
        self.listen_port = listen_port
        self.key_pair = key_pair
        self.bootstrap_nodes = bootstrap_nodes
        self.preferred_nodes = preferred_nodes
        self.subnets = subnets if subnets is not None else set()
        # TODO: Add key and peer_id to the peerstore
        if security_protocol_ops is None:
            security_protocol_ops = {SecIOID: SecIOTransport(key_pair)}
        if muxer_protocol_ops is None:
            muxer_protocol_ops = {MPLEX_PROTOCOL_ID: Mplex}
        network: INetwork = initialize_default_swarm(
            key_pair=key_pair,
            transport_opt=[self.listen_maddr],
            muxer_opt=muxer_protocol_ops,
            sec_opt=security_protocol_ops,
            peerstore_opt=None,  # let the function initialize it
        )
        self.host = BasicHost(network=network)

        if gossipsub_params is None:
            gossipsub_params = GossipsubParams()
        gossipsub_router = GossipSub(
            protocols=[GOSSIPSUB_PROTOCOL_ID],
            degree=gossipsub_params.DEGREE,
            degree_low=gossipsub_params.DEGREE_LOW,
            degree_high=gossipsub_params.DEGREE_HIGH,
            time_to_live=gossipsub_params.FANOUT_TTL,
            gossip_window=gossipsub_params.GOSSIP_WINDOW,
            gossip_history=gossipsub_params.GOSSIP_HISTORY,
            heartbeat_interval=gossipsub_params.HEARTBEAT_INTERVAL,
        )
        self.pubsub = Pubsub(
            host=self.host,
            router=gossipsub_router,
            my_id=self.peer_id,
        )

        self.chain = chain
        self._event_bus = event_bus

        self.handshaked_peers = PeerPool()

        self.run_task(self.start())
Example 2
def create_pubsub_and_gossipsub_instances(libp2p_hosts, supported_protocols,
                                          degree, degree_low, degree_high,
                                          time_to_live, gossip_window,
                                          gossip_history, heartbeat_interval):
    pubsubs = []
    gossipsubs = []
    for node in libp2p_hosts:
        gossipsub = GossipSub(supported_protocols, degree, degree_low,
                              degree_high, time_to_live, gossip_window,
                              gossip_history, heartbeat_interval)
        pubsub = Pubsub(node, gossipsub, "a")
        pubsubs.append(pubsub)
        gossipsubs.append(gossipsub)

    return pubsubs, gossipsubs
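
For context, a minimal usage sketch of this helper (not taken from the project): the hosts, the connect helper, and the parameter values below are assumptions chosen to mirror the other examples on this page.

# Hypothetical usage sketch: `hosts` would be libp2p hosts created with
# new_node(), and `connect` the test helper used in Example 6. The window,
# history and heartbeat values are illustrative only.
async def example_two_node_setup(hosts):
    pubsubs, gossipsubs = create_pubsub_and_gossipsub_instances(
        hosts, ["/gossipsub/1.0.0"],
        3, 2, 4,      # degree, degree_low, degree_high
        30,           # time_to_live (fanout TTL, seconds)
        3, 5,         # gossip_window, gossip_history
        0.5,          # heartbeat_interval (seconds)
    )
    # Connect the two hosts and subscribe the first one to a topic.
    await connect(hosts[0], hosts[1])
    queue = await pubsubs[0].subscribe("test-topic")
    return pubsubs, gossipsubs, queue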
Example 3
async def test_init():
    node = await new_node(transport_opt=["/ip4/127.0.0.1/tcp/0"])

    await node.get_network().listen(multiaddr.Multiaddr("/ip4/127.0.0.1/tcp/0"))

    supported_protocols = ["/gossipsub/1.0.0"]

    gossipsub = GossipSub(supported_protocols, 3, 2, 4, 30)
    pubsub = Pubsub(node, gossipsub, "a")

    # Did it work?
    assert gossipsub and pubsub

    await cleanup()
Example 4
    def __init__(self,
                 privkey: datatypes.PrivateKey,
                 listen_ip: str,
                 listen_port: int,
                 security_protocol_ops: Dict[str, ISecureTransport],
                 muxer_protocol_ids: Tuple[str, ...],
                 gossipsub_params: Optional[GossipsubParams] = None,
                 cancel_token: CancelToken = None,
                 bootstrap_nodes: Tuple[Multiaddr, ...] = None,
                 preferred_nodes: Tuple[Multiaddr, ...] = None) -> None:
        super().__init__(cancel_token)
        self.listen_ip = listen_ip
        self.listen_port = listen_port
        self.privkey = privkey
        self.bootstrap_nodes = bootstrap_nodes
        self.preferred_nodes = preferred_nodes
        # TODO: Add key and peer_id to the peerstore
        network: INetwork = initialize_default_swarm(
            id_opt=peer_id_from_pubkey(self.privkey.public_key),
            transport_opt=[self.listen_maddr],
            muxer_opt=list(muxer_protocol_ids),
            sec_opt=security_protocol_ops,
            peerstore_opt=None,  # let the function initialize it
            disc_opt=None,  # no routing required here
        )
        self.host = BasicHost(network=network, router=None)

        if gossipsub_params is None:
            gossipsub_params = GossipsubParams()
        gossipsub_router = GossipSub(
            protocols=[GOSSIPSUB_PROTOCOL_ID],
            degree=gossipsub_params.DEGREE,
            degree_low=gossipsub_params.DEGREE_LOW,
            degree_high=gossipsub_params.DEGREE_HIGH,
            time_to_live=gossipsub_params.FANOUT_TTL,
            gossip_window=gossipsub_params.GOSSIP_WINDOW,
            gossip_history=gossipsub_params.GOSSIP_HISTORY,
            heartbeat_interval=gossipsub_params.HEARTBEAT_INTERVAL,
        )
        self.pubsub = Pubsub(
            host=self.host,
            router=gossipsub_router,
            my_id=self.peer_id,
        )
Example 5
    def __init__(self, fork_digest_provider: ForkDigestProvider,
                 host: IHost) -> None:
        self._fork_digest_provider = fork_digest_provider
        self._host = host
        gossipsub_router = GossipSub(
            protocols=[GOSSIPSUB_PROTOCOL_ID],
            degree=self.DEGREE,
            degree_low=self.DEGREE_LOW,
            degree_high=self.DEGREE_HIGH,
            time_to_live=self.FANOUT_TTL,
            gossip_window=self.GOSSIP_WINDOW,
            gossip_history=self.GOSSIP_HISTORY,
            heartbeat_interval=self.HEARTBEAT_INTERVAL,
        )
        self.gossipsub = gossipsub_router
        self.pubsub = Pubsub(
            host=self._host,
            router=gossipsub_router,
            msg_id_constructor=get_content_addressed_msg_id,
        )
Example 6
async def perform_test_from_obj(obj):
    """
    Perform a floodsub-style test from a test obj, using gossipsub routers.
    Test objs are composed as follows:
    
    {
        "supported_protocols": ["supported/protocol/1.0.0",...],
        "adj_list": {
            "node1": ["neighbor1_of_node1", "neighbor2_of_node1", ...],
            "node2": ["neighbor1_of_node2", "neighbor2_of_node2", ...],
            ...
        },
        "topic_map": {
            "topic1": ["node1_subscribed_to_topic1", "node2_subscribed_to_topic1", ...]
        },
        "messages": [
            {
                "topics": ["topic1_for_message", "topic2_for_message", ...],
                "data": "some contents of the message (newlines are not supported)",
                "node_id": "message sender node id"
            },
            ...
        ]
    }
    NOTE: In adj_list, for any pair of neighbors A and B, list the edge only
    once: either A as a neighbor of B, or B as a neighbor of A. Do NOT list
    both A: ["B"] and B: ["A"], as the behavior is undefined (even if it may
    work). A minimal usage sketch follows this example.
    """

    # Step 1) Create graph
    adj_list = obj["adj_list"]
    node_map = {}
    gossipsub_map = {}
    pubsub_map = {}

    supported_protocols = obj["supported_protocols"]

    tasks_connect = []
    for start_node_id in adj_list:
        # Create node if node does not yet exist
        if start_node_id not in node_map:
            node = await new_node(transport_opt=["/ip4/127.0.0.1/tcp/0"])
            await node.get_network().listen(
                multiaddr.Multiaddr("/ip4/127.0.0.1/tcp/0"))

            node_map[start_node_id] = node

            gossipsub = GossipSub(supported_protocols, 3, 2, 4, 30)
            gossipsub_map[start_node_id] = gossipsub
            pubsub = Pubsub(node, gossipsub, start_node_id)
            pubsub_map[start_node_id] = pubsub

        # For each neighbor of start_node, create it if it does not yet
        # exist, then connect start_node to that neighbor
        for neighbor_id in adj_list[start_node_id]:
            # Create neighbor if neighbor does not yet exist
            if neighbor_id not in node_map:
                neighbor_node = await new_node(
                    transport_opt=["/ip4/127.0.0.1/tcp/0"])
                await neighbor_node.get_network().listen(
                    multiaddr.Multiaddr("/ip4/127.0.0.1/tcp/0"))

                node_map[neighbor_id] = neighbor_node

                gossipsub = GossipSub(supported_protocols, 3, 2, 4, 30)
                gossipsub_map[neighbor_id] = gossipsub
                pubsub = Pubsub(neighbor_node, gossipsub, neighbor_id)
                pubsub_map[neighbor_id] = pubsub

            # Connect node and neighbor
            tasks_connect.append(
                asyncio.ensure_future(
                    connect(node_map[start_node_id], node_map[neighbor_id])))
    tasks_connect.append(asyncio.sleep(2))
    await asyncio.gather(*tasks_connect)

    # Allow time for graph creation before continuing
    # await asyncio.sleep(0.25)

    # Step 2) Subscribe to topics
    queues_map = {}
    topic_map = obj["topic_map"]

    tasks_topic = []
    tasks_topic_data = []
    for topic in topic_map:
        for node_id in topic_map[topic]:
            """
            # Subscribe node to topic
            q = await pubsub_map[node_id].subscribe(topic)

            # Create topic-queue map for node_id if one does not yet exist
            if node_id not in queues_map:
                queues_map[node_id] = {}

            # Store queue in topic-queue map for node
            queues_map[node_id][topic] = q
            """
            tasks_topic.append(
                asyncio.ensure_future(pubsub_map[node_id].subscribe(topic)))
            tasks_topic_data.append((node_id, topic))
    tasks_topic.append(asyncio.sleep(2))

    # Gather is like Promise.all
    responses = await asyncio.gather(*tasks_topic, return_exceptions=True)
    # The last response comes from the trailing asyncio.sleep, so skip it
    for i in range(len(responses) - 1):
        q = responses[i]
        node_id, topic = tasks_topic_data[i]
        if node_id not in queues_map:
            queues_map[node_id] = {}

        # Store queue in topic-queue map for node
        queues_map[node_id][topic] = q

    # Allow time for subscribing before continuing
    # await asyncio.sleep(0.01)

    # Step 3) Publish messages
    topics_in_msgs_ordered = []
    messages = obj["messages"]
    tasks_publish = []
    next_msg_id_func = message_id_generator(0)

    for msg in messages:
        topics = msg["topics"]

        data = msg["data"]
        node_id = msg["node_id"]

        # Get actual id for sender node (not the id from the test obj)
        actual_node_id = str(node_map[node_id].get_id())

        # Create correctly formatted message
        msg_talk = generate_RPC_packet(actual_node_id, topics, data,
                                       next_msg_id_func())

        # Publish message
        tasks_publish.append(
            asyncio.ensure_future(
                gossipsub_map[node_id].publish(actual_node_id,
                                               msg_talk.SerializeToString())))

        # For each topic in topics, add topic, msg_talk tuple to ordered test list
        # TODO: Update message sender to be correct message sender before
        # adding msg_talk to this list
        for topic in topics:
            topics_in_msgs_ordered.append((topic, msg_talk))

    # Allow time for publishing before continuing
    # await asyncio.sleep(0.4)
    tasks_publish.append(asyncio.sleep(2))
    await asyncio.gather(*tasks_publish)

    # Step 4) Check that all messages were received correctly.
    # TODO: Check message sender too
    for topic, actual_msg in topics_in_msgs_ordered:

        # Look at each node in each topic
        for node_id in topic_map[topic]:
            # Get message from subscription queue
            msg_on_node = await queues_map[node_id][topic].get()
            assert (actual_msg.publish[0].SerializeToString()
                    == msg_on_node.SerializeToString())

    # Success, terminate pending tasks.
    await cleanup()
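
Finally, a minimal sketch of how perform_test_from_obj might be driven under an asyncio-aware test runner (e.g. pytest-asyncio). The obj below is hypothetical and only mirrors the structure documented in the function's docstring; it is not real test data from the project.

# Hypothetical driver; the topology, topic names and message contents are
# illustrative only and follow the documented test-obj structure.
example_obj = {
    "supported_protocols": ["/gossipsub/1.0.0"],
    "adj_list": {"A": ["B"], "B": ["C"]},
    "topic_map": {"topic1": ["B", "C"]},
    "messages": [
        {"topics": ["topic1"], "data": "some data", "node_id": "A"},
    ],
}

async def test_from_example_obj():
    # Builds the three-node graph, subscribes B and C to topic1, publishes
    # from A, and asserts delivery, as implemented above.
    await perform_test_from_obj(example_obj)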