def new_host(
    key_pair: KeyPair = None,
    muxer_opt: TMuxerOptions = None,
    sec_opt: TSecurityOptions = None,
    peerstore_opt: IPeerStore = None,
    disc_opt: IPeerRouting = None,
) -> IHost:
    """
    Create a new libp2p host based on the given parameters.

    :param key_pair: optional choice of the ``KeyPair``
    :param muxer_opt: optional choice of stream muxer
    :param sec_opt: optional choice of security upgrade
    :param peerstore_opt: optional peerstore
    :param disc_opt: optional discovery
    :return: return a host instance
    """
    swarm = new_swarm(
        key_pair=key_pair,
        muxer_opt=muxer_opt,
        sec_opt=sec_opt,
        peerstore_opt=peerstore_opt,
    )
    # A discovery service upgrades the plain host to a routed one.
    host: IHost = RoutedHost(swarm, disc_opt) if disc_opt else BasicHost(swarm)
    return host
def __init__(self,
             key_pair: KeyPair,
             listen_ip: str,
             listen_port: int,
             chain: BaseBeaconChain,
             event_bus: EndpointAPI,
             security_protocol_ops: Optional[Dict[TProtocol, BaseSecureTransport]] = None,
             muxer_protocol_ops: Optional[Dict[TProtocol, IMuxedConn]] = None,
             gossipsub_params: Optional[GossipsubParams] = None,
             cancel_token: CancelToken = None,
             bootstrap_nodes: Tuple[Multiaddr, ...] = (),
             preferred_nodes: Tuple[Multiaddr, ...] = (),
             subnets: Optional[Set[SubnetId]] = None) -> None:
    """
    Set up a libp2p node for the beacon chain: a swarm/host, a gossipsub
    pubsub instance, and peer bookkeeping, then schedule ``self.start()``.

    :param key_pair: identity key pair; also used to build the default
        SecIO security transport when ``security_protocol_ops`` is omitted
    :param listen_ip: IP address to listen on
    :param listen_port: TCP port to listen on
    :param chain: the beacon chain this node serves
    :param event_bus: endpoint for cross-process events
    :param security_protocol_ops: security transports by protocol ID
        (defaults to SecIO built from ``key_pair``)
    :param muxer_protocol_ops: stream muxers by protocol ID
        (defaults to Mplex)
    :param gossipsub_params: gossipsub tuning knobs (defaults applied)
    :param cancel_token: token for cooperative cancellation
    :param bootstrap_nodes: multiaddrs dialed at startup
    :param preferred_nodes: multiaddrs given connection preference
    :param subnets: attestation subnet IDs this node joins (empty set default)
    """
    super().__init__(cancel_token)
    self.listen_ip = listen_ip
    self.listen_port = listen_port
    self.key_pair = key_pair
    self.bootstrap_nodes = bootstrap_nodes
    self.preferred_nodes = preferred_nodes
    # Avoid a shared mutable default: fall back to a fresh empty set.
    self.subnets = subnets if subnets is not None else set()
    # TODO: Add key and peer_id to the peerstore
    if security_protocol_ops is None:
        security_protocol_ops = {SecIOID: SecIOTransport(key_pair)}
    if muxer_protocol_ops is None:
        muxer_protocol_ops = {MPLEX_PROTOCOL_ID: Mplex}
    network: INetwork = initialize_default_swarm(
        key_pair=key_pair,
        transport_opt=[self.listen_maddr],
        muxer_opt=muxer_protocol_ops,
        sec_opt=security_protocol_ops,
        peerstore_opt=None,  # let the function initialize it
    )
    self.host = BasicHost(network=network)
    if gossipsub_params is None:
        gossipsub_params = GossipsubParams()
    gossipsub_router = GossipSub(
        protocols=[GOSSIPSUB_PROTOCOL_ID],
        degree=gossipsub_params.DEGREE,
        degree_low=gossipsub_params.DEGREE_LOW,
        degree_high=gossipsub_params.DEGREE_HIGH,
        time_to_live=gossipsub_params.FANOUT_TTL,
        gossip_window=gossipsub_params.GOSSIP_WINDOW,
        gossip_history=gossipsub_params.GOSSIP_HISTORY,
        heartbeat_interval=gossipsub_params.HEARTBEAT_INTERVAL,
    )
    self.pubsub = Pubsub(
        host=self.host,
        router=gossipsub_router,
        my_id=self.peer_id,
    )
    self.chain = chain
    self._event_bus = event_bus
    # Pool of peers that completed the application-level handshake.
    self.handshaked_peers = PeerPool()
    # Schedule the service's main start coroutine on the service runner.
    self.run_task(self.start())
def test_default_protocols():
    """A fresh default swarm's mux must register exactly the default protocol handlers."""
    pair = create_new_key_pair()
    default_swarm = initialize_default_swarm(pair)
    basic_host = BasicHost(default_swarm)
    registered_handlers = basic_host.get_mux().handlers
    assert registered_handlers == get_default_protocols()
def test_default_protocols():
    """A fresh default swarm's mux must register exactly the default protocol IDs."""
    pair = create_new_key_pair()
    basic_host = BasicHost(initialize_default_swarm(pair))
    registered_handlers = basic_host.get_mux().handlers
    # NOTE: comparing keys for equality as handlers may be closures that do not
    # compare in the way this test is concerned with
    expected_handlers = get_default_protocols(basic_host)
    assert registered_handlers.keys() == expected_handlers.keys()
async def create_batch_and_listen(cls, is_secure: bool, number: int) -> Tuple[BasicHost, ...]:
    """Spin up ``number`` listening swarms concurrently and wrap each in a ``BasicHost``."""
    identities = [generate_new_rsa_identity() for _ in range(number)]
    listen_coros = [
        SwarmFactory.create_and_listen(is_secure, identity)
        for identity in identities
    ]
    swarms = await asyncio.gather(*listen_coros)
    hosts = [
        BasicHost(identity.public_key, swarm)
        for identity, swarm in zip(identities, swarms)
    ]
    return tuple(hosts)
async def create_batch_and_listen(
    cls,
    number: int,
    security_protocol: TProtocol = None,
    muxer_opt: TMuxerOptions = None,
) -> AsyncIterator[Tuple[BasicHost, ...]]:
    """Yield ``number`` BasicHosts, one per listening swarm from the swarm factory."""
    async with SwarmFactory.create_batch_and_listen(
        number, security_protocol=security_protocol, muxer_opt=muxer_opt
    ) as swarms:
        # Hosts share the swarms' lifetime: they are torn down when the
        # enclosing context exits.
        yield tuple(BasicHost(swarm) for swarm in swarms)
async def new_node(
    key_pair: KeyPair = None,
    swarm_opt: INetwork = None,
    transport_opt: Sequence[str] = None,
    muxer_opt: TMuxerOptions = None,
    sec_opt: TSecurityOptions = None,
    peerstore_opt: IPeerStore = None,
    disc_opt: IPeerRouting = None,
) -> BasicHost:
    """
    Create a new libp2p node.

    :param key_pair: key pair for deriving an identity (generated when omitted)
    :param swarm_opt: optional pre-built swarm; a default one is initialized when omitted
    :param transport_opt: optional choice of transport upgrade
    :param muxer_opt: optional choice of stream muxer
    :param sec_opt: optional choice of security upgrade
    :param peerstore_opt: optional peerstore
    :param disc_opt: optional discovery
    :return: return a host instance
    """
    if not key_pair:
        key_pair = generate_new_rsa_identity()
    id_opt = generate_peer_id_from(key_pair)
    if not swarm_opt:
        swarm_opt = initialize_default_swarm(
            key_pair=key_pair,
            id_opt=id_opt,
            transport_opt=transport_opt,
            muxer_opt=muxer_opt,
            sec_opt=sec_opt,
            peerstore_opt=peerstore_opt,
        )
    # TODO enable support for other host type
    # TODO routing unimplemented
    # Explicit annotation so MyPy accepts the two-branch assignment.
    host: IHost = (
        RoutedHost(key_pair.public_key, swarm_opt, disc_opt)
        if disc_opt
        else BasicHost(key_pair.public_key, swarm_opt)
    )
    # Kick off cleanup job
    asyncio.ensure_future(cleanup_done_tasks())
    return host
async def new_node(
    key_pair: KeyPair = None,
    swarm_opt: INetwork = None,
    transport_opt: Sequence[str] = None,
    muxer_opt: Mapping[TProtocol, MuxerClassType] = None,
    sec_opt: Mapping[TProtocol, ISecureTransport] = None,
    peerstore_opt: IPeerStore = None,
    disc_opt: IPeerRouting = None,
) -> BasicHost:
    """
    Create a new libp2p node.

    :param key_pair: key pair for deriving an identity (generated when omitted)
    :param swarm_opt: optional pre-built swarm; a default one is initialized when omitted
    :param transport_opt: optional choice of transport upgrade
    :param muxer_opt: optional choice of stream muxer
    :param sec_opt: optional choice of security upgrade
    :param peerstore_opt: optional peerstore
    :param disc_opt: optional discovery
    :return: return a host instance
    """
    if not key_pair:
        key_pair = generate_new_rsa_identity()
    id_opt = generate_peer_id_from(key_pair)
    if not swarm_opt:
        swarm_opt = initialize_default_swarm(
            key_pair=key_pair,
            id_opt=id_opt,
            transport_opt=transport_opt,
            muxer_opt=muxer_opt,
            sec_opt=sec_opt,
            peerstore_opt=peerstore_opt,
            disc_opt=disc_opt,
        )
    # TODO enable support for other host type
    # TODO routing unimplemented
    host = BasicHost(swarm_opt)
    # Kick off cleanup job
    asyncio.ensure_future(cleanup_done_tasks())
    return host
def __init__(self,
             privkey: datatypes.PrivateKey,
             listen_ip: str,
             listen_port: int,
             security_protocol_ops: Dict[str, ISecureTransport],
             muxer_protocol_ids: Tuple[str, ...],
             gossipsub_params: Optional[GossipsubParams] = None,
             cancel_token: CancelToken = None,
             bootstrap_nodes: Tuple[Multiaddr, ...] = None,
             preferred_nodes: Tuple[Multiaddr, ...] = None) -> None:
    """
    Set up a libp2p node: a default swarm/host and a gossipsub pubsub
    instance built from the supplied security and muxer options.

    :param privkey: node private key; its public key derives the peer ID
    :param listen_ip: IP address to listen on
    :param listen_port: TCP port to listen on
    :param security_protocol_ops: security transports keyed by protocol ID
    :param muxer_protocol_ids: stream-muxer protocol IDs to enable
    :param gossipsub_params: gossipsub tuning knobs (defaults applied)
    :param cancel_token: token for cooperative cancellation
    :param bootstrap_nodes: multiaddrs dialed at startup, or None
    :param preferred_nodes: multiaddrs given connection preference, or None
    """
    super().__init__(cancel_token)
    self.listen_ip = listen_ip
    self.listen_port = listen_port
    self.privkey = privkey
    self.bootstrap_nodes = bootstrap_nodes
    self.preferred_nodes = preferred_nodes
    # TODO: Add key and peer_id to the peerstore
    network: INetwork = initialize_default_swarm(
        id_opt=peer_id_from_pubkey(self.privkey.public_key),
        transport_opt=[self.listen_maddr],
        muxer_opt=list(muxer_protocol_ids),
        sec_opt=security_protocol_ops,
        peerstore_opt=None,  # let the function initialize it
        disc_opt=None,  # no routing required here
    )
    # NOTE(review): router=None suggests BasicHost accepts an optional
    # routing argument in this version of the API — confirm the signature.
    self.host = BasicHost(network=network, router=None)
    if gossipsub_params is None:
        gossipsub_params = GossipsubParams()
    gossipsub_router = GossipSub(
        protocols=[GOSSIPSUB_PROTOCOL_ID],
        degree=gossipsub_params.DEGREE,
        degree_low=gossipsub_params.DEGREE_LOW,
        degree_high=gossipsub_params.DEGREE_HIGH,
        time_to_live=gossipsub_params.FANOUT_TTL,
        gossip_window=gossipsub_params.GOSSIP_WINDOW,
        gossip_history=gossipsub_params.GOSSIP_HISTORY,
        heartbeat_interval=gossipsub_params.HEARTBEAT_INTERVAL,
    )
    self.pubsub = Pubsub(
        host=self.host,
        router=gossipsub_router,
        my_id=self.peer_id,
    )
async def test_one_notifier_on_two_nodes_with_listen():
    """
    End-to-end check that a Notifee attached to each of two nodes sees the
    listen/connect/open-stream events in order, while an echo exchange runs
    between the nodes over /echo/1.0.0.
    """
    events_b = []
    messages = ["hello", "hello"]
    node_a_key_pair = create_new_key_pair()
    node_a_transport_opt = ["/ip4/127.0.0.1/tcp/0"]
    node_a = await new_node(node_a_key_pair, transport_opt=node_a_transport_opt)
    await node_a.get_network().listen(
        multiaddr.Multiaddr(node_a_transport_opt[0]))

    # Set up node_b swarm to pass into host
    node_b_key_pair = create_new_key_pair()
    node_b_transport_opt = ["/ip4/127.0.0.1/tcp/0"]
    node_b_multiaddr = multiaddr.Multiaddr(node_b_transport_opt[0])
    node_b_swarm = initialize_default_swarm(node_b_key_pair,
                                            transport_opt=node_b_transport_opt)
    node_b = BasicHost(node_b_swarm)

    async def my_stream_handler(stream):
        # Ensure the listened, connected and opened_stream events were hit in Notifee obj
        # and that the stream passed into opened_stream matches the stream created on
        # node_b
        assert events_b == [
            ["listenedb", node_b_multiaddr],
            ["connectedb", stream.mplex_conn],
            ["opened_streamb", stream],
        ]
        # Echo each message back prefixed with ACK.
        for message in messages:
            read_string = (await stream.read(len(message))).decode()
            resp = ACK + read_string
            await stream.write(resp.encode())

    # Add notifee for node_a
    events_a = []
    assert node_a.get_network().notify(MyNotifee(events_a, "a"))

    # Add notifee for node_b
    assert node_b.get_network().notify(MyNotifee(events_b, "b"))

    # start listen on node_b_swarm
    await node_b.get_network().listen(node_b_multiaddr)
    node_b.set_stream_handler("/echo/1.0.0", my_stream_handler)
    # Associate the peer with local ip address (see default parameters of Libp2p())
    node_a.get_peerstore().add_addrs(node_b.get_id(), node_b.get_addrs(), 10)
    stream = await node_a.new_stream(node_b.get_id(), ["/echo/1.0.0"])

    # Ensure the connected and opened_stream events were hit in MyNotifee obj
    # and that stream passed into opened_stream matches the stream created on
    # node_a
    assert events_a == [["connecteda", stream.mplex_conn],
                        ["opened_streama", stream]]
    # Drive the echo exchange and verify each ACK-prefixed response.
    for message in messages:
        expected_resp = ACK + message
        await stream.write(message.encode())
        response = (await stream.read(len(expected_resp))).decode()
        assert response == expected_resp

    # Success, terminate pending tasks.
    await cleanup()
async def create_batch_and_listen(
    cls, is_secure: bool, number: int
) -> Tuple[BasicHost, ...]:
    """
    Create ``number`` listening swarms and wrap each in a ``BasicHost``.

    :param is_secure: whether the swarms use a secure transport
    :param number: how many hosts to create
    :return: one ``BasicHost`` per created swarm
    """
    swarms = await ListeningSwarmFactory.create_batch_and_listen(is_secure, number)
    # BUG FIX: the original iterated ``range(swarms)``, which raises
    # TypeError (``swarms`` is a sequence, not an int); iterate the
    # swarms themselves so each host wraps a real swarm.
    return tuple(BasicHost(swarm) for swarm in swarms)
async def create_and_listen(cls, is_secure: bool) -> BasicHost:
    """Create a single listening swarm and return it wrapped in a ``BasicHost``."""
    batch = await ListeningSwarmFactory.create_batch_and_listen(is_secure, 1)
    return BasicHost(batch[0])