def test_event_service_triggered_when_new_node_added(self):
    """A join RPC processed by the peer service is re-published on the event
    socket, where a raw subscriber can observe it."""
    # Create Network service
    w1 = Wallet()
    p1 = Network(wallet=w1, ctx=self.ctx, socket_base='tcp://0.0.0.0')

    # Create Discovery Server
    w2 = Wallet()
    d = DiscoveryServer(wallet=w2,
                        socket_id=_socket('tcp://127.0.0.1:10000'),
                        pepper=PEPPER.encode(),
                        ctx=self.ctx,
                        poll_timeout=2000,
                        linger=200)

    # Create raw subscriber
    # NOTE(review): this socket is never closed — relies on ctx teardown.
    subscriber = self.ctx.socket(zmq.SUB)
    subscriber.setsockopt(zmq.SUBSCRIBE, b'')
    subscriber.connect('tcp://127.0.0.1:19003')

    # TCP takes a bit longer to bind and is prone to dropping messages...
    sleep(0.3)

    # Construct the join RPC message
    join_message = ['join', (w2.verifying_key().hex(), 'tcp://127.0.0.1')]
    join_message = json.dumps(join_message).encode()

    # Wrap recv() in an async
    async def recv():
        msg = await subscriber.recv()
        return msg

    tasks = asyncio.gather(
        p1.peer_service.start(),  # Start the PeerService which will process RPC and emit events
        d.serve(),  # Start Discovery so PeerService can verify they are online
        services.get(_socket('tcp://127.0.0.1:19002'), msg=join_message, ctx=self.ctx, timeout=3000),  # Push out a join request
        stop_server(p1.peer_service, 1),
        stop_server(d, 1),
        recv()  # Collect the subscription result
    )

    loop = asyncio.get_event_loop()
    res = loop.run_until_complete(tasks)

    # json round-trips the tuple into a list.
    expected_list = ['join', [w2.verifying_key().hex(), 'tcp://127.0.0.1']]
    got_list = json.loads(res[-1].decode())

    self.assertListEqual(expected_list, got_list)
def test_fetch_block_from_master(self):
    """Fetch block 0 over the wire with BlockFetcher and compare the
    reconstructed block against what the storage driver persisted."""
    # Setup Mongo
    #sync.seed_vkbook()
    w = Wallet()
    c = CilantroStorageDriver(key=w.sk.encode())
    c.drop_collections()

    # Store blocks (NOTE(review): original comment said 20, the call stores 1)
    self.store_blocks(c, 1)

    w1 = Wallet()
    m1 = BlockServer(socket_base='tcp://127.0.0.1',
                     wallet=w1,
                     ctx=self.ctx,
                     linger=500,
                     poll_timeout=500,
                     driver=FakeTopBlockManager(101, 'abcd'))

    class FakeParameters:
        # Minimal stand-in for the live parameters object: a short refresh and
        # a single hard-coded masternode socket.
        async def refresh(self):
            await asyncio.sleep(0.1)

        def get_masternode_sockets(self, *args):
            return ['tcp://127.0.0.1:10004']

    f = BlockFetcher(wallet=Wallet(), ctx=self.ctx, parameters=FakeParameters())

    tasks = asyncio.gather(
        m1.serve(),
        f.get_block_from_master(0, cilantro_ee.sockets.struct._socket('tcp://127.0.0.1:10004')),
        stop_server(m1, 0.3),
    )

    loop = asyncio.get_event_loop()
    res = loop.run_until_complete(tasks)

    block_dict = c.get_block(0)
    # blockOwners is storage metadata, not part of the fetched payload.
    del block_dict['blockOwners']

    got_block = res[1]
    got = canonical.block_from_subblocks([s for s in got_block.subBlocks],
                                         previous_hash=b'\x00' * 32,
                                         block_num=0)

    self.assertDictEqual(block_dict, got)
def test_orchestrator(self):
    """Boot a 2x2 network and push three test transactions through it."""
    masternodes, delegates = make_network(2, 2, self.ctx)
    boot = make_start_awaitable(masternodes, delegates)

    async def scenario():
        await boot
        await asyncio.sleep(1)
        everyone = masternodes + delegates
        # Three independent transactions, each from a fresh wallet.
        for _ in range(3):
            await send_tx(masternodes[1], everyone,
                          contract='testing', function='test', sender=Wallet())

    asyncio.get_event_loop().run_until_complete(scenario())
def test_find_node_returns_self_if_asked_on_peer_address_and_self_is_the_value_ipc(
        self):
    """A node asked for its own VK on its own peer address resolves to itself."""
    wallet = Wallet()
    network = Network(wallet=wallet, ctx=self.ctx, socket_base='ipc:///tmp')
    vk_hex = wallet.verifying_key().hex()

    async def lookup():
        return await network.find_node(network.peer_service_address, vk_hex)

    result = asyncio.get_event_loop().run_until_complete(lookup())
    self.assertEqual(result.get(vk_hex), 'ipc:///tmp')
def aiomain(source, track, outfd, procs, shutdown, outhwm):
    """Run `procs` concurrent request workers that drain results into a PUSH socket.

    Args:
        source: upstream endpoint handed to each aioreq worker.
        track: tracking object forwarded to the workers.
        outfd: endpoint the PUSH drain socket binds to.
        procs: number of concurrent aioreq coroutines to run.
        shutdown: handle watched by stop() to end the run.
        outhwm: ZMQ_SNDHWM for the drain socket (send back-pressure limit).
    """
    context = zmq.asyncio.Context()
    drain = context.socket(zmq.PUSH)
    drain.setsockopt(zmq.SNDHWM, outhwm)
    drain.bind(outfd)
    args = [aioreq(context, source, track, drain) for _ in range(procs)]
    args.append(stop(shutdown))
    loop = asyncio.get_event_loop()
    try:
        loop.run_until_complete(asyncio.gather(*args))
    except Exception:
        # NOTE(review): broad catch — any worker failure is silently swallowed.
        # The sibling aiomain variant catches StopAsyncIteration only; confirm
        # whether that narrower handling was intended here too.
        loop.stop()
    # Always tear down the context and loop, even after a failed run.
    context.destroy(linger=0)
    loop.close()
def __init__(
        self,
        ip: str,
        port: int,
        unpack=None,
        logger: logging.Logger = None,
        zmqcontext=None,
        loop=None,
        passoff_callback=None,
        name: str = None,
):
    """The init of an AsyncSubscriber.

    Opens a zmq SUB socket subscribed to all topics (binding when ip is
    '0.0.0.0', connecting otherwise) and immediately schedules the receive
    loop on the event loop.

    Args:
        ip (str): The ip address where the datas are published (can be local or remote)
        port (int): The port number at which the datas are published
        unpack (None, optional): A callable which takes the received data
            packet as input and returns an unpacked object. If not given the
            packed data object is put in the subscribed buffer.
        logger (logging.Logger, optional): Optionally provide a logger instance
        zmqcontext (None, optional): zmq context
        loop (None, optional): an asyncio event loop
        passoff_callback (None, optional): An optional callback for overriding
            the default buffer. Note: if this is used then ``get_data()`` will
            always be empty.
        name (str, optional): The name of the subscriber (used in logging)
    """
    logger = logger or sslogger
    name = name or __class__.__name__
    self.log = logger.getChild(name)
    self._context = zmqcontext or zmq.asyncio.Context()
    self._sock = self._context.socket(zmq.SUB)
    # Empty prefix = subscribe to every topic.
    self._sock.setsockopt(zmq.SUBSCRIBE, b"")
    con_str = "tcp://%s:%s" % (ip, port)
    # '0.0.0.0' means we own the endpoint and bind; otherwise connect out.
    if "0.0.0.0" == ip:
        self._sock.bind(con_str)
    else:
        self._sock.connect(con_str)
    self.log.info("Connected to : %s" % con_str)
    self._running = False
    self._data_buffer = asyncio.Queue()
    self._loop = loop or asyncio.get_event_loop()
    # NOTE(review): immediately overrides the False assignment above — the
    # earlier assignment looks redundant.
    self._running = True
    self._unpack = (lambda x: x) if unpack is None else unpack
    # Start consuming as soon as construction finishes.
    self._task = self._loop.create_task(self.receive())
    # Default passoff enqueues each packet into the internal buffer.
    self._passoff_callback = passoff_callback or (
        lambda x: self._loop.create_task(self._data_buffer.put(x)))
def get_mastr_code(self, exchange, product):
    """Return the instrument code with the highest open interest for a product.

    Subscribes to quote messages for the given exchange/product, scans the
    initial snapshot list for the non-'main' contract with the largest open
    interest, then unsubscribes.

    Args:
        exchange: exchange identifier, e.g. 'HKFE'.
        product: product code on that exchange.

    Returns:
        The instrument code of the dominant contract, or None if no snapshot
        qualified.
    """
    start_time_stamp = int(time.time() * 1000)
    sub_type = SubscribeMsgType.SUB_WITH_PRODUCT
    base_info = [{'exchange': exchange, 'product_code': product}]
    rsp = asyncio.get_event_loop().run_until_complete(
        future=self.api.SubsQutoMsgReqApi(
            sub_type=sub_type,
            base_info=base_info,
            start_time_stamp=start_time_stamp))
    master_code = None
    open_interrest = 0
    for snapshot in rsp['before_snapshot_json_list']:
        # Evaluate the open-interest lookup once per snapshot (the original
        # performed the doDicEvaluate + int conversion twice).
        oi = int(self.common.doDicEvaluate(snapshot['future'], 'openInterrest', 0))
        instr_code = snapshot['commonInfo']['instrCode']
        # Skip synthetic 'main' continuation codes; '>=' keeps the last
        # contract on ties, matching the original behavior.
        if oi >= open_interrest and 'main' not in instr_code:
            master_code = instr_code
            open_interrest = oi
    # Unsubscribe; the response is intentionally discarded.
    asyncio.get_event_loop().run_until_complete(
        future=self.api.UnSubsQutoMsgReqApi(
            unsub_type=sub_type,
            unbase_info=base_info,
            start_time_stamp=start_time_stamp))
    return master_code
def check_all_instr(self):
    """For every 'main' continuation instrument, verify that the locally
    computed master code matches the API-reported related instrument."""
    instruments = asyncio.get_event_loop().run_until_complete(
        future=self.get_instrument('SYNC_INSTR_REQ', codegenerate_dealer_address))
    self.logger.debug(instruments)
    for instrument in instruments['instruments']:
        # Only synthetic 'main' continuation instruments are checked.
        if 'main' not in instrument['base']['instrCode']:
            continue
        self.logger.debug(instrument)
        exchange = instrument['base']['exchange']
        product = instrument['proc']['code']
        master_code = self.get_mastr_code(exchange, product)
        api_master_code = instrument['future']['relatedInstr']
        assert master_code == api_master_code
def test_peer_server_init(self):
    """PeerServer can be constructed, served, and stopped without error."""
    wallet = Wallet()
    table = {'woo': 'hoo'}
    server = PeerServer(socket_id=_socket('tcp://127.0.0.1:19999'),
                        event_address=_socket('tcp://127.0.0.1:19888'),
                        wallet=wallet,
                        table=table,
                        ctx=self.ctx,
                        linger=100,
                        poll_timeout=100)

    loop = asyncio.get_event_loop()
    loop.run_until_complete(
        asyncio.gather(server.serve(), stop_server(server, 0.1)))
def start(self, async_func_list=None):
    """No functions given --> data pass through only; else apply function on
    data before forwarding.

    N publishers to 1 sub; proxy 1 sub to 1 pub; publish to M subscribers.

    Args:
        async_func_list: optional iterable of zero-argument coroutine
            functions that process in-flight data; when omitted, messages are
            proxied verbatim via zmq.proxy.
    """
    # make sure pub / sub is initialised
    if not self.pub_socket or not self.sub_socket:
        print("Both pub and sub needs to be initiliased and set to bind")
        print("Pub: {}".format(self.pub_socket))
        print("Sub: {}".format(self.sub_socket))
        sys.exit()

    # apply function to data to passing through data
    if async_func_list:
        import asyncio

        # capture ZeroMQ errors; ZeroMQ using asyncio doesn't print out errors
        try:
            # asyncio.wait() no longer accepts bare coroutines (deprecated in
            # 3.8, removed in 3.11); gather() awaits them all the same way.
            asyncio.get_event_loop().run_until_complete(
                asyncio.gather(*[func() for func in async_func_list]))
        except Exception:
            print("Error with async function")
            logging.error(traceback.format_exc())
            print()
        finally:
            # TODO disconnect pub/sub
            pass
    # don't duplicate the message, just pass through
    else:
        print("Try: Proxy... CONNECT!")
        zmq.proxy(self.pub_socket, self.sub_socket)
        print("CONNECT successful!")
def test_stamps_too_few_doesnt_break_network(self):
    """Submitting a contract with too few stamps must not take the network
    down: a follow-up transaction can still be sent afterwards."""
    code = '''
state = Hash()

@construct
def seed():
    state['thiskey', 'current'] = 'jeff'
    state['thiskey', 'next'] = 'stu'

@export
def testing(value):
    state['thiskey', 'current'] = 'tejas'
    state['thiskey', 'next'] = value
'''
    stu = Wallet()
    o = Orchestrator(1, 2, self.ctx)

    block_0 = []
    # Deliberately underfunded: a single stamp for a contract submission.
    block_0.append(
        o.make_tx(contract='submission',
                  function='submit_contract',
                  kwargs={
                      'name': 'con_jeff',
                      'code': code
                  },
                  sender=stu,
                  stamps=1))

    block_1 = []
    block_1.append(
        o.make_tx(contract='con_jeff',
                  function='testing',
                  kwargs={'value': 'moomoo'},
                  sender=stu))

    async def test():
        await o.start_network
        await send_tx_batch(o.masternodes[0], block_0)
        await asyncio.sleep(2)
        # The network must still accept work after the failed submission.
        await send_tx_batch(o.masternodes[0], block_1)
        await asyncio.sleep(2)

    loop = asyncio.get_event_loop()
    loop.run_until_complete(test())
def test_other_peers_add_new_nodes_when_join_event_occurs_ipc(self):
    """A join handled by N1 is re-broadcast as an event, and N2 (subscribed to
    N1's events) adds the new node to its own table."""
    # N3 runs discovery server and pings N1
    # N1 checks to see if N3 is valid, and if so, adds to their table and pings N2 about the new join

    # Create Network service
    w1 = Wallet()
    p1 = Network(wallet=w1, ctx=self.ctx, socket_base='ipc:///tmp/n1')

    # Create Network service
    w2 = Wallet()
    p2 = Network(wallet=w2, ctx=self.ctx, socket_base='ipc:///tmp/n2')
    # N2 listens to N1's event feed.
    p2.peer_service.event_service.add_subscription(_socket('ipc:///tmp/n1/events'))

    # Create Discovery Server
    w3 = Wallet()
    d = DiscoveryServer(wallet=w3,
                        socket_id=_socket('ipc:///tmp/n3/discovery'),
                        pepper=PEPPER.encode(),
                        ctx=self.ctx,
                        poll_timeout=2000,
                        linger=2000)

    # TCP takes a bit longer to bind and is prone to dropping messages...
    sleep(1)

    # Construct the join RPC message
    join_message = ['join', (w3.verifying_key().hex(), 'ipc:///tmp/n3')]
    join_message = json.dumps(join_message).encode()

    tasks = asyncio.gather(
        p1.peer_service.start(),
        p2.peer_service.start(),
        d.serve(),
        # Send the join to N1; the event should propagate to N2.
        services.get(_socket('ipc:///tmp/n1/peers'), msg=join_message, ctx=self.ctx, timeout=1000),
        stop_server(p1.peer_service, 2),
        stop_server(p2.peer_service, 2),
        stop_server(d, 2),
    )

    loop = asyncio.get_event_loop()
    loop.run_until_complete(tasks)

    # N2 learned of N3 purely through N1's join event.
    self.assertTrue(w3.verifying_key().hex() in p2.peer_service.table)
def aiomain(*, shutdown, barrier, source, outfd, track, nthread):
    """Worker entry point: run `nthread` request coroutines sharing one drain.

    Keyword Args:
        shutdown: handle watched by stop() to terminate the run.
        barrier: threading-style barrier used to line up sibling workers
            before any of them starts serving.
        source: upstream endpoint handed to each aioreq worker.
        outfd: endpoint the PUSH drain socket binds to.
        track: tracking object forwarded to the workers.
        nthread: number of concurrent aioreq coroutines.
    """
    context = zmq.asyncio.Context()
    drain = context.socket(zmq.PUSH)
    drain.setsockopt(zmq.SNDHWM, REQ_HWM)
    drain.bind(outfd)
    # Shared lock handed to every worker (presumably guards the drain socket —
    # confirm in aioreq).
    lock = asyncio.Lock()
    args = [aioreq(context, source, track, drain, lock) for _ in range(nthread)]
    args.append(stop(shutdown))
    loop = asyncio.get_event_loop()
    # Wait until all siblings are bound before any starts working.
    barrier.wait()
    try:
        loop.run_until_complete(asyncio.gather(*args))
    except StopAsyncIteration:
        # stop() signals end-of-run by raising; halt the loop cleanly.
        loop.stop()
    context.destroy(linger=0)
    loop.close()
def test_pub_sub_single_socket(self):
    """Messages published on one inproc socket arrive tagged with their source."""
    publisher = self.ctx.socket(zmq.PUB)
    publisher.bind('inproc://test1')

    service = SubscriptionService(ctx=self.ctx)
    service.add_subscription(SocketStruct.from_string('inproc://test1'))

    loop = asyncio.get_event_loop()
    loop.run_until_complete(asyncio.gather(
        service.serve(),
        publisher.send(b'howdy'),
        publisher.send(b'howdy2'),
        stop_server(service, 0.1),
    ))

    self.assertListEqual(service.received,
                         [(b'howdy', 'inproc://test1'),
                          (b'howdy2', 'inproc://test1')])
def __init__(
        self,
        file_prefix: str,
        ip: str,
        port: int,
        subscriber: AsyncSubscriber,
        writer,
        file_ext: str,
        name: str,
        folder: str = "",
        file_enumerator: str = None,
        filesize_lim: int = None,
        loop=None,
):
    """Set up a file-writing subscriber: configure the parent file writer,
    instantiate the subscription, and start the run loop.

    Args:
        file_prefix (str): Prefix for output file names.
        ip (str): Address the subscriber connects to.
        port (int): Port the subscriber connects to.
        subscriber (AsyncSubscriber): Subscriber *class* — it is called below
            with ip/port/loop/logger, not used as a ready instance.
        writer (TYPE): Writer object forwarded to the parent class.
        file_ext (str): Extension of the output files.
        name (str): Child-logger name for this instance.
        folder (str, optional): Output folder forwarded to the parent class.
        file_enumerator (str, optional): Forwarded to the parent class.
        filesize_lim (int, optional): Forwarded to the parent class.
        loop (None, optional): asyncio event loop; defaults to the current one.
    """
    self.log = sslogger.getChild(name)
    super().__init__(
        file_prefix=file_prefix,
        writer=writer,
        folder=folder,
        file_enumerator=file_enumerator,
        filesize_lim=filesize_lim,
        file_ext=file_ext,
    )
    self._loop = loop or asyncio.get_event_loop()
    # Instantiate the subscriber bound to our loop and logger.
    self._subscriber = subscriber(ip=ip,
                                  port=port,
                                  loop=self._loop,
                                  logger=self.log,
                                  name="mainsub")
    self._running = False
    self.stopping = False
    # Begin the consume/write loop immediately.
    self._task = self._loop.create_task(self.run())
def test_find_node_gets_node_from_self_if_asked_from_self_and_has_it_as_peer_ipc(
        self):
    """A lookup against our own peer service resolves VKs already in our table."""
    my_wallet = Wallet()
    network = Network(wallet=my_wallet, ctx=self.ctx, socket_base='ipc:///tmp')

    peer_wallet = Wallet()
    peer_vk = peer_wallet.verifying_key().hex()
    # Seed the local table so no remote request is required.
    network.peer_service.table[peer_vk] = 'ipc:///tmp123'

    async def lookup():
        return await network.find_node(network.peer_service_address, peer_vk)

    result = asyncio.get_event_loop().run_until_complete(lookup())
    self.assertEqual(result.get(peer_vk), 'ipc:///tmp123')
def test_secure_request_returns_result(self):
    """A secure (Curve-authenticated) request to a router service returns the
    processor's reply to the caller."""
    authenticator = authentication.SocketAuthenticator(
        client=ContractingClient(), ctx=self.ctx)

    w = Wallet()
    w2 = Wallet()

    # Both keys must be whitelisted before the secure handshake can succeed.
    authenticator.add_verifying_key(w.verifying_key)
    authenticator.add_verifying_key(w2.verifying_key)
    authenticator.configure()

    class MockProcessor(router.Processor):
        async def process_message(self, msg):
            return {'whats': 'good'}

    m = router.Router(socket_id='tcp://127.0.0.1:10000',
                      ctx=self.ctx,
                      linger=2000,
                      poll_timeout=50,
                      secure=True,
                      wallet=w)
    m.add_service('something', MockProcessor())

    async def get():
        r = await router.secure_request(msg={'hello': 'there'},
                                        service='something',
                                        wallet=w2,
                                        vk=w.verifying_key,
                                        ip='tcp://127.0.0.1:10000',
                                        ctx=self.ctx)
        return r

    tasks = asyncio.gather(
        m.serve(),
        get(),
        stop_server(m, 1),
    )

    loop = asyncio.get_event_loop()
    res = loop.run_until_complete(tasks)

    # res[1] is the return value of get().
    self.assertDictEqual(res[1], {'whats': 'good'})
    authenticator.authenticator.stop()
async def discover_nodes(ip_list, pepper: bytes, ctx: zmq.Context, timeout=1000, retries=10, debug=True):
    """Ping every address in ip_list, sweeping repeatedly until someone answers.

    Args:
        ip_list: candidate socket ids to ping.
        pepper (bytes): network pepper the peers must answer with.
        ctx (zmq.Context): shared ZMQ context.
        timeout: per-ping timeout in milliseconds.
        retries: number of full sweeps before giving up.
        debug: when True, log records propagate to parent handlers.

    Returns:
        dict: mapping of str(IP) -> VK hex; IPs whose ping returned no VK are
        omitted. Empty if every sweep failed.
    """
    nodes_found = {}
    one_found = False
    retries_left = retries

    log = get_logger('DiscoverNodes')
    log.propagate = debug

    log.info([str(ip) for ip in ip_list])

    while not one_found and retries_left > 0:
        # One ping coroutine per candidate, executed concurrently.
        tasks = [
            ping(socket_id=ip, pepper=pepper, ctx=ctx, timeout=timeout)
            for ip in ip_list
        ]
        tasks = asyncio.gather(*tasks)
        loop = asyncio.get_event_loop()

        log.info('Sending pings to {} nodes.'.format(len(ip_list)))

        # Works both inside an already-running loop and standalone.
        if loop.is_running():
            results = await asyncio.ensure_future(tasks)
        else:
            results = loop.run_until_complete(tasks)

        for res in results:
            ip, vk = res
            if vk is not None:
                nodes_found[str(ip)] = vk.hex()
                log.info(f'Found {ip} with VK {vk}')
                # One responder is enough to stop retrying.
                one_found = True

        if not one_found:
            retries_left -= 1
            # NOTE(review): message typo — 'retried' should read 'retries'.
            log.info(
                'No one discovered... {} retried left.'.format(retries_left))

    # Returns mapping of IP -> VK. VKs that return None are not stored in the dictionary.
    return nodes_found
async def refresh(self):
    """Reconcile the socket map with the current contact list: drop sockets of
    departed nodes, then look up and record sockets for every known peer."""
    pb_nodes = set(self.contacts.delegates + self.contacts.masternodes)

    self.log.info(f'Finding these nodes: {pb_nodes}')

    # Never look ourselves up.
    try:
        pb_nodes.remove(self.wallet.verifying_key().hex())
    except KeyError:
        pass

    current_nodes = set(self.sockets.keys())

    # Delete / remove old nodes
    to_del = self.old_nodes(pb_nodes, current_nodes)
    for node in to_del:
        self.remove_node(node)

    # Add new nodes
    # to_add = self.new_nodes(pb_nodes, current_nodes)

    coroutines = [self.find_node(m) for m in pb_nodes]

    tasks = asyncio.gather(*coroutines)
    loop = asyncio.get_event_loop()

    # Works both inside an already-running loop and standalone.
    if loop.is_running():
        results = await asyncio.ensure_future(tasks)
    else:
        results = loop.run_until_complete(tasks)

    for r in results:
        self.log.info(r)
        if r is not None:
            _r = json.loads(r)

            # NOTE(review): `break` abandons all remaining results on the
            # first empty payload — was `continue` intended? Confirm.
            if len(_r) == 0:
                break

            # Each payload is expected to hold a single vk -> socket pair.
            vk, socket = [(k, v) for k, v in _r.items()][0]
            self.log.info(f'Found {vk} : {socket}')
            self.sockets.update({vk: socket})

    self.log.info('Done finding.')
def test_secure_request_sends_as_service(self):
    """A secure request arriving at a raw JSONAsyncInbox is delivered as the
    full envelope: service name plus message."""
    authenticator = authentication.SocketAuthenticator(
        client=ContractingClient(), ctx=self.ctx)

    w = Wallet()
    w2 = Wallet()

    # Both keys must be whitelisted before the secure handshake can succeed.
    authenticator.add_verifying_key(w.verifying_key)
    authenticator.add_verifying_key(w2.verifying_key)
    authenticator.configure()

    m = router.JSONAsyncInbox(socket_id='tcp://127.0.0.1:10000',
                              ctx=self.ctx,
                              linger=2000,
                              poll_timeout=50,
                              secure=True,
                              wallet=w)

    async def get():
        r = await router.secure_request(msg={'hello': 'there'},
                                        service='something',
                                        wallet=w2,
                                        vk=w.verifying_key,
                                        ip='tcp://127.0.0.1:10000',
                                        ctx=self.ctx)
        return r

    tasks = asyncio.gather(
        m.serve(),
        get(),
        stop_server(m, 1),
    )

    loop = asyncio.get_event_loop()
    res = loop.run_until_complete(tasks)

    # Unlike a Router, the inbox does not unwrap the service envelope.
    self.assertDictEqual(res[1], {
        'service': 'something',
        'msg': {
            'hello': 'there'
        }
    })
def test_add_seat_motion_works(self):
    """Submitting an introduce_motion vote to election_house completes without
    error on a freshly started 2x2 network."""
    mns, dls = make_network(2, 2, self.ctx)

    async def test():
        await make_start_awaitable(mns, dls)
        # Masternode 0 votes to introduce motion 2 on the masternodes policy.
        await send_tx(mns[1], mns + dls,
                      contract='election_house',
                      function='vote_policy',
                      kwargs={
                          'policy': 'masternodes',
                          'value': ('introduce_motion', 2)
                      },
                      sender=mns[0].wallet)
        #await send_tx(mns[1], mns + dls, contract='testing', function='test', sender=Wallet())
        #await send_tx(mns[1], mns + dls, contract='testing', function='test', sender=Wallet())

    loop = asyncio.get_event_loop()
    loop.run_until_complete(test())

# def test_introduce_motion_remove_seat_works_and_sets_position_and_motion_opened(self):
#     self.client.submit(masternodes, constructor_args={
#         'initial_masternodes': [1, 2, 3],
#     })
#
#     mn_contract = self.client.get_contract('masternodes')
#
#     mn_contract.quick_write('S', 'open_seats', 1)
#
#     env = {'now': Datetime._from_datetime(dt.today() + td(days=7))}
#
#     mn_contract.run_private_function(
#         f='introduce_motion',
#         position=3,
#         arg=None,
#         environment=env
#     )
#
#     self.assertEqual(mn_contract.quick_read('S', 'current_motion'), 3)
#     self.assertEqual(mn_contract.quick_read('S', 'motion_opened'), env['now'])
def test_gather_work_waits_for_all(self):
    """gather_transaction_batches blocks until the expected batch count arrives."""
    batches = {}

    async def produce():
        batches['1'] = 123
        await asyncio.sleep(0.1)
        batches['3'] = 678
        await asyncio.sleep(0.5)
        batches['x'] = 'zzz'

    loop = asyncio.get_event_loop()
    _, collected = loop.run_until_complete(asyncio.gather(
        produce(),
        work.gather_transaction_batches(batches, expected_batches=3, timeout=5)))

    # All three batches fit inside the 5s timeout.
    self.assertListEqual([123, 678, 'zzz'], collected)
def test_gather_past_timeout_returns_current_work(self):
    """When the timeout elapses first, only batches received so far come back."""
    batches = {}

    async def produce():
        batches['1'] = 123
        await asyncio.sleep(0.1)
        batches['3'] = 678
        # This one lands after the 1s timeout and must be excluded.
        await asyncio.sleep(1.1)
        batches['x'] = 'zzz'

    loop = asyncio.get_event_loop()
    _, collected = loop.run_until_complete(asyncio.gather(
        produce(),
        work.gather_transaction_batches(batches, expected_batches=3, timeout=1)))

    self.assertListEqual([123, 678], collected)
def test_mock_processor_returns_custom_message(self):
    """A registered service's reply is routed back to the requesting DEALER."""
    srv = router.Router(socket_id='ipc:///tmp/router', ctx=self.ctx, linger=50)

    class MockProcessor(router.Processor):
        async def process_message(self, msg):
            return {'whats': 'good'}

    srv.add_service('test', MockProcessor())

    async def ask(payload):
        raw = encode(payload).encode()
        dealer = self.ctx.socket(zmq.DEALER)
        dealer.connect('ipc:///tmp/router')
        await dealer.send(raw)
        reply = await dealer.recv()
        return decode(reply)

    request = {'service': 'test', 'msg': {'howdy': 'there'}}

    results = asyncio.get_event_loop().run_until_complete(asyncio.gather(
        srv.serve(),
        ask(request),
        stop_server(srv, 1),
    ))

    self.assertDictEqual(results[1], {'whats': 'good'})
def test_secure_send_receives_messages(self):
    """secure_send (fire-and-forget) lands the payload in the target service's
    queue processor."""
    authenticator = authentication.SocketAuthenticator(
        client=ContractingClient(), ctx=self.ctx)

    w = Wallet()
    w2 = Wallet()

    # Both keys must be whitelisted before the secure handshake can succeed.
    authenticator.add_verifying_key(w.verifying_key)
    authenticator.add_verifying_key(w2.verifying_key)
    authenticator.configure()

    m = router.Router(socket_id='tcp://127.0.0.1:10000',
                      ctx=self.ctx,
                      linger=2000,
                      poll_timeout=50,
                      secure=True,
                      wallet=w)

    q = router.QueueProcessor()
    m.add_service('something', q)

    async def get():
        await router.secure_send(msg={'hello': 'there'},
                                 service='something',
                                 wallet=w2,
                                 vk=w.verifying_key,
                                 ip='tcp://127.0.0.1:10000',
                                 ctx=self.ctx)

    tasks = asyncio.gather(
        m.serve(),
        get(),
        stop_server(m, 1),
    )

    loop = asyncio.get_event_loop()
    loop.run_until_complete(tasks)

    self.assertEqual(q.q[0], {'hello': 'there'})
    authenticator.authenticator.stop()
def test_find_node_requests_from_others_and_returns_key_if_they_have_it_ipc(
        self):
    """find_node directed at another peer's address resolves that peer's VK."""
    # Create Network service
    w1 = Wallet()
    n1 = NetworkParameters(peer_ipc='peers1', event_ipc='events1', discovery_ipc='discovery1')
    p1 = Network(wallet=w1, ctx=self.ctx, socket_base='ipc:///tmp', params=n1)

    # Create Network service
    w2 = Wallet()
    # NOTE(review): n1 uses event_ipc but n2 passes a string to event_port —
    # confirm this keyword is intentional.
    n2 = NetworkParameters(peer_ipc='peers2', event_port='events2')
    p2 = Network(wallet=w2, ctx=self.ctx, socket_base='ipc:///tmp', params=n2)

    async def get():
        return await p1.find_node(_socket('ipc:///tmp/peers2'),
                                  w2.verifying_key().hex())

    async def stop(n: Network, s):
        await asyncio.sleep(s)
        n.peer_service.stop()

    tasks = asyncio.gather(
        p1.peer_service.start(),
        p2.peer_service.start(),
        get(),
        stop(p1, 0.3),
        stop(p2, 0.3),
    )

    loop = asyncio.get_event_loop()
    res = loop.run_until_complete(tasks)

    # res[2] is get()'s return value.
    self.assertEqual(res[2].get(w2.verifying_key().hex()), 'ipc:///tmp')
def test_find_node_fails_if_cant_find_and_retries_are_up(self):
    """find_node returns None for an unknown VK once its retry budget runs out."""
    # Create Network service
    w1 = Wallet()
    n1 = NetworkParameters(peer_port=10001, event_port=10002)
    p1 = Network(wallet=w1, ctx=self.ctx, socket_base='tcp://127.0.0.1', params=n1)

    # Create Network service
    w2 = Wallet()
    n2 = NetworkParameters(peer_port=10003, event_port=10004)
    p2 = Network(wallet=w2, ctx=self.ctx, socket_base='tcp://127.0.0.1', params=n2)

    # w3 never joins, so its VK is unresolvable anywhere on the network.
    w3 = Wallet()

    async def get():
        return await p1.find_node(_socket('tcp://127.0.0.1:10003'),
                                  w3.verifying_key().hex(),
                                  retries=1)

    async def stop(n: Network, s):
        await asyncio.sleep(s)
        n.peer_service.stop()

    tasks = asyncio.gather(
        p1.peer_service.start(),
        p2.peer_service.start(),
        get(),
        stop(p1, 0.3),
        stop(p2, 0.3),
    )

    loop = asyncio.get_event_loop()
    res = loop.run_until_complete(tasks)

    # res[2] is get()'s return value.
    self.assertIsNone(res[2])
def test_get_latest_block_height(self):
    """BlockFetcher reads the latest block height from a serving BlockServer."""
    wallet = Wallet()
    server = BlockServer(socket_base='tcp://127.0.0.1',
                         wallet=wallet,
                         ctx=self.ctx,
                         linger=500,
                         poll_timeout=500,
                         driver=FakeTopBlockManager(101, 'abcd'))

    fetcher = BlockFetcher(wallet=Wallet(), ctx=self.ctx)

    loop = asyncio.get_event_loop()
    results = loop.run_until_complete(asyncio.gather(
        server.serve(),
        fetcher.get_latest_block_height(
            cilantro_ee.sockets.struct._socket('tcp://127.0.0.1:10004')),
        stop_server(server, 0.2),
    ))

    # The fake driver reports height 101.
    self.assertEqual(results[1], 101)
def test_peer_table_updated_on_join_command_ipc(self):
    """A join RPC sent to the peer service adds the joining node (validated
    via its discovery server) to the peer table."""
    # Network params issue
    # Only "directory already exists" is expected here; the original bare
    # `except: pass` also hid PermissionError and even KeyboardInterrupt.
    os.makedirs('/tmp/n1', exist_ok=True)
    os.makedirs('/tmp/n2', exist_ok=True)

    w1 = Wallet()
    p1 = Network(wallet=w1, socket_base='ipc:///tmp/n1', ctx=self.ctx)

    w2 = Wallet()
    d = DiscoveryServer(wallet=w2,
                        socket_id=_socket('ipc:///tmp/n2/discovery'),
                        pepper=PEPPER.encode(),
                        ctx=self.ctx,
                        linger=200)

    # 1. start network
    # 2. start discovery of other side
    # 3. send join request
    # 4. check to see if the data has been added
    join_message = ['join', (w2.verifying_key().hex(), 'ipc:///tmp/n2')]
    join_message = json.dumps(join_message).encode()

    tasks = asyncio.gather(
        p1.peer_service.serve(),
        d.serve(),
        services.get(_socket('ipc:///tmp/n1/peers'), msg=join_message, ctx=self.ctx, timeout=1000),
        stop_server(p1.peer_service, 0.3),
        stop_server(d, 0.3))

    loop = asyncio.get_event_loop()
    loop.run_until_complete(tasks)

    self.assertEqual(p1.peer_service.table[w2.verifying_key().hex()],
                     'ipc:///tmp/n2')
def test_get_latest_block_height(self):
    """BlockServer answers a signed LATEST_BLOCK_HEIGHT_REQUEST with the
    driver's current height."""
    w = Wallet()
    m = BlockServer(w, 'tcp://127.0.0.1', self.ctx, linger=500, poll_timeout=500, driver=self.t)

    self.t.set_latest_block_num(555)

    async def get(msg):
        # Raw DEALER request straight at the server's port.
        socket = self.ctx.socket(zmq.DEALER)
        socket.connect('tcp://127.0.0.1:10004')
        await socket.send(msg)
        res = await socket.recv()
        return res

    message = Message.get_signed_message_packed_2(
        wallet=w,
        msg_type=MessageType.LATEST_BLOCK_HEIGHT_REQUEST,
        timestamp=int(time.time()))

    tasks = asyncio.gather(
        m.serve(),
        get(message),
        stop_server(m, 0.2),
    )

    loop = asyncio.get_event_loop()
    res = loop.run_until_complete(tasks)

    msg_type, msg, sender, timestamp, is_verified = Message.unpack_message_2(
        res[1])

    self.assertEqual(msg.blockHeight, 555)