async def __server_socket_recv_loop(self):
    """Receive strings from the server socket until the interrupt flag is set.

    A literal 'PING' is answered by the ping handler; every other payload is
    forwarded to the thread message sender. Handlers are scheduled as
    fire-and-forget tasks so the receive loop itself never blocks on them.
    """
    while not SERVER_SOCKET_RECV_LOOP_INTERRUPTED:
        text = await self.__server_socket.recv_string()
        if text == 'PING':
            handler = self.__handle_ping()
        else:
            handler = self._send_message_from_thread(text)
        asyncio.ensure_future(handler)
async def run(self):
    """Start the service: register the two Deribit accounts, open one
    websocket publisher task per account, and begin serving requests.

    If the service is already started, only an error is logged.
    """
    if self.state == ServiceState.started:
        self.logger.error('tried to run service, but state is %s' %
                          self.state)
    else:
        self.state = ServiceState.started
        # init account info
        # NOTE(review): accounts are appended to the module-level
        # ``accounts`` list; the state check above is the only guard
        # against duplicate registration.
        quarterly = CryptoTradingAccount(
            DERIBIT_ACCOUNT_ID, Broker.deribit_dma, DERIBIT_ACCOUNT_ID, '',
            MarketDataApi.deribit_md_websocket,
            TradeDataApi.deribit_td_websocket, DERIBIT_CLIENT_ID,
            DERIBIT_CLIENT_SECRET)
        accounts.append(quarterly)
        n_quarterly = CryptoTradingAccount(
            N_DERIBIT_ACCOUNT_ID, Broker.deribit_dma, N_DERIBIT_ACCOUNT_ID,
            '', MarketDataApi.deribit_md_websocket,
            TradeDataApi.deribit_td_websocket, N_DERIBIT_CLIENT_ID,
            N_DERIBIT_CLIENT_SECRET)
        accounts.append(n_quarterly)
        # create websocket for every account
        for account in accounts:
            asyncio.ensure_future(self.pub_msg(account))
        # fetch account positions
        # requests[account.id] = queue.Queue()
        # requests[account.id].put({'method': 'get_positions', 'params': {'currency': 'BTC', 'kind': 'future'}})
        asyncio.ensure_future(self.on_request())
async def handle_loop(self):
    """Forever read frames from the socket and dispatch them.

    Frames are decoded as UTF-8; 'PING' goes to the ping handler, anything
    else to the generic message handler. Dispatch is fire-and-forget so a
    slow handler cannot stall the read loop.
    """
    while True:
        frame = await self.socket.recv()
        decoded = frame.decode('utf-8')
        if decoded == 'PING':
            coro = self.__handle_ping()
        else:
            coro = self.__handle_message(decoded)
        asyncio.ensure_future(coro)
async def send_request(self, message: ServerMessage) -> object:
    """Assign a fresh request id to *message*, send it, and wait for the
    matching response to show up in ``self.responses``.

    Polls once a second; the response entry is removed before returning.

    Raises:
        ValueError: if the message already carries a request id.
    """
    if message.request_id is not None:
        raise ValueError('Message can`t have request_id before it is scheduled')
    request_id = message.request_id = self.request_next_id
    self.request_next_id += 1
    asyncio.ensure_future(self.send_message(message))
    # Busy-poll until the response handler files our reply.
    while request_id not in self.responses:
        await asyncio.sleep(1)
    return self.responses.pop(request_id)
async def run(self):
    """Start the service once and spin up all worker loops; if already
    started, only log an error."""
    if self.state == ServiceState.started:
        self.logger.error('tried to run service, but state is %s' % self.state)
        return
    self.state = ServiceState.started
    # Kick off every worker as an independent background task.
    for worker in (self.process_msg(), self.sub_msg_md(), self.sub_msg_td(),
                   self.balance_positions(), self.check_funding()):
        asyncio.ensure_future(worker)
async def send_request_to_server(self, message: ServerMessage) -> object:
    """Assign a fresh request id to *message*, send it to the server, and
    poll until the matching response appears in ``self.__responses``.

    Raises:
        ValueError: if the message already carries a request id.
    """
    if message.request_id is not None:
        raise ValueError('Message can`t have request_id before it is scheduled')
    request_id = message.request_id = self.__request_next_id
    # BUG FIX: this previously wrote ``self.request_next_id`` while reading
    # the name-mangled ``self.__request_next_id`` — the counter never
    # advanced, so every request was issued with the same id and responses
    # could be mixed up. Increment the attribute we actually read.
    self.__request_next_id += 1
    asyncio.ensure_future(self.send_message_to_server(message))
    # you should await self.__responses[request_id] which should be a task,
    # which you resolve somewhere else
    while request_id not in self.__responses:
        await asyncio.sleep(1)
    response = self.__responses[request_id]
    del self.__responses[request_id]
    return response
async def __handle_message(self, text: str):
    """Parse an incoming JSON message and route it.

    A message carrying a request id is a reply to one of our requests: its
    payload is filed under ``self.responses`` for the pending caller. All
    other messages go to the registered handler. Errors are logged and
    never propagated to the receive loop.
    """
    try:
        message_dict = json.loads(text)
        message = ServerMessage.fromJSON(message_dict)
        if message.request_id is not None:
            # Reply to a pending request: store the payload and stop here.
            self.responses[message_dict['requestId']] = message.payload
            return
        asyncio.ensure_future(self.on_message_handler(message))
    except Exception:
        logger.error("__handle_message Exception: '%s'" % traceback.format_exc())
async def find_quotes_gap(self, sym):
    """Inspect the cached quote for *sym* and, if the Deribit/OKEx price
    gap exceeds a configured threshold, kick off a gap trade.

    QUOTE_GAP entries are (gap, max_delta, max_time_to_expiry) tuples; the
    first matching entry triggers the trade and marks the symbol as
    gapped/trading so it is not traded again. Exceptions are logged only.
    """
    try:
        global deribit_balance, okex_balance
        v = quotes[sym]
        # Only consider symbols not already flagged as gapped or trading.
        if all((
                not v.get('gapped', False),
                not v.get('trading', False),
                # sym[-1] == 'C',
        )):
            # Seconds until expiry, parsed from the middle token of
            # e.g. 'BTC-27DEC19-...'.
            timedelta = time.mktime(
                time.strptime(sym.split('-')[1], '%d%b%y')) - time.time()
            delta = abs(v.get('delta', 1))
            if 'deribit' in v.keys() and 'okex' in v.keys():
                # NOTE(review): v['deribit'] / v['okex'] are indexed
                # [0]=bid and [2]=ask here — confirm against the quote
                # producer.
                # Case 1: Deribit bid above OKEx ask -> sell deribit side.
                if v['deribit'][0] and v['okex'][2]:
                    for (gap, d, t) in QUOTE_GAP:
                        if v['deribit'][0] - float(
                                v['okex']
                            [2]) >= gap and timedelta <= t and delta <= d:
                            self.logger.info(
                                '----------------------------------------------------------'
                            )
                            self.logger.info('%s -- gap: %.4f -- %s' %
                                             (sym, v['deribit'][0] -
                                              float(v['okex'][2]), str(v)))
                            self.logger.info(
                                'deribit: %s, okex: %s' %
                                (str(deribit_balance), str(okex_balance)))
                            v.update({'gapped': True, 'trading': True})
                            asyncio.ensure_future(
                                self.gap_trade(sym, copy.copy(v), False))
                            break
                # Case 2: OKEx bid above Deribit ask -> reversed direction.
                if v['deribit'][2] and v['okex'][0]:
                    for (gap, d, t) in QUOTE_GAP:
                        if float(v['okex'][0]) - v['deribit'][
                                2] >= gap and timedelta <= t and delta <= d:
                            self.logger.info(
                                '----------------------------------------------------------'
                            )
                            self.logger.info('%s -- gap: %.4f -- %s' %
                                             (sym, float(v['okex'][0]) -
                                              v['deribit'][2], str(v)))
                            self.logger.info(
                                'deribit: %s, okex: %s' %
                                (str(deribit_balance), str(okex_balance)))
                            v.update({'gapped': True, 'trading': True})
                            asyncio.ensure_future(
                                self.gap_trade(sym, copy.copy(v), True))
                            break
    except Exception as e:
        self.logger.exception(e)
def test_auth_validate_default(self):
    """Two Ironhouse peers that have exchanged public keys authenticate
    successfully with the default validation callback."""
    port = 5523

    async def run_check():
        outcome = await self.dih.authenticate(self.a['curve_key'],
                                              '127.0.0.1', port)
        self.assertTrue(outcome)
        self.aih.cleanup()
        self.dih.cleanup()
        self.loop.call_soon_threadsafe(self.loop.stop)

    self.a = genkeys(
        '5664ec7306cc22e56820ae988b983bdc8ebec8246cdd771cfee9671299e98e3c')
    self.aih = Ironhouse(self.a['sk'], wipe_certs=True, auth_port=port,
                         keyname='a')
    self.aih.setup_secure_server()
    self.aih.add_public_key(self.curve_public_key)
    self.dih = Ironhouse(self.sk, wipe_certs=True)
    self.dih.setup_secure_server()
    self.dih.add_public_key(self.a['curve_key'])
    self.loop.run_until_complete(asyncio.ensure_future(run_check()))
def test_auth_validate_fail_timeout(self):
    """A rejecting validation callback on the server side must surface to
    the client as a 'no_reply' result, and must have been invoked."""
    port = 5523
    self.validated = False

    async def run_check():
        outcome = await self.ironhouse.authenticate(
            self.fake['curve_key'], '127.0.0.1', port)
        self.assertEqual(outcome, 'no_reply')
        # The fake server's validator must have run.
        self.assertTrue(self.validated)
        self.ironhouse.cleanup()
        self.fake_ironhouse.cleanup()
        self.loop.call_soon_threadsafe(self.loop.stop)

    def rejecting_validator(vk):
        # Record the invocation, then reject.
        self.validated = True
        return False

    def vk_validator(vk):
        return vk == b'catastrophe'

    self.fake = genkeys(
        '7ae3fcfd3a9047adbec6ad11e5a58036df9934dc0746431d80b49d25584d7e78')
    self.fake_ironhouse = Ironhouse(self.fake['sk'], wipe_certs=True,
                                    auth_validate=vk_validator,
                                    auth_port=port, keyname='fake')
    self.fake_ironhouse.setup_secure_server()
    self.fake_ironhouse.add_public_key(self.curve_public_key)
    self.fake_ironhouse.auth_validate = rejecting_validator
    self.ironhouse.setup_secure_server()
    self.ironhouse.add_public_key(self.fake['curve_key'])
    self.ironhouse.auth_validate = vk_validator
    self.loop.run_until_complete(asyncio.ensure_future(run_check()))
async def sub_msg_md(self):
    """Consume market-data messages with a 5s receive timeout; on a
    timeout, cancel all open orders via the trade-data socket.

    Quote messages are queued on ``self.msg``. On any exception the
    coroutine logs and re-enters itself.
    """
    try:
        await asyncio.sleep(1)
        while self.state == ServiceState.started:
            # Race the receive against a 5s timeout; cancel it if it loses.
            task = asyncio.ensure_future(self.deribitmd.recv_string())
            done, pending = await asyncio.wait({task}, timeout=5)
            for t in pending:
                t.cancel()
            msg = json.loads(done.pop().result()) if done else {}
            if msg:
                if msg['type'] == 'quote':
                    await self.msg.put(msg)
            else:
                # No data within the timeout: assume the feed is down and
                # cancel all resting orders as a safety measure.
                self.logger.info('cant receive msg from future md')
                await self.deribittdreq.send_string(
                    json.dumps({
                        'accountid': DERIBIT_ACCOUNT_ID,
                        'method': 'cancel_all',
                        'params': {}
                    }))
                await self.deribittdreq.recv_string()
    except Exception as e:
        self.logger.exception(e)
        # NOTE(review): retry-by-recursion — deep failure cascades grow the
        # coroutine await chain; a loop-based retry may be safer.
        await self.sub_msg_md()
def test_cleanup(self):
    """Dropping the last reference to the Ironhouse instance while the
    loop is running must not raise."""

    async def drop_after_delay():
        await asyncio.sleep(0.1)
        del self.ironhouse
        self.loop.call_soon_threadsafe(self.loop.stop)

    self.loop.run_until_complete(asyncio.ensure_future(drop_after_delay()))
def test_authenticate_then_reject(self):
    """After a successful handshake, removing the peer's public key must
    leave that key flagged unauthorized."""
    port = 5523

    async def run_check():
        await self.ironhouse.authenticate(self.fake['curve_key'],
                                          '127.0.0.1', port)
        self.ironhouse.remove_public_key(self.fake['curve_key'])
        # Key remains tracked but must now be marked False.
        self.assertFalse(
            self.ironhouse.authorized_keys[self.fake['curve_key']])
        self.ironhouse.cleanup()
        self.fake_ironhouse.cleanup()
        self.loop.call_soon_threadsafe(self.loop.stop)

    self.fake = genkeys(
        '91f7021a9e8c65ca873747ae24de08e0a7acf58159a8aa6548910fe152dab3d8')
    self.fake_ironhouse = Ironhouse(self.fake['sk'], wipe_certs=True,
                                    auth_validate=auth_validate,
                                    auth_port=port, keyname='fake')
    self.fake_ironhouse.setup_secure_server()
    self.fake_ironhouse.add_public_key(self.curve_public_key)
    self.ironhouse.setup_secure_server()
    self.ironhouse.add_public_key(self.fake['curve_key'])
    self.loop.run_until_complete(asyncio.ensure_future(run_check()))
def test_auth_validate(self):
    """A passing custom validator lets authentication succeed, and the
    validator is actually invoked on the server side."""
    port = 5523
    self.validated = False

    async def send_async_sec():
        authorized = await self.ironhouse.authenticate(
            self.fake['curve_key'], '127.0.0.1', port)
        self.assertTrue(authorized)
        self.assertTrue(self.validated)
        self.ironhouse.cleanup()
        self.fake_ironhouse.cleanup()
        # FIX: stop the loop via call_soon_threadsafe, matching every
        # sibling test; a direct loop.stop() from inside a coroutine is
        # unsafe if the loop is driven from another thread.
        self.loop.call_soon_threadsafe(self.loop.stop)

    def auth_validate_fake(vk):
        # Record the invocation, then accept.
        self.validated = True
        return True

    def auth_validate(vk):
        return vk == 'b9284b28589523f055ae5b54c98b0b904a1df3b0be5d546d30208d0516e71aa0'

    self.fake = genkeys(
        '7ae3fcfd3a9047adbec6ad11e5a58036df9934dc0746431d80b49d25584d7e78')
    self.fake_ironhouse = Ironhouse(self.fake['sk'], wipe_certs=True,
                                    auth_validate=auth_validate,
                                    auth_port=port, keyname='fake')
    self.fake_ironhouse.create_from_public_key(self.curve_public_key)
    self.fake_ironhouse.auth_validate = auth_validate_fake
    self.fake_ironhouse.setup_secure_server()
    self.ironhouse.create_from_public_key(self.fake['curve_key'])
    self.ironhouse.auth_validate = auth_validate
    self.ironhouse.setup_secure_server()
    self.loop.run_until_complete(asyncio.ensure_future(send_async_sec()))
async def run(self):
    """Start the service once; spin up the data/state worker loops. Logs
    an error when the service is already started."""
    if self.state == ServiceState.started:
        self.logger.error('tried to run service, but state is %s' % self.state)
        return
    self.state = ServiceState.started
    for worker in (self.sub_msg_md(), self.sub_msg_td(),
                   self.get_current_order_state(),
                   self.get_current_positions_and_orders()):
        asyncio.ensure_future(worker)
def test_auth_invalid_public_key(self):
    """Authenticating against a malformed public key reports 'invalid'."""

    async def run_check():
        outcome = await self.ironhouse.authenticate(b'ack', '127.0.0.1',
                                                    1234)
        self.assertEqual(outcome, 'invalid')
        self.ironhouse.cleanup()
        self.loop.call_soon_threadsafe(self.loop.stop)

    self.ironhouse.setup_secure_server()
    self.loop.run_until_complete(asyncio.ensure_future(run_check()))
async def start(self): await self.network.start() # Start block server asyncio.ensure_future(self.nbn_inbox.serve()) # Catchup when joining the network if self.network.mn_seed is not None: await self.catchup( self.network_parameters.resolve(self.network.mn_seed, ServiceType.BLOCK_SERVER)) self.log.info(self.network.peers()) self.parameters.sockets.update(self.network.peers()) # Start block server #asyncio.ensure_future(self.nbn_inbox.serve()) self.running = True
def test_authenticate_fail(self):
    """Authenticating a key with no reachable server yields 'no_reply'."""

    async def run_check():
        outcome = await self.ironhouse.authenticate(
            b'A/c=Kn2)aHRI*>fK-{v*r^YCyXJ//3.CGQQC@A9J', '127.0.0.1')
        self.assertEqual(outcome, 'no_reply')
        self.ironhouse.cleanup()
        self.loop.call_soon_threadsafe(self.loop.stop)

    self.ironhouse.setup_secure_server()
    self.loop.run_until_complete(asyncio.ensure_future(run_check()))
def test_authenticate_self(self):
    """A node must be able to authenticate against its own public key."""

    async def run_check():
        outcome = await self.ironhouse.authenticate(self.curve_public_key,
                                                    '127.0.0.1')
        self.assertTrue(outcome)
        self.ironhouse.cleanup()
        self.loop.call_soon_threadsafe(self.loop.stop)

    self.ironhouse.setup_secure_server()
    self.loop.run_until_complete(asyncio.ensure_future(run_check()))
async def start(self): asyncio.ensure_future(self.router.serve()) # Get the set of VKs we are looking for from the constitution argument vks = self.constitution['masternodes'] + self.constitution['delegates'] for node in self.bootnodes.keys(): self.socket_authenticator.add_verifying_key(node) self.socket_authenticator.configure() # Use it to boot up the network await self.network.start(bootnodes=self.bootnodes, vks=vks) if not self.bypass_catchup: masternode_ip = None masternode = None if self.seed is not None: for k, v in self.bootnodes.items(): self.log.info(k, v) if v == self.seed: masternode = k masternode_ip = v else: masternode = self.constitution['masternodes'][0] masternode_ip = self.network.peers[masternode] self.log.info(f'Masternode Seed VK: {masternode}') # Use this IP to request any missed blocks await self.catchup(mn_seed=masternode_ip, mn_vk=masternode) # Refresh the sockets to accept new nodes self.socket_authenticator.refresh_governance_sockets() # Start running self.running = True
def _get_node_from_vk(cls, event_id, vk: str, timeout=3):
    """Resolve *vk* to a node via a DHT lookup and publish the outcome on
    the event socket as either a 'got_ip' or a 'not_found' event.

    Fire-and-forget: the lookup runs as a background task and nothing is
    returned to the caller.
    """

    async def coro():
        node = None
        if vk in VKBook.get_all():
            try:
                node, cached = await asyncio.wait_for(
                    cls.dht.network.lookup_ip(vk), timeout)
            # BUG FIX: previously a bare ``except:`` — that also swallows
            # asyncio.CancelledError (a BaseException since Python 3.8)
            # and KeyboardInterrupt, making the task uncancellable.
            # Catch Exception (covers asyncio.TimeoutError too).
            except Exception:
                log.notice('Did not find an ip for VK {}'.format(vk))
        if node:
            cls.event_sock.send_json({
                'event': 'got_ip',
                'event_id': event_id,
                'public_key': node.public_key.decode(),
                'ip': node.ip,
                'vk': vk
            })
        else:
            cls.event_sock.send_json({
                'event': 'not_found',
                'event_id': event_id
            })

    asyncio.ensure_future(coro())
async def run(self):
    """Start the service once: subscribe to both exchange feeds and keep
    the auth token refreshed. Logs an error if already started."""
    if self.state == ServiceState.started:
        self.logger.error('tried to run service, but state is %s' % self.state)
        return
    self.state = ServiceState.started
    # await self.sub_msg()
    for worker in (self.sub_msg_deribit(), self.sub_msg_okex(),
                   self.refresh_token()):
        asyncio.ensure_future(worker)
async def sub_msg_md(self):
    """Consume Deribit market-data quotes with a 5s receive timeout and
    maintain the global perpetual/future best-bid/ask snapshots.

    On a receive timeout, cancels all resting orders through the
    trade-data request socket. On any exception, logs and re-enters.
    """
    try:
        global deribit_margin, perpetual, future, can_place_order, if_order_cancelling, if_price_changing
        global current_order, future_size, perpetual_size
        await asyncio.sleep(1)
        while self.state == ServiceState.started:
            # Race the receive against a 5s timeout; cancel it if it loses.
            task = asyncio.ensure_future(self.deribitmd.recv_string())
            done, pending = await asyncio.wait({task}, timeout=5)
            for t in pending:
                t.cancel()
            msg = json.loads(done.pop().result()) if done else {}
            # msg = json.loads(await self.deribitmd.recv_string())
            if msg:
                if msg['type'] == 'quote':
                    # SECURITY(review): eval() of feed data before
                    # unpickling — only acceptable while the publisher is
                    # fully trusted.
                    quote = pickle.loads(eval(msg['data']))
                    # self.logger.info('++++ quote: {}'.format(quote))
                    if quote['sym'] == 'BTC-PERPETUAL':
                        perpetual = [
                            quote['bid_prices'][0], quote['bid_sizes'][0],
                            quote['ask_prices'][0], quote['ask_sizes'][0]
                        ]
                    elif quote['sym'] == SEASON_FUTURE:
                        future = [
                            quote['bid_prices'][0], quote['bid_sizes'][0],
                            quote['ask_prices'][0], quote['ask_sizes'][0]
                        ]
                    await self.find_quotes_gap()
            else:
                self.logger.info('cant receive msg from future md')
                # BUG FIX: zmq.asyncio socket calls return awaitables; the
                # original did not await send_string/recv_string here, so
                # the cancel_all request was never actually transmitted
                # (the sibling service's sub_msg_md awaits both).
                await self.deribittdreq.send_string(
                    json.dumps({
                        'accountid': N_DERIBIT_ACCOUNT_ID,
                        'method': 'cancel_all',
                        'params': {}
                    }))
                await self.deribittdreq.recv_string()
                if_order_cancelling = True
    except Exception as e:
        self.logger.exception(e)
        await self.sub_msg_md()
def _start_service(cls, sk):
    """Start the overlay service: bind the PUB event socket, boot the DHT,
    begin listening for commands, announce startup, and run the loop.

    Blocking: ``cls.loop.run_forever()`` does not return until the loop is
    stopped elsewhere.
    """
    ctx = zmq.asyncio.Context()
    cls.event_sock = ctx.socket(zmq.PUB)
    cls.event_sock.bind(cls.event_url)
    # TEST_NAME in the environment switches discovery to test mode.
    cls.discovery_mode = 'test' if os.getenv(
        'TEST_NAME') else 'neighborhood'
    cls.dht = DHT(sk=sk,
                  mode=cls.discovery_mode,
                  loop=cls.loop,
                  alpha=ALPHA,
                  ksize=KSIZE,
                  event_sock=cls.event_sock,
                  max_peers=MAX_PEERS,
                  block=False,
                  cmd_cli=False,
                  wipe_certs=True)
    cls._started = True
    # Command listener runs as a background task on the class loop.
    cls.listener_fut = asyncio.ensure_future(cls._listen_for_cmds())
    cls.event_sock.send_json({'event': 'service_started'})
    cls.loop.run_forever()
def test_secure_server(self):
    """setup_secure_server() must yield an async ZMQ context and a secured
    socket that a CURVE client can connect and send to."""
    ip = '127.0.0.1'
    port = 4523

    async def run_client():
        req = self.ironhouse.ctx.socket(zmq.REQ)
        req = self.ironhouse.secure_socket(req, self.secret,
                                           self.curve_public_key,
                                           self.curve_public_key)
        req.connect('tcp://{}:{}'.format(ip, port))
        req.send(self.vk.encode())
        req.close()
        self.ironhouse.cleanup()
        self.loop.call_soon_threadsafe(self.loop.stop)

    self.ironhouse.setup_secure_server()
    self.assertIsInstance(self.ironhouse.ctx, zmq.Context,
                          'asynchronous context created incorrectly')
    self.assertIsInstance(self.ironhouse.sec_sock, zmq.sugar.socket.Socket,
                          'unable to secure a socket')
    self.loop.run_until_complete(asyncio.ensure_future(run_client()))
def __init__(self, node_rpc_url, node_zerromq_url, logger,
             last_block_height=0, chain_tail=None,
             tx_handler=None, orphan_handler=None,
             before_block_handler=None, block_handler=None,
             after_block_handler=None, block_batch_handler=None,
             flush_app_caches_handler=None,
             synchronization_completed_handler=None,
             block_timeout=30,
             deep_sync_limit=20, backlog=0, mempool_tx=True,
             rpc_batch_limit=50, rpc_threads_limit=100, rpc_timeout=100,
             utxo_data=False,
             utxo_cache_size=1000000,
             skip_opreturn=True,
             block_cache_workers=4,
             block_preload_cache_limit=1000 * 1000000,
             block_preload_batch_size_limit=200000000,
             block_hashes_cache_limit=200 * 1000000,
             db_type=None, db=None,
             app_proc_title="Connector"):
    """Create a node connector and immediately schedule ``self.start()``
    on the current event loop.

    The *_handler callbacks are application hooks invoked at the matching
    stages of synchronization; cache/limit parameters size the internal
    block/tx caches (byte limits for preload/hash caches).
    """
    self.loop = asyncio.get_event_loop()

    # settings
    self.log = logger
    self.rpc_url = node_rpc_url
    self.app_proc_title = app_proc_title
    self.rpc_timeout = rpc_timeout
    self.rpc_batch_limit = rpc_batch_limit
    self.zmq_url = node_zerromq_url
    self.orphan_handler = orphan_handler
    self.block_timeout = block_timeout
    self.tx_handler = tx_handler
    self.skip_opreturn = skip_opreturn
    self.before_block_handler = before_block_handler
    self.block_handler = block_handler
    self.after_block_handler = after_block_handler
    self.block_batch_handler = block_batch_handler
    self.flush_app_caches_handler = flush_app_caches_handler
    self.synchronization_completed_handler = synchronization_completed_handler
    self.block_preload_batch_size_limit = block_preload_batch_size_limit
    self.deep_sync_limit = deep_sync_limit
    self.backlog = backlog
    self.mempool_tx = mempool_tx
    self.db_type = db_type
    self.db = db
    self.utxo_cache_size = utxo_cache_size
    self.block_cache_workers = block_cache_workers
    self.utxo_data = utxo_data
    self.chain_tail = list(chain_tail) if chain_tail else []

    # state and stats
    self.node_last_block = None
    self.sync_utxo = None
    self.uutxo = None
    self.cache_loading = False
    self.app_block_height_on_start = int(last_block_height) if int(
        last_block_height) else 0
    self.last_block_height = 0
    self.last_block_utxo_cached_height = 0
    self.deep_synchronization = False
    self.block_dependency_tx = 0  # counter of tx that have dependencies in block
    self.active = True
    self.get_next_block_mutex = False
    # active_block starts pre-resolved so the first block can be processed.
    self.active_block = asyncio.Future()
    self.active_block.set_result(True)
    self.last_zmq_msg = int(time.time())
    self.total_received_tx = 0
    self.total_received_tx_stat = 0
    self.blocks_processed_count = 0
    self.blocks_decode_time = 0
    self.blocks_download_time = 0
    self.blocks_processing_time = 0
    self.tx_processing_time = 0
    self.non_cached_blocks = 0
    self.total_received_tx_time = 0
    self.coins = 0
    self.op_return = 0
    self.destroyed_coins = 0
    self.preload_cached_total = 0
    self.preload_cached = 0
    self.preload_cached_annihilated = 0
    self.start_time = time.time()
    self.total_received_tx_last = 0
    self.start_time_last = time.time()
    self.batch_time = time.time()
    self.batch_load_utxo = 0
    self.batch_parsing = 0
    self.batch_handler = 0
    self.app_last_block = None

    # cache and system
    self.block_preload_cache_limit = block_preload_cache_limit
    self.block_hashes_cache_limit = block_hashes_cache_limit
    self.tx_cache_limit = 144 * 5000
    self.block_headers_cache_limit = 100 * 100000
    self.block_preload = Cache(max_size=self.block_preload_cache_limit,
                               clear_tail=False)
    self.block_hashes = Cache(max_size=self.block_hashes_cache_limit)
    self.block_hashes_preload_mutex = False
    self.tx_cache = MRU(self.tx_cache_limit)
    self.block_headers_cache = Cache(
        max_size=self.block_headers_cache_limit)
    self.block_txs_request = None
    self.connected = asyncio.Future()
    self.await_tx = list()
    self.missed_tx = list()
    self.await_tx_future = dict()
    self.add_tx_future = dict()
    self.get_missed_tx_threads = 0
    self.get_missed_tx_threads_limit = rpc_threads_limit
    self.tx_in_process = set()
    self.zmqContext = None
    self.tasks = list()
    self.log.info("Node connector started")
    # NOTE(review): the ``loop=`` keyword of ensure_future was removed in
    # Python 3.10 — confirm the supported interpreter version.
    asyncio.ensure_future(self.start(), loop=self.loop)
result = int(work_unit["operands"][0] / work_unit["operands"][1]) else: result = work_unit["operands"][0] - work_unit["operands"][1] await self.gatherer_zmocket.send_multipart([ "gatherer1".encode(), json.dumps({ "result": result, "work_unit": work_unit, "worker": self.name }).encode() ]) except: print("No work there: " + self.name) await asyncio.sleep(0.5) if __name__ == "__main__": worker_ports = [42421, 42422, 42423] workers = [] for worker_port in worker_ports: worker = Worker("worker-" + str(worker_port)) workers.append(worker) for worker in workers: asyncio.ensure_future(worker.pulling_work()) loop = asyncio.get_event_loop() loop.run_forever() loop.close()
def __init__(self, node_rpc_url, node_zerromq_url, logger,
             last_block_height=0, chain_tail=None,
             mempool_tx_list=None,
             tx_handler=None, orphan_handler=None,
             before_block_handler=None, block_handler=None,
             after_block_handler=None,
             block_timeout=30,
             deep_sync_limit=20, backlog=0, mempool_tx=True,
             rpc_batch_limit=20, rpc_threads_limit=100, rpc_timeout=100,
             preload=False):
    """Create a node connector and immediately schedule ``self.start()``
    on the current event loop.

    The *_handler callbacks are application hooks fired at the matching
    synchronization stages; ``preload`` enables block preloading.
    """
    self.loop = asyncio.get_event_loop()

    # settings
    self.log = logger
    self.rpc_url = node_rpc_url
    self.zmq_url = node_zerromq_url
    self.orphan_handler = orphan_handler
    self.block_timeout = block_timeout
    self.tx_handler = tx_handler
    self.before_block_handler = before_block_handler
    self.block_handler = block_handler
    self.after_block_handler = after_block_handler
    self.deep_sync_limit = deep_sync_limit
    self.backlog = backlog
    self.mempool_tx = mempool_tx
    self.chain_tail = list(chain_tail) if chain_tail else []
    self.mempool_tx_list = list(mempool_tx_list) if mempool_tx_list else []
    self.rpc_timeout = rpc_timeout
    self.batch_limit = rpc_batch_limit

    # state and stats
    self.node_last_block = None
    self.last_block_height = int(last_block_height) if int(
        last_block_height) else 0
    self.deep_synchronization = False
    self.block_dependency_tx = 0  # counter of tx that have dependencies in block
    self.active = True
    # Both mutex futures start pre-resolved so the first block proceeds.
    self.get_next_block_mutex = asyncio.Future()
    self.get_next_block_mutex.set_result(True)
    self.active_block = asyncio.Future()
    self.active_block.set_result(True)
    self.last_zmq_msg = int(time.time())
    self.total_received_tx = 0
    self.blocks_processed_count = 0
    self.blocks_decode_time = 0
    self.blocks_download_time = 0
    self.blocks_processing_time = 0
    self.total_received_tx_time = 0

    # cache and system
    self.preload = preload
    self.block_preload = Cache(max_size=50000)
    self.block_hashes_preload = Cache(max_size=50000)
    self.tx_cache = Cache(max_size=50000)
    self.block_cache = Cache(max_size=10000)
    self.block_txs_request = None
    self.connected = asyncio.Future()
    self.await_tx_list = list()
    self.missed_tx_list = list()
    self.await_tx_future = dict()
    self.add_tx_future = dict()
    self.get_missed_tx_threads = 0
    self.get_missed_tx_threads_limit = rpc_threads_limit
    self.tx_in_process = set()
    self.zmqContext = None
    self.tasks = list()
    self.log.info("Node connector started")
    # NOTE(review): the ``loop=`` keyword of ensure_future was removed in
    # Python 3.10 — confirm the supported interpreter version.
    asyncio.ensure_future(self.start(), loop=self.loop)
async def pub_msg(self, account):
    """Maintain the Deribit private websocket for *account*: authenticate,
    subscribe to portfolio/position channels, forward queued requests, and
    publish every relevant response to the ZMQ pub socket.

    Reconnects by calling itself on connection loss or error.
    """
    # get tx data from exchange socket, then store it and pub it to zmq
    try:
        async with websockets.connect(
                'wss://www.deribit.com/ws/api/v2') as websocket:
            self.logger.info(
                'Account %s connected to deribit websocket server' %
                account.id)
            # set heartbeats to keep alive
            await websocket.send(json.dumps(heartbeat))
            res = await websocket.recv()
            # auth
            auth['params']['client_id'] = account.api_public_key
            auth['params']['client_secret'] = account.api_private_key
            await websocket.send(json.dumps(auth))
            res = json.loads(await websocket.recv())
            tokens[account.id] = res['result']['access_token']
            # private subscribe
            private_subscribe['params']['channels'] = [
                'user.portfolio.{}'.format(SYMBOL),
                'user.changes.future.{}.raw'.format(SYMBOL)
            ]
            await websocket.send(json.dumps(private_subscribe))
            # it is very important here to use 'self.state' to control start/stop!!!
            lastheartbeat = time.time()
            while websocket.open and self.state == ServiceState.started:
                # check heartbeat to see if websocket is broken
                if time.time() - lastheartbeat > 15:
                    raise websockets.exceptions.ConnectionClosedError(
                        1003, 'Serverside heartbeat stopped.')
                # check request queue to send request
                if account.id in requests:
                    mq = requests[account.id]
                    if mq.qsize() > 0:
                        msg = mq.get()
                        # SECURITY(review): eval() resolves the request id
                        # from a module-level MSG_<METHOD>_ID constant; a
                        # plain dict lookup would avoid eval on queue data.
                        msg.update({
                            'jsonrpc': '2.0',
                            'method': 'private/' + msg['method'],
                            'id': eval('_'.join(
                                ('MSG', msg['method'].upper(), 'ID')))
                        })
                        # self.logger.info(msg)
                        await websocket.send(json.dumps(msg))
                        lastheartbeat = time.time()
                # then deal with every received msg
                task = asyncio.ensure_future(websocket.recv())
                done, pending = await asyncio.wait({task}, timeout=1)
                for t in pending:
                    t.cancel()
                response = json.loads(done.pop().result()) if done else {}
                if response:
                    if 'error' in response:
                        self.logger.error(response)
                    if response.get('method', '') == 'heartbeat':
                        if response['params']['type'] == 'test_request':
                            await websocket.send(json.dumps(test))
                        lastheartbeat = time.time()
                    elif response.get('id', '') in MSG_MAP.keys():
                        # Reply to one of our queued requests.
                        await self.pubserver.send_string(
                            json.dumps({
                                'accountid': account.id,
                                'type': MSG_MAP[response.get('id')],
                                'data': response.get('result', {}),
                                'error': response.get('error', {})
                            }))
                    elif response.get('params', ''):
                        # NOTE(review): unlike the branch above, these
                        # send_string calls are not awaited — if pubserver
                        # is a zmq.asyncio socket the publish may never be
                        # flushed; confirm the socket type.
                        if response['params']['channel'].startswith(
                                'user.portfolio'):
                            self.pubserver.send_string(
                                json.dumps({
                                    'accountid': account.id,
                                    'type': 'user.portfolio',
                                    'data': response['params']['data']
                                }))
                        elif response['params']['channel'].startswith(
                                'user.changes.future'):
                            self.pubserver.send_string(
                                json.dumps({
                                    'accountid': account.id,
                                    'type': 'user.changes.future',
                                    'data': response['params']['data']
                                }))
                        else:
                            pass
                    else:
                        pass
            else:
                # while-else: the loop ended without the heartbeat raise;
                # reconnect if the service is still meant to run.
                if self.state == ServiceState.started:
                    await self.pub_msg(account)
    except websockets.exceptions.ConnectionClosedError:
        # Connection dropped: tell subscribers to cancel everything, then
        # reconnect.
        self.pubserver.send_string(
            json.dumps({
                'accountid': account.id,
                'type': 'cancel_all',
                'data': {},
                'error': {}
            }))
        await self.pub_msg(account)
    except Exception as e:
        self.logger.exception(e)
        await self.pub_msg(account)
async def pub_msg(self):
    """Maintain the OKEx v3 websocket: log in, subscribe to BTC-USD option
    channels, and republish depth/account/trade updates over ZMQ.

    Reconnects by calling itself on connection loss or error.
    """
    # get marketdata from exchange socket, then pub to zmq
    try:
        async with websockets.connect(
                'wss://real.OKEx.com:8443/ws/v3') as ws:
            timestamp = server_timestamp()
            login_str = login_params(str(timestamp), api_key, passphrase,
                                     secret_key)
            await ws.send(login_str)
            login_res = await ws.recv()
            self.logger.info('Login result: %s' %
                             json.loads(inflate(login_res)))
            await ws.send(
                json.dumps({
                    "op": "subscribe",
                    "args": [
                        "option/instruments:BTC-USD",
                        "option/account:BTC-USD"
                    ]
                }))
            response = json.loads(inflate(await ws.recv()))
            #await ws.send(json.dumps({"op": "subscribe", "args": ["option/account:BTC-USD"]}))
            #response = json.loads(inflate(await ws.recv()))
            lastheartbeat = time.time()
            while ws.open and self.state == ServiceState.started:
                # Race the receive against a 5s timeout.
                task = asyncio.ensure_future(ws.recv())
                done, pending = await asyncio.wait({task}, timeout=5)
                for t in pending:
                    t.cancel()
                response = json.loads(inflate(
                    done.pop().result())) if done else {}
                if response:
                    # self.logger.info(response)
                    lastheartbeat = time.time()
                    if response.get('table', '') == 'option/instruments':
                        # Instrument list changed: (re)subscribe to depth
                        # and trade channels for every instrument.
                        self.logger.info('Instruments updated')
                        await ws.send(
                            json.dumps({
                                "op": "subscribe",
                                "args": [
                                    "option/depth5:" + i['instrument_id']
                                    for i in response['data']
                                ] + [
                                    "option/trade:" + i['instrument_id']
                                    for i in response['data']
                                ]
                            }))
                    elif response.get('table', '') in ('option/depth5',
                                                       'option/account',
                                                       'option/trade'):
                        # self.logger.info(response['data'])
                        # NOTE(review): not awaited — if pubserver is a
                        # zmq.asyncio socket the publish may never be
                        # flushed; confirm the socket type.
                        self.pubserver.send_string(json.dumps(response))
                else:
                    if time.time() - lastheartbeat > 30:
                        raise websockets.exceptions.ConnectionClosedError(
                            1003, 'Serverside heartbeat stopped')
            else:
                # while-else: loop ended without raising; reconnect if the
                # service should still run.
                if self.state == ServiceState.started:
                    await self.pub_msg()
    except websockets.exceptions.ConnectionClosedError:
        await self.pub_msg()
    except Exception as e:
        self.logger.exception(e)
        await self.pub_msg()