def set_network(self, network: str):
    """Set/update the current network.

    :param network: "mainnet" or "testnet"
    :type network: str
    :returns: the freshly created API client
    :raises Exception: if no network is given, or if the given
        network name is neither "mainnet" nor "testnet"
    """
    # Guard clause: an empty/None network name is a caller error.
    if not network:
        raise Exception("Network must be provided")

    self.network = network

    # Pick the matching BinanceEnvironment for the requested network.
    if network == 'testnet':
        self.env = BinanceEnvironment.get_testnet_env()
    elif network == 'mainnet':
        self.env = BinanceEnvironment.get_production_env()
    else:
        raise Exception("Invalid network")

    # A new client is bound to the chosen environment; the cached
    # address is cleared since it belonged to the previous network.
    self.client = AsyncHttpApiClient(env=self.env)
    self.address = ''
    return self.client
async def check_incoming(config):
    """Poll the chain for new transactions and fan their messages out to
    `incoming` tasks, batching task joins every ~500 messages.

    Runs forever; sleeps 2s between polls when the last pass saw fewer
    than 10 transactions.
    """
    last_stored_time = await Chain.get_last_time(CHAIN_NAME)
    LOGGER.info("Last time is %s" % last_stored_time)
    loop = asyncio.get_event_loop()
    env = BinanceEnvironment.get_production_env()
    client = AsyncHttpApiClient(env=env)
    while True:
        # Re-read the cursor each pass so newly stored progress is honored.
        last_stored_time = await Chain.get_last_time(CHAIN_NAME)
        i = 0  # transactions seen this pass
        j = 0  # messages seen since the last batch join
        tasks = []
        seen_ids = []  # shared de-dup list handed to every `incoming` task
        async for txi in request_transactions(config, client,
                                              last_stored_time):
            i += 1
            # TODO: handle big message list stored in IPFS case
            # (if too much messages, an ipfs hash is stored here).
            for message in txi['messages']:
                j += 1
                # Stamp each message with its transaction's time.
                message['time'] = txi['time']
                # running those separately... a good/bad thing?
                # shouldn't do that for VMs.
                tasks.append(
                    loop.create_task(
                        incoming(message, chain_name=CHAIN_NAME,
                                 seen_ids=seen_ids,
                                 tx_hash=txi['tx_hash'],
                                 height=txi['height'],
                                 check_message=True)))

            # let's join every 500 messages...
            # NOTE(review): seen_ids is also reset here, so de-duplication
            # only spans a single batch — presumably intentional; confirm.
            if (j > 500):
                for task in tasks:
                    try:
                        await task
                    except Exception:
                        # Best-effort: log and keep processing the rest.
                        LOGGER.exception("error in incoming task")
                j = 0
                seen_ids = []
                tasks = []

        # Drain whatever is left after the final (partial) batch.
        for task in tasks:
            try:
                await task  # let's wait for all tasks to end.
            except Exception:
                LOGGER.exception("error in incoming task")

        # print(i)
        if (i < 10):
            # if there was less than 10 items, not a busy time
            await asyncio.sleep(2)
async def binance_packer(config): loop = asyncio.get_event_loop() # TODO: testnet perhaps? When we get testnet coins. env = BinanceEnvironment.get_production_env() target_addr = config.binancechain.sync_address.value client = AsyncHttpApiClient(env=env) wallet = Wallet(config.binancechain.private_key.value, env=env) LOGGER.info("BNB Connector set up with address %s" % wallet.address) try: await loop.run_in_executor(None, wallet.reload_account_sequence) except KeyError: pass i = 0 while True: if (i >= 100): try: await loop.run_in_executor(None, wallet.reload_account_sequence) except KeyError: pass # utxo = await get_utxo(config, address) i = 0 messages = [ message async for message in (await Message.get_unconfirmed_raw( limit=500, for_chain=CHAIN_NAME)) ] if len(messages): content = await get_chaindata(messages, bulk_threshold=0) content = json.dumps(content) tx = await loop.run_in_executor(None, prepare_transfer_tx, wallet, target_addr, content.encode('utf-8')) # tx_hash = await tx.get_hash() LOGGER.info("Broadcasting TX") await client.broadcast_msg(tx, sync=True) await asyncio.sleep(35) i += 1
async def check_incoming(config):
    """Poll the chain for new chain-data payloads, ingest each one and
    advance the stored time cursor.

    Runs forever; sleeps 2s between polls when the last pass processed
    fewer than 10 items (i.e. the chain is not busy).

    :param config: application configuration (provides chain settings)
    """
    last_stored_time = await Chain.get_last_time(CHAIN_NAME)
    LOGGER.info("Last time is %s" % last_stored_time)
    env = BinanceEnvironment.get_production_env()
    client = AsyncHttpApiClient(env=env)
    while True:
        # Re-read the cursor each pass so stored progress is honored.
        last_stored_time = await Chain.get_last_time(CHAIN_NAME)
        i = 0  # items processed this pass
        async for jdata, context in request_transactions(
                config, client, last_stored_time):
            # Fix: `i` was never incremented, so the idle check below was
            # always true and the loop slept even when the chain was busy.
            i += 1
            await incoming_chaindata(jdata, context)
            # Persist progress as a timezone-aware UTC timestamp.
            await Chain.set_last_time(
                CHAIN_NAME,
                datetime.fromtimestamp(context['time'], tz=pytz.utc))
        if i < 10:
            # if there was less than 10 items, not a busy time
            await asyncio.sleep(2)