def on_event(self, event: threading.Event, min_frequency_in_seconds: int, callback):
    """Register the specified callback to be called every time `event` is
    triggered, but at least once every `min_frequency_in_seconds`.

    Args:
        event: Event which should be monitored.
        min_frequency_in_seconds: Minimum execution frequency (in seconds).
        callback: Function to be called by the timer.

    Raises:
        TypeError: If `event` is not a `threading.Event`, `min_frequency_in_seconds`
            is not an `int`, or `callback` is not callable.
    """
    # Validate with explicit raises rather than `assert`: assertions are
    # silently stripped when Python runs with the -O flag, which would let
    # invalid arguments through unchecked.
    if not isinstance(event, threading.Event):
        raise TypeError("event must be a threading.Event")
    if not isinstance(min_frequency_in_seconds, int):
        raise TypeError("min_frequency_in_seconds must be an int")
    if not callable(callback):
        raise TypeError("callback must be callable")
    self.event_timers.append((event, min_frequency_in_seconds, AsyncCallback(callback)))
def _start_watching_blocks(self):
    """Start a daemon thread that polls a 'latest' block filter and hands
    every new block to the registered block callback (`self.block_function`).
    """
    def new_block_callback(block_hash):
        # Record when the node last announced a block (UTC-aware timestamp).
        self._last_block_time = datetime.datetime.now(tz=pytz.UTC)
        block = self.web3.eth.getBlock(block_hash)
        block_number = block['number']
        if not self.web3.eth.syncing:
            max_block_number = self.web3.eth.blockNumber
            # Only process the block if no newer block is already known,
            # so we never waste the callback on a stale block.
            if block_number >= max_block_number:
                def on_start():
                    self.logger.debug(f"Processing block #{block_number} ({block_hash.hex()})")

                def on_finish():
                    self.logger.debug(f"Finished processing block #{block_number} ({block_hash.hex()})")

                if not self.terminated_internally and not self.terminated_externally and not self.fatal_termination:
                    # trigger() returns False when the previous callback is still busy.
                    if not self._on_block_callback.trigger(on_start, on_finish):
                        self.logger.debug(f"Ignoring block #{block_number} ({block_hash.hex()}),"
                                          f" as previous callback is still running")
                else:
                    self.logger.debug(f"Ignoring block #{block_number} as keeper is already terminating")
            else:
                self.logger.debug(f"Ignoring block #{block_number} ({block_hash.hex()}),"
                                  f" as there is already block #{max_block_number} available")
        else:
            self.logger.info(f"Ignoring block #{block_number} ({block_hash.hex()}), as the node is syncing")

    def new_block_watch():
        event_filter = self.web3.eth.filter('latest')
        # BUGFIX: these two log calls previously used the module-level root
        # `logging` instead of `self.logger`, and the info call was missing
        # the `f` prefix so it logged the literal text "{event}".
        self.logger.debug(f"Created event filter: {event_filter}")
        while True:
            try:
                for event in event_filter.get_new_entries():
                    self.logger.info(f"new block from event filter: {event}")
                    new_block_callback(event)
            except (BlockNotFound, BlockNumberOutofRange, ValueError) as ex:
                # The node dropped our filter (e.g. after a restart) — recreate it.
                self.logger.warning(f"Node dropped event emitter; recreating latest block filter: {ex}")
                event_filter = self.web3.eth.filter('latest')
            finally:
                time.sleep(1)

    if self.block_function:
        self._on_block_callback = AsyncCallback(self.block_function)
        block_filter = threading.Thread(target=new_block_watch, daemon=True)
        block_filter.start()
        register_filter_thread(block_filter)
        self.logger.info("Watching for new blocks")
def test_should_wait_for_the_callback_to_finish(self, callbacks):
    """wait() must block until the in-flight callback has completed."""
    # given: a long-running callback that has been triggered but not yet finished
    cb = AsyncCallback(callbacks.long_running_callback)
    cb.trigger()
    assert callbacks.counter == 0
    # when: we block until the callback completes
    cb.wait()
    # then: the callback has run exactly once
    assert callbacks.counter == 1
def _start_watching_blocks(self):
    """Watch the chain for new blocks and dispatch each one to the
    registered block callback (`self.block_function`)."""
    def new_block_callback(block_hash):
        # Remember when the most recent block arrived.
        self._last_block_time = datetime.datetime.now()
        block = self.web3.eth.getBlock(block_hash)
        block_number = block['number']
        # Guard clauses: skip blocks we should not process.
        if self.web3.eth.syncing:
            self.logger.info(f"Ignoring block #{block_number} ({block_hash}), as the node is syncing")
            return
        max_block_number = self.web3.eth.blockNumber
        if block_number != max_block_number:
            self.logger.debug(f"Ignoring block #{block_number} ({block_hash}),"
                              f" as there is already block #{max_block_number} available")
            return
        if self.terminated_internally or self.terminated_externally:
            self.logger.debug(f"Ignoring block #{block_number} as keeper is already terminating")
            return

        def log_start():
            self.logger.debug(f"Processing block #{block_number} ({block_hash})")

        def log_finish():
            self.logger.debug(f"Finished processing block #{block_number} ({block_hash})")

        # trigger() returns False when the previous callback is still busy.
        if not self._on_block_callback.trigger(log_start, log_finish):
            self.logger.debug(f"Ignoring block #{block_number} ({block_hash}),"
                              f" as previous callback is still running")

    if self.block_function:
        self._on_block_callback = AsyncCallback(self.block_function)
        latest_filter = self.web3.eth.filter('latest')
        latest_filter.watch(new_block_callback)
        register_filter_thread(latest_filter)
        self.logger.info("Watching for new blocks")
def _start_watching_blocks(self):
    """Start a daemon thread that feeds new blocks to the registered block
    callback (`self.block_function`).

    Depending on `self.subscribe_new_heads`, blocks are obtained either by
    polling an eth 'latest' filter (`new_block_watch`) or by subscribing to
    `newHeads` over a websocket (`new_block_watch_subscribe`).
    """
    def new_block_callback(block_hash):
        # Process one announced block; all failures are logged and swallowed
        # so the watcher thread keeps running.
        block_number = None
        try:
            self._last_block_time = datetime.datetime.now(tz=pytz.UTC)
            block = self.web3.eth.getBlock(block_hash)
            if block_hash == 'latest':
                # Resolve the symbolic 'latest' into the actual block hash.
                block_hash = block['hash']
            block_number = block['number']
            if self.skip_syncing_check:
                is_syncing = False
            else:
                is_syncing = self.web3.eth.syncing
            if not is_syncing:
                max_block_number = self.web3.eth.blockNumber
                # Only process the block if no newer block is already known.
                if block_number >= max_block_number:
                    def on_start():
                        self.logger.debug(f"Processing block #{block_number} ({block_hash.hex()})")

                    def on_finish():
                        self.logger.debug(f"Finished processing block #{block_number} ({block_hash.hex()})")

                    if not self.terminated_internally and not self.terminated_externally and not self.fatal_termination:
                        # trigger() returns False when the previous callback is
                        # still busy; in that case block until it finishes so the
                        # watcher does not race ahead of the callback.
                        if not self._on_block_callback.trigger(on_start, on_finish):
                            self.logger.debug(f"Ignoring block #{block_number} ({block_hash.hex()}),"
                                              f" as previous callback is still running")
                            self._on_block_callback.wait()
                    else:
                        self.logger.debug(f"Ignoring block #{block_number} as keeper is already terminating")
                else:
                    self.logger.debug(f"Ignoring block #{block_number} ({block_hash.hex()}),"
                                      f" as there is already block #{max_block_number} available")
            else:
                self.logger.info(f"Ignoring block #{block_number} ({block_hash.hex()}), as the node is syncing")
        except Exception as err:
            # BUGFIX: `block_hash` may still be the string 'latest' if getBlock()
            # itself failed; calling `.hex()` on it would raise AttributeError
            # inside this handler and mask the original error.
            block_id = block_hash.hex() if hasattr(block_hash, 'hex') else block_hash
            self.logger.warning(f"Ignoring block #{block_number} ({block_id}), as error: {err} occurred.")
            msg = ""
            for t in traceback.format_tb(err.__traceback__):
                t = t.replace("\n", ":")
                t = t[:-1]
                msg += f" {t}\n"
            self.logger.info(f"{msg}")

    def new_block_watch():
        # Polling strategy: repeatedly drain a 'latest' block filter.
        event_filter = self.web3.eth.filter('latest')
        self.logger.debug(f"Created event filter: {event_filter}")
        while True:
            if self.terminated_internally or self.terminated_externally:
                break
            try:
                for event in event_filter.get_new_entries():
                    block_hash = event
                    if self.new_block_callback_use_latest:
                        block_hash = 'latest'
                    new_block_callback(block_hash)
            except (BlockNotFound, BlockNumberOutofRange, ValueError) as ex:
                # The node dropped our filter (e.g. after a restart) — recreate it.
                self.logger.warning(f"Node dropped event emitter; recreating latest block filter: {type(ex)}: {ex}")
                event_filter = self.web3.eth.filter('latest')
                time.sleep(0.5)
            except Exception as err:
                self.logger.error(f"Lifecycle Exception: {err}")
                self.terminated_internally = True
                break
            finally:
                time.sleep(0.05)

    def new_block_watch_subscribe():
        # Subscription strategy: websocket eth_subscribe("newHeads").
        async def get_event():
            while True:
                if self.terminated_internally or self.terminated_externally:
                    break
                if hasattr(self.web3.provider, 'endpoint_uri'):
                    endpoint_uri = self.web3.provider.endpoint_uri
                else:
                    self.logger.error(f"Lifecycle Error: invalid web3 provider: {repr(self.web3.provider)}")
                    self.terminated_internally = True
                    return
                self.logger.info(f"connecting to: {endpoint_uri}")
                async with connect(endpoint_uri) as ws:
                    await ws.send(json.dumps({"id": 1, "method": "eth_subscribe", "params": ["newHeads"]}))
                    subscription_response = await ws.recv()
                    self.logger.info(f"subscribed to newHeads. Response: {subscription_response}")
                    while True:
                        if self.terminated_internally or self.terminated_externally:
                            break
                        try:
                            message = await asyncio.wait_for(ws.recv(), timeout=60)
                            block_hash = HexBytes(json.loads(message)['params']['result']['hash'])
                            if self.new_block_callback_use_latest:
                                block_hash = 'latest'
                            new_block_callback(block_hash)
                        except (BlockNotFound, BlockNumberOutofRange, ValueError) as ex:
                            self.logger.warning(f"Node dropped event emitter; resubscribing: {type(ex)}: {ex}")
                            # BUGFIX: was `time.sleep(0.5)` — a blocking sleep
                            # inside a coroutine stalls the event loop.
                            await asyncio.sleep(0.5)
                            break
                        except Exception as err:
                            self.logger.error(f"Lifecycle Exception: {err}")
                            self.terminated_internally = True
                            break
                        finally:
                            # BUGFIX: was `time.sleep(0.05)` (blocking) as well.
                            await asyncio.sleep(0.05)

        # Run the coroutine on a fresh loop owned by this watcher thread.
        asyncio.new_event_loop().run_until_complete(get_event())

    if self.block_function:
        self._on_block_callback = AsyncCallback(self.block_function)
        if self.subscribe_new_heads:
            block_watch_function = new_block_watch_subscribe
        else:
            block_watch_function = new_block_watch
        block_filter = threading.Thread(target=block_watch_function, daemon=True)
        block_filter.start()
        register_filter_thread(block_filter)
        self.logger.info("Watching for new blocks")