def check_and_recover_processing_if_necessary() -> None:
    """Re-queue any transactions stranded in the processing queue.

    Intended to be called before starting to process a new block: if a
    previous run crashed mid-block, its in-flight transactions are still
    sitting in the processing queue and must be returned to the incoming
    queue so they are not lost.
    """
    # Nothing stranded; the previous run shut down cleanly.
    if redis.llen_sync(PROCESSING_TX_KEY) == 0:
        return
    _log.warning(
        "WARNING! Processing queue was not empty. Last block processing probably crashed. Recovering and re-queuing these dropped items."
    )
    stranded_items = redis.lrange_sync(PROCESSING_TX_KEY, 0, -1, decode=False)
    # Send both commands through a single pipeline so the move from the
    # processing queue back to the incoming queue is as atomic as possible.
    pipeline = redis.pipeline_sync()
    pipeline.rpush(INCOMING_TX_KEY, *stranded_items)
    pipeline.delete(PROCESSING_TX_KEY)
    pipeline.execute()
def start() -> None:
    """Start the next job in the queue"""
    # Build the Kubernetes batch client from the in-cluster service account.
    _log.debug("Connecting to service account")
    kubernetes.config.load_incluster_config()
    _log.debug("Creating kubernetes client")
    global _kube
    _kube = kubernetes.client.BatchV1Api()
    _log.debug("Job processor ready!")

    # Crash recovery: anything left in the pending queue from a prior run
    # is moved back onto the contract task queue before we begin.
    if redis.llen_sync(PENDING_TASK_KEY):
        _log.warning(
            "WARNING! Pending job processor queue was not empty. Last job probably crashed. Re-queueing these dropped items."
        )
        dropped_tasks = redis.lrange_sync(PENDING_TASK_KEY, 0, -1, decode=False)
        # Pipeline the re-queue + delete so the hand-off is near-atomic.
        pipeline = redis.pipeline_sync()
        pipeline.rpush(CONTRACT_TASK_KEY, *dropped_tasks)
        pipeline.delete(PENDING_TASK_KEY)
        pipeline.execute()

    # Main dispatch loop: process tasks forever.
    while True:
        start_task()
def test_lrange(self):
    # Calling the sync wrapper should pass straight through to the
    # underlying client's lrange with identical arguments.
    key, start, stop = "banana", 1, 5
    redis.lrange_sync(key, start, stop)
    redis.redis_client.lrange.assert_called_once_with(key, start, stop)