Example #1
def get_new_l4_blocks() -> List[bytes]:
    """Get all new l4 records from the incoming queue"""
    if LEVEL != "5":
        raise RuntimeError("Getting l4_blocks is a level 5 action")
    l4_blocks = []
    for _ in range(0, redis.llen_sync(INCOMING_TX_KEY)):
        # These are in lists because enterprise will be able to specify more than one l4.
        l4_blocks_list = cast(bytes, redis.rpoplpush_sync(INCOMING_TX_KEY, PROCESSING_TX_KEY, decode=False))
        l4_blocks.append(l4_blocks_list)
    return l4_blocks
Example #2
def get_new_transactions() -> List[transaction_model.TransactionModel]:
    """Get all new transactions from the incoming queue"""
    if LEVEL != "1":
        raise RuntimeError("Getting transactions is a level 1 action")

    transactions = []
    # Only allow up to 1000 transactions to process at a time
    length = min(redis.llen_sync(INCOMING_TX_KEY), 1000)
    for _ in range(0, length):
        string = cast(bytes, redis.rpoplpush_sync(INCOMING_TX_KEY, PROCESSING_TX_KEY, decode=False))
        dictionary = json.loads(string)
        txn_model = transaction_model.new_from_queue_input(dictionary)
        transactions.append(txn_model)
    return transactions
Example #3
def check_and_recover_processing_if_necessary() -> None:
    """
    Checks the processing tx queue and returns any items found there to the incoming queue
    (Should be called before starting to process a new block, for unexpected crash recovery)
    """
    if redis.llen_sync(PROCESSING_TX_KEY) != 0:
        _log.warning(
            "WARNING! Processing queue was not empty. Last block processing probably crashed. Recovering and re-queuing these dropped items."
        )
        to_recover = redis.lrange_sync(PROCESSING_TX_KEY, 0, -1, decode=False)
        # Execute these in a pipeline in attempts to make this as atomic as possible
        p = redis.pipeline_sync()
        p.rpush(INCOMING_TX_KEY, *to_recover)
        p.delete(PROCESSING_TX_KEY)
        p.execute()
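
Taken together, Examples #2 and #3 implement a simple reliable-queue cycle: LLEN sizes the batch, RPOPLPUSH moves each item from the incoming list to a processing list, and anything stranded in the processing list after a crash is pushed back before the next batch starts. Below is a minimal, self-contained sketch of that cycle against a plain redis-py client; the incoming/processing key names, the handle callback, and the final delete-to-acknowledge step are illustrative assumptions, not the project's code.

import json
from typing import Callable

import redis  # standard redis-py package

client = redis.Redis()

def recover(incoming: str, processing: str) -> None:
    # Push any items stranded in the processing list back onto the incoming list.
    stranded = client.lrange(processing, 0, -1)
    if stranded:
        pipe = client.pipeline()
        pipe.rpush(incoming, *stranded)
        pipe.delete(processing)
        pipe.execute()

def drain(incoming: str, processing: str, handle: Callable[[dict], None], limit: int = 1000) -> None:
    # Recover first, then move up to `limit` items into the processing list,
    # handle them, and acknowledge the whole batch by deleting the processing list.
    recover(incoming, processing)
    for _ in range(min(client.llen(incoming), limit)):
        raw = client.rpoplpush(incoming, processing)
        if raw is None:
            break  # another consumer emptied the list
        handle(json.loads(raw))
    client.delete(processing)
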
Example #4
def get_new_transactions() -> List[transaction_model.TransactionModel]:
    """Get all new transactions from the incoming queue"""
    if LEVEL != "1":
        raise RuntimeError("Getting transactions is a level 1 action")

    transactions = []
    # Only allow up to 1000 transactions to process at a time
    length = min(redis.llen_sync(INCOMING_TX_KEY), 1000)
    p = redis.pipeline_sync()
    for _ in range(0, length):
        p.rpoplpush(INCOMING_TX_KEY, PROCESSING_TX_KEY)
    for value in p.execute():
        dictionary = json.loads(value)
        txn_model = transaction_model.new_from_queue_input(dictionary)
        transactions.append(txn_model)
    return transactions
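
Compared with Example #2, Example #4 queues all of the RPOPLPUSH calls in a single pipeline, so the whole batch costs one network round trip instead of one per transaction. One caveat: if another consumer drains the incoming list between the LLEN call and execute(), some of the pipelined replies come back as None, and json.loads would fail on them. A hedged sketch of the same batching with that case guarded (plain redis-py; the key names and the 1000-item cap are illustrative):

import json
from typing import Any, List

import redis  # standard redis-py package

client = redis.Redis()

def dequeue_batch(incoming: str, processing: str, limit: int = 1000) -> List[Any]:
    # Queue one RPOPLPUSH per expected item, then send them all in one round trip.
    count = min(client.llen(incoming), limit)
    pipe = client.pipeline()
    for _ in range(count):
        pipe.rpoplpush(incoming, processing)
    # Skip None replies: another consumer may have emptied the list in the meantime.
    return [json.loads(raw) for raw in pipe.execute() if raw is not None]
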
Example #5
def start() -> None:
    """Start the next job in the queue"""
    _log.debug("Connecting to service account")
    kubernetes.config.load_incluster_config()

    _log.debug("Creating kubernetes client")
    global _kube
    _kube = kubernetes.client.BatchV1Api()

    _log.debug("Job processor ready!")

    if redis.llen_sync(PENDING_TASK_KEY):
        _log.warning(
            "WARNING! Pending job processor queue was not empty. Last job probably crashed. Re-queueing these dropped items."
        )
        to_recover = redis.lrange_sync(PENDING_TASK_KEY, 0, -1, decode=False)
        p = redis.pipeline_sync()
        p.rpush(CONTRACT_TASK_KEY, *to_recover)
        p.delete(PENDING_TASK_KEY)
        p.execute()
    while True:
        start_task()
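
start() only prepares the Kubernetes client and recovers tasks stranded in the pending list; the actual submission happens in start_task(), which this example does not show. As an illustration of how such a loop could hand a queued task to Kubernetes, here is a hedged sketch: the task JSON layout, the id and image fields, the namespace, and the start_task body itself are assumptions rather than the project's code, and it assumes the project's rpoplpush_sync/pipeline_sync wrappers behave like their redis-py counterparts.

import json
import time
import kubernetes

def start_task() -> None:
    # Move one queued task into the pending list so it can be recovered after a
    # crash (mirroring the recovery logic in start()), then submit it as a Job.
    raw = redis.rpoplpush_sync(CONTRACT_TASK_KEY, PENDING_TASK_KEY, decode=False)
    if raw is None:
        time.sleep(1)  # nothing queued; back off before polling again
        return
    task = json.loads(raw)
    job = kubernetes.client.V1Job(
        api_version="batch/v1",
        kind="Job",
        metadata=kubernetes.client.V1ObjectMeta(name=f"task-{task['id']}"),  # assumed field
        spec=kubernetes.client.V1JobSpec(
            template=kubernetes.client.V1PodTemplateSpec(
                spec=kubernetes.client.V1PodSpec(
                    restart_policy="Never",
                    containers=[
                        kubernetes.client.V1Container(
                            name="worker",
                            image=task["image"],  # assumed field
                            args=task.get("args", []),
                        )
                    ],
                )
            )
        ),
    )
    _kube.create_namespaced_job(namespace="default", body=job)
    # Acknowledge: drop the task from the pending list once the Job is submitted.
    p = redis.pipeline_sync()
    p.lrem(PENDING_TASK_KEY, 0, raw)
    p.execute()
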
Example #6
def is_not_empty() -> bool:
    """Check if there is another block in the queue ready to process"""
    return redis.llen_sync(INCOMING_TX_KEY) != 0
Example #7
def test_llen(self):
    redis.llen_sync("banana")
    redis.redis_client.llen.assert_called_once_with("banana")
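
The assertion on redis.redis_client.llen implies that llen_sync is a thin wrapper delegating to a module-level synchronous redis-py client. A minimal sketch of what such a wrapper might look like, assuming lazy initialization of the client (connection details are placeholders, not the project's configuration):

from typing import Optional

import redis as redis_py  # standard redis-py package

redis_client: Optional[redis_py.Redis] = None

def _set_redis_client_if_necessary() -> None:
    # Hypothetical lazy initializer; the real module may configure the host,
    # port, and decoding behaviour differently.
    global redis_client
    if redis_client is None:
        redis_client = redis_py.Redis(host="localhost", port=6379)

def llen_sync(key: str) -> int:
    # Delegate to the shared synchronous client, as the mock-based test expects.
    _set_redis_client_if_necessary()
    return redis_client.llen(key)
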