Example 1
def enqueue_l1(transaction: Dict[str, Any]) -> None:
    p = redis.pipeline_sync()
    enqueue_l1_pipeline(p, transaction)
    # Execute redis pipeline
    for result in p.execute():
        if not result:
            raise RuntimeError("Failed to enqueue")
Example 2
def set_receieved_verification_for_block_from_chain_sync(block_id: str, level: int, chain_id: str) -> None:
    """Signify a successful receipt from a higher level node receipt for a certain block (sync)
    Args:
        block_id: block_id of lvl 1 block for received receipt
        level: level of the node from the higher level receipt
        chain_id: id of the higher level dragonchain receipt
    """
    # Check that this block is accepting verifications for the specified level
    accepting_verification_level = get_current_block_level_sync(block_id)
    if accepting_verification_level != level:
        raise exceptions.NotAcceptingVerifications(
            f"Block {block_id} is only accepting verifications for level {accepting_verification_level} (Not {level}) at the moment"
        )

    set_key = verifications_key(block_id, level)
    p = redis.pipeline_sync()
    p.sadd(set_key, chain_id)
    p.scard(set_key)
    verifications = p.execute()[1]  # Execute the commands and get the result of the scard operation (number of members in the set)
    required = dragonnet_config.DRAGONNET_CONFIG[f"l{level}"]["nodesRequired"]

    # Check if this block needs to be promoted to the next level
    if verifications >= required:
        if level >= 5:
            # If level 5, block needs no more verifications; remove it from the broadcast system
            remove_block_from_broadcast_system_sync(block_id)
        else:
            # Set the block to the next level and schedule it for broadcasting
            redis.delete_sync(storage_error_key(block_id))
            set_current_block_level_sync(block_id, level + 1)
            schedule_block_for_broadcast_sync(block_id)
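
Example 2 leans on a small key helper that is not shown; a hedged sketch of verifications_key is below. The exact key layout is an assumption.

def verifications_key(block_id: str, level: int) -> str:
    # One redis set per (block, level) pair holding the chain ids that have submitted receipts;
    # the key format here is an assumption, not taken from the examples
    return f"broadcast:block:{block_id}:l{level}"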
Example 3
def check_and_recover_processing_if_necessary() -> None:
    """
    Checks the processing tx queue and returns them to the incoming queue
    (Should be called before starting to process a new block, for unexpected crash recovery)
    """
    if redis.llen_sync(PROCESSING_TX_KEY) != 0:
        _log.warning(
            "WARNING! Processing queue was not empty. Last block processing probably crashed. Recovering and re-queuing these dropped items."
        )
        to_recover = redis.lrange_sync(PROCESSING_TX_KEY, 0, -1, decode=False)
        # Execute these in a pipeline in attempts to make this as atomic as possible
        p = redis.pipeline_sync()
        p.rpush(INCOMING_TX_KEY, *to_recover)
        p.delete(PROCESSING_TX_KEY)
        p.execute()
Example 4
def get_new_transactions() -> List[transaction_model.TransactionModel]:
    """Get all new transactions from the incoming queue"""
    if LEVEL != "1":
        raise RuntimeError("Getting transactions is a level 1 action")

    transactions = []
    # Only allow up to 1000 transactions to process at a time
    length = min(redis.llen_sync(INCOMING_TX_KEY), 1000)
    p = redis.pipeline_sync()
    for _ in range(0, length):
        p.rpoplpush(INCOMING_TX_KEY, PROCESSING_TX_KEY)
    for value in p.execute():
        dictionary = json.loads(value)
        txn_model = transaction_model.new_from_queue_input(dictionary)
        transactions.append(txn_model)
    return transactions
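
The docstring in Example 3 notes that recovery should run before a new block is processed; a short illustrative sketch of how Examples 3 and 4 would be combined is below. The surrounding function is hypothetical.

def process_next_block() -> None:
    # Hypothetical caller: return any transactions stranded by a crash, then claim new ones
    check_and_recover_processing_if_necessary()
    transactions = get_new_transactions()
    for txn_model in transactions:
        ...  # build the next level 1 block from the claimed transaction models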
Example 5
def submit_bulk_transaction_v1(
        bulk_transaction: Sequence[Dict[str, Any]],
        api_key: "api_key_model.APIKeyModel") -> Dict[str, List[Any]]:
    """
    Formats, validates and enqueues the transactions in
    the payload of bulk_transaction
    Returns a dictionary of 2 lists: key "201" holds the txn ids of successfully created transactions,
    and key "400" holds the transactions that failed to post (the full dto passed in by the user)
    """
    _log.info(
        "[TRANSACTION_BULK] Checking if key is allowed to create all given bulk transactions"
    )
    requested_types = set()
    for transaction in bulk_transaction:
        requested_types.add(transaction["txn_type"])
    # Check if allowed to create all these transactions of (potentially) various types
    if not api_key.is_key_allowed(
            "transactions",
            "create",
            "create_transaction",
            False,
            extra_data={"requested_types": requested_types}):
        raise exceptions.ActionForbidden(
            "API Key is not allowed to create all of the provided transaction types"
        )

    _log.info(
        f"[TRANSACTION_BULK] Auth successful. Attempting to enqueue {len(bulk_transaction)} transactions"
    )
    success = []
    fail = []
    pipeline = dc_redis.pipeline_sync()
    for transaction in bulk_transaction:
        try:
            txn_model = _generate_transaction_model(transaction)
            queue.enqueue_l1_pipeline(pipeline,
                                      txn_model.export_as_queue_task())
            success.append(txn_model.txn_id)
        except Exception:
            fail.append(transaction)
    # Queue the actual transactions in redis now
    for result in pipeline.execute():
        if not result:
            raise RuntimeError("Failed to enqueue")

    return {"201": success, "400": fail}
Example 6
def activate_transaction_types_if_necessary(block_id: str) -> None:
    """Activate transaction type(s) by setting them to active at a certain block number (for index regeneration purposes)
    Args:
        block_id: the current block id where the transaction types are being activated (if they exist)
    """
    # Get all the queued transaction types
    p = redis.pipeline_sync(transaction=True)
    p.lrange(QUEUED_TXN_TYPES, 0, -1)
    p.delete(QUEUED_TXN_TYPES)
    results, _ = p.execute()
    for txn_type in results:
        try:
            txn_type_model = get_registered_transaction_type(txn_type.decode("utf8"))
            txn_type_model.active_since_block = block_id
            # Save the transaction type state
            storage.put_object_as_json(f"{FOLDER}/{txn_type_model.txn_type}", txn_type_model.export_as_at_rest())
        except exceptions.NotFound:
            pass  # txn_type was probably deleted before activating. Simply ignore it
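
Example 6 drains QUEUED_TXN_TYPES, but the producer side is not shown; the sketch below of how a type might be queued for activation is an assumption, including the lpush_sync wrapper call.

def queue_transaction_type_for_activation(txn_type: str) -> None:
    # Assumed producer: queued names are later stamped with the activating block id
    # by activate_transaction_types_if_necessary
    redis.lpush_sync(QUEUED_TXN_TYPES, txn_type)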
Example 7
def get_all_verifications_for_block_sync(block_id: str) -> List[Set[str]]:
    """Get an array of the sets of chain_ids for properly received receipts from a higher level for a certain block (sync)
    Args:
        block_id: block_id to check for received receipts
    Returns:
        List of sets of strings (of chain ids). List[0] is for L2 receipts, List[1] is for L3 receipts, etc
    """
    transaction = redis.pipeline_sync()
    transaction.smembers(verifications_key(block_id, 2))
    transaction.smembers(verifications_key(block_id, 3))
    transaction.smembers(verifications_key(block_id, 4))
    transaction.smembers(verifications_key(block_id, 5))
    result = transaction.execute()
    # Decode the results because this used a raw redis pipeline
    for i in range(len(result)):
        decoded_set = set()
        for chain in result[i]:
            decoded_set.add(chain.decode("ascii"))
        result[i] = decoded_set
    return result
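
A brief usage note for Example 7: the returned list is positional by level, so it unpacks cleanly. The block id below is illustrative.

l2_chains, l3_chains, l4_chains, l5_chains = get_all_verifications_for_block_sync("12345")
# Each variable is a set of chain id strings, e.g. check how many L2 receipts have arrived
print(len(l2_chains))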
Example 8
def remove_block_from_broadcast_system_sync(block_id: str) -> None:
    """Clean up a block from the verifications/broadcast system (sync)

    This should be called when a block has received all necessary verifications
    and no longer needs to be involved with the broadcast system

    Args:
        block_id: block_id to remove from the system
    """
    # Make a multi exec redis transaction for less overhead
    transaction = redis.pipeline_sync()

    transaction.zrem(IN_FLIGHT_KEY, block_id)
    transaction.delete(state_key(block_id))
    transaction.delete(storage_error_key(block_id))
    for i in range(2, 6):
        transaction.delete(verifications_key(block_id, i))
    # This one is for the claim check from matchmaking that is saved locally
    transaction.hdel(CLAIM_CHECK_KEY, block_id)

    transaction.execute()
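
The storage_error_key and state_key helpers used in Examples 2, 8, and 10 are not shown; hedged sketches are below, with the key formats as assumptions.

def state_key(block_id: str) -> str:
    # Tracks the verification level the block is currently accepting; format is an assumption
    return f"broadcast:block:{block_id}:state"

def storage_error_key(block_id: str) -> str:
    # Counts redis/storage inconsistencies seen for the block; format is an assumption
    return f"broadcast:block:{block_id}:errors"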
Example 9
def start() -> None:
    """Start the next job in the queue"""
    _log.debug("Connecting to service account")
    kubernetes.config.load_incluster_config()

    _log.debug("Creating kubernetes client")
    global _kube
    _kube = kubernetes.client.BatchV1Api()

    _log.debug("Job processor ready!")

    if redis.llen_sync(PENDING_TASK_KEY):
        _log.warning(
            "WARNING! Pending job processor queue was not empty. Last job probably crashed. Re-queueing these dropped items."
        )
        to_recover = redis.lrange_sync(PENDING_TASK_KEY, 0, -1, decode=False)
        p = redis.pipeline_sync()
        p.rpush(CONTRACT_TASK_KEY, *to_recover)
        p.delete(PENDING_TASK_KEY)
        p.execute()
    while True:
        start_task()
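
start() in Example 9 recovers anything left on PENDING_TASK_KEY, which implies start_task parks in-flight work there; the sketch below of that claim pattern is an assumption, including the brpoplpush_sync and lrem_sync wrapper calls and the _launch_job helper.

def start_task() -> None:
    # Assumed pattern: atomically move the next task into the pending list so that a crash
    # before the job launches can be recovered by start()
    task = redis.brpoplpush_sync(CONTRACT_TASK_KEY, PENDING_TASK_KEY, 0, decode=False)
    _launch_job(task)  # hypothetical helper that creates the kubernetes job via _kube
    # Once the job is safely launched, the pending copy is no longer needed
    redis.lrem_sync(PENDING_TASK_KEY, 0, task)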
Example 10
def increment_storage_error_sync(block_id: str, current_level: int) -> None:
    """When getting a storage error/inconsistency between redis/storage, this should be called
    This will roll-back a block to a previous level for verifications if FAULT_TOLERATION is surpassed for a block

    Basically, the state in redis can be a mis-representation of what's in actual storage, and if this occurs, we need to roll back
    the block verifications state and remove any bogus verifications from redis that aren't truly saved in storage, which happens on occasion
    Args:
        block_id: the block_id to increment a storage error
        current_level: the current block verification level state (should be in broadcast:block:state)
    """
    # Can't roll back if at or below level 2, since there is no earlier verification level to fall back to
    if current_level <= 2:
        return
    error_key = storage_error_key(block_id)
    current_count = int(redis.get_sync(error_key, decode=False) or 0)
    if current_count < FAULT_TOLERATION:
        redis.set_sync(error_key, str(current_count + 1))
        return
    # Beyond fault toleration, we must rollback this block

    # First find all verifications actually in storage
    prefix = f"BLOCK/{block_id}-l{current_level - 1}"
    good_verifications = set()
    for key in storage.list_objects(prefix):
        good_verifications.add(re.search(f"^{prefix}-(.*)",
                                         key).group(1))  # noqa: T484

    # Now find all verifications the system thinks we have in redis
    redis_verifications_key = verifications_key(block_id, current_level - 1)
    all_verifications = redis.smembers_sync(redis_verifications_key)

    # Remove all bad verifications recorded in redis that aren't in storage, and demote block to previous level
    p = redis.pipeline_sync()
    p.srem(redis_verifications_key,
           *all_verifications.difference(good_verifications))
    p.delete(error_key)
    p.set(state_key(block_id), str(current_level - 1))
    p.execute()
Example 11
def test_pipeline(self):
    redis.pipeline_sync()
    redis.redis_client.pipeline.assert_called_once_with(True, None)
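
Example 11 only works if redis.redis_client has already been replaced with a mock; a minimal setUp sketch is below. How the module under test exposes redis_client is an assumption.

from unittest.mock import MagicMock

def setUp(self):
    # Swap the real client for a mock so pipeline_sync() can be asserted on without a server
    redis.redis_client = MagicMock()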