Example 1
def execute() -> None:
    """
    * Pops Level 4 records off the queue and sets them to storage in a directory called toBroadcast-${block_id}
    * Publishes to public nodes when required
    * Locates confirmations from public nodes when required
    * Sends receipts to all L1 blocks represented in the public broadcast
    """
    matchmaking.renew_registration_if_necessary()
    # Check if there are any funds
    if has_funds_for_transactions():
        _log.info("[L5] Has funds, proceeding")
        # Get the block number for the pending L5
        current_block_id = str(int(get_last_block_number()) + 1)

        # Verify the blocks and add to storage pool where they wait to be broadcasted
        store_l4_blocks(current_block_id)

        # Create and Send L5 block to public blockchain
        if should_broadcast(current_block_id):
            # TODO: if any of these steps fail, we need to roll back or retry
            l5_block = create_l5_block(current_block_id)
            broadcast_to_public_chain(l5_block)
            broadcast_clean_up(l5_block)
            # Check to see if any more funds have been added to wallet
            watch_for_funds()
    else:
        # 20 minute timer watcher
        _log.info("[L5] No funds, checking if time to watch")
        if is_time_to_watch():
            # Check to see if any funds have been added to wallet
            watch_for_funds()

    # Only check confirmations if there are any pending
    check_confirmations()
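
The else branch above gates watch_for_funds() behind the "20 minute timer watcher" mentioned in the comment. A minimal sketch of what is_time_to_watch could look like, assuming the last check time is kept in module state (the real implementation may persist this elsewhere):

import time

WATCH_INTERVAL_SECONDS = 20 * 60  # hypothetical constant for the 20 minute window
_last_watch_time = 0.0


def is_time_to_watch() -> bool:
    """Return True when at least 20 minutes have passed since the last funds check (sketch)."""
    global _last_watch_time
    now = time.time()
    if now - _last_watch_time >= WATCH_INTERVAL_SECONDS:
        _last_watch_time = now
        return True
    return False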
Example 2
def execute() -> None:
    """Pops transactions off the queue, fixates them into a block and adds it to the chain"""
    if BROADCAST:
        matchmaking.renew_registration_if_necessary()
    t0 = time.time()

    # Pop off of queue
    new_signable_txns = get_new_transactions()
    t1 = time.time()

    if len(new_signable_txns) > 0:
        # Sign / validate transactions
        signed_transactions = process_transactions(new_signable_txns)
        t2 = time.time()

        # Create the block
        block = create_block(signed_transactions)
        t3 = time.time()

        # Store the block
        store_data(block)
        t4 = time.time()

        # Clear our processing queue (finished successfully)
        clear_processing_transactions()

        total = t4 - t0
        _log.info(f"[L1] Processed {len(signed_transactions)} transactions in {total:.4f} seconds")
        _log.info(f"[L1] Retrieving Txns From queue: {t1 - t0:.4f} sec ({((t1 - t0) / total) * 100:.1f}% of processing)")
        _log.info(f"[L1] Signing/Fixating Txns: {t2 - t1:.4f} sec ({((t2 - t1) / total) * 100:.1f}% of processing)")
        _log.info(f"[L1] Creating block model: {t3 - t2:.4f} sec ({((t3 - t2) / total) * 100:.1f}% of processing)")
        _log.info(f"[L1] Uploading data: {t4 - t3:.4f} sec ({((t4 - t3) / total) * 100:.1f}% of processing)")
Example 3
def execute() -> None:
    """Pops transactions off the queue, fixates them into a block and adds it to the chain"""
    if BROADCAST:
        try:
            matchmaking.renew_registration_if_necessary()
        except (exceptions.MatchmakingError,
                exceptions.MatchmakingRetryableError):
            _log.warning(
                "Could not register with matchmaking! Is your Dragon Net configuration valid?"
            )
    t0 = time.time()

    # Pop off of queue
    new_signable_txns = get_new_transactions()
    t1 = time.time()

    # Get current block id
    current_block_id = l1_block_model.get_current_block_id()
    # Activate any new custom indexes if necessary
    activate_pending_indexes_if_necessary(current_block_id)
    t2 = time.time()

    if len(new_signable_txns) > 0:
        # Sign / validate transactions
        signed_transactions = process_transactions(new_signable_txns)
        t3 = time.time()

        # Create the block
        block = create_block(signed_transactions, current_block_id)
        t4 = time.time()

        # Store the block
        store_data(block)
        t5 = time.time()

        # Clear our processing queue (finished successfully)
        clear_processing_transactions()

        total = t5 - t0
        _log.info(
            f"[L1] Processed {len(signed_transactions)} transactions in {total:.4f} seconds"
        )
        _log.info(
            f"[L1] Retrieving Txns From queue: {t1 - t0:.4f} sec ({((t1 - t0) / total) * 100:.1f}% of processing)"
        )
        _log.info(
            f"[L1] Activating pending transaction types: {t2 - t1:.4f} sec ({((t2 - t1) / total) * 100:.1f}% of processing)"
        )
        _log.info(
            f"[L1] Signing/Fixating Txns: {t3 - t2:.4f} sec ({((t3 - t2) / total) * 100:.1f}% of processing)"
        )
        _log.info(
            f"[L1] Creating block model: {t4 - t3:.4f} sec ({((t4 - t3) / total) * 100:.1f}% of processing)"
        )
        _log.info(
            f"[L1] Uploading data: {t5 - t4:.4f} sec ({((t5 - t4) / total) * 100:.1f}% of processing)"
        )
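
activate_pending_indexes_if_necessary(current_block_id) is the step this variant adds over Example 2. Its implementation is not shown here; a minimal sketch of what it could involve, assuming pending index definitions record the block id at which they become active (the lookup/activation helpers and field names below are assumptions, not the real API):

def activate_pending_indexes_if_necessary(current_block_id: str) -> None:
    """Sketch: activate any custom indexes whose scheduled activation block has been reached."""
    for index in get_pending_index_definitions():  # hypothetical lookup of pending definitions
        if int(index["activate_at_block"]) <= int(current_block_id):  # assumed field name
            activate_index(index)  # hypothetical activation call
            _log.info(f"[L1] Activated pending index at block {current_block_id}")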
Example 4
def execute() -> None:
    """Gets the next L3 block content from the queue and processes it"""
    matchmaking.renew_registration_if_necessary()
    t0 = time.time()

    l1_headers, l3_blocks = get_new_blocks()
    if l3_blocks and l1_headers:
        _log.info(
            f"[L4] Got next L3 block array from dcid: {l1_headers['dc_id']} for blockid: {l1_headers['block_id']}"
        )
        t1 = time.time()

        validations = verify_blocks(l3_blocks, l1_headers)
        t2 = time.time()

        l4_block = create_block(l1_headers, validations)
        t3 = time.time()

        send_data(l4_block)
        t4 = time.time()

        # Clear our processing queue (finished successfully)
        clear_processing_blocks()

        total = t4 - t0
        _log.info(f"[L4] Processed block l3 blocks in {total:.4f} seconds")
        _log.info(
            f"[L4] Retrieving L3 block from queue: {t1 - t0:.4f} sec ({((t1 - t0) / total) * 100:.1f}% of processing)"
        )
        _log.info(
            f"[L4] Validating all L3 block proofs: {t2 - t1:.4f} sec ({((t2 - t1) / total) * 100:.1f}% of processing)"
        )
        _log.info(
            f"[L4] Creating block with proof: {t3 - t2:.4f} sec ({((t3 - t2) / total) * 100:.1f}% of processing)"
        )
        _log.info(
            f"[L4] Uploading block and broadcasting down: {t4 - t3:.4f} sec ({((t4 - t3) / total) * 100:.1f}% of processing)"
        )

        recurse_if_necessary()
    else:
        # Clear our processing queue
        clear_processing_blocks()

    if l1_headers is not None and l3_blocks is None:
        try:
            _log.warning(
                f"Bad Block received from lower level. L1 Headers: {l1_headers}"
            )
        except Exception:  # nosec (We don't care if l1_headers is an error/not defined and this log fails)
            pass

        # Clear our processing queue
        clear_processing_blocks()
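
recurse_if_necessary() runs after a successful pass here and in the L3/L2 workers below. Its implementation is not shown; a minimal sketch of the behavior the name and call sites imply, with the queue-depth check left as a hypothetical placeholder:

def recurse_if_necessary() -> None:
    """Sketch: if another block set is already queued, process it immediately instead of
    waiting for the next scheduled invocation."""
    if queue_has_pending_blocks():  # hypothetical helper; the real queue check may differ
        _log.info("[L4] More blocks in the queue, running another pass")
        execute()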
Example 5
def execute() -> None:
    """Gets the next L2 block arrays from the queue and processes it"""
    matchmaking.renew_registration_if_necessary()
    t0 = time.time()

    l1_headers, l2_blocks = get_new_blocks()
    if l1_headers and l2_blocks:
        t1 = time.time()
        _log.info(
            f"[L3] Got next L2 block array from dcid: {l1_headers['dc_id']} blockid: {l1_headers['block_id']}"
        )

        ddss, valid_block_count, regions, clouds = verify_blocks(
            l2_blocks, l1_headers)
        if not valid_block_count:
            _log.info(
                "[L3] None of the L2 blocks sent up were valid. Not creating any block/verifications"
            )
            clear_processing_blocks()
            recurse_if_necessary()
            return
        t2 = time.time()

        l3_block = create_block(l1_headers, ddss, valid_block_count, regions,
                                clouds, l2_blocks)
        t3 = time.time()

        send_data(l3_block)
        t4 = time.time()

        # Clear our processing queue (finished successfully)
        clear_processing_blocks()

        total = t4 - t0
        _log.info(
            f"[L3] Processed {len(l2_blocks)} l2 blocks for l1 block id {l1_headers['dc_id']} with dcid {l1_headers['block_id']} in {total:.4f} seconds"
        )
        _log.info(
            f"[L3] Retrieving L2 block list from queue: {t1 - t0:.4f} sec ({((t1 - t0) / total) * 100:.1f}% of processing)"
        )
        _log.info(
            f"[L3] Verified all L2 blocks in list: {t2 - t1:.4f} sec ({((t2 - t1) / total) * 100:.1f}% of processing)"
        )
        _log.info(
            f"[L3] Creating block with proof: {t3 - t2:.4f} sec ({((t3 - t2) / total) * 100:.1f}% of processing)"
        )
        _log.info(
            f"[L3] Uploading block and broadcasting down: {t4 - t3:.4f} sec ({((t4 - t3) / total) * 100:.1f}% of processing)"
        )

        recurse_if_necessary()
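
verify_blocks here returns four aggregates (ddss, valid_block_count, regions, clouds) that feed create_block. A minimal sketch of that aggregation over the valid L2 blocks, assuming each verified block exposes ddss, region, and cloud values (the attribute names are assumptions, not taken from the snippet):

from typing import Any, Iterable, List, Set, Tuple


def aggregate_verified_blocks(valid_l2_blocks: Iterable[Any]) -> Tuple[str, int, List[str], List[str]]:
    """Sum DDSS and collect the distinct regions/clouds represented by the valid L2 blocks (sketch)."""
    total_ddss = 0
    valid_block_count = 0
    regions: Set[str] = set()
    clouds: Set[str] = set()
    for block in valid_l2_blocks:
        valid_block_count += 1
        total_ddss += int(block.ddss)  # assumed attribute
        regions.add(block.region)      # assumed attribute
        clouds.add(block.cloud)        # assumed attribute
    return str(total_ddss), valid_block_count, list(regions), list(clouds)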
Example 6
def execute() -> None:
    """Gets the next L1 block from the queue and processes it"""
    matchmaking.renew_registration_if_necessary()
    t0 = time.time()
    l1_block = get_new_block()

    if l1_block:
        _log.info(
            f"[L2] Got next L1 block from dcid: {l1_block.dc_id} blockid: {l1_block.block_id}"
        )
        t1 = time.time()

        if verify_transaction_count(l1_block.dc_id, l1_block.block_id,
                                    len(l1_block.stripped_transactions)):
            transaction_validation_map = process_transactions(l1_block)
            t2 = time.time()

            l2_block = create_block(l1_block, transaction_validation_map)
            t3 = time.time()

            send_data(l2_block)
            t4 = time.time()

            total = t4 - t0
            _log.info(
                f"[L2] Processed block {l2_block.l1_block_id} from {l2_block.l1_dc_id} in {total:.4f} seconds"
            )
            _log.info(
                f"[L2] Retrieving L1 block from queue: {t1 - t0:.4f} sec ({((t1 - t0) / total) * 100:.1f}% of processing)"
            )
            _log.info(
                f"[L2] Processing transactions: {t2 - t1:.4f} sec ({((t2 - t1) / total) * 100:.1f}% of processing)"
            )
            _log.info(
                f"[L2] Creating L2 block: {t3 - t2:.4f} sec ({((t3 - t2) / total) * 100:.1f}% of processing)"
            )
            _log.info(
                f"[L2] Uploading block and broadcasting down: {t4 - t3:.4f} sec ({((t4 - t3) / total) * 100:.1f}% of processing)"
            )

        # Clear our processing queue (finished successfully)
        clear_processing_block()
        recurse_if_necessary()
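
verify_transaction_count guards against an L1 block whose reported transaction count does not match what was actually received. A minimal sketch of that check, assuming the expected count can be looked up by dc_id/block_id (the lookup helper is hypothetical):

def verify_transaction_count(dc_id: str, block_id: str, actual_count: int) -> bool:
    """Sketch: compare the transaction count reported for (dc_id, block_id) against what arrived."""
    expected_count = get_reported_transaction_count(dc_id, block_id)  # hypothetical lookup
    if expected_count != actual_count:
        _log.warning(
            f"[L2] Expected {expected_count} transactions for block {block_id} from {dc_id} "
            f"but received {actual_count}; not verifying"
        )
        return False
    return True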