Code example #1
Score: 0
def _generate_block_indexes_from_scratch() -> None:
    """Create the block redisearch index, then index every block object found in storage.

    Unlike the resumable migration variant, this unconditionally creates the index
    and re-indexes every matching block path.
    """
    client = _get_redisearch_index_client(Indexes.block.value)
    client.create_index(
        [
            redisearch.NumericField("block_id", sortable=True),
            redisearch.NumericField("prev_id", sortable=True),
            redisearch.NumericField("timestamp", sortable=True),
        ]
    )
    _log.info("Listing all blocks in storage")
    block_path_pattern = re.compile(r"BLOCK\/[0-9]+$")
    for block_path in storage.list_objects("BLOCK/"):
        # Only plain numeric block paths are actual blocks; skip everything else
        if not re.search(block_path_pattern, block_path):
            continue
        _log.info(f"Adding index for {block_path}")
        raw_block = storage.get_json_from_object(block_path)
        block = cast("model.BlockModel", None)  # Always assigned below (or AttributeError later)
        if LEVEL == "1":
            block = l1_block_model.new_from_stripped_block(raw_block)
        elif LEVEL == "2":
            block = l2_block_model.new_from_at_rest(raw_block)
        elif LEVEL == "3":
            block = l3_block_model.new_from_at_rest(raw_block)
        elif LEVEL == "4":
            block = l4_block_model.new_from_at_rest(raw_block)
        elif LEVEL == "5":
            block = l5_block_model.new_from_at_rest(raw_block)
        put_document(Indexes.block.value, block.block_id, block.export_as_search_index())
Code example #2
Score: 0
def _generate_l5_verification_indexes() -> None:
    """(Re)build the redisearch verification index and backfill it with stored L5 blocks.

    Only a level-1 chain with broadcasting enabled has L5 verification blocks in
    storage, so the backfill scan is skipped for any other configuration.
    Resumable: paths already indexed are tracked in the L5_BLOCK_MIGRATION_KEY set.
    """
    client = _get_redisearch_index_client(Indexes.verification.value)
    try:
        client.drop_index()
    except redis.exceptions.ResponseError:
        pass  # We don't care if the index didn't exist in the first place
    try:
        client.create_index(
            [
                redisearch.NumericField("block_id", sortable=True),
                redisearch.NumericField("prev_id", sortable=True),
                redisearch.NumericField("timestamp", sortable=True),
                redisearch.TagField("dc_id"),
            ]
        )
    except redis.exceptions.ResponseError as e:
        if not str(e).startswith("Index already exists"):  # We don't care if index already exists
            raise
    # This condition is invariant over the loop below; if it doesn't hold there is
    # nothing to index, so skip the (potentially large) storage listing entirely
    if LEVEL != "1" or not BROADCAST_ENABLED:
        return
    _log.info("Listing all blocks in storage")
    block_paths = storage.list_objects("BLOCK/")
    pattern = re.compile(r"BLOCK\/([0-9]+)-([Ll])5(.*)$")
    for block_path in block_paths:
        if re.search(pattern, block_path):
            # Skip paths already recorded as indexed so a restarted migration doesn't redo work
            if not client.redis.sismember(L5_BLOCK_MIGRATION_KEY, block_path):
                raw_block = storage.get_json_from_object(block_path)
                block = l5_block_model.new_from_at_rest(raw_block)
                put_document(Indexes.verification.value, block_path.split("/")[1], block.export_as_search_index())
                client.redis.sadd(L5_NODES, block.dc_id)
                client.redis.sadd(L5_BLOCK_MIGRATION_KEY, block_path)
            else:
                _log.info(f"Skipping already indexed L5 block {block_path}")
Code example #3
Score: 0
def check_confirmations() -> None:
    """Check whether the next unconfirmed L5 block has a confirmed interchain transaction.

    Finalizes the block on the first confirmed transaction hash. Hashes whose
    transactions cannot be found are presumed dropped and removed from the block.
    If no hash is confirmed, re-broadcasts the block when the interchain client
    says enough time has passed since it was last sent.
    """
    last_confirmed_block = get_last_confirmed_block()
    last_confirmed_block_number = last_confirmed_block["block_id"]
    last_created_block = get_last_block_number()

    _log.info(
        f"[L5] Last confirmed block is {last_confirmed_block_number}, last created block is {last_created_block}"
    )

    if int(last_confirmed_block_number) < int(last_created_block):
        # Check for confirmations
        next_block_to_confirm = int(last_confirmed_block_number) + 1
        block_key = f"BLOCK/{next_block_to_confirm}"
        block = l5_block_model.new_from_at_rest(
            storage.get_json_from_object(block_key))

        # Iterate over a snapshot: removing from block.transaction_hash while
        # iterating it directly would silently skip the element after each removal
        for txn_hash in list(block.transaction_hash):
            try:
                if _interchain_client.is_transaction_confirmed(txn_hash):
                    finalize_block(block, last_confirmed_block, txn_hash)
                    # Stop execution here!
                    return
            except exceptions.RPCTransactionNotFound:
                #  If transaction not found, it may have been dropped, so we remove it from the block
                block.transaction_hash.remove(txn_hash)

        # If execution did not stop, the block is not confirmed.
        if _interchain_client.should_retry_broadcast(block.block_last_sent_at):
            broadcast_to_public_chain(block)
Code example #4
Score: 0
File: dragonnet.py  Project: hewei-github/dragonchain
def process_receipt_v1(block_dto: Dict[str, Any]) -> None:
    """Validate a verification receipt from a higher-level chain and store it.

    For every L1 block the receipt covers: verifies the sending chain is in that
    block's claim check and the block is still accepting verifications from that
    level, persists the verification to storage, registers the receipt with
    matchmaking (best-effort), and notifies the broadcast system.

    Raises:
        exceptions.ValidationException: if block_dto is empty/missing.
        exceptions.InvalidNodeLevel: if the receipt's level has no known model.
        exceptions.NotAcceptingVerifications: if the receipt was not expected.
    """
    if not block_dto:
        raise exceptions.ValidationException("block_dto missing")
    _log.info(
        f"[RECEIPT] Got receipt from L{block_dto['header']['level']}: {block_dto}"
    )
    level_received_from: int = block_dto["header"]["level"]
    # Dispatch table: map the sender's level to the matching block model constructor
    model_for_level = {
        2: l2_block_model.new_from_at_rest,
        3: l3_block_model.new_from_at_rest,
        4: l4_block_model.new_from_at_rest,
        5: l5_block_model.new_from_at_rest,
    }
    if level_received_from not in model_for_level:
        raise exceptions.InvalidNodeLevel("Unsupported level receipt")
    block_model = model_for_level[level_received_from](block_dto)

    _log.info(f"Block model {block_model.__dict__}")
    l1_block_id_set = block_model.get_associated_l1_block_id()

    _log.info(
        f"Processing receipt for blocks {l1_block_id_set} from L{level_received_from}"
    )
    for l1_block_id in l1_block_id_set:
        # Check that the chain which sent this receipt is in our claims, and that this L1 block is accepting receipts for this level
        validations = matchmaking.get_claim_check(l1_block_id)["validations"][f"l{level_received_from}"]
        if not (
            block_model.dc_id in validations
            and broadcast_functions.is_block_accepting_verifications_from_level(l1_block_id, level_received_from)
        ):
            _log.warning(
                f"Chain {block_model.dc_id} (level {level_received_from}) returned a receipt that wasn't expected (possibly expired?) for block {l1_block_id}. Rejecting receipt"  # noqa: B950
            )
            raise exceptions.NotAcceptingVerifications(
                f"Not accepting verifications for block {l1_block_id} from {block_model.dc_id}"
            )
        _log.info(f"Verified that block {l1_block_id} was sent. Inserting receipt")
        storage_location = broadcast_functions.verification_storage_location(
            l1_block_id, level_received_from, block_model.dc_id
        )
        storage.put_object_as_json(storage_location, block_model.export_as_at_rest())
        # Set new receipt for matchmaking claim check
        try:
            matchmaking.add_receipt(
                l1_block_id, level_received_from, block_model.dc_id, block_model.block_id, block_model.proof
            )
        except Exception:
            _log.exception("matchmaking add_receipt failed!")
        # Update the broadcast system about this receipt
        broadcast_functions.set_receieved_verification_for_block_from_chain_sync(
            l1_block_id, level_received_from, block_model.dc_id
        )
Code example #5
Score: 0
def check_confirmations() -> None:
    """Check whether the next unconfirmed L5 block has a confirmed interchain transaction.

    is_transaction_confirmed returns a string (the hash) when the transaction was
    dropped, truthy when confirmed. Dropped hashes are removed from the block;
    on the first confirmed hash the block is finalized and execution stops;
    otherwise the broadcast is retried if necessary.
    """
    last_confirmed_block = get_last_confirmed_block()
    last_confirmed_block_number = last_confirmed_block["block_id"]
    last_created_block = get_last_block_number()

    _log.info(f"[L5] Last confirmed block is {last_confirmed_block_number}, last created block is {last_created_block}")

    if int(last_confirmed_block_number) < int(last_created_block):
        # Check for confirmations
        next_block_to_confirm = int(last_confirmed_block_number) + 1
        block_key = f"BLOCK/{next_block_to_confirm}"
        block = l5_block_model.new_from_at_rest(storage.get_json_from_object(block_key))

        # Iterate over a snapshot: removing from block.transaction_hash while
        # iterating it directly would silently skip the element after each removal
        for txn_hash in list(block.transaction_hash):
            confirmed = INTERCHAIN.is_transaction_confirmed(txn_hash)

            #  If this function returned the transaction hash, that means it was dropped so we
            #  remove it from the block, otherwise if it returned true that means it was confirmed.
            #  When broadcast retry occurs, the removed hashes will be removed in storage.
            if isinstance(confirmed, str):
                block.transaction_hash.remove(txn_hash)
            elif confirmed:
                finalize_block(block, last_confirmed_block, txn_hash)

                # Stop execution here!
                return

        # If execution did not stop in the above for loop, we know that the block is not confirmed.
        retry_broadcast_if_necessary(block)
Code example #6
Score: 0
File: redisearch.py  Project: uningan/dragonchain
def _generate_block_indexes() -> None:
    """Ensure the block redisearch index exists and backfill it from storage.

    Resumable: block paths already indexed are recorded in the BLOCK_MIGRATION_KEY
    set and skipped on subsequent runs.

    Raises:
        RuntimeError: if LEVEL is not one of "1"-"5".
    """
    client = _get_redisearch_index_client(Indexes.block.value)
    try:
        client.create_index(
            [
                redisearch.NumericField("block_id", sortable=True),
                redisearch.NumericField("prev_id", sortable=True),
                redisearch.NumericField("timestamp", sortable=True),
            ]
        )
    except redis.exceptions.ResponseError as e:
        if not str(e).startswith("Index already exists"):  # We don't care if index already exists
            raise
    # The parser depends only on this chain's LEVEL (loop-invariant), so select it
    # once up front instead of re-checking for every block in storage
    parsers = {
        "1": l1_block_model.new_from_stripped_block,
        "2": l2_block_model.new_from_at_rest,
        "3": l3_block_model.new_from_at_rest,
        "4": l4_block_model.new_from_at_rest,
        "5": l5_block_model.new_from_at_rest,
    }
    try:
        parse_block = parsers[LEVEL]
    except KeyError:
        # Previously a bad LEVEL surfaced later as AttributeError on a None block; fail fast instead
        raise RuntimeError(f"Invalid node LEVEL: {LEVEL}")
    _log.info("Listing all blocks in storage")
    block_paths = storage.list_objects("BLOCK/")
    pattern = re.compile(r"BLOCK\/[0-9]+$")
    for block_path in block_paths:
        if re.search(pattern, block_path):
            # do a check to see if this block was already marked as indexed
            if not client.redis.sismember(BLOCK_MIGRATION_KEY, block_path):
                _log.info(f"Adding index for {block_path}")
                raw_block = storage.get_json_from_object(block_path)
                block = parse_block(raw_block)
                put_document(Indexes.block.value, block.block_id, block.export_as_search_index())
                client.redis.sadd(BLOCK_MIGRATION_KEY, block_path)
            else:
                _log.info(f"Skipping already indexed block {block_path}")