def _generate_l5_verification_indexes() -> None:
    client = _get_redisearch_index_client(Indexes.verification.value)
    client.drop_index()
    try:
        client.create_index(
            [
                redisearch.NumericField("block_id", sortable=True),
                redisearch.NumericField("prev_id", sortable=True),
                redisearch.NumericField("timestamp", sortable=True),
                redisearch.TagField("dc_id"),
            ]
        )
    except redis.exceptions.ResponseError as e:
        if not str(e).startswith("Index already exists"):  # We don't care if index already exists
            raise
    _log.info("Listing all blocks in storage")
    block_paths = storage.list_objects("BLOCK/")
    pattern = re.compile(r"BLOCK\/([0-9]+)-([Ll])5(.*)$")
    for block_path in block_paths:
        if LEVEL == "1" and BROADCAST_ENABLED and re.search(pattern, block_path):
            if not client.redis.sismember(L5_BLOCK_MIGRATION_KEY, block_path):
                raw_block = storage.get_json_from_object(block_path)
                block = l5_block_model.new_from_at_rest(raw_block)
                put_document(Indexes.verification.value, block_path.split("/")[1], block.export_as_search_index())
                client.redis.sadd(L5_NODES, block.dc_id)
                client.redis.sadd(L5_BLOCK_MIGRATION_KEY, block_path)
            else:
                _log.info(f"Skipping already indexed L5 block {block_path}")
Example #2
def _generate_block_indexes_from_scratch() -> None:
    client = _get_redisearch_index_client(Indexes.block.value)
    client.create_index([
        redisearch.NumericField("block_id", sortable=True),
        redisearch.NumericField("prev_id", sortable=True),
        redisearch.NumericField("timestamp", sortable=True),
    ])
    _log.info("Listing all blocks in storage")
    block_paths = storage.list_objects("BLOCK/")
    pattern = re.compile(r"BLOCK\/[0-9]+$")
    for block_path in block_paths:
        if re.search(pattern, block_path):
            _log.info(f"Adding index for {block_path}")
            raw_block = storage.get_json_from_object(block_path)
            block = cast("model.BlockModel", None)
            if LEVEL == "1":
                block = l1_block_model.new_from_stripped_block(raw_block)
            elif LEVEL == "2":
                block = l2_block_model.new_from_at_rest(raw_block)
            elif LEVEL == "3":
                block = l3_block_model.new_from_at_rest(raw_block)
            elif LEVEL == "4":
                block = l4_block_model.new_from_at_rest(raw_block)
            elif LEVEL == "5":
                block = l5_block_model.new_from_at_rest(raw_block)
            put_document(Indexes.block.value, block.block_id,
                         block.export_as_search_index())
Example #3
def perform_api_key_migration_v1_if_necessary() -> None:
    """Checks if an api key migration needs to be performed, and does so if necessary"""
    try:
        if storage.get(f"{FOLDER}/{MIGRATION_V1}") == b"1":
            # Migration was previously performed. No action necessary
            return
    except exceptions.NotFound:
        pass
    _log.info("Api key migration required. Performing now")
    valid_keys = storage.list_objects(prefix=FOLDER)
    regular_keys = list(
        filter(lambda x: not x.startswith("KEYS/INTERCHAIN/"), valid_keys))
    interchain_keys = list(
        filter(lambda x: x.startswith("KEYS/INTERCHAIN/"), valid_keys))
    for key in regular_keys:
        _log.info(f"Migrating {key}")
        api_key = api_key_model.new_from_legacy(
            storage.get_json_from_object(key), interchain_dcid="")
        save_api_key(api_key)
    for key in interchain_keys:
        _log.info(f"Migrating interchain key {key}")
        interchain_dcid = key[key.find("KEYS/INTERCHAIN/") + len("KEYS/INTERCHAIN/"):]  # Get the interchain dcid from the key
        api_key = api_key_model.new_from_legacy(
            storage.get_json_from_object(key), interchain_dcid=interchain_dcid)
        save_api_key(api_key)
    # Save migration marker once complete
    storage.put(f"{FOLDER}/{MIGRATION_V1}", b"1")
    _log.info("Api key migration v1 complete")
def heap_list_v1(contract_id: str, path: str) -> List[str]:
    sub_folder = f"{contract_id}/HEAP"
    storage_key = f"{smart_contract_dao.FOLDER}/{sub_folder}{path}"
    listed_keys = storage.list_objects(storage_key)
    key_response = []
    for key in listed_keys:
        key_response.append(key[key.index(sub_folder) + len(sub_folder):])
    return key_response
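The slicing above returns everything after the <contract_id>/HEAP segment of each listed key. A small worked example with made-up values (the real prefix comes from smart_contract_dao.FOLDER):

contract_id = "my-contract"  # illustrative
sub_folder = f"{contract_id}/HEAP"
listed_key = f"SMARTCONTRACT/{contract_id}/HEAP/state/counter"  # hypothetical stored key
trimmed = listed_key[listed_key.index(sub_folder) + len(sub_folder):]
assert trimmed == "/state/counter"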
def _generate_smart_contract_indexes() -> None:
    delete_index(Indexes.smartcontract.value)  # Always generate smart contract indexes from scratch by dropping existing ones
    client = _get_redisearch_index_client(Indexes.smartcontract.value)
    client.create_index([redisearch.TagField("sc_name")])
    # Find what smart contracts exist in storage
    _log.info("Listing all smart contracts in storage")
    sc_object_paths = storage.list_objects("SMARTCONTRACT/")
    pattern = re.compile(r"SMARTCONTRACT\/.{36}\/metadata\.json$")
    for sc in sc_object_paths:
        if re.search(pattern, sc):
            sc_model = smart_contract_model.new_from_at_rest(storage.get_json_from_object(sc))
            _log.info(f"Adding index for smart contract {sc_model.id} ({sc_model.txn_type})")
            put_document(Indexes.smartcontract.value, sc_model.id, sc_model.export_as_search_index())
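The ".{36}" in the pattern corresponds to the length of a canonical UUID string, which appears to be how smart contract ids are laid out in these storage paths. A quick, self-contained check of that assumption with a sample path:

import re
import uuid

pattern = re.compile(r"SMARTCONTRACT\/.{36}\/metadata\.json$")
sample = f"SMARTCONTRACT/{uuid.uuid4()}/metadata.json"  # illustrative path
assert re.search(pattern, sample)
assert not re.search(pattern, "SMARTCONTRACT/short-id/metadata.json")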
def _generate_transaction_indexes() -> None:  # noqa: C901
    # -- CREATE INDEXES FOR TRANSACTIONS --
    client = _get_redisearch_index_client(Indexes.transaction.value)
    try:
        client.create_index([redisearch.TagField("block_id")])  # Used for reverse-lookup of transactions by id (with no txn_type)
    except redis.exceptions.ResponseError as e:
        if not str(e).startswith("Index already exists"):  # We don't care if index already exists
            raise
    try:
        create_transaction_index(namespace.Namespaces.Contract.value, force=False)  # Create the reserved txn type index
    except redis.exceptions.ResponseError as e:
        if not str(e).startswith("Index already exists"):  # We don't care if index already exists
            raise
    txn_types_to_watch = {namespace.Namespaces.Contract.value: 1}  # Will be used when going through all stored transactions
    txn_type_models = {
        namespace.Namespaces.Contract.value: transaction_type_model.TransactionTypeModel(namespace.Namespaces.Contract.value, active_since_block="1")
    }
    for txn_type in transaction_type_dao.list_registered_transaction_types():
        txn_type_model = transaction_type_model.new_from_at_rest(txn_type)
        txn_type_models[txn_type_model.txn_type] = txn_type_model
        _log.info(f"Adding index for {txn_type_model.txn_type}")
        try:
            create_transaction_index(txn_type_model.txn_type, txn_type_model.custom_indexes, force=False)
        except redis.exceptions.ResponseError as e:
            if not str(e).startswith("Index already exists"):  # We don't care if index already exists
                raise
        txn_types_to_watch[txn_type_model.txn_type] = int(txn_type_model.active_since_block)

    # -- LIST AND INDEX ACTUAL TRANSACTIONS FROM STORAGE
    _log.info("Listing all full transactions")
    transaction_blocks = storage.list_objects("TRANSACTION/")
    for txn_path in transaction_blocks:
        # do a check to see if this block's transactions were already marked as indexed
        if not client.redis.sismember(TXN_MIGRATION_KEY, txn_path):
            _log.info(f"Indexing transactions for {txn_path}")
            for txn in storage.get(txn_path).split(b"\n"):
                if txn:
                    txn_model = transaction_model.new_from_at_rest_full(json.loads(txn)["txn"])
                    # Add general transaction index
                    put_document(Indexes.transaction.value, f"txn-{txn_model.txn_id}", {"block_id": txn_model.block_id}, upsert=True)
                    watch_block = txn_types_to_watch.get(txn_model.txn_type)
                    # Extract custom indexes if necessary
                    if watch_block and int(txn_model.block_id) >= watch_block:
                        txn_model.extract_custom_indexes(txn_type_models[txn_model.txn_type])
                        put_document(txn_model.txn_type, txn_model.txn_id, txn_model.export_as_search_index(), upsert=True)
            client.redis.sadd(TXN_MIGRATION_KEY, txn_path)
        else:
            _log.info(f"Skipping already indexed transaction {txn_path}")
Example #7
def list_api_keys(include_interchain: bool) -> List[api_key_model.APIKeyModel]:
    """Retrieve a list of api keys
    Args:
        include_interchain: whether or not to include interchain api keys
    Returns:
        List of api key models
    """
    # Get keys from storage, excluding migration marker and interchain keys
    return_list = []
    for key in storage.list_objects(prefix=FOLDER):
        if (MIGRATION_V1 in key) or (key.startswith("KEYS/INTERCHAIN")
                                     and not include_interchain):
            continue
        return_list.append(
            api_key_model.new_from_at_rest(storage.get_json_from_object(key)))
    return return_list
Example #8
def rehydrate_transaction_types() -> None:
    existing_list = redis.smembers_sync("type_list_key")
    if len(existing_list) > 0:
        _log.info("redis is already populated")
        return

    transaction_types = filter(lambda x: len(x.split("/")) == 3,
                               storage.list_objects("TRANSACTION_TYPES"))
    txn_types = list(map(lambda x: x.split("/")[2], transaction_types))
    _log.info("Inserting new list into redis")
    if len(txn_types) > 0:
        response_number = redis.sadd_sync("type_list_key", *txn_types)
        if response_number > 0:
            _log.info("Succeeded in updating redis")
        _log.info(f"response number --> {response_number}")
    else:
        _log.info("No transaction types found to be updated...")
Example #9
def _generate_transaction_indexes_from_scratch() -> None:
    client = _get_redisearch_index_client(Indexes.transaction.value)
    # TODO: replace after redisearch is fixed
    client.create_index([TagField("block_id")])  # Used for reverse-lookup of transactions by id (with no txn_type)
    force_create_transaction_index(namespace.Namespaces.Contract.value)  # Create the reserved txn type index
    txn_types_to_watch = {namespace.Namespaces.Contract.value: 1}  # Will be used when going through all stored transactions
    txn_type_models = {
        namespace.Namespaces.Contract.value: transaction_type_model.TransactionTypeModel(namespace.Namespaces.Contract.value, active_since_block="1")
    }
    for txn_type in transaction_type_dao.list_registered_transaction_types():
        txn_type_model = transaction_type_model.new_from_at_rest(txn_type)
        txn_type_models[txn_type_model.txn_type] = txn_type_model
        _log.info(f"Adding index for {txn_type_model.txn_type}")
        force_create_transaction_index(txn_type_model.txn_type,
                                       txn_type_model.custom_indexes)
        txn_types_to_watch[txn_type_model.txn_type] = int(
            txn_type_model.active_since_block)
    _log.info("Listing all full transactions")
    transaction_blocks = storage.list_objects("TRANSACTION/")
    for txn_path in transaction_blocks:
        _log.info(f"Indexing transactions for {txn_path}")
        for txn in storage.get(txn_path).split(b"\n"):
            if txn:
                txn_model = transaction_model.new_from_at_rest_full(
                    json.loads(txn)["txn"])
                # Add general transaction index
                put_document(Indexes.transaction.value,
                             f"txn-{txn_model.txn_id}",
                             {"block_id": txn_model.block_id},
                             upsert=True)
                watch_block = txn_types_to_watch.get(txn_model.txn_type)
                # Extract custom indexes if necessary
                if watch_block and int(txn_model.block_id) >= watch_block:
                    txn_model.extract_custom_indexes(
                        txn_type_models[txn_model.txn_type])
                    put_document(txn_model.txn_type,
                                 txn_model.txn_id,
                                 txn_model.export_as_search_index(),
                                 upsert=True)
def get_pending_l4_blocks(block_id: str) -> List[str]:
    all_waiting_verification_keys = storage.list_objects(f"BROADCAST/TO_BROADCAST/{block_id}")

    l4_blocks = []
    for key in all_waiting_verification_keys:
        record_list = storage.get_json_from_object(key)

        for record in record_list:
            item = {
                "l1_dc_id": record["header"]["l1_dc_id"],
                "l1_block_id": record["header"]["l1_block_id"],
                "l4_dc_id": record["header"]["dc_id"],
                "l4_block_id": record["header"]["block_id"],
                "l4_proof": record["proof"]["proof"],
            }
            if record.get("is_invalid"):
                item["is_invalid"] = record.get("is_invalid")
            l4_blocks.append(json.dumps(item, separators=(",", ":")))

    return l4_blocks
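The separators=(",", ":") argument produces the most compact JSON encoding (no whitespace), which keeps each queued record small. A tiny demonstration with made-up data:

import json

item = {"l1_dc_id": "abc", "l4_block_id": "42"}  # illustrative record
assert json.dumps(item, separators=(",", ":")) == '{"l1_dc_id":"abc","l4_block_id":"42"}'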
Example #11
def list_interchain_clients(blockchain: str) -> List["model.InterchainModel"]:
    """Get all of the interchain clients for a specific blockchain type
    Args:
        blockchain: The blockchain of the desired clients to get
    Returns:
        List of instantiated interchain clients for the specified blockchain
    """
    from_rest_function: Any = None
    if blockchain == "bitcoin":
        from_rest_function = btc.new_from_at_rest
    elif blockchain == "ethereum":
        from_rest_function = eth.new_from_at_rest
    else:
        raise exceptions.NotFound(
            f"Blockchain network {blockchain} is not supported")

    return [
        from_rest_function(storage.get_json_from_object(x))
        for x in storage.list_objects(f"{FOLDER}/{blockchain}/")
    ]
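The same selection could be expressed as a table-driven lookup instead of an if/elif chain. A rough sketch under that assumption, with stub constructors standing in for btc.new_from_at_rest / eth.new_from_at_rest (not the project's actual implementation):

from typing import Any, Callable, Dict

_DISPATCH: Dict[str, Callable[[dict], Any]] = {
    "bitcoin": lambda at_rest: {"type": "bitcoin", **at_rest},    # stub constructor
    "ethereum": lambda at_rest: {"type": "ethereum", **at_rest},  # stub constructor
}

def new_client_from_at_rest(blockchain: str, at_rest: dict) -> Any:
    try:
        return _DISPATCH[blockchain](at_rest)
    except KeyError:
        raise ValueError(f"Blockchain network {blockchain} is not supported")

print(new_client_from_at_rest("ethereum", {"name": "example"}))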
Example #12
def get_verifications_for_l1_block(block_id: str, level: int) -> List[Dict[str, Any]]:
    try:
        keys = list(broadcast_functions.get_receieved_verifications_for_block_and_level_sync(block_id, level))
        if len(keys) != 0:
            for i in range(len(keys)):
                keys[i] = f"{FOLDER}/{block_id}-l{level}-{keys[i].decode('utf8')}"
            return list(map(lambda key: storage.get_json_from_object(key), keys))
    except Exception:
        pass
    # Only fall back to listing from storage if we don't have verifications already saved in redis
    prefix = f"{FOLDER}/{block_id}-l{level}"
    keys = storage.list_objects(prefix)
    _log.info(f"Verification keys by prefix {prefix}: {keys}")
    return [] if len(keys) == 0 else list(map(lambda key: storage.get_json_from_object(key), keys))
Example #13
def get_verifications_for_l1_block(block_id: str, level: int) -> List[Dict[str, Any]]:
    try:
        keys = list(broadcast_functions.get_receieved_verifications_for_block_and_level_sync(block_id, level))
        if len(keys) != 0:
            for i in range(len(keys)):
                keys[i] = f"{FOLDER}/{block_id}-l{level}-{keys[i]}"
            return [storage.get_json_from_object(x) for x in keys]
    except Exception:
        _log.exception(
            "Error getting verifications from cached list. Falling back to direct storage list"
        )
    # Only fall back to listing from storage if we don't have verifications already saved in redis
    prefix = f"{FOLDER}/{block_id}-l{level}"
    keys = storage.list_objects(prefix)
    _log.info(f"Verification keys by prefix {prefix}: {keys}")
    return [] if len(keys) == 0 else [storage.get_json_from_object(key) for key in keys]
def increment_storage_error_sync(block_id: str, current_level: int) -> None:
    """When getting a storage error/inconsistency between redis/storage, this should be called
    This will roll-back a block to a previous level for verifications if FAULT_TOLERATION is surpassed for a block

    Basically, the state in redis can be a mis-representation of what's in actual storage, and if this occurs, we need to roll back
    the block verifications state and remove any bogus verifications from redis that aren't truly saved in storage, which happens on occasion
    Args:
        block_id: the block_id to increment a storage error
        current_level: the current block verification level state (should be in broadcast:block:state)
    """
    # Don't do anything if at or below level two because no verifications are required yet
    if current_level <= 2:
        return
    error_key = storage_error_key(block_id)
    current_count = int(redis.get_sync(error_key, decode=False) or 0)
    if current_count < FAULT_TOLERATION:
        redis.set_sync(error_key, str(current_count + 1))
        return
    # Beyond fault toleration, we must rollback this block

    # First find all verifications actually in storage
    prefix = f"BLOCK/{block_id}-l{current_level - 1}"
    good_verifications = set()
    for key in storage.list_objects(prefix):
        good_verifications.add(re.search(f"^{prefix}-(.*)", key).group(1))  # noqa: T484

    # Now find all verifications the system thinks we have in redis
    redis_verifications_key = verifications_key(block_id, current_level - 1)
    all_verifications = redis.smembers_sync(redis_verifications_key)

    # Remove all bad verifications recorded in redis that aren't in storage, and demote block to previous level
    p = redis.pipeline_sync()
    p.srem(redis_verifications_key,
           *all_verifications.difference(good_verifications))
    p.delete(error_key)
    p.set(state_key(block_id), str(current_level - 1))
    p.execute()
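The rollback step boils down to a set difference: anything recorded in redis but absent from storage is considered bogus and removed. A toy illustration with made-up node ids:

in_redis = {"node-a", "node-b", "node-c"}  # what redis thinks we have
in_storage = {"node-a", "node-c"}          # what is actually saved
bogus = in_redis.difference(in_storage)
assert bogus == {"node-b"}
# The real code removes these with p.srem(redis_verifications_key, *bogus)
# and then demotes the block's state to the previous level.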
Example #15
def get_api_key_list_v1() -> Dict[str, List[Dict[str, Any]]]:
    """
    Gets the list of api key IDs
    Returns:
        List of API keys
    """
    keys = storage.list_objects(prefix=FOLDER)
    valid_keys = list(
        filter(lambda x: not x.startswith("KEYS/WEB_") and not x.startswith("KEYS/SC_") and not x.startswith("KEYS/INTERCHAIN"), keys)
    )
    returned_keys = []
    for key in valid_keys:
        resp = storage.get_json_from_object(key)
        returned_keys.append({
            "id": str(resp["id"]),
            "registration_time": int(resp["registration_time"]),
            "nickname": str(resp.get("nickname") or ""),
        })
    return {"keys": returned_keys}
Example #16
def _generate_block_indexes() -> None:
    client = _get_redisearch_index_client(Indexes.block.value)
    try:
        client.create_index([
            redisearch.NumericField("block_id", sortable=True),
            redisearch.NumericField("prev_id", sortable=True),
            redisearch.NumericField("timestamp", sortable=True),
        ])
    except redis.exceptions.ResponseError as e:
        if not str(e).startswith("Index already exists"
                                 ):  # We don't care if index already exists
            raise
    _log.info("Listing all blocks in storage")
    block_paths = storage.list_objects("BLOCK/")
    pattern = re.compile(r"BLOCK\/[0-9]+$")
    for block_path in block_paths:
        if re.search(pattern, block_path):
            # do a check to see if this block was already marked as indexed
            if not client.redis.sismember(BLOCK_MIGRATION_KEY, block_path):
                _log.info(f"Adding index for {block_path}")
                raw_block = storage.get_json_from_object(block_path)
                block = cast("model.BlockModel", None)
                if LEVEL == "1":
                    block = l1_block_model.new_from_stripped_block(raw_block)
                elif LEVEL == "2":
                    block = l2_block_model.new_from_at_rest(raw_block)
                elif LEVEL == "3":
                    block = l3_block_model.new_from_at_rest(raw_block)
                elif LEVEL == "4":
                    block = l4_block_model.new_from_at_rest(raw_block)
                elif LEVEL == "5":
                    block = l5_block_model.new_from_at_rest(raw_block)
                put_document(Indexes.block.value, block.block_id,
                             block.export_as_search_index())
                client.redis.sadd(BLOCK_MIGRATION_KEY, block_path)
            else:
                _log.info(f"Skipping already indexed block {block_path}")
def _level_records(block_id: str, level: int) -> List[Any]:
    return [
        storage.get_json_from_object(key)
        for key in storage.list_objects(f"BLOCK/{block_id}-l{level}")
    ]
def list_registered_transaction_types() -> List[Dict[str, Any]]:
    return [
        storage.get_json_from_object(txn_type)
        for txn_type in storage.list_objects(f"{FOLDER}/")
    ]
Example #19
def test_list_objects_calls_storage_list_objects_with_params(self):
    storage.storage.list_objects = MagicMock()
    storage.list_objects("prefix")
    storage.storage.list_objects.assert_called_once_with("test", "prefix")