def test_get_transaction_batches_to_process_empty_batch(app):
    with app.app_context():
        db = get_db()
        redis = get_redis()

    solana_client_manager_mock = create_autospec(SolanaClientManager)
    cid_metadata_client_mock = create_autospec(CIDMetadataClient)
    anchor_program_indexer = AnchorProgramIndexer(
        PROGRAM_ID,
        ADMIN_STORAGE_PUBLIC_KEY,
        LABEL,
        redis,
        db,
        solana_client_manager_mock,
        cid_metadata_client_mock,
    )
    anchor_program_indexer.get_latest_slot = MagicMock(return_value=0)

    mock_transactions_history = {"result": []}
    solana_client_manager_mock.get_signatures_for_address.return_value = (
        mock_transactions_history
    )

    transaction_batches = anchor_program_indexer.get_transaction_batches_to_process()
    assert transaction_batches == [[]]
def sol_play_check():
    """
    limit: number of latest plays to return
    max_drift: maximum duration in seconds between `now` and the
    latest recorded play record to be considered healthy
    """
    limit = request.args.get("limit", type=int, default=20)
    max_drift = request.args.get("max_drift", type=int)
    error = None
    redis = redis_connection.get_redis()

    response = get_latest_sol_play_check_info(redis, limit)
    latest_db_sol_plays = response["latest_db_sol_plays"]

    if latest_db_sol_plays:
        latest_db_play = latest_db_sol_plays[0]
        latest_created_at = latest_db_play["created_at"]
        drift = (datetime.now() - latest_created_at).total_seconds()

        # Error if max drift was provided and the drift is greater than max_drift
        error = max_drift and drift > max_drift

    return success_response(response,
                            500 if error else 200,
                            sign_response=False)
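# A minimal sketch of the drift check used in sol_play_check above, with made-up
# numbers (datetime/timedelta come from the standard library; the 30-second
# max_drift is purely illustrative, not a value from the service config):
from datetime import datetime, timedelta

latest_created_at = datetime.now() - timedelta(seconds=45)
drift = (datetime.now() - latest_created_at).total_seconds()  # roughly 45.0
max_drift = 30
error = max_drift and drift > max_drift  # True, so the endpoint returns a 500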
def get_latest_ipld_indexed_block(use_redis_cache=True):
    redis = redis_connection.get_redis()
    latest_indexed_ipld_block_num = None
    latest_indexed_ipld_block_hash = None

    if use_redis_cache:
        latest_indexed_ipld_block_num = redis.get(
            most_recent_indexed_ipld_block_redis_key)
        latest_indexed_ipld_block_hash = redis.get(
            most_recent_indexed_ipld_block_hash_redis_key)
        if latest_indexed_ipld_block_num is not None:
            latest_indexed_ipld_block_num = int(latest_indexed_ipld_block_num)

    if latest_indexed_ipld_block_num is None or latest_indexed_ipld_block_hash is None:
        (
            latest_indexed_ipld_block_num,
            latest_indexed_ipld_block_hash,
        ) = _get_db_ipld_block_state()

        # If there are no entries in the table, default to these values
        if latest_indexed_ipld_block_num is None:
            latest_indexed_ipld_block_num = 0
        if latest_indexed_ipld_block_hash is None:
            latest_indexed_ipld_block_hash = ""

    return latest_indexed_ipld_block_num, latest_indexed_ipld_block_hash
def test_fetch_traversed_tx_from_cache(app):
    with app.app_context():
        redis = get_redis()

    tx_slot = 111753419
    first_mock_tx: ConfirmedSignatureForAddressResult = {
        "signature": "4NyCD5Ef5bsheuqXKFASJXPsiiss8ESqPg61bF5QNLBMYGshGbKTCfTAFjrxZYAH1JxZdY14kSeQG2b66Cis7vxj",
        "slot": tx_slot,
        "blockTime": 1640126543,
        "confirmationStatus": "finalized",
        "err": None,
        "memo": None,
    }

    cache_traversed_tx(redis, first_mock_tx)
    # Confirm that if the latest db slot is greater than the cached value, it is removed from redis
    latest_db_slot = tx_slot + 10
    fetched_tx = fetch_traversed_tx_from_cache(redis, latest_db_slot)
    assert fetched_tx == None

    # Confirm the values have been removed from redis queue
    assert_cache_array_length(redis, 0)

    # Now, populate 2 entries into redis
    cache_traversed_tx(redis, first_mock_tx)
    cache_traversed_tx(redis, mock_tx_result_2)

    assert_cache_array_length(redis, 2)

    fetched_tx = fetch_traversed_tx_from_cache(redis, latest_db_slot)
    assert fetched_tx == mock_tx_result_2["signature"]

    # Confirm the values have been removed from redis queue
    assert_cache_array_length(redis, 0)
def app_impl():
    # Drop DB, ensuring migration performed at start
    if database_exists(DB_URL):
        drop_database(DB_URL)

    create_database(DB_URL)

    # Drop redis
    redis = get_redis()
    redis.flushall()

    # Clear any existing logging config
    helpers.reset_logging()

    # Run db migrations because the db gets dropped at the start of the tests
    alembic_dir = os.getcwd()
    alembic_config = alembic.config.Config(f"{alembic_dir}/alembic.ini")
    alembic_config.set_main_option("sqlalchemy.url", str(DB_URL))
    alembic_config.set_main_option("mode", "test")
    with helpers.cd(alembic_dir):
        alembic.command.upgrade(alembic_config, "head")

    # Create application for testing
    discovery_provider_app = create_app(TEST_CONFIG_OVERRIDE)

    yield discovery_provider_app
def test_get_transaction_batches_to_process_single_batch(app):
    with app.app_context():
        db = get_db()
        redis = get_redis()

    solana_client_manager_mock = create_autospec(SolanaClientManager)
    cid_metadata_client_mock = create_autospec(CIDMetadataClient)
    anchor_program_indexer = AnchorProgramIndexer(
        PROGRAM_ID,
        ADMIN_STORAGE_PUBLIC_KEY,
        LABEL,
        redis,
        db,
        solana_client_manager_mock,
        cid_metadata_client_mock,
    )
    anchor_program_indexer.get_latest_slot = MagicMock(return_value=0)
    anchor_program_indexer.is_tx_in_db = MagicMock(return_value=True)

    mock_transactions_history = {
        "result": [
            {"slot": 3, "signature": "sig3"},
            {"slot": 2, "signature": "sig2"},
            {"slot": 1, "signature": "sig1"},
            {"slot": 0, "signature": "intersection"},
        ]
    }
    solana_client_manager_mock.get_signatures_for_address.return_value = (
        mock_transactions_history
    )

    transaction_batches = anchor_program_indexer.get_transaction_batches_to_process()
    assert transaction_batches == [
        list(map(lambda x: x["signature"], mock_transactions_history["result"][:-1]))
    ]
def test_exists_in_db_and_get_latest_slot(app):  # pylint: disable=W0621
    with app.app_context():
        db = get_db()
        redis = get_redis()

    solana_client_manager_mock = create_autospec(SolanaClientManager)
    cid_metadata_client_mock = create_autospec(CIDMetadataClient)
    anchor_program_indexer = AnchorProgramIndexer(
        PROGRAM_ID,
        ADMIN_STORAGE_PUBLIC_KEY,
        LABEL,
        redis,
        db,
        solana_client_manager_mock,
        cid_metadata_client_mock,
    )

    TEST_TX_HASH = "3EvzmLSZekcQn3zEGFUkaoXej9nUrwkomyTpu9PRBaJJDAtzFQ3woYuGmnLHrqY6kZJtxamqCgeu17euyGp3EN4W"
    TEST_TX_SLOT = 100

    with db.scoped_session() as session:
        assert anchor_program_indexer.is_tx_in_db(session, TEST_TX_HASH) == False
        session.add(AudiusDataTx(signature=TEST_TX_HASH, slot=TEST_TX_SLOT))
        assert anchor_program_indexer.is_tx_in_db(session, TEST_TX_HASH) == True

    latest_slot = anchor_program_indexer.get_latest_slot()
    assert latest_slot == TEST_TX_SLOT
def is_valid_oracle(address: str) -> bool:
    redis = get_redis()
    oracle_addresses = redis.get(oracle_addresses_key)
    if oracle_addresses:
        oracle_addresses = oracle_addresses.decode().split(",")
    else:
        oracle_addresses = get_oracle_addresses_from_chain(redis)
    return address in oracle_addresses
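# A hedged sketch of how the comma-separated cache entry read by is_valid_oracle
# could be written. The key name comes from the snippet above; the helper name,
# TTL, and write path are assumptions for illustration, not the actual
# implementation of get_oracle_addresses_from_chain.
def cache_oracle_addresses(redis, addresses, ttl_sec=300):
    # Store as a single comma-separated string so the read path can
    # decode().split(",") it back into a list.
    redis.set(oracle_addresses_key, ",".join(addresses), ex=ttl_sec)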
def use_redis_cache(key, ttl_sec, work_func):
    """Attemps to return value by key, otherwise caches and returns `work_func`"""
    redis = redis_connection.get_redis()
    cached_value = get_pickled_key(redis, key)
    if cached_value:
        return cached_value
    to_cache = work_func()
    pickle_and_set(redis, key, to_cache, ttl_sec)
    return to_cache
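# A hedged usage sketch for use_redis_cache: wrap an expensive computation so
# repeated calls within the TTL hit Redis instead of recomputing. The cache key,
# TTL, and compute_trending helper are illustrative assumptions, not names from
# the codebase.
def compute_trending(genre):
    # Placeholder for an expensive query; illustrative only.
    return {"genre": genre, "tracks": []}

def get_trending_cached(genre):
    cache_key = f"trending:{genre}"
    return use_redis_cache(cache_key, 60, lambda: compute_trending(genre))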
def test_cache_traversed_tx(app):
    with app.app_context():
        redis = get_redis()

    cache_traversed_tx(redis, mock_tx_result_1)
    assert_cache_array_length(redis, 1)
    cached_val_array = redis.lrange(REDIS_TX_CACHE_QUEUE_PREFIX, 0, 100)
    cached_first_entry = json.loads(cached_val_array[0])
    assert cached_first_entry == mock_tx_result_1
def index_block_stats():
    redis = redis_connection.get_redis()

    results = {
        "index_blocks_ms": {
            "minute":
            get_index_blocks_ms_stats_since(redis, MINUTE_IN_SECONDS),
            "ten_minutes":
            get_index_blocks_ms_stats_since(redis, TEN_MINUTES_IN_SECONDS),
            "hour":
            get_index_blocks_ms_stats_since(redis, HOUR_IN_SECONDS),
            "six_hour":
            get_index_blocks_ms_stats_since(redis, SIX_HOURS_IN_SECONDS),
            "twelve_hour":
            get_index_blocks_ms_stats_since(redis, TWELVE_HOURS_IN_SECONDS),
            "day":
            get_index_blocks_ms_stats_since(redis, DAY_IN_SECONDS),
        },
        "fetch_ipfs_metadata_ms": {
            "minute":
            get_fetch_ipfs_metadata_ms_stats_since(redis, MINUTE_IN_SECONDS),
            "ten_minutes":
            get_fetch_ipfs_metadata_ms_stats_since(redis,
                                                   TEN_MINUTES_IN_SECONDS),
            "hour":
            get_fetch_ipfs_metadata_ms_stats_since(redis, HOUR_IN_SECONDS),
            "six_hour":
            get_fetch_ipfs_metadata_ms_stats_since(redis,
                                                   SIX_HOURS_IN_SECONDS),
            "twelve_hour":
            get_fetch_ipfs_metadata_ms_stats_since(redis,
                                                   TWELVE_HOURS_IN_SECONDS),
            "day":
            get_fetch_ipfs_metadata_ms_stats_since(redis, DAY_IN_SECONDS),
        },
        "add_indexed_block_to_db_ms": {
            "minute":
            get_add_indexed_block_to_db_ms_stats_since(redis,
                                                       MINUTE_IN_SECONDS),
            "ten_minutes":
            get_add_indexed_block_to_db_ms_stats_since(redis,
                                                       TEN_MINUTES_IN_SECONDS),
            "hour":
            get_add_indexed_block_to_db_ms_stats_since(redis, HOUR_IN_SECONDS),
            "six_hour":
            get_add_indexed_block_to_db_ms_stats_since(redis,
                                                       SIX_HOURS_IN_SECONDS),
            "twelve_hour":
            get_add_indexed_block_to_db_ms_stats_since(
                redis, TWELVE_HOURS_IN_SECONDS),
            "day":
            get_add_indexed_block_to_db_ms_stats_since(redis, DAY_IN_SECONDS),
        },
    }

    return success_response(results, sign_response=False)
def get_cached_users(user_ids):
    redis_user_id_keys = list(map(get_user_id_cache_key, user_ids))
    redis = redis_connection.get_redis()
    users = get_all_json_cached_key(redis, redis_user_id_keys)
    for user in users:
        if user:
            for field in user_datetime_fields:
                if user[field]:
                    user[field] = parser.parse(user[field])
    return users
def get_cached_playlists(playlist_ids):
    redis_playlist_id_keys = list(map(get_playlist_id_cache_key, playlist_ids))
    redis = redis_connection.get_redis()
    playlists = get_all_json_cached_key(redis, redis_playlist_id_keys)
    for playlist in playlists:
        if playlist:
            for field in playlist_datetime_fields:
                if playlist[field]:
                    playlist[field] = parser.parse(playlist[field])
    return playlists
def get_cached_tracks(track_ids):
    redis_track_id_keys = list(map(get_track_id_cache_key, track_ids))
    redis = redis_connection.get_redis()
    tracks = get_all_json_cached_key(redis, redis_track_id_keys)
    for track in tracks:
        if track:
            for field in track_datetime_fields:
                if track[field]:
                    track[field] = parser.parse(track[field])
    return tracks
def test_self_referrals(bus_mock: mock.MagicMock, app):
    """Test that users can't refer themselves"""
    with app.app_context():
        db = get_db()
        redis = get_redis()
        bus_mock(redis)
    with db.scoped_session() as session, bus_mock.use_scoped_dispatch_queue():
        user = User(user_id=1, blockhash=str(block_hash), blocknumber=1)
        events: UserEventsMetadata = {"referrer": 1}
        update_user_events(session, user, events, bus_mock)
        mock_call = mock.call.dispatch(ChallengeEvent.referral_signup, 1, 1,
                                       {"referred_user_id": 1})
        assert mock_call not in bus_mock.method_calls
def celery_app():
    """
    Configures a test fixture for celery.
    Usage:
    ```
    def test_something(celery_app):
        task = celery_app.celery.tasks["update_something"]
        db = task.db
        with db.scoped_session():
            pass
    ```

    Note: This fixture must be at module scope because celery
    works by autodiscovering tasks with a decorator at import time of a module.
    If you try to use celery as a fixture at the function scope,
    you will run into mysterious errors as some task context may be stale!
    """
    # Drop DB, ensuring migration performed at start
    if database_exists(DB_URL):
        drop_database(DB_URL)

    create_database(DB_URL)

    # Drop redis
    redis = get_redis()
    redis.flushall()

    # Clear any existing logging config
    helpers.reset_logging()

    # Run db migrations because the db gets dropped at the start of the tests
    alembic_dir = os.getcwd()
    alembic_config = alembic.config.Config(f"{alembic_dir}/alembic.ini")
    alembic_config.set_main_option("sqlalchemy.url", str(DB_URL))
    alembic_config.set_main_option("mode", "test")
    with helpers.cd(alembic_dir):
        alembic.command.upgrade(alembic_config, "head")

    # Call to create_celery returns an object containing the following:
    # 'Celery' - base Celery application
    # 'celery' - src.tasks.celery_app
    # Hence, references to the discovery provider celery application
    # Are formatted as:
    #   'celery_app.celery._some_res_or_func'
    celery_app = create_celery(TEST_CONFIG_OVERRIDE)

    yield celery_app
    if database_exists(DB_URL):
        drop_database(DB_URL)
    redis.flushall()
def test_parse_tx(app, mocker):
    with app.app_context():
        db = get_db()
        redis = get_redis()

    mocker.patch(
        "src.solana.anchor_program_indexer.AnchorProgramIndexer.is_valid_instruction",
        return_value=True,  # return true because admin differs
        autospec=True,
    )

    solana_client_manager_mock = create_autospec(SolanaClientManager)
    cid_metadata_client_mock = create_autospec(CIDMetadataClient)

    solana_client_manager_mock.get_sol_tx_info.return_value = mock_tx_info
    anchor_program_indexer = AnchorProgramIndexer(
        PROGRAM_ID,
        ADMIN_STORAGE_PUBLIC_KEY,
        LABEL,
        redis,
        db,
        solana_client_manager_mock,
        cid_metadata_client_mock,
    )
    resp = asyncio.run(
        anchor_program_indexer.parse_tx(
            "5vvRr1R99NoU53vuZgukBkyNtfzojGT93ryCJkf6Xv6Yjb1xgb9gfkPLiddgVrhPZ44Jx5SxX4SaB4ZtJaiLMuzW"
        )
    )
    assert resp["tx_metadata"]["instructions"] is not None
    assert len(resp["tx_metadata"]["instructions"]) == 1
    instr = resp["tx_metadata"]["instructions"][0]
    assert instr["instruction_name"] == "create_content_node"
    assert instr["data"] is not None
    data = instr["data"]
    sp_id = data.get("sp_id")
    base = data.get("base")
    authority = data.get("authority")
    owner_eth_address: ListContainer = data.get("owner_eth_address")
    owner_eth_address_array = list(owner_eth_address)
    owner_eth_address_hex = Web3.toChecksumAddress(
        f"0x{bytes(owner_eth_address_array).hex()}"
    )
    assert sp_id == 1
    assert str(base) == "DUvTEvu2WHLWstwgn38S5fCpE23L8yd36WDKxYoAHHax"
    assert str(authority) == "HEpbkzohyMFbc2cQ4KPRbXRUVbgFW3uVrHaKPdMD6pqJ"
    assert owner_eth_address_hex == "0x25A3Acd4758Ab107ea0Bd739382B8130cD1F204d"
def get_cached_tracks(track_ids):
    redis_track_id_keys = map(get_track_id_cache_key, track_ids)
    redis = redis_connection.get_redis()
    cached_values = redis.mget(redis_track_id_keys)

    tracks = []
    for val in cached_values:
        if val is not None:
            try:
                track = pickle.loads(val)
                tracks.append(track)
            except Exception as e:
                logger.warning(f"Unable to deserialize cached track: {e} {val}")
                tracks.append(None)
        else:
            tracks.append(None)
    return tracks
def get_cached_playlists(playlist_ids):
    redis_playlist_id_keys = map(get_playlist_id_cache_key, playlist_ids)
    redis = redis_connection.get_redis()
    cached_values = redis.mget(redis_playlist_id_keys)

    playlists = []
    for val in cached_values:
        if val is not None:
            try:
                playlist = pickle.loads(val)
                playlists.append(playlist)
            except Exception as e:
                logger.warning(f"Unable to deserialize cached playlist: {e}")
                playlists.append(None)
        else:
            playlists.append(None)
    return playlists
def use_redis_cache(key, ttl_sec, work_func):
    """Attemps to return value by key, otherwise caches and returns `work_func`"""
    redis = redis_connection.get_redis()
    cached_value = redis.get(key)

    if cached_value:
        logger.info(f"Redis Cache - hit {key}")
        try:
            deserialized = pickle.loads(cached_value)
            return deserialized
        except Exception as e:
            logger.warning(f"Unable to deserialize cached response: {e}")

    logger.info(f"Redis Cache - miss {key}")
    to_cache = work_func()
    serialized = pickle.dumps(to_cache)
    redis.set(key, serialized, ttl_sec)
    return to_cache
def setup_challenge_bus():
    redis = get_redis()
    bus = ChallengeEventBus(redis)

    # register listeners

    # profile_challenge_manager listeners
    bus.register_listener(ChallengeEvent.profile_update,
                          profile_challenge_manager)
    bus.register_listener(ChallengeEvent.repost, profile_challenge_manager)
    bus.register_listener(ChallengeEvent.follow, profile_challenge_manager)
    bus.register_listener(ChallengeEvent.favorite, profile_challenge_manager)
    # listen_streak_challenge_manager listeners
    bus.register_listener(ChallengeEvent.track_listen,
                          listen_streak_challenge_manager)
    # track_upload_challenge_manager listeners
    bus.register_listener(ChallengeEvent.track_upload,
                          track_upload_challenge_manager)
    # referral challenge managers
    bus.register_listener(ChallengeEvent.referral_signup,
                          referral_challenge_manager)
    bus.register_listener(ChallengeEvent.referral_signup,
                          verified_referral_challenge_manager)
    bus.register_listener(ChallengeEvent.referred_signup,
                          referred_challenge_manager)
    # connect_verified_challenge_manager listeners
    bus.register_listener(ChallengeEvent.connect_verified,
                          connect_verified_challenge_manager)
    bus.register_listener(ChallengeEvent.mobile_install,
                          mobile_install_challenge_manager)
    bus.register_listener(ChallengeEvent.trending_track,
                          trending_track_challenge_manager)
    bus.register_listener(
        ChallengeEvent.trending_underground,
        trending_underground_track_challenge_manager,
    )
    bus.register_listener(ChallengeEvent.trending_playlist,
                          trending_playlist_challenge_manager)
    bus.register_listener(ChallengeEvent.send_tip,
                          send_first_tip_challenge_manager)

    return bus
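# A hedged sketch of dispatching an event through the bus returned by
# setup_challenge_bus. The dispatch signature (event, block_number, user_id,
# optional extra metadata) is inferred from the mock assertions in the tests
# above, e.g. dispatch(ChallengeEvent.referral_signup, 1, 2,
# {"referred_user_id": 1}); treat it as an assumption, not the canonical API.
def dispatch_referral(bus, block_number, referrer_user_id, referred_user_id):
    bus.dispatch(
        ChallengeEvent.referral_signup,
        block_number,
        referrer_user_id,
        {"referred_user_id": referred_user_id},
    )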
async def test_fetch_metadata(app, mocker):
    with app.app_context():
        db = get_db()
        redis = get_redis()

    populate_mock_db(db, basic_entities, block_offset=3)

    solana_client_manager_mock = create_autospec(SolanaClientManager)
    cid_metadata_client_mock = create_autospec(CIDMetadataClient)

    cid_metadata_client_mock.async_fetch_metadata_from_gateway_endpoints.return_value = (
        mock_cid_metadata
    )

    anchor_program_indexer = AnchorProgramIndexer(
        PROGRAM_ID,
        ADMIN_STORAGE_PUBLIC_KEY,
        LABEL,
        redis,
        db,
        solana_client_manager_mock,
        cid_metadata_client_mock,
    )
    parsed_tx = {
        "tx_metadata": {
            "instructions": [
                {
                    "instruction_name": "init_user",
                    "data": Container([("metadata", mock_cid), ("user_id", 1)]),
                }
            ]
        },
        "tx_sig": "x4PCuQs3ncvhJ3Qz18CBzYg26KnG1tAD1QvZG9B6oBZbR8cJrat2MzcvCbjtMMn9Mkc4C8w23LHTFaLG4dJaXkV",
    }
    mock_parsed_transactions = [parsed_tx]
    cid_metadata, blacklisted_cids = await anchor_program_indexer.fetch_cid_metadata(
        mock_parsed_transactions
    )

    assert cid_metadata == mock_cid_metadata
def test_validate_and_save_parsed_tx_records(app):
    with app.app_context():
        db = get_db()
        redis = get_redis()

    solana_client_manager_mock = create_autospec(SolanaClientManager)
    cid_metadata_client_mock = create_autospec(CIDMetadataClient)

    anchor_program_indexer = AnchorProgramIndexer(
        PROGRAM_ID,
        ADMIN_STORAGE_PUBLIC_KEY,
        LABEL,
        redis,
        db,
        solana_client_manager_mock,
        cid_metadata_client_mock,
    )
    processed_transactions = [
        {
            "tx_sig": "test_sig1",
            "tx_metadata": {"instructions": []},
            "result": {"slot": 1, "meta": {"err": None}},
        },
        {
            "tx_sig": "test_sig2",
            "tx_metadata": {"instructions": []},
            "result": {"slot": 2, "meta": {"err": None}},
        },
    ]
    anchor_program_indexer.validate_and_save_parsed_tx_records(
        processed_transactions, {}
    )
    with db.scoped_session() as session:
        for tx_entry in processed_transactions:
            assert (
                anchor_program_indexer.is_tx_in_db(session, tx_entry["tx_sig"]) == True
            )
def test_is_valid_instruction(app):
    with app.app_context():
        db = get_db()
        redis = get_redis()

    solana_client_manager_mock = create_autospec(SolanaClientManager)
    cid_metadata_client_mock = create_autospec(CIDMetadataClient)
    anchor_program_indexer = AnchorProgramIndexer(
        PROGRAM_ID,
        ADMIN_STORAGE_PUBLIC_KEY,
        LABEL,
        redis,
        db,
        solana_client_manager_mock,
        cid_metadata_client_mock,
    )

    parsed_instruction = {
        "account_names_map": {"admin": ADMIN_STORAGE_PUBLIC_KEY},
        "instruction_name": "init_admin",
    }
    resp = anchor_program_indexer.is_valid_instruction(parsed_instruction)

    assert resp == True
def test_get_transaction_batches_to_process_interslot_batch(app):
    with app.app_context():
        db = get_db()
        redis = get_redis()

    solana_client_manager_mock = create_autospec(SolanaClientManager)
    cid_metadata_client_mock = create_autospec(CIDMetadataClient)
    anchor_program_indexer = AnchorProgramIndexer(
        PROGRAM_ID,
        ADMIN_STORAGE_PUBLIC_KEY,
        LABEL,
        redis,
        db,
        solana_client_manager_mock,
        cid_metadata_client_mock,
    )
    anchor_program_indexer.get_latest_slot = MagicMock(return_value=0)
    anchor_program_indexer.is_tx_in_db = MagicMock(return_value=True)

    mock_first_transactions_history = {
        "result": [{"slot": 3, "signature": "sig3"}] * 500
        + [{"slot": 2, "signature": "sig2"}] * 500
    }
    mock_second_transactions_history = {
        "result": [{"slot": 2, "signature": "sig2"}] * 500
        + [{"slot": 1, "signature": "sig1"}]
        + [{"slot": 0, "signature": "intersection"}]
    }

    solana_client_manager_mock.get_signatures_for_address.side_effect = [
        mock_first_transactions_history,
        mock_second_transactions_history,
    ]

    transaction_batches = anchor_program_indexer.get_transaction_batches_to_process()
    assert transaction_batches == [["sig1"], ["sig2"] * 1000, ["sig3"] * 500]
def cache(**kwargs):
    """
    Cache decorator.
    Should be called with `@cache(ttl_sec=123, transform=transform_response)`

    Arguments:
        ttl_sec: optional,number The time in seconds to cache the response if
            status code < 400
        transform: optional,func The transform function of the wrapped function
            to convert the function response to request response

    Usage Notes:
        If the wrapped function returns a tuple, the transform function will not
        be run on the response. The first item of the tuple must be serializable.

        If the wrapped function returns a single response, the transform function
        must be passed to the decorator. The wrapper function response must be
        serializable.

    Decorators in Python are just higher-order-functions that accept a function
    as a single parameter, and return a function that wraps the input function.

    In this case, because we need to pass kwargs into our decorator function,
    we need an additional layer of wrapping; the outermost function accepts the kwargs,
    and when called, returns the decorating function `outer_wrap`, which in turn returns
    the wrapped input function, `inner_wrap`.

    @functools.wraps simply ensures that if Python introspects `inner_wrap`, it refers to
    `func` rather than `inner_wrap`.
    """
    ttl_sec = kwargs["ttl_sec"] if "ttl_sec" in kwargs else default_ttl_sec
    transform = kwargs["transform"] if "transform" in kwargs else None
    redis = redis_connection.get_redis()

    def outer_wrap(func):
        @functools.wraps(func)
        def inner_wrap(*args, **kwargs):
            has_user_id = 'user_id' in request.args and request.args['user_id'] is not None
            key = extract_key(request.path, request.args.items())
            if not has_user_id:
                cached_resp = redis.get(key)

                if cached_resp:
                    logger.info(f"Redis Cache - hit {key}")
                    try:
                        deserialized = pickle.loads(cached_resp)
                        if transform is not None:
                            return transform(deserialized)
                        return deserialized, 200
                    except Exception as e:
                        logger.warning(f"Unable to deserialize cached response: {e}")

                logger.info(f"Redis Cache - miss {key}")
            response = func(*args, **kwargs)

            if len(response) == 2:
                resp, status_code = response
                if status_code < 400:
                    serialized = pickle.dumps(resp)
                    redis.set(key, serialized, ttl_sec)
                return resp, status_code
            serialized = pickle.dumps(response)
            redis.set(key, serialized, ttl_sec)
            return transform(response)
        return inner_wrap
    return outer_wrap
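# A hedged usage sketch for the cache decorator above. The blueprint `bp`, the
# route path, and the TTL are illustrative assumptions; the transform turns the
# cached payload into a response, mirroring success_response usage elsewhere in
# this section.
def transform_response(payload):
    return success_response(payload, sign_response=False)

@bp.route("/cached_thing")  # assumes an existing Flask blueprint named bp
@cache(ttl_sec=30, transform=transform_response)
def cached_thing():
    # Returning a single (non-tuple) value, so the decorator pickles and caches
    # it for 30 seconds and applies the transform on the way out.
    return {"value": 42}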
def test_user_indexing_skip_tx(bus_mock: mock.MagicMock, app, mocker):
    """Tests that users skip cursed txs without throwing an error and are able to process other tx in block"""
    with app.app_context():
        db = get_db()
        redis = get_redis()
        web3 = Web3()
        bus_mock(redis)
        update_task = DatabaseTask(
            cid_metadata_client=cid_metadata_client,
            web3=web3,
            challenge_event_bus=bus_mock,
            redis=redis,
        )

    class TestUserTransaction:
        def __init__(self):
            self.transactionHash = None

    blessed_tx_hash = (
        "0x34004dfaf5bb7cf9998eaf387b877d72d198c6508608e309df3f89e57def4db3")
    blessed_tx = TestUserTransaction()
    blessed_tx.transactionHash = update_task.web3.toBytes(
        hexstr=blessed_tx_hash)
    cursed_tx_hash = (
        "0x5fe51d735309d3044ae30055ad29101018a1a399066f6c53ea23800225e3a3be")
    cursed_tx = TestUserTransaction()
    cursed_tx.transactionHash = update_task.web3.toBytes(hexstr=cursed_tx_hash)
    test_block_number = 25278765
    test_block_timestamp = 1
    test_block_hash = update_task.web3.toHex(block_hash)
    test_user_factory_txs = [cursed_tx, blessed_tx]
    test_timestamp = datetime.utcfromtimestamp(test_block_timestamp)
    blessed_user_record = User(
        blockhash=test_block_hash,
        blocknumber=test_block_number,
        txhash=blessed_tx_hash,
        user_id=91232,
        name="tobey maguire",
        is_creator=False,
        is_current=True,
        updated_at=test_timestamp,
        created_at=test_timestamp,
    )
    cursed_user_record = User(
        blockhash=test_block_hash,
        blocknumber=test_block_number,
        txhash=cursed_tx_hash,
        user_id=91238,
        name="birbs",
        is_current=None,
        is_creator=None,
        updated_at=test_timestamp,
        created_at=None,
    )

    mocker.patch(
        "src.tasks.users.lookup_user_record",
        side_effect=[cursed_user_record, blessed_user_record],
        autospec=True,
    )
    mocker.patch(
        "src.tasks.users.get_user_events_tx",
        side_effect=[
            [],  # no user added events
            [],
            [
                {
                    "args":
                    AttrDict({
                        "_userId": cursed_user_record.user_id,
                        "_name": cursed_user_record.name.encode("utf-8"),
                    })
                },
            ],  # update name event
            [],
            [],
            [],
            [],
            [],
            [],
            [],
            [],  # second tx receipt
            [],
            [
                {
                    "args":
                    AttrDict({
                        "_userId": blessed_user_record.user_id,
                        "_name": blessed_user_record.name.encode("utf-8"),
                    })
                },
            ],  # update name event
            [],
            [],
            [],
            [],
            [],
            [],
            [],
        ],
        autospec=True,
    )
    test_ipfs_metadata: Dict[str, Any] = {}
    test_blacklisted_cids: Dict[str, Any] = {}

    with db.scoped_session() as session, bus_mock.use_scoped_dispatch_queue():
        try:
            current_block = Block(
                blockhash=test_block_hash,
                parenthash=test_block_hash,
                number=test_block_number,
                is_current=True,
            )
            session.add(current_block)
            (total_changes, updated_user_ids_set) = user_state_update(
                update_task,
                update_task,
                session,
                test_user_factory_txs,
                test_block_number,
                test_block_timestamp,
                block_hash,
                test_ipfs_metadata,
                test_blacklisted_cids,
            )
            assert len(updated_user_ids_set) == 1
            assert list(updated_user_ids_set)[0] == blessed_user_record.user_id
            assert total_changes == 1
            assert (session.query(SkippedTransaction).filter(
                SkippedTransaction.txhash == cursed_user_record.txhash,
                SkippedTransaction.level == SkippedTransactionLevel.node,
            ).first())
            assert (session.query(User).filter(
                User.user_id == blessed_user_record.user_id).first())
            assert (session.query(User).filter(
                User.user_id == cursed_user_record.user_id).first()) == None
        except Exception:
            assert False
def test_index_users(bus_mock: mock.MagicMock, app):
    """Tests that users are indexed correctly"""
    with app.app_context():
        db = get_db()
        redis = get_redis()
        web3 = Web3()
        bus_mock(redis)
        update_task = DatabaseTask(
            cid_metadata_client=cid_metadata_client,
            web3=web3,
            challenge_event_bus=bus_mock,
            redis=redis,
        )

    with db.scoped_session() as session, bus_mock.use_scoped_dispatch_queue():
        # ================== Test Add User Event ==================
        event_type, entry = get_add_user_event()

        block_number = 1
        block_timestamp = 1585336422

        # Some sqlalchemy user instance
        user_record = lookup_user_record(
            update_task,
            session,
            entry,
            block_number,
            block_timestamp,
            "0x"  # txhash
        )

        assert user_record.updated_at == None

        # Fields set to None by default
        assert user_record.handle == None
        assert user_record.handle_lc == None
        assert user_record.wallet == None

        parse_user_event(
            None,  # self - not used
            update_task,  # only need the ipfs client for get_metadata
            session,
            None,  # tx_receipt - not used
            block_number,  # not used
            entry,  # Contains the event args used for updating
            event_type,  # String that should be one of user_event_types_lookup
            user_record,  # User ORM instance
            None,  # ipfs_metadata - not used
            block_timestamp,  # Used to update the user.updated_at field
        )

        # updated_at should be updated every parse_user_event
        assert user_record.updated_at == datetime.utcfromtimestamp(
            block_timestamp)

        # add_user should be updated fields: handle, handle_lc, wallet
        assert user_record.handle == helpers.bytes32_to_str(entry.args._handle)
        assert (user_record.handle_lc == helpers.bytes32_to_str(
            entry.args._handle).lower())
        assert user_record.wallet == entry.args._wallet.lower()

        # ================== Test Update User Bio Event ==================
        event_type, entry = get_update_bio_event()

        assert user_record.bio == None
        assert user_record.handle != None

        parse_user_event(
            None,  # self - not used
            update_task,  # only need the ipfs client for get_metadata
            session,
            None,  # tx_receipt - not used
            block_number,  # not used
            entry,  # Contains the event args used for updating
            event_type,  # String that should be one of user_event_types_lookup
            user_record,  # User ORM instance
            None,  # ipfs_metadata - not used
            block_timestamp,  # Used to update the user.updated_at field
        )

        # update_bio should update the bio field
        assert user_record.bio == entry.args._bio

        # ================== Test Update User Location Event ==================
        event_type, entry = get_update_location_event()

        # `location` field is none by default
        assert user_record.location == None

        parse_user_event(
            None,  # self - not used
            update_task,  # only need the ipfs client for get_metadata
            session,
            None,  # tx_receipt - not used
            block_number,  # not used
            entry,  # Contains the event args used for updating
            event_type,  # String that should be one of user_event_types_lookup
            user_record,  # User ORM instance
            None,  # ipfs_metadata - not used
            block_timestamp,  # Used to update the user.updated_at field
        )

        # update_location should update the location field
        assert user_record.location == helpers.bytes32_to_str(
            entry.args._location)

        # ================== Test Update User is Creator Event ==================
        event_type, entry = get_update_is_creator_event()

        # `is_creator` field is none by default
        assert user_record.is_creator == None

        parse_user_event(
            None,  # self - not used
            update_task,  # only need the ipfs client for get_metadata
            session,
            None,  # tx_receipt - not used
            block_number,  # not used
            entry,  # Contains the event args used for updating
            event_type,  # String that should be one of user_event_types_lookup
            user_record,  # User ORM instance
            None,  # ipfs_metadata - not used
            block_timestamp,  # Used to update the user.updated_at field
        )

        # update_is_creator should update the is_creator field
        assert user_record.is_creator == entry.args._isCreator

        # ================== Test Update User Name Event ==================
        event_type, entry = get_update_name_event()

        # `name` field is none by default
        assert user_record.name == None

        parse_user_event(
            None,  # self - not used
            update_task,  # only need the ipfs client for get_metadata
            session,
            None,  # tx_receipt - not used
            block_number,  # not used
            entry,  # Contains the event args used for updating
            event_type,  # String that should be one of user_event_types_lookup
            user_record,  # User ORM instance
            None,  # ipfs_metadata - not used
            block_timestamp,  # Used to update the user.updated_at field
        )

        # update_name should update the name field
        assert user_record.name == helpers.bytes32_to_str(entry.args._name)

        # ================== Test Update User CNodes Event for legacy ==================
        event_type, entry = get_update_creator_node_endpoint_event()

        # `creator_node_endpoint` field is none by default
        assert user_record.creator_node_endpoint == None

        # Set primary_id so that creator_node_endpoint is not set
        assert user_record.primary_id == None
        user_record.primary_id = 1

        parse_user_event(
            None,  # self - not used
            update_task,  # only need the ipfs client for get_metadata
            session,
            None,  # tx_receipt - not used
            block_number,  # not used
            entry,  # Contains the event args used for updating
            event_type,  # String that should be one of user_event_types_lookup
            user_record,  # User ORM instance
            None,  # ipfs_metadata - not used
            block_timestamp,  # Used to update the user.updated_at field
        )

        # creator_node_endpoint should remain unset because primary_id was set
        assert user_record.creator_node_endpoint == None

        # Set primary id back to none
        user_record.primary_id = None

        # ================== Test Update User CNodes Event ==================
        event_type, entry = get_update_creator_node_endpoint_event()

        # `creator_node_endpoint` field is none by default
        assert user_record.creator_node_endpoint == None

        parse_user_event(
            None,  # self - not used
            update_task,  # only need the ipfs client for get_metadata
            session,
            None,  # tx_receipt - not used
            block_number,  # not used
            entry,  # Contains the event args used for updating
            event_type,  # String that should be one of user_event_types_lookup
            user_record,  # User ORM instance
            None,  # ipfs_metadata - not used
            block_timestamp,  # Used to update the user.updated_at field
        )

        # update_creator_node_endpoint should update the creator_node_endpoint field
        assert user_record.creator_node_endpoint == entry.args._creatorNodeEndpoint

        # ================== Test Update User Profile Photo Event ==================
        event_type, entry = get_update_profile_photo_event()

        # `profile_picture` field is none by default
        assert user_record.profile_picture == None
        assert user_record.profile_picture_sizes == None

        parse_user_event(
            None,  # self - not used
            update_task,  # only need the ipfs client for get_metadata
            session,
            None,  # tx_receipt - not used
            block_number,  # not used
            entry,  # Contains the event args used for updating
            event_type,  # String that should be one of user_event_types_lookup
            user_record,  # User ORM instance
            None,  # ipfs_metadata - not used
            block_timestamp,  # Used to update the user.updated_at field
        )

        # update_profile_photo should update profile_picture_sizes (profile_picture stays None)
        assert user_record.profile_picture_sizes == helpers.multihash_digest_to_cid(
            entry.args._profilePhotoDigest)
        assert user_record.profile_picture == None

        # ================== Test Update User Cover Photo Event ==================
        event_type, entry = get_update_cover_photo_event()

        # `cover_photo` field is none by default
        assert user_record.cover_photo == None
        assert user_record.cover_photo_sizes == None

        parse_user_event(
            None,  # self - not used
            update_task,  # only need the ipfs client for get_metadata
            session,
            None,  # tx_receipt - not used
            block_number,  # not used
            entry,  # Contains the event args used for updating
            event_type,  # String that should be one of user_event_types_lookup
            user_record,  # User ORM instance
            None,  # ipfs_metadata - not used
            block_timestamp,  # Used to update the user.updated_at field
        )

        # update_cover_photo should update cover_photo_sizes (cover_photo stays None)
        assert user_record.cover_photo == None
        assert user_record.cover_photo_sizes == helpers.multihash_digest_to_cid(
            entry.args._coverPhotoDigest)

        # ================== Test Update User Metadata Event ==================
        event_type, entry = get_update_multihash_event()

        parse_user_event(
            None,  # self - not used
            update_task,  # only need the ipfs client for get_metadata
            session,
            None,  # tx_receipt - not used
            block_number,  # not used
            entry,  # Contains the event args used for updating
            event_type,  # String that should be one of user_event_types_lookup
            user_record,  # User ORM instance
            update_task.cid_metadata_client.get_metadata(
                helpers.multihash_digest_to_cid(entry.args._multihashDigest),
                user_metadata_format,
                "",
            ),  # ipfs_metadata
            block_timestamp,  # Used to update the user.updated_at field
        )
        session.flush()

        entry_multihash = helpers.multihash_digest_to_cid(
            entry.args._multihashDigest)
        ipfs_metadata = update_task.cid_metadata_client.get_metadata(
            entry_multihash, "", "")

        assert user_record.profile_picture == ipfs_metadata["profile_picture"]
        assert user_record.cover_photo == ipfs_metadata["cover_photo"]
        assert user_record.bio == ipfs_metadata["bio"]
        assert user_record.name == ipfs_metadata["name"]
        assert user_record.location == ipfs_metadata["location"]
        assert (user_record.profile_picture_sizes ==
                ipfs_metadata["profile_picture_sizes"])
        assert user_record.cover_photo_sizes == ipfs_metadata[
            "cover_photo_sizes"]
        assert user_record.has_collectibles == True
        assert user_record.playlist_library == ipfs_metadata[
            "playlist_library"]

        assert user_record.is_deactivated == True

        ipfs_associated_wallets = ipfs_metadata["associated_wallets"]
        associated_wallets = (session.query(AssociatedWallet).filter_by(
            user_id=user_record.user_id,
            is_current=True,
            is_delete=False,
            chain="eth",
        ).all())
        for associated_wallet in associated_wallets:
            assert associated_wallet.wallet in ipfs_associated_wallets
        assert len(associated_wallets) == len(ipfs_associated_wallets)

        ipfs_associated_sol_wallets = ipfs_metadata["associated_sol_wallets"]
        associated_sol_wallets = (session.query(AssociatedWallet).filter_by(
            user_id=user_record.user_id,
            is_current=True,
            is_delete=False,
            chain="sol",
        ).all())
        for associated_wallet in associated_sol_wallets:
            assert associated_wallet.wallet in ipfs_associated_sol_wallets
        assert len(associated_sol_wallets) == len(ipfs_associated_sol_wallets)

        user_events = (session.query(UserEvents).filter_by(
            user_id=user_record.user_id, is_current=True).first())
        assert user_events.referrer == 2
        assert user_events.is_mobile_user == True
        calls = [
            mock.call.dispatch(ChallengeEvent.mobile_install, 1, 1),
            mock.call.dispatch(ChallengeEvent.referred_signup, 1, 1),
            mock.call.dispatch(ChallengeEvent.referral_signup, 1, 2,
                               {"referred_user_id": 1}),
        ]
        bus_mock.assert_has_calls(calls, any_order=True)
def get_health(args, use_redis_cache=True):
    """
    Gets health status for the service

    :param args: dictionary
    :param args.verbose: bool
        if True, returns db connection information
    :param args.healthy_block_diff: int
        determines the point at which a block difference is considered unhealthy
    :param args.enforce_block_diff: bool
        if true and the block difference is unhealthy an error is returned

    :rtype: (dictionary, bool)
    :return: tuple of health results and a boolean indicating an error
    """
    redis = redis_connection.get_redis()
    web3 = web3_provider.get_web3()

    verbose = args.get("verbose")
    enforce_block_diff = args.get("enforce_block_diff")
    qs_healthy_block_diff = args.get("healthy_block_diff")

    # If healthy block diff is given in url and positive, override config value
    healthy_block_diff = qs_healthy_block_diff if qs_healthy_block_diff is not None \
        and qs_healthy_block_diff >= 0 else default_healthy_block_diff

    latest_block_num = None
    latest_block_hash = None
    latest_indexed_block_num = None
    latest_indexed_block_hash = None

    if use_redis_cache:
        # get latest blockchain state from redis cache, or fallback to chain if None
        latest_block_num, latest_block_hash = get_latest_chain_block_set_if_nx(
            redis, web3)

        # get latest db state from redis cache
        latest_indexed_block_num = redis.get(
            most_recent_indexed_block_redis_key)
        if latest_indexed_block_num is not None:
            latest_indexed_block_num = int(latest_indexed_block_num)

        latest_indexed_block_hash = redis.get(
            most_recent_indexed_block_hash_redis_key)
        if latest_indexed_block_hash is not None:
            latest_indexed_block_hash = latest_indexed_block_hash.decode(
                "utf-8")

    # fetch latest blockchain state from web3 if:
    # we explicitly don't want to use redis cache or
    # value from redis cache is None
    if not use_redis_cache or latest_block_num is None or latest_block_hash is None:
        # get latest blockchain state from web3
        latest_block = web3.eth.getBlock("latest", True)
        latest_block_num = latest_block.number
        latest_block_hash = latest_block.hash.hex()

    # fetch latest db state if:
    # we explicitly don't want to use redis cache or
    # value from redis cache is None
    if not use_redis_cache or latest_indexed_block_num is None or latest_indexed_block_hash is None:
        db_block_state = _get_db_block_state()
        latest_indexed_block_num = db_block_state["number"] or 0
        latest_indexed_block_hash = db_block_state["blockhash"]

    trending_tracks_age_sec = get_elapsed_time_redis(
        redis, trending_tracks_last_completion_redis_key)
    trending_playlists_age_sec = get_elapsed_time_redis(
        redis, trending_playlists_last_completion_redis_key)

    # Get system information monitor values
    sys_info = monitors.get_monitors([
        MONITORS[monitor_names.database_size],
        MONITORS[monitor_names.database_connections],
        MONITORS[monitor_names.total_memory],
        MONITORS[monitor_names.used_memory],
        MONITORS[monitor_names.filesystem_size],
        MONITORS[monitor_names.filesystem_used],
        MONITORS[monitor_names.received_bytes_per_sec],
        MONITORS[monitor_names.transferred_bytes_per_sec],
        MONITORS[monitor_names.redis_total_memory]
    ])

    health_results = {
        "web": {
            "blocknumber": latest_block_num,
            "blockhash": latest_block_hash,
        },
        "db": {
            "number": latest_indexed_block_num,
            "blockhash": latest_indexed_block_hash
        },
        "git": os.getenv("GIT_SHA"),
        "trending_tracks_age_sec": trending_tracks_age_sec,
        "trending_playlists_age_sec": trending_playlists_age_sec,
        "number_of_cpus": number_of_cpus,
        **sys_info
    }

    block_difference = abs(health_results["web"]["blocknumber"] -
                           health_results["db"]["number"])
    health_results["block_difference"] = block_difference
    health_results[
        "maximum_healthy_block_difference"] = default_healthy_block_diff
    health_results.update(disc_prov_version)

    if verbose:
        # DB connections check
        db_connections_json, error = _get_db_conn_state()
        health_results["db_connections"] = db_connections_json
        if error:
            return health_results, error

    # Return error on unhealthy block diff if requested.
    if enforce_block_diff and health_results[
            "block_difference"] > healthy_block_diff:
        return health_results, True

    return health_results, False
def get_health(args: GetHealthArgs,
               use_redis_cache: bool = True) -> Tuple[Dict, bool]:
    """
    Gets health status for the service

    Returns a tuple of health results and a boolean indicating an error
    """
    redis = redis_connection.get_redis()
    web3 = web3_provider.get_web3()

    verbose = args.get("verbose")
    enforce_block_diff = args.get("enforce_block_diff")
    qs_healthy_block_diff = cast(Optional[int], args.get("healthy_block_diff"))
    challenge_events_age_max_drift = args.get("challenge_events_age_max_drift")
    plays_count_max_drift = args.get("plays_count_max_drift")

    # If healthy block diff is given in url and positive, override config value
    healthy_block_diff = (
        qs_healthy_block_diff if qs_healthy_block_diff is not None
        and qs_healthy_block_diff >= 0 else default_healthy_block_diff)

    latest_block_num: Optional[int] = None
    latest_block_hash: Optional[str] = None
    latest_indexed_block_num: Optional[int] = None
    latest_indexed_block_hash: Optional[str] = None

    if use_redis_cache:
        # get latest blockchain state from redis cache, or fallback to chain if None
        latest_block_num, latest_block_hash = get_latest_chain_block_set_if_nx(
            redis, web3)

        # get latest db state from redis cache
        latest_indexed_block_num = redis.get(
            most_recent_indexed_block_redis_key)
        if latest_indexed_block_num is not None:
            latest_indexed_block_num = int(latest_indexed_block_num)

        latest_indexed_block_hash_bytes = redis.get(
            most_recent_indexed_block_hash_redis_key)
        if latest_indexed_block_hash_bytes is not None:
            latest_indexed_block_hash = latest_indexed_block_hash_bytes.decode(
                "utf-8")
    else:
        # Get latest blockchain state from web3
        try:
            latest_block = web3.eth.get_block("latest", True)
            latest_block_num = latest_block.number
            latest_block_hash = latest_block.hash.hex()
        except Exception as e:
            logger.error(f"Could not get latest block from chain: {e}")

    # fetch latest db state if:
    # we explicitly don't want to use redis cache or
    # value from redis cache is None
    if (not use_redis_cache or latest_indexed_block_num is None
            or latest_indexed_block_hash is None):
        db_block_state = _get_db_block_state()
        latest_indexed_block_num = db_block_state["number"] or 0
        latest_indexed_block_hash = db_block_state["blockhash"]

    play_health_info = get_play_health_info(redis, plays_count_max_drift)
    rewards_manager_health_info = get_rewards_manager_health_info(redis)
    user_bank_health_info = get_user_bank_health_info(redis)
    spl_audio_info = get_spl_audio_info(redis)
    reactions_health_info = get_reactions_health_info(
        redis,
        args.get("reactions_max_indexing_drift"),
        args.get("reactions_max_last_reaction_drift"),
    )

    trending_tracks_age_sec = get_elapsed_time_redis(
        redis, trending_tracks_last_completion_redis_key)
    trending_playlists_age_sec = get_elapsed_time_redis(
        redis, trending_playlists_last_completion_redis_key)
    challenge_events_age_sec = get_elapsed_time_redis(
        redis, challenges_last_processed_event_redis_key)
    user_balances_age_sec = get_elapsed_time_redis(
        redis, user_balances_refresh_last_completion_redis_key)
    num_users_in_lazy_balance_refresh_queue = int(
        redis.scard(LAZY_REFRESH_REDIS_PREFIX))
    num_users_in_immediate_balance_refresh_queue = int(
        redis.scard(IMMEDIATE_REFRESH_REDIS_PREFIX))
    last_scanned_block_for_balance_refresh = redis_get_or_restore(
        redis, eth_indexing_last_scanned_block_key)
    index_eth_age_sec = get_elapsed_time_redis(
        redis, index_eth_last_completion_redis_key)
    last_scanned_block_for_balance_refresh = (
        int(last_scanned_block_for_balance_refresh)
        if last_scanned_block_for_balance_refresh else None)

    # Get system information monitor values
    sys_info = monitors.get_monitors([
        MONITORS[monitor_names.database_size],
        MONITORS[monitor_names.database_connections],
        MONITORS[monitor_names.total_memory],
        MONITORS[monitor_names.used_memory],
        MONITORS[monitor_names.filesystem_size],
        MONITORS[monitor_names.filesystem_used],
        MONITORS[monitor_names.received_bytes_per_sec],
        MONITORS[monitor_names.transferred_bytes_per_sec],
        MONITORS[monitor_names.redis_total_memory],
    ])

    health_results = {
        "web": {
            "blocknumber": latest_block_num,
            "blockhash": latest_block_hash,
        },
        "db": {
            "number": latest_indexed_block_num,
            "blockhash": latest_indexed_block_hash,
        },
        "git": os.getenv("GIT_SHA"),
        "trending_tracks_age_sec": trending_tracks_age_sec,
        "trending_playlists_age_sec": trending_playlists_age_sec,
        "challenge_last_event_age_sec": challenge_events_age_sec,
        "user_balances_age_sec": user_balances_age_sec,
        "num_users_in_lazy_balance_refresh_queue":
        num_users_in_lazy_balance_refresh_queue,
        "num_users_in_immediate_balance_refresh_queue":
        num_users_in_immediate_balance_refresh_queue,
        "last_scanned_block_for_balance_refresh":
        last_scanned_block_for_balance_refresh,
        "index_eth_age_sec": index_eth_age_sec,
        "number_of_cpus": number_of_cpus,
        **sys_info,
        "plays": play_health_info,
        "rewards_manager": rewards_manager_health_info,
        "user_bank": user_bank_health_info,
        "openresty_public_key": openresty_public_key,
        "spl_audio_info": spl_audio_info,
        "reactions": reactions_health_info,
        "infra_setup": infra_setup,
    }

    if latest_block_num is not None and latest_indexed_block_num is not None:
        block_difference = abs(latest_block_num - latest_indexed_block_num)
    else:
        # If we cannot get a reading from chain about what the latest block is,
        # we set the difference to be an unhealthy amount
        block_difference = default_healthy_block_diff + 1
    health_results["block_difference"] = block_difference
    health_results[
        "maximum_healthy_block_difference"] = default_healthy_block_diff
    health_results.update(disc_prov_version)

    # Check that this node meets the minimum system requirements
    num_cpus: int = cast(int, health_results["number_of_cpus"] or 0)
    total_memory: int = cast(int, health_results["total_memory"] or 0)
    filesystem_size: int = cast(int, health_results["filesystem_size"] or 0)
    if (num_cpus < min_number_of_cpus or total_memory < min_total_memory
            or filesystem_size < min_filesystem_size):
        health_results["meets_min_requirements"] = False
        # TODO - this will become strictly enforced in upcoming service versions and return with error
    else:
        health_results["meets_min_requirements"] = True

    if verbose:
        # Elasticsearch health
        if esclient:
            health_results["elasticsearch"] = get_elasticsearch_health_info(
                esclient, latest_indexed_block_num)

        # DB connections check
        db_connections_json, db_connections_error = _get_db_conn_state()
        health_results["db_connections"] = db_connections_json
        location = get_location()
        health_results.update(location)

        if db_connections_error:
            return health_results, db_connections_error

        query_insights_json, query_insights_error = _get_query_insights()
        health_results["query_insights"] = query_insights_json

        if query_insights_error:
            return health_results, query_insights_error

        table_size_info_json = monitors.get_monitors([
            MONITORS[monitor_names.table_size_info],
        ])

        health_results["tables"] = table_size_info_json

    unhealthy_blocks = bool(enforce_block_diff
                            and block_difference > healthy_block_diff)
    unhealthy_challenges = bool(
        challenge_events_age_max_drift and challenge_events_age_sec
        and challenge_events_age_sec > challenge_events_age_max_drift)

    is_unhealthy = (unhealthy_blocks or unhealthy_challenges
                    or play_health_info["is_unhealthy"]
                    or reactions_health_info["is_unhealthy"])

    return health_results, is_unhealthy
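# A hedged usage sketch for the typed get_health above. The argument keys come
# from the args.get(...) calls in the function body; the concrete values and the
# assumption that GetHealthArgs accepts a plain dict are illustrative only.
args = {
    "verbose": True,
    "enforce_block_diff": True,
    "healthy_block_diff": 100,
    "challenge_events_age_max_drift": 300,
    "plays_count_max_drift": 300,
}
health_results, is_unhealthy = get_health(args)
status_code = 500 if is_unhealthy else 200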