Example #1
def lookup_user_record(update_task, session, entry, block_number,
                       block_timestamp, txhash):
    event_blockhash = update_task.web3.toHex(entry.blockHash)
    user_id = helpers.get_tx_arg(entry, "_userId")

    # Check if the userId is in the db
    user_record = (session.query(User).filter(User.user_id == user_id,
                                              User.is_current == True).first())

    if user_record:
        # expunge the result from sqlalchemy so we can modify it without UPDATE statements being made
        # https://stackoverflow.com/questions/28871406/how-to-clone-a-sqlalchemy-db-object-with-new-primary-key
        session.expunge(user_record)
        make_transient(user_record)
    else:
        user_record = User(
            is_current=True,
            user_id=user_id,
            created_at=datetime.utcfromtimestamp(block_timestamp),
        )

    # update these fields regardless of type
    user_record.blocknumber = block_number
    user_record.blockhash = event_blockhash
    user_record.txhash = txhash

    return user_record
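
# A minimal sketch (illustrative, hypothetical variable names) of the detach-and-reinsert
# pattern used above: expunge() removes the loaded row from the session and
# make_transient() clears its persisted identity, so the object can be mutated freely and
# later re-added with session.add() as a brand new row instead of triggering UPDATE
# statements against the existing one.
#
#     record = session.query(User).filter(User.user_id == user_id,
#                                         User.is_current == True).first()
#     session.expunge(record)            # detach from the session
#     make_transient(record)             # forget the persisted identity
#     record.blocknumber = block_number  # mutate without emitting an UPDATE
#     session.add(record)                # later inserted as a new row / version
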
def parse_user_record(update_task, entry, user_record, block_timestamp):
    primary = helpers.get_tx_arg(entry, "_primaryId")
    secondaries = helpers.get_tx_arg(entry, "_secondaryIds")
    signer = helpers.get_tx_arg(entry, "_signer")
    user_record.updated_at = datetime.utcfromtimestamp(block_timestamp)
    user_record.primary_id = primary
    user_record.secondary_ids = secondaries
    user_record.replica_set_update_signer = signer

    # Update cnode endpoint string reconstructed from sp ID
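    # (assumption: the reconstructed value is the comma-separated endpoint list,
    # e.g. "https://primary-cn...,https://secondary-cn1...,https://secondary-cn2...")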
    creator_node_endpoint_str = get_endpoint_string_from_sp_ids(
        update_task, primary, secondaries)
    user_record.creator_node_endpoint = creator_node_endpoint_str

    if not all_required_fields_present(User, user_record):
        raise EntityMissingRequiredFieldError(
            "user_replica_set",
            user_record,
            f"Error parsing user for user replica set change {user_record} with entity missing required field(s)",
        )

    return user_record


def parse_ursm_cnode_record(update_task, entry, cnode_record):
    cnode_record.delegate_owner_wallet = helpers.get_tx_arg(
        entry, "_cnodeDelegateOwnerWallet")
    cnode_record.owner_wallet = helpers.get_tx_arg(entry, "_cnodeOwnerWallet")
    cnode_record.proposer_1_delegate_owner_wallet = helpers.get_tx_arg(
        entry, "_proposer1DelegateOwnerWallet")
    cnode_record.proposer_2_delegate_owner_wallet = helpers.get_tx_arg(
        entry, "_proposer2DelegateOwnerWallet")
    cnode_record.proposer_3_delegate_owner_wallet = helpers.get_tx_arg(
        entry, "_proposer3DelegateOwnerWallet")
    cnode_record.proposer_sp_ids = helpers.get_tx_arg(entry, "_proposerSpIds")
    # Retrieve endpoint from eth contracts
    cnode_sp_id = helpers.get_tx_arg(entry, "_cnodeSpId")
    cnode_record.endpoint = get_ursm_cnode_endpoint(update_task, cnode_sp_id)

    if not all_required_fields_present(URSMContentNode, cnode_record):
        raise EntityMissingRequiredFieldError(
            "content_node",
            cnode_record,
            f"Error parsing content node {cnode_record} with entity missing required field(s)",
        )

    return cnode_record


def user_replica_set_state_update(
    self,
    update_task: DatabaseTask,
    session: Session,
    user_replica_set_mgr_txs,
    block_number,
    block_timestamp,
    block_hash,
    _ipfs_metadata,  # prefix unused args with underscore to avoid pylint unused-argument warnings
    _blacklisted_cids,
) -> Tuple[int, Set]:
    """Return Tuple containing int representing number of User model state changes found in transaction and set of user_id values"""

    event_blockhash = update_task.web3.toHex(block_hash)
    num_user_replica_set_changes = 0
    skipped_tx_count = 0

    user_ids: Set[int] = set()
    if not user_replica_set_mgr_txs:
        return num_user_replica_set_changes, user_ids

    # This stores the state of the user object along with all the events applied to it
    # before it gets committed to the db
    # Data format is {user_id: {"user": <User>, "events": [...]}}
    # NOTE - events are stored only for debugging purposes and not used or persisted anywhere
    user_replica_set_events_lookup = {}

    # This stores the state of the cnode object along with all events applied
    # Data format is {cnode_sp_id: {"content_node": <URSMContentNode>, "events": [...]}}
    cnode_events_lookup = {}

    # pylint: disable=too-many-nested-blocks
    for tx_receipt in user_replica_set_mgr_txs:
        txhash = update_task.web3.toHex(tx_receipt.transactionHash)
        for event_type in user_replica_set_manager_event_types_arr:
            user_events_tx = get_user_replica_set_mgr_tx(
                update_task, event_type, tx_receipt)
            processedEntries = 0  # if record does not get added, do not count towards num_total_changes
            for entry in user_events_tx:
                args = entry["args"]
                existing_user_record = None
                existing_cnode_record = None
                user_id = (helpers.get_tx_arg(entry, "_userId")
                           if "_userId" in args else None)
                cnode_sp_id = (helpers.get_tx_arg(entry, "_cnodeSpId")
                               if "_cnodeSpId" in args else None)
                try:
                    # if the user id is not in the lookup object, it hasn't been initialized yet:
                    # first, fetch the user record from the db (or create a new one if none exists),
                    # then set the lookup object for user_id with the appropriate props
                    if user_id:
                        existing_user_record = lookup_user_record(
                            update_task,
                            session,
                            entry,
                            block_number,
                            block_timestamp,
                            txhash,
                        )

                    if cnode_sp_id:
                        existing_cnode_record = lookup_ursm_cnode(
                            update_task,
                            session,
                            entry,
                            block_number,
                            block_timestamp,
                            txhash,
                        )

                    # Add or update the value of the user record for this block in user_replica_set_events_lookup,
                    # ensuring that multiple events for a single user result in only 1 row insert operation
                    # (even if multiple operations are present)
                    if (event_type ==
                            user_replica_set_manager_event_types_lookup[
                                "update_replica_set"]):
                        parsed_user_record = parse_user_record(
                            update_task,
                            entry,
                            existing_user_record,
                            block_timestamp,
                        )
                        if user_id not in user_replica_set_events_lookup:
                            user_replica_set_events_lookup[user_id] = {
                                "user": parsed_user_record,
                                "events": [],
                            }
                        else:
                            user_replica_set_events_lookup[user_id][
                                "user"] = parsed_user_record
                        user_replica_set_events_lookup[user_id][
                            "events"].append(event_type)
                        user_ids.add(user_id)
                        processedEntries += 1
                    # Process L2 Content Node operations
                    elif (event_type ==
                          user_replica_set_manager_event_types_lookup[
                              "add_or_update_content_node"]):
                        parsed_cnode_record = parse_ursm_cnode_record(
                            update_task,
                            entry,
                            existing_cnode_record,
                        )
                        if cnode_sp_id not in cnode_events_lookup:
                            cnode_events_lookup[cnode_sp_id] = {
                                "content_node": parsed_cnode_record,
                                "events": [],
                            }
                        else:
                            cnode_events_lookup[cnode_sp_id][
                                "content_node"] = parsed_cnode_record
                        cnode_events_lookup[cnode_sp_id]["events"].append(
                            event_type)
                        processedEntries += 1
                except EntityMissingRequiredFieldError as e:
                    logger.warning(f"Skipping tx {txhash} with error {e}")
                    skipped_tx_count += 1
                    add_node_level_skipped_transaction(session, block_number,
                                                       event_blockhash, txhash)
                except Exception as e:
                    logger.error(f"Error in parse user replica set transaction: {e}")
                    raise IndexingError(
                        "user_replica_set",
                        block_number,
                        event_blockhash,
                        txhash,
                        str(e),
                    ) from e
            num_user_replica_set_changes += processedEntries

    logger.info(
        f"index.py | user_replica_set.py | [URSM indexing] There are {num_user_replica_set_changes} events processed and {skipped_tx_count} skipped transactions."
    )

    # for each record in user_replica_set_events_lookup, invalidate the old record and add the new record
    # we do this after all processing has completed so the user record is atomic by block, not tx
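    # (invalidate_old_user presumably marks the previously current row as no longer
    # current, so each user ends the block with exactly one is_current record)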
    for user_id, value_obj in user_replica_set_events_lookup.items():
        logger.info(
            f"index.py | user_replica_set.py | Replica Set Processing Adding {value_obj['user']}"
        )
        invalidate_old_user(session, user_id)
        session.add(value_obj["user"])

    for content_node_id, value_obj in cnode_events_lookup.items():
        logger.info(
            f"index.py | user_replica_set.py | Content Node Processing Adding {value_obj['content_node']}"
        )
        invalidate_old_cnode_record(session, content_node_id)
        session.add(value_obj["content_node"])

    return num_user_replica_set_changes, user_ids
Example #5
def parse_user_event(
    self,
    update_task: DatabaseTask,
    session: Session,
    tx_receipt,
    block_number,
    entry,
    event_type,
    user_record,
    ipfs_metadata,
    block_timestamp,
):
    # type specific field changes
    if event_type == user_event_types_lookup["add_user"]:
        handle_str = helpers.bytes32_to_str(
            helpers.get_tx_arg(entry, "_handle"))
        user_record.handle = handle_str
        user_record.handle_lc = handle_str.lower()
        user_record.wallet = helpers.get_tx_arg(entry, "_wallet").lower()
    elif event_type == user_event_types_lookup["update_multihash"]:
        metadata_multihash = helpers.multihash_digest_to_cid(
            helpers.get_tx_arg(entry, "_multihashDigest"))
        user_record.metadata_multihash = metadata_multihash
    elif event_type == user_event_types_lookup["update_name"]:
        user_record.name = helpers.bytes32_to_str(
            helpers.get_tx_arg(entry, "_name"))
    elif event_type == user_event_types_lookup["update_location"]:
        user_record.location = helpers.bytes32_to_str(
            helpers.get_tx_arg(entry, "_location"))
    elif event_type == user_event_types_lookup["update_bio"]:
        user_record.bio = helpers.get_tx_arg(entry, "_bio")
    elif event_type == user_event_types_lookup["update_profile_photo"]:
        profile_photo_multihash = helpers.multihash_digest_to_cid(
            helpers.get_tx_arg(entry, "_profilePhotoDigest"))
        is_blacklisted = is_blacklisted_ipld(session, profile_photo_multihash)
        if is_blacklisted:
            logger.info(
                f"index.py | users.py | Encountered blacklisted CID:"
                f"{profile_photo_multihash} in indexing update user profile photo"
            )
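            # returning None signals the caller not to persist this record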
            return None
        user_record.profile_picture = profile_photo_multihash
    elif event_type == user_event_types_lookup["update_cover_photo"]:
        cover_photo_multihash = helpers.multihash_digest_to_cid(
            helpers.get_tx_arg(entry, "_coverPhotoDigest"))
        is_blacklisted = is_blacklisted_ipld(session, cover_photo_multihash)
        if is_blacklisted:
            logger.info(
                f"index.py | users.py | Encountered blacklisted CID:"
                f"{cover_photo_multihash} in indexing update user cover photo")
            return None
        user_record.cover_photo = cover_photo_multihash
    elif event_type == user_event_types_lookup["update_is_creator"]:
        user_record.is_creator = helpers.get_tx_arg(entry, "_isCreator")
    elif event_type == user_event_types_lookup["update_is_verified"]:
        user_record.is_verified = helpers.get_tx_arg(entry, "_isVerified")
        if user_record.is_verified:
            update_task.challenge_event_bus.dispatch(
                ChallengeEvent.connect_verified,
                block_number,
                user_record.user_id,
            )

    elif event_type == user_event_types_lookup["update_creator_node_endpoint"]:
        # Ensure a user that has already upgraded to the new UserReplicaSetManager contract
        # does not process legacy `creator_node_endpoint` changes
        # Reference user_replica_set.py for the updated indexing flow around this field
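        # (user_replica_set_upgraded presumably returns True once this user's replica set
        # is already managed on-chain by the UserReplicaSetManager contract)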
        replica_set_upgraded = user_replica_set_upgraded(user_record)
        logger.info(
            f"index.py | users.py | {user_record.handle} Replica set upgraded: {replica_set_upgraded}"
        )
        if not replica_set_upgraded:
            user_record.creator_node_endpoint = helpers.get_tx_arg(
                entry, "_creatorNodeEndpoint")

    # New updated_at timestamp
    user_record.updated_at = datetime.utcfromtimestamp(block_timestamp)

    # If the multihash was updated, apply the fetched metadata fields (profile, photos,
    # wallets, events, etc.) to the record
    if event_type == user_event_types_lookup["update_multihash"]:
        # Look up metadata multihash in IPFS and override with metadata fields
        if ipfs_metadata:
            # Fields also stored on chain
            if "profile_picture" in ipfs_metadata and ipfs_metadata[
                    "profile_picture"]:
                user_record.profile_picture = ipfs_metadata["profile_picture"]

            if "cover_photo" in ipfs_metadata and ipfs_metadata["cover_photo"]:
                user_record.cover_photo = ipfs_metadata["cover_photo"]

            if "bio" in ipfs_metadata and ipfs_metadata["bio"]:
                user_record.bio = ipfs_metadata["bio"]

            if "name" in ipfs_metadata and ipfs_metadata["name"]:
                user_record.name = ipfs_metadata["name"]

            if "location" in ipfs_metadata and ipfs_metadata["location"]:
                user_record.location = ipfs_metadata["location"]

            # Fields with no on-chain counterpart
            if ("profile_picture_sizes" in ipfs_metadata
                    and ipfs_metadata["profile_picture_sizes"]):
                user_record.profile_picture = ipfs_metadata[
                    "profile_picture_sizes"]

            if ("cover_photo_sizes" in ipfs_metadata
                    and ipfs_metadata["cover_photo_sizes"]):
                user_record.cover_photo = ipfs_metadata["cover_photo_sizes"]

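            # has_collectibles is derived: True only when the metadata contains a
            # non-empty collectibles dict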
            if ("collectibles" in ipfs_metadata
                    and ipfs_metadata["collectibles"]
                    and isinstance(ipfs_metadata["collectibles"], dict)
                    and ipfs_metadata["collectibles"].items()):
                user_record.has_collectibles = True
            else:
                user_record.has_collectibles = False

            if "associated_wallets" in ipfs_metadata:
                update_user_associated_wallets(
                    session,
                    update_task,
                    user_record,
                    ipfs_metadata["associated_wallets"],
                    "eth",
                )

            if "associated_sol_wallets" in ipfs_metadata:
                update_user_associated_wallets(
                    session,
                    update_task,
                    user_record,
                    ipfs_metadata["associated_sol_wallets"],
                    "sol",
                )

            if ("playlist_library" in ipfs_metadata
                    and ipfs_metadata["playlist_library"]):
                user_record.playlist_library = ipfs_metadata[
                    "playlist_library"]

            if "is_deactivated" in ipfs_metadata:
                user_record.is_deactivated = ipfs_metadata["is_deactivated"]

            if "events" in ipfs_metadata and ipfs_metadata["events"]:
                update_user_events(
                    session,
                    user_record,
                    ipfs_metadata["events"],
                    update_task.challenge_event_bus,
                )

    # All incoming profile photos are intended to be directories
    # Any value written to the profile_picture field is moved to profile_picture_sizes
    if user_record.profile_picture:
        logger.info(
            f"index.py | users.py | Processing user profile_picture {user_record.profile_picture}"
        )
        user_record.profile_picture_sizes = user_record.profile_picture
        user_record.profile_picture = None

    # All incoming cover photos are intended to be directories
    # Any value written to the cover_photo field is moved to cover_photo_sizes
    if user_record.cover_photo:
        logger.info(
            f"index.py | users.py | Processing user cover photo {user_record.cover_photo}"
        )
        user_record.cover_photo_sizes = user_record.cover_photo
        user_record.cover_photo = None

    if not all_required_fields_present(User, user_record):
        raise EntityMissingRequiredFieldError(
            "user",
            user_record,
            f"Error parsing user {user_record} with entity missing required field(s)",
        )

    return user_record
Example #6
def user_state_update(
    self,
    update_task: DatabaseTask,
    session: Session,
    user_factory_txs,
    block_number,
    block_timestamp,
    block_hash,
    ipfs_metadata,
    blacklisted_cids,
) -> Tuple[int, Set]:
    """Return tuple containing int representing number of User model state changes found in transaction and set of processed user IDs."""
    begin_user_state_update = datetime.now()
    metric = PrometheusMetric(
        "user_state_update_runtime_seconds",
        "Runtimes for src.task.users:user_state_update()",
        ("scope", ),
    )

    blockhash = update_task.web3.toHex(block_hash)
    num_total_changes = 0
    skipped_tx_count = 0
    user_ids: Set[int] = set()
    if not user_factory_txs:
        return num_total_changes, user_ids

    challenge_bus = update_task.challenge_event_bus

    # This stores the state of the user object along with all the events applied to it
    # before it gets committed to the db
    # Data format is {user_id: {"user": <User>, "events": [...]}}
    # NOTE - events are stored only for debugging purposes and not used or persisted anywhere
    user_events_lookup: Dict[int, Dict[str, Any]] = {}

    # Transactions grouped by user, to be processed in parallel below
    # Format: {user_id: [(entry, event_type, tx_receipt, txhash), ...]}
    user_transactions_lookup: Dict[int, List[Tuple]] = {}

    # For each user factory transaction receipt, loop through all audius event types
    # within that tx and collect its event logs, grouping them by user_id so each
    # user's events can be applied together in process_user_txs_serial below
    for tx_receipt in user_factory_txs:
        txhash = update_task.web3.toHex(tx_receipt.transactionHash)
        for event_type in user_event_types_arr:
            user_events_tx = get_user_events_tx(update_task, event_type,
                                                tx_receipt)
            # (records are parsed and counted per user in process_user_txs_serial below)
            for entry in user_events_tx:
                user_id = helpers.get_tx_arg(entry, "_userId")
                if user_id not in user_transactions_lookup:
                    user_transactions_lookup[user_id] = []
                # Append to user level list
                user_transactions_lookup[user_id].append(
                    (entry, event_type, tx_receipt, txhash))

    # Process each user in parallel
    with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
        process_user_txs_futures = {}
        for user_id in user_transactions_lookup.keys():
            user_txs = user_transactions_lookup[user_id]
            process_user_txs_futures[executor.submit(
                process_user_txs_serial,
                self,
                user_id,
                user_txs,
                session,
                user_events_lookup,
                update_task,
                blacklisted_cids,
                block_number,
                block_timestamp,
                blockhash,
                ipfs_metadata,
                user_ids,
                skipped_tx_count,
            )] = user_id
        for future in concurrent.futures.as_completed(
                process_user_txs_futures):
            processed_entries = future.result()
            num_total_changes += processed_entries
    logger.info(
        f"index.py | users.py | There are {num_total_changes} events processed and {skipped_tx_count} skipped transactions."
    )

    # For each record in user_events_lookup, invalidate the old record and add the new record
    # we do this after all processing has completed so the user record is atomic by block, not tx
    for user_id, value_obj in user_events_lookup.items():
        logger.info(f"index.py | users.py | Adding {value_obj['user']}")
        if value_obj["events"]:
            invalidate_old_user(session, user_id)
            challenge_bus.dispatch(ChallengeEvent.profile_update, block_number,
                                   user_id)
            session.add(value_obj["user"])

    if num_total_changes:
        metric.save_time({"scope": "full"})
        logger.info(
            f"index.py | users.py | user_state_update | finished user_state_update in {datetime.now() - begin_user_state_update} // per event: {(datetime.now() - begin_user_state_update) / num_total_changes} secs"
        )
    return num_total_changes, user_ids
Example #7
def process_user_txs_serial(
    self,
    user_id,
    user_txs,
    session,
    user_events_lookup,
    update_task,
    blacklisted_cids,
    block_number,
    block_timestamp,
    blockhash,
    ipfs_metadata,
    user_ids,
    skipped_tx_count,
):
    metric = PrometheusMetric(
        "user_state_update_runtime_seconds",
        "Runtimes for src.task.users:user_state_update()",
        ("scope", ),
    )
    processed_entries = 0
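    # this user's transactions are applied one at a time here, preserving per-user
    # event ordering even though different users are processed in parallel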
    for user_tx in user_txs:
        try:
            process_user_txs_start_time = time()
            entry, event_type, tx_receipt, txhash = user_tx

            # look up or populate existing record
            if user_id in user_events_lookup:
                existing_user_record = user_events_lookup[user_id]["user"]
            else:
                existing_user_record = lookup_user_record(
                    update_task,
                    session,
                    entry,
                    block_number,
                    block_timestamp,
                    txhash,
                )

            # parse user event to add metadata to record
            if event_type == user_event_types_lookup["update_multihash"]:
                metadata_multihash = helpers.multihash_digest_to_cid(
                    helpers.get_tx_arg(entry, "_multihashDigest"))
                user_record = (parse_user_event(
                    self,
                    update_task,
                    session,
                    tx_receipt,
                    block_number,
                    entry,
                    event_type,
                    existing_user_record,
                    ipfs_metadata[metadata_multihash],
                    block_timestamp,
                ) if metadata_multihash not in blacklisted_cids else None)
            else:
                user_record = parse_user_event(
                    self,
                    update_task,
                    session,
                    tx_receipt,
                    block_number,
                    entry,
                    event_type,
                    existing_user_record,
                    None,
                    block_timestamp,
                )
            # process user record
            if user_record is not None:
                if user_id not in user_events_lookup:
                    user_events_lookup[user_id] = {
                        "user": user_record,
                        "events": [],
                    }
                else:
                    user_events_lookup[user_id]["user"] = user_record
                user_events_lookup[user_id]["events"].append(event_type)
                user_ids.add(user_id)

            processed_entries += 1
            metric.save_time({"scope": "user_tx"},
                             start_time=process_user_txs_start_time)
        except EntityMissingRequiredFieldError as e:
            logger.warning(f"Skipping tx {txhash} with error {e}")
            skipped_tx_count += 1
            add_node_level_skipped_transaction(session, block_number,
                                               blockhash, txhash)
        except Exception as e:
            logger.error("Error in parse user transaction")
            raise IndexingError("user", block_number, blockhash, txhash,
                                str(e)) from e

    return processed_entries
Example #8
def playlist_state_update(
    self,
    update_task: DatabaseTask,
    session: Session,
    playlist_factory_txs,
    block_number,
    block_timestamp,
    block_hash,
    _ipfs_metadata,  # prefix unused args with underscore to avoid pylint unused-argument warnings
    _blacklisted_cids,
) -> Tuple[int, Set]:
    """Return Tuple containing int representing number of Playlist model state changes found in transaction and set of processed playlist IDs."""
    blockhash = update_task.web3.toHex(block_hash)
    num_total_changes = 0
    skipped_tx_count = 0
    # This stores the playlist_ids created or updated in the set of transactions
    playlist_ids: Set[int] = set()

    if not playlist_factory_txs:
        return num_total_changes, playlist_ids

    playlist_events_lookup: Dict[int, Dict[str, Any]] = {}
    for tx_receipt in playlist_factory_txs:
        txhash = update_task.web3.toHex(tx_receipt.transactionHash)
        for event_type in playlist_event_types_arr:
            playlist_events_tx = get_playlist_events_tx(
                update_task, event_type, tx_receipt)
            processedEntries = 0  # if record does not get added, do not count towards num_total_changes
            for entry in playlist_events_tx:
                existing_playlist_record = None
                playlist_id = helpers.get_tx_arg(entry, "_playlistId")
                try:
                    # look up or populate existing record
                    if playlist_id in playlist_events_lookup:
                        existing_playlist_record = playlist_events_lookup[
                            playlist_id]["playlist"]
                    else:
                        existing_playlist_record = lookup_playlist_record(
                            update_task, session, entry, block_number, txhash)

                    # parse playlist event to add metadata to record
                    playlist_record = parse_playlist_event(
                        self,
                        update_task,
                        entry,
                        event_type,
                        existing_playlist_record,
                        block_timestamp,
                        session,
                    )

                    # process playlist record
                    if playlist_record is not None:
                        if playlist_id not in playlist_events_lookup:
                            playlist_events_lookup[playlist_id] = {
                                "playlist": playlist_record,
                                "events": [],
                            }
                        else:
                            playlist_events_lookup[playlist_id][
                                "playlist"] = playlist_record
                        playlist_events_lookup[playlist_id]["events"].append(
                            event_type)
                        playlist_ids.add(playlist_id)
                        processedEntries += 1
                except EntityMissingRequiredFieldError as e:
                    logger.warning(f"Skipping tx {txhash} with error {e}")
                    skipped_tx_count += 1
                    add_node_level_skipped_transaction(session, block_number,
                                                       blockhash, txhash)
                except Exception as e:
                    logger.error(f"Error in parse playlist transaction: {e}")
                    raise IndexingError("playlist", block_number, blockhash,
                                        txhash, str(e)) from e
            num_total_changes += processedEntries

    logger.info(
        f"index.py | playlists.py | There are {num_total_changes} events processed and {skipped_tx_count} skipped transactions."
    )

    for playlist_id, value_obj in playlist_events_lookup.items():
        logger.info(
            f"index.py | playlists.py | Adding {value_obj['playlist']})")
        if value_obj["events"]:
            invalidate_old_playlist(session, playlist_id)
            session.add(value_obj["playlist"])

    return num_total_changes, playlist_ids
Example #9
def parse_track_event(
    self,
    session,
    update_task: DatabaseTask,
    entry,
    event_type,
    track_record,
    block_number,
    block_timestamp,
    track_metadata,
    pending_track_routes,
):
    challenge_bus = update_task.challenge_event_bus
    # block_timestamp is unix seconds; convert it to a datetime
    block_datetime = datetime.utcfromtimestamp(block_timestamp)

    if event_type == track_event_types_lookup["new_track"]:
        track_record.created_at = block_datetime

        track_metadata_digest = helpers.get_tx_arg(entry,
                                                   "_multihashDigest").hex()
        track_metadata_hash_fn = helpers.get_tx_arg(entry, "_multihashHashFn")
        buf = multihash.encode(bytes.fromhex(track_metadata_digest),
                               track_metadata_hash_fn)
        track_metadata_multihash = multihash.to_b58_string(buf)
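        # (multihash.encode re-wraps the raw digest with its hash-function prefix and
        # to_b58_string base58-encodes it, yielding the familiar "Qm..."-style CID for
        # sha2-256 digests)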
        logger.info(
            f"index.py | tracks.py | track metadata ipld : {track_metadata_multihash}"
        )

        owner_id = helpers.get_tx_arg(entry, "_trackOwnerId")
        track_record.owner_id = owner_id
        track_record.is_delete = False

        user_handle_row = (session.query(User.handle).filter(
            User.user_id == owner_id, User.is_current == True).first())
        if not user_handle_row:
            raise EntityMissingRequiredFieldError(
                "track",
                track_record,
                f"No user found for {track_record}",
            )
        handle = user_handle_row[0]

        update_track_routes_table(session, track_record, track_metadata,
                                  pending_track_routes)
        track_record = populate_track_record_metadata(track_record,
                                                      track_metadata, handle)
        track_record.metadata_multihash = track_metadata_multihash

        # if cover_art CID is of a dir, store under _sizes field instead
        if track_record.cover_art:
            # If CID is in IPLD blacklist table, do not continue with indexing
            if is_blacklisted_ipld(session, track_record.cover_art):
                logger.info(
                    f"index.py | tracks.py | Encountered blacklisted cover art CID:"
                    f"{track_record.cover_art} in indexing new track")
                return None

            logger.info(
                f"index.py | tracks.py | Processing track cover art {track_record.cover_art}"
            )
            track_record.cover_art_sizes = track_record.cover_art
            track_record.cover_art = None

        update_stems_table(session, track_record, track_metadata)
        update_remixes_table(session, track_record, track_metadata)
        dispatch_challenge_track_upload(challenge_bus, block_number,
                                        track_record)

    if event_type == track_event_types_lookup["update_track"]:
        upd_track_metadata_digest = helpers.get_tx_arg(
            entry, "_multihashDigest").hex()
        upd_track_metadata_hash_fn = helpers.get_tx_arg(
            entry, "_multihashHashFn")
        update_buf = multihash.encode(bytes.fromhex(upd_track_metadata_digest),
                                      upd_track_metadata_hash_fn)
        upd_track_metadata_multihash = multihash.to_b58_string(update_buf)
        logger.info(
            f"index.py | tracks.py | update track metadata ipld : {upd_track_metadata_multihash}"
        )

        owner_id = helpers.get_tx_arg(entry, "_trackOwnerId")
        track_record.owner_id = owner_id
        track_record.is_delete = False

        user_handle_row = (session.query(User.handle).filter(
            User.user_id == owner_id, User.is_current == True).first())
        if not user_handle_row:
            raise EntityMissingRequiredFieldError(
                "track",
                track_record,
                f"No user found for {track_record}",
            )
        handle = user_handle_row[0]

        update_track_routes_table(session, track_record, track_metadata,
                                  pending_track_routes)
        track_record = populate_track_record_metadata(track_record,
                                                      track_metadata, handle)
        track_record.metadata_multihash = upd_track_metadata_multihash

        # All incoming cover art is intended to be a directory
        # Any write to cover_art field is replaced by cover_art_sizes
        if track_record.cover_art:
            # If CID is in IPLD blacklist table, do not continue with indexing
            if is_blacklisted_ipld(session, track_record.cover_art):
                logger.info(
                    f"index.py | tracks.py | Encountered blacklisted cover art CID:"
                    f"{track_record.cover_art} in indexing update track")
                return None

            logger.info(
                f"index.py | tracks.py | Processing track cover art {track_record.cover_art}"
            )
            track_record.cover_art_sizes = track_record.cover_art
            track_record.cover_art = None

        update_remixes_table(session, track_record, track_metadata)

    if event_type == track_event_types_lookup["delete_track"]:
        track_record.is_delete = True
        track_record.stem_of = null()
        track_record.remix_of = null()
        logger.info(
            f"index.py | tracks.py | Removing track : {track_record.track_id}")

    track_record.updated_at = block_datetime

    if not all_required_fields_present(Track, track_record):
        raise EntityMissingRequiredFieldError(
            "track",
            track_record,
            f"Error parsing track {track_record} with entity missing required field(s)",
        )

    return track_record
Example #10
def track_state_update(
    self,
    update_task: DatabaseTask,
    session: Session,
    track_factory_txs,
    block_number,
    block_timestamp,
    block_hash,
    ipfs_metadata,
    blacklisted_cids,
) -> Tuple[int, Set]:
    """Return tuple containing int representing number of Track model state changes found in transaction and set of processed track IDs."""
    begin_track_state_update = datetime.now()
    metric = PrometheusMetric(
        "track_state_update_runtime_seconds",
        "Runtimes for src.task.tracks:track_state_update()",
        ("scope", ),
    )

    blockhash = update_task.web3.toHex(block_hash)
    num_total_changes = 0
    skipped_tx_count = 0
    # This stores the track_ids created or updated in the set of transactions
    track_ids: Set[int] = set()

    if not track_factory_txs:
        return num_total_changes, track_ids

    pending_track_routes: List[TrackRoute] = []
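    # shared across all entries in this batch so that update_track_routes_table can
    # presumably account for routes created earlier in the same block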
    track_events: Dict[int, Dict[str, Any]] = {}
    for tx_receipt in track_factory_txs:
        txhash = update_task.web3.toHex(tx_receipt.transactionHash)
        for event_type in track_event_types_arr:
            track_events_tx = get_track_events_tx(update_task, event_type,
                                                  tx_receipt)
            processedEntries = 0  # if record does not get added, do not count towards num_total_changes
            for entry in track_events_tx:
                track_event_start_time = time()
                event_args = entry["args"]
                track_id = (helpers.get_tx_arg(entry, "_trackId") if "_trackId"
                            in event_args else helpers.get_tx_arg(
                                entry, "_id"))
                existing_track_record = None
                track_metadata = None
                try:
                    # look up or populate existing record
                    if track_id in track_events:
                        existing_track_record = track_events[track_id]["track"]
                    else:
                        existing_track_record = lookup_track_record(
                            update_task,
                            session,
                            entry,
                            track_id,
                            block_number,
                            blockhash,
                            txhash,
                        )
                    # parse track event to add metadata to record
                    if event_type in [
                            track_event_types_lookup["new_track"],
                            track_event_types_lookup["update_track"],
                    ]:
                        track_metadata_digest = helpers.get_tx_arg(
                            entry, "_multihashDigest").hex()
                        track_metadata_hash_fn = helpers.get_tx_arg(
                            entry, "_multihashHashFn")
                        buf = multihash.encode(
                            bytes.fromhex(track_metadata_digest),
                            track_metadata_hash_fn)
                        cid = multihash.to_b58_string(buf)
                        # do not process entry if cid is blacklisted
                        if cid in blacklisted_cids:
                            continue
                        track_metadata = ipfs_metadata[cid]

                    parsed_track = parse_track_event(
                        self,
                        session,
                        update_task,
                        entry,
                        event_type,
                        existing_track_record,
                        block_number,
                        block_timestamp,
                        track_metadata,
                        pending_track_routes,
                    )

                    # If track record object is None, it has a blacklisted metadata CID
                    if parsed_track is not None:
                        if track_id not in track_events:
                            track_events[track_id] = {
                                "track": parsed_track,
                                "events": [],
                            }
                        else:
                            track_events[track_id]["track"] = parsed_track
                        track_events[track_id]["events"].append(event_type)
                        track_ids.add(track_id)
                        processedEntries += 1
                except EntityMissingRequiredFieldError as e:
                    logger.warning(f"Skipping tx {txhash} with error {e}")
                    skipped_tx_count += 1
                    add_node_level_skipped_transaction(session, block_number,
                                                       blockhash, txhash)
                except Exception as e:
                    logger.error(f"Error in parse track transaction: {e}")
                    raise IndexingError("track", block_number, blockhash,
                                        txhash, str(e)) from e
                metric.save_time({"scope": "track_event"},
                                 start_time=track_event_start_time)

            num_total_changes += processedEntries

    logger.info(
        f"index.py | tracks.py | [track indexing] There are {num_total_changes} events processed and {skipped_tx_count} skipped transactions."
    )

    for track_id, value_obj in track_events.items():
        if value_obj["events"]:
            logger.info(f"index.py | tracks.py | Adding {value_obj['track']}")
            invalidate_old_track(session, track_id)
            session.add(value_obj["track"])

    if num_total_changes:
        metric.save_time({"scope": "full"})
        logger.info(
            f"index.py | tracks.py | track_state_update | finished track_state_update in {datetime.now() - begin_track_state_update} // per event: {(datetime.now() - begin_track_state_update) / num_total_changes} secs"
        )
    return num_total_changes, track_ids