Example 1
async def update_token_metadatas_deferred(self, collectionTokenIds: List[Tuple[str, str]], shouldForce: bool = False) -> None:
    if len(collectionTokenIds) == 0:
        return
    if not shouldForce:
        query = (
            TokenMetadatasTable.select()
                .where(TokenMetadatasTable.c.updatedDate > date_util.datetime_from_now(days=-_TOKEN_UPDATE_MIN_DAYS))
                .where(sqlalchemy.tuple_(TokenMetadatasTable.c.registryAddress, TokenMetadatasTable.c.tokenId).in_(collectionTokenIds))
        )
        recentlyUpdatedTokenMetadatas = await self.retriever.query_token_metadatas(query=query)
        recentlyUpdatedTokenIds = set((tokenMetadata.registryAddress, tokenMetadata.tokenId) for tokenMetadata in recentlyUpdatedTokenMetadatas)
        logging.info(f'Skipping {len(recentlyUpdatedTokenIds)} collectionTokenIds because they have been updated recently.')
        collectionTokenIds = set(collectionTokenIds) - recentlyUpdatedTokenIds
    messages = [UpdateTokenMetadataMessageContent(registryAddress=registryAddress, tokenId=tokenId, shouldForce=shouldForce).to_message() for (registryAddress, tokenId) in collectionTokenIds]
    await self.tokenQueue.send_messages(messages=messages)
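The two-column membership test is the heart of this method: sqlalchemy.tuple_(...).in_(...) compiles to a composite IN clause, so a single query checks (registryAddress, tokenId) pairs at once. A minimal standalone sketch of the pattern, with a hypothetical table definition standing in for the project's TokenMetadatasTable:

import sqlalchemy

# Hypothetical stand-in for the project's TokenMetadatasTable.
metadata = sqlalchemy.MetaData()
tokenMetadatasTable = sqlalchemy.Table(
    'tbl_token_metadatas', metadata,
    sqlalchemy.Column('registryAddress', sqlalchemy.Text),
    sqlalchemy.Column('tokenId', sqlalchemy.Text))

# Filter on (registryAddress, tokenId) pairs in one round trip; this renders
# as `WHERE (registryAddress, tokenId) IN ((...), (...))` on PostgreSQL.
pairs = [('0xregistry', '1'), ('0xregistry', '2')]
query = tokenMetadatasTable.select().where(
    sqlalchemy.tuple_(tokenMetadatasTable.c.registryAddress, tokenMetadatasTable.c.tokenId).in_(pairs))
print(query)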
Example 2
async def list_token_metadatas(self, fieldFilters: Optional[Sequence[FieldFilter]] = None, orders: Optional[Sequence[Order]] = None, limit: Optional[int] = None, connection: Optional[DatabaseConnection] = None) -> Sequence[TokenMetadata]:
    query = TokenMetadatasTable.select()
    if fieldFilters:
        query = self._apply_field_filters(query=query, table=TokenMetadatasTable, fieldFilters=fieldFilters)
    if orders:
        query = self._apply_orders(query=query, table=TokenMetadatasTable, orders=orders)
    if limit:
        query = query.limit(limit)
    return await self.query_token_metadatas(query=query, connection=connection)
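Neither _apply_field_filters nor _apply_orders appears in these examples. Purely illustrative, here is one plausible shape for the orders helper, assuming Order and Direction look like the ones used in Example 7:

import dataclasses
import enum

class Direction(enum.Enum):
    # Assumed shape of the project's Direction enum.
    ASCENDING = 'ascending'
    DESCENDING = 'descending'

@dataclasses.dataclass
class Order:
    # Assumed shape of the project's Order type; fieldName matches a column key.
    fieldName: str
    direction: Direction

def apply_orders(query, table, orders):
    # Map each Order onto an order_by clause on the matching column.
    for order in orders:
        column = table.c[order.fieldName]
        query = query.order_by(column.asc() if order.direction == Direction.ASCENDING else column.desc())
    return query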
Example 3
async def get_token_metadata_by_registry_address_token_id(self, registryAddress: str, tokenId: str, connection: Optional[DatabaseConnection] = None) -> TokenMetadata:
    query = TokenMetadatasTable.select() \
        .where(TokenMetadatasTable.c.registryAddress == registryAddress) \
        .where(TokenMetadatasTable.c.tokenId == tokenId)
    result = await self.database.execute(query=query, connection=connection)
    row = result.first()
    if not row:
        raise NotFoundException(message=f'TokenMetadata with registry:{registryAddress} tokenId:{tokenId} not found')
    tokenMetadata = token_metadata_from_row(row)
    return tokenMetadata
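A hedged usage sketch: a caller can treat the NotFoundException as a cue to enqueue a deferred refresh via the method from Example 1. The retriever and tokenManager wiring here is assumed, not shown in the source:

async def get_or_enqueue(retriever, tokenManager, registryAddress: str, tokenId: str):
    # Hypothetical call site: instead of propagating NotFoundException,
    # schedule a deferred metadata update and return None for now.
    try:
        return await retriever.get_token_metadata_by_registry_address_token_id(registryAddress=registryAddress, tokenId=tokenId)
    except NotFoundException:
        await tokenManager.update_token_metadatas_deferred(collectionTokenIds=[(registryAddress, tokenId)])
        return None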
Example 4
async def reprocess_metadata(startId: int, endId: int, batchSize: int):
    databaseConnectionString = Database.create_psql_connection_string(
        username=os.environ["DB_USERNAME"],
        password=os.environ["DB_PASSWORD"],
        host=os.environ["DB_HOST"],
        port=os.environ["DB_PORT"],
        name=os.environ["DB_NAME"])
    database = Database(connectionString=databaseConnectionString)
    saver = Saver(database)
    tokenMetadataProcessor = TokenMetadataProcessor(requester=None,
                                                    ethClient=None,
                                                    s3manager=None,
                                                    bucketName=None)

    await database.connect()

    currentId = startId
    while currentId < endId:
        start = currentId
        end = min(currentId + batchSize, endId)
        logging.info(f'Working on {start} to {end}...')
        async with database.transaction():
            query = TokenMetadatasTable.select()
            query = query.where(TokenMetadatasTable.c.tokenMetadataId >= start)
            query = query.where(TokenMetadatasTable.c.tokenMetadataId < end)
            query = query.where(TokenMetadatasTable.c.metadataUrl.startswith('data:'))
            query = query.where(TokenMetadatasTable.c.name.is_(None))
            tokenMetadatasToChange = [
                token_metadata_from_row(row)
                async for row in database.execute(query=query)
            ]
            logging.info(f'Updating {len(tokenMetadatasToChange)} token metadatas...')
            for tokenMetadata in tokenMetadatasToChange:
                try:
                    tokenMetadataDict = tokenMetadataProcessor._resolve_data(
                        dataString=tokenMetadata.metadataUrl,
                        registryAddress=tokenMetadata.registryAddress,
                        tokenId=tokenMetadata.tokenId)
                    if tokenMetadataDict:
                        logging.info(
                            f'Processed: {tokenMetadata.tokenMetadataId}')
                        await saver.update_token_metadata(
                            tokenMetadataId=tokenMetadata.tokenMetadataId,
                            name=tokenMetadataDict.get('name'),
                            imageUrl=tokenMetadataDict.get('image'),
                            description=tokenMetadataDict.get('description'),
                            attributes=tokenMetadataDict.get('attributes', []))
                except Exception as e:
                    logging.exception(
                        f'Error processing {tokenMetadata.tokenMetadataId}: {e}'
                    )
        currentId = currentId + batchSize
    await database.disconnect()
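The metadataUrl.startswith('data:') filter picks out tokens whose metadata is embedded on-chain as a data URI rather than hosted at an http or ipfs URL. TokenMetadataProcessor._resolve_data is not shown in these examples; a hedged sketch of the decode step it presumably performs:

import base64
import json
import urllib.parse

def decode_data_uri(dataString: str) -> dict:
    # Split 'data:<mediatype>[;base64],<payload>' into header and payload.
    header, _, payload = dataString.partition(',')
    if header.endswith(';base64'):
        decoded = base64.b64decode(payload).decode('utf-8')
    else:
        decoded = urllib.parse.unquote(payload)
    return json.loads(decoded)

print(decode_data_uri('data:application/json;base64,' + base64.b64encode(b'{"name": "Token #1"}').decode('ascii')))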
Example 5
async def process_token_ownerships(startTokenId: int, endTokenId: int, batchSize: int):
    databaseConnectionString = Database.create_psql_connection_string(username=os.environ["DB_USERNAME"], password=os.environ["DB_PASSWORD"], host=os.environ["DB_HOST"], port=os.environ["DB_PORT"], name=os.environ["DB_NAME"])
    database = Database(connectionString=databaseConnectionString)
    saver = Saver(database=database)
    retriever = Retriever(database=database)
    s3manager = S3Manager(region='eu-west-1', accessKeyId=os.environ['AWS_KEY'], accessKeySecret=os.environ['AWS_SECRET'])
    workQueue = SqsMessageQueue(region='eu-west-1', accessKeyId=os.environ['AWS_KEY'], accessKeySecret=os.environ['AWS_SECRET'], queueUrl='https://sqs.eu-west-1.amazonaws.com/097520841056/notd-work-queue')
    tokenQueue = SqsMessageQueue(region='eu-west-1', accessKeyId=os.environ['AWS_KEY'], accessKeySecret=os.environ['AWS_SECRET'], queueUrl='https://sqs.eu-west-1.amazonaws.com/097520841056/notd-token-queue')
    awsRequester = AwsRequester(accessKeyId=os.environ['AWS_KEY'], accessKeySecret=os.environ['AWS_SECRET'])
    ethClient = RestEthClient(url='https://nd-foldvvlb25awde7kbqfvpgvrrm.ethereum.managedblockchain.eu-west-1.amazonaws.com', requester=awsRequester)
    requester = Requester()
    tokenMetadataProcessor = TokenMetadataProcessor(requester=requester, ethClient=ethClient, s3manager=s3manager, bucketName=os.environ['S3_BUCKET'])
    openseaApiKey = os.environ['OPENSEA_API_KEY']
    tokenOwnershipProcessor = TokenOwnershipProcessor(retriever=retriever)
    collectionProcessor = CollectionProcessor(requester=requester, ethClient=ethClient, openseaApiKey=openseaApiKey, s3manager=s3manager, bucketName=os.environ['S3_BUCKET'])
    tokenManager = TokenManager(saver=saver, retriever=retriever, tokenQueue=tokenQueue, collectionProcessor=collectionProcessor, tokenMetadataProcessor=tokenMetadataProcessor, tokenOwnershipProcessor=tokenOwnershipProcessor)
    revueApiKey = os.environ['REVUE_API_KEY']
    slackClient = SlackClient(webhookUrl=os.environ['SLACK_WEBHOOK_URL'], requester=requester, defaultSender='worker', defaultChannel='notd-notifications')

    await database.connect()
    await workQueue.connect()
    await s3manager.connect()
    await tokenQueue.connect()

    await slackClient.post(text=f'process_token_ownerships → 🚧 started: {startTokenId}-{endTokenId}')
    try:
        currentTokenId = startTokenId
        while currentTokenId < endTokenId:
            start = currentTokenId
            end = min(currentTokenId + batchSize, endTokenId)
            currentTokenId = end
            logging.info(f'Working on {start}-{end}')
            query = TokenMetadatasTable.select() \
                .where(TokenMetadatasTable.c.tokenMetadataId >= start) \
                .where(TokenMetadatasTable.c.tokenMetadataId < end)
            tokenMetadatas = await retriever.query_token_metadatas(query=query)
            await asyncio.gather(*[process_token_ownership(tokenManager=tokenManager, registryAddress=tokenMetadata.registryAddress, tokenId=tokenMetadata.tokenId) for tokenMetadata in tokenMetadatas])
        await slackClient.post(text=f'process_token_ownerships → ✅ completed: {startTokenId}-{endTokenId}')
    except Exception as exception:
        await slackClient.post(text=f'process_token_ownerships → ❌ error: {startTokenId}-{endTokenId}\n```{str(exception)}```')
        raise exception
    finally:
        await database.disconnect()
        await workQueue.disconnect()
        await tokenQueue.disconnect()
        await s3manager.disconnect()
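Connecting and disconnecting four clients by hand is easy to get wrong. Assuming each client exposes connect()/disconnect() coroutines, as they all do in these examples, a small async context manager can centralise the bookkeeping; this is a sketch, not part of the original code:

import contextlib

@contextlib.asynccontextmanager
async def connected(*services):
    # Connect each service in order; on exit, disconnect in reverse order,
    # skipping any service that never connected successfully.
    connectedServices = []
    try:
        for service in services:
            await service.connect()
            connectedServices.append(service)
        yield
    finally:
        for service in reversed(connectedServices):
            await service.disconnect()

With this, the body of process_token_ownerships could run inside `async with connected(database, workQueue, s3manager, tokenQueue):` and the explicit finally block disappears.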
Example 6
async def _update_token_metadatas(tokensToProcess: Sequence[Tuple[str, str]], tokenManager: TokenManager, retriever: Retriever):
    query = (
        TokenMetadatasTable.select()
            .where(sqlalchemy.tuple_(TokenMetadatasTable.c.registryAddress, TokenMetadatasTable.c.tokenId).in_(tokensToProcess))
    )
    existingTokenMetadatas = await retriever.query_token_metadatas(query=query)
    existingTokenIds = set((tokenMetadata.registryAddress, tokenMetadata.tokenId) for tokenMetadata in existingTokenMetadatas)
    tokensToUpdate = set(tokensToProcess) - existingTokenIds
    print('len(tokensToUpdate)', len(tokensToUpdate))
    for tokensToUpdateChunk in list_util.generate_chunks(lst=list(tokensToUpdate), chunkSize=10):
        tokenProcessResults = await asyncio.gather(*[tokenManager.update_token_metadata(registryAddress=registryAddress, tokenId=tokenId) for (registryAddress, tokenId) in tokensToUpdateChunk], return_exceptions=True)
        tokenProcessSuccessCount = tokenProcessResults.count(None)
        if tokenProcessSuccessCount:
            print(f'{tokenProcessSuccessCount} / {len(tokenProcessResults)} token updates succeeded')
        # NOTE(krishan711): if less than 90% of things succeed, bail out
        if len(tokenProcessResults) >= 100 and tokenProcessSuccessCount / len(tokenProcessResults) < 0.9:
            raise Exception('Less than 90% of token updates succeeded!')
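The success-rate guard works because asyncio.gather with return_exceptions=True returns None for every coroutine that completed normally and the exception object for every one that failed, so counting None values counts successes. A self-contained sketch of the pattern (flaky is a hypothetical stand-in for update_token_metadata):

import asyncio

async def flaky(i: int) -> None:
    # Hypothetical stand-in for tokenManager.update_token_metadata.
    if i % 10 == 0:
        raise ValueError(i)

async def main() -> None:
    results = await asyncio.gather(*[flaky(i) for i in range(100)], return_exceptions=True)
    successCount = results.count(None)  # successful coroutines returned None
    if len(results) >= 100 and successCount / len(results) < 0.9:
        raise Exception('Less than 90% of token updates succeeded!')
    print(f'{successCount} / {len(results)} token updates succeeded')

asyncio.run(main())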
Example 7
async def reprocess_metadata(startId: Optional[int], endId: Optional[int], batchSize: Optional[int]):
    databaseConnectionString = Database.create_psql_connection_string(
        username=os.environ["DB_USERNAME"],
        password=os.environ["DB_PASSWORD"],
        host=os.environ["DB_HOST"],
        port=os.environ["DB_PORT"],
        name=os.environ["DB_NAME"])
    database = Database(connectionString=databaseConnectionString)
    saver = Saver(database=database)
    retriever = Retriever(database=database)
    s3manager = S3Manager(region='eu-west-1',
                          accessKeyId=os.environ['AWS_KEY'],
                          accessKeySecret=os.environ['AWS_SECRET'])
    tokenQueue = SqsMessageQueue(
        region='eu-west-1',
        accessKeyId=os.environ['AWS_KEY'],
        accessKeySecret=os.environ['AWS_SECRET'],
        queueUrl='https://sqs.eu-west-1.amazonaws.com/097520841056/notd-token-queue')
    awsRequester = AwsRequester(accessKeyId=os.environ['AWS_KEY'],
                                accessKeySecret=os.environ['AWS_SECRET'])
    requester = Requester()
    ethClient = RestEthClient(
        url='https://nd-foldvvlb25awde7kbqfvpgvrrm.ethereum.managedblockchain.eu-west-1.amazonaws.com',
        requester=awsRequester)
    tokenMetadataProcessor = TokenMetadataProcessor(
        requester=requester,
        ethClient=ethClient,
        s3manager=s3manager,
        bucketName=os.environ['S3_BUCKET'])
    openseaApiKey = os.environ['OPENSEA_API_KEY']
    collectionProcessor = CollectionProcessor(
        requester=requester,
        ethClient=ethClient,
        openseaApiKey=openseaApiKey,
        s3manager=s3manager,
        bucketName=os.environ['S3_BUCKET'])
    tokenManager = TokenManager(saver=saver,
                                retriever=retriever,
                                tokenQueue=tokenQueue,
                                collectionProcessor=collectionProcessor,
                                tokenMetadataProcessor=tokenMetadataProcessor)

    await s3manager.connect()
    await tokenQueue.connect()
    await database.connect()
    if not startId:
        startId = 0
    if not endId:
        maxTokenMetadata = await retriever.list_token_metadatas(
            limit=1,
            orders=[
                Order(fieldName=TokenMetadatasTable.c.tokenMetadataId.key,
                      direction=Direction.DESCENDING)
            ])
        print(maxTokenMetadata)
        endId = maxTokenMetadata[0].tokenMetadataId + 1
    currentId = startId
    while currentId < endId:
        start = currentId
        end = min(currentId + batchSize, endId)
        query = TokenMetadatasTable.select()
        query = query.where(TokenMetadatasTable.c.tokenMetadataId >= start)
        query = query.where(TokenMetadatasTable.c.tokenMetadataId < end)
        query = query.where(
            TokenMetadatasTable.c.updatedDate < datetime.datetime(2022, 2, 13))
        query = query.order_by(TokenMetadatasTable.c.tokenMetadataId.asc())
        tokenMetadatasToChange = [
            token_metadata_from_row(row)
            for row in await database.execute(query=query)
        ]
        logging.info(f'Working on {start} - {end}')
        logging.info(f'Updating {len(tokenMetadatasToChange)} token metadatas...')
        await asyncio.gather(*[
            _reprocess_metadata_from_s3(
                tokenMetadataProcessor=tokenMetadataProcessor,
                s3manager=s3manager,
                tokenManager=tokenManager,
                tokenMetadata=tokenMetadata)
            for tokenMetadata in tokenMetadatasToChange
        ])
        currentId = currentId + batchSize

    await s3manager.disconnect()
    await tokenQueue.disconnect()
    await awsRequester.close_connections()
    await requester.close_connections()
    await database.disconnect()
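Resolving endId with a limit-1 descending query works but fetches a whole row; a plain MAX aggregate is an equivalent alternative. A hedged sketch in SQLAlchemy Core, with a hypothetical standalone table standing in for TokenMetadatasTable:

import sqlalchemy

# Hypothetical stand-in for the project's TokenMetadatasTable.
metadata = sqlalchemy.MetaData()
tokenMetadatasTable = sqlalchemy.Table(
    'tbl_token_metadatas', metadata,
    sqlalchemy.Column('tokenMetadataId', sqlalchemy.Integer, primary_key=True))

# SELECT max(tokenMetadataId) FROM tbl_token_metadatas
query = sqlalchemy.select(sqlalchemy.func.max(tokenMetadatasTable.c.tokenMetadataId))
print(query)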