Example #1
async def daily_new_registries():
    databaseConnectionString = Database.create_psql_connection_string(
        username=os.environ["DB_USERNAME"],
        password=os.environ["DB_PASSWORD"],
        host=os.environ["DB_HOST"],
        port=os.environ["DB_PORT"],
        name=os.environ["DB_NAME"])
    database = Database(connectionString=databaseConnectionString)

    await database.connect()
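    # Select transfers whose registry appears in exactly one transfer overall
    # (i.e. newly seen registries) and whose block date is today.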
    query = TokenTransfersTable.select()
    query = query.where(
        TokenTransfersTable.c.registryAddress.in_(
            TokenTransfersTable.select().with_only_columns([
                TokenTransfersTable.c.registryAddress
            ]).group_by(TokenTransfersTable.c.registryAddress).having(
                sqlalchemyfunc.count(TokenTransfersTable.c.registryAddress) ==
                1)))
    query = query.where(
        sqlalchemyfunc.date(TokenTransfersTable.c.blockDate) ==
        sqlalchemyfunc.current_date())
    rows = await database.fetch_all(query)
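    # NOTE: rows are accessed positionally below; indices 2 and 5 are assumed to
    # map to the registryAddress and tokenId columns of TokenTransfersTable.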
    for row in rows:
        logging.info(
            f'New Tokens: registry address {row[2]} and tokenId {row[5]}')

    await database.disconnect()
Example #2
async def check_all_processed(startBlockNumber: int, endBlockNumber: int,
                              batchSize: int):
    databaseConnectionString = Database.create_psql_connection_string(
        username=os.environ["DB_USERNAME"],
        password=os.environ["DB_PASSWORD"],
        host=os.environ["DB_HOST"],
        port=os.environ["DB_PORT"],
        name=os.environ["DB_NAME"])
    database = Database(connectionString=databaseConnectionString)
    await database.connect()
    currentBlockNumber = startBlockNumber
    while currentBlockNumber < endBlockNumber:
        start = currentBlockNumber
        end = min(currentBlockNumber + batchSize, endBlockNumber)
        logging.info(f'Working on {start} to {end}...')
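        # Backfill amount_2 from amount in raw SQL; interpolating start and end
        # directly is safe here only because both are ints, not user input.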
        result = await database.execute(f'''
            update tbl_token_transfers
            set amount_2 = amount
            where block_number >= {start}
            and block_number < {end}
            and amount_2 is null;
        ''')
        print('result:', result)
        currentBlockNumber += batchSize
    await database.disconnect()
Example #3
async def process_tokens_from_old_transfers(startBlockNumber: int, endBlockNumber: int, batchSize: int):
    databaseConnectionString = Database.create_psql_connection_string(username=os.environ["DB_USERNAME"], password=os.environ["DB_PASSWORD"], host=os.environ["DB_HOST"], port=os.environ["DB_PORT"], name=os.environ["DB_NAME"])
    database = Database(connectionString=databaseConnectionString)
    saver = Saver(database=database)
    retriever = Retriever(database=database)
    s3manager = S3Manager(region='eu-west-1', accessKeyId=os.environ['AWS_KEY'], accessKeySecret=os.environ['AWS_SECRET'])
    workQueue = SqsMessageQueue(region='eu-west-1', accessKeyId=os.environ['AWS_KEY'], accessKeySecret=os.environ['AWS_SECRET'], queueUrl='https://sqs.eu-west-1.amazonaws.com/097520841056/notd-work-queue')
    tokenQueue = SqsMessageQueue(region='eu-west-1', accessKeyId=os.environ['AWS_KEY'], accessKeySecret=os.environ['AWS_SECRET'], queueUrl='https://sqs.eu-west-1.amazonaws.com/097520841056/notd-token-queue')
    awsRequester = AwsRequester(accessKeyId=os.environ['AWS_KEY'], accessKeySecret=os.environ['AWS_SECRET'])
    ethClient = RestEthClient(url='https://nd-foldvvlb25awde7kbqfvpgvrrm.ethereum.managedblockchain.eu-west-1.amazonaws.com', requester=awsRequester)
    requester = Requester()
    tokenMetadataProcessor = TokenMetadataProcessor(requester=requester, ethClient=ethClient, s3manager=s3manager, bucketName=os.environ['S3_BUCKET'])
    openseaApiKey = os.environ['OPENSEA_API_KEY']
    tokenOwnershipProcessor = TokenOwnershipProcessor(retriever=retriever)
    collectionProcessor = CollectionProcessor(requester=requester, ethClient=ethClient, openseaApiKey=openseaApiKey, s3manager=s3manager, bucketName=os.environ['S3_BUCKET'])
    tokenManager = TokenManager(saver=saver, retriever=retriever, tokenQueue=tokenQueue, collectionProcessor=collectionProcessor, tokenMetadataProcessor=tokenMetadataProcessor, tokenOwnershipProcessor=tokenOwnershipProcessor)
    revueApiKey = os.environ['REVUE_API_KEY']

    await database.connect()
    await workQueue.connect()
    await s3manager.connect()
    await tokenQueue.connect()
    cache = set()
    registryCache = set()
    currentBlockNumber = startBlockNumber
    while currentBlockNumber < endBlockNumber:
        start = currentBlockNumber
        end = min(currentBlockNumber + batchSize, endBlockNumber)
        currentBlockNumber = end
        logging.info(f'Working on {start}-{end}...')
        query = (
             sqlalchemy.select(TokenTransfersTable.c.registryAddress, TokenTransfersTable.c.tokenId)
             .where(TokenTransfersTable.c.blockNumber >= start)
             .where(TokenTransfersTable.c.blockNumber < end)
         )
        result = await database.execute(query=query)
        tokensToProcess = set()
        collectionsToProcess = set()
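        # Dedupe across batches: tokens and registries seen in earlier batches
        # are skipped so each is processed at most once.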
        for (registryAddress, tokenId) in result:
            if (registryAddress, tokenId) in cache:
                continue
            cache.add((registryAddress, tokenId))
            tokensToProcess.add((registryAddress, tokenId))
            if registryAddress in registryCache:
                continue
            registryCache.add(registryAddress)
            collectionsToProcess.add(registryAddress)
        print('len(tokensToProcess)', len(tokensToProcess))
        print('len(collectionsToProcess)', len(collectionsToProcess))
        try:
            await _update_token_metadatas(tokensToProcess=tokensToProcess, tokenManager=tokenManager, retriever=retriever)
            await _update_collections(collectionsToProcess=collectionsToProcess, tokenManager=tokenManager, retriever=retriever)
        except Exception:
            logging.error(f'Failed during: {start}-{end}')
            raise
    await database.disconnect()
    await workQueue.disconnect()
    await tokenQueue.disconnect()
    await s3manager.disconnect()
Example #4
async def main():
    requestIdHolder = RequestIdHolder()
    name = os.environ.get('NAME', 'notd-api')
    version = os.environ.get('VERSION', 'local')
    environment = os.environ.get('ENV', 'dev')
    isRunningDebugMode = environment == 'dev'

    if isRunningDebugMode:
        logging.init_basic_logging()
    else:
        logging.init_json_logging(name=name, version=version, environment=environment, requestIdHolder=requestIdHolder)

    databaseConnectionString = Database.create_psql_connection_string(username=os.environ["DB_USERNAME"], password=os.environ["DB_PASSWORD"], host=os.environ["DB_HOST"], port=os.environ["DB_PORT"], name=os.environ["DB_NAME"])
    database = Database(connectionString=databaseConnectionString)
    saver = Saver(database=database)
    retriever = Retriever(database=database)
    s3manager = S3Manager(region='eu-west-1', accessKeyId=os.environ['AWS_KEY'], accessKeySecret=os.environ['AWS_SECRET'])
    workQueue = SqsMessageQueue(region='eu-west-1', accessKeyId=os.environ['AWS_KEY'], accessKeySecret=os.environ['AWS_SECRET'], queueUrl='https://sqs.eu-west-1.amazonaws.com/097520841056/notd-work-queue')
    tokenQueue = SqsMessageQueue(region='eu-west-1', accessKeyId=os.environ['AWS_KEY'], accessKeySecret=os.environ['AWS_SECRET'], queueUrl='https://sqs.eu-west-1.amazonaws.com/097520841056/notd-token-queue')
    awsRequester = AwsRequester(accessKeyId=os.environ['AWS_KEY'], accessKeySecret=os.environ['AWS_SECRET'])
    ethClient = RestEthClient(url='https://nd-foldvvlb25awde7kbqfvpgvrrm.ethereum.managedblockchain.eu-west-1.amazonaws.com', requester=awsRequester)
    blockProcessor = BlockProcessor(ethClient=ethClient)
    requester = Requester()
    tokenMetadataProcessor = TokenMetadataProcessor(requester=requester, ethClient=ethClient, s3manager=s3manager, bucketName=os.environ['S3_BUCKET'])
    openseaApiKey = os.environ['OPENSEA_API_KEY']
    collectionProcessor = CollectionProcessor(requester=requester, ethClient=ethClient, openseaApiKey=openseaApiKey, s3manager=s3manager, bucketName=os.environ['S3_BUCKET'])
    tokenOwnershipProcessor = TokenOwnershipProcessor(retriever=retriever)
    collectionActivityProcessor = CollectionActivityProcessor(retriever=retriever)
    revueApiKey = os.environ['REVUE_API_KEY']
    tokenManager = TokenManager(saver=saver, retriever=retriever, tokenQueue=tokenQueue, collectionProcessor=collectionProcessor, tokenMetadataProcessor=tokenMetadataProcessor, tokenOwnershipProcessor=tokenOwnershipProcessor, collectionActivityProcessor=collectionActivityProcessor)
    notdManager = NotdManager(blockProcessor=blockProcessor, saver=saver, retriever=retriever, workQueue=workQueue, tokenManager=tokenManager, requester=requester, revueApiKey=revueApiKey)

    processor = NotdMessageProcessor(notdManager=notdManager)
    slackClient = SlackClient(webhookUrl=os.environ['SLACK_WEBHOOK_URL'], requester=requester, defaultSender='worker', defaultChannel='notd-notifications')
    workQueueProcessor = MessageQueueProcessor(queue=workQueue, messageProcessor=processor, slackClient=slackClient, requestIdHolder=requestIdHolder)
    tokenQueueProcessor = MessageQueueProcessor(queue=tokenQueue, messageProcessor=processor, slackClient=slackClient, requestIdHolder=requestIdHolder)

    await database.connect()
    await s3manager.connect()
    await workQueue.connect()
    await tokenQueue.connect()
    try:
        while True:
            hasProcessedWork = await workQueueProcessor.execute_batch(batchSize=3, longPollSeconds=1, shouldProcessInParallel=True)
            if hasProcessedWork:
                continue
            hasProcessedToken = await tokenQueueProcessor.execute_batch(batchSize=10, longPollSeconds=1, shouldProcessInParallel=True)
            if hasProcessedToken:
                continue
            logging.info('No message received.. sleeping')
            await asyncio.sleep(60)
    finally:
        await database.disconnect()
        await s3manager.disconnect()
        await workQueue.disconnect()
        await tokenQueue.disconnect()
        await requester.close_connections()
Example #5
async def reprocess_metadata(startId: int, endId: int, batchSize: int):
    databaseConnectionString = Database.create_psql_connection_string(
        username=os.environ["DB_USERNAME"],
        password=os.environ["DB_PASSWORD"],
        host=os.environ["DB_HOST"],
        port=os.environ["DB_PORT"],
        name=os.environ["DB_NAME"])
    database = Database(connectionString=databaseConnectionString)
    saver = Saver(database)
    tokenMetadataProcessor = TokenMetadataProcessor(requester=None,
                                                    ethClient=None,
                                                    s3manager=None,
                                                    bucketName=None)

    await database.connect()

    currentId = startId
    while currentId < endId:
        start = currentId
        end = min(currentId + batchSize, endId)
        logging.info(f'Working on {start} to {end}...')
        async with database.transaction():
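            # Target metadatas whose metadataUrl embeds the payload as a data: URI
            # and that never resolved a name, so they can be re-parsed in place.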
            query = TokenMetadatasTable.select()
            query = query.where(TokenMetadatasTable.c.tokenMetadataId >= start)
            query = query.where(TokenMetadatasTable.c.tokenMetadataId < end)
            query = query.where(
                TokenMetadatasTable.c.metadataUrl.startswith('data:'))
            query = query.where(TokenMetadatasTable.c.name.is_(None))
            tokenMetadatasToChange = [
                token_metadata_from_row(row)
                async for row in database.iterate(query=query)
            ]
            logging.info(
                f'Updating {len(tokenMetadatasToChange)} token metadatas...')
            for tokenMetadata in tokenMetadatasToChange:
                try:
                    tokenMetadataDict = tokenMetadataProcessor._resolve_data(
                        dataString=tokenMetadata.metadataUrl,
                        registryAddress=tokenMetadata.registryAddress,
                        tokenId=tokenMetadata.tokenId)
                    if tokenMetadataDict:
                        logging.info(
                            f'Processed: {tokenMetadata.tokenMetadataId}')
                        await saver.update_token_metadata(
                            tokenMetadataId=tokenMetadata.tokenMetadataId,
                            name=tokenMetadataDict.get('name'),
                            imageUrl=tokenMetadataDict.get('image'),
                            description=tokenMetadataDict.get('description'),
                            attributes=tokenMetadataDict.get('attributes', []))
                except Exception as e:
                    logging.exception(
                        f'Error processing {tokenMetadata.tokenMetadataId}: {e}'
                    )
        currentId = currentId + batchSize
    await database.disconnect()
Example #6
async def calculate_token_fields(startCollectionId: Optional[int], endCollectionId: Optional[int]):
    databaseConnectionString = Database.create_psql_connection_string(username=os.environ["DB_USERNAME"], password=os.environ["DB_PASSWORD"], host=os.environ["DB_HOST"], port=os.environ["DB_PORT"], name=os.environ["DB_NAME"])
    database = Database(connectionString=databaseConnectionString)
    s3manager = S3Manager(region='eu-west-1', accessKeyId=os.environ['AWS_KEY'], accessKeySecret=os.environ['AWS_SECRET'])
    bucketName = os.environ['S3_BUCKET']

    await database.connect()
    await s3manager.connect()
    query = TokenCollectionsTable.select()
    if startCollectionId:
        query = query.where(TokenCollectionsTable.c.collectionId >= startCollectionId)
    if endCollectionId:
        query = query.where(TokenCollectionsTable.c.collectionId < endCollectionId)
    collections = [collection_from_row(row) async for row in database.iterate(query=query)]
    rows = []
    fields = set()
    for collection in collections:
        logging.info(f'Working on {collection.address}')
        collectionDirectory = f'{bucketName}/token-metadatas/{collection.address}/'
        index = 0
        async for tokenFile in s3manager.generate_directory_files(s3Directory=collectionDirectory):
            if index > 3:
                break
            logging.info(f'Working on file {tokenFile.bucket}/{tokenFile.path}')
            try:
                tokenDict = json.loads(await s3manager.read_file(sourcePath=f'{tokenFile.bucket}/{tokenFile.path}'))
                tokenDict['tokenId'] = tokenFile.path.split('/')[2]
                attributes = tokenDict.get('attributes')
                if not attributes:
                    tokenDict['attributes'] = None
                elif isinstance(attributes, list):
                    # Collapse the list of attribute dicts into a comma-separated set of their keys.
                    tokenDict['attributes'] = ','.join(
                        set(key for attribute in attributes for key in attribute.keys()))
                else:
                    tokenDict['attributes'] = list(attributes)
                tokenDict['description'] = tokenDict["description"][:10] if tokenDict.get('description') else None
                tokenDict['collection'] = collection.address
                fields.update(tokenDict.keys())
                rows.append(tokenDict)
            except Exception as exception:
                logging.exception(exception)
            index += 1

    with open(f'./output{startCollectionId}-{endCollectionId}.tsv', 'w') as outFile:
        dictWriter = csv.DictWriter(outFile, fieldnames=sorted(fields), delimiter='\t')
        dictWriter.writeheader()
        dictWriter.writerows(rows)

    fieldCounts = defaultdict(int)
    for row in rows:
        for key, value in row.items():
            if value:
                fieldCounts[key] += 1
    print(fieldCounts)


    await database.disconnect()
    await s3manager.disconnect()
Example #7
async def process_token_ownerships(startTokenId: int, endTokenId: int, batchSize: int):
    databaseConnectionString = Database.create_psql_connection_string(username=os.environ["DB_USERNAME"], password=os.environ["DB_PASSWORD"], host=os.environ["DB_HOST"], port=os.environ["DB_PORT"], name=os.environ["DB_NAME"])
    database = Database(connectionString=databaseConnectionString)
    saver = Saver(database=database)
    retriever = Retriever(database=database)
    s3manager = S3Manager(region='eu-west-1', accessKeyId=os.environ['AWS_KEY'], accessKeySecret=os.environ['AWS_SECRET'])
    workQueue = SqsMessageQueue(region='eu-west-1', accessKeyId=os.environ['AWS_KEY'], accessKeySecret=os.environ['AWS_SECRET'], queueUrl='https://sqs.eu-west-1.amazonaws.com/097520841056/notd-work-queue')
    tokenQueue = SqsMessageQueue(region='eu-west-1', accessKeyId=os.environ['AWS_KEY'], accessKeySecret=os.environ['AWS_SECRET'], queueUrl='https://sqs.eu-west-1.amazonaws.com/097520841056/notd-token-queue')
    awsRequester = AwsRequester(accessKeyId=os.environ['AWS_KEY'], accessKeySecret=os.environ['AWS_SECRET'])
    ethClient = RestEthClient(url='https://nd-foldvvlb25awde7kbqfvpgvrrm.ethereum.managedblockchain.eu-west-1.amazonaws.com', requester=awsRequester)
    requester = Requester()
    tokenMetadataProcessor = TokenMetadataProcessor(requester=requester, ethClient=ethClient, s3manager=s3manager, bucketName=os.environ['S3_BUCKET'])
    openseaApiKey = os.environ['OPENSEA_API_KEY']
    tokenOwnershipProcessor = TokenOwnershipProcessor(retriever=retriever)
    collectionProcessor = CollectionProcessor(requester=requester, ethClient=ethClient, openseaApiKey=openseaApiKey, s3manager=s3manager, bucketName=os.environ['S3_BUCKET'])
    tokenManager = TokenManager(saver=saver, retriever=retriever, tokenQueue=tokenQueue, collectionProcessor=collectionProcessor, tokenMetadataProcessor=tokenMetadataProcessor, tokenOwnershipProcessor=tokenOwnershipProcessor)
    revueApiKey = os.environ['REVUE_API_KEY']
    slackClient = SlackClient(webhookUrl=os.environ['SLACK_WEBHOOK_URL'], requester=requester, defaultSender='worker', defaultChannel='notd-notifications')

    await database.connect()
    await workQueue.connect()
    await s3manager.connect()
    await tokenQueue.connect()

    await slackClient.post(text=f'process_token_ownerships → 🚧 started: {startTokenId}-{endTokenId}')
    try:
        currentTokenId = startTokenId
        while currentTokenId < endTokenId:
            start = currentTokenId
            end = min(currentTokenId + batchSize, endTokenId)
            currentTokenId = end
            logging.info(f'Working on {start}-{end}')
            query = TokenMetadatasTable.select() \
                .where(TokenMetadatasTable.c.tokenMetadataId >= start) \
                .where(TokenMetadatasTable.c.tokenMetadataId < end)
            tokenMetadatas = await retriever.query_token_metadatas(query=query)
            await asyncio.gather(*[process_token_ownership(tokenManager=tokenManager, registryAddress=tokenMetadata.registryAddress, tokenId=tokenMetadata.tokenId) for tokenMetadata in tokenMetadatas])
        await slackClient.post(text=f'process_token_ownerships → ✅ completed: {startTokenId}-{endTokenId}')
    except Exception as exception:
        await slackClient.post(text=f'process_token_ownerships → ❌ error: {startTokenId}-{endTokenId}\n```{str(exception)}```')
        raise exception
    finally:
        await database.disconnect()
        await workQueue.disconnect()
        await tokenQueue.disconnect()
        await s3manager.disconnect()
Example #8
async def fix_address(startBlockNumber: int, endBlockNumber: int,
                      batchSize: int):
    databaseConnectionString = Database.create_psql_connection_string(
        username=os.environ["DB_USERNAME"],
        password=os.environ["DB_PASSWORD"],
        host=os.environ["DB_HOST"],
        port=os.environ["DB_PORT"],
        name=os.environ["DB_NAME"])
    database = Database(connectionString=databaseConnectionString)
    await database.connect()

    currentBlockNumber = startBlockNumber
    while currentBlockNumber < endBlockNumber:
        start = currentBlockNumber
        end = min(currentBlockNumber + batchSize, endBlockNumber)
        logging.info(f'Working on {start} to {end}...')
        async with database.transaction():
            query = TokenTransfersTable.select()
            query = query.where(TokenTransfersTable.c.blockNumber >= start)
            query = query.where(TokenTransfersTable.c.blockNumber < end)
            query = query.where(
                or_(
                    sqlalchemyfunc.length(
                        TokenTransfersTable.c.toAddress) != 42,
                    sqlalchemyfunc.length(
                        TokenTransfersTable.c.fromAddress) != 42,
                ))
            tokenTransfersToChange = [
                token_transfer_from_row(row)
                async for row in database.iterate(query=query)
            ]
            logging.info(
                f'Updating {len(tokenTransfersToChange)} transfers...')
            for tokenTransfer in tokenTransfersToChange:
                query = TokenTransfersTable.update(
                    TokenTransfersTable.c.tokenTransferId ==
                    tokenTransfer.tokenTransferId)
                values = {
                    TokenTransfersTable.c.toAddress.key:
                    normalize_address(tokenTransfer.toAddress),
                    TokenTransfersTable.c.fromAddress.key:
                    normalize_address(tokenTransfer.fromAddress),
                }
                await database.execute(query=query, values=values)
        currentBlockNumber = end
    await database.disconnect()
Example #9
async def check_all_processed(startBlockNumber: int, endBlockNumber: int,
                              batchSize: int):
    databaseConnectionString = Database.create_psql_connection_string(
        username=os.environ["DB_USERNAME"],
        password=os.environ["DB_PASSWORD"],
        host=os.environ["DB_HOST"],
        port=os.environ["DB_PORT"],
        name=os.environ["DB_NAME"])
    database = Database(connectionString=databaseConnectionString)
    workQueue = SqsMessageQueue(
        region='eu-west-1',
        accessKeyId=os.environ['AWS_KEY'],
        accessKeySecret=os.environ['AWS_SECRET'],
        queueUrl=
        'https://sqs.eu-west-1.amazonaws.com/097520841056/notd-work-queue')
    await database.connect()
    await workQueue.connect()

    currentBlockNumber = startBlockNumber
    while currentBlockNumber < endBlockNumber:
        start = currentBlockNumber
        end = min(currentBlockNumber + batchSize, endBlockNumber)
        logging.info(f'Working on {start} - {end}...')
        async with database.transaction():
            query = TokenTransfersTable.select() \
                .with_only_columns([TokenTransfersTable.c.blockNumber]) \
                .filter(TokenTransfersTable.c.blockNumber >= start) \
                .filter(TokenTransfersTable.c.blockNumber < end) \
                .distinct(TokenTransfersTable.c.blockNumber)
            processedBlocks = [
                row[0] for row in await database.fetch_all(query)
            ]
        unprocessedBlocks = set(range(start, end)) - set(processedBlocks)
        logging.info(
            f'Processing {len(unprocessedBlocks)} blocks in {start} - {end}')
        if unprocessedBlocks:
            await workQueue.send_message(message=ProcessBlocksMessageContent(
                blockNumbers=list(unprocessedBlocks)).to_message())
        currentBlockNumber = currentBlockNumber + batchSize
    await database.disconnect()
    await workQueue.disconnect()
Example #10
async def process_collection(address: str, shouldDefer: bool):
    databaseConnectionString = Database.create_psql_connection_string(username=os.environ["DB_USERNAME"], password=os.environ["DB_PASSWORD"], host=os.environ["DB_HOST"], port=os.environ["DB_PORT"], name=os.environ["DB_NAME"])
    database = Database(connectionString=databaseConnectionString)
    saver = Saver(database=database)
    retriever = Retriever(database=database)

    s3manager = S3Manager(region='eu-west-1', accessKeyId=os.environ['AWS_KEY'], accessKeySecret=os.environ['AWS_SECRET'])
    workQueue = SqsMessageQueue(region='eu-west-1', accessKeyId=os.environ['AWS_KEY'], accessKeySecret=os.environ['AWS_SECRET'], queueUrl='https://sqs.eu-west-1.amazonaws.com/097520841056/notd-work-queue')
    tokenQueue = SqsMessageQueue(region='eu-west-1', accessKeyId=os.environ['AWS_KEY'], accessKeySecret=os.environ['AWS_SECRET'], queueUrl='https://sqs.eu-west-1.amazonaws.com/097520841056/notd-token-queue')
    requester = Requester()

    awsRequester = AwsRequester(accessKeyId=os.environ['AWS_KEY'], accessKeySecret=os.environ['AWS_SECRET'])
    ethClient = RestEthClient(url='https://nd-foldvvlb25awde7kbqfvpgvrrm.ethereum.managedblockchain.eu-west-1.amazonaws.com', requester=awsRequester)
    blockProcessor = BlockProcessor(ethClient=ethClient)
    tokenMetadataProcessor = TokenMetadataProcessor(requester=requester, ethClient=ethClient, s3manager=s3manager, bucketName=os.environ['S3_BUCKET'])
    openseaApiKey = os.environ['OPENSEA_API_KEY']
    collectionProcessor = CollectionProcessor(requester=requester, ethClient=ethClient, openseaApiKey=openseaApiKey, s3manager=s3manager, bucketName=os.environ['S3_BUCKET'])
    revueApiKey = os.environ['REVUE_API_KEY']
    tokenManager = TokenManager(saver=saver, retriever=retriever, tokenQueue=tokenQueue, collectionProcessor=collectionProcessor, tokenMetadataProcessor=tokenMetadataProcessor)
    notdManager = NotdManager(blockProcessor=blockProcessor, saver=saver, retriever=retriever, workQueue=workQueue, tokenManager=tokenManager, requester=requester, revueApiKey=revueApiKey)

    await database.connect()
    await s3manager.connect()
    await workQueue.connect()
    await tokenQueue.connect()
    retrievedCollectionTokenMetadatas = await retriever.list_token_metadatas(
        fieldFilters=[
            StringFieldFilter(fieldName=TokenMetadatasTable.c.registryAddress.key, eq=address),
        ],
    )
    for tokenMetadata in retrievedCollectionTokenMetadatas:
        if shouldDefer:
            await notdManager.update_token_metadata_deferred(registryAddress=address, tokenId=tokenMetadata.tokenId, shouldForce=True)
        else:
            await notdManager.update_token_metadata(registryAddress=address, tokenId=tokenMetadata.tokenId, shouldForce=True)
    await database.disconnect()
    await s3manager.disconnect()
    await workQueue.disconnect()
    await tokenQueue.disconnect()
    await requester.close_connections()
Example #11
async def owned_tokens(ownerAddress: Optional[str]):
    databaseConnectionString = Database.create_psql_connection_string(
        username=os.environ["DB_USERNAME"],
        password=os.environ["DB_PASSWORD"],
        host=os.environ["DB_HOST"],
        port=os.environ["DB_PORT"],
        name=os.environ["DB_NAME"])
    database = Database(connectionString=databaseConnectionString)
    retriever = Retriever(database=database)

    await database.connect()
    boughtTokens = []
    soldTokens = []
    async with database.transaction():
        query = TokenTransfersTable.select()
        query = query.where(TokenTransfersTable.c.toAddress == ownerAddress)
        async for row in retriever.database.iterate(query=query):
            tokenTransfer = token_transfer_from_row(row)
            boughtTokens.append((tokenTransfer.registryAddress, tokenTransfer.tokenId))
        query = TokenTransfersTable.select()
        query = query.where(TokenTransfersTable.c.fromAddress == ownerAddress)
        async for row in retriever.database.iterate(query=query):
            tokenTransfer = token_transfer_from_row(row)
            soldTokens.append((tokenTransfer.registryAddress, tokenTransfer.tokenId))

        # Compare by (registryAddress, tokenId) rather than by whole transfer
        # objects, which would never match across different transactions.
        uniqueBoughtTokens = set(boughtTokens)
        uniqueSoldTokens = set(soldTokens)
        tokensOwned = uniqueBoughtTokens - uniqueSoldTokens

        for (registryAddress, tokenId) in tokensOwned:
            logging.info(
                f'Tokens Owned: registry_address: {registryAddress}, token_id: {tokenId}'
            )

    await database.disconnect()
    logging.info(f'Got {len(tokensOwned)} total owned')
Example #12
async def reprocess_metadata(startId: Optional[int], endId: Optional[int],
                             batchSize: Optional[int]):
    databaseConnectionString = Database.create_psql_connection_string(
        username=os.environ["DB_USERNAME"],
        password=os.environ["DB_PASSWORD"],
        host=os.environ["DB_HOST"],
        port=os.environ["DB_PORT"],
        name=os.environ["DB_NAME"])
    database = Database(connectionString=databaseConnectionString)
    saver = Saver(database=database)
    retriever = Retriever(database=database)
    s3manager = S3Manager(region='eu-west-1',
                          accessKeyId=os.environ['AWS_KEY'],
                          accessKeySecret=os.environ['AWS_SECRET'])
    tokenQueue = SqsMessageQueue(
        region='eu-west-1',
        accessKeyId=os.environ['AWS_KEY'],
        accessKeySecret=os.environ['AWS_SECRET'],
        queueUrl=
        'https://sqs.eu-west-1.amazonaws.com/097520841056/notd-token-queue')
    awsRequester = AwsRequester(accessKeyId=os.environ['AWS_KEY'],
                                accessKeySecret=os.environ['AWS_SECRET'])
    requester = Requester()
    ethClient = RestEthClient(
        url=
        'https://nd-foldvvlb25awde7kbqfvpgvrrm.ethereum.managedblockchain.eu-west-1.amazonaws.com',
        requester=awsRequester)
    tokenMetadataProcessor = TokenMetadataProcessor(
        requester=requester,
        ethClient=ethClient,
        s3manager=s3manager,
        bucketName=os.environ['S3_BUCKET'])
    openseaApiKey = os.environ['OPENSEA_API_KEY']
    collectionProcessor = CollectionProcessor(
        requester=requester,
        ethClient=ethClient,
        openseaApiKey=openseaApiKey,
        s3manager=s3manager,
        bucketName=os.environ['S3_BUCKET'])
    tokenManager = TokenManager(saver=saver,
                                retriever=retriever,
                                tokenQueue=tokenQueue,
                                collectionProcessor=collectionProcessor,
                                tokenMetadataProcessor=tokenMetadataProcessor)

    await s3manager.connect()
    await tokenQueue.connect()
    await database.connect()
    if not startId:
        startId = 0
    if not endId:
        maxTokenMetadata = await retriever.list_token_metadatas(
            limit=1,
            orders=[
                Order(fieldName=TokenMetadatasTable.c.tokenMetadataId.key,
                      direction=Direction.DESCENDING)
            ])
        print(maxTokenMetadata)
        endId = maxTokenMetadata[0].tokenMetadataId + 1
    currentId = startId
    while currentId < endId:
        start = currentId
        end = min(currentId + batchSize, endId)
        query = TokenMetadatasTable.select()
        query = query.where(TokenMetadatasTable.c.tokenMetadataId >= start)
        query = query.where(TokenMetadatasTable.c.tokenMetadataId < end)
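        # Only reprocess metadatas last updated before the hard-coded cutoff date.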
        query = query.where(
            TokenMetadatasTable.c.updatedDate < datetime.datetime(2022, 2, 13))
        query = query.order_by(TokenMetadatasTable.c.tokenMetadataId.asc())
        tokenMetadatasToChange = [
            token_metadata_from_row(row)
            for row in await database.execute(query=query)
        ]
        logging.info(f'Working on {start} - {end}')
        logging.info(f'Updating {len(tokenMetadatasToChange)} token metadatas...')
        await asyncio.gather(*[
            _reprocess_metadata_from_s3(
                tokenMetadataProcessor=tokenMetadataProcessor,
                s3manager=s3manager,
                tokenManger=tokenManager,  # the helper's parameter name keeps its original spelling
                tokenMetadata=tokenMetadata)
            for tokenMetadata in tokenMetadatasToChange
        ])
        currentId = currentId + batchSize

    await s3manager.disconnect()
    await tokenQueue.disconnect()
    await awsRequester.close_connections()
    await requester.close_connections()
    await database.disconnect()
Example #13
requestIdHolder = RequestIdHolder()
name = os.environ.get('NAME', 'mdtp-api')
version = os.environ.get('VERSION', 'local')
environment = os.environ.get('ENV', 'dev')
isRunningDebugMode = environment == 'dev'

if isRunningDebugMode:
    logging.init_basic_logging()
else:
    logging.init_json_logging(name=name,
                              version=version,
                              environment=environment,
                              requestIdHolder=requestIdHolder)

databaseConnectionString = Database.create_psql_connection_string(
    username=os.environ["DB_USERNAME"],
    password=os.environ["DB_PASSWORD"],
    host=os.environ["DB_HOST"],
    port=os.environ["DB_PORT"],
    name=os.environ["DB_NAME"])
database = Database(connectionString=databaseConnectionString)
saver = Saver(database=database)
retriever = Retriever(database=database)

workQueue = SqsMessageQueue(
    region='eu-west-1',
    accessKeyId=os.environ['AWS_KEY'],
    accessKeySecret=os.environ['AWS_SECRET'],
    queueUrl='https://sqs.eu-west-1.amazonaws.com/097520841056/mdtp-work-queue'
)
s3Manager = S3Manager(region='eu-west-1',
                      accessKeyId=os.environ['AWS_KEY'],
                      accessKeySecret=os.environ['AWS_SECRET'])
Example #14
async def reprocess_transfers(startBlockNumber: int, endBlockNumber: int):
    databaseConnectionString = Database.create_psql_connection_string(
        username=os.environ["DB_USERNAME"],
        password=os.environ["DB_PASSWORD"],
        host=os.environ["DB_HOST"],
        port=os.environ["DB_PORT"],
        name=os.environ["DB_NAME"])
    database = Database(connectionString=databaseConnectionString)
    saver = Saver(database=database)
    retriever = Retriever(database=database)
    requester = Requester()
    slackClient = SlackClient(webhookUrl=os.environ['SLACK_WEBHOOK_URL'],
                              requester=requester,
                              defaultSender='worker',
                              defaultChannel='notd-notifications')
    awsRequester = AwsRequester(accessKeyId=os.environ['AWS_KEY'],
                                accessKeySecret=os.environ['AWS_SECRET'])
    ethClient = RestEthClient(
        url=
        'https://nd-foldvvlb25awde7kbqfvpgvrrm.ethereum.managedblockchain.eu-west-1.amazonaws.com',
        requester=awsRequester)
    blockProcessor = BlockProcessor(ethClient=ethClient)
    notdManager = NotdManager(blockProcessor=None,
                              saver=saver,
                              retriever=retriever,
                              workQueue=None,
                              tokenManager=None,
                              requester=requester,
                              revueApiKey=None)

    await database.connect()
    await slackClient.post(
        text=
        f'reprocess_transfers → 🚧 started: {startBlockNumber}-{endBlockNumber}'
    )
    try:
        currentBlockNumber = startBlockNumber
        while currentBlockNumber < endBlockNumber:
            logging.info(f'Working on {currentBlockNumber}')
            try:
                retrievedTokenTransfers = await blockProcessor.process_block(
                    blockNumber=currentBlockNumber)
            except Exception as exception:
                logging.info(
                    f'Got exception whilst getting blocks: {str(exception)}. Will retry in 60 secs.'
                )
                await asyncio.sleep(60)
                # Retry the same block; currentBlockNumber only advances on success.
                continue
            await notdManager._save_block_transfers(
                blockNumber=currentBlockNumber,
                retrievedTokenTransfers=retrievedTokenTransfers)
            currentBlockNumber = currentBlockNumber + 1
        await slackClient.post(
            text=
            f'reprocess_transfers → ✅ completed: {startBlockNumber}-{endBlockNumber}'
        )
    except Exception as exception:
        await slackClient.post(
            text=
            f'reprocess_transfers → ❌ error: {startBlockNumber}-{endBlockNumber}\n```{str(exception)}```'
        )
        raise exception
    finally:
        await database.disconnect()
        await requester.close_connections()
        await awsRequester.close_connections()
Example #15
async def reprocess_bad_blocks(startBlockNumber: int, endBlockNumber: int,
                               batchSize: int):
    databaseConnectionString = Database.create_psql_connection_string(
        username=os.environ["DB_USERNAME"],
        password=os.environ["DB_PASSWORD"],
        host=os.environ["DB_HOST"],
        port=os.environ["DB_PORT"],
        name=os.environ["DB_NAME"])
    database = Database(connectionString=databaseConnectionString)
    saver = Saver(database=database)
    retriever = Retriever(database=database)
    workQueue = SqsMessageQueue(
        region='eu-west-1',
        accessKeyId=os.environ['AWS_KEY'],
        accessKeySecret=os.environ['AWS_SECRET'],
        queueUrl=
        'https://sqs.eu-west-1.amazonaws.com/097520841056/notd-work-queue')
    tokenQueue = SqsMessageQueue(
        region='eu-west-1',
        accessKeyId=os.environ['AWS_KEY'],
        accessKeySecret=os.environ['AWS_SECRET'],
        queueUrl=
        'https://sqs.eu-west-1.amazonaws.com/097520841056/notd-token-queue')
    requester = Requester()
    slackClient = SlackClient(webhookUrl=os.environ['SLACK_WEBHOOK_URL'],
                              requester=requester,
                              defaultSender='worker',
                              defaultChannel='notd-notifications')
    awsRequester = AwsRequester(accessKeyId=os.environ['AWS_KEY'],
                                accessKeySecret=os.environ['AWS_SECRET'])
    ethClient = RestEthClient(
        url=
        'https://nd-foldvvlb25awde7kbqfvpgvrrm.ethereum.managedblockchain.eu-west-1.amazonaws.com',
        requester=awsRequester)
    blockProcessor = BlockProcessor(ethClient=ethClient)
    # NOTE(krishan711): use tokenqueue so its lower prioritized work
    notdManager = NotdManager(blockProcessor=None,
                              saver=saver,
                              retriever=retriever,
                              workQueue=tokenQueue,
                              tokenManager=None,
                              requester=requester,
                              revueApiKey=None)

    await database.connect()
    await workQueue.connect()
    await tokenQueue.connect()
    await slackClient.post(
        text=
        f'reprocess_bad_blocks → 🚧 started: {startBlockNumber}-{endBlockNumber}'
    )
    try:
        currentBlockNumber = startBlockNumber
        while currentBlockNumber < endBlockNumber:
            start = currentBlockNumber
            end = min(start + batchSize, endBlockNumber)
            logging.info(f'Working on {start}-{end}')
            blockNumbers = set(range(start, end))
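            # A block is considered bad if it has uncles, if its transfers carry
            # more than one blockHash, or if its transactions' receipts resolve
            # to a different block than the one stored.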
            processedBlocksQuery = (sqlalchemy.select(
                BlocksTable.c.blockNumber).where(
                    BlocksTable.c.blockNumber >= start).where(
                        BlocksTable.c.blockNumber < end))
            results = await database.execute(query=processedBlocksQuery)
            processedBlocks = {blockNumber for (blockNumber, ) in results}
            logging.info(f'Ignoring {len(processedBlocks)} processedBlocks')
            blockNumbers = list(blockNumbers - processedBlocks)
            # blockUncleCounts = []
            # for chunk in list_util.generate_chunks(lst=blockNumbers, chunkSize=10):
            #     blockUncleCounts += await asyncio.gather(*[blockProcessor.ethClient.get_block_uncle_count(blockNumber=blockNumber) for blockNumber in chunk])
            #   blocksWithUncles = {blockNumber for (blockNumber, uncleCount) in zip(blockNumbers, blockUncleCounts) if uncleCount > 0}
            blocksWithUncles = set()
            logging.info(f'Found {len(blocksWithUncles)} blocks with uncles')
            blocksWithDuplicatesQuery = (sqlalchemy.select(
                TokenTransfersTable.c.blockNumber,
                sqlalchemy.func.count(
                    sqlalchemy.func.distinct(TokenTransfersTable.c.blockHash))
            ).where(TokenTransfersTable.c.blockNumber >= start).where(
                TokenTransfersTable.c.blockNumber < end).group_by(
                    TokenTransfersTable.c.blockNumber))
            results = await database.execute(query=blocksWithDuplicatesQuery)
            blocksWithDuplicates = {
                blockNumber
                for (blockNumber, blockHashCount) in results
                if blockHashCount > 1
            }
            logging.info(
                f'Found {len(blocksWithDuplicates)} blocks with multiple blockHashes'
            )
            badBlockTransactionsQuery = (sqlalchemy.select(
                TokenTransfersTable.c.transactionHash).where(
                    TokenTransfersTable.c.blockNumber.in_(
                        blocksWithDuplicates)))
            results = await database.execute(query=badBlockTransactionsQuery)
            badBlockTransactions = {
                transactionHash
                for (transactionHash, ) in results
            }
            logging.info(
                f'Found {len(badBlockTransactions)} transactions in bad blocks'
            )
            badBlockTransactionActualBlocks = set()
            for chunk in list_util.generate_chunks(
                    lst=list(badBlockTransactions), chunkSize=10):
                transactionReceipts = await asyncio.gather(*[
                    blockProcessor.get_transaction_receipt(
                        transactionHash=transactionHash)
                    for transactionHash in chunk
                ])
                badBlockTransactionActualBlocks.update({
                    transactionReceipt['blockNumber']
                    for transactionReceipt in transactionReceipts
                    if transactionReceipt is not None
                })
            badBlockTransactionBlocksQuery = (sqlalchemy.select(
                sqlalchemy.func.distinct(
                    TokenTransfersTable.c.blockNumber)).where(
                        TokenTransfersTable.c.transactionHash.in_(
                            badBlockTransactions)))
            results = await database.execute(
                query=badBlockTransactionBlocksQuery)
            badBlockTransactionBlocks = {
                blockNumber
                for (blockNumber, ) in results
            }
            allBadBlocks = blocksWithUncles.union(
                badBlockTransactionActualBlocks).union(
                    blocksWithDuplicates).union(badBlockTransactionBlocks)
            logging.info(f'Found {len(allBadBlocks)} blocks to reprocess')
            await notdManager.process_blocks_deferred(blockNumbers=allBadBlocks)
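            # Mark the remaining good blocks as processed by inserting rows into
            # BlocksTable, synthesising created/updated dates from their transfers.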
            insertQuery = BlocksTable.insert().from_select(
                [
                    BlocksTable.c.createdDate.key,
                    BlocksTable.c.updatedDate.key,
                    BlocksTable.c.blockNumber.key, BlocksTable.c.blockHash.key,
                    BlocksTable.c.blockDate.key
                ],
                sqlalchemy.select(
                    sqlalchemy.func.min(TokenTransfersTable.c.blockDate) +
                    datetime.timedelta(minutes=15),
                    sqlalchemy.func.min(TokenTransfersTable.c.blockDate) +
                    datetime.timedelta(minutes=15),
                    TokenTransfersTable.c.blockNumber,
                    TokenTransfersTable.c.blockHash,
                    sqlalchemy.func.min(
                        TokenTransfersTable.c.blockDate)).where(
                            TokenTransfersTable.c.blockNumber.in_(
                                set(blockNumbers) - allBadBlocks)).
                where(TokenTransfersTable.c.blockNumber >= start).where(
                    TokenTransfersTable.c.blockNumber < end).group_by(
                        TokenTransfersTable.c.blockNumber,
                        TokenTransfersTable.c.blockHash))
            async with database.create_transaction() as connection:
                await database.execute(connection=connection,
                                       query=insertQuery)
            currentBlockNumber = end
        await slackClient.post(
            text=
            f'reprocess_bad_blocks → ✅ completed: {startBlockNumber}-{endBlockNumber}'
        )
    except Exception as exception:
        await slackClient.post(
            text=
            f'reprocess_bad_blocks → ❌ error: {startBlockNumber}-{endBlockNumber}\n```{str(exception)}```'
        )
        raise exception
    finally:
        await database.disconnect()
        await workQueue.disconnect()
        await tokenQueue.disconnect()
        await requester.close_connections()
        await awsRequester.close_connections()
Example #16
async def run(registryAddress: Optional[str]):
    databaseConnectionString = Database.create_psql_connection_string(
        username=os.environ["DB_USERNAME"],
        password=os.environ["DB_PASSWORD"],
        host=os.environ["DB_HOST"],
        port=os.environ["DB_PORT"],
        name=os.environ["DB_NAME"])
    database = Database(connectionString=databaseConnectionString)
    saver = Saver(database=database)
    retriever = Retriever(database=database)
    workQueue = SqsMessageQueue(
        region='eu-west-1',
        accessKeyId=os.environ['AWS_KEY'],
        accessKeySecret=os.environ['AWS_SECRET'],
        queueUrl=
        'https://sqs.eu-west-1.amazonaws.com/097520841056/notd-work-queue')
    tokenQueue = SqsMessageQueue(
        region='eu-west-1',
        accessKeyId=os.environ['AWS_KEY'],
        accessKeySecret=os.environ['AWS_SECRET'],
        queueUrl=
        'https://sqs.eu-west-1.amazonaws.com/097520841056/notd-token-queue')
    requester = Requester()
    awsRequester = AwsRequester(accessKeyId=os.environ['AWS_KEY'],
                                accessKeySecret=os.environ['AWS_SECRET'])
    ethClient = RestEthClient(
        url=
        'https://nd-foldvvlb25awde7kbqfvpgvrrm.ethereum.managedblockchain.eu-west-1.amazonaws.com',
        requester=awsRequester)
    blockProcessor = BlockProcessor(ethClient=ethClient)
    tokenManager = TokenManager(saver=saver,
                                retriever=retriever,
                                tokenQueue=tokenQueue,
                                collectionProcessor=None,
                                tokenMetadataProcessor=None,
                                tokenOwnershipProcessor=None)
    # NOTE(krishan711): use tokenqueue so its lower prioritized work
    notdManager = NotdManager(blockProcessor=blockProcessor,
                              saver=saver,
                              retriever=retriever,
                              workQueue=tokenQueue,
                              tokenManager=tokenManager,
                              requester=requester,
                              revueApiKey=None)

    await database.connect()
    await workQueue.connect()
    await tokenQueue.connect()

    if registryAddress:
        registryAddresses = [registryAddress]
    else:
        query = sqlalchemy.select(TokenCollectionsTable.c.address).filter(
            TokenCollectionsTable.c.doesSupportErc1155 == True).order_by(
                TokenCollectionsTable.c.collectionId.desc())
        results = await database.execute(query=query)
        registryAddresses = [
            registryAddress for (registryAddress, ) in results
        ]
    print(
        f'Starting to reprocess blocks for {len(registryAddresses)} collections'
    )

    for index, registryAddress in enumerate(registryAddresses):
        print(
            f'Reprocessing blocks for collection: {registryAddress} (index: {index})'
        )
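        # Blocks not updated since this (hard-coded) cutoff are treated as stale.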
        minDate = datetime.datetime(2022, 4, 8, 9, 0)
        query = (
            sqlalchemy.select(sqlalchemy.distinct(BlocksTable.c.blockNumber)) \
            .join(TokenTransfersTable, TokenTransfersTable.c.blockNumber == BlocksTable.c.blockNumber) \
            .filter(TokenTransfersTable.c.registryAddress == registryAddress)
            .filter(BlocksTable.c.updatedDate < minDate)
        )
        results = await database.execute(query=query)
        blockNumbers = set(blockNumber for (blockNumber, ) in results)
        print(f'Processing {len(blockNumbers)} blocks')
        if len(blockNumbers) == 0:
            continue
        # await notdManager.process_blocks_deferred(blockNumbers=blockNumbers)
        for blockNumberChunk in list_util.generate_chunks(
                lst=list(blockNumbers), chunkSize=5):
            await asyncio.gather(*[
                notdManager.process_block(blockNumber=blockNumber)
                for blockNumber in blockNumberChunk
            ])
        query = (
            sqlalchemy.select(TokenMetadatasTable.c.tokenId) \
            .filter(TokenMetadatasTable.c.registryAddress == registryAddress)
        )
        results = await database.execute(query=query)
        collectionTokenIds = [(registryAddress, tokenId)
                              for (tokenId, ) in results]
        await tokenManager.update_token_ownerships_deferred(
            collectionTokenIds=collectionTokenIds)
    await database.disconnect()
    await workQueue.disconnect()
    await tokenQueue.disconnect()
    await requester.close_connections()
    await awsRequester.close_connections()
Example #17
async def create_consolidated_metadata(address: str, outputFilename: str):
    databaseConnectionString = Database.create_psql_connection_string(
        username=os.environ["DB_USERNAME"],
        password=os.environ["DB_PASSWORD"],
        host=os.environ["DB_HOST"],
        port=os.environ["DB_PORT"],
        name=os.environ["DB_NAME"])
    database = Database(connectionString=databaseConnectionString)
    retriever = Retriever(database=database)

    await database.connect()
    address = chain_util.normalize_address(value=address)
    collection = await retriever.get_collection_by_address(address=address)
    tokens = await retriever.list_token_metadatas(fieldFilters=[
        StringFieldFilter(fieldName=TokenMetadatasTable.c.registryAddress.key,
                          eq=address)
    ])
    with open(outputFilename, 'w') as outputFile:
        outputFile.write(
            json.dumps({
                'address': collection.address,
                'name': collection.name,
                'symbol': collection.symbol,
                'description': collection.description,
                'imageUrl': collection.imageUrl,
                'twitterUsername': collection.twitterUsername,
                'instagramUsername': collection.instagramUsername,
                'wikiUrl': collection.wikiUrl,
                'openseaSlug': collection.openseaSlug,
                'url': collection.url,
                'discordUrl': collection.discordUrl,
                'bannerImageUrl': collection.bannerImageUrl,
                'doesSupportErc721': collection.doesSupportErc721,
                'doesSupportErc1155': collection.doesSupportErc1155,
                'tokens': [{
                    'registryAddress': token.registryAddress,
                    'tokenId': token.tokenId,
                    'metadataUrl': token.metadataUrl,
                    'name': token.name,
                    'description': token.description,
                    'imageUrl': token.imageUrl,
                    'animationUrl': token.animationUrl,
                    'youtubeUrl': token.youtubeUrl,
                    'backgroundColor': token.backgroundColor,
                    'frameImageUrl': token.frameImageUrl,
                    'attributes': token.attributes,
                } for token in tokens],
            }))
    await database.disconnect()
Example #18
async def main():
    requestIdHolder = RequestIdHolder()
    name = os.environ.get('NAME', 'mdtp-worker')
    version = os.environ.get('VERSION', 'local')
    environment = os.environ.get('ENV', 'dev')
    isRunningDebugMode = environment == 'dev'

    if isRunningDebugMode:
        logging.init_basic_logging()
    else:
        logging.init_json_logging(name=name,
                                  version=version,
                                  environment=environment,
                                  requestIdHolder=requestIdHolder)

    databaseConnectionString = Database.create_psql_connection_string(
        username=os.environ["DB_USERNAME"],
        password=os.environ["DB_PASSWORD"],
        host=os.environ["DB_HOST"],
        port=os.environ["DB_PORT"],
        name=os.environ["DB_NAME"])
    database = Database(connectionString=databaseConnectionString)
    saver = Saver(database=database)
    retriever = Retriever(database=database)

    workQueue = SqsMessageQueue(
        region='eu-west-1',
        accessKeyId=os.environ['AWS_KEY'],
        accessKeySecret=os.environ['AWS_SECRET'],
        queueUrl=
        'https://sqs.eu-west-1.amazonaws.com/097520841056/mdtp-work-queue')
    s3Manager = S3Manager(region='eu-west-1',
                          accessKeyId=os.environ['AWS_KEY'],
                          accessKeySecret=os.environ['AWS_SECRET'])

    # NOTE(krishan711): The AWS eth instance is much slower with getLogs so fails when calling ContractStore.get_latest_update_block_number
    # awsRequester = AwsRequester(accessKeyId=os.environ['AWS_KEY'], accessKeySecret=os.environ['AWS_SECRET'])
    # ethClient = RestEthClient(url='https://nd-foldvvlb25awde7kbqfvpgvrrm.ethereum.managedblockchain.eu-west-1.amazonaws.com', requester=awsRequester)
    requester = Requester()
    ethClient = RestEthClient(url=os.environ['ALCHEMY_MAINNET_URL'],
                              requester=requester)
    rinkebyEthClient = RestEthClient(url=os.environ['ALCHEMY_URL'],
                                     requester=requester)
    mumbaiEthClient = RestEthClient(
        url='https://matic-mumbai.chainstacklabs.com', requester=requester)
    contractStore = create_contract_store(ethClient=ethClient,
                                          rinkebyEthClient=rinkebyEthClient,
                                          mumbaiEthClient=mumbaiEthClient)

    infuraIpfsAuth = BasicAuthentication(
        username=os.environ['INFURA_IPFS_PROJECT_ID'],
        password=os.environ['INFURA_IPFS_PROJECT_SECRET'])
    infuraIpfsRequester = Requester(
        headers={'authorization': f'Basic {infuraIpfsAuth.to_string()}'})
    ipfsManager = IpfsManager(requester=infuraIpfsRequester)

    imageManager = ImageManager(requester=requester,
                                s3Manager=s3Manager,
                                ipfsManager=ipfsManager)
    manager = MdtpManager(requester=requester,
                          retriever=retriever,
                          saver=saver,
                          s3Manager=s3Manager,
                          contractStore=contractStore,
                          workQueue=workQueue,
                          imageManager=imageManager,
                          ipfsManager=ipfsManager)

    processor = MdtpMessageProcessor(manager=manager)
    slackClient = SlackClient(webhookUrl=os.environ['SLACK_WEBHOOK_URL'],
                              requester=requester,
                              defaultSender='worker',
                              defaultChannel='mdtp-notifications')
    messageQueueProcessor = MessageQueueProcessor(queue=workQueue,
                                                  messageProcessor=processor,
                                                  slackClient=slackClient)

    await database.connect()
    await s3Manager.connect()
    await workQueue.connect()
    try:
        await messageQueueProcessor.run()
    finally:
        await requester.close_connections()
        await workQueue.disconnect()
        await s3Manager.disconnect()
        await database.disconnect()
Example #19
async def reprocess_collections(startId: int, endId: int, batchSize: int):
    databaseConnectionString = Database.create_psql_connection_string(
        username=os.environ["DB_USERNAME"],
        password=os.environ["DB_PASSWORD"],
        host=os.environ["DB_HOST"],
        port=os.environ["DB_PORT"],
        name=os.environ["DB_NAME"])
    database = Database(connectionString=databaseConnectionString)
    saver = Saver(database)
    retriever = Retriever(database)
    openseaApiKey = os.environ['OPENSEA_API_KEY']
    awsRequester = AwsRequester(accessKeyId=os.environ['AWS_KEY'],
                                accessKeySecret=os.environ['AWS_SECRET'])
    requester = Requester()
    # ethClient = RestEthClient(url=f'https://mainnet.infura.io/v3/{os.environ["INFURA_PROJECT_ID"]}', requester=requester)
    ethClient = RestEthClient(
        url=
        'https://nd-foldvvlb25awde7kbqfvpgvrrm.ethereum.managedblockchain.eu-west-1.amazonaws.com',
        requester=awsRequester)
    s3manager = S3Manager(region='eu-west-1',
                          accessKeyId=os.environ['AWS_KEY'],
                          accessKeySecret=os.environ['AWS_SECRET'])
    collectionProcessor = CollectionProcessor(
        requester=requester,
        ethClient=ethClient,
        openseaApiKey=openseaApiKey,
        s3manager=s3manager,
        bucketName=os.environ['S3_BUCKET'])
    tokenManager = TokenManager(saver=saver,
                                retriever=retriever,
                                tokenQueue=None,
                                collectionProcessor=collectionProcessor,
                                tokenMetadataProcessor=None)

    await database.connect()
    await s3manager.connect()
    currentId = startId
    while currentId < endId:
        start = currentId
        end = min(currentId + batchSize, endId)
        logging.info(f'Working on {start} to {end}...')
        async with database.transaction():
            query = TokenCollectionsTable.select()
            query = query.where(TokenCollectionsTable.c.collectionId >= start)
            query = query.where(TokenCollectionsTable.c.collectionId < end)
            collectionsToChange = [
                collection_from_row(row)
                async for row in database.iterate(query=query)
            ]
            logging.info(f'Updating {len(collectionsToChange)} collections...')
            for collection in collectionsToChange:
                logging.info(f'Updating collection: {collection.address}')
                try:
                    await tokenManager.update_collection(
                        address=collection.address, shouldForce=True)
                except Exception as e:
                    logging.exception(
                        f'Error processing {collection.collectionId}: {e}')
        currentId = currentId + batchSize

    await database.disconnect()
    await s3manager.disconnect()
    await requester.close_connections()
    await awsRequester.close_connections()
Example #20
async def backfill_collection_activities(startBlock: int, endBlock: int,
                                         batchSize: int):
    databaseConnectionString = Database.create_psql_connection_string(
        username=os.environ["DB_USERNAME"],
        password=os.environ["DB_PASSWORD"],
        host=os.environ["DB_HOST"],
        port=os.environ["DB_PORT"],
        name=os.environ["DB_NAME"])
    database = Database(connectionString=databaseConnectionString)
    retriever = Retriever(database=database)
    saver = Saver(database=database)
    tokenQueue = SqsMessageQueue(
        region='eu-west-1',
        accessKeyId=os.environ['AWS_KEY'],
        accessKeySecret=os.environ['AWS_SECRET'],
        queueUrl=
        'https://sqs.eu-west-1.amazonaws.com/097520841056/notd-token-queue')
    collectionActivityProcessor = CollectionActivityProcessor(
        retriever=retriever)
    tokenManager = TokenManager(
        saver=saver,
        retriever=retriever,
        tokenQueue=tokenQueue,
        collectionProcessor=None,
        tokenMetadataProcessor=None,
        tokenOwnershipProcessor=None,
        collectionActivityProcessor=collectionActivityProcessor)

    await database.connect()
    await tokenQueue.connect()
    currentBlockNumber = startBlock
    while currentBlockNumber < endBlock:
        endBlockNumber = min(currentBlockNumber + batchSize, endBlock)
        logging.info(f'Working on {currentBlockNumber} to {endBlockNumber}...')
        tokenTransfers = await retriever.list_token_transfers(
            fieldFilters=[
                IntegerFieldFilter(BlocksTable.c.blockNumber.key,
                                   gte=currentBlockNumber),
                IntegerFieldFilter(BlocksTable.c.blockNumber.key,
                                   lte=endBlockNumber),
            ],
            orders=[
                Order(fieldName=BlocksTable.c.blockDate.key,
                      direction=Direction.ASCENDING)
            ],
        )
        if len(tokenTransfers) == 0:
            print(
                f'Skipping {currentBlockNumber} to {endBlockNumber} with 0 transfers'
            )
        else:
            collectionHourlyActivities = await retriever.list_collections_activity(
                fieldFilters=[
                    DateFieldFilter(CollectionHourlyActivityTable.c.date.key,
                                    gte=date_hour_from_datetime(
                                        tokenTransfers[0].blockDate)),
                    DateFieldFilter(CollectionHourlyActivityTable.c.date.key,
                                    lte=date_hour_from_datetime(
                                        tokenTransfers[-1].blockDate)),
                ], )
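            # Skip (address, hour) pairs that already have recorded activity.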
            processedPairs = {
                (collectionHourlyActivity.address,
                 collectionHourlyActivity.date)
                for collectionHourlyActivity in collectionHourlyActivities
            }
            registryDatePairs = {
                (tokenTransfer.registryAddress,
                 date_hour_from_datetime(tokenTransfer.blockDate))
                for tokenTransfer in tokenTransfers
                if (tokenTransfer.registryAddress,
                    date_hour_from_datetime(tokenTransfer.blockDate)
                    ) not in processedPairs
            }
            print(
                f'Processing {len(registryDatePairs)} pairs from {len(tokenTransfers)} transfers'
            )
            # messages = [UpdateActivityForCollectionMessageContent(address=address, startDate=startDate).to_message() for (address, startDate) in registryDatePairs]
            # await tokenQueue.send_messages(messages=messages)
            for pairChunk in list_util.generate_chunks(
                    lst=list(registryDatePairs), chunkSize=50):
                await asyncio.gather(*[
                    tokenManager.update_activity_for_collection(
                        address=registryAddress, startDate=startDate)
                    for registryAddress, startDate in pairChunk
                ])
        currentBlockNumber = endBlockNumber

    await database.disconnect()
    await tokenQueue.disconnect()