Example 1
0
async def process_tokens_from_old_transfers(startBlockNumber: int, endBlockNumber: int, batchSize: int):
    """Re-process token metadata and collection info for tokens seen in old transfers.

    Scans TokenTransfersTable over blocks in [startBlockNumber, endBlockNumber)
    in windows of batchSize, de-duplicates (registryAddress, tokenId) pairs and
    registry addresses across the whole run, and hands each window's new items
    to _update_token_metadatas / _update_collections.
    """
    databaseConnectionString = Database.create_psql_connection_string(username=os.environ["DB_USERNAME"], password=os.environ["DB_PASSWORD"], host=os.environ["DB_HOST"], port=os.environ["DB_PORT"], name=os.environ["DB_NAME"])
    database = Database(connectionString=databaseConnectionString)
    saver = Saver(database=database)
    retriever = Retriever(database=database)
    s3manager = S3Manager(region='eu-west-1', accessKeyId=os.environ['AWS_KEY'], accessKeySecret=os.environ['AWS_SECRET'])
    workQueue = SqsMessageQueue(region='eu-west-1', accessKeyId=os.environ['AWS_KEY'], accessKeySecret=os.environ['AWS_SECRET'], queueUrl='https://sqs.eu-west-1.amazonaws.com/097520841056/notd-work-queue')
    tokenQueue = SqsMessageQueue(region='eu-west-1', accessKeyId=os.environ['AWS_KEY'], accessKeySecret=os.environ['AWS_SECRET'], queueUrl='https://sqs.eu-west-1.amazonaws.com/097520841056/notd-token-queue')
    awsRequester = AwsRequester(accessKeyId=os.environ['AWS_KEY'], accessKeySecret=os.environ['AWS_SECRET'])
    ethClient = RestEthClient(url='https://nd-foldvvlb25awde7kbqfvpgvrrm.ethereum.managedblockchain.eu-west-1.amazonaws.com', requester=awsRequester)
    requester = Requester()
    tokenMetadataProcessor = TokenMetadataProcessor(requester=requester, ethClient=ethClient, s3manager=s3manager, bucketName=os.environ['S3_BUCKET'])
    openseaApiKey = os.environ['OPENSEA_API_KEY']
    tokenOwnershipProcessor = TokenOwnershipProcessor(retriever=retriever)
    collectionProcessor = CollectionProcessor(requester=requester, ethClient=ethClient, openseaApiKey=openseaApiKey, s3manager=s3manager, bucketName=os.environ['S3_BUCKET'])
    tokenManager = TokenManager(saver=saver, retriever=retriever, tokenQueue=tokenQueue, collectionProcessor=collectionProcessor, tokenMetadataProcessor=tokenMetadataProcessor, tokenOwnershipProcessor=tokenOwnershipProcessor)

    await database.connect()
    await workQueue.connect()
    await s3manager.connect()
    await tokenQueue.connect()
    try:
        processedTokens = set()  # (registryAddress, tokenId) pairs already handled this run
        processedRegistries = set()  # registry addresses whose collection is already handled
        currentBlockNumber = startBlockNumber
        while currentBlockNumber < endBlockNumber:
            start = currentBlockNumber
            end = min(currentBlockNumber + batchSize, endBlockNumber)
            currentBlockNumber = end
            logging.info(f'Working on {start}-{end}...')
            query = (
                sqlalchemy.select(TokenTransfersTable.c.registryAddress, TokenTransfersTable.c.tokenId)
                .where(TokenTransfersTable.c.blockNumber >= start)
                .where(TokenTransfersTable.c.blockNumber < end)
            )
            result = await database.execute(query=query)
            tokensToProcess = set()
            collectionsToProcess = set()
            for (registryAddress, tokenId) in result:
                if (registryAddress, tokenId) in processedTokens:
                    continue
                processedTokens.add((registryAddress, tokenId))
                tokensToProcess.add((registryAddress, tokenId))
                if registryAddress in processedRegistries:
                    continue
                processedRegistries.add(registryAddress)
                collectionsToProcess.add(registryAddress)
            logging.info(f'len(tokensToProcess): {len(tokensToProcess)}')
            logging.info(f'len(collectionsToProcess): {len(collectionsToProcess)}')
            try:
                await _update_token_metadatas(tokensToProcess=tokensToProcess, tokenManager=tokenManager, retriever=retriever)
                await _update_collections(collectionsToProcess=collectionsToProcess, tokenManager=tokenManager, retriever=retriever)
            except Exception:
                # Log which window failed, then propagate; cleanup happens in finally.
                logging.error(f'Failed during: {start}-{end}')
                raise
    finally:
        # Always release connections, even when a batch fails.
        await database.disconnect()
        await workQueue.disconnect()
        await tokenQueue.disconnect()
        await s3manager.disconnect()
        await requester.close_connections()
        await awsRequester.close_connections()
Example 2
0
async def main():
    """Run the notd worker loop: drain the work queue, then the token queue.

    Sets up logging, database, S3 and both SQS queues, then loops forever
    giving the work queue priority; sleeps when both queues are empty.
    All connections are released in the finally block.
    """
    requestIdHolder = RequestIdHolder()
    name = os.environ.get('NAME', 'notd-api')
    version = os.environ.get('VERSION', 'local')
    environment = os.environ.get('ENV', 'dev')
    isRunningDebugMode = environment == 'dev'

    if isRunningDebugMode:
        logging.init_basic_logging()
    else:
        logging.init_json_logging(name=name, version=version, environment=environment, requestIdHolder=requestIdHolder)

    databaseConnectionString = Database.create_psql_connection_string(username=os.environ["DB_USERNAME"], password=os.environ["DB_PASSWORD"], host=os.environ["DB_HOST"], port=os.environ["DB_PORT"], name=os.environ["DB_NAME"])
    database = Database(connectionString=databaseConnectionString)
    saver = Saver(database=database)
    retriever = Retriever(database=database)
    s3manager = S3Manager(region='eu-west-1', accessKeyId=os.environ['AWS_KEY'], accessKeySecret=os.environ['AWS_SECRET'])
    workQueue = SqsMessageQueue(region='eu-west-1', accessKeyId=os.environ['AWS_KEY'], accessKeySecret=os.environ['AWS_SECRET'], queueUrl='https://sqs.eu-west-1.amazonaws.com/097520841056/notd-work-queue')
    tokenQueue = SqsMessageQueue(region='eu-west-1', accessKeyId=os.environ['AWS_KEY'], accessKeySecret=os.environ['AWS_SECRET'], queueUrl='https://sqs.eu-west-1.amazonaws.com/097520841056/notd-token-queue')
    awsRequester = AwsRequester(accessKeyId=os.environ['AWS_KEY'], accessKeySecret=os.environ['AWS_SECRET'])
    ethClient = RestEthClient(url='https://nd-foldvvlb25awde7kbqfvpgvrrm.ethereum.managedblockchain.eu-west-1.amazonaws.com', requester=awsRequester)
    blockProcessor = BlockProcessor(ethClient=ethClient)
    requester = Requester()
    tokenMetadataProcessor = TokenMetadataProcessor(requester=requester, ethClient=ethClient, s3manager=s3manager, bucketName=os.environ['S3_BUCKET'])
    openseaApiKey = os.environ['OPENSEA_API_KEY']
    collectionProcessor = CollectionProcessor(requester=requester, ethClient=ethClient, openseaApiKey=openseaApiKey, s3manager=s3manager, bucketName=os.environ['S3_BUCKET'])
    tokenOwnershipProcessor = TokenOwnershipProcessor(retriever=retriever)
    collectionActivityProcessor = CollectionActivityProcessor(retriever=retriever)
    revueApiKey = os.environ['REVUE_API_KEY']
    tokenManager = TokenManager(saver=saver, retriever=retriever, tokenQueue=tokenQueue, collectionProcessor=collectionProcessor, tokenMetadataProcessor=tokenMetadataProcessor, tokenOwnershipProcessor=tokenOwnershipProcessor, collectionActivityProcessor=collectionActivityProcessor)
    notdManager = NotdManager(blockProcessor=blockProcessor, saver=saver, retriever=retriever, workQueue=workQueue, tokenManager=tokenManager, requester=requester, revueApiKey=revueApiKey)

    processor = NotdMessageProcessor(notdManager=notdManager)
    slackClient = SlackClient(webhookUrl=os.environ['SLACK_WEBHOOK_URL'], requester=requester, defaultSender='worker', defaultChannel='notd-notifications')
    workQueueProcessor = MessageQueueProcessor(queue=workQueue, messageProcessor=processor, slackClient=slackClient, requestIdHolder=requestIdHolder)
    tokenQueueProcessor = MessageQueueProcessor(queue=tokenQueue, messageProcessor=processor, slackClient=slackClient, requestIdHolder=requestIdHolder)

    await database.connect()
    await s3manager.connect()
    await workQueue.connect()
    await tokenQueue.connect()
    try:
        while True:
            hasProcessedWork = await workQueueProcessor.execute_batch(batchSize=3, longPollSeconds=1, shouldProcessInParallel=True)
            if hasProcessedWork:
                continue
            hasProcessedToken = await tokenQueueProcessor.execute_batch(batchSize=10, longPollSeconds=1, shouldProcessInParallel=True)
            if hasProcessedToken:
                continue
            logging.info('No message received.. sleeping')
            # time.sleep would block the whole event loop; yield to it instead.
            await asyncio.sleep(60)
    finally:
        await database.disconnect()
        await s3manager.disconnect()
        await workQueue.disconnect()
        await tokenQueue.disconnect()
        await requester.close_connections()
        await awsRequester.close_connections()
async def process_token_ownerships(startTokenId: int, endTokenId: int, batchSize: int):
    """Recompute token ownerships for token metadata rows with ids in [startTokenId, endTokenId).

    Works in windows of batchSize ids, processing each window's tokens
    concurrently via process_token_ownership. Start/completion/failure are
    reported to Slack.
    """
    databaseConnectionString = Database.create_psql_connection_string(username=os.environ["DB_USERNAME"], password=os.environ["DB_PASSWORD"], host=os.environ["DB_HOST"], port=os.environ["DB_PORT"], name=os.environ["DB_NAME"])
    database = Database(connectionString=databaseConnectionString)
    saver = Saver(database=database)
    retriever = Retriever(database=database)
    s3manager = S3Manager(region='eu-west-1', accessKeyId=os.environ['AWS_KEY'], accessKeySecret=os.environ['AWS_SECRET'])
    workQueue = SqsMessageQueue(region='eu-west-1', accessKeyId=os.environ['AWS_KEY'], accessKeySecret=os.environ['AWS_SECRET'], queueUrl='https://sqs.eu-west-1.amazonaws.com/097520841056/notd-work-queue')
    tokenQueue = SqsMessageQueue(region='eu-west-1', accessKeyId=os.environ['AWS_KEY'], accessKeySecret=os.environ['AWS_SECRET'], queueUrl='https://sqs.eu-west-1.amazonaws.com/097520841056/notd-token-queue')
    awsRequester = AwsRequester(accessKeyId=os.environ['AWS_KEY'], accessKeySecret=os.environ['AWS_SECRET'])
    ethClient = RestEthClient(url='https://nd-foldvvlb25awde7kbqfvpgvrrm.ethereum.managedblockchain.eu-west-1.amazonaws.com', requester=awsRequester)
    requester = Requester()
    tokenMetadataProcessor = TokenMetadataProcessor(requester=requester, ethClient=ethClient, s3manager=s3manager, bucketName=os.environ['S3_BUCKET'])
    openseaApiKey = os.environ['OPENSEA_API_KEY']
    tokenOwnershipProcessor = TokenOwnershipProcessor(retriever=retriever)
    collectionProcessor = CollectionProcessor(requester=requester, ethClient=ethClient, openseaApiKey=openseaApiKey, s3manager=s3manager, bucketName=os.environ['S3_BUCKET'])
    tokenManager = TokenManager(saver=saver, retriever=retriever, tokenQueue=tokenQueue, collectionProcessor=collectionProcessor, tokenMetadataProcessor=tokenMetadataProcessor, tokenOwnershipProcessor=tokenOwnershipProcessor)
    slackClient = SlackClient(webhookUrl=os.environ['SLACK_WEBHOOK_URL'], requester=requester, defaultSender='worker', defaultChannel='notd-notifications')

    # NOTE: database.connect() was previously called twice; each resource is connected once.
    await database.connect()
    await workQueue.connect()
    await s3manager.connect()
    await tokenQueue.connect()
    await slackClient.post(text=f'process_token_ownerships → 🚧 started: {startTokenId}-{endTokenId}')
    try:
        currentTokenId = startTokenId
        while currentTokenId < endTokenId:
            start = currentTokenId
            end = min(currentTokenId + batchSize, endTokenId)
            currentTokenId = end
            logging.info(f'Working on {start}-{end}')
            query = TokenMetadatasTable.select() \
                .where(TokenMetadatasTable.c.tokenMetadataId >= start) \
                .where(TokenMetadatasTable.c.tokenMetadataId < end)
            tokenMetadatas = await retriever.query_token_metadatas(query=query)
            await asyncio.gather(*[process_token_ownership(tokenManager=tokenManager, registryAddress=tokenMetadata.registryAddress, tokenId=tokenMetadata.tokenId) for tokenMetadata in tokenMetadatas])
        await slackClient.post(text=f'process_token_ownerships → ✅ completed : {startTokenId}-{endTokenId}')
    except Exception as exception:
        # '❌' replaces a mojibake replacement character in the original message.
        await slackClient.post(text=f'process_token_ownerships → ❌ error: {startTokenId}-{endTokenId}\n```{str(exception)}```')
        raise
    finally:
        await database.disconnect()
        await workQueue.disconnect()
        await tokenQueue.disconnect()
        await s3manager.disconnect()
        await requester.close_connections()
        await awsRequester.close_connections()
async def run(address: str, shouldForce: Optional[bool]):
    """Enqueue an update-collection-tokens message for the given collection address."""
    queue = SqsMessageQueue(
        region='eu-west-1',
        accessKeyId=os.environ['AWS_KEY'],
        accessKeySecret=os.environ['AWS_SECRET'],
        queueUrl='https://sqs.eu-west-1.amazonaws.com/097520841056/notd-token-queue',
    )
    await queue.connect()
    message = UpdateCollectionTokensMessageContent(address=address, shouldForce=shouldForce).to_message()
    await queue.send_message(message=message)
    await queue.disconnect()
Example 5
0
async def process_collection(address: str, shouldDefer: bool):
    """Re-run token metadata updates for every known token of one collection.

    Args:
        address: registry address of the collection to process.
        shouldDefer: when True, queue the updates instead of running them inline.
    """
    databaseConnectionString = Database.create_psql_connection_string(username=os.environ["DB_USERNAME"], password=os.environ["DB_PASSWORD"], host=os.environ["DB_HOST"], port=os.environ["DB_PORT"], name=os.environ["DB_NAME"])
    database = Database(connectionString=databaseConnectionString)
    saver = Saver(database=database)
    retriever = Retriever(database=database)

    s3manager = S3Manager(region='eu-west-1', accessKeyId=os.environ['AWS_KEY'], accessKeySecret=os.environ['AWS_SECRET'])
    workQueue = SqsMessageQueue(region='eu-west-1', accessKeyId=os.environ['AWS_KEY'], accessKeySecret=os.environ['AWS_SECRET'], queueUrl='https://sqs.eu-west-1.amazonaws.com/097520841056/notd-work-queue')
    tokenQueue = SqsMessageQueue(region='eu-west-1', accessKeyId=os.environ['AWS_KEY'], accessKeySecret=os.environ['AWS_SECRET'], queueUrl='https://sqs.eu-west-1.amazonaws.com/097520841056/notd-token-queue')
    requester = Requester()

    awsRequester = AwsRequester(accessKeyId=os.environ['AWS_KEY'], accessKeySecret=os.environ['AWS_SECRET'])
    ethClient = RestEthClient(url='https://nd-foldvvlb25awde7kbqfvpgvrrm.ethereum.managedblockchain.eu-west-1.amazonaws.com', requester=awsRequester)
    blockProcessor = BlockProcessor(ethClient=ethClient)
    tokenMetadataProcessor = TokenMetadataProcessor(requester=requester, ethClient=ethClient, s3manager=s3manager, bucketName=os.environ['S3_BUCKET'])
    openseaApiKey = os.environ['OPENSEA_API_KEY']
    collectionProcessor = CollectionProcessor(requester=requester, ethClient=ethClient, openseaApiKey=openseaApiKey, s3manager=s3manager, bucketName=os.environ['S3_BUCKET'])
    revueApiKey = os.environ['REVUE_API_KEY']
    tokenManager = TokenManager(saver=saver, retriever=retriever, tokenQueue=tokenQueue, collectionProcessor=collectionProcessor, tokenMetadataProcessor=tokenMetadataProcessor)
    notdManager = NotdManager(blockProcessor=blockProcessor, saver=saver, retriever=retriever, workQueue=workQueue, tokenManager=tokenManager, requester=requester, revueApiKey=revueApiKey)

    await database.connect()
    await s3manager.connect()
    await workQueue.connect()
    await tokenQueue.connect()
    try:
        # NOTE(review): this filters token metadatas via TokenTransfersTable.c.registryAddress.key;
        # the .key resolves to the 'registryAddress' column name, but confirm the retriever
        # expects a TokenMetadatasTable field name here.
        retrievedCollectionTokenMetadatas = await retriever.list_token_metadatas(
            fieldFilters=[
                StringFieldFilter(fieldName=TokenTransfersTable.c.registryAddress.key, eq=address),
            ],
        )
        for tokenMetadata in retrievedCollectionTokenMetadatas:
            if shouldDefer:
                await notdManager.update_token_metadata_deferred(registryAddress=address, tokenId=tokenMetadata.tokenId, shouldForce=True)
            else:
                await notdManager.update_token_metadata(registryAddress=address, tokenId=tokenMetadata.tokenId, shouldForce=True)
    finally:
        # Always release connections, even if an update fails part-way.
        await database.disconnect()
        await s3manager.disconnect()
        await workQueue.disconnect()
        await tokenQueue.disconnect()
        await requester.close_connections()
        await awsRequester.close_connections()
Example 6
0
async def check_all_processed(startBlockNumber: int, endBlockNumber: int,
                              batchSize: int):
    """Find blocks in [startBlockNumber, endBlockNumber) with no recorded token
    transfers and enqueue them for (re)processing.

    For each batchSize-sized window, queries the distinct block numbers present
    in TokenTransfersTable; blocks missing from the result are treated as
    unprocessed and sent to the work queue.
    """
    databaseConnectionString = Database.create_psql_connection_string(
        username=os.environ["DB_USERNAME"],
        password=os.environ["DB_PASSWORD"],
        host=os.environ["DB_HOST"],
        port=os.environ["DB_PORT"],
        name=os.environ["DB_NAME"])
    database = Database(connectionString=databaseConnectionString)
    workQueue = SqsMessageQueue(
        region='eu-west-1',
        accessKeyId=os.environ['AWS_KEY'],
        accessKeySecret=os.environ['AWS_SECRET'],
        queueUrl='https://sqs.eu-west-1.amazonaws.com/097520841056/notd-work-queue')
    await database.connect()
    await workQueue.connect()
    try:
        currentBlockNumber = startBlockNumber
        while currentBlockNumber < endBlockNumber:
            start = currentBlockNumber
            end = min(currentBlockNumber + batchSize, endBlockNumber)
            currentBlockNumber = end
            logging.info(f'Working on {start} - {end}...')
            async with database.transaction():
                query = TokenTransfersTable.select() \
                    .with_only_columns([TokenTransfersTable.c.blockNumber]) \
                    .filter(TokenTransfersTable.c.blockNumber >= start) \
                    .filter(TokenTransfersTable.c.blockNumber < end) \
                    .distinct(TokenTransfersTable.c.blockNumber)
                processedBlocks = [row[0] for row in await database.fetch_all(query)]
            unprocessedBlocks = set(range(start, end)) - set(processedBlocks)
            logging.info(f'Processing {len(unprocessedBlocks)} blocks in {start} - {end}')
            # Skip empty windows instead of enqueueing a no-op message.
            if unprocessedBlocks:
                # Sorted list for deterministic, serialization-friendly content.
                await workQueue.send_message(message=ProcessBlocksMessageContent(blockNumbers=sorted(unprocessedBlocks)).to_message())
    finally:
        await database.disconnect()
        await workQueue.disconnect()
async def run(blockNumber: Optional[int], startBlockNumber: Optional[int],
              endBlockNumber: Optional[int]):
    """Enqueue block-processing messages: one block, or every block in
    [startBlockNumber, endBlockNumber).

    Raises:
        Exception: if neither blockNumber nor both range bounds are provided.
    """
    workQueue = SqsMessageQueue(
        region='eu-west-1',
        accessKeyId=os.environ['AWS_KEY'],
        accessKeySecret=os.environ['AWS_SECRET'],
        queueUrl='https://sqs.eu-west-1.amazonaws.com/097520841056/notd-work-queue')

    await workQueue.connect()
    try:
        # Compare against None so block number 0 (genesis) is accepted.
        if blockNumber is not None:
            await workQueue.send_message(message=ProcessBlockMessageContent(blockNumber=blockNumber).to_message())
        elif startBlockNumber is not None and endBlockNumber is not None:
            for currentBlockNumber in range(startBlockNumber, endBlockNumber):
                await workQueue.send_message(message=ProcessBlockMessageContent(blockNumber=currentBlockNumber).to_message())
        else:
            raise Exception(
                'Either blockNumber or startBlockNumber and endBlockNumber must be passed in.'
            )
    finally:
        # Disconnect even on the error path.
        await workQueue.disconnect()
Example 8
0
                              environment=environment,
                              requestIdHolder=requestIdHolder)

databaseConnectionString = Database.create_psql_connection_string(
    username=os.environ["DB_USERNAME"],
    password=os.environ["DB_PASSWORD"],
    host=os.environ["DB_HOST"],
    port=os.environ["DB_PORT"],
    name=os.environ["DB_NAME"])
database = Database(connectionString=databaseConnectionString)
saver = Saver(database=database)
retriever = Retriever(database=database)

workQueue = SqsMessageQueue(
    region='eu-west-1',
    accessKeyId=os.environ['AWS_KEY'],
    accessKeySecret=os.environ['AWS_SECRET'],
    queueUrl='https://sqs.eu-west-1.amazonaws.com/097520841056/mdtp-work-queue'
)
s3Manager = S3Manager(region='eu-west-1',
                      accessKeyId=os.environ['AWS_KEY'],
                      accessKeySecret=os.environ['AWS_SECRET'])

# NOTE(krishan711): The AWS eth instance is much slower with getLogs so fails when calling ContractStore.get_latest_update_block_number
# awsRequester = AwsRequester(accessKeyId=os.environ['AWS_KEY'], accessKeySecret=os.environ['AWS_SECRET'])
# ethClient = RestEthClient(url='https://nd-foldvvlb25awde7kbqfvpgvrrm.ethereum.managedblockchain.eu-west-1.amazonaws.com', requester=awsRequester)
requester = Requester()
ethClient = RestEthClient(url=os.environ['ALCHEMY_MAINNET_URL'],
                          requester=requester)
rinkebyEthClient = RestEthClient(url=os.environ['ALCHEMY_URL'],
                                 requester=requester)
mumbaiEthClient = RestEthClient(url='https://matic-mumbai.chainstacklabs.com',
Example 9
0
async def reprocess_metadata(startId: Optional[int], endId: Optional[int],
                             batchSize: Optional[int]):
    """Re-run metadata processing from S3 for token metadata rows in [startId, endId).

    Args:
        startId: first tokenMetadataId to process; defaults to 0.
        endId: one past the last id to process; defaults to current max id + 1.
        batchSize: ids per batch; defaults to 100 (previously a missing value
            crashed with a TypeError in the batching arithmetic).
    """
    databaseConnectionString = Database.create_psql_connection_string(
        username=os.environ["DB_USERNAME"],
        password=os.environ["DB_PASSWORD"],
        host=os.environ["DB_HOST"],
        port=os.environ["DB_PORT"],
        name=os.environ["DB_NAME"])
    database = Database(connectionString=databaseConnectionString)
    saver = Saver(database=database)
    retriever = Retriever(database=database)
    s3manager = S3Manager(region='eu-west-1', accessKeyId=os.environ['AWS_KEY'], accessKeySecret=os.environ['AWS_SECRET'])
    tokenQueue = SqsMessageQueue(
        region='eu-west-1',
        accessKeyId=os.environ['AWS_KEY'],
        accessKeySecret=os.environ['AWS_SECRET'],
        queueUrl='https://sqs.eu-west-1.amazonaws.com/097520841056/notd-token-queue')
    awsRequester = AwsRequester(accessKeyId=os.environ['AWS_KEY'], accessKeySecret=os.environ['AWS_SECRET'])
    requester = Requester()
    ethClient = RestEthClient(url='https://nd-foldvvlb25awde7kbqfvpgvrrm.ethereum.managedblockchain.eu-west-1.amazonaws.com', requester=awsRequester)
    tokenMetadataProcessor = TokenMetadataProcessor(requester=requester, ethClient=ethClient, s3manager=s3manager, bucketName=os.environ['S3_BUCKET'])
    openseaApiKey = os.environ['OPENSEA_API_KEY']
    collectionProcessor = CollectionProcessor(requester=requester, ethClient=ethClient, openseaApiKey=openseaApiKey, s3manager=s3manager, bucketName=os.environ['S3_BUCKET'])
    tokenManager = TokenManager(saver=saver, retriever=retriever, tokenQueue=tokenQueue, collectionProcessor=collectionProcessor, tokenMetadataProcessor=tokenMetadataProcessor)

    await s3manager.connect()
    await tokenQueue.connect()
    await database.connect()
    try:
        if not startId:
            startId = 0
        if not endId:
            # Default to one past the highest existing tokenMetadataId.
            maxTokenMetadata = await retriever.list_token_metadatas(
                limit=1,
                orders=[Order(fieldName=TokenMetadatasTable.c.tokenMetadataId.key, direction=Direction.DESCENDING)])
            logging.info(f'Defaulting endId from {maxTokenMetadata}')
            endId = maxTokenMetadata[0].tokenMetadataId + 1
        if not batchSize:
            batchSize = 100
        currentId = startId
        while currentId < endId:
            start = currentId
            end = min(currentId + batchSize, endId)
            currentId = end
            query = TokenMetadatasTable.select()
            query = query.where(TokenMetadatasTable.c.tokenMetadataId >= start)
            query = query.where(TokenMetadatasTable.c.tokenMetadataId < end)
            # Only rows not already refreshed since this cutoff date.
            query = query.where(TokenMetadatasTable.c.updatedDate < datetime.datetime(2022, 2, 13))
            query = query.order_by(TokenMetadatasTable.c.tokenMetadataId.asc())
            tokenMetadatasToChange = [token_metadata_from_row(row) for row in await database.execute(query=query)]
            logging.info(f'Working on {start} - {end}')
            logging.info(f'Updating {len(tokenMetadatasToChange)} transfers...')
            await asyncio.gather(*[
                # NOTE: the helper's parameter is spelled 'tokenManger' (sic) — keep the keyword.
                _reprocess_metadata_from_s3(
                    tokenMetadataProcessor=tokenMetadataProcessor,
                    s3manager=s3manager,
                    tokenManger=tokenManager,
                    tokenMetadata=tokenMetadata)
                for tokenMetadata in tokenMetadatasToChange
            ])
    finally:
        # Release every resource regardless of failures above.
        await s3manager.disconnect()
        await tokenQueue.disconnect()
        await awsRequester.close_connections()
        await requester.close_connections()
        await database.disconnect()
Example 10
0
async def run(registryAddress: Optional[str]):
    """Reprocess stale blocks and refresh token ownerships for ERC-1155 collections.

    If registryAddress is given, only that collection is handled; otherwise every
    collection flagged doesSupportErc1155 is processed, newest first.
    """
    databaseConnectionString = Database.create_psql_connection_string(
        username=os.environ["DB_USERNAME"],
        password=os.environ["DB_PASSWORD"],
        host=os.environ["DB_HOST"],
        port=os.environ["DB_PORT"],
        name=os.environ["DB_NAME"])
    database = Database(connectionString=databaseConnectionString)
    saver = Saver(database=database)
    retriever = Retriever(database=database)
    workQueue = SqsMessageQueue(
        region='eu-west-1',
        accessKeyId=os.environ['AWS_KEY'],
        accessKeySecret=os.environ['AWS_SECRET'],
        queueUrl=
        'https://sqs.eu-west-1.amazonaws.com/097520841056/notd-work-queue')
    tokenQueue = SqsMessageQueue(
        region='eu-west-1',
        accessKeyId=os.environ['AWS_KEY'],
        accessKeySecret=os.environ['AWS_SECRET'],
        queueUrl=
        'https://sqs.eu-west-1.amazonaws.com/097520841056/notd-token-queue')
    requester = Requester()
    awsRequester = AwsRequester(accessKeyId=os.environ['AWS_KEY'],
                                accessKeySecret=os.environ['AWS_SECRET'])
    ethClient = RestEthClient(
        url=
        'https://nd-foldvvlb25awde7kbqfvpgvrrm.ethereum.managedblockchain.eu-west-1.amazonaws.com',
        requester=awsRequester)
    blockProcessor = BlockProcessor(ethClient=ethClient)
    # Processors are not needed by this script, so the manager is built without them.
    tokenManager = TokenManager(saver=saver,
                                retriever=retriever,
                                tokenQueue=tokenQueue,
                                collectionProcessor=None,
                                tokenMetadataProcessor=None,
                                tokenOwnershipProcessor=None)
    # NOTE(krishan711): use tokenqueue so its lower prioritized work
    notdManager = NotdManager(blockProcessor=blockProcessor,
                              saver=saver,
                              retriever=retriever,
                              workQueue=tokenQueue,
                              tokenManager=tokenManager,
                              requester=requester,
                              revueApiKey=None)

    await database.connect()
    await workQueue.connect()
    await tokenQueue.connect()

    if registryAddress:
        registryAddresses = [registryAddress]
    else:
        # All ERC-1155 collections, newest collection id first.
        query = sqlalchemy.select(TokenCollectionsTable.c.address).filter(
            TokenCollectionsTable.c.doesSupportErc1155 == True).order_by(
                TokenCollectionsTable.c.collectionId.desc())
        results = await database.execute(query=query)
        registryAddresses = [
            registryAddress for (registryAddress, ) in results
        ]
    print(
        f'Starting to reprocess blocks for {len(registryAddresses)} collections'
    )

    for index, registryAddress in enumerate(registryAddresses):
        print(
            f'Reprocessing blocks for collection: {registryAddress} (index: {index})'
        )
        # Only blocks whose stored record predates this cutoff are reprocessed.
        minDate = datetime.datetime(2022, 4, 8, 9, 0)
        query = (
            sqlalchemy.select(sqlalchemy.distinct(BlocksTable.c.blockNumber)) \
            .join(TokenTransfersTable, TokenTransfersTable.c.blockNumber == BlocksTable.c.blockNumber) \
            .filter(TokenTransfersTable.c.registryAddress == registryAddress)
            .filter(BlocksTable.c.updatedDate < minDate)
        )
        results = await database.execute(query=query)
        blockNumbers = set(blockNumber for (blockNumber, ) in results)
        print(f'Processing {len(blockNumbers)} blocks')
        if len(blockNumbers) == 0:
            continue
        # await notdManager.process_blocks_deferred(blockNumbers=blockNumbers)
        # Process blocks 5 at a time to bound concurrency against the eth node.
        for blockNumberChunk in list_util.generate_chunks(
                lst=list(blockNumbers), chunkSize=5):
            await asyncio.gather(*[
                notdManager.process_block(blockNumber=blockNumber)
                for blockNumber in blockNumberChunk
            ])
        # Queue an ownership refresh for every token in the collection.
        query = (
            sqlalchemy.select(TokenMetadatasTable.c.tokenId) \
            .filter(TokenMetadatasTable.c.registryAddress == registryAddress)
        )
        results = await database.execute(query=query)
        collectionTokenIds = [(registryAddress, tokenId)
                              for (tokenId, ) in results]
        await tokenManager.update_token_ownerships_deferred(
            collectionTokenIds=collectionTokenIds)
    await database.disconnect()
    await workQueue.disconnect()
    await tokenQueue.disconnect()
    await requester.close_connections()
    await awsRequester.close_connections()
Example 11
0
async def main():
    """Run the mdtp worker: consume work-queue messages until the processor stops.

    Sets up logging, database, S3, the contract store for mainnet/rinkeby/mumbai,
    IPFS and image managers, then runs the message-queue processor. Connections
    are always released in the finally block.
    """
    requestIdHolder = RequestIdHolder()
    name = os.environ.get('NAME', 'mdtp-worker')
    version = os.environ.get('VERSION', 'local')
    environment = os.environ.get('ENV', 'dev')
    isRunningDebugMode = environment == 'dev'

    if isRunningDebugMode:
        logging.init_basic_logging()
    else:
        logging.init_json_logging(name=name, version=version, environment=environment, requestIdHolder=requestIdHolder)

    databaseConnectionString = Database.create_psql_connection_string(
        username=os.environ["DB_USERNAME"],
        password=os.environ["DB_PASSWORD"],
        host=os.environ["DB_HOST"],
        port=os.environ["DB_PORT"],
        name=os.environ["DB_NAME"])
    database = Database(connectionString=databaseConnectionString)
    saver = Saver(database=database)
    retriever = Retriever(database=database)

    workQueue = SqsMessageQueue(
        region='eu-west-1',
        accessKeyId=os.environ['AWS_KEY'],
        accessKeySecret=os.environ['AWS_SECRET'],
        queueUrl='https://sqs.eu-west-1.amazonaws.com/097520841056/mdtp-work-queue')
    s3Manager = S3Manager(region='eu-west-1', accessKeyId=os.environ['AWS_KEY'], accessKeySecret=os.environ['AWS_SECRET'])

    # NOTE(krishan711): The AWS eth instance is much slower with getLogs so fails when calling ContractStore.get_latest_update_block_number
    # awsRequester = AwsRequester(accessKeyId=os.environ['AWS_KEY'], accessKeySecret=os.environ['AWS_SECRET'])
    # ethClient = RestEthClient(url='https://nd-foldvvlb25awde7kbqfvpgvrrm.ethereum.managedblockchain.eu-west-1.amazonaws.com', requester=awsRequester)
    requester = Requester()
    ethClient = RestEthClient(url=os.environ['ALCHEMY_MAINNET_URL'], requester=requester)
    rinkebyEthClient = RestEthClient(url=os.environ['ALCHEMY_URL'], requester=requester)
    mumbaiEthClient = RestEthClient(url='https://matic-mumbai.chainstacklabs.com', requester=requester)
    contractStore = create_contract_store(ethClient=ethClient, rinkebyEthClient=rinkebyEthClient, mumbaiEthClient=mumbaiEthClient)

    infuraIpfsAuth = BasicAuthentication(username=os.environ['INFURA_IPFS_PROJECT_ID'], password=os.environ['INFURA_IPFS_PROJECT_SECRET'])
    infuraIpfsRequester = Requester(headers={'authorization': f'Basic {infuraIpfsAuth.to_string()}'})
    ipfsManager = IpfsManager(requester=infuraIpfsRequester)

    imageManager = ImageManager(requester=requester, s3Manager=s3Manager, ipfsManager=ipfsManager)
    manager = MdtpManager(requester=requester, retriever=retriever, saver=saver, s3Manager=s3Manager, contractStore=contractStore, workQueue=workQueue, imageManager=imageManager, ipfsManager=ipfsManager)

    processor = MdtpMessageProcessor(manager=manager)
    slackClient = SlackClient(webhookUrl=os.environ['SLACK_WEBHOOK_URL'], requester=requester, defaultSender='worker', defaultChannel='mdtp-notifications')
    messageQueueProcessor = MessageQueueProcessor(queue=workQueue, messageProcessor=processor, slackClient=slackClient)

    await database.connect()
    await s3Manager.connect()
    await workQueue.connect()
    try:
        await messageQueueProcessor.run()
    finally:
        # Previously cleanup only ran on a clean return; ensure it runs on failure too.
        await requester.close_connections()
        await infuraIpfsRequester.close_connections()
        await workQueue.disconnect()
        await s3Manager.disconnect()
        await database.disconnect()
Example 12
0
async def run():
    """Enqueue a single ReceiveNewBlocks trigger message on the notd work queue.

    Fire-and-forget: connects, sends one message, and disconnects immediately.
    """
    queue = SqsMessageQueue(region='eu-west-1', accessKeyId=os.environ['AWS_KEY'], accessKeySecret=os.environ['AWS_SECRET'], queueUrl='https://sqs.eu-west-1.amazonaws.com/097520841056/notd-work-queue')
    await queue.connect()
    message = ReceiveNewBlocksMessageContent().to_message()
    await queue.send_message(message=message)
    await queue.disconnect()
Esempio n. 13
0
async def reprocess_bad_blocks(startBlockNumber: int, endBlockNumber: int, batchSize: int) -> None:
    """Re-queue "bad" blocks in [startBlockNumber, endBlockNumber) for reprocessing.

    A block is treated as bad when its stored token transfers reference more than
    one blockHash (written across a chain reorg) or when one of its recorded
    transactions actually landed in a different block according to the chain.
    Bad blocks are sent for deferred processing; the remaining blocks in the
    batch are marked processed by inserting rows into the blocks table derived
    from their transfers. Start/finish/failure are reported to Slack.

    Args:
        startBlockNumber: first block number to inspect (inclusive).
        endBlockNumber: last block number to inspect (exclusive).
        batchSize: number of blocks handled per database round-trip.
    """
    databaseConnectionString = Database.create_psql_connection_string(username=os.environ["DB_USERNAME"], password=os.environ["DB_PASSWORD"], host=os.environ["DB_HOST"], port=os.environ["DB_PORT"], name=os.environ["DB_NAME"])
    database = Database(connectionString=databaseConnectionString)
    saver = Saver(database=database)
    retriever = Retriever(database=database)
    workQueue = SqsMessageQueue(region='eu-west-1', accessKeyId=os.environ['AWS_KEY'], accessKeySecret=os.environ['AWS_SECRET'], queueUrl='https://sqs.eu-west-1.amazonaws.com/097520841056/notd-work-queue')
    tokenQueue = SqsMessageQueue(region='eu-west-1', accessKeyId=os.environ['AWS_KEY'], accessKeySecret=os.environ['AWS_SECRET'], queueUrl='https://sqs.eu-west-1.amazonaws.com/097520841056/notd-token-queue')
    requester = Requester()
    slackClient = SlackClient(webhookUrl=os.environ['SLACK_WEBHOOK_URL'], requester=requester, defaultSender='worker', defaultChannel='notd-notifications')
    awsRequester = AwsRequester(accessKeyId=os.environ['AWS_KEY'], accessKeySecret=os.environ['AWS_SECRET'])
    ethClient = RestEthClient(url='https://nd-foldvvlb25awde7kbqfvpgvrrm.ethereum.managedblockchain.eu-west-1.amazonaws.com', requester=awsRequester)
    blockProcessor = BlockProcessor(ethClient=ethClient)
    # NOTE(krishan711): use tokenQueue so it's lower-prioritized work
    notdManager = NotdManager(blockProcessor=None, saver=saver, retriever=retriever, workQueue=tokenQueue, tokenManager=None, requester=requester, revueApiKey=None)

    await database.connect()
    await workQueue.connect()
    await tokenQueue.connect()
    await slackClient.post(text=f'reprocess_bad_blocks → 🚧 started: {startBlockNumber}-{endBlockNumber}')
    try:
        currentBlockNumber = startBlockNumber
        while currentBlockNumber < endBlockNumber:
            start = currentBlockNumber
            end = min(start + batchSize, endBlockNumber)
            logging.info(f'Working on {start}-{end}')
            blockNumbers = set(range(start, end))
            # Skip blocks already recorded as processed in the blocks table.
            processedBlocksQuery = (
                sqlalchemy.select(BlocksTable.c.blockNumber)
                .where(BlocksTable.c.blockNumber >= start)
                .where(BlocksTable.c.blockNumber < end))
            results = await database.execute(query=processedBlocksQuery)
            processedBlocks = {blockNumber for (blockNumber, ) in results}
            logging.info(f'Ignoring {len(processedBlocks)} processedBlocks')
            blockNumbers = list(blockNumbers - processedBlocks)
            # NOTE(review): uncle detection via get_block_uncle_count was deliberately
            # disabled (previously commented out); treated as "no blocks with uncles".
            blocksWithUncles = set()
            logging.info(f'Found {len(blocksWithUncles)} blocks with uncles')
            # Blocks whose transfers reference more than one blockHash were written
            # across a reorg and need reprocessing.
            blocksWithDuplicatesQuery = (
                sqlalchemy.select(TokenTransfersTable.c.blockNumber, sqlalchemy.func.count(sqlalchemy.func.distinct(TokenTransfersTable.c.blockHash)))
                .where(TokenTransfersTable.c.blockNumber >= start)
                .where(TokenTransfersTable.c.blockNumber < end)
                .group_by(TokenTransfersTable.c.blockNumber))
            results = await database.execute(query=blocksWithDuplicatesQuery)
            blocksWithDuplicates = {blockNumber for (blockNumber, blockHashCount) in results if blockHashCount > 1}
            logging.info(f'Found {len(blocksWithDuplicates)} blocks with multiple blockHashes')
            badBlockTransactionsQuery = (
                sqlalchemy.select(TokenTransfersTable.c.transactionHash)
                .where(TokenTransfersTable.c.blockNumber.in_(blocksWithDuplicates)))
            results = await database.execute(query=badBlockTransactionsQuery)
            badBlockTransactions = {transactionHash for (transactionHash, ) in results}
            logging.info(f'Found {len(badBlockTransactions)} transactions in bad blocks')
            # Ask the chain where each suspect transaction actually landed; receipts
            # can be None for transactions the node no longer knows about.
            badBlockTransactionActualBlocks = set()
            for chunk in list_util.generate_chunks(lst=list(badBlockTransactions), chunkSize=10):
                transactionReceipts = await asyncio.gather(*[blockProcessor.get_transaction_receipt(transactionHash=transactionHash) for transactionHash in chunk])
                badBlockTransactionActualBlocks.update({transactionReceipt['blockNumber'] for transactionReceipt in transactionReceipts if transactionReceipt is not None})
            badBlockTransactionBlocksQuery = (
                sqlalchemy.select(sqlalchemy.func.distinct(TokenTransfersTable.c.blockNumber))
                .where(TokenTransfersTable.c.transactionHash.in_(badBlockTransactions)))
            results = await database.execute(query=badBlockTransactionBlocksQuery)
            badBlockTransactionBlocks = {blockNumber for (blockNumber, ) in results}
            allBadBlocks = blocksWithUncles.union(badBlockTransactionActualBlocks).union(blocksWithDuplicates).union(badBlockTransactionBlocks)
            logging.info(f'Found {len(allBadBlocks)} blocks to reprocess')
            await notdManager.process_blocks_deferred(blockNumbers=allBadBlocks)
            # Mark the remaining (good) blocks as processed by deriving block rows
            # from their transfers; created/updated dates are offset 15 minutes past
            # the earliest transfer's blockDate.
            insertQuery = BlocksTable.insert().from_select(
                [
                    BlocksTable.c.createdDate.key,
                    BlocksTable.c.updatedDate.key,
                    BlocksTable.c.blockNumber.key,
                    BlocksTable.c.blockHash.key,
                    BlocksTable.c.blockDate.key,
                ],
                sqlalchemy.select(
                    sqlalchemy.func.min(TokenTransfersTable.c.blockDate) + datetime.timedelta(minutes=15),
                    sqlalchemy.func.min(TokenTransfersTable.c.blockDate) + datetime.timedelta(minutes=15),
                    TokenTransfersTable.c.blockNumber,
                    TokenTransfersTable.c.blockHash,
                    sqlalchemy.func.min(TokenTransfersTable.c.blockDate),
                ).where(TokenTransfersTable.c.blockNumber.in_(set(blockNumbers) - allBadBlocks))
                .where(TokenTransfersTable.c.blockNumber >= start)
                .where(TokenTransfersTable.c.blockNumber < end)
                .group_by(TokenTransfersTable.c.blockNumber, TokenTransfersTable.c.blockHash))
            async with database.create_transaction() as connection:
                await database.execute(connection=connection, query=insertQuery)
            currentBlockNumber = end
        await slackClient.post(text=f'reprocess_bad_blocks → ✅ completed : {startBlockNumber}-{endBlockNumber}')
    except Exception as exception:
        # Fixed mis-encoded character (was U+FFFD) in the alert emoji.
        await slackClient.post(text=f'reprocess_bad_blocks → 🚨 error: {startBlockNumber}-{endBlockNumber}\n```{str(exception)}```')
        # Bare raise preserves the original traceback.
        raise
    finally:
        await database.disconnect()
        await workQueue.disconnect()
        await tokenQueue.disconnect()
        await requester.close_connections()
        await awsRequester.close_connections()
async def backfill_collection_activities(startBlock: int, endBlock: int, batchSize: int) -> None:
    """Backfill hourly collection activity for transfers in blocks [startBlock, endBlock].

    For each batch of blocks, finds (registryAddress, hour) pairs from stored
    token transfers that do not yet have a collection-hourly-activity row and
    recomputes activity for each missing pair directly (instead of enqueueing).

    Args:
        startBlock: first block number to backfill (inclusive).
        endBlock: last block number to backfill (inclusive).
        batchSize: number of blocks handled per retriever round-trip.
    """
    databaseConnectionString = Database.create_psql_connection_string(username=os.environ["DB_USERNAME"], password=os.environ["DB_PASSWORD"], host=os.environ["DB_HOST"], port=os.environ["DB_PORT"], name=os.environ["DB_NAME"])
    database = Database(connectionString=databaseConnectionString)
    retriever = Retriever(database=database)
    saver = Saver(database=database)
    tokenQueue = SqsMessageQueue(region='eu-west-1', accessKeyId=os.environ['AWS_KEY'], accessKeySecret=os.environ['AWS_SECRET'], queueUrl='https://sqs.eu-west-1.amazonaws.com/097520841056/notd-token-queue')
    collectionActivityProcessor = CollectionActivityProcessor(retriever=retriever)
    tokenManager = TokenManager(saver=saver, retriever=retriever, tokenQueue=tokenQueue, collectionProcessor=None, tokenMetadataProcessor=None, tokenOwnershipProcessor=None, collectionActivityProcessor=collectionActivityProcessor)

    await database.connect()
    await tokenQueue.connect()
    currentBlockNumber = startBlock
    while currentBlockNumber < endBlock:
        # Batch end is inclusive (filters use lte below).
        endBlockNumber = min(currentBlockNumber + batchSize, endBlock)
        logging.info(f'Working on {currentBlockNumber} to {endBlockNumber}...')
        # NOTE(review): filters reference BlocksTable column keys — presumably
        # these key names line up with the token-transfer fields; confirm.
        tokenTransfers = await retriever.list_token_transfers(
            fieldFilters=[
                IntegerFieldFilter(BlocksTable.c.blockNumber.key, gte=currentBlockNumber),
                IntegerFieldFilter(BlocksTable.c.blockNumber.key, lte=endBlockNumber),
            ],
            orders=[
                Order(fieldName=BlocksTable.c.blockDate.key, direction=Direction.ASCENDING),
            ],
        )
        if not tokenTransfers:
            # Consistent with the rest of the loop: log instead of print.
            logging.info(f"Skipping {currentBlockNumber} to {endBlockNumber} with 0 transfers ")
        else:
            # Transfers are ordered by blockDate ascending, so [0]/[-1] bound the range.
            collectionHourlyActivities = await retriever.list_collections_activity(
                fieldFilters=[
                    DateFieldFilter(CollectionHourlyActivityTable.c.date.key, gte=date_hour_from_datetime(tokenTransfers[0].blockDate)),
                    DateFieldFilter(CollectionHourlyActivityTable.c.date.key, lte=date_hour_from_datetime(tokenTransfers[-1].blockDate)),
                ],
            )
            processedPairs = {(collectionHourlyActivity.address, collectionHourlyActivity.date) for collectionHourlyActivity in collectionHourlyActivities}
            registryDatePairs = {
                (tokenTransfer.registryAddress, date_hour_from_datetime(tokenTransfer.blockDate))
                for tokenTransfer in tokenTransfers
                if (tokenTransfer.registryAddress, date_hour_from_datetime(tokenTransfer.blockDate)) not in processedPairs
            }
            logging.info(f'Processing {len(registryDatePairs)} pairs from {len(tokenTransfers)} transfers')
            # Recompute activity in bounded parallel chunks.
            for pairChunk in list_util.generate_chunks(lst=list(registryDatePairs), chunkSize=50):
                await asyncio.gather(*[
                    tokenManager.update_activity_for_collection(address=registryAddress, startDate=startDate)
                    for registryAddress, startDate in pairChunk
                ])
        # Advance past the inclusive batch end; previously the next batch restarted
        # at endBlockNumber, so every boundary block was processed twice.
        currentBlockNumber = endBlockNumber + 1

    await database.disconnect()
    await tokenQueue.disconnect()