def parse_process_list(process_list):
    """Parse a base64-encoded process listing into per-host records.

    The decoded payload is expected to be CRLF-separated lines: the first
    line is ``hostname=<name>`` and each subsequent line is
    ``<pid> <owner> <name>``.

    Returns:
        dict mapping hostname -> list of ``process(pid, owner, name)``
        namedtuples. Empty dict when the payload is empty, undecodable,
        or missing the hostname header (previously this path raised
        NameError / returned a debug placeholder string).
    """
    parsed = {}
    process_record = namedtuple('process', 'pid, owner, name')
    try:
        task_result = base64.b64decode(process_list).decode().split('\r\n')
        hostname = task_result[0].split('=')[1]
    except Exception as e:
        # Bad base64 / undecodable bytes / missing "hostname=" header:
        # nothing to parse. (Fix: the original fell through with
        # task_result possibly undefined and crashed below.)
        logging.error('Failed to decode process list: %s', e)
        return parsed
    if len(task_result) < 2 or task_result[1] == '':
        logging.error('Process list is empty')
        # Fix: was `return 'Hello'` — a leftover debug placeholder.
        return parsed
    for process in task_result[1:]:
        full_result = re.findall(r'^([0-9]+)\s+(\S+)\s+(.+)$', process)
        if not full_result:
            # Line does not look like "<pid> <owner> <name>"; skip it.
            continue
        pid, owner, name = full_result[0]
        parsed.setdefault(hostname, []).append(
            process_record(int(pid), owner, name))
    return parsed
async def _get_token_metadata_from_data(registryAddress: str, tokenId: str, metadataUrl: str, tokenMetadataDict: Dict[str, Any]) -> RetrievedTokenMetadata:
    """Build a RetrievedTokenMetadata from a raw token-metadata dict.

    Tries the common key spellings for each field in priority order and
    strips embedded NUL characters from the name/description before
    constructing the result.
    """
    # Name: prefer 'name', then 'title', else a placeholder from the token id.
    name = tokenMetadataDict.get('name') or tokenMetadataDict.get('title') or f'#{tokenId}'
    description = tokenMetadataDict.get('description')
    # Some metadata encodes the description as a list; keep the first entry.
    if isinstance(description, list):
        description = description[0]
    # Image: first truthy value across the known key variants.
    imageUrl = None
    for imageKey in ('image', 'image_url', 'imageUrl', 'image_data'):
        imageUrl = tokenMetadataDict.get(imageKey)
        if imageUrl:
            break
    # Some metadata nests the image as {'src': ...}; unwrap it.
    if isinstance(imageUrl, dict):
        imageDict = imageUrl
        imageUrl = imageDict.get('src')
        if not imageUrl:
            logging.error(f'Failed to extract imageUrl from {imageDict}')
    animationUrl = tokenMetadataDict.get('animation_url') or tokenMetadataDict.get('animation')
    frameImageUrl = tokenMetadataDict.get('frame_image') or tokenMetadataDict.get('frame_image_url') or tokenMetadataDict.get('frameImage')
    cleanedName = str(name).replace('\u0000', '')
    cleanedDescription = str(description).replace('\u0000', '') if description else None
    return RetrievedTokenMetadata(
        registryAddress=registryAddress,
        tokenId=tokenId,
        metadataUrl=metadataUrl,
        name=cleanedName,
        description=cleanedDescription,
        imageUrl=imageUrl,
        animationUrl=animationUrl,
        youtubeUrl=tokenMetadataDict.get('youtube_url'),
        backgroundColor=tokenMetadataDict.get('background_color'),
        frameImageUrl=frameImageUrl,
        attributes=tokenMetadataDict.get('attributes', []),
    )
def main(args):
    """Entry point: print the banner, load the ML models, patch the payload
    files with the configured domain, set up routes, then serve the Flask
    app on 0.0.0.0:80.

    NOTE(review): `logging` here appears to be a project-local logger
    wrapper — stdlib logging has no `print` method; confirm the import.
    NOTE(review): banner line breaks reconstructed — original formatting
    was collapsed; verify against the rendered banner.
    """
    logging.print('''
 ____              ____
|    \ ___ ___ ___|    \ ___ ___ ___
|  |  | -_| -_| . |  |  |  _| . | . |
|____/|___|___|  _|____/|_| |___|  _|
              |_|               |_|
''')
    app = Flask(__name__)
    try:
        # Load the models
        models.load_models()
        logging.warn('All models loaded')
        # Patch our payloads - will be moved.
        deepdrop.patch_payloads(config.payload_files, config.domain)
        logging.warn(f'Payloads patched for {config.domain}')
        if args.debug:
            # Debug mode: mint a one-off key, store it in the Flask config
            # and log it so the operator can read it from the console.
            captains_key = str(uuid.uuid4())
            app.config['CAPTAINS_KEY'] = captains_key
            app.debug = True
            logging.debug(captains_key)
        # Setup our routes
        routing.setup_routes(app)
    except Exception as e:
        # Best-effort startup: log the failure but still attempt to serve.
        logging.error(str(e))
    app.run('0.0.0.0', 80, threaded=False, use_reloader=False)  # No threading because https://github.com/keras-team/keras/issues/2397
async def process_tokens_from_old_transfers(startBlockNumber: int, endBlockNumber: int, batchSize: int):
    """Re-process token metadata and collections referenced by historic transfers.

    Walks TokenTransfersTable over [startBlockNumber, endBlockNumber) in
    batches of batchSize blocks, de-duplicates (registryAddress, tokenId)
    pairs and registry addresses across the whole run, then feeds them to
    _update_token_metadatas / _update_collections.

    Raises: whatever the update helpers raise; the failing batch range is
    logged before the exception propagates.
    """
    # --- Wire up infrastructure from environment configuration ---
    databaseConnectionString = Database.create_psql_connection_string(username=os.environ["DB_USERNAME"], password=os.environ["DB_PASSWORD"], host=os.environ["DB_HOST"], port=os.environ["DB_PORT"], name=os.environ["DB_NAME"])
    database = Database(connectionString=databaseConnectionString)
    saver = Saver(database=database)
    retriever = Retriever(database=database)
    s3manager = S3Manager(region='eu-west-1', accessKeyId=os.environ['AWS_KEY'], accessKeySecret=os.environ['AWS_SECRET'])
    workQueue = SqsMessageQueue(region='eu-west-1', accessKeyId=os.environ['AWS_KEY'], accessKeySecret=os.environ['AWS_SECRET'], queueUrl='https://sqs.eu-west-1.amazonaws.com/097520841056/notd-work-queue')
    tokenQueue = SqsMessageQueue(region='eu-west-1', accessKeyId=os.environ['AWS_KEY'], accessKeySecret=os.environ['AWS_SECRET'], queueUrl='https://sqs.eu-west-1.amazonaws.com/097520841056/notd-token-queue')
    awsRequester = AwsRequester(accessKeyId=os.environ['AWS_KEY'], accessKeySecret=os.environ['AWS_SECRET'])
    ethClient = RestEthClient(url='https://nd-foldvvlb25awde7kbqfvpgvrrm.ethereum.managedblockchain.eu-west-1.amazonaws.com', requester=awsRequester)
    requester = Requester()
    tokenMetadataProcessor = TokenMetadataProcessor(requester=requester, ethClient=ethClient, s3manager=s3manager, bucketName=os.environ['S3_BUCKET'])
    openseaApiKey = os.environ['OPENSEA_API_KEY']
    tokenOwnershipProcessor = TokenOwnershipProcessor(retriever=retriever)
    collectionProcessor = CollectionProcessor(requester=requester, ethClient=ethClient, openseaApiKey=openseaApiKey, s3manager=s3manager, bucketName=os.environ['S3_BUCKET'])
    tokenManager = TokenManager(saver=saver, retriever=retriever, tokenQueue=tokenQueue, collectionProcessor=collectionProcessor, tokenMetadataProcessor=tokenMetadataProcessor, tokenOwnershipProcessor=tokenOwnershipProcessor)
    revueApiKey = os.environ['REVUE_API_KEY']  # NOTE(review): unused in this function; kept so a missing env var still fails fast — confirm intent

    await database.connect()
    await workQueue.connect()
    await s3manager.connect()
    await tokenQueue.connect()

    cache = set()  # (registryAddress, tokenId) pairs already queued this run
    registryCache = set()  # registry addresses already queued this run
    currentBlockNumber = startBlockNumber
    while currentBlockNumber < endBlockNumber:
        start = currentBlockNumber
        end = min(currentBlockNumber + batchSize, endBlockNumber)
        currentBlockNumber = end
        logging.info(f'Working on {start}-{end}...')
        query = (
            sqlalchemy.select(TokenTransfersTable.c.registryAddress, TokenTransfersTable.c.tokenId)
            .where(TokenTransfersTable.c.blockNumber >= start)
            .where(TokenTransfersTable.c.blockNumber < end)
        )
        result = await database.execute(query=query)
        tokensToProcess = set()
        collectionsToProcess = set()
        for (registryAddress, tokenId) in result:
            if (registryAddress, tokenId) in cache:
                continue
            cache.add((registryAddress, tokenId))
            tokensToProcess.add((registryAddress, tokenId))
            if registryAddress in registryCache:
                continue
            registryCache.add(registryAddress)
            collectionsToProcess.add(registryAddress)
        # Consistency fix: use the logger instead of bare print(), matching
        # the logging.info call above.
        logging.info(f'len(tokensToProcess): {len(tokensToProcess)}')
        logging.info(f'len(collectionsToProcess): {len(collectionsToProcess)}')
        try:
            await _update_token_metadatas(tokensToProcess=tokensToProcess, tokenManager=tokenManager, retriever=retriever)
            await _update_collections(collectionsToProcess=collectionsToProcess, tokenManager=tokenManager, retriever=retriever)
        except BaseException:  # was a bare `except:`; explicit form, same behavior
            # Record which batch failed (including on cancellation), then propagate.
            logging.error(f'Failed during: {start}-{end}')
            raise
    await database.disconnect()
    await workQueue.disconnect()
    await tokenQueue.disconnect()
    await s3manager.disconnect()
async def _update_token_single_ownership(self, registryAddress: str, tokenId: str) -> None:
    """Recompute the single-ownership record for one token and persist it.

    Updates the existing ownership row when one is already stored,
    otherwise creates a new one. Logs an error and returns without
    writing when no ownership can be calculated for the token.
    """
    registryAddress = chain_util.normalize_address(value=registryAddress)
    async with self.saver.create_transaction() as connection:
        # Look up any previously stored ownership row; None means "create".
        try:
            existingOwnership = await self.retriever.get_token_ownership_by_registry_address_token_id(connection=connection, registryAddress=registryAddress, tokenId=tokenId)
        except NotFoundException:
            existingOwnership = None
        # Recalculate the current ownership from the transfer history.
        try:
            calculatedOwnership = await self.tokenOwnershipProcessor.calculate_token_single_ownership(registryAddress=registryAddress, tokenId=tokenId)
        except NoOwnershipException:
            logging.error(f'No ownership found for {registryAddress}:{tokenId}')
            return
        if not existingOwnership:
            await self.saver.create_token_ownership(
                connection=connection,
                registryAddress=calculatedOwnership.registryAddress,
                tokenId=calculatedOwnership.tokenId,
                ownerAddress=calculatedOwnership.ownerAddress,
                transferDate=calculatedOwnership.transferDate,
                transferValue=calculatedOwnership.transferValue,
                transferTransactionHash=calculatedOwnership.transferTransactionHash)
        else:
            await self.saver.update_token_ownership(
                connection=connection,
                tokenOwnershipId=existingOwnership.tokenOwnershipId,
                ownerAddress=calculatedOwnership.ownerAddress,
                transferDate=calculatedOwnership.transferDate,
                transferValue=calculatedOwnership.transferValue,
                transferTransactionHash=calculatedOwnership.transferTransactionHash)
if port == 443: host = hostname else: host = '{0}:{1}'.format(hostname, port) # Define http/ws server (only redirect to https/wss) class RedirectToSSL(tornado.web.RequestHandler): def get(self, path): self.redirect('https://{0}/{1}'.format(host, path)) return tornado.web.Application([(r"/(.*)", RedirectToSSL),],) # Load the configuration file try: config_file = open(YAML_CONFIG_PATH, 'r') except FileNotFoundError: error_message = "YAML config not found: {0}".format(YAML_CONFIG_PATH) logging.error(error_message) sys.exit(error_message) config = yaml.load(config_file) if __name__ == "__main__": # Load the top level config dictionaries general_config = config.get('general', {}) non_ssl_config = config.get('non_ssl', {}) ssl_config = config.get('ssl', {}) # Set our listening IP addresses and ports ipv4_ip = general_config.get('ipv4_ip') ipv6_ip = general_config.get('ipv6_ip') http_port = non_ssl_config.get('port', 8080) https_port = ssl_config.get('port', 8443) # Non SSL if non_ssl_config.get('enabled'):
async def retrieve_collection(self, address: str) -> RetrievedCollection:  # pylint: disable=too-many-statements
    """Assemble a RetrievedCollection for a contract address.

    Sources, in priority order: on-chain ERC-165/721 calls, the
    contract-level metadata URI (if any), and the OpenSea
    asset_contract API as a fallback for still-missing fields.
    Earlier sources win; later sources only fill gaps.

    Raises CollectionDoesNotExist when OpenSea returns 404 for the
    address. All other lookup failures degrade to None fields.
    """
    # --- On-chain probes; each call failing just means "unsupported/unknown" ---
    try:
        doesSupportErc721Response = await self.ethClient.call_function(toAddress=address, contractAbi=self.erc165MetadataContractAbi, functionAbi=self.erc165SupportInterfaceUriFunctionAbi, arguments={'interfaceId': _INTERFACE_ID_ERC721})
        doesSupportErc721 = doesSupportErc721Response[0]
    except BadRequestException as exception:
        doesSupportErc721 = False
    try:
        doesSupportErc1155Response = await self.ethClient.call_function(toAddress=address, contractAbi=self.erc165MetadataContractAbi, functionAbi=self.erc165SupportInterfaceUriFunctionAbi, arguments={'interfaceId': _INTERFACE_ID_ERC1155})
        doesSupportErc1155 = doesSupportErc1155Response[0]
    except BadRequestException as exception:
        doesSupportErc1155 = False
    # NOTE(review): attribute name "erc721MetdataContractAbi" (sic) is defined
    # elsewhere in the class — keep the spelling in sync if ever renamed.
    try:
        tokenMetadataNameResponse = await self.ethClient.call_function(toAddress=address, contractAbi=self.erc721MetdataContractAbi, functionAbi=self.erc721MetadataNameFunctionAbi)
        collectionName = tokenMetadataNameResponse[0]
    except BadRequestException as exception:
        collectionName = None
    try:
        tokenMetadataSymbolResponse = await self.ethClient.call_function(toAddress=address, contractAbi=self.erc721MetdataContractAbi, functionAbi=self.erc721MetadataSymbolFunctionAbi)
        collectionSymbol = tokenMetadataSymbolResponse[0]
    except BadRequestException:
        collectionSymbol = None
    try:
        contractUriResponse = await self.ethClient.call_function(toAddress=address, contractAbi=self.contractAbi, functionAbi=self.contractUriFunctionAbi)
        contractMetadataUri = contractUriResponse[0]
    except BadRequestException:
        contractMetadataUri = None
    # --- Contract-level metadata document (best-effort) ---
    collectionMetadata = None
    if contractMetadataUri:
        try:
            # Rewrite ipfs:// URIs through the project's IPFS gateway.
            if contractMetadataUri.startswith('ipfs://'):
                contractMetadataUri = contractMetadataUri.replace('ipfs://', 'https://pablo-images.kibalabs.com/v1/ipfs/')
            # Some contracts template the caller address into the URI.
            if "{address}" in contractMetadataUri:
                contractMetadataUri = contractMetadataUri.replace('{address}', address)
            contractMetadataUriResponse = await self.requester.get(url=contractMetadataUri)
            collectionMetadata = contractMetadataUriResponse.json()
            # Double-encoded JSON: a string body that itself contains JSON.
            if isinstance(collectionMetadata, str):
                collectionMetadata = json.loads(collectionMetadata)
            if not isinstance(collectionMetadata, dict):
                raise InternalServerErrorException(f'Bad response type from collection metadata: {type(collectionMetadata)}')
            # Archive the raw metadata document to S3, timestamped.
            await self.s3manager.write_file(content=str.encode(json.dumps(collectionMetadata)), targetPath=f'{self.bucketName}/collection-metadatas/{address}/{date_util.datetime_from_now()}.json')
        except Exception as exception:  # pylint: disable=broad-except
            # Best-effort: any failure here just drops the metadata source.
            logging.info(f'Error loading collection from metadata uri for address {address}: {str(exception)}')
            collectionMetadata = None
    # --- OpenSea fallback: up to 4 attempts on 5xx/429/timeouts ---
    openseaResponse = None
    retryCount = 0
    openseaCollection = None
    while not openseaResponse:
        try:
            openseaResponse = await self.requester.get(url=f'https://api.opensea.io/api/v1/asset_contract/{address}', headers={"X-API-KEY": self.openseaApiKey})
            openseaCollection = openseaResponse.json().get('collection')
        except ResponseException as exception:
            # 404 means the collection genuinely does not exist on OpenSea.
            if exception.statusCode == 404:
                raise CollectionDoesNotExist()
            # Give up on non-retryable statuses or after 4 attempts.
            if retryCount >= 3 or (exception.statusCode < 500 and exception.statusCode != 429):
                break
            logging.info(f'Retrying due to: {str(exception)}')
            await asyncio.sleep(1.5)
        except ReadTimeout as exception:
            if retryCount >= 3:
                break
            logging.info(f'Retrying due to: {str(exception)}')
            await asyncio.sleep(1.5)
        except Exception as exception:  # pylint: disable=broad-except
            logging.info(f'Error loading collection from opensea for address {address}: {str(exception)}')
            break
        retryCount += 1
    if openseaCollection is None:
        logging.info(f'Failed to load collection from opensea: {address}')
        openseaCollection = {}
    # --- Merge: on-chain values first, then contract metadata, then OpenSea ---
    name = collectionName
    symbol = collectionSymbol
    description = None
    imageUrl = None
    twitterUsername = None
    instagramUsername = None
    wikiUrl = None
    openseaSlug = None
    url = None
    discordUrl = None
    bannerImageUrl = None
    if collectionMetadata:
        name = name or collectionMetadata.get('name')
        symbol = symbol or collectionMetadata.get('symbol')
        description = collectionMetadata.get('description')
        imageUrl = collectionMetadata.get('image')
        twitterUsername = collectionMetadata.get('twitterUsername')
        instagramUsername = collectionMetadata.get('instagramUsername')
        wikiUrl = collectionMetadata.get('wikiUrl')
        openseaSlug = collectionMetadata.get('openseaSlug')
        url = collectionMetadata.get('external_link')
        discordUrl = collectionMetadata.get('discord_url')
        bannerImageUrl = collectionMetadata.get('bannerImageUrl')
    if openseaCollection:
        name = name or openseaCollection.get('name')
        symbol = symbol or openseaCollection.get('symbol')
        description = description or openseaCollection.get('description')
        imageUrl = imageUrl or openseaCollection.get('image_url')
        twitterUsername = twitterUsername or openseaCollection.get('twitter_username')
        instagramUsername = instagramUsername or openseaCollection.get('instagram_username')
        wikiUrl = wikiUrl or openseaCollection.get('wiki_url')
        openseaSlug = openseaSlug or openseaCollection.get('slug')
        url = url or openseaCollection.get('external_url')
        discordUrl = discordUrl or openseaCollection.get('discord_url')
        bannerImageUrl = bannerImageUrl or openseaCollection.get('banner_image_url')
    # Some metadata nests the image as {'src': ...}; unwrap it.
    if isinstance(imageUrl, dict):
        imageDict = imageUrl
        imageUrl = imageDict.get('src')
        if not imageUrl:
            logging.error(f'Failed to extract imageUrl from {imageDict}')
    retrievedCollection = RetrievedCollection(
        address=address,
        name=name.replace('\u0000', '').strip() if name else None,
        symbol=symbol.replace('\u0000', '').strip() if symbol else None,
        description=description.replace('\u0000', '').strip() if description else None,
        imageUrl=imageUrl,
        twitterUsername=twitterUsername,
        instagramUsername=instagramUsername,
        wikiUrl=wikiUrl,
        openseaSlug=openseaSlug,
        url=url,
        discordUrl=discordUrl,
        bannerImageUrl=bannerImageUrl,
        doesSupportErc721=doesSupportErc721,
        doesSupportErc1155=doesSupportErc1155,
    )
    return retrievedCollection
____ ____ | \ ___ ___ ___| \ ___ ___ ___ | | | -_| -_| . | | | _| . | . | |____/|___|___| _|____/|_| |___| _| |_| |_| ''') app = Flask(__name__, template_folder="core/templates", static_folder="core/static") try: ddmodels.load_models() routing.setup_routes(app) logging.success('Routes loaded') payloads.patch_payloads(config.payload_files, config.domain) logging.success(f'Payloads patched. Callback info {config.domain}') except Exception as e: logging.error(str(e)) logging.success("Starting HTTP Server") app.run( '0.0.0.0', 80, threaded=False, use_reloader=False ) # No threading because https://github.com/keras-team/keras/issues/2397