class Test_Ingest_Queue:
    def setup_class(self):
        """Setup class parameters"""
        if "SQS_ENDPOINT" in dir(settings):
            self.endpoint_url = settings.SQS_ENDPOINT
        else:
            self.endpoint_url = None
        IngestQueue.createQueue(nd_proj, endpoint_url=self.endpoint_url)
        self.ingest_queue = IngestQueue(nd_proj, endpoint_url=self.endpoint_url)

    def teardown_class(self):
        """Teardown class parameters"""
        IngestQueue.deleteQueue(nd_proj, endpoint_url=self.endpoint_url)

    def test_Message(self):
        """Test sending, receiving, and deleting a message on the ingest queue"""
        supercuboid_key = "kasthuri11&image&0&0"
        self.ingest_queue.sendMessage(supercuboid_key)
        for (
            message_id,
            receipt_handle,
            message_body,
        ) in self.ingest_queue.receiveMessage():
            assert supercuboid_key == message_body
            response = self.ingest_queue.deleteMessage(message_id, receipt_handle)
            assert "Successful" in response
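The queue tests above depend on a module-level nd_proj project fixture and an ndingest settings object that are not shown in this excerpt. Below is a minimal sketch of how that fixture might be constructed; the import paths, the IngestProj.load() pattern, and the "kasthuri11"/"image"/"0" arguments are assumptions inferred from the hard-coded supercuboid key in the test, not part of the source.

# Hypothetical test-module setup (assumption, not in the original file).
# The concrete project class returned by IngestProj.load() depends on
# local ndingest configuration.
from ndingest.settings.settings import Settings
from ndingest.ndingestproj.ingestproj import IngestProj
from ndingest.ndqueue.ingestqueue import IngestQueue

settings = Settings.load()
ProjClass = IngestProj.load()
nd_proj = ProjClass("kasthuri11", "image", "0")  # project, channel, resolution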
# in the index.
error_code = err.response['Error'].get('Code', 'Unknown')
if error_code == 'ConditionalCheckFailedException':
    print('Chunk key entry already created - proceeding.')
else:
    raise

chunk_ready = tile_index_db.markTileAsUploaded(
    metadata["chunk_key"], tile_key, int(metadata["ingest_job"]))

# ingest the chunk if we have all the tiles
if chunk_ready:
    print("CHUNK READY SENDING MESSAGE: {}".format(metadata["chunk_key"]))

    # insert a new job in the insert queue if we have all the tiles
    ingest_queue = IngestQueue(proj_info)
    ingest_queue.sendMessage(json.dumps(metadata))

    # Invoke Ingest lambda function
    metadata["lambda-name"] = "ingest"
    lambda_client = boto3.client('lambda', region_name=SETTINGS.REGION_NAME)
    response = lambda_client.invoke(
        FunctionName=metadata["parameters"]["ingest_lambda"],
        InvocationType='Event',
        Payload=json.dumps(metadata).encode())
else:
    print("Chunk not ready for ingest yet: {}".format(metadata["chunk_key"]))

# Delete message from upload queue
upload_queue = UploadQueue(proj_info)
upload_queue.deleteMessage(message_id, receipt_handle)

print("DONE!")
class Test_IngestLambda:
    def setup_class(self):
        """Setup class parameters"""
        # create the tile index table. skip if it exists
        try:
            TileIndexDB.createTable(endpoint_url="http://localhost:8000")
            CuboidIndexDB.createTable(endpoint_url="http://localhost:8000")
        except Exception as e:
            pass
        self.tileindex_db = TileIndexDB(nd_proj.project_name, endpoint_url="http://localhost:8000")

        # create the tile bucket
        TileBucket.createBucket(endpoint_url="http://localhost:4567")
        self.tile_bucket = TileBucket(nd_proj.project_name, endpoint_url="http://localhost:4567")
        self.tiles = [self.x_tile, self.y_tile, self.z_tile] = [0, 0, 0]

        message_id = "testing"
        receipt_handle = "123456"
        # insert SUPER_CUBOID_SIZE tiles in the bucket
        for z_index in range(self.z_tile, settings.SUPER_CUBOID_SIZE[2], 1):
            tile_handle = cStringIO.StringIO()
            self.tile_bucket.putObject(
                tile_handle,
                nd_proj.channel_name,
                nd_proj.resolution,
                self.x_tile,
                self.y_tile,
                z_index,
                message_id,
                receipt_handle,
            )

        # creating the cuboid bucket
        CuboidBucket.createBucket(endpoint_url="http://localhost:4567")
        self.cuboid_bucket = CuboidBucket(nd_proj.project_name, endpoint_url="http://localhost:4567")

        # create the ingest queue
        IngestQueue.createQueue(nd_proj, endpoint_url="http://localhost:4568")
        self.ingest_queue = IngestQueue(nd_proj, endpoint_url="http://localhost:4568")

        # send message to the ingest queue
        morton_index = XYZMorton(self.tiles)
        supercuboid_key = self.cuboid_bucket.generateSupercuboidKey(
            nd_proj.channel_name, nd_proj.resolution, morton_index)
        response = self.ingest_queue.sendMessage(supercuboid_key)

        # create the cleanup queue
        CleanupQueue.createQueue(nd_proj, endpoint_url="http://localhost:4568")

    def teardown_class(self):
        """Teardown class parameters"""
        # cleanup tilebucket
        for z_index in range(self.z_tile, settings.SUPER_CUBOID_SIZE[2], 1):
            tile_key = self.tile_bucket.encodeObjectKey(
                nd_proj.channel_name,
                nd_proj.resolution,
                self.x_tile,
                self.y_tile,
                z_index,
            )
            self.tile_bucket.deleteObject(tile_key)

        morton_index = XYZMorton(self.tiles)
        supercuboid_key = self.cuboid_bucket.generateSupercuboidKey(
            nd_proj.channel_name, nd_proj.resolution, morton_index)
        self.cuboid_bucket.deleteObject(supercuboid_key)

        # delete created entities
        TileIndexDB.deleteTable(endpoint_url="http://localhost:8000")
        CuboidIndexDB.deleteTable(endpoint_url="http://localhost:8000")
        IngestQueue.deleteQueue(nd_proj, endpoint_url="http://localhost:4568")
        CleanupQueue.deleteQueue(nd_proj, endpoint_url="http://localhost:4568")
        TileBucket.deleteBucket(endpoint_url="http://localhost:4567")
        try:
            CuboidBucket.deleteBucket(endpoint_url="http://localhost:4567")
        except Exception as e:
            pass

    def test_Uploadevent(self):
        """Testing the ingest lambda event"""
        # creating an emulambda function
        func = emulambda.import_lambda("ingestlambda.lambda_handler")
        # creating an emulambda event
        event = emulambda.parse_event(
            open("../ndlambda/functions/ingest/ingest_event.json").read())
        # calling the emulambda function to invoke a lambda
        emulambda.invoke_lambda(func, event, None, 0, None)

        # testing if the supercuboid was inserted in the bucket
        morton_index = XYZMorton(self.tiles)
        cuboid = self.cuboid_bucket.getObject(nd_proj.channel_name, nd_proj.resolution, morton_index)

        # testing if the message was removed from the ingest queue
        for message in self.ingest_queue.receiveMessage():
            # KL TODO write the message id into the JSON event file directly
            print(message)
def process(msg, context, region):
    """
    Process a single message.

    Args:
        msg (dict): Contents described at the top of the file.
        context (Context): Lambda context object.
        region (str): Lambda execution region.
    """
    job_id = int(msg['ingest_job'])
    chunk_key = msg['chunk_key']
    tile_key = msg['tile_key']
    print("Tile key: {}".format(tile_key))

    proj_info = BossIngestProj.fromTileKey(tile_key)

    # Set the job id
    proj_info.job_id = msg['ingest_job']

    print("Data: {}".format(msg))

    # update value in the dynamo table
    tile_index_db = BossTileIndexDB(proj_info.project_name)

    chunk = tile_index_db.getCuboid(chunk_key, job_id)
    if chunk:
        if tile_index_db.cuboidReady(chunk_key, chunk["tile_uploaded_map"]):
            print("Chunk already has all its tiles: {}".format(chunk_key))
            # Go ahead and setup to fire another ingest lambda so this tile
            # entry will be deleted on successful execution of the ingest lambda.
            chunk_ready = True
        else:
            print("Updating tile index for chunk_key: {}".format(chunk_key))
            chunk_ready = tile_index_db.markTileAsUploaded(chunk_key, tile_key, job_id)
    else:
        # First tile in the chunk
        print("Creating first entry for chunk_key: {}".format(chunk_key))
        try:
            tile_index_db.createCuboidEntry(chunk_key, job_id)
        except ClientError as err:
            # Under _exceptional_ circumstances, it's possible for another lambda
            # to beat the current instance to creating the initial cuboid entry
            # in the index.
            error_code = err.response['Error'].get('Code', 'Unknown')
            if error_code == 'ConditionalCheckFailedException':
                print('Chunk key entry already created - proceeding.')
            else:
                raise

        chunk_ready = tile_index_db.markTileAsUploaded(chunk_key, tile_key, job_id)

    # ingest the chunk if we have all the tiles
    if chunk_ready:
        print("CHUNK READY SENDING MESSAGE: {}".format(chunk_key))

        # insert a new job in the insert queue if we have all the tiles
        ingest_queue = IngestQueue(proj_info)
        ingest_queue.sendMessage(json.dumps(msg))

        # Invoke Ingest lambda function
        names = AWSNames.create_from_lambda_name(context.function_name)
        lambda_client = boto3.client('lambda', region_name=region)
        lambda_client.invoke(
            FunctionName=names.tile_ingest_lambda,
            InvocationType='Event',
            Payload=json.dumps(msg).encode())
    else:
        print("Chunk not ready for ingest yet: {}".format(chunk_key))

    print("DONE!")
def process(msg, context, region):
    """
    Process a single message.

    Args:
        msg (dict): Contents described at the top of the file.
        context (Context): Lambda context object.
        region (str): Lambda execution region.
    """
    job_id = int(msg['ingest_job'])
    chunk_key = msg['chunk_key']
    tile_key = msg['tile_key']
    print("Tile key: {}".format(tile_key))

    proj_info = BossIngestProj.fromTileKey(tile_key)

    # Set the job id
    proj_info.job_id = msg['ingest_job']

    print("Data: {}".format(msg))

    # update value in the dynamo table
    tile_index_db = BossTileIndexDB(proj_info.project_name)

    chunk = tile_index_db.getCuboid(chunk_key, job_id)
    if chunk:
        if tile_index_db.cuboidReady(chunk_key, chunk["tile_uploaded_map"]):
            print("Chunk already has all its tiles: {}".format(chunk_key))
            # Go ahead and setup to fire another ingest lambda so this tile
            # entry will be deleted on successful execution of the ingest lambda.
            chunk_ready = True
        else:
            print("Updating tile index for chunk_key: {}".format(chunk_key))
            chunk_ready = tile_index_db.markTileAsUploaded(chunk_key, tile_key, job_id)
    else:
        # First tile in the chunk
        print("Creating first entry for chunk_key: {}".format(chunk_key))
        try:
            tile_index_db.createCuboidEntry(chunk_key, job_id)
        except ClientError as err:
            # Under _exceptional_ circumstances, it's possible for another lambda
            # to beat the current instance to creating the initial cuboid entry
            # in the index.
            error_code = err.response['Error'].get('Code', 'Unknown')
            if error_code == 'ConditionalCheckFailedException':
                print('Chunk key entry already created - proceeding.')
            else:
                raise

        chunk_ready = tile_index_db.markTileAsUploaded(chunk_key, tile_key, job_id)

    # ingest the chunk if we have all the tiles
    if chunk_ready:
        print("CHUNK READY SENDING MESSAGE: {}".format(chunk_key))

        # insert a new job in the insert queue if we have all the tiles
        ingest_queue = IngestQueue(proj_info)
        ingest_queue.sendMessage(json.dumps(msg))

        # Invoke Ingest lambda function
        names = AWSNames.from_lambda(context.function_name)
        lambda_client = boto3.client('lambda', region_name=region)
        lambda_client.invoke(
            FunctionName=names.tile_ingest.lambda_,
            InvocationType='Event',
            Payload=json.dumps(msg).encode())
    else:
        print("Chunk not ready for ingest yet: {}".format(chunk_key))

    print("DONE!")
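process() expects to be handed a single decoded message, but the Lambda entry point that feeds it is not shown in this excerpt. Below is a minimal sketch of such a wrapper, assuming an SQS-style event whose record body is the JSON document that process() later re-queues via ingest_queue.sendMessage(json.dumps(msg)); the handler name, event shape, and region lookup are assumptions, not part of the source.

import json
import os


def handler(event, context):
    # Hypothetical entry point (assumption): unwrap each SQS record and hand
    # the decoded message to process(). A real deployment may receive the
    # message through a different trigger or payload layout.
    region = os.environ.get("AWS_REGION", "us-east-1")
    for record in event.get("Records", []):
        msg = json.loads(record["body"])
        # msg is expected to carry at least the keys process() reads:
        # 'ingest_job', 'chunk_key', and 'tile_key'.
        process(msg, context, region)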