Example #1
class Test_Ingest_Queue:
    def setup_class(self):
        """Setup class parameters"""
        if "SQS_ENDPOINT" in dir(settings):
            self.endpoint_url = settings.SQS_ENDPOINT
        else:
            self.endpoint_url = None
        IngestQueue.createQueue(nd_proj, endpoint_url=self.endpoint_url)
        self.ingest_queue = IngestQueue(nd_proj,
                                        endpoint_url=self.endpoint_url)

    def teardown_class(self):
        """Teardown parameters"""
        IngestQueue.deleteQueue(nd_proj, endpoint_url=self.endpoint_url)

    def test_Message(self):
        """Testing the upload queue"""

        supercuboid_key = "kasthuri11&image&0&0"
        self.ingest_queue.sendMessage(supercuboid_key)
        for (
                message_id,
                receipt_handle,
                message_body,
        ) in self.ingest_queue.receiveMessage():
            assert supercuboid_key == message_body
            response = self.ingest_queue.deleteMessage(message_id,
                                                       receipt_handle)
            assert "Successful" in response
Example #2
    def delete_ingest_queue(self):
        """

        Returns:

        """
        IngestQueue.deleteQueue(self.nd_proj, endpoint_url=None)
Example #3
    def createIngestJob(self, user_id, config_data):
        """Create an ingest job based on the posted config data"""

        config_data = json.loads(config_data)
        # validate schema
        if self.validateConfig(config_data):
            try:
                # create the upload queue
                UploadQueue.createQueue(
                    self.nd_proj, endpoint_url=ndingest_settings.SQS_ENDPOINT)
                self.upload_queue = UploadQueue(
                    self.nd_proj, endpoint_url=ndingest_settings.SQS_ENDPOINT)
                self.ingest_job.upload_queue = self.upload_queue.url
                # create the ingest queue
                IngestQueue.createQueue(
                    self.nd_proj, endpoint_url=ndingest_settings.SQS_ENDPOINT)
                self.ingest_job.ingest_queue = IngestQueue(
                    self.nd_proj,
                    endpoint_url=ndingest_settings.SQS_ENDPOINT).url
                # create the cleanup queue
                CleanupQueue.createQueue(
                    self.nd_proj, endpoint_url=ndingest_settings.SQS_ENDPOINT)
                self.ingest_job.cleanup_queue = CleanupQueue(
                    self.nd_proj,
                    endpoint_url=ndingest_settings.SQS_ENDPOINT).url
                self.generateUploadTasks()
                self.ingest_job.user_id = user_id
                self.ingest_job.save()
                return NDIngestJob.serialize(self.ingest_job._job)

            except Exception as e:
                print(e)
                raise NDWSError(e)
Example #4
    def teardown_class(self):
        """Teardown class parameters"""

        # cleanup tilebucket
        for z_index in range(self.z_tile, settings.SUPER_CUBOID_SIZE[2], 1):
            tile_key = self.tile_bucket.encodeObjectKey(
                nd_proj.channel_name,
                nd_proj.resolution,
                self.x_tile,
                self.y_tile,
                z_index,
            )
            self.tile_bucket.deleteObject(tile_key)

        morton_index = XYZMorton(self.tiles)
        supercuboid_key = self.cuboid_bucket.generateSupercuboidKey(
            nd_proj.channel_name, nd_proj.resolution, morton_index)
        self.cuboid_bucket.deleteObject(supercuboid_key)
        # delete created entities
        TileIndexDB.deleteTable(endpoint_url="http://localhost:8000")
        CuboidIndexDB.deleteTable(endpoint_url="http://localhost:8000")
        IngestQueue.deleteQueue(nd_proj, endpoint_url="http://localhost:4568")
        CleanupQueue.deleteQueue(nd_proj, endpoint_url="http://localhost:4568")
        TileBucket.deleteBucket(endpoint_url="http://localhost:4567")
        try:
            CuboidBucket.deleteBucket(endpoint_url="http://localhost:4567")
        except Exception as e:
            pass
Example #5
    def setup_class(self):
        """Setup class parameters"""
        if 'SQS_ENDPOINT' in dir(settings):
            self.endpoint_url = settings.SQS_ENDPOINT
        else:
            self.endpoint_url = None
        IngestQueue.createQueue(nd_proj, endpoint_url=self.endpoint_url)
        self.ingest_queue = IngestQueue(nd_proj, endpoint_url=self.endpoint_url)
Example #6
    def delete_ingest_queue(self):
        """
        Delete the current ingest queue
        Returns:
            None

        """
        IngestQueue.deleteQueue(self.nd_proj, endpoint_url=None)
Example #7
    def create_ingest_queue(self):
        """

        Returns:

        """
        IngestQueue.createQueue(self.nd_proj, endpoint_url=None)
        queue = IngestQueue(self.nd_proj, endpoint_url=None)
        return queue
Example #8
    def create_ingest_queue(self):
        """
        Create an ingest queue for an ingest job using the ndingest library
        Returns:
            IngestQueue : Returns an ingest queue object

        """
        IngestQueue.createQueue(self.nd_proj, endpoint_url=None)
        queue = IngestQueue(self.nd_proj, endpoint_url=None)
        return queue
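The returned object exposes the queue URL, which Example #3 stores on the job
model. A short usage sketch, assuming ingest_job is a job model instance:

# Sketch: persist the queue URL on a job record, as createIngestJob does.
queue = self.create_ingest_queue()
ingest_job.ingest_queue = queue.url  # ingest_job is an assumed model object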
Example #9
    def delete_ingest_queue(self):
        """
        Delete the current ingest queue and remove it as an event source of
        the ingest lambda if it's connected.

        Returns:
            None
        """
        queue = IngestQueue(self.nd_proj)
        self.remove_sqs_event_source_from_lambda(queue.arn, INGEST_LAMBDA)
        IngestQueue.deleteQueue(self.nd_proj)
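The helper remove_sqs_event_source_from_lambda is not shown in these examples.
A hedged boto3 sketch of what it might do, assuming it receives the queue ARN
and a lambda function name:

import boto3

def remove_sqs_event_source_from_lambda(queue_arn, lambda_name):
    # Hypothetical sketch, not the project's actual implementation.
    client = boto3.client('lambda')
    # Find the event source mappings connecting this queue to this lambda.
    resp = client.list_event_source_mappings(
        EventSourceArn=queue_arn, FunctionName=lambda_name)
    for mapping in resp.get('EventSourceMappings', []):
        # Deleting a mapping stops the lambda from polling the queue.
        client.delete_event_source_mapping(UUID=mapping['UUID'])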
Example #10
    def setup_class(self):
        """Setup class parameters"""

        # create the tile index table. skip if it exists
        try:
            TileIndexDB.createTable(endpoint_url="http://localhost:8000")
            CuboidIndexDB.createTable(endpoint_url="http://localhost:8000")
        except Exception as e:
            pass
        self.tileindex_db = TileIndexDB(nd_proj.project_name,
                                        endpoint_url="http://localhost:8000")

        # create the tile bucket
        TileBucket.createBucket(endpoint_url="http://localhost:4567")
        self.tile_bucket = TileBucket(nd_proj.project_name,
                                      endpoint_url="http://localhost:4567")
        self.tiles = [self.x_tile, self.y_tile, self.z_tile] = [0, 0, 0]

        message_id = "testing"
        receipt_handle = "123456"
        # insert SUPER_CUBOID_SIZE tiles in the bucket
        for z_index in range(self.z_tile, settings.SUPER_CUBOID_SIZE[2], 1):
            tile_handle = cStringIO.StringIO()
            self.tile_bucket.putObject(
                tile_handle,
                nd_proj.channel_name,
                nd_proj.resolution,
                self.x_tile,
                self.y_tile,
                z_index,
                message_id,
                receipt_handle,
            )

        # creating the cuboid bucket
        CuboidBucket.createBucket(endpoint_url="http://localhost:4567")
        self.cuboid_bucket = CuboidBucket(nd_proj.project_name,
                                          endpoint_url="http://localhost:4567")

        # create the ingest queue
        IngestQueue.createQueue(nd_proj, endpoint_url="http://localhost:4568")
        self.ingest_queue = IngestQueue(nd_proj,
                                        endpoint_url="http://localhost:4568")

        # send message to the ingest queue
        morton_index = XYZMorton(self.tiles)
        supercuboid_key = self.cuboid_bucket.generateSupercuboidKey(
            nd_proj.channel_name, nd_proj.resolution, morton_index)
        response = self.ingest_queue.sendMessage(supercuboid_key)

        # create the cleanup queue
        CleanupQueue.createQueue(nd_proj, endpoint_url="http://localhost:4568")
Example #11
    def create_ingest_queue(self):
        """
        Create an ingest queue for an ingest job using the ndingest library
        Returns:
            IngestQueue : Returns an ingest queue object

        """
        IngestQueue.createQueue(self.nd_proj, endpoint_url=None)
        queue = IngestQueue(self.nd_proj, endpoint_url=None)
        timeout = self.get_ingest_lambda_timeout(INGEST_LAMBDA)
        # Ensure the queue's visibility timeout is greater than the timeout of
        # the ingest lambda that pulls from it, with a bit of buffer.
        queue.queue.set_attributes(Attributes={'VisibilityTimeout': str(timeout + 20)})
        return queue
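The set_attributes call suggests queue.queue is the underlying boto3 SQS Queue
resource; if so, the new timeout can be read back the same way. A sketch under
that assumption:

# Sketch: assumes queue.queue is a boto3 SQS Queue resource.
queue.queue.reload()  # refresh the cached attributes from SQS
print(queue.queue.attributes['VisibilityTimeout'])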
Example #12
    def test_setup_ingest(self):
        """Method to test the setup_ingest method"""
        try:
            ingest_mgmr = IngestManager()
            ingest_job = ingest_mgmr.setup_ingest(self.user.id,
                                                  self.example_config_data)
            assert (ingest_job is not None)

            # Check if the queues exist
            proj_class = BossIngestProj.load()
            nd_proj = proj_class(ingest_job.collection, ingest_job.experiment,
                                 ingest_job.channel, ingest_job.resolution,
                                 ingest_job.id)
            ingest_mgmr.nd_proj = nd_proj
            upload_queue = UploadQueue(nd_proj, endpoint_url=None)
            assert (upload_queue is not None)
            ingest_queue = IngestQueue(nd_proj, endpoint_url=None)
            assert (ingest_queue is not None)
            ingest_mgmr.remove_ingest_credentials(ingest_job.id)

        finally:
            ingest_mgmr.delete_upload_queue()
            ingest_mgmr.delete_ingest_queue()
Example #13
    def test_post_new_ingest_job(self):
        """ Test view to create a new ingest job """

        config_data = self.setup_helper.get_ingest_config_data_dict()
        config_data = json.loads(json.dumps(config_data))

        # Post the data
        url = '/' + version + '/ingest/'
        response = self.client.post(url, data=config_data, format='json')
        assert (response.status_code == 201)

        # Check if the queues exist
        ingest_job = response.json()
        proj_class = BossIngestProj.load()
        nd_proj = proj_class(ingest_job['collection'],
                             ingest_job['experiment'],
                             ingest_job['channel_layer'], 0, ingest_job['id'])
        self.nd_proj = nd_proj
        upload_queue = UploadQueue(nd_proj, endpoint_url=None)
        assert (upload_queue is not None)
        ingest_queue = IngestQueue(nd_proj, endpoint_url=None)
        assert (ingest_queue is not None)

        # Delete the job
        url = '/' + version + '/ingest/{}/'.format(ingest_job['id'])
        response = self.client.delete(url)
        assert (response.status_code == 204)
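The same request can be made outside Django's test client. A hedged sketch
with the requests library, where the host and token auth scheme are
assumptions (the path mirrors the test above):

import requests

# Sketch: replace <host> and <token> with real values for your deployment.
url = 'https://<host>/' + version + '/ingest/'
resp = requests.post(url, json=config_data,
                     headers={'Authorization': 'Token <token>'})
assert resp.status_code == 201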
Example #14
    def test_not_creator_admin(self):
        """Method to test only creators or admins can interact with ingest jobs"""
        config_data = self.setup_helper.get_ingest_config_data_dict()
        config_data = json.loads(json.dumps(config_data))

        # Post the data
        url = '/' + version + '/ingest/'
        response = self.client.post(url, data=config_data, format='json')
        assert (response.status_code == 201)

        # Check if the queues exist
        ingest_job = response.json()
        proj_class = BossIngestProj.load()
        nd_proj = proj_class(ingest_job['collection'], ingest_job['experiment'], ingest_job['channel'],
                             0, ingest_job['id'])
        self.nd_proj = nd_proj
        upload_queue = UploadQueue(nd_proj, endpoint_url=None)
        assert (upload_queue is not None)
        ingest_queue = IngestQueue(nd_proj, endpoint_url=None)
        assert (ingest_queue is not None)

        # Log in as the admin
        self.client.force_login(self.superuser)

        # Test joining the job
        url = '/' + version + '/ingest/{}/'.format(ingest_job['id'])
        response = self.client.get(url)
        assert (response.status_code == 200)
        assert (response.json()['ingest_job']['id'] == ingest_job['id'])
        assert("credentials" in response.json())

        # Delete the job
        url = '/' + version + '/ingest/{}/'.format(ingest_job['id'])
        response = self.client.delete(url)
        assert (response.status_code == 204)
Example #15
    def setup_class(self):
        """Setup class parameters"""
        # create the tile index table. skip if it exists
        try:
            TileIndexDB.createTable(endpoint_url=settings.DYNAMO_ENDPOINT)
        except Exception as e:
            pass
        self.tileindex_db = TileIndexDB(nd_proj.project_name,
                                        endpoint_url=settings.DYNAMO_ENDPOINT)
        # create the ingest queue
        IngestQueue.createQueue(nd_proj, endpoint_url=settings.SQS_ENDPOINT)
        # create the upload queue
        UploadQueue.createQueue(nd_proj, endpoint_url=settings.SQS_ENDPOINT)
        self.upload_queue = UploadQueue(nd_proj,
                                        endpoint_url=settings.SQS_ENDPOINT)
        tile_bucket = TileBucket(nd_proj.project_name,
                                 endpoint_url=settings.S3_ENDPOINT)
        [self.x_tile, self.y_tile, self.z_tile] = [0, 0, 0]
        message = serializer.encodeUploadMessage(
            nd_proj.project_name,
            nd_proj.channel_name,
            nd_proj.resolution,
            self.x_tile,
            self.y_tile,
            self.z_tile,
        )
        # insert message in the upload queue
        self.upload_queue.sendMessage(message)
        # receive message and upload object
        for (
                message_id,
                receipt_handle,
                message_body,
        ) in self.upload_queue.receiveMessage():
            tile_handle = cStringIO.StringIO()
            tile_bucket.putObject(
                tile_handle,
                nd_proj.channel_name,
                nd_proj.resolution,
                self.x_tile,
                self.y_tile,
                self.z_tile,
                message_id,
                receipt_handle,
            )
Example #16
    def test_post_new_ingest_job(self):
        """ Test view to create a new ingest job """
        config_data = self.setup_helper.get_ingest_config_data_dict()
        config_data = json.loads(json.dumps(config_data))

        # Post the data
        url = '/' + version + '/ingest/'
        response = self.client.post(url, data=config_data, format='json')
        self.assertEqual(201, response.status_code)

        # Check if the queues exist
        ingest_job = response.json()
        proj_class = BossIngestProj.load()
        nd_proj = proj_class(ingest_job['collection'],
                             ingest_job['experiment'], ingest_job['channel'],
                             0, ingest_job['id'])
        self.nd_proj = nd_proj
        upload_queue = UploadQueue(nd_proj, endpoint_url=None)
        self.assertIsNotNone(upload_queue)
        ingest_queue = IngestQueue(nd_proj, endpoint_url=None)
        self.assertIsNotNone(ingest_queue)

        # Test joining the job
        url = '/' + version + '/ingest/{}/'.format(ingest_job['id'])
        response = self.client.get(url)
        self.assertEqual(response.json()['ingest_job']['id'], ingest_job['id'])
        self.assertIn("credentials", response.json())

        # Delete the job
        url = '/' + version + '/ingest/{}/'.format(ingest_job['id'])
        response = self.client.delete(url)
        self.assertEqual(204, response.status_code)

        # Verify Queues are removed
        with self.assertRaises(ClientError):
            UploadQueue(nd_proj, endpoint_url=None)
        with self.assertRaises(ClientError):
            IngestQueue(nd_proj, endpoint_url=None)

        # Verify the job is deleted
        url = '/' + version + '/ingest/{}/status'.format(ingest_job['id'])
        response = self.client.get(url)
        self.assertEqual(response.status_code, 404)
Example #17
    def test_complete_ingest_job(self):
        """ Test view to create a new ingest job """
        config_data = self.setup_helper.get_ingest_config_data_dict()
        config_data = json.loads(json.dumps(config_data))

        # Post the data
        url = '/' + version + '/ingest/'
        response = self.client.post(url, data=config_data, format='json')
        self.assertEqual(response.status_code, 201)

        # Check if the queues exist
        ingest_job = response.json()
        proj_class = BossIngestProj.load()
        nd_proj = proj_class(ingest_job['collection'],
                             ingest_job['experiment'], ingest_job['channel'],
                             0, ingest_job['id'])
        self.nd_proj = nd_proj
        upload_queue = UploadQueue(nd_proj, endpoint_url=None)
        self.assertIsNotNone(upload_queue)
        ingest_queue = IngestQueue(nd_proj, endpoint_url=None)
        self.assertIsNotNone(ingest_queue)

        # Test joining the job
        url = '/' + version + '/ingest/{}/'.format(ingest_job['id'])
        response = self.client.get(url)
        self.assertEqual(response.json()['ingest_job']['id'], ingest_job['id'])
        self.assertIn("credentials", response.json())

        # Complete the job
        url = '/' + version + '/ingest/{}/complete'.format(ingest_job['id'])
        response = self.client.post(url)

        # Can't complete until it is done
        self.assertEqual(400, response.status_code)

        # Wait for job to complete
        print('trying to join job')
        for cnt in range(0, 30):
            # Try joining to kick the status
            url = '/' + version + '/ingest/{}/'.format(ingest_job['id'])
            self.client.get(url)

            url = '/' + version + '/ingest/{}/status'.format(ingest_job['id'])
            response = self.client.get(url)
            if response.json()["status"] == IngestJob.UPLOADING:
                break

            time.sleep(10)

        print('completing')
        # Complete the job
        url = '/' + version + '/ingest/{}/complete'.format(ingest_job['id'])
        response = self.client.post(url)
        self.assertEqual(204, response.status_code)
Example #18
    def deleteIngestJob(self, job_id):
        """Delete an ingest job based on job id"""

        try:
            ingest_job = NDIngestJob.fromId(job_id)
            nd_proj = NDIngestProj(ingest_job.project, ingest_job.channel,
                                   ingest_job.resolution)
            # delete the upload queue
            UploadQueue.deleteQueue(
                nd_proj, endpoint_url=ndingest_settings.SQS_ENDPOINT)
            # delete the ingest queue
            IngestQueue.deleteQueue(
                nd_proj, endpoint_url=ndingest_settings.SQS_ENDPOINT)
            # delete the cleanup queue
            CleanupQueue.deleteQueue(
                nd_proj, endpoint_url=ndingest_settings.SQS_ENDPOINT)
            ingest_job.status = INGEST_STATUS_DELETED
            ingest_job.save()
        except Exception as e:
            print(e)
            raise
Example #19
    def get_ingest_job_ingest_queue(self, ingest_job):
        """
        Return the ingest queue for an ingest job
        Args:
            ingest_job: Ingest job model

        Returns:
            ndingest.ingestqueue.IngestQueue
        """
        proj_class = BossIngestProj.load()
        self.nd_proj = proj_class(ingest_job.collection, ingest_job.experiment, ingest_job.channel,
                                  ingest_job.resolution, ingest_job.id)
        queue = IngestQueue(self.nd_proj, endpoint_url=None)
        return queue
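A short usage sketch of this accessor, assuming mgr is the manager instance
defining the method and ingest_job is a job model (both assumptions):

# Sketch: fetch the job's ingest queue and enqueue a serialized message,
# mirroring how the ingest lambdas send chunk messages.
queue = mgr.get_ingest_job_ingest_queue(ingest_job)
queue.sendMessage(json.dumps(msg))  # msg is an assumed message dict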
Example #20
    def delete_lambda_event_source_queues(self):
        """
        Detach the SQS queues that are connected to the ingest lambdas as
        event sources.
        """
        if int(self.job['ingest_type']) != TILE_INGEST:
            return

        try:
            config = BossConfig()
            INGEST_LAMBDA = config["aws"]["tile_ingest_lambda"]
            TILE_UPLOADED_LAMBDA = config["aws"]["tile_uploaded_lambda"]
        except Exception as ex:
            log.error(f'Failed to get lambda names from boss.config: {ex}')
            return

        self.remove_sqs_event_source_from_lambda(
            TileIndexQueue(self.nd_proj).arn, TILE_UPLOADED_LAMBDA)
        self.remove_sqs_event_source_from_lambda(
            IngestQueue(self.nd_proj).arn, INGEST_LAMBDA)
Example #21
def process(msg, context, region):
    """
    Process a single message.

    Args:
        msg (dict): Contents described at the top of the file.
        context (Context): Lambda context object.
        region (str): Lambda execution region.
    """

    job_id = int(msg['ingest_job'])
    chunk_key = msg['chunk_key']
    tile_key = msg['tile_key']
    print("Tile key: {}".format(tile_key))

    proj_info = BossIngestProj.fromTileKey(tile_key)

    # Set the job id
    proj_info.job_id = msg['ingest_job']

    print("Data: {}".format(msg))

    # update value in the dynamo table
    tile_index_db = BossTileIndexDB(proj_info.project_name)
    chunk = tile_index_db.getCuboid(chunk_key, job_id)
    if chunk:
        if tile_index_db.cuboidReady(chunk_key, chunk["tile_uploaded_map"]):
            print("Chunk already has all its tiles: {}".format(chunk_key))
            # Go ahead and setup to fire another ingest lambda so this tile
            # entry will be deleted on successful execution of the ingest lambda.
            chunk_ready = True
        else:
            print("Updating tile index for chunk_key: {}".format(chunk_key))
            chunk_ready = tile_index_db.markTileAsUploaded(chunk_key, tile_key, job_id)
    else:
        # First tile in the chunk
        print("Creating first entry for chunk_key: {}".format(chunk_key))
        try:
            tile_index_db.createCuboidEntry(chunk_key, job_id)
        except ClientError as err:
            # Under _exceptional_ circumstances, it's possible for another lambda
            # to beat the current instance to creating the initial cuboid entry
            # in the index.
            error_code = err.response['Error'].get('Code', 'Unknown')
            if error_code == 'ConditionalCheckFailedException':
                print('Chunk key entry already created - proceeding.')
            else:
                raise
        chunk_ready = tile_index_db.markTileAsUploaded(chunk_key, tile_key, job_id)

    # ingest the chunk if we have all the tiles
    if chunk_ready:
        print("CHUNK READY SENDING MESSAGE: {}".format(chunk_key))
        # insert a new job in the ingest queue if we have all the tiles
        ingest_queue = IngestQueue(proj_info)
        ingest_queue.sendMessage(json.dumps(msg))

        # Invoke Ingest lambda function
        names = AWSNames.from_lambda(context.function_name)
        lambda_client = boto3.client('lambda', region_name=region)
        lambda_client.invoke(
            FunctionName=names.tile_ingest.lambda_,
            InvocationType='Event',
            Payload=json.dumps(msg).encode())
    else:
        print("Chunk not ready for ingest yet: {}".format(chunk_key))

    print("DONE!")
Example #22
class Test_IngestLambda:
    def setup_class(self):
        """Setup class parameters"""

        # create the tile index table. skip if it exists
        try:
            TileIndexDB.createTable(endpoint_url="http://localhost:8000")
            CuboidIndexDB.createTable(endpoint_url="http://localhost:8000")
        except Exception as e:
            pass
        self.tileindex_db = TileIndexDB(nd_proj.project_name,
                                        endpoint_url="http://localhost:8000")

        # create the tile bucket
        TileBucket.createBucket(endpoint_url="http://localhost:4567")
        self.tile_bucket = TileBucket(nd_proj.project_name,
                                      endpoint_url="http://localhost:4567")
        self.tiles = [self.x_tile, self.y_tile, self.z_tile] = [0, 0, 0]

        message_id = "testing"
        receipt_handle = "123456"
        # insert SUPER_CUBOID_SIZE tiles in the bucket
        for z_index in range(self.z_tile, settings.SUPER_CUBOID_SIZE[2], 1):
            tile_handle = cStringIO.StringIO()
            self.tile_bucket.putObject(
                tile_handle,
                nd_proj.channel_name,
                nd_proj.resolution,
                self.x_tile,
                self.y_tile,
                z_index,
                message_id,
                receipt_handle,
            )

        # creating the cuboid bucket
        CuboidBucket.createBucket(endpoint_url="http://localhost:4567")
        self.cuboid_bucket = CuboidBucket(nd_proj.project_name,
                                          endpoint_url="http://localhost:4567")

        # create the ingest queue
        IngestQueue.createQueue(nd_proj, endpoint_url="http://localhost:4568")
        self.ingest_queue = IngestQueue(nd_proj,
                                        endpoint_url="http://localhost:4568")

        # send message to the ingest queue
        morton_index = XYZMorton(self.tiles)
        supercuboid_key = self.cuboid_bucket.generateSupercuboidKey(
            nd_proj.channel_name, nd_proj.resolution, morton_index)
        response = self.ingest_queue.sendMessage(supercuboid_key)

        # create the cleanup queue
        CleanupQueue.createQueue(nd_proj, endpoint_url="http://localhost:4568")

    def teardown_class(self):
        """Teardown class parameters"""

        # cleanup tilebucket
        for z_index in range(self.z_tile, settings.SUPER_CUBOID_SIZE[2], 1):
            tile_key = self.tile_bucket.encodeObjectKey(
                nd_proj.channel_name,
                nd_proj.resolution,
                self.x_tile,
                self.y_tile,
                z_index,
            )
            self.tile_bucket.deleteObject(tile_key)

        morton_index = XYZMorton(self.tiles)
        supercuboid_key = self.cuboid_bucket.generateSupercuboidKey(
            nd_proj.channel_name, nd_proj.resolution, morton_index)
        self.cuboid_bucket.deleteObject(supercuboid_key)
        # delete created entities
        TileIndexDB.deleteTable(endpoint_url="http://localhost:8000")
        CuboidIndexDB.deleteTable(endpoint_url="http://localhost:8000")
        IngestQueue.deleteQueue(nd_proj, endpoint_url="http://localhost:4568")
        CleanupQueue.deleteQueue(nd_proj, endpoint_url="http://localhost:4568")
        TileBucket.deleteBucket(endpoint_url="http://localhost:4567")
        try:
            CuboidBucket.deleteBucket(endpoint_url="http://localhost:4567")
        except Exception as e:
            pass

    def test_Uploadevent(self):
        """Testing the event"""
        # creating an emulambda function
        func = emulambda.import_lambda("ingestlambda.lambda_handler")
        # creating an emulambda event
        event = emulambda.parse_event(
            open("../ndlambda/functions/ingest/ingest_event.json").read())
        # calling the emulambda function to invoke a lambda
        emulambda.invoke_lambda(func, event, None, 0, None)

        # testing if the supercuboid was inserted in the bucket
        morton_index = XYZMorton(self.tiles)
        cuboid = self.cuboid_bucket.getObject(nd_proj.channel_name,
                                              nd_proj.resolution, morton_index)

        # testing if the message was removed from the ingest queue
        for message in self.ingest_queue.receiveMessage():
            # KL TODO write the message id into the JSON event file directly
            print(message)
Example #23
    def test_complete_ingest_job(self):
        """ Test view to create a new ingest job """
        config_data = self.setup_helper.get_ingest_config_data_dict()
        config_data = json.loads(json.dumps(config_data))

        # Post the data
        url = '/' + version + '/ingest/'
        response = self.client.post(url, data=config_data, format='json')
        assert (response.status_code == 201)

        # Check if the queues exist
        ingest_job = response.json()
        proj_class = BossIngestProj.load()
        nd_proj = proj_class(ingest_job['collection'], ingest_job['experiment'], ingest_job['channel'],
                             0, ingest_job['id'])
        self.nd_proj = nd_proj
        upload_queue = UploadQueue(nd_proj, endpoint_url=None)
        assert (upload_queue is not None)
        ingest_queue = IngestQueue(nd_proj, endpoint_url=None)
        assert (ingest_queue is not None)

        # Test joining the job
        url = '/' + version + '/ingest/{}/'.format(ingest_job['id'])
        response = self.client.get(url)
        assert(response.json()['ingest_job']['id'] == ingest_job['id'])
        assert("credentials" in response.json())

        # Complete the job
        url = '/' + version + '/ingest/{}/complete'.format(ingest_job['id'])
        response = self.client.post(url)

        # Can't complete until it is done
        assert(response.status_code == 400)

        # Wait for job to complete
        for cnt in range(0, 30):
            # Try joining to kick the status
            url = '/' + version + '/ingest/{}/'.format(ingest_job['id'])
            self.client.get(url)

            url = '/' + version + '/ingest/{}/status'.format(ingest_job['id'])
            response = self.client.get(url)
            if response.json()["status"] == 1:
                break

            time.sleep(10)

        # Complete the job
        url = '/' + version + '/ingest/{}/complete'.format(ingest_job['id'])
        response = self.client.post(url)
        assert(response.status_code == 204)

        # Verify Queues are removed
        with self.assertRaises(ClientError):
            UploadQueue(nd_proj, endpoint_url=None)
        with self.assertRaises(ClientError):
            IngestQueue(nd_proj, endpoint_url=None)

        # Verify status has changed
        url = '/' + version + '/ingest/{}/status'.format(ingest_job['id'])
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.json()["status"], 2)
Example #24
    def teardown_class(self):
        """Teardown parameters"""
        IngestQueue.deleteQueue(nd_proj, endpoint_url=self.endpoint_url)
Example #25
event = json.loads(json_event)

# Load the project info from the chunk key you are processing
proj_info = BossIngestProj.fromSupercuboidKey(event["chunk_key"])
proj_info.job_id = event["ingest_job"]

# Handle one message before quitting. The count was originally two (to help
# make sure all messages get processed) but was lowered because the lambda
# was crashing with full memory when pulling off more than one.
run_cnt = 0
while run_cnt < 1:
    # Get message from the SQS ingest queue, trying for up to ~6 seconds
    rx_cnt = 0
    msg_data = None
    msg_id = None
    msg_rx_handle = None
    while rx_cnt < 6:
        ingest_queue = IngestQueue(proj_info)
        msg = [x for x in ingest_queue.receiveMessage()]
        if msg:
            msg = msg[0]
            print("MESSAGE: {}".format(msg))
            print(len(msg))
            msg_id = msg[0]
            msg_rx_handle = msg[1]
            msg_data = json.loads(msg[2])
            print("MESSAGE DATA: {}".format(msg_data))
            break
        else:
            rx_cnt += 1
            print("No message found. Try {} of 6".format(rx_cnt))
            time.sleep(1)
Example #26
def handler(event, context):
    # Load settings
    SETTINGS = BossSettings.load()

    # Used as a guard against trying to delete the SQS message when lambda is
    # triggered by SQS.
    sqs_triggered = 'Records' in event and len(event['Records']) > 0

    if sqs_triggered:
        # Lambda invoked by an SQS trigger.
        msg_data = json.loads(event['Records'][0]['body'])
        # Load the project info from the chunk key you are processing
        chunk_key = msg_data['chunk_key']
        proj_info = BossIngestProj.fromSupercuboidKey(chunk_key)
        proj_info.job_id = msg_data['ingest_job']
    else:
        # Standard async invoke of this lambda.

        # Load the project info from the chunk key you are processing
        proj_info = BossIngestProj.fromSupercuboidKey(event["chunk_key"])
        proj_info.job_id = event["ingest_job"]

        # Get message from the SQS ingest queue, trying for up to ~6 seconds
        rx_cnt = 0
        msg_data = None
        msg_id = None
        msg_rx_handle = None
        while rx_cnt < 6:
            ingest_queue = IngestQueue(proj_info)
            msg = [x for x in ingest_queue.receiveMessage()]
            if msg:
                msg = msg[0]
                print("MESSAGE: {}".format(msg))
                print(len(msg))
                msg_id = msg[0]
                msg_rx_handle = msg[1]
                msg_data = json.loads(msg[2])
                print("MESSAGE DATA: {}".format(msg_data))
                break
            else:
                rx_cnt += 1
                print("No message found. Try {} of 6".format(rx_cnt))
                time.sleep(1)

        if not msg_id:
            # No tiles ready to ingest.
            print("No ingest message available")
            return

        # Get the chunk key of the tiles to ingest.
        chunk_key = msg_data['chunk_key']


    tile_error_queue = TileErrorQueue(proj_info)

    print("Ingesting Chunk {}".format(chunk_key))
    tiles_in_chunk = int(chunk_key.split('&')[1])

    # Setup SPDB instance
    sp = SpatialDB(msg_data['parameters']["KVIO_SETTINGS"],
                   msg_data['parameters']["STATEIO_CONFIG"],
                   msg_data['parameters']["OBJECTIO_CONFIG"])

    # Get tile list from Tile Index Table
    tile_index_db = BossTileIndexDB(proj_info.project_name)
    # tile_index_result (dict): keys are S3 object keys of the tiles comprising the chunk.
    tile_index_result = tile_index_db.getCuboid(msg_data["chunk_key"], int(msg_data["ingest_job"]))
    if tile_index_result is None:
        # If chunk_key is gone, another lambda uploaded the cuboids and deleted the chunk_key afterwards.
        if not sqs_triggered:
            # Remove message so it's not redelivered.
            ingest_queue.deleteMessage(msg_id, msg_rx_handle)

        print("Aborting due to chunk key missing from tile index table")
        return

    # Sort the tile keys
    print("Tile Keys: {}".format(tile_index_result["tile_uploaded_map"]))
    tile_key_list = [x.rsplit("&", 2) for x in tile_index_result["tile_uploaded_map"].keys()]
    if len(tile_key_list) < tiles_in_chunk:
        print("Not a full set of 16 tiles. Assuming it has handled already, tiles: {}".format(len(tile_key_list)))
        if not sqs_triggered:
            ingest_queue.deleteMessage(msg_id, msg_rx_handle)
        return
    tile_key_list = sorted(tile_key_list, key=lambda x: int(x[1]))
    tile_key_list = ["&".join(x) for x in tile_key_list]
    print("Sorted Tile Keys: {}".format(tile_key_list))

    # Augment the resource JSON data (fields pruned due to S3 metadata size limits) so it will instantiate properly
    resource_dict = msg_data['parameters']['resource']
    _, exp_name, ch_name = resource_dict["boss_key"].split("&")

    resource_dict["channel"]["name"] = ch_name
    resource_dict["channel"]["description"] = ""
    resource_dict["channel"]["sources"] = []
    resource_dict["channel"]["related"] = []
    resource_dict["channel"]["default_time_sample"] = 0
    resource_dict["channel"]["downsample_status"] = "NOT_DOWNSAMPLED"

    resource_dict["experiment"]["name"] = exp_name
    resource_dict["experiment"]["description"] = ""
    resource_dict["experiment"]["num_time_samples"] = 1
    resource_dict["experiment"]["time_step"] = None
    resource_dict["experiment"]["time_step_unit"] = None

    resource_dict["coord_frame"]["name"] = "cf"
    resource_dict["coord_frame"]["name"] = ""
    resource_dict["coord_frame"]["x_start"] = 0
    resource_dict["coord_frame"]["x_stop"] = 100000
    resource_dict["coord_frame"]["y_start"] = 0
    resource_dict["coord_frame"]["y_stop"] = 100000
    resource_dict["coord_frame"]["z_start"] = 0
    resource_dict["coord_frame"]["z_stop"] = 100000
    resource_dict["coord_frame"]["voxel_unit"] = "nanometers"

    # Setup the resource
    resource = BossResourceBasic()
    resource.from_dict(resource_dict)
    dtype = resource.get_numpy_data_type()

    # read all tiles from bucket into a slab
    tile_bucket = TileBucket(proj_info.project_name)
    data = []
    num_z_slices = 0
    for tile_key in tile_key_list:
        try:
            image_data, message_id, receipt_handle, metadata = tile_bucket.getObjectByKey(tile_key)
        except KeyError:
            print('Key: {} not found in tile bucket, assuming redelivered SQS message and aborting.'.format(
                tile_key))
            if not sqs_triggered:
                # Remove message so it's not redelivered.
                ingest_queue.deleteMessage(msg_id, msg_rx_handle)
            print("Aborting due to missing tile in bucket")
            return

        image_bytes = BytesIO(image_data)
        image_size = image_bytes.getbuffer().nbytes

        # Get tile size from metadata; needed to shape a black tile if the
        # actual tile is corrupt.
        if 'x_size' in metadata:
            tile_size_x = metadata['x_size']
        else:
            print('MetadataMissing: x_size not in tile metadata: using 1024.')
            tile_size_x = 1024

        if 'y_size' in metadata:
            tile_size_y = metadata['y_size']
        else:
            print('MetadataMissing: y_size not in tile metadata: using 1024.')
            tile_size_y = 1024

        if image_size == 0:
            print('TileError: Zero length tile, using black instead: {}'.format(tile_key))
            error_msg = 'Zero length tile'
            enqueue_tile_error(tile_error_queue, tile_key, chunk_key, error_msg)
            tile_img = np.zeros((tile_size_x, tile_size_y), dtype=dtype)
        else:
            try:
                tile_img = np.asarray(Image.open(image_bytes), dtype=dtype)
            except TypeError as te:
                print('TileError: Incomplete tile, using black instead (tile_size_in_bytes, tile_key): {}, {}'
                      .format(image_size, tile_key))
                error_msg = 'Incomplete tile'
                enqueue_tile_error(tile_error_queue, tile_key, chunk_key, error_msg)
                tile_img = np.zeros((tile_size_x, tile_size_y), dtype=dtype)
            except OSError as oe:
                print('TileError: OSError, using black instead (tile_size_in_bytes, tile_key): {}, {} ErrorMessage: {}'
                      .format(image_size, tile_key, oe))
                error_msg = 'OSError: {}'.format(oe)
                enqueue_tile_error(tile_error_queue, tile_key, chunk_key, error_msg)
                tile_img = np.zeros((tile_size_x, tile_size_y), dtype=dtype)

        data.append(tile_img)
        num_z_slices += 1


    # Make 3D array of image data. It should be in XYZ at this point
    chunk_data = np.array(data)
    del data
    tile_dims = chunk_data.shape

    # Break into Cube instances
    print("Tile Dims: {}".format(tile_dims))
    print("Num Z Slices: {}".format(num_z_slices))
    num_x_cuboids = int(math.ceil(tile_dims[2] / CUBOIDSIZE[proj_info.resolution][0]))
    num_y_cuboids = int(math.ceil(tile_dims[1] / CUBOIDSIZE[proj_info.resolution][1]))

    print("Num X Cuboids: {}".format(num_x_cuboids))
    print("Num Y Cuboids: {}".format(num_y_cuboids))

    chunk_key_parts = BossUtil.decode_chunk_key(chunk_key)
    t_index = chunk_key_parts['t_index']
    for x_idx in range(0, num_x_cuboids):
        for y_idx in range(0, num_y_cuboids):
            # TODO: check time series support
            cube = Cube.create_cube(resource, CUBOIDSIZE[proj_info.resolution])
            cube.zeros()

            # Compute Morton ID
            # TODO: verify Morton indices correct!
            print(chunk_key_parts)
            morton_x_ind = x_idx + (chunk_key_parts["x_index"] * num_x_cuboids)
            morton_y_ind = y_idx + (chunk_key_parts["y_index"] * num_y_cuboids)
            print("Morton X: {}".format(morton_x_ind))
            print("Morton Y: {}".format(morton_y_ind))
            morton_index = XYZMorton([morton_x_ind, morton_y_ind, int(chunk_key_parts['z_index'])])

            # Insert sub-region from chunk_data into cuboid
            x_start = x_idx * CUBOIDSIZE[proj_info.resolution][0]
            x_end = x_start + CUBOIDSIZE[proj_info.resolution][0]
            x_end = min(x_end, tile_dims[2])
            y_start = y_idx * CUBOIDSIZE[proj_info.resolution][1]
            y_end = y_start + CUBOIDSIZE[proj_info.resolution][1]
            y_end = min(y_end, tile_dims[1])
            z_end = CUBOIDSIZE[proj_info.resolution][2]
            # TODO: get sub-array w/o making a copy.
            print("Yrange: {}".format(y_end - y_start))
            print("Xrange: {}".format(x_end - x_start))
            print("X start: {}".format(x_start))
            print("X stop: {}".format(x_end))
            cube.data[0, 0:num_z_slices, 0:(y_end - y_start), 0:(x_end - x_start)] = \
                chunk_data[0:num_z_slices, y_start:y_end, x_start:x_end]

            # Create object key
            object_key = sp.objectio.generate_object_key(resource, proj_info.resolution, t_index, morton_index)
            print("Object Key: {}".format(object_key))

            # Put object in S3
            sp.objectio.put_objects([object_key], [cube.to_blosc()])

            # Add object to index
            sp.objectio.add_cuboid_to_index(object_key, ingest_job=int(msg_data["ingest_job"]))

            # Update id indices if this is an annotation channel
            # We no longer index during ingest.
            #if resource.data['channel']['type'] == 'annotation':
            #   try:
            #       sp.objectio.update_id_indices(
            #           resource, proj_info.resolution, [object_key], [cube.data])
            #   except SpdbError as ex:
            #       sns_client = boto3.client('sns')
            #       topic_arn = msg_data['parameters']["OBJECTIO_CONFIG"]["prod_mailing_list"]
            #       msg = 'During ingest:\n{}\nCollection: {}\nExperiment: {}\n Channel: {}\n'.format(
            #           ex.message,
            #           resource.data['collection']['name'],
            #           resource.data['experiment']['name'],
            #           resource.data['channel']['name'])
            #       sns_client.publish(
            #           TopicArn=topic_arn,
            #           Subject='Object services misuse',
            #           Message=msg)

    lambda_client = boto3.client('lambda', region_name=SETTINGS.REGION_NAME)

    names = AWSNames.create_from_lambda_name(context.function_name)

    delete_tiles_data = {
        'tile_key_list': tile_key_list,
        'region': SETTINGS.REGION_NAME,
        'bucket': tile_bucket.bucket.name
    }

    # Delete tiles from tile bucket.
    lambda_client.invoke(
        FunctionName=names.delete_tile_objs_lambda,
        InvocationType='Event',
        Payload=json.dumps(delete_tiles_data).encode()
    )       

    delete_tile_entry_data = {
        'tile_index': tile_index_db.table.name,
        'region': SETTINGS.REGION_NAME,
        'chunk_key': chunk_key,
        'task_id': msg_data['ingest_job']
    }

    # Delete entry from tile index.
    lambda_client.invoke(
        FunctionName=names.delete_tile_index_entry_lambda,
        InvocationType='Event',
        Payload=json.dumps(delete_tile_entry_data).encode()
    )       

    if not sqs_triggered:
        # Delete message since it was processed successfully
        ingest_queue.deleteMessage(msg_id, msg_rx_handle)
Example #27
        # to beat the current instance to creating the initial cuboid entry
        # in the index.
        error_code = err.response['Error'].get('Code', 'Unknown')
        if error_code == 'ConditionalCheckFailedException':
            print('Chunk key entry already created - proceeding.')
        else:
            raise
    chunk_ready = tile_index_db.markTileAsUploaded(metadata["chunk_key"],
                                                   tile_key,
                                                   int(metadata["ingest_job"]))

# ingest the chunk if we have all the tiles
if chunk_ready:
    print("CHUNK READY SENDING MESSAGE: {}".format(metadata["chunk_key"]))
    # insert a new job in the ingest queue if we have all the tiles
    ingest_queue = IngestQueue(proj_info)
    ingest_queue.sendMessage(json.dumps(metadata))

    # Invoke Ingest lambda function
    metadata["lambda-name"] = "ingest"
    lambda_client = boto3.client('lambda', region_name=SETTINGS.REGION_NAME)
    response = lambda_client.invoke(
        FunctionName=metadata["parameters"]["ingest_lambda"],
        InvocationType='Event',
        Payload=json.dumps(metadata).encode())
else:
    print("Chunk not ready for ingest yet: {}".format(metadata["chunk_key"]))

# Delete message from upload queue
upload_queue = UploadQueue(proj_info)
upload_queue.deleteMessage(message_id, receipt_handle)
Example #28
def process(msg, context, region):
    """
    Process a single message.

    Args:
        msg (dict): Contents described at the top of the file.
        context (Context): Lambda context object.
        region (str): Lambda execution region.
    """

    job_id = int(msg['ingest_job'])
    chunk_key = msg['chunk_key']
    tile_key = msg['tile_key']
    print("Tile key: {}".format(tile_key))

    proj_info = BossIngestProj.fromTileKey(tile_key)

    # Set the job id
    proj_info.job_id = msg['ingest_job']

    print("Data: {}".format(msg))

    # update value in the dynamo table
    tile_index_db = BossTileIndexDB(proj_info.project_name)
    chunk = tile_index_db.getCuboid(chunk_key, job_id)
    if chunk:
        if tile_index_db.cuboidReady(chunk_key, chunk["tile_uploaded_map"]):
            print("Chunk already has all its tiles: {}".format(chunk_key))
            # Go ahead and setup to fire another ingest lambda so this tile
            # entry will be deleted on successful execution of the ingest lambda.
            chunk_ready = True
        else:
            print("Updating tile index for chunk_key: {}".format(chunk_key))
            chunk_ready = tile_index_db.markTileAsUploaded(chunk_key, tile_key, job_id)
    else:
        # First tile in the chunk
        print("Creating first entry for chunk_key: {}".format(chunk_key))
        try:
            tile_index_db.createCuboidEntry(chunk_key, job_id)
        except ClientError as err:
            # Under _exceptional_ circumstances, it's possible for another lambda
            # to beat the current instance to creating the initial cuboid entry
            # in the index.
            error_code = err.response['Error'].get('Code', 'Unknown')
            if error_code == 'ConditionalCheckFailedException':
                print('Chunk key entry already created - proceeding.')
            else:
                raise
        chunk_ready = tile_index_db.markTileAsUploaded(chunk_key, tile_key, job_id)

    # ingest the chunk if we have all the tiles
    if chunk_ready:
        print("CHUNK READY SENDING MESSAGE: {}".format(chunk_key))
        # insert a new job in the ingest queue if we have all the tiles
        ingest_queue = IngestQueue(proj_info)
        ingest_queue.sendMessage(json.dumps(msg))

        # Invoke Ingest lambda function
        names = AWSNames.create_from_lambda_name(context.function_name)
        lambda_client = boto3.client('lambda', region_name=region)
        lambda_client.invoke(
            FunctionName=names.tile_ingest_lambda,
            InvocationType='Event',
            Payload=json.dumps(msg).encode())
    else:
        print("Chunk not ready for ingest yet: {}".format(chunk_key))

    print("DONE!")
Example #29
    def teardown_class(self):
        """Teardown class parameters"""
        TileIndexDB.deleteTable(endpoint_url=settings.DYNAMO_ENDPOINT)
        IngestQueue.deleteQueue(nd_proj, endpoint_url=settings.SQS_ENDPOINT)
        UploadQueue.deleteQueue(nd_proj, endpoint_url=settings.SQS_ENDPOINT)
Example #30
def handler(event, context):
    # Load settings
    SETTINGS = BossSettings.load()

    # Used as a guard against trying to delete the SQS message when lambda is
    # triggered by SQS.
    sqs_triggered = 'Records' in event and len(event['Records']) > 0

    if sqs_triggered:
        # Lambda invoked by an SQS trigger.
        msg_data = json.loads(event['Records'][0]['body'])
        # Load the project info from the chunk key you are processing
        chunk_key = msg_data['chunk_key']
        proj_info = BossIngestProj.fromSupercuboidKey(chunk_key)
        proj_info.job_id = msg_data['ingest_job']
    else:
        # Standard async invoke of this lambda.

        # Load the project info from the chunk key you are processing
        proj_info = BossIngestProj.fromSupercuboidKey(event["chunk_key"])
        proj_info.job_id = event["ingest_job"]

        # Get message from the SQS ingest queue, trying for up to ~6 seconds
        rx_cnt = 0
        msg_data = None
        msg_id = None
        msg_rx_handle = None
        while rx_cnt < 6:
            ingest_queue = IngestQueue(proj_info)
            try:
                msg = [x for x in ingest_queue.receiveMessage()]
            # StopIteration may be converted to a RuntimeError.
            except (StopIteration, RuntimeError):
                msg = None

            if msg:
                msg = msg[0]
                print("MESSAGE: {}".format(msg))
                print(len(msg))
                msg_id = msg[0]
                msg_rx_handle = msg[1]
                msg_data = json.loads(msg[2])
                print("MESSAGE DATA: {}".format(msg_data))
                break
            else:
                rx_cnt += 1
                print("No message found. Try {} of 6".format(rx_cnt))
                time.sleep(1)

        if not msg_id:
            # No tiles ready to ingest.
            print("No ingest message available")
            return

        # Get the chunk key of the tiles to ingest.
        chunk_key = msg_data['chunk_key']


    tile_error_queue = TileErrorQueue(proj_info)

    print("Ingesting Chunk {}".format(chunk_key))
    tiles_in_chunk = int(chunk_key.split('&')[1])

    # Setup SPDB instance
    sp = SpatialDB(msg_data['parameters']["KVIO_SETTINGS"],
                   msg_data['parameters']["STATEIO_CONFIG"],
                   msg_data['parameters']["OBJECTIO_CONFIG"])

    # Get tile list from Tile Index Table
    tile_index_db = BossTileIndexDB(proj_info.project_name)
    # tile_index_result (dict): keys are S3 object keys of the tiles comprising the chunk.
    tile_index_result = tile_index_db.getCuboid(msg_data["chunk_key"], int(msg_data["ingest_job"]))
    if tile_index_result is None:
        # If chunk_key is gone, another lambda uploaded the cuboids and deleted the chunk_key afterwards.
        if not sqs_triggered:
            # Remove message so it's not redelivered.
            ingest_queue.deleteMessage(msg_id, msg_rx_handle)

        print("Aborting due to chunk key missing from tile index table")
        return

    # Sort the tile keys
    print("Tile Keys: {}".format(tile_index_result["tile_uploaded_map"]))
    tile_key_list = [x.rsplit("&", 2) for x in tile_index_result["tile_uploaded_map"].keys()]
    if len(tile_key_list) < tiles_in_chunk:
        print("Not a full set of 16 tiles. Assuming it has handled already, tiles: {}".format(len(tile_key_list)))
        if not sqs_triggered:
            ingest_queue.deleteMessage(msg_id, msg_rx_handle)
        return
    tile_key_list = sorted(tile_key_list, key=lambda x: int(x[1]))
    tile_key_list = ["&".join(x) for x in tile_key_list]
    print("Sorted Tile Keys: {}".format(tile_key_list))

    # Augment the resource JSON data (fields pruned due to S3 metadata size limits) so it will instantiate properly
    resource_dict = msg_data['parameters']['resource']
    _, exp_name, ch_name = resource_dict["boss_key"].split("&")

    resource_dict["channel"]["name"] = ch_name
    resource_dict["channel"]["description"] = ""
    resource_dict["channel"]["sources"] = []
    resource_dict["channel"]["related"] = []
    resource_dict["channel"]["default_time_sample"] = 0
    resource_dict["channel"]["downsample_status"] = "NOT_DOWNSAMPLED"

    resource_dict["experiment"]["name"] = exp_name
    resource_dict["experiment"]["description"] = ""
    resource_dict["experiment"]["num_time_samples"] = 1
    resource_dict["experiment"]["time_step"] = None
    resource_dict["experiment"]["time_step_unit"] = None

    resource_dict["coord_frame"]["name"] = "cf"
    resource_dict["coord_frame"]["name"] = ""
    resource_dict["coord_frame"]["x_start"] = 0
    resource_dict["coord_frame"]["x_stop"] = 100000
    resource_dict["coord_frame"]["y_start"] = 0
    resource_dict["coord_frame"]["y_stop"] = 100000
    resource_dict["coord_frame"]["z_start"] = 0
    resource_dict["coord_frame"]["z_stop"] = 100000
    resource_dict["coord_frame"]["voxel_unit"] = "nanometers"

    # Setup the resource
    resource = BossResourceBasic()
    resource.from_dict(resource_dict)
    dtype = resource.get_numpy_data_type()

    # read all tiles from bucket into a slab
    tile_bucket = TileBucket(proj_info.project_name)
    data = []
    num_z_slices = 0
    for tile_key in tile_key_list:
        try:
            image_data, message_id, receipt_handle, metadata = tile_bucket.getObjectByKey(tile_key)
        except KeyError:
            print('Key: {} not found in tile bucket, assuming redelivered SQS message and aborting.'.format(
                tile_key))
            if not sqs_triggered:
                # Remove message so it's not redelivered.
                ingest_queue.deleteMessage(msg_id, msg_rx_handle)
            print("Aborting due to missing tile in bucket")
            return

        image_bytes = BytesIO(image_data)
        image_size = image_bytes.getbuffer().nbytes

        # Get tile size from metadata; needed to shape a black tile if the
        # actual tile is corrupt.
        if 'x_size' in metadata:
            tile_size_x = metadata['x_size']
        else:
            print('MetadataMissing: x_size not in tile metadata: using 1024.')
            tile_size_x = 1024

        if 'y_size' in metadata:
            tile_size_y = metadata['y_size']
        else:
            print('MetadataMissing: y_size not in tile metadata: using 1024.')
            tile_size_y = 1024

        if image_size == 0:
            print('TileError: Zero length tile, using black instead: {}'.format(tile_key))
            error_msg = 'Zero length tile'
            enqueue_tile_error(tile_error_queue, tile_key, chunk_key, error_msg)
            tile_img = np.zeros((tile_size_x, tile_size_y), dtype=dtype)
        else:
            try:
                # DP NOTE: Issues when specifying dtype in the asarray function with Pillow ver 8.3.1. 
                # Fixed by separating array instantiation and dtype assignment. 
                tile_img = np.asarray(Image.open(image_bytes))
                tile_img = tile_img.astype(dtype)
            except TypeError as te:
                print('TileError: Incomplete tile, using black instead (tile_size_in_bytes, tile_key): {}, {}'
                      .format(image_size, tile_key))
                error_msg = 'Incomplete tile'
                enqueue_tile_error(tile_error_queue, tile_key, chunk_key, error_msg)
                tile_img = np.zeros((tile_size_x, tile_size_y), dtype=dtype)
            except OSError as oe:
                print('TileError: OSError, using black instead (tile_size_in_bytes, tile_key): {}, {} ErrorMessage: {}'
                      .format(image_size, tile_key, oe))
                error_msg = 'OSError: {}'.format(oe)
                enqueue_tile_error(tile_error_queue, tile_key, chunk_key, error_msg)
                tile_img = np.zeros((tile_size_x, tile_size_y), dtype=dtype)

        data.append(tile_img)
        num_z_slices += 1


    # Make 3D array of image data. It should be in XYZ at this point
    chunk_data = np.array(data)
    del data
    tile_dims = chunk_data.shape

    # Break into Cube instances
    print("Tile Dims: {}".format(tile_dims))
    print("Num Z Slices: {}".format(num_z_slices))
    num_x_cuboids = int(math.ceil(tile_dims[2] / CUBOIDSIZE[proj_info.resolution][0]))
    num_y_cuboids = int(math.ceil(tile_dims[1] / CUBOIDSIZE[proj_info.resolution][1]))

    print("Num X Cuboids: {}".format(num_x_cuboids))
    print("Num Y Cuboids: {}".format(num_y_cuboids))

    chunk_key_parts = BossUtil.decode_chunk_key(chunk_key)
    t_index = chunk_key_parts['t_index']
    for x_idx in range(0, num_x_cuboids):
        for y_idx in range(0, num_y_cuboids):
            # TODO: check time series support
            cube = Cube.create_cube(resource, CUBOIDSIZE[proj_info.resolution])
            cube.zeros()

            # Compute Morton ID
            # TODO: verify Morton indices correct!
            print(chunk_key_parts)
            morton_x_ind = x_idx + (chunk_key_parts["x_index"] * num_x_cuboids)
            morton_y_ind = y_idx + (chunk_key_parts["y_index"] * num_y_cuboids)
            print("Morton X: {}".format(morton_x_ind))
            print("Morton Y: {}".format(morton_y_ind))
            morton_index = XYZMorton([morton_x_ind, morton_y_ind, int(chunk_key_parts['z_index'])])

            # Insert sub-region from chunk_data into cuboid
            x_start = x_idx * CUBOIDSIZE[proj_info.resolution][0]
            x_end = x_start + CUBOIDSIZE[proj_info.resolution][0]
            x_end = min(x_end, tile_dims[2])
            y_start = y_idx * CUBOIDSIZE[proj_info.resolution][1]
            y_end = y_start + CUBOIDSIZE[proj_info.resolution][1]
            y_end = min(y_end, tile_dims[1])
            z_end = CUBOIDSIZE[proj_info.resolution][2]
            # TODO: get sub-array w/o making a copy.
            print("Yrange: {}".format(y_end - y_start))
            print("Xrange: {}".format(x_end - x_start))
            print("X start: {}".format(x_start))
            print("X stop: {}".format(x_end))
            cube.data[0, 0:num_z_slices, 0:(y_end - y_start), 0:(x_end - x_start)] = \
                chunk_data[0:num_z_slices, y_start:y_end, x_start:x_end]

            # Create object key
            object_key = sp.objectio.generate_object_key(resource, proj_info.resolution, t_index, morton_index)
            print("Object Key: {}".format(object_key))

            # Put object in S3
            sp.objectio.put_objects([object_key], [cube.to_blosc()])

            # Add object to index
            sp.objectio.add_cuboid_to_index(object_key, ingest_job=int(msg_data["ingest_job"]))

            # Update id indices if this is an annotation channel
            # We no longer index during ingest.
            #if resource.data['channel']['type'] == 'annotation':
            #   try:
            #       sp.objectio.update_id_indices(
            #           resource, proj_info.resolution, [object_key], [cube.data])
            #   except SpdbError as ex:
            #       sns_client = boto3.client('sns')
            #       topic_arn = msg_data['parameters']["OBJECTIO_CONFIG"]["prod_mailing_list"]
            #       msg = 'During ingest:\n{}\nCollection: {}\nExperiment: {}\n Channel: {}\n'.format(
            #           ex.message,
            #           resource.data['collection']['name'],
            #           resource.data['experiment']['name'],
            #           resource.data['channel']['name'])
            #       sns_client.publish(
            #           TopicArn=topic_arn,
            #           Subject='Object services misuse',
            #           Message=msg)

    lambda_client = boto3.client('lambda', region_name=SETTINGS.REGION_NAME)

    names = AWSNames.from_lambda(context.function_name)

    delete_tiles_data = {
        'tile_key_list': tile_key_list,
        'region': SETTINGS.REGION_NAME,
        'bucket': tile_bucket.bucket.name
    }

    # Delete tiles from tile bucket.
    lambda_client.invoke(
        FunctionName=names.delete_tile_objs.lambda_,
        InvocationType='Event',
        Payload=json.dumps(delete_tiles_data).encode()
    )       

    delete_tile_entry_data = {
        'tile_index': tile_index_db.table.name,
        'region': SETTINGS.REGION_NAME,
        'chunk_key': chunk_key,
        'task_id': msg_data['ingest_job']
    }

    # Delete entry from tile index.
    lambda_client.invoke(
        FunctionName=names.delete_tile_index_entry.lambda_,
        InvocationType='Event',
        Payload=json.dumps(delete_tile_entry_data).encode()
    )       

    if not sqs_triggered:
        # Delete message since it was processed successfully
        ingest_queue.deleteMessage(msg_id, msg_rx_handle)