    def test_get_ids_in_region_multiple_cubes_and_x_partials(self):
        """
        Region has some full cuboids and some partial cuboids along the x axis.
        """
        cube_dim_tuple = (self.x_dim, self.y_dim, self.z_dim)
        cube1 = Cube.create_cube(self.resource, [self.x_dim, self.y_dim, self.z_dim])
        cube1.zeros()
        cube1.data[0][0][40][105] = 55555
        cube1.data[0][0][50][105] = 66666
        pos1 = [7*self.x_dim, 5*self.y_dim, 2*self.z_dim]
        cube1.morton_id = XYZMorton(pos1)

        cube2 = Cube.create_cube(self.resource, [self.x_dim, self.y_dim, self.z_dim])
        cube2.zeros()
        cube2.data[0][0][40][105] = 55555
        cube2.data[0][0][50][105] = 77777
        pos2 = [8*self.x_dim, 5*self.y_dim, 2*self.z_dim]
        cube2.morton_id = XYZMorton(pos2)

        cube3 = Cube.create_cube(self.resource, [self.x_dim, self.y_dim, self.z_dim])
        cube3.zeros()
        cube3.data[0][0][0][105] = 88888
        pos3 = [9*self.x_dim, 5*self.y_dim, 2*self.z_dim]
        cube3.morton_id = XYZMorton(pos3)

        sp = SpatialDB(self.kvio_config, self.state_config, self.object_store_config)

        resolution = 0
        sp.write_cuboid(self.resource, pos1, resolution, cube1.data, time_sample_start=0)
        sp.write_cuboid(self.resource, pos2, resolution, cube2.data, time_sample_start=0)
        sp.write_cuboid(self.resource, pos3, resolution, cube3.data, time_sample_start=0)

        # Make sure the cube writes completed and are correct.
        actual_cube = sp.cutout(self.resource, pos1, cube_dim_tuple, resolution)
        np.testing.assert_array_equal(cube1.data, actual_cube.data)
        actual_cube = sp.cutout(self.resource, pos2, cube_dim_tuple, resolution)
        np.testing.assert_array_equal(cube2.data, actual_cube.data)
        actual_cube = sp.cutout(self.resource, pos3, cube_dim_tuple, resolution)
        np.testing.assert_array_equal(cube3.data, actual_cube.data)

        corner = (7*self.x_dim+100, 5*self.y_dim, 2*self.z_dim)
        extent = (2*self.x_dim+self.x_dim//2, self.y_dim, self.z_dim)
        t_range = [0, 1]
        version = 0
        expected = ['55555', '66666', '77777', '88888']

        # Method under test.
        actual = sp.get_ids_in_region(
            self.resource, resolution, corner, extent, t_range, version)

        self.assertIn('ids', actual)
        self.assertCountEqual(expected, actual['ids'])
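
The corner and extent above are chosen so the region starts 100 voxels into cuboid 7 along x and spans two and a half cuboid widths, clipping cuboids 7 and 9 while fully covering cuboid 8. A minimal sketch of the 1-D span arithmetic this relies on (an assumption about the implementation, with x_dim taken as 512 purely for illustration):

def cuboid_index_span(corner, extent, cube_dim):
    """Inclusive range of cuboid indices a 1-D region touches."""
    first = corner // cube_dim
    last = (corner + extent - 1) // cube_dim
    return first, last

# Region starting 100 voxels into cuboid 7, spanning 2.5 cuboid widths:
print(cuboid_index_span(7 * 512 + 100, 2 * 512 + 256, 512))  # (7, 9)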
Example #2
    def test_too_many_cuboids_for_id_index(self):
        """
        Test error handling when the number of cuboids that contain an id
        exceeds the limits allowed by DynamoDB.

        This test writes 7651 cuboids, which causes DynamoDB throttling, so it
        is normally skipped.
        """
        version = 0
        resolution = 0
        time_sample = 0
        resource = BossResourceBasic(data=get_anno_dict())
        y = 0
        z = 0
        obj_keys = []
        cubes = []

        for x in range(0, 7651):
            mortonid = XYZMorton([x, y, z])
            obj_keys.append(
                self.obj_store.generate_object_key(resource, resolution,
                                                   time_sample, mortonid))
            # Just need one non-zero number to represent each cuboid.
            cubes.append(np.ones(1, dtype='uint64'))

        with self.assertRaises(SpdbError) as ex:
            self.obj_ind.update_id_indices(resource, resolution, obj_keys,
                                           cubes, version)
        self.assertEqual(ErrorCodes.OBJECT_STORE_ERROR,
                         ex.exception.error_code)
Example #3
    def test_get_ids_in_region_single_cube(self):
        """Test single cuboid using DynamoDB index."""
        cube_dim_tuple = (self.x_dim, self.y_dim, self.z_dim)
        cube1 = Cube.create_cube(self.resource, [self.x_dim, self.y_dim, self.z_dim])
        cube1.zeros()
        cube1.data[0][0][40][0] = 55555
        cube1.data[0][0][50][0] = 66666000000000
        pos1 = [2*self.x_dim, 3*self.y_dim, 2*self.z_dim]
        cube1.morton_id = XYZMorton(pos1)

        sp = SpatialDB(self.kvio_config, self.state_config, self.object_store_config)

        resolution = 0
        sp.write_cuboid(self.resource, pos1, resolution, cube1.data, time_sample_start=0)

        # Make sure the cube write completed and is correct.
        actual_cube = sp.cutout(self.resource, pos1, cube_dim_tuple, resolution)
        np.testing.assert_array_equal(cube1.data, actual_cube.data)

        corner = (2*self.x_dim, 3*self.y_dim, 2*self.z_dim)
        extent = (self.x_dim, self.y_dim, self.z_dim)
        t_range = [0, 1]
        version = 0
        expected = ['55555', '66666000000000']

        # Method under test.
        actual = sp.get_ids_in_region(
            self.resource, resolution, corner, extent, t_range, version)

        self.assertIn('ids', actual)
        self.assertCountEqual(expected, actual['ids'])
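
XYZMorton and MortonXYZ convert between (x, y, z) coordinates and a Morton (Z-order) code by interleaving the coordinate bits. A minimal pure-Python sketch of the idea, not spdb's implementation; the bit ordering (x in the lowest position) is an assumption:

def xyz_morton(xyz):
    # Interleave the bits of x, y, and z into a single Morton code.
    x, y, z = xyz
    code = 0
    for i in range(21):  # handles coordinates up to 2**21 - 1
        code |= ((x >> i) & 1) << (3 * i)
        code |= ((y >> i) & 1) << (3 * i + 1)
        code |= ((z >> i) & 1) << (3 * i + 2)
    return code

def morton_xyz(code):
    # Invert xyz_morton by de-interleaving the bits.
    x = y = z = 0
    for i in range(21):
        x |= ((code >> (3 * i)) & 1) << i
        y |= ((code >> (3 * i + 1)) & 1) << i
        z |= ((code >> (3 * i + 2)) & 1) << i
    return [x, y, z]

assert morton_xyz(xyz_morton([2, 3, 2])) == [2, 3, 2]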
Example #4
def generate_new_key(obj_key, translate, lookup_key):
    """
    Generate a new object key where the copied cuboid will live.

    Args:
        obj_key (str): Source cuboid's S3 object key.
        translate (list[x, y, z]): Deltas, in voxels, by which to translate the cuboid's position.
        lookup_key (str): coll_id&exp_id&chan_id (DB ids with separating ampersands).

    Returns:
        (str): Object key for cuboid copy destination.

    Raises:
        (ValueError): Only cuboids at resolution 0 may be copied, currently.
    """
    parts = AWSObjectStore.get_object_key_parts(obj_key)
    if int(parts.resolution) != 0:
        raise ValueError(
            'Copying non-zero resolutions not currently supported.')

    orig_x, orig_y, orig_z = MortonXYZ(parts.morton_id)

    # Integer division: translate is given in voxels; convert to cuboid indices.
    new_x = orig_x + translate[0] // CUBOIDSIZE[0][0]
    new_y = orig_y + translate[1] // CUBOIDSIZE[0][1]
    new_z = orig_z + translate[2] // CUBOIDSIZE[0][2]

    new_morton = XYZMorton([new_x, new_y, new_z])

    base_key = '{}&{}&{}&{}'.format(lookup_key, parts.resolution,
                                    parts.time_sample, new_morton)

    hash_str = hashlib.md5(base_key.encode()).hexdigest()

    return "{}&{}".format(hash_str, base_key)
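
The returned key keeps the layout the function builds: an md5 digest of the base key, then the base key itself. A hedged illustration with hypothetical values (lookup_key '1&1&50', resolution 0, time sample 0, translated morton id 7):

import hashlib

base_key = '{}&{}&{}&{}'.format('1&1&50', 0, 0, 7)   # '1&1&50&0&0&7'
hash_str = hashlib.md5(base_key.encode()).hexdigest()
new_key = '{}&{}'.format(hash_str, base_key)
# new_key == '<md5 of base_key>&1&1&50&0&0&7'; the hash prefix presumably
# spreads keys across S3 partitions.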
Example #5
    def _get_object_keys(self,
                         resource,
                         resolution,
                         cuboid_bounds,
                         t_range=[0, 1]):
        """
        Retrieves object keys for the cuboids specified in cuboid_bounds.

        Args:
            resource (project.BossResource): Data model info based on the request or target resource
            resolution (int): the resolution level
            cuboid_bounds (Region.Cuboids): ranges of cuboids to get keys for
            t_range (optional[list[int]]): time range, defaults to [0, 1]

        Returns:
            (list[str]): Object keys for every cuboid in cuboid_bounds over t_range.

        """
        key_list = []
        for x in cuboid_bounds.x_cuboids:
            for y in cuboid_bounds.y_cuboids:
                for z in cuboid_bounds.z_cuboids:
                    morton = XYZMorton([x, y, z])
                    for t in range(t_range[0], t_range[1]):
                        key_list.append(
                            AWSObjectStore.generate_object_key(
                                resource, resolution, t, morton))

        return key_list
Example #6
    def test_get_loose_bounding_box(self):
        # Only needed for the AWSObjectStore's generate_object_key() method, so
        # dummy values suffice to initialize it.
        with patch('spdb.spatialdb.object.get_region') as fake_get_region:
            # Force us-east-1 region for testing.
            fake_get_region.return_value = 'us-east-1'
            obj_store = AWSObjectStore(self.object_store_config)

        resolution = 0
        time_sample = 0

        [x_cube_dim, y_cube_dim, z_cube_dim] = CUBOIDSIZE[resolution]

        pos0 = [4, 4, 4]
        pos1 = [2, 1, 3]
        pos2 = [6, 7, 5]

        mort0 = XYZMorton(pos0)
        mort1 = XYZMorton(pos1)
        mort2 = XYZMorton(pos2)

        key0 = obj_store.generate_object_key(self.resource, resolution,
                                             time_sample, mort0)
        key1 = obj_store.generate_object_key(self.resource, resolution,
                                             time_sample, mort1)
        key2 = obj_store.generate_object_key(self.resource, resolution,
                                             time_sample, mort2)

        id = 2234

        with patch.object(self.obj_ind, 'get_cuboids') as fake_get_cuboids:
            fake_get_cuboids.return_value = [key0, key1, key2]
            actual = self.obj_ind.get_loose_bounding_box(
                self.resource, resolution, id)
            expected = {
                'x_range': [2 * x_cube_dim, (6 + 1) * x_cube_dim],
                'y_range': [1 * y_cube_dim, (7 + 1) * y_cube_dim],
                'z_range': [3 * z_cube_dim, (5 + 1) * z_cube_dim],
                't_range': [0, 1]
            }
            self.assertEqual(expected, actual)
Example #7
    def test_get_tight_bounding_box_single_cuboid(self):
        """
        Get the tight bounding box for an object that exists within a single cuboid.
        """
        resolution = 0
        [x_cube_dim, y_cube_dim, z_cube_dim] = CUBOIDSIZE[resolution]

        id = 33333
        id_as_str = '33333'
        # Customize the resource so it writes to its own channel and uses a
        # coord frame large enough to encompass the data written.  This is
        # important for proper loose bounding box calculations.
        data = get_anno_dict(boss_key='col1&exp1&ch50', lookup_key='1&1&50')
        data['coord_frame']['x_stop'] = 10000
        data['coord_frame']['y_stop'] = 10000
        data['coord_frame']['z_stop'] = 10000
        resource = BossResourceBasic(data)
        time_sample = 0
        version = 0
        x_rng = [0, x_cube_dim]
        y_rng = [0, y_cube_dim]
        z_rng = [0, z_cube_dim]
        t_rng = [0, 1]

        cube_dim_tuple = (self.x_dim, self.y_dim, self.z_dim)
        cube1 = Cube.create_cube(resource, [self.x_dim, self.y_dim, self.z_dim])
        cube1.zeros()
        cube1.data[0][14][500][104] = id
        cube1.data[0][15][501][105] = id
        cube1.data[0][15][502][104] = id
        cube1.data[0][14][503][105] = id

        pos1 = [10*self.x_dim, 15*self.y_dim, 2*self.z_dim]
        cube1.morton_id = XYZMorton(pos1)

        sp = SpatialDB(self.kvio_config, self.state_config, self.object_store_config)
        sp.write_cuboid(resource, pos1, resolution, cube1.data, time_sample_start=0)

        # Make sure the cube write completed and is correct.
        actual_cube = sp.cutout(resource, pos1, cube_dim_tuple, resolution)
        np.testing.assert_array_equal(cube1.data, actual_cube.data)

        # Method under test.
        actual = sp.get_bounding_box(resource, resolution, id_as_str, bb_type='tight')

        expected = {
            'x_range': [pos1[0]+104, pos1[0]+106],
            'y_range': [pos1[1]+500, pos1[1]+504],
            'z_range': [pos1[2]+14, pos1[2]+16],
            't_range': t_rng
        }

        self.assertEqual(expected, actual)
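
The expected ranges follow directly from the voxels written above: each axis range runs from the cuboid offset plus the id's minimum index to the offset plus its maximum index plus one. A sketch of that arithmetic with numpy, reusing the test's cube1 and pos1 (assumed equivalent to the single-cuboid tight-bound computation):

import numpy as np

zs, ys, xs = np.nonzero(cube1.data[0] == 33333)  # data[0] is indexed [z][y][x]
tight = {
    'x_range': [pos1[0] + int(xs.min()), pos1[0] + int(xs.max()) + 1],
    'y_range': [pos1[1] + int(ys.min()), pos1[1] + int(ys.max()) + 1],
    'z_range': [pos1[2] + int(zs.min()), pos1[2] + int(zs.max()) + 1],
    't_range': [0, 1],
}  # matches the expected dict above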
Example #8
    def test_get_loose_bounding_box(self):
        id = 33333
        resolution = 0
        time_sample = 0
        version = 0

        [x_cube_dim, y_cube_dim, z_cube_dim] = CUBOIDSIZE[resolution]

        bytes0 = np.zeros(10, dtype='uint64')
        bytes0[1] = id
        pos0 = [x_cube_dim, 2 * y_cube_dim, 3 * z_cube_dim]
        pos_ind0 = [
            pos0[0] // x_cube_dim, pos0[1] // y_cube_dim, pos0[2] // z_cube_dim
        ]
        morton_id0 = XYZMorton(pos_ind0)
        key0 = self.obj_store.generate_object_key(self.resource, resolution,
                                                  time_sample, morton_id0)

        bytes1 = np.zeros(4, dtype='uint64')
        bytes1[0] = id  # Pre-existing id.
        pos1 = [3 * x_cube_dim, 5 * y_cube_dim, 6 * z_cube_dim]
        pos_ind1 = [
            pos1[0] // x_cube_dim, pos1[1] // y_cube_dim, pos1[2] // z_cube_dim
        ]
        morton_id1 = XYZMorton(pos_ind1)
        key1 = self.obj_store.generate_object_key(self.resource, resolution,
                                                  time_sample, morton_id1)

        self.obj_ind.update_id_indices(self.resource, resolution, [key0, key1],
                                       [bytes0, bytes1], version)

        actual = self.obj_ind.get_loose_bounding_box(self.resource, resolution,
                                                     id)
        expected = {
            'x_range': [pos0[0], pos1[0] + x_cube_dim],
            'y_range': [pos0[1], pos1[1] + y_cube_dim],
            'z_range': [pos0[2], pos1[2] + z_cube_dim],
            't_range': [0, 1]
        }
        self.assertEqual(expected, actual)
Example #9
    def test_get_ids_in_region_multiple_partial_cubes(self):
        """
        Region is cuboid-aligned in x, but doesn't span full cuboids in y
        and z.
        """
        cube1 = Cube.create_cube(self.resource, [self.x_dim, self.y_dim, self.z_dim])
        cube1.zeros()
        cube1.data[0][0][40][0] = 55555
        cube1.data[0][0][50][0] = 66666
        pos1 = [4*self.x_dim, 4*self.y_dim, 2*self.z_dim]
        cube1.morton_id = XYZMorton(pos1)

        cube2 = Cube.create_cube(self.resource, [self.x_dim, self.y_dim, self.z_dim])
        cube2.zeros()
        cube2.data[0][0][40][0] = 55555
        cube2.data[0][0][50][0] = 77777
        pos2 = [5*self.x_dim, 4*self.y_dim, 2*self.z_dim]
        cube2.morton_id = XYZMorton(pos2)

        sp = SpatialDB(self.kvio_config, self.state_config, self.object_store_config)

        resolution = 0
        sp.write_cuboid(self.resource, pos1, resolution, cube1.data, time_sample_start=0)
        sp.write_cuboid(self.resource, pos2, resolution, cube2.data, time_sample_start=0)

        # Not verifying writes here because get_ids_in_region() should be doing
        # cutouts due to the region not containing full cuboids.

        corner = (4*self.x_dim, 4*self.y_dim, 2*self.z_dim)
        extent = (2*self.x_dim, 60, 10)
        t_range = [0, 1]
        version = 0
        expected = ['55555', '66666', '77777']

        # Method under test.
        actual = sp.get_ids_in_region(
            self.resource, resolution, corner, extent, t_range, version)

        self.assertIn('ids', actual)
        self.assertCountEqual(expected, actual['ids'])
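
Per the comment above, partial cuboids force cutouts, and the ids in a cutout reduce to its unique non-zero values, returned as strings. A minimal sketch of that reduction (an assumption about the behavior, not spdb's code):

import numpy as np

def ids_in_cutout(cutout_data):
    # Unique non-zero annotation ids in a cutout, stringified.
    return [str(i) for i in np.unique(cutout_data) if i != 0]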
Example #10
    def test_get_loose_bounding_box(self):
        resolution = 0
        time_sample = 0

        [x_cube_dim, y_cube_dim, z_cube_dim] = CUBOIDSIZE[resolution]

        pos0 = [4, 4, 4]
        pos1 = [2, 1, 3]
        pos2 = [6, 7, 5]

        mort0 = XYZMorton(pos0)
        mort1 = XYZMorton(pos1)
        mort2 = XYZMorton(pos2)

        key0 = AWSObjectStore.generate_object_key(self.resource, resolution,
                                                  time_sample, mort0)
        key1 = AWSObjectStore.generate_object_key(self.resource, resolution,
                                                  time_sample, mort1)
        key2 = AWSObjectStore.generate_object_key(self.resource, resolution,
                                                  time_sample, mort2)

        id = 2234

        with patch.object(self.obj_ind, 'get_cuboids') as fake_get_cuboids:
            fake_get_cuboids.return_value = [key0, key1, key2]

            # Method under test.
            actual = self.obj_ind.get_loose_bounding_box(
                self.resource, resolution, id)

            expected = {
                'x_range': [2 * x_cube_dim, (6 + 1) * x_cube_dim],
                'y_range': [1 * y_cube_dim, (7 + 1) * y_cube_dim],
                'z_range': [3 * z_cube_dim, (5 + 1) * z_cube_dim],
                't_range': [0, 1]
            }
            self.assertEqual(expected, actual)
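
Both loose-bounding-box tests encode the same rule: per axis, take the minimum and maximum cuboid index over all cuboids containing the id, then scale by the cuboid dimensions, using max index + 1 for the upper bound. A sketch of that rule (assumed, not the indexed implementation):

def loose_bbox(cuboid_xyzs, cube_dims):
    # cuboid_xyzs: list of [x, y, z] cuboid indices; cube_dims: (x, y, z) voxels.
    bounds = {}
    for axis, name in enumerate(('x_range', 'y_range', 'z_range')):
        lo = min(p[axis] for p in cuboid_xyzs)
        hi = max(p[axis] for p in cuboid_xyzs)
        bounds[name] = [lo * cube_dims[axis], (hi + 1) * cube_dims[axis]]
    bounds['t_range'] = [0, 1]
    return bounds

# With the test's positions [4, 4, 4], [2, 1, 3], [6, 7, 5] this yields the
# expected x_range [2*x, 7*x], y_range [1*y, 8*y], z_range [3*z, 6*z].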
Example #11
    def test_too_many_ids_in_cuboid(self):
        """
        Test error handling when a cuboid has more unique ids than DynamoDB
        can support.
        """
        version = 0
        resolution = 0
        time_sample = 0
        resource = BossResourceBasic(data=get_anno_dict())
        mortonid = XYZMorton([0, 0, 0])
        obj_keys = [
            self.obj_store.generate_object_key(resource, resolution,
                                               time_sample, mortonid)
        ]
        cubes = [
            np.random.randint(2000000, size=(16, 512, 512), dtype='uint64')
        ]
        with self.assertRaises(SpdbError) as ex:
            self.obj_ind.update_id_indices(resource, resolution, obj_keys,
                                           cubes, version)
        self.assertEqual(ErrorCodes.OBJECT_STORE_ERROR,
                         ex.exception.error_code)
Example #12
    def test_too_many_ids_in_cuboid(self):
        """
        Test error handling when a cuboid has more unique ids than DynamoDB
        can support.
        """
        version = 0
        resolution = 0
        time_sample = 0
        resource = BossResourceBasic(data=get_anno_dict())
        mortonid = XYZMorton([0, 0, 0])
        obj_keys = [
            AWSObjectStore.generate_object_key(resource, resolution,
                                               time_sample, mortonid)
        ]
        cubes = [
            np.random.randint(2000000, size=(16, 512, 512), dtype='uint64')
        ]

        # If too many ids, the index is skipped, logged, and False is returned to the caller.
        result = self.obj_ind.update_id_indices(resource, resolution, obj_keys,
                                                cubes, version)
        self.assertFalse(result)
Example #13
    chunk_key_parts = BossUtil.decode_chunk_key(chunk_key)
    t_index = chunk_key_parts['t_index']
    for x_idx in range(0, num_x_cuboids):
        for y_idx in range(0, num_y_cuboids):
            # TODO: check time series support
            cube = Cube.create_cube(resource, CUBOIDSIZE[proj_info.resolution])
            cube.zeros()

            # Compute Morton ID
            # TODO: verify Morton indices correct!
            print(chunk_key_parts)
            morton_x_ind = x_idx + (chunk_key_parts["x_index"] * num_x_cuboids)
            morton_y_ind = y_idx + (chunk_key_parts["y_index"] * num_y_cuboids)
            print("Morton X: {}".format(morton_x_ind))
            print("Morton Y: {}".format(morton_y_ind))
            morton_index = XYZMorton([morton_x_ind, morton_y_ind, int(chunk_key_parts['z_index'])])

            # Insert sub-region from chunk_data into cuboid
            x_start = x_idx * CUBOIDSIZE[proj_info.resolution][0]
            x_end = x_start + CUBOIDSIZE[proj_info.resolution][0]
            x_end = min(x_end, tile_dims[2])
            y_start = y_idx * CUBOIDSIZE[proj_info.resolution][1]
            y_end = y_start + CUBOIDSIZE[proj_info.resolution][1]
            y_end = min(y_end, tile_dims[1])
            z_end = CUBOIDSIZE[proj_info.resolution][2]
            # TODO: get sub-array w/o making a copy.
            print("Yrange: {}".format(y_end - y_start))
            print("Xrange: {}".format(x_end - x_start))
            print("X start: {}".format(x_start))
            print("X stop: {}".format(x_end))
            cube.data[0, 0:num_z_slices, 0:(y_end - y_start), 0:(x_end - x_start)] = \
                chunk_data[0:num_z_slices, y_start:y_end, x_start:x_end]
Example #14
def create_messages(args):
    """Create all of the tile messages to be enqueued.  Does not currently
    support a t extent.

    Args:
        args (dict): Same arguments as populate_upload_queue()

    Yields:
        str: JSON string describing one chunk and the cuboids it contains
    """

    tile_size = lambda v: args[v + "_tile_size"]
    # range_ does not work with z. Need to use z_chunk_size instead with volumetric ingest
    range_ = lambda v: range(args[v + '_start'], args[v + '_stop'], tile_size(v))

    # DP NOTE: generic version of
    # BossBackend.encode_chunk_key and BossBackend.encode_tile_key
    # from ingest-client/ingestclient/core/backend.py
    def hashed_key(*args):
        base = '&'.join(map(str, args))

        md5 = hashlib.md5()
        md5.update(base.encode())
        digest = md5.hexdigest()

        return '&'.join([digest, base])

    chunks_to_skip = args['items_to_skip']
    count_in_offset = 0
    for t in range_('t'):
        for z in range(args['z_start'], args['z_stop'], args['z_chunk_size']):
            for y in range_('y'):
                for x in range_('x'):

                    if chunks_to_skip > 0:
                        chunks_to_skip -= 1
                        continue

                    if count_in_offset == 0:
                        print("Finished skipping chunks")

                    chunk_x = int(x / tile_size('x'))
                    chunk_y = int(y / tile_size('y'))
                    chunk_z = int(z / args['z_chunk_size'])
                    chunk_key = hashed_key(1,  # num of items
                                           args['project_info'][0],
                                           args['project_info'][1],
                                           args['project_info'][2],
                                           args['resolution'],
                                           chunk_x,
                                           chunk_y,
                                           chunk_z,
                                           t)

                    count_in_offset += 1
                    if count_in_offset > args['MAX_NUM_ITEMS_PER_LAMBDA']:
                        return  # end the generator

                    cuboids = []

                    # Currently, only allow ingest for time sample 0.
                    t = 0
                    lookup_key = lookup_key_from_chunk_key(chunk_key)
                    res = resolution_from_chunk_key(chunk_key)

                    for chunk_offset_z in range(0, args["z_chunk_size"], CUBOID_Z):
                        for chunk_offset_y in range(0, tile_size('y'), CUBOID_Y):
                            for chunk_offset_x in range(0, tile_size('x'), CUBOID_X):
                                morton = XYZMorton(
                                    [(x + chunk_offset_x) // CUBOID_X,
                                     (y + chunk_offset_y) // CUBOID_Y,
                                     (z + chunk_offset_z) // CUBOID_Z])
                                object_key = generate_object_key(lookup_key, res, t, morton)
                                new_cuboid = {
                                    "x": chunk_offset_x,
                                    "y": chunk_offset_y,
                                    "z": chunk_offset_z,
                                    "key": object_key
                                }
                                cuboids.append(new_cuboid)

                    msg = {
                        'chunk_key': chunk_key,
                        'cuboids': cuboids,
                    }

                    yield json.dumps(msg)
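
The chunk key built by the nested hashed_key helper has the same hash-prefixed layout as the object keys: an md5 digest of the ampersand-joined fields, then the fields themselves. A hypothetical invocation (values illustrative only, with the helper treated as if module-level):

# hashed_key(num_items, coll_id, exp_id, chan_id, resolution, x, y, z, t)
key = hashed_key(1, 11, 22, 33, 0, 4, 2, 1, 0)
# key == '<md5 of "1&11&22&33&0&4&2&1&0">&1&11&22&33&0&4&2&1&0'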
Example #15
def handler(event, context):
    # Load settings
    SETTINGS = BossSettings.load()

    # Used as a guard against trying to delete the SQS message when lambda is
    # triggered by SQS.
    sqs_triggered = 'Records' in event and len(event['Records']) > 0

    if sqs_triggered:
        # Lambda invoked by an SQS trigger.
        msg_data = json.loads(event['Records'][0]['body'])
        # Load the project info from the chunk key you are processing
        chunk_key = msg_data['chunk_key']
        proj_info = BossIngestProj.fromSupercuboidKey(chunk_key)
        proj_info.job_id = msg_data['ingest_job']
    else:
        # Standard async invoke of this lambda.

        # Load the project info from the chunk key you are processing
        proj_info = BossIngestProj.fromSupercuboidKey(event["chunk_key"])
        proj_info.job_id = event["ingest_job"]

        # Get message from SQS ingest queue, try for ~2 seconds
        rx_cnt = 0
        msg_data = None
        msg_id = None
        msg_rx_handle = None
        while rx_cnt < 6:
            ingest_queue = IngestQueue(proj_info)
            try:
                msg = [x for x in ingest_queue.receiveMessage()]
            # StopIteration may be converted to a RuntimeError.
            except (StopIteration, RuntimeError):
                msg = None

            if msg:
                msg = msg[0]
                print("MESSAGE: {}".format(msg))
                print(len(msg))
                msg_id = msg[0]
                msg_rx_handle = msg[1]
                msg_data = json.loads(msg[2])
                print("MESSAGE DATA: {}".format(msg_data))
                break
            else:
                rx_cnt += 1
                print("No message found. Try {} of 6".format(rx_cnt))
                time.sleep(1)

        if not msg_id:
            # No tiles ready to ingest.
            print("No ingest message available")
            return

        # Get the chunk key of the tiles to ingest.
        chunk_key = msg_data['chunk_key']


    tile_error_queue = TileErrorQueue(proj_info)

    print("Ingesting Chunk {}".format(chunk_key))
    tiles_in_chunk = int(chunk_key.split('&')[1])

    # Setup SPDB instance
    sp = SpatialDB(msg_data['parameters']["KVIO_SETTINGS"],
                   msg_data['parameters']["STATEIO_CONFIG"],
                   msg_data['parameters']["OBJECTIO_CONFIG"])

    # Get tile list from Tile Index Table
    tile_index_db = BossTileIndexDB(proj_info.project_name)
    # tile_index_result (dict): keys are S3 object keys of the tiles comprising the chunk.
    tile_index_result = tile_index_db.getCuboid(msg_data["chunk_key"], int(msg_data["ingest_job"]))
    if tile_index_result is None:
        # If chunk_key is gone, another lambda uploaded the cuboids and deleted the chunk_key afterwards.
        if not sqs_triggered:
            # Remove message so it's not redelivered.
            ingest_queue.deleteMessage(msg_id, msg_rx_handle)

        print("Aborting due to chunk key missing from tile index table")
        return

    # Sort the tile keys
    print("Tile Keys: {}".format(tile_index_result["tile_uploaded_map"]))
    tile_key_list = [x.rsplit("&", 2) for x in tile_index_result["tile_uploaded_map"].keys()]
    if len(tile_key_list) < tiles_in_chunk:
        print("Not a full set of {} tiles. Assuming this chunk was already handled; tiles: {}".format(
            tiles_in_chunk, len(tile_key_list)))
        if not sqs_triggered:
            ingest_queue.deleteMessage(msg_id, msg_rx_handle)
        return
    tile_key_list = sorted(tile_key_list, key=lambda x: int(x[1]))
    tile_key_list = ["&".join(x) for x in tile_key_list]
    print("Sorted Tile Keys: {}".format(tile_key_list))

    # Augment the Resource JSON data that was pruned due to S3 metadata size
    # limits so it will instantiate properly.
    resource_dict = msg_data['parameters']['resource']
    _, exp_name, ch_name = resource_dict["boss_key"].split("&")

    resource_dict["channel"]["name"] = ch_name
    resource_dict["channel"]["description"] = ""
    resource_dict["channel"]["sources"] = []
    resource_dict["channel"]["related"] = []
    resource_dict["channel"]["default_time_sample"] = 0
    resource_dict["channel"]["downsample_status"] = "NOT_DOWNSAMPLED"

    resource_dict["experiment"]["name"] = exp_name
    resource_dict["experiment"]["description"] = ""
    resource_dict["experiment"]["num_time_samples"] = 1
    resource_dict["experiment"]["time_step"] = None
    resource_dict["experiment"]["time_step_unit"] = None

    resource_dict["coord_frame"]["name"] = "cf"
    resource_dict["coord_frame"]["description"] = ""
    resource_dict["coord_frame"]["x_start"] = 0
    resource_dict["coord_frame"]["x_stop"] = 100000
    resource_dict["coord_frame"]["y_start"] = 0
    resource_dict["coord_frame"]["y_stop"] = 100000
    resource_dict["coord_frame"]["z_start"] = 0
    resource_dict["coord_frame"]["z_stop"] = 100000
    resource_dict["coord_frame"]["voxel_unit"] = "nanometers"

    # Setup the resource
    resource = BossResourceBasic()
    resource.from_dict(resource_dict)
    dtype = resource.get_numpy_data_type()

    # read all tiles from bucket into a slab
    tile_bucket = TileBucket(proj_info.project_name)
    data = []
    num_z_slices = 0
    for tile_key in tile_key_list:
        try:
            image_data, message_id, receipt_handle, metadata = tile_bucket.getObjectByKey(tile_key)
        except KeyError:
            print('Key: {} not found in tile bucket, assuming redelivered SQS message and aborting.'.format(
                tile_key))
            if not sqs_triggered:
                # Remove message so it's not redelivered.
                ingest_queue.deleteMessage(msg_id, msg_rx_handle)
            print("Aborting due to missing tile in bucket")
            return

        image_bytes = BytesIO(image_data)
        image_size = image_bytes.getbuffer().nbytes

        # Get tile size from metadata; needed to shape a black tile if the
        # actual tile is corrupt.
        if 'x_size' in metadata:
            tile_size_x = metadata['x_size']
        else:
            print('MetadataMissing: x_size not in tile metadata; using 1024.')
            tile_size_x = 1024

        if 'y_size' in metadata:
            tile_size_y = metadata['y_size']
        else:
            print('MetadataMissing: y_size not in tile metadata; using 1024.')
            tile_size_y = 1024

        if image_size == 0:
            print('TileError: Zero length tile, using black instead: {}'.format(tile_key))
            error_msg = 'Zero length tile'
            enqueue_tile_error(tile_error_queue, tile_key, chunk_key, error_msg)
            tile_img = np.zeros((tile_size_x, tile_size_y), dtype=dtype)
        else:
            try:
                # DP NOTE: Issues when specifying dtype in the asarray function with Pillow ver 8.3.1. 
                # Fixed by separating array instantiation and dtype assignment. 
                tile_img = np.asarray(Image.open(image_bytes))
                tile_img = tile_img.astype(dtype)
            except TypeError as te:
                print('TileError: Incomplete tile, using black instead (tile_size_in_bytes, tile_key): {}, {}'
                      .format(image_size, tile_key))
                error_msg = 'Incomplete tile'
                enqueue_tile_error(tile_error_queue, tile_key, chunk_key, error_msg)
                tile_img = np.zeros((tile_size_x, tile_size_y), dtype=dtype)
            except OSError as oe:
                print('TileError: OSError, using black instead (tile_size_in_bytes, tile_key): {}, {} ErrorMessage: {}'
                      .format(image_size, tile_key, oe))
                error_msg = 'OSError: {}'.format(oe)
                enqueue_tile_error(tile_error_queue, tile_key, chunk_key, error_msg)
                tile_img = np.zeros((tile_size_x, tile_size_y), dtype=dtype)

        data.append(tile_img)
        num_z_slices += 1


    # Make a 3D array of image data.  It is in ZYX order at this point,
    # matching the chunk_data[z, y, x] slicing used below.
    chunk_data = np.array(data)
    del data
    tile_dims = chunk_data.shape

    # Break into Cube instances
    print("Tile Dims: {}".format(tile_dims))
    print("Num Z Slices: {}".format(num_z_slices))
    num_x_cuboids = int(math.ceil(tile_dims[2] / CUBOIDSIZE[proj_info.resolution][0]))
    num_y_cuboids = int(math.ceil(tile_dims[1] / CUBOIDSIZE[proj_info.resolution][1]))

    print("Num X Cuboids: {}".format(num_x_cuboids))
    print("Num Y Cuboids: {}".format(num_y_cuboids))

    chunk_key_parts = BossUtil.decode_chunk_key(chunk_key)
    t_index = chunk_key_parts['t_index']
    for x_idx in range(0, num_x_cuboids):
        for y_idx in range(0, num_y_cuboids):
            # TODO: check time series support
            cube = Cube.create_cube(resource, CUBOIDSIZE[proj_info.resolution])
            cube.zeros()

            # Compute Morton ID
            # TODO: verify Morton indices correct!
            print(chunk_key_parts)
            morton_x_ind = x_idx + (chunk_key_parts["x_index"] * num_x_cuboids)
            morton_y_ind = y_idx + (chunk_key_parts["y_index"] * num_y_cuboids)
            print("Morton X: {}".format(morton_x_ind))
            print("Morton Y: {}".format(morton_y_ind))
            morton_index = XYZMorton([morton_x_ind, morton_y_ind, int(chunk_key_parts['z_index'])])

            # Insert sub-region from chunk_data into cuboid
            x_start = x_idx * CUBOIDSIZE[proj_info.resolution][0]
            x_end = x_start + CUBOIDSIZE[proj_info.resolution][0]
            x_end = min(x_end, tile_dims[2])
            y_start = y_idx * CUBOIDSIZE[proj_info.resolution][1]
            y_end = y_start + CUBOIDSIZE[proj_info.resolution][1]
            y_end = min(y_end, tile_dims[1])
            z_end = CUBOIDSIZE[proj_info.resolution][2]
            # TODO: get sub-array w/o making a copy.
            print("Yrange: {}".format(y_end - y_start))
            print("Xrange: {}".format(x_end - x_start))
            print("X start: {}".format(x_start))
            print("X stop: {}".format(x_end))
            cube.data[0, 0:num_z_slices, 0:(y_end - y_start), 0:(x_end - x_start)] = \
                chunk_data[0:num_z_slices, y_start:y_end, x_start:x_end]

            # Create object key
            object_key = sp.objectio.generate_object_key(resource, proj_info.resolution, t_index, morton_index)
            print("Object Key: {}".format(object_key))

            # Put object in S3
            sp.objectio.put_objects([object_key], [cube.to_blosc()])

            # Add object to index
            sp.objectio.add_cuboid_to_index(object_key, ingest_job=int(msg_data["ingest_job"]))

            # Update id indices if this is an annotation channel
            # We no longer index during ingest.
            #if resource.data['channel']['type'] == 'annotation':
            #   try:
            #       sp.objectio.update_id_indices(
            #           resource, proj_info.resolution, [object_key], [cube.data])
            #   except SpdbError as ex:
            #       sns_client = boto3.client('sns')
            #       topic_arn = msg_data['parameters']["OBJECTIO_CONFIG"]["prod_mailing_list"]
            #       msg = 'During ingest:\n{}\nCollection: {}\nExperiment: {}\n Channel: {}\n'.format(
            #           ex.message,
            #           resource.data['collection']['name'],
            #           resource.data['experiment']['name'],
            #           resource.data['channel']['name'])
            #       sns_client.publish(
            #           TopicArn=topic_arn,
            #           Subject='Object services misuse',
            #           Message=msg)

    lambda_client = boto3.client('lambda', region_name=SETTINGS.REGION_NAME)

    names = AWSNames.from_lambda(context.function_name)

    delete_tiles_data = {
        'tile_key_list': tile_key_list,
        'region': SETTINGS.REGION_NAME,
        'bucket': tile_bucket.bucket.name
    }

    # Delete tiles from tile bucket.
    lambda_client.invoke(
        FunctionName=names.delete_tile_objs.lambda_,
        InvocationType='Event',
        Payload=json.dumps(delete_tiles_data).encode()
    )       

    delete_tile_entry_data = {
        'tile_index': tile_index_db.table.name,
        'region': SETTINGS.REGION_NAME,
        'chunk_key': chunk_key,
        'task_id': msg_data['ingest_job']
    }

    # Delete entry from tile index.
    lambda_client.invoke(
        FunctionName=names.delete_tile_index_entry.lambda_,
        InvocationType='Event',
        Payload=json.dumps(delete_tile_entry_data).encode()
    )       

    if not sqs_triggered:
        # Delete message since it was processed successfully
        ingest_queue.deleteMessage(msg_id, msg_rx_handle)