Example 1
from random import randint

# Presumably used as a pytest yield-fixture (the registering decorator is
# not shown in this excerpt); BossTileIndexDB comes from the host project.
def add_tile_entry(ingest_job):
    """
    Put a fake chunk in the tile index for the given job.  Cleans up fake chunk
    when it goes out of scope.

    Args:
        ingest_job (dict): Job data as returned by the POST method.

    Yields:
        (None)
    """
    tiledb = BossTileIndexDB(ingest_job['collection'] + '&' + ingest_job['experiment'])
    chunk_key = '{}&16&1&2&3&0&0&0&0&0'.format(randint(0, 2000))
    tiledb.createCuboidEntry(chunk_key, ingest_job['id'])
    # Mark some tiles as uploaded.
    for i in range(12):
        tiledb.markTileAsUploaded(chunk_key, 'fake_tile_key_{}'.format(i), ingest_job['id'])
    yield

    # Cleanup.
    print('deleting fake chunk')
    tiledb.deleteCuboid(chunk_key, ingest_job['id'])
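The generator above sets up state, yields once while the test runs, and cleans up afterwards, which is the shape of a pytest yield-fixture. A minimal, self-contained sketch of that pattern (the @pytest.fixture registration and the fixture name here are illustrative assumptions; the excerpt does not show how add_tile_entry is registered):

import pytest

@pytest.fixture
def fake_chunk():
    """Set up a fake resource, hand control to the test, then tear it down."""
    resource = {'chunk_key': '123&16&1&2&3&0&0&0&0&0'}  # fake chunk key
    yield resource      # the test body runs while execution is paused here
    resource.clear()    # cleanup runs after the test returns

def test_uses_fake_chunk(fake_chunk):
    assert fake_chunk['chunk_key'].split('&')[1] == '16'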
Example 2
# update value in the dynamo table
tile_index_db = BossTileIndexDB(proj_info.project_name)
chunk = tile_index_db.getCuboid(metadata["chunk_key"],
                                int(metadata["ingest_job"]))
if chunk:
    print("Updating tile index for chunk_key: {}".format(
        metadata["chunk_key"]))
    chunk_ready = tile_index_db.markTileAsUploaded(metadata["chunk_key"],
                                                   tile_key,
                                                   int(metadata["ingest_job"]))
else:
    # First tile in the chunk
    print("Creating first entry for chunk_key: {}".format(
        metadata["chunk_key"]))
    try:
        tile_index_db.createCuboidEntry(metadata["chunk_key"],
                                        int(metadata["ingest_job"]))
    except ClientError as err:
        # Under _exceptional_ circumstances, it's possible for another lambda
        # to beat the current instance to creating the initial cuboid entry
        # in the index.
        error_code = err.response['Error'].get('Code', 'Unknown')
        if error_code == 'ConditionalCheckFailedException':
            print('Chunk key entry already created - proceeding.')
        else:
            raise
    chunk_ready = tile_index_db.markTileAsUploaded(metadata["chunk_key"],
                                                   tile_key,
                                                   int(metadata["ingest_job"]))

# Ingest the chunk if we have all the tiles.
if chunk_ready:
    ...  # The excerpt ends here; Example 3 shows the full continuation.
Example 3
def process(msg, context, region):
    """
    Process a single message.

    Args:
        msg (dict): Contents described at the top of the file.
        context (Context): Lambda context object.
        region (str): Lambda execution region.
    """

    job_id = int(msg['ingest_job'])
    chunk_key = msg['chunk_key']
    tile_key = msg['tile_key']
    print("Tile key: {}".format(tile_key))

    proj_info = BossIngestProj.fromTileKey(tile_key)

    # Set the job id
    proj_info.job_id = msg['ingest_job']

    print("Data: {}".format(msg))

    # update value in the dynamo table
    tile_index_db = BossTileIndexDB(proj_info.project_name)
    chunk = tile_index_db.getCuboid(chunk_key, job_id)
    if chunk:
        if tile_index_db.cuboidReady(chunk_key, chunk["tile_uploaded_map"]):
            print("Chunk already has all its tiles: {}".format(chunk_key))
            # Go ahead and set up another ingest lambda invocation so this
            # tile entry is deleted once the ingest lambda succeeds.
            chunk_ready = True
        else:
            print("Updating tile index for chunk_key: {}".format(chunk_key))
            chunk_ready = tile_index_db.markTileAsUploaded(chunk_key, tile_key, job_id)
    else:
        # First tile in the chunk
        print("Creating first entry for chunk_key: {}".format(chunk_key))
        try:
            tile_index_db.createCuboidEntry(chunk_key, job_id)
        except ClientError as err:
            # Under _exceptional_ circumstances, it's possible for another lambda
            # to beat the current instance to creating the initial cuboid entry
            # in the index.
            error_code = err.response['Error'].get('Code', 'Unknown')
            if error_code == 'ConditionalCheckFailedException':
                print('Chunk key entry already created - proceeding.')
            else:
                raise
        chunk_ready = tile_index_db.markTileAsUploaded(chunk_key, tile_key, job_id)

    # ingest the chunk if we have all the tiles
    if chunk_ready:
        print("CHUNK READY SENDING MESSAGE: {}".format(chunk_key))
        # Insert a new job in the ingest queue since we have all the tiles
        ingest_queue = IngestQueue(proj_info)
        ingest_queue.sendMessage(json.dumps(msg))

        # Invoke Ingest lambda function
        names = AWSNames.create_from_lambda_name(context.function_name)
        lambda_client = boto3.client('lambda', region_name=region)
        lambda_client.invoke(
            FunctionName=names.tile_ingest_lambda,
            InvocationType='Event',
            Payload=json.dumps(msg).encode())
    else:
        print("Chunk not ready for ingest yet: {}".format(chunk_key))

    print("DONE!")
Example 4
    def test_upload_tile_index_table(self):
        """"""
        ingest_mgmr = IngestManager()
        ingest_mgmr.validate_config_file(self.example_config_data)
        ingest_mgmr.validate_properties()
        ingest_mgmr.owner = self.user.pk
        ingest_job = ingest_mgmr.create_ingest_job()
        assert (ingest_job.id is not None)

        # Get the chunks in this job
        # Get the project information
        bosskey = ingest_job.collection + '&' + ingest_job.experiment + '&' + ingest_job.channel_layer
        lookup_key = (LookUpKey.get_lookup_key(bosskey)).lookup_key
        [col_id, exp_id, ch_id] = lookup_key.split('&')
        project_info = [col_id, exp_id, ch_id]
        proj_name = ingest_job.collection + '&' + ingest_job.experiment
        tile_index_db = BossTileIndexDB(proj_name)
        tilebucket = TileBucket(str(col_id) + '&' + str(exp_id))

        for time_step in range(ingest_job.t_start, ingest_job.t_stop, 1):
            # For each time step, compute the chunks and tile keys

            for z in range(ingest_job.z_start, ingest_job.z_stop, 16):
                for y in range(ingest_job.y_start, ingest_job.y_stop,
                               ingest_job.tile_size_y):
                    for x in range(ingest_job.x_start, ingest_job.x_stop,
                                   ingest_job.tile_size_x):

                        # compute the chunk indices
                        chunk_x = int(x / ingest_job.tile_size_x)
                        chunk_y = int(y / ingest_job.tile_size_y)
                        chunk_z = int(z / 16)

                        # Compute the number of tiles in the chunk
                        if ingest_job.z_stop - z >= 16:
                            num_of_tiles = 16
                        else:
                            num_of_tiles = ingest_job.z_stop - z

                        # Generate the chunk key
                        backend = BossBackend(ingest_mgmr.config)
                        chunk_key = backend.encode_chunk_key(
                            num_of_tiles, project_info, ingest_job.resolution,
                            chunk_x, chunk_y, chunk_z, time_step)
                        # Upload the chunk to the tile index db
                        tile_index_db.createCuboidEntry(
                            chunk_key, ingest_job.id)
                        key_map = {}
                        for tile in range(0, num_of_tiles):
                            # get the object key and upload it
                            #tile_key = tilebucket.encodeObjectKey(ch_id, ingest_job.resolution,
                            #                              chunk_x, chunk_y, tile, time_step)
                            tile_key = 'fakekey' + str(tile)
                            tile_index_db.markTileAsUploaded(
                                chunk_key, tile_key)

                        # for each chunk key, delete entries from the tile_bucket

        # Check if data has been uploaded
        chunks = list(tile_index_db.getTaskItems(ingest_job.id))
        assert (len(chunks) != 0)

        ingest_mgmr.delete_tiles(ingest_job)
        chunks = list(tile_index_db.getTaskItems(ingest_job.id))
        assert (len(chunks) == 0)
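The chunk keys throughout these examples are '&'-delimited strings whose second field is the tile count, and encode_chunk_key receives the tile count, the project ids, the resolution, and the chunk coordinates. A sketch of how such a key could be assembled (the md5 hash prefix is an assumption about BossBackend's scheme, not a confirmed detail):

import hashlib

def encode_chunk_key(num_tiles, project_info, resolution, x, y, z, t=0):
    """Join the chunk fields with '&' and prepend a hash for key spread."""
    base = '&'.join(str(f) for f in
                    [num_tiles] + list(project_info) +
                    [resolution, x, y, z, t])
    prefix = hashlib.md5(base.encode()).hexdigest()  # assumed hash scheme
    return '{}&{}'.format(prefix, base)

# encode_chunk_key(16, ['1', '2', '3'], 0, 0, 0, 0)
# -> '<md5>&16&1&2&3&0&0&0&0&0', matching the shape used above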
Example 5
def process(msg, context, region):
    """
    Process a single message.

    Args:
        msg (dict): Contents described at the top of the file.
        context (Context): Lambda context object.
        region (str): Lambda execution region.
    """

    job_id = int(msg['ingest_job'])
    chunk_key = msg['chunk_key']
    tile_key = msg['tile_key']
    print("Tile key: {}".format(tile_key))

    proj_info = BossIngestProj.fromTileKey(tile_key)

    # Set the job id
    proj_info.job_id = msg['ingest_job']

    print("Data: {}".format(msg))

    # update value in the dynamo table
    tile_index_db = BossTileIndexDB(proj_info.project_name)
    chunk = tile_index_db.getCuboid(chunk_key, job_id)
    if chunk:
        if tile_index_db.cuboidReady(chunk_key, chunk["tile_uploaded_map"]):
            print("Chunk already has all its tiles: {}".format(chunk_key))
            # Go ahead and set up another ingest lambda invocation so this
            # tile entry is deleted once the ingest lambda succeeds.
            chunk_ready = True
        else:
            print("Updating tile index for chunk_key: {}".format(chunk_key))
            chunk_ready = tile_index_db.markTileAsUploaded(chunk_key, tile_key, job_id)
    else:
        # First tile in the chunk
        print("Creating first entry for chunk_key: {}".format(chunk_key))
        try:
            tile_index_db.createCuboidEntry(chunk_key, job_id)
        except ClientError as err:
            # Under _exceptional_ circumstances, it's possible for another lambda
            # to beat the current instance to creating the initial cuboid entry
            # in the index.
            error_code = err.response['Error'].get('Code', 'Unknown')
            if error_code == 'ConditionalCheckFailedException':
                print('Chunk key entry already created - proceeding.')
            else:
                raise
        chunk_ready = tile_index_db.markTileAsUploaded(chunk_key, tile_key, job_id)

    # ingest the chunk if we have all the tiles
    if chunk_ready:
        print("CHUNK READY SENDING MESSAGE: {}".format(chunk_key))
        # Insert a new job in the ingest queue since we have all the tiles
        ingest_queue = IngestQueue(proj_info)
        ingest_queue.sendMessage(json.dumps(msg))

        # Invoke Ingest lambda function
        names = AWSNames.from_lambda(context.function_name)
        lambda_client = boto3.client('lambda', region_name=region)
        lambda_client.invoke(
            FunctionName=names.tile_ingest.lambda_,
            InvocationType='Event',
            Payload=json.dumps(msg).encode())
    else:
        print("Chunk not ready for ingest yet: {}".format(chunk_key))

    print("DONE!")
Example 6
class Test_BossTileIndexDB(unittest.TestCase):
    """
    Note that the chunk keys used for testing do not have real hash keys;
    the rest of each chunk key is valid.
    """
    def setUp(self):
        # Suppress ResourceWarning messages about unclosed connections.
        warnings.simplefilter('ignore')

        with open('nddynamo/schemas/boss_tile_index.json') as fp:
            schema = json.load(fp)

        BossTileIndexDB.createTable(schema,
                                    endpoint_url=settings.DYNAMO_TEST_ENDPOINT)

        self.tileindex_db = BossTileIndexDB(
            nd_proj.project_name, endpoint_url=settings.DYNAMO_TEST_ENDPOINT)

    def tearDown(self):
        BossTileIndexDB.deleteTable(endpoint_url=settings.DYNAMO_TEST_ENDPOINT)

    def test_cuboidReady_false(self):
        fake_map = {'o': 1}
        num_tiles = settings.SUPER_CUBOID_SIZE[2]
        chunk_key = '<hash>&{}&111&222&333&0&0&0&0&0'.format(num_tiles)
        self.assertFalse(self.tileindex_db.cuboidReady(chunk_key, fake_map))

    def test_cuboidReady_true(self):
        fake_map = {
            's1': 1,
            's2': 1,
            's3': 1,
            's4': 1,
            's5': 1,
            's6': 1,
            's7': 1,
            's8': 1,
            's9': 1,
            's10': 1,
            's11': 1,
            's12': 1,
            's13': 1,
            's14': 1,
            's15': 1,
            's16': 1
        }
        num_tiles = settings.SUPER_CUBOID_SIZE[2]
        chunk_key = '<hash>&{}&111&222&333&0&0&0&0&0'.format(num_tiles)
        self.assertTrue(self.tileindex_db.cuboidReady(chunk_key, fake_map))

    def test_cuboidReady_small_cuboid_true(self):
        """Test case where the number of tiles is smaller than a cuboid in the z direction."""
        fake_map = {
            's1': 1,
            's2': 1,
            's3': 1,
            's4': 1,
            's5': 1,
            's6': 1,
            's7': 1,
            's8': 1
        }

        num_tiles = 8
        chunk_key = '<hash>&{}&111&222&333&0&0&0&0&0'.format(num_tiles)
        self.assertTrue(self.tileindex_db.cuboidReady(chunk_key, fake_map))

    def test_cuboidReady_small_cuboid_false(self):
        """Test case where the number of tiles is smaller than a cuboid in the z direction."""
        fake_map = {
            's1': 1,
            's2': 1,
            's3': 1,
            's4': 1,
            's5': 1,
            's6': 1,
            's7': 1
        }

        num_tiles = 8
        chunk_key = '<hash>&{}&111&222&333&0&0&0&0&0'.format(num_tiles)
        self.assertFalse(self.tileindex_db.cuboidReady(chunk_key, fake_map))

    def test_createCuboidEntry(self):
        num_tiles = settings.SUPER_CUBOID_SIZE[2]
        chunk_key = '<hash>&{}&111&222&333&0&0&0&0&0'.format(num_tiles)
        task_id = 21
        self.tileindex_db.createCuboidEntry(chunk_key, task_id)
        resp = self.tileindex_db.getCuboid(chunk_key, task_id)
        self.assertEqual(chunk_key, resp['chunk_key'])
        self.assertEqual({}, resp['tile_uploaded_map'])

    def test_markTileAsUploaded(self):
        # Cuboid must first have an entry before one of its tiles may be marked
        # as uploaded.
        num_tiles = settings.SUPER_CUBOID_SIZE[2]
        chunk_key = '<hash>&{}&111&222&333&0&0&0&0&0'.format(num_tiles)
        task_id = 231
        self.tileindex_db.createCuboidEntry(chunk_key, task_id)

        self.tileindex_db.markTileAsUploaded(chunk_key, 'fakekey&sss', task_id)

        expected = {'fakekey&sss': 1}
        resp = self.tileindex_db.getCuboid(chunk_key, task_id)
        self.assertEqual(expected, resp['tile_uploaded_map'])

    def test_markTileAsUploaded_multiple(self):
        # Cuboid must first have an entry before one of its tiles may be marked
        # as uploaded.
        num_tiles = settings.SUPER_CUBOID_SIZE[2]
        chunk_key = '<hash>&{}&111&222&333&0&0&0&0&0'.format(num_tiles)
        task_id = 231
        self.tileindex_db.createCuboidEntry(chunk_key, task_id)

        self.tileindex_db.markTileAsUploaded(chunk_key, 'fakekey&sss', task_id)

        expected_first = {'fakekey&sss': 1}
        resp = self.tileindex_db.getCuboid(chunk_key, task_id)
        self.assertEqual(expected_first, resp['tile_uploaded_map'])

        expected_second = {'fakekey&sss': 1, 'fakekey&ttt': 1}
        self.tileindex_db.markTileAsUploaded(chunk_key, 'fakekey&ttt', task_id)
        resp = self.tileindex_db.getCuboid(chunk_key, task_id)
        self.assertCountEqual(expected_second, resp['tile_uploaded_map'])

    def test_deleteItem(self):
        num_tiles = settings.SUPER_CUBOID_SIZE[2]
        chunk_key = '<hash>&{}&111&222&333&0&0&0&0&0'.format(num_tiles)
        task_id = 231
        self.tileindex_db.createCuboidEntry(chunk_key, task_id)
        preDelResp = self.tileindex_db.getCuboid(chunk_key, task_id)
        self.assertEqual(chunk_key, preDelResp['chunk_key'])
        self.tileindex_db.deleteCuboid(chunk_key, task_id)
        postDelResp = self.tileindex_db.getCuboid(chunk_key, task_id)
        self.assertIsNone(postDelResp)

    def test_getTaskItems(self):
        num_tiles = settings.SUPER_CUBOID_SIZE[2]
        chunk_key1 = '<hash>&{}&111&222&333&0&0&0&z&t'.format(num_tiles)
        self.tileindex_db.createCuboidEntry(chunk_key1, task_id=3)

        chunk_key2 = '<hash>&{}&111&222&333&0&1&0&z&t'.format(num_tiles)
        self.tileindex_db.createCuboidEntry(chunk_key2, task_id=3)

        chunk_key3 = '<hash>&{}&111&222&333&0&2&0&z&t'.format(num_tiles)
        self.tileindex_db.createCuboidEntry(chunk_key3, task_id=3)

        # Cuboid for a different upload job.
        chunk_key4 = '<hash>&{}&555&666&777&0&0&0&z&t'.format(num_tiles)
        self.tileindex_db.createCuboidEntry(chunk_key4, task_id=5)

        expected = [{
            'task_id': 3,
            'tile_uploaded_map': {},
            'chunk_key': chunk_key1
        }, {
            'task_id': 3,
            'tile_uploaded_map': {},
            'chunk_key': chunk_key2
        }, {
            'task_id': 3,
            'tile_uploaded_map': {},
            'chunk_key': chunk_key3
        }]

        actual = list(self.tileindex_db.getTaskItems(3))

        six.assertCountEqual(self, expected, actual)

    def test_createCuboidAlreadyExistsRaises(self):
        """Raise an error if the chunk key already exists in the index."""
        chunk_key = 'foo'
        task_id = 9999999
        self.tileindex_db.createCuboidEntry(chunk_key, task_id)

        with self.assertRaises(botocore.exceptions.ClientError) as err:
            self.tileindex_db.createCuboidEntry(chunk_key, task_id)
        # These checks must run after the with block; statements placed
        # after the raising call inside the block never execute.
        error_code = err.exception.response['Error'].get('Code', 'Unknown')
        self.assertEqual('ConditionalCheckFailedException', error_code)
Example 7
class Test_BossTileIndexDB(unittest.TestCase):
    """
    Note that the chunk keys used for testing do not have real hash keys;
    the rest of each chunk key is valid.
    """
    def setUp(self):
        # Suppress ResourceWarning messages about unclosed connections.
        warnings.simplefilter("ignore")

        with open("ndingest/nddynamo/schemas/boss_tile_index.json") as fp:
            schema = json.load(fp)

        BossTileIndexDB.createTable(schema,
                                    endpoint_url=settings.DYNAMO_TEST_ENDPOINT)

        self.tileindex_db = BossTileIndexDB(
            nd_proj.project_name, endpoint_url=settings.DYNAMO_TEST_ENDPOINT)

    def tearDown(self):
        BossTileIndexDB.deleteTable(endpoint_url=settings.DYNAMO_TEST_ENDPOINT)

    def test_cuboidReady_false(self):
        fake_map = {"o": 1}
        num_tiles = settings.SUPER_CUBOID_SIZE[2]
        chunk_key = "<hash>&{}&111&222&333&0&0&0&0&0".format(num_tiles)
        self.assertFalse(self.tileindex_db.cuboidReady(chunk_key, fake_map))

    def test_cuboidReady_true(self):
        fake_map = {
            "s1": 1,
            "s2": 1,
            "s3": 1,
            "s4": 1,
            "s5": 1,
            "s6": 1,
            "s7": 1,
            "s8": 1,
            "s9": 1,
            "s10": 1,
            "s11": 1,
            "s12": 1,
            "s13": 1,
            "s14": 1,
            "s15": 1,
            "s16": 1,
        }
        num_tiles = settings.SUPER_CUBOID_SIZE[2]
        chunk_key = "<hash>&{}&111&222&333&0&0&0&0&0".format(num_tiles)
        self.assertTrue(self.tileindex_db.cuboidReady(chunk_key, fake_map))

    def test_cuboidReady_small_cuboid_true(self):
        """Test case where the number of tiles is smaller than a cuboid in the z direction."""
        fake_map = {
            "s1": 1,
            "s2": 1,
            "s3": 1,
            "s4": 1,
            "s5": 1,
            "s6": 1,
            "s7": 1,
            "s8": 1,
        }

        num_tiles = 8
        chunk_key = "<hash>&{}&111&222&333&0&0&0&0&0".format(num_tiles)
        self.assertTrue(self.tileindex_db.cuboidReady(chunk_key, fake_map))

    def test_cuboidReady_small_cuboid_false(self):
        """Test case where the number of tiles is smaller than a cuboid in the z direction."""
        fake_map = {
            "s1": 1,
            "s2": 1,
            "s3": 1,
            "s4": 1,
            "s5": 1,
            "s6": 1,
            "s7": 1
        }

        num_tiles = 8
        chunk_key = "<hash>&{}&111&222&333&0&0&0&0&0".format(num_tiles)
        self.assertFalse(self.tileindex_db.cuboidReady(chunk_key, fake_map))

    def test_createCuboidEntry(self):
        num_tiles = settings.SUPER_CUBOID_SIZE[2]
        chunk_key = "<hash>&{}&111&222&333&0&0&0&0&0".format(num_tiles)
        task_id = 21
        self.tileindex_db.createCuboidEntry(chunk_key, task_id)
        actual = self.tileindex_db.getCuboid(chunk_key, task_id)
        self.assertEqual(chunk_key, actual["chunk_key"])
        self.assertEqual({}, actual[TILE_UPLOADED_MAP_KEY])
        self.assertIn("expires", actual)
        self.assertEqual(task_id, actual["task_id"])
        self.assertTrue(actual["appended_task_id"].startswith(
            "{}_".format(task_id)))

    def test_markTileAsUploaded(self):
        # Cuboid must first have an entry before one of its tiles may be marked
        # as uploaded.
        num_tiles = settings.SUPER_CUBOID_SIZE[2]
        chunk_key = "<hash>&{}&111&222&333&0&0&0&0&0".format(num_tiles)
        task_id = 231
        self.tileindex_db.createCuboidEntry(chunk_key, task_id)

        self.tileindex_db.markTileAsUploaded(chunk_key, "fakekey&sss", task_id)

        expected = {"fakekey&sss": 1}
        resp = self.tileindex_db.getCuboid(chunk_key, task_id)
        self.assertEqual(expected, resp[TILE_UPLOADED_MAP_KEY])

    def test_markTileAsUploaded_multiple(self):
        # Cuboid must first have an entry before one of its tiles may be marked
        # as uploaded.
        num_tiles = settings.SUPER_CUBOID_SIZE[2]
        chunk_key = "<hash>&{}&111&222&333&0&0&0&0&0".format(num_tiles)
        task_id = 231
        self.tileindex_db.createCuboidEntry(chunk_key, task_id)

        self.tileindex_db.markTileAsUploaded(chunk_key, "fakekey&sss", task_id)

        expected_first = {"fakekey&sss": 1}
        resp = self.tileindex_db.getCuboid(chunk_key, task_id)
        self.assertEqual(expected_first, resp[TILE_UPLOADED_MAP_KEY])

        expected_second = {"fakekey&sss": 1, "fakekey&ttt": 1}
        self.tileindex_db.markTileAsUploaded(chunk_key, "fakekey&ttt", task_id)
        resp = self.tileindex_db.getCuboid(chunk_key, task_id)
        self.assertCountEqual(expected_second, resp[TILE_UPLOADED_MAP_KEY])

    def test_deleteItem(self):
        num_tiles = settings.SUPER_CUBOID_SIZE[2]
        chunk_key = "<hash>&{}&111&222&333&0&0&0&0&0".format(num_tiles)
        task_id = 231
        self.tileindex_db.createCuboidEntry(chunk_key, task_id)
        preDelResp = self.tileindex_db.getCuboid(chunk_key, task_id)
        self.assertEqual(chunk_key, preDelResp["chunk_key"])
        self.tileindex_db.deleteCuboid(chunk_key, task_id)
        postDelResp = self.tileindex_db.getCuboid(chunk_key, task_id)
        self.assertIsNone(postDelResp)

    def test_getTaskItems(self):
        num_tiles = settings.SUPER_CUBOID_SIZE[2]
        chunk_key1 = "<hash>&{}&111&222&333&0&0&0&z&t".format(num_tiles)
        self.tileindex_db.createCuboidEntry(chunk_key1, task_id=3)

        chunk_key2 = "<hash>&{}&111&222&333&0&1&0&z&t".format(num_tiles)
        self.tileindex_db.createCuboidEntry(chunk_key2, task_id=3)

        chunk_key3 = "<hash>&{}&111&222&333&0&2&0&z&t".format(num_tiles)
        self.tileindex_db.createCuboidEntry(chunk_key3, task_id=3)

        # Cuboid for a different upload job.
        chunk_key4 = "<hash>&{}&555&666&777&0&0&0&z&t".format(num_tiles)
        self.tileindex_db.createCuboidEntry(chunk_key4, task_id=5)

        expected = [
            {
                "task_id": 3,
                TILE_UPLOADED_MAP_KEY: {},
                "chunk_key": chunk_key1
            },
            {
                "task_id": 3,
                TILE_UPLOADED_MAP_KEY: {},
                "chunk_key": chunk_key2
            },
            {
                "task_id": 3,
                TILE_UPLOADED_MAP_KEY: {},
                "chunk_key": chunk_key3
            },
        ]

        actual = list(self.tileindex_db.getTaskItems(3))
        filtered = [{
            "task_id": i["task_id"],
            TILE_UPLOADED_MAP_KEY: i[TILE_UPLOADED_MAP_KEY],
            "chunk_key": i["chunk_key"],
        } for i in actual]

        six.assertCountEqual(self, expected, filtered)

    def test_getTaskItems_force_multiple_queries(self):
        num_tiles = settings.SUPER_CUBOID_SIZE[2]
        job = 3
        chunk_key1 = "<hash>&{}&111&222&333&0&0&0&z&t".format(num_tiles)
        self.tileindex_db.createCuboidEntry(chunk_key1, task_id=job)

        chunk_key2 = "<hash>&{}&111&222&333&0&1&0&z&t".format(num_tiles)
        self.tileindex_db.createCuboidEntry(chunk_key2, task_id=job)

        chunk_key3 = "<hash>&{}&111&222&333&0&2&0&z&t".format(num_tiles)
        self.tileindex_db.createCuboidEntry(chunk_key3, task_id=job)

        # Cuboid for a different upload job.
        chunk_key4 = "<hash>&{}&555&666&777&0&0&0&z&t".format(num_tiles)
        self.tileindex_db.createCuboidEntry(chunk_key4, task_id=5)

        expected = [
            {
                "task_id": job,
                TILE_UPLOADED_MAP_KEY: {},
                "chunk_key": chunk_key1
            },
            {
                "task_id": job,
                TILE_UPLOADED_MAP_KEY: {},
                "chunk_key": chunk_key2
            },
            {
                "task_id": job,
                TILE_UPLOADED_MAP_KEY: {},
                "chunk_key": chunk_key3
            },
        ]

        # Limit only 1 read per query so multiple queries required.
        query_limit = 1
        actual = list(self.tileindex_db.getTaskItems(job, query_limit))
        filtered = [{
            "task_id": i["task_id"],
            TILE_UPLOADED_MAP_KEY: i[TILE_UPLOADED_MAP_KEY],
            "chunk_key": i["chunk_key"],
        } for i in actual]

        six.assertCountEqual(self, expected, filtered)

    def test_createCuboidAlreadyExistsRaises(self):
        """Raise an error if the chunk key already exists in the index."""
        chunk_key = "foo"
        task_id = 9999999
        self.tileindex_db.createCuboidEntry(chunk_key, task_id)

        with self.assertRaises(botocore.exceptions.ClientError) as err:
            self.tileindex_db.createCuboidEntry(chunk_key, task_id)
        # These checks must run after the with block; statements placed
        # after the raising call inside the block never execute.
        error_code = err.exception.response["Error"].get("Code", "Unknown")
        self.assertEqual("ConditionalCheckFailedException", error_code)