Ejemplo n.º 1
0
    def test_generate_object_keys_iso_anisotropic_above_fork(self):
        """Test to create object key when asking for isotropic data, in an anisotropic channel, above the iso fork"""
        # ``store`` instead of ``os`` so the stdlib module name is not shadowed.
        store = AWSObjectStore(self.object_store_config)

        # At resolution 3 the key carries no ISO marker yet.
        key = store.generate_object_key(self.resource, 3, 2, 56, iso=True)
        assert key == 'cf934dccf1764290fd3db83b9b46b07b&4&3&2&3&2&56'

        # At resolution 5 (above the fork) the ISO marker appears in the key.
        key = store.generate_object_key(self.resource, 5, 2, 56, iso=True)
        assert key == '068e7246f31aacac92ca74923b9da6f1&ISO&4&3&2&5&2&56'
Ejemplo n.º 2
0
    def test_too_many_cuboids_for_id_index(self):
        """
        Test error handling when number of cuboids that contain an id exceeds
        the limits allowed by DynamoDB.

        This test writes 7651 cuboids which causes DynamoDB throttling, so we
        normally skip this test.
        """
        version = 0
        resolution = 0
        time_sample = 0
        resource = BossResourceBasic(data=get_anno_dict())

        # One cuboid per x position along a single row (y == z == 0).
        obj_keys = [
            AWSObjectStore.generate_object_key(
                resource, resolution, time_sample, XYZMorton([x, 0, 0]))
            for x in range(0, 7651)
        ]
        # Just need one non-zero number to represent each cuboid.
        cubes = [np.ones(1, dtype='uint64') for _ in range(0, 7651)]

        with self.assertRaises(SpdbError) as ex:
            self.obj_ind.update_id_indices(resource, resolution, obj_keys,
                                           cubes, version)
        self.assertEqual(ErrorCodes.OBJECT_STORE_ERROR,
                         ex.exception.error_code)
Ejemplo n.º 3
0
    def test_generate_object_keys_iso_isotropic(self):
        """Test to create object key when asking for isotropic data, in an isotropic channel"""
        # Build a resource whose experiment hierarchy is isotropic.
        data = self.setup_helper.get_image8_dict()
        data['experiment']['hierarchy_method'] = "isotropic"
        data['coord_frame']['z_voxel_size'] = 4
        resource = BossResourceBasic(data)

        # ``store`` instead of ``os`` so the stdlib module name is not shadowed.
        store = AWSObjectStore(self.object_store_config)

        # For an isotropic channel no ISO marker is added at any resolution.
        key = store.generate_object_key(resource, 0, 2, 56, iso=True)
        assert key == '631424bf68302b683a0be521101c192b&4&3&2&0&2&56'

        key = store.generate_object_key(resource, 3, 2, 56, iso=True)
        assert key == 'cf934dccf1764290fd3db83b9b46b07b&4&3&2&3&2&56'

        key = store.generate_object_key(resource, 5, 2, 56, iso=True)
        assert key == '831adead1bc05b24d0799206ee9fe832&4&3&2&5&2&56'
Ejemplo n.º 4
0
    def test_generate_object_keys_iso_anisotropic_below_fork(
            self, fake_get_region):
        """Test to create object key when asking for isotropic data, in an anisotropic channel, below the iso fork"""
        # ``store`` instead of ``os`` so the stdlib module name is not shadowed.
        store = AWSObjectStore(self.object_store_config)

        # Below the iso fork the generated key has no ISO marker.
        key = store.generate_object_key(self.resource, 0, 2, 56, iso=True)
        assert key == '631424bf68302b683a0be521101c192b&4&3&2&0&2&56'
Ejemplo n.º 5
0
    def test_get_loose_bounding_box(self):
        """A single id spanning two cuboids yields a loose bounding box that
        covers both cuboids (extent extends one cuboid past the far corner).
        """
        obj_id = 33333  # renamed from ``id`` to avoid shadowing the builtin
        resolution = 0
        time_sample = 0
        version = 0

        [x_cube_dim, y_cube_dim, z_cube_dim] = CUBOIDSIZE[resolution]

        # First cuboid containing the id.
        bytes0 = np.zeros(10, dtype='uint64')
        bytes0[1] = obj_id
        pos0 = [x_cube_dim, 2 * y_cube_dim, 3 * z_cube_dim]
        # Use floor division so the morton indices are ints; positions are
        # exact multiples of the cube dims, so values are unchanged.
        pos_ind0 = [
            pos0[0] // x_cube_dim, pos0[1] // y_cube_dim, pos0[2] // z_cube_dim
        ]
        morton_id0 = XYZMorton(pos_ind0)
        key0 = AWSObjectStore.generate_object_key(self.resource, resolution,
                                                  time_sample, morton_id0)

        # Second cuboid containing the same id.
        bytes1 = np.zeros(4, dtype='uint64')
        bytes1[0] = obj_id  # Pre-existing id.
        pos1 = [3 * x_cube_dim, 5 * y_cube_dim, 6 * z_cube_dim]
        pos_ind1 = [
            pos1[0] // x_cube_dim, pos1[1] // y_cube_dim, pos1[2] // z_cube_dim
        ]
        morton_id1 = XYZMorton(pos_ind1)
        key1 = AWSObjectStore.generate_object_key(self.resource, resolution,
                                                  time_sample, morton_id1)

        self.obj_ind.update_id_indices(self.resource, resolution, [key0, key1],
                                       [bytes0, bytes1], version)

        # Method under test.
        actual = self.obj_ind.get_loose_bounding_box(self.resource, resolution,
                                                     obj_id)

        # Box spans from the near cuboid's origin to one full cuboid past the
        # far cuboid's origin on each axis; single time sample -> [0, 1).
        expected = {
            'x_range': [pos0[0], pos1[0] + x_cube_dim],
            'y_range': [pos0[1], pos1[1] + y_cube_dim],
            'z_range': [pos0[2], pos1[2] + z_cube_dim],
            't_range': [0, 1]
        }
        self.assertEqual(expected, actual)
Ejemplo n.º 6
0
    def test_get_cuboids(self):
        """get_cuboids() returns every object key whose cuboid contains the id."""
        # Build the resource once (original constructed it twice redundantly).
        resource = BossResourceBasic(data=get_anno_dict())
        obj_id = 22222  # renamed from ``id`` to avoid shadowing the builtin
        resolution = 1
        version = 0

        # First cuboid containing the id (renamed from ``bytes`` to avoid
        # shadowing the builtin).
        cuboid_data = np.zeros(10, dtype='uint64')
        cuboid_data[1] = obj_id
        key = AWSObjectStore.generate_object_key(resource, resolution, 0, 56)

        # Second cuboid containing the same, pre-existing id.
        new_cuboid_data = np.zeros(4, dtype='uint64')
        new_cuboid_data[0] = obj_id
        new_key = AWSObjectStore.generate_object_key(resource, resolution, 0,
                                                     59)

        self.obj_ind.update_id_indices(resource, resolution, [key, new_key],
                                       [cuboid_data, new_cuboid_data], version)

        # Method under test.
        actual = self.obj_ind.get_cuboids(resource, resolution, obj_id)

        expected = [key, new_key]
        self.assertCountEqual(expected, actual)
Ejemplo n.º 7
0
    def test_legacy_cuboids_in_id_index(self):
        """Test to verify that legacy and "new" cuboid indices in the ID index table both work

        Returns:

        """
        # NOTE(review): ``bytes`` shadows the builtin; left unchanged here.
        # Non-zero entries are the ids present in this cuboid.
        bytes = np.zeros(10, dtype='uint64')
        bytes[1] = 222
        bytes[2] = 222
        bytes[5] = 555
        bytes[8] = 1001
        # 12345 comes from the legacy index written below, not from ``bytes``.
        expected_ids = ['222', '555', '1001', '12345']
        version = 0
        resource = BossResourceBasic(data=get_anno_dict())
        resolution = 1
        time_sample = 0
        morton_id = 2000
        object_key = AWSObjectStore.generate_object_key(
            resource, resolution, time_sample, morton_id)

        # Write a legacy index
        # Legacy style stores the FULL object key in the cuboid-set string set.
        self.dynamodb.update_item(
            TableName=self.object_store_config["id_index_table"],
            Key={
                'channel-id-key': {
                    'S':
                    self.obj_ind.generate_channel_id_key(
                        resource, resolution, 12345)
                },
                'version': {
                    'N': "{}".format(version)
                }
            },
            UpdateExpression='ADD #cuboidset :objkey',
            ExpressionAttributeNames={'#cuboidset': 'cuboid-set'},
            ExpressionAttributeValues={':objkey': {
                'SS': [object_key]
            }},
            ReturnConsumedCapacity='NONE')

        # Add new index values
        self.obj_ind.update_id_indices(resource, resolution, [object_key],
                                       [bytes], version)

        # Confirm each id has the object_key in its cuboid-set attribute.
        for id in expected_ids:
            cuboid_object_keys = self.obj_ind.get_cuboids(
                resource, resolution, id)
            # Both legacy and new entries should resolve to the same key.
            self.assertEqual(cuboid_object_keys[0], object_key)
Ejemplo n.º 8
0
    def test_get_object_key_parts_iso(self):
        """Test to get an object key parts after the iso split on an anisotropic channel"""
        # ``store`` instead of ``os`` so the stdlib module name is not shadowed.
        store = AWSObjectStore(self.object_store_config)
        object_key = store.generate_object_key(self.resource, 5, 2, 56, iso=True)

        parts = store.get_object_key_parts(object_key)

        # Key layout: hash&ISO&collection&experiment&channel&res&time&morton
        self.assertEqual(object_key, '068e7246f31aacac92ca74923b9da6f1&ISO&4&3&2&5&2&56')
        self.assertEqual("068e7246f31aacac92ca74923b9da6f1", parts.hash)
        self.assertEqual("4", parts.collection_id)
        self.assertEqual("3", parts.experiment_id)
        self.assertEqual("2", parts.channel_id)
        self.assertEqual("5", parts.resolution)
        self.assertEqual("2", parts.time_sample)
        self.assertEqual("56", parts.morton_id)
        self.assertEqual(True, parts.is_iso)
Ejemplo n.º 9
0
    def test_get_object_key_parts(self):
        """Test to get an object key parts"""
        # ``store`` instead of ``os`` so the stdlib module name is not shadowed.
        store = AWSObjectStore(self.object_store_config)
        object_key = store.generate_object_key(self.resource, 0, 2, 56)

        parts = store.get_object_key_parts(object_key)

        # Key layout: hash&collection&experiment&channel&res&time&morton
        self.assertEqual(object_key, '631424bf68302b683a0be521101c192b&4&3&2&0&2&56')
        self.assertEqual("631424bf68302b683a0be521101c192b", parts.hash)
        self.assertEqual("4", parts.collection_id)
        self.assertEqual("3", parts.experiment_id)
        self.assertEqual("2", parts.channel_id)
        self.assertEqual("0", parts.resolution)
        self.assertEqual("2", parts.time_sample)
        self.assertEqual("56", parts.morton_id)
        self.assertEqual(False, parts.is_iso)
Ejemplo n.º 10
0
    def test_update_id_indices_new_entry_for_id_index(self):
        """
        Test adding new ids to the id index.
        """
        # NOTE(review): ``bytes`` shadows the builtin; left unchanged here.
        # Non-zero entries are the ids present in this cuboid (55 appears
        # twice but should index only once).
        bytes = np.zeros(10, dtype='uint64')
        bytes[1] = 20
        bytes[2] = 20
        bytes[5] = 55
        bytes[8] = 1000
        bytes[9] = 55
        expected_ids = ['20', '55', '1000']
        version = 0
        resource = BossResourceBasic(data=get_anno_dict())
        resolution = 1
        time_sample = 0
        morton_id = 20
        object_key = AWSObjectStore.generate_object_key(
            resource, resolution, time_sample, morton_id)

        # Method under test.
        self.obj_ind.update_id_indices(resource, resolution, [object_key],
                                       [bytes], version)

        # Confirm each id has the object_key in its cuboid-set attribute.
        for id in expected_ids:
            key = self.obj_ind.generate_channel_id_key(resource, resolution,
                                                       id)

            # Read the id's row directly from DynamoDB to inspect the raw
            # attribute, rather than going through get_cuboids().
            response = self.dynamodb.get_item(
                TableName=self.object_store_config["id_index_table"],
                Key={
                    'channel-id-key': {
                        'S': key
                    },
                    'version': {
                        'N': "{}".format(version)
                    }
                },
                ConsistentRead=True,
                ReturnConsumedCapacity='NONE')

            self.assertIn('Item', response)
            self.assertIn('cuboid-set', response['Item'])
            self.assertIn('SS', response['Item']['cuboid-set'])
            # "New" index style stores only the trailing morton id portion of
            # the object key in the cuboid-set string set.
            self.assertIn(
                object_key.split("&")[-1],
                response['Item']['cuboid-set']['SS'])
Ejemplo n.º 11
0
    def test_too_many_ids_in_cuboid(self):
        """
        Test error handling when a cuboid has more unique ids than DynamoDB
        can support.
        """
        version = 0
        resolution = 0
        time_sample = 0
        resource = BossResourceBasic(data=get_anno_dict())
        mortonid = XYZMorton([0, 0, 0])
        obj_keys = [
            AWSObjectStore.generate_object_key(resource, resolution,
                                               time_sample, mortonid)
        ]
        # 16*512*512 random values drawn from [0, 2000000) produce far more
        # unique ids than the index can accept for a single cuboid.
        cubes = [
            np.random.randint(2000000, size=(16, 512, 512), dtype='uint64')
        ]

        # If too many ids, the index is skipped, logged, and False is returned to the caller.
        result = self.obj_ind.update_id_indices(resource, resolution, obj_keys,
                                                cubes, version)
        self.assertFalse(result)
Ejemplo n.º 12
0
    def upload_data(self, args):
        """
        Fill the coord frame with random data.

        Args:
            args (dict): This should be the dict returned by get_downsample_args().
        """
        cuboid_size = CUBOIDSIZE[0]
        x_dim = cuboid_size[0]
        y_dim = cuboid_size[1]
        z_dim = cuboid_size[2]

        resource = BossResourceBasic()
        resource.from_dict(self.get_image_dict())
        resolution = 0
        ts = 0
        version = 0

        # DP HACK: uploading all cubes will take longer than the actual downsample
        #          just upload the first volume worth of cubes.
        #          The downsample volume lambda will only read these cubes when
        #          passed the 'test' argument.
        bucket = S3Bucket(self.bosslet_config.session, args['s3_bucket'])
        print('Uploading test data', end='', flush=True)
        for xyz in xyz_range(XYZ(0, 0, 0), XYZ(2, 2, 2)):
            key = AWSObjectStore.generate_object_key(resource, resolution, ts, xyz.morton)
            # Append the version number (was hard-coded "&0"; version == 0).
            key += "&{}".format(version)

            # Loop variable renamed so the Cube object no longer shadows it.
            cube = Cube.create_cube(resource, [x_dim, y_dim, z_dim])
            cube.random()
            data = cube.to_blosc()
            bucket.put(key, data)
            print('.', end='', flush=True)
        print(' Done uploading.')
Ejemplo n.º 13
0
    def upload_data(self, session, args):
        """
        Fill the coord frame with random data.

        Args:
            session: Boto3 session used to access the S3 bucket.
            args (dict): This should be the dict returned by get_downsample_args().
        """
        cuboid_size = CUBOIDSIZE[0]
        x_dim = cuboid_size[0]
        y_dim = cuboid_size[1]
        z_dim = cuboid_size[2]

        resource = BossResourceBasic()
        resource.from_dict(self.get_image_dict())
        resolution = 0
        ts = 0
        version = 0

        # DP HACK: uploading all cubes will take longer than the actual downsample
        #          just upload the first volume worth of cubes.
        #          The downsample volume lambda will only read these cubes when
        #          passed the 'test' argument.
        bucket = S3Bucket(session, args['s3_bucket'])
        print('Uploading test data', end='', flush=True)
        for xyz in xyz_range(XYZ(0, 0, 0), XYZ(2, 2, 2)):
            key = AWSObjectStore.generate_object_key(resource, resolution, ts, xyz.morton)
            # Append the version number (was hard-coded "&0"; version == 0).
            key += "&{}".format(version)

            # Loop variable renamed so the Cube object no longer shadows it.
            cube = Cube.create_cube(resource, [x_dim, y_dim, z_dim])
            cube.random()
            data = cube.to_blosc()
            bucket.put(key, data)
            print('.', end='', flush=True)
        print(' Done uploading.')
Ejemplo n.º 14
0
    def test_update_id_indices_add_new_cuboids_to_existing_ids(self):
        """
        Test that new cuboid object keys are added to the cuboid-set attributes of pre-existing ids.
        """
        # NOTE(review): ``bytes`` shadows the builtin; left unchanged here.
        # Non-zero entries are the ids present in the first cuboid.
        bytes = np.zeros(10, dtype='uint64')
        bytes[1] = 20
        bytes[2] = 20
        bytes[5] = 55
        bytes[8] = 1000
        bytes[9] = 55
        expected_ids = ['20', '55', '1000']
        version = 0
        resource = BossResourceBasic(data=get_anno_dict())
        resolution = 1
        time_sample = 0
        morton_id = 20
        object_key = AWSObjectStore.generate_object_key(
            resource, resolution, time_sample, morton_id)

        # Seed the index with the first cuboid.
        self.obj_ind.update_id_indices(resource, resolution, [object_key],
                                       [bytes], version)

        # Second cuboid re-uses ids 55 and 1000 and introduces 4444.
        new_bytes = np.zeros(4, dtype='uint64')
        new_bytes[0] = 1000  # Pre-existing id.
        new_bytes[1] = 4444
        new_bytes[3] = 55  # Pre-existing id.

        new_morton_id = 90
        new_object_key = AWSObjectStore.generate_object_key(
            resource, resolution, time_sample, new_morton_id)

        # Method under test.
        self.obj_ind.update_id_indices(resource, resolution, [new_object_key],
                                       [new_bytes], version)

        # Confirm cuboids for id 55.
        key55 = self.obj_ind.generate_channel_id_key(resource, resolution, 55)

        # Read the id's row directly from DynamoDB to inspect the raw attribute.
        response = self.dynamodb.get_item(
            TableName=self.object_store_config["id_index_table"],
            Key={
                'channel-id-key': {
                    'S': key55
                },
                'version': {
                    'N': '{}'.format(version)
                }
            },
            ConsistentRead=True,
            ReturnConsumedCapacity='NONE')

        self.assertIn('Item', response)
        self.assertIn('cuboid-set', response['Item'])
        self.assertIn('SS', response['Item']['cuboid-set'])
        # Check that mortons are there since using "new" index style
        self.assertIn(
            object_key.split("&")[-1], response['Item']['cuboid-set']['SS'])
        self.assertIn(
            new_object_key.split("&")[-1],
            response['Item']['cuboid-set']['SS'])

        # Confirm cuboids for id 1000.
        key1000 = self.obj_ind.generate_channel_id_key(resource, resolution,
                                                       1000)

        response2 = self.dynamodb.get_item(
            TableName=self.object_store_config["id_index_table"],
            Key={
                'channel-id-key': {
                    'S': key1000
                },
                'version': {
                    'N': '{}'.format(version)
                }
            },
            ConsistentRead=True,
            ReturnConsumedCapacity='NONE')

        self.assertIn('Item', response2)
        self.assertIn('cuboid-set', response2['Item'])
        self.assertIn('SS', response2['Item']['cuboid-set'])
        # Check that mortons are there since using "new" index style
        self.assertIn(
            object_key.split("&")[-1], response2['Item']['cuboid-set']['SS'])
        self.assertIn(
            new_object_key.split("&")[-1],
            response2['Item']['cuboid-set']['SS'])
Ejemplo n.º 15
0
    def test_generate_object_keys(self):
        """Test to create object keys"""
        # ``store`` instead of ``os`` so the stdlib module name is not shadowed.
        store = AWSObjectStore(self.object_store_config)

        key = store.generate_object_key(self.resource, 0, 2, 56)
        assert key == '631424bf68302b683a0be521101c192b&4&3&2&0&2&56'
Ejemplo n.º 16
0
    def test_generate_object_keys_iso_anisotropic_below_fork(self):
        """Test to create object key when asking for isotropic data, in an anisotropic channel, below the iso fork"""
        # ``store`` instead of ``os`` so the stdlib module name is not shadowed.
        store = AWSObjectStore(self.object_store_config)

        # Below the iso fork the generated key has no ISO marker.
        key = store.generate_object_key(self.resource, 0, 2, 56, iso=True)
        assert key == '631424bf68302b683a0be521101c192b&4&3&2&0&2&56'