Example #1
    def test_generate_object_keys_iso_anisotropic_below_fork(
            self, fake_get_region):
        """Test to create object key when asking for isotropic data, in an anisotropic channel, below the iso fork"""
        os = AWSObjectStore(self.object_store_config)
        object_keys = os.generate_object_key(self.resource, 0, 2, 56, iso=True)

        assert object_keys == '631424bf68302b683a0be521101c192b&4&3&2&0&2&56'
Example #2
    def test_generate_object_keys_iso_anisotropic_above_fork(self):
        """Test to create object key when asking for isotropic data, in an anisotropic channel, above the iso fork"""
        os = AWSObjectStore(self.object_store_config)
        object_keys = os.generate_object_key(self.resource, 3, 2, 56, iso=True)
        assert object_keys == 'cf934dccf1764290fd3db83b9b46b07b&4&3&2&3&2&56'

        object_keys = os.generate_object_key(self.resource, 5, 2, 56, iso=True)
        assert object_keys == '068e7246f31aacac92ca74923b9da6f1&ISO&4&3&2&5&2&56'
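Taken together, Examples #1 and #2 suggest how the object key is assembled: the 'ISO' marker only appears when isotropic data is requested from an anisotropic channel at or above the iso fork resolution, and the 32-hex-character prefix looks like an MD5 digest of the rest of the key. A minimal sketch under those assumptions (build_object_key, the anisotropic flag, and iso_fork_resolution are hypothetical names, not the real API):

import hashlib

def build_object_key(lookup_key, resolution, time_sample, morton_id,
                     iso=False, anisotropic=True, iso_fork_resolution=4):
    """Hypothetical sketch of the key layout these assertions expect."""
    base_key = '{}&{}&{}&{}'.format(lookup_key, resolution, time_sample, morton_id)
    # The ISO marker is only added for anisotropic channels at or above the
    # fork resolution; below it, iso and non-iso keys are identical (Example #1).
    if iso and anisotropic and resolution >= iso_fork_resolution:
        base_key = 'ISO&' + base_key
    # Assumption: the leading hash is an MD5 digest of the remainder of the
    # key, suggested by its 32-hex-character length in Examples #1 and #2.
    return '{}&{}'.format(hashlib.md5(base_key.encode()).hexdigest(), base_key)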
Example #3
    def test_object_to_cached_cuboid_keys(self):
        """Test to check key conversion from cached cuboid to object"""

        object_keys = ['a4931d58076dc47773957809380f206e4228517c9fa6daed536043782024e480&1&1&1&0&0&12',
                       'f2b449f7e247c8aec6ecf754388a65ee6ea9dc245cd5ef149aebb2e0d20b4251&1&1&1&0&0&13']

        os = AWSObjectStore(self.object_store_config)
        cached_cuboid_keys = os.object_to_cached_cuboid_keys(object_keys)

        assert len(cached_cuboid_keys) == 2
        assert cached_cuboid_keys[0] == "CACHED-CUBOID&1&1&1&0&0&12"
        assert cached_cuboid_keys[1] == "CACHED-CUBOID&1&1&1&0&0&13"
Example #4
    def test_write_cuboid_to_object_str(self):
        """Test to check key conversion from write cuboid to object keys when a string instead of a list is passed"""

        write_cuboid_keys = "WRITE-CUBOID&1&1&1&0&0&12&SDFJlskDJasdfniasdf"

        os = AWSObjectStore(self.object_store_config)
        object_keys = os.write_cuboid_to_object_keys(write_cuboid_keys)

        assert len(object_keys) == 1
        assert object_keys[0] == '6b5ebb14395dec6cd9d7edaa1fbcd748&1&1&1&0&0&12'
Example #5
    def test_object_to_cached_cuboid_keys_str(self):
        """Test to check key conversion from cached cuboid to object when a string instead of a list is passed"""

        object_keys = 'a4931d58076dc47773957809380f206e4228517c9fa6daed536043782024e480&1&1&1&0&0&12'

        os = AWSObjectStore(self.object_store_config)
        cached_cuboid_keys = os.object_to_cached_cuboid_keys(object_keys)

        assert len(cached_cuboid_keys) == 1
        assert cached_cuboid_keys[0] == "CACHED-CUBOID&1&1&1&0&0&12"
Example #6
    def test_cached_cuboid_to_object_keys_str(self):
        """Test to check key conversion from cached cuboid to object"""

        cached_cuboid_keys = "CACHED-CUBOID&1&1&1&0&0&12"

        os = AWSObjectStore(self.object_store_config)
        object_keys = os.cached_cuboid_to_object_keys(cached_cuboid_keys)

        assert len(object_keys) == 1
        assert object_keys[0] == '6b5ebb14395dec6cd9d7edaa1fbcd748&1&1&1&0&0&12'
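Examples #4 through #6 all exercise the same convenience behavior: a bare string key is accepted anywhere a list of keys is expected. A minimal sketch of that normalization idiom (the helper name is hypothetical):

def _normalize_key_list(keys):
    # Wrap a bare string so downstream code can always iterate over a list.
    if isinstance(keys, str):
        keys = [keys]
    return keys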
Example #7
    def test_get_object_keys(self):
        """Test to get object keys for the cuboids in a region"""
        os = AWSObjectStore(self.object_store_config)
        cuboid_bounds = Region.Cuboids(range(2, 3), range(2, 3), range(2, 3))
        resolution = 0

        expected = ['631424bf68302b683a0be521101c192b&4&3&2&0&2&56']
        actual = os._get_object_keys(
            self.resource, resolution, cuboid_bounds, t_range=[2, 3])

        assert expected == actual
Example #8
    def test_cached_cuboid_to_object_keys(self):
        """Test to check key conversion from cached cuboid to object"""

        cached_cuboid_keys = ["CACHED-CUBOID&1&1&1&0&0&12", "CACHED-CUBOID&1&1&1&0&0&13"]

        os = AWSObjectStore(self.object_store_config)
        object_keys = os.cached_cuboid_to_object_keys(cached_cuboid_keys)

        assert len(object_keys) == 2
        assert object_keys[0] == '6b5ebb14395dec6cd9d7edaa1fbcd748&1&1&1&0&0&12'
        assert object_keys[1] == '592ed5f40528bb16bce769fed5b2e9c6&1&1&1&0&0&13'
Example #9
    def test_write_cuboid_to_object_keys(self):
        """Test to check key conversion from cached cuboid to object"""

        write_cuboid_keys = ["WRITE-CUBOID&1&1&1&0&0&12&SDFJlskDJasdfniasdf",
                             "WRITE-CUBOID&1&1&1&0&0&13&KJHDLFHjsdhfshdfhsdfdsf"]

        os = AWSObjectStore(self.object_store_config)
        object_keys = os.write_cuboid_to_object_keys(write_cuboid_keys)

        assert len(object_keys) == 2
        assert object_keys[0] == '6b5ebb14395dec6cd9d7edaa1fbcd748&1&1&1&0&0&12'
        assert object_keys[1] == '592ed5f40528bb16bce769fed5b2e9c6&1&1&1&0&0&13'
Example #10
    def add_lookup_key(self, item):
        """
        Using the given item from the S3 index table, extract the lookup key
        from the object key and write it back to the item as a new attribute.

        If throttled, will use exponential backoff and retry 5 times.

        Args:
            item (dict): An item from the response dictionary returned by DynamoDB.Client.scan().
        """
        if OBJ_KEY not in item or 'S' not in item[OBJ_KEY]:
            return

        if VERSION_NODE not in item:
            return

        parts = AWSObjectStore.get_object_key_parts(item[OBJ_KEY]['S'])
        lookup_key = AWSObjectStore.generate_lookup_key(
            parts.collection_id, parts.experiment_id, parts.channel_id,
            parts.resolution)

        NUM_RETRIES = 5
        for backoff in range(0, NUM_RETRIES + 1):
            try:
                self.dynamodb.update_item(
                    TableName=self.table,
                    Key={OBJ_KEY: item[OBJ_KEY], VERSION_NODE: item[VERSION_NODE]},
                    ExpressionAttributeNames={'#lookupkey': LOOKUP_KEY},
                    ExpressionAttributeValues={':lookupkey': {'S': lookup_key}},
                    UpdateExpression='set #lookupkey = :lookupkey'
                )
                return
            except botocore.exceptions.ClientError as ex:
                if ex.response['Error']['Code'] == 'ProvisionedThroughputExceededException':
                    print('Throttled during update of item: {} - {}'.format(
                        item[OBJ_KEY]['S'], item[VERSION_NODE]['N']))
                    time.sleep(((2 ** backoff) + (random.randint(0, 1000) / 1000.0))/10.0)
                else:
                    print('Failed updating item: {} - {}'.format(
                        item[OBJ_KEY]['S'], item[VERSION_NODE]['N']))
                    raise
            except Exception:
                print('Failed updating item: {} - {}'.format(
                    item[OBJ_KEY]['S'], item[VERSION_NODE]['N']))
                raise

        print('Failed and giving up after {} retries trying to update item: {} - {}'
              .format(NUM_RETRIES, item[OBJ_KEY]['S'], item[VERSION_NODE]['N']))
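A hedged usage sketch for add_lookup_key(): drive it over every item in the S3 index table with a boto3 scan paginator. The driver function below is hypothetical; dynamodb and table_name correspond to the self.dynamodb and self.table attributes used above.

def add_lookup_keys_to_table(indexer, dynamodb, table_name):
    """Hypothetical driver: backfill the lookup key on every indexed item."""
    paginator = dynamodb.get_paginator('scan')
    for page in paginator.paginate(TableName=table_name):
        for item in page['Items']:
            indexer.add_lookup_key(item)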
Example #11
    def test_get_object_key_parts(self):
        """Test to get an object key parts"""
        os = AWSObjectStore(self.object_store_config)
        object_key = os.generate_object_key(self.resource, 0, 2, 56)

        parts = os.get_object_key_parts(object_key)

        self.assertEqual(object_key, '631424bf68302b683a0be521101c192b&4&3&2&0&2&56')
        self.assertEqual(parts.hash, "631424bf68302b683a0be521101c192b")
        self.assertEqual(parts.collection_id, "4")
        self.assertEqual(parts.experiment_id, "3")
        self.assertEqual(parts.channel_id, "2")
        self.assertEqual(parts.resolution, "0")
        self.assertEqual(parts.time_sample, "2")
        self.assertEqual(parts.morton_id, "56")
        self.assertEqual(parts.is_iso, False)
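The assertions above pin down the key layout exactly, so the parsing logic can be sketched with a namedtuple. ObjectKeyParts and parse_object_key are hypothetical names; the real class presumably exposes the same attribute names the assertions use.

from collections import namedtuple

ObjectKeyParts = namedtuple(
    'ObjectKeyParts',
    ['hash', 'is_iso', 'collection_id', 'experiment_id', 'channel_id',
     'resolution', 'time_sample', 'morton_id'])

def parse_object_key(key):
    # Split on '&' and account for the optional ISO marker (see Example #13).
    fields = key.split('&')
    is_iso = fields[1] == 'ISO'
    if is_iso:
        fields = [fields[0]] + fields[2:]
    return ObjectKeyParts(fields[0], is_iso, *fields[1:])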
Example #12
    def test_object_key_chunks(self, fake_get_region):
        """Test method to return object keys in chunks"""
        keys = ['1', '2', '3', '4', '5', '6', '7']
        expected = [['1', '2', '3'], ['4', '5', '6'], ['7']]

        for cnt, chunk in enumerate(AWSObjectStore.object_key_chunks(keys, 3)):
            assert chunk == expected[cnt]
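A generator consistent with this test (and its duplicate in Example #17 below) takes only a few lines; this is a sketch, not necessarily the actual implementation:

def object_key_chunks(keys, chunk_size):
    # Yield consecutive slices of at most chunk_size keys.
    for i in range(0, len(keys), chunk_size):
        yield keys[i:i + chunk_size]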
Example #13
    def test_get_object_key_parts_iso(self):
        """Test to get an object key parts after the iso split on an anisotropic channel"""
        os = AWSObjectStore(self.object_store_config)
        object_key = os.generate_object_key(self.resource, 5, 2, 56, iso=True)

        parts = os.get_object_key_parts(object_key)

        self.assertEqual(object_key, '068e7246f31aacac92ca74923b9da6f1&ISO&4&3&2&5&2&56')
        self.assertEqual(parts.hash, "068e7246f31aacac92ca74923b9da6f1")
        self.assertEqual(parts.collection_id, "4")
        self.assertEqual(parts.experiment_id, "3")
        self.assertEqual(parts.channel_id, "2")
        self.assertEqual(parts.resolution, "5")
        self.assertEqual(parts.time_sample, "2")
        self.assertEqual(parts.morton_id, "56")
        self.assertEqual(parts.is_iso, True)
Example #14
    def test_generate_object_keys_iso_isotropic(self):
        """Test to create object key when asking for isotropic data, in an isotropic channel"""
        data = self.setup_helper.get_image8_dict()
        data['experiment']['hierarchy_method'] = "isotropic"
        data['coord_frame']['z_voxel_size'] = 4
        resource = BossResourceBasic(data)

        os = AWSObjectStore(self.object_store_config)
        object_keys = os.generate_object_key(resource, 0, 2, 56, iso=True)
        assert object_keys == '631424bf68302b683a0be521101c192b&4&3&2&0&2&56'

        object_keys = os.generate_object_key(resource, 3, 2, 56, iso=True)
        assert object_keys == 'cf934dccf1764290fd3db83b9b46b07b&4&3&2&3&2&56'

        object_keys = os.generate_object_key(resource, 5, 2, 56, iso=True)
        assert object_keys == '831adead1bc05b24d0799206ee9fe832&4&3&2&5&2&56'
Example #15
    def test_too_many_cuboids_for_id_index(self):
        """
        Test error handling when number of cuboids that contain an id exceeds
        the limits allowed by DynamoDB.  
        
        This test writes 7651 cuboids which causes DynamoDB throttling, so we 
        normally skip this test.  
        """
        version = 0
        resolution = 0
        time_sample = 0
        resource = BossResourceBasic(data=get_anno_dict())
        y = 0
        z = 0
        obj_keys = []
        cubes = []

        for x in range(0, 7651):
            mortonid = XYZMorton([x, y, z])
            obj_keys.append(
                AWSObjectStore.generate_object_key(resource, resolution,
                                                   time_sample, mortonid))
            # Just need one non-zero number to represent each cuboid.
            cubes.append(np.ones(1, dtype='uint64'))

        with self.assertRaises(SpdbError) as ex:
            self.obj_ind.update_id_indices(resource, resolution, obj_keys,
                                           cubes, version)
        self.assertEqual(ErrorCodes.OBJECT_STORE_ERROR,
                         ex.exception.error_code)
Example #16
    def setUp(self):
        """ Copy params from the Layer setUpClass
        """
        self.data = self.layer.setup_helper.get_anno64_dict()
        # Ensure that a random channel id is used so tests don't stomp on
        # each other.
        self.data['lookup_key'] = "1&2&{}".format(random.randint(3, 999))
        # Expand coord frame to fit test data.  This must be sized properly or
        # loose bounding box calculation will fail.
        self.data['coord_frame']['x_stop'] = 10000
        self.data['coord_frame']['y_stop'] = 10000
        self.data['coord_frame']['z_stop'] = 10000
        self.resource = BossResourceBasic(self.data)

        self.kvio_config = self.layer.kvio_config
        self.state_config = self.layer.state_config
        self.object_store_config = self.layer.object_store_config
        self.region = 'us-east-1'

        self.endpoint_url = None
        if 'LOCAL_DYNAMODB_URL' in os.environ:
            self.endpoint_url = os.environ['LOCAL_DYNAMODB_URL']

        self.dynamodb = boto3.client('dynamodb',
                                     region_name=self.region,
                                     endpoint_url=self.endpoint_url)

        self.obj_ind = ObjectIndices(
            self.object_store_config["s3_index_table"],
            self.object_store_config["id_index_table"],
            self.object_store_config["id_count_table"],
            self.object_store_config["cuboid_bucket"], self.region,
            self.endpoint_url)

        self.obj_store = AWSObjectStore(self.object_store_config)
Example #17
    def test_object_key_chunks(self):
        """Test method to return object keys in chunks"""
        keys = ['1', '2', '3', '4', '5', '6', '7']
        expected = [['1', '2', '3'],
                    ['4', '5', '6'],
                    ['7']]

        for cnt, chunk in enumerate(AWSObjectStore.object_key_chunks(keys, 3)):
            assert chunk == expected[cnt]
Example #18
    def test_add_cuboid_to_index(self):
        """Test method to compute final object key and add to S3"""
        dummy_key = "SLDKFJDSHG&1&1&1&0&0&12"
        os = AWSObjectStore(self.object_store_config)
        os.add_cuboid_to_index(dummy_key)

        # Get item
        dynamodb = boto3.client('dynamodb', region_name=get_region())
        response = dynamodb.get_item(
            TableName=self.object_store_config['s3_index_table'],
            Key={'object-key': {'S': dummy_key},
                 'version-node': {'N': "0"}},
            ReturnConsumedCapacity='NONE'
        )

        assert response['Item']['object-key']['S'] == dummy_key
        assert response['Item']['version-node']['N'] == "0"
        assert response['Item']['ingest-job-hash']['S'] == '1'
        assert response['Item']['ingest-job-range']['S'] == '1&1&0&0'
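The ingest-job attributes checked above appear to be derived directly from the object key parts: the collection id becomes the hash value and experiment&channel&resolution&time_sample the range value. A hedged sketch of that derivation (variable names are illustrative):

key = "SLDKFJDSHG&1&1&1&0&0&12"
parts = key.split('&')                    # [hash, coll, exp, chan, res, t, morton]
ingest_job_hash = parts[1]                # collection id -> '1'
ingest_job_range = '&'.join(parts[2:6])   # exp&chan&res&time -> '1&1&0&0'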
Example #19
    def test_cuboids_exist_with_cache_miss(self, fake_get_region):
        """Test method for checking if cuboids exist in S3 index while supporting
        the cache miss key index parameter"""
        os = AWSObjectStore(self.object_store_config)

        expected_keys = [
            "CACHED-CUBOID&1&1&1&0&0&12", "CACHED-CUBOID&1&1&1&0&0&13",
            "CACHED-CUBOID&1&1&1&0&0&14"
        ]
        test_keys = [
            "CACHED-CUBOID&1&1&1&0&0&100", "CACHED-CUBOID&1&1&1&0&0&13",
            "CACHED-CUBOID&1&1&1&0&0&14", "CACHED-CUBOID&1&1&1&0&0&15"
        ]

        expected_object_keys = os.cached_cuboid_to_object_keys(expected_keys)

        # Populate table
        for k in expected_object_keys:
            os.add_cuboid_to_index(k)

        # Check for keys
        exist_keys, missing_keys = os.cuboids_exist(test_keys, [1, 2])

        assert exist_keys == [1, 2]
        assert missing_keys == []
Example #20
    def test_get_loose_bounding_box(self):
        id = 33333
        resolution = 0
        time_sample = 0
        version = 0

        [x_cube_dim, y_cube_dim, z_cube_dim] = CUBOIDSIZE[resolution]

        bytes0 = np.zeros(10, dtype='uint64')
        bytes0[1] = id
        pos0 = [x_cube_dim, 2 * y_cube_dim, 3 * z_cube_dim]
        # Use integer division so XYZMorton receives ints.
        pos_ind0 = [
            pos0[0] // x_cube_dim, pos0[1] // y_cube_dim, pos0[2] // z_cube_dim
        ]
        morton_id0 = XYZMorton(pos_ind0)
        key0 = AWSObjectStore.generate_object_key(self.resource, resolution,
                                                  time_sample, morton_id0)

        bytes1 = np.zeros(4, dtype='uint64')
        bytes1[0] = id  # Pre-existing id.
        pos1 = [3 * x_cube_dim, 5 * y_cube_dim, 6 * z_cube_dim]
        pos_ind1 = [
            pos1[0] // x_cube_dim, pos1[1] // y_cube_dim, pos1[2] // z_cube_dim
        ]
        morton_id1 = XYZMorton(pos_ind1)
        key1 = AWSObjectStore.generate_object_key(self.resource, resolution,
                                                  time_sample, morton_id1)

        self.obj_ind.update_id_indices(self.resource, resolution, [key0, key1],
                                       [bytes0, bytes1], version)

        actual = self.obj_ind.get_loose_bounding_box(self.resource, resolution,
                                                     id)
        expected = {
            'x_range': [pos0[0], pos1[0] + x_cube_dim],
            'y_range': [pos0[1], pos1[1] + y_cube_dim],
            'z_range': [pos0[2], pos1[2] + z_cube_dim],
            't_range': [0, 1]
        }
        self.assertEqual(expected, actual)
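The expected ranges above are cuboid-aligned: each axis spans from the smallest containing cuboid's origin to one cuboid past the largest. A sketch of that computation, assuming MortonXYZ is the inverse of the XYZMorton call used in the test:

def loose_bounding_box(morton_ids, cube_dims):
    # Decode each cuboid's x/y/z index, then scale by the cuboid dimensions.
    x_dim, y_dim, z_dim = cube_dims
    coords = [MortonXYZ(m) for m in morton_ids]
    xs = [c[0] for c in coords]
    ys = [c[1] for c in coords]
    zs = [c[2] for c in coords]
    return {
        'x_range': [min(xs) * x_dim, (max(xs) + 1) * x_dim],
        'y_range': [min(ys) * y_dim, (max(ys) + 1) * y_dim],
        'z_range': [min(zs) * z_dim, (max(zs) + 1) * z_dim],
        't_range': [0, 1],
    }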
Example #21
    def test_get_cuboids(self):
        resource = BossResourceBasic(data=get_anno_dict())
        id = 22222
        bytes = np.zeros(10, dtype='uint64')
        bytes[1] = id
        resolution = 1
        key = AWSObjectStore.generate_object_key(resource, resolution, 0, 56)
        version = 0

        new_bytes = np.zeros(4, dtype='uint64')
        new_bytes[0] = id  # Pre-existing id.
        new_key = AWSObjectStore.generate_object_key(resource, resolution, 0,
                                                     59)

        self.obj_ind.update_id_indices(resource, resolution, [key, new_key],
                                       [bytes, new_bytes], version)

        # Method under test.
        actual = self.obj_ind.get_cuboids(resource, resolution, id)

        expected = [key, new_key]
        self.assertCountEqual(expected, actual)
Example #22
    def test_legacy_cuboids_in_id_index(self):
        """Tet to verify that legacy and "new" cuboid indices in the ID index table both work

        Returns:

        """
        bytes = np.zeros(10, dtype='uint64')
        bytes[1] = 222
        bytes[2] = 222
        bytes[5] = 555
        bytes[8] = 1001
        expected_ids = ['222', '555', '1001', '12345']
        version = 0
        resource = BossResourceBasic(data=get_anno_dict())
        resolution = 1
        time_sample = 0
        morton_id = 2000
        object_key = AWSObjectStore.generate_object_key(
            resource, resolution, time_sample, morton_id)

        # Write a legacy index
        self.dynamodb.update_item(
            TableName=self.object_store_config["id_index_table"],
            Key={
                'channel-id-key': {
                    'S':
                    self.obj_ind.generate_channel_id_key(
                        resource, resolution, 12345)
                },
                'version': {
                    'N': "{}".format(version)
                }
            },
            UpdateExpression='ADD #cuboidset :objkey',
            ExpressionAttributeNames={'#cuboidset': 'cuboid-set'},
            ExpressionAttributeValues={':objkey': {
                'SS': [object_key]
            }},
            ReturnConsumedCapacity='NONE')

        # Add new index values
        self.obj_ind.update_id_indices(resource, resolution, [object_key],
                                       [bytes], version)

        # Confirm each id has the object_key in its cuboid-set attribute.
        for id in expected_ids:
            cuboid_object_keys = self.obj_ind.get_cuboids(
                resource, resolution, id)
            self.assertEqual(cuboid_object_keys[0], object_key)
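This test implies get_cuboids() must accept both entry styles in the cuboid-set attribute: legacy entries hold a full object key, while new-style entries hold only the morton id (see Example #23). A hedged sketch of how one entry could be normalized back to an object key (the helper is hypothetical):

def cuboid_set_entry_to_key(entry, resource, resolution, time_sample):
    if '&' in entry:
        # Legacy style: the full object key was stored.
        return entry
    # New style: only the morton id was stored; rebuild the key.
    return AWSObjectStore.generate_object_key(
        resource, resolution, time_sample, int(entry))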
Example #23
    def test_update_id_indices_new_entry_for_id_index(self):
        """
        Test adding new ids to the id index.
        """
        bytes = np.zeros(10, dtype='uint64')
        bytes[1] = 20
        bytes[2] = 20
        bytes[5] = 55
        bytes[8] = 1000
        bytes[9] = 55
        expected_ids = ['20', '55', '1000']
        version = 0
        resource = BossResourceBasic(data=get_anno_dict())
        resolution = 1
        time_sample = 0
        morton_id = 20
        object_key = AWSObjectStore.generate_object_key(
            resource, resolution, time_sample, morton_id)

        # Method under test.
        self.obj_ind.update_id_indices(resource, resolution, [object_key],
                                       [bytes], version)

        # Confirm each id has the object_key in its cuboid-set attribute.
        for id in expected_ids:
            key = self.obj_ind.generate_channel_id_key(resource, resolution,
                                                       id)

            response = self.dynamodb.get_item(
                TableName=self.object_store_config["id_index_table"],
                Key={
                    'channel-id-key': {
                        'S': key
                    },
                    'version': {
                        'N': "{}".format(version)
                    }
                },
                ConsistentRead=True,
                ReturnConsumedCapacity='NONE')

            self.assertIn('Item', response)
            self.assertIn('cuboid-set', response['Item'])
            self.assertIn('SS', response['Item']['cuboid-set'])
            self.assertIn(
                object_key.split("&")[-1],
                response['Item']['cuboid-set']['SS'])
Example #24
    def test_put_get_single_object(self):
        """Method to test putting and getting objects to and from S3"""
        os = AWSObjectStore(self.object_store_config)

        cached_cuboid_keys = ["CACHED-CUBOID&1&1&1&0&0&12"]
        fake_data = [b"aaaadddffffaadddfffaadddfff"]

        object_keys = os.cached_cuboid_to_object_keys(cached_cuboid_keys)

        os.put_objects(object_keys, fake_data)

        returned_data = os.get_single_object(object_keys[0])
        assert fake_data[0] == returned_data
Example #25
    def test_put_get_objects_synchronous(self):
        """Method to test putting and getting objects to and from S3"""
        os = AWSObjectStore(self.object_store_config)

        cached_cuboid_keys = ["CACHED-CUBOID&1&1&1&0&0&12", "CACHED-CUBOID&1&1&1&0&0&13"]
        fake_data = [b"aaaadddffffaadddfffaadddfff", b"fffddaaffddffdfffaaa"]

        object_keys = os.cached_cuboid_to_object_keys(cached_cuboid_keys)

        os.put_objects(object_keys, fake_data)

        returned_data = os.get_objects(object_keys)
        for rdata, sdata in zip(returned_data, fake_data):
            assert rdata == sdata
Example #26
    def test_too_many_ids_in_cuboid(self):
        """
        Test error handling when a cuboid has more unique ids than DynamoDB
        can support.
        """
        version = 0
        resolution = 0
        time_sample = 0
        resource = BossResourceBasic(data=get_anno_dict())
        mortonid = XYZMorton([0, 0, 0])
        obj_keys = [
            AWSObjectStore.generate_object_key(resource, resolution,
                                               time_sample, mortonid)
        ]
        cubes = [
            np.random.randint(2000000, size=(16, 512, 512), dtype='uint64')
        ]

        # If too many ids, the index is skipped, logged, and False is returned to the caller.
        result = self.obj_ind.update_id_indices(resource, resolution, obj_keys,
                                                cubes, version)
        self.assertFalse(result)
Example #27
    def upload_data(self, args):
        """
        Fill the coord frame with random data.

        Args:
            args (dict): This should be the dict returned by get_downsample_args().
        """
        cuboid_size = CUBOIDSIZE[0]
        x_dim = cuboid_size[0]
        y_dim = cuboid_size[1]
        z_dim = cuboid_size[2]

        resource = BossResourceBasic()
        resource.from_dict(self.get_image_dict())
        resolution = 0
        ts = 0
        version = 0

        # DP HACK: uploading all cubes will take longer than the actual downsample
        #          just upload the first volume worth of cubes.
        #          The downsample volume lambda will only read these cubes when
        #          passed the 'test' argument.
        bucket = S3Bucket(self.bosslet_config.session, args['s3_bucket'])
        print('Uploading test data', end='', flush=True)
        for cube in xyz_range(XYZ(0,0,0), XYZ(2,2,2)):
            key = AWSObjectStore.generate_object_key(resource, resolution, ts, cube.morton)
            key += "&0" # Add the version number
            #print('morton: {}'.format(cube.morton))
            #print('key: {}'.format(key))
            #print("{} -> {} -> {}".format(cube, cube.morton, key))
            cube = Cube.create_cube(resource, [x_dim, y_dim, z_dim])
            cube.random()
            data = cube.to_blosc()
            bucket.put(key, data)
            print('.', end='', flush=True)
        print(' Done uploading.')
Example #28
    def upload_data(self, session, args):
        """
        Fill the coord frame with random data.

        Args:
            session (Session): Boto3 session used to access the S3 bucket.
            args (dict): This should be the dict returned by get_downsample_args().
        """
        cuboid_size = CUBOIDSIZE[0]
        x_dim = cuboid_size[0]
        y_dim = cuboid_size[1]
        z_dim = cuboid_size[2]

        resource = BossResourceBasic()
        resource.from_dict(self.get_image_dict())
        resolution = 0
        ts = 0
        version = 0

        # DP HACK: uploading all cubes will take longer than the actual downsample
        #          just upload the first volume worth of cubes.
        #          The downsample volume lambda will only read these cubes when
        #          passed the 'test' argument.
        bucket = S3Bucket(session, args['s3_bucket'])
        print('Uploading test data', end='', flush=True)
        for cube in xyz_range(XYZ(0,0,0), XYZ(2,2,2)):
            key = AWSObjectStore.generate_object_key(resource, resolution, ts, cube.morton)
            key += "&0" # Add the version number
            #print('morton: {}'.format(cube.morton))
            #print('key: {}'.format(key))
            #print("{} -> {} -> {}".format(cube, cube.morton, key))
            cube = Cube.create_cube(resource, [x_dim, y_dim, z_dim])
            cube.random()
            data = cube.to_blosc()
            bucket.put(key, data)
            print('.', end='', flush=True)
        print(' Done uploading.')
Example #29
    def test_cuboids_exist(self):
        """Test method for checking if cuboids exist in S3 index"""
        os = AWSObjectStore(self.object_store_config)

        expected_keys = ["CACHED-CUBOID&1&1&1&0&0&12", "CACHED-CUBOID&1&1&1&0&0&13", "CACHED-CUBOID&1&1&1&0&0&14"]
        test_keys = ["CACHED-CUBOID&1&1&1&0&0&100", "CACHED-CUBOID&1&1&1&0&0&13", "CACHED-CUBOID&1&1&1&0&0&14",
                     "CACHED-CUBOID&1&1&1&0&0&15"]

        expected_object_keys = os.cached_cuboid_to_object_keys(expected_keys)

        # Populate table
        for k in expected_object_keys:
            os.add_cuboid_to_index(k)

        # Check for keys
        exist_keys, missing_keys = os.cuboids_exist(test_keys)

        assert exist_keys == [1, 2]
        assert missing_keys == [0, 3]
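The assertions show cuboids_exist() returns positions into the input key list rather than the keys themselves. A minimal sketch of that bookkeeping, assuming a membership check against the S3 index (key_in_index is a hypothetical stand-in for the index lookup):

def split_exist_missing(cached_cuboid_keys, key_in_index, to_object_keys):
    # Returns (indices found in the index, indices not found), both relative
    # to the caller's original key list.
    exist_keys, missing_keys = [], []
    for i, key in enumerate(to_object_keys(cached_cuboid_keys)):
        (exist_keys if key_in_index(key) else missing_keys).append(i)
    return exist_keys, missing_keys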
Example #30
    def test_update_id_indices_add_new_cuboids_to_existing_ids(self):
        """
        Test that new cuboid object keys are added to the cuboid-set attributes of pre-existing ids.
        """
        bytes = np.zeros(10, dtype='uint64')
        bytes[1] = 20
        bytes[2] = 20
        bytes[5] = 55
        bytes[8] = 1000
        bytes[9] = 55
        expected_ids = ['20', '55', '1000']
        version = 0
        resource = BossResourceBasic(data=get_anno_dict())
        resolution = 1
        time_sample = 0
        morton_id = 20
        object_key = AWSObjectStore.generate_object_key(
            resource, resolution, time_sample, morton_id)

        self.obj_ind.update_id_indices(resource, resolution, [object_key],
                                       [bytes], version)

        new_bytes = np.zeros(4, dtype='uint64')
        new_bytes[0] = 1000  # Pre-existing id.
        new_bytes[1] = 4444
        new_bytes[3] = 55  # Pre-existing id.

        new_morton_id = 90
        new_object_key = AWSObjectStore.generate_object_key(
            resource, resolution, time_sample, new_morton_id)

        # Method under test.
        self.obj_ind.update_id_indices(resource, resolution, [new_object_key],
                                       [new_bytes], version)

        # Confirm cuboids for id 55.
        key55 = self.obj_ind.generate_channel_id_key(resource, resolution, 55)

        response = self.dynamodb.get_item(
            TableName=self.object_store_config["id_index_table"],
            Key={
                'channel-id-key': {
                    'S': key55
                },
                'version': {
                    'N': '{}'.format(version)
                }
            },
            ConsistentRead=True,
            ReturnConsumedCapacity='NONE')

        self.assertIn('Item', response)
        self.assertIn('cuboid-set', response['Item'])
        self.assertIn('SS', response['Item']['cuboid-set'])
        # Check that mortons are there since using "new" index style
        self.assertIn(
            object_key.split("&")[-1], response['Item']['cuboid-set']['SS'])
        self.assertIn(
            new_object_key.split("&")[-1],
            response['Item']['cuboid-set']['SS'])

        # Confirm cuboids for id 1000.
        key1000 = self.obj_ind.generate_channel_id_key(resource, resolution,
                                                       1000)

        response2 = self.dynamodb.get_item(
            TableName=self.object_store_config["id_index_table"],
            Key={
                'channel-id-key': {
                    'S': key1000
                },
                'version': {
                    'N': '{}'.format(version)
                }
            },
            ConsistentRead=True,
            ReturnConsumedCapacity='NONE')

        self.assertIn('Item', response2)
        self.assertIn('cuboid-set', response2['Item'])
        self.assertIn('SS', response2['Item']['cuboid-set'])
        # Check that mortons are there since using "new" index style
        self.assertIn(
            object_key.split("&")[-1], response2['Item']['cuboid-set']['SS'])
        self.assertIn(
            new_object_key.split("&")[-1],
            response2['Item']['cuboid-set']['SS'])
Example #31
    def test_generate_object_keys_iso_anisotropic_below_fork(self):
        """Test to create object key when asking for isotropic data, in an anisotropic channel, below the iso fork"""
        os = AWSObjectStore(self.object_store_config)
        object_keys = os.generate_object_key(self.resource, 0, 2, 56, iso=True)

        assert object_keys == '631424bf68302b683a0be521101c192b&4&3&2&0&2&56'
Example #32
    def test_generate_object_keys(self):
        """Test to create object keys"""
        os = AWSObjectStore(self.object_store_config)
        object_keys = os.generate_object_key(self.resource, 0, 2, 56)

        assert object_keys == '631424bf68302b683a0be521101c192b&4&3&2&0&2&56'