def test_delete_forbidden(self):
    """Tests deleting file without permissions raises the correct error."""
    # First add an image
    image_id = str(uuid.uuid4())
    file_size = 5 * units.Ki  # 5K
    file_contents = b"*" * file_size
    image_file = six.BytesIO(file_contents)

    loc, size, checksum, metadata = self.store.add(image_id,
                                                   image_file,
                                                   file_size)
    self.assertEqual(u"file1", metadata["backend"])

    uri = "file:///%s/%s" % (self.test_dir, image_id)
    loc = location.get_location_from_uri_and_backend(uri, "file1",
                                                     conf=self.conf)

    # Mock unlink to raise an OSError for lack of permissions
    # and make sure we can't delete the image
    with mock.patch.object(os, 'unlink') as unlink:
        e = OSError()
        # BUG FIX: the original assigned the ``errno`` *module* itself
        # (``e.errno = errno``).  A permission failure must carry the
        # EACCES error number so the store maps it to Forbidden.
        e.errno = errno.EACCES
        unlink.side_effect = e

        self.assertRaises(exceptions.Forbidden,
                          self.store.delete, loc)

        # Make sure the image didn't get deleted
        loc = location.get_location_from_uri_and_backend(uri, "file1",
                                                         conf=self.conf)
        self.store.get(loc)
def test_delete_forbidden(self):
    """Tests deleting file without permissions raises the correct error."""
    # First add an image
    image_id = str(uuid.uuid4())
    file_size = 5 * units.Ki  # 5K
    file_contents = b"*" * file_size
    image_file = six.BytesIO(file_contents)

    loc, size, checksum, metadata = self.store.add(image_id,
                                                   image_file,
                                                   file_size)
    self.assertEqual(u"file1", metadata["store"])

    uri = "file:///%s/%s" % (self.test_dir, image_id)
    loc = location.get_location_from_uri_and_backend(uri, "file1",
                                                     conf=self.conf)

    # Mock unlink to raise an OSError for lack of permissions
    # and make sure we can't delete the image
    with mock.patch.object(os, 'unlink') as unlink:
        e = OSError()
        # BUG FIX: the original assigned the ``errno`` *module* itself
        # (``e.errno = errno``).  A permission failure must carry the
        # EACCES error number so the store maps it to Forbidden.
        e.errno = errno.EACCES
        unlink.side_effect = e

        self.assertRaises(exceptions.Forbidden,
                          self.store.delete, loc)

        # Make sure the image didn't get deleted
        loc = location.get_location_from_uri_and_backend(uri, "file1",
                                                         conf=self.conf)
        self.store.get(loc)
def delete(uri, backend, context=None):
    """Removes chunks of data from backend specified by uri."""
    if backend:
        # The caller told us which backend owns the data; resolve the
        # location against that backend and delete directly.
        loc = location.get_location_from_uri_and_backend(uri, backend,
                                                         conf=CONF)
        store = get_store_from_store_identifier(backend)
        return store.delete(loc, context=context)

    LOG.warning('Backend is not set to image, searching all backends based on '
                'location URI.')

    enabled = CONF.enabled_backends
    for candidate in enabled:
        try:
            # Skip backends whose store type does not prefix the URI.
            if not uri.startswith(enabled[candidate]):
                continue
            loc = location.get_location_from_uri_and_backend(uri, candidate,
                                                             conf=CONF)
            store = get_store_from_store_identifier(candidate)
            return store.delete(loc, context=context)
        except (exceptions.NotFound, exceptions.UnknownScheme):
            continue

    raise exceptions.NotFound(_("Image not found in any configured backend"))
def get(uri, backend, offset=0, chunk_size=None, context=None):
    """Yields chunks of data from backend specified by uri."""
    if backend:
        # Known backend: resolve and read from it directly.
        loc = location.get_location_from_uri_and_backend(uri, backend,
                                                         conf=CONF)
        store = get_store_from_store_identifier(backend)
        return store.get(loc, offset=offset,
                         chunk_size=chunk_size,
                         context=context)

    LOG.warning('Backend is not set to image, searching all backends based on '
                'location URI.')

    enabled = CONF.enabled_backends
    for candidate in enabled:
        try:
            # Skip backends whose store type does not prefix the URI.
            if not uri.startswith(enabled[candidate]):
                continue
            loc = location.get_location_from_uri_and_backend(
                uri, candidate, conf=CONF)
            store = get_store_from_store_identifier(candidate)
            data, size = store.get(loc, offset=offset,
                                   chunk_size=chunk_size,
                                   context=context)
            if data:
                return data, size
        except (exceptions.NotFound, exceptions.UnknownScheme):
            continue

    raise exceptions.NotFound(_("Image not found in any configured backend"))
def get_size_from_uri_and_backend(uri, backend, context=None):
    """Retrieves image size from backend specified by uri."""
    # Location resolution and store lookup are independent; order is free.
    store = get_store_from_store_identifier(backend)
    loc = location.get_location_from_uri_and_backend(
        uri, backend, conf=CONF)
    return store.get_size(loc, context=context)
def test_get(self):
    """Test a "normal" retrieval of an image in chunks."""
    # First add an image...
    image_id = str(uuid.uuid4())
    file_contents = b"chunk00000remainder"
    image_file = six.BytesIO(file_contents)
    loc, size, checksum, metadata = self.store.add(
        image_id, image_file, len(file_contents))

    # Check metadata contains 'file1' as a store
    self.assertEqual(u"file1", metadata['store'])

    # Now read it back...
    uri = "file:///%s/%s" % (self.test_dir, image_id)
    loc = location.get_location_from_uri_and_backend(uri, 'file1',
                                                     conf=self.conf)
    (image_file, image_size) = self.store.get(loc)

    # Collect all chunks, then verify both content and chunk count.
    chunks = [piece for piece in image_file]
    self.assertEqual(b"chunk00000remainder", b"".join(chunks))
    self.assertEqual(2, len(chunks))
def test_get_non_existing(self):
    """Test trying to retrieve a file that doesn't exist raises error."""
    missing_uri = "file:///%s/non-existing" % self.test_dir
    loc = location.get_location_from_uri_and_backend(
        missing_uri, 'file1', conf=self.conf)
    self.assertRaises(exceptions.NotFound,
                      self.store.get,
                      loc)
def test_add(self):
    """Test that we can add an image via the filesystem backend."""
    filesystem.ChunkedFile.CHUNKSIZE = units.Ki
    expected_image_id = str(uuid.uuid4())
    expected_file_size = 5 * units.Ki  # 5K
    expected_file_contents = b"*" * expected_file_size
    expected_checksum = hashlib.md5(expected_file_contents).hexdigest()
    expected_location = "file://%s/%s" % (self.test_dir,
                                          expected_image_id)
    image_file = six.BytesIO(expected_file_contents)

    loc, size, checksum, metadata = self.store.add(
        expected_image_id, image_file, expected_file_size)

    self.assertEqual(expected_location, loc)
    self.assertEqual(expected_file_size, size)
    self.assertEqual(expected_checksum, checksum)
    self.assertEqual(u"file1", metadata['store'])

    # Read the image back and confirm it round-trips byte-for-byte.
    uri = "file:///%s/%s" % (self.test_dir, expected_image_id)
    loc = location.get_location_from_uri_and_backend(
        uri, 'file1', conf=self.conf)
    (new_image_file, new_image_size) = self.store.get(loc)

    pieces = []
    for chunk in new_image_file:
        pieces.append(chunk)
    new_image_contents = b"".join(pieces)

    self.assertEqual(expected_file_contents, new_image_contents)
    self.assertEqual(expected_file_size, len(new_image_contents))
def test_delete_non_existing(self):
    """Test deleting file that doesn't exist raises an error."""
    missing_uri = "file:///tmp/glance-tests/non-existing"
    loc = location.get_location_from_uri_and_backend(
        missing_uri, "file1", conf=self.conf)
    self.assertRaises(exceptions.NotFound,
                      self.store.delete,
                      loc)
def _unstage(self, image_repo, image, staging_store):
    """
    Restore the image to queued status and remove data from staging.

    :param image_repo: The instance of ImageRepo
    :param image: The image will be restored
    :param staging_store: The store used for staging
    """
    if CONF.enabled_backends:
        # Multi-store deployment: staged data lives in the reserved
        # os_glance_staging_store filesystem directory.
        file_path = "%s/%s" % (getattr(
            CONF, 'os_glance_staging_store').filesystem_store_datadir,
            image.image_id)
        try:
            loc = location.get_location_from_uri_and_backend(
                file_path, 'os_glance_staging_store')
            staging_store.delete(loc)
        except (glance_store.exceptions.NotFound,
                glance_store.exceptions.UnknownScheme):
            # Nothing staged (or the location can't be parsed) --
            # best-effort cleanup, so swallow and continue.
            pass
    else:
        # Single-store deployment: node_staging_uri is assumed to be a
        # 'file://' URI -- TODO confirm; slicing off the first 7
        # characters strips that scheme prefix to get a local path.
        file_path = str(CONF.node_staging_uri + '/' + image.image_id)[7:]
        if os.path.exists(file_path):
            try:
                os.unlink(file_path)
            except OSError as e:
                # Log but do not fail: the restore below must still run.
                LOG.error(_("Cannot delete staged image data %(fn)s "
                            "[Errno %(en)d]"), {'fn': file_path,
                                                'en': e.errno})
        else:
            LOG.warning(_("Staged image data not found "
                          "at %(fn)s"), {'fn': file_path})
    # Always put the image back to its pre-staging state.
    self._restore(image_repo, image)
def test_add(self):
    """Test that we can add an image via the filesystem backend."""
    filesystem.ChunkedFile.CHUNKSIZE = units.Ki
    expected_image_id = str(uuid.uuid4())
    expected_file_size = 5 * units.Ki  # 5K
    expected_file_contents = b"*" * expected_file_size
    expected_checksum = hashlib.md5(expected_file_contents).hexdigest()
    expected_location = "file://%s/%s" % (self.test_dir,
                                          expected_image_id)
    image_file = six.BytesIO(expected_file_contents)

    loc, size, checksum, metadata = self.store.add(
        expected_image_id, image_file, expected_file_size)

    self.assertEqual(expected_location, loc)
    self.assertEqual(expected_file_size, size)
    self.assertEqual(expected_checksum, checksum)
    self.assertEqual(u"file1", metadata['backend'])

    # Read the image back and confirm it round-trips byte-for-byte.
    uri = "file:///%s/%s" % (self.test_dir, expected_image_id)
    loc = location.get_location_from_uri_and_backend(
        uri, 'file1', conf=self.conf)
    (new_image_file, new_image_size) = self.store.get(loc)

    pieces = []
    for chunk in new_image_file:
        pieces.append(chunk)
    new_image_contents = b"".join(pieces)

    self.assertEqual(expected_file_contents, new_image_contents)
    self.assertEqual(expected_file_size, len(new_image_contents))
def test_http_get_redirect(self, mock_api_session):
    # Stack two redirects in front of the 200 OK; the store must follow
    # both before it reaches the payload.
    redirect1 = {"location": "https://example.com?dsName=ds1&dcPath=dc1"}
    redirect2 = {"location": "https://example.com?dsName=ds2&dcPath=dc2"}
    responses = [
        utils.fake_response(),
        utils.fake_response(status_code=302, headers=redirect1),
        utils.fake_response(status_code=301, headers=redirect2),
    ]

    def pop_response(*args, **kwargs):
        # Responses are popped last-first: 301, then 302, then 200.
        return responses.pop()

    expected_image_size = 31
    expected_returns = ['I am a teapot, short and stout\n']
    loc = location.get_location_from_uri_and_backend(
        "vsphere://127.0.0.1/folder/openstack_glance/%s"
        "?dsName=ds1&dcPath=dc1" % FAKE_UUID, "vmware1", conf=self.conf)

    with mock.patch('requests.Session.request') as mocked_request:
        mocked_request.side_effect = pop_response
        (image_file, image_size) = self.store.get(loc)

    self.assertEqual(expected_image_size, image_size)
    self.assertEqual(expected_returns, list(image_file))
def test_get(self):
    """Test a "normal" retrieval of an image in chunks."""
    # First add an image...
    image_id = str(uuid.uuid4())
    file_contents = b"chunk00000remainder"
    image_file = six.BytesIO(file_contents)
    loc, size, checksum, metadata = self.store.add(
        image_id, image_file, len(file_contents))

    # Check metadata contains 'file1' as a backend
    self.assertEqual(u"file1", metadata['backend'])

    # Now read it back...
    uri = "file:///%s/%s" % (self.test_dir, image_id)
    loc = location.get_location_from_uri_and_backend(uri, 'file1',
                                                     conf=self.conf)
    (image_file, image_size) = self.store.get(loc)

    # Collect all chunks, then verify both content and chunk count.
    chunks = [piece for piece in image_file]
    self.assertEqual(b"chunk00000remainder", b"".join(chunks))
    self.assertEqual(2, len(chunks))
def test_partial_get(self):
    """Partial (ranged) reads are not supported by the s3 driver."""
    uri = "s3+https://user:key@auth_address/glance/%s" % FAKE_UUID
    loc = location.get_location_from_uri_and_backend(
        uri, 's3_region1', conf=self.conf)
    self.assertRaises(exceptions.StoreRandomGetNotSupported,
                      self.store.get, loc, chunk_size=1)
def test_http_get_redirect_invalid(self, mock_api_session):
    """An unsupported redirect status (307) must raise BadStoreUri."""
    redirect = {"location": "https://example.com?dsName=ds1&dcPath=dc1"}
    loc = location.get_location_from_uri_and_backend(
        "vsphere://127.0.0.1/folder/openstack_glance/%s"
        "?dsName=ds1&dcPath=dc1" % FAKE_UUID, "vmware1", conf=self.conf)

    with mock.patch('requests.Session.request') as mocked_request:
        mocked_request.return_value = utils.fake_response(
            status_code=307, headers=redirect)
        self.assertRaises(exceptions.BadStoreUri, self.store.get, loc)
def test_configure_add_same_dir_multiple_times_same_priority(self):
    """Tests handling of same dir in config multiple times.

    Tests BadStoreConfiguration exception is raised if same directory
    is specified multiple times in filesystem_store_datadirs with the
    same priority.
    """
    store_map = [self.useFixture(fixtures.TempDir()).path,
                 self.useFixture(fixtures.TempDir()).path]
    self.conf.set_override('filesystem_store_datadir',
                           override=None,
                           group='file1')
    # store_map[0] appears twice, both times with priority 100.
    self.conf.set_override('filesystem_store_datadirs',
                           [store_map[0] + ":100",
                            store_map[1] + ":200",
                            store_map[0] + ":100"],
                           group='file1')
    try:
        self.store.configure()
    except exceptions.BadStoreConfiguration:
        self.fail("configure() raised BadStoreConfiguration unexpectedly!")

    # Test that we can add an image via the filesystem backend
    filesystem.ChunkedFile.CHUNKSIZE = 1024
    expected_image_id = str(uuid.uuid4())
    expected_file_size = 5 * units.Ki  # 5K
    expected_file_contents = b"*" * expected_file_size
    expected_checksum = md5(expected_file_contents,
                            usedforsecurity=False).hexdigest()
    # Priority 200 wins, so the image lands in store_map[1].
    expected_location = "file://%s/%s" % (store_map[1],
                                          expected_image_id)
    image_file = six.BytesIO(expected_file_contents)

    loc, size, checksum, metadata = self.store.add(
        expected_image_id, image_file, expected_file_size)

    self.assertEqual(u"file1", metadata["store"])
    self.assertEqual(expected_location, loc)
    self.assertEqual(expected_file_size, size)
    self.assertEqual(expected_checksum, checksum)

    loc = location.get_location_from_uri_and_backend(
        expected_location, "file1", conf=self.conf)
    (new_image_file, new_image_size) = self.store.get(loc)

    pieces = []
    for chunk in new_image_file:
        pieces.append(chunk)
    new_image_contents = b"".join(pieces)

    self.assertEqual(expected_file_contents, new_image_contents)
    self.assertEqual(expected_file_size, len(new_image_contents))
def test_get_size(self, mock_api_session):
    """
    Test we can get the size of an existing image in the VMware store
    """
    loc = location.get_location_from_uri_and_backend(
        "vsphere://127.0.0.1/folder/openstack_glance/%s"
        "?dsName=ds1&dcPath=dc1" % FAKE_UUID, "vmware1", conf=self.conf)

    with mock.patch('requests.Session.request') as mocked_request:
        mocked_request.return_value = utils.fake_response()
        image_size = self.store.get_size(loc)

    # The canned fake response body is 31 bytes long.
    self.assertEqual(image_size, 31)
def test_get_invalid_bucket_name(self):
    """Virtual-hosted addressing rejects DNS-incompliant bucket names."""
    self.config(s3_store_bucket_url_format='virtual', group='s3_region1')

    invalid_buckets = ['not.dns.compliant', 'aa', 'bucket-']
    for name in invalid_buckets:
        loc = location.get_location_from_uri_and_backend(
            "s3+https://user:key@auth_address/%s/key" % name,
            's3_region1', conf=self.conf)
        self.assertRaises(boto_exceptions.InvalidDNSNameError,
                          self.store.get, loc)
def test_delete_non_existing(self, mock_api_session):
    """
    Test that trying to delete an image that doesn't exist raises an error
    """
    loc = location.get_location_from_uri_and_backend(
        "vsphere://127.0.0.1/folder/openstack_glance/%s?"
        "dsName=ds1&dcPath=dc1" % FAKE_UUID, "vmware1", conf=self.conf)

    # The datastore task fails with FileNotFound, which the store
    # must translate into glance_store's NotFound.
    with mock.patch.object(self.store.session,
                           'wait_for_task') as mock_task:
        mock_task.side_effect = vmware_exceptions.FileNotFoundException
        self.assertRaises(exceptions.NotFound, self.store.delete, loc)
def test_delete(self, mock_api_session):
    """Test we can delete an existing image in the VMware store."""
    loc = location.get_location_from_uri_and_backend(
        "vsphere://127.0.0.1/folder/openstack_glance/%s?"
        "dsName=ds1&dcPath=dc1" % FAKE_UUID, "vmware1", conf=self.conf)

    # Delete succeeds while the backend answers 200.
    with mock.patch('requests.Session.request') as mocked_request:
        mocked_request.return_value = utils.fake_response()
        vm_store.Store._service_content = mock.Mock()
        self.store.delete(loc)

    # Afterwards the backend answers 404, so a get must raise NotFound.
    with mock.patch('requests.Session.request') as mocked_request:
        mocked_request.return_value = utils.fake_response(status_code=404)
        self.assertRaises(exceptions.NotFound, self.store.get, loc)
def test_get_size_non_existing(self, mock_api_session):
    """
    Test that trying to retrieve an image size that doesn't exist
    raises an error
    """
    loc = location.get_location_from_uri_and_backend(
        "vsphere://127.0.0.1/folder/openstack_glan"
        "ce/%s?dsName=ds1&dcPath=dc1" % FAKE_UUID, "vmware1",
        conf=self.conf)

    with mock.patch('requests.Session.request') as mocked_request:
        mocked_request.return_value = utils.fake_response(status_code=404)
        self.assertRaises(exceptions.NotFound, self.store.get_size, loc)
def test_configure_add_same_dir_multiple_times_same_priority(self):
    """Tests handling of same dir in config multiple times.

    Tests BadStoreConfiguration exception is raised if same directory
    is specified multiple times in filesystem_store_datadirs with the
    same priority.
    """
    store_map = [self.useFixture(fixtures.TempDir()).path,
                 self.useFixture(fixtures.TempDir()).path]
    self.conf.set_override('filesystem_store_datadir',
                           override=None,
                           group='file1')
    # store_map[0] appears twice, both times with priority 100.
    self.conf.set_override('filesystem_store_datadirs',
                           [store_map[0] + ":100",
                            store_map[1] + ":200",
                            store_map[0] + ":100"],
                           group='file1')
    try:
        self.store.configure()
    except exceptions.BadStoreConfiguration:
        self.fail("configure() raised BadStoreConfiguration unexpectedly!")

    # Test that we can add an image via the filesystem backend
    filesystem.ChunkedFile.CHUNKSIZE = 1024
    expected_image_id = str(uuid.uuid4())
    expected_file_size = 5 * units.Ki  # 5K
    expected_file_contents = b"*" * expected_file_size
    expected_checksum = hashlib.md5(expected_file_contents).hexdigest()
    # Priority 200 wins, so the image lands in store_map[1].
    expected_location = "file://%s/%s" % (store_map[1],
                                          expected_image_id)
    image_file = six.BytesIO(expected_file_contents)

    loc, size, checksum, metadata = self.store.add(
        expected_image_id, image_file, expected_file_size)

    self.assertEqual(u"file1", metadata["backend"])
    self.assertEqual(expected_location, loc)
    self.assertEqual(expected_file_size, size)
    self.assertEqual(expected_checksum, checksum)

    loc = location.get_location_from_uri_and_backend(
        expected_location, "file1", conf=self.conf)
    (new_image_file, new_image_size) = self.store.get(loc)

    pieces = []
    for chunk in new_image_file:
        pieces.append(chunk)
    new_image_contents = b"".join(pieces)

    self.assertEqual(expected_file_contents, new_image_contents)
    self.assertEqual(expected_file_size, len(new_image_contents))
def test_get(self, mock_api_session):
    """Test a "normal" retrieval of an image in chunks."""
    expected_image_size = 31
    expected_returns = ['I am a teapot, short and stout\n']
    loc = location.get_location_from_uri_and_backend(
        "vsphere://127.0.0.1/folder/openstack_glance/%s"
        "?dsName=ds1&dcPath=dc1" % FAKE_UUID, "vmware1", conf=self.conf)

    with mock.patch('requests.Session.request') as mocked_request:
        mocked_request.return_value = utils.fake_response()
        (image_file, image_size) = self.store.get(loc)

    self.assertEqual(expected_image_size, image_size)
    self.assertEqual(expected_returns, list(image_file))
def test_http_get_max_redirects(self, mock_api_session):
    """Exceeding the redirect limit raises MaxRedirectsExceeded."""
    redirect = {"location": "https://example.com?dsName=ds1&dcPath=dc1"}
    # One more redirect than the store is willing to follow.
    responses = ([utils.fake_response(status_code=302, headers=redirect)]
                 * (vm_store.MAX_REDIRECTS + 1))

    def pop_response(*args, **kwargs):
        return responses.pop()

    loc = location.get_location_from_uri_and_backend(
        "vsphere://127.0.0.1/folder/openstack_glance/%s"
        "?dsName=ds1&dcPath=dc1" % FAKE_UUID, "vmware1", conf=self.conf)

    with mock.patch('requests.Session.request') as mocked_request:
        mocked_request.side_effect = pop_response
        self.assertRaises(exceptions.MaxRedirectsExceeded,
                          self.store.get, loc)
def test_add_with_multiple_dirs(self):
    """Test adding multiple filesystem directories."""
    store_map = [self.useFixture(fixtures.TempDir()).path,
                 self.useFixture(fixtures.TempDir()).path]
    self.conf.set_override('filesystem_store_datadir',
                           override=None,
                           group='file1')
    self.conf.set_override('filesystem_store_datadirs',
                           [store_map[0] + ":100",
                            store_map[1] + ":200"],
                           group='file1')
    self.store.configure()

    # Test that we can add an image via the filesystem backend
    filesystem.ChunkedFile.CHUNKSIZE = units.Ki
    expected_image_id = str(uuid.uuid4())
    expected_file_size = 5 * units.Ki  # 5K
    expected_file_contents = b"*" * expected_file_size
    expected_checksum = md5(expected_file_contents,
                            usedforsecurity=False).hexdigest()
    # The dir with the higher priority (200) receives the image.
    expected_location = "file://%s/%s" % (store_map[1],
                                          expected_image_id)
    image_file = six.BytesIO(expected_file_contents)

    loc, size, checksum, metadata = self.store.add(
        expected_image_id, image_file, expected_file_size)

    self.assertEqual(u"file1", metadata["store"])
    self.assertEqual(expected_location, loc)
    self.assertEqual(expected_file_size, size)
    self.assertEqual(expected_checksum, checksum)

    loc = location.get_location_from_uri_and_backend(
        expected_location, "file1", conf=self.conf)
    (new_image_file, new_image_size) = self.store.get(loc)

    pieces = []
    for chunk in new_image_file:
        pieces.append(chunk)
    new_image_contents = b"".join(pieces)

    self.assertEqual(expected_file_contents, new_image_contents)
    self.assertEqual(expected_file_size, len(new_image_contents))
def test_cinder_get_size(self):
    """Image size is the volume size reported by cinder, in bytes."""
    fake_client = FakeObject(auth_token=None, management_url=None)
    fake_volume_uuid = str(uuid.uuid4())
    fake_volume = FakeObject(size=5, metadata={})
    fake_volumes = {fake_volume_uuid: fake_volume}

    with mock.patch.object(cinder.Store, 'get_cinderclient') as mocked_cc:
        mocked_cc.return_value = FakeObject(client=fake_client,
                                            volumes=fake_volumes)
        uri = 'cinder://cinder1/%s' % fake_volume_uuid
        loc = location.get_location_from_uri_and_backend(uri, "cinder1",
                                                         conf=self.conf)
        image_size = self.store.get_size(loc, context=self.context)
        # Cinder reports size in GiB; the store converts to bytes.
        self.assertEqual(fake_volume.size * units.Gi, image_size)
def test_cinder_delete(self):
    """Deleting a cinder location deletes the backing volume once."""
    fake_client = FakeObject(auth_token=None, management_url=None)
    fake_volume_uuid = str(uuid.uuid4())
    fake_volume = FakeObject(delete=mock.Mock())
    fake_volumes = {fake_volume_uuid: fake_volume}

    with mock.patch.object(cinder.Store, 'get_cinderclient') as mocked_cc:
        mocked_cc.return_value = FakeObject(client=fake_client,
                                            volumes=fake_volumes)
        uri = 'cinder://cinder1/%s' % fake_volume_uuid
        loc = location.get_location_from_uri_and_backend(uri, "cinder1",
                                                         conf=self.conf)
        self.store.delete(loc, context=self.context)
        fake_volume.delete.assert_called_once_with()
def test_add_with_multiple_dirs(self):
    """Test adding multiple filesystem directories."""
    store_map = [self.useFixture(fixtures.TempDir()).path,
                 self.useFixture(fixtures.TempDir()).path]
    self.conf.set_override('filesystem_store_datadir',
                           override=None,
                           group='file1')
    self.conf.set_override('filesystem_store_datadirs',
                           [store_map[0] + ":100",
                            store_map[1] + ":200"],
                           group='file1')
    self.store.configure()

    # Test that we can add an image via the filesystem backend
    filesystem.ChunkedFile.CHUNKSIZE = units.Ki
    expected_image_id = str(uuid.uuid4())
    expected_file_size = 5 * units.Ki  # 5K
    expected_file_contents = b"*" * expected_file_size
    expected_checksum = hashlib.md5(expected_file_contents).hexdigest()
    # The dir with the higher priority (200) receives the image.
    expected_location = "file://%s/%s" % (store_map[1],
                                          expected_image_id)
    image_file = six.BytesIO(expected_file_contents)

    loc, size, checksum, metadata = self.store.add(
        expected_image_id, image_file, expected_file_size)

    self.assertEqual(u"file1", metadata["backend"])
    self.assertEqual(expected_location, loc)
    self.assertEqual(expected_file_size, size)
    self.assertEqual(expected_checksum, checksum)

    loc = location.get_location_from_uri_and_backend(
        expected_location, "file1", conf=self.conf)
    (new_image_file, new_image_size) = self.store.get(loc)

    pieces = []
    for chunk in new_image_file:
        pieces.append(chunk)
    new_image_contents = b"".join(pieces)

    self.assertEqual(expected_file_contents, new_image_contents)
    self.assertEqual(expected_file_size, len(new_image_contents))
def set_acls_for_multi_store(location_uri, backend, public=False,
                             read_tenants=None, write_tenants=None,
                             context=None):
    """Set access permissions on an image location in a given backend.

    :param location_uri: the URI identifying the image data
    :param backend: identifier of the backend store holding the data
    :param public: whether the image should be world-readable
    :param read_tenants: tenants granted read access (default: none)
    :param write_tenants: tenants granted write access (default: none)
    :param context: request context passed through to the store
    """
    # BUG FIX: ``read_tenants`` previously defaulted to a mutable ``[]``,
    # which is shared across calls; use the None-sentinel idiom instead.
    if read_tenants is None:
        read_tenants = []
    if write_tenants is None:
        write_tenants = []

    loc = location.get_location_from_uri_and_backend(
        location_uri, backend, conf=CONF)
    store = get_store_from_store_identifier(backend)
    try:
        store.set_acls(loc, public=public,
                       read_tenants=read_tenants,
                       write_tenants=write_tenants,
                       context=context)
    except NotImplementedError:
        # Not every driver supports ACLs; that's fine.
        LOG.debug("Skipping store.set_acls... not implemented")
def test_delete(self):
    """Test we can delete an existing image in the filesystem store."""
    # First add an image
    image_id = str(uuid.uuid4())
    file_size = 5 * units.Ki  # 5K
    file_contents = b"*" * file_size
    image_file = six.BytesIO(file_contents)

    loc, size, checksum, metadata = self.store.add(image_id,
                                                   image_file,
                                                   file_size)
    self.assertEqual(u"file1", metadata["backend"])

    # Now check that we can delete it
    uri = "file:///%s/%s" % (self.test_dir, image_id)
    loc = location.get_location_from_uri_and_backend(uri, "file1",
                                                     conf=self.conf)
    self.store.delete(loc)

    # A subsequent read of the deleted image must fail.
    self.assertRaises(exceptions.NotFound, self.store.get, loc)
def test_delete(self):
    """Test we can delete an existing image in the filesystem store."""
    # First add an image
    image_id = str(uuid.uuid4())
    file_size = 5 * units.Ki  # 5K
    file_contents = b"*" * file_size
    image_file = six.BytesIO(file_contents)

    loc, size, checksum, metadata = self.store.add(image_id,
                                                   image_file,
                                                   file_size)
    self.assertEqual(u"file1", metadata["store"])

    # Now check that we can delete it
    uri = "file:///%s/%s" % (self.test_dir, image_id)
    loc = location.get_location_from_uri_and_backend(uri, "file1",
                                                     conf=self.conf)
    self.store.delete(loc)

    # A subsequent read of the deleted image must fail.
    self.assertRaises(exceptions.NotFound, self.store.get, loc)
def test_add_to_different_backned(self):
    """Test that we can add an image via the filesystem backend.

    NOTE: the method name contains a typo ('backned'); kept as-is
    because test names are part of the suite's public surface.
    """
    self.store = filesystem.Store(self.conf, backend='file2')
    self.config(filesystem_store_datadir=self.test_dir, group="file2")
    self.store.configure()
    self.register_store_backend_schemes(self.store, 'file', 'file2')

    filesystem.ChunkedFile.CHUNKSIZE = units.Ki
    expected_image_id = str(uuid.uuid4())
    expected_file_size = 5 * units.Ki  # 5K
    expected_file_contents = b"*" * expected_file_size
    expected_checksum = hashlib.md5(expected_file_contents).hexdigest()
    expected_location = "file://%s/%s" % (self.test_dir,
                                          expected_image_id)
    image_file = six.BytesIO(expected_file_contents)

    loc, size, checksum, metadata = self.store.add(
        expected_image_id, image_file, expected_file_size)

    self.assertEqual(expected_location, loc)
    self.assertEqual(expected_file_size, size)
    self.assertEqual(expected_checksum, checksum)
    self.assertEqual(u"file2", metadata['backend'])

    # Read the image back through the 'file2' backend.
    uri = "file:///%s/%s" % (self.test_dir, expected_image_id)
    loc = location.get_location_from_uri_and_backend(
        uri, 'file2', conf=self.conf)
    (new_image_file, new_image_size) = self.store.get(loc)

    pieces = []
    for chunk in new_image_file:
        pieces.append(chunk)
    new_image_contents = b"".join(pieces)

    self.assertEqual(expected_file_contents, new_image_contents)
    self.assertEqual(expected_file_size, len(new_image_contents))
def test_get(self, mock_client):
    """Test a "normal" retrieval of an image in chunks."""
    bucket, key = 'glance', FAKE_UUID
    fixture_object = {
        'Body': six.BytesIO(b"*" * FIVE_KB),
        'ContentLength': FIVE_KB
    }
    fake_s3_client = botocore.session.get_session().create_client('s3')

    with stub.Stubber(fake_s3_client) as stubber:
        # head_object is issued first to check existence, then
        # get_object streams the body.
        stubber.add_response(method='head_object',
                             service_response={},
                             expected_params={'Bucket': bucket,
                                              'Key': key})
        stubber.add_response(method='get_object',
                             service_response=fixture_object,
                             expected_params={'Bucket': bucket,
                                              'Key': key})
        mock_client.return_value = fake_s3_client

        loc = location.get_location_from_uri_and_backend(
            "s3+https://user:key@auth_address/%s/%s" % (bucket, key),
            's3_region1', conf=self.conf)
        (image_s3, image_size) = self.store.get(loc)

        self.assertEqual(FIVE_KB, image_size)
        received = b"".join(chunk for chunk in image_s3)
        self.assertEqual(b"*" * FIVE_KB, received)
def test_cinder_get(self):
    """Reading a cinder-backed image streams the volume contents."""
    expected_size = 5 * units.Ki
    expected_file_contents = b"*" * expected_size
    volume_file = six.BytesIO(expected_file_contents)
    fake_client = FakeObject(auth_token=None, management_url=None)
    fake_volume_uuid = str(uuid.uuid4())
    fake_volume = mock.MagicMock(
        id=fake_volume_uuid,
        metadata={'image_size': expected_size},
        status='available')
    fake_volume.manager.get.return_value = fake_volume
    fake_volumes = FakeObject(get=lambda id: fake_volume)

    @contextlib.contextmanager
    def fake_open(client, volume, mode):
        # The store must open the volume read-only in binary mode.
        self.assertEqual('rb', mode)
        yield volume_file

    with mock.patch.object(cinder.Store, 'get_cinderclient') as mock_cc, \
            mock.patch.object(self.store, '_open_cinder_volume',
                              side_effect=fake_open):
        mock_cc.return_value = FakeObject(client=fake_client,
                                          volumes=fake_volumes)
        uri = "cinder://cinder1/%s" % fake_volume_uuid
        loc = location.get_location_from_uri_and_backend(uri, "cinder1",
                                                         conf=self.conf)
        (image_file, image_size) = self.store.get(loc,
                                                  context=self.context)

        # Collect all chunks, then verify content and chunk count.
        chunks = [piece for piece in image_file]
        self.assertEqual(2, len(chunks))
        self.assertEqual(expected_file_contents, b"".join(chunks))
def test_get_random_access(self):
    """Test a "normal" retrieval of an image in chunks."""
    # First add an image...
    image_id = str(uuid.uuid4())
    file_contents = b"chunk00000remainder"
    image_file = six.BytesIO(file_contents)
    loc, size, checksum, metadata = self.store.add(
        image_id, image_file, len(file_contents))

    # Check metadata contains 'file1' as a backend
    self.assertEqual(u"file1", metadata['backend'])

    # Now read it back...
    uri = "file:///%s/%s" % (self.test_dir, image_id)
    loc = location.get_location_from_uri_and_backend(uri, 'file1',
                                                     conf=self.conf)

    # Read one byte at each offset; concatenated, that reproduces
    # the whole file.
    data = b""
    for offset in range(len(file_contents)):
        (image_file, image_size) = self.store.get(loc,
                                                  offset=offset,
                                                  chunk_size=1)
        for chunk in image_file:
            data += chunk
    self.assertEqual(file_contents, data)

    # A single ranged read of 5 bytes starting at offset 5.
    chunk_size = 5
    (image_file, image_size) = self.store.get(loc,
                                              offset=chunk_size,
                                              chunk_size=chunk_size)
    data = b"".join(chunk for chunk in image_file)
    self.assertEqual(b'00000', data)
    self.assertEqual(chunk_size, image_size)
def test_get_random_access(self):
    """Test a "normal" retrieval of an image in chunks."""
    # First add an image...
    image_id = str(uuid.uuid4())
    file_contents = b"chunk00000remainder"
    image_file = six.BytesIO(file_contents)
    loc, size, checksum, metadata = self.store.add(
        image_id, image_file, len(file_contents))

    # Check metadata contains 'file1' as a store
    self.assertEqual(u"file1", metadata['store'])

    # Now read it back...
    uri = "file:///%s/%s" % (self.test_dir, image_id)
    loc = location.get_location_from_uri_and_backend(uri, 'file1',
                                                     conf=self.conf)

    # Read one byte at each offset; concatenated, that reproduces
    # the whole file.
    data = b""
    for offset in range(len(file_contents)):
        (image_file, image_size) = self.store.get(loc,
                                                  offset=offset,
                                                  chunk_size=1)
        for chunk in image_file:
            data += chunk
    self.assertEqual(file_contents, data)

    # A single ranged read of 5 bytes starting at offset 5.
    chunk_size = 5
    (image_file, image_size) = self.store.get(loc,
                                              offset=chunk_size,
                                              chunk_size=chunk_size)
    data = b"".join(chunk for chunk in image_file)
    self.assertEqual(b'00000', data)
    self.assertEqual(chunk_size, image_size)
def test_delete_non_existing(self, mock_client):
    """Test that trying to delete a s3 that doesn't exist raises an error
    """
    bucket, key = 'glance', 'no_exist'
    fake_s3_client = botocore.session.get_session().create_client('s3')

    with stub.Stubber(fake_s3_client) as stubber:
        # head_object answers 404, which the store maps to NotFound.
        stubber.add_client_error(method='head_object',
                                 service_error_code='404',
                                 service_message='''
The specified key does not exist.
''',
                                 expected_params={'Bucket': bucket,
                                                  'Key': key})
        fake_s3_client.head_bucket = mock.MagicMock()
        mock_client.return_value = fake_s3_client

        uri = "s3+https://user:key@auth_address/%s/%s" % (bucket, key)
        loc = location.get_location_from_uri_and_backend(uri,
                                                         's3_region1',
                                                         conf=self.conf)
        self.assertRaises(exceptions.NotFound, self.store.delete, loc)
def test_http_get_redirect(self, mock_api_session):
    # Stack two redirects in front of the 200 OK; the store must follow
    # both before it reaches the payload.
    redirect1 = {"location": "https://example.com?dsName=ds1&dcPath=dc1"}
    redirect2 = {"location": "https://example.com?dsName=ds2&dcPath=dc2"}
    responses = [
        utils.fake_response(),
        utils.fake_response(status_code=302, headers=redirect1),
        utils.fake_response(status_code=301, headers=redirect2),
    ]

    def pop_response(*args, **kwargs):
        # Responses are popped last-first: 301, then 302, then 200.
        return responses.pop()

    expected_image_size = 31
    expected_returns = ['I am a teapot, short and stout\n']
    loc = location.get_location_from_uri_and_backend(
        "vsphere://127.0.0.1/folder/openstack_glance/%s"
        "?dsName=ds1&dcPath=dc1" % FAKE_UUID, "vmware1", conf=self.conf)

    with mock.patch('requests.Session.request') as mocked_request:
        mocked_request.side_effect = pop_response
        (image_file, image_size) = self.store.get(loc)

    self.assertEqual(expected_image_size, image_size)
    self.assertEqual(expected_returns, list(image_file))