def check_available_space(dest, image_size, image_id):
    """Verify the filesystem holding ``dest`` has room for ``image_size``.

    :param dest: conversion destination — a directory, or a file path whose
                 parent directory is checked instead
    :param image_size: required free space, in bytes
    :param image_id: image UUID, used only in the error message
    :raises exception.ImageUnacceptable: if free space is insufficient
    """
    # Resolves the original TODO(e0ne): shutil.disk_usage (stdlib,
    # Python 3.3+) replaces the third-party psutil dependency now that
    # Python 2.7 support is gone.  Function-local import so the module's
    # import block is untouched.
    import shutil

    # disk_usage requires an existing directory; dest may name a file
    # that has not been created yet.
    if not os.path.isdir(dest):
        dest = os.path.dirname(dest)
    free_space = shutil.disk_usage(dest).free
    if free_space <= image_size:
        msg = ('There is no space to convert image. '
               'Requested: %(image_size)s, available: %(free_space)s') % {
            'image_size': image_size,
            'free_space': free_space,
        }
        raise exception.ImageUnacceptable(image_id=image_id, reason=msg)
def fetch_verify_image(context, image_service, image_id, dest,
                       user_id=None, project_id=None):
    """Download an image to ``dest`` and reject unsafe qemu formats.

    The fetched file must be parseable by 'qemu-img info' and must not
    reference a backing file; on failure the partially written ``dest``
    is removed and ImageUnacceptable is raised.
    """
    fetch(context, image_service, image_id, dest, None, None)
    with fileutils.remove_path_on_error(dest):
        info = qemu_img_info(dest)
        if info.file_format is None:
            raise exception.ImageUnacceptable(
                reason=_("'qemu-img info' parsing failed."),
                image_id=image_id)
        if info.backing_file is not None:
            # A backing file could point at host data the tenant should
            # not be able to read.
            reason = _("fmt=%(fmt)s backed by: %(backing_file)s") % {
                'fmt': info.file_format,
                'backing_file': info.backing_file,
            }
            raise exception.ImageUnacceptable(image_id=image_id,
                                              reason=reason)
def fetch_verify_image(context, image_service, image_id, dest,
                       user_id=None, project_id=None, size=None,
                       run_as_root=True):
    """Fetch an image and, when qemu-img data is obtainable, validate it.

    Validation rejects images whose format cannot be parsed, images with
    backing files, and (when ``size`` in GB is given) images whose
    virtual size does not fit the requested volume.
    """
    fetch(context, image_service, image_id, dest, None, None)
    image_meta = image_service.show(context, image_id)

    with fileutils.remove_path_on_error(dest):
        has_meta = bool(image_meta)
        try:
            format_raw = image_meta['disk_format'] == 'raw'
        except TypeError:
            # image_meta was None / not subscriptable.
            format_raw = False
        data = get_qemu_data(image_id, has_meta, format_raw, dest,
                             run_as_root)
        # Without qemu-img data there is nothing further to verify.
        if data is None:
            return
        if data.file_format is None:
            raise exception.ImageUnacceptable(
                reason=_("'qemu-img info' parsing failed."),
                image_id=image_id)
        if data.backing_file is not None:
            raise exception.ImageUnacceptable(
                image_id=image_id,
                reason=(_("fmt=%(fmt)s backed by: %(backing_file)s")
                        % {'fmt': data.file_format,
                           'backing_file': data.backing_file}))
        # NOTE(xqueralt): If the image virtual size doesn't fit in the
        # requested volume there is no point on resizing it because it
        # will generate an unusable image.
        if size is not None:
            check_virtual_size(data.virtual_size, size, image_id)
def get_qemu_data(image_id, has_meta, disk_format_raw, dest, run_as_root,
                  force_share=False):
    """Probe ``dest`` with qemu-img, tolerating a missing qemu-img binary.

    We may be on a system without qemu-img installed.  That is fine when
    the image is RAW: we return None and the caller copies the bits as-is.
    Otherwise the image cannot be safely handled, so we raise.

    :param image_id: image UUID, for error reporting
    :param has_meta: whether image metadata was available
    :param disk_format_raw: True when the metadata says disk_format is raw
    :param dest: file to probe
    :returns: parsed qemu-img info object, or None when qemu-img is
              unavailable and the image is known RAW
    :raises exception.ImageUnacceptable: qemu-img is unavailable and the
        image is not known to be RAW
    """
    try:
        # Use the (possibly empty) tmp file to check qemu_img_info works.
        data = qemu_img_info(dest, run_as_root=run_as_root,
                             force_share=force_share)
    # There are a lot of cases that can cause a process execution error,
    # but until we do more work to separate out the various cases we'll
    # keep the general catch here.
    except processutils.ProcessExecutionError:
        data = None
        if has_meta:
            if not disk_format_raw:
                # BUG FIX: the original interpolated the *boolean*
                # disk_format_raw (always False on this branch) into the
                # message, yielding "image is of type False".  The actual
                # format string is not available here, so state the fact
                # we do know: the image is not RAW.
                raise exception.ImageUnacceptable(
                    reason=_("qemu-img is not installed and the image "
                             "disk_format is not 'raw'. Only RAW images "
                             "can be used if qemu-img is not "
                             "installed."),
                    image_id=image_id)
        else:
            raise exception.ImageUnacceptable(reason=_(
                "qemu-img is not installed and the disk "
                "format is not specified. Only RAW images "
                "can be used if qemu-img is not installed."),
                image_id=image_id)
    return data
def _copy_image_to_volume(self, context, volume, image_id, image_location,
                          image_service):
    """Downloads Glance image to the specified volume.

    Routes to the encrypted or plain driver copy path based on the
    volume's encryption_key_id, and normalizes driver failures into
    ImageCopyFailure / ImageUnacceptable for the caller.
    """
    LOG.debug(
        "Attempting download of %(image_id)s (%(image_location)s)"
        " to volume %(volume_id)s.",
        {
            'image_id': image_id,
            'volume_id': volume.id,
            'image_location': image_location
        })
    try:
        # Encrypted volumes need the encryptor attach/detach dance, so a
        # dedicated driver entry point is used.
        if volume.encryption_key_id:
            self.driver.copy_image_to_encrypted_volume(
                context, volume, image_service, image_id)
        else:
            self.driver.copy_image_to_volume(context, volume,
                                             image_service, image_id)
    except processutils.ProcessExecutionError as ex:
        # qemu-img / dd style failures: surface the tool's stderr.
        LOG.exception(
            _LE("Failed to copy image %(image_id)s to volume: "
                "%(volume_id)s"),
            {
                'volume_id': volume.id,
                'image_id': image_id
            })
        raise exception.ImageCopyFailure(reason=ex.stderr)
    except exception.ImageUnacceptable as ex:
        # Re-raise as a fresh ImageUnacceptable so callers see the
        # original message as the exception payload.
        LOG.exception(_LE("Failed to copy image to volume: %(volume_id)s"),
                      {'volume_id': volume.id})
        raise exception.ImageUnacceptable(ex)
    except Exception as ex:
        # Catch-all: wrap anything that is not already ImageCopyFailure
        # so the API layer gets a single failure type.
        LOG.exception(
            _LE("Failed to copy image %(image_id)s to "
                "volume: %(volume_id)s"),
            {
                'volume_id': volume.id,
                'image_id': image_id
            })
        if not isinstance(ex, exception.ImageCopyFailure):
            raise exception.ImageCopyFailure(reason=ex)
        else:
            raise
    LOG.debug(
        "Downloaded image %(image_id)s (%(image_location)s)"
        " to volume %(volume_id)s successfully.",
        {
            'image_id': image_id,
            'volume_id': volume.id,
            'image_location': image_location
        })
def test_copy_image_to_encrypted_volume_failed_fetch(
        self, mock_detach_encryptor, mock_attach_encryptor,
        mock_detach_volume, mock_attach_volume, mock_fetch_to_raw,
        mock_get_connector_properties):
    """A fetch failure mid-copy must still detach encryptor and volume.

    fetch_to_raw raises ImageUnacceptable; the driver must propagate it
    and run the detach cleanup exactly once each.
    """
    properties = {}
    volume = tests_utils.create_volume(
        self.context, status='available', size=2,
        encryption_key_id=fake.ENCRYPTION_KEY_ID)
    volume_id = volume['id']
    # Re-read so the object matches what the manager would operate on.
    volume = db.volume_get(context.get_admin_context(), volume_id)
    image_service = fake_image.FakeImageService()
    local_path = 'dev/sda'
    attach_info = {
        'device': {
            'path': local_path
        },
        'conn': {
            'driver_volume_type': 'iscsi',
            'data': {},
        }
    }
    mock_get_connector_properties.return_value = properties
    mock_attach_volume.return_value = [attach_info, volume]
    # Arrange: the image download step blows up after attach succeeded.
    raised_exception = exception.ImageUnacceptable(reason='fake',
                                                   image_id=fake.IMAGE_ID)
    mock_fetch_to_raw.side_effect = raised_exception
    encryption = {'encryption_key_id': fake.ENCRYPTION_KEY_ID}
    self.assertRaises(exception.ImageUnacceptable,
                      self.volume.driver.copy_image_to_encrypted_volume,
                      self.context, volume, image_service, fake.IMAGE_ID)
    # Attach path ran once with the expected arguments...
    mock_attach_volume.assert_called_once_with(self.context, volume,
                                               properties)
    mock_attach_encryptor.assert_called_once_with(self.context,
                                                  attach_info, encryption)
    mock_fetch_to_raw.assert_called_once_with(self.context, image_service,
                                              fake.IMAGE_ID, local_path,
                                              '1M', size=2)
    # ...and cleanup still happened despite the exception.
    mock_detach_encryptor.assert_called_once_with(attach_info, encryption)
    mock_detach_volume.assert_called_once_with(self.context, attach_info,
                                               volume, properties)
def create_volume_from_extend(self, **kwargs):
    """Create a volume by importing a VM image and detaching its disk.

    Imports a temporary VM from nfs / uds / glance depending on
    ``image_type``, locates the disk matching ``volume_sequence_num``
    (default 1), detaches it, deletes the temporary VM, and returns the
    disk's volume URN.

    :param kwargs: must carry 'image_type', 'image_id' and optionally
                   'volume_sequence_num'
    :returns: URN of the detached volume
    :raises exception.ImageUnacceptable: when no matching disk exists
    """
    LOG.debug(_("[VRM-CINDER] start create_volume_from_extend()"))
    image_type = kwargs.get('image_type')
    if image_type == "nfs":
        LOG.debug(_("[VRM-CINDER] start create_volume_from_nfs"))
        vm_urn = self.import_vm_from_nfs(**kwargs)
    elif image_type == 'uds':
        LOG.debug(_("[VRM-CINDER] start create_volume_from_uds"))
        vm_urn = self.import_vm_from_uds(**kwargs)
    else:
        LOG.debug(_("[VRM-CINDER] start create_volume_from_glance"))
        vm_urn = self.import_vm_from_glance(**kwargs)
    # The VM id is the last 10 characters of the URN (VRM convention —
    # TODO confirm against the VRM API docs).
    vm_id = vm_urn[-10:]
    vm = self.query_vm(vm_id=vm_id)
    vm_config = vm['vmConfig']
    disks = vm_config['disks']
    volume_urn = None
    if kwargs.get('volume_sequence_num') is None:
        kwargs['volume_sequence_num'] = 1
    for disk in disks:
        if int(disk['sequenceNum']) == int(kwargs['volume_sequence_num']):
            volume_urn = disk['volumeUrn']
            break
    if volume_urn is None:
        msg = (_("[VRM-CINDER] no available disk"))
        LOG.error(msg)
        # Clean up the imported VM before failing.
        self.delete_vm(vm_id=vm_id)
        raise exception.ImageUnacceptable(image_id=kwargs['image_id'],
                                          reason=msg)
    try:
        self.detach_vol_from_vm(vm_id=vm_id, volume_urn=volume_urn)
    except Exception as ex:
        LOG.error(_('detach volume is failed'))
        # Delete the VM even on detach failure, then propagate.
        self.delete_vm(vm_id=vm_id)
        raise ex
    # Normal path: the disk is detached, the temporary VM can go.
    self.delete_vm(vm_id=vm_id)
    LOG.debug(_("[VRM-CINDER] end ()"))
    return volume_urn
def upload_volume(context, image_service, image_meta, volume_path,
                  volume_format='raw', run_as_root=True):
    """Upload a volume's bits to Glance, converting the format if needed.

    When the image's disk_format already matches ``volume_format`` the
    file is streamed as-is; otherwise it is converted into a temp file
    under CONF.image_conversion_dir, verified, then uploaded.

    :raises exception.ImageUnacceptable: conversion produced the wrong
        output format
    """
    image_id = image_meta['id']
    if image_meta['disk_format'] == volume_format:
        LOG.debug("%s was %s, no need to convert to %s",
                  image_id, volume_format, image_meta['disk_format'])
        if os.name == 'nt' or os.access(volume_path, os.R_OK):
            with fileutils.file_open(volume_path, 'rb') as image_file:
                image_service.update(context, image_id, {}, image_file)
        else:
            with utils.temporary_chown(volume_path):
                # BUG FIX: this branch opened the file in text mode;
                # image data is binary, so 'rb' is required (matches the
                # accessible-path branch above).
                with fileutils.file_open(volume_path, 'rb') as image_file:
                    image_service.update(context, image_id, {}, image_file)
        return

    if (CONF.image_conversion_dir and
            not os.path.exists(CONF.image_conversion_dir)):
        os.makedirs(CONF.image_conversion_dir)

    fd, tmp = tempfile.mkstemp(dir=CONF.image_conversion_dir)
    os.close(fd)
    with fileutils.remove_path_on_error(tmp):
        LOG.debug("%s was %s, converting to %s",
                  image_id, volume_format, image_meta['disk_format'])
        convert_image(volume_path, tmp, image_meta['disk_format'],
                      bps_limit=CONF.volume_copy_bps_limit,
                      run_as_root=run_as_root)

        # Verify qemu-img produced what we asked for before uploading.
        data = qemu_img_info(tmp, run_as_root=run_as_root)
        if data.file_format != image_meta['disk_format']:
            raise exception.ImageUnacceptable(
                image_id=image_id,
                reason=_("Converted to %(f1)s, but format is now %(f2)s") % {
                    'f1': image_meta['disk_format'],
                    'f2': data.file_format
                })

        with fileutils.file_open(tmp, 'rb') as image_file:
            image_service.update(context, image_id, {}, image_file)
        fileutils.delete_if_exists(tmp)
def copy_image_to_volume(self, context, volume, image_service, image_id):
    """Fetch the image from image_service and write it to the volume."""
    path = self.local_path(volume)
    qemu_format = self.get_volume_format(volume, qemu_format=True)
    image_utils.fetch_to_volume_format(
        context, image_service, image_id, path, qemu_format,
        self.configuration.volume_dd_blocksize)
    self._do_extend_volume(path, volume['size'], volume['name'])

    # Sanity-check: after fetch + extend the virtual size must match the
    # requested volume size exactly (in GB).
    expected = volume['size']
    actual = image_utils.qemu_img_info(path).virtual_size / units.Gi
    if actual != expected:
        raise exception.ImageUnacceptable(
            image_id=image_id,
            reason=(_("Expected volume size was %d") % expected) +
                   (_(" but size is now %d.") % actual))
def test_volume_reimage_raise_exception(self):
    """Exercise every error-translation path of volume reimage.

    Each copy_image_to_volume side effect below pins which exception
    type the manager is expected to surface to the caller.
    """
    volume = tests_utils.create_volume(self.context)
    self.volume.create_volume(self.context, volume)

    with mock.patch.object(self.volume.driver, 'copy_image_to_volume'
                           ) as mock_cp_img:
        # Process failures become ImageCopyFailure and the volume lands
        # in 'error' with its previous status preserved.
        mock_cp_img.side_effect = processutils.ProcessExecutionError
        self.assertRaises(exception.ImageCopyFailure, self.volume.reimage,
                          self.context, volume, self.image_meta)

    self.assertEqual(volume.previous_status, 'available')
    self.assertEqual(volume.status, 'error')

    # ImageUnacceptable passes through unchanged.
    mock_cp_img.side_effect = exception.ImageUnacceptable(
        image_id=self.image_meta['id'], reason='')
    self.assertRaises(exception.ImageUnacceptable, self.volume.reimage,
                      self.context, volume, self.image_meta)

    # ImageConversionNotAllowed passes through AND files a user-visible
    # message with the IMAGE_FORMAT_UNACCEPTABLE detail.
    mock_cp_img.side_effect = exception.ImageConversionNotAllowed(
        image_id=self.image_meta['id'], reason='')
    with mock.patch.object(
            self.volume.message_api, 'create'
    ) as mock_msg_create:
        self.assertRaises(
            exception.ImageConversionNotAllowed, self.volume.reimage,
            self.context, volume, self.image_meta)
        mock_msg_create.assert_called_with(
            self.context,
            message_field.Action.REIMAGE_VOLUME,
            resource_uuid=volume.id,
            detail=message_field.Detail.IMAGE_FORMAT_UNACCEPTABLE)

    # ImageTooBig passes through unchanged.
    mock_cp_img.side_effect = exception.ImageTooBig(
        image_id=self.image_meta['id'], reason='')
    self.assertRaises(exception.ImageTooBig, self.volume.reimage,
                      self.context, volume, self.image_meta)

    # Any other exception is wrapped into ImageCopyFailure...
    mock_cp_img.side_effect = Exception
    self.assertRaises(exception.ImageCopyFailure, self.volume.reimage,
                      self.context, volume, self.image_meta)

    # ...while an ImageCopyFailure itself is re-raised, not re-wrapped.
    mock_cp_img.side_effect = exception.ImageCopyFailure(reason='')
    self.assertRaises(exception.ImageCopyFailure, self.volume.reimage,
                      self.context, volume, self.image_meta)
def copy_image_to_volume(self, context, volume, image_service, image_id):
    """Fetch the image from image_service and write it to the volume.

    After the raw fetch, the file is resized back to the user-requested
    size and the resulting virtual size is verified.

    :raises exception.ImageUnacceptable: virtual size does not match the
        requested volume size
    """
    image_utils.fetch_to_raw(context, image_service, image_id,
                             self.local_path(volume))

    # NOTE (leseb): Set the virtual size of the image
    # the raw conversion overwrote the destination file
    # (which had the correct size)
    # with the fetched glance image size,
    # thus the initial 'size' parameter is not honored
    # this sets the size to the one asked in the first place by the user
    # and then verify the final virtual size
    image_utils.resize_image(self.local_path(volume), volume['size'])

    data = image_utils.qemu_img_info(self.local_path(volume))
    # BUG FIX: oslo_utils.units defines Gi, not GiB — units.GiB raised
    # AttributeError at runtime; this also matches the sibling driver's
    # copy_image_to_volume which uses units.Gi.
    virt_size = data.virtual_size / units.Gi
    if virt_size != volume['size']:
        raise exception.ImageUnacceptable(
            image_id=image_id,
            reason=(_("Expected volume size was %d") % volume['size'])
            + (_(" but size is now %d") % virt_size))
def upload_volume(context, image_service, image_meta, volume_path,
                  volume_format='raw', run_as_root=True):
    """Upload a volume's bits to Glance, converting the format if needed.

    Same-format volumes are streamed directly; otherwise the data is
    converted into a temporary file, verified, then uploaded.

    :raises exception.ImageUnacceptable: conversion produced the wrong
        output format
    """
    image_id = image_meta['id']
    if image_meta['disk_format'] == volume_format:
        LOG.debug("%s was %s, no need to convert to %s",
                  image_id, volume_format, image_meta['disk_format'])
        if os.name == 'nt' or os.access(volume_path, os.R_OK):
            with fileutils.file_open(volume_path, 'rb') as image_file:
                image_service.update(context, image_id, {}, image_file)
        else:
            with utils.temporary_chown(volume_path):
                # BUG FIX: this branch opened the file in text mode;
                # image data is binary, so 'rb' is required (matches the
                # accessible-path branch above).
                with fileutils.file_open(volume_path, 'rb') as image_file:
                    image_service.update(context, image_id, {}, image_file)
        return

    with temporary_file() as tmp:
        LOG.debug("%s was %s, converting to %s",
                  image_id, volume_format, image_meta['disk_format'])
        convert_image(volume_path, tmp, image_meta['disk_format'],
                      bps_limit=CONF.volume_copy_bps_limit,
                      run_as_root=run_as_root)

        # Verify qemu-img produced what we asked for before uploading.
        data = qemu_img_info(tmp, run_as_root=run_as_root)
        if data.file_format != image_meta['disk_format']:
            raise exception.ImageUnacceptable(
                image_id=image_id,
                reason=_("Converted to %(f1)s, but format is now %(f2)s") % {
                    'f1': image_meta['disk_format'],
                    'f2': data.file_format
                })

        with fileutils.file_open(tmp, 'rb') as image_file:
            image_service.update(context, image_id, {}, image_file)
def _copy_image_to_volume(self, context, volume_ref, image_id,
                          image_location, image_service):
    """Downloads Glance image to the specified volume.

    Delegates to the driver's copy_image_to_volume and normalizes
    failures into ImageCopyFailure / ImageUnacceptable.
    """
    copy_image_to_volume = self.driver.copy_image_to_volume
    volume_id = volume_ref['id']
    LOG.debug("Attempting download of %(image_id)s (%(image_location)s)"
              " to volume %(volume_id)s." %
              {'image_id': image_id, 'volume_id': volume_id,
               'image_location': image_location})
    try:
        copy_image_to_volume(context, volume_ref, image_service, image_id)
    except processutils.ProcessExecutionError as ex:
        # Tool-level failures (qemu-img / dd): surface stderr.
        LOG.error(_LE("Failed to copy image %(image_id)s to volume: "
                      "%(volume_id)s, error: %(error)s") %
                  {'volume_id': volume_id, 'error': ex.stderr,
                   'image_id': image_id})
        raise exception.ImageCopyFailure(reason=ex.stderr)
    except exception.ImageUnacceptable as ex:
        # Re-raise as a fresh ImageUnacceptable carrying the original
        # exception as the message payload.
        LOG.error(_LE("Failed to copy image to volume: %(volume_id)s, "
                      "error: %(error)s") %
                  {'volume_id': volume_id, 'error': ex})
        raise exception.ImageUnacceptable(ex)
    except Exception as ex:
        # Catch-all: wrap anything not already ImageCopyFailure so the
        # caller deals with a single failure type.
        LOG.error(_LE("Failed to copy image %(image_id)s to "
                      "volume: %(volume_id)s, error: %(error)s") %
                  {'volume_id': volume_id, 'error': ex,
                   'image_id': image_id})
        if not isinstance(ex, exception.ImageCopyFailure):
            raise exception.ImageCopyFailure(reason=ex)
        else:
            raise
    LOG.debug("Downloaded image %(image_id)s (%(image_location)s)"
              " to volume %(volume_id)s successfully." %
              {'image_id': image_id, 'volume_id': volume_id,
               'image_location': image_location})
def fetch_from_localfile_to_raw(context, image_service, image_id, dest,
                                blocksize, user_id=None, project_id=None,
                                size=None, run_as_root=True):
    """Convert a locally staged image file to raw at ``dest``.

    Instead of downloading from Glance, the image bits are expected to
    already exist under CONF.store_file_dir named by the image id.  The
    file is validated (size, parseable format, no backing file),
    converted to raw with qemu-img, and the result re-verified.

    NOTE(review): begin/end timestamps are logged at ERROR level — these
    look like leftover tracing and should probably be LOG.debug.
    """
    LOG.error('begin time of fetch_from_localfile_to_raw is %s'
              % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
    image_meta = image_service.show(context, image_id)
    # The staged file is named after the image id.
    file_name = image_meta.get('id')
    volume_format = 'raw'
    # NOTE(review): disabled existence checks kept from an earlier
    # CONF.local_file_dir based implementation:
    # if (CONF.local_file_dir and not
    #         os.path.exists(CONF.local_file_dir)):
    #     raise exception.ImageUnacceptable(
    #         reason=_("the dir of back file of image doesn't exist"),
    #         image_id=image_id)
    # tmp = CONF.local_file_dir + '/' + file_name
    tmp = CONF.store_file_dir + '/' + file_name
    # tmp = '/home/upload/d5414dfe-b6b0-4286-8a23-5867d84f8f27'
    # if not os.path.exists(CONF.local_file_dir):
    #     raise exception.ImageUnacceptable(
    #         reason=_("the back file of image doesn't exist"),
    #         image_id=image_id)

    if is_xenserver_image(context, image_service, image_id):
        replace_xenserver_image_with_coalesced_vhd(tmp)

    data = qemu_img_info(tmp)
    # NOTE(review): plain '/' — on Python 3 this yields a float GB value.
    virt_size = data.virtual_size / units.Gi

    # NOTE(xqueralt): If the image virtual size doesn't fit in the
    # requested volume there is no point on resizing it because it will
    # generate an unusable image.
    if size is not None and virt_size > size:
        params = {'image_size': virt_size, 'volume_size': size}
        reason = _("Size is %(image_size)dGB and doesn't fit in a "
                   "volume of size %(volume_size)dGB.") % params
        raise exception.ImageUnacceptable(image_id=image_id, reason=reason)

    fmt = data.file_format
    if fmt is None:
        raise exception.ImageUnacceptable(
            reason=_("'qemu-img info' parsing failed."),
            image_id=image_id)

    # Reject backing files: they could expose host data.
    backing_file = data.backing_file
    if backing_file is not None:
        raise exception.ImageUnacceptable(
            image_id=image_id,
            reason=_("fmt=%(fmt)s backed by:%(backing_file)s") % {
                'fmt': fmt,
                'backing_file': backing_file,
            })

    # NOTE(jdg): I'm using qemu-img convert to write
    # to the volume regardless if it *needs* conversion or not
    # TODO(avishay): We can speed this up by checking if the image is raw
    # and if so, writing directly to the device. However, we need to keep
    # check via 'qemu-img info' that what we copied was in fact a raw
    # image and not a different format with a backing file, which may be
    # malicious.
    LOG.debug("%s was %s, converting to %s " % (image_id, fmt,
                                                volume_format))
    convert_image(tmp, dest, volume_format)

    # Re-verify: the converted output must really be raw.
    data = qemu_img_info(dest)
    if data.file_format != volume_format:
        raise exception.ImageUnacceptable(
            image_id=image_id,
            reason=_("Converted to %(vol_format)s, but format is "
                     "now %(file_format)s") % {
                'vol_format': volume_format,
                'file_format': data.file_format
            })
    LOG.error('end time of fetch_from_localfile_to_raw is %s'
              % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
def create_volume_from_template(self, **kwargs):
    """Create a volume by link-cloning a VRM template's single disk.

    Waits for the template identified by the last 10 characters of
    ``image_location`` to be available, clones a VM from it, detaches
    the clone's disk, deletes the clone, and returns the disk URN.

    :param kwargs: must carry 'image_location', 'image_id' and
                   optionally 'volume_sequence_num'
    :returns: URN of the detached volume
    :raises exception.ImageUnacceptable: template missing/unavailable,
        template has more than one disk, or no matching disk on clone
    """
    LOG.debug(_("[VRM-CINDER] start create_volume_from_template()"))
    template = kwargs['image_location']
    kwargs['template_id'] = template[-10:]
    is_exist = self.check_template(**kwargs)
    if is_exist is None:
        # BUG FIX: the original built a (format, arg) *tuple* instead of
        # a formatted string, so both the log and the exception reason
        # showed a tuple repr.
        msg = _("[VRM-CINDER] no such template %s ") % kwargs.get(
            'template_id')
        LOG.error(msg)
        raise exception.ImageUnacceptable(image_id=kwargs['image_id'],
                                          reason=msg)
    # Poll until the template VM leaves 'creating'.
    while True:
        template_vm = self.query_vm(vm_id=kwargs['template_id'])
        LOG.debug(_("[VRM-CINDER] template_vm status is %s"),
                  template_vm.get('status'))
        if 'creating' == template_vm.get('status'):
            sleep(10)
        elif 'stopped' == template_vm.get('status'):
            break
        else:
            # BUG FIX: same tuple-instead-of-string defect as above.
            msg = _("[VRM-CINDER] template isn't available %s ") % kwargs.get(
                'template_id')
            LOG.error(msg)
            raise exception.ImageUnacceptable(image_id=kwargs['image_id'],
                                              reason=msg)

    vm_config = template_vm['vmConfig']
    template_disks = vm_config['disks']
    if len(template_disks) != 1:
        msg = _("template must have one disk")
        LOG.error(msg)
        raise exception.ImageUnacceptable(image_id=kwargs['image_id'],
                                          reason=msg)

    vm_urn = self.clone_vm(**kwargs)
    vm_id = vm_urn[-10:]
    vm = self.query_vm(vm_id=vm_id)
    vm_config = vm['vmConfig']
    disks = vm_config['disks']
    volume_urn = None
    if kwargs.get('volume_sequence_num') is None:
        kwargs['volume_sequence_num'] = 1
    for disk in disks:
        if int(disk['sequenceNum']) == int(kwargs['volume_sequence_num']):
            volume_urn = disk['volumeUrn']
            break
    if volume_urn is None:
        msg = (_("[VRM-CINDER] no available disk"))
        LOG.error(msg)
        # Clean up the clone before failing.
        self.delete_vm(vm_id=vm_id)
        raise exception.ImageUnacceptable(image_id=kwargs['image_id'],
                                          reason=msg)
    try:
        self.detach_vol_from_vm(vm_id=vm_id, volume_urn=volume_urn)
    except Exception as ex:
        LOG.error(_("detach volume is failed "))
        # Delete the clone even on detach failure, then propagate.
        self.delete_vm(vm_id=vm_id)
        raise ex
    self.delete_vm(vm_id=vm_id)
    LOG.debug(_("[VRM-CINDER] end ()"))
    return volume_urn
def fetch_to_volume_format(context, image_service, image_id, dest,
                           volume_format, blocksize, user_id=None,
                           project_id=None, size=None, run_as_root=True):
    """Fetch an image into a temp file and convert it onto ``dest``.

    Falls back to a raw byte copy when qemu-img is unavailable; otherwise
    validates the fetched file (size fit, parseable format, no backing
    file, free space) before running 'qemu-img convert'.

    :raises exception.ImageUnacceptable: on any validation failure
    """
    qemu_img = True
    image_meta = image_service.show(context, image_id)

    # NOTE(avishay): I'm not crazy about creating temp files which may be
    # large and cause disk full errors which would confuse users.
    # Unfortunately it seems that you can't pipe to 'qemu-img convert'
    # because it seeks. Maybe we can think of something for a future
    # version.
    with temporary_file() as tmp:
        has_meta = False if not image_meta else True
        try:
            format_raw = True if image_meta['disk_format'] == 'raw' else False
        except TypeError:
            # image_meta was None / not subscriptable.
            format_raw = False
        # Probe with the empty tmp file; data is None iff qemu-img is
        # missing but the image is known RAW.
        data = get_qemu_data(image_id, has_meta, format_raw, tmp,
                             run_as_root)
        if data is None:
            qemu_img = False

        # A service-provided local copy (if any) avoids re-downloading.
        tmp_images = TemporaryImages.for_image_service(image_service)
        tmp_image = tmp_images.get(context, image_id)
        if tmp_image:
            tmp = tmp_image
        else:
            fetch(context, image_service, image_id, tmp, user_id,
                  project_id)

        if is_xenserver_format(image_meta):
            replace_xenserver_image_with_coalesced_vhd(tmp)

        if not qemu_img:
            # qemu-img is not installed but we do have a RAW image. As a
            # result we only need to copy the image to the destination
            # and then return.
            LOG.debug('Copying image from %(tmp)s to volume %(dest)s - '
                      'size: %(size)s',
                      {'tmp': tmp, 'dest': dest,
                       'size': image_meta['size']})
            image_size_m = math.ceil(float(image_meta['size']) / units.Mi)
            volume_utils.copy_volume(tmp, dest, image_size_m, blocksize)
            return

        data = qemu_img_info(tmp, run_as_root=run_as_root)
        # Round the virtual size up to whole GB for the fit check.
        virt_size = int(math.ceil(float(data.virtual_size) / units.Gi))

        # NOTE(xqueralt): If the image virtual size doesn't fit in the
        # requested volume there is no point on resizing it because it
        # will generate an unusable image.
        if size is not None and virt_size > size:
            params = {'image_size': virt_size, 'volume_size': size}
            reason = _("Size is %(image_size)dGB and doesn't fit in a "
                       "volume of size %(volume_size)dGB.") % params
            raise exception.ImageUnacceptable(image_id=image_id,
                                              reason=reason)

        fmt = data.file_format
        if fmt is None:
            raise exception.ImageUnacceptable(
                reason=_("'qemu-img info' parsing failed."),
                image_id=image_id)

        # Reject backing files: they could expose host data.
        backing_file = data.backing_file
        if backing_file is not None:
            raise exception.ImageUnacceptable(
                image_id=image_id,
                reason=_("fmt=%(fmt)s backed by:%(backing_file)s") % {
                    'fmt': fmt,
                    'backing_file': backing_file,
                })

        # NOTE(e0ne): check for free space in destination directory before
        # image convertion.
        check_available_space(dest, virt_size, image_id)

        # NOTE(jdg): I'm using qemu-img convert to write
        # to the volume regardless if it *needs* conversion or not
        # TODO(avishay): We can speed this up by checking if the image is
        # raw and if so, writing directly to the device. However, we need
        # to keep check via 'qemu-img info' that what we copied was in
        # fact a raw image and not a different format with a backing
        # file, which may be malicious.
        LOG.debug("%s was %s, converting to %s ", image_id, fmt,
                  volume_format)

        disk_format = fixup_disk_format(image_meta['disk_format'])
        convert_image(tmp, dest, volume_format,
                      src_format=disk_format,
                      run_as_root=run_as_root)
def copy_image_to_volume(self, context, volume, image_service, image_id):
    """Copy an image onto a provider-cloud volume ('vgw_url'/'hypervm').

    'vgw_url': attach the provider volume to a volume-gateway host and
    copy the staged image file onto it over rpyc.
    'hypervm': boot a provider node from the image with an extra block
    device, then detach that device and record it as this volume.

    NOTE(review): timing/status messages are logged at ERROR level;
    they look like leftover tracing and should probably be debug.
    """
    LOG.error('begin time of copy_image_to_volume is %s'
              % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
    image_meta = image_service.show(context, image_id)
    container_format = image_meta.get('container_format')
    if container_format == 'vgw_url':
        # 1. get the provider_volume at provider cloud and wait (up to
        # ~10s) until it is AVAILABLE and unattached.
        provider_volume_id = self._get_provider_volumeid_from_volume(volume)
        retry_time = 10
        provider_volume = self._get_provider_volume(provider_volume_id)
        while retry_time > 0:
            if provider_volume and \
                    provider_volume.state == StorageVolumeState.AVAILABLE and \
                    provider_volume.extra.get('attachment_status') is None:
                break
            else:
                time.sleep(1)
                provider_volume = self._get_provider_volume(
                    provider_volume_id)
                retry_time = retry_time - 1
        try:
            # 3.1 get the vgw host
            vgw_host = self._get_provider_node(
                self.configuration.cgw_host_id)
            if not vgw_host:
                raise exception_ex.VgwHostNotFound(
                    Vgw_id=self.configuration.cgw_host_id)
            device_name = self._get_next_device_name(vgw_host)
            self.adpter.attach_volume(vgw_host, provider_volume,
                                      device_name)
            # query volume status: poll until attached (up to ~10s).
            time.sleep(1)
            retry_time = 10
            provider_volume = self._get_provider_volume(provider_volume_id)
            while retry_time > 0:
                if provider_volume and provider_volume.extra.get(
                        'attachment_status') == 'attached':
                    break
                else:
                    time.sleep(1)
                    provider_volume = self._get_provider_volume(
                        provider_volume_id)
                    retry_time = retry_time - 1
            LOG.error('**********************************************')
            LOG.error('the volume status %s' % provider_volume.state)
            # Ask the gateway host (over rpyc) to dd the staged image
            # file onto the attached device.
            conn = rpyc.connect(self.configuration.cgw_host_ip,
                                int(CONF.vgw.rpc_service_port))
            copy_file_to_device_result = conn.root.copy_file_to_volume(
                image_id, CONF.vgw.store_file_dir, device_name)
            if not copy_file_to_device_result:
                LOG.error("qemu-img convert %s %s failed"
                          % (image_id, device_name))
                self.adpter.detach_volume(provider_volume)
                conn.close()
                raise exception.ImageUnacceptable(
                    reason=("copy image %s file to volume %s failed "
                            % (image_id, volume['id'])))
            conn.close()
            self.adpter.detach_volume(provider_volume)
            # Poll until the volume reports detached (up to ~10s).
            retry_time = 10
            while retry_time > 0:
                if provider_volume and provider_volume.extra.get(
                        'attachment_status') is None:
                    break
                else:
                    time.sleep(1)
                    provider_volume = self._get_provider_volume(
                        provider_volume_id)
                    retry_time = retry_time - 1
            LOG.error('**********************************************')
            LOG.error('the volume status %s' % provider_volume.state)
        except Exception as e:
            # NOTE(review): re-raising without cleanup — the volume may
            # be left attached to the gateway host on failure.
            raise e
    elif container_format == 'hypervm':
        # 0. get provider_image,
        image_uuid = self._get_image_id_from_meta(image_meta)
        provider_image = self._get_provider_image(image_meta)
        if provider_image is None:
            LOG.error('Get image %s error at provider cloud' % image_uuid)
            return
        # create provider node with hypervm volume
        # NOTE(review): provider_bdm is None here but .append() is
        # called on it below — this branch raises AttributeError as
        # written; it presumably should be an empty list.
        provider_bdm = None
        size = volume.get('size')
        name = volume.get('display_name')
        provider_size = self._get_provider_node_size('m1.tiny')
        provider_tmp_volume = self.adpter.create_volume(size, name)
        provider_snap = self.adpter.create_volume_snapshot(
            provider_tmp_volume)
        # NOTE(review): list(provider_snap.id) splits the id string into
        # characters; [provider_snap.id] was probably intended.
        self._wait_for_snapshot_completed(list(provider_snap.id))
        hypervm_bdm = {'DeviceName': '/dev/sdz',
                       'Ebs': {'SnapshotId': provider_snap.id,
                               'DeleteOnTermination': True}
                       }
        provider_bdm.append(hypervm_bdm)
        provider_node_name = image_uuid
        try:
            provider_node = self.adpter.create_node(
                name=provider_node_name,
                image=provider_image,
                size=provider_size,
                location=CONF.provider_opts.availability_zone,
                # ex_subnet=provider_subnet_data,
                ex_security_group_ids=self.provider_security_group_id,
                ex_blockdevicemappings=provider_bdm)
            # ex_network_interfaces=self.provider_interfaces,
            # ex_userdata=user_data,
            # auth=self._get_auth(instance._key_data,
            #                     instance._key_name)
        except Exception as e:
            LOG.warning('Provider instance is booting error')
            LOG.error(e.message)
            # NOTE(review): list_nodes returns a list but provider_node
            # is later used as a single node object.
            provider_node = self.adpter.list_nodes(
                ex_filters={'tag:name': provider_node_name})
            if not provider_node:
                raise e
        # wait for node avalaible
        while provider_node.state != NodeState.RUNNING:
            try:
                provider_nodes = self.adpter.list_nodes(
                    ex_node_ids=[provider_node.id])
                if len(provider_nodes) == 0:
                    break
                else:
                    provider_node = provider_nodes[0]
            except Exception as e:
                LOG.warning('Provider instance is booting but adapter is '
                            'failed to get status. Try it later')
                time.sleep(10)
        provider_bdm_list = provider_node.extra.get('block_device_mapping')
        provider_volume_id = provider_bdm_list[0].get('ebs').get(
            'volume_id')
        # NOTE(review): list_volumes returns a list; .extra/.id access on
        # it below would fail — [0] indexing appears to be missing.
        provider_volume = self.adpter.list_volumes(
            ex_volume_ids=[provider_volume_id])
        self.adpter.destroy_volume(provider_tmp_volume)
        # 7.2 create docker app
        base_ip = provider_node.private_ips[0]
        self._client = Client('http://%s' % base_ip +
                              ':%s' % HYPER_SERVICE_PORT)
        self._client.config_network_service(CONF.rabbit_userid,
                                            CONF.rabbit_password,
                                            CONF.rabbit_host)
        self._client.create_container(image_meta.get('name', ''))
        # NOTE(review): network_info and block_device_info are not
        # defined anywhere in this method — NameError as written.
        self._client.start(network_info=network_info,
                           block_device_info=block_device_info)
        # wait for docker running
        self.adpter.ex_stop_node(provider_node)
        # wait for node stoped
        while provider_node.state != NodeState.STOPPED:
            try:
                provider_nodes = self.adpter.list_nodes(
                    ex_node_ids=[provider_node.id])
                if len(provider_nodes) == 0:
                    break
                else:
                    provider_node = provider_nodes[0]
            except Exception as e:
                LOG.warning('Provider instance is stop but adapter is '
                            'failed to get status. Try it later')
                time.sleep(10)
        self.adpter.detach_volume(provider_volume)
        # Poll until detached (up to ~100s).
        retry_time = 10
        while retry_time > 0:
            if provider_volume and provider_volume.extra.get(
                    'attachment_status') is None:
                break
            else:
                time.sleep(10)
                provider_volume = self._get_provider_volume(
                    provider_volume_id)
                retry_time -= 1
        if (not provider_volume) or (provider_volume.extra.get(
                'attachment_status') is not None):
            LOG.error('detach volume failed')
            raise Exception('detach volume failed')
        # NOTE(review): destroy_node is handed a *volume* object here —
        # destroying provider_node was presumably intended.
        self.adpter.destroy_node(provider_volume)
        # map volume to provider volume
        create_tags_func = getattr(self.adpter, 'ex_create_tags')
        if create_tags_func:
            create_tags_func(provider_volume,
                             {'hybrid_cloud_volume_id': volume['id']})
        ctx = cinder.context.get_admin_context()
        if ctx:
            self.db.volume_metadata_update(
                ctx, volume['id'],
                {'provider_volume_id': provider_volume.id},
                False)
        model_update = {'provider_location': provider_volume.id}
        return model_update
def upload_volume(context, image_service, image_meta, volume_path,
                  volume_format='raw', run_as_root=True, compress=True):
    """Upload a volume's bits to Glance, converting/compressing if needed.

    Same-format, non-'compressed' images are streamed as-is.  Otherwise
    the data is validated (no backing file), converted to the image's
    disk_format, optionally gzip-compressed when container_format is
    'compressed', and uploaded.

    :raises exception.ImageUnacceptable: backing file present, or the
        conversion produced the wrong output format
    """
    image_id = image_meta['id']
    if image_meta.get('container_format') != 'compressed':
        if (image_meta['disk_format'] == volume_format):
            LOG.debug("%s was %s, no need to convert to %s",
                      image_id, volume_format, image_meta['disk_format'])
            if os.name == 'nt' or os.access(volume_path, os.R_OK):
                with open(volume_path, 'rb') as image_file:
                    # tpool.Proxy keeps the blocking file reads off the
                    # eventlet hub.
                    image_service.update(context, image_id, {},
                                         tpool.Proxy(image_file))
            else:
                with utils.temporary_chown(volume_path):
                    with open(volume_path, 'rb') as image_file:
                        image_service.update(context, image_id, {},
                                             tpool.Proxy(image_file))
            return

    with temporary_file() as tmp:
        LOG.debug("%s was %s, converting to %s",
                  image_id, volume_format, image_meta['disk_format'])

        data = qemu_img_info(volume_path, run_as_root=run_as_root)
        backing_file = data.backing_file
        fmt = data.file_format
        if backing_file is not None:
            # Disallow backing files as a security measure.
            # This prevents a user from writing an image header into a
            # raw volume with a backing file pointing to data they wish
            # to access.
            raise exception.ImageUnacceptable(
                image_id=image_id,
                reason=_("fmt=%(fmt)s backed by:%(backing_file)s") % {
                    'fmt': fmt,
                    'backing_file': backing_file
                })

        out_format = fixup_disk_format(image_meta['disk_format'])
        convert_image(volume_path, tmp, out_format,
                      run_as_root=run_as_root,
                      compress=compress)

        # Verify qemu-img produced what we asked for before uploading.
        data = qemu_img_info(tmp, run_as_root=run_as_root)
        if data.file_format != out_format:
            raise exception.ImageUnacceptable(
                image_id=image_id,
                reason=_("Converted to %(f1)s, but format is now %(f2)s") % {
                    'f1': out_format,
                    'f2': data.file_format
                })

        # NOTE(ZhengMa): This is used to do image compression on image
        # uploading with 'compressed' container_format.
        # Compress file 'tmp' in-place
        if image_meta.get('container_format') == 'compressed':
            LOG.debug("Container_format set to 'compressed', compressing "
                      "image before uploading.")
            accel = accelerator.ImageAccel(tmp, tmp)
            accel.compress_img(run_as_root=run_as_root)
        with open(tmp, 'rb') as image_file:
            image_service.update(context, image_id, {},
                                 tpool.Proxy(image_file))
def fetch_to_volume_format(context, image_service, image_id, dest,
                           volume_format, blocksize, volume_subformat=None,
                           user_id=None, project_id=None, size=None,
                           run_as_root=True):
    """Fetch an image and write it to ``dest`` in ``volume_format``.

    The image is downloaded to a temporary file, validated with
    'qemu-img info' (when qemu-img is available), optionally
    decompressed (gzip only) for 'compressed' container_format, and
    finally converted to the requested volume format at ``dest``.

    :param dest: target path (block device or file) for the converted data
    :param volume_format: qemu-img output format for the conversion
    :param blocksize: block size used when falling back to a raw copy
    :param size: when given, the destination volume size (GiB) to validate
        the image's virtual size against before converting
    :raises exception.ImageUnacceptable: for disallowed compressed images,
        unparseable qemu-img output, backing files, or unsupported
        compression formats
    """
    # Assume qemu-img is usable until get_qemu_data() says otherwise.
    qemu_img = True
    image_meta = image_service.show(context, image_id)

    allow_image_compression = CONF.allow_compression_on_image_upload
    if image_meta and (image_meta.get('container_format') == 'compressed'):
        if allow_image_compression is False:
            compression_param = {'container_format':
                                 image_meta.get('container_format')}
            raise exception.ImageUnacceptable(
                image_id=image_id,
                reason=_("Image compression disallowed, "
                         "but container_format is "
                         "%(container_format)s.") % compression_param)

    # NOTE(avishay): I'm not crazy about creating temp files which may be
    # large and cause disk full errors which would confuse users.
    # Unfortunately it seems that you can't pipe to 'qemu-img convert'
    # because it seeks. Maybe we can think of something for a future version.
    with temporary_file() as tmp:
        has_meta = False if not image_meta else True
        try:
            format_raw = True if image_meta['disk_format'] == 'raw' else False
        except TypeError:
            # image_meta is None / not subscriptable.
            format_raw = False
        data = get_qemu_data(image_id, has_meta, format_raw,
                             tmp, run_as_root)
        if data is None:
            qemu_img = False

        # Prefer an already-downloaded temporary image if the image
        # service provides one; otherwise fetch into our temp file.
        tmp_images = TemporaryImages.for_image_service(image_service)
        tmp_image = tmp_images.get(context, image_id)
        if tmp_image:
            tmp = tmp_image
        else:
            fetch(context, image_service, image_id, tmp, user_id, project_id)

        if is_xenserver_format(image_meta):
            replace_xenserver_image_with_coalesced_vhd(tmp)

        if not qemu_img:
            # qemu-img is not installed but we do have a RAW image. As a
            # result we only need to copy the image to the destination and
            # then return.
            LOG.debug('Copying image from %(tmp)s to volume %(dest)s - '
                      'size: %(size)s', {'tmp': tmp, 'dest': dest,
                                         'size': image_meta['size']})
            image_size_m = math.ceil(float(image_meta['size']) / units.Mi)
            volume_utils.copy_volume(tmp, dest, image_size_m, blocksize)
            return

        data = qemu_img_info(tmp, run_as_root=run_as_root)

        # NOTE(xqueralt): If the image virtual size doesn't fit in the
        # requested volume there is no point on resizing it because it will
        # generate an unusable image.
        if size is not None:
            check_virtual_size(data.virtual_size, size, image_id)

        fmt = data.file_format
        if fmt is None:
            raise exception.ImageUnacceptable(
                reason=_("'qemu-img info' parsing failed."),
                image_id=image_id)

        backing_file = data.backing_file
        if backing_file is not None:
            # Backing files are rejected as a security measure (they could
            # point at data the caller should not be able to read).
            raise exception.ImageUnacceptable(
                image_id=image_id,
                reason=_("fmt=%(fmt)s backed by:%(backing_file)s") % {
                    'fmt': fmt, 'backing_file': backing_file, })

        # NOTE(ZhengMa): This is used to do image decompression on image
        # downloading with 'compressed' container_format. It is a
        # transparent level between original image downloaded from
        # Glance and Cinder image service. So the source file path is
        # the same with destination file path.
        if image_meta.get('container_format') == 'compressed':
            LOG.debug("Found image with compressed container format")
            if not accelerator.is_gzip_compressed(tmp):
                raise exception.ImageUnacceptable(
                    image_id=image_id,
                    reason=_("Unsupported compressed image format found. "
                             "Only gzip is supported currently"))
            accel = accelerator.ImageAccel(tmp, tmp)
            accel.decompress_img(run_as_root=run_as_root)

        # NOTE(jdg): I'm using qemu-img convert to write
        # to the volume regardless if it *needs* conversion or not
        # TODO(avishay): We can speed this up by checking if the image is
        # raw and if so, writing directly to the device. However, we need to
        # keep check via 'qemu-img info' that what we copied was in fact a
        # raw image and not a different format with a backing file, which
        # may be malicious.
        disk_format = fixup_disk_format(image_meta['disk_format'])
        LOG.debug("%s was %s, converting to %s",
                  image_id, fmt, volume_format)
        convert_image(tmp, dest, volume_format,
                      out_subformat=volume_subformat,
                      src_format=disk_format,
                      run_as_root=run_as_root)
class GenericVolumeDriverTestCase(BaseDriverTestCase):
    """Test case for VolumeDriver."""
    # Dotted path of the fake driver under test, used by BaseDriverTestCase.
    driver_name = "cinder.tests.fake_driver.FakeLoggingVolumeDriver"

    def test_create_temp_cloned_volume(self):
        """_create_temp_cloned_volume applies the driver's model update."""
        with mock.patch.object(
                self.volume.driver,
                'create_cloned_volume') as mock_create_cloned_volume:
            # Case 1: driver returns a model update -> it is applied.
            model_update = {'provider_location': 'dummy'}
            mock_create_cloned_volume.return_value = model_update
            vol = tests_utils.create_volume(self.context,
                                            status='backing-up')
            cloned_vol = self.volume.driver._create_temp_cloned_volume(
                self.context, vol)
            self.assertEqual('dummy', cloned_vol.provider_location)
            self.assertEqual('available', cloned_vol.status)

            # Case 2: driver returns None -> volume still becomes available.
            mock_create_cloned_volume.return_value = None
            vol = tests_utils.create_volume(self.context,
                                            status='backing-up')
            cloned_vol = self.volume.driver._create_temp_cloned_volume(
                self.context, vol)
            self.assertEqual('available', cloned_vol.status)

    def test_get_backup_device_available(self):
        """An available volume is backed up directly (no temp volume)."""
        vol = tests_utils.create_volume(self.context)
        self.context.user_id = fake.USER_ID
        self.context.project_id = fake.PROJECT_ID
        backup_obj = tests_utils.create_backup(self.context,
                                               vol['id'])
        (backup_device, is_snapshot) = self.volume.driver.get_backup_device(
            self.context, backup_obj)
        volume = objects.Volume.get_by_id(self.context, vol.id)
        self.assertEqual(volume, backup_device)
        self.assertFalse(is_snapshot)
        backup_obj.refresh()
        self.assertIsNone(backup_obj.temp_volume_id)

    def test_get_backup_device_in_use(self):
        """An in-use volume is backed up via a temporary cloned volume."""
        vol = tests_utils.create_volume(self.context,
                                        status='backing-up',
                                        previous_status='in-use')
        temp_vol = tests_utils.create_volume(self.context)
        self.context.user_id = fake.USER_ID
        self.context.project_id = fake.PROJECT_ID
        backup_obj = tests_utils.create_backup(self.context,
                                               vol['id'])
        with mock.patch.object(
                self.volume.driver,
                '_create_temp_cloned_volume') as mock_create_temp:
            mock_create_temp.return_value = temp_vol
            (backup_device, is_snapshot) = (
                self.volume.driver.get_backup_device(self.context,
                                                     backup_obj))
            self.assertEqual(temp_vol, backup_device)
            self.assertFalse(is_snapshot)
            backup_obj.refresh()
            self.assertEqual(temp_vol.id, backup_obj.temp_volume_id)

    def test__create_temp_volume_from_snapshot(self):
        """Temp volume from snapshot inherits AZ/cluster, starts detached."""
        volume_dict = {'id': fake.SNAPSHOT_ID,
                       'host': 'fakehost',
                       'cluster_name': 'fakecluster',
                       'availability_zone': 'fakezone',
                       'size': 1}
        vol = fake_volume.fake_volume_obj(self.context, **volume_dict)
        snapshot = fake_snapshot.fake_snapshot_obj(self.context)
        with mock.patch.object(
                self.volume.driver,
                'create_volume_from_snapshot'):
            temp_vol = self.volume.driver._create_temp_volume_from_snapshot(
                self.context, vol, snapshot)
            self.assertEqual(fields.VolumeAttachStatus.DETACHED,
                             temp_vol.attach_status)
            self.assertEqual('fakezone', temp_vol.availability_zone)
            self.assertEqual('fakecluster', temp_vol.cluster_name)

    @mock.patch.object(utils, 'brick_get_connector_properties')
    @mock.patch.object(cinder.volume.manager.VolumeManager, '_attach_volume')
    @mock.patch.object(cinder.volume.manager.VolumeManager, '_detach_volume')
    @mock.patch.object(volutils, 'copy_volume')
    @mock.patch.object(volume_rpcapi.VolumeAPI, 'get_capabilities')
    @mock.patch.object(cinder.volume.volume_types,
                       'volume_types_encryption_changed')
    @ddt.data(False, True)
    def test_copy_volume_data_mgr(self,
                                  encryption_changed,
                                  mock_encryption_changed,
                                  mock_get_capabilities,
                                  mock_copy,
                                  mock_detach,
                                  mock_attach,
                                  mock_get_connector):
        """Test function of _copy_volume_data."""
        src_vol = tests_utils.create_volume(self.context, size=1,
                                            host=CONF.host)
        dest_vol = tests_utils.create_volume(self.context, size=1,
                                             host=CONF.host)
        mock_get_connector.return_value = {}
        mock_encryption_changed.return_value = encryption_changed
        self.volume.driver._throttle = mock.MagicMock()

        # Expected attach/detach call order: destination first, then source.
        attach_expected = [
            mock.call(self.context, dest_vol, {},
                      remote=False,
                      attach_encryptor=encryption_changed),
            mock.call(self.context, src_vol, {},
                      remote=False,
                      attach_encryptor=encryption_changed)]

        detach_expected = [
            mock.call(self.context, {'device': {'path': 'bar'}},
                      dest_vol, {}, force=True, remote=False,
                      attach_encryptor=encryption_changed),
            mock.call(self.context, {'device': {'path': 'foo'}},
                      src_vol, {}, force=True, remote=False,
                      attach_encryptor=encryption_changed)]

        attach_volume_returns = [
            {'device': {'path': 'bar'}},
            {'device': {'path': 'foo'}}
        ]

        # Test case for sparse_copy_volume = False
        mock_attach.side_effect = attach_volume_returns
        mock_get_capabilities.return_value = {}
        self.volume._copy_volume_data(self.context, src_vol, dest_vol)

        self.assertEqual(attach_expected, mock_attach.mock_calls)
        mock_copy.assert_called_with('foo', 'bar', 1024, '1M', sparse=False)
        self.assertEqual(detach_expected, mock_detach.mock_calls)

        # Test case for sparse_copy_volume = True
        mock_attach.reset_mock()
        mock_detach.reset_mock()
        mock_attach.side_effect = attach_volume_returns
        mock_get_capabilities.return_value = {'sparse_copy_volume': True}
        self.volume._copy_volume_data(self.context, src_vol, dest_vol)

        self.assertEqual(attach_expected, mock_attach.mock_calls)
        mock_copy.assert_called_with('foo', 'bar', 1024, '1M', sparse=True)
        self.assertEqual(detach_expected, mock_detach.mock_calls)

        # cleanup resource
        db.volume_destroy(self.context, src_vol['id'])
        db.volume_destroy(self.context, dest_vol['id'])

    @mock.patch.object(os_brick.initiator.connector,
                       'get_connector_properties')
    @mock.patch.object(image_utils, 'fetch_to_raw')
    @mock.patch.object(cinder.volume.driver.VolumeDriver, '_attach_volume')
    @mock.patch.object(cinder.volume.driver.VolumeDriver, '_detach_volume')
    @mock.patch.object(cinder.utils, 'brick_attach_volume_encryptor')
    @mock.patch.object(cinder.utils, 'brick_detach_volume_encryptor')
    def test_copy_image_to_encrypted_volume(self,
                                            mock_detach_encryptor,
                                            mock_attach_encryptor,
                                            mock_detach_volume,
                                            mock_attach_volume,
                                            mock_fetch_to_raw,
                                            mock_get_connector_properties):
        """Happy path: attach, encrypt, fetch, then detach in order."""
        properties = {}
        volume = tests_utils.create_volume(
            self.context, status='available',
            size=2,
            encryption_key_id=fake.ENCRYPTION_KEY_ID)
        volume_id = volume['id']
        volume = db.volume_get(context.get_admin_context(), volume_id)
        image_service = fake_image.FakeImageService()
        local_path = 'dev/sda'
        attach_info = {'device': {'path': local_path},
                       'conn': {'driver_volume_type': 'iscsi',
                                'data': {}, }}

        mock_get_connector_properties.return_value = properties
        mock_attach_volume.return_value = [attach_info, volume]

        self.volume.driver.copy_image_to_encrypted_volume(
            self.context, volume, image_service, fake.IMAGE_ID)

        encryption = {'encryption_key_id': fake.ENCRYPTION_KEY_ID}
        mock_attach_volume.assert_called_once_with(
            self.context, volume, properties)
        mock_attach_encryptor.assert_called_once_with(
            self.context, attach_info, encryption)
        mock_fetch_to_raw.assert_called_once_with(
            self.context, image_service, fake.IMAGE_ID,
            local_path, '1M', size=2)
        mock_detach_encryptor.assert_called_once_with(
            attach_info, encryption)
        mock_detach_volume.assert_called_once_with(
            self.context, attach_info, volume, properties, force=True)

    @mock.patch.object(os_brick.initiator.connector,
                       'get_connector_properties')
    @mock.patch.object(image_utils, 'fetch_to_raw')
    @mock.patch.object(cinder.volume.driver.VolumeDriver, '_attach_volume')
    @mock.patch.object(cinder.volume.driver.VolumeDriver, '_detach_volume')
    @mock.patch.object(cinder.utils, 'brick_attach_volume_encryptor')
    @mock.patch.object(cinder.utils, 'brick_detach_volume_encryptor')
    def test_copy_image_to_encrypted_volume_failed_attach_encryptor(
            self,
            mock_detach_encryptor,
            mock_attach_encryptor,
            mock_detach_volume,
            mock_attach_volume,
            mock_fetch_to_raw,
            mock_get_connector_properties):
        """Encryptor attach failure propagates; volume is still detached."""
        properties = {}
        volume = tests_utils.create_volume(
            self.context, status='available',
            size=2,
            encryption_key_id=fake.ENCRYPTION_KEY_ID)
        volume_id = volume['id']
        volume = db.volume_get(context.get_admin_context(), volume_id)
        image_service = fake_image.FakeImageService()
        attach_info = {'device': {'path': 'dev/sda'},
                       'conn': {'driver_volume_type': 'iscsi',
                                'data': {}, }}

        mock_get_connector_properties.return_value = properties
        mock_attach_volume.return_value = [attach_info, volume]
        raised_exception = os_brick.exception.VolumeEncryptionNotSupported(
            volume_id = "123",
            volume_type = "abc")
        mock_attach_encryptor.side_effect = raised_exception

        self.assertRaises(os_brick.exception.VolumeEncryptionNotSupported,
                          self.volume.driver.copy_image_to_encrypted_volume,
                          self.context, volume, image_service, fake.IMAGE_ID)

        encryption = {'encryption_key_id': fake.ENCRYPTION_KEY_ID}
        mock_attach_volume.assert_called_once_with(
            self.context, volume, properties)
        mock_attach_encryptor.assert_called_once_with(
            self.context, attach_info, encryption)
        # Fetch never happened and there is no encryptor to detach.
        self.assertFalse(mock_fetch_to_raw.called)
        self.assertFalse(mock_detach_encryptor.called)
        mock_detach_volume.assert_called_once_with(
            self.context, attach_info, volume, properties, force=True)

    @mock.patch.object(os_brick.initiator.connector,
                       'get_connector_properties')
    @mock.patch.object(image_utils, 'fetch_to_raw')
    @mock.patch.object(cinder.volume.driver.VolumeDriver, '_attach_volume')
    @mock.patch.object(cinder.volume.driver.VolumeDriver, '_detach_volume')
    @mock.patch.object(cinder.utils, 'brick_attach_volume_encryptor')
    @mock.patch.object(cinder.utils, 'brick_detach_volume_encryptor')
    @ddt.data(exception.ImageUnacceptable(
              reason='fake', image_id=fake.IMAGE_ID),
              exception.ImageTooBig(
              reason='fake image size exceeded', image_id=fake.IMAGE_ID))
    def test_copy_image_to_encrypted_volume_failed_fetch(
            self, excep,
            mock_detach_encryptor, mock_attach_encryptor,
            mock_detach_volume, mock_attach_volume, mock_fetch_to_raw,
            mock_get_connector_properties):
        """Fetch failure propagates; encryptor and volume are detached."""
        properties = {}
        volume = tests_utils.create_volume(
            self.context, status='available',
            size=2,
            encryption_key_id=fake.ENCRYPTION_KEY_ID)
        volume_id = volume['id']
        volume = db.volume_get(context.get_admin_context(), volume_id)
        image_service = fake_image.FakeImageService()
        local_path = 'dev/sda'
        attach_info = {'device': {'path': local_path},
                       'conn': {'driver_volume_type': 'iscsi',
                                'data': {}, }}

        mock_get_connector_properties.return_value = properties
        mock_attach_volume.return_value = [attach_info, volume]
        mock_fetch_to_raw.side_effect = excep

        encryption = {'encryption_key_id': fake.ENCRYPTION_KEY_ID}
        self.assertRaises(type(excep),
                          self.volume.driver.copy_image_to_encrypted_volume,
                          self.context, volume, image_service, fake.IMAGE_ID)

        mock_attach_volume.assert_called_once_with(
            self.context, volume, properties)
        mock_attach_encryptor.assert_called_once_with(
            self.context, attach_info, encryption)
        mock_fetch_to_raw.assert_called_once_with(
            self.context, image_service, fake.IMAGE_ID,
            local_path, '1M', size=2)
        mock_detach_encryptor.assert_called_once_with(
            attach_info, encryption)
        mock_detach_volume.assert_called_once_with(
            self.context, attach_info, volume, properties, force=True)

    @mock.patch('cinder.volume.driver.brick_exception')
    @mock.patch('cinder.tests.fake_driver.FakeLoggingVolumeDriver.'
                'terminate_connection', side_effect=Exception)
    @mock.patch('cinder.tests.fake_driver.FakeLoggingVolumeDriver.'
                'remove_export', side_effect=Exception)
    def test_detach_volume_force(self, remove_mock, terminate_mock,
                                 exc_mock):
        """Test force parameter on _detach_volume.

        On the driver if we receive the force parameter we will do
        everything even with Exceptions on disconnect, terminate,
        and remove export.
        """
        connector = mock.Mock()
        connector.disconnect_volume.side_effect = Exception
        # TODO(geguileo): Remove this ExceptionChainer simulation once we
        # release OS-Brick version with it and bump min version.
        exc = exc_mock.ExceptionChainer.return_value
        exc.context.return_value.__enter__.return_value = exc
        exc.context.return_value.__exit__.return_value = True

        volume = {'id': fake.VOLUME_ID}
        attach_info = {'device': {},
                       'connector': connector,
                       'conn': {'data': {}, }}

        # TODO(geguileo): Change TypeError to ExceptionChainer once we
        # release OS-Brick version with it and bump min version.
        self.assertRaises(TypeError, self.volume.driver._detach_volume,
                          self.context, attach_info, volume, {}, force=True)

        self.assertTrue(connector.disconnect_volume.called)
        self.assertTrue(remove_mock.called)
        self.assertTrue(terminate_mock.called)
        self.assertEqual(3, exc.context.call_count)
def _create_from_image(self, context, volume_ref,
                       image_location, image_id, image_meta,
                       image_service, **kwargs):
    """Create a volume from a Glance image.

    Tries, in order: a direct driver clone of the image, a clone of a
    'cinder://' glance location, the image-volume cache, and finally a
    plain download of the image data into the volume.  When the download
    path populates the cache, the volume may be temporarily resized to
    the image's virtual size and is restored to the requested size in
    the ``finally`` block.

    :returns: model_update dict (or None) for the created volume
    :raises exception.ImageUnacceptable: if the image's virtual size
        exceeds the requested volume size
    """
    LOG.debug("Cloning %(volume_id)s from image %(image_id)s "
              " at location %(image_location)s.",
              {'volume_id': volume_ref['id'],
               'image_location': image_location, 'image_id': image_id})

    # Create the volume from an image.
    #
    # First see if the driver can clone the image directly.
    #
    # NOTE (singn): two params need to be returned
    # dict containing provider_location for cloned volume
    # and clone status.
    model_update, cloned = self.driver.clone_image(context,
                                                   volume_ref,
                                                   image_location,
                                                   image_meta,
                                                   image_service)

    # Try and clone the image if we have it set as a glance location.
    if not cloned and 'cinder' in CONF.allowed_direct_url_schemes:
        model_update, cloned = self._clone_image_volume(context,
                                                        volume_ref,
                                                        image_location,
                                                        image_meta)
    # Try and use the image cache.
    should_create_cache_entry = False
    internal_context = cinder_context.get_internal_tenant_context()
    if not internal_context:
        LOG.warning(_LW('Unable to get Cinder internal context, will '
                        'not use image-volume cache.'))

    if not cloned and internal_context and self.image_volume_cache:
        model_update, cloned = self._create_from_image_cache(
            context,
            internal_context,
            volume_ref,
            image_id,
            image_meta
        )
        # Cache miss: remember to add an entry after the download below.
        if not cloned:
            should_create_cache_entry = True

    # Fall back to default behavior of creating volume,
    # download the image data and copy it into the volume.
    original_size = volume_ref['size']
    try:
        if not cloned:
            with image_utils.TemporaryImages.fetch(
                    image_service, context, image_id) as tmp_image:
                # Try to create the volume as the minimal size, then we
                # can extend once the image has been downloaded.
                if should_create_cache_entry:
                    data = image_utils.qemu_img_info(tmp_image)

                    virtual_size = int(
                        math.ceil(float(data.virtual_size) / units.Gi))

                    if virtual_size > volume_ref.size:
                        params = {'image_size': virtual_size,
                                  'volume_size': volume_ref.size}
                        reason = _("Image virtual size is %(image_size)dGB"
                                   " and doesn't fit in a volume of size"
                                   " %(volume_size)dGB.") % params
                        raise exception.ImageUnacceptable(
                            image_id=image_id, reason=reason)

                    if virtual_size and virtual_size != original_size:
                        updates = {'size': virtual_size}
                        volume_ref = self.db.volume_update(
                            context,
                            volume_ref['id'],
                            updates
                        )

                model_update = self._create_from_image_download(
                    context,
                    volume_ref,
                    image_location,
                    image_id,
                    image_service
                )

        if should_create_cache_entry:
            # Update the newly created volume db entry before we clone it
            # for the image-volume creation.
            if model_update:
                volume_ref = self.db.volume_update(context,
                                                   volume_ref['id'],
                                                   model_update)
            self.manager._create_image_cache_volume_entry(internal_context,
                                                          volume_ref,
                                                          image_id,
                                                          image_meta)
    finally:
        # If we created the volume as the minimal size, extend it back to
        # what was originally requested. If an exception has occurred we
        # still need to put this back before letting it be raised further
        # up the stack.
        if volume_ref['size'] != original_size:
            self.driver.extend_volume(volume_ref, original_size)
            updates = {'size': original_size}
            self.db.volume_update(context, volume_ref['id'], updates)

    self._handle_bootable_volume_glance_meta(context, volume_ref['id'],
                                             image_id=image_id,
                                             image_meta=image_meta)
    return model_update
def upload_volume(context, image_service, image_meta, volume_path,
                  volume_format='raw', run_as_root=True):
    """Upload volume data at ``volume_path`` to the image service.

    If the volume already matches the image's disk_format, the file is
    streamed directly; otherwise it is converted into a temporary file
    first, and the exact byte size of that file is reported to the
    image service.

    :param image_meta: image metadata dict; 'id', 'disk_format' and
                       optionally 'size' are read here
    :param volume_format: current on-disk format of the volume data
    :param run_as_root: whether qemu-img calls run with root privileges
    :raises exception.ImageUnacceptable: if the source has a backing file
        or the conversion did not produce the expected format
    """
    image_id = image_meta['id']
    if (image_meta['disk_format'] == volume_format):
        # Fast path: no conversion needed, stream the volume directly.
        LOG.debug("%s was %s, no need to convert to %s",
                  image_id, volume_format, image_meta['disk_format'])
        # On Windows ('nt'), or when the file is already readable, open
        # directly; otherwise temporarily chown it first.
        if os.name == 'nt' or os.access(volume_path, os.R_OK):
            with open(volume_path, 'rb') as image_file:
                image_service.update(context, image_id,
                                     {"size": image_meta.get("size", 0)},
                                     image_file)
        else:
            with utils.temporary_chown(volume_path):
                with open(volume_path, 'rb') as image_file:
                    image_service.update(context, image_id,
                                         {"size": image_meta.get("size", 0)},
                                         image_file)
        return
    with temporary_file() as tmp:
        LOG.debug("%s was %s, converting to %s",
                  image_id, volume_format, image_meta['disk_format'])
        data = qemu_img_info(volume_path, run_as_root=run_as_root)
        backing_file = data.backing_file
        fmt = data.file_format
        if backing_file is not None:
            # Disallow backing files as a security measure.
            # This prevents a user from writing an image header into a raw
            # volume with a backing file pointing to data they wish to
            # access.
            raise exception.ImageUnacceptable(
                image_id=image_id,
                reason=_("fmt=%(fmt)s backed by:%(backing_file)s") % {
                    'fmt': fmt, 'backing_file': backing_file})

        out_format = image_meta['disk_format']
        # qemu-img accepts 'vpc' as argument for 'vhd' format and
        # 'parallels' as argument for 'ploop'.
        if out_format == 'vhd':
            out_format = 'vpc'
        if out_format == 'ploop':
            out_format = 'parallels'

        convert_image(volume_path, tmp, out_format,
                      run_as_root=run_as_root)

        # Re-inspect the converted file to confirm the conversion worked.
        data = qemu_img_info(tmp, run_as_root=run_as_root)
        if data.file_format != out_format:
            raise exception.ImageUnacceptable(
                image_id=image_id,
                reason=_("Converted to %(f1)s, but format is now %(f2)s") % {
                    'f1': out_format, 'f2': data.file_format})
        # qemu_img_info rounds the size of the image created.
        # For the upload process we need the exact size of the file,
        # so we get it from the opened file.
        with open(tmp, 'rb') as image_file:
            old_file_position = image_file.tell()
            image_file.seek(0, os.SEEK_END)
            file_size = image_file.tell()
            image_file.seek(old_file_position, os.SEEK_SET)
            image_service.update(context, image_id,
                                 {"size": file_size}, image_file)
def _validate_container_format(self, container_format, image_id):
    """Reject any image container format other than 'bare'.

    A falsy (unset) container_format is accepted as-is; anything else
    must be 'bare' or ImageUnacceptable is raised.

    :param container_format: the image's container_format value
    :param image_id: image id used in the raised exception
    :raises exception.ImageUnacceptable: for unsupported formats
    """
    # Guard clause: nothing to validate, or already the supported format.
    if not container_format or container_format == 'bare':
        return
    msg = _("Container format: %s is unsupported, only 'bare' "
            "is supported.") % container_format
    LOG.error(msg)
    raise exception.ImageUnacceptable(image_id=image_id, reason=msg)
class GenericVolumeDriverTestCase(BaseDriverTestCase):
    """Test case for VolumeDriver."""
    # Dotted path of the fake driver under test, used by BaseDriverTestCase
    # and by the @mock.patch decorators below.
    driver_name = "cinder.tests.fake_driver.FakeLoggingVolumeDriver"

    def test_create_temp_cloned_volume(self):
        """_create_temp_cloned_volume applies the driver's model update."""
        with mock.patch.object(
                self.volume.driver,
                'create_cloned_volume') as mock_create_cloned_volume:
            # Case 1: driver returns a model update -> it is applied.
            model_update = {'provider_location': 'dummy'}
            mock_create_cloned_volume.return_value = model_update
            vol = tests_utils.create_volume(self.context,
                                            status='backing-up')
            cloned_vol = self.volume.driver._create_temp_cloned_volume(
                self.context, vol)
            self.assertEqual('dummy', cloned_vol.provider_location)
            self.assertEqual('available', cloned_vol.status)

            # Case 2: driver returns None -> volume still becomes available.
            mock_create_cloned_volume.return_value = None
            vol = tests_utils.create_volume(self.context,
                                            status='backing-up')
            cloned_vol = self.volume.driver._create_temp_cloned_volume(
                self.context, vol)
            self.assertEqual('available', cloned_vol.status)

    def test_get_backup_device_available(self):
        """An available volume is backed up directly (no temp volume)."""
        vol = tests_utils.create_volume(self.context)
        self.context.user_id = fake.USER_ID
        self.context.project_id = fake.PROJECT_ID
        backup_obj = tests_utils.create_backup(self.context,
                                               vol['id'])
        (backup_device, is_snapshot) = self.volume.driver.get_backup_device(
            self.context, backup_obj)
        volume = objects.Volume.get_by_id(self.context, vol.id)
        # The returned device must not be flagged as temporary.
        self.assertNotIn('temporary', backup_device.admin_metadata.keys())
        self.assertEqual(volume, backup_device)
        self.assertFalse(is_snapshot)
        backup_obj.refresh()
        self.assertIsNone(backup_obj.temp_volume_id)

    def test_get_backup_device_in_use(self):
        """An in-use volume is backed up via a temporary cloned volume."""
        vol = tests_utils.create_volume(self.context,
                                        status='backing-up',
                                        previous_status='in-use')
        admin_meta = {'temporary': 'True'}
        temp_vol = tests_utils.create_volume(self.context,
                                             admin_metadata=admin_meta)
        self.context.user_id = fake.USER_ID
        self.context.project_id = fake.PROJECT_ID
        backup_obj = tests_utils.create_backup(self.context,
                                               vol['id'])
        with mock.patch.object(
                self.volume.driver,
                '_create_temp_cloned_volume') as mock_create_temp:
            mock_create_temp.return_value = temp_vol
            (backup_device, is_snapshot) = (
                self.volume.driver.get_backup_device(self.context,
                                                     backup_obj))
            self.assertEqual(temp_vol, backup_device)
            self.assertFalse(is_snapshot)
            backup_obj.refresh()
            self.assertEqual(temp_vol.id, backup_obj.temp_volume_id)

    def test_create_temp_volume_from_snapshot(self):
        """Temp volume from snapshot inherits AZ/cluster, starts detached."""
        volume_dict = {'id': fake.SNAPSHOT_ID,
                       'host': 'fakehost',
                       'cluster_name': 'fakecluster',
                       'availability_zone': 'fakezone',
                       'size': 1,
                       'volume_type_id': fake.VOLUME_TYPE_ID}
        vol = fake_volume.fake_volume_obj(self.context, **volume_dict)
        snapshot = fake_snapshot.fake_snapshot_obj(self.context)
        with mock.patch.object(
                self.volume.driver,
                'create_volume_from_snapshot'):
            temp_vol = self.volume.driver._create_temp_volume_from_snapshot(
                self.context, vol, snapshot)
            self.assertEqual(fields.VolumeAttachStatus.DETACHED,
                             temp_vol.attach_status)
            self.assertEqual('fakezone', temp_vol.availability_zone)
            self.assertEqual('fakecluster', temp_vol.cluster_name)

    @mock.patch.object(utils, 'brick_get_connector_properties')
    @mock.patch.object(cinder.volume.manager.VolumeManager, '_attach_volume')
    @mock.patch.object(cinder.volume.manager.VolumeManager, '_detach_volume')
    @mock.patch.object(volume_utils, 'copy_volume')
    @mock.patch.object(volume_rpcapi.VolumeAPI, 'get_capabilities')
    @mock.patch.object(cinder.volume.volume_types,
                       'volume_types_encryption_changed')
    @ddt.data(False, True)
    def test_copy_volume_data_mgr(self,
                                  encryption_changed,
                                  mock_encryption_changed,
                                  mock_get_capabilities,
                                  mock_copy,
                                  mock_detach,
                                  mock_attach,
                                  mock_get_connector):
        """Test function of _copy_volume_data."""
        src_vol = tests_utils.create_volume(self.context, size=1,
                                            host=CONF.host)
        dest_vol = tests_utils.create_volume(self.context, size=1,
                                             host=CONF.host)
        mock_get_connector.return_value = {}
        mock_encryption_changed.return_value = encryption_changed
        self.volume.driver._throttle = mock.MagicMock()

        # Expected attach/detach call order: destination first, then source.
        attach_expected = [
            mock.call(self.context, dest_vol, {},
                      remote=False,
                      attach_encryptor=encryption_changed),
            mock.call(self.context, src_vol, {},
                      remote=False,
                      attach_encryptor=encryption_changed)]

        detach_expected = [
            mock.call(self.context, {'device': {'path': 'bar'}},
                      dest_vol, {}, force=True, remote=False,
                      attach_encryptor=encryption_changed),
            mock.call(self.context, {'device': {'path': 'foo'}},
                      src_vol, {}, force=True, remote=False,
                      attach_encryptor=encryption_changed)]

        attach_volume_returns = [
            {'device': {'path': 'bar'}},
            {'device': {'path': 'foo'}}
        ]

        # Test case for sparse_copy_volume = False
        mock_attach.side_effect = attach_volume_returns
        mock_get_capabilities.return_value = {}
        self.volume._copy_volume_data(self.context, src_vol, dest_vol)

        self.assertEqual(attach_expected, mock_attach.mock_calls)
        mock_copy.assert_called_with('foo', 'bar', 1024, '1M', sparse=False)
        self.assertEqual(detach_expected, mock_detach.mock_calls)

        # Test case for sparse_copy_volume = True
        mock_attach.reset_mock()
        mock_detach.reset_mock()
        mock_attach.side_effect = attach_volume_returns
        mock_get_capabilities.return_value = {'sparse_copy_volume': True}
        self.volume._copy_volume_data(self.context, src_vol, dest_vol)

        self.assertEqual(attach_expected, mock_attach.mock_calls)
        mock_copy.assert_called_with('foo', 'bar', 1024, '1M', sparse=True)
        self.assertEqual(detach_expected, mock_detach.mock_calls)

        # cleanup resource
        db.volume_destroy(self.context, src_vol['id'])
        db.volume_destroy(self.context, dest_vol['id'])

    @mock.patch(driver_name + '.initialize_connection')
    @mock.patch(driver_name + '.create_export', return_value=None)
    @mock.patch(driver_name + '._connect_device')
    def test_attach_volume_encrypted(self, connect_mock, export_mock,
                                     initialize_mock):
        """_attach_volume marks the connection info of encrypted volumes."""
        properties = {'host': 'myhost', 'ip': '192.168.1.43',
                      'initiator': u'iqn.1994-05.com.redhat:d9be887375',
                      'multipath': False, 'os_type': 'linux2',
                      'platform': 'x86_64'}

        data = {'target_discovered': True,
                'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001',
                'target_portal': '127.0.0.0.1:3260',
                'volume_id': 1,
                'discard': False}

        passed_conn = {'driver_volume_type': 'iscsi', 'data': data.copy()}
        initialize_mock.return_value = passed_conn

        # _attach_volume adds the encrypted value based on the volume
        expected_conn = {'driver_volume_type': 'iscsi', 'data': data.copy()}
        expected_conn['data']['encrypted'] = True

        volume = tests_utils.create_volume(
            self.context, status='available',
            size=2,
            encryption_key_id=fake.ENCRYPTION_KEY_ID)

        attach_info, vol = self.volume.driver._attach_volume(self.context,
                                                             volume,
                                                             properties)

        export_mock.assert_called_once_with(self.context, volume, properties)
        initialize_mock.assert_called_once_with(volume, properties)

        connect_mock.assert_called_once_with(expected_conn)

        self.assertEqual(connect_mock.return_value, attach_info)
        self.assertEqual(volume, vol)

    @mock.patch.object(os_brick.initiator.connector,
                       'get_connector_properties')
    @mock.patch.object(image_utils, 'fetch_to_raw')
    @mock.patch.object(cinder.volume.driver.VolumeDriver, '_attach_volume')
    @mock.patch.object(cinder.volume.driver.VolumeDriver, '_detach_volume')
    @mock.patch.object(cinder.utils, 'brick_attach_volume_encryptor')
    @mock.patch.object(cinder.utils, 'brick_detach_volume_encryptor')
    def test_copy_image_to_encrypted_volume(self,
                                            mock_detach_encryptor,
                                            mock_attach_encryptor,
                                            mock_detach_volume,
                                            mock_attach_volume,
                                            mock_fetch_to_raw,
                                            mock_get_connector_properties):
        """Happy path: attach, encrypt, fetch, then detach in order."""
        properties = {}
        volume = tests_utils.create_volume(
            self.context, status='available',
            size=2,
            encryption_key_id=fake.ENCRYPTION_KEY_ID)
        volume_id = volume['id']
        volume = db.volume_get(context.get_admin_context(), volume_id)
        image_service = fake_image.FakeImageService()
        local_path = 'dev/sda'
        attach_info = {'device': {'path': local_path},
                       'conn': {'driver_volume_type': 'iscsi',
                                'data': {}, }}

        mock_get_connector_properties.return_value = properties
        mock_attach_volume.return_value = [attach_info, volume]

        self.volume.driver.copy_image_to_encrypted_volume(
            self.context, volume, image_service, fake.IMAGE_ID)

        encryption = {'encryption_key_id': fake.ENCRYPTION_KEY_ID}
        mock_attach_volume.assert_called_once_with(
            self.context, volume, properties)
        mock_attach_encryptor.assert_called_once_with(
            self.context, attach_info, encryption)
        mock_fetch_to_raw.assert_called_once_with(
            self.context, image_service, fake.IMAGE_ID,
            local_path, '1M', size=2)
        mock_detach_encryptor.assert_called_once_with(
            attach_info, encryption)
        mock_detach_volume.assert_called_once_with(
            self.context, attach_info, volume, properties, force=True)

    @mock.patch.object(os_brick.initiator.connector,
                       'get_connector_properties')
    @mock.patch.object(image_utils, 'fetch_to_raw')
    @mock.patch.object(cinder.volume.driver.VolumeDriver, '_attach_volume')
    @mock.patch.object(cinder.volume.driver.VolumeDriver, '_detach_volume')
    @mock.patch.object(cinder.utils, 'brick_attach_volume_encryptor')
    @mock.patch.object(cinder.utils, 'brick_detach_volume_encryptor')
    def test_copy_image_to_encrypted_volume_failed_attach_encryptor(
            self,
            mock_detach_encryptor,
            mock_attach_encryptor,
            mock_detach_volume,
            mock_attach_volume,
            mock_fetch_to_raw,
            mock_get_connector_properties):
        """Encryptor attach failure propagates; volume is still detached."""
        properties = {}
        volume = tests_utils.create_volume(
            self.context, status='available',
            size=2,
            encryption_key_id=fake.ENCRYPTION_KEY_ID)
        volume_id = volume['id']
        volume = db.volume_get(context.get_admin_context(), volume_id)
        image_service = fake_image.FakeImageService()
        attach_info = {'device': {'path': 'dev/sda'},
                       'conn': {'driver_volume_type': 'iscsi',
                                'data': {}, }}

        mock_get_connector_properties.return_value = properties
        mock_attach_volume.return_value = [attach_info, volume]
        raised_exception = os_brick.exception.VolumeEncryptionNotSupported(
            volume_id = "123",
            volume_type = "abc")
        mock_attach_encryptor.side_effect = raised_exception

        self.assertRaises(os_brick.exception.VolumeEncryptionNotSupported,
                          self.volume.driver.copy_image_to_encrypted_volume,
                          self.context, volume, image_service, fake.IMAGE_ID)

        encryption = {'encryption_key_id': fake.ENCRYPTION_KEY_ID}
        mock_attach_volume.assert_called_once_with(
            self.context, volume, properties)
        mock_attach_encryptor.assert_called_once_with(
            self.context, attach_info, encryption)
        # Fetch never happened and there is no encryptor to detach.
        self.assertFalse(mock_fetch_to_raw.called)
        self.assertFalse(mock_detach_encryptor.called)
        mock_detach_volume.assert_called_once_with(
            self.context, attach_info, volume, properties, force=True)

    @mock.patch.object(os_brick.initiator.connector,
                       'get_connector_properties')
    @mock.patch.object(image_utils, 'fetch_to_raw')
    @mock.patch.object(cinder.volume.driver.VolumeDriver, '_attach_volume')
    @mock.patch.object(cinder.volume.driver.VolumeDriver, '_detach_volume')
    @mock.patch.object(cinder.utils, 'brick_attach_volume_encryptor')
    @mock.patch.object(cinder.utils, 'brick_detach_volume_encryptor')
    @ddt.data(exception.ImageUnacceptable(
              reason='fake', image_id=fake.IMAGE_ID),
              exception.ImageTooBig(
              reason='fake image size exceeded', image_id=fake.IMAGE_ID))
    def test_copy_image_to_encrypted_volume_failed_fetch(
            self, excep,
            mock_detach_encryptor, mock_attach_encryptor,
            mock_detach_volume, mock_attach_volume, mock_fetch_to_raw,
            mock_get_connector_properties):
        """Fetch failure propagates; encryptor and volume are detached."""
        properties = {}
        volume = tests_utils.create_volume(
            self.context, status='available',
            size=2,
            encryption_key_id=fake.ENCRYPTION_KEY_ID)
        volume_id = volume['id']
        volume = db.volume_get(context.get_admin_context(), volume_id)
        image_service = fake_image.FakeImageService()
        local_path = 'dev/sda'
        attach_info = {'device': {'path': local_path},
                       'conn': {'driver_volume_type': 'iscsi',
                                'data': {}, }}

        mock_get_connector_properties.return_value = properties
        mock_attach_volume.return_value = [attach_info, volume]
        mock_fetch_to_raw.side_effect = excep

        encryption = {'encryption_key_id': fake.ENCRYPTION_KEY_ID}
        self.assertRaises(type(excep),
                          self.volume.driver.copy_image_to_encrypted_volume,
                          self.context, volume, image_service, fake.IMAGE_ID)

        mock_attach_volume.assert_called_once_with(
            self.context, volume, properties)
        mock_attach_encryptor.assert_called_once_with(
            self.context, attach_info, encryption)
        mock_fetch_to_raw.assert_called_once_with(
            self.context, image_service, fake.IMAGE_ID,
            local_path, '1M', size=2)
        mock_detach_encryptor.assert_called_once_with(
            attach_info, encryption)
        mock_detach_volume.assert_called_once_with(
            self.context, attach_info, volume, properties, force=True)

    @mock.patch('cinder.volume.driver.brick_exception')
    @mock.patch('cinder.tests.fake_driver.FakeLoggingVolumeDriver.'
                'terminate_connection', side_effect=Exception)
    @mock.patch('cinder.tests.fake_driver.FakeLoggingVolumeDriver.'
                'remove_export', side_effect=Exception)
    def test_detach_volume_force(self, remove_mock, terminate_mock,
                                 exc_mock):
        """Test force parameter on _detach_volume.

        On the driver if we receive the force parameter we will do
        everything even with Exceptions on disconnect, terminate,
        and remove export.
        """
        connector = mock.Mock()
        connector.disconnect_volume.side_effect = Exception
        # TODO(geguileo): Remove this ExceptionChainer simulation once we
        # release OS-Brick version with it and bump min version.
        exc = exc_mock.ExceptionChainer.return_value
        exc.context.return_value.__enter__.return_value = exc
        exc.context.return_value.__exit__.return_value = True

        volume = {'id': fake.VOLUME_ID}
        attach_info = {'device': {},
                       'connector': connector,
                       'conn': {'data': {}, }}

        # TODO(geguileo): Change TypeError to ExceptionChainer once we
        # release OS-Brick version with it and bump min version.
        self.assertRaises(TypeError, self.volume.driver._detach_volume,
                          self.context, attach_info, volume, {}, force=True)

        self.assertTrue(connector.disconnect_volume.called)
        self.assertTrue(remove_mock.called)
        self.assertTrue(terminate_mock.called)
        self.assertEqual(3, exc.context.call_count)

    @ddt.data({'cfg_value': '10', 'valid': True},
              {'cfg_value': 'auto', 'valid': True},
              {'cfg_value': '1', 'valid': True},
              {'cfg_value': '1.2', 'valid': True},
              {'cfg_value': '100', 'valid': True},
              {'cfg_value': '20.15', 'valid': True},
              {'cfg_value': 'True', 'valid': False},
              {'cfg_value': 'False', 'valid': False},
              {'cfg_value': '10.0.0', 'valid': False},
              {'cfg_value': '0.00', 'valid': True},
              {'cfg_value': 'anything', 'valid': False},)
    @ddt.unpack
    def test_auto_max_subscription_ratio_options(self, cfg_value, valid):
        # This tests the max_over_subscription_ratio option as it is now
        # checked by a regex
        def _set_conf(config, value):
            config.set_override('max_over_subscription_ratio', value)

        config = conf.Configuration(None)
        config.append_config_values(driver.volume_opts)

        if valid:
            _set_conf(config, cfg_value)
            self.assertEqual(cfg_value, config.safe_get(
                'max_over_subscription_ratio'))
        else:
            # Invalid values must be rejected by the option's regex check.
            self.assertRaises(ValueError, _set_conf, config, cfg_value)
def fetch_to_volume_format(context, image_service,
                           image_id, dest, volume_format, blocksize,
                           user_id=None, project_id=None, size=None):
    """Fetch image_id from image_service and convert it onto dest.

    The image is downloaded to a temporary file, validated with
    'qemu-img info' (parseable format, no backing file, virtual size that
    fits in ``size`` GB when given) and then converted to ``volume_format``
    at ``dest``.  When qemu-img is not installed only RAW images are
    accepted and they are copied verbatim.

    :param context: security context for the image service calls
    :param image_service: Glance image service client
    :param image_id: id of the image to fetch
    :param dest: block device or file to write the converted image to
    :param volume_format: target format (e.g. 'raw')
    :param blocksize: block size used for the raw copy fallback
    :param user_id: optional user id forwarded to fetch()
    :param project_id: optional project id forwarded to fetch()
    :param size: optional volume size in GB used to validate virtual size
    :raises ImageUnacceptable: if qemu-img is missing and the image is not
        RAW, the image info cannot be parsed, the image has a backing file,
        the image does not fit in ``size``, or the conversion produced an
        unexpected format.
    """
    if (CONF.image_conversion_dir and not
            os.path.exists(CONF.image_conversion_dir)):
        os.makedirs(CONF.image_conversion_dir)

    qemu_img = True
    image_meta = image_service.show(context, image_id)

    # NOTE(avishay): I'm not crazy about creating temp files which may be
    # large and cause disk full errors which would confuse users.
    # Unfortunately it seems that you can't pipe to 'qemu-img convert'
    # because it seeks. Maybe we can think of something for a future version.
    with temporary_file() as tmp:
        # We may be on a system that doesn't have qemu-img installed. That
        # is ok if we are working with a RAW image. This logic checks to see
        # if qemu-img is installed. If not we make sure the image is RAW and
        # throw an exception if not. Otherwise we stop before needing
        # qemu-img. Systems with qemu-img will always progress through the
        # whole function.
        try:
            # Use the empty tmp file to make sure qemu_img_info works.
            qemu_img_info(tmp)
        except processutils.ProcessExecutionError:
            qemu_img = False
            if image_meta:
                if image_meta['disk_format'] != 'raw':
                    raise exception.ImageUnacceptable(
                        reason=_("qemu-img is not installed and image is of "
                                 "type %s. Only RAW images can be used if "
                                 "qemu-img is not installed.") %
                        image_meta['disk_format'],
                        image_id=image_id)
            else:
                raise exception.ImageUnacceptable(reason=_(
                    "qemu-img is not installed and the disk "
                    "format is not specified. Only RAW images "
                    "can be used if qemu-img is not installed."),
                    image_id=image_id)

        fetch(context, image_service, image_id, tmp, user_id, project_id)

        if is_xenserver_image(context, image_service, image_id):
            replace_xenserver_image_with_coalesced_vhd(tmp)

        if not qemu_img:
            # qemu-img is not installed but we do have a RAW image. As a
            # result we only need to copy the image to the destination and
            # then return.
            LOG.debug('Copying image from %(tmp)s to volume %(dest)s - '
                      'size: %(size)s',
                      {'tmp': tmp, 'dest': dest, 'size': image_meta['size']})
            volume_utils.copy_volume(tmp, dest, image_meta['size'], blocksize)
            return

        data = qemu_img_info(tmp)

        # FIX: round the virtual size *up* to whole GBs.  The previous plain
        # division truncated under Python 2 (and compared a float under
        # Python 3), so e.g. a 2.5 GB image passed the check against a 2 GB
        # volume and produced an unusable resized image.  Integer ceiling
        # division avoids the float round-trip on both Python versions.
        virt_size = (data.virtual_size + units.Gi - 1) // units.Gi

        # NOTE(xqueralt): If the image virtual size doesn't fit in the
        # requested volume there is no point on resizing it because it will
        # generate an unusable image.
        if size is not None and virt_size > size:
            params = {'image_size': virt_size, 'volume_size': size}
            reason = _("Size is %(image_size)dGB and doesn't fit in a "
                       "volume of size %(volume_size)dGB.") % params
            raise exception.ImageUnacceptable(image_id=image_id,
                                              reason=reason)

        fmt = data.file_format
        if fmt is None:
            raise exception.ImageUnacceptable(
                reason=_("'qemu-img info' parsing failed."),
                image_id=image_id)

        backing_file = data.backing_file
        if backing_file is not None:
            raise exception.ImageUnacceptable(
                image_id=image_id,
                reason=_("fmt=%(fmt)s backed by:%(backing_file)s") % {
                    'fmt': fmt, 'backing_file': backing_file, })

        # NOTE(jdg): I'm using qemu-img convert to write
        # to the volume regardless if it *needs* conversion or not
        # TODO(avishay): We can speed this up by checking if the image is
        # raw and if so, writing directly to the device. However, we need to
        # keep check via 'qemu-img info' that what we copied was in fact a
        # raw image and not a different format with a backing file, which
        # may be malicious.
        # Log arguments are passed lazily so formatting is skipped when the
        # debug level is disabled.
        LOG.debug("%s was %s, converting to %s ",
                  image_id, fmt, volume_format)
        convert_image(tmp, dest, volume_format,
                      bps_limit=CONF.volume_copy_bps_limit)

        data = qemu_img_info(dest)
        if data.file_format != volume_format:
            raise exception.ImageUnacceptable(
                image_id=image_id,
                reason=_("Converted to %(vol_format)s, but format is "
                         "now %(file_format)s") % {
                    'vol_format': volume_format,
                    'file_format': data.file_format})
def _cache_vol_2_2(self, context, vol, image_meta, image_service):
    """Create a cached-image volume from a Glance image.

    Downloads the image to a temporary file, rejects images that are
    unparseable or carry a backing file, sizes ``vol`` up to the image's
    virtual size (whole GiB), converts the image raw onto the attached
    volume, verifies the result, and finally tags the volume with
    'cached_image' metadata and a marker snapshot before detaching it.
    """
    image_id = image_meta['id']
    # Pull the image down and make sure it is usable before creating
    # anything on the backend.
    with image_utils.TemporaryImages.fetch(image_service,
                                           context,
                                           image_id) as tmp_image:
        img_info = image_utils.qemu_img_info(tmp_image)
        if img_info.file_format is None:
            raise exception.ImageUnacceptable(
                reason=_("'qemu-img info' parsing failed."),
                image_id=image_id)
        if img_info.backing_file is not None:
            raise exception.ImageUnacceptable(
                image_id=image_id,
                reason=_("fmt=%(fmt)s backed by:%(backing_file)s") % {
                    'fmt': img_info.file_format,
                    'backing_file': img_info.backing_file, })

        # The cache volume must be big enough for the image's virtual size.
        vol_gb = int(math.ceil(float(img_info.virtual_size) / units.Gi))
        vol['size'] = vol_gb
        LOG.info(
            "Creating cached image with volume type: %(vtype)s and "
            "size %(size)s",
            {'vtype': vol['volume_type_id'], 'size': vol_gb})
        self._create_volume_2_2(vol)
        with self._connect_vol(context, vol) as device:
            LOG.debug("Moving image %s to volume %s",
                      image_meta['id'], datc.get_name(vol))
            image_utils.convert_image(tmp_image, device, 'raw',
                                      run_as_root=True)
            LOG.debug("Finished moving image %s to volume %s",
                      image_meta['id'], datc.get_name(vol))
            converted = image_utils.qemu_img_info(device, run_as_root=True)
            if converted.file_format != 'raw':
                raise exception.ImageUnacceptable(
                    image_id=image_id,
                    reason=_("Converted to %(vol_format)s, but format is "
                             "now %(file_format)s") % {
                        'vol_format': 'raw',
                        'file_format': converted.file_format})
    # TODO(_alastor_): Remove this snapshot creation when we fix
    # "created_at" attribute in the frontend
    # We don't actually care about the snapshot uuid, we just want
    # a single snapshot
    self._create_snapshot_2_2({'id': str(uuid.uuid4()),
                               'volume_id': vol['id'],
                               'project_id': vol['project_id']})
    tenant = self.get_tenant(vol['project_id'])
    ai = self.cvol_to_ai(vol, tenant=tenant)
    ai.metadata.set(tenant=tenant, **{'type': 'cached_image'})
    # Cloning offline AI is ~4 seconds faster than cloning online AI,
    # so detach before handing the cached volume back.
    self._detach_volume_2_2(None, vol)
def _upload_file_to_vgw(vgw_url, files, volume_id):
    """POST an open file object to the vgw endpoint; raise on non-200.

    Extracted helper: the same upload-and-check sequence previously
    appeared three times in upload_volume_to_vgw.
    """
    # NOTE(review): no timeout is set on the request, so a hung vgw
    # endpoint blocks the caller indefinitely.  Adding one would change
    # the failure behaviour, so it is only flagged here.
    r = requests.post(vgw_url, data=files)
    if r.status_code != 200:
        raise exception.ImageUnacceptable(
            reason=("upload the volume %s back_file failed"
                    % volume_id))


def upload_volume_to_vgw(context, image_service, image_meta, volume_path,
                         volume, vgw_url, volume_format='raw',
                         run_as_root=True):
    """Upload the file backing ``volume`` to the vgw service at ``vgw_url``.

    If the image's disk_format already matches ``volume_format`` the volume
    file is streamed as-is; otherwise it is first converted to the image's
    disk_format in a temporary file and that file is uploaded.

    :param context: security context (currently unused; kept for symmetry)
    :param image_service: image service client (currently unused)
    :param image_meta: image metadata dict; 'id' and 'disk_format' are read
    :param volume_path: path of the local volume file to upload
    :param volume: volume dict; only 'id' is read (for error reporting)
    :param vgw_url: URL of the vgw upload endpoint
    :param volume_format: current on-disk format of the volume file
    :param run_as_root: currently unused; kept for interface compatibility
    :raises ImageUnacceptable: when an upload fails or the conversion
        produced an unexpected format
    """
    # FIX: timing diagnostics were logged with LOG.error, polluting the
    # error log; they now go to LOG.debug with lazy argument formatting.
    # Commented-out log statements (dead code) were removed.
    LOG.debug('begin time of upload_volume_to_vgw is %s',
              time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
    image_id = image_meta['id']
    volume_id = volume['id']

    if image_meta['disk_format'] == volume_format:
        # No conversion needed: stream the volume file directly.
        LOG.debug("%s was %s, no need to convert to %s",
                  image_id, volume_format, image_meta['disk_format'])
        if os.name == 'nt' or os.access(volume_path, os.R_OK):
            with fileutils.file_open(volume_path, 'rb') as files:
                _upload_file_to_vgw(vgw_url, files, volume_id)
        else:
            # Volume file is not readable by the current user; chown it
            # temporarily so it can be streamed.
            with utils.temporary_chown(volume_path):
                with fileutils.file_open(volume_path) as files:
                    _upload_file_to_vgw(vgw_url, files, volume_id)
        return

    with temporary_file() as tmp:
        LOG.debug("%s was %s, converting to %s",
                  image_id, volume_format, image_meta['disk_format'])
        LOG.debug('begin time of convert_image is %s',
                  time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
        convert_image(volume_path, tmp, image_meta['disk_format'])
        LOG.debug('end time of upload_volume_to_vgw is %s',
                  time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))

        data = qemu_img_info(tmp)
        if data.file_format != image_meta['disk_format']:
            raise exception.ImageUnacceptable(
                image_id=image_id,
                reason=_("Converted to %(f1)s, but format is now %(f2)s") % {
                    'f1': image_meta['disk_format'],
                    'f2': data.file_format})

        with fileutils.file_open(tmp, 'rb') as files:
            LOG.debug('begin time of upload file is %s',
                      time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
            _upload_file_to_vgw(vgw_url, files, volume_id)
        LOG.debug('end time of upload file is %s',
                  time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
        # temporary_file() removes tmp anyway; this early delete just frees
        # the space sooner.
        fileutils.delete_if_exists(tmp)
    LOG.debug('end time of upload_volume_to_vgw is %s',
              time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
def _copy_image_to_volume(self, context, volume, image_meta,
                          image_location, image_service):
    """Downloads Glance image to the specified volume."""
    # FIX: the docstring above was previously placed *after* the first
    # statement, where it is a no-op string expression rather than a
    # docstring.
    image_id = image_meta['id']
    LOG.debug("Attempting download of %(image_id)s (%(image_location)s)"
              " to volume %(volume_id)s.",
              {'image_id': image_id, 'volume_id': volume.id,
               'image_location': image_location})
    try:
        image_properties = image_meta.get('properties', {})
        image_encryption_key = image_properties.get(
            'cinder_encryption_key_id')

        if volume.encryption_key_id and image_encryption_key:
            # If the image provided an encryption key, we have
            # already cloned it to the volume's key in
            # _get_encryption_key_id, so we can do a direct copy.
            self.driver.copy_image_to_volume(
                context, volume, image_service, image_id)
        elif volume.encryption_key_id:
            # Creating an encrypted volume from a normal, unencrypted,
            # image.
            self.driver.copy_image_to_encrypted_volume(
                context, volume, image_service, image_id)
        else:
            self.driver.copy_image_to_volume(
                context, volume, image_service, image_id)
    except processutils.ProcessExecutionError as ex:
        LOG.exception("Failed to copy image %(image_id)s to volume: "
                      "%(volume_id)s",
                      {'volume_id': volume.id, 'image_id': image_id})
        raise exception.ImageCopyFailure(reason=ex.stderr)
    except exception.ImageUnacceptable as ex:
        LOG.exception("Failed to copy image to volume: %(volume_id)s",
                      {'volume_id': volume.id})
        # NOTE(review): re-wrapping passes the old exception positionally;
        # kept as-is since callers may depend on the resulting message.
        raise exception.ImageUnacceptable(ex)
    except exception.ImageTooBig:
        # FIX: save_and_reraise_exception() is a context manager; the
        # previous bare call never entered it, so ImageTooBig was silently
        # swallowed and the method fell through to the success log.  Using
        # it as a context manager logs and then re-raises.
        with excutils.save_and_reraise_exception():
            LOG.exception("Failed to copy image %(image_id)s to volume: "
                          "%(volume_id)s",
                          {'volume_id': volume.id, 'image_id': image_id})
    except Exception as ex:
        LOG.exception("Failed to copy image %(image_id)s to "
                      "volume: %(volume_id)s",
                      {'volume_id': volume.id, 'image_id': image_id})
        if not isinstance(ex, exception.ImageCopyFailure):
            raise exception.ImageCopyFailure(reason=ex)
        else:
            raise

    LOG.debug("Downloaded image %(image_id)s (%(image_location)s)"
              " to volume %(volume_id)s successfully.",
              {'image_id': image_id, 'volume_id': volume.id,
               'image_location': image_location})
def copy_image_to_volume(self, context, volume, image_service, image_id):
    """Copy an image into the provider-cloud volume via the vgw host.

    Only images whose container_format is one of the vgw url formats are
    handled; any other container_format is a no-op (original behaviour).

    :raises ImageUnacceptable: when the remote file-to-volume copy fails
    :raises exception_ex.VgwHostNotFound: when the configured vgw host
        cannot be resolved
    """
    # FIX: timing/banner diagnostics were logged with LOG.error; they are
    # debug output and now use LOG.debug with lazy argument formatting.
    # The pointless 'except Exception as e: raise e' wrapper (which
    # discards the traceback under Python 2) was removed; exceptions now
    # propagate unchanged.
    LOG.debug('begin time of copy_image_to_volume is %s',
              time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
    image_meta = image_service.show(context, image_id)
    container_format = image_meta.get('container_format')
    if container_format in ['fs_vgw_url', 'vcloud_vgw_url', 'aws_vgw_url']:
        # 1. get the provider_volume at provider cloud
        provider_volume_id = self._get_provider_volumeid_from_volume(volume)
        retry_time = 10
        provider_volume = self._get_provider_volume(provider_volume_id)
        # Wait (up to ~10 polls, 1s apart) for the provider volume to be
        # AVAILABLE and detached before attaching it to the vgw host.
        while retry_time > 0:
            if provider_volume and \
                    provider_volume.state == \
                    StorageVolumeState.AVAILABLE and \
                    provider_volume.extra.get('attachment_status') is None:
                break
            else:
                time.sleep(1)
                provider_volume = self._get_provider_volume(
                    provider_volume_id)
                retry_time = retry_time - 1

        # 3.1 get the vgw host
        vgw_host = self._get_provider_node(
            self.configuration.cgw_host_id)
        if not vgw_host:
            raise exception_ex.VgwHostNotFound(
                Vgw_id=self.configuration.cgw_host_id)
        device_name = self._get_next_device_name(vgw_host)
        self.adpter.attach_volume(vgw_host, provider_volume,
                                  device_name)
        # Poll until the provider reports the volume as attached.
        time.sleep(1)
        retry_time = 10
        provider_volume = self._get_provider_volume(provider_volume_id)
        while retry_time > 0:
            if provider_volume and provider_volume.extra.get(
                    'attachment_status') == 'attached':
                break
            else:
                time.sleep(1)
                provider_volume = self._get_provider_volume(
                    provider_volume_id)
                retry_time = retry_time - 1
        # NOTE(review): provider_volume may still be None here if every
        # poll failed; the access below would then raise AttributeError.
        LOG.debug('the volume status %s', provider_volume.state)

        conn = rpyc.connect(self.configuration.cgw_host_ip,
                            int(CONF.vgw.rpc_service_port))
        copy_file_to_device_result = conn.root.copy_file_to_volume(
            image_id, CONF.vgw.store_file_dir, device_name)
        if not copy_file_to_device_result:
            LOG.error("qemu-img convert %s %s failed",
                      image_id, device_name)
            self.adpter.detach_volume(provider_volume)
            conn.close()
            raise exception.ImageUnacceptable(
                reason=("copy image %s file to volume %s failed " %
                        (image_id, volume['id'])))
        conn.close()
        self.adpter.detach_volume(provider_volume)
        # NOTE(review): retry_time deliberately carries over whatever
        # remained from the attach loop above (original behaviour); a fast
        # attach leaves the full budget for this detach wait.
        while retry_time > 0:
            if provider_volume and provider_volume.extra.get(
                    'attachment_status') is None:
                break
            else:
                time.sleep(1)
                provider_volume = self._get_provider_volume(
                    provider_volume_id)
                retry_time = retry_time - 1
        LOG.debug('the volume status %s', provider_volume.state)
    LOG.debug('end time of copy_image_to_volume is %s',
              time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
def import_vm_from_uds(self, **kwargs):
    """Import a VM (or template) into the site from a UDS/S3 bucket.

    Builds the 'vms/action/import' request body from ``kwargs`` plus the
    driver's configured S3 credentials, posts it, and blocks until the
    resulting task completes.

    :param kwargs: expects image_id/volume_id, linkClone, is_template,
        image_location ('ip:port:bucket:key'), cluster_urn and the values
        consumed by _combine_os_options / _combine_vmConfig_4_import
    :return: the urn of the imported VM
    :raises ParameterNotFound: when the S3 credentials are not configured
    :raises ImageUnacceptable: when image_location is malformed
    """
    LOG.debug(_("[VRM-CINDER] start import_vm_from_uds()"))
    request_url = self._generate_url(self.site_uri + '/vms/action/import')

    # Link clones are named after the image, full clones after the volume.
    if kwargs.get('linkClone'):
        name = kwargs.get('image_id')
    else:
        name = kwargs.get('volume_id')

    template_flag = 'false' if kwargs.get("is_template") is None else 'true'

    access_key = FC_DRIVER_CONF.s3_store_access_key_for_cinder
    secret_key = FC_DRIVER_CONF.s3_store_secret_key_for_cinder
    if access_key is None or secret_key is None:
        LOG.error(
            _("[VRM-CINDER] some params is None, please check: "
              "s3_store_access_key_for_cinder, "
              "s3_store_secret_key_for_cinder"))
        raise exception.ParameterNotFound(
            param='s3_store_access_key_for_cinder or '
                  's3_store_secret_key_for_cinder')

    # image_location is expected as 'serverIp:port:bucketName:key'.
    parts = kwargs.get('image_location').split(":")
    if len(parts) != 4:
        msg = _('image_location is invalid')
        LOG.error(msg)
        raise exception.ImageUnacceptable(image_id=kwargs.get('image_id'),
                                          reason=msg)
    server_ip = parts[0].strip()
    server_port = parts[1].strip()
    bucket_name = parts[2].strip()
    object_key = parts[3].strip()

    request_body = {
        'name': 'cinder-vm-' + name,
        'group': 'FSP',
        'description': 'cinder-uds-vm',
        'autoBoot': 'false',
        'location': kwargs.get("cluster_urn"),
        'osOptions': self._combine_os_options(**kwargs),
        'protocol': "uds",
        'vmConfig': self._combine_vmConfig_4_import(**kwargs),
        'isTemplate': template_flag,
        's3Config': {
            'serverIp': server_ip,
            'port': server_port,
            'accessKey': access_key,
            'secretKey': secret_key,
            'bucketName': bucket_name,
            'key': object_key,
        },
    }

    resp, body = self.vrmhttpclient.request(request_url, 'POST',
                                            body=json.dumps(request_body))
    self.task_proxy.wait_task(task_uri=body.get('taskUri'))
    LOG.debug(_("[VRM-CINDER] end import_vm_from_uds()"))
    return body.get('urn')