def _check_volume_type_for_lun(self, volume, lun, existing_ref):
    """Verify the LUN is compatible with the volume's type.

    :raises ManageExistingVolumeTypeMismatch: when the volume type
        requests a QoS policy group, which this platform cannot honor.
    """
    specs = na_utils.get_volume_extra_specs(volume)
    qos_group = specs.pop('netapp:qos_policy_group', None) if specs else None
    if qos_group:
        msg = _("Setting LUN QoS policy group is not supported"
                " on this storage family and ONTAP version.")
        raise exception.ManageExistingVolumeTypeMismatch(reason=msg)
def extend_volume(self, volume, new_size):
    """Extend an existing volume to the new size."""
    name = volume['name']
    LOG.info('Extending volume %s.', name)
    try:
        path = self.local_path(volume)
        self._resize_image_file(path, new_size)
    except Exception as err:
        raise exception.VolumeBackendAPIException(
            data=_("Failed to extend volume "
                   "%(name)s, Error msg: %(msg)s.") %
            {'name': name, 'msg': six.text_type(err)})
    try:
        specs = na_utils.get_volume_extra_specs(volume)
        # Some QOS specs scale with volume size, so re-apply QoS using
        # a copy of the volume that already carries the new size.
        grown = copy.copy(volume)
        grown['size'] = new_size
        self._do_qos_for_volume(grown, specs, cleanup=False)
    except Exception as err:
        raise exception.VolumeBackendAPIException(
            data=_("Failed to set QoS for existing volume "
                   "%(name)s, Error msg: %(msg)s.") %
            {'name': name, 'msg': six.text_type(err)})
def _is_share_vol_type_match(self, volume, share):
    """Return True if the share's backing volume satisfies the type."""
    backing_vol = self._get_vol_for_share(share)
    LOG.debug("Found volume %(vol)s for share %(share)s.",
              {"vol": backing_vol, "share": share})
    specs = na_utils.get_volume_extra_specs(volume)
    matching = ssc_cmode.get_volumes_for_specs(self.ssc_vols, specs)
    return backing_vol in matching
def create_volume(self, volume):
    """Creates a volume.

    :param volume: volume reference
    """
    LOG.debug('create_volume on %s', volume['host'])
    self._ensure_shares_mounted()

    # For NFS backends the pool reported in the host field is the share.
    pool_name = volume_utils.extract_host(volume['host'], level='pool')
    if pool_name is None:
        raise exception.InvalidHost(
            reason=_("Pool is not available in the volume host field."))

    extra_specs = na_utils.get_volume_extra_specs(volume)
    try:
        volume['provider_location'] = pool_name
        LOG.debug('Using pool %s.', pool_name)
        self._do_create_volume(volume)
        self._do_qos_for_volume(volume, extra_specs)
        return {'provider_location': volume['provider_location']}
    except Exception:
        LOG.exception(_LE("Exception creating vol %(name)s on "
                          "pool %(pool)s."),
                      {'name': volume['name'],
                       'pool': volume['provider_location']})
        # We need to set this for the model update in order for the
        # manager to behave correctly.
        volume['provider_location'] = None
        raise exception.VolumeBackendAPIException(
            data=_("Volume %(vol)s could not be created in pool %(pool)s.")
            % {'vol': volume['name'], 'pool': pool_name})
def create_volume(self, volume):
    """Creates a volume.

    :param volume: volume reference
    """
    self._ensure_shares_mounted()
    extra_specs = get_volume_extra_specs(volume)
    # Candidate shares big enough for the volume and matching the specs.
    eligible = self._find_containers(volume['size'], extra_specs)
    if not eligible:
        raise exception.NfsNoSuitableShareFound(
            volume_size=volume['size'])
    # Try each eligible share in turn until one succeeds.
    for sh in eligible:
        try:
            if self.ssc_enabled:
                # SSC containers are volume objects; use the export path.
                volume['provider_location'] = sh.export['path']
            else:
                volume['provider_location'] = sh
            LOG.info(_('casted to %s') % volume['provider_location'])
            self._do_create_volume(volume)
            return {'provider_location': volume['provider_location']}
        except Exception:
            LOG.warn(_("Exception creating vol %(name)s"
                       " on share %(share)s") %
                     {'name': volume['name'],
                      'share': volume['provider_location']})
            # Clear the location so a failed attempt is not persisted.
            volume['provider_location'] = None
        finally:
            if self.ssc_enabled:
                # Mark the share's backing volume stale so SSC refreshes.
                self._update_stale_vols(volume=sh)
    msg = _("Volume %s could not be created on shares.")
    raise exception.VolumeBackendAPIException(data=msg % (volume['name']))
def _clone_source_to_destination_volume(self, source, destination_volume):
    """Clone *source* onto the destination volume and apply QoS."""
    share = self._get_volume_location(source['id'])
    extra_specs = na_utils.get_volume_extra_specs(destination_volume)
    try:
        destination_volume['provider_location'] = share
        self._clone_with_extension_check(source, destination_volume)
        self._do_qos_for_volume(destination_volume, extra_specs)

        update = self._get_volume_model_update(destination_volume) or {}
        update['provider_location'] = (
            destination_volume['provider_location'])
        return update
    except Exception:
        LOG.exception(
            "Exception creating volume %(name)s from source "
            "%(source)s on share %(share)s.",
            {'name': destination_volume['id'],
             'source': source['name'],
             'share': destination_volume['provider_location']})
    raise exception.VolumeBackendAPIException(
        data=_("Volume %s could not be created on shares.")
        % (destination_volume['id']))
def _clone_source_to_destination_volume(self, source, destination_volume):
    """Clone a source volume onto a destination and apply QoS specs."""
    dest = destination_volume
    share = self._get_volume_location(source['id'])
    specs = na_utils.get_volume_extra_specs(dest)
    try:
        dest['provider_location'] = share
        self._clone_with_extension_check(source, dest)
        self._do_qos_for_volume(dest, specs)
        model_update = self._get_volume_model_update(dest) or {}
        model_update['provider_location'] = dest['provider_location']
        return model_update
    except Exception:
        LOG.exception("Exception creating volume %(name)s from source "
                      "%(source)s on share %(share)s.",
                      {'name': dest['id'],
                       'source': source['name'],
                       'share': dest['provider_location']})
    raise exception.VolumeBackendAPIException(
        data=_("Volume %s could not be created on shares.") % (dest['id']))
def create_volume(self, volume):
    """Creates a volume.

    :param volume: volume reference
    """
    self._ensure_shares_mounted()
    extra_specs = get_volume_extra_specs(volume)
    # Candidate shares big enough for the volume and matching the specs.
    eligible = self._find_containers(volume["size"], extra_specs)
    if not eligible:
        raise exception.NfsNoSuitableShareFound(volume_size=volume["size"])
    # Try each eligible share in turn until one succeeds.
    for sh in eligible:
        try:
            if self.ssc_enabled:
                # SSC containers are volume objects; use the export path.
                volume["provider_location"] = sh.export["path"]
            else:
                volume["provider_location"] = sh
            LOG.info(_("casted to %s") % volume["provider_location"])
            self._do_create_volume(volume)
            return {"provider_location": volume["provider_location"]}
        except Exception:
            LOG.warn(
                _("Exception creating vol %(name)s"
                  " on share %(share)s") %
                {"name": volume["name"],
                 "share": volume["provider_location"]}
            )
            # Clear the location so a failed attempt is not persisted.
            volume["provider_location"] = None
        finally:
            if self.ssc_enabled:
                # Mark the share's backing volume stale so SSC refreshes.
                self._update_stale_vols(volume=sh)
    msg = _("Volume %s could not be created on shares.")
    raise exception.VolumeBackendAPIException(data=msg % (volume["name"]))
def manage_existing(self, volume, existing_ref):
    """Brings an existing storage object under Cinder management.

    existing_ref can contain source-id or source-name or both.
    source-id: lun uuid.
    source-name: complete lun path eg. /vol/vol0/lun.
    """
    lun = self._get_existing_vol_with_manage_ref(existing_ref)
    extra_specs = na_utils.get_volume_extra_specs(volume)
    # Reject the LUN early if it cannot satisfy the volume type.
    self._check_volume_type_for_lun(volume, lun, existing_ref, extra_specs)
    qos_policy_group_info = self._setup_qos_for_volume(volume, extra_specs)
    qos_policy_group_name = na_utils.get_qos_policy_group_name_from_info(qos_policy_group_info)
    path = lun.get_metadata_property("Path")
    if lun.name == volume["name"]:
        # Already named correctly; keep the existing path.
        new_path = path
        LOG.info(_LI("LUN with given ref %s need not be renamed "
                     "during manage operation."), existing_ref)
    else:
        # Rename the LUN in place to match the Cinder volume name.
        (rest, splitter, name) = path.rpartition("/")
        new_path = "%s/%s" % (rest, volume["name"])
        self.zapi_client.move_lun(path, new_path)
        # Re-fetch so the cached LUN object reflects the new path.
        lun = self._get_existing_vol_with_manage_ref({"source-name": new_path})
    if qos_policy_group_name is not None:
        self.zapi_client.set_lun_qos_policy_group(new_path, qos_policy_group_name)
    self._add_lun_to_table(lun)
    LOG.info(
        _LI("Manage operation completed for LUN with new path"
            " %(path)s and uuid %(uuid)s."),
        {"path": lun.get_metadata_property("Path"),
         "uuid": lun.get_metadata_property("UUID")},
    )
def extend_volume(self, volume, new_size):
    """Extend an existing volume to the new size."""
    LOG.info(_LI('Extending volume %s.'), volume['name'])
    try:
        self._resize_image_file(self.local_path(volume), new_size)
    except Exception as err:
        detail = {'name': volume['name'], 'msg': six.text_type(err)}
        raise exception.VolumeBackendAPIException(
            data=_("Failed to extend volume "
                   "%(name)s, Error msg: %(msg)s.") % detail)
    try:
        # QoS specs may depend on volume size; compute them against a
        # copy that already reflects the requested size.
        resized = copy.copy(volume)
        resized['size'] = new_size
        specs = na_utils.get_volume_extra_specs(volume)
        self._do_qos_for_volume(resized, specs, cleanup=False)
    except Exception as err:
        detail = {'name': volume['name'], 'msg': six.text_type(err)}
        raise exception.VolumeBackendAPIException(
            data=_("Failed to set QoS for existing volume "
                   "%(name)s, Error msg: %(msg)s.") % detail)
def create_volume(self, volume):
    """Driver entry point for creating a new volume (Data ONTAP LUN)."""
    LOG.debug('create_volume on %s', volume['host'])
    # get Data ONTAP volume name as pool name
    pool_name = volume_utils.extract_host(volume['host'], level='pool')
    if pool_name is None:
        msg = _("Pool is not available in the volume host field.")
        raise exception.InvalidHost(reason=msg)
    extra_specs = na_utils.get_volume_extra_specs(volume)
    lun_name = volume['name']
    size = int(volume['size']) * units.Gi
    metadata = {
        'OsType': self.lun_ostype,
        'SpaceReserved': self.lun_space_reservation,
        'Path': '/vol/%s/%s' % (pool_name, lun_name)
    }
    qos_policy_group_info = self._setup_qos_for_volume(volume, extra_specs)
    qos_policy_group_name = (na_utils.get_qos_policy_group_name_from_info(
        qos_policy_group_info))
    # Adaptive QoS is requested through an extra spec flag.
    qos_policy_group_is_adaptive = volume_utils.is_boolean_str(
        extra_specs.get('netapp:qos_policy_group_is_adaptive'))
    try:
        self._create_lun(pool_name, lun_name, size, metadata,
                         qos_policy_group_name,
                         qos_policy_group_is_adaptive)
    except Exception:
        LOG.exception("Exception creating LUN %(name)s in pool %(pool)s.",
                      {'name': lun_name, 'pool': pool_name})
        # Don't leave an orphaned QoS policy group behind on failure.
        self._mark_qos_policy_group_for_deletion(qos_policy_group_info)
        msg = _("Volume %s could not be created.")
        raise exception.VolumeBackendAPIException(data=msg % (volume['name']))
    LOG.debug('Created LUN with name %(name)s and QoS info %(qos)s',
              {'name': lun_name, 'qos': qos_policy_group_info})
    metadata['Path'] = '/vol/%s/%s' % (pool_name, lun_name)
    metadata['Volume'] = pool_name
    metadata['Qtree'] = None
    handle = self._create_lun_handle(metadata)
    self._add_lun_to_table(NetAppLun(handle, lun_name, size, metadata))
    model_update = self._get_volume_model_update(volume)
    return model_update
def manage_existing(self, volume, existing_vol_ref):
    """Manages an existing volume.

    The specified Cinder volume is to be taken into Cinder management.
    The driver will verify its existence and then rename it to the
    new Cinder volume name.

    It is expected that the existing volume reference is an NFS share
    point and some [/path]/volume; e.g.,
    10.10.32.1:/openstack/vol_to_manage
    or 10.10.32.1:/openstack/some_directory/vol_to_manage

    :param volume: Cinder volume to manage
    :param existing_vol_ref: Driver-specific information used to identify
        a volume
    :raises VolumeBackendAPIException: if the rename or QoS setup fails
    """
    # Attempt to find NFS share, NFS mount, and volume path from vol_ref.
    (nfs_share, nfs_mount, vol_path) = \
        self._get_share_mount_and_vol_from_vol_ref(existing_vol_ref)
    LOG.debug("Asked to manage NFS volume %(vol)s, with vol ref %(ref)s",
              {'vol': volume['id'],
               'ref': existing_vol_ref['source-name']})
    extra_specs = na_utils.get_volume_extra_specs(volume)
    self._check_volume_type(volume, nfs_share, vol_path, extra_specs)
    if vol_path == volume['name']:
        LOG.debug("New Cinder volume %s name matches reference name: "
                  "no need to rename.", volume['name'])
    else:
        src_vol = os.path.join(nfs_mount, vol_path)
        dst_vol = os.path.join(nfs_mount, volume['name'])
        try:
            self._execute("mv", src_vol, dst_vol,
                          run_as_root=self._execute_as_root,
                          check_exit_code=True)
            LOG.debug("Setting newly managed Cinder volume name to %s",
                      volume['name'])
            self._set_rw_permissions_for_all(dst_vol)
        except processutils.ProcessExecutionError as err:
            # Bug fix: interpolate the parameters into the message with
            # '%'; the original built a (format, params) tuple, so the
            # exception carried an unformatted tuple instead of a string.
            exception_msg = (_("Failed to manage existing volume %(name)s,"
                               " because rename operation failed:"
                               " Error msg: %(msg)s.") %
                             {'name': existing_vol_ref['source-name'],
                              'msg': err})
            raise exception.VolumeBackendAPIException(data=exception_msg)
    try:
        self._do_qos_for_volume(volume, extra_specs, cleanup=False)
    except Exception as err:
        exception_msg = (_("Failed to set QoS for existing volume "
                           "%(name)s, Error msg: %(msg)s.") %
                         {'name': existing_vol_ref['source-name'],
                          'msg': six.text_type(err)})
        raise exception.VolumeBackendAPIException(data=exception_msg)
    model_update = self._get_volume_model_update(volume) or {}
    model_update['provider_location'] = nfs_share
    return model_update
def _is_share_vol_type_match(self, volume, share, flexvol_name):
    """Return True if the flexvol backing the share matches the type."""
    LOG.debug("Found volume %(vol)s for share %(share)s.",
              {'vol': flexvol_name, 'share': share})
    specs = na_utils.get_volume_extra_specs(volume)
    candidates = self.ssc_library.get_matching_flexvols_for_extra_specs(
        specs)
    return flexvol_name in candidates
def _is_share_vol_type_match(self, volume, share):
    """Checks if share matches volume type."""
    flexvol = self._get_vol_for_share(share)
    LOG.debug("Found volume %(vol)s for share %(share)s.",
              {'vol': flexvol, 'share': share})
    eligible = ssc_cmode.get_volumes_for_specs(
        self.ssc_vols, na_utils.get_volume_extra_specs(volume))
    return flexvol in eligible
def _check_volume_type(self, volume, share, file_name):
    """Matches a volume type for share file.

    :raises ManageExistingVolumeTypeMismatch: when the type requests a
        QoS policy group, which this platform cannot apply.
    """
    specs = na_utils.get_volume_extra_specs(volume)
    policy = specs.pop('netapp:qos_policy_group', None) if specs else None
    if policy:
        reason = _("Setting file qos policy group is not supported"
                   " on this storage family and ontap version.")
        raise exception.ManageExistingVolumeTypeMismatch(reason=reason)
def test_get_volume_extra_specs_no_type_id(self):
    # A volume without a volume_type_id yields empty extra specs.
    volume = {}
    self.mock_object(context, 'get_admin_context')
    self.mock_object(volume_types, 'get_volume_type')
    self.mock_object(na_utils, 'log_extra_spec_warnings')

    specs = na_utils.get_volume_extra_specs(volume)

    self.assertEqual({}, specs)
def test_get_volume_extra_specs_no_volume_type(self):
    # If the type lookup returns None, extra specs fall back to {}.
    volume = {'volume_type_id': 'fake_volume_type_id'}
    self.mock_object(context, 'get_admin_context')
    self.mock_object(na_utils, 'log_extra_spec_warnings')
    self.mock_object(volume_types, 'get_volume_type',
                     mock.Mock(return_value=None))

    specs = na_utils.get_volume_extra_specs(volume)

    self.assertEqual({}, specs)
def test_get_volume_extra_specs_no_volume_type(self):
    # A missing volume type results in empty extra specs.
    fake_volume = {'volume_type_id': 'fake_volume_type_id'}
    self.mock_object(context, 'get_admin_context')
    self.mock_object(na_utils, 'log_extra_spec_warnings')
    type_mock = mock.Mock(return_value=None)
    self.mock_object(volume_types, 'get_volume_type', type_mock)

    result = na_utils.get_volume_extra_specs(fake_volume)

    self.assertEqual({}, result)
def manage_existing(self, volume, existing_vol_ref):
    """Manages an existing volume.

    The specified Cinder volume is to be taken into Cinder management.
    The driver will verify its existence and then rename it to the
    new Cinder volume name.

    It is expected that the existing volume reference is an NFS share
    point and some [/path]/volume; e.g.,
    10.10.32.1:/openstack/vol_to_manage
    or 10.10.32.1:/openstack/some_directory/vol_to_manage

    :param volume: Cinder volume to manage
    :param existing_vol_ref: Driver-specific information used to identify
        a volume
    :raises VolumeBackendAPIException: if the rename or QoS setup fails
    """
    # Attempt to find NFS share, NFS mount, and volume path from vol_ref.
    (nfs_share, nfs_mount, vol_path) = (
        self._get_share_mount_and_vol_from_vol_ref(existing_vol_ref))
    LOG.debug(
        "Asked to manage NFS volume %(vol)s, with vol ref %(ref)s",
        {"vol": volume["id"], "ref": existing_vol_ref["source-name"]},
    )
    extra_specs = na_utils.get_volume_extra_specs(volume)
    self._check_volume_type(volume, nfs_share, vol_path, extra_specs)
    if vol_path == volume["name"]:
        LOG.debug("New Cinder volume %s name matches reference name: "
                  "no need to rename.", volume["name"])
    else:
        src_vol = os.path.join(nfs_mount, vol_path)
        dst_vol = os.path.join(nfs_mount, volume["name"])
        try:
            shutil.move(src_vol, dst_vol)
            LOG.debug("Setting newly managed Cinder volume name to %s",
                      volume["name"])
            self._set_rw_permissions_for_all(dst_vol)
        except (OSError, IOError) as err:
            # Bug fix: interpolate the parameters into the message with
            # '%'; the original built a (format, params) tuple, so the
            # exception carried an unformatted tuple instead of a string.
            exception_msg = (
                _(
                    "Failed to manage existing volume %(name)s,"
                    " because rename operation failed:"
                    " Error msg: %(msg)s."
                )
                % {"name": existing_vol_ref["source-name"], "msg": err}
            )
            raise exception.VolumeBackendAPIException(data=exception_msg)
    try:
        self._do_qos_for_volume(volume, extra_specs, cleanup=False)
    except Exception as err:
        exception_msg = _("Failed to set QoS for existing volume "
                          "%(name)s, Error msg: %(msg)s.") % {
            "name": existing_vol_ref["source-name"],
            "msg": six.text_type(err),
        }
        raise exception.VolumeBackendAPIException(data=exception_msg)
    return {"provider_location": nfs_share}
def test_get_volume_extra_specs(self):
    # Extra specs from the volume type are returned unchanged.
    expected_specs = {'fake_key': 'fake_value'}
    volume_type = {'extra_specs': expected_specs}
    volume = {'volume_type_id': 'fake_volume_type_id'}
    self.mock_object(context, 'get_admin_context')
    self.mock_object(volume_types, 'get_volume_type',
                     return_value=volume_type)
    self.mock_object(na_utils, 'log_extra_spec_warnings')

    actual = na_utils.get_volume_extra_specs(volume)

    self.assertEqual(expected_specs, actual)
def _clone_source_to_destination(self, source, destination_volume):
    """Clone a LUN, resizing and applying QoS on the destination."""
    source_size = source['size']
    destination_size = destination_volume['size']
    source_name = source['name']
    destination_name = destination_volume['name']
    extra_specs = na_utils.get_volume_extra_specs(destination_volume)
    qos_policy_group_info = self._setup_qos_for_volume(
        destination_volume, extra_specs)
    qos_policy_group_name = (na_utils.get_qos_policy_group_name_from_info(
        qos_policy_group_info))
    # Adaptive QoS may come either from the extra spec flag or from the
    # QoS spec itself.
    qos_policy_group_is_adaptive = (
        volume_utils.is_boolean_str(
            extra_specs.get('netapp:qos_policy_group_is_adaptive')) or
        na_utils.is_qos_policy_group_spec_adaptive(qos_policy_group_info))
    try:
        self._clone_lun(
            source_name, destination_name,
            space_reserved=self.lun_space_reservation,
            qos_policy_group_name=qos_policy_group_name,
            qos_policy_group_is_adaptive=qos_policy_group_is_adaptive)
        if destination_size != source_size:
            try:
                self._extend_volume(destination_volume,
                                    destination_size,
                                    qos_policy_group_name)
            except Exception:
                with excutils.save_and_reraise_exception():
                    # Leave no partially-sized clone behind.
                    LOG.error("Resizing %s failed. Cleaning volume.",
                              destination_volume['id'])
                    self.delete_volume(destination_volume)
        return self._get_volume_model_update(destination_volume)
    except Exception:
        LOG.exception(
            "Exception cloning volume %(name)s from source "
            "volume %(source)s.",
            {'name': destination_name, 'source': source_name})
        # Don't orphan the QoS policy group created above.
        self._mark_qos_policy_group_for_deletion(qos_policy_group_info)
        msg = _("Volume %s could not be created from source volume.")
        raise exception.VolumeBackendAPIException(data=msg %
                                                  destination_name)
def create_volume(self, volume):
    """Driver entry point for creating a new volume (Data ONTAP LUN)."""
    LOG.debug('create_volume on %s', volume['host'])
    # get Data ONTAP volume name as pool name
    pool_name = volume_utils.extract_host(volume['host'], level='pool')
    if pool_name is None:
        msg = _("Pool is not available in the volume host field.")
        raise exception.InvalidHost(reason=msg)
    extra_specs = na_utils.get_volume_extra_specs(volume)
    lun_name = volume['name']
    size = int(volume['size']) * units.Gi
    metadata = {'OsType': self.lun_ostype,
                'SpaceReserved': self.lun_space_reservation,
                'Path': '/vol/%s/%s' % (pool_name, lun_name)}
    qos_policy_group_info = self._setup_qos_for_volume(volume, extra_specs)
    qos_policy_group_name = (
        na_utils.get_qos_policy_group_name_from_info(
            qos_policy_group_info))
    try:
        self._create_lun(pool_name, lun_name, size, metadata,
                         qos_policy_group_name)
    except Exception:
        LOG.exception(_LE("Exception creating LUN %(name)s in pool "
                          "%(pool)s."),
                      {'name': lun_name, 'pool': pool_name})
        # Don't leave an orphaned QoS policy group behind on failure.
        self._mark_qos_policy_group_for_deletion(qos_policy_group_info)
        msg = _("Volume %s could not be created.")
        raise exception.VolumeBackendAPIException(data=msg % (
            volume['name']))
    LOG.debug('Created LUN with name %(name)s and QoS info %(qos)s',
              {'name': lun_name, 'qos': qos_policy_group_info})
    metadata['Path'] = '/vol/%s/%s' % (pool_name, lun_name)
    metadata['Volume'] = pool_name
    metadata['Qtree'] = None
    handle = self._create_lun_handle(metadata)
    self._add_lun_to_table(NetAppLun(handle, lun_name, size, metadata))
    model_update = self._get_volume_model_update(volume)
    return model_update
def clone_image(self, context, volume, image_location, image_meta,
                image_service):
    """Create a volume efficiently from an existing image.

    image_location is a string whose format depends on the
    image service backend in use. The driver should use it
    to determine whether cloning is possible.

    Returns a dict of volume properties eg. provider_location,
    boolean indicating whether cloning occurred.
    """
    image_id = image_meta['id']
    cloned = False
    post_clone = False
    extra_specs = na_utils.get_volume_extra_specs(volume)
    major, minor = self.zapi_client.get_ontapi_version()
    col_path = self.configuration.netapp_copyoffload_tool_path
    try:
        # Prefer a cached copy, then a direct NFS clone.
        cache_result = self._find_image_in_cache(image_id)
        if cache_result:
            cloned = self._copy_from_cache(volume, image_id, cache_result)
        else:
            cloned = self._direct_nfs_clone(volume, image_location,
                                            image_id)
        # Try to use the copy offload tool (requires ONTAPI >= 1.20).
        if not cloned and col_path and major == 1 and minor >= 20:
            cloned = self._copy_from_img_service(context, volume,
                                                 image_service, image_id)
        if cloned:
            self._do_qos_for_volume(volume, extra_specs)
            post_clone = self._post_clone_image(volume)
    except Exception as e:
        msg = e.msg if getattr(e, 'msg', None) else e
        LOG.info(
            'Image cloning unsuccessful for image'
            ' %(image_id)s. Message: %(msg)s',
            {'image_id': image_id, 'msg': msg})
    finally:
        # NOTE(review): the return inside finally swallows any in-flight
        # exception, making clone failure a soft fallback by design.
        cloned = cloned and post_clone
        share = (volume_utils.extract_host(volume['host'], level='pool')
                 if cloned else None)
        bootable = True if cloned else False
        return {'provider_location': share, 'bootable': bootable}, cloned
def clone_image(self, context, volume, image_location, image_meta,
                image_service):
    """Create a volume efficiently from an existing image.

    image_location is a string whose format depends on the
    image service backend in use. The driver should use it
    to determine whether cloning is possible.

    Returns a dict of volume properties eg. provider_location,
    boolean indicating whether cloning occurred.
    """
    image_id = image_meta['id']
    cloned = False
    post_clone = False
    extra_specs = na_utils.get_volume_extra_specs(volume)
    major, minor = self.zapi_client.get_ontapi_version()
    col_path = self.configuration.netapp_copyoffload_tool_path
    try:
        # Prefer a cached copy, then a direct NFS clone.
        cache_result = self._find_image_in_cache(image_id)
        if cache_result:
            cloned = self._copy_from_cache(volume, image_id, cache_result)
        else:
            cloned = self._direct_nfs_clone(volume, image_location,
                                            image_id)
        # Try to use the copy offload tool (requires ONTAPI >= 1.20).
        if not cloned and col_path and major == 1 and minor >= 20:
            cloned = self._copy_from_img_service(context, volume,
                                                 image_service, image_id)
        if cloned:
            self._do_qos_for_volume(volume, extra_specs)
            post_clone = self._post_clone_image(volume)
    except Exception as e:
        msg = e.msg if getattr(e, 'msg', None) else e
        LOG.info('Image cloning unsuccessful for image'
                 ' %(image_id)s. Message: %(msg)s',
                 {'image_id': image_id, 'msg': msg})
    finally:
        # NOTE(review): the return inside finally swallows any in-flight
        # exception, making clone failure a soft fallback by design.
        cloned = cloned and post_clone
        share = (volume_utils.extract_host(volume['host'], level='pool')
                 if cloned else None)
        bootable = True if cloned else False
        return {'provider_location': share, 'bootable': bootable}, cloned
def create_volume(self, volume):
    """Driver entry point for creating a new volume (Data ONTAP LUN)."""
    LOG.debug("create_volume on %s", volume["host"])
    # get Data ONTAP volume name as pool name
    pool_name = volume_utils.extract_host(volume["host"], level="pool")
    if pool_name is None:
        msg = _("Pool is not available in the volume host field.")
        raise exception.InvalidHost(reason=msg)
    extra_specs = na_utils.get_volume_extra_specs(volume)
    lun_name = volume["name"]
    size = int(volume["size"]) * units.Gi
    metadata = {
        "OsType": self.lun_ostype,
        "SpaceReserved": self.lun_space_reservation,
        "Path": "/vol/%s/%s" % (pool_name, lun_name),
    }
    qos_policy_group_info = self._setup_qos_for_volume(volume, extra_specs)
    qos_policy_group_name = na_utils.get_qos_policy_group_name_from_info(qos_policy_group_info)
    try:
        self._create_lun(pool_name, lun_name, size, metadata,
                         qos_policy_group_name)
    except Exception:
        LOG.exception(
            _LE("Exception creating LUN %(name)s in pool " "%(pool)s."),
            {"name": lun_name, "pool": pool_name}
        )
        # Don't leave an orphaned QoS policy group behind on failure.
        self._mark_qos_policy_group_for_deletion(qos_policy_group_info)
        msg = _("Volume %s could not be created.")
        raise exception.VolumeBackendAPIException(data=msg % (volume["name"]))
    LOG.debug(
        "Created LUN with name %(name)s and QoS info %(qos)s",
        {"name": lun_name, "qos": qos_policy_group_info}
    )
    metadata["Path"] = "/vol/%s/%s" % (pool_name, lun_name)
    metadata["Volume"] = pool_name
    metadata["Qtree"] = None
    handle = self._create_lun_handle(metadata)
    # Implicitly returns None; no model update is produced here.
    self._add_lun_to_table(NetAppLun(handle, lun_name, size, metadata))
def _clone_source_to_destination(self, source, destination_volume):
    """Clone a LUN, extending the destination if the sizes differ."""
    source_size = source['size']
    destination_size = destination_volume['size']
    source_name = source['name']
    destination_name = destination_volume['name']
    extra_specs = na_utils.get_volume_extra_specs(destination_volume)
    qos_policy_group_info = self._setup_qos_for_volume(
        destination_volume, extra_specs)
    qos_policy_group_name = (na_utils.get_qos_policy_group_name_from_info(
        qos_policy_group_info))
    try:
        self._clone_lun(source_name, destination_name,
                        space_reserved='true',
                        qos_policy_group_name=qos_policy_group_name)
        if destination_size != source_size:
            try:
                self.extend_volume(
                    destination_volume, destination_size,
                    qos_policy_group_name=qos_policy_group_name)
            except Exception:
                with excutils.save_and_reraise_exception():
                    # Leave no partially-sized clone behind.
                    LOG.error(_LE("Resizing %s failed. Cleaning volume."),
                              destination_volume['id'])
                    self.delete_volume(destination_volume)
    except Exception:
        LOG.exception(
            _LE("Exception cloning volume %(name)s from source "
                "volume %(source)s."),
            {'name': destination_name, 'source': source_name})
        # Don't orphan the QoS policy group created above.
        self._mark_qos_policy_group_for_deletion(qos_policy_group_info)
        msg = _("Volume %s could not be created from source volume.")
        raise exception.VolumeBackendAPIException(data=msg %
                                                  destination_name)
def create_volume(self, volume):
    """Driver entry point for creating a new volume.

    :param volume: volume reference; a requested size of 0 falls back
        to a 100 MB default LUN.
    """
    default_size = "104857600"  # 100 MB
    # Bug fix: dropped the Python 2-only 'L' long-literal suffix
    # (1073741824L), which is a syntax error on Python 3; a plain int
    # literal behaves identically on both interpreters.
    gigabytes = 1073741824  # 2^30
    name = volume["name"]
    if int(volume["size"]) == 0:
        size = default_size
    else:
        size = str(int(volume["size"]) * gigabytes)
    metadata = {}
    metadata["OsType"] = "linux"
    metadata["SpaceReserved"] = "true"
    extra_specs = get_volume_extra_specs(volume)
    self._create_lun_on_eligible_vol(name, size, metadata, extra_specs)
    LOG.debug(_("Created LUN with name %s") % name)
    handle = self._create_lun_handle(metadata)
    self._add_lun_to_table(NetAppLun(handle, name, size, metadata))
def manage_existing(self, volume, existing_ref):
    """Brings an existing storage object under Cinder management.

    existing_ref can contain source-id or source-name or both.
    source-id: lun uuid.
    source-name: complete lun path eg. /vol/vol0/lun.
    """
    lun = self._get_existing_vol_with_manage_ref(existing_ref)
    extra_specs = na_utils.get_volume_extra_specs(volume)
    # Reject the LUN early if it cannot satisfy the volume type.
    self._check_volume_type_for_lun(volume, lun, existing_ref, extra_specs)
    qos_policy_group_info = self._setup_qos_for_volume(volume, extra_specs)
    qos_policy_group_name = (na_utils.get_qos_policy_group_name_from_info(
        qos_policy_group_info))
    is_adaptive = na_utils.is_qos_policy_group_spec_adaptive(
        qos_policy_group_info)
    path = lun.get_metadata_property('Path')
    if lun.name == volume['name']:
        # Already named correctly; keep the existing path.
        new_path = path
        LOG.info(
            "LUN with given ref %s need not be renamed "
            "during manage operation.", existing_ref)
    else:
        # Rename the LUN in place to match the Cinder volume name.
        (rest, splitter, name) = path.rpartition('/')
        new_path = '%s/%s' % (rest, volume['name'])
        self.zapi_client.move_lun(path, new_path)
        # Re-fetch so the cached LUN object reflects the new path.
        lun = self._get_existing_vol_with_manage_ref(
            {'source-name': new_path})
    if qos_policy_group_name is not None:
        self.zapi_client.set_lun_qos_policy_group(new_path,
                                                  qos_policy_group_name,
                                                  is_adaptive)
    self._add_lun_to_table(lun)
    LOG.info(
        "Manage operation completed for LUN with new path"
        " %(path)s and uuid %(uuid)s.",
        {'path': lun.get_metadata_property('Path'),
         'uuid': lun.get_metadata_property('UUID')})
    return self._get_volume_model_update(volume)
def create_volume(self, volume):
    """Creates a volume.

    :param volume: volume reference
    """
    LOG.debug('create_volume on %s' % volume['host'])
    self._ensure_shares_mounted()
    # get share as pool name
    share = volume_utils.extract_host(volume['host'], level='pool')
    if share is None:
        msg = _("Pool is not available in the volume host field.")
        raise exception.InvalidHost(reason=msg)
    extra_specs = get_volume_extra_specs(volume)
    # The QoS spec is consumed here; the rest only trigger warnings.
    qos_policy_group = extra_specs.pop('netapp:qos_policy_group', None) \
        if extra_specs else None
    # warn on obsolete extra specs
    na_utils.log_extra_spec_warnings(extra_specs)
    try:
        volume['provider_location'] = share
        LOG.info(_LI('casted to %s') % volume['provider_location'])
        self._do_create_volume(volume)
        if qos_policy_group:
            self._set_qos_policy_group_on_volume(volume, share,
                                                 qos_policy_group)
        return {'provider_location': volume['provider_location']}
    except Exception as ex:
        LOG.error(
            _LW("Exception creating vol %(name)s on "
                "share %(share)s. Details: %(ex)s") %
            {'name': volume['name'],
             'share': volume['provider_location'],
             'ex': ex})
        # Clear the location so a failed attempt is not persisted.
        volume['provider_location'] = None
    finally:
        if self.ssc_enabled:
            # Mark the share's backing volume stale so SSC refreshes.
            self._update_stale_vols(self._get_vol_for_share(share))
    msg = _("Volume %s could not be created on shares.")
    raise exception.VolumeBackendAPIException(data=msg % (volume['name']))
def _clone_source_to_destination(self, source, destination_volume):
    """Clone a LUN, resizing and applying QoS on the destination."""
    source_size = source['size']
    destination_size = destination_volume['size']
    source_name = source['name']
    destination_name = destination_volume['name']
    extra_specs = na_utils.get_volume_extra_specs(destination_volume)
    qos_policy_group_info = self._setup_qos_for_volume(
        destination_volume, extra_specs)
    qos_policy_group_name = (
        na_utils.get_qos_policy_group_name_from_info(
            qos_policy_group_info))
    try:
        self._clone_lun(source_name, destination_name,
                        space_reserved=self.lun_space_reservation,
                        qos_policy_group_name=qos_policy_group_name)
        if destination_size != source_size:
            try:
                self._extend_volume(destination_volume,
                                    destination_size,
                                    qos_policy_group_name)
            except Exception:
                with excutils.save_and_reraise_exception():
                    # Leave no partially-sized clone behind.
                    LOG.error(
                        _LE("Resizing %s failed. Cleaning volume."),
                        destination_volume['id'])
                    self.delete_volume(destination_volume)
        return self._get_volume_model_update(destination_volume)
    except Exception:
        LOG.exception(_LE("Exception cloning volume %(name)s from source "
                          "volume %(source)s."),
                      {'name': destination_name, 'source': source_name})
        # Don't orphan the QoS policy group created above.
        self._mark_qos_policy_group_for_deletion(qos_policy_group_info)
        msg = _("Volume %s could not be created from source volume.")
        raise exception.VolumeBackendAPIException(
            data=msg % destination_name)
def _check_volume_type(self, volume, share, file_name):
    """Match volume type for share file.

    :raises ManageExistingVolumeTypeMismatch: if the share's backing
        volume does not satisfy the volume type's extra specs.
    :raises NetAppDriverException: if assigning the requested QoS
        policy group to the file fails.
    """
    extra_specs = na_utils.get_volume_extra_specs(volume)
    qos_policy_group = extra_specs.pop('netapp:qos_policy_group', None) \
        if extra_specs else None
    if not self._is_share_vol_type_match(volume, share):
        # Bug fix: interpolate the share into the message with '%'; the
        # original passed a (format, args) tuple as the reason.
        raise exception.ManageExistingVolumeTypeMismatch(
            reason=_("Volume type does not match for share %s.") % share)
    if qos_policy_group:
        try:
            vserver, flex_vol_name = self._get_vserver_and_exp_vol(
                share=share)
            self.zapi_client.file_assign_qos(flex_vol_name,
                                             qos_policy_group,
                                             file_name)
        except na_api.NaApiError as ex:
            LOG.exception(_LE('Setting file QoS policy group failed. %s'),
                          ex)
            # Bug fix: same tuple-instead-of-'%' issue as above.
            raise exception.NetAppDriverException(
                reason=_('Setting file QoS policy group failed. %s') % ex)
def create_volume(self, volume):
    """Creates a volume.

    :param volume: volume reference
    """
    LOG.debug('create_volume on %s' % volume['host'])
    self._ensure_shares_mounted()
    # get share as pool name
    share = volume_utils.extract_host(volume['host'], level='pool')
    if share is None:
        msg = _("Pool is not available in the volume host field.")
        raise exception.InvalidHost(reason=msg)
    extra_specs = na_utils.get_volume_extra_specs(volume)
    # The QoS spec is consumed here; the rest only trigger warnings.
    qos_policy_group = extra_specs.pop('netapp:qos_policy_group', None) \
        if extra_specs else None
    # warn on obsolete extra specs
    na_utils.log_extra_spec_warnings(extra_specs)
    try:
        volume['provider_location'] = share
        LOG.info(_LI('casted to %s') % volume['provider_location'])
        self._do_create_volume(volume)
        if qos_policy_group:
            self._set_qos_policy_group_on_volume(volume, share,
                                                 qos_policy_group)
        return {'provider_location': volume['provider_location']}
    except Exception as ex:
        LOG.error(_LW("Exception creating vol %(name)s on "
                      "share %(share)s. Details: %(ex)s") %
                  {'name': volume['name'],
                   'share': volume['provider_location'],
                   'ex': ex})
        # Clear the location so a failed attempt is not persisted.
        volume['provider_location'] = None
    finally:
        if self.ssc_enabled:
            # Mark the share's backing volume stale so SSC refreshes.
            self._update_stale_vols(self._get_vol_for_share(share))
    msg = _("Volume %s could not be created on shares.")
    raise exception.VolumeBackendAPIException(data=msg % (volume['name']))
def create_volume(self, volume):
    """Driver entry point for creating a new volume (Data ONTAP LUN).

    :param volume: volume reference
    :raises InvalidHost: if no pool is present in the host field.
    """
    # Lazy %-style logging args (the original formatted eagerly with %).
    LOG.debug('create_volume on %s', volume['host'])

    # get Data ONTAP volume name as pool name
    ontap_volume_name = volume_utils.extract_host(volume['host'],
                                                  level='pool')
    if ontap_volume_name is None:
        msg = _("Pool is not available in the volume host field.")
        raise exception.InvalidHost(reason=msg)

    lun_name = volume['name']

    # start with default size, get requested size
    default_size = units.Mi * 100  # 100 MB
    size = (default_size if not int(volume['size'])
            else int(volume['size']) * units.Gi)

    metadata = {'OsType': 'linux',
                'SpaceReserved': 'true',
                'Path': '/vol/%s/%s' % (ontap_volume_name, lun_name)}

    extra_specs = na_utils.get_volume_extra_specs(volume)
    qos_policy_group = (extra_specs.pop('netapp:qos_policy_group', None)
                        if extra_specs else None)

    # warn on obsolete extra specs
    na_utils.log_extra_spec_warnings(extra_specs)

    self._create_lun(ontap_volume_name, lun_name, size,
                     metadata, qos_policy_group)
    LOG.debug('Created LUN with name %s', lun_name)

    # 'Path' is already set above; only Volume/Qtree need filling in
    # (the original redundantly re-assigned 'Path' to the same value).
    metadata['Volume'] = ontap_volume_name
    metadata['Qtree'] = None

    handle = self._create_lun_handle(metadata)
    self._add_lun_to_table(NetAppLun(handle, lun_name, size, metadata))
def extend_volume(self, volume, new_size):
    """Driver entry point to increase the size of a volume."""
    specs = na_utils.get_volume_extra_specs(volume)

    # QoS specs may scale with volume size, so derive the policy from a
    # shallow copy of the volume carrying the requested (new) size.
    resized_volume = copy.copy(volume)
    resized_volume['size'] = new_size
    qos_info = self._setup_qos_for_volume(resized_volume, specs)
    policy_name = na_utils.get_qos_policy_group_name_from_info(qos_info)

    try:
        self._extend_volume(volume, new_size, policy_name)
    except Exception:
        with excutils.save_and_reraise_exception():
            # The resize failed: roll QoS back to match the old size.
            self._setup_qos_for_volume(volume, specs)
def _check_volume_type_for_lun(self, volume, lun, existing_ref):
    """Check if LUN satisfies volume type."""
    extra_specs = na_utils.get_volume_extra_specs(volume)

    def ssc_match():
        # True when the LUN's containing flexvol is among the volumes
        # that satisfy the requested extra specs.
        candidates = ssc_cmode.get_volumes_for_specs(self.ssc_vols,
                                                     extra_specs)
        owner = lun.get_metadata_property('Volume')
        return any(owner == candidate.id['name']
                   for candidate in candidates)

    read_ok = ssc_match()
    if not read_ok:
        # Cached SSC data may be stale; refresh once and retry.
        ssc_cmode.get_cluster_latest_ssc(self,
                                         self.zapi_client.get_connection(),
                                         self.vserver)
        read_ok = ssc_match()

    qos_policy_group = (extra_specs.pop('netapp:qos_policy_group', None)
                        if extra_specs else None)
    write_ok = False
    if not qos_policy_group:
        # No QoS requested: nothing to apply.
        write_ok = True
    elif read_ok:
        try:
            lun_path = lun.get_metadata_property('Path')
            self.zapi_client.set_lun_qos_policy_group(lun_path,
                                                      qos_policy_group)
            write_ok = True
        except netapp_api.NaApiError as nae:
            LOG.error(_LE("Failure setting QoS policy group. %s"), nae)

    if not (read_ok and write_ok):
        raise exception.ManageExistingVolumeTypeMismatch(
            reason=(_("LUN with given ref %(ref)s does not satisfy volume"
                      " type. Ensure LUN volume with ssc features is"
                      " present on vserver %(vs)s.")
                    % {'ref': existing_ref, 'vs': self.vserver}))
def create_volume(self, volume):
    """Driver entry point for creating a new volume (Data ONTAP LUN).

    :param volume: volume reference
    :raises InvalidHost: if no pool is present in the host field.
    """
    # Pass log args lazily instead of pre-formatting with %, so the
    # string is only built when debug logging is enabled.
    LOG.debug('create_volume on %s', volume['host'])

    # get Data ONTAP volume name as pool name
    ontap_volume_name = volume_utils.extract_host(volume['host'],
                                                  level='pool')
    if ontap_volume_name is None:
        msg = _("Pool is not available in the volume host field.")
        raise exception.InvalidHost(reason=msg)

    lun_name = volume['name']

    # start with default size, get requested size
    default_size = units.Mi * 100  # 100 MB
    size = (default_size if not int(volume['size'])
            else int(volume['size']) * units.Gi)

    metadata = {'OsType': 'linux',
                'SpaceReserved': 'true',
                'Path': '/vol/%s/%s' % (ontap_volume_name, lun_name)}

    extra_specs = na_utils.get_volume_extra_specs(volume)
    qos_policy_group = (extra_specs.pop('netapp:qos_policy_group', None)
                        if extra_specs else None)

    # warn on obsolete extra specs
    na_utils.log_extra_spec_warnings(extra_specs)

    self._create_lun(ontap_volume_name, lun_name, size,
                     metadata, qos_policy_group)
    LOG.debug('Created LUN with name %s', lun_name)

    # 'Path' was set when metadata was built; the original re-assigned
    # it redundantly here. Only Volume/Qtree still need filling in.
    metadata['Volume'] = ontap_volume_name
    metadata['Qtree'] = None

    handle = self._create_lun_handle(metadata)
    self._add_lun_to_table(NetAppLun(handle, lun_name, size, metadata))
def _clone_source_to_destination_volume(self, source, destination_volume):
    """Clone *source* onto *destination_volume* and apply its QoS specs."""
    share = self._get_volume_location(source['id'])
    specs = na_utils.get_volume_extra_specs(destination_volume)
    destination_volume['provider_location'] = share
    try:
        self._clone_with_extension_check(source, destination_volume)
        self._do_qos_for_volume(destination_volume, specs)
        return {
            'provider_location': destination_volume['provider_location'],
        }
    except Exception:
        LOG.exception(
            _LE("Exception creating volume %(name)s from source "
                "%(source)s on share %(share)s."),
            {'name': destination_volume['id'],
             'source': source['name'],
             'share': destination_volume['provider_location']})
    # The clone failed and the exception was logged above; surface a
    # backend API error to the caller.
    msg = _("Volume %s could not be created on shares.")
    raise exception.VolumeBackendAPIException(
        data=msg % (destination_volume['id']))
def extend_volume(self, volume, new_size):
    """Driver entry point to increase the size of a volume."""
    extra_specs = na_utils.get_volume_extra_specs(volume)

    # Size-dependent QoS specs must be computed from the target size,
    # so work on a shallow copy with the new size applied.
    grown = copy.copy(volume)
    grown['size'] = new_size
    info = self._setup_qos_for_volume(grown, extra_specs)
    group_name = na_utils.get_qos_policy_group_name_from_info(info)

    try:
        self._extend_volume(volume, new_size, group_name)
    except Exception:
        with excutils.save_and_reraise_exception():
            # Restore QoS for the original size before re-raising.
            self._setup_qos_for_volume(volume, extra_specs)
def clone_image(self, context, volume, image_location, image_meta,
                image_service):
    """Create a volume efficiently from an existing image.

    image_location is a string whose format depends on the
    image service backend in use. The driver should use it
    to determine whether cloning is possible.

    Returns a dict of volume properties eg. provider_location,
    boolean indicating whether cloning occurred.
    """
    image_id = image_meta['id']
    cloned = False
    post_clone = False

    extra_specs = na_utils.get_volume_extra_specs(volume)

    try:
        cache_result = self._find_image_in_cache(image_id)
        if cache_result:
            cloned = self._clone_from_cache(volume, image_id, cache_result)
        else:
            cloned = self._direct_nfs_clone(volume, image_location,
                                            image_id)
        if cloned:
            self._do_qos_for_volume(volume, extra_specs)
            post_clone = self._post_clone_image(volume)
    except Exception as e:
        msg = e.msg if getattr(e, 'msg', None) else e
        LOG.info(_LI('Image cloning unsuccessful for image'
                     ' %(image_id)s. Message: %(msg)s'),
                 {'image_id': image_id, 'msg': msg})

    # Bug fix: the result was previously returned from inside a
    # 'finally' block, which silently discards any in-flight exception
    # not caught above (e.g. KeyboardInterrupt/SystemExit). Building
    # the result after the try block preserves the intended fallback
    # behavior while letting such exceptions propagate.
    cloned = cloned and post_clone
    share = volume['provider_location'] if cloned else None
    bootable = True if cloned else False
    return {'provider_location': share, 'bootable': bootable}, cloned
def clone_image(self, context, volume, image_location, image_meta,
                image_service):
    """Create a volume efficiently from an existing image.

    image_location is a string whose format depends on the
    image service backend in use. The driver should use it
    to determine whether cloning is possible.

    Returns a dict of volume properties eg. provider_location,
    boolean indicating whether cloning occurred.
    """
    image_id = image_meta['id']
    cloned = False
    post_clone = False

    extra_specs = na_utils.get_volume_extra_specs(volume)

    try:
        cache_result = self._find_image_in_cache(image_id)
        if cache_result:
            cloned = self._clone_from_cache(volume, image_id, cache_result)
        else:
            cloned = self._direct_nfs_clone(volume, image_location,
                                            image_id)
        if cloned:
            self._do_qos_for_volume(volume, extra_specs)
            post_clone = self._post_clone_image(volume)
    except Exception as e:
        msg = e.msg if getattr(e, 'msg', None) else e
        LOG.info(_LI('Image cloning unsuccessful for image'
                     ' %(image_id)s. Message: %(msg)s'),
                 {'image_id': image_id, 'msg': msg})

    # Bug fix: a 'return' inside 'finally' (as originally written)
    # swallows exceptions that escape both the try body and the except
    # clause, such as BaseException subclasses. Compute the result
    # after the try statement instead.
    cloned = cloned and post_clone
    share = volume['provider_location'] if cloned else None
    bootable = True if cloned else False
    return {'provider_location': share, 'bootable': bootable}, cloned
def _check_volume_type_for_lun(self, volume, lun, existing_ref):
    """Check if LUN satisfies volume type."""
    extra_specs = na_utils.get_volume_extra_specs(volume)

    def _specs_satisfied():
        # Does the LUN live in a flexvol matching the extra specs?
        for candidate in ssc_cmode.get_volumes_for_specs(self.ssc_vols,
                                                         extra_specs):
            if lun.get_metadata_property('Volume') == candidate.id['name']:
                return True
        return False

    match_read = _specs_satisfied()
    if not match_read:
        # Refresh potentially stale SSC data once, then try again.
        ssc_cmode.get_cluster_latest_ssc(self,
                                         self.zapi_client.get_connection(),
                                         self.vserver)
        match_read = _specs_satisfied()

    qos_policy_group = None
    if extra_specs:
        qos_policy_group = extra_specs.pop('netapp:qos_policy_group', None)

    # With no QoS requested there is nothing to write; otherwise try
    # to apply the policy, but only when the read check succeeded.
    match_write = not qos_policy_group
    if qos_policy_group and match_read:
        try:
            self.zapi_client.set_lun_qos_policy_group(
                lun.get_metadata_property('Path'), qos_policy_group)
            match_write = True
        except netapp_api.NaApiError as nae:
            LOG.error(_LE("Failure setting QoS policy group. %s"), nae)

    if not (match_read and match_write):
        raise exception.ManageExistingVolumeTypeMismatch(
            reason=(_("LUN with given ref %(ref)s does not satisfy volume"
                      " type. Ensure LUN volume with ssc features is"
                      " present on vserver %(vs)s.")
                    % {'ref': existing_ref, 'vs': self.vserver}))
def create_volume(self, volume):
    """Creates a volume.

    :param volume: volume reference
    """
    LOG.debug("create_volume on %s", volume['host'])
    self._ensure_shares_mounted()

    # The share serves as the pool name.
    pool_name = volume_utils.extract_host(volume['host'], level='pool')
    if pool_name is None:
        raise exception.InvalidHost(
            reason=_("Pool is not available in the volume host field."))

    extra_specs = na_utils.get_volume_extra_specs(volume)
    try:
        volume['provider_location'] = pool_name
        LOG.debug("Using pool %s.", pool_name)
        self._do_create_volume(volume)
        self._do_qos_for_volume(volume, extra_specs)
        return {'provider_location': volume['provider_location']}
    except Exception:
        LOG.exception(_LE("Exception creating vol %(name)s on "
                          "pool %(pool)s."),
                      {'name': volume['name'],
                       'pool': volume['provider_location']})
        # We need to set this for the model update in order for the
        # manager to behave correctly.
        volume['provider_location'] = None
    finally:
        if self.ssc_enabled:
            self._update_stale_vols(self._get_vol_for_share(pool_name))

    # Only reachable on failure: the except clause above does not
    # re-raise, so surface a backend error here.
    msg = _("Volume %(vol)s could not be created in pool %(pool)s.")
    raise exception.VolumeBackendAPIException(
        data=msg % {'vol': volume['name'], 'pool': pool_name})
LOG.info(_LI('Extending volume %s.'), volume['name']) <<<<<<< HEAD try: path = self.local_path(volume) self._resize_image_file(path, new_size) except Exception as err: exception_msg = (_("Failed to extend volume " "%(name)s, Error msg: %(msg)s.") % {'name': volume['name'], 'msg': six.text_type(err)}) raise exception.VolumeBackendAPIException(data=exception_msg) try: extra_specs = na_utils.get_volume_extra_specs(volume) # Create volume copy with new size for size-dependent QOS specs volume_copy = copy.copy(volume) volume_copy['size'] = new_size self._do_qos_for_volume(volume_copy, extra_specs, cleanup=False) except Exception as err: exception_msg = (_("Failed to set QoS for existing volume " "%(name)s, Error msg: %(msg)s.") % {'name': volume['name'], 'msg': six.text_type(err)}) raise exception.VolumeBackendAPIException(data=exception_msg) ======= >>>>>>> refs/remotes/openstack/stable/kilo