def migrate_volume(self, context, volume, host, force_host_copy):
    """Migrate the volume to the specified host.

    :param context: request context
    :param volume: volume reference to migrate
    :param host: name of the destination host
    :param force_host_copy: whether to force a host-assisted copy
    :raises InvalidVolume: if the volume's status, migration state, or
        snapshots make it ineligible for migration
    :raises InvalidHost: if the destination host is unavailable or is the
        volume's current host
    """
    # We only handle "available" volumes for now
    if volume['status'] not in ['available', 'in-use']:
        msg = _('Volume status must be available/in-use.')
        LOG.error(msg)
        raise exception.InvalidVolume(reason=msg)

    # Make sure volume is not part of a migration
    if volume['migration_status'] is not None:
        msg = _("Volume is already part of an active migration")
        raise exception.InvalidVolume(reason=msg)

    # We only handle volumes without snapshots for now
    snaps = self.db.snapshot_get_all_for_volume(context, volume['id'])
    if snaps:
        msg = _("volume must not have snapshots")
        LOG.error(msg)
        raise exception.InvalidVolume(reason=msg)

    # Make sure the host is in the list of available hosts
    elevated = context.elevated()
    topic = CONF.volume_topic
    services = self.db.service_get_all_by_topic(elevated,
                                                topic,
                                                disabled=False)
    # any() short-circuits on the first live matching service instead of
    # probing every remaining service like the original flag-setting loop.
    found = any(utils.service_is_up(service) and service['host'] == host
                for service in services)
    if not found:
        msg = (_('No available service named %s') % host)
        LOG.error(msg)
        raise exception.InvalidHost(reason=msg)

    # Make sure the destination host is different than the current one
    if host == volume['host']:
        msg = _('Destination host must be different than current host')
        LOG.error(msg)
        raise exception.InvalidHost(reason=msg)

    self.update(context, volume, {'migration_status': 'starting'})

    # Call the scheduler to ensure that the host exists and that it can
    # accept the volume
    volume_type = {}
    volume_type_id = volume['volume_type_id']
    if volume_type_id:
        volume_type = volume_types.get_volume_type(context, volume_type_id)
    request_spec = {'volume_properties': volume,
                    'volume_type': volume_type,
                    'volume_id': volume['id']}
    self.scheduler_rpcapi.migrate_volume_to_host(context,
                                                 CONF.volume_topic,
                                                 volume['id'],
                                                 host,
                                                 force_host_copy,
                                                 request_spec)
def create_volume(self, volume):
    """Creates a volume.

    :param volume: volume reference
    :returns: model update dict carrying the chosen provider_location
    :raises InvalidHost: if no pool is present in the volume host field
    :raises VolumeBackendAPIException: if creation on the share fails
    """
    # Lazy %-style logging arguments: the message is only rendered when
    # the log level is enabled (the original interpolated eagerly).
    LOG.debug('create_volume on %s', volume['host'])
    self._ensure_shares_mounted()
    # get share as pool name
    share = volume_utils.extract_host(volume['host'], level='pool')

    if share is None:
        msg = _("Pool is not available in the volume host field.")
        raise exception.InvalidHost(reason=msg)

    volume['provider_location'] = share
    LOG.info(_LI('Creating volume at location %s'),
             volume['provider_location'])

    try:
        self._do_create_volume(volume)
    except Exception as ex:
        LOG.error(_LE("Exception creating vol %(name)s on "
                      "share %(share)s. Details: %(ex)s"),
                  {'name': volume['name'],
                   'share': volume['provider_location'],
                   'ex': six.text_type(ex)})
        msg = _("Volume %s could not be created on shares.")
        raise exception.VolumeBackendAPIException(
            data=msg % (volume['name']))

    return {'provider_location': volume['provider_location']}
def check_for_setup_error(self):
    """Validate driver prerequisites after setup.

    :raises VolumeBackendAPIException: if no backend session id exists
    :raises InvalidHost: if no pools are configured
    :raises InvalidConfigurationValue: if thin provisioning is enabled
        without max_over_subscription_ratio
    """
    if self.proxy.session_id is None:
        raise exception.VolumeBackendAPIException(
            data=_('FSS cinder volume driver not ready: Unable to '
                   'determine session id.'))

    conf = self.configuration
    if conf.fss_pool:
        # Map the deprecated single-pool option onto the new pools format.
        conf.fss_pools = {'A': six.text_type(conf.fss_pool)}
        LOG.warning("'fss_pool=<pool-id>' is deprecated. Using the "
                    "fss_pools=A:<pool-id> for single pool or "
                    "fss_pools=P:<pool-id>,O:<other-pool-id> instead "
                    "as old format will be removed once Queens development"
                    " opens up.")

    if not conf.fss_pools:
        raise exception.InvalidHost(
            reason=_('Pool is not available in the cinder configuration '
                     'fields.'))
    self._pool_checking(conf.fss_pools)

    if conf.san_thin_provision and not conf.max_over_subscription_ratio:
        raise exception.InvalidConfigurationValue(
            reason=_('The max_over_subscription_ratio have to set '
                     'when thin provisioning enabled.'))
def create_volume(self, volume):
    """Creates a volume.

    :param volume: volume reference
    """
    LOG.debug('create_volume on %s', volume['host'])
    self._ensure_shares_mounted()

    # The backing share travels in the pool component of the host field.
    pool = volume_utils.extract_host(volume['host'], level='pool')
    if pool is None:
        msg = _("Pool is not available in the volume host field.")
        raise exception.InvalidHost(reason=msg)

    specs = na_utils.get_volume_extra_specs(volume)

    try:
        volume['provider_location'] = pool
        LOG.debug('Using pool %s.', pool)
        self._do_create_volume(volume)
        self._do_qos_for_volume(volume, specs)
        return {'provider_location': volume['provider_location']}
    except Exception:
        LOG.exception(_LE("Exception creating vol %(name)s on "
                          "pool %(pool)s."),
                      {'name': volume['name'],
                       'pool': volume['provider_location']})
        # We need to set this for the model update in order for the
        # manager to behave correctly.
        volume['provider_location'] = None
        msg = _("Volume %(vol)s could not be created in pool %(pool)s.")
        raise exception.VolumeBackendAPIException(
            data=msg % {'vol': volume['name'], 'pool': pool})
def create_volume(self, volume):
    """Driver entry point for creating a new volume (Data ONTAP LUN).

    :param volume: volume reference
    :returns: model update from _get_volume_model_update
    :raises InvalidHost: if no pool is present in the volume host field
    :raises VolumeBackendAPIException: if LUN creation fails
    """
    LOG.debug('create_volume on %s', volume['host'])

    # The Data ONTAP volume name rides in the host's pool component.
    pool = volume_utils.extract_host(volume['host'], level='pool')
    if pool is None:
        msg = _("Pool is not available in the volume host field.")
        raise exception.InvalidHost(reason=msg)

    extra_specs = na_utils.get_volume_extra_specs(volume)

    name = volume['name']
    size_bytes = int(volume['size']) * units.Gi
    metadata = {'OsType': self.lun_ostype,
                'SpaceReserved': self.lun_space_reservation,
                'Path': '/vol/%s/%s' % (pool, name)}

    qos_info = self._setup_qos_for_volume(volume, extra_specs)
    qos_name = na_utils.get_qos_policy_group_name_from_info(qos_info)
    qos_is_adaptive = volume_utils.is_boolean_str(
        extra_specs.get('netapp:qos_policy_group_is_adaptive'))

    try:
        self._create_lun(pool, name, size_bytes, metadata,
                         qos_name, qos_is_adaptive)
    except Exception:
        LOG.exception("Exception creating LUN %(name)s in pool %(pool)s.",
                      {'name': name, 'pool': pool})
        # Failed creation must not leave a dangling QoS policy group.
        self._mark_qos_policy_group_for_deletion(qos_info)
        msg = _("Volume %s could not be created.")
        raise exception.VolumeBackendAPIException(
            data=msg % (volume['name']))

    LOG.debug('Created LUN with name %(name)s and QoS info %(qos)s',
              {'name': name, 'qos': qos_info})

    metadata['Path'] = '/vol/%s/%s' % (pool, name)
    metadata['Volume'] = pool
    metadata['Qtree'] = None

    handle = self._create_lun_handle(metadata)
    self._add_lun_to_table(NetAppLun(handle, name, size_bytes, metadata))
    return self._get_volume_model_update(volume)
def check_for_setup_error(self):
    """Validate driver prerequisites after setup.

    :raises VolumeBackendAPIException: if no backend session id exists
    :raises InvalidHost: if no pool is configured
    """
    if self.proxy.session_id is None:
        raise exception.VolumeBackendAPIException(
            data=_('FSS cinder volume driver not ready: Unable to '
                   'determine session id.'))

    if not self.configuration.fss_pool:
        raise exception.InvalidHost(
            reason=_('Pool is not available in the cinder configuration '
                     'fields.'))

    self._pool_checking(self.configuration.fss_pool)
def create_volume(self, volume):
    """Creates a volume.

    :param volume: volume reference
    :returns: model update dict carrying the chosen provider_location
    :raises InvalidHost: if no pool is present in the volume host field
    :raises VolumeBackendAPIException: if creation on the share fails
    """
    # Lazy %-style logging arguments: the message is only rendered when
    # the log level is enabled (the original interpolated eagerly).
    LOG.debug('create_volume on %s', volume['host'])
    self._ensure_shares_mounted()
    # get share as pool name
    share = volume_utils.extract_host(volume['host'], level='pool')

    if share is None:
        msg = _("Pool is not available in the volume host field.")
        raise exception.InvalidHost(reason=msg)

    extra_specs = get_volume_extra_specs(volume)
    qos_policy_group = extra_specs.pop('netapp:qos_policy_group', None) \
        if extra_specs else None

    # warn on obsolete extra specs
    na_utils.log_extra_spec_warnings(extra_specs)

    try:
        volume['provider_location'] = share
        LOG.info(_LI('casted to %s'), volume['provider_location'])
        self._do_create_volume(volume)
        if qos_policy_group:
            self._set_qos_policy_group_on_volume(volume, share,
                                                 qos_policy_group)
        return {'provider_location': volume['provider_location']}
    except Exception as ex:
        # NOTE(review): _LW is the warning marker yet this logs at error
        # level; left unchanged because _LE may not be imported here —
        # confirm against the file's i18n imports.
        LOG.error(_LW("Exception creating vol %(name)s on "
                      "share %(share)s. Details: %(ex)s"),
                  {'name': volume['name'],
                   'share': volume['provider_location'],
                   'ex': ex})
        volume['provider_location'] = None
    finally:
        if self.ssc_enabled:
            self._update_stale_vols(self._get_vol_for_share(share))

    # Only reachable on failure: the success path returned inside the try.
    msg = _("Volume %s could not be created on shares.")
    raise exception.VolumeBackendAPIException(data=msg % (volume['name']))
def create_volume(self, volume):
    """Creates a volume."""
    LOG.debug('create_volume on %s', volume['host'])

    # The E-series pool label rides in the host's pool component.
    pool_label = volume_utils.extract_host(volume['host'], level='pool')
    if pool_label is None:
        msg = _("Pool is not available in the volume host field.")
        raise exception.InvalidHost(reason=msg)

    volume_label = utils.convert_uuid_to_es_fmt(volume['name_id'])
    self._create_volume(pool_label, volume_label, int(volume['size']))
def create_volume(self, volume):
    """Driver entry point for creating a new volume (Data ONTAP LUN).

    :param volume: volume reference
    :raises InvalidHost: if no pool is present in the volume host field
    """
    # Lazy %-style logging args instead of eager % interpolation.
    LOG.debug('create_volume on %s', volume['host'])

    # get Data ONTAP volume name as pool name
    ontap_volume_name = volume_utils.extract_host(volume['host'],
                                                  level='pool')
    if ontap_volume_name is None:
        msg = _("Pool is not available in the volume host field.")
        raise exception.InvalidHost(reason=msg)

    lun_name = volume['name']

    # Fall back to a 100 MB default when the requested size is zero;
    # convert the requested size once instead of twice.
    default_size = units.Mi * 100  # 100 MB
    requested_gb = int(volume['size'])
    size = requested_gb * units.Gi if requested_gb else default_size

    metadata = {'OsType': 'linux',
                'SpaceReserved': 'true',
                'Path': '/vol/%s/%s' % (ontap_volume_name, lun_name)}

    extra_specs = na_utils.get_volume_extra_specs(volume)
    qos_policy_group = extra_specs.pop('netapp:qos_policy_group', None) \
        if extra_specs else None

    # warn on obsolete extra specs
    na_utils.log_extra_spec_warnings(extra_specs)

    self._create_lun(ontap_volume_name, lun_name, size,
                     metadata, qos_policy_group)
    LOG.debug('Created LUN with name %s', lun_name)

    metadata['Path'] = '/vol/%s/%s' % (ontap_volume_name, lun_name)
    metadata['Volume'] = ontap_volume_name
    metadata['Qtree'] = None

    handle = self._create_lun_handle(metadata)
    self._add_lun_to_table(NetAppLun(handle, lun_name, size, metadata))
def get_manageable_resources(req, is_detail, function_get_manageable,
                             view_builder):
    """List manageable resources on a host via the supplied driver call.

    :param req: the API request
    :param is_detail: whether to build a detailed or summary view
    :param function_get_manageable: callable returning manageable resources
    :param view_builder: view builder with detail_list/summary_list
    :raises InvalidHost: if no host query parameter was supplied
    :raises InvalidParameterValue: on unsupported sort keys or directions
    """
    context = req.environ['cinder.context']
    params = req.params.copy()

    host = params.get('host')
    if host is None:
        raise exception.InvalidHost(
            reason=_("Host must be specified in query parameters"))

    marker, limit, offset = common.get_pagination_params(params)
    sort_keys, sort_dirs = common.get_sort_params(params,
                                                  default_key='reference')

    # Sorting happens in the driver rather than the DB layer here, so the
    # sort parameters must be validated before use.
    valid_sort_keys = ('reference', 'size')
    bad_keys = [key for key in sort_keys if key not in valid_sort_keys]
    if bad_keys:
        raise exception.InvalidParameterValue(
            err=_("Invalid sort keys passed: %s") % ', '.join(bad_keys))

    valid_sort_dirs = ('asc', 'desc')
    bad_dirs = [d for d in sort_dirs if d not in valid_sort_dirs]
    if bad_dirs:
        raise exception.InvalidParameterValue(
            err=_("Invalid sort dirs passed: %s") % ', '.join(bad_dirs))

    resources = function_get_manageable(context, host, marker=marker,
                                        limit=limit, offset=offset,
                                        sort_keys=sort_keys,
                                        sort_dirs=sort_dirs)
    count = len(resources)

    if is_detail:
        return view_builder.detail_list(req, resources, count)
    return view_builder.summary_list(req, resources, count)
def check_for_setup_error(self):
    """Returns an error if prerequisites aren't met.

    :raises InvalidHost: if no FC targets were discovered
    """
    super(FSSFCDriver, self).check_for_setup_error()
    # Truthiness test instead of len(...) == 0 (PEP 8 idiom).
    if not self.gateway_fc_wwns:
        msg = _('No FC targets found')
        raise exception.InvalidHost(reason=msg)