def find_retype_backend(self, context, request_spec,
                        filter_properties=None, migration_policy='never'):
    """Find a backend that can accept the volume with its new type."""
    filter_properties = filter_properties or {}
    backend = (request_spec['volume_properties'].get('cluster_name')
               or request_spec['volume_properties']['host'])

    # The volume already exists on this backend, so we shouldn't check
    # whether it can accept the volume again in the CapacityFilter.
    filter_properties['vol_exists_on'] = backend

    weighed_backends = self._get_weighted_candidates(context, request_spec,
                                                     filter_properties)
    if not weighed_backends:
        raise exception.NoValidBackend(
            reason=_('No valid backends for volume %(id)s with type '
                     '%(type)s') % {'id': request_spec['volume_id'],
                                    'type': request_spec['volume_type']})

    for weighed_backend in weighed_backends:
        backend_state = weighed_backend.obj
        if backend_state.backend_id == backend:
            return backend_state

    if utils.extract_host(backend, 'pool') is None:
        # Legacy volumes created before pools were introduced have no
        # pool info in the host field, but backend_state.backend_id
        # always includes pool-level info. If the exact match above
        # didn't work out, look for a backend_state on the same backend
        # as the volume being retyped. For legacy volumes, retyping can
        # therefore move data between pools on the same host; we treat
        # that as different from migration between hosts, so we allow it
        # even when the migration policy is 'never'.
        for weighed_backend in weighed_backends:
            backend_state = weighed_backend.obj
            new_backend = utils.extract_host(backend_state.backend_id,
                                             'backend')
            if new_backend == backend:
                return backend_state

    if migration_policy == 'never':
        raise exception.NoValidBackend(
            reason=_('Current backend not valid for volume %(id)s with '
                     'type %(type)s, migration not allowed') %
            {'id': request_spec['volume_id'],
             'type': request_spec['volume_type']})

    top_backend = self._choose_top_backend(weighed_backends, request_spec)
    return top_backend.obj
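# The fallback above hinges on Cinder's 'host@backend#pool' naming
# convention, which utils.extract_host parses. The standalone sketch below
# is an illustrative reimplementation of that behavior (not the shipped
# cinder.volume.utils.extract_host); it shows why extract_host(backend,
# 'pool') returns None for a legacy, pool-less host string.
def _extract_host_sketch(host, level='backend'):
    if level == 'host':
        # 'host@backend#pool' -> 'host'
        return host.split('#')[0].split('@')[0]
    elif level == 'backend':
        # 'host@backend#pool' -> 'host@backend'
        return host.split('#')[0]
    elif level == 'pool':
        # 'host@backend#pool' -> 'pool'; None when no pool is encoded
        return host.split('#')[1] if '#' in host else None


assert _extract_host_sketch('node1@lvm#pool_a', 'backend') == 'node1@lvm'
assert _extract_host_sketch('node1@lvm', 'pool') is None  # legacy volume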
def test_create_volume_exception_puts_volume_in_error_state(
        self, _mock_volume_update, _mock_message_create,
        _mock_sched_create):
    # Test NoValidBackend exception behavior for create_volume.
    # Puts the volume in 'error' state and eats the exception.
    _mock_sched_create.side_effect = exception.NoValidBackend(reason="")
    volume = fake_volume.fake_volume_obj(self.context)
    request_spec = {'volume_id': volume.id,
                    'volume': {'id': volume.id, '_name_id': None,
                               'metadata': {}, 'admin_metadata': {},
                               'glance_metadata': {}}}
    request_spec_obj = objects.RequestSpec.from_primitives(request_spec)

    self.manager.create_volume(self.context, volume,
                               request_spec=request_spec_obj,
                               filter_properties={})
    _mock_volume_update.assert_called_once_with(self.context,
                                                volume.id,
                                                {'status': 'error'})
    _mock_sched_create.assert_called_once_with(self.context,
                                               request_spec_obj, {})

    _mock_message_create.assert_called_once_with(
        self.context, message_field.Action.SCHEDULE_ALLOCATE_VOLUME,
        resource_uuid=volume.id, exception=mock.ANY)
def test_extend_volume_no_valid_host(self, status, mock_rollback,
                                     mock_extend, mock_consume,
                                     mock_backend_passes):
    volume = fake_volume.fake_volume_obj(self.context,
                                         **{'size': 1,
                                            'previous_status': status})
    no_valid_backend = exception.NoValidBackend(reason='')
    mock_backend_passes.side_effect = [no_valid_backend]

    with mock.patch.object(self.manager,
                           '_set_volume_state_and_notify') as mock_notify:
        self.manager.extend_volume(self.context, volume, 2,
                                   'fake_reservation')
        mock_notify.assert_called_once_with(
            'extend_volume', {'volume_state': {'status': status,
                                               'previous_status': None}},
            self.context, no_valid_backend, None)

        mock_rollback.assert_called_once_with(
            self.context, 'fake_reservation',
            project_id=volume.project_id)
        mock_consume.assert_not_called()
        mock_extend.assert_not_called()
def _populate_retry(self, filter_properties, request_spec):
    """Populate filter properties with history of retries for request.

    If maximum retries is exceeded, raise NoValidBackend.
    """
    max_attempts = self.max_attempts
    retry = filter_properties.pop('retry', {})

    if max_attempts == 1:
        # re-scheduling is disabled.
        return

    # retry is enabled, update attempt count:
    if retry:
        retry['num_attempts'] += 1
    else:
        retry = {
            'num_attempts': 1,
            'backends': [],  # list of volume service backends tried
            'hosts': []  # TODO(geguileo): Remove in P and leave backends
        }

    filter_properties['retry'] = retry

    resource_id = request_spec.get('volume_id') or request_spec.get(
        "group_id")
    self._log_volume_error(resource_id, retry)

    if retry['num_attempts'] > max_attempts:
        raise exception.NoValidBackend(
            reason=_("Exceeded max scheduling attempts %(max_attempts)d "
                     "for resource %(resource_id)s") %
            {'max_attempts': max_attempts,
             'resource_id': resource_id})
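# A minimal, self-contained illustration of the retry bookkeeping performed
# by _populate_retry above. The helper name and max_attempts default are
# hypothetical stand-ins for the scheduler's real state; the dict layout
# mirrors the one built in the method.
def _populate_retry_sketch(filter_properties, max_attempts=3):
    retry = filter_properties.pop('retry', {})
    if max_attempts == 1:
        return  # re-scheduling is disabled
    if retry:
        retry['num_attempts'] += 1
    else:
        retry = {'num_attempts': 1, 'backends': [], 'hosts': []}
    filter_properties['retry'] = retry
    if retry['num_attempts'] > max_attempts:
        raise RuntimeError('Exceeded max scheduling attempts')


props = {}
_populate_retry_sketch(props)  # first attempt
assert props['retry']['num_attempts'] == 1
_populate_retry_sketch(props)  # request was re-scheduled once
assert props['retry']['num_attempts'] == 2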
def schedule_create_volume(self, context, request_spec,
                           filter_properties):
    backend = self._schedule(context, request_spec, filter_properties)

    if not backend:
        raise exception.NoValidBackend(reason=_("No weighed backends "
                                                "available"))

    backend = backend.obj
    volume_id = request_spec['volume_id']

    updated_volume = driver.volume_update_db(
        context, volume_id, backend.host, backend.cluster_name,
        availability_zone=backend.service['availability_zone'])
    self._post_select_populate_filter_properties(filter_properties,
                                                 backend)

    # context is not serializable
    filter_properties.pop('context', None)

    self.volume_rpcapi.create_volume(context, updated_volume, request_spec,
                                     filter_properties,
                                     allow_reschedule=True)
def backend_passes_filters(self, context, backend, request_spec,
                           filter_properties):
    """Check if the specified backend passes the filters."""
    weighed_backends = self._get_weighted_candidates(context, request_spec,
                                                     filter_properties)
    # If backend has no pool defined we will ignore it in the comparison
    ignore_pool = not bool(utils.extract_host(backend, 'pool'))
    for weighed_backend in weighed_backends:
        backend_id = weighed_backend.obj.backend_id
        if ignore_pool:
            backend_id = utils.extract_host(backend_id)
        if backend_id == backend:
            return weighed_backend.obj

    reason_param = {'resource': 'volume',
                    'id': '??id missing??',
                    'backend': backend}
    for resource in ['volume', 'group', 'snapshot']:
        resource_id = request_spec.get('%s_id' % resource, None)
        if resource_id:
            reason_param.update({'resource': resource, 'id': resource_id})
            break
    raise exception.NoValidBackend(_('Cannot place %(resource)s %(id)s '
                                     'on %(backend)s.') % reason_param)
def test_create_consistencygroup_exceptions(self):
    with mock.patch.object(filter_scheduler.FilterScheduler,
                           'schedule_create_consistencygroup') as mock_cg:
        original_driver = self.manager.driver
        consistencygroup_obj = \
            fake_consistencygroup.fake_consistencyobject_obj(self.context)
        self.manager.driver = filter_scheduler.FilterScheduler
        LOG = self.mock_object(manager, 'LOG')
        self.mock_object(db, 'consistencygroup_update')

        ex = exception.CinderException('test')
        mock_cg.side_effect = ex
        group_id = fake.CONSISTENCY_GROUP_ID
        self.assertRaises(exception.CinderException,
                          self.manager.create_consistencygroup,
                          self.context,
                          consistencygroup_obj)
        self.assertGreater(LOG.exception.call_count, 0)
        db.consistencygroup_update.assert_called_once_with(
            self.context, group_id,
            {'status': fields.ConsistencyGroupStatus.ERROR})

        mock_cg.reset_mock()
        LOG.exception.reset_mock()
        db.consistencygroup_update.reset_mock()

        mock_cg.side_effect = exception.NoValidBackend(
            reason="No weighed hosts available")
        self.manager.create_consistencygroup(self.context,
                                             consistencygroup_obj)
        self.assertGreater(LOG.error.call_count, 0)
        db.consistencygroup_update.assert_called_once_with(
            self.context, group_id,
            {'status': fields.ConsistencyGroupStatus.ERROR})

        self.manager.driver = original_driver
def test_retype_volume_exception_returns_volume_state(
        self, quota_rollback, _mock_vol_attachment_get, _mock_vol_update):
    # Test NoValidBackend exception behavior for retype.
    # Puts the volume in original state and eats the exception.
    volume = tests_utils.create_volume(self.context,
                                       status='retyping',
                                       previous_status='in-use')
    instance_uuid = '12345678-1234-5678-1234-567812345678'
    volume_attach = tests_utils.attach_volume(self.context, volume.id,
                                              instance_uuid, None,
                                              '/dev/fake')
    _mock_vol_attachment_get.return_value = [volume_attach]
    reservations = mock.sentinel.reservations
    request_spec = {'volume_id': volume.id, 'volume_type': {'id': 3},
                    'migration_policy': 'on-demand',
                    'quota_reservations': reservations}
    _mock_vol_update.return_value = {'status': 'in-use'}
    _mock_find_retype_backend = mock.Mock(
        side_effect=exception.NoValidBackend(reason=""))
    orig_retype = self.manager.driver.find_retype_backend
    self.manager.driver.find_retype_backend = _mock_find_retype_backend

    self.manager.retype(self.context, volume, request_spec=request_spec,
                        filter_properties={})

    _mock_find_retype_backend.assert_called_once_with(self.context,
                                                      request_spec, {},
                                                      'on-demand')
    quota_rollback.assert_called_once_with(self.context, reservations)
    _mock_vol_update.assert_called_once_with(self.context, volume.id,
                                             {'status': 'in-use'})
    # Restore the original method (the saved attribute is
    # find_retype_backend, so restore to the same name).
    self.manager.driver.find_retype_backend = orig_retype
def schedule_create_consistencygroup(self, context, group,
                                     request_spec_list,
                                     filter_properties_list):
    weighed_backend = self._schedule_group(context,
                                           request_spec_list,
                                           filter_properties_list)

    if not weighed_backend:
        raise exception.NoValidBackend(reason=_("No weighed backends "
                                                "available"))

    backend = weighed_backend.obj
    updated_group = driver.group_update_db(context, group, backend.host,
                                           backend.cluster_name)

    self.volume_rpcapi.create_consistencygroup(context, updated_group)
def backend_passes_filters(self, context, backend, request_spec,
                           filter_properties):
    """Check if the specified backend passes the filters."""
    weighed_backends = self._get_weighted_candidates(context, request_spec,
                                                     filter_properties)
    for weighed_backend in weighed_backends:
        backend_state = weighed_backend.obj
        if backend_state.backend_id == backend:
            return backend_state

    volume_id = request_spec.get('volume_id', '??volume_id missing??')
    raise exception.NoValidBackend(reason=_('Cannot place volume %(id)s '
                                            'on %(backend)s') %
                                   {'id': volume_id,
                                    'backend': backend})
def _test_migrate_volume_exception_returns_volume_state(
        self, _mock_volume_update, _mock_backend_passes,
        _mock_volume_get, status, fake_updates):
    volume = tests_utils.create_volume(self.context,
                                       status=status,
                                       previous_status='available')
    fake_volume_id = volume.id
    request_spec = {'volume_id': fake_volume_id}
    _mock_backend_passes.side_effect = exception.NoValidBackend(reason="")
    _mock_volume_get.return_value = volume

    self.manager.migrate_volume_to_host(self.context, volume, 'host', True,
                                        request_spec=request_spec,
                                        filter_properties={})
    _mock_volume_update.assert_called_once_with(self.context,
                                                fake_volume_id,
                                                fake_updates)
    _mock_backend_passes.assert_called_once_with(self.context, 'host',
                                                 request_spec, {})
def backend_passes_filters(self, context, backend, request_spec,
                           filter_properties):
    """Check if the specified backend passes the filters."""
    weighed_backends = self._get_weighted_candidates(context, request_spec,
                                                     filter_properties)
    # If backend has no pool defined we will ignore it in the comparison
    ignore_pool = not bool(utils.extract_host(backend, 'pool'))
    for weighed_backend in weighed_backends:
        backend_id = weighed_backend.obj.backend_id
        if ignore_pool:
            backend_id = utils.extract_host(backend_id)
        if backend_id == backend:
            return weighed_backend.obj

    volume_id = request_spec.get('volume_id', '??volume_id missing??')
    raise exception.NoValidBackend(reason=_('Cannot place volume %(id)s '
                                            'on %(backend)s') %
                                   {'id': volume_id,
                                    'backend': backend})
def _connector_to_hostname_prefix(self, connector):
    """Translate connector info to storage system host name.

    Translate a host's name and IP to the prefix of its host name on the
    storage subsystem. We create a host name from the host and IP
    address, replacing any invalid characters (at most 55 characters),
    and adding a random 8-character suffix to avoid collisions. The total
    length should be at most 63 characters.
    """
    # Build cleanup translation tables for host names
    invalid_ch_in_host = ''
    for num in range(0, 128):
        ch = six.text_type(chr(num))
        if not ch.isalnum() and ch not in [' ', '.', '-', '_']:
            invalid_ch_in_host = invalid_ch_in_host + ch

    host_name = connector['host']
    if isinstance(host_name, six.text_type):
        unicode_host_name_filter = {ord(six.text_type(char)): u'-'
                                    for char in invalid_ch_in_host}
        host_name = host_name.translate(unicode_host_name_filter)
    elif isinstance(host_name, str):
        string_host_name_filter = string.maketrans(
            invalid_ch_in_host, '-' * len(invalid_ch_in_host))
        host_name = host_name.translate(string_host_name_filter)
    else:
        msg = _('_create_host: Cannot translate host name. Host name '
                'is not unicode or string.')
        LOG.error(msg)
        raise exception.NoValidBackend(reason=msg)

    host_name = six.text_type(host_name)

    # The FlashSystem family doesn't like host names that start with a
    # number.
    if not re.match('^[A-Za-z]', host_name):
        host_name = '_' + host_name

    return host_name[:55]
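# A Python 3-only sketch of the sanitization above, dropping the
# six/string.maketrans compatibility branches, to show what the returned
# prefix looks like for typical connectors. The helper name and sample
# inputs are made up; 're' is already imported by the driver module.
def _hostname_prefix_sketch(host_name):
    invalid = ''.join(chr(n) for n in range(128)
                      if not chr(n).isalnum() and chr(n) not in ' .-_')
    host_name = host_name.translate({ord(c): '-' for c in invalid})
    if not re.match('^[A-Za-z]', host_name):
        host_name = '_' + host_name  # storage rejects a leading digit
    return host_name[:55]


assert _hostname_prefix_sketch('compute-01.example.com') == \
    'compute-01.example.com'
assert _hostname_prefix_sketch('10.0.0.5:iqn') == '_10.0.0.5-iqn'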
def schedule_create_volume(self, context, request_spec, filter_properties):
    backend = self._schedule(context, request_spec, filter_properties)

    if not backend:
        raise exception.NoValidBackend(reason=_("No weighed backends "
                                                "available"))

    backend = backend.obj
    volume_id = request_spec['volume_id']

    updated_volume = driver.volume_update_db(context, volume_id,
                                             backend.host,
                                             backend.cluster_name)
    self._post_select_populate_filter_properties(filter_properties,
                                                 backend)

    # context is not serializable
    filter_properties.pop('context', None)

    # In case of multiple cinder backends, it is possible for
    # one backend to fail to schedule, while another succeeds.
    # If the volume is scheduled successfully, clear any fault
    # generated.
    utils.update_volume_fault(context, volume_id, "")

    LOG.info(("Volume %(volume_id)s is scheduled to create. "
              "\n--request_spec: %(request_spec)s, "
              "\n--filter_properties: %(filter_properties)s, "
              "\n--snapshot_id: %(snapshot_id)s, "
              "\n--image_id: %(image_id)s"),
             {'volume_id': volume_id,
              'request_spec': request_spec,
              'filter_properties': filter_properties,
              'snapshot_id': request_spec['snapshot_id'],
              'image_id': request_spec['image_id']})

    self.volume_rpcapi.create_volume(context, updated_volume, request_spec,
                                     filter_properties,
                                     allow_reschedule=True)