def find_retype_host(self, context, request_spec, filter_properties=None,
                     migration_policy='never'):
    """Find a host that can accept the volume with its new type.

    Prefers the host the volume already lives on; if that host is not
    among the weighed candidates and migration is disallowed, raises
    NoValidHost.  Otherwise picks the top weighed candidate.
    """
    filter_properties = filter_properties or {}
    current_host = request_spec['volume_properties']['host']

    # The volume already exists on this host, and so we shouldn't check if
    # it can accept the volume again in the CapacityFilter.
    filter_properties['vol_exists_on'] = current_host

    candidates = self._get_weighted_candidates(context, request_spec,
                                               filter_properties)
    if not candidates:
        raise exception.NoValidHost(
            reason=_('No valid hosts for volume %(id)s with type %(type)s')
            % {'id': request_spec['volume_id'],
               'type': request_spec['volume_type']})

    # Keep the volume in place whenever the current host qualifies.
    for candidate in candidates:
        state = candidate.obj
        if state.host == current_host:
            return state

    if migration_policy == 'never':
        raise exception.NoValidHost(
            reason=_('Current host not valid for volume %(id)s with type '
                     '%(type)s, migration not allowed')
            % {'id': request_spec['volume_id'],
               'type': request_spec['volume_type']})

    # Migration allowed: hand the volume to the best-weighed host.
    return self._choose_top_host(candidates, request_spec).obj
def schedule_create_volume(self, context, volume_id, **_kwargs):
    """Picks a host that is up and has the fewest volumes."""
    elevated = context.elevated()
    volume_ref = db.volume_get(context, volume_id)
    availability_zone = volume_ref.get('availability_zone')

    # An AZ of the form "zone:host" lets an admin pin the volume to a host.
    zone, host = None, None
    if availability_zone:
        zone, _x, host = availability_zone.partition(':')

    if host and context.is_admin:
        # Explicit host requested: verify the service is alive, then cast.
        service = db.service_get_by_args(elevated, host, 'cinder-volume')
        if not utils.service_is_up(service):
            raise exception.WillNotSchedule(host=host)
        driver.cast_to_volume_host(context, host, 'create_volume',
                                   volume_id=volume_id, **_kwargs)
        return None

    # Services sorted by allocated gigabytes; least-loaded first.
    candidates = db.service_get_all_volume_sorted(elevated)
    if zone:
        candidates = [(svc, gigs) for (svc, gigs) in candidates
                      if svc['availability_zone'] == zone]

    for service, volume_gigabytes in candidates:
        if volume_gigabytes + volume_ref['size'] > FLAGS.max_gigabytes:
            msg = _("Not enough allocatable volume gigabytes remaining")
            raise exception.NoValidHost(reason=msg)
        if utils.service_is_up(service) and not service['disabled']:
            driver.cast_to_volume_host(context, service['host'],
                                       'create_volume',
                                       volume_id=volume_id, **_kwargs)
            return None

    msg = _("Is the appropriate service running?")
    raise exception.NoValidHost(reason=msg)
def find_retype_host(self, context, request_spec, filter_properties=None,
                     migration_policy='never'):
    """Find a host that can accept the volume with its new type."""
    filter_properties = filter_properties or {}
    current_host = request_spec['volume_properties']['host']

    # The volume already exists on this host, and so we shouldn't check if
    # it can accept the volume again in the CapacityFilter.
    filter_properties['vol_exists_on'] = current_host

    weighed_hosts = self._get_weighted_candidates(context, request_spec,
                                                  filter_properties)
    if not weighed_hosts:
        raise exception.NoValidHost(
            reason=_('No valid hosts for volume '
                     '%(id)s with type %(type)s') % {
                'id': request_spec['volume_id'],
                'type': request_spec['volume_type']
            })

    # First pass: exact match on the (pool-qualified) current host keeps
    # the volume where it is.
    for weighed_host in weighed_hosts:
        host_state = weighed_host.obj
        if host_state.host == current_host:
            return host_state

    if utils.extract_host(current_host, 'pool') is None:
        # legacy volumes created before pool is introduced has no pool
        # info in host. But host_state.host always include pool level
        # info. In this case if above exact match didn't work out, we
        # find host_state that are of the same host of volume being
        # retyped. In other words, for legacy volumes, retyping could
        # cause migration between pools on same host, which we consider
        # it is different from migration between hosts thus allow that
        # to happen even migration policy is 'never'.
        for weighed_host in weighed_hosts:
            host_state = weighed_host.obj
            backend = utils.extract_host(host_state.host, 'backend')
            if backend == current_host:
                return host_state

    # The current host cannot take the new type; only proceed if the
    # caller allows migration.
    if migration_policy == 'never':
        raise exception.NoValidHost(
            reason=_('Current host not valid for '
                     'volume %(id)s with type '
                     '%(type)s, migration not '
                     'allowed') % {
                'id': request_spec['volume_id'],
                'type': request_spec['volume_type']
            })

    top_host = self._choose_top_host(weighed_hosts, request_spec)
    return top_host.obj
def schedule_create_volume(self, context, request_spec, filter_properties):
    """Picks a host that is up and has the fewest volumes."""
    elevated = context.elevated()

    volume_id = request_spec.get('volume_id')
    snapshot_id = request_spec.get('snapshot_id')
    image_id = request_spec.get('image_id')
    volume_properties = request_spec.get('volume_properties')
    volume_size = volume_properties.get('size')
    availability_zone = volume_properties.get('availability_zone')

    # "zone:host" syntax in the AZ lets an admin pin the volume to a host.
    zone, host = None, None
    if availability_zone:
        zone, _x, host = availability_zone.partition(':')

    if host and context.is_admin:
        # Explicit host requested: verify the service is alive, then send
        # the create straight to it.
        topic = CONF.volume_topic
        service = db.service_get_by_args(elevated, host, topic)
        if not utils.service_is_up(service):
            raise exception.WillNotSchedule(host=host)
        updated_volume = driver.volume_update_db(context, volume_id, host)
        self.volume_rpcapi.create_volume(context, updated_volume, host,
                                         request_spec, filter_properties,
                                         snapshot_id=snapshot_id,
                                         image_id=image_id)
        return None

    # Candidates sorted by allocated gigabytes, least-loaded first.
    candidates = db.service_get_all_volume_sorted(elevated)
    if zone:
        candidates = [(s, gigs) for (s, gigs) in candidates
                      if s['availability_zone'] == zone]

    for service, volume_gigabytes in candidates:
        if volume_gigabytes + volume_size > CONF.max_gigabytes:
            msg = _("Not enough allocatable volume gigabytes remaining")
            raise exception.NoValidHost(reason=msg)
        if utils.service_is_up(service) and not service['disabled']:
            updated_volume = driver.volume_update_db(
                context, volume_id, service['host'])
            self.volume_rpcapi.create_volume(context, updated_volume,
                                             service['host'], request_spec,
                                             filter_properties,
                                             snapshot_id=snapshot_id,
                                             image_id=image_id)
            return None

    msg = _("Is the appropriate service running?")
    raise exception.NoValidHost(reason=msg)
def _schedule(self, context, topic, request_spec, **kwargs):
    """Picks a host that is up at random.

    :raises NoValidHost: if no service for ``topic`` is up, or if
        filtering removes every candidate.
    :returns: a host chosen uniformly at random from the filtered list.
    """
    elevated = context.elevated()
    hosts = self.hosts_up(elevated, topic)
    if not hosts:
        msg = _("Is the appropriate service running?")
        raise exception.NoValidHost(reason=msg)

    hosts = self._filter_hosts(request_spec, hosts, **kwargs)
    if not hosts:
        msg = _("Could not find another host")
        raise exception.NoValidHost(reason=msg)

    # random.choice is the idiomatic (and equivalent) form of
    # hosts[int(random.random() * len(hosts))].
    return random.choice(hosts)
def _populate_retry(self, filter_properties, properties):
    """Populate filter properties with history of retries for this request.

    If maximum retries is exceeded, raise NoValidHost.
    """
    max_attempts = self.max_attempts
    retry = filter_properties.pop('retry', {})

    # A single attempt means re-scheduling is disabled entirely.
    if max_attempts == 1:
        return

    # Retry is enabled: bump the count, or start tracking on first pass.
    if retry:
        retry['num_attempts'] += 1
    else:
        retry = {'num_attempts': 1,
                 'hosts': []}  # list of volume service hosts tried
    filter_properties['retry'] = retry

    volume_id = properties.get('volume_id')
    self._log_volume_error(volume_id, retry)

    if retry['num_attempts'] > max_attempts:
        raise exception.NoValidHost(
            reason=_("Exceeded max scheduling attempts %(max_attempts)d "
                     "for volume %(volume_id)s") %
            {'max_attempts': max_attempts,
             'volume_id': volume_id})
def test_create_consistencygroup_exceptions(self):
    """Exceptions from the scheduler driver mark the CG as 'error'.

    A generic CinderException must propagate (and be logged via
    LOG.exception); NoValidHost must be eaten (logged via LOG.error).
    Both paths must set the group status to 'error'.
    """
    with mock.patch.object(filter_scheduler.FilterScheduler,
                           'schedule_create_consistencygroup') as mock_cg:
        original_driver = self.manager.driver
        self.manager.driver = filter_scheduler.FilterScheduler
        LOG = logging.getLogger('cinder.scheduler.manager')
        self.stubs.Set(LOG, 'error', mock.Mock())
        self.stubs.Set(LOG, 'exception', mock.Mock())
        self.stubs.Set(db, 'consistencygroup_update', mock.Mock())

        # Case 1: generic exception is re-raised after logging.
        ex = exception.CinderException('test')
        mock_cg.side_effect = ex
        group_id = '1'
        self.assertRaises(exception.CinderException,
                          self.manager.create_consistencygroup,
                          self.context,
                          'volume',
                          group_id)
        self.assertTrue(LOG.exception.call_count > 0)
        db.consistencygroup_update.assert_called_once_with(
            self.context, group_id, {'status': 'error'})

        mock_cg.reset_mock()
        LOG.exception.reset_mock()
        db.consistencygroup_update.reset_mock()

        # Case 2: NoValidHost is swallowed but still logged as an error.
        mock_cg.side_effect = exception.NoValidHost(
            reason="No weighed hosts available")
        self.manager.create_consistencygroup(
            self.context, 'volume', group_id)
        self.assertTrue(LOG.error.call_count > 0)
        db.consistencygroup_update.assert_called_once_with(
            self.context, group_id, {'status': 'error'})

        self.manager.driver = original_driver
def _test_migrate_volume_exception_returns_volume_state(
        self, _mock_volume_update, _mock_host_passes,
        _mock_volume_get, status, fake_updates):
    """Helper: NoValidHost during migrate restores the volume's state.

    Creates a volume in ``status``, makes host_passes_filters raise
    NoValidHost, and checks the manager applies ``fake_updates`` to the
    volume instead of propagating the exception.
    """
    volume = tests_utils.create_volume(self.context,
                                       status=status,
                                       previous_status='available')
    fake_volume_id = volume.id
    topic = 'fake_topic'
    request_spec = {'volume_id': fake_volume_id}
    # Force scheduling failure.
    _mock_host_passes.side_effect = exception.NoValidHost(reason="")
    _mock_volume_get.return_value = volume
    self.manager.migrate_volume_to_host(self.context, topic,
                                        fake_volume_id, 'host', True,
                                        request_spec=request_spec,
                                        filter_properties={},
                                        volume=volume)
    _mock_volume_update.assert_called_once_with(self.context,
                                                fake_volume_id,
                                                fake_updates)
    _mock_host_passes.assert_called_once_with(self.context, 'host',
                                              request_spec, {})
def test_retype_volume_exception_returns_volume_state(self, _mock_vol_get,
                                                      _mock_vol_update):
    # Test NoValidHost exception behavior for retype.
    # Puts the volume in original state and eats the exception.
    fake_volume_id = 1
    topic = 'fake_topic'
    volume_id = fake_volume_id
    request_spec = {'volume_id': fake_volume_id,
                    'volume_type': {'id': 3},
                    'migration_policy': 'on-demand'}
    # In-use volume attached to an instance; retype failure must restore
    # the 'in-use' status.
    vol_info = {'id': fake_volume_id,
                'status': 'in-use',
                'instance_uuid': 'foo',
                'attached_host': None}

    _mock_vol_get.return_value = vol_info
    _mock_vol_update.return_value = {'status': 'in-use'}
    # Replace the driver's retype-host lookup with one that always fails.
    _mock_find_retype_host = mock.Mock(
        side_effect=exception.NoValidHost(reason=""))
    orig_retype = self.manager.driver.find_retype_host
    self.manager.driver.find_retype_host = _mock_find_retype_host

    self.manager.retype(self.context, topic, volume_id,
                        request_spec=request_spec,
                        filter_properties={})

    _mock_vol_get.assert_called_once_with(self.context, fake_volume_id)
    _mock_find_retype_host.assert_called_once_with(self.context,
                                                   request_spec, {},
                                                   'on-demand')
    _mock_vol_update.assert_called_once_with(self.context, fake_volume_id,
                                             {'status': 'in-use'})
    # Restore the real method so other tests see the original driver.
    self.manager.driver.find_retype_host = orig_retype
def test_create_volume_exception_puts_volume_in_error_state(
        self, _mock_volume_update, _mock_message_create,
        _mock_sched_create):
    # Test NoValidHost exception behavior for create_volume.
    # Puts the volume in 'error' state and eats the exception.
    _mock_sched_create.side_effect = exception.NoValidHost(reason="")
    volume = fake_volume.fake_volume_obj(self.context)
    topic = 'fake_topic'
    request_spec = {'volume_id': volume.id,
                    'volume': {'id': volume.id, '_name_id': None,
                               'metadata': {}, 'admin_metadata': {},
                               'glance_metadata': {}}}
    # The manager converts the raw dict to a RequestSpec object before
    # handing it to the scheduler driver.
    request_spec_obj = objects.RequestSpec.from_primitives(request_spec)

    self.manager.create_volume(self.context, topic, volume.id,
                               request_spec=request_spec,
                               filter_properties={},
                               volume=volume)
    _mock_volume_update.assert_called_once_with(self.context,
                                                volume.id,
                                                {'status': 'error'})
    _mock_sched_create.assert_called_once_with(self.context,
                                               request_spec_obj, {})
    # A user-visible message must also be recorded for the failure.
    _mock_message_create.assert_called_once_with(
        self.context, defined_messages.UNABLE_TO_ALLOCATE,
        self.context.project_id, resource_type='VOLUME',
        resource_uuid=volume.id)
def test_migrate_volume_exception_returns_volume_state(self):
    """Test NoValidHost exception behavior for migrate_volume_to_host.

    Puts the volume in 'error_migrating' state and eats the exception.
    """
    fake_volume_id = 1
    self._mox_schedule_method_helper('host_passes_filters')
    self.mox.StubOutWithMock(db, 'volume_update')

    topic = 'fake_topic'
    volume_id = fake_volume_id
    request_spec = {'volume_id': fake_volume_id}

    # Expect the scheduler check to fail ...
    self.manager.driver.host_passes_filters(
        self.context, 'host',
        request_spec, {}).AndRaise(exception.NoValidHost(reason=""))
    # ... and the migration status to be cleared in response.
    db.volume_update(self.context, fake_volume_id,
                     {'migration_status': None})

    self.mox.ReplayAll()
    self.manager.migrate_volume_to_host(self.context, topic, volume_id,
                                        'host', True,
                                        request_spec=request_spec,
                                        filter_properties={})
def test_retype_volume_exception_returns_volume_state(
        self, _mock_vol_attachment_get, _mock_vol_update):
    # Test NoValidHost exception behavior for retype.
    # Puts the volume in original state and eats the exception.
    volume = tests_utils.create_volume(self.context,
                                       status='retyping',
                                       previous_status='in-use')
    instance_uuid = '12345678-1234-5678-1234-567812345678'
    volume_attach = tests_utils.attach_volume(self.context, volume.id,
                                              instance_uuid, None,
                                              '/dev/fake')
    _mock_vol_attachment_get.return_value = [volume_attach]
    topic = 'fake_topic'
    request_spec = {'volume_id': volume.id,
                    'volume_type': {'id': 3},
                    'migration_policy': 'on-demand'}
    _mock_vol_update.return_value = {'status': 'in-use'}
    # Replace the driver's retype-host lookup with one that always fails.
    _mock_find_retype_host = mock.Mock(
        side_effect=exception.NoValidHost(reason=""))
    orig_retype = self.manager.driver.find_retype_host
    self.manager.driver.find_retype_host = _mock_find_retype_host

    self.manager.retype(self.context, topic, volume.id,
                        request_spec=request_spec,
                        filter_properties={},
                        volume=volume)

    _mock_find_retype_host.assert_called_once_with(self.context,
                                                   request_spec, {},
                                                   'on-demand')
    # The attached volume must be put back to 'in-use'.
    _mock_vol_update.assert_called_once_with(self.context, volume.id,
                                             {'status': 'in-use'})
    # Restore the real method so other tests see the original driver.
    self.manager.driver.find_retype_host = orig_retype
def _check_mode_get_or_register_storage_system(self):
    """Does validity checks for storage system registry and health.

    Resolves the configured controller IPs and the proxy server host
    name, then selects embedded mode (server host is one of the
    controllers) or proxy mode (register the controllers with the
    server), and records the resulting system id on the client.
    """
    def _resolve_host(host):
        # Returns the resolved IP, or None when resolution fails so the
        # caller can filter out unreachable controllers.
        try:
            ip = utils.resolve_hostname(host)
            return ip
        except socket.gaierror as e:
            LOG.error(
                _('Error resolving host %(host)s. Error - %(e)s.') % {
                    'host': host,
                    'e': e
                })
            return None

    ips = self.configuration.netapp_controller_ips
    ips = [i.strip() for i in ips.split(",")]
    # Drop any controller address that does not resolve.
    ips = [x for x in ips if _resolve_host(x)]
    host = utils.resolve_hostname(
        self.configuration.netapp_server_hostname)
    if not ips:
        msg = _('Controller ips not valid after resolution.')
        raise exception.NoValidHost(reason=msg)
    if host in ips:
        LOG.info(_('Embedded mode detected.'))
        system = self._client.list_storage_systems()[0]
    else:
        LOG.info(_('Proxy mode detected.'))
        system = self._client.register_storage_system(
            ips, password=self.configuration.netapp_sa_password)
    self._client.set_system_id(system.get('id'))
def test_create_consistencygroup_exceptions(self):
    """Exceptions from the scheduler driver mark the CG as ERROR.

    A generic CinderException must propagate (logged via
    LOG.exception); NoValidHost must be eaten (logged via LOG.error).
    Both paths must set the group status to ERROR.
    """
    with mock.patch.object(filter_scheduler.FilterScheduler,
                           'schedule_create_consistencygroup') as mock_cg:
        original_driver = self.manager.driver
        consistencygroup_obj = \
            fake_consistencygroup.fake_consistencyobject_obj(self.context)
        self.manager.driver = filter_scheduler.FilterScheduler
        LOG = self.mock_object(manager, 'LOG')
        self.stubs.Set(db, 'consistencygroup_update', mock.Mock())

        # Case 1: generic exception is re-raised after logging.
        ex = exception.CinderException('test')
        mock_cg.side_effect = ex
        group_id = fake.consistency_group_id
        self.assertRaises(exception.CinderException,
                          self.manager.create_consistencygroup,
                          self.context,
                          'volume',
                          consistencygroup_obj)
        self.assertTrue(LOG.exception.call_count > 0)
        db.consistencygroup_update.assert_called_once_with(
            self.context, group_id, {'status': (
                fields.ConsistencyGroupStatus.ERROR)})

        mock_cg.reset_mock()
        LOG.exception.reset_mock()
        db.consistencygroup_update.reset_mock()

        # Case 2: NoValidHost is swallowed but still logged as an error.
        mock_cg.side_effect = exception.NoValidHost(
            reason="No weighed hosts available")
        self.manager.create_consistencygroup(
            self.context, 'volume', consistencygroup_obj)
        self.assertTrue(LOG.error.call_count > 0)
        db.consistencygroup_update.assert_called_once_with(
            self.context, group_id, {'status': (
                fields.ConsistencyGroupStatus.ERROR)})

        self.manager.driver = original_driver
def schedule_create_volume(self, context, request_spec, filter_properties):
    """Schedule a volume create on the best-weighed host via RPC."""
    best = self._schedule(context, request_spec, filter_properties)
    if not best:
        raise exception.NoValidHost(reason="")

    host = best.obj.host
    volume_id = request_spec['volume_id']
    snapshot_id = request_spec['snapshot_id']
    image_id = request_spec['image_id']

    updated_volume = driver.volume_update_db(context, volume_id, host)
    # Record retry/host information for possible re-scheduling.
    self._post_select_populate_filter_properties(filter_properties,
                                                 best.obj)

    # context is not serializable
    filter_properties.pop('context', None)

    self.volume_rpcapi.create_volume(context, updated_volume, host,
                                     request_spec=request_spec,
                                     filter_properties=filter_properties,
                                     allow_reschedule=True,
                                     snapshot_id=snapshot_id,
                                     image_id=image_id)
def _schedule(self, context, topic, request_spec, **kwargs):
    """Picks a host that is up at random.

    :raises NoValidHost: if filtering leaves no candidate hosts.
    :returns: a host chosen uniformly at random from the candidates.
    """
    hosts = self._get_weighted_candidates(context, topic,
                                          request_spec, **kwargs)
    if not hosts:
        msg = _("Could not find another host")
        raise exception.NoValidHost(reason=msg)
    # random.choice is the idiomatic (and equivalent) form of
    # hosts[int(random.random() * len(hosts))].
    return random.choice(hosts)
def _resolve_host(host):
    """Resolve ``host`` to an IP address.

    :raises NoValidHost: when name resolution fails; the failure is
        also logged.
    :returns: the resolved IP address.
    """
    try:
        ip = na_utils.resolve_hostname(host)
        return ip
    except socket.gaierror as e:
        LOG.error(_LE('Error resolving host %(host)s. Error - %(e)s.'),
                  {'host': host, 'e': e})
        # Pass the text via the ``reason`` kwarg so NoValidHost formats
        # its "%(reason)s" message template, consistent with every other
        # call site; a positional argument bypasses the template.
        raise exception.NoValidHost(
            reason=_("Controller IP '%(host)s' could not be resolved: "
                     "%(e)s.") % {'host': host, 'e': e})
def _get_weighted_candidates(self, context, topic, request_spec, **kwargs):
    """Returns a list of the available hosts."""
    hosts = self.hosts_up(context.elevated(), topic)
    if not hosts:
        # No service of this topic is alive at all.
        raise exception.NoValidHost(
            reason=_("Is the appropriate service running?"))
    return self._filter_hosts(request_spec, hosts, **kwargs)
def host_passes_filters(self, context, host, request_spec,
                        filter_properties):
    """Check if the specified host passes the filters."""
    candidates = self._get_weighted_candidates(context, request_spec,
                                               filter_properties)
    # Accept only if the requested host survived filtering/weighing.
    for candidate in candidates:
        state = candidate.obj
        if state.host == host:
            return state

    raise exception.NoValidHost(
        reason=_('cannot place volume %(id)s on %(host)s')
        % {'id': request_spec['volume_id'], 'host': host})
def host_passes_filters(self, context, host, request_spec,
                        filter_properties):
    """Check if the specified host passes the filters."""
    candidates = self._get_weighted_candidates(context, request_spec,
                                               filter_properties)
    # Accept only if the requested host survived filtering/weighing.
    for candidate in candidates:
        state = candidate.obj
        if state.host == host:
            return state

    volume_id = request_spec.get('volume_id', '??volume_id missing??')
    raise exception.NoValidHost(reason=_('Cannot place volume %(id)s on '
                                         '%(host)s') % {'id': volume_id,
                                                        'host': host})
def schedule_create_consistencygroup(self, context, group_id,
                                     request_spec_list,
                                     filter_properties_list):
    """Pick a host for the whole group and cast the create to it."""
    best = self._schedule_group(context,
                                request_spec_list,
                                filter_properties_list)
    if not best:
        raise exception.NoValidHost(reason="No weighed hosts available")

    host = best.obj.host
    updated_group = driver.group_update_db(context, group_id, host)
    self.volume_rpcapi.create_consistencygroup(context,
                                               updated_group, host)
def test_create_volume_exception_puts_volume_in_error_state(
        self, _mock_volume_update, _mock_sched_create):
    # Test NoValidHost exception behavior for create_volume.
    # Puts the volume in 'error' state and eats the exception.
    _mock_sched_create.side_effect = exception.NoValidHost(reason="")
    fake_volume_id = 1
    topic = 'fake_topic'
    request_spec = {'volume_id': fake_volume_id}

    self.manager.create_volume(self.context, topic, fake_volume_id,
                               request_spec=request_spec,
                               filter_properties={})
    # The failure must be recorded on the volume, not raised to caller.
    _mock_volume_update.assert_called_once_with(self.context,
                                                fake_volume_id,
                                                {'status': 'error'})
    _mock_sched_create.assert_called_once_with(self.context,
                                               request_spec, {})
def test_migrate_volume_exception_returns_volume_state(
        self, _mock_volume_update, _mock_host_passes):
    # Test NoValidHost exception behavior for migrate_volume_to_host.
    # Puts the volume in 'error_migrating' state and eats the exception.
    _mock_host_passes.side_effect = exception.NoValidHost(reason="")
    fake_volume_id = 1
    topic = 'fake_topic'
    request_spec = {'volume_id': fake_volume_id}

    self.manager.migrate_volume_to_host(self.context, topic,
                                        fake_volume_id, 'host', True,
                                        request_spec=request_spec,
                                        filter_properties={})
    # Migration status must be cleared instead of raising to the caller.
    _mock_volume_update.assert_called_once_with(self.context,
                                                fake_volume_id,
                                                {'migration_status': None})
    _mock_host_passes.assert_called_once_with(self.context, 'host',
                                              request_spec, {})
def schedule_create_volume(self, context, request_spec, filter_properties):
    """Schedule a volume create, optionally picking a replication pair.

    When the volume type's extra specs enable replication, two hosts
    are selected (primary and secondary replica); otherwise a single
    host is chosen.  Raises NoValidHost when no primary host is found.
    """
    resource_type = request_spec.get('volume_type', None)
    extra_specs = {}
    if resource_type:
        extra_specs = resource_type.get('extra_specs', {})
    volume_id = request_spec.get('volume_id')
    snapshot_id = request_spec.get('snapshot_id')
    image_id = request_spec.get('image_id')
    replica_vol = None
    # 'replication_enabled' is a spec string; substring check matches
    # values like '<is> True' as well as 'True'.
    if 'True' in extra_specs.get('replication_enabled', ''):
        weighed_host, weighed_host2 = self._get_replication_hosts(
            context,
            request_spec,
            filter_properties)
    else:
        weighed_host = self._schedule(context, request_spec,
                                      filter_properties)
        weighed_host2 = None
    if not weighed_host:
        raise exception.NoValidHost(reason="")
    if weighed_host2:
        # Seed the secondary replica's DB entry on the second host.
        az = weighed_host2.obj.service['availability_zone']
        replica_vol = {'status': 'secondary_replica',
                       'attach_status': 'detached',
                       'host': weighed_host2.obj.host,
                       'availability_zone': az}
    host = weighed_host.obj.host
    updated_volume = driver.volume_update_db(context, volume_id, host,
                                             replica=replica_vol)
    self._post_select_populate_filter_properties(filter_properties,
                                                 weighed_host.obj)

    # context is not serializable
    filter_properties.pop('context', None)

    self.volume_rpcapi.create_volume(context, updated_volume, host,
                                     request_spec, filter_properties,
                                     allow_reschedule=True,
                                     snapshot_id=snapshot_id,
                                     image_id=image_id)
def _connector_to_hostname_prefix(self, connector):
    """Translate connector info to storage system host name prefix.

    Build a host name from the connector's host name, replacing every
    character that is not alphanumeric or one of ' ', '.', '-', '_'
    with '-'.  The name is prefixed with '_' when it does not start
    with a letter (the FlashSystem family rejects host names starting
    with a digit) and truncated to 55 characters, leaving room for an
    8-character suffix to be appended later without exceeding the
    63-character limit.

    :raises NoValidHost: if the connector host name is neither unicode
        nor a byte string.
    """
    # Build the table of disallowed ASCII characters once, with a
    # single join instead of quadratic string concatenation.
    invalid_ch_in_host = ''.join(
        ch for ch in (six.text_type(chr(num)) for num in range(0, 128))
        if not ch.isalnum() and ch not in [' ', '.', '-', '_'])

    host_name = connector['host']
    if isinstance(host_name, six.text_type):
        unicode_host_name_filter = {
            ord(six.text_type(char)): u'-'
            for char in invalid_ch_in_host
        }
        host_name = host_name.translate(unicode_host_name_filter)
    elif isinstance(host_name, str):
        # Python 2 byte string: use string.maketrans for the same mapping.
        string_host_name_filter = string.maketrans(
            invalid_ch_in_host, '-' * len(invalid_ch_in_host))
        host_name = host_name.translate(string_host_name_filter)
    else:
        msg = _('_create_host: Can not translate host name. Host name '
                'is not unicode or string.')
        LOG.error(msg)
        raise exception.NoValidHost(reason=msg)
    host_name = six.text_type(host_name)

    # FlashSystem family doesn't like hostname that starts with number.
    if not re.match('^[A-Za-z]', host_name):
        host_name = '_' + host_name

    return host_name[:55]
def host_passes_filters(self, context, host, request_spec,
                        filter_properties):
    """Check if the specified host passes the filters."""
    candidates = self._get_weighted_candidates(
        context,
        CONF.volume_topic,
        request_spec,
        filter_properties=filter_properties)

    # Candidates here are bare host names; on a match, look up the full
    # HostState object from the host manager.
    for candidate in candidates:
        if candidate == host:
            elevated = context.elevated()
            for host_state in self.host_manager.get_all_host_states(
                    elevated):
                if host_state.host == host:
                    return host_state

    raise exception.NoValidHost(
        reason=_('cannot place volume %(id)s on %(host)s')
        % {'id': request_spec['volume_id'], 'host': host})
def test_create_volume_exception_puts_volume_in_error_state(self):
    """Test NoValidHost exception behavior for create_volume.

    Puts the volume in 'error' state and eats the exception.
    """
    fake_volume_id = 1
    self._mox_schedule_method_helper('schedule_create_volume')
    self.mox.StubOutWithMock(db, 'volume_update')

    topic = 'fake_topic'
    volume_id = fake_volume_id
    request_spec = {'volume_id': fake_volume_id}

    # Expect scheduling to fail ...
    self.manager.driver.schedule_create_volume(
        self.context,
        request_spec, {}).AndRaise(exception.NoValidHost(reason=""))
    # ... and the volume to be flagged as errored in response.
    db.volume_update(self.context, fake_volume_id, {'status': 'error'})

    self.mox.ReplayAll()
    self.manager.create_volume(self.context, topic, volume_id,
                               request_spec=request_spec,
                               filter_properties={})