def test_aggregate_metadata_get_by_host_with_key(self):
    """Lookup filtered by key returns the union of values for that key."""
    state = fakes.FakeHostState(
        'fake', 'node', {'aggregates': _AGGREGATE_FIXTURES})
    result = utils.aggregate_metadata_get_by_host(state, 'k1')
    self.assertIn('k1', result)
    self.assertEqual({'1', '3', '7', '6'}, result['k1'])
def test_aggregate_metadata_get_by_host_empty_result(self):
    """No aggregates on the host means an empty metadata dict."""
    state = fakes.FakeHostState('fake', 'node', {'aggregates': []})
    result = utils.aggregate_metadata_get_by_host(state, 'k3')
    self.assertEqual({}, result)
def from_host_state(cls, host_state, allocation_request=None,
                    allocation_request_version=None):
    """Build a Selection from a HostState plus allocation info.

    The allocation_request and allocation_request_version are not part
    of the HostState, so they must be supplied separately.
    """
    request_json = jsonutils.dumps(allocation_request)
    limits = objects.SchedulerLimits.from_dict(host_state.limits)
    # AZ resolution mirrors the AvailabilityZoneFilter: the helper
    # returns a set of values, but a host can only be in one AZ.
    az_meta = filter_utils.aggregate_metadata_get_by_host(
        host_state, key='availability_zone')
    az_values = az_meta.get('availability_zone')
    if az_values:
        availability_zone = list(az_values)[0]
    else:
        availability_zone = CONF.default_availability_zone
    return cls(compute_node_uuid=host_state.uuid,
               service_host=host_state.host,
               nodename=host_state.nodename,
               cell_uuid=host_state.cell_uuid,
               limits=limits,
               allocation_request=request_json,
               allocation_request_version=allocation_request_version,
               availability_zone=availability_zone)
def host_passes(self, host_state, spec_obj):
    """Restrict hosts in "domain_tenant_ids" aggregates to those tenants.

    If a host is in an aggregate that has the metadata key
    "domain_tenant_ids" it can only create instances from that
    tenant(s). A host can be in different aggregates. If a host doesn't
    belong to an aggregate with the metadata key "domain_tenant_ids" it
    can create instances from all tenants.
    """
    # Removed leftover debugging residue (`#import ipdb; ipdb.set_trace()`).
    tenant_id = spec_obj.project_id
    # Pass args lazily; don't pre-format the log message with %.
    LOG.debug("Spec: %s", spec_obj)
    metadata = utils.aggregate_metadata_get_by_host(
        host_state, key="domain_tenant_ids")
    if metadata != {}:
        configured_tenant_ids = metadata.get("domain_tenant_ids")
        LOG.debug("%s %s", tenant_id, configured_tenant_ids)
        if configured_tenant_ids:
            if tenant_id in configured_tenant_ids:
                LOG.debug("Host tenant id %s matched", tenant_id)
                return True
            LOG.debug("%s fails tenant id on aggregate", host_state)
            return False
        LOG.debug("No tenant id's defined on host. Host passes.")
    return True
def host_passes(self, host_state, spec_obj):
    """Checks a host in an aggregate that metadata key/value match
    with image properties.

    :returns: False as soon as one namespaced aggregate metadata option
        rejects the corresponding image property, True otherwise.
    """
    cfg_namespace = CONF.aggregate_image_properties_isolation_namespace
    cfg_separator = CONF.aggregate_image_properties_isolation_separator

    image_props = spec_obj.image.properties if spec_obj.image else {}
    metadata = utils.aggregate_metadata_get_by_host(host_state)

    for key, options in six.iteritems(metadata):
        if (cfg_namespace and
                not key.startswith(cfg_namespace + cfg_separator)):
            continue
        prop = None
        try:
            prop = image_props.get(key)
        except AttributeError:
            # Pass the mapping lazily as a logging arg instead of
            # pre-formatting the message with %.
            LOG.warning(_LW("Host '%(host)s' has a metadata key '%(key)s' "
                            "that is not present in the image metadata."),
                        {"host": host_state.host, "key": key})
            continue
        # NOTE(sbauza): Aggregate metadata is only strings, we need to
        # stringify the property to match with the option
        # TODO(sbauza): Fix that very ugly pattern matching
        if prop and str(prop) not in options:
            LOG.debug("%(host_state)s fails image aggregate properties "
                      "requirements. Property %(prop)s does not "
                      "match %(options)s.",
                      {'host_state': host_state,
                       'prop': prop,
                       'options': options})
            return False
    return True
def host_passes(self, host_state, spec_obj):
    """Check that aggregate metadata key/value pairs accept the image
    properties for this host.
    """
    namespace = CONF.aggregate_image_properties_isolation_namespace
    separator = CONF.aggregate_image_properties_isolation_separator
    prefix = (namespace + separator) if namespace else None

    image_props = spec_obj.image.properties if spec_obj.image else {}
    agg_meta = utils.aggregate_metadata_get_by_host(host_state)

    for meta_key, allowed in six.iteritems(agg_meta):
        if prefix and not meta_key.startswith(prefix):
            continue
        value = image_props.get(meta_key)
        # NOTE(sbauza): Aggregate metadata is only strings, we need to
        # stringify the property to match with the option
        # TODO(sbauza): Fix that very ugly pattern matching
        if value and str(value) not in allowed:
            LOG.debug(
                "%(host_state)s fails image aggregate properties "
                "requirements. Property %(prop)s does not "
                "match %(options)s.",
                {'host_state': host_state, 'prop': value,
                 'options': allowed})
            return False
    return True
def host_passes(self, host_state, spec_obj):
    """Verify every namespaced aggregate metadata entry against the
    corresponding image property.
    """
    ns = CONF.aggregate_image_properties_isolation_namespace
    sep = CONF.aggregate_image_properties_isolation_separator
    props = spec_obj.image.properties if spec_obj.image else {}
    agg_meta = utils.aggregate_metadata_get_by_host(host_state)
    for meta_key, options in six.iteritems(agg_meta):
        if ns:
            if not meta_key.startswith(ns + sep):
                continue
        value = props.get(meta_key)
        # NOTE(sbauza): Aggregate metadata is only strings, we need to
        # stringify the property to match with the option
        # TODO(sbauza): Fix that very ugly pattern matching
        if value and str(value) not in options:
            LOG.debug(
                "%(host_state)s fails image aggregate properties "
                "requirements. Property %(prop)s does not "
                "match %(options)s.",
                {"host_state": host_state, "prop": value,
                 "options": options},
            )
            return False
    return True
def host_passes(self, host_state, spec_obj):
    """If a host is in an aggregate that has the metadata key
    "filter_tenant_id" it can only create instances from that tenant(s).
    A host can be in different aggregates.

    If a host doesn't belong to an aggregate with the metadata key
    "filter_tenant_id" it can create instances from all tenants.
    """
    tenant_id = spec_obj.project_id
    metadata = utils.aggregate_metadata_get_by_host(
        host_state, key="filter_tenant_id")
    if metadata == {}:
        return True
    configured_tenant_ids = metadata.get("filter_tenant_id")
    if not configured_tenant_ids:
        LOG.debug("No tenant id's defined on host. Host passes.")
        return True
    if tenant_id in configured_tenant_ids:
        LOG.debug("Host tenant id %s matched", tenant_id)
        return True
    LOG.debug("%s fails tenant id on aggregate", host_state)
    msg = ('tenant_id %(tid)s not in aggregate: %(agg)s' %
           {'tid': tenant_id,
            'agg': metadata.get("filter_tenant_id")})
    self.filter_reject(host_state, spec_obj, msg)
    return False
def host_passes(self, host_state, filter_properties):
    """Checks a host in an aggregate that metadata key/value match
    with image properties.

    :param host_state: state of the host under consideration
    :param filter_properties: legacy dict carrying 'request_spec' and
        'context'
    :returns: True when every matching aggregate metadata option
        accepts the corresponding image property
    """
    cfg_namespace = CONF.aggregate_image_properties_isolation_namespace
    cfg_separator = CONF.aggregate_image_properties_isolation_separator

    spec = filter_properties.get('request_spec', {})
    image_props = spec.get('image', {}).get('properties', {})
    context = filter_properties['context']
    metadata = utils.aggregate_metadata_get_by_host(context,
                                                    host_state.host)
    # items() instead of the Python 2-only dict.iteritems().
    for key, options in metadata.items():
        if (cfg_namespace and
                not key.startswith(cfg_namespace + cfg_separator)):
            continue
        prop = image_props.get(key)
        if prop and prop not in options:
            LOG.debug(
                "%(host_state)s fails image aggregate properties "
                "requirements. Property %(prop)s does not "
                "match %(options)s.",
                {'host_state': host_state, 'prop': prop,
                 'options': options})
            return False
    return True
def host_passes(self, host_state, filter_properties):
    """If a host is in an aggregate that has the metadata key
    "filter_tenant_id" it can only create instances from that tenant(s).
    A host can be in different aggregates.

    If a host doesn't belong to an aggregate with the metadata key
    "filter_tenant_id" it can create instances from all tenants.
    """
    props = filter_properties.get('request_spec', {}).get(
        'instance_properties', {})
    tenant_id = props.get('project_id')
    metadata = utils.aggregate_metadata_get_by_host(
        host_state, key="filter_tenant_id")
    if metadata == {}:
        return True
    allowed = metadata.get("filter_tenant_id")
    if not allowed:
        LOG.debug("No tenant id's defined on host. Host passes.")
        return True
    if tenant_id not in allowed:
        LOG.debug("%s fails tenant id on aggregate", host_state)
        return False
    LOG.debug("Host tenant id %s matched", tenant_id)
    return True
def host_passes(self, host_state, filter_properties, filter_errors=None):
    """Pass hosts whose aggregate AZ metadata contains the requested AZ.

    Hosts without an 'availability_zone' aggregate key are compared
    against CONF.default_availability_zone instead.

    :param filter_errors: optional error-collection dict (unused in
        this filter); defaults to None — a `{}` default would be a
        single mutable object shared across every call.
    """
    if filter_errors is None:
        filter_errors = {}
    spec = filter_properties.get('request_spec', {})
    props = spec.get('instance_properties', {})
    availability_zone = props.get('availability_zone')
    if not availability_zone:
        # No AZ requested: any host is acceptable.
        return True
    metadata = utils.aggregate_metadata_get_by_host(
        host_state, key='availability_zone')
    if 'availability_zone' in metadata:
        hosts_passes = availability_zone in metadata['availability_zone']
        host_az = metadata['availability_zone']
    else:
        hosts_passes = availability_zone == CONF.default_availability_zone
        host_az = CONF.default_availability_zone
    if not hosts_passes:
        LOG.debug("Availability Zone '%(az)s' requested. "
                  "%(host_state)s has AZs: %(host_az)s",
                  {'host_state': host_state,
                   'az': availability_zone,
                   'host_az': host_az})
    return hosts_passes
def host_passes(self, host_state, filter_properties):
    """Pass hosts whose aggregate AZ metadata contains the requested AZ."""
    requested_az = filter_properties.get('request_spec', {}).get(
        'instance_properties', {}).get('availability_zone')
    if not requested_az:
        return True
    metadata = utils.aggregate_metadata_get_by_host(
        host_state, key='availability_zone')
    if 'availability_zone' in metadata:
        host_az = metadata['availability_zone']
        passes = requested_az in host_az
    else:
        host_az = CONF.default_availability_zone
        passes = requested_az == host_az
    if not passes:
        LOG.debug("Availability Zone '%(az)s' requested. "
                  "%(host_state)s has AZs: %(host_az)s",
                  {'host_state': host_state, 'az': requested_az,
                   'host_az': host_az})
    return passes
def host_passes(self, host_state, filter_properties):
    """Checks a host in an aggregate that metadata key/value match
    with image properties.

    :returns: True when every matching aggregate metadata option
        accepts the corresponding image property
    """
    cfg_namespace = CONF.aggregate_image_properties_isolation_namespace
    cfg_separator = CONF.aggregate_image_properties_isolation_separator

    spec = filter_properties.get('request_spec', {})
    image_props = spec.get('image', {}).get('properties', {})
    metadata = utils.aggregate_metadata_get_by_host(host_state)
    # items() instead of the Python 2-only dict.iteritems().
    for key, options in metadata.items():
        if (cfg_namespace and
                not key.startswith(cfg_namespace + cfg_separator)):
            continue
        prop = image_props.get(key)
        if prop and prop not in options:
            LOG.debug("%(host_state)s fails image aggregate properties "
                      "requirements. Property %(prop)s does not "
                      "match %(options)s.",
                      {'host_state': host_state, 'prop': prop,
                       'options': options})
            return False
    return True
def host_passes(self, host_state, spec_obj):
    """Reject the host when it does not serve the requested AZ."""
    requested_az = spec_obj.availability_zone
    if not requested_az:
        return True
    metadata = utils.aggregate_metadata_get_by_host(
        host_state, key='availability_zone')
    if 'availability_zone' in metadata:
        host_az = metadata['availability_zone']
        passes = requested_az in host_az
    else:
        host_az = CONF.default_availability_zone
        passes = requested_az == host_az
    if not passes:
        LOG.debug("Availability Zone '%(az)s' requested. "
                  "%(host_state)s has AZs: %(host_az)s",
                  {'host_state': host_state, 'az': requested_az,
                   'host_az': host_az})
        msg = ('avail zone %(az)s not in host AZ: %(host_az)s' %
               {'az': requested_az, 'host_az': host_az})
        self.filter_reject(host_state, spec_obj, msg)
    return passes
def host_passes(self, host_state, spec_obj):
    """Check that the image's CIM properties match aggregate metadata.

    Every CIM property on the image must be present in the host's
    aggregate metadata and satisfied by at least one aggregate value;
    otherwise the host is rejected.
    """
    image_props = spec_obj.image.properties if spec_obj.image else {}
    agg_metadata = utils.aggregate_metadata_get_by_host(host_state)
    for key, prop in six.iteritems(image_props.cim_properties):
        agg_vals = agg_metadata.get(key, None)
        if not agg_vals:
            # Fixed "propertiy" typo in the log message.
            LOG.debug("%(host_state)s fails image properties "
                      "requirements. Image property %(key)s is not in "
                      "aggregate.",
                      {'host_state': host_state, 'key': key})
            return False
        # Accept the key as soon as one aggregate value matches; only
        # fail when *no* value matches.  The previous loop was inverted
        # (it broke on a mismatch and returned False when every value
        # matched), contradicting its own "do not match" log message.
        for agg_val in agg_vals:
            if extra_specs_ops.match(agg_val, prop):
                break
        else:
            LOG.debug("%(host_state)s fails image properties "
                      "requirements. '%(aggregate_vals)s' do not "
                      "match '%(req)s'",
                      {'host_state': host_state,
                       'aggregate_vals': agg_vals,
                       'req': prop})
            return False
    return True
def host_passes(self, host_state, spec_obj):
    """Keep isolated tenants on hosts whose aggregates list them."""
    tenant_id = spec_obj.project_id
    # If the configuration does not list any hosts, the filter will always
    # return True, assuming a configuration error, so letting all hosts
    # through.
    if not isolated_tenants:
        LOG.debug("There is no configured isolation globally")
        return True
    if tenant_id not in isolated_tenants:
        LOG.debug("Tenant doesn't need an isolation")
        return True
    metadata = utils.aggregate_metadata_get_by_host(
        host_state, key="filter_tenant_id")
    if metadata == {}:
        return False
    configured_tenant_ids = metadata.get("filter_tenant_id")
    LOG.debug("configured filter %s on host %s",
              configured_tenant_ids, host_state)
    if not configured_tenant_ids:
        LOG.debug("No tenant id's defined on host. Skipping")
        return False
    if tenant_id not in configured_tenant_ids:
        LOG.debug("NOT matched tenant_id on aggregate %s", host_state)
        return False
    LOG.debug("Matched for tenant %s", tenant_id)
    return True
def test_aggregate_metadata_get_by_host_with_key(self):
    """Only values for the requested key are returned."""
    state = fakes.FakeHostState(
        'fake', 'node', {'aggregates': _AGGREGATE_FIXTURES})
    md = utils.aggregate_metadata_get_by_host(state, 'k1')
    self.assertIn('k1', md)
    self.assertEqual({'1', '3', '7', '6'}, md['k1'])
def test_aggregate_metadata_get_by_host_with_key(self):
    """Filtering by key yields the merged value set for that key."""
    host = fakes.FakeHostState('fake', 'node',
                               {'aggregates': _AGGREGATE_FIXTURES})
    result = utils.aggregate_metadata_get_by_host(host, 'k1')
    self.assertIn('k1', result)
    self.assertEqual(set(['1', '3', '7', '6']), result['k1'])
def host_passes(self, host_state, spec_obj):
    """Search for host aggregates that have the
    'private_iaas_project_id' metadata key.

    Hosts in these aggregates can accept instances only from a matching
    project id when that project belongs to a tenant with the
    private_iaas extra_spec set to True; otherwise they are filtered
    out. Hosts can belong to more than one aggregate, and more than one
    project id can be listed in 'private_iaas_project_id'. Hosts
    outside every aggregate, or in aggregates without the key, are
    filtered out (e.g. can only accept instances from shared tenants).
    """
    # Handle both request-object layouts: newer specs expose
    # project_id, older ones tenant_id.
    current_project_id = (getattr(spec_obj, 'project_id', None) or
                          getattr(spec_obj, 'tenant_id', None))
    if current_project_id is None:
        # The original message string was split by a literal newline
        # (a syntax error); keep it on one logical line.
        LOG.error("[PRIVATEIAAS]: request is broken. "
                  "Missing project UUID")
        return False

    # Ask keystone for tenant metadata.
    try:
        resolved_project_spec = self.tenant_manager.get(current_project_id)
    except Exception:
        if CONF.debug:
            # Lazy log args instead of eager % formatting.
            LOG.info("[PRIVATEIAAS]: Keystone connection broken, "
                     "scheduler filter will default to %s",
                     CONF.keystone_unreachable_defaults_to_true)
        return bool(CONF.keystone_unreachable_defaults_to_true)

    tenant_is_private = bool(getattr(resolved_project_spec,
                                     "private_iaas", False))
    if CONF.debug:
        LOG.info("[PRIVATEIAAS]: ProjectID %s, is_private: %s",
                 current_project_id, tenant_is_private)

    # Retrieve host aggregates filtered by the
    # 'private_iaas_project_id' metadata key.
    private_iaas_hg = utils.aggregate_metadata_get_by_host(
        host_state, key="private_iaas_project_id")

    # Select the scheduling callback matching the tenant privacy flag.
    return self.private_iaas_callbacks[tenant_is_private](
        current_project_id, private_iaas_hg, host_state)
def test_aggregate_metadata_get_by_host_empty_result(self, get_by_host):
    """An empty AggregateList yields an empty metadata dict."""
    ctxt = mock.MagicMock()
    get_by_host.return_value = objects.AggregateList(objects=[])
    result = utils.aggregate_metadata_get_by_host(ctxt, 'fake-host', 'k3')
    get_by_host.assert_called_with(ctxt.elevated(), 'fake-host', key='k3')
    self.assertEqual({}, result)
def test_aggregate_metadata_get_by_host_empty_result(self, get_by_host):
    """No aggregates for the host means no metadata at all."""
    context = mock.MagicMock()
    get_by_host.return_value = objects.AggregateList(objects=[])
    md = utils.aggregate_metadata_get_by_host(context, 'fake-host', 'k3')
    get_by_host.assert_called_with(context.elevated(), 'fake-host',
                                   key='k3')
    self.assertEqual({}, md)
def test_aggregate_metadata_get_by_host_with_key(self, get_by_host):
    """Lookup filtered by key returns only that key's values."""
    ctxt = mock.MagicMock()
    get_by_host.return_value = objects.AggregateList(
        objects=_AGGREGATE_FIXTURES)
    result = utils.aggregate_metadata_get_by_host(ctxt, 'fake-host', 'k1')
    get_by_host.assert_called_with(ctxt.elevated(), 'fake-host', key='k1')
    self.assertIn('k1', result)
    self.assertEqual({'1', '3'}, result['k1'])
def test_aggregate_metadata_get_by_host_with_key(self, get_by_host):
    """Values from the fixtures are merged under the requested key."""
    context = mock.MagicMock()
    get_by_host.return_value = objects.AggregateList(
        objects=_AGGREGATE_FIXTURES)
    md = utils.aggregate_metadata_get_by_host(context, 'fake-host', 'k1')
    get_by_host.assert_called_with(context.elevated(), 'fake-host',
                                   key='k1')
    self.assertIn('k1', md)
    self.assertEqual(set(['1', '3']), md['k1'])
def host_passes(self, host_state, filter_properties, filter_errors=None):
    """Return a list of hosts that can create instance_type

    Check that the extra specs associated with the instance type match
    the metadata provided by aggregates. If not present return False.

    :param filter_errors: optional dict collecting per-filter errors;
        defaults to None — a `{}` default would be one mutable object
        shared by every call.
    """
    if filter_errors is None:
        filter_errors = {}
    instance_type = filter_properties.get('instance_type')
    if 'extra_specs' not in instance_type:
        return True
    metadata = utils.aggregate_metadata_get_by_host(host_state)
    for key, req in six.iteritems(instance_type['extra_specs']):
        # Either not scope format, or aggregate_instance_extra_specs scope
        scope = key.split(':', 1)
        if len(scope) > 1:
            if scope[0] != _SCOPE:
                continue
            else:
                del scope[0]
        key = scope[0]
        aggregate_vals = metadata.get(key, None)
        if not aggregate_vals:
            LOG.debug("%(host_state)s fails instance_type extra_specs "
                      "requirements. Extra_spec %(key)s is not in "
                      "aggregate.",
                      {'host_state': host_state, 'key': key})
            # PF9 change
            self.mark_filter_error(self.__class__, filter_errors)
            return False
        # Accept the spec if any aggregate value satisfies it.
        for aggregate_val in aggregate_vals:
            if extra_specs_ops.match(aggregate_val, req):
                break
        else:
            LOG.debug("%(host_state)s fails instance_type extra_specs "
                      "requirements. '%(aggregate_vals)s' do not "
                      "match '%(req)s'",
                      {'host_state': host_state, 'req': req,
                       'aggregate_vals': aggregate_vals})
            # PF9 change
            self.mark_filter_error(self.__class__, filter_errors)
            return False
    return True
def host_passes(self, host_state, spec_obj): """If the host is in an aggregate with metadata key "provider:physical_network" and contains the set of values needed by each tenant network, it may create instances. """ # WRS - disable this filter for ironic hypervisor if nova_utils.is_ironic_compute(host_state): return True physkey = 'provider:physical_network' scheduler_hints = {} if spec_obj.obj_attr_is_set('scheduler_hints'): scheduler_hints = spec_obj.scheduler_hints or {} physnets = set(scheduler_hints.get(physkey, [])) metadata = utils.aggregate_metadata_get_by_host(host_state, key=physkey) # Match each provider physical network with host-aggregate metadata. if metadata: if not physnets.issubset(metadata[physkey]): msg = ("%s = %r, require: %r" % (str(physkey), list(metadata[physkey]), list(physnets))) self.filter_reject(host_state, spec_obj, msg) return False else: LOG.info( _LI("(%(host)s, %(nodename)s) PASS. " "%(key)s = %(metalist)r. " "require: %(physnetlist)r"), { 'host': host_state.host, 'nodename': host_state.nodename, 'key': physkey, 'metalist': list(metadata[physkey]), 'physnetlist': list(physnets) }) else: LOG.info( _LI("(%(host)s, %(nodename)s) NOT CONFIGURED. " "%(key)s = %(metalist)r. " "require: %(physnetlist)r"), { 'host': host_state.host, 'nodename': host_state.nodename, 'key': physkey, 'metalist': metadata, 'physnetlist': list(physnets) }) return False return True
def host_passes(self, host_state, spec_obj):
    """Return a list of hosts that can create instance_type

    Check that the extra specs associated with the instance type match
    the metadata provided by aggregates. If not present return False.
    """
    flavor = spec_obj.flavor
    # Nothing to check when the flavor carries no extra specs.
    if not flavor.obj_attr_is_set('extra_specs') or not flavor.extra_specs:
        return True
    agg_meta = utils.aggregate_metadata_get_by_host(host_state)
    for spec_key, requirement in flavor.extra_specs.items():
        # Either not scope format, or aggregate_instance_extra_specs scope
        parts = spec_key.split(':', 1)
        if len(parts) > 1:
            if parts[0] != _SCOPE:
                continue
            parts = parts[1:]
        lookup_key = parts[0]
        values = agg_meta.get(lookup_key, None)
        if not values:
            LOG.debug("%(host_state)s fails instance_type extra_specs "
                      "requirements. Extra_spec %(key)s is not in "
                      "aggregate.",
                      {'host_state': host_state, 'key': lookup_key})
            return False
        if not any(extra_specs_ops.match(v, requirement) for v in values):
            LOG.debug("%(host_state)s fails instance_type extra_specs "
                      "requirements. '%(aggregate_vals)s' do not "
                      "match '%(req)s'",
                      {'host_state': host_state, 'req': requirement,
                       'aggregate_vals': values})
            return False
    return True
def host_passes(self, host_state, spec_obj):
    """Return whether the instance matches the host's spec.

    All host's metadata entries have to be present. It effectively
    filters out flavors and images without the required or even no
    specs.
    """
    # get the host/aggregate specs
    metadata = utils.aggregate_metadata_get_by_host(host_state)
    # try to get the instance specs
    instance_type = spec_obj.flavor
    # If 'extra_specs' is not present or extra_specs are empty then we
    # need not proceed further
    if (not instance_type.obj_attr_is_set('extra_specs')
            or not instance_type.extra_specs):
        # if no instance specs are present, the host is denied if it
        # defines some specs
        return not metadata
    for key, req in metadata.items():
        # Despite the name, these are *instance* extra-spec values
        # checked against the host requirement 'req'.
        aggregate_vals = instance_type.extra_specs.get(key, None)
        if not aggregate_vals:
            # keys may be scoped with 'aggregate_instance_extra_specs'
            aggregate_vals = instance_type.extra_specs.get(
                "aggregate_instance_extra_specs:" + key, None)
        if not aggregate_vals:
            LOG.debug("%(extra_specs)s fails require host extra_specs, "
                      "key %(key)s is not in instance definition.",
                      {'extra_specs': instance_type.extra_specs,
                       'key': key})
            return False
        # NOTE(review): extra_specs values are strings, so this loop
        # iterates characters — confirm that is the intended granularity.
        for aggregate_val in aggregate_vals:
            if not extra_specs_ops.match(aggregate_val, req):
                # Fixed malformed placeholder: '%{key}s' raises
                # "unsupported format character" when the message is
                # rendered; %-style mapping syntax is '%(key)s'.
                LOG.debug("%(extra_specs)s fails required host "
                          "extra_specs, '%(aggregate_vals)s' do not "
                          "match '%(req)s' for key %(key)s.",
                          {'extra_specs': instance_type.extra_specs,
                           'req': req,
                           'aggregate_vals': aggregate_vals,
                           'key': key})
                return False
    return True
def host_passes(self, host_state, spec_obj):
    """Checks a host in an aggregate that metadata key/value match
    with image properties.
    """
    namespace = (CONF.filter_scheduler.
                 aggregate_image_properties_isolation_namespace)
    separator = (CONF.filter_scheduler.
                 aggregate_image_properties_isolation_separator)
    props = spec_obj.image.properties if spec_obj.image else {}
    agg_meta = utils.aggregate_metadata_get_by_host(host_state)
    for meta_key, options in six.iteritems(agg_meta):
        if namespace and not meta_key.startswith(namespace + separator):
            continue
        value = None
        try:
            value = props.get(meta_key)
        except AttributeError:
            LOG.warning(_LW("Host '%(host)s' has a metadata key '%(key)s' "
                            "that is not present in the image metadata."),
                        {"host": host_state.host, "key": meta_key})
            continue
        # NOTE(sbauza): Aggregate metadata is only strings, we need to
        # stringify the property to match with the option
        # TODO(sbauza): Fix that very ugly pattern matching
        if value and str(value) not in options:
            LOG.debug("%(host_state)s fails image aggregate properties "
                      "requirements. Property %(prop)s does not "
                      "match %(options)s.",
                      {'host_state': host_state, 'prop': value,
                       'options': options})
            return False
    return True
def host_passes(self, host_state, spec_obj):
    """Return a list of hosts that can create instance_type

    Check that the extra specs associated with the instance type match
    the metadata provided by aggregates. If not present return False.
    """
    instance_type = spec_obj.flavor
    # Without extra specs there is nothing to filter on.
    if not (instance_type.obj_attr_is_set('extra_specs')
            and instance_type.extra_specs):
        return True
    metadata = utils.aggregate_metadata_get_by_host(host_state)
    for raw_key, req in instance_type.extra_specs.items():
        # Either not scope format, or aggregate_instance_extra_specs scope
        prefix, sep, remainder = raw_key.partition(':')
        if sep:
            if prefix != _SCOPE:
                continue
            key = remainder
        else:
            key = raw_key
        aggregate_vals = metadata.get(key, None)
        if not aggregate_vals:
            LOG.debug("%(host_state)s fails instance_type extra_specs "
                      "requirements. Extra_spec %(key)s is not in "
                      "aggregate.",
                      {'host_state': host_state, 'key': key})
            return False
        matched = False
        for aggregate_val in aggregate_vals:
            if extra_specs_ops.match(aggregate_val, req):
                matched = True
                break
        if not matched:
            LOG.debug("%(host_state)s fails instance_type extra_specs "
                      "requirements. '%(aggregate_vals)s' do not "
                      "match '%(req)s'",
                      {'host_state': host_state, 'req': req,
                       'aggregate_vals': aggregate_vals})
            return False
    return True
def host_passes(self, host_state, spec_obj):
    """Pass hosts whose AZ intersects the flavor's 'sched:res_pool' pool.

    The flavor extra spec 'sched:res_pool' may be a comma-separated
    string or a list/tuple of availability-zone names.  Hosts without
    AZ aggregate metadata are compared against
    CONF.default_availability_zone.  When no pool is requested, every
    host passes.
    """
    specs = spec_obj.flavor.extra_specs
    availability_zone_list = specs.get('sched:res_pool', {})
    if not availability_zone_list:
        return True
    if not isinstance(availability_zone_list, (list, tuple)):
        availability_zone_list = availability_zone_list.split(',')
    metadata = utils.aggregate_metadata_get_by_host(
        host_state, key='availability_zone')
    LOG.debug("metadata: {0}".format(metadata))
    if 'availability_zone' in metadata:
        # Return a plain bool; the raw set intersection was previously
        # returned directly from this branch.
        return bool(metadata['availability_zone'].intersection(
            set(availability_zone_list)))
    return CONF.default_availability_zone in availability_zone_list
def host_passes(self, host_state, filter_properties):
    """Return a list of hosts that can create instance_type

    Check that the extra specs associated with the instance type match
    the metadata provided by aggregates. If not present return False.
    """
    instance_type = filter_properties.get('instance_type')
    if 'extra_specs' not in instance_type:
        return True
    context = filter_properties['context']
    metadata = utils.aggregate_metadata_get_by_host(context,
                                                    host_state.host)
    # items() instead of the Python 2-only dict.iteritems().
    for key, req in instance_type['extra_specs'].items():
        # Either not scope format, or aggregate_instance_extra_specs scope
        scope = key.split(':', 1)
        if len(scope) > 1:
            if scope[0] != _SCOPE:
                continue
            else:
                del scope[0]
        key = scope[0]
        aggregate_vals = metadata.get(key, None)
        if not aggregate_vals:
            LOG.debug("%(host_state)s fails instance_type extra_specs "
                      "requirements. Extra_spec %(key)s is not in "
                      "aggregate.",
                      {'host_state': host_state, 'key': key})
            return False
        for aggregate_val in aggregate_vals:
            if extra_specs_ops.match(aggregate_val, req):
                break
        else:
            LOG.debug("%(host_state)s fails instance_type extra_specs "
                      "requirements. '%(aggregate_vals)s' do not "
                      "match '%(req)s'",
                      {'host_state': host_state, 'req': req,
                       'aggregate_vals': aggregate_vals})
            return False
    return True
def host_passes(self, host_state, filter_properties):
    """If a host is in an aggregate that has the metadata key
    "filter_tenant_id" it can only create instances from that tenant(s).
    A host can be in different aggregates.

    If a host doesn't belong to an aggregate with the metadata key
    "filter_tenant_id" it can create instances from all tenants.
    """
    props = filter_properties.get('request_spec', {}).get(
        'instance_properties', {})
    tenant_id = props.get('project_id')
    context = filter_properties['context']
    metadata = utils.aggregate_metadata_get_by_host(
        context, host_state.host, key="filter_tenant_id")
    if metadata == {}:
        return True
    if tenant_id in metadata["filter_tenant_id"]:
        return True
    LOG.debug("%s fails tenant id on aggregate", host_state)
    return False
def host_passes(self, host_state, spec_obj):
    """Pass when the flavor's 'sla'-scoped extra specs match aggregate
    metadata.

    If the flavor declares no 'sla'-scoped extra specs, every host
    passes; otherwise at least one 'sla:<key>' spec must appear in the
    host's metadata with a matching value.
    """
    instance_type = spec_obj.flavor
    if not instance_type.extra_specs:
        return True
    sla_key_found = 0
    # NOTE(review): metadata is fetched with key='availability_zone',
    # so 'sla:*' keys may never appear in it — confirm the intended
    # lookup key against the aggregate configuration.
    metadata = utils.aggregate_metadata_get_by_host(
        host_state, key='availability_zone')
    LOG.debug("metadata: {0}".format(metadata))
    # items() instead of the Python 2-only dict.iteritems().
    for key, req in instance_type.extra_specs.items():
        scope = key.split(':')
        if scope[0] == SLA:
            sla_key_found = 1
        if len(scope) > 1 and scope[0] == SLA:
            if key in metadata and req in metadata[key]:
                return True
            else:
                continue
    return False if sla_key_found else True
def host_passes(self, host_state, spec_obj):
    """Pass hosts that serve the instance's requested availability zone."""
    wanted = spec_obj.availability_zone
    if not wanted:
        return True
    metadata = utils.aggregate_metadata_get_by_host(
        host_state, key='availability_zone')
    try:
        host_az = metadata['availability_zone']
        passes = wanted in host_az
    except KeyError:
        host_az = CONF.default_availability_zone
        passes = wanted == host_az
    if not passes:
        LOG.debug("Availability Zone '%(az)s' requested. "
                  "%(host_state)s has AZs: %(host_az)s",
                  {'host_state': host_state, 'az': wanted,
                   'host_az': host_az})
    return passes
def host_passes(self, host_state, spec_obj):
    """Return a list of hosts that can create instance_type

    Check that the extra specs associated with the instance type match
    the metadata provided by aggregates. If not present return False.
    """
    instance_type = spec_obj.flavor
    # If 'extra_specs' is not present or extra_specs are empty then we
    # need not proceed further
    if (not instance_type.obj_attr_is_set('extra_specs')
            or not instance_type.extra_specs):
        return True
    metadata = utils.aggregate_metadata_get_by_host(host_state)
    # WRS: ironic hosts skip the keys listed in BAREMETAL_IGNORE_KEYS
    # inside the loop below.
    is_ironic = nova_utils.is_ironic_compute(host_state)
    for key, req in instance_type.extra_specs.items():
        # Either not scope format, or aggregate_instance_extra_specs scope
        scope = key.split(':', 1)
        if len(scope) > 1:
            if scope[0] != _SCOPE:
                continue
            else:
                del scope[0]
        key = scope[0]
        # WRS - Hybrid baremetal support
        if is_ironic and key in BAREMETAL_IGNORE_KEYS:
            continue
        aggregate_vals = metadata.get(key, None)
        if not aggregate_vals:
            LOG.debug("%(host_state)s fails instance_type extra_specs "
                      "requirements. Extra_spec %(key)s is not in "
                      "aggregate.",
                      {'host_state': host_state, 'key': key})
            # Record the rejection reason for scheduler diagnostics.
            msg = ("extra_specs '%(key)s' not in aggregate, "
                   "cannot match '%(req)s'." % {'key': key, 'req': req})
            self.filter_reject(host_state, spec_obj, msg)
            return False
        # Accept the spec as soon as one aggregate value satisfies it;
        # the for/else fires only when no value matched.
        for aggregate_val in aggregate_vals:
            if extra_specs_ops.match(aggregate_val, req):
                break
        else:
            LOG.debug("%(host_state)s fails instance_type extra_specs "
                      "requirements. '%(aggregate_vals)s' do not "
                      "match '%(req)s'",
                      {'host_state': host_state, 'req': req,
                       'aggregate_vals': aggregate_vals})
            msg = ("extra_specs '%(agg)s' do not match '%(req)s'" %
                   {'req': req, 'agg': aggregate_vals})
            self.filter_reject(host_state, spec_obj, msg)
            return False
    return True
def test_aggregate_metadata_get_by_host_empty_result(self):
    """A host with no aggregates produces an empty metadata dict."""
    host = fakes.FakeHostState('fake', 'node', {'aggregates': []})
    result = utils.aggregate_metadata_get_by_host(host, 'k3')
    self.assertEqual({}, result)