def get_weight_multiplier(host_state, multiplier_name, multiplier_config):
    """Resolve the effective weight multiplier for a host.

    The value named ``multiplier_name`` is looked up in the aggregate
    metadata carried by ``host_state``; when present and parseable it
    overrides ``multiplier_config``, otherwise the configured value is
    returned unchanged.

    :param host_state: The HostState object, which contains aggregate
        metadata.
    :param multiplier_name: The weight multiplier name, like
        "cpu_weight_multiplier".
    :param multiplier_config: The weight multiplier configuration value,
        used as the fallback.
    :returns: the multiplier cast to float, or ``multiplier_config`` when
        the aggregate metadata value cannot be decoded.
    """
    metadata_values = filters_utils.aggregate_values_from_key(
        host_state, multiplier_name)
    try:
        return filters_utils.validate_num_values(
            metadata_values, multiplier_config, cast_to=float)
    except ValueError as err:
        # Fall back to the configured value on malformed metadata.
        LOG.warning("Could not decode '%(name)s' weight multiplier: %(exce)s",
                    {'exce': err, 'name': multiplier_name})
        return multiplier_config
def get_weight_multiplier(host_state, multiplier_name, multiplier_config):
    """Return the weight multiplier to apply for a host.

    Reads ``multiplier_name`` from the aggregate metadata held by
    ``host_state`` to override ``multiplier_config``. If the aggregate
    metadata does not contain the name (or the value is invalid), the
    configured value is used directly.

    :param host_state: The HostState object, which contains aggregate
        metadata.
    :param multiplier_name: The weight multiplier name, like
        "cpu_weight_multiplier".
    :param multiplier_config: The weight multiplier configuration value.
    :returns: a float multiplier.
    """
    agg_values = filters_utils.aggregate_values_from_key(host_state,
                                                         multiplier_name)
    try:
        result = filters_utils.validate_num_values(agg_values,
                                                   multiplier_config,
                                                   cast_to=float)
    except ValueError as exc:
        # Bad metadata: log it and keep the configured multiplier.
        LOG.warning("Could not decode '%(name)s' weight multiplier: %(exce)s",
                    {'name': multiplier_name, 'exce': exc})
        result = multiplier_config
    return result
def _get_max_io_ops_per_host(self, host_state, filter_properties):
    """Return the max IO ops limit to enforce for this host.

    Aggregate metadata on the host may carry a "max_io_ops_per_host"
    value that overrides the ``CONF.max_io_ops_per_host`` option; when
    the metadata value cannot be decoded, the configured value is used.
    """
    configured = CONF.max_io_ops_per_host
    metadata_vals = utils.aggregate_values_from_key(host_state,
                                                    "max_io_ops_per_host")
    try:
        return utils.validate_num_values(metadata_vals, configured,
                                         cast_to=int)
    except ValueError as err:
        LOG.warning(_LW("Could not decode max_io_ops_per_host: '%s'"), err)
        return configured
def _get_max_instances_per_host(self, host_state, spec_obj):
    """Return the max instances limit to enforce for this host.

    The host's aggregate metadata may override the
    ``[filter_scheduler] max_instances_per_host`` option; on a bad
    metadata value the configured limit is kept.
    """
    configured = CONF.filter_scheduler.max_instances_per_host
    metadata_vals = utils.aggregate_values_from_key(
        host_state, "max_instances_per_host")
    try:
        return utils.validate_num_values(metadata_vals, configured,
                                         cast_to=int)
    except ValueError as err:
        LOG.warning(_LW("Could not decode max_instances_per_host: '%s'"), err)
        return configured
def _get_cpu_allocation_ratio(self, host_state, spec_obj):
    """Return the CPU allocation ratio to use for this host.

    Aggregate metadata may override the per-host
    ``host_state.cpu_allocation_ratio``; on a malformed metadata value
    the host's own ratio is kept.
    """
    host_ratio = host_state.cpu_allocation_ratio
    metadata_vals = utils.aggregate_values_from_key(host_state,
                                                    'cpu_allocation_ratio')
    try:
        return utils.validate_num_values(metadata_vals, host_ratio,
                                         cast_to=float)
    except ValueError as err:
        LOG.warning("Could not decode cpu_allocation_ratio: '%s'", err)
        return host_ratio
def _get_cpu_allocation_ratio(self, host_state, filter_properties):
    """Return the CPU allocation ratio to use for this host.

    Aggregate metadata may override ``CONF.cpu_allocation_ratio``; when
    the metadata value cannot be decoded, the configured ratio is used.
    """
    configured = CONF.cpu_allocation_ratio
    metadata_vals = utils.aggregate_values_from_key(host_state,
                                                    'cpu_allocation_ratio')
    try:
        return utils.validate_num_values(metadata_vals, configured,
                                         cast_to=float)
    except ValueError as err:
        LOG.warning(_LW("Could not decode cpu_allocation_ratio: '%s'"), err)
        return configured
def _get_disk_allocation_ratio(self, host_state, filter_properties):
    """Return the disk allocation ratio to use for this host.

    Aggregate metadata may override ``CONF.disk_allocation_ratio``; when
    the metadata value cannot be decoded, the configured ratio is used.
    """
    configured = CONF.disk_allocation_ratio
    metadata_vals = utils.aggregate_values_from_key(host_state,
                                                    'disk_allocation_ratio')
    try:
        return utils.validate_num_values(metadata_vals, configured,
                                         cast_to=float)
    except ValueError as err:
        LOG.warning(_LW("Could not decode disk_allocation_ratio: '%s'"), err)
        return configured
def _get_max_io_ops_per_host(self, host_state, filter_properties):
    """Return the max IO ops limit to enforce for this host.

    Aggregate metadata may override ``CONF.max_io_ops_per_host``; when
    the metadata value cannot be decoded, the configured value is used.
    """
    configured = CONF.max_io_ops_per_host
    metadata_vals = utils.aggregate_values_from_key(host_state,
                                                    'max_io_ops_per_host')
    try:
        return utils.validate_num_values(metadata_vals, configured,
                                         cast_to=int)
    except ValueError as err:
        LOG.warning(_LW("Could not decode max_io_ops_per_host: '%s'"), err)
        return configured
def _get_max_io_ops_per_host(self, host_state, spec_obj):
    """Return the max IO ops limit to enforce for this host.

    Aggregate metadata may override the
    ``[filter_scheduler] max_io_ops_per_host`` option; on a bad
    metadata value the configured limit is kept.
    """
    configured = CONF.filter_scheduler.max_io_ops_per_host
    metadata_vals = utils.aggregate_values_from_key(host_state,
                                                    'max_io_ops_per_host')
    try:
        return utils.validate_num_values(metadata_vals, configured,
                                         cast_to=int)
    except ValueError as err:
        LOG.warning(_LW("Could not decode max_io_ops_per_host: '%s'"), err)
        return configured
def _get_disk_allocation_ratio(self, host_state, filter_properties):
    """Return the disk allocation ratio to use for this host.

    The ratio is read from aggregate metadata in the database,
    overriding ``CONF.disk_allocation_ratio``; on a malformed value the
    configured ratio is used.
    """
    # TODO(uni): DB query in filter is a performance hit, especially for
    # system with lots of hosts. Will need a general solution here to fix
    # all filters with aggregate DB call things.
    configured = CONF.disk_allocation_ratio
    db_vals = utils.aggregate_values_from_db(filter_properties["context"],
                                             host_state.host,
                                             "disk_allocation_ratio")
    try:
        return utils.validate_num_values(db_vals, configured, cast_to=float)
    except ValueError as err:
        LOG.warning(_LW("Could not decode disk_allocation_ratio: '%s'"), err)
        return configured
def _get_cpu_allocation_ratio(self, host_state, filter_properties):
    """Return the CPU allocation ratio to use for this host.

    Aggregate metadata may override ``CONF.cpu_allocation_ratio``; on a
    malformed metadata value the configured ratio is used.
    """
    # TODO(uni): DB query in filter is a performance hit, especially for
    # system with lots of hosts. Will need a general solution here to fix
    # all filters with aggregate DB call things.
    configured = CONF.cpu_allocation_ratio
    metadata_vals = utils.aggregate_values_from_key(host_state,
                                                    'cpu_allocation_ratio')
    try:
        return utils.validate_num_values(metadata_vals, configured,
                                         cast_to=float)
    except ValueError as err:
        LOG.warning(_LW("Could not decode cpu_allocation_ratio: '%s'"), err)
        return configured
def _get_max_io_ops_per_host(self, host_state, filter_properties):
    """Return the max IO ops limit to enforce for this host.

    Aggregate metadata may override ``CONF.max_io_ops_per_host``; on a
    malformed metadata value the configured limit is used.
    """
    # TODO(uni): DB query in filter is a performance hit, especially for
    # system with lots of hosts. Will need a general solution here to fix
    # all filters with aggregate DB call things.
    configured = CONF.max_io_ops_per_host
    metadata_vals = utils.aggregate_values_from_key(host_state,
                                                    'max_io_ops_per_host')
    try:
        return utils.validate_num_values(metadata_vals, configured,
                                         cast_to=int)
    except ValueError as err:
        LOG.warning(_LW("Could not decode max_io_ops_per_host: '%s'"), err)
        return configured
def _get_max_io_ops_per_host(self, host_state, filter_properties):
    """Return the max IO ops limit to enforce for this host.

    Aggregate metadata may override ``CONF.max_io_ops_per_host``; when
    the metadata value cannot be decoded the configured value is kept.
    """
    # TODO(uni): DB query in filter is a performance hit, especially for
    # system with lots of hosts. Will need a general solution here to fix
    # all filters with aggregate DB call things.
    configured = CONF.max_io_ops_per_host
    metadata_vals = utils.aggregate_values_from_key(host_state,
                                                    'max_io_ops_per_host')
    try:
        limit = utils.validate_num_values(metadata_vals, configured,
                                          cast_to=int)
    except ValueError as err:
        LOG.warning(_LW("Could not decode max_io_ops_per_host: '%s'"), err)
        limit = configured
    return limit
def _get_max_instances_per_host(self, host_state, spec_obj):
    """Return the max instances limit to enforce for this host.

    The host's aggregate metadata may override the
    ``[filter_scheduler] max_instances_per_host`` config option; if the
    metadata value cannot be decoded, the configured value is used.

    :param host_state: HostState object carrying aggregate metadata.
    :param spec_obj: the request spec being scheduled (unused here).
    :returns: the limit as an int.
    """
    max_instances_per_host = CONF.filter_scheduler.max_instances_per_host
    # TODO(sfinucan): Remove this warning when the named config options
    # gains a 'min' parameter.
    if max_instances_per_host < 1:
        # The guard above fires for anything below 1, so the upcoming
        # restriction is to values >=1 (the message previously said >=0,
        # contradicting the check).
        LOG.warning(_LW('Future versions of nova will restrict the '
            '"filter_scheduler.max_instances_per_host" config option to '
            'values >=1. Update your configuration file to mitigate '
            'future upgrade issues.'))
    aggregate_vals = utils.aggregate_values_from_key(
        host_state, 'max_instances_per_host')
    try:
        value = utils.validate_num_values(
            aggregate_vals, max_instances_per_host, cast_to=int)
    except ValueError as e:
        LOG.warning(_LW("Could not decode max_instances_per_host: '%s'"), e)
        value = max_instances_per_host
    return value
def _get_max_instances_per_host(self, host_state, spec_obj):
    """Return the max instances limit to enforce for this host.

    The host's aggregate metadata may override the
    ``[filter_scheduler] max_instances_per_host`` config option; if the
    metadata value cannot be decoded, the configured value is used.

    :param host_state: HostState object carrying aggregate metadata.
    :param spec_obj: the request spec being scheduled (unused here).
    :returns: the limit as an int.
    """
    max_instances_per_host = CONF.filter_scheduler.max_instances_per_host
    # TODO(sfinucan): Remove this warning when the named config options
    # gains a 'min' parameter.
    if max_instances_per_host < 1:
        # The guard above fires for anything below 1, so the upcoming
        # restriction is to values >=1 (the message previously said >=0,
        # contradicting the check).
        LOG.warning(
            _LW('Future versions of nova will restrict the '
                '"filter_scheduler.max_instances_per_host" config option to '
                'values >=1. Update your configuration file to mitigate '
                'future upgrade issues.'))
    aggregate_vals = utils.aggregate_values_from_key(
        host_state, 'max_instances_per_host')
    try:
        value = utils.validate_num_values(aggregate_vals,
                                          max_instances_per_host,
                                          cast_to=int)
    except ValueError as e:
        LOG.warning(_LW("Could not decode max_instances_per_host: '%s'"), e)
        value = max_instances_per_host
    return value