示例#1
0
    def test_resolve(self):
        """resolve() must delegate conversion to to_schema_type()."""
        number = schema.Number()
        converter = self.patchobject(number, 'to_schema_type')

        result = number.resolve(1)

        # The resolved value is exactly what the (mocked) converter
        # produced, and the converter saw the raw input once.
        self.assertEqual(converter.return_value, result)
        converter.assert_called_once_with(1)
示例#2
0
class ResourceAlarm(Alarm):
    """Alarm on a metric threshold evaluated for a single resource.

    Maps to the 'gnocchi_resources_threshold' alarm type (see the
    namespace assigned in __init__).
    """

    # Spec schema used to validate the 'rule' section of the alarm spec.
    rule_schema = {
        METRIC:
        schema.String(
            _('Name of a metric to evaluate against.'),
            required=True,
        ),
        OPERATOR:
        schema.String(
            _('Comparison operator for evaluation.'),
            constraints=[
                constraints.AllowedValues(OPERATOR_VALUES),
            ],
            default=OP_EQUAL,
        ),
        THRESHOLD:
        schema.Number(_('Threshold for evaluation.'), required=True),
        GRANULARITY:
        schema.Integer(
            _('Length of each evaluation period in seconds.'),
            default=60,
        ),
        EVALUATIONS:
        schema.Integer(
            _('Number of periods to evaluate over.'),
            default=1,
        ),
        AGG_METHOD:
        schema.String(
            _('Statistics to evaluate. Must be one of %s, default to "avg".') %
            list(STATISTIC_VALUES),
            constraints=[
                constraints.AllowedValues(STATISTIC_VALUES),
            ],
            default=SV_AVG,
        ),
        RESOURCE_TYPE:
        schema.String(
            _('The resource type.'),
            required=True,
        ),
        RESOURCE_ID:
        schema.String(
            _('The ID of a resource.'),
            required=True,
        )
    }

    def __init__(self, name, spec, **kwargs):
        """Initialize a resource-threshold alarm.

        :param name: Name of the alarm.
        :param spec: Dict holding the alarm specification; its 'rule' entry
                     (default: {}) is wrapped in a Spec for validation.
        :param kwargs: Additional parameters forwarded to the Alarm base.
        """
        super(ResourceAlarm, self).__init__(name, spec, **kwargs)

        rule_spec = spec.get('rule', {})
        self.rule = schema.Spec(self.rule_schema, rule_spec)
        self.namespace = 'gnocchi_resources_threshold'
示例#3
0
    def test_to_schema_type(self):
        """to_schema_type() passes int, float and bool through unchanged."""
        number = schema.Number('desc')

        self.assertEqual(123, number.to_schema_type(123))

        self.assertEqual(123.34, number.to_schema_type(123.34))

        self.assertEqual(False, number.to_schema_type(False))
示例#4
0
    def test_allowed_values_numeric_float(self):
        """Test AllowedValues constraint for numeric floating point values.

        Test if the AllowedValues constraint works for numeric values in any
        combination of numeric strings or numbers in the constraint and
        numeric strings or numbers as value.
        """

        # Allowed values defined as numbers
        s = schema.Number(
            constraints=[constraints.AllowedValues([1.1, 2.2, 4.4])])
        # ... and value as number or string
        # NOTE(review): the schema argument to validate_constraints is
        # omitted in some calls below but passed in others — confirm the
        # mix is intentional.
        self.assertIsNone(s.validate_constraints(1.1))
        err = self.assertRaises(exception.SpecValidationFailed,
                                s.validate_constraints, 3.3)
        self.assertEqual(
            '"3.3" must be one of the allowed values: '
            '1.1, 2.2, 4.4', six.text_type(err))
        self.assertIsNone(s.validate_constraints('1.1', s))
        err = self.assertRaises(exception.SpecValidationFailed,
                                s.validate_constraints, '3.3')
        self.assertEqual(
            '"3.3" must be one of the allowed values: '
            '1.1, 2.2, 4.4', six.text_type(err))

        # Allowed values defined as strings
        s = schema.Number(
            constraints=[constraints.AllowedValues(['1.1', '2.2', '4.4'])])
        # ... and value as number or string
        self.assertIsNone(s.validate_constraints(1.1, s))
        err = self.assertRaises(exception.SpecValidationFailed,
                                s.validate_constraints, 3.3, s)
        self.assertEqual(
            '"3.3" must be one of the allowed values: '
            '1.1, 2.2, 4.4', six.text_type(err))
        self.assertIsNone(s.validate_constraints('1.1', s))
        err = self.assertRaises(exception.SpecValidationFailed,
                                s.validate_constraints, '3.3', s)
        self.assertEqual(
            '"3.3" must be one of the allowed values: '
            '1.1, 2.2, 4.4', six.text_type(err))
示例#5
0
    def test_allowed_values_numeric_float(self):
        """Test AllowedValues constraint for numeric floating point values.

        Test if the AllowedValues constraint works for numeric values in any
        combination of numeric strings or numbers in the constraint and
        numeric strings or numbers as value.
        """

        # Constraint declared with float literals ...
        field = schema.Number(
            constraints=[constraints.AllowedValues([1.1, 2.2, 4.4])])
        # ... checked against both a number and a numeric string.
        self.assertIsNone(field.validate_constraints(1.1))
        ex = self.assertRaises(exc.ESchema, field.validate_constraints, 3.3)
        self.assertEqual(
            "'3.3' must be one of the allowed values: "
            "1.1, 2.2, 4.4", str(ex))
        self.assertIsNone(field.validate_constraints('1.1', field))
        ex = self.assertRaises(exc.ESchema, field.validate_constraints, '3.3')
        self.assertEqual(
            "'3.3' must be one of the allowed values: "
            "1.1, 2.2, 4.4", str(ex))

        # Constraint declared with numeric strings ...
        field = schema.Number(
            constraints=[constraints.AllowedValues(['1.1', '2.2', '4.4'])])
        # ... checked against both a number and a numeric string.
        self.assertIsNone(field.validate_constraints(1.1, field))
        ex = self.assertRaises(exc.ESchema,
                               field.validate_constraints, 3.3, field)
        self.assertEqual(
            "'3.3' must be one of the allowed values: "
            "1.1, 2.2, 4.4", str(ex))
        self.assertIsNone(field.validate_constraints('1.1', field))
        ex = self.assertRaises(exc.ESchema,
                               field.validate_constraints, '3.3', field)
        self.assertEqual(
            "'3.3' must be one of the allowed values: "
            "1.1, 2.2, 4.4", str(ex))
示例#6
0
class AggregateByMetricsAlarm(Alarm):
    """Alarm on an aggregated statistic computed across metrics.

    Maps to the 'gnocchi_aggregation_by_metrics_threshold' alarm type
    (see the namespace assigned in __init__).
    """

    # Spec schema used to validate the 'rule' section of the alarm spec.
    rule_schema = {
        METRICS:
        schema.String(
            # NOTE(review): declared as a String although the description
            # suggests multiple metrics — confirm the expected value format.
            _('Metrics to evaluate against.'),
            required=True,
        ),
        OPERATOR:
        schema.String(
            _('Comparison operator for evaluation.'),
            constraints=[
                constraints.AllowedValues(OPERATOR_VALUES),
            ],
            default=OP_EQUAL,
        ),
        THRESHOLD:
        schema.Number(_('Threshold for evaluation.'), required=True),
        GRANULARITY:
        schema.Integer(
            _('Length of every evaluation period in seconds.'),
            default=60,
        ),
        EVALUATIONS:
        schema.Integer(
            _('Number of periods to evaluate over.'),
            default=1,
        ),
        AGG_METHOD:
        schema.String(
            _('Statistics to evaluate. Must be one of %s.') %
            list(STATISTIC_VALUES),
            constraints=[
                constraints.AllowedValues(STATISTIC_VALUES),
            ],
            default=SV_AVG,
        ),
    }

    def __init__(self, name, spec, **kwargs):
        """Initialize an aggregation-by-metrics alarm.

        :param name: Name of the alarm.
        :param spec: Dict holding the alarm specification; its 'rule' entry
                     (default: {}) is wrapped in a Spec for validation.
        :param kwargs: Additional parameters forwarded to the Alarm base.
        """
        super(AggregateByMetricsAlarm, self).__init__(name, spec, **kwargs)
        rule_spec = spec.get('rule', {})
        self.rule = schema.Spec(self.rule_schema, rule_spec)
        self.namespace = 'gnocchi_aggregation_by_metrics_threshold'
示例#7
0
    def test_validate(self):
        """validate() accepts numbers and numeric strings, rejects others."""
        number = schema.Number()

        self.assertIsNone(number.validate(1))

        self.assertIsNone(number.validate('1'))

        err = self.assertRaises(exc.ESchema, number.validate, "bogus")
        self.assertEqual("The value 'bogus' is not a valid number.",
                         str(err))

        # Stub out constraint checking to observe how validate() invokes it.
        constraints_mock = self.patchobject(number, 'validate_constraints',
                                            return_value=None)

        self.assertIsNone(number.validate('1234'))
        # The string input is converted to an int before constraint checks.
        constraints_mock.assert_called_once_with(
            1234, schema=number, context=None)
示例#8
0
class ScalingPolicy(base.Policy):
    """Policy for changing the size of a cluster.

    This policy is expected to be enforced before the node count of a cluster
    is changed.
    """

    VERSION = '1.0'

    # Relative ordering among policies checked for the same action.
    PRIORITY = 100

    # (phase, action) pairs this policy is checked against.
    TARGET = [
        ('BEFORE', consts.CLUSTER_SCALE_IN),
        ('BEFORE', consts.CLUSTER_SCALE_OUT),
    ]

    # Applicable to clusters of any profile type.
    PROFILE_TYPE = [
        'ANY',
    ]

    # Top-level keys recognized in the policy spec.
    KEYS = (
        EVENT,
        ADJUSTMENT,
    ) = (
        'event',
        'adjustment',
    )

    # Cluster actions this policy may be bound to via the 'event' property.
    _SUPPORTED_EVENTS = (
        CLUSTER_SCALE_IN,
        CLUSTER_SCALE_OUT,
    ) = (
        consts.CLUSTER_SCALE_IN,
        consts.CLUSTER_SCALE_OUT,
    )

    # Keys recognized inside the 'adjustment' map of the spec.
    _ADJUSTMENT_KEYS = (
        ADJUSTMENT_TYPE,
        ADJUSTMENT_NUMBER,
        MIN_STEP,
        BEST_EFFORT,
        COOLDOWN,
    ) = (
        'type',
        'number',
        'min_step',
        'best_effort',
        'cooldown',
    )

    properties_schema = {
        EVENT:
        schema.String(
            _('Event that will trigger this policy. Must be one of '
              'CLUSTER_SCALE_IN and CLUSTER_SCALE_OUT.'),
            constraints=[
                constraints.AllowedValues(_SUPPORTED_EVENTS),
            ],
            required=True,
        ),
        ADJUSTMENT:
        schema.Map(
            _('Detailed specification for scaling adjustments.'),
            schema={
                ADJUSTMENT_TYPE:
                schema.String(
                    _('Type of adjustment when scaling is triggered.'),
                    constraints=[
                        constraints.AllowedValues(consts.ADJUSTMENT_TYPES),
                    ],
                    default=consts.CHANGE_IN_CAPACITY,
                ),
                ADJUSTMENT_NUMBER:
                schema.Number(
                    _('A number specifying the amount of adjustment.'),
                    default=1,
                ),
                MIN_STEP:
                schema.Integer(
                    _('When adjustment type is set to "CHANGE_IN_PERCENTAGE",'
                      ' this specifies the cluster size will be decreased by '
                      'at least this number of nodes.'),
                    default=1,
                ),
                BEST_EFFORT:
                schema.Boolean(
                    _('Whether do best effort scaling when new size of '
                      'cluster will break the size limitation'),
                    default=False,
                ),
                COOLDOWN:
                schema.Integer(
                    _('Number of seconds to hold the cluster for cool-down '
                      'before allowing cluster to be resized again.'),
                    default=0,
                ),
            }),
    }

    def __init__(self, name, spec, **kwargs):
        """Initialize a scaling policy object.

        :param name: Name for the policy object.
        :param spec: A dictionary containing the detailed specification for
                     the policy.
        :param kwargs: Other optional parameters for policy object
                       creation.
        :return: An object of `ScalingPolicy`.
        """
        super(ScalingPolicy, self).__init__(name, spec, **kwargs)

        # A cluster may be bound to several scaling policies.
        self.singleton = False

        self.event = self.properties[self.EVENT]

        adjustment = self.properties[self.ADJUSTMENT]
        self.adjustment_type = adjustment[self.ADJUSTMENT_TYPE]
        self.adjustment_number = adjustment[self.ADJUSTMENT_NUMBER]
        self.adjustment_min_step = adjustment[self.MIN_STEP]

        self.best_effort = adjustment[self.BEST_EFFORT]
        self.cooldown = adjustment[self.COOLDOWN]

    def _calculate_adjustment_count(self, current_size):
        """Calculate adjustment count based on current_size.

        :param current_size: The current size of the target cluster.
        :return: The number of nodes to add or to remove.
        """

        if self.adjustment_type == consts.EXACT_CAPACITY:
            if self.event == consts.CLUSTER_SCALE_IN:
                count = current_size - self.adjustment_number
            else:
                count = self.adjustment_number - current_size
        elif self.adjustment_type == consts.CHANGE_IN_CAPACITY:
            count = self.adjustment_number
        else:  # consts.CHANGE_IN_PERCENTAGE:
            count = int((self.adjustment_number * current_size) / 100.0)
            if count < self.adjustment_min_step:
                count = self.adjustment_min_step

        return count

    def pre_op(self, cluster_id, action):
        """The hook function that is executed before the action.

        The checking result is stored in the ``data`` property of the action
        object rather than returned directly from the function.

        :param cluster_id: The ID of the target cluster.
        :param action: Action instance against which the policy is being
                       checked.
        :return: None.
        """

        # Use action input if count is provided
        count = action.inputs.get('count', None)
        current = db_api.node_count_by_cluster(action.context, cluster_id)
        if count is None:
            # count not specified, calculate it
            count = self._calculate_adjustment_count(current)

        # Count must be positive value
        try:
            count = utils.parse_int_param('count', count, allow_zero=False)
        except exception.InvalidParameter:
            action.data.update({
                'status': base.CHECK_ERROR,
                'reason': _("Invalid count (%(c)s) for action '%(a)s'.") % {
                    'c': count,
                    'a': action.action
                }
            })
            action.store(action.context)
            return

        # Check size constraints
        cluster = db_api.cluster_get(action.context, cluster_id)
        if action.action == consts.CLUSTER_SCALE_IN:
            if self.best_effort:
                # Never shrink below the cluster minimum size.
                count = min(count, current - cluster.min_size)
            result = su.check_size_params(cluster,
                                          current - count,
                                          strict=not self.best_effort)
        else:
            if self.best_effort:
                # Never grow beyond the cluster maximum size.
                count = min(count, cluster.max_size - current)
            result = su.check_size_params(cluster,
                                          current + count,
                                          strict=not self.best_effort)

        if result:
            # failed validation
            pd = {'status': base.CHECK_ERROR, 'reason': result}
        else:
            # passed validation
            pd = {
                'status': base.CHECK_OK,
                'reason': _('Scaling request validated.'),
            }
            if action.action == consts.CLUSTER_SCALE_IN:
                pd['deletion'] = {'count': count}
            else:
                pd['creation'] = {'count': count}

        action.data.update(pd)
        action.store(action.context)

        return

    def need_check(self, target, action):
        """Check whether this policy applies to the given action.

        :param target: The enforcement phase ('BEFORE'/'AFTER') per TARGET.
        :param action: Action instance to match against the policy.
        :return: True if the policy should be checked for this action.
        """
        res = super(ScalingPolicy, self).need_check(target, action)
        if res:
            # Check if the action is expected by the policy
            res = (self.event == action.action)

        return res
示例#9
0
    def test_basic(self):
        """A freshly built Number exposes its type name and description."""
        number = schema.Number('desc')

        self.assertEqual('Number', number['type'])
        self.assertEqual('desc', number['description'])
示例#10
0
class ScalingPolicy(base.Policy):
    """Policy for changing the size of a cluster.

    This policy is expected to be enforced before the node count of a cluster
    is changed.
    """

    VERSION = '1.0'

    # (phase, action) pairs this policy is checked against.
    TARGET = [
        ('BEFORE', consts.CLUSTER_SCALE_IN),
        ('BEFORE', consts.CLUSTER_SCALE_OUT),
    ]

    # Applicable to clusters of any profile type.
    PROFILE_TYPE = [
        'ANY',
    ]

    # Top-level keys recognized in the policy spec.
    KEYS = (
        EVENT,
        ADJUSTMENT,
    ) = (
        'event',
        'adjustment',
    )

    # Cluster actions this policy may be bound to via the 'event' property.
    _SUPPORTED_EVENTS = (
        CLUSTER_SCALE_IN,
        CLUSTER_SCALE_OUT,
    ) = (
        consts.CLUSTER_SCALE_IN,
        consts.CLUSTER_SCALE_OUT,
    )

    # Keys recognized inside the 'adjustment' map of the spec.
    _ADJUSTMENT_KEYS = (
        ADJUSTMENT_TYPE,
        ADJUSTMENT_NUMBER,
        MIN_STEP,
        BEST_EFFORT,
    ) = (
        'type',
        'number',
        'min_step',
        'best_effort',
    )

    properties_schema = {
        EVENT:
        schema.String(
            _('Event that will trigger this policy. Must be one of '
              'CLUSTER_SCALE_IN and CLUSTER_SCALE_OUT.'),
            constraints=[
                constraints.AllowedValues(_SUPPORTED_EVENTS),
            ],
            required=True,
        ),
        ADJUSTMENT:
        schema.Map(
            _('Detailed specification for scaling adjustments.'),
            schema={
                ADJUSTMENT_TYPE:
                schema.String(
                    _('Type of adjustment when scaling is triggered.'),
                    constraints=[
                        constraints.AllowedValues(consts.ADJUSTMENT_TYPES),
                    ],
                    default=consts.CHANGE_IN_CAPACITY,
                ),
                ADJUSTMENT_NUMBER:
                schema.Number(
                    _('A number specifying the amount of adjustment.'),
                    default=1,
                ),
                MIN_STEP:
                schema.Integer(
                    _('When adjustment type is set to "CHANGE_IN_PERCENTAGE",'
                      ' this specifies the cluster size will be decreased by '
                      'at least this number of nodes.'),
                    default=1,
                ),
                BEST_EFFORT:
                schema.Boolean(
                    _('Whether do best effort scaling when new size of '
                      'cluster will break the size limitation'),
                    default=False,
                ),
            }),
    }

    def __init__(self, name, spec, **kwargs):
        """Initialize a scaling policy object.

        :param name: Name for the policy object.
        :param spec: A dictionary containing the detailed specification for
                     the policy.
        :param kwargs: Other optional parameters for policy object creation.
        """
        super(ScalingPolicy, self).__init__(name, spec, **kwargs)

        self.event = self.properties[self.EVENT]
        # A cluster may be bound to several scaling policies.
        self.singleton = False
        adjustment = self.properties[self.ADJUSTMENT]

        self.adjustment_type = adjustment[self.ADJUSTMENT_TYPE]
        self.adjustment_number = adjustment[self.ADJUSTMENT_NUMBER]
        self.adjustment_min_step = adjustment[self.MIN_STEP]
        self.best_effort = adjustment[self.BEST_EFFORT]

    def _calculate_adjustment_count(self, current_size):
        """Calculate adjustment count based on current_size."""

        if self.adjustment_type == consts.EXACT_CAPACITY:
            if self.event == consts.CLUSTER_SCALE_IN:
                count = current_size - self.adjustment_number
            else:
                count = self.adjustment_number - current_size
        elif self.adjustment_type == consts.CHANGE_IN_CAPACITY:
            count = self.adjustment_number
        else:  # consts.CHANGE_IN_PERCENTAGE:
            count = int((self.adjustment_number * current_size) / 100.0)
            if count < self.adjustment_min_step:
                count = self.adjustment_min_step

        return count

    def pre_op(self, cluster_id, action):
        """Validate a scaling request before the action executes.

        The result (status/reason and a creation/deletion count) is stored
        in the action's ``data`` property, not returned.

        :param cluster_id: The ID of the target cluster.
        :param action: Action instance being checked.
        :return: None.
        """

        status = base.CHECK_OK
        reason = _('Scaling request validated.')

        # Check if the action is expected by the policy
        if self.event != action.action:
            action.data.update({'status': status, 'reason': reason})
            action.store(action.context)
            return

        cluster = db_api.cluster_get(action.context, cluster_id)
        nodes = db_api.node_get_all_by_cluster(action.context, cluster_id)
        current_size = len(nodes)
        count = self._calculate_adjustment_count(current_size)

        # Use action input if count is provided
        # NOTE(review): the 'count' input is used as-is; a non-integer
        # value is not validated here — confirm callers sanitize it.
        count = action.inputs.get('count', count)

        if count <= 0:
            status = base.CHECK_ERROR
            reason = _("Count (%(count)s) invalid for action %(action)s.") % {
                'count': count,
                'action': action.action
            }

        # Check size constraints
        if action.action == consts.CLUSTER_SCALE_IN:
            new_size = current_size - count
            if (new_size < cluster.min_size):
                if self.best_effort:
                    # Shrink only down to the minimum size.
                    count = current_size - cluster.min_size
                    reason = _('Do best effort scaling.')
                else:
                    status = base.CHECK_ERROR
                    reason = _('Attempted scaling below minimum size.')
        else:
            new_size = current_size + count
            if (new_size > cluster.max_size):
                if self.best_effort:
                    # Grow only up to the maximum size.
                    count = cluster.max_size - current_size
                    reason = _('Do best effort scaling.')
                else:
                    status = base.CHECK_ERROR
                    reason = _('Attempted scaling above maximum size.')

        pd = {'status': status, 'reason': reason}
        if status == base.CHECK_OK:
            if action.action == consts.CLUSTER_SCALE_IN:
                pd['deletion'] = {'count': count}
            else:
                pd['creation'] = {'count': count}

        action.data.update(pd)
        action.store(action.context)

        return
示例#11
0
class ScalingPolicy(base.Policy):
    """Policy for changing the size of a cluster.

    This policy is expected to be enforced before the node count of a cluster
    is changed.
    """

    VERSION = '1.0'
    # Support status of each schema version.
    VERSIONS = {'1.0': [{'status': consts.SUPPORTED, 'since': '2016.04'}]}

    # Relative ordering among policies checked for the same action.
    PRIORITY = 100

    # BEFORE checks validate the request; AFTER checks reset the cooldown.
    TARGET = [
        ('BEFORE', consts.CLUSTER_SCALE_IN),
        ('BEFORE', consts.CLUSTER_SCALE_OUT),
        ('AFTER', consts.CLUSTER_SCALE_IN),
        ('AFTER', consts.CLUSTER_SCALE_OUT),
    ]

    # Applicable to clusters of any profile type.
    PROFILE_TYPE = [
        'ANY',
    ]

    # Top-level keys recognized in the policy spec.
    KEYS = (
        EVENT,
        ADJUSTMENT,
    ) = (
        'event',
        'adjustment',
    )

    # Cluster actions this policy may be bound to via the 'event' property.
    _SUPPORTED_EVENTS = (
        CLUSTER_SCALE_IN,
        CLUSTER_SCALE_OUT,
    ) = (
        consts.CLUSTER_SCALE_IN,
        consts.CLUSTER_SCALE_OUT,
    )

    # Keys recognized inside the 'adjustment' map of the spec.
    _ADJUSTMENT_KEYS = (
        ADJUSTMENT_TYPE,
        ADJUSTMENT_NUMBER,
        MIN_STEP,
        BEST_EFFORT,
        COOLDOWN,
    ) = (
        'type',
        'number',
        'min_step',
        'best_effort',
        'cooldown',
    )

    properties_schema = {
        EVENT:
        schema.String(
            _('Event that will trigger this policy. Must be one of '
              'CLUSTER_SCALE_IN and CLUSTER_SCALE_OUT.'),
            constraints=[
                constraints.AllowedValues(_SUPPORTED_EVENTS),
            ],
            required=True,
        ),
        ADJUSTMENT:
        schema.Map(
            _('Detailed specification for scaling adjustments.'),
            schema={
                ADJUSTMENT_TYPE:
                schema.String(
                    _('Type of adjustment when scaling is triggered.'),
                    constraints=[
                        constraints.AllowedValues(consts.ADJUSTMENT_TYPES),
                    ],
                    default=consts.CHANGE_IN_CAPACITY,
                ),
                ADJUSTMENT_NUMBER:
                schema.Number(
                    _('A number specifying the amount of adjustment.'),
                    default=1,
                ),
                MIN_STEP:
                schema.Integer(
                    _('When adjustment type is set to "CHANGE_IN_PERCENTAGE",'
                      ' this specifies the cluster size will be decreased by '
                      'at least this number of nodes.'),
                    default=1,
                ),
                BEST_EFFORT:
                schema.Boolean(
                    _('Whether do best effort scaling when new size of '
                      'cluster will break the size limitation'),
                    default=False,
                ),
                COOLDOWN:
                schema.Integer(
                    _('Number of seconds to hold the cluster for cool-down '
                      'before allowing cluster to be resized again.'),
                    default=0,
                ),
            }),
    }

    def __init__(self, name, spec, **kwargs):
        """Initialize a scaling policy object.

        :param name: Name for the policy object.
        :param spec: A dictionary containing the detailed specification for
                     the policy.
        :param dict kwargs: Other optional parameters for policy object
                            creation.
        :return: An object of `ScalingPolicy`.
        """
        super(ScalingPolicy, self).__init__(name, spec, **kwargs)

        # A cluster may be bound to several scaling policies.
        self.singleton = False

        self.event = self.properties[self.EVENT]

        adjustment = self.properties[self.ADJUSTMENT]
        self.adjustment_type = adjustment[self.ADJUSTMENT_TYPE]
        self.adjustment_number = adjustment[self.ADJUSTMENT_NUMBER]
        self.adjustment_min_step = adjustment[self.MIN_STEP]

        self.best_effort = adjustment[self.BEST_EFFORT]
        self.cooldown = adjustment[self.COOLDOWN]

    def validate(self, context, validate_props=False):
        """Validate the policy spec beyond basic schema checks.

        :param context: Request context forwarded to the base validation.
        :param validate_props: Flag forwarded to the base validation.
        :raises exc.InvalidSpec: If 'number', 'min_step' or 'cooldown' in
                                 the adjustment map is out of range.
        """
        super(ScalingPolicy, self).validate(context, validate_props)

        if self.adjustment_number <= 0:
            msg = _("the 'number' for 'adjustment' must be > 0")
            raise exc.InvalidSpec(message=msg)

        if self.adjustment_min_step < 0:
            msg = _("the 'min_step' for 'adjustment' must be >= 0")
            raise exc.InvalidSpec(message=msg)

        if self.cooldown < 0:
            msg = _("the 'cooldown' for 'adjustment' must be >= 0")
            raise exc.InvalidSpec(message=msg)

    def _calculate_adjustment_count(self, current_size):
        """Calculate adjustment count based on current_size.

        :param current_size: The current size of the target cluster.
        :return: The number of nodes to add or to remove.
        """

        if self.adjustment_type == consts.EXACT_CAPACITY:
            if self.event == consts.CLUSTER_SCALE_IN:
                count = current_size - self.adjustment_number
            else:
                count = self.adjustment_number - current_size
        elif self.adjustment_type == consts.CHANGE_IN_CAPACITY:
            count = self.adjustment_number
        else:  # consts.CHANGE_IN_PERCENTAGE:
            count = int((self.adjustment_number * current_size) / 100.0)
            if count < self.adjustment_min_step:
                count = self.adjustment_min_step

        return count

    def pre_op(self, cluster_id, action):
        """The hook function that is executed before the action.

        The checking result is stored in the ``data`` property of the action
        object rather than returned directly from the function.

        :param cluster_id: The ID of the target cluster.
        :param action: Action instance against which the policy is being
                       checked.
        :return: None.
        """

        # check cooldown
        last_op = action.inputs.get('last_op', None)
        if last_op and not timeutils.is_older_than(last_op, self.cooldown):
            action.data.update({
                'status':
                base.CHECK_ERROR,
                'reason':
                _('Policy %s cooldown is still '
                  'in progress.') % self.id
            })
            action.store(action.context)
            return

        # Use action input if count is provided
        count_value = action.inputs.get('count', None)
        cluster = action.entity
        current = len(cluster.nodes)

        if count_value is None:
            # count not specified, calculate it
            count_value = self._calculate_adjustment_count(current)

        # Count must be positive value
        success, count = utils.get_positive_int(count_value)
        if not success:
            action.data.update({
                'status': base.CHECK_ERROR,
                'reason': _("Invalid count (%(c)s) for action '%(a)s'.") % {
                    'c': count_value,
                    'a': action.action
                }
            })
            action.store(action.context)
            return

        # Check size constraints
        # A max_size of -1 is replaced by the configured global cap.
        max_size = cluster.max_size
        if max_size == -1:
            max_size = cfg.CONF.max_nodes_per_cluster
        if action.action == consts.CLUSTER_SCALE_IN:
            if self.best_effort:
                # Never shrink below the cluster minimum size.
                count = min(count, current - cluster.min_size)
            result = su.check_size_params(cluster,
                                          current - count,
                                          strict=not self.best_effort)
        else:
            if self.best_effort:
                # Never grow beyond the effective maximum size.
                count = min(count, max_size - current)
            result = su.check_size_params(cluster,
                                          current + count,
                                          strict=not self.best_effort)

        if result:
            # failed validation
            pd = {'status': base.CHECK_ERROR, 'reason': result}
        else:
            # passed validation
            pd = {
                'status': base.CHECK_OK,
                'reason': _('Scaling request validated.'),
            }
            if action.action == consts.CLUSTER_SCALE_IN:
                pd['deletion'] = {'count': count}
            else:
                pd['creation'] = {'count': count}

        action.data.update(pd)
        action.store(action.context)

        return

    def post_op(self, cluster_id, action):
        """Record the operation time for subsequent cooldown checks.

        :param cluster_id: The ID of the target cluster.
        :param action: Action instance that has just completed.
        :return: None.
        """
        # update last_op for next cooldown check
        ts = timeutils.utcnow(True)
        cpo.ClusterPolicy.update(action.context, cluster_id, self.id,
                                 {'last_op': ts})

    def need_check(self, target, action):
        """Check whether this policy applies to the given phase and action.

        :param target: The enforcement phase ('BEFORE'/'AFTER') per TARGET.
        :param action: Action instance to match against the policy.
        :return: True if the policy should be checked for this action.
        """
        # check if target + action matches policy targets
        if not super(ScalingPolicy, self).need_check(target, action):
            return False

        if target == 'BEFORE':
            # Scaling policy BEFORE check should only be triggered if the
            # incoming action matches the specific policy event.
            # E.g. for scale-out policy the BEFORE check to select nodes for
            # termination should only run for scale-out actions.
            return self.event == action.action
        else:
            # Scaling policy AFTER check to reset cooldown timer should be
            # triggered for all supported policy events (both scale-in and
            # scale-out).  E.g. a scale-out policy should reset cooldown timer
            # whenever scale-out or scale-in action completes.
            return action.action in list(self._SUPPORTED_EVENTS)
示例#12
0
class ThresholdAlarm(Alarm):
    """Alarm on a statistic of a meter crossing a threshold.

    Maps to the 'threshold' alarm type (see the namespace assigned in
    __init__).
    """

    # Spec schema used to validate the 'rule' section of the alarm spec.
    rule_schema = {
        METER_NAME:
        schema.String(
            _('Name of a meter to evaluate against.'),
            required=True,
        ),
        OPERATOR:
        schema.String(
            _('Comparison operator for evaluation.'),
            constraints=[
                constraints.AllowedValues(OPERATOR_VALUES),
            ],
            default=OP_EQUAL,
        ),
        THRESHOLD:
        schema.Number(_('Threshold for evaluation.'), required=True),
        PERIOD:
        schema.Integer(
            _('Length of every evaluation period in seconds.'),
            default=60,
        ),
        EVALUATIONS:
        schema.Integer(
            _('Number of periods to evaluate over.'),
            default=1,
        ),
        STATISTIC:
        schema.String(
            _('Statistics to evaluate. Must be one of %s, default to "avg".') %
            list(STATISTIC_VALUES),
            constraints=[
                constraints.AllowedValues(STATISTIC_VALUES),
            ],
            default=SV_AVG,
        ),
        QUERY:
        schema.List(
            # Fixed user-visible typo: 'the dat afor' -> 'the data for'.
            _('The query to find the data for computing statistics.'),
            schema=schema.Map(
                schema={
                    Q_FIELD:
                    schema.String(
                        _('A field of a meter to query.'),
                        required=True,
                    ),
                    Q_OP:
                    schema.String(
                        _('An operator for meter comparison.'),
                        default='==',
                    ),
                    Q_VALUE:
                    schema.String(
                        _('A value for comparison.'),
                        required=True,
                    )
                }),
        )
    }

    def __init__(self, name, spec, **kwargs):
        """Initialize a threshold alarm.

        :param name: Name of the alarm.
        :param spec: Dict holding the alarm specification; its 'rule' entry
                     (default: {}) is wrapped in a Spec for validation.
        :param kwargs: Additional parameters forwarded to the Alarm base.
        """
        super(ThresholdAlarm, self).__init__(name, spec, **kwargs)
        rule_spec = spec.get('rule', {})
        self.rule = schema.Spec(self.rule_schema, rule_spec)
        self.namespace = 'threshold'