def update(self, req, policy_id, body):
    """Update mutable attributes (name, level, cooldown) of a policy.

    The spec of a policy is immutable; attempts to change it are
    rejected because they may cause state conflicts in the engine.
    """
    data = body.get('policy', None)
    if data is None:
        raise exc.HTTPBadRequest(_("Malformed request data, missing "
                                   "'policy' key in request body."))

    # Reject any attempt to rewrite the policy spec.
    if data.get(consts.POLICY_SPEC) is not None:
        msg = _("Updating the spec of a policy is not supported because "
                "it may cause state conflicts in engine.")
        raise exc.HTTPBadRequest(msg)

    name = data.get(consts.POLICY_NAME, None)

    level = data.get(consts.POLICY_LEVEL, None)
    if level is not None:
        level = utils.parse_int_param(consts.POLICY_LEVEL, level)

    cooldown = data.get(consts.POLICY_COOLDOWN, None)
    if cooldown is not None:
        cooldown = utils.parse_int_param(consts.POLICY_COOLDOWN, cooldown)

    policy = self.rpc_client.policy_update(req.context, policy_id, name,
                                           level, cooldown)
    return {'policy': policy}
def test_parse_int(self):
    """Exercise utils.parse_int_param over valid and invalid inputs."""
    name = 'param'

    # int and string forms both parse to the same integer value
    for value, expected in {0: 0, 2: 2, '0': 0, '2': 2}.items():
        self.assertEqual(expected, utils.parse_int_param(name, value))

    # A None should be returned directly
    self.assertIsNone(utils.parse_int_param(name, None))

    # allow_zero=False accepts non-zero values ...
    for value in (2, '2'):
        self.assertTrue(
            utils.parse_int_param(name, value, allow_zero=False))
    # ... but rejects zero
    for value in (0, '0'):
        self.assertRaises(exception.InvalidParameter,
                          utils.parse_int_param,
                          name, value, allow_zero=False)

    # negatives require allow_negative=True
    for value in (-1, '-2'):
        self.assertTrue(
            utils.parse_int_param(name, value, allow_negative=True))
    for value in (-1, '-2'):
        self.assertRaises(exception.InvalidParameter,
                          utils.parse_int_param, name, value)

    # values outside [lower_limit, upper_limit] are rejected
    for value in (1, 6):
        self.assertRaises(exception.InvalidParameter,
                          utils.parse_int_param,
                          name, value, lower_limit=2, upper_limit=5)
def test_parse_int(self):
    """Verify parse_int_param conversions, limit options and errors."""
    name = 'param'
    cases = {0: 0, 2: 2, '0': 0, '2': 2}
    for raw, want in cases.items():
        got = utils.parse_int_param(name, raw)
        self.assertEqual(want, got)

    # A None should be returned directly
    self.assertIsNone(utils.parse_int_param(name, None))

    # Non-zero values survive allow_zero=False; zero does not.
    for raw in (2, '2'):
        self.assertTrue(
            utils.parse_int_param(name, raw, allow_zero=False))
    for raw in (0, '0'):
        self.assertRaises(exception.InvalidParameter,
                          utils.parse_int_param,
                          name, raw, allow_zero=False)

    # Negative values are valid only with allow_negative=True.
    for raw in (-1, '-2'):
        self.assertTrue(
            utils.parse_int_param(name, raw, allow_negative=True))
    for raw in (-1, '-2'):
        self.assertRaises(exception.InvalidParameter,
                          utils.parse_int_param, name, raw)

    # Out-of-range values trip the lower/upper limit checks.
    for raw in (1, 6):
        self.assertRaises(exception.InvalidParameter,
                          utils.parse_int_param,
                          name, raw, lower_limit=2, upper_limit=5)
def _do_resize(self, req, cluster_id, this_action, body):
    """Validate a cluster resize request and relay it to the engine.

    :param req: The incoming HTTP request.
    :param cluster_id: The ID of the cluster to resize.
    :param this_action: Key in ``body`` under which the resize
        arguments are nested.
    :param body: The parsed request body.
    :returns: The RPC result dict, augmented with a 'location' entry
        pointing at the spawned action.
    :raises HTTPBadRequest: When adjustment parameters are missing or
        mutually inconsistent.
    """
    data = body.get(this_action)
    adj_type = data.get(consts.ADJUSTMENT_TYPE)
    number = data.get(consts.ADJUSTMENT_NUMBER)
    min_size = data.get(consts.ADJUSTMENT_MIN_SIZE)
    max_size = data.get(consts.ADJUSTMENT_MAX_SIZE)
    min_step = data.get(consts.ADJUSTMENT_MIN_STEP)
    strict = data.get(consts.ADJUSTMENT_STRICT)
    # adjustment_type and number must be provided together.
    if adj_type is not None:
        if adj_type not in consts.ADJUSTMENT_TYPES:
            raise senlin_exc.InvalidParameter(name='adjustment_type',
                                              value=adj_type)
        if number is None:
            msg = _("Missing number value for resize operation.")
            raise exc.HTTPBadRequest(msg)
    if number is not None:
        if adj_type is None:
            msg = _("Missing adjustment_type value for resize "
                    "operation.")
            raise exc.HTTPBadRequest(msg)
        # Negative numbers are allowed (shrinking the cluster).
        number = utils.parse_int_param(consts.ADJUSTMENT_NUMBER, number,
                                       allow_negative=True)
    if min_size is not None:
        min_size = utils.parse_int_param(consts.ADJUSTMENT_MIN_SIZE,
                                         min_size)
    if max_size is not None:
        # NOTE(review): negative max_size is accepted here; presumably
        # it means 'no upper bound' -- confirm with engine semantics.
        max_size = utils.parse_int_param(consts.ADJUSTMENT_MAX_SIZE,
                                         max_size, allow_negative=True)
    # Cross-check bounds only when max_size is a real (positive) limit.
    if (min_size is not None and max_size is not None and
            max_size > 0 and min_size > max_size):
        msg = _("The specified min_size (%(n)s) is greater than the "
                "specified max_size (%(m)s).") % {'m': max_size,
                                                  'n': min_size}
        raise exc.HTTPBadRequest(msg)
    if min_step is not None:
        min_step = utils.parse_int_param(consts.ADJUSTMENT_MIN_STEP,
                                         min_step)
    if strict is not None:
        strict = utils.parse_bool_param(consts.ADJUSTMENT_STRICT, strict)
    result = self.rpc_client.cluster_resize(req.context, cluster_id,
                                            adj_type, number, min_size,
                                            max_size, min_step, strict)
    location = {'location': '/actions/%s' % result['action']}
    result.update(location)
    return result
def _do_resize(self, req, cluster_id, this_action, body):
    """Validate a cluster resize request and relay it to the engine.

    :param req: The incoming HTTP request.
    :param cluster_id: The ID of the cluster to resize.
    :param this_action: Key in ``body`` under which the resize
        arguments are nested.
    :param body: The parsed request body.
    :returns: The RPC result dict, augmented with a 'location' entry
        pointing at the spawned action.
    :raises HTTPBadRequest: When adjustment parameters are missing or
        mutually inconsistent.
    """
    data = body.get(this_action)
    adj_type = data.get(consts.ADJUSTMENT_TYPE)
    number = data.get(consts.ADJUSTMENT_NUMBER)
    min_size = data.get(consts.ADJUSTMENT_MIN_SIZE)
    max_size = data.get(consts.ADJUSTMENT_MAX_SIZE)
    min_step = data.get(consts.ADJUSTMENT_MIN_STEP)
    strict = data.get(consts.ADJUSTMENT_STRICT)
    # adjustment_type and number must be provided together.
    if adj_type is not None:
        if adj_type not in consts.ADJUSTMENT_TYPES:
            raise senlin_exc.InvalidParameter(name='adjustment_type',
                                              value=adj_type)
        if number is None:
            msg = _("Missing number value for resize operation.")
            raise exc.HTTPBadRequest(msg)
    if number is not None:
        if adj_type is None:
            msg = _("Missing adjustment_type value for resize "
                    "operation.")
            raise exc.HTTPBadRequest(msg)
        # Negative numbers are allowed (shrinking the cluster).
        number = utils.parse_int_param(consts.ADJUSTMENT_NUMBER, number,
                                       allow_negative=True)
    if min_size is not None:
        min_size = utils.parse_int_param(consts.ADJUSTMENT_MIN_SIZE,
                                         min_size)
    if max_size is not None:
        # NOTE(review): negative max_size is accepted here; presumably
        # it means 'no upper bound' -- confirm with engine semantics.
        max_size = utils.parse_int_param(consts.ADJUSTMENT_MAX_SIZE,
                                         max_size, allow_negative=True)
    # Cross-check bounds only when max_size is a real (positive) limit.
    if (min_size is not None and max_size is not None and
            max_size > 0 and min_size > max_size):
        msg = _("The specified min_size (%(n)s) is greater than the "
                "specified max_size (%(m)s).") % {'m': max_size,
                                                  'n': min_size}
        raise exc.HTTPBadRequest(msg)
    if min_step is not None:
        min_step = utils.parse_int_param(consts.ADJUSTMENT_MIN_STEP,
                                         min_step)
    if strict is not None:
        strict = utils.parse_bool_param(consts.ADJUSTMENT_STRICT, strict)
    else:
        # Unspecified strictness defaults to strict checking.
        strict = True
    result = self.rpc_client.cluster_resize(req.context, cluster_id,
                                            adj_type, number, min_size,
                                            max_size, min_step, strict)
    location = {'location': '/actions/%s' % result['action']}
    result.update(location)
    return result
def index(self, req):
    """List actions matching the whitelisted filters and paging params."""
    filter_whitelist = {
        "name": "mixed",
        "target": "mixed",
        "action": "mixed",
        "created_time": "single",
        "updated_time": "single",
        "deleted_time": "single",
    }
    param_whitelist = {
        "limit": "single",
        "marker": "single",
        "sort_dir": "single",
        "sort_keys": "multi",
        "show_deleted": "single",
    }
    params = util.get_allowed_params(req.params, param_whitelist)
    filters = util.get_allowed_params(req.params, filter_whitelist)

    # Coerce string query parameters into their proper types.
    if consts.PARAM_LIMIT in params:
        params[consts.PARAM_LIMIT] = utils.parse_int_param(
            consts.PARAM_LIMIT, params[consts.PARAM_LIMIT])
    if consts.PARAM_SHOW_DELETED in params:
        params[consts.PARAM_SHOW_DELETED] = utils.parse_bool_param(
            consts.PARAM_SHOW_DELETED, params[consts.PARAM_SHOW_DELETED])

    actions = self.rpc_client.action_list(req.context,
                                          filters=filters or None,
                                          **params)
    return {"actions": actions}
def index(self, req):
    """List actions matching the whitelisted filters and paging params."""
    filter_whitelist = {
        'name': 'mixed',
        'target': 'mixed',
        'action': 'mixed',
        'created_time': 'single',
        'updated_time': 'single',
        'deleted_time': 'single',
    }
    param_whitelist = {
        'limit': 'single',
        'marker': 'single',
        'sort_dir': 'single',
        'sort_keys': 'multi',
        'show_deleted': 'single',
    }
    params = util.get_allowed_params(req.params, param_whitelist)
    filters = util.get_allowed_params(req.params, filter_whitelist)

    # Coerce typed query parameters from their raw string form.
    coercers = ((consts.PARAM_LIMIT, utils.parse_int_param),
                (consts.PARAM_SHOW_DELETED, utils.parse_bool_param))
    for key, parse in coercers:
        if key in params:
            params[key] = parse(key, params[key])

    if not filters:
        filters = None
    actions = self.rpc_client.action_list(req.context, filters=filters,
                                          **params)
    return {'actions': actions}
def update(self, req, cluster_id, body):
    '''Update an existing cluster with new parameters.'''
    cluster_data = body.get('cluster')
    if cluster_data is None:
        raise exc.HTTPBadRequest(_("Malformed request data, missing "
                                   "'cluster' key in request body."))

    # Direct size edits are rejected; scaling operations must be used.
    if cluster_data.get(consts.CLUSTER_SIZE) is not None:
        msg = _("Updating cluster size is not supported, please use "
                "cluster scaling operations instead.")
        raise exc.HTTPBadRequest(msg)

    timeout = cluster_data.get(consts.CLUSTER_TIMEOUT)
    if timeout is not None:
        timeout = utils.parse_int_param(consts.CLUSTER_TIMEOUT, timeout)

    self.rpc_client.cluster_update(
        req.context, cluster_id,
        cluster_data.get(consts.CLUSTER_NAME),
        cluster_data.get(consts.CLUSTER_PROFILE),
        cluster_data.get(consts.CLUSTER_PARENT),
        cluster_data.get(consts.CLUSTER_TAGS),
        timeout)

    # The update is asynchronous; signal acceptance to the client.
    raise exc.HTTPAccepted()
def _enforce_data_types(self):
    """Coerce the sizing and timeout attributes into validated ints.

    Zero is a legal value for all four attributes; max_size may also be
    negative.
    """
    specs = (
        ('desired_capacity', consts.CLUSTER_DESIRED_CAPACITY, {}),
        ('min_size', consts.CLUSTER_MIN_SIZE, {}),
        ('max_size', consts.CLUSTER_MAX_SIZE, {'allow_negative': True}),
        ('timeout', consts.CLUSTER_TIMEOUT, {}),
    )
    for attr, param_name, extra in specs:
        raw = getattr(self, attr)
        if raw is not None:
            setattr(self, attr,
                    utils.parse_int_param(param_name, raw,
                                          allow_zero=True, **extra))
def do_scale_out(self):
    """Handler for the CLUSTER_SCALE_OUT action.

    :returns: A tuple containing the result and the corresponding reason.
    """
    self.cluster.set_status(self.context, self.cluster.RESIZING,
                            'Cluster scale out started.')
    # We use policy output if any, or else the count is
    # set to 1 as default.
    pd = self.data.get('creation', None)
    if pd is not None:
        count = pd.get('count', 1)
    else:
        # If no scaling policy is attached, use the
        # input count directly
        count = self.inputs.get('count', 1)

    # The count must be a positive (non-zero) integer.
    try:
        count = utils.parse_int_param('count', count, allow_zero=False)
    except exception.InvalidParameter:
        reason = _('Invalid count (%s) for scaling out.') % count
        status_reason = _('Cluster scaling failed: %s') % reason
        # Roll the cluster status back to ACTIVE before bailing out.
        self.cluster.set_status(self.context, self.cluster.ACTIVE,
                                status_reason)
        return self.RES_ERROR, reason

    # check provided params against current properties
    # desired is checked when strict is True
    curr_size = len(self.cluster.nodes)
    new_size = curr_size + count
    result = scaleutils.check_size_params(self.cluster, new_size,
                                          None, None, True)
    if result:
        status_reason = _('Cluster scaling failed: %s') % result
        self.cluster.set_status(self.context, self.cluster.ACTIVE,
                                status_reason)
        return self.RES_ERROR, result

    result, reason = self._create_nodes(count)
    if result == self.RES_OK:
        reason = _('Cluster scaling succeeded.')
        self.cluster.set_status(self.context, self.cluster.ACTIVE,
                                reason, desired_capacity=new_size)
    elif result in [self.RES_CANCEL, self.RES_TIMEOUT, self.RES_ERROR]:
        self.cluster.set_status(self.context, self.cluster.ERROR,
                                reason, desired_capacity=new_size)
    else:
        # RES_RETRY: leave the cluster status untouched so the action
        # can be retried later.
        pass

    return result, reason
def index(self, req):
    """List triggers matching whitelisted filters and query params."""
    filter_whitelist = {
        consts.TRIGGER_NAME: 'mixed',
        consts.TRIGGER_TYPE: 'mixed',
        consts.TRIGGER_STATE: 'mixed',
        consts.TRIGGER_ENABLED: 'mixed',
        consts.TRIGGER_SEVERITY: 'mixed',
    }
    param_whitelist = {
        consts.PARAM_LIMIT: 'single',
        consts.PARAM_MARKER: 'single',
        consts.PARAM_SORT_DIR: 'single',
        consts.PARAM_SORT_KEYS: 'multi',
        consts.PARAM_SHOW_DELETED: 'single',
        consts.PARAM_GLOBAL_PROJECT: 'single',
    }
    params = util.get_allowed_params(req.params, param_whitelist)
    filters = util.get_allowed_params(req.params, filter_whitelist)

    if consts.PARAM_SHOW_DELETED in params:
        params[consts.PARAM_SHOW_DELETED] = utils.parse_bool_param(
            consts.PARAM_SHOW_DELETED, params[consts.PARAM_SHOW_DELETED])
    if consts.PARAM_LIMIT in params:
        params[consts.PARAM_LIMIT] = utils.parse_int_param(
            consts.PARAM_LIMIT, params[consts.PARAM_LIMIT])

    # Only 'asc' and 'desc' are valid sorting directions.
    if consts.PARAM_SORT_DIR in params:
        direction = params[consts.PARAM_SORT_DIR].lower()
        if direction not in ('asc', 'desc'):
            raise exc.HTTPBadRequest(
                _("Sorting direction (sort_dir) must "
                  "be either 'asc' or 'desc'."))

    # Translate the global_project flag into the project_safe knob.
    if consts.PARAM_GLOBAL_PROJECT in params:
        show_global = utils.parse_bool_param(
            consts.PARAM_GLOBAL_PROJECT,
            params.pop(consts.PARAM_GLOBAL_PROJECT))
        params['project_safe'] = not show_global

    triggers = self.rpc_client.trigger_list(req.context,
                                            filters=filters or None,
                                            **params)
    return {'triggers': triggers}
def index(self, req):
    """List triggers, after validating and coercing query parameters."""
    filter_whitelist = {
        consts.TRIGGER_NAME: 'mixed',
        consts.TRIGGER_TYPE: 'mixed',
        consts.TRIGGER_STATE: 'mixed',
        consts.TRIGGER_ENABLED: 'mixed',
        consts.TRIGGER_SEVERITY: 'mixed',
    }
    param_whitelist = {
        consts.PARAM_LIMIT: 'single',
        consts.PARAM_MARKER: 'single',
        consts.PARAM_SORT_DIR: 'single',
        consts.PARAM_SORT_KEYS: 'multi',
        consts.PARAM_SHOW_DELETED: 'single',
        consts.PARAM_GLOBAL_PROJECT: 'single',
    }
    params = util.get_allowed_params(req.params, param_whitelist)
    filters = util.get_allowed_params(req.params, filter_whitelist)

    # Type-coerce boolean and integer parameters.
    for key, parse in ((consts.PARAM_SHOW_DELETED,
                        utils.parse_bool_param),
                       (consts.PARAM_LIMIT, utils.parse_int_param)):
        if key in params:
            params[key] = parse(key, params[key])

    # Validate the sorting direction without rewriting its value.
    if consts.PARAM_SORT_DIR in params:
        if params[consts.PARAM_SORT_DIR].lower() not in ('asc', 'desc'):
            raise exc.HTTPBadRequest(_("Sorting direction (sort_dir) must "
                                       "be either 'asc' or 'desc'."))

    # global_project=True widens the listing beyond the caller's project.
    if consts.PARAM_GLOBAL_PROJECT in params:
        wide = utils.parse_bool_param(
            consts.PARAM_GLOBAL_PROJECT,
            params.pop(consts.PARAM_GLOBAL_PROJECT))
        params['project_safe'] = not wide

    if not filters:
        filters = None
    triggers = self.rpc_client.trigger_list(req.context, filters=filters,
                                            **params)
    return {'triggers': triggers}
def index(self, req):
    """List events matching the whitelisted filters and paging params."""
    filter_whitelist = {
        'obj_name': 'mixed',
        'obj_type': 'mixed',
        'obj_id': 'mixed',
        'cluster_id': 'mixed',
        'action': 'mixed',
    }
    param_whitelist = {
        'limit': 'single',
        'marker': 'single',
        'sort_dir': 'single',
        'sort_keys': 'multi',
        'global_project': 'single',
        'show_deleted': 'single',
    }
    params = util.get_allowed_params(req.params, param_whitelist)
    filters = util.get_allowed_params(req.params, filter_whitelist)

    if consts.PARAM_SHOW_DELETED in params:
        params[consts.PARAM_SHOW_DELETED] = utils.parse_bool_param(
            consts.PARAM_SHOW_DELETED, params[consts.PARAM_SHOW_DELETED])

    # global_project=True means list events across all projects.
    if consts.PARAM_GLOBAL_PROJECT in params:
        wide = utils.parse_bool_param(
            consts.PARAM_GLOBAL_PROJECT,
            params.pop(consts.PARAM_GLOBAL_PROJECT))
        params['project_safe'] = not wide

    if consts.PARAM_LIMIT in params:
        params[consts.PARAM_LIMIT] = utils.parse_int_param(
            consts.PARAM_LIMIT, params[consts.PARAM_LIMIT])

    events = self.rpc_client.event_list(req.context,
                                        filters=filters or None,
                                        **params)
    return {'events': events}
def index(self, req):
    """List webhooks matching the whitelisted filters and params.

    :param req: The incoming HTTP request.
    :returns: A dict with key 'webhooks' holding the engine's list.
    """
    filter_whitelist = {
        'name': 'mixed',
        # NOTE(review): this value appeared as '******' (an apparent
        # secret-scrubbing artifact). Restored to 'mixed' to match the
        # handling of the sibling filter keys -- confirm against VCS
        # history.
        'user': 'mixed',
        'obj_id': 'mixed',
        'obj_type': 'mixed',
        'action': 'mixed',
    }
    param_whitelist = {
        'limit': 'single',
        'marker': 'single',
        'sort_keys': 'multi',
        'sort_dir': 'single',
        'show_deleted': 'single',
        'global_project': 'single',
    }
    params = util.get_allowed_params(req.params, param_whitelist)
    filters = util.get_allowed_params(req.params, filter_whitelist)

    key = consts.PARAM_LIMIT
    if key in params:
        params[key] = utils.parse_int_param(key, params[key])

    key = consts.PARAM_SHOW_DELETED
    if key in params:
        params[key] = utils.parse_bool_param(key, params[key])

    # global_project=True widens the listing beyond the caller's project.
    key = consts.PARAM_GLOBAL_PROJECT
    if key in params:
        project_safe = not utils.parse_bool_param(key, params[key])
        del params[key]
        params['project_safe'] = project_safe

    if not filters:
        filters = None

    webhooks = self.rpc_client.webhook_list(req.context, filters=filters,
                                            **params)
    return {'webhooks': webhooks}
def index(self, req):
    """List events, honoring tenant scoping and paging parameters."""
    filter_whitelist = {
        'obj_name': 'mixed',
        'obj_type': 'mixed',
        'obj_id': 'mixed',
        'cluster_id': 'mixed',
        'action': 'mixed',
    }
    param_whitelist = {
        'limit': 'single',
        'marker': 'single',
        'sort_dir': 'single',
        'sort_keys': 'multi',
        'global_tenant': 'single',
        'show_deleted': 'single',
    }
    params = util.get_allowed_params(req.params, param_whitelist)
    filters = util.get_allowed_params(req.params, filter_whitelist)

    if consts.PARAM_SHOW_DELETED in params:
        params[consts.PARAM_SHOW_DELETED] = utils.parse_bool_param(
            consts.PARAM_SHOW_DELETED, params[consts.PARAM_SHOW_DELETED])

    # global_tenant=True means do NOT restrict to the caller's tenant.
    if consts.PARAM_GLOBAL_TENANT in params:
        wide = utils.parse_bool_param(
            consts.PARAM_GLOBAL_TENANT,
            params.pop(consts.PARAM_GLOBAL_TENANT))
        params['tenant_safe'] = not wide

    if consts.PARAM_LIMIT in params:
        params[consts.PARAM_LIMIT] = utils.parse_int_param(
            consts.PARAM_LIMIT, params[consts.PARAM_LIMIT])

    events = self.rpc_client.event_list(req.context,
                                        filters=filters or None,
                                        **params)
    return {'events': events}
def index(self, req):
    """List events, rejecting any unrecognized query parameter."""
    filter_whitelist = {
        consts.EVENT_OBJ_NAME: 'mixed',
        consts.EVENT_OBJ_TYPE: 'mixed',
        consts.EVENT_OBJ_ID: 'mixed',
        consts.EVENT_CLUSTER_ID: 'mixed',
        consts.EVENT_ACTION: 'mixed',
        consts.EVENT_LEVEL: 'mixed',
    }
    param_whitelist = {
        consts.PARAM_LIMIT: 'single',
        consts.PARAM_MARKER: 'single',
        consts.PARAM_SORT: 'single',
        consts.PARAM_GLOBAL_PROJECT: 'single',
    }
    # Any query key outside the two whitelists is a client error.
    recognized = set(param_whitelist) | set(filter_whitelist)
    for key in req.params.keys():
        if key not in recognized:
            raise exc.HTTPBadRequest(_('Invalid parameter %s') % key)

    params = util.get_allowed_params(req.params, param_whitelist)
    filters = util.get_allowed_params(req.params, filter_whitelist)

    # global_project=True widens the listing beyond the caller's project.
    if consts.PARAM_GLOBAL_PROJECT in params:
        wide = utils.parse_bool_param(
            consts.PARAM_GLOBAL_PROJECT,
            params.pop(consts.PARAM_GLOBAL_PROJECT))
        params['project_safe'] = not wide

    if consts.PARAM_LIMIT in params:
        params[consts.PARAM_LIMIT] = utils.parse_int_param(
            consts.PARAM_LIMIT, params[consts.PARAM_LIMIT])

    events = self.rpc_client.event_list(req.context,
                                        filters=filters or None,
                                        **params)
    return {'events': events}
def index(self, req):
    """List profiles, rejecting any unrecognized query parameter."""
    filter_whitelist = {
        consts.PROFILE_NAME: 'mixed',
        consts.PROFILE_TYPE: 'mixed',
        consts.PROFILE_METADATA: 'mixed',
    }
    param_whitelist = {
        consts.PARAM_LIMIT: 'single',
        consts.PARAM_MARKER: 'single',
        consts.PARAM_SORT: 'single',
        consts.PARAM_GLOBAL_PROJECT: 'single',
    }
    # Reject any query key outside the two whitelists.
    allowed = set(param_whitelist) | set(filter_whitelist)
    for key in req.params.keys():
        if key not in allowed:
            raise exc.HTTPBadRequest(_('Invalid parameter %s') % key)

    params = util.get_allowed_params(req.params, param_whitelist)
    filters = util.get_allowed_params(req.params, filter_whitelist)

    if consts.PARAM_LIMIT in params:
        params[consts.PARAM_LIMIT] = utils.parse_int_param(
            consts.PARAM_LIMIT, params[consts.PARAM_LIMIT])

    # global_project=True widens the listing beyond the caller's project.
    if consts.PARAM_GLOBAL_PROJECT in params:
        wide = utils.parse_bool_param(
            consts.PARAM_GLOBAL_PROJECT,
            params.pop(consts.PARAM_GLOBAL_PROJECT))
        params['project_safe'] = not wide

    profiles = self.rpc_client.profile_list(req.context,
                                            filters=filters or None,
                                            **params)
    return {'profiles': profiles}
def index(self, req):
    """List nodes, optionally scoped to a cluster or across tenants."""
    filter_whitelist = {
        'status': 'mixed',
        'name': 'mixed',
    }
    param_whitelist = {
        'cluster_id': 'single',
        'show_deleted': 'single',
        'limit': 'single',
        'marker': 'single',
        'sort_keys': 'multi',
        'sort_dir': 'single',
        'global_tenant': 'single',
    }
    params = util.get_allowed_params(req.params, param_whitelist)
    filters = util.get_allowed_params(req.params, filter_whitelist)

    if consts.PARAM_LIMIT in params:
        params[consts.PARAM_LIMIT] = utils.parse_int_param(
            consts.PARAM_LIMIT, params[consts.PARAM_LIMIT])
    if consts.PARAM_SHOW_DELETED in params:
        params[consts.PARAM_SHOW_DELETED] = utils.parse_bool_param(
            consts.PARAM_SHOW_DELETED, params[consts.PARAM_SHOW_DELETED])

    # global_tenant=True means do NOT restrict to the caller's tenant.
    if consts.PARAM_GLOBAL_TENANT in params:
        wide = utils.parse_bool_param(
            consts.PARAM_GLOBAL_TENANT,
            params.pop(consts.PARAM_GLOBAL_TENANT))
        params['tenant_safe'] = not wide

    nodes = self.rpc_client.node_list(req.context,
                                      filters=filters or None,
                                      **params)
    return {'nodes': nodes}
def index(self, req):
    """List actions, rejecting any unrecognized query parameter."""
    filter_whitelist = {
        consts.ACTION_NAME: 'mixed',
        consts.ACTION_TARGET: 'mixed',
        consts.ACTION_ACTION: 'mixed',
        consts.ACTION_STATUS: 'mixed',
    }
    param_whitelist = {
        consts.PARAM_LIMIT: 'single',
        consts.PARAM_MARKER: 'single',
        consts.PARAM_SORT: 'single',
        consts.PARAM_GLOBAL_PROJECT: 'single',
    }
    # Any query key outside the two whitelists is a client error.
    recognized = set(param_whitelist) | set(filter_whitelist)
    for key in req.params.keys():
        if key not in recognized:
            raise exc.HTTPBadRequest(_('Invalid parameter %s') % key)

    params = util.get_allowed_params(req.params, param_whitelist)
    filters = util.get_allowed_params(req.params, filter_whitelist)

    if consts.PARAM_LIMIT in params:
        params[consts.PARAM_LIMIT] = utils.parse_int_param(
            consts.PARAM_LIMIT, params[consts.PARAM_LIMIT])

    # global_project=True widens the listing beyond the caller's project.
    if consts.PARAM_GLOBAL_PROJECT in params:
        wide = utils.parse_bool_param(
            consts.PARAM_GLOBAL_PROJECT,
            params.pop(consts.PARAM_GLOBAL_PROJECT))
        params['project_safe'] = not wide

    actions = self.rpc_client.action_list(req.context,
                                          filters=filters or None,
                                          **params)
    return {'actions': actions}
def index(self, req):
    """List receivers, rejecting any unrecognized query parameter."""
    filter_whitelist = {
        consts.RECEIVER_NAME: 'mixed',
        consts.RECEIVER_TYPE: 'mixed',
        consts.RECEIVER_CLUSTER_ID: 'mixed',
        consts.RECEIVER_ACTION: 'mixed',
    }
    param_whitelist = {
        consts.PARAM_LIMIT: 'single',
        consts.PARAM_MARKER: 'single',
        consts.PARAM_SORT: 'single',
        consts.PARAM_GLOBAL_PROJECT: 'single',
    }
    # Reject any query key outside the two whitelists.
    allowed = set(param_whitelist) | set(filter_whitelist)
    for key in req.params.keys():
        if key not in allowed:
            raise exc.HTTPBadRequest(_('Invalid parameter %s') % key)

    params = util.get_allowed_params(req.params, param_whitelist)
    filters = util.get_allowed_params(req.params, filter_whitelist)

    if consts.PARAM_LIMIT in params:
        params[consts.PARAM_LIMIT] = utils.parse_int_param(
            consts.PARAM_LIMIT, params[consts.PARAM_LIMIT])

    # global_project=True widens the listing beyond the caller's project.
    if consts.PARAM_GLOBAL_PROJECT in params:
        show_global = utils.parse_bool_param(
            consts.PARAM_GLOBAL_PROJECT,
            params.pop(consts.PARAM_GLOBAL_PROJECT))
        params['project_safe'] = not show_global

    receivers = self.rpc_client.receiver_list(req.context,
                                              filters=filters or None,
                                              **params)
    return {'receivers': receivers}
def do_scale_in(self):
    """Handler for the CLUSTER_SCALE_IN action.

    :returns: A tuple containing the result and the corresponding reason.
    """
    self.cluster.set_status(self.context, self.cluster.RESIZING,
                            'Cluster scale in started.')
    # We use policy data if any, deletion policy and scaling policy might
    # be attached.
    pd = self.data.get('deletion', None)
    grace_period = None
    if pd is not None:
        grace_period = pd.get('grace_period', 0)
        candidates = pd.get('candidates', [])
        # if scaling policy is attached, get 'count' from action data
        count = len(candidates) or pd['count']
    else:
        # If no scaling policy is attached, use the input count directly
        candidates = []
        count = self.inputs.get('count', 1)

    # The count must be a positive (non-zero) integer.
    try:
        count = utils.parse_int_param('count', count, allow_zero=False)
    except exception.InvalidParameter:
        reason = _('Invalid count (%s) for scaling in.') % count
        status_reason = _('Cluster scaling failed: %s') % reason
        self.cluster.set_status(self.context, self.cluster.ACTIVE,
                                status_reason)
        return self.RES_ERROR, reason

    # check provided params against current properties
    # desired is checked when strict is True
    curr_size = len(self.cluster.nodes)
    if count > curr_size:
        # NOTE: fixed 'Triming' typo in this log message.
        LOG.warning(_('Trimming count (%(count)s) to current cluster '
                      'size (%(curr)s) for scaling in'),
                    {'count': count, 'curr': curr_size})
        count = curr_size
    new_size = curr_size - count

    result = scaleutils.check_size_params(self.cluster, new_size,
                                          None, None, True)
    if result:
        status_reason = _('Cluster scaling failed: %s') % result
        self.cluster.set_status(self.context, self.cluster.ACTIVE,
                                status_reason)
        return self.RES_ERROR, result

    # Choose victims randomly
    if len(candidates) == 0:
        candidates = scaleutils.nodes_by_random(self.cluster.nodes, count)

    if grace_period is not None:
        self._wait_before_deletion(grace_period)
    # The policy data may contain destroy flag and grace period option
    result, reason = self._delete_nodes(candidates)

    if result == self.RES_OK:
        reason = _('Cluster scaling succeeded.')
        self.cluster.set_status(self.context, self.cluster.ACTIVE,
                                reason, desired_capacity=new_size)
    elif result in [self.RES_CANCEL, self.RES_TIMEOUT, self.RES_ERROR]:
        self.cluster.set_status(self.context, self.cluster.ERROR,
                                reason, desired_capacity=new_size)
    else:
        # RES_RETRY: leave the status untouched for a later retry.
        pass

    return result, reason
def pre_op(self, cluster_id, action): """The hook function that is executed before the action. The checking result is stored in the ``data`` property of the action object rather than returned directly from the function. :param cluster_id: The ID of the target cluster. :param action: Action instance against which the policy is being checked. :return: None. """ # Use action input if count is provided count = action.inputs.get('count', None) current = db_api.node_count_by_cluster(action.context, cluster_id) if count is None: # count not specified, calculate it count = self._calculate_adjustment_count(current) # Count must be positive value try: count = utils.parse_int_param('count', count, allow_zero=False) except exception.InvalidParameter: action.data.update({ 'status': base.CHECK_ERROR, 'reason': _("Invalid count (%(c)s) for action '%(a)s'." ) % {'c': count, 'a': action.action} }) action.store(action.context) return # Check size constraints cluster = db_api.cluster_get(action.context, cluster_id) if action.action == consts.CLUSTER_SCALE_IN: if self.best_effort: count = min(count, current - cluster.min_size) result = su.check_size_params(cluster, current - count, strict=not self.best_effort) else: if self.best_effort: count = min(count, cluster.max_size - current) result = su.check_size_params(cluster, current + count, strict=not self.best_effort) if result: # failed validation pd = { 'status': base.CHECK_ERROR, 'reason': result } else: # passed validation pd = { 'status': base.CHECK_OK, 'reason': _('Scaling request validated.'), } if action.action == consts.CLUSTER_SCALE_IN: pd['deletion'] = {'count': count} else: pd['creation'] = {'count': count} action.data.update(pd) action.store(action.context) return
def pre_op(self, cluster_id, action): """The hook function that is executed before the action. The checking result is stored in the ``data`` property of the action object rather than returned directly from the function. :param cluster_id: The ID of the target cluster. :param action: Action instance against which the policy is being checked. :return: None. """ # Use action input if count is provided count = action.inputs.get('count', None) current = db_api.node_count_by_cluster(action.context, cluster_id) if count is None: # count not specified, calculate it count = self._calculate_adjustment_count(current) # Count must be positive value try: count = utils.parse_int_param('count', count, allow_zero=False) except exception.InvalidParameter: action.data.update({ 'status': base.CHECK_ERROR, 'reason': _("Invalid count (%(c)s) for action '%(a)s'.") % { 'c': count, 'a': action.action } }) action.store(action.context) return # Check size constraints cluster = db_api.cluster_get(action.context, cluster_id) if action.action == consts.CLUSTER_SCALE_IN: if self.best_effort: count = min(count, current - cluster.min_size) result = su.check_size_params(cluster, current - count, strict=not self.best_effort) else: if self.best_effort: count = min(count, cluster.max_size - current) result = su.check_size_params(cluster, current + count, strict=not self.best_effort) if result: # failed validation pd = {'status': base.CHECK_ERROR, 'reason': result} else: # passed validation pd = { 'status': base.CHECK_OK, 'reason': _('Scaling request validated.'), } if action.action == consts.CLUSTER_SCALE_IN: pd['deletion'] = {'count': count} else: pd['creation'] = {'count': count} action.data.update(pd) action.store(action.context) return
def do_scale_in(self):
    """Handler for the CLUSTER_SCALE_IN action.

    :returns: A tuple containing the result and the corresponding reason.
    """
    self.cluster.set_status(self.context, self.cluster.RESIZING,
                            'Cluster scale in started.')
    # We use policy data if any, deletion policy and scaling policy might
    # be attached.
    pd = self.data.get('deletion', None)
    grace_period = 0
    if pd:
        grace_period = pd.get('grace_period', 0)
        candidates = pd.get('candidates', [])
        # if scaling policy is attached, get 'count' from action data
        count = len(candidates) or pd['count']
    else:
        # If no scaling policy is attached, use the input count directly
        candidates = []
        count = self.inputs.get('count', 1)

    # The count must be a positive (non-zero) integer.
    try:
        count = utils.parse_int_param('count', count, allow_zero=False)
    except exception.InvalidParameter:
        reason = _('Invalid count (%s) for scaling in.') % count
        status_reason = _('Cluster scaling failed: %s') % reason
        self.cluster.set_status(self.context, self.cluster.ACTIVE,
                                status_reason)
        return self.RES_ERROR, reason

    # check provided params against current properties
    # desired is checked when strict is True
    curr_size = len(self.cluster.nodes)
    if count > curr_size:
        # NOTE: fixed 'Triming' typo in this log message.
        LOG.warning(_('Trimming count (%(count)s) to current cluster '
                      'size (%(curr)s) for scaling in'),
                    {'count': count, 'curr': curr_size})
        count = curr_size
    new_size = curr_size - count

    result = scaleutils.check_size_params(self.cluster, new_size,
                                          None, None, True)
    if result:
        status_reason = _('Cluster scaling failed: %s') % result
        self.cluster.set_status(self.context, self.cluster.ACTIVE,
                                status_reason)
        return self.RES_ERROR, result

    # Choose victims randomly
    if len(candidates) == 0:
        candidates = scaleutils.nodes_by_random(self.cluster.nodes, count)

    self._sleep(grace_period)

    # The policy data may contain destroy flag and grace period option
    result, reason = self._delete_nodes(candidates)

    if result == self.RES_OK:
        reason = _('Cluster scaling succeeded.')
        self.cluster.set_status(self.context, self.cluster.ACTIVE,
                                reason, desired_capacity=new_size)
    elif result in [self.RES_CANCEL, self.RES_TIMEOUT, self.RES_ERROR]:
        self.cluster.set_status(self.context, self.cluster.ERROR,
                                reason, desired_capacity=new_size)
    else:
        # RES_RETRY: leave the status untouched for a later retry.
        pass

    return result, reason