def _get_delete_candidates(self, cluster_id, action):
    """Determine which nodes of a cluster should be deleted for *action*.

    :param cluster_id: ID of the cluster from which nodes are deleted.
    :param action: The action object carrying inputs and policy data.
    :returns: A list of node IDs chosen for deletion (possibly empty),
              or None is never returned (candidates are always resolved).
    """
    deletion = action.data.get('deletion', None)

    if deletion is None:
        # No deletion field in action.data which means no scaling
        # policy or deletion policy is attached.
        candidates = None
        if action.action == consts.CLUSTER_DEL_NODES:
            # Get candidates from action.input
            candidates = action.inputs.get('candidates', [])
            count = len(candidates)
        elif action.action == consts.CLUSTER_RESIZE:
            # Calculate deletion count based on action input
            db_cluster = db_api.cluster_get(action.context, cluster_id)
            scaleutils.parse_resize_params(action, db_cluster)
            if 'deletion' not in action.data:
                return []
            count = action.data['deletion']['count']
        else:  # action.action == consts.CLUSTER_SCALE_IN
            count = 1
    else:
        count = deletion.get('count', 0)
        candidates = deletion.get('candidates', None)

    # Still no candidates available, pick count of nodes randomly
    if candidates is None:
        # Nothing to delete: short-circuit to avoid a useless DB query
        # and a random pick of zero nodes.
        if count == 0:
            return []
        nodes = db_api.node_get_all_by_cluster(action.context,
                                               cluster_id=cluster_id)
        if count > len(nodes):
            count = len(nodes)
        candidates = scaleutils.nodes_by_random(nodes, count)

    return candidates
def pre_op(self, cluster_id, action, policy_data):
    """Check a scaling action against this policy before it runs.

    Computes the node-count delta implied by the policy's adjustment
    settings, validates the resulting size against the cluster size
    constraints and records the outcome (plus the count hint) into
    *policy_data*.

    :param cluster_id: ID of the cluster the action operates on.
    :param action: The scaling action being checked.
    :param policy_data: Policy data object updated with the check result.
    :returns: The updated *policy_data*.
    """
    nodes = db_api.node_get_all_by_cluster(action.context, cluster_id)
    current_size = len(nodes)

    # Translate the configured adjustment into an absolute node delta.
    # NOTE(review): an unrecognized adjustment_type would leave `count`
    # unbound; presumably the type is validated at policy creation time
    # -- confirm against the policy schema.
    if self.adjustment_type == self.EXACT_CAPACITY:
        count = self.adjustment_number - current_size
    elif self.adjustment_type == self.CHANGE_IN_CAPACITY:
        count = self.adjustment_number
    elif self.adjustment_type == self.CHANGE_IN_PERCENTAGE:
        count = int((self.adjustment_number * current_size) / 100.0)
        if count < self.adjustment_min_step:
            count = self.adjustment_min_step

    # Validate the projected size against the policy's boundaries.
    if current_size + count > self.max_size:
        policy_data.status = base.CHECK_ERROR
        policy_data.reason = _('Attempted scaling exceeds maximum size')
    elif current_size + count < self.min_size:
        policy_data.status = base.CHECK_ERROR
        policy_data.reason = _('Attempted scaling exceeds minimum size')
    else:
        policy_data.status = base.CHECK_OK
        policy_data.reason = _('Scaling request validated')

    pd = {'count': count}
    if action.action == consts.CLUSTER_SCALE_OUT:
        if count < 0:
            LOG.warning(_LW('Requesting a scale out operation but scaling '
                            'policy generates a negative count.'))
        policy_data['creation'] = pd
    elif action.action == consts.CLUSTER_SCALE_IN:
        if count > 0:
            # BUG FIX: this warning was a copy-paste of the scale-out
            # message; it now describes the actual condition (a scale-in
            # request with a positive creation count).
            LOG.warning(_LW('Requesting a scale in operation but scaling '
                            'policy generates a positive count.'))
        policy_data['deletion'] = pd

    return policy_data
def pre_op(self, cluster_id, action):
    """Validate a scaling request before the action is executed.

    Records the check status, reason and (on success) the node count
    hint into ``action.data``.
    """
    status = base.CHECK_OK
    reason = _('Scaling request validated.')

    # Actions other than the one this policy targets pass straight through.
    if self.event != action.action:
        action.data.update({'status': status, 'reason': reason})
        action.store(action.context)
        return

    cluster = db_api.cluster_get(action.context, cluster_id)
    nodes = db_api.node_get_all_by_cluster(action.context, cluster_id)
    current_size = len(nodes)

    # Policy-derived count, overridable by an explicit action input.
    count = action.inputs.get(
        'count', self._calculate_adjustment_count(current_size))

    if count <= 0:
        status = base.CHECK_ERROR
        reason = _("Count (%(count)s) invalid for action %(action)s.") % {
            'count': count, 'action': action.action}

    # Enforce the cluster size boundaries, honoring best-effort mode.
    if action.action == consts.CLUSTER_SCALE_IN:
        new_size = current_size - count
        if new_size < cluster.min_size:
            if not self.best_effort:
                status = base.CHECK_ERROR
                reason = _('Attempted scaling below minimum size.')
            else:
                count = current_size - cluster.min_size
                reason = _('Do best effort scaling.')
    else:
        new_size = current_size + count
        if new_size > cluster.max_size:
            if not self.best_effort:
                status = base.CHECK_ERROR
                reason = _('Attempted scaling above maximum size.')
            else:
                count = cluster.max_size - current_size
                reason = _('Do best effort scaling.')

    pd = {'status': status, 'reason': reason}
    if status == base.CHECK_OK:
        if action.action == consts.CLUSTER_SCALE_IN:
            pd['deletion'] = {'count': count}
        else:
            pd['creation'] = {'count': count}

    action.data.update(pd)
    action.store(action.context)
    return
def get_all_by_cluster(cls, context, cluster_id, filters=None,
                       project_safe=True):
    """Return the node objects that belong to the given cluster.

    :param context: Request context for the DB query.
    :param cluster_id: ID of the cluster whose nodes are fetched.
    :param filters: Optional filters passed through to the DB layer.
    :param project_safe: Whether the query is scoped to the project.
    :returns: A list of node objects built from the DB records.
    """
    db_objs = db_api.node_get_all_by_cluster(context, cluster_id,
                                             filters=filters,
                                             project_safe=project_safe)
    result = []
    for db_obj in db_objs:
        result.append(cls._from_db_object(context, cls(), db_obj))
    return result
def pre_op(self, cluster_id, action):
    """Check and annotate a scaling action before it proceeds.

    Writes the check status, reason and (when valid) the count hint
    into ``action.data`` and persists the action.
    """
    # Actions this policy does not handle are passed through as OK.
    if self.event != action.action:
        action.data.update({'status': base.CHECK_OK,
                            'reason': _('Scaling request validated.')})
        action.store(action.context)
        return

    status = base.CHECK_OK
    reason = _('Scaling request validated.')

    cluster = db_api.cluster_get(action.context, cluster_id)
    current_size = len(
        db_api.node_get_all_by_cluster(action.context, cluster_id))

    count = self._calculate_adjustment_count(current_size)
    # An explicit 'count' in the action inputs wins over the policy.
    count = action.inputs.get('count', count)

    if count <= 0:
        status = base.CHECK_ERROR
        reason = _("Count (%(count)s) invalid for action %(action)s."
                   ) % {'count': count, 'action': action.action}

    scale_in = action.action == consts.CLUSTER_SCALE_IN
    if scale_in:
        # Shrinking: check against the lower size boundary.
        if current_size - count < cluster.min_size:
            if self.best_effort:
                count = current_size - cluster.min_size
                reason = _('Do best effort scaling.')
            else:
                status = base.CHECK_ERROR
                reason = _('Attempted scaling below minimum size.')
    else:
        # Growing: check against the upper size boundary.
        if current_size + count > cluster.max_size:
            if self.best_effort:
                count = cluster.max_size - current_size
                reason = _('Do best effort scaling.')
            else:
                status = base.CHECK_ERROR
                reason = _('Attempted scaling above maximum size.')

    pd = {'status': status, 'reason': reason}
    if status == base.CHECK_OK:
        pd['deletion' if scale_in else 'creation'] = {'count': count}

    action.data.update(pd)
    action.store(action.context)
    return
def _select_candidates(self, context, cluster_id, count):
    """Select up to *count* node IDs from the cluster for deletion.

    Nodes in ERROR status are always preferred. Remaining slots are
    filled according to the policy criteria: random, oldest/youngest
    first, or (by default) oldest profile first.

    :param context: Request context for DB access.
    :param cluster_id: ID of the cluster to pick nodes from.
    :param count: Number of nodes requested for deletion.
    :returns: A list of node IDs.
    """
    candidates = []
    nodes = db_api.node_get_all_by_cluster(context, cluster_id)
    if count > len(nodes):
        count = len(nodes)

    # ERROR nodes are deleted before any healthy node is considered.
    err_nodes = [n for n in nodes if n.status == 'ERROR']
    nodes = [n for n in nodes if n.status != 'ERROR']
    if count <= len(err_nodes):
        return [n.id for n in err_nodes[:count]]

    candidates.extend([n.id for n in err_nodes])
    count -= len(err_nodes)

    # Random selection
    if self.criteria == self.RANDOM:
        i = count
        while i > 0:
            # BUG FIX: sample across the whole remaining node list; the
            # previous random.randrange(i) only ever looked at a
            # shrinking prefix of `nodes`, biasing the selection.
            rand = random.randrange(len(nodes))
            candidates.append(nodes[rand].id)
            nodes.remove(nodes[rand])
            i = i - 1
        return candidates

    # Node age based selection
    if self.criteria in [self.OLDEST_FIRST, self.YOUNGEST_FIRST]:
        sorted_list = sorted(nodes, key=lambda r: (r.created_time, r.name))
        for i in range(count):
            if self.criteria == self.OLDEST_FIRST:
                candidates.append(sorted_list[i].id)
            else:  # YOUNGEST_FIRST
                candidates.append(sorted_list[-1 - i].id)
        return candidates

    # Node profile based selection: oldest profile first.
    node_map = []
    for node in nodes:
        profile = db_api.profile_get(context, node.profile_id)
        created_at = profile.created_time
        node_map.append({'id': node.id, 'created_at': created_at})
    sorted_map = sorted(node_map, key=lambda m: m['created_at'])
    for i in range(count):
        candidates.append(sorted_map[i]['id'])

    return candidates
def _select_candidates(self, context, cluster_id, count):
    """Select up to *count* node IDs from the cluster for deletion.

    Nodes in ERROR status are always preferred. Remaining slots are
    filled according to the policy criteria: random, oldest/youngest
    first, or (by default) oldest profile first.

    :param context: Request context for DB access.
    :param cluster_id: ID of the cluster to pick nodes from.
    :param count: Number of nodes requested for deletion.
    :returns: A list of node IDs.
    """
    candidates = []
    nodes = db_api.node_get_all_by_cluster(context, cluster_id)
    if count > len(nodes):
        count = len(nodes)

    # ERROR nodes are deleted before any healthy node is considered.
    err_nodes = [n for n in nodes if n.status == "ERROR"]
    nodes = [n for n in nodes if n.status != "ERROR"]
    if count <= len(err_nodes):
        return [n.id for n in err_nodes[:count]]

    candidates.extend([n.id for n in err_nodes])
    count -= len(err_nodes)

    # Random selection
    if self.criteria == self.RANDOM:
        i = count
        while i > 0:
            # BUG FIX: sample across the whole remaining node list; the
            # previous random.randrange(i) only ever looked at a
            # shrinking prefix of `nodes`, biasing the selection.
            rand = random.randrange(len(nodes))
            candidates.append(nodes[rand].id)
            nodes.remove(nodes[rand])
            i = i - 1
        return candidates

    # Node age based selection
    if self.criteria in [self.OLDEST_FIRST, self.YOUNGEST_FIRST]:
        sorted_list = sorted(nodes, key=lambda r: (r.created_time, r.name))
        for i in range(count):
            if self.criteria == self.OLDEST_FIRST:
                candidates.append(sorted_list[i].id)
            else:  # YOUNGEST_FIRST
                candidates.append(sorted_list[-1 - i].id)
        return candidates

    # Node profile based selection: oldest profile first.
    node_map = []
    for node in nodes:
        profile = db_api.profile_get(context, node.profile_id)
        created_at = profile.created_time
        node_map.append({"id": node.id, "created_at": created_at})
    sorted_map = sorted(node_map, key=lambda m: m["created_at"])
    for i in range(count):
        candidates.append(sorted_map[i]["id"])

    return candidates
def do_scale_in(self, cluster, policy_data):
    """Shrink the cluster by deleting a number of nodes.

    :param cluster: The cluster object being scaled in.
    :param policy_data: Policy check output possibly containing a
        'deletion' count hint and pre-selected 'candidates'.
    :returns: A (result, reason) tuple describing the outcome.
    """
    # We may get a scale count from the request directly, if that is the
    # case, we will use it for scaling. Or else, we check if we have got
    # hints from policy checking. We use policy output if any, or else
    # the count is set to 1 as default.
    count = self.inputs.get('count', 0)
    candidates = []
    if count == 0:
        pd = policy_data.get('deletion', None)
        if pd is not None:
            count = pd.get('count', 1)
            # Try get candidates (set by deletion policy if attached)
            candidates = policy_data.get('candidates', [])

    if count == 0:
        return self.RES_OK, 'No scaling needed based on policy checking'

    # Choose victims randomly when no policy supplied candidates.
    if len(candidates) == 0:
        nodes = db_api.node_get_all_by_cluster(self.context, cluster.id)
        i = count
        while i > 0:
            r = random.randrange(len(nodes))
            candidates.append(nodes[r].id)
            nodes.remove(nodes[r])
            i = i - 1

    # The policy data may contain destroy flag and grace period option.
    # BUG FIX: the result reason was previously bound to `new_reason`,
    # leaving `reason` undefined on the failure and retry/return paths.
    result, reason = self._delete_nodes(cluster, candidates, policy_data)

    if result == self.RES_OK:
        reason = 'Cluster scaling succeeded'
        cluster.set_status(self.context, cluster.ACTIVE, reason)
    elif result in [self.RES_CANCEL, self.RES_TIMEOUT, self.RES_FAILED]:
        cluster.set_status(self.context, cluster.ERROR, reason)
    else:
        # RES_RETRY: leave the cluster status untouched.
        pass

    return result, reason
def _get_delete_candidates(self, cluster_id, action):
    """Figure out which nodes should be removed and record the choice.

    The selected candidates (and their count) are written back into
    ``action.data['deletion']`` before being returned.
    """
    deletion = action.data.get('deletion', None)

    candidates = None
    if deletion is not None:
        # A scaling or deletion policy already produced a hint.
        count = deletion.get('count', 0)
        candidates = deletion.get('candidates', None)
    elif action.action == consts.CLUSTER_DEL_NODES:
        # Candidates are provided directly in the action inputs.
        candidates = action.inputs.get('candidates', [])
        count = len(candidates)
    elif action.action == consts.CLUSTER_RESIZE:
        # Derive the deletion count from the resize parameters.
        db_cluster = db_api.cluster_get(action.context, cluster_id)
        scaleutils.parse_resize_params(action, db_cluster)
        if 'deletion' not in action.data:
            return []
        count = action.data['deletion']['count']
    else:  # consts.CLUSTER_SCALE_IN
        count = 1

    # No candidates yet: pick `count` nodes at random.
    if candidates is None:
        if count == 0:
            return []
        nodes = db_api.node_get_all_by_cluster(action.context,
                                               cluster_id=cluster_id)
        count = min(count, len(nodes))
        candidates = scaleutils.nodes_by_random(nodes, count)

    # Persist the final selection into the action data.
    deletion_data = action.data.get('deletion', {})
    deletion_data.update({'count': len(candidates),
                          'candidates': candidates})
    action.data.update({'deletion': deletion_data})

    return candidates
def _select_candidates(self, context, cluster_id, count):
    """Select up to *count* nodes from the cluster for deletion.

    Selection honors the configured criteria: RANDOM, OLDEST_FIRST,
    YOUNGEST_FIRST or OLDEST_PROFILE_FIRST.

    :param context: Request context for DB access.
    :param cluster_id: ID of the cluster to pick nodes from.
    :param count: Number of nodes requested for deletion.
    :returns: A list of node records, or [] if no criterion matched.
    """
    candidates = []
    nodes = db_api.node_get_all_by_cluster(context, cluster_id)
    if count > len(nodes):
        count = len(nodes)

    # Random selection
    if self.criteria == self.RANDOM:
        remaining = count
        while remaining > 0:
            # BUG FIX: sample across all remaining nodes; the previous
            # random.randrange(i) only looked at a shrinking prefix.
            rand = random.randrange(len(nodes))
            candidates.append(nodes[rand])
            nodes.remove(nodes[rand])
            remaining -= 1
        return candidates

    # Node age based selection
    if self.criteria in [self.OLDEST_FIRST, self.YOUNGEST_FIRST]:
        sorted_list = sorted(nodes, key=lambda r: (r.created_time, r.name))
        for i in range(count):
            if self.criteria == self.OLDEST_FIRST:
                candidates.append(sorted_list[i])
            else:  # YOUNGEST_FIRST
                # BUG FIX: was sorted_list[-i], which yields the OLDEST
                # node when i == 0.
                candidates.append(sorted_list[-1 - i])
        return candidates

    # Node profile based selection.
    # BUG FIXES: the attribute was misspelled 'criterial' (AttributeError
    # at runtime); profile_get was called without the context argument;
    # the local name 'map' shadowed the builtin.
    if self.criteria == self.OLDEST_PROFILE_FIRST:
        node_map = []
        for node in nodes:
            profile = db_api.profile_get(context, node.profile_id)
            node_map.append({'node': node,
                             'created_at': profile.created_time})
        sorted_map = sorted(node_map, key=lambda m: m['created_at'])
        for i in range(count):
            # Append the node record, consistent with the other branches
            # (previously the internal bookkeeping dict was appended).
            candidates.append(sorted_map[i]['node'])
        return candidates

    return []
def do_scale_out(self, cluster, policy_data):
    """Grow the cluster by creating a number of nodes.

    A negative count hint from the policy causes a shrink instead.

    :param cluster: The cluster object being scaled out.
    :param policy_data: Policy check output possibly containing a
        'creation' count hint.
    :returns: A (result, reason) tuple describing the outcome.
    """
    # We may get a scale count from the request directly, if that is the
    # case, we will use it for scaling. Or else, we check if we have got
    # hints from policy checking. We use policy output if any, or else
    # the count is set to 1 as default.
    count = self.inputs.get('count', 0)
    if count == 0:
        pd = policy_data.get('creation', None)
        if pd is not None:
            count = pd.get('count', 1)

    if count == 0:
        return self.RES_OK, 'No scaling needed based on policy checking'

    if count > 0:
        result, reason = self._create_nodes(cluster, count, policy_data)
    else:
        # Negative count: the policy asks for deletions instead.
        candidates = []
        nodes = db_api.node_get_all_by_cluster(self.context, cluster.id)
        # BUG FIX: the loop counter was initialized to the (negative)
        # count, so the loop never ran and _delete_nodes was called
        # with an empty candidate list.
        i = -count
        while i > 0:
            r = random.randrange(len(nodes))
            candidates.append(nodes[r].id)
            nodes.remove(nodes[r])
            i = i - 1
        result, reason = self._delete_nodes(cluster, candidates,
                                            policy_data)

    if result == self.RES_OK:
        reason = 'Cluster scaling succeeded'
        cluster.set_status(self.context, cluster.ACTIVE, reason)
    elif result in [self.RES_CANCEL, self.RES_TIMEOUT, self.RES_FAILED]:
        cluster.set_status(self.context, cluster.ERROR, reason)
    else:
        # RES_RETRY: leave the cluster status untouched.
        pass

    return result, reason
def get_all_by_cluster(cls, context, cluster_id, **kwargs):
    """Retrieve all node objects that belong to the given cluster.

    :param context: Request context for the DB query.
    :param cluster_id: ID of the cluster whose nodes are fetched.
    :param kwargs: Extra keyword arguments forwarded to the DB layer.
    :returns: A list of node objects built from the DB records.
    """
    records = db_api.node_get_all_by_cluster(context, cluster_id, **kwargs)
    return [cls._from_db_object(context, cls(), record)
            for record in records]