def pre_op(self, cluster_id, action):
    '''Choose victims that can be deleted.'''

    if action.action == consts.CLUSTER_RESIZE:
        cluster = db_api.cluster_get(action.context, cluster_id)
        scaleutils.parse_resize_params(action, cluster)
        if 'deletion' not in action.data:
            return
        count = action.data['deletion']['count']
    else:  # CLUSTER_SCALE_IN or CLUSTER_DEL_NODES
        count = action.inputs.get('count', 1)

    candidates = action.inputs.get('candidates', [])
    # For CLUSTER_RESIZE and CLUSTER_SCALE_IN actions, use policy
    # criteria to select candidates.
    if len(candidates) == 0:
        candidates = self._select_candidates(action.context, cluster_id,
                                             count)
    pd = action.data.get('deletion', {})
    pd['candidates'] = candidates
    pd['destroy_after_deletion'] = self.destroy_after_deletion
    pd['grace_period'] = self.grace_period
    action.data.update({
        'status': base.CHECK_OK,
        'reason': _('Candidates generated'),
        'deletion': pd
    })
    action.store(action.context)

    return
def pre_op(self, cluster_id, action):
    '''Choose victims that can be deleted.'''

    if action.action == consts.CLUSTER_RESIZE:
        cluster = db_api.cluster_get(action.context, cluster_id)
        scaleutils.parse_resize_params(action, cluster)
        if 'deletion' not in action.data:
            return
        count = action.data['deletion']['count']
    else:  # CLUSTER_SCALE_IN or CLUSTER_DEL_NODES
        count = action.inputs.get('count', 1)

    pd = action.data.get('deletion', {})
    candidates = pd.get('candidates', [])
    # For certain operations (e.g. DEL_NODES), the candidates might
    # have been specified already.
    if len(candidates) == 0:
        candidates = self._select_candidates(action.context, cluster_id,
                                             count)

    pd['candidates'] = candidates
    pd['destroy_after_deletion'] = self.destroy_after_deletion
    pd['grace_period'] = self.grace_period
    action.data.update({
        'status': base.CHECK_OK,
        'reason': _('Candidates generated'),
        'deletion': pd
    })
    action.store(action.context)

    return
def _get_delete_candidates(self, cluster_id, action):
    deletion = action.data.get('deletion', None)
    # No deletion field in action.data which means no scaling
    # policy or deletion policy is attached.
    if deletion is None:
        candidates = None
        if action.action == consts.CLUSTER_DEL_NODES:
            # Get candidates from action.input
            candidates = action.inputs.get('candidates', [])
            count = len(candidates)
        elif action.action == consts.CLUSTER_RESIZE:
            # Calculate deletion count based on action input
            db_cluster = db_api.cluster_get(action.context, cluster_id)
            scaleutils.parse_resize_params(action, db_cluster)
            if 'deletion' not in action.data:
                return []
            else:
                count = action.data['deletion']['count']
        else:  # action.action == consts.CLUSTER_SCALE_IN
            count = 1
    else:
        count = deletion.get('count', 0)
        candidates = deletion.get('candidates', None)

    # Still no candidates available, pick count of nodes randomly
    if candidates is None:
        nodes = db_api.node_get_all_by_cluster(action.context,
                                               cluster_id=cluster_id)
        if count > len(nodes):
            count = len(nodes)
        candidates = scaleutils.nodes_by_random(nodes, count)

    return candidates
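# The candidate resolution above follows a strict precedence: decisions
# recorded by another policy in action.data, then explicit candidates from
# the action inputs, then a random pick. A minimal standalone sketch of
# that fallback, with hypothetical plain dicts standing in for the action
# fields and random.sample emulating scaleutils.nodes_by_random:
import random

def pick_candidates(deletion, inputs, cluster_nodes):
    """Hypothetical stand-in mirroring the precedence above."""
    if deletion:
        candidates = deletion.get('candidates')
        count = deletion.get('count', 0)
    else:
        candidates = inputs.get('candidates') or None
        count = len(candidates) if candidates else inputs.get('count', 1)
    if candidates is None:
        count = min(count, len(cluster_nodes))
        candidates = random.sample(cluster_nodes, count)
    return candidates

print(pick_candidates({}, {'count': 2}, ['n1', 'n2', 'n3']))  # 2 random IDs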
@classmethod
def load(cls, context, cluster_id=None, cluster=None, project_safe=True):
    '''Retrieve a cluster from database.'''
    if cluster is None:
        cluster = db_api.cluster_get(context, cluster_id,
                                     project_safe=project_safe)
        if cluster is None:
            raise exception.ClusterNotFound(cluster=cluster_id)

    return cls._from_db_record(context, cluster)
@classmethod
def load(cls, context, cluster_id=None, cluster=None, show_deleted=False):
    '''Retrieve a cluster from database.'''
    if cluster is None:
        cluster = db_api.cluster_get(context, cluster_id,
                                     show_deleted=show_deleted)
        if cluster is None:
            raise exception.ClusterNotFound(cluster=cluster_id)

    return cls._from_db_record(context, cluster)
def pre_op(self, cluster_id, action):
    status = base.CHECK_OK
    reason = _('Scaling request validated.')

    # Check if the action is expected by the policy
    if self.event != action.action:
        action.data.update({'status': status, 'reason': reason})
        action.store(action.context)
        return

    cluster = db_api.cluster_get(action.context, cluster_id)
    nodes = db_api.node_get_all_by_cluster(action.context, cluster_id)
    current_size = len(nodes)
    count = self._calculate_adjustment_count(current_size)

    # Use action input if count is provided
    count = action.inputs.get('count', count)

    if count <= 0:
        status = base.CHECK_ERROR
        reason = _("Count (%(count)s) invalid for action %(action)s."
                   ) % {'count': count, 'action': action.action}

    # Check size constraints
    if action.action == consts.CLUSTER_SCALE_IN:
        new_size = current_size - count
        if new_size < cluster.min_size:
            if self.best_effort:
                count = current_size - cluster.min_size
                reason = _('Do best effort scaling.')
            else:
                status = base.CHECK_ERROR
                reason = _('Attempted scaling below minimum size.')
    else:
        new_size = current_size + count
        if new_size > cluster.max_size:
            if self.best_effort:
                count = cluster.max_size - current_size
                reason = _('Do best effort scaling.')
            else:
                status = base.CHECK_ERROR
                reason = _('Attempted scaling above maximum size.')

    pd = {'status': status, 'reason': reason}
    if status == base.CHECK_OK:
        if action.action == consts.CLUSTER_SCALE_IN:
            pd['deletion'] = {'count': count}
        else:
            pd['creation'] = {'count': count}

    action.data.update(pd)
    action.store(action.context)

    return
def post_op(self, cluster_id, action):
    """Routine to be called after an action has been executed.

    For this particular policy, we take this chance to update the pool
    maintained by the load-balancer.

    :param cluster_id: The ID of the cluster on which a relevant action
        has been executed.
    :param action: The action object that triggered this operation.
    :returns: Nothing.
    """
    # TODO(Yanyanhu): Need special handling for cross-az scenario
    # which is supported by Neutron lbaas.
    creation = action.data.get('creation', None)
    nodes_added = creation.get('nodes', []) if creation else []
    if len(nodes_added) == 0:
        return

    db_cluster = db_api.cluster_get(action.context, cluster_id)
    params = self._build_conn_params(db_cluster)
    lb_driver = driver_base.SenlinDriver().loadbalancing(params)
    cp = cluster_policy.ClusterPolicy.load(action.context, cluster_id,
                                           self.id)
    policy_data = self._extract_policy_data(cp.data)
    lb_id = policy_data['loadbalancer']
    pool_id = policy_data['pool']
    port = self.pool_spec.get(self.POOL_PROTOCOL_PORT)
    subnet = self.pool_spec.get(self.POOL_SUBNET)

    # Add new nodes to lb pool
    for node_id in nodes_added:
        node = node_mod.Node.load(action.context, node_id=node_id)
        member_id = node.data.get('lb_member', None)
        if member_id:
            LOG.warning(_LW('Node %(n)s already in lb pool %(p)s.'),
                        {'n': node_id, 'p': pool_id})
            continue

        member_id = lb_driver.member_add(node, lb_id, pool_id, port,
                                         subnet)
        if member_id is None:
            action.data['status'] = base.CHECK_ERROR
            action.data['reason'] = _('Failed in adding new node(s) '
                                      'into lb pool.')
            return

        node.data.update({'lb_member': member_id})
        node.store(action.context)

    return
def pre_op(self, cluster_id, action):
    """Routine to be called before an action is executed.

    For this particular policy, we take this chance to update the pool
    maintained by the load-balancer.

    :param cluster_id: The ID of the cluster on which a relevant action
        is to be executed.
    :param action: The action object that triggered this operation.
    :returns: Nothing.
    """
    candidates = self._get_delete_candidates(cluster_id, action)
    if len(candidates) == 0:
        return

    db_cluster = db_api.cluster_get(action.context, cluster_id)
    params = self._build_conn_params(db_cluster)
    lb_driver = driver_base.SenlinDriver().loadbalancing(params)
    cp = cluster_policy.ClusterPolicy.load(action.context, cluster_id,
                                           self.id)
    policy_data = self._extract_policy_data(cp.data)
    lb_id = policy_data['loadbalancer']
    pool_id = policy_data['pool']

    # Remove nodes that will be deleted from lb pool
    for node_id in candidates:
        node = node_mod.Node.load(action.context, node_id=node_id)
        member_id = node.data.get('lb_member', None)
        if member_id is None:
            LOG.warning(_LW('Node %(n)s not found in lb pool %(p)s.'),
                        {'n': node_id, 'p': pool_id})
            continue

        res = lb_driver.member_remove(lb_id, pool_id, member_id)
        if res is not True:
            action.data['status'] = base.CHECK_ERROR
            action.data['reason'] = _('Failed in removing deleted '
                                      'node(s) from lb pool.')
            return

    deletion = action.data.get('deletion', {})
    deletion.update({'count': len(candidates), 'candidates': candidates})
    action.data.update({'deletion': deletion})

    return
def pre_op(self, cluster_id, action):
    """Routine to be called before an action is executed.

    For this particular policy, we take this chance to update the pool
    maintained by the load-balancer.

    :param cluster_id: The ID of the cluster on which a relevant action
        is to be executed.
    :param action: The action object that triggered this operation.
    :returns: Nothing.
    """
    candidates = self._get_delete_candidates(cluster_id, action)
    if len(candidates) == 0:
        return

    db_cluster = db_api.cluster_get(action.context, cluster_id)
    params = self._build_conn_params(db_cluster)
    lb_driver = driver_base.SenlinDriver().loadbalancing(params)
    cp = cluster_policy.ClusterPolicy.load(action.context, cluster_id,
                                           self.id)
    policy_data = self._extract_policy_data(cp.data)
    lb_id = policy_data['loadbalancer']
    pool_id = policy_data['pool']

    # Remove nodes that will be deleted from lb pool
    for node_id in candidates:
        node = node_mod.Node.load(action.context, node_id=node_id)
        member_id = node.data.get('lb_member', None)
        if member_id is None:
            LOG.warning(_LW('Node %(n)s not found in lb pool %(p)s.'),
                        {'n': node_id, 'p': pool_id})
            continue

        res = lb_driver.member_remove(lb_id, pool_id, member_id)
        if res is not True:
            action.data['status'] = base.CHECK_ERROR
            action.data['reason'] = _('Failed in removing deleted '
                                      'node(s) from lb pool.')
            return

    return
def _get_delete_candidates(self, cluster_id, action):
    deletion = action.data.get('deletion', None)
    # No deletion field in action.data which means no scaling
    # policy or deletion policy is attached.
    candidates = None
    if deletion is None:
        if action.action == consts.CLUSTER_DEL_NODES:
            # Get candidates from action.input
            candidates = action.inputs.get('candidates', [])
            count = len(candidates)
        elif action.action == consts.CLUSTER_RESIZE:
            # Calculate deletion count based on action input
            db_cluster = db_api.cluster_get(action.context, cluster_id)
            scaleutils.parse_resize_params(action, db_cluster)
            if 'deletion' not in action.data:
                return []
            else:
                count = action.data['deletion']['count']
        else:  # action.action == consts.CLUSTER_SCALE_IN
            count = 1
    else:
        count = deletion.get('count', 0)
        candidates = deletion.get('candidates', None)

    # Still no candidates available, pick count of nodes randomly
    if candidates is None:
        if count == 0:
            return []
        nodes = db_api.node_get_all_by_cluster(action.context,
                                               cluster_id=cluster_id)
        if count > len(nodes):
            count = len(nodes)
        candidates = scaleutils.nodes_by_random(nodes, count)

    deletion_data = action.data.get('deletion', {})
    deletion_data.update({'count': len(candidates),
                          'candidates': candidates})
    action.data.update({'deletion': deletion_data})

    return candidates
def _get_count(self, cluster_id, action):
    """Get number of nodes to create or delete.

    :param cluster_id: The ID of the target cluster.
    :param action: The action object which triggered this policy check.
    :return: An integer value which can be 1) positive - number of
             nodes to create; 2) negative - number of nodes to delete;
             3) 0 - something wrong happened, and the policy check
             failed.
    """
    if action.action == consts.CLUSTER_RESIZE:
        if action.data.get('deletion', None):
            return -action.data['deletion']['count']
        elif action.data.get('creation', None):
            return action.data['creation']['count']

        db_cluster = db_api.cluster_get(action.context, cluster_id)
        res = scaleutils.parse_resize_params(action, db_cluster)
        if res[0] == base.CHECK_ERROR:
            action.data['status'] = base.CHECK_ERROR
            action.data['reason'] = res[1]
            LOG.error(res[1])
            return 0

        if action.data.get('deletion', None):
            return -action.data['deletion']['count']
        else:
            return action.data['creation']['count']

    if action.action == consts.CLUSTER_SCALE_IN:
        pd = action.data.get('deletion', None)
        if pd is None:
            return -action.inputs.get('count', 1)
        else:
            return -pd.get('count', 1)

    # CLUSTER_SCALE_OUT: an action that inflates the cluster
    pd = action.data.get('creation', None)
    if pd is None:
        return action.inputs.get('count', 1)
    else:
        return pd.get('count', 1)
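# Callers rely on the sign convention documented above. A hypothetical
# standalone helper (not part of the policy) showing how a returned value
# would be interpreted:
def interpret_count(count):
    # positive -> create nodes, negative -> delete nodes,
    # zero -> the policy check failed
    if count > 0:
        return 'create %d node(s)' % count
    if count < 0:
        return 'delete %d node(s)' % -count
    return 'policy check failed'

print(interpret_count(2))   # create 2 node(s)
print(interpret_count(-3))  # delete 3 node(s)
print(interpret_count(0))   # policy check failed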
def post_op(self, cluster_id, action):
    """Routine to be called after an action has been executed.

    For this particular policy, we take this chance to update the pool
    maintained by the load-balancer.

    :param cluster_id: The ID of the cluster on which a relevant action
        has been executed.
    :param action: The action object that triggered this operation.
    :returns: Nothing.
    """
    nodes_added = action.outputs.get('nodes_added', [])
    nodes_removed = action.outputs.get('nodes_removed', [])
    if (len(nodes_added) == 0) and (len(nodes_removed) == 0):
        return

    db_cluster = db_api.cluster_get(action.context, cluster_id)
    params = self._build_conn_params(db_cluster)
    lb_driver = driver_base.SenlinDriver().loadbalancing(params)
    cp = cluster_policy.ClusterPolicy.load(action.context, cluster_id,
                                           self.id)
    policy_data = self._extract_policy_data(cp.data)
    lb_id = policy_data['loadbalancer']
    pool_id = policy_data['pool']
    port = self.pool_spec.get(self.POOL_PROTOCOL_PORT)
    subnet = self.pool_spec.get(self.POOL_SUBNET)

    # Remove nodes that have been deleted from lb pool
    for node_id in nodes_removed:
        node = node_mod.Node.load(action.context, node_id=node_id,
                                  show_deleted=True)
        member_id = node.data.get('lb_member', None)
        if member_id is None:
            LOG.warning(_LW('Node %(n)s not found in lb pool %(p)s.'),
                        {'n': node_id, 'p': pool_id})
            continue

        res = lb_driver.member_remove(lb_id, pool_id, member_id)
        if res is not True:
            action.data['status'] = base.CHECK_ERROR
            action.data['reason'] = _('Failed in removing deleted '
                                      'node(s) from lb pool.')
            return

    # Add new nodes to lb pool
    for node_id in nodes_added:
        node = node_mod.Node.load(action.context, node_id=node_id,
                                  show_deleted=True)
        member_id = node.data.get('lb_member', None)
        if member_id:
            LOG.warning(_LW('Node %(n)s already in lb pool %(p)s.'),
                        {'n': node_id, 'p': pool_id})
            continue

        member_id = lb_driver.member_add(node, lb_id, pool_id, port,
                                         subnet)
        if member_id is None:
            action.data['status'] = base.CHECK_ERROR
            action.data['reason'] = _('Failed in adding new node(s) '
                                      'into lb pool.')
            return

        node.data.update({'lb_member': member_id})
        node.store(action.context)

    return
def pre_op(self, cluster_id, action):
    """Choose victims that can be deleted.

    :param cluster_id: ID of the cluster to be handled.
    :param action: The action object that triggered this policy.
    """
    victims = action.inputs.get('candidates', [])
    if len(victims) > 0:
        self._update_action(action, victims)
        return

    db_cluster = None
    regions = None
    zones = None

    deletion = action.data.get('deletion', {})
    if deletion:
        # there are policy decisions
        count = deletion['count']
        regions = deletion.get('regions', None)
        zones = deletion.get('zones', None)
    # No policy decision, check action itself: SCALE_IN
    elif action.action == consts.CLUSTER_SCALE_IN:
        count = action.inputs.get('count', 1)
    # No policy decision, check action itself: RESIZE
    else:
        db_cluster = db_api.cluster_get(action.context, cluster_id)
        res = scaleutils.parse_resize_params(action, db_cluster)
        if res[0] == base.CHECK_ERROR:
            action.data['status'] = base.CHECK_ERROR
            action.data['reason'] = res[1]
            LOG.error(res[1])
            return

        if 'deletion' not in action.data:
            return
        count = action.data['deletion']['count']

    cluster = cluster_mod.Cluster.load(action.context, cluster=db_cluster,
                                       cluster_id=cluster_id)
    # Cross-region
    if regions:
        victims = self._victims_by_regions(cluster, regions)
        self._update_action(action, victims)
        return

    # Cross-AZ
    if zones:
        victims = self._victims_by_zones(cluster, zones)
        self._update_action(action, victims)
        return

    if count > len(cluster.nodes):
        count = len(cluster.nodes)

    if self.criteria == self.RANDOM:
        victims = scaleutils.nodes_by_random(cluster.nodes, count)
    elif self.criteria == self.OLDEST_PROFILE_FIRST:
        victims = scaleutils.nodes_by_profile_age(cluster.nodes, count)
    elif self.criteria == self.OLDEST_FIRST:
        victims = scaleutils.nodes_by_age(cluster.nodes, count, True)
    else:
        victims = scaleutils.nodes_by_age(cluster.nodes, count, False)

    self._update_action(action, victims)
    return
@classmethod
def get(cls, context, cluster_id, **kwargs):
    obj = db_api.cluster_get(context, cluster_id, **kwargs)
    return cls._from_db_object(context, cls(), obj)
def pre_op(self, cluster_id, action):
    """The hook function that is executed before the action.

    The checking result is stored in the ``data`` property of the action
    object rather than returned directly from the function.

    :param cluster_id: The ID of the target cluster.
    :param action: Action instance against which the policy is being
        checked.
    :return: None.
    """
    # Use action input if count is provided
    count = action.inputs.get('count', None)
    current = db_api.node_count_by_cluster(action.context, cluster_id)
    if count is None:
        # count not specified, calculate it
        count = self._calculate_adjustment_count(current)

    # Count must be positive value
    try:
        count = utils.parse_int_param('count', count, allow_zero=False)
    except exception.InvalidParameter:
        action.data.update({
            'status': base.CHECK_ERROR,
            'reason': _("Invalid count (%(c)s) for action '%(a)s'."
                        ) % {'c': count, 'a': action.action}
        })
        action.store(action.context)
        return

    # Check size constraints
    cluster = db_api.cluster_get(action.context, cluster_id)
    if action.action == consts.CLUSTER_SCALE_IN:
        if self.best_effort:
            count = min(count, current - cluster.min_size)
        result = su.check_size_params(cluster, current - count,
                                      strict=not self.best_effort)
    else:
        if self.best_effort:
            count = min(count, cluster.max_size - current)
        result = su.check_size_params(cluster, current + count,
                                      strict=not self.best_effort)

    if result:
        # failed validation
        pd = {
            'status': base.CHECK_ERROR,
            'reason': result
        }
    else:
        # passed validation
        pd = {
            'status': base.CHECK_OK,
            'reason': _('Scaling request validated.'),
        }
        if action.action == consts.CLUSTER_SCALE_IN:
            pd['deletion'] = {'count': count}
        else:
            pd['creation'] = {'count': count}

    action.data.update(pd)
    action.store(action.context)

    return
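# A quick numeric sketch of the best-effort clamp used above; the values
# are assumed for illustration only. With best_effort enabled, a scale-in
# request is reduced so the cluster never drops below min_size instead of
# failing validation outright.
current, requested, min_size = 6, 5, 4
count = min(requested, current - min_size)  # best-effort clamp
print(count, current - count)               # 2 4 -> floor at min_size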
def pre_op(self, cluster_id, action):
    """Choose victims that can be deleted.

    :param cluster_id: ID of the cluster to be handled.
    :param action: The action object that triggered this policy.
    """
    victims = action.inputs.get('candidates', [])
    if len(victims) > 0:
        self._update_action(action, victims)
        return

    db_cluster = None
    regions = None
    zones = None

    deletion = action.data.get('deletion', {})
    if deletion:
        # there are policy decisions
        count = deletion['count']
        regions = deletion.get('regions', None)
        zones = deletion.get('zones', None)
    # No policy decision, check action itself: SCALE_IN
    elif action.action == consts.CLUSTER_SCALE_IN:
        count = action.inputs.get('count', 1)
    # No policy decision, check action itself: RESIZE
    else:
        db_cluster = db_api.cluster_get(action.context, cluster_id)
        scaleutils.parse_resize_params(action, db_cluster)
        if 'deletion' not in action.data:
            return
        count = action.data['deletion']['count']

    cluster = cluster_mod.Cluster.load(action.context, cluster=db_cluster,
                                       cluster_id=cluster_id)
    # Cross-region
    if regions:
        victims = self._victims_by_regions(cluster, regions)
        self._update_action(action, victims)
        return

    # Cross-AZ
    if zones:
        victims = self._victims_by_zones(cluster, zones)
        self._update_action(action, victims)
        return

    if count > len(cluster.nodes):
        count = len(cluster.nodes)

    if self.criteria == self.RANDOM:
        victims = scaleutils.nodes_by_random(cluster.nodes, count)
    elif self.criteria == self.OLDEST_PROFILE_FIRST:
        victims = scaleutils.nodes_by_profile_age(cluster.nodes, count)
    elif self.criteria == self.OLDEST_FIRST:
        victims = scaleutils.nodes_by_age(cluster.nodes, count, True)
    else:
        victims = scaleutils.nodes_by_age(cluster.nodes, count, False)

    self._update_action(action, victims)
    return
def post_op(self, cluster_id, action):
    """Routine to be called after an action has been executed.

    For this particular policy, we take this chance to update the pool
    maintained by the load-balancer.

    :param cluster_id: The ID of the cluster on which a relevant action
        has been executed.
    :param action: The action object that triggered this operation.
    :returns: Nothing.
    """
    nodes = action.data.get('nodes', [])
    if len(nodes) == 0:
        return

    db_cluster = db_api.cluster_get(action.context, cluster_id)
    params = self._build_conn_params(db_cluster)
    lb_driver = driver_base.SenlinDriver().loadbalancing(params)
    cp = cluster_policy.ClusterPolicy.load(action.context, cluster_id,
                                           self.id)
    policy_data = self._extract_policy_data(cp.data)
    lb_id = policy_data['loadbalancer']
    pool_id = policy_data['pool']
    port = self.pool_spec.get(self.POOL_PROTOCOL_PORT)
    subnet = self.pool_spec.get(self.POOL_SUBNET)

    for node_id in nodes:
        node = node_mod.Node.load(action.context, node_id=node_id,
                                  show_deleted=True)
        member_id = node.data.get('lb_member')

        if (action.action in (consts.CLUSTER_DEL_NODES,
                              consts.CLUSTER_SCALE_IN) or
                (action.action == consts.CLUSTER_RESIZE and
                 action.data.get('deletion'))):
            if member_id is None:
                LOG.warning(_LW('Node %(node)s not found in loadbalancer '
                                'pool %(pool)s.'),
                            {'node': node_id, 'pool': pool_id})
                continue

            # Remove nodes that have been deleted from lb pool
            res = lb_driver.member_remove(lb_id, pool_id, member_id)
            if res is not True:
                action.data['status'] = base.CHECK_ERROR
                action.data['reason'] = _('Failed in removing deleted '
                                          'node from lb pool')
                return

        if (action.action in (consts.CLUSTER_ADD_NODES,
                              consts.CLUSTER_SCALE_OUT) or
                (action.action == consts.CLUSTER_RESIZE and
                 action.data.get('creation'))):
            if member_id:
                LOG.warning(_LW('Node %(node)s already in loadbalancer '
                                'pool %(pool)s.'),
                            {'node': node_id, 'pool': pool_id})
                continue

            res = lb_driver.member_add(node, lb_id, pool_id, port, subnet)
            if res is None:
                action.data['status'] = base.CHECK_ERROR
                action.data['reason'] = _('Failed in adding new node '
                                          'into lb pool')
                return

            node.data.update({'lb_member': res})
            node.store(action.context)

    return
def pre_op(self, cluster_id, action):
    """Routine to be called before a 'CLUSTER_SCALE_OUT' action.

    For this particular policy, we take this chance to intelligently
    select the most proper hypervisor/vsphere cluster to create nodes.
    To realize this function, we first construct the metadata needed to
    handle affinity/anti-affinity, then update the profile with the
    specific parameters.

    :param cluster_id: ID of the cluster on which the relevant action
        is to be executed.
    :param action: The action object that triggered this operation.
    :returns: Nothing.
    """
    zone_name = self.properties.get(self.AVAILABILITY_ZONE)
    if not zone_name and self.enable_drs:
        # we make a reasonable guess of the zone name for vSphere
        # support because the zone name is required in that case.
        zone_name = 'nova'

    # we respect other policies' decisions (if any) and fall back to
    # the action inputs if no hints are found.
    pd = action.data.get('creation', None)
    if pd is not None:
        count = pd.get('count', 1)
    elif action.action == consts.CLUSTER_SCALE_OUT:
        count = action.inputs.get('count', 1)
    else:  # CLUSTER_RESIZE
        db_cluster = db_api.cluster_get(action.context, cluster_id)
        su.parse_resize_params(action, db_cluster)
        if 'creation' not in action.data:
            return
        count = action.data['creation']['count']

    cp = db_api.cluster_policy_get(action.context, cluster_id, self.id)
    policy_data = self._extract_policy_data(cp.data)
    pd_entry = {'servergroup': policy_data['servergroup_id']}

    # special handling for vSphere DRS case where we need to find out
    # the name of the vSphere host which has DRS enabled.
    if self.enable_drs:
        cluster_obj = db_api.cluster_get(action.context, cluster_id)
        nc = self.nova(cluster_obj)
        hypervisors = nc.hypervisor_list()
        hv_id = ''
        pattern = re.compile(r'.*drs*', re.I)
        for hypervisor in hypervisors:
            match = pattern.match(hypervisor.hypervisor_hostname)
            if match:
                hv_id = hypervisor.id
                break

        if not hv_id:
            action.data['status'] = base.CHECK_ERROR
            action.data['status_reason'] = _('No suitable vSphere host '
                                             'is available.')
            action.store(action.context)
            return

        hv_info = nc.hypervisor_get(hv_id)
        hostname = hv_info['service']['host']
        pd_entry['zone'] = ":".join([zone_name, hostname])
    elif zone_name:
        pd_entry['zone'] = zone_name

    pd = {
        'count': count,
        'placements': [pd_entry] * count,
    }
    action.data.update({'placement': pd})
    action.store(action.context)

    return
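# A standalone illustration of the hostname match above. Note a subtlety
# in the pattern r'.*drs*': the trailing 's*' means zero or more 's', so
# any hostname containing 'dr' matches, not just names containing 'drs'.
# The hostnames below are hypothetical.
import re

pattern = re.compile(r'.*drs*', re.I)
print(bool(pattern.match('esx-DRS-01')))  # True
print(bool(pattern.match('hydra-host')))  # True (matches with zero 's')
print(bool(pattern.match('compute-7')))   # False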
def _create_nodes(self, count):
    """Utility method for node creation.

    :param count: Number of nodes to create.
    :returns: A tuple comprised of the result and reason.
    """
    if count == 0:
        return self.RES_OK, ''

    placement = self.data.get('placement', None)

    db_cluster = db_api.cluster_get(self.context, self.cluster.id)
    index = db_cluster.next_index

    nodes = []
    for m in range(count):
        kwargs = {
            'index': index + m,
            'metadata': {},
            'user': self.cluster.user,
            'project': self.cluster.project,
            'domain': self.cluster.domain,
        }
        if placement is not None:
            # We assume placement is a list
            kwargs['data'] = {'placement': placement[m]}

        name = 'node-%s-%003d' % (self.cluster.id[:8], index + m)
        node = node_mod.Node(name, self.cluster.profile_id,
                             self.cluster.id, context=self.context,
                             **kwargs)
        node.store(self.context)

        nodes.append(node)

        kwargs = {
            'name': 'node_create_%s' % node.id[:8],
            'cause': base.CAUSE_DERIVED,
            'user': self.context.user,
            'project': self.context.project,
            'domain': self.context.domain,
        }
        action = base.Action(node.id, 'NODE_CREATE', **kwargs)
        action.store(self.context)

        # Build dependency and make the new action ready
        db_api.action_add_dependency(self.context, action.id, self.id)
        action.set_status(action.READY)

        dispatcher.start_action(action_id=action.id)

    if count > 0:
        # Wait for the node creation actions to complete
        res, reason = self._wait_for_dependents()
        if res == self.RES_OK:
            # TODO(anyone): avoid passing nodes in this way
            self.data['nodes'] = [n.id for n in nodes]
            for node in nodes:
                self.cluster.add_node(node)

        return res, reason

    return self.RES_OK, ''
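# A standalone check (plain Python, hypothetical cluster ID) of the node
# naming scheme used in _create_nodes above. The '%003d' conversion
# behaves the same as '%03d': zero-padding the index to width 3.
cluster_id = 'f1de0550-ab52-4d6a-b5ea-e9566a4de39c'  # hypothetical ID
index = 7
print('node-%s-%003d' % (cluster_id[:8], index))  # node-f1de0550-007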