def detach(self, cluster):
    """Routine to be called when the policy is detached from a cluster.

    Deletes the server group recorded in the cluster-policy binding
    data, unless that group was inherited (i.e. not created by this
    policy).

    :param cluster: The cluster from which the policy is to be detached.
    :returns: A tuple of (True, reason) when the operation succeeds or
              there is nothing to clean up; otherwise a tuple of
              (False, error) where the error contains an error message.
    """
    reason = _('Servergroup resource deletion succeeded.')
    ctx = context.get_admin_context()
    binding = db_api.cluster_policy_get(ctx, cluster.id, self.id)
    # No binding record or no recorded data means there is nothing to
    # clean up; report success.
    if not binding or not binding.data:
        return True, reason
    policy_data = self._extract_policy_data(binding.data)
    if not policy_data:
        return True, reason
    group_id = policy_data.get('servergroup_id', None)
    inherited_group = policy_data.get('inherited_group', False)
    # Only delete the server group when this policy created it; an
    # inherited group is owned elsewhere and must be left alone.
    if group_id and not inherited_group:
        try:
            self.nova(cluster).delete_server_group(group_id)
        except Exception as ex:
            msg = _('Failed in deleting servergroup.')
            LOG.exception(_LE('%(msg)s: %(ex)s') % {
                'msg': msg, 'ex': six.text_type(ex)})
            return False, msg
    return True, reason
def load(cls, context, cluster_id, policy_id):
    """Retrieve a cluster-policy binding from the database.

    :raises: PolicyNotAttached if no binding exists for the given
             cluster and policy.
    """
    db_record = db_api.cluster_policy_get(context, cluster_id, policy_id)
    if db_record is None:
        raise exception.PolicyNotAttached(policy=policy_id,
                                          cluster=cluster_id)

    return cls._from_db_record(context, db_record)
def get(cls, context, cluster_id, policy_id):
    """Fetch the binding record and build an object from it."""
    record = db_api.cluster_policy_get(context, cluster_id, policy_id)
    result = cls._from_db_object(context, cls(), record)
    return result
def pre_op(self, cluster_id, action):
    """Routine to be called before a 'CLUSTER_SCALE_OUT' action.

    For this particular policy, we take this chance to intelligently
    select the most proper hypervisor/vsphere cluster to create nodes.
    In order to realize the function, we need to create construct meta
    to handle affinity/anti-affinity then update the profile with the
    specific parameters at first.

    :param cluster_id: ID of the cluster on which the relevant action
                       is to be executed.
    :param action: The action object that triggered this operation.
    :returns: Nothing.
    """
    zone_name = self.properties.get(self.AVAILABILITY_ZONE)
    if not zone_name and self.enable_drs:
        # we make a reasonable guess of the zone name for vSphere
        # support because the zone name is required in that case.
        zone_name = 'nova'

    # We respect other policies' decisions (if any) and fall back to
    # the action inputs if no hints are found.
    pd = action.data.get('creation', None)
    if pd is not None:
        count = pd.get('count', 1)
    elif action.action == consts.CLUSTER_SCALE_OUT:
        count = action.inputs.get('count', 1)
    else:
        # CLUSTER_RESIZE: let the scaling utils translate the resize
        # parameters into a creation plan; bail out if the resize does
        # not result in any node creation.
        db_cluster = db_api.cluster_get(action.context, cluster_id)
        su.parse_resize_params(action, db_cluster)
        if 'creation' not in action.data:
            return
        count = action.data['creation']['count']

    # The server group ID was stored in the binding data at attach time.
    cp = db_api.cluster_policy_get(action.context, cluster_id, self.id)
    policy_data = self._extract_policy_data(cp.data)
    pd_entry = {'servergroup': policy_data['servergroup_id']}

    # special handling for vSphere DRS case where we need to find out
    # the name of the vSphere host which has DRS enabled.
    if self.enable_drs:
        cluster_obj = db_api.cluster_get(action.context, cluster_id)
        nc = self.nova(cluster_obj)
        hypervisors = nc.hypervisor_list()
        hv_id = ''
        # NOTE(review): r'.*drs*' makes the trailing 's' optional, so
        # this matches any hostname containing 'dr'; '.*drs.*' looks
        # like what was intended -- confirm before changing.
        pattern = re.compile(r'.*drs*', re.I)
        for hypervisor in hypervisors:
            match = pattern.match(hypervisor.hypervisor_hostname)
            if match:
                hv_id = hypervisor.id
                break
        # No DRS-enabled host found: mark the action as failed and
        # persist the status before returning.
        if not hv_id:
            action.data['status'] = base.CHECK_ERROR
            action.data['status_reason'] = _('No suitable vSphere host '
                                             'is available.')
            action.store(action.context)
            return
        hv_info = nc.hypervisor_get(hv_id)
        hostname = hv_info['service']['host']
        # Pin the placement to the selected host via 'zone:host' syntax.
        pd_entry['zone'] = ":".join([zone_name, hostname])
    elif zone_name:
        pd_entry['zone'] = zone_name

    # Publish one placement entry per node to be created, then persist
    # the updated action data for downstream consumers.
    pd = {
        'count': count,
        'placements': [pd_entry] * count,
    }
    action.data.update({'placement': pd})
    action.store(action.context)

    return