Example #1
    def pre_op(self, cluster_id, action):
        '''Choose victims that can be deleted.'''

        if action.action == consts.CLUSTER_RESIZE:
            cluster = db_api.cluster_get(action.context, cluster_id)
            scaleutils.parse_resize_params(action, cluster)
            if 'deletion' not in action.data:
                return
            count = action.data['deletion']['count']
        else:  # CLUSTER_SCALE_IN or CLUSTER_DEL_NODES
            count = action.inputs.get('count', 1)
        pd = action.data.get('deletion', {})
        candidates = pd.get('candidates', [])

        # For certain operations (e.g. DEL_NODES), the candidates might
        # have been specified.
        if len(candidates) == 0:
            candidates = self._select_candidates(action.context, cluster_id,
                                                 count)
        pd['candidates'] = candidates
        pd['destroy_after_deletion'] = self.destroy_after_deletion
        pd['grace_period'] = self.grace_period
        action.data.update({
            'status': base.CHECK_OK,
            'reason': _('Candidates generated'),
            'deletion': pd
        })
        action.store(action.context)

        return
Example #2
    def _get_delete_candidates(self, cluster_id, action):
        deletion = action.data.get('deletion', None)
        # No deletion field in action.data which means no scaling
        # policy or deletion policy is attached.
        if deletion is None:
            candidates = None
            if action.action == consts.CLUSTER_DEL_NODES:
                # Get candidates from action.inputs
                candidates = action.inputs.get('candidates', [])
                count = len(candidates)
            elif action.action == consts.CLUSTER_RESIZE:
                # Calculate deletion count based on action input
                db_cluster = db_api.cluster_get(action.context, cluster_id)
                scaleutils.parse_resize_params(action, db_cluster)
                if 'deletion' not in action.data:
                    return []
                else:
                    count = action.data['deletion']['count']
            else:  # action.action == consts.CLUSTER_SCALE_IN
                count = 1
        else:
            count = deletion.get('count', 0)
            candidates = deletion.get('candidates', None)

        # Still no candidates available, pick count of nodes randomly
        if candidates is None:
            nodes = db_api.node_get_all_by_cluster(action.context,
                                                   cluster_id=cluster_id)
            if count > len(nodes):
                count = len(nodes)
            candidates = scaleutils.nodes_by_random(nodes, count)

        return candidates
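The random fallback above delegates to scaleutils.nodes_by_random. A minimal
sketch of such a helper, assuming the node objects expose an 'id' attribute
and that callers cap 'count' at the number of nodes (as the snippets in this
section do); the real implementation may differ:

import random


def nodes_by_random(nodes, count):
    """Sketch: randomly pick ``count`` node IDs from ``nodes``."""
    # random.sample copies rather than mutating the caller's list; it raises
    # ValueError if count exceeds len(nodes), hence the capping done above.
    return [node.id for node in random.sample(nodes, count)]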
Example #3
    def pre_op(self, cluster_id, action):
        '''Choose victims that can be deleted.'''

        if action.action == consts.CLUSTER_RESIZE:
            cluster = db_api.cluster_get(action.context, cluster_id)
            scaleutils.parse_resize_params(action, cluster)
            if 'deletion' not in action.data:
                return
            count = action.data['deletion']['count']
        else:  # CLUSTER_SCALE_IN or CLUSTER_DEL_NODES
            count = action.inputs.get('count', 1)
        candidates = action.inputs.get('candidates', [])

        # For CLUSTER_RESIZE and CLUSTER_SCALE_IN actions, use policy
        # criteria to select candidates.
        if len(candidates) == 0:
            candidates = self._select_candidates(action.context, cluster_id,
                                                 count)
        pd = action.data.get('deletion', {})
        pd['candidates'] = candidates
        pd['destroy_after_deletion'] = self.destroy_after_deletion
        pd['grace_period'] = self.grace_period
        action.data.update({
            'status': base.CHECK_OK,
            'reason': _('Candidates generated'),
            'deletion': pd
        })
        action.store(action.context)

        return
Example #4
    def test_parse_resize_params(self):
        action = mock.Mock()
        cluster = mock.Mock()
        # delete nodes
        action.inputs = {
            consts.ADJUSTMENT_TYPE: consts.EXACT_CAPACITY,
            consts.ADJUSTMENT_NUMBER: 4,
            consts.ADJUSTMENT_MIN_SIZE: 3,
            consts.ADJUSTMENT_MAX_SIZE: 10,
            consts.ADJUSTMENT_MIN_STEP: None,
            consts.ADJUSTMENT_STRICT: True,
        }
        action.data = {}
        action.RES_OK = 'OK'
        cluster.desired_capacity = 6
        result, reason = su.parse_resize_params(action, cluster)
        self.assertEqual('OK', result)
        self.assertEqual('', reason)
        self.assertEqual({'deletion': {'count': 2}}, action.data)
        # create nodes
        action.inputs = {
            consts.ADJUSTMENT_TYPE: consts.EXACT_CAPACITY,
            consts.ADJUSTMENT_NUMBER: 9,
            consts.ADJUSTMENT_MIN_SIZE: 3,
            consts.ADJUSTMENT_MAX_SIZE: 10,
            consts.ADJUSTMENT_MIN_STEP: None,
            consts.ADJUSTMENT_STRICT: True,
        }
        action.data = {}
        result, reason = su.parse_resize_params(action, cluster)
        self.assertEqual('OK', result)
        self.assertEqual('', reason)
        self.assertEqual({'creation': {'count': 3}}, action.data)
        # resize params are incorrect
        action.inputs = {
            consts.ADJUSTMENT_TYPE: consts.EXACT_CAPACITY,
            consts.ADJUSTMENT_NUMBER: 11,
            consts.ADJUSTMENT_MIN_SIZE: 3,
            consts.ADJUSTMENT_MAX_SIZE: 10,
            consts.ADJUSTMENT_MIN_STEP: None,
            consts.ADJUSTMENT_STRICT: True,
        }
        action.data = {}
        action.RES_ERROR = 'ERROR'
        result, reason = su.parse_resize_params(action, cluster)
        self.assertEqual('ERROR', result)
        msg = _('The target capacity (11) is greater than '
                'the specified max_size (10).')
        self.assertEqual(msg, reason)
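The three cases exercised above (shrink to 4, grow to 9, and reject 11
against max_size 10, all with a current desired capacity of 6) follow from
the EXACT_CAPACITY handling. A simplified, self-contained sketch of that one
case, for illustration only; the real parse_resize_params also handles the
other adjustment types, min_step, strictness and best-effort scaling, and
records its result on the action object:

def parse_exact_capacity(desired, current, max_size):
    """Sketch of the EXACT_CAPACITY case: return (result, data, reason)."""
    if desired > max_size:
        reason = ('The target capacity (%s) is greater than the specified '
                  'max_size (%s).' % (desired, max_size))
        return 'ERROR', {}, reason
    data = {}
    if desired < current:
        data['deletion'] = {'count': current - desired}
    elif desired > current:
        data['creation'] = {'count': desired - current}
    return 'OK', data, ''

For example, parse_exact_capacity(4, 6, 10) yields
('OK', {'deletion': {'count': 2}}, ''), matching the first assertion above.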
Example #5
    def _get_delete_candidates(self, cluster_id, action):
        deletion = action.data.get('deletion', None)
        # No deletion field in action.data which means no scaling
        # policy or deletion policy is attached.
        candidates = None
        if deletion is None:
            if action.action == consts.NODE_DELETE:
                candidates = [action.entity.id]
                count = 1
            elif action.action == consts.CLUSTER_DEL_NODES:
                # Get candidates from action.inputs
                candidates = action.inputs.get('candidates', [])
                count = len(candidates)
            elif action.action == consts.CLUSTER_RESIZE:
                # Calculate deletion count based on action input
                cluster = action.entity
                current = len(cluster.nodes)
                scaleutils.parse_resize_params(action, cluster, current)
                if 'deletion' not in action.data:
                    return []
                else:
                    count = action.data['deletion']['count']
            else:  # action.action == consts.CLUSTER_SCALE_IN
                count = 1
        elif action.action == consts.CLUSTER_REPLACE_NODES:
            candidates = list(action.inputs['candidates'].keys())
            count = len(candidates)
        else:
            count = deletion.get('count', 0)
            candidates = deletion.get('candidates', None)

        # Still no candidates available, pick count of nodes randomly
        # This applies to CLUSTER_RESIZE and CLUSTER_SCALE_IN.
        if candidates is None:
            if count == 0:
                return []
            nodes = action.entity.nodes
            if count > len(nodes):
                count = len(nodes)
            candidates = scaleutils.nodes_by_random(nodes, count)
            deletion_data = action.data.get('deletion', {})
            deletion_data.update({
                'count': len(candidates),
                'candidates': candidates
            })
            action.data.update({'deletion': deletion_data})

        return candidates
Example #6
    def _get_delete_candidates(self, cluster_id, action):
        deletion = action.data.get('deletion', None)
        # No deletion field in action.data which means no scaling
        # policy or deletion policy is attached.
        candidates = None
        if deletion is None:
            if action.action == consts.CLUSTER_DEL_NODES:
                # Get candidates from action.inputs
                candidates = action.inputs.get('candidates', [])
                count = len(candidates)
            elif action.action == consts.CLUSTER_RESIZE:
                # Calculate deletion count based on action input
                db_cluster = db_api.cluster_get(action.context, cluster_id)
                scaleutils.parse_resize_params(action, db_cluster)
                if 'deletion' not in action.data:
                    return []
                else:
                    count = action.data['deletion']['count']
            else:  # action.action == consts.CLUSTER_SCALE_IN
                count = 1
        else:
            count = deletion.get('count', 0)
            candidates = deletion.get('candidates', None)

        # Still no candidates available, pick count of nodes randomly
        if candidates is None:
            if count == 0:
                return []
            nodes = db_api.node_get_all_by_cluster(action.context,
                                                   cluster_id=cluster_id)
            if count > len(nodes):
                count = len(nodes)
            candidates = scaleutils.nodes_by_random(nodes, count)
            deletion_data = action.data.get('deletion', {})
            deletion_data.update({
                'count': len(candidates),
                'candidates': candidates
            })
            action.data.update({'deletion': deletion_data})

        return candidates
Example #7
    def do_resize(self):
        """Handler for the CLUSTER_RESIZE action.

        :returns: A tuple containing the result and the corresponding reason.
        """
        node_list = self.cluster.nodes
        current_size = len(node_list)
        count, desired, candidates = self._get_action_data(current_size)
        grace_period = None
        # If a policy is attached to the cluster, use the policy data
        # directly; otherwise parse the resize params to get the action data.
        if count == 0:
            result, reason = scaleutils.parse_resize_params(self, self.cluster)
            if result != self.RES_OK:
                return result, reason
            count, desired, candidates = self._get_action_data(current_size)
        elif 'deletion' in self.data:
            grace_period = self.data['deletion']['grace_period']
        if candidates is not None and len(candidates) == 0:
            # Choose victims randomly
            i = count
            while i > 0:
                r = random.randrange(len(node_list))
                candidates.append(node_list[r].id)
                node_list.remove(node_list[r])
                i = i - 1

        # delete nodes if necessary
        if desired < current_size:
            if grace_period is not None:
                self._wait_before_deletion(grace_period)
            result, reason = self._delete_nodes(candidates)
            if result != self.RES_OK:
                return result, reason
        # Create new nodes if desired_capacity increased
        else:
            result, reason = self._create_nodes(count)
            if result != self.RES_OK:
                return result, reason

        reason = _('Cluster resize succeeded.')
        kwargs = {'desired_capacity': desired}
        min_size = self.inputs.get(consts.ADJUSTMENT_MIN_SIZE, None)
        max_size = self.inputs.get(consts.ADJUSTMENT_MAX_SIZE, None)
        if min_size is not None:
            kwargs['min_size'] = min_size
        if max_size is not None:
            kwargs['max_size'] = max_size
        self.cluster.set_status(self.context, self.cluster.ACTIVE, reason,
                                **kwargs)
        return self.RES_OK, reason
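do_resize above relies on self._get_action_data(current_size) to turn any
policy decision already stored in self.data into a (count, desired,
candidates) triple. A plausible sketch of such a helper, consistent with the
'creation'/'deletion' dictionaries shown elsewhere in this section; the
actual method may differ:

    def _get_action_data(self, current_size):
        """Sketch: derive (count, desired_capacity, candidates)."""
        if 'deletion' in self.data:
            count = self.data['deletion']['count']
            # shrinking: candidates may have been picked by a policy already
            candidates = self.data['deletion'].get('candidates', [])
            return count, current_size - count, candidates
        if 'creation' in self.data:
            count = self.data['creation']['count']
            # growing: no victims to select
            return count, current_size + count, None
        return 0, 0, None

This matches the checks in do_resize: a count of 0 means no policy decision
was recorded, an empty candidate list triggers random victim selection, and
None skips that selection on the creation path.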
Example #8
    def do_resize(self):
        """Handler for the CLUSTER_RESIZE action.

        :returns: A tuple containing the result and the corresponding reason.
        """
        self.cluster.set_status(self.context, self.cluster.RESIZING,
                                'Cluster resize started.')
        node_list = self.cluster.nodes
        current_size = len(node_list)
        count, desired, candidates = self._get_action_data(current_size)
        grace_period = None
        # If a policy is attached to the cluster, use the policy data
        # directly; otherwise parse the resize params to get the action data.
        if count == 0:
            result, reason = scaleutils.parse_resize_params(self, self.cluster)
            if result != self.RES_OK:
                status_reason = _('Cluster resizing failed: %s') % reason
                self.cluster.set_status(self.context, self.cluster.ACTIVE,
                                        status_reason)
                return result, reason
            count, desired, candidates = self._get_action_data(current_size)
        elif 'deletion' in self.data:
            grace_period = self.data['deletion'].get('grace_period', None)
        if candidates is not None and len(candidates) == 0:
            # Choose victims randomly
            candidates = scaleutils.nodes_by_random(self.cluster.nodes, count)

        # delete nodes if necessary
        if desired < current_size:
            if grace_period is not None:
                self._wait_before_deletion(grace_period)
            result, reason = self._delete_nodes(candidates)
        # Create new nodes if desired_capacity increased
        else:
            result, reason = self._create_nodes(count)

        if result != self.RES_OK:
            self.cluster.set_status(self.context, self.cluster.WARNING, reason)
            return result, reason

        reason = _('Cluster resize succeeded.')
        kwargs = {'desired_capacity': desired}
        min_size = self.inputs.get(consts.ADJUSTMENT_MIN_SIZE, None)
        max_size = self.inputs.get(consts.ADJUSTMENT_MAX_SIZE, None)
        if min_size is not None:
            kwargs['min_size'] = min_size
        if max_size is not None:
            kwargs['max_size'] = max_size
        self.cluster.set_status(self.context, self.cluster.ACTIVE, reason,
                                **kwargs)
        return self.RES_OK, reason
Example #9
    def do_resize(self):
        """Handler for the CLUSTER_RESIZE action.

        :returns: A tuple containing the result and the corresponding reason.
        """
        self.cluster.set_status(self.context, self.cluster.RESIZING,
                                'Cluster resize started.')
        node_list = self.cluster.nodes
        current_size = len(node_list)
        count, desired, candidates = self._get_action_data(current_size)
        grace_period = 0
        # If a policy is attached to the cluster, use the policy data
        # directly; otherwise parse the resize params to get the action data.
        if count == 0:
            result, reason = scaleutils.parse_resize_params(self, self.cluster)
            if result != self.RES_OK:
                status_reason = _('Cluster resizing failed: %s') % reason
                self.cluster.set_status(self.context, self.cluster.ACTIVE,
                                        status_reason)
                return result, reason
            count, desired, candidates = self._get_action_data(current_size)
        elif 'deletion' in self.data:
            grace_period = self.data['deletion'].get('grace_period', 0)
        if candidates is not None and len(candidates) == 0:
            # Choose victims randomly
            candidates = scaleutils.nodes_by_random(self.cluster.nodes, count)

        # delete nodes if necessary
        if desired < current_size:
            self._sleep(grace_period)
            result, reason = self._delete_nodes(candidates)
        # Create new nodes if desired_capacity increased
        else:
            result, reason = self._create_nodes(count)

        if result != self.RES_OK:
            self.cluster.set_status(self.context, self.cluster.WARNING, reason)
            return result, reason

        reason = _('Cluster resize succeeded.')
        kwargs = {'desired_capacity': desired}
        min_size = self.inputs.get(consts.ADJUSTMENT_MIN_SIZE, None)
        max_size = self.inputs.get(consts.ADJUSTMENT_MAX_SIZE, None)
        if min_size is not None:
            kwargs['min_size'] = min_size
        if max_size is not None:
            kwargs['max_size'] = max_size
        self.cluster.set_status(self.context, self.cluster.ACTIVE, reason,
                                **kwargs)
        return self.RES_OK, reason
Example #10
    def _get_count(self, cluster_id, action):
        """Get number of nodes to create or delete.

        :param cluster_id: The ID of the target cluster.
        :param action: The action object which triggered this policy check.
        :return: An integer value which can be 1) positive - number of nodes
                 to create; 2) negative - number of nodes to delete; 3) 0 -
                 something wrong happened, and the policy check failed.
        """
        if action.action == consts.NODE_CREATE:
            # skip the policy if the profile context already has a region_name
            profile = action.node.rt['profile']
            if 'region_name' in profile.properties[profile.CONTEXT]:
                return 0
            else:
                return 1

        if action.action == consts.CLUSTER_RESIZE:
            if action.data.get('deletion', None):
                return -action.data['deletion']['count']
            elif action.data.get('creation', None):
                return action.data['creation']['count']

            db_cluster = co.Cluster.get(action.context, cluster_id)
            curr = no.Node.count_by_cluster(action.context, cluster_id)
            res = scaleutils.parse_resize_params(action, db_cluster, curr)
            if res[0] == base.CHECK_ERROR:
                action.data['status'] = base.CHECK_ERROR
                action.data['reason'] = res[1]
                LOG.error(res[1])
                return 0

            if action.data.get('deletion', None):
                return -action.data['deletion']['count']
            else:
                return action.data['creation']['count']

        if action.action == consts.CLUSTER_SCALE_IN:
            pd = action.data.get('deletion', None)
            if pd is None:
                return -action.inputs.get('count', 1)
            else:
                return -pd.get('count', 1)

        # CLUSTER_SCALE_OUT: an action that inflates the cluster
        pd = action.data.get('creation', None)
        if pd is None:
            return action.inputs.get('count', 1)
        else:
            return pd.get('count', 1)
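As a usage illustration of the sign convention documented in the docstring
above, a hypothetical test fragment in the style of the other test snippets
in this section; 'self.policy', mock and consts are assumed to be available
and this fragment is not taken from the source:

    def test_get_count_scale_actions(self):
        # 'self.policy' is assumed to be the placement policy under test,
        # set up elsewhere; illustrative only.
        action = mock.Mock(action=consts.CLUSTER_SCALE_IN, data={},
                           inputs={'count': 2})
        self.assertEqual(-2, self.policy._get_count('FAKE_CLUSTER', action))

        action = mock.Mock(action=consts.CLUSTER_SCALE_OUT, data={},
                           inputs={'count': 3})
        self.assertEqual(3, self.policy._get_count('FAKE_CLUSTER', action))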
Example #11
    def _get_count(self, cluster_id, action):
        """Get number of nodes to create or delete.

        :param cluster_id: The ID of the target cluster.
        :param action: The action object which triggered this policy check.
        :return: An integer value which can be 1) positive - number of nodes
                 to create; 2) negative - number of nodes to delete; 3) 0 -
                 something wrong happened, and the policy check failed.
        """
        if action.action == consts.NODE_CREATE:
            # skip the policy if availability zone is specified in profile
            profile = action.node.rt['profile']
            if profile.properties[profile.AVAILABILITY_ZONE]:
                return 0
            return 1

        if action.action == consts.CLUSTER_RESIZE:
            if action.data.get('deletion', None):
                return -action.data['deletion']['count']
            elif action.data.get('creation', None):
                return action.data['creation']['count']

            db_cluster = co.Cluster.get(action.context, cluster_id)
            res = scaleutils.parse_resize_params(action, db_cluster)
            if res[0] == base.CHECK_ERROR:
                action.data['status'] = base.CHECK_ERROR
                action.data['reason'] = res[1]
                LOG.error(res[1])
                return 0

            if action.data.get('deletion', None):
                return -action.data['deletion']['count']
            else:
                return action.data['creation']['count']

        if action.action == consts.CLUSTER_SCALE_IN:
            pd = action.data.get('deletion', None)
            if pd is None:
                return -action.inputs.get('count', 1)
            else:
                return -pd.get('count', 1)

        # CLUSTER_SCALE_OUT: an action that inflates the cluster
        pd = action.data.get('creation', None)
        if pd is None:
            return action.inputs.get('count', 1)
        else:
            return pd.get('count', 1)
Example #12
    def do_resize(self):
        """Handler for the CLUSTER_RESIZE action.

        :returns: A tuple containing the result and the corresponding reason.
        """
        node_list = self.cluster.nodes
        current_size = len(node_list)
        count, desired, candidates = self._get_action_data(current_size)
        # If a policy is attached to the cluster, use the policy data
        # directly; otherwise parse the resize params to get the action data.
        if count == 0:
            result, reason = scaleutils.parse_resize_params(self, self.cluster)
            if result != self.RES_OK:
                return result, reason
            count, desired, candidates = self._get_action_data(current_size)
        if candidates is not None and len(candidates) == 0:
            # Choose victims randomly
            i = count
            while i > 0:
                r = random.randrange(len(node_list))
                candidates.append(node_list[r].id)
                node_list.remove(node_list[r])
                i = i - 1

        # delete nodes if necessary
        if desired < current_size:
            result, reason = self._delete_nodes(candidates)
            if result != self.RES_OK:
                return result, reason
        # Create new nodes if desired_capacity increased
        else:
            result, reason = self._create_nodes(count)
            if result != self.RES_OK:
                return result, reason

        reason = _('Cluster resize succeeded.')
        kwargs = {'desired_capacity': desired}
        min_size = self.inputs.get(consts.ADJUSTMENT_MIN_SIZE, None)
        max_size = self.inputs.get(consts.ADJUSTMENT_MAX_SIZE, None)
        if min_size is not None:
            kwargs['min_size'] = min_size
        if max_size is not None:
            kwargs['max_size'] = max_size
        self.cluster.set_status(self.context, self.cluster.ACTIVE, reason,
                                **kwargs)
        return self.RES_OK, reason
Example #13
    def test_parse_resize_params_creation(self):
        action = mock.Mock(RES_OK='OK')
        cluster = mock.Mock()
        action.inputs = {
            consts.ADJUSTMENT_TYPE: consts.EXACT_CAPACITY,
            consts.ADJUSTMENT_NUMBER: 9,
            consts.ADJUSTMENT_MIN_SIZE: 3,
            consts.ADJUSTMENT_MAX_SIZE: 10,
            consts.ADJUSTMENT_MIN_STEP: None,
            consts.ADJUSTMENT_STRICT: True,
        }
        action.data = {}

        result, reason = su.parse_resize_params(action, cluster, 6)

        self.assertEqual('OK', result)
        self.assertEqual('', reason)
        self.assertEqual({'creation': {'count': 3}}, action.data)
Example #14
    def pre_op(self, cluster_id, action, **args):
        """Hook before action execution.

        One of the tasks for this routine is to disable the health policy if
        the action is a request that will shrink the cluster. The reason is
        that the policy may attempt to recover nodes that are about to be
        deleted.

        :param cluster_id: The ID of the target cluster.
        :param action: The action to be examined.
        :param kwargs args: Other keyword arguments to be checked.
        :returns: Boolean indicating whether the checking passed.
        """
        if action.action in (consts.CLUSTER_SCALE_IN, consts.CLUSTER_DEL_NODES,
                             consts.NODE_DELETE):
            health_manager.disable(cluster_id)
            return True

        if action.action == consts.CLUSTER_RESIZE:
            deletion = action.data.get('deletion', None)
            if deletion:
                health_manager.disable(cluster_id)
                return True

            cluster = action.entity
            current = len(cluster.nodes)
            res, reason = scaleutils.parse_resize_params(
                action, cluster, current)
            if res == base.CHECK_ERROR:
                action.data['status'] = base.CHECK_ERROR
                action.data['reason'] = reason
                return False

            if action.data.get('deletion', None):
                health_manager.disable(cluster_id)
                return True

        pd = {
            'recover_action': self.recover_actions,
            'fencing': self.fencing_types,
        }
        action.data.update({'health': pd})
        action.store(action.context)

        return True
Example #15
    def do_resize(self):
        """Handler for the CLUSTER_RESIZE action.

        :returns: A tuple containing the result and the corresponding reason.
        """
        # If no policy decision(s) are found, use the action inputs directly.
        # Note that 'parse_resize_params' is capable of calculating the
        # desired capacity and handling best-effort scaling. It also verifies
        # that the inputs are valid.
        curr_capacity = no.Node.count_by_cluster(self.context, self.entity.id)
        if 'creation' not in self.data and 'deletion' not in self.data:
            result, reason = scaleutils.parse_resize_params(
                self, self.entity, curr_capacity)
            if result != self.RES_OK:
                return result, reason

        # Action inputs have been consolidated into action.data at this point.
        reason = 'Cluster resize succeeded.'
        if 'deletion' in self.data:
            count = self.data['deletion']['count']
            candidates = self.data['deletion'].get('candidates', [])

            # Choose victims randomly if not already picked
            if not candidates:
                node_list = self.entity.nodes
                candidates = scaleutils.nodes_by_random(node_list, count)

            self._update_cluster_size(curr_capacity - count)

            grace_period = self.data['deletion'].get('grace_period', 0)
            self._sleep(grace_period)
            result, new_reason = self._delete_nodes(candidates)
        else:
            # 'creation' in self.data:
            count = self.data['creation']['count']
            self._update_cluster_size(curr_capacity + count)
            result, new_reason = self._create_nodes(count)

        if result != self.RES_OK:
            reason = new_reason

        self.entity.eval_status(self.context, consts.CLUSTER_RESIZE)
        return result, reason
Example #16
    def test_parse_resize_params_invalid(self):
        action = mock.Mock()
        cluster = mock.Mock()
        action.inputs = {
            consts.ADJUSTMENT_TYPE: consts.EXACT_CAPACITY,
            consts.ADJUSTMENT_NUMBER: 11,
            consts.ADJUSTMENT_MIN_SIZE: 3,
            consts.ADJUSTMENT_MAX_SIZE: 10,
            consts.ADJUSTMENT_MIN_STEP: None,
            consts.ADJUSTMENT_STRICT: True,
        }
        action.data = {}
        action.RES_ERROR = 'ERROR'

        result, reason = su.parse_resize_params(action, cluster, 6)

        self.assertEqual('ERROR', result)
        msg = _('The target capacity (11) is greater than '
                'the specified max_size (10).')
        self.assertEqual(msg, reason)
Example #17
    def _get_count(self, cluster_id, action):
        """Get number of nodes to create or delete.

        :param cluster_id: The ID of the target cluster.
        :param action: The action object which triggered this policy check.
        :return: An integer value which can be 1) positive - number of nodes
                 to create; 2) negative - number of nodes to delete; 3) 0 -
                 something wrong happened, and the policy check failed.
        """
        if action.action == consts.CLUSTER_RESIZE:
            if action.data.get('deletion', None):
                return -action.data['deletion']['count']
            elif action.data.get('creation', None):
                return action.data['creation']['count']

            db_cluster = db_api.cluster_get(action.context, cluster_id)
            res = scaleutils.parse_resize_params(action, db_cluster)
            if res[0] == base.CHECK_ERROR:
                action.data['status'] = base.CHECK_ERROR
                action.data['reason'] = res[1]
                LOG.error(res[1])
                return 0

            if action.data.get('deletion', None):
                return -action.data['deletion']['count']
            else:
                return action.data['creation']['count']

        if action.action == consts.CLUSTER_SCALE_IN:
            pd = action.data.get('deletion', None)
            if pd is None:
                return -action.inputs.get('count', 1)
            else:
                return -pd.get('count', 1)

        # CLUSTER_SCALE_OUT: an action that inflates the cluster
        pd = action.data.get('creation', None)
        if pd is None:
            return action.inputs.get('count', 1)
        else:
            return pd.get('count', 1)
Example #18
    def post_op(self, cluster_id, action, **args):
        """Hook before action execution.

        One of the task for this routine is to re-enable health policy if the
        action is a request that will shrink the cluster thus the policy has
        been temporarily disabled.

        :param cluster_id: The ID of the target cluster.
        :param action: The action to be examined.
        :param kwargs args: Other keyword arguments to be checked.
        :returns: Boolean indicating whether the checking passed.
        """
        if action.action in (consts.CLUSTER_SCALE_IN, consts.CLUSTER_DEL_NODES,
                             consts.NODE_DELETE):
            health_manager.enable(cluster_id)
            return True

        if action.action == consts.CLUSTER_RESIZE:
            deletion = action.data.get('deletion', None)
            if deletion:
                health_manager.enable(cluster_id)
                return True

            cluster = action.entity
            current = len(cluster.nodes)
            res, reason = scaleutils.parse_resize_params(
                action, cluster, current)
            if res == base.CHECK_ERROR:
                action.data['status'] = base.CHECK_ERROR
                action.data['reason'] = reason
                return False

            if action.data.get('deletion', None):
                health_manager.enable(cluster_id)
                return True

        return True
Example #19
    def pre_op(self, cluster_id, action):
        """Choose victims that can be deleted.

        :param cluster_id: ID of the cluster to be handled.
        :param action: The action object that triggered this policy.
        """

        victims = action.inputs.get('candidates', [])
        if len(victims) > 0:
            self._update_action(action, victims)
            return

        if action.action == consts.NODE_DELETE:
            self._update_action(action, [action.entity.id])
            return

        cluster = action.entity
        regions = None
        zones = None

        hooks_data = self.hooks
        action.data.update({'status': base.CHECK_OK,
                            'reason': _('lifecycle hook parameters saved'),
                            'hooks': hooks_data})
        action.store(action.context)

        deletion = action.data.get('deletion', {})
        if deletion:
            # there are policy decisions
            count = deletion['count']
            regions = deletion.get('regions', None)
            zones = deletion.get('zones', None)
        # No policy decision, check action itself: SCALE_IN
        elif action.action == consts.CLUSTER_SCALE_IN:
            count = action.inputs.get('count', 1)

        # No policy decision, check action itself: RESIZE
        else:
            current = len(cluster.nodes)
            res, reason = su.parse_resize_params(action, cluster, current)
            if res == base.CHECK_ERROR:
                action.data['status'] = base.CHECK_ERROR
                action.data['reason'] = reason
                LOG.error(reason)
                return

            if 'deletion' not in action.data:
                return
            count = action.data['deletion']['count']

        # Cross-region
        if regions:
            victims = self._victims_by_regions(cluster, regions)
            self._update_action(action, victims)
            return

        # Cross-AZ
        if zones:
            victims = self._victims_by_zones(cluster, zones)
            self._update_action(action, victims)
            return

        if count > len(cluster.nodes):
            count = len(cluster.nodes)

        if self.criteria == self.RANDOM:
            victims = su.nodes_by_random(cluster.nodes, count)
        elif self.criteria == self.OLDEST_PROFILE_FIRST:
            victims = su.nodes_by_profile_age(cluster.nodes, count)
        elif self.criteria == self.OLDEST_FIRST:
            victims = su.nodes_by_age(cluster.nodes, count, True)
        else:
            victims = su.nodes_by_age(cluster.nodes, count, False)

        self._update_action(action, victims)
        return
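The OLDEST_FIRST and YOUNGEST_FIRST criteria above resolve to
su.nodes_by_age(nodes, count, old_first). A minimal sketch of such a helper,
assuming each node object exposes 'created_at' and 'id' attributes; the real
implementation may differ:

def nodes_by_age(nodes, count, old_first):
    """Sketch: pick ``count`` node IDs by age, oldest first if requested."""
    ordered = sorted(nodes, key=lambda node: node.created_at)
    if not old_first:
        # YOUNGEST_FIRST: reverse the ordering before taking the head
        ordered = list(reversed(ordered))
    return [node.id for node in ordered[:count]]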
Example #20
    def pre_op(self, cluster_id, action):
        """Choose victims that can be deleted.

        :param cluster_id: ID of the cluster to be handled.
        :param action: The action object that triggered this policy.
        """

        victims = action.inputs.get('candidates', [])
        if len(victims) > 0:
            self._update_action(action, victims)
            return

        db_cluster = None
        regions = None
        zones = None

        deletion = action.data.get('deletion', {})
        if deletion:
            # there are policy decisions
            count = deletion['count']
            regions = deletion.get('regions', None)
            zones = deletion.get('zones', None)
        # No policy decision, check action itself: SCALE_IN
        elif action.action == consts.CLUSTER_SCALE_IN:
            count = action.inputs.get('count', 1)

        # No policy decision, check action itself: RESIZE
        else:
            db_cluster = db_api.cluster_get(action.context, cluster_id)
            scaleutils.parse_resize_params(action, db_cluster)
            if 'deletion' not in action.data:
                return
            count = action.data['deletion']['count']

        cluster = cluster_mod.Cluster.load(action.context,
                                           cluster=db_cluster,
                                           cluster_id=cluster_id)
        # Cross-region
        if regions:
            victims = self._victims_by_regions(cluster, regions)
            self._update_action(action, victims)
            return

        # Cross-AZ
        if zones:
            victims = self._victims_by_zones(cluster, zones)
            self._update_action(action, victims)
            return

        if count > len(cluster.nodes):
            count = len(cluster.nodes)

        if self.criteria == self.RANDOM:
            victims = scaleutils.nodes_by_random(cluster.nodes, count)
        elif self.criteria == self.OLDEST_PROFILE_FIRST:
            victims = scaleutils.nodes_by_profile_age(cluster.nodes, count)
        elif self.criteria == self.OLDEST_FIRST:
            victims = scaleutils.nodes_by_age(cluster.nodes, count, True)
        else:
            victims = scaleutils.nodes_by_age(cluster.nodes, count, False)

        self._update_action(action, victims)
        return
Example #21
    def pre_op(self, cluster_id, action):
        """Routine to be called before target action is executed.

        This policy annotates the node with a server group ID before the
        node is actually created. For vSphere DRS, it is equivalent to the
        selection of vSphere host (cluster).

        :param cluster_id: ID of the cluster on which the relevant action
                            is to be executed.
        :param action: The action object that triggered this operation.
        :returns: Nothing.
        """
        zone_name = self.properties.get(self.AVAILABILITY_ZONE)
        if not zone_name and self.enable_drs:
            # we make a reasonable guess of the zone name for vSphere
            # support because the zone name is required in that case.
            zone_name = 'nova'

        # We respect other policies' decisions (if any) and fall back to the
        # action inputs if no hints are found.
        pd = action.data.get('creation', None)
        if pd is not None:
            count = pd.get('count', 1)
        elif action.action == consts.CLUSTER_SCALE_OUT:
            count = action.inputs.get('count', 1)
        elif action.action == consts.NODE_CREATE:
            count = 1
        else:  # CLUSTER_RESIZE
            db_cluster = co.Cluster.get(action.context, cluster_id)
            su.parse_resize_params(action, db_cluster)
            if 'creation' not in action.data:
                return
            count = action.data['creation']['count']

        cp = cpo.ClusterPolicy.get(action.context, cluster_id, self.id)
        policy_data = self._extract_policy_data(cp.data)
        pd_entry = {'servergroup': policy_data['servergroup_id']}

        # special handling for vSphere DRS case where we need to find out
        # the name of the vSphere host which has DRS enabled.
        if self.enable_drs:
            cluster_obj = co.Cluster.get(action.context, cluster_id)
            nc = self.nova(cluster_obj)

            hypervisors = nc.hypervisor_list()
            hv_id = ''
            pattern = re.compile(r'.*drs*', re.I)
            for hypervisor in hypervisors:
                match = pattern.match(hypervisor.hypervisor_hostname)
                if match:
                    hv_id = hypervisor.id
                    break

            if not hv_id:
                action.data['status'] = base.CHECK_ERROR
                action.data['status_reason'] = _('No suitable vSphere host '
                                                 'is available.')
                action.store(action.context)
                return

            hv_info = nc.hypervisor_get(hv_id)
            hostname = hv_info['service']['host']
            pd_entry['zone'] = ":".join([zone_name, hostname])

        elif zone_name:
            pd_entry['zone'] = zone_name

        pd = {
            'count': count,
            'placements': [pd_entry] * count,
        }
        action.data.update({'placement': pd})
        action.store(action.context)

        return
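A standalone look at the hostname pattern used in the DRS branch above, for
illustration only. Because the trailing 's*' makes the 's' optional and the
match is case-insensitive, any hypervisor hostname containing 'dr' is
accepted:

import re

pattern = re.compile(r'.*drs*', re.I)

assert pattern.match('vsphere-DRS-cluster-01') is not None  # intended match
assert pattern.match('esx-dr-site-b') is not None           # 'dr' alone matches
assert pattern.match('compute-7') is None                   # no 'dr' anywhere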
Example #22
    def pre_op(self, cluster_id, action):
        """Routine to be called before an 'CLUSTER_SCALE_OUT' action.

        For this particular policy, we take this chance to intelligently
        select the most proper hypervisor/vsphere cluster to create nodes.
        In order to realize the function, we need to create construct meta
        to handle affinity/anti-affinity then update the profile with the
        specific parameters at first

        :param cluster_id: ID of the cluster on which the relevant action
                            is to be executed.
        :param action: The action object that triggered this operation.
        :returns: Nothing.
        """
        zone_name = self.properties.get(self.AVAILABILITY_ZONE)
        if not zone_name and self.enable_drs:
            # we make a reasonable guess of the zone name for vSphere
            # support because the zone name is required in that case.
            zone_name = 'nova'

        # We respect other policies' decisions (if any) and fall back to the
        # action inputs if no hints are found.
        pd = action.data.get('creation', None)
        if pd is not None:
            count = pd.get('count', 1)
        elif action.action == consts.CLUSTER_SCALE_OUT:
            count = action.inputs.get('count', 1)
        else:  # CLUSTER_RESIZE
            db_cluster = co.Cluster.get(action.context, cluster_id)
            su.parse_resize_params(action, db_cluster)
            if 'creation' not in action.data:
                return
            count = action.data['creation']['count']

        cp = cpo.ClusterPolicy.get(action.context, cluster_id, self.id)
        policy_data = self._extract_policy_data(cp.data)
        pd_entry = {'servergroup': policy_data['servergroup_id']}

        # special handling for vSphere DRS case where we need to find out
        # the name of the vSphere host which has DRS enabled.
        if self.enable_drs:
            cluster_obj = co.Cluster.get(action.context, cluster_id)
            nc = self.nova(cluster_obj)

            hypervisors = nc.hypervisor_list()
            hv_id = ''
            pattern = re.compile(r'.*drs*', re.I)
            for hypervisor in hypervisors:
                match = pattern.match(hypervisor.hypervisor_hostname)
                if match:
                    hv_id = hypervisor.id
                    break

            if not hv_id:
                action.data['status'] = base.CHECK_ERROR
                action.data['status_reason'] = _('No suitable vSphere host '
                                                 'is available.')
                action.store(action.context)
                return

            hv_info = nc.hypervisor_get(hv_id)
            hostname = hv_info['service']['host']
            pd_entry['zone'] = ":".join([zone_name, hostname])

        elif zone_name:
            pd_entry['zone'] = zone_name

        pd = {
            'count': count,
            'placements': [pd_entry] * count,
        }
        action.data.update({'placement': pd})
        action.store(action.context)

        return
Example #23
    def pre_op(self, cluster_id, action):
        """Routine to be called before target action is executed.

        This policy annotates the node with a server group ID before the
        node is actually created. For vSphere DRS, it is equivalent to the
        selection of vSphere host (cluster).

        :param cluster_id: ID of the cluster on which the relevant action
                            is to be executed.
        :param action: The action object that triggered this operation.
        :returns: Nothing.
        """
        zone_name = self.properties.get(self.AVAILABILITY_ZONE)
        if not zone_name and self.enable_drs:
            # we make a reasonable guess of the zone name for vSphere
            # support because the zone name is required in that case.
            zone_name = 'nova'

        # We respect other policies' decisions (if any) and fall back to the
        # action inputs if no hints are found.
        pd = action.data.get('creation', None)
        if pd is not None:
            count = pd.get('count', 1)
        elif action.action == consts.CLUSTER_SCALE_OUT:
            count = action.inputs.get('count', 1)
        elif action.action == consts.NODE_CREATE:
            count = 1
        else:  # CLUSTER_RESIZE
            cluster = action.entity
            current = len(cluster.nodes)
            su.parse_resize_params(action, cluster, current)
            if 'creation' not in action.data:
                return
            count = action.data['creation']['count']

        cp = cpo.ClusterPolicy.get(action.context, cluster_id, self.id)
        policy_data = self._extract_policy_data(cp.data)
        pd_entry = {'servergroup': policy_data['servergroup_id']}

        # special handling for vSphere DRS case where we need to find out
        # the name of the vSphere host which has DRS enabled.
        if self.enable_drs:
            obj = action.entity
            nc = self.nova(obj.user, obj.project)

            hypervisors = nc.hypervisor_list()
            hv_id = ''
            pattern = re.compile(r'.*drs*', re.I)
            for hypervisor in hypervisors:
                match = pattern.match(hypervisor.hypervisor_hostname)
                if match:
                    hv_id = hypervisor.id
                    break

            if not hv_id:
                action.data['status'] = base.CHECK_ERROR
                action.data['status_reason'] = _('No suitable vSphere host '
                                                 'is available.')
                action.store(action.context)
                return

            hv_info = nc.hypervisor_get(hv_id)
            hostname = hv_info['service']['host']
            pd_entry['zone'] = ":".join([zone_name, hostname])

        elif zone_name:
            pd_entry['zone'] = zone_name

        pd = {
            'count': count,
            'placements': [pd_entry] * count,
        }
        action.data.update({'placement': pd})
        action.store(action.context)

        return
Example #24
    def pre_op(self, cluster_id, action):
        """Choose victims that can be deleted.

        :param cluster_id: ID of the cluster to be handled.
        :param action: The action object that triggered this policy.
        """

        victims = action.inputs.get('candidates', [])
        if len(victims) > 0:
            self._update_action(action, victims)
            return

        if action.action == consts.NODE_DELETE:
            self._update_action(action, [action.node.id])
            return

        db_cluster = None
        regions = None
        zones = None

        deletion = action.data.get('deletion', {})
        if deletion:
            # there are policy decisions
            count = deletion['count']
            regions = deletion.get('regions', None)
            zones = deletion.get('zones', None)
        # No policy decision, check action itself: SCALE_IN
        elif action.action == consts.CLUSTER_SCALE_IN:
            count = action.inputs.get('count', 1)

        # No policy decision, check action itself: RESIZE
        else:
            db_cluster = co.Cluster.get(action.context, cluster_id)
            current = no.Node.count_by_cluster(action.context, cluster_id)
            res, reason = scaleutils.parse_resize_params(action, db_cluster,
                                                         current)
            if res == base.CHECK_ERROR:
                action.data['status'] = base.CHECK_ERROR
                action.data['reason'] = reason
                LOG.error(reason)
                return

            if 'deletion' not in action.data:
                return
            count = action.data['deletion']['count']

        cluster = cm.Cluster.load(action.context, dbcluster=db_cluster,
                                  cluster_id=cluster_id)
        # Cross-region
        if regions:
            victims = self._victims_by_regions(cluster, regions)
            self._update_action(action, victims)
            return

        # Cross-AZ
        if zones:
            victims = self._victims_by_zones(cluster, zones)
            self._update_action(action, victims)
            return

        if count > len(cluster.nodes):
            count = len(cluster.nodes)

        if self.criteria == self.RANDOM:
            victims = scaleutils.nodes_by_random(cluster.nodes, count)
        elif self.criteria == self.OLDEST_PROFILE_FIRST:
            victims = scaleutils.nodes_by_profile_age(cluster.nodes, count)
        elif self.criteria == self.OLDEST_FIRST:
            victims = scaleutils.nodes_by_age(cluster.nodes, count, True)
        else:
            victims = scaleutils.nodes_by_age(cluster.nodes, count, False)

        self._update_action(action, victims)
        return