Example #1
    def do_delete(self):
        """Handler for the NODE_DELETE action.

        :returns: A tuple containing the result and the corresponding reason.
        """
        if self.node.cluster_id and self.cause == base.CAUSE_RPC:
            # If node belongs to a cluster, check size constraint
            # before deleting it
            cluster = cluster_mod.Cluster.load(self.context,
                                               self.node.cluster_id)
            result = scaleutils.check_size_params(cluster,
                                                  cluster.desired_capacity - 1,
                                                  None, None, True)
            if result:
                return self.RES_ERROR, result

        res = self.node.do_delete(self.context)
        if res:
            if self.node.cluster_id and self.cause == base.CAUSE_RPC:
                # Update cluster desired_capacity if node deletion succeeded
                cluster.desired_capacity -= 1
                cluster.store(self.context)
                cluster.remove_node(self.node.id)
            return self.RES_OK, _('Node deleted successfully.')
        else:
            return self.RES_ERROR, _('Node deletion failed.')
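Every handler in these examples follows the same guard pattern: load the owning cluster, call check_size_params with the capacity the action would produce, and return RES_ERROR before mutating anything if the check returns a truthy value (an error message string). A minimal sketch of that pattern outside any action class, assuming check_size_params is importable from senlin.common.scaleutils:

    from senlin.common import scaleutils

    RES_ERROR, RES_OK = 'ERROR', 'OK'  # stand-ins for the action constants

    def guard_capacity_change(cluster, new_capacity):
        # A truthy return value is an error message; a falsy one means the
        # proposed capacity satisfies the cluster's size constraints.
        result = scaleutils.check_size_params(cluster, new_capacity,
                                              None, None, True)
        if result:
            return RES_ERROR, result
        return RES_OK, 'Size constraint satisfied.'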
Example #2
    def do_create(self):
        """Handler for the NODE_CREATE action.

        :returns: A tuple containing the result and the corresponding reason.
        """
        cluster_id = self.entity.cluster_id
        if cluster_id and self.cause == consts.CAUSE_RPC:
            # Check cluster size constraint if target cluster is specified
            cluster = cm.Cluster.load(self.context, cluster_id)
            desired = no.Node.count_by_cluster(self.context, cluster_id)
            result = su.check_size_params(cluster, desired, None, None, True)
            if result:
                # cannot place node into the cluster
                no.Node.update(self.context, self.entity.id, {
                    'cluster_id': '',
                    'status': consts.NS_ERROR
                })
                return self.RES_ERROR, result

        res, reason = self.entity.do_create(self.context)

        if cluster_id and self.cause == consts.CAUSE_RPC:
            # Update the cluster's desired_capacity and re-evaluate its
            # status whether or not the creation succeeded, because the
            # node is already treated as a member of the cluster and its
            # creation may have changed the cluster's status
            cluster.eval_status(self.context,
                                consts.NODE_CREATE,
                                desired_capacity=desired)
        if res:
            return self.RES_OK, 'Node created successfully.'
        else:
            return self.RES_ERROR, reason
Example #3
    def do_create(self):
        """Handler for the NODE_CREATE action.

        :returns: A tuple containing the result and the corresponding reason.
        """
        if self.node.cluster_id and self.cause == base.CAUSE_RPC:
            # If node is created with target cluster specified,
            # check cluster size constraint
            cluster = cluster_mod.Cluster.load(self.context,
                                               self.node.cluster_id)
            result = scaleutils.check_size_params(
                cluster, cluster.desired_capacity + 1, None, None, True)

            if result:
                return self.RES_ERROR, result

        res = self.node.do_create(self.context)
        if res:
            if self.node.cluster_id and self.cause == base.CAUSE_RPC:
                # Update cluster desired_capacity if node creation succeeded
                cluster.desired_capacity += 1
                cluster.store(self.context)
                cluster.add_node(self.node)
            return self.RES_OK, _('Node created successfully.')
        else:
            return self.RES_ERROR, _('Node creation failed.')
Example #4
    def do_resize(self):
        adj_type = self.inputs.get(consts.ADJUSTMENT_TYPE, None)
        number = self.inputs.get(consts.ADJUSTMENT_NUMBER, None)
        min_size = self.inputs.get(consts.ADJUSTMENT_MIN_SIZE, None)
        max_size = self.inputs.get(consts.ADJUSTMENT_MAX_SIZE, None)
        min_step = self.inputs.get(consts.ADJUSTMENT_MIN_STEP, None)
        strict = self.inputs.get(consts.ADJUSTMENT_STRICT, False)

        desired = self.cluster.desired_capacity
        if adj_type is not None:
            # number must not be None according to the previous validation
            desired = scaleutils.calculate_desired(
                desired, adj_type, number, min_step)

        # truncate adjustment if permitted (strict==False)
        if strict is False:
            desired = scaleutils.truncate_desired(
                self.cluster, desired, min_size, max_size)

        # check provided params against current properties
        # desired is checked when strict is True
        result = scaleutils.check_size_params(self.cluster, desired, min_size,
                                              max_size, strict)
        if result != '':
            return self.RES_ERROR, result

        # save sanitized properties
        self._update_cluster_properties(desired, min_size, max_size)
        node_list = self.cluster.nodes
        current_size = len(node_list)

        # delete nodes if necessary
        if desired < current_size:
            adjustment = current_size - desired
            if 'deletion' not in self.data:
                self.data['deletion'] = {'count': adjustment}
            candidates = []
            # Choose victims randomly
            i = adjustment
            while i > 0:
                r = random.randrange(len(node_list))
                candidates.append(node_list[r].id)
                node_list.remove(node_list[r])
                i = i - 1

            result, reason = self._delete_nodes(candidates)
            if result != self.RES_OK:
                return result, reason

        # Create new nodes if desired_capacity increased
        if desired > current_size:
            delta = desired - current_size
            self.data['creation'] = {'count': delta}
            result, reason = self._create_nodes(delta)
            if result != self.RES_OK:
                return result, reason

        reason = _('Cluster resize succeeded.')
        self.cluster.set_status(self.context, self.cluster.ACTIVE, reason)
        return self.RES_OK, reason
Example #5
    def do_create(self):
        """Handler for the NODE_CREATE action.

        :returns: A tuple containing the result and the corresponding reason.
        """
        if self.node.cluster_id and self.cause == base.CAUSE_RPC:
            # If node is created with target cluster specified,
            # check cluster size constraint
            cluster = cm.Cluster.load(self.context, self.node.cluster_id)
            result = scaleutils.check_size_params(
                cluster, cluster.desired_capacity + 1, None, None, True)

            if result:
                return self.RES_ERROR, result

        res = self.node.do_create(self.context)
        if res:
            if self.node.cluster_id and self.cause == base.CAUSE_RPC:
                # Update cluster desired_capacity if node creation succeeded
                cluster.desired_capacity += 1
                cluster.store(self.context)
                cluster.add_node(self.node)
            return self.RES_OK, _('Node created successfully.')
        else:
            return self.RES_ERROR, _('Node creation failed.')
Example #6
    def do_scale_in(self):
        """Handler for the CLUSTER_SCALE_IN action.

        :returns: A tuple containing the result and the corresponding reason.
        """
        # We use policy data if any, or else the count is set to 1 as default.
        pd = self.data.get('deletion', None)
        if pd is not None:
            count = pd.get('count', 1)
            candidates = pd.get('candidates', [])
        else:
            # If no scaling policy is attached, use the input count directly
            count = self.inputs.get('count', 1)
            candidates = []

        if count <= 0:
            reason = _('Invalid count (%s) for scaling in.') % count
            return self.RES_ERROR, reason

        # check provided params against current properties
        # desired is checked when strict is True
        curr_size = len(self.cluster.nodes)
        if count > curr_size:
            LOG.warning(_('Trimming count (%(count)s) to current cluster '
                          'size (%(curr)s) for scaling in'),
                        {'count': count, 'curr': curr_size})
            count = curr_size
        new_size = curr_size - count

        result = scaleutils.check_size_params(self.cluster, new_size,
                                              None, None, False)
        if result != '':
            return self.RES_ERROR, result

        # Choose victims randomly
        if len(candidates) == 0:
            ids = [node.id for node in self.cluster.nodes]
            i = count
            while i > 0:
                r = random.randrange(len(ids))
                candidates.append(ids[r])
                ids.remove(ids[r])
                i = i - 1

        # The policy data may contain destroy flag and grace period option
        result, reason = self._delete_nodes(candidates)

        if result == self.RES_OK:
            reason = _('Cluster scaling succeeded.')
            # TODO(anyone): make update to desired capacity customizable
            self.cluster.set_status(self.context, self.cluster.ACTIVE, reason,
                                    desired_capacity=new_size)
        elif result in [self.RES_CANCEL, self.RES_TIMEOUT, self.RES_ERROR]:
            self.cluster.set_status(self.context, self.cluster.ERROR, reason)
        else:
            # RES_RETRY
            pass

        return result, reason
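The while-loop above picks victims by hand; the next example delegates the same job to scaleutils.nodes_by_random. A sketch of what such a helper plausibly looks like, with the signature inferred from the call sites (a list of node objects and a count, returning node IDs):

    import random

    def nodes_by_random(nodes, count):
        # Choose `count` distinct victims and return their IDs; behavior
        # inferred from the call sites above, not Senlin's implementation.
        victims = random.sample(nodes, count)
        return [node.id for node in victims]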
Example #7
    def do_scale_in(self):
        """Handler for the CLUSTER_SCALE_IN action.

        :returns: A tuple containing the result and the corresponding reason.
        """
        # We use policy data if any; a deletion policy and a scaling policy
        # might be attached.
        pd = self.data.get('deletion', None)
        grace_period = 0
        if pd:
            grace_period = pd.get('grace_period', 0)
            candidates = pd.get('candidates', [])
            # if scaling policy is attached, get 'count' from action data
            count = len(candidates) or pd['count']
        else:
            # If no scaling policy is attached, use the input count directly
            candidates = []
            value = self.inputs.get('count', 1)
            success, count = utils.get_positive_int(value)
            if not success:
                reason = _('Invalid count (%s) for scaling in.') % value
                return self.RES_ERROR, reason

        # check provided params against current properties
        # desired is checked when strict is True
        curr_size = no.Node.count_by_cluster(self.context, self.target)
        if count > curr_size:
            msg = _("Triming count (%(count)s) to current "
                    "cluster size (%(curr)s) for scaling in")
            LOG.warning(msg, {'count': count, 'curr': curr_size})
            count = curr_size
        new_size = curr_size - count

        result = scaleutils.check_size_params(self.entity, new_size, None,
                                              None, True)
        if result:
            return self.RES_ERROR, result

        self.entity.set_status(self.context,
                               consts.CS_RESIZING,
                               _('Cluster scale in started.'),
                               desired_capacity=new_size)

        # Choose victims randomly
        if len(candidates) == 0:
            candidates = scaleutils.nodes_by_random(self.entity.nodes, count)

        # Wait out the grace period before deleting the victims
        self._sleep(grace_period)

        result, reason = self._delete_nodes(candidates)

        if result == self.RES_OK:
            reason = _('Cluster scaling succeeded.')

        self.entity.eval_status(self.context, consts.CLUSTER_SCALE_IN)

        return result, reason
Example #8
    def test_check_size_params(self):
        cluster = mock.Mock()
        cluster.min_size = 10
        cluster.max_size = 20
        cluster.desired_capacity = 15

        actual = su.check_size_params(cluster, self.desired, self.min_size,
                                      self.max_size, self.strict)
        self.assertEqual(self.result, actual)
Example #9
    def test_check_size_params_default_strict(self):
        cluster = mock.Mock()
        cluster.min_size = 10
        cluster.max_size = 20
        cluster.desired_capacity = 15
        desired = 5
        min_size = None
        max_size = None

        actual = su.check_size_params(cluster, desired, min_size, max_size)
        self.assertIsNone(actual)
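The two tests above pin down the observable contract of check_size_params: success is falsy (an empty string in older revisions, None in newer ones), failure is an error message string, and strict defaults to False, so an out-of-range desired value only fails when strict checking is requested. A simplified sketch consistent with that contract, not Senlin's actual implementation:

    def check_size_params(cluster, desired=None, min_size=None,
                          max_size=None, strict=False):
        # Fall back to the cluster's current properties when not given;
        # a max_size below zero is treated as 'no upper bound'.
        min_size = cluster.min_size if min_size is None else min_size
        max_size = cluster.max_size if max_size is None else max_size

        if 0 <= max_size < min_size:
            return ('The min_size (%s) is greater than the max_size '
                    '(%s).' % (min_size, max_size))

        # desired is only range-checked when strict is True
        if strict and desired is not None:
            if desired < min_size:
                return ('The target capacity (%s) is less than min_size '
                        '(%s).' % (desired, min_size))
            if 0 <= max_size < desired:
                return ('The target capacity (%s) is greater than '
                        'max_size (%s).' % (desired, max_size))
        return None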
Example #10
    def do_scale_out(self):
        """Handler for the CLUSTER_SCALE_OUT action.

        :returns: A tuple containing the result and the corresponding reason.
        """
        self.cluster.set_status(self.context, self.cluster.RESIZING,
                                'Cluster scale out started.')
        # We use policy output if any, or else the count is
        # set to 1 as default.
        pd = self.data.get('creation', None)
        if pd is not None:
            count = pd.get('count', 1)
        else:
            # If no scaling policy is attached, use the
            # input count directly
            count = self.inputs.get('count', 1)
            try:
                count = utils.parse_int_param('count', count, allow_zero=False)
            except exception.InvalidParameter:
                reason = _('Invalid count (%s) for scaling out.') % count
                status_reason = _('Cluster scaling failed: %s') % reason
                self.cluster.set_status(self.context, self.cluster.ACTIVE,
                                        status_reason)
                return self.RES_ERROR, reason

        # check provided params against current properties
        # desired is checked when strict is True
        curr_size = len(self.cluster.nodes)
        new_size = curr_size + count

        result = scaleutils.check_size_params(self.cluster, new_size, None,
                                              None, True)
        if result:
            status_reason = _('Cluster scaling failed: %s') % result
            self.cluster.set_status(self.context, self.cluster.ACTIVE,
                                    status_reason)
            return self.RES_ERROR, result

        result, reason = self._create_nodes(count)
        if result == self.RES_OK:
            reason = _('Cluster scaling succeeded.')
            self.cluster.set_status(self.context,
                                    self.cluster.ACTIVE,
                                    reason,
                                    desired_capacity=new_size)
        elif result in [self.RES_CANCEL, self.RES_TIMEOUT, self.RES_ERROR]:
            self.cluster.set_status(self.context,
                                    self.cluster.ERROR,
                                    reason,
                                    desired_capacity=new_size)
        else:  # RES_RETRY
            pass

        return result, reason
Example #11
    def do_scale_out(self):
        """Handler for the CLUSTER_SCALE_OUT action.

        :returns: A tuple containing the result and the corresponding reason.
        """
        self.cluster.set_status(self.context, self.cluster.RESIZING,
                                'Cluster scale out started.')
        # We use policy output if any, or else the count is
        # set to 1 as default.
        pd = self.data.get('creation', None)
        if pd is not None:
            count = pd.get('count', 1)
        else:
            # If no scaling policy is attached, use the
            # input count directly
            count = self.inputs.get('count', 1)
            try:
                count = utils.parse_int_param('count', count,
                                              allow_zero=False)
            except exception.InvalidParameter:
                reason = _('Invalid count (%s) for scaling out.') % count
                status_reason = _('Cluster scaling failed: %s') % reason
                self.cluster.set_status(self.context, self.cluster.ACTIVE,
                                        status_reason)
                return self.RES_ERROR, reason

        # check provided params against current properties
        # desired is checked when strict is True
        curr_size = len(self.cluster.nodes)
        new_size = curr_size + count

        result = scaleutils.check_size_params(self.cluster, new_size,
                                              None, None, True)
        if result:
            status_reason = _('Cluster scaling failed: %s') % result
            self.cluster.set_status(self.context, self.cluster.ACTIVE,
                                    status_reason)
            return self.RES_ERROR, result

        result, reason = self._create_nodes(count)
        if result == self.RES_OK:
            reason = _('Cluster scaling succeeded.')
            self.cluster.set_status(self.context, self.cluster.ACTIVE, reason,
                                    desired_capacity=new_size)
        elif result in [self.RES_CANCEL, self.RES_TIMEOUT, self.RES_ERROR]:
            self.cluster.set_status(self.context, self.cluster.ERROR, reason,
                                    desired_capacity=new_size)
        else:  # RES_RETRY
            pass

        return result, reason
Example #12
    def do_delete(self):
        """Handler for the NODE_DELETE action.

        :returns: A tuple containing the result and the corresponding reason.
        """
        cluster_id = self.entity.cluster_id
        if cluster_id and self.cause == consts.CAUSE_RPC:
            # If node belongs to a cluster, check size constraint
            # before deleting it
            cluster = cm.Cluster.load(self.context, cluster_id)
            current = no.Node.count_by_cluster(self.context, cluster_id)
            desired = current - 1
            result = su.check_size_params(cluster, desired, None, None, True)
            if result:
                return self.RES_ERROR, result

            # handle grace_period
            pd = self.data.get('deletion', None)
            if pd:
                grace_period = pd.get('grace_period', 0)
                if grace_period:
                    eventlet.sleep(grace_period)

        res = self.entity.do_delete(self.context)

        if cluster_id and self.cause == consts.CAUSE_RPC:
            # check if desired_capacity should be changed
            do_reduce = True
            params = {}
            pd = self.data.get('deletion', None)
            if pd:
                do_reduce = pd.get('reduce_desired_capacity', True)
            if do_reduce and res:
                params = {'desired_capacity': desired}
            cluster.eval_status(self.context, consts.NODE_DELETE, **params)

        if not res:
            return self.RES_ERROR, 'Node deletion failed.'

        # Remove from the DB all action records that target the deleted
        # node, except the ongoing NODE_DELETE action
        try:
            ao.Action.delete_by_target(
                self.context,
                self.target,
                action_excluded=[consts.NODE_DELETE],
                status=[consts.ACTION_SUCCEEDED, consts.ACTION_FAILED])
        except Exception as ex:
            LOG.warning('Failed to clean node action records: %s', ex)
        return self.RES_OK, 'Node deleted successfully.'
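Note the grace-period handling above: when a deletion policy supplied grace_period, the handler sleeps cooperatively before deleting the node. Later examples call a _sleep helper for the same purpose; presumably it wraps the same eventlet.sleep call, roughly:

    import eventlet

    def _sleep(seconds):
        # Sketch of the _sleep helper seen in other examples: yield to
        # other green threads for the policy's grace period, if any.
        if seconds:
            eventlet.sleep(seconds)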
Example #13
File: node_action.py, Project: Alzon/senlin
    def do_join(self):
        cluster_id = self.inputs.get('cluster_id')
        # Check the size constraint of parent cluster
        cluster = cluster_mod.Cluster.load(self.context, cluster_id)
        desired_capacity = cluster.desired_capacity + 1
        result = scaleutils.check_size_params(cluster, desired_capacity,
                                              None, None, True)
        if result != '':
            return self.RES_ERROR, result

        result = self.node.do_join(self.context, cluster_id)
        if result:
            # Update cluster desired_capacity if node join succeeded
            cluster.desired_capacity = desired_capacity
            cluster.store(self.context)
            return self.RES_OK, _('Node successfully joined cluster')
        else:
            return self.RES_ERROR, _('Node failed in joining cluster')
Example #14
File: node_action.py, Project: Alzon/senlin
    def do_leave(self):
        # Check the size constraint of parent cluster
        cluster = cluster_mod.Cluster.load(self.context,
                                           self.node.cluster_id)
        desired_capacity = cluster.desired_capacity - 1
        result = scaleutils.check_size_params(cluster, desired_capacity,
                                              None, None, True)
        if result != '':
            return self.RES_ERROR, result

        res = self.node.do_leave(self.context)
        if res:
            # Update cluster desired_capacity if node leave succeeded
            cluster.desired_capacity = desired_capacity
            cluster.store(self.context)
            return self.RES_OK, _('Node successfully left cluster')
        else:
            return self.RES_ERROR, _('Node failed in leaving cluster')
Example #15
    def do_scale_out(self):
        """Handler for the CLUSTER_SCALE_OUT action.

        :returns: A tuple containing the result and the corresponding reason.
        """
        # We use policy output if any, or else the count is
        # set to 1 as default.
        pd = self.data.get('creation', None)
        if pd is not None:
            count = pd.get('count', 1)
        else:
            # If no scaling policy is attached, use the
            # input count directly
            count = self.inputs.get('count', 1)

        if count <= 0:
            reason = _('Invalid count (%s) for scaling out.') % count
            return self.RES_ERROR, reason

        # check provided params against current properties
        # desired is checked when strict is True
        curr_size = len(self.cluster.nodes)
        new_size = curr_size + count

        result = scaleutils.check_size_params(self.cluster, new_size, None,
                                              None, True)
        if result != '':
            return self.RES_ERROR, result

        result, reason = self._create_nodes(count)
        if result == self.RES_OK:
            reason = _('Cluster scaling succeeded.')
            # TODO(anyone): make update to desired_capacity customizable
            self.cluster.set_status(self.context,
                                    self.cluster.ACTIVE,
                                    reason,
                                    desired_capacity=new_size)
        elif result in [self.RES_CANCEL, self.RES_TIMEOUT, self.RES_ERROR]:
            self.cluster.set_status(self.context, self.cluster.ERROR, reason)
        else:  # RES_RETRY
            pass

        return result, reason
Example #16
    def do_leave(self):
        """Handler for the NODE_LEAVE action.

        :returns: A tuple containing the result and the corresponding reason.
        """
        # Check the size constraint of parent cluster
        cluster = cm.Cluster.load(self.context, self.node.cluster_id)
        new_capacity = cluster.desired_capacity - 1
        result = scaleutils.check_size_params(cluster, new_capacity,
                                              None, None, True)
        if result:
            return self.RES_ERROR, result

        res = self.node.do_leave(self.context)
        if res:
            cluster.remove_node(self.node.id)
            return self.RES_OK, _('Node successfully left cluster.')
        else:
            return self.RES_ERROR, _('Node failed in leaving cluster.')
Example #17
    def do_leave(self):
        """Handler for the NODE_LEAVE action.

        :returns: A tuple containing the result and the corresponding reason.
        """
        # Check the size constraint of parent cluster
        cluster = cluster_mod.Cluster.load(self.context,
                                           self.node.cluster_id)
        new_capacity = cluster.desired_capacity - 1
        result = scaleutils.check_size_params(cluster, new_capacity,
                                              None, None, True)
        if result:
            return self.RES_ERROR, result

        res = self.node.do_leave(self.context)
        if res:
            cluster.remove_node(self.node.id)
            return self.RES_OK, _('Node successfully left cluster.')
        else:
            return self.RES_ERROR, _('Node failed in leaving cluster.')
Example #18
    def do_join(self):
        """Handler for the NODE_JOIN action.

        :returns: A tuple containing the result and the corresponding reason.
        """
        cluster_id = self.inputs.get('cluster_id')
        # Check the size constraint of parent cluster
        cluster = cluster_mod.Cluster.load(self.context, cluster_id)
        new_capacity = cluster.desired_capacity + 1
        result = scaleutils.check_size_params(cluster, new_capacity,
                                              None, None, True)
        if result:
            return self.RES_ERROR, result

        result = self.node.do_join(self.context, cluster_id)
        if result:
            cluster.add_node(self.node)
            return self.RES_OK, _('Node successfully joined cluster.')
        else:
            return self.RES_ERROR, _('Node failed in joining cluster.')
Example #19
    def do_join(self):
        """Handler for the NODE_JOIN action.

        :returns: A tuple containing the result and the corresponding reason.
        """
        cluster_id = self.inputs.get('cluster_id')
        # Check the size constraint of parent cluster
        cluster = cm.Cluster.load(self.context, cluster_id)
        new_capacity = cluster.desired_capacity + 1
        result = scaleutils.check_size_params(cluster, new_capacity,
                                              None, None, True)
        if result:
            return self.RES_ERROR, result

        result = self.node.do_join(self.context, cluster_id)
        if result:
            cluster.add_node(self.node)
            return self.RES_OK, _('Node successfully joined cluster.')
        else:
            return self.RES_ERROR, _('Node failed in joining cluster.')
Example #20
    def do_scale_out(self):
        # We use policy output if any, or else the count is
        # set to 1 as default.
        pd = self.data.get('creation', None)
        if pd is not None:
            count = pd.get('count', 1)
        else:
            # If no scaling policy is attached, use the
            # input count directly
            count = self.inputs.get('count', 1)

        if count <= 0:
            reason = _('Invalid count (%s) for scaling out.') % count
            return self.RES_ERROR, reason

        # check provided params against current properties
        # desired is checked when strict is True
        curr_size = len(self.cluster.nodes)
        new_size = curr_size + count

        result = scaleutils.check_size_params(self.cluster, new_size,
                                              None, None, True)
        if result != '':
            return self.RES_ERROR, result

        # Update desired_capacity of cluster
        # TODO(anyone): make this behavior customizable
        self._update_cluster_properties(new_size, None, None)

        result, reason = self._create_nodes(count)

        if result == self.RES_OK:
            reason = _('Cluster scaling succeeded.')
            self.cluster.set_status(self.context, self.cluster.ACTIVE, reason)
        elif result in [self.RES_CANCEL, self.RES_TIMEOUT, self.RES_ERROR]:
            self.cluster.set_status(self.context, self.cluster.ERROR, reason)
        else:
            # RES_RETRY
            pass

        return result, reason
Example #21
    def do_scale_out(self):
        """Handler for the CLUSTER_SCALE_OUT action.

        :returns: A tuple containing the result and the corresponding reason.
        """
        # We use policy output if any, or else the count is
        # set to 1 as default.
        pd = self.data.get('creation', None)
        if pd is not None:
            count = pd.get('count', 1)
        else:
            # If no scaling policy is attached, use the
            # input count directly
            count = self.inputs.get('count', 1)

        if count <= 0:
            reason = _('Invalid count (%s) for scaling out.') % count
            return self.RES_ERROR, reason

        # check provided params against current properties
        # desired is checked when strict is True
        curr_size = len(self.cluster.nodes)
        new_size = curr_size + count

        result = scaleutils.check_size_params(self.cluster, new_size,
                                              None, None, True)
        if result != '':
            return self.RES_ERROR, result

        result, reason = self._create_nodes(count)
        if result == self.RES_OK:
            reason = _('Cluster scaling succeeded.')
            # TODO(anyone): make update to desired_capacity customizable
            self.cluster.set_status(self.context, self.cluster.ACTIVE, reason,
                                    desired_capacity=new_size)
        elif result in [self.RES_CANCEL, self.RES_TIMEOUT, self.RES_ERROR]:
            self.cluster.set_status(self.context, self.cluster.ERROR, reason)
        else:  # RES_RETRY
            pass

        return result, reason
Example #22
File: node_action.py, Project: Alzon/senlin
    def do_delete(self):
        if self.node.cluster_id and self.cause == base.CAUSE_RPC:
            # If node belongs to a cluster, check size constraint
            # before deleting it
            cluster = cluster_mod.Cluster.load(self.context,
                                               self.node.cluster_id)
            result = scaleutils.check_size_params(cluster,
                                                  cluster.desired_capacity - 1,
                                                  None, None, True)
            if result != '':
                return self.RES_ERROR, result

        res = self.node.do_delete(self.context)
        if res:
            if self.node.cluster_id and self.cause == base.CAUSE_RPC:
                # Update cluster desired_capacity if node deletion succeeded
                cluster.desired_capacity -= 1
                cluster.store(self.context)
            return self.RES_OK, _('Node deleted successfully')
        else:
            return self.RES_ERROR, _('Node deletion failed')
Example #23
File: node_action.py, Project: Alzon/senlin
    def do_create(self):
        if self.node.cluster_id and self.cause == base.CAUSE_RPC:
            # If node is created with target cluster specified,
            # check cluster size constraint
            cluster = cluster_mod.Cluster.load(self.context,
                                               self.node.cluster_id)
            result = scaleutils.check_size_params(
                cluster, cluster.desired_capacity + 1, None, None, True)

            if result != '':
                return self.RES_ERROR, result

        res = self.node.do_create(self.context)
        if res:
            if self.node.cluster_id and self.cause == base.CAUSE_RPC:
                # Update cluster desired_capacity if node creation succeeded
                cluster.desired_capacity += 1
                cluster.store(self.context)
            return self.RES_OK, _('Node created successfully')
        else:
            return self.RES_ERROR, _('Node creation failed')
Example #24
    def do_delete(self):
        """Handler for the NODE_DELETE action.

        :returns: A tuple containing the result and the corresponding reason.
        """
        cluster_id = self.entity.cluster_id
        if cluster_id and self.cause == consts.CAUSE_RPC:
            # If node belongs to a cluster, check size constraint
            # before deleting it
            cluster = cm.Cluster.load(self.context, cluster_id)
            current = no.Node.count_by_cluster(self.context, cluster_id)
            desired = current - 1
            result = su.check_size_params(cluster, desired, None, None, True)
            if result:
                return self.RES_ERROR, result

            # handle grace_period
            pd = self.data.get('deletion', None)
            if pd:
                grace_period = pd.get('grace_period', 0)
                if grace_period:
                    eventlet.sleep(grace_period)

        res = self.entity.do_delete(self.context)

        if cluster_id and self.cause == consts.CAUSE_RPC:
            # check if desired_capacity should be changed
            do_reduce = True
            params = {}
            pd = self.data.get('deletion', None)
            if pd:
                do_reduce = pd.get('reduce_desired_capacity', True)
            if do_reduce and res:
                params = {'desired_capacity': desired}
            cluster.eval_status(self.context, consts.NODE_DELETE, **params)

        if not res:
            return self.RES_ERROR, 'Node deletion failed.'

        return self.RES_OK, 'Node deleted successfully.'
Example #25
    def do_join(self):
        """Handler for the NODE_JOIN action.

        :returns: A tuple containing the result and the corresponding reason.
        """
        cluster_id = self.inputs.get("cluster_id")
        # Check the size constraint of parent cluster
        cluster = cluster_mod.Cluster.load(self.context, cluster_id)
        desired_capacity = cluster.desired_capacity + 1
        result = scaleutils.check_size_params(cluster, desired_capacity,
                                              None, None, True)
        if result != "":
            return self.RES_ERROR, result

        result = self.node.do_join(self.context, cluster_id)
        if result:
            # Update cluster desired_capacity if node join succeeded
            cluster.desired_capacity = desired_capacity
            cluster.store(self.context)
            cluster.add_node(self.node)
            return self.RES_OK, _("Node successfully joined cluster.")
        else:
            return self.RES_ERROR, _("Node failed in joining cluster.")
Example #26
    def do_scale_out(self):
        """Handler for the CLUSTER_SCALE_OUT action.

        :returns: A tuple containing the result and the corresponding reason.
        """
        # We use policy output if any, or else the count is
        # set to 1 as default.
        pd = self.data.get('creation', None)
        if pd is not None:
            count = pd.get('count', 1)
        else:
            # If no scaling policy is attached, use the input count directly
            value = self.inputs.get('count', 1)
            success, count = utils.get_positive_int(value)
            if not success:
                reason = 'Invalid count (%s) for scaling out.' % value
                return self.RES_ERROR, reason

        # check provided params against current properties
        # desired is checked when strict is True
        curr_size = no.Node.count_by_cluster(self.context, self.target)
        new_size = curr_size + count
        result = scaleutils.check_size_params(self.entity, new_size, None,
                                              None, True)
        if result:
            return self.RES_ERROR, result

        self.entity.set_status(self.context,
                               consts.CS_RESIZING,
                               'Cluster scale out started.',
                               desired_capacity=new_size)

        result, reason = self._create_nodes(count)
        if result == self.RES_OK:
            reason = 'Cluster scaling succeeded.'
        self.entity.eval_status(self.context, consts.CLUSTER_SCALE_OUT)

        return result, reason
Example #27
    def do_delete(self):
        """Handler for the NODE_DELETE action.

        :returns: A tuple containing the result and the corresponding reason.
        """
        if self.node.cluster_id and self.cause == base.CAUSE_RPC:
            # If node belongs to a cluster, check size constraint
            # before deleting it
            cluster = cm.Cluster.load(self.context, self.node.cluster_id)
            result = scaleutils.check_size_params(cluster,
                                                  cluster.desired_capacity - 1,
                                                  None, None, True)
            if result:
                return self.RES_ERROR, result

            # handle grace_period
            pd = self.data.get('deletion', None)
            if pd:
                grace_period = pd.get('grace_period', 0)
                if grace_period:
                    eventlet.sleep(grace_period)

        res = self.node.do_delete(self.context)
        if not res:
            return self.RES_ERROR, _('Node deletion failed.')

        if self.node.cluster_id and self.cause == base.CAUSE_RPC:
            # check if desired_capacity should be changed
            do_reduce = True
            pd = self.data.get('deletion', None)
            if pd:
                do_reduce = pd.get('reduce_desired_capacity', True)
            if do_reduce:
                cluster.desired_capacity -= 1
                cluster.store(self.context)
            cluster.remove_node(self.node.id)
        return self.RES_OK, _('Node deleted successfully.')
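The 'deletion' entry consulted in self.data here is a plain dict produced by deletion/scaling policies. Collecting the keys read across these examples gives its approximate shape; the values below are only illustrative:

    deletion = {
        'count': 2,                       # number of nodes to remove
        'candidates': ['node-id-1'],      # victims chosen by a policy
        'grace_period': 30,               # seconds to wait before deleting
        'destroy_after_deletion': False,  # also destroy backing resources?
        'reduce_desired_capacity': True,  # shrink desired_capacity afterwards
    }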
Example #28
    def do_scale_in(self):
        """Handler for the CLUSTER_SCALE_IN action.

        :returns: A tuple containing the result and the corresponding reason.
        """
        self.cluster.set_status(self.context, self.cluster.RESIZING,
                                'Cluster scale in started.')
        # We use policy data if any; a deletion policy and a scaling policy
        # might be attached.
        pd = self.data.get('deletion', None)
        grace_period = None
        if pd is not None:
            grace_period = pd.get('grace_period', 0)
            candidates = pd.get('candidates', [])
            # if scaling policy is attached, get 'count' from action data
            count = len(candidates) or pd['count']
        else:
            # If no scaling policy is attached, use the input count directly
            candidates = []
            count = self.inputs.get('count', 1)
            try:
                count = utils.parse_int_param('count', count, allow_zero=False)
            except exception.InvalidParameter:
                reason = _('Invalid count (%s) for scaling in.') % count
                status_reason = _('Cluster scaling failed: %s') % reason
                self.cluster.set_status(self.context, self.cluster.ACTIVE,
                                        status_reason)
                return self.RES_ERROR, reason

        # check provided params against current properties
        # desired is checked when strict is True
        curr_size = len(self.cluster.nodes)
        if count > curr_size:
            LOG.warning(_('Trimming count (%(count)s) to current cluster '
                          'size (%(curr)s) for scaling in'),
                        {'count': count, 'curr': curr_size})
            count = curr_size
        new_size = curr_size - count

        result = scaleutils.check_size_params(self.cluster, new_size, None,
                                              None, True)
        if result:
            status_reason = _('Cluster scaling failed: %s') % result
            self.cluster.set_status(self.context, self.cluster.ACTIVE,
                                    status_reason)
            return self.RES_ERROR, result

        # Choose victims randomly
        if len(candidates) == 0:
            candidates = scaleutils.nodes_by_random(self.cluster.nodes, count)

        if grace_period is not None:
            self._wait_before_deletion(grace_period)
        # The policy data may contain destroy flag and grace period option
        result, reason = self._delete_nodes(candidates)

        if result == self.RES_OK:
            reason = _('Cluster scaling succeeded.')
            self.cluster.set_status(self.context,
                                    self.cluster.ACTIVE,
                                    reason,
                                    desired_capacity=new_size)
        elif result in [self.RES_CANCEL, self.RES_TIMEOUT, self.RES_ERROR]:
            self.cluster.set_status(self.context,
                                    self.cluster.ERROR,
                                    reason,
                                    desired_capacity=new_size)
        else:
            # RES_RETRY
            pass

        return result, reason
Example #29
    def pre_op(self, cluster_id, action):
        """The hook function that is executed before the action.

        The checking result is stored in the ``data`` property of the action
        object rather than returned directly from the function.

        :param cluster_id: The ID of the target cluster.
        :param action: Action instance against which the policy is being
                       checked.
        :return: None.
        """

        # Use action input if count is provided
        count = action.inputs.get('count', None)
        current = db_api.node_count_by_cluster(action.context, cluster_id)
        if count is None:
            # count not specified, calculate it
            count = self._calculate_adjustment_count(current)

        # Count must be positive value
        try:
            count = utils.parse_int_param('count', count, allow_zero=False)
        except exception.InvalidParameter:
            action.data.update({
                'status': base.CHECK_ERROR,
                'reason': _("Invalid count (%(c)s) for action '%(a)s'."
                            ) % {'c': count, 'a': action.action}
            })
            action.store(action.context)
            return

        # Check size constraints
        cluster = db_api.cluster_get(action.context, cluster_id)
        if action.action == consts.CLUSTER_SCALE_IN:
            if self.best_effort:
                count = min(count, current - cluster.min_size)
            result = su.check_size_params(cluster, current - count,
                                          strict=not self.best_effort)
        else:
            if self.best_effort:
                count = min(count, cluster.max_size - current)
            result = su.check_size_params(cluster, current + count,
                                          strict=not self.best_effort)

        if result:
            # failed validation
            pd = {
                'status': base.CHECK_ERROR,
                'reason': result
            }
        else:
            # passed validation
            pd = {
                'status': base.CHECK_OK,
                'reason': _('Scaling request validated.'),
            }
            if action.action == consts.CLUSTER_SCALE_IN:
                pd['deletion'] = {'count': count}
            else:
                pd['creation'] = {'count': count}

        action.data.update(pd)
        action.store(action.context)

        return
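When best_effort is enabled, the policy clamps the requested count to the room actually available instead of rejecting the request outright. A quick worked example with assumed numbers:

    current, min_size, max_size = 12, 10, 20

    # CLUSTER_SCALE_IN with best_effort: never dip below min_size
    count = min(5, current - min_size)   # min(5, 2) -> 2
    assert count == 2

    # CLUSTER_SCALE_OUT with best_effort: never exceed max_size
    count = min(8, max_size - current)   # min(8, 8) -> 8
    assert count == 8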
Example #30
    def pre_op(self, cluster_id, action):
        """The hook function that is executed before the action.

        The checking result is stored in the ``data`` property of the action
        object rather than returned directly from the function.

        :param cluster_id: The ID of the target cluster.
        :param action: Action instance against which the policy is being
                       checked.
        :return: None.
        """

        # check cooldown
        last_op = action.inputs.get('last_op', None)
        if last_op and not timeutils.is_older_than(last_op, self.cooldown):
            action.data.update({
                'status': base.CHECK_ERROR,
                'reason': _('Policy %s cooldown is still '
                            'in progress.') % self.id
            })
            action.store(action.context)
            return

        # Use action input if count is provided
        count_value = action.inputs.get('count', None)
        cluster = action.entity
        current = len(cluster.nodes)

        if count_value is None:
            # count not specified, calculate it
            count_value = self._calculate_adjustment_count(current)

        # Count must be positive value
        success, count = utils.get_positive_int(count_value)
        if not success:
            action.data.update({
                'status': base.CHECK_ERROR,
                'reason': _("Invalid count (%(c)s) for action '%(a)s'.") % {
                    'c': count_value,
                    'a': action.action
                }
            })
            action.store(action.context)
            return

        # Check size constraints
        max_size = cluster.max_size
        if max_size == -1:
            max_size = cfg.CONF.max_nodes_per_cluster
        if action.action == consts.CLUSTER_SCALE_IN:
            if self.best_effort:
                count = min(count, current - cluster.min_size)
            result = su.check_size_params(cluster,
                                          current - count,
                                          strict=not self.best_effort)
        else:
            if self.best_effort:
                count = min(count, max_size - current)
            result = su.check_size_params(cluster,
                                          current + count,
                                          strict=not self.best_effort)

        if result:
            # failed validation
            pd = {'status': base.CHECK_ERROR, 'reason': result}
        else:
            # passed validation
            pd = {
                'status': base.CHECK_OK,
                'reason': _('Scaling request validated.'),
            }
            if action.action == consts.CLUSTER_SCALE_IN:
                pd['deletion'] = {'count': count}
            else:
                pd['creation'] = {'count': count}

        action.data.update(pd)
        action.store(action.context)

        return
Example #31
    def do_del_nodes(self):
        """Handler for the CLUSTER_DEL_NODES action.

        :returns: A tuple containing the result and the corresponding reason.
        """
        # Use policy decision if any, or fall back to defaults
        destroy_after_deletion = self.inputs.get('destroy_after_deletion',
                                                 False)
        grace_period = 0
        reduce_desired_capacity = True
        pd = self.data.get('deletion', None)
        if pd is not None:
            destroy_after_deletion = pd.get('destroy_after_deletion', False)
            grace_period = pd.get('grace_period', 0)
            reduce_desired_capacity = pd.get('reduce_desired_capacity', True)

        data = {
            'deletion': {
                'destroy_after_deletion': destroy_after_deletion,
                'grace_period': grace_period,
                'reduce_desired_capacity': reduce_desired_capacity,
            }
        }
        self.data.update(data)
        nodes = self.inputs.get('candidates', [])

        node_ids = copy.deepcopy(nodes)
        errors = []
        for node_id in node_ids:
            node = no.Node.get(self.context, node_id)

            # The return value is None if node not found
            if not node:
                errors.append(node_id)
                continue

            if not node.cluster_id or node.cluster_id != self.target:
                nodes.remove(node_id)

        if len(errors) > 0:
            msg = "Nodes not found: %s." % errors
            return self.RES_ERROR, msg

        reason = 'Completed deleting nodes.'
        if len(nodes) == 0:
            return self.RES_OK, reason

        # check the size constraint
        current = no.Node.count_by_cluster(self.context, self.target)
        desired = current - len(nodes)
        res = scaleutils.check_size_params(self.entity, desired, None, None,
                                           True)
        if res:
            return self.RES_ERROR, res

        # sleep period
        self._sleep(grace_period)
        result, new_reason = self._delete_nodes(nodes)

        params = {}
        if result != self.RES_OK:
            reason = new_reason
        if reduce_desired_capacity:
            params['desired_capacity'] = desired

        self.entity.eval_status(self.context, consts.CLUSTER_DEL_NODES,
                                **params)

        return result, reason
Example #32
    def do_add_nodes(self):
        """Handler for the CLUSTER_ADD_NODES action.

        TODO(anyone): handle placement data

        :returns: A tuple containing the result and the corresponding reason.
        """
        node_ids = self.inputs.get('nodes')
        errors = []
        nodes = []
        for nid in node_ids:
            node = no.Node.get(self.context, nid)
            if not node:
                errors.append('Node %s is not found.' % nid)
                continue

            if node.cluster_id:
                errors.append('Node %(n)s is already owned by cluster '
                              '%(c)s.' % {'n': nid, 'c': node.cluster_id})
                continue

            if node.status != consts.NS_ACTIVE:
                errors.append('Node %s is not in ACTIVE status.' % nid)
                continue

            nodes.append(node)

        if len(errors) > 0:
            return self.RES_ERROR, '\n'.join(errors)

        reason = 'Completed adding nodes.'
        # check the size constraint
        current = no.Node.count_by_cluster(self.context, self.target)
        desired = current + len(node_ids)
        res = scaleutils.check_size_params(self.entity, desired, None, None,
                                           True)
        if res:
            return self.RES_ERROR, res

        child = []
        for node in nodes:
            nid = node.id
            kwargs = {
                'name': 'node_join_%s' % nid[:8],
                'cause': consts.CAUSE_DERIVED,
                'inputs': {
                    'cluster_id': self.target
                },
            }
            action_id = base.Action.create(self.context, nid, consts.NODE_JOIN,
                                           **kwargs)
            child.append(action_id)

        if child:
            dobj.Dependency.create(self.context, [c for c in child], self.id)
            for cid in child:
                ao.Action.update(self.context, cid,
                                 {'status': base.Action.READY})
            dispatcher.start_action()

        # Wait for dependent action if any
        result, new_reason = self._wait_for_dependents()
        if result != self.RES_OK:
            reason = new_reason
        else:
            self.entity.eval_status(self.context,
                                    consts.CLUSTER_ADD_NODES,
                                    desired_capacity=desired)
            self.outputs['nodes_added'] = node_ids
            creation = self.data.get('creation', {})
            creation['nodes'] = node_ids
            self.data['creation'] = creation
            for node in nodes:
                obj = node_mod.Node.load(self.context, db_node=node)
                self.entity.add_node(obj)

        return result, reason
Example #33
    def do_scale_in(self):
        """Handler for the CLUSTER_SCALE_IN action.

        :returns: A tuple containing the result and the corresponding reason.
        """
        # We use policy data if any, or else the count is set to 1 as default.
        pd = self.data.get('deletion', None)
        if pd is not None:
            count = pd.get('count', 1)
            candidates = pd.get('candidates', [])
        else:
            # If no scaling policy is attached, use the input count directly
            count = self.inputs.get('count', 1)
            candidates = []

        if count <= 0:
            reason = _('Invalid count (%s) for scaling in.') % count
            return self.RES_ERROR, reason

        # check provided params against current properties
        # desired is checked when strict is True
        curr_size = len(self.cluster.nodes)
        if count > curr_size:
            LOG.warning(_('Trimming count (%(count)s) to current cluster '
                          'size (%(curr)s) for scaling in'),
                        {'count': count, 'curr': curr_size})
            count = curr_size
        new_size = curr_size - count

        result = scaleutils.check_size_params(self.cluster, new_size, None,
                                              None, False)
        if result != '':
            return self.RES_ERROR, result

        # Choose victims randomly
        if len(candidates) == 0:
            ids = [node.id for node in self.cluster.nodes]
            i = count
            while i > 0:
                r = random.randrange(len(ids))
                candidates.append(ids[r])
                ids.remove(ids[r])
                i = i - 1

        # The policy data may contain destroy flag and grace period option
        result, reason = self._delete_nodes(candidates)

        if result == self.RES_OK:
            reason = _('Cluster scaling succeeded.')
            # TODO(anyone): make update to desired capacity customizable
            self.cluster.set_status(self.context,
                                    self.cluster.ACTIVE,
                                    reason,
                                    desired_capacity=new_size)
        elif result in [self.RES_CANCEL, self.RES_TIMEOUT, self.RES_ERROR]:
            self.cluster.set_status(self.context, self.cluster.ERROR, reason)
        else:
            # RES_RETRY
            pass

        return result, reason
Example #34
    def pre_op(self, cluster_id, action):
        """The hook function that is executed before the action.

        The checking result is stored in the ``data`` property of the action
        object rather than returned directly from the function.

        :param cluster_id: The ID of the target cluster.
        :param action: Action instance against which the policy is being
                       checked.
        :return: None.
        """

        # Use action input if count is provided
        count = action.inputs.get('count', None)
        current = db_api.node_count_by_cluster(action.context, cluster_id)
        if count is None:
            # count not specified, calculate it
            count = self._calculate_adjustment_count(current)

        # Count must be positive value
        try:
            count = utils.parse_int_param('count', count, allow_zero=False)
        except exception.InvalidParameter:
            action.data.update({
                'status': base.CHECK_ERROR,
                'reason': _("Invalid count (%(c)s) for action '%(a)s'.") % {
                    'c': count,
                    'a': action.action
                }
            })
            action.store(action.context)
            return

        # Check size constraints
        cluster = db_api.cluster_get(action.context, cluster_id)
        if action.action == consts.CLUSTER_SCALE_IN:
            if self.best_effort:
                count = min(count, current - cluster.min_size)
            result = su.check_size_params(cluster,
                                          current - count,
                                          strict=not self.best_effort)
        else:
            if self.best_effort:
                count = min(count, cluster.max_size - current)
            result = su.check_size_params(cluster,
                                          current + count,
                                          strict=not self.best_effort)

        if result:
            # failed validation
            pd = {'status': base.CHECK_ERROR, 'reason': result}
        else:
            # passed validation
            pd = {
                'status': base.CHECK_OK,
                'reason': _('Scaling request validated.'),
            }
            if action.action == consts.CLUSTER_SCALE_IN:
                pd['deletion'] = {'count': count}
            else:
                pd['creation'] = {'count': count}

        action.data.update(pd)
        action.store(action.context)

        return
Example #35
    def do_scale_in(self):
        """Handler for the CLUSTER_SCALE_IN action.

        :returns: A tuple containing the result and the corresponding reason.
        """
        self.cluster.set_status(self.context, self.cluster.RESIZING,
                                'Cluster scale in started.')
        # We use policy data if any; a deletion policy and a scaling policy
        # might be attached.
        pd = self.data.get('deletion', None)
        grace_period = 0
        if pd:
            grace_period = pd.get('grace_period', 0)
            candidates = pd.get('candidates', [])
            # if scaling policy is attached, get 'count' from action data
            count = len(candidates) or pd['count']
        else:
            # If no scaling policy is attached, use the input count directly
            candidates = []
            count = self.inputs.get('count', 1)
            try:
                count = utils.parse_int_param('count', count,
                                              allow_zero=False)
            except exception.InvalidParameter:
                reason = _('Invalid count (%s) for scaling in.') % count
                status_reason = _('Cluster scaling failed: %s') % reason
                self.cluster.set_status(self.context, self.cluster.ACTIVE,
                                        status_reason)
                return self.RES_ERROR, reason

        # check provided params against current properties
        # desired is checked when strict is True
        curr_size = len(self.cluster.nodes)
        if count > curr_size:
            LOG.warning(_('Trimming count (%(count)s) to current cluster '
                          'size (%(curr)s) for scaling in'),
                        {'count': count, 'curr': curr_size})
            count = curr_size
        new_size = curr_size - count

        result = scaleutils.check_size_params(self.cluster, new_size,
                                              None, None, True)
        if result:
            status_reason = _('Cluster scaling failed: %s') % result
            self.cluster.set_status(self.context, self.cluster.ACTIVE,
                                    status_reason)
            return self.RES_ERROR, result

        # Choose victims randomly
        if len(candidates) == 0:
            candidates = scaleutils.nodes_by_random(self.cluster.nodes, count)

        self._sleep(grace_period)
        # The policy data may contain destroy flag and grace period option
        result, reason = self._delete_nodes(candidates)

        if result == self.RES_OK:
            reason = _('Cluster scaling succeeded.')
            self.cluster.set_status(self.context, self.cluster.ACTIVE, reason,
                                    desired_capacity=new_size)
        elif result in [self.RES_CANCEL, self.RES_TIMEOUT, self.RES_ERROR]:
            self.cluster.set_status(self.context, self.cluster.ERROR, reason,
                                    desired_capacity=new_size)
        else:
            # RES_RETRY
            pass

        return result, reason