Example #1
    def do_detach_policy(self):
        """Handler for the CLUSTER_DETACH_POLICY action.

        :returns: A tuple containing the result and the corresponding reason.
        """
        policy_id = self.inputs.get('policy_id', None)
        if not policy_id:
            return self.RES_ERROR, _('Policy not specified.')

        # Check if the policy is attached to the cluster
        found = False
        for existing in self.cluster.policies:
            if existing.id == policy_id:
                found = True
                break
        if not found:
            return self.RES_OK, _('Policy not attached.')

        policy = policy_mod.Policy.load(self.context, policy_id)
        res, data = policy.detach(self.cluster)
        if not res:
            return self.RES_ERROR, data

        db_api.cluster_policy_detach(self.context, self.cluster.id, policy_id)

        self.cluster.remove_policy(policy)
        return self.RES_OK, _('Policy detached.')
Example #2
 def do_update(self):
     params = self.inputs
     res = self.node.do_update(self.context, params)
     if res:
         return self.RES_OK, _('Node updated successfully')
     else:
         return self.RES_ERROR, _('Node update failed')
Example #3
    def do_delete(self):
        """Handler for the CLUSTER_DELETE action.

        :returns: A tuple containing the result and the corresponding reason.
        """
        reason = _('Deletion in progress.')
        self.cluster.set_status(self.context, self.cluster.DELETING, reason)
        node_ids = [node.id for node in self.cluster.nodes]

        # For cluster delete, we delete the nodes
        data = {
            'deletion': {
                'destroy_after_deletion': True
            }
        }
        self.data.update(data)
        result, reason = self._delete_nodes(node_ids)

        if result == self.RES_OK:
            res = self.cluster.do_delete(self.context)
            if not res:
                return self.RES_ERROR, _('Cannot delete cluster object.')
        elif result == self.RES_CANCEL:
            self.cluster.set_status(self.context, self.cluster.ACTIVE, reason)
        elif result in [self.RES_TIMEOUT, self.RES_ERROR]:
            self.cluster.set_status(self.context, self.cluster.WARNING, reason)
        else:
            # RETRY
            pass

        return result, reason
Example #4
    def update(self, req, cluster_id, body):
        '''Update an existing cluster with new parameters.'''

        cluster_data = body.get('cluster')
        if cluster_data is None:
            raise exc.HTTPBadRequest(_("Malformed request data, missing "
                                       "'cluster' key in request body."))

        size = cluster_data.get(consts.CLUSTER_SIZE)
        if size is not None:
            msg = _("Updating cluster size is not supported, please use "
                    "cluster scaling operations instead.")
            raise exc.HTTPBadRequest(msg)

        name = cluster_data.get(consts.CLUSTER_NAME)
        profile_id = cluster_data.get(consts.CLUSTER_PROFILE)
        parent = cluster_data.get(consts.CLUSTER_PARENT)
        tags = cluster_data.get(consts.CLUSTER_TAGS)
        timeout = cluster_data.get(consts.CLUSTER_TIMEOUT)
        if timeout is not None:
            timeout = utils.parse_int_param(consts.CLUSTER_TIMEOUT, timeout)

        self.rpc_client.cluster_update(req.context, cluster_id, name,
                                       profile_id, parent, tags, timeout)

        raise exc.HTTPAccepted()
Example #5
    def update(self, req, policy_id, body):
        policy_data = body.get('policy', None)
        if policy_data is None:
            raise exc.HTTPBadRequest(_("Malformed request data, missing "
                                       "'policy' key in request body."))

        spec = policy_data.get(consts.POLICY_SPEC)
        if spec is not None:
            msg = _("Updating the spec of a policy is not supported because "
                    "it may cause state conflicts in engine.")
            raise exc.HTTPBadRequest(msg)

        name = policy_data.get(consts.POLICY_NAME, None)

        level = policy_data.get(consts.POLICY_LEVEL, None)
        if level is not None:
            level = utils.parse_int_param(consts.POLICY_LEVEL, level)

        cooldown = policy_data.get(consts.POLICY_COOLDOWN, None)
        if cooldown is not None:
            cooldown = utils.parse_int_param(consts.POLICY_COOLDOWN, cooldown)

        policy = self.rpc_client.policy_update(req.context, policy_id, name,
                                               level, cooldown)

        return {'policy': policy}
Example #6
    def do_create(self):
        """Handler for the NODE_CREATE action.

        :returns: A tuple containing the result and the corresponding reason.
        """
        if self.node.cluster_id and self.cause == base.CAUSE_RPC:
            # If node is created with target cluster specified,
            # check cluster size constraint
            cluster = cm.Cluster.load(self.context, self.node.cluster_id)
            result = scaleutils.check_size_params(
                cluster, cluster.desired_capacity + 1, None, None, True)

            if result:
                return self.RES_ERROR, result

        res = self.node.do_create(self.context)
        if res:
            if self.node.cluster_id and self.cause == base.CAUSE_RPC:
                # Update cluster desired_capacity if node creation succeeded
                cluster.desired_capacity += 1
                cluster.store(self.context)
                cluster.add_node(self.node)
            return self.RES_OK, _('Node created successfully.')
        else:
            return self.RES_ERROR, _('Node creation failed.')
Example #7
File: api.py Project: tengqm/senlin
def node_update(context, node_id, values):
    '''Update a node with new property values.

    :param node_id: ID of the node to be updated.
    :param values: A dictionary of values to be updated on the node.
    :raises NotFound: The specified node does not exist in database.
    '''
    session = _session(context)
    session.begin()

    node = session.query(models.Node).get(node_id)
    if not node:
        session.rollback()
        raise exception.NotFound(
            _('Attempt to update a node with id "%s" that does '
              'not exist failed.') % node_id)

    node.update(values)
    node.save(session)
    if 'status' in values and node.cluster_id is not None:
        cluster = session.query(models.Cluster).get(node.cluster_id)
        if cluster is not None:
            if values['status'] == 'ERROR':
                cluster.status = 'WARNING'
            if 'status_reason' in values:
                cluster.status_reason = _('Node %(node)s: %(reason)s') % {
                    'node': node.name, 'reason': values['status_reason']}
            cluster.save(session)
    session.commit()
Example #8
    def pre_op(self, cluster_id, action, policy_data):
        nodes = db_api.node_get_all_by_cluster(action.context, cluster_id)
        current_size = len(nodes)

        if self.adjustment_type == self.EXACT_CAPACITY:
            count = self.adjustment_number - current_size
        elif self.adjustment_type == self.CHANGE_IN_CAPACITY:
            count = self.adjustment_number
        elif self.adjustment_type == self.CHANGE_IN_PERCENTAGE:
            count = int((self.adjustment_number * current_size) / 100.0)
            if count < self.adjustment_min_step:
                count = self.adjustment_min_step
        if current_size + count > self.max_size:
            policy_data.status = base.CHECK_ERROR
            policy_data.reason = _('Attempted scaling exceeds maximum size')
        elif current_size + count < self.min_size:
            policy_data.status = base.CHECK_ERROR
            policy_data.reason = _('Attempted scaling exceeds minimum size')
        else:
            policy_data.status = base.CHECK_OK
            policy_data.reason = _('Scaling request validated')

        pd = {'count': count}
        if action.action == consts.CLUSTER_SCALE_OUT:
            if count < 0:
                LOG.warning(_LW('Requesting a scale out operation but scaling '
                                'policy generates a negative count.'))
            policy_data['creation'] = pd
        elif action.action == consts.CLUSTER_SCALE_IN:
            if count > 0:
                LOG.warning(_LW('Requesting a scale in operation but scaling '
                                'policy generates a positive count.'))
            policy_data['deletion'] = pd

        return policy_data
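
The count arithmetic in pre_op() above is easy to check in isolation. The following is a minimal standalone sketch (plain Python, not Senlin code) mirroring the three adjustment modes; the constants and the compute_count() helper are illustrative stand-ins for the policy attributes.

# Illustrative stand-ins for the policy's adjustment-type constants.
EXACT_CAPACITY = 'EXACT_CAPACITY'
CHANGE_IN_CAPACITY = 'CHANGE_IN_CAPACITY'
CHANGE_IN_PERCENTAGE = 'CHANGE_IN_PERCENTAGE'


def compute_count(adjustment_type, number, current_size, min_step=1):
    """Return the node-count delta for a scaling adjustment."""
    if adjustment_type == EXACT_CAPACITY:
        return number - current_size
    elif adjustment_type == CHANGE_IN_CAPACITY:
        return number
    elif adjustment_type == CHANGE_IN_PERCENTAGE:
        count = int((number * current_size) / 100.0)
        return count if count >= min_step else min_step
    raise ValueError('unknown adjustment type')


# With 10 nodes: a 25% change adds 2 nodes; an exact capacity of 8 is a
# delta of -2 (a scale-in); a raw change of +3 stays +3.
assert compute_count(CHANGE_IN_PERCENTAGE, 25, 10) == 2
assert compute_count(EXACT_CAPACITY, 8, 10) == -2
assert compute_count(CHANGE_IN_CAPACITY, 3, 10) == 3
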
Example #9
    def do_delete(self):
        """Handler for the NODE_DELETE action.

        :returns: A tuple containing the result and the corresponding reason.
        """
        if self.node.cluster_id and self.cause == base.CAUSE_RPC:
            # If node belongs to a cluster, check size constraint
            # before deleting it
            cluster = cluster_mod.Cluster.load(self.context,
                                               self.node.cluster_id)
            result = scaleutils.check_size_params(cluster,
                                                  cluster.desired_capacity - 1,
                                                  None, None, True)
            if result:
                return self.RES_ERROR, result

        res = self.node.do_delete(self.context)
        if res:
            if self.node.cluster_id and self.cause == base.CAUSE_RPC:
                # Update cluster desired_capacity if node deletion succeeded
                cluster.desired_capacity -= 1
                cluster.store(self.context)
                cluster.remove_node(self.node.id)
            return self.RES_OK, _('Node deleted successfully.')
        else:
            return self.RES_ERROR, _('Node deletion failed.')
Example #10
    def action(self, req, node_id, body=None):
        '''Perform specified action on a node.'''
        body = body or {}
        if len(body) == 0:
            raise exc.HTTPBadRequest(_('No action specified'))

        if len(body) > 1:
            raise exc.HTTPBadRequest(_('Multiple actions specified'))

        this_action = list(body.keys())[0]
        if this_action not in self.SUPPORTED_ACTIONS:
            msg = _('Unrecognized action "%s" specified') % this_action
            raise exc.HTTPBadRequest(msg)

        if this_action == self.NODE_JOIN:
            cluster_id = body.get(this_action).get('cluster_id')
            if cluster_id is None:
                raise exc.HTTPBadRequest(_('No cluster specified'))
            res = self.rpc_client.node_join(req.context, node_id, cluster_id)
        elif this_action == self.NODE_LEAVE:
            res = self.rpc_client.node_leave(req.context, node_id)
        else:
            raise exc.HTTPInternalServerError(_('Unexpected action "%s"') %
                                              this_action)

        return res
Example #11
    def do_recover(self, context, **options):
        """recover a node.

        This function is supposed to be invoked from a NODE_RECOVER action.
        """
        if not self.physical_id:
            return False

        self.set_status(context, self.RECOVERING,
                        reason=_('Recover in progress'))

        try:
            physical_id = profile_base.Profile.recover_object(context, self,
                                                              **options)
        except exception.ResourceStatusError as ex:
            self._handle_exception(context, 'recover', self.ERROR, ex)
            return False

        if not physical_id:
            self.set_status(context, self.ERROR, reason=_('Recover failed'))
            return False

        self.set_status(context, self.ACTIVE, reason=_('Recover succeeded'))
        if self.physical_id != physical_id:
            self.physical_id = physical_id
            self.store(context)

        return True
Example #12
    def test_generate_url(self, mock_service_get, mock_endpoint_get,
                          mock_init):
        mock_init.return_value = None
        mock_service_get.return_value = {
            'id': 'SENLIN_SERVICE_ID'
        }
        mock_endpoint_get.return_value = {
            'url': 'HTTP://HOST_IP:PORT/V1/$(tenant_id)s'
        }

        kwargs = {
            'id': 'WEBHOOK_ID',
            'name': 'test-webhook',
            'user': '******',
            'project': 'test-project',
            'domain': 'test-domain',
            'created_time': timeutils.utcnow(),
            'deleted_time': None,
            'credential': self.credential,
            'params': self.params
        }

        webhook = webhook_mod.Webhook('test-obj-id', 'test-obj-type',
                                      'test-action', **kwargs)
        key = 'test-key'
        res1, res2 = webhook.generate_url(key)

        expected_url = _('HTTP://HOST_IP:PORT/V1/%(tenant_id)s/webhooks/'
                         '%(webhook_id)s/trigger?key=%(key)s'
                         ) % {'tenant_id': 'test-project',
                              'webhook_id': webhook.id,
                              'key': six.text_type(key)}
        self.assertEqual(expected_url, res1)
        self.assertEqual(key, res2)

        # Senlin service not found
        mock_service_get.return_value = None
        ex = self.assertRaises(exception.ResourceNotFound,
                               webhook.generate_url, key)
        resource = _('service:type=clustering,name=senlin')
        msg = _('The resource (%(resource)s) could not be found.'
                ) % {'resource': resource}
        self.assertEqual(msg, six.text_type(ex))

        # Senlin endpoint not found
        mock_service_get.return_value = {
            'id': 'SENLIN_SERVICE_ID'
        }
        service_id = mock_service_get.return_value['id']
        mock_endpoint_get.return_value = None
        ex = self.assertRaises(exception.ResourceNotFound,
                               webhook.generate_url, key)
        resource = _('endpoint: service=%(service)s,region='
                     '%(region)s,visibility=%(interface)s'
                     ) % {'service': service_id,
                          'region': None,
                          'interface': 'public'}
        msg = _('The resource (%(resource)s) could not be found.'
                ) % {'resource': resource}
        self.assertEqual(msg, six.text_type(ex))
Example #13
    def _wait_for_dependents(self):
        """Wait for dependent actions to complete.

        :returns: A tuple containing the result and the corresponding reason.
        """
        status = self.get_status()
        reason = ''
        while status != self.READY:
            if status == self.FAILED:
                reason = _('%(action)s [%(id)s] failed') % {
                    'action': self.action, 'id': self.id[:8]}
                LOG.debug(reason)
                return self.RES_ERROR, reason

            if self.is_cancelled():
                # During this period, if cancel request comes, cancel this
                # operation immediately, then release the cluster lock
                reason = _('%(action)s [%(id)s] cancelled') % {
                    'action': self.action, 'id': self.id[:8]}
                LOG.debug(reason)
                return self.RES_CANCEL, reason

            if self.is_timeout():
                # Action timeout, return
                reason = _('%(action)s [%(id)s] timeout') % {
                    'action': self.action, 'id': self.id[:8]}
                LOG.debug(reason)
                return self.RES_TIMEOUT, reason

            # Continue waiting (with reschedule)
            scheduler.reschedule(self.id, 3)
            status = self.get_status()

        return self.RES_OK, 'All dependents ended with success'
Example #14
    def detach_policy(self, ctx, policy_id):
        """Detach policy object from the cluster.

        Note this method MUST be called with the cluster locked.

        :param ctx: A context for DB operation.
        :param policy_id: ID of the policy object.

        :returns: A tuple containing a boolean result and a reason string.
        """
        # Check if the policy is attached to the cluster
        found = None
        for existing in self.policies:
            if existing.id == policy_id:
                found = existing
                break
        if found is None:
            return False, _('Policy not attached.')

        policy = policy_base.Policy.load(ctx, policy_id)
        res, reason = policy.detach(self)
        if not res:
            return res, reason

        db_api.cluster_policy_detach(ctx, self.id, policy_id)
        self.rt['policies'].remove(found)

        return True, _('Policy detached.')
Example #15
    def generate_url(self, key):
        """Generate webhook URL with proper format.

        :param key: Key string to be used for decrypting the credentials.
        """
        senlin_creds = context.get_service_context()
        kc = driver_base.SenlinDriver().identity(senlin_creds)
        senlin_service = kc.service_get('clustering', 'senlin')
        if not senlin_service:
            resource = _('service:type=clustering,name=senlin')
            raise exception.ResourceNotFound(resource=resource)
        senlin_service_id = senlin_service['id']
        region = cfg.CONF.region_name_for_services
        endpoint = kc.endpoint_get(senlin_service_id, region, 'public')
        if not endpoint:
            resource = _('endpoint: service=%(service)s,region='
                         '%(region)s,visibility=%(interface)s'
                         ) % {'service': senlin_service_id,
                              'region': region,
                              'interface': 'public'}
            raise exception.ResourceNotFound(resource=resource)

        endpoint_url = endpoint['url'].replace('$(tenant_id)s', self.project)
        location = endpoint_url + '/webhooks/%s/trigger' % self.id
        location += "?%s" % parse.urlencode({'key': key})

        return location, key
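
The final URL assembly in generate_url() is just string substitution plus query encoding. A small sketch with made-up endpoint, project and webhook values:

from urllib import parse

# Made-up values mirroring what generate_url() works with.
endpoint_template = 'http://host:8778/v1/$(tenant_id)s'
project = 'test-project'
webhook_id = 'WEBHOOK_ID'
key = 'test-key'

endpoint_url = endpoint_template.replace('$(tenant_id)s', project)
location = endpoint_url + '/webhooks/%s/trigger' % webhook_id
location += '?%s' % parse.urlencode({'key': key})

# -> http://host:8778/v1/test-project/webhooks/WEBHOOK_ID/trigger?key=test-key
print(location)
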
Example #16
def load_paste_app(app_name=None):
    """Builds and returns a WSGI app from a paste config file.

    We assume the last config file specified in the supplied ConfigOpts
    object is the paste config file.

    :param app_name: name of the application to load

    :raises RuntimeError when config file cannot be located or application
            cannot be loaded from config file
    """
    if app_name is None:
        app_name = cfg.CONF.prog

    conf_file = _get_deployment_config_file()
    if conf_file is None:
        raise RuntimeError(_("Unable to locate config file"))

    try:
        app = paste_deploy_app(conf_file, app_name, cfg.CONF)

        # Log the options used when starting if we're in debug mode...
        if cfg.CONF.debug:
            cfg.CONF.log_opt_values(logging.getLogger(app_name),
                                    std_logging.DEBUG)

        return app
    except (LookupError, ImportError) as e:
        raise RuntimeError(_("Unable to load %(app_name)s from "
                             "configuration file %(conf_file)s."
                             "\nGot: %(e)r") % {'app_name': app_name,
                                                'conf_file': conf_file,
                                                'e': e})
Example #17
    def _execute(self, **kwargs):
        # do pre-action policy checking
        self.policy_check(self.cluster.id, 'BEFORE')
        if self.data['status'] != policy_mod.CHECK_OK:
            reason = _('Policy check failure: %s') % self.data['reason']
            EVENT.error(self.context, self.cluster, self.action, 'Failed',
                        reason)
            return self.RES_ERROR, reason

        result = self.RES_OK
        action_name = self.action.lower()
        method_name = action_name.replace('cluster', 'do')
        method = getattr(self, method_name, None)
        if method is None:
            error = _('Unsupported action: %s.') % self.action
            EVENT.error(self.context, self.cluster, self.action, 'Failed',
                        error)
            return self.RES_ERROR, error

        result, reason = method()

        # do post-action policy checking
        if result == self.RES_OK:
            self.policy_check(self.cluster.id, 'AFTER')
            if self.data['status'] != policy_mod.CHECK_OK:
                error = _('Policy check failure: %s') % self.data['reason']
                EVENT.error(self.context, self.cluster, self.action, 'Failed',
                            error)
                return self.RES_ERROR, error

        return result, reason
Example #18
    def _sanitize_policy(self, data):
        """Validate dict body of policy attach or update.

        :param dict data: A dictionary containing the properties of the policy
                          to be attached/updated including the policy ID.
        :returns: A sanitized dict containing the policy properties.
        :raises: :class:`~webob.exception.HTTPBadRequest` if the policy dict
                 contains invalid property values.
        """
        if not isinstance(data, dict):
            msg = _("The data provided is not a map.")
            raise exc.HTTPBadRequest(msg)

        if consts.CP_POLICY_ID not in data:
            msg = _("The 'policy_id' field is missing in the request.")
            raise exc.HTTPBadRequest(msg)

        if consts.CP_ENABLED in data:
            enabled = data.get(consts.CP_ENABLED)
            try:
                enabled = utils.parse_bool_param(consts.CP_ENABLED, enabled)
            except senlin_exc.InvalidParameter as ex:
                raise exc.HTTPBadRequest(six.text_type(ex))
            data[consts.CP_ENABLED] = enabled

        return data
Example #19
    def detach(self, cluster):
        """Routine to be called when the policy is detached from a cluster.

        :param cluster: The cluster from which the policy is to be detached.
        :returns: When the operation was successful, returns a tuple of
                  (True, data) where the data contains references to the
                  resources created; otherwise returns a tuple of (False,
                  error) where the error contains an error message.
        """

        reason = _('Servergroup resource deletion succeeded.')

        ctx = context.get_admin_context()
        binding = cpo.ClusterPolicy.get(ctx, cluster.id, self.id)
        if not binding or not binding.data:
            return True, reason

        policy_data = self._extract_policy_data(binding.data)
        if not policy_data:
            return True, reason

        group_id = policy_data.get('servergroup_id', None)
        inherited_group = policy_data.get('inherited_group', False)

        if group_id and not inherited_group:
            try:
                self.nova(cluster).delete_server_group(group_id)
            except Exception as ex:
                msg = _('Failed in deleting servergroup.')
                LOG.exception(_LE('%(msg)s: %(ex)s') % {
                    'msg': msg, 'ex': six.text_type(ex)})
                return False, msg

        return True, reason
Example #20
def dependency_add(context, depended, dependent):
    if isinstance(depended, list) and isinstance(dependent, list):
        raise exception.NotSupport(
            _('Multiple dependencies between lists not support'))

    with session_for_write() as session:
        if isinstance(depended, list):   # e.g. D depends on A,B,C
            for d in depended:
                r = models.ActionDependency(depended=d, dependent=dependent)
                session.add(r)

            query = session.query(models.Action).filter_by(id=dependent)
            query.update({'status': consts.ACTION_WAITING,
                          'status_reason': _('Waiting for depended actions.')},
                         synchronize_session='fetch')
            return

        # At this point only 'dependent' may be a list; normalize it
        # into a list if it is not one already
        if not isinstance(dependent, list):  # e.g. B,C,D depend on A
            dependents = [dependent]
        else:
            dependents = dependent

        for d in dependents:
            r = models.ActionDependency(depended=depended, dependent=d)
            session.add(r)

        q = session.query(models.Action).filter(
            models.Action.id.in_(dependents))
        q.update({'status': consts.ACTION_WAITING,
                  'status_reason': _('Waiting for depended actions.')},
                 synchronize_session='fetch')
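
dependency_add() accepts either a list of depended actions (one dependent waits on many) or a list of dependents (many wait on one), never both. A pure-Python sketch of the row expansion, independent of the database layer; expand_dependencies() is an illustrative helper, not part of Senlin:

def expand_dependencies(depended, dependent):
    """Return (depended, dependent) pairs, mirroring dependency_add()."""
    if isinstance(depended, list) and isinstance(dependent, list):
        raise ValueError('Multiple dependencies between lists not supported')

    if isinstance(depended, list):                # e.g. D depends on A, B, C
        return [(d, dependent) for d in depended]

    dependents = dependent if isinstance(dependent, list) else [dependent]
    return [(depended, d) for d in dependents]    # e.g. B, C, D depend on A


assert expand_dependencies(['A', 'B', 'C'], 'D') == [('A', 'D'), ('B', 'D'),
                                                     ('C', 'D')]
assert expand_dependencies('A', ['B', 'C']) == [('A', 'B'), ('A', 'C')]
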
Example #21
    def _update_basic_properties(self, obj, new_profile):
        '''Update basic server properties including name and metadata.'''

        # Update server metadata
        metadata = self.properties[self.METADATA]
        new_metadata = new_profile.properties[self.METADATA]
        if new_metadata != metadata:
            if new_metadata is None:
                new_metadata = {}
            try:
                self.nova(obj).server_metadata_update(self.server_id,
                                                      new_metadata)
            except Exception as ex:
                LOG.exception(_('Failed in updating server metadata: %s'),
                              six.text_type(ex))
                return False

        # Update server name
        name = self.properties[self.NAME]
        new_name = new_profile.properties[self.NAME]
        if new_name != name:
            attrs = {'name': new_name if new_name else obj.name}
            try:
                self.nova(obj).server_update(self.server_id, **attrs)
            except Exception as ex:
                LOG.exception(_('Failed in updating server name: %s'),
                              six.text_type(ex))
                return False

        return True
Example #22
    def do_rebuild(self, obj):
        if not obj.physical_id:
            return False

        self.server_id = obj.physical_id

        try:
            server = self.nova(obj).server_get(self.server_id)
        except Exception as ex:
            LOG.exception(_('Failed at getting server: %s'),
                          six.text_type(ex))
            return False

        if server is None or server.image is None:
            return False

        image_id = server.image['id']
        admin_pass = self.properties.get(self.ADMIN_PASS)

        try:
            self.nova(obj).server_rebuild(self.server_id, image_id,
                                          self.properties.get(self.NAME),
                                          admin_pass)
            self.nova(obj).wait_for_server(self.server_id, 'ACTIVE')
        except Exception as ex:
            LOG.exception(_('Failed at rebuilding server: %s'),
                          six.text_type(ex))
            return False

        return True
Example #23
    def action(self, req, node_id, body=None):
        """Perform specified action on a node."""

        body = body or {}
        if len(body) == 0:
            raise exc.HTTPBadRequest(_('No action specified.'))

        if len(body) > 1:
            raise exc.HTTPBadRequest(_('Multiple actions specified.'))

        this_action = list(body.keys())[0]
        if this_action not in self.SUPPORTED_ACTIONS:
            msg = _('Unrecognized action "%s" specified') % this_action
            raise exc.HTTPBadRequest(msg)

        if this_action == self.NODE_CHECK:
            params = body.get(this_action)
            if not isinstance(params, dict):
                msg = _("The params provided is not a map.")
                raise exc.HTTPBadRequest(msg)
            res = self.rpc_client.node_check(req.context, node_id,
                                             params=params)
        else:    # self.NODE_RECOVER
            params = body.get(this_action)
            if not isinstance(params, dict):
                msg = _("The params provided is not a map.")
                raise exc.HTTPBadRequest(msg)
            res = self.rpc_client.node_recover(req.context, node_id,
                                               params=params)

        location = {'location': '/actions/%s' % res['action']}
        res.update(location)
        return res
Example #24
    def test_cluster_add_nodes_node_already_owned(self, notify):
        c1 = self.eng.cluster_create(self.ctx, 'c-1', 0, self.profile['id'])
        cid1 = c1['id']
        c2 = self.eng.cluster_create(self.ctx, 'c-2', 0, self.profile['id'])
        cid2 = c2['id']
        nodes1 = self._prepare_nodes(self.ctx, count=1, cluster_id=cid1)
        nodes2 = self._prepare_nodes(self.ctx, count=1, cluster_id=cid2)

        ex = self.assertRaises(rpc.ExpectedException,
                               self.eng.cluster_add_nodes,
                               self.ctx, cid1, nodes1)

        # adding from the same cluster is not allowed
        self.assertEqual(exception.NodeNotOrphan, ex.exc_info[0])
        msg = _("Nodes %s owned by other cluster, need to delete them from "
                "those clusters first.") % nodes1
        self.assertEqual(msg, six.text_type(ex.exc_info[1]))

        ex = self.assertRaises(rpc.ExpectedException,
                               self.eng.cluster_add_nodes,
                               self.ctx, cid1, nodes2)

        # adding from a different cluster is not allowed either
        self.assertEqual(exception.NodeNotOrphan, ex.exc_info[0])
        msg = _("Nodes %s owned by other cluster, need to delete them from "
                "those clusters first.") % nodes2
        self.assertEqual(msg, six.text_type(ex.exc_info[1]))
Example #25
    def execute(self, **kwargs):
        '''Wrapper of action execution.

        This is mainly a wrapper that executes an action with cluster lock
        acquired.

        :return: A tuple (res, reason) that indicates whether the execution
                 was a success and why if it wasn't a success.
        '''

        try:
            cluster = cluster_mod.Cluster.load(self.context, self.target)
        except exception.NotFound:
            reason = _('Cluster %(id)s not found') % {'id': self.target}
            LOG.error(reason)
            return self.RES_ERROR, reason

        # Try to lock the cluster before doing the real operation
        forced = (self.action == self.CLUSTER_DELETE)
        res = senlin_lock.cluster_lock_acquire(cluster.id, self.id,
                                               senlin_lock.CLUSTER_SCOPE,
                                               forced)
        if not res:
            return self.RES_ERROR, _('Failed locking cluster')

        try:
            res, reason = self._execute(cluster)
        finally:
            senlin_lock.cluster_lock_release(cluster.id, self.id,
                                             senlin_lock.CLUSTER_SCOPE)

        return res, reason
Example #26
 def _check_plugin_name(self, plugin_type, name):
     if name is None or name == "":
         msg = _('%s type name not specified') % plugin_type
         raise exception.InvalidPlugin(message=msg)
     elif not isinstance(name, six.string_types):
         msg = _('%s type name is not a string') % plugin_type
         raise exception.InvalidPlugin(message=msg)
Example #27
    def _wait_for_dependents(self):
        self.get_status()
        reason = ''
        while self.status != self.READY:
            if self.status == self.FAILED:
                reason = _('%(action)s [%(id)s] failed due to dependent '
                           'action failure') % {'action': self.action,
                                                'id': self.id}
                LOG.debug(reason)
                return self.RES_ERROR, reason

            if self.is_cancelled():
                # During this period, if cancel request come, cancel this
                # cluster operation immediately, then release the cluster
                # lock and return.
                reason = _('%(action)s %(id)s cancelled') % {
                    'action': self.action, 'id': self.id}
                LOG.debug(reason)
                return self.RES_CANCEL, reason

            if self.is_timeout():
                # Action timeout, return
                reason = _('%(action)s %(id)s timeout') % {
                    'action': self.action, 'id': self.id}
                LOG.debug(reason)
                return self.RES_TIMEOUT, reason

            # Continue waiting (with reschedule)
            scheduler.reschedule(self, 1)
            self.get_status()

        return self.RES_OK, 'All dependents ended with success'
Example #28
def truncate_desired(cluster, desired, min_size, max_size):
    '''Do truncation of desired capacity for non-strict cases.'''

    if min_size is not None and desired < min_size:
        desired = min_size
        LOG.debug(_("Truncating shrinkage to specified min_size (%s).")
                  % desired)

    if min_size is None and desired < cluster.min_size:
        desired = cluster.min_size
        LOG.debug(_("Truncating shrinkage to cluster's min_size (%s).")
                  % desired)

    if max_size is not None and max_size > 0 and desired > max_size:
        desired = max_size
        LOG.debug(_("Truncating growth to specified max_size (%s).")
                  % desired)

    if (max_size is None and desired > cluster.max_size and
            cluster.max_size > 0):
        desired = cluster.max_size
        LOG.debug(_("Truncating growth to cluster's max_size (%s).")
                  % desired)

    return desired
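
The net effect of truncate_desired() is to clamp the requested capacity into whichever bounds apply, explicit arguments first, then the cluster's own. A minimal sketch reduced to the clamp itself, with hypothetical bounds:

def clamp_desired(desired, lower, upper):
    """Simplified form of the truncation above: clamp into [lower, upper]."""
    if lower is not None and desired < lower:
        desired = lower
    if upper is not None and upper > 0 and desired > upper:
        desired = upper
    return desired


# With hypothetical cluster bounds min_size=2, max_size=10:
assert clamp_desired(1, 2, 10) == 2     # shrinkage truncated to min_size
assert clamp_desired(50, 2, 10) == 10   # growth truncated to max_size
assert clamp_desired(50, 2, 20) == 20   # an explicit max_size takes precedence
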
Example #29
File: sdk.py Project: tengqm/senlin
def parse_exception(ex):
    '''Parse exception code and yield useful information.

    :param ex: The exception to be parsed.
    '''
    if isinstance(ex, exc.HttpException):
        record = jsonutils.loads(ex.details)
    elif isinstance(ex, reqexc.RequestException):
        # Exceptions that are not captured by SDK
        code = ex.message[1].errno
        record = {
            'error': {
                'code': code,
                'message': ex.message[0],
            }
        }
    else:
        print(_('Unknown exception: %s') % ex)
        return

    try:
        code = record['error'].get('code', None)
        if code is None:
            code = record['code']
            record['error']['code'] = code
    except KeyError as err:
        print(_('Malformed exception record, missing field "%s"') % err)
        print(_('Original error record: %s') % record)
        return

    if code in _EXCEPTION_MAP:
        inst = _EXCEPTION_MAP.get(code)
        return inst(record)
    else:
        return HTTPException(record)
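
The normalization step in the middle of parse_exception() copies a top-level 'code' into record['error'] when the backend did not nest it there. A small sketch with a made-up record showing that fall-back:

# Made-up error record where the code sits at the top level only.
record = {'code': 404, 'error': {'message': 'The policy could not be found.'}}

code = record['error'].get('code', None)
if code is None:
    code = record['code']            # fall back to the top-level code
    record['error']['code'] = code

assert record['error']['code'] == 404
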
Example #30
    def update_policy(self, ctx, policy_id, **values):
        """Update a policy that is already attached to a cluster.

        Note this method must be called with the cluster locked.

        :param ctx: A context for DB operation.
        :param policy_id: ID of the policy object.
        :param values: Optional dictionary containing new binding properties.

        :returns: A tuple containing a boolean result and a string reason.
        """
        # Check if policy has already been attached
        found = False
        for existing in self.policies:
            if existing.id == policy_id:
                found = True
                break
        if not found:
            return False, _('Policy not attached.')

        enabled = values.get('enabled', None)
        if enabled is None:
            return True, _('No update is needed.')

        params = {'enabled': bool(enabled)}

        db_api.cluster_policy_update(ctx, self.id, policy_id, params)
        return True, _('Policy updated.')
Example #31
 def target(self):
     if consts.ACTION_TARGET not in self.data:
         raise exc.HTTPBadRequest(_("No target specified"))
     return self.data[consts.ACTION_TARGET]
Example #32
    def resolve(self, value, context=None):
        if not isinstance(value, collections.Sequence):
            raise TypeError(_('"%s" is not a List') % value)

        return [v for v in self._get_children(value, context=context)]
Example #33
 def to_schema_type(self, value):
     try:
         return strutils.bool_from_string(str(value), strict=True)
     except ValueError:
         msg = _("The value '%s' is not a valid Boolean") % value
         raise exc.ESchema(message=msg)
Example #34
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_config import cfg

from senlin.common.i18n import _

RECEIVER_GROUP = cfg.OptGroup(name='receiver', )

RECEIVER_OPTS = [
    cfg.StrOpt('host',
               deprecated_group='webhook',
               help=_('The address for notifying and triggering receivers. '
                      'It is useful when the Senlin API service is running '
                      'behind a proxy.')),
    cfg.PortOpt('port',
                default=8778,
                deprecated_group='webhook',
                help=_('The port for notifying and triggering receivers. '
                       'It is useful when the Senlin API service is running '
                       'behind a proxy.')),
    cfg.IntOpt('max_message_size',
               default=65535,
               help=_('The maximum size (in bytes) of a message that can be '
                      'posted to the receiver queue.')),
]


def register_opts(conf):
Example #35
class MethodVersionNotFound(SenlinException):
    msg_fmt = _("API version '%(version)s' is not supported on this method.")
Example #36
    def post_op(self, cluster_id, action):
        """Routine to be called after an action has been executed.

        For this particular policy, we take this chance to update the pool
        maintained by the load-balancer.

        :param cluster_id: The ID of the cluster on which a relevant action
            has been executed.
        :param action: The action object that triggered this operation.
        :returns: Nothing.
        """
        nodes_added = action.outputs.get('nodes_added', [])
        nodes_removed = action.outputs.get('nodes_removed', [])
        if ((len(nodes_added) == 0) and (len(nodes_removed) == 0)):
            return

        db_cluster = db_api.cluster_get(action.context, cluster_id)
        params = self._build_conn_params(db_cluster)
        lb_driver = driver_base.SenlinDriver().loadbalancing(params)
        cp = cluster_policy.ClusterPolicy.load(action.context, cluster_id,
                                               self.id)
        policy_data = self._extract_policy_data(cp.data)
        lb_id = policy_data['loadbalancer']
        pool_id = policy_data['pool']
        port = self.pool_spec.get(self.POOL_PROTOCOL_PORT)
        subnet = self.pool_spec.get(self.POOL_SUBNET)

        # Remove nodes that have been deleted from lb pool
        for node_id in nodes_removed:
            node = node_mod.Node.load(action.context, node_id=node_id,
                                      show_deleted=True)
            member_id = node.data.get('lb_member', None)
            if member_id is None:
                LOG.warning(_LW('Node %(n)s not found in lb pool %(p)s.'),
                            {'n': node_id, 'p': pool_id})
                continue

            res = lb_driver.member_remove(lb_id, pool_id, member_id)
            if res is not True:
                action.data['status'] = base.CHECK_ERROR
                action.data['reason'] = _('Failed in removing deleted '
                                          'node(s) from lb pool.')
                return

        # Add new nodes to lb pool
        for node_id in nodes_added:
            node = node_mod.Node.load(action.context, node_id=node_id,
                                      show_deleted=True)
            member_id = node.data.get('lb_member', None)
            if member_id:
                LOG.warning(_LW('Node %(n)s already in lb pool %(p)s.'),
                            {'n': node_id, 'p': pool_id})
                continue

            member_id = lb_driver.member_add(node, lb_id, pool_id, port,
                                             subnet)
            if member_id is None:
                action.data['status'] = base.CHECK_ERROR
                action.data['reason'] = _('Failed in adding new node(s) '
                                          'into lb pool.')
                return

            node.data.update({'lb_member': member_id})
            node.store(action.context)

        return
Example #37
    def __call__(self, request):
        """WSGI method that controls (de)serialization and method dispatch."""
        action_args = self.get_action_args(request.environ)
        action = action_args.pop('action', None)
        status_code = action_args.pop('success', None)

        try:
            deserialized_request = self.dispatch(self.deserializer, action,
                                                 request)
            action_args.update(deserialized_request)

            LOG.debug(('Calling %(controller)s : %(action)s'), {
                'controller': self.controller,
                'action': action
            })

            action_result = self.dispatch(self.controller, action, request,
                                          **action_args)
        except TypeError as err:
            LOG.error(_LE('Exception handling resource: %s') % err)
            msg = _('The server could not comply with the request since '
                    'it is either malformed or otherwise incorrect.')
            err = exc.HTTPBadRequest(msg)
            http_exc = translate_exception(err, request.best_match_language())
            # NOTE(luisg): We disguise HTTP exceptions, otherwise they will be
            # treated by wsgi as responses ready to be sent back and they
            # won't make it into the pipeline app that serializes errors
            raise exception.HTTPExceptionDisguise(http_exc)
        except exc.HTTPException as err:
            if not isinstance(err, exc.HTTPError):
                # Some HTTPException are actually not errors, they are
                # responses ready to be sent back to the users, so we don't
                # create error log, but disguise and translate them to meet
                # openstacksdk's need.
                http_exc = translate_exception(err,
                                               request.best_match_language())
                raise exception.HTTPExceptionDisguise(http_exc)
            if isinstance(err, exc.HTTPServerError):
                LOG.error(_LE("Returning %(code)s to user: %(explanation)s"), {
                    'code': err.code,
                    'explanation': err.explanation
                })
            http_exc = translate_exception(err, request.best_match_language())
            raise exception.HTTPExceptionDisguise(http_exc)
        except exception.SenlinException as err:
            raise translate_exception(err, request.best_match_language())
        except Exception as err:
            log_exception(err, sys.exc_info())
            raise translate_exception(err, request.best_match_language())

        try:
            response = webob.Response(request=request)
            # Customize status code if default (200) should be overridden
            if status_code is not None:
                response.status_code = int(status_code)
            # Customize 'location' header if provided
            if action_result and isinstance(action_result, dict):
                location = action_result.pop('location', None)
                if location:
                    response.location = '/v1%s' % location
                if not action_result:
                    action_result = None

            # Attach openstack-api-version header
            if hasattr(response, 'headers'):
                for hdr, val in response.headers.items():
                    response.headers[hdr] = six.text_type(val)
                ver = request.version_request
                if not ver.is_null():
                    ver_res = ' '.join([SERVICE_ALIAS[0], str(ver)])
                    response.headers[API_VERSION_KEY] = ver_res
                    response.headers['Vary'] = API_VERSION_KEY

            self.dispatch(self.serializer, action, response, action_result)
            return response

        # return unserializable result (typically an exception)
        except Exception:
            return action_result
Example #38
def check_size_params(cluster=None,
                      desired=None,
                      min_size=None,
                      max_size=None,
                      strict=False):
    """Validate provided arguments against cluster properties.

    Sanity Checking 1: the desired, min_size, max_size parameters must
                       form a reasonable relationship among themselves,
                       if specified.
    Sanity Checking 2: the desired_capacity must be within the existing
                       range of the cluster, if new range is not provided.

    :param cluster: The cluster object if provided.
    :param desired: The desired capacity for an operation if provided.
    :param min_size: The new min_size property for the cluster, if provided.
    :param max_size: The new max_size property for the cluster, if provided.
    :param strict: Whether we are doing a strict checking.

    :return: A string of error message if failed checking or None if passed
        the checking.
    """

    max_nodes_per_cluster = cfg.CONF.max_nodes_per_cluster
    if desired is not None:
        # recalculate/validate desired based on strict setting
        if desired > max_nodes_per_cluster:
            v = {'d': desired, 'm': max_nodes_per_cluster}
            return _("The target capacity (%(d)s) is greater than the "
                     "maximum number of nodes allowed per cluster "
                     "(%(m)s).") % v
        if (min_size is not None and desired < min_size):
            v = {'d': desired, 'm': min_size}
            return _("The target capacity (%(d)s) is less than "
                     "the specified min_size (%(m)s).") % v

        if (min_size is None and cluster is not None
                and desired < cluster.min_size and strict):
            v = {'d': desired, 'm': cluster.min_size}
            return _("The target capacity (%(d)s) is less than "
                     "the cluster's min_size (%(m)s).") % v

        if (max_size is not None and desired > max_size and max_size >= 0):
            v = {'d': desired, 'm': max_size}
            return _("The target capacity (%(d)s) is greater "
                     "than the specified max_size (%(m)s).") % v

        if (max_size is None and cluster is not None
                and desired > cluster.max_size and cluster.max_size >= 0
                and strict):
            v = {'d': desired, 'm': cluster.max_size}
            return _("The target capacity (%(d)s) is greater "
                     "than the cluster's max_size (%(m)s).") % v

    if min_size is not None:
        if max_size is not None and max_size >= 0 and min_size > max_size:
            v = {'n': min_size, 'm': max_size}
            return _("The specified min_size (%(n)s) is greater than the "
                     "specified max_size (%(m)s).") % v

        if (max_size is None and cluster is not None and cluster.max_size >= 0
                and min_size > cluster.max_size):
            v = {'n': min_size, 'm': cluster.max_size}
            return _("The specified min_size (%(n)s) is greater than the "
                     "current max_size (%(m)s) of the cluster.") % v

        if (desired is None and cluster is not None
                and min_size > cluster.desired_capacity and strict):
            v = {'n': min_size, 'd': cluster.desired_capacity}
            return _("The specified min_size (%(n)s) is greater than the "
                     "current desired_capacity (%(d)s) of the cluster.") % v

    if max_size is not None:
        if max_size > max_nodes_per_cluster:
            v = {'m': max_size, 'mc': max_nodes_per_cluster}
            return _("The specified max_size (%(m)s) is greater than the "
                     "maximum number of nodes allowed per cluster "
                     "(%(mc)s).") % v
        if (min_size is None and cluster is not None and max_size >= 0
                and max_size < cluster.min_size):
            v = {'m': max_size, 'n': cluster.min_size}
            return _("The specified max_size (%(m)s) is less than the "
                     "current min_size (%(n)s) of the cluster.") % v

        if (desired is None and cluster is not None and max_size >= 0
                and max_size < cluster.desired_capacity and strict):
            v = {'m': max_size, 'd': cluster.desired_capacity}
            return _("The specified max_size (%(m)s) is less than the "
                     "current desired_capacity (%(d)s) of the cluster.") % v

    return None
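
Most of check_size_params() reduces to a handful of range relationships between desired, min_size, max_size and the cluster's current bounds. The following condensed standalone sketch (not the Senlin implementation; it ignores the strict/non-strict distinction and the per-cluster node cap) illustrates those core checks:

from collections import namedtuple

# Hypothetical stand-in exposing only the attributes the checks read.
Cluster = namedtuple('Cluster', 'min_size max_size desired_capacity')


def check_bounds(cluster, desired=None, min_size=None, max_size=None):
    """Condensed sketch of the core relationship checks above."""
    lower = min_size if min_size is not None else cluster.min_size
    upper = max_size if max_size is not None else cluster.max_size
    if upper >= 0 and lower > upper:
        return 'min_size (%s) is greater than max_size (%s)' % (lower, upper)
    if desired is not None:
        if desired < lower:
            return ('target capacity (%s) is less than min_size (%s)'
                    % (desired, lower))
        if upper >= 0 and desired > upper:
            return ('target capacity (%s) is greater than max_size (%s)'
                    % (desired, upper))
    return None


cluster = Cluster(min_size=1, max_size=5, desired_capacity=3)
assert check_bounds(cluster, desired=4) is None
assert 'min_size' in check_bounds(cluster, desired=0)
assert 'max_size' in check_bounds(cluster, min_size=6)
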
Example #39
 def name(self):
     if consts.ACTION_NAME not in self.data:
         raise exc.HTTPBadRequest(_("No action name specified"))
     return self.data[consts.ACTION_NAME]
Example #40
class Policy(object):
    '''Base class for policies.'''
    PROFILE_TYPE = 'ANY'

    KEYS = (
        TYPE,
        VERSION,
        DESCRIPTION,
        PROPERTIES,
    ) = (
        'type',
        'version',
        'description',
        'properties',
    )

    spec_schema = {
        TYPE:
        schema.String(
            _('Name of the policy type.'),
            required=True,
        ),
        VERSION:
        schema.String(
            _('Version number of the policy type.'),
            required=True,
        ),
        DESCRIPTION:
        schema.String(
            _('A text description of policy.'),
            default='',
        ),
        PROPERTIES:
        schema.Map(
            _('Properties for the policy.'),
            required=True,
        )
    }

    properties_schema = {}

    def __new__(cls, name, spec, **kwargs):
        """Create a new policy of the appropriate class.

        :param name: The name for the policy.
        :param spec: A dictionary containing the spec for the policy.
        :param kwargs: Keyword arguments for policy creation.
        :returns: An instance of a specific sub-class of Policy.
        """
        type_name, version = schema.get_spec_version(spec)
        type_str = "-".join([type_name, version])

        if cls != Policy:
            PolicyClass = cls
        else:
            PolicyClass = environment.global_env().get_policy(type_str)

        return super(Policy, cls).__new__(PolicyClass)

    def __init__(self, name, spec, **kwargs):
        """Initialize a policy instance.

        :param name: The name for the policy.
        :param spec: A dictionary containing the detailed policy spec.
        :param kwargs: Keyword arguments for initializing the policy.
        :returns: An instance of a specific sub-class of Policy.
        """

        type_name, version = schema.get_spec_version(spec)
        type_str = "-".join([type_name, version])
        self.name = name
        self.spec = spec

        self.id = kwargs.get('id', None)
        self.type = kwargs.get('type', type_str)
        self.user = kwargs.get('user')
        self.project = kwargs.get('project')
        self.domain = kwargs.get('domain')
        self.data = kwargs.get('data', {})

        self.created_at = kwargs.get('created_at', None)
        self.updated_at = kwargs.get('updated_at', None)

        self.spec_data = schema.Spec(self.spec_schema, spec)
        self.properties = schema.Spec(self.properties_schema,
                                      self.spec.get(self.PROPERTIES, {}))
        self.singleton = True

    @classmethod
    def _from_object(cls, policy):
        """Construct a policy from a Policy object.

        :param cls: The target class.
        :param policy: A policy object.
        """

        kwargs = {
            'id': policy.id,
            'type': policy.type,
            'user': policy.user,
            'project': policy.project,
            'domain': policy.domain,
            'created_at': policy.created_at,
            'updated_at': policy.updated_at,
            'data': policy.data,
        }

        return cls(policy.name, policy.spec, **kwargs)

    @classmethod
    def load(cls, context, policy_id=None, db_policy=None, project_safe=True):
        """Retrieve and reconstruct a policy object from DB.

        :param context: DB context for object retrieval.
        :param policy_id: Optional parameter specifying the ID of policy.
        :param db_policy: Optional parameter referencing a policy DB object.
        :param project_safe: Optional parameter specifying whether only
                             policies belong to the context.project will be
                             loaded.
        :returns: An object of the proper policy class.
        """
        if db_policy is None:
            db_policy = po.Policy.get(context,
                                      policy_id,
                                      project_safe=project_safe)
            if db_policy is None:
                raise exception.PolicyNotFound(policy=policy_id)

        return cls._from_object(db_policy)

    @classmethod
    def load_all(cls,
                 context,
                 limit=None,
                 marker=None,
                 sort=None,
                 filters=None,
                 project_safe=True):
        """Retrieve all policies from database."""

        objs = po.Policy.get_all(context,
                                 limit=limit,
                                 marker=marker,
                                 sort=sort,
                                 filters=filters,
                                 project_safe=project_safe)

        for obj in objs:
            yield cls._from_object(obj)

    @classmethod
    def delete(cls, context, policy_id):
        po.Policy.delete(context, policy_id)

    def store(self, context):
        '''Store the policy object into database table.'''
        timestamp = timeutils.utcnow()

        values = {
            'name': self.name,
            'type': self.type,
            'user': self.user,
            'project': self.project,
            'domain': self.domain,
            'spec': self.spec,
            'data': self.data,
        }

        if self.id is not None:
            self.updated_at = timestamp
            values['updated_at'] = timestamp
            po.Policy.update(context, self.id, values)
        else:
            self.created_at = timestamp
            values['created_at'] = timestamp
            policy = po.Policy.create(context, values)
            self.id = policy.id

        return self.id

    def validate(self):
        '''Validate the schema and the data provided.'''
        self.spec_data.validate()
        self.properties.validate()

    @classmethod
    def get_schema(cls):
        return dict((name, dict(schema))
                    for name, schema in cls.properties_schema.items())

    def _build_policy_data(self, data):
        clsname = reflection.get_class_name(self, fully_qualified=False)
        version = self.VERSION
        result = {
            clsname: {
                'version': version,
                'data': data,
            }
        }
        return result

    def _extract_policy_data(self, policy_data):
        clsname = reflection.get_class_name(self, fully_qualified=False)
        if clsname not in policy_data:
            return None
        data = policy_data.get(clsname)
        if 'version' not in data or data['version'] != self.VERSION:
            return None

        return data.get('data', None)

    def attach(self, cluster):
        '''Method to be invoked before policy is attached to a cluster.

        :param cluster: the cluster to which the policy is being attached.
        :returns: (True, message) if the operation is successful, or (False,
                 error) otherwise.
        '''
        if self.PROFILE_TYPE == ['ANY']:
            return True, None

        profile = cluster.rt['profile']
        if profile.type not in self.PROFILE_TYPE:
            error = _('Policy not applicable on profile type: '
                      '%s') % profile.type
            return False, error

        return True, None

    def detach(self, cluster):
        '''Method to be invoked before policy is detached from a cluster.'''
        return True, None

    def need_check(self, target, action):
        if getattr(self, 'TARGET', None) is None:
            return True

        return (target, action.action) in self.TARGET

    def pre_op(self, cluster_id, action):
        '''A method that will be invoked before an action execution.'''
        return

    def post_op(self, cluster_id, action):
        '''A method that will be invoked after an action execution.'''
        return

    def to_dict(self):
        pb_dict = {
            'id': self.id,
            'name': self.name,
            'type': self.type,
            'user': self.user,
            'project': self.project,
            'domain': self.domain,
            'spec': self.spec,
            'created_at': utils.format_time(self.created_at),
            'updated_at': utils.format_time(self.updated_at),
            'data': self.data,
        }
        return pb_dict

    def _build_conn_params(self, cluster):
        """Build trust-based connection parameters.

        :param cluster: the cluster for which the trust will be checked.
        """
        service_creds = senlin_context.get_service_context()
        params = {
            'username': service_creds.get('username'),
            'password': service_creds.get('password'),
            'auth_url': service_creds.get('auth_url'),
            'user_domain_name': service_creds.get('user_domain_name')
        }

        cred = db_api.cred_get(oslo_context.get_current(), cluster.user,
                               cluster.project)
        if cred is None:
            raise exception.TrustNotFound(trustor=cluster.user)
        params['trust_id'] = cred.cred['openstack']['trust']

        return params
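
The pre_op/post_op hooks, the TARGET filter used by need_check(), and the versioned envelope produced by _build_policy_data() are easiest to see together in a tiny subclass. The sketch below is illustrative only: DummyPolicy and its TARGET tuple are assumptions, not part of the module above, and it presumes the base class shown here is named Policy.

class DummyPolicy(Policy):
    """Illustrative subclass (not part of Senlin) exercising the base hooks."""
    VERSION = '1.0'
    PROFILE_TYPE = ['ANY']
    # need_check() returns True only for ('BEFORE', 'CLUSTER_SCALE_IN') pairs.
    TARGET = [('BEFORE', 'CLUSTER_SCALE_IN')]

    def pre_op(self, cluster_id, action):
        # Wrap runtime data into {'DummyPolicy': {'version': '1.0', 'data': ...}}
        action.data.update(self._build_policy_data({'count': 1}))

    def post_op(self, cluster_id, action):
        # _extract_policy_data() returns None when the class name or version
        # recorded in the envelope does not match this class.
        saved = self._extract_policy_data(action.data)
        if saved is not None:
            pass  # saved == {'count': 1} in this sketch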
Example #41
0
class MultipleChoices(SenlinException):
    msg_fmt = _("Multiple results found matching the query criteria "
                "'%(arg)s'. Please be more specific.")
Example #42
0
class InvalidGlobalAPIVersion(SenlinException):
    msg_fmt = _("Version '%(req_ver)s' is not supported by the API. Minimum "
                "is '%(min_ver)s' and maximum is '%(max_ver)s'.")
Example #43
0
"""
Routines for configuring Senlin
"""
import socket

from oslo_config import cfg

from senlin.common.i18n import _

# DEFAULT, service
service_opts = [
    cfg.StrOpt('default_region_name',
               help=_('Default region name used to get services endpoints.')),
    cfg.IntOpt('max_response_size',
               default=524288,
               help=_('Maximum raw byte size of data from web response.'))
]

cfg.CONF.register_opts(service_opts)

# DEFAULT, engine
engine_opts = [
    cfg.IntOpt('periodic_interval',
               default=60,
               help=_('Seconds between running periodic tasks.')),
    cfg.IntOpt('periodic_interval_max',
               default=120,
               help=_('Maximum seconds between periodic tasks to be called.')),
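
Once registered, these options are read directly off the global cfg.CONF object. A minimal consumption sketch follows; the engine_opts list above is truncated, and the register_opts() call for it shown here is an assumption mirroring the pattern used for service_opts.

# Assumed to follow the same pattern as service_opts above once the
# truncated engine_opts list is closed.
cfg.CONF.register_opts(engine_opts)

# Elsewhere in the engine the values are read as plain attributes:
interval = cfg.CONF.periodic_interval          # 60 by default
interval_max = cfg.CONF.periodic_interval_max  # 120 by default
max_resp = cfg.CONF.max_response_size          # 524288 by default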
Example #44
0
 def action(self):
     if consts.ACTION_ACTION not in self.data:
         raise exc.HTTPBadRequest(_("No action specified"))
     return self.data[consts.ACTION_ACTION]
Example #45
0
from senlin.common.i18n import _LI
from senlin.common.i18n import _LW
from senlin.rpc import client as rpc_client

LOG = logging.getLogger(__name__)
URL_LENGTH_LIMIT = 50000
DEFAULT_API_VERSION = '1.0'
API_VERSION_KEY = 'OpenStack-API-Version'
VER_METHOD_ATTR = 'versioned_methods'
SERVICE_ALIAS = ['clustering', 'cluster']

# senlin_api, api opts
api_opts = [
    cfg.IPOpt('bind_host',
              default='0.0.0.0',
              help=_('Address to bind the server. Useful when '
                     'selecting a particular network interface.')),
    cfg.PortOpt('bind_port',
                default=8778,
                help=_('The port on which the server will listen.')),
    cfg.IntOpt('backlog',
               default=4096,
               help=_("Number of backlog requests "
                      "to configure the socket with.")),
    cfg.StrOpt('cert_file',
               help=_("Location of the SSL certificate file "
                      "to use for SSL mode.")),
    cfg.StrOpt('key_file',
               help=_("Location of the SSL key file to use "
                      "for enabling SSL mode.")),
    cfg.IntOpt('workers',
               default=0,
Example #46
0
    def test_cluster_resize_with_constraint_breaking(self):
        # Create cluster
        desired_capacity = 3
        min_size = 1
        max_size = 5
        cluster = test_api.create_cluster(self.client,
                                          test_utils.random_name('cluster'),
                                          self.profile['id'], desired_capacity,
                                          min_size, max_size)
        cluster = test_utils.wait_for_status(test_api.get_cluster, self.client,
                                             cluster['id'], 'ACTIVE')

        # Increase cluster size and break the size constraint
        params = {
            'adjustment_type': 'CHANGE_IN_CAPACITY',
            'number': 3,
            'strict': True
        }
        res = test_api.action_cluster(self.client, cluster['id'], 'resize',
                                      params)
        reason = _("The target capacity (6) is greater than the cluster's "
                   "max_size (5).")
        self.assertIn(reason, res)

        # Do best-effort resizing
        params = {
            'adjustment_type': 'CHANGE_IN_CAPACITY',
            'number': 3,
            'strict': False
        }
        action_id = test_api.action_cluster(self.client, cluster['id'],
                                            'resize', params)

        # Wait for the cluster resize action to succeed
        test_utils.wait_for_status(test_api.get_action, self.client, action_id,
                                   'SUCCEEDED')

        # Verify cluster resize result
        cluster = test_api.get_cluster(self.client, cluster['id'])
        self.assertEqual('ACTIVE', cluster['status'])
        self.assertEqual(5, len(cluster['nodes']))

        # Decrease cluster size and break the size constraint
        params = {
            'adjustment_type': 'CHANGE_IN_CAPACITY',
            'number': -5,
            'strict': True
        }
        res = test_api.action_cluster(self.client, cluster['id'], 'resize',
                                      params)
        reason = _("The target capacity (0) is less than the cluster's "
                   "min_size (1).")
        self.assertIn(reason, res)

        # Do best-effort resizing
        params = {
            'adjustment_type': 'CHANGE_IN_CAPACITY',
            'number': -5,
            'strict': False
        }
        action_id = test_api.action_cluster(self.client, cluster['id'],
                                            'resize', params)

        # Wait for the cluster resize action to succeed
        test_utils.wait_for_status(test_api.get_action, self.client, action_id,
                                   'SUCCEEDED')

        # Verify cluster resize result
        cluster = test_api.get_cluster(self.client, cluster['id'])
        self.assertEqual('ACTIVE', cluster['status'])
        self.assertEqual(1, len(cluster['nodes']))

        # Delete cluster
        test_api.delete_cluster(self.client, cluster['id'])
        cluster = test_utils.wait_for_delete(test_api.get_cluster, self.client,
                                             cluster['id'])
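
The error strings asserted above come from the engine's size-constraint validation, and the strict flag controls whether a violating request fails outright or is truncated to the nearest bound. A simplified, stand-alone sketch of that check (not the real Senlin helper, just an illustration of the behavior the test exercises):

def check_resize(current, change, min_size, max_size, strict=True):
    """Illustrative re-creation of the constraint logic the test exercises."""
    target = current + change
    if target > max_size:
        if strict:
            return None, ("The target capacity (%d) is greater than the "
                          "cluster's max_size (%d)." % (target, max_size))
        target = max_size   # best-effort: truncate to the upper bound
    if target < min_size:
        if strict:
            return None, ("The target capacity (%d) is less than the "
                          "cluster's min_size (%d)." % (target, min_size))
        target = min_size   # best-effort: truncate to the lower bound
    return target, None

# With the cluster above: check_resize(3, 3, 1, 5) fails with the max_size
# message, while check_resize(3, 3, 1, 5, strict=False) yields (5, None).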
Example #47
0
class ServerProfile(base.KubeBaseProfile):
    """Profile for an kubernetes node server."""

    VERSIONS = {'1.0': [{'status': consts.EXPERIMENTAL, 'since': '2017.10'}]}

    KEYS = (CONTEXT, FLAVOR, IMAGE, KEY_NAME, BLOCK_DEVICE_MAPPING_V2) = (
        'context',
        'flavor',
        'image',
        'key_name',
        'block_device_mapping_v2',
    )

    KUBE_KEYS = (MASTER_CLUSTER, ) = ('master_cluster', )

    MASTER_CLUSTER_KEYS = (
        KUBEADM_TOKEN,
        KUBE_MASTER_IP,
        PRIVATE_NETWORK,
        PRIVATE_SUBNET,
        PRIVATE_ROUTER,
    ) = (
        'kubeadm_token',
        'kube_master_ip',
        'private_network',
        'private_subnet',
        'private_router',
    )

    INTERNAL_KEYS = (
        SECURITY_GROUP,
        SCALE_OUT_RECV_ID,
        SCALE_OUT_URL,
    ) = (
        'security_group',
        'scale_out_recv_id',
        'scale_out_url',
    )

    NETWORK_KEYS = (
        PORT,
        FIXED_IP,
        NETWORK,
        PORT_SECURITY_GROUPS,
        FLOATING_NETWORK,
        FLOATING_IP,
    ) = (
        'port',
        'fixed_ip',
        'network',
        'security_groups',
        'floating_network',
        'floating_ip',
    )

    BDM2_KEYS = (
        BDM2_UUID,
        BDM2_SOURCE_TYPE,
        BDM2_DESTINATION_TYPE,
        BDM2_DISK_BUS,
        BDM2_DEVICE_NAME,
        BDM2_VOLUME_SIZE,
        BDM2_GUEST_FORMAT,
        BDM2_BOOT_INDEX,
        BDM2_DEVICE_TYPE,
        BDM2_DELETE_ON_TERMINATION,
    ) = (
        'uuid',
        'source_type',
        'destination_type',
        'disk_bus',
        'device_name',
        'volume_size',
        'guest_format',
        'boot_index',
        'device_type',
        'delete_on_termination',
    )

    properties_schema = {
        CONTEXT:
        schema.Map(_('Customized security context for operating servers.'), ),
        FLAVOR:
        schema.String(
            _('ID of flavor used for the server.'),
            required=True,
            updatable=True,
        ),
        IMAGE:
        schema.String(
            # IMAGE is not required because the boot source may instead be
            # provided through BDM or BDMv2 settings.
            _('ID of image to be used for the new server.'),
            updatable=True,
        ),
        KEY_NAME:
        schema.String(_('Name of Nova keypair to be injected to server.'), ),
        MASTER_CLUSTER:
        schema.String(
            _('Cluster running kubernetes master.'),
            required=True,
        ),
        BLOCK_DEVICE_MAPPING_V2:
        schema.List(
            _('A list specifying the properties of block devices to be used '
              'for this server.'),
            schema=schema.Map(
                _('A map specifying the properties of a block device to be '
                  'used by the server.'),
                schema={
                    BDM2_UUID:
                    schema.String(
                        _('ID of the source image, snapshot or volume.'), ),
                    BDM2_SOURCE_TYPE:
                    schema.String(
                        _("Volume source type, must be one of 'image', "
                          "'snapshot', 'volume' or 'blank'"),
                        required=True,
                    ),
                    BDM2_DESTINATION_TYPE:
                    schema.String(
                        _("Volume destination type, must be 'volume' or "
                          "'local'"),
                        required=True,
                    ),
                    BDM2_DISK_BUS:
                    schema.String(_('Bus of the device.'), ),
                    BDM2_DEVICE_NAME:
                    schema.String(
                        _('Name of the device (e.g. vda, xda, ...).'), ),
                    BDM2_VOLUME_SIZE:
                    schema.Integer(
                        _('Size of the block device in MB (for swap) and '
                          'in GB (for other formats).'),
                        required=True,
                    ),
                    BDM2_GUEST_FORMAT:
                    schema.String(
                        _('Specifies the disk file system format (e.g. swap, '
                          'ephemeral, ...).'), ),
                    BDM2_BOOT_INDEX:
                    schema.Integer(_('Defines the boot order of the device.'), ),
                    BDM2_DEVICE_TYPE:
                    schema.String(
                        _('Type of the device (e.g. disk, cdrom, ...).'), ),
                    BDM2_DELETE_ON_TERMINATION:
                    schema.Boolean(
                        _('Whether to delete the volume when the server '
                          'stops.'), ),
                }),
        ),
    }
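
    # Illustrative only (not taken from the Senlin documentation): a profile
    # spec matching the schema above might carry properties such as
    #
    #   flavor: m1.small
    #   image: ubuntu-20.04
    #   key_name: worker-key
    #   master_cluster: kube-master
    #   block_device_mapping_v2:
    #     - uuid: 6ce0be68-0f7f-4e55-8bbd-1b0f1ac4e1f2
    #       source_type: image
    #       destination_type: volume
    #       volume_size: 10
    #       boot_index: 0
    #       delete_on_termination: true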

    def __init__(self, type_name, name, **kwargs):
        super(ServerProfile, self).__init__(type_name, name, **kwargs)
        self.server_id = None

    def _get_master_cluster_info(self, obj):
        ctx = context.get_service_context(user_id=obj.user,
                                          project_id=obj.project)
        master = self.properties[self.MASTER_CLUSTER]
        try:
            cluster = cluster_obj.Cluster.find(ctx, master)
        except Exception as ex:
            raise exc.EResourceCreation(type='kubernetes.worker',
                                        message=str(ex))
        for key in self.MASTER_CLUSTER_KEYS:
            if key not in cluster.data:
                raise exc.EResourceCreation(
                    type='kubernetes.worker',
                    message="Can't find %s in cluster %s" % (key, master))

        return cluster.data

    def _set_cluster_dependents(self, obj):
        ctx = context.get_service_context(user_id=obj.user,
                                          project_id=obj.project)
        master = self.properties[self.MASTER_CLUSTER]
        try:
            master_cluster = cluster_obj.Cluster.find(ctx, master)
        except exc.ResourceNotFound:
            msg = _("Cannot find the given cluster: %s") % master
            raise exc.BadRequest(msg=msg)
        if master_cluster:
            # configure kube master dependents, kube master record kube node
            # cluster uuid
            master_dependents = master_cluster.dependents
            master_dependents['kube-node'] = obj.id
            cluster_obj.Cluster.update(ctx, master_cluster.id,
                                       {'dependents': master_dependents})

    def _del_cluster_dependents(self, obj):
        ctx = context.get_service_context(user_id=obj.user,
                                          project_id=obj.project)
        master = self.properties[self.MASTER_CLUSTER]
        try:
            master_cluster = cluster_obj.Cluster.find(ctx, master)
        except exc.ResourceNotFound:
            msg = _("Cannot find the given cluster: %s") % master
            raise exc.BadRequest(msg=msg)

        if master_cluster:
            # remove kube master record kube node dependents
            master_dependents = master_cluster.dependents
            if master_dependents and 'kube-node' in master_dependents:
                master_dependents.pop('kube-node')
                cluster_obj.Cluster.update(ctx, master_cluster.id,
                                           {'dependents': master_dependents})

    def _get_cluster_data(self, obj):
        ctx = context.get_service_context(user_id=obj.user,
                                          project_id=obj.project)
        if obj.cluster_id:
            cluster = cluster_obj.Cluster.get(ctx, obj.cluster_id)
            return cluster.data

        return {}

    def do_cluster_create(self, obj):
        self._create_security_group(obj)
        self._set_cluster_dependents(obj)

    def do_cluster_delete(self, obj):
        self._delete_security_group(obj)
        self._del_cluster_dependents(obj)

    def do_validate(self, obj):
        """Validate if the spec has provided valid info for server creation.

        :param obj: The node object.
        """
        # validate flavor
        flavor = self.properties[self.FLAVOR]
        self._validate_flavor(obj, flavor)

        # validate image
        image = self.properties[self.IMAGE]
        if image is not None:
            self._validate_image(obj, image)

        # validate key_name
        keypair = self.properties[self.KEY_NAME]
        if keypair is not None:
            self._validate_keypair(obj, keypair)

        return True

    def do_create(self, obj):
        """Create a server for the node object.

        :param obj: The node object for which a server will be created.
        """
        kwargs = {}
        for key in self.KEYS:
            if self.properties[key] is not None:
                kwargs[key] = self.properties[key]

        image_ident = self.properties[self.IMAGE]
        if image_ident is not None:
            image = self._validate_image(obj, image_ident, 'create')
            kwargs.pop(self.IMAGE)
            kwargs['imageRef'] = image.id

        flavor_ident = self.properties[self.FLAVOR]
        flavor = self._validate_flavor(obj, flavor_ident, 'create')
        kwargs.pop(self.FLAVOR)
        kwargs['flavorRef'] = flavor.id

        keypair_name = self.properties[self.KEY_NAME]
        if keypair_name:
            keypair = self._validate_keypair(obj, keypair_name, 'create')
            kwargs['key_name'] = keypair.name

        kwargs['name'] = obj.name

        metadata = self._build_metadata(obj, {})
        kwargs['metadata'] = metadata

        sgid = self._get_security_group(obj)
        kwargs['security_groups'] = [{'name': sgid}]

        jj_vars = {}
        master_cluster = self._get_master_cluster_info(obj)
        kwargs['networks'] = [{'uuid': master_cluster[self.PRIVATE_NETWORK]}]
        jj_vars['KUBETOKEN'] = master_cluster[self.KUBEADM_TOKEN]
        jj_vars['MASTERIP'] = master_cluster[self.KUBE_MASTER_IP]

        block_device_mapping_v2 = self.properties[self.BLOCK_DEVICE_MAPPING_V2]
        if block_device_mapping_v2 is not None:
            kwargs['block_device_mapping_v2'] = self._resolve_bdm(
                obj, block_device_mapping_v2, 'create')

        user_data = base.loadScript('./scripts/worker.sh')
        if user_data is not None:
            # Use jinja2 to replace variables defined in user_data
            try:
                jj_t = jinja2.Template(user_data)
                user_data = jj_t.render(**jj_vars)
            except (jinja2.exceptions.UndefinedError, ValueError) as ex:
                # TODO(anyone) Handle jinja2 error
                pass
            ud = encodeutils.safe_encode(user_data)
            kwargs['user_data'] = encodeutils.safe_decode(base64.b64encode(ud))

        server = None
        resource_id = None
        try:
            server = self.compute(obj).server_create(**kwargs)
            self.compute(obj).wait_for_server(server.id)
            server = self.compute(obj).server_get(server.id)
            return server.id
        except exc.InternalError as ex:
            if server and server.id:
                resource_id = server.id
            raise exc.EResourceCreation(type='server',
                                        message=str(ex),
                                        resource_id=resource_id)

    def do_delete(self, obj, **params):
        """Delete the physical resource associated with the specified node.

        :param obj: The node object to operate on.
        :param params: Optional keyword arguments for the delete operation.
        :returns: This operation always returns True unless an exception is
                  caught.
        :raises: `EResourceDeletion` if interaction with the compute service
                 fails.
        """
        if not obj.physical_id:
            return True

        server_id = obj.physical_id
        ignore_missing = params.get('ignore_missing', True)
        internal_ports = obj.data.get('internal_ports', [])
        force = params.get('force', False)

        try:
            driver = self.compute(obj)
            if force:
                driver.server_force_delete(server_id, ignore_missing)
            else:
                driver.server_delete(server_id, ignore_missing)
            driver.wait_for_server_delete(server_id)
            if internal_ports:
                ex = self._delete_ports(obj, internal_ports)
                if ex:
                    raise ex
            return True
        except exc.InternalError as ex:
            raise exc.EResourceDeletion(type='server',
                                        id=server_id,
                                        message=str(ex))
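
The user_data handling in do_create() above (render the worker bootstrap script with jinja2, then base64-encode it for Nova) can be exercised in isolation. A small stand-alone sketch, assuming an inline template string instead of the real ./scripts/worker.sh and made-up variable values:

import base64

import jinja2
from oslo_utils import encodeutils

# Hypothetical stand-in for the contents of './scripts/worker.sh'.
template = ("#!/bin/sh\n"
            "kubeadm join {{ MASTERIP }}:6443 --token {{ KUBETOKEN }}\n")

jj_vars = {'MASTERIP': '10.0.0.10', 'KUBETOKEN': 'abcdef.0123456789abcdef'}

rendered = jinja2.Template(template).render(**jj_vars)
user_data = encodeutils.safe_decode(
    base64.b64encode(encodeutils.safe_encode(rendered)))
# `user_data` is the value do_create() passes to Nova as kwargs['user_data'].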
Example #48
0
class BadRequest(SenlinException):
    msg_fmt = _("%(msg)s.")
Example #49
0
class NotAuthenticated(SenlinException):
    msg_fmt = _("You are not authenticated.")
Example #50
0
class InvalidAPIVersionString(SenlinException):
    msg_fmt = _("API Version String '%(version)s' is of invalid format. It "
                "must be of format 'major.minor'.")
Example #51
0
class PolicyNotAttached(InternalError):
    msg_fmt = _("The policy '%(policy)s' is not attached to the specified "
                "cluster '%(cluster)s'.")
Example #52
0
class Forbidden(SenlinException):
    msg_fmt = _("You are not authorized to complete this operation.")
Example #53
0
class ESchema(InternalError):
    msg_fmt = _("%(message)s")
Example #54
0
class SIGHUPInterrupt(SenlinException):
    msg_fmt = _("System SIGHUP signal received.")
Example #55
0
class EResourceUpdate(InternalError):
    # Used when updating resources from other services
    msg_fmt = _("Failed in updating %(type)s '%(id)s': %(message)s.")
Example #56
0
class InvalidPlugin(InternalError):
    msg_fmt = _("%(message)s")
Example #57
0
    def test_cluster_resize_with_constraint_adjusting(self):
        # Create cluster
        desired_capacity = 3
        min_size = 2
        max_size = 5
        cluster = test_api.create_cluster(self.client,
                                          test_utils.random_name('cluster'),
                                          self.profile['id'], desired_capacity,
                                          min_size, max_size)
        cluster = test_utils.wait_for_status(test_api.get_cluster, self.client,
                                             cluster['id'], 'ACTIVE')

        # Increase cluster size with upper limit increasing
        params = {
            'adjustment_type': 'CHANGE_IN_CAPACITY',
            'number': 3,
            'max_size': 6
        }
        action_id = test_api.action_cluster(self.client, cluster['id'],
                                            'resize', params)
        test_utils.wait_for_status(test_api.get_action, self.client, action_id,
                                   'SUCCEEDED')

        # Verify cluster resize result
        cluster = test_api.get_cluster(self.client, cluster['id'])
        self.assertEqual('ACTIVE', cluster['status'])
        self.assertEqual(6, len(cluster['nodes']))

        # Decrease cluster size upper limit with strict set to False
        params = {'max_size': 4, 'strict': False}
        action_id = test_api.action_cluster(self.client, cluster['id'],
                                            'resize', params)
        test_utils.wait_for_status(test_api.get_action, self.client, action_id,
                                   'SUCCEEDED')

        # Verify cluster resize result
        cluster = test_api.get_cluster(self.client, cluster['id'])
        self.assertEqual('ACTIVE', cluster['status'])
        self.assertEqual(4, len(cluster['nodes']))

        # Reduce cluster size with lower limit change
        params = {
            'adjustment_type': 'CHANGE_IN_CAPACITY',
            'number': -3,
            'min_size': 1
        }
        action_id = test_api.action_cluster(self.client, cluster['id'],
                                            'resize', params)
        test_utils.wait_for_status(test_api.get_action, self.client, action_id,
                                   'SUCCEEDED')

        # Verify cluster resize result
        cluster = test_api.get_cluster(self.client, cluster['id'])
        self.assertEqual('ACTIVE', cluster['status'])
        self.assertEqual(1, len(cluster['nodes']))

        # Increase cluster size lower limit with strict set to True
        params = {'min_size': 2, 'strict': True}
        res = test_api.action_cluster(self.client, cluster['id'], 'resize',
                                      params)
        reason = _("The specified min_size (2) is greater than the current "
                   "desired_capacity (1) of the cluster.")
        self.assertIn(reason, res)

        # Verify cluster resize result
        cluster = test_api.get_cluster(self.client, cluster['id'])
        self.assertEqual('ACTIVE', cluster['status'])
        self.assertEqual(1, len(cluster['nodes']))

        # Delete cluster
        test_api.delete_cluster(self.client, cluster['id'])
        cluster = test_utils.wait_for_delete(test_api.get_cluster, self.client,
                                             cluster['id'])
Example #58
0
class EResourceDeletion(InternalError):
    # Used when deleting resources from other services
    msg_fmt = _("Failed in deleting %(type)s '%(id)s': %(message)s.")
Example #59
0
    def test_cluster_scale_in_out(self):
        # Create cluster
        desired_capacity = 2
        min_size = 1
        max_size = 5
        cluster = test_api.create_cluster(self.client,
                                          test_utils.random_name('cluster'),
                                          self.profile['id'], desired_capacity,
                                          min_size, max_size)
        cluster = test_utils.wait_for_status(test_api.get_cluster, self.client,
                                             cluster['id'], 'ACTIVE')

        # Scale out cluster without params
        action_id = test_api.action_cluster(self.client, cluster['id'],
                                            'scale_out')
        test_utils.wait_for_status(test_api.get_action, self.client, action_id,
                                   'SUCCEEDED')

        # Verify cluster scale out result
        cluster = test_api.get_cluster(self.client, cluster['id'])
        self.assertEqual('ACTIVE', cluster['status'])
        self.assertEqual(3, len(cluster['nodes']))

        # Scale out with count set to 2
        action_id = test_api.action_cluster(self.client, cluster['id'],
                                            'scale_out', {'count': 2})
        test_utils.wait_for_status(test_api.get_action, self.client, action_id,
                                   'SUCCEEDED')

        # Verify cluster scale out result
        cluster = test_api.get_cluster(self.client, cluster['id'])
        self.assertEqual('ACTIVE', cluster['status'])
        self.assertEqual(5, len(cluster['nodes']))

        # Keep scaling out and break the size constraint
        action_id = test_api.action_cluster(self.client, cluster['id'],
                                            'scale_out')

        # Wait for the cluster scale-out action to fail
        action = test_utils.wait_for_status(test_api.get_action, self.client,
                                            action_id, 'FAILED')
        reason = _("The target capacity (6) is greater "
                   "than the cluster's max_size (5).")
        self.assertEqual(reason, action['status_reason'])

        # Verify cluster scale out result
        cluster = test_api.get_cluster(self.client, cluster['id'])
        self.assertEqual('ACTIVE', cluster['status'])
        self.assertEqual(5, len(cluster['nodes']))

        # Scale in cluster without params
        action_id = test_api.action_cluster(self.client, cluster['id'],
                                            'scale_in')
        test_utils.wait_for_status(test_api.get_action, self.client, action_id,
                                   'SUCCEEDED')

        # Verify cluster scale in result
        cluster = test_api.get_cluster(self.client, cluster['id'])
        self.assertEqual('ACTIVE', cluster['status'])
        self.assertEqual(4, len(cluster['nodes']))

        # Scale in with count set to 3
        action_id = test_api.action_cluster(self.client, cluster['id'],
                                            'scale_in', {'count': 3})
        test_utils.wait_for_status(test_api.get_action, self.client, action_id,
                                   'SUCCEEDED')

        # Verify cluster scale in result
        cluster = test_api.get_cluster(self.client, cluster['id'])
        self.assertEqual('ACTIVE', cluster['status'])
        self.assertEqual(1, len(cluster['nodes']))

        # Keep scaling in and break the size constraint
        action_id = test_api.action_cluster(self.client, cluster['id'],
                                            'scale_in')
        action = test_utils.wait_for_status(test_api.get_action, self.client,
                                            action_id, 'FAILED')

        reason = _("The target capacity (0) is less "
                   "than the cluster's min_size (1).")
        self.assertEqual(reason, action['status_reason'])

        # Verify cluster scale in result
        cluster = test_api.get_cluster(self.client, cluster['id'])
        self.assertEqual('ACTIVE', cluster['status'])
        self.assertEqual(1, len(cluster['nodes']))

        # Delete cluster
        test_api.delete_cluster(self.client, cluster['id'])
        cluster = test_utils.wait_for_delete(test_api.get_cluster, self.client,
                                             cluster['id'])
Example #60
0
class TrustNotFound(InternalError):
    # Internal exception, not to be exposed to end user.
    msg_fmt = _("The trust for trustor '%(trustor)s' could not be found.")