class ClustersController(base.Controller): """REST controller for Clusters.""" def __init__(self): super(ClustersController, self).__init__() _custom_actions = { 'detail': ['GET'], } actions = cluster_actions.ActionsController() def _generate_name_for_cluster(self, context): """Generate a random name like: zeta-22-cluster.""" name_gen = name_generator.NameGenerator() name = name_gen.generate() return name + '-cluster' def _get_clusters_collection(self, marker, limit, sort_key, sort_dir, expand=False, resource_url=None): context = pecan.request.context if context.is_admin: if expand: policy.enforce(context, "cluster:detail_all_projects", action="cluster:detail_all_projects") else: policy.enforce(context, "cluster:get_all_all_projects", action="cluster:get_all_all_projects") # TODO(flwang): Instead of asking an extra 'all_project's # parameter, currently the design is allowing admin user to list # all clusters from all projects. But the all_tenants is one of # the condition to do project filter in DB API. And it's also used # by periodic tasks. So the could be removed in the future and # a new parameter 'project_id' would be added so that admin user # can list clusters for a particular project. context.all_tenants = True limit = api_utils.validate_limit(limit) sort_dir = api_utils.validate_sort_dir(sort_dir) marker_obj = None if marker: marker_obj = objects.Cluster.get_by_uuid(pecan.request.context, marker) clusters = objects.Cluster.list(pecan.request.context, limit, marker_obj, sort_key=sort_key, sort_dir=sort_dir) return ClusterCollection.convert_with_links(clusters, limit, url=resource_url, expand=expand, sort_key=sort_key, sort_dir=sort_dir) nodegroups = nodegroup.NodeGroupController() @expose.expose(ClusterCollection, types.uuid, int, wtypes.text, wtypes.text) def get_all(self, marker=None, limit=None, sort_key='id', sort_dir='asc'): """Retrieve a list of clusters. :param marker: pagination marker for large data sets. :param limit: maximum number of resources to return in a single result. :param sort_key: column to sort results by. Default: id. :param sort_dir: direction to sort. "asc" or "desc". Default: asc. """ context = pecan.request.context policy.enforce(context, 'cluster:get_all', action='cluster:get_all') return self._get_clusters_collection(marker, limit, sort_key, sort_dir) @expose.expose(ClusterCollection, types.uuid, int, wtypes.text, wtypes.text) def detail(self, marker=None, limit=None, sort_key='id', sort_dir='asc'): """Retrieve a list of clusters with detail. :param marker: pagination marker for large data sets. :param limit: maximum number of resources to return in a single result. :param sort_key: column to sort results by. Default: id. :param sort_dir: direction to sort. "asc" or "desc". Default: asc. """ context = pecan.request.context policy.enforce(context, 'cluster:detail', action='cluster:detail') # NOTE(lucasagomes): /detail should only work against collections parent = pecan.request.path.split('/')[:-1][-1] if parent != "clusters": raise exception.HTTPNotFound expand = True resource_url = '/'.join(['clusters', 'detail']) return self._get_clusters_collection(marker, limit, sort_key, sort_dir, expand, resource_url) def _collect_fault_info(self, context, cluster): """Collect fault info from heat resources of given cluster and store them into cluster.faults. """ osc = clients.OpenStackClients(context) filters = {'status': 'FAILED'} try: failed_resources = osc.heat().resources.list(cluster.stack_id, nested_depth=2, filters=filters) except Exception as e: failed_resources = [] LOG.warning( "Failed to retrieve failed resources for " "cluster %(cluster)s from Heat stack " "%(stack)s due to error: %(e)s", { 'cluster': cluster.uuid, 'stack': cluster.stack_id, 'e': e }, exc_info=True) return { res.resource_name: res.resource_status_reason for res in failed_resources } @expose.expose(Cluster, types.uuid_or_name) def get_one(self, cluster_ident): """Retrieve information about the given Cluster. :param cluster_ident: UUID or logical name of the Cluster. """ context = pecan.request.context if context.is_admin: policy.enforce(context, "cluster:get_one_all_projects", action="cluster:get_one_all_projects") # TODO(flwang): Instead of asking an extra 'all_project's # parameter, currently the design is allowing admin user to list # all clusters from all projects. But the all_tenants is one of # the condition to do project filter in DB API. And it's also used # by periodic tasks. So the could be removed in the future and # a new parameter 'project_id' would be added so that admin user # can list clusters for a particular project. context.all_tenants = True cluster = api_utils.get_resource('Cluster', cluster_ident) policy.enforce(context, 'cluster:get', cluster.as_dict(), action='cluster:get') cluster = Cluster.convert_with_links(cluster) if cluster.status in fields.ClusterStatus.STATUS_FAILED: cluster.faults = self._collect_fault_info(context, cluster) return cluster def _check_cluster_quota_limit(self, context): try: # Check if there is any explicit quota limit set in Quotas table quota = objects.Quota.get_quota_by_project_id_resource( context, context.project_id, 'Cluster') cluster_limit = quota.hard_limit except exception.QuotaNotFound: # If explicit quota was not set for the project, use default limit cluster_limit = CONF.quotas.max_clusters_per_project if objects.Cluster.get_count_all(context) >= cluster_limit: msg = _("You have reached the maximum clusters per project, " "%d. You may delete a cluster to make room for a new " "one.") % cluster_limit raise exception.ResourceLimitExceeded(msg=msg) @expose.expose(ClusterID, body=Cluster, status_code=202) @validation.enforce_cluster_type_supported() @validation.enforce_cluster_volume_storage_size() def post(self, cluster): """Create a new cluster. :param cluster: a cluster within the request body. """ context = pecan.request.context policy.enforce(context, 'cluster:create', action='cluster:create') self._check_cluster_quota_limit(context) temp_id = cluster.cluster_template_id cluster_template = objects.ClusterTemplate.get_by_uuid( context, temp_id) # If keypair not present, use cluster_template value if cluster.keypair is None: cluster.keypair = cluster_template.keypair_id # If docker_volume_size is not present, use cluster_template value if (cluster.docker_volume_size == wtypes.Unset or not cluster.docker_volume_size): cluster.docker_volume_size = cluster_template.docker_volume_size # If labels is not present, use cluster_template value if cluster.labels == wtypes.Unset: cluster.labels = cluster_template.labels # If master_flavor_id is not present, use cluster_template value if (cluster.master_flavor_id == wtypes.Unset or not cluster.master_flavor_id): cluster.master_flavor_id = cluster_template.master_flavor_id # If flavor_id is not present, use cluster_template value if cluster.flavor_id == wtypes.Unset or not cluster.flavor_id: cluster.flavor_id = cluster_template.flavor_id cluster_dict = cluster.as_dict() attr_validator.validate_os_resources(context, cluster_template.as_dict(), cluster_dict) attr_validator.validate_master_count(cluster_dict, cluster_template.as_dict()) cluster_dict['project_id'] = context.project_id cluster_dict['user_id'] = context.user_id # NOTE(yuywz): We will generate a random human-readable name for # cluster if the name is not specified by user. name = cluster_dict.get('name') or \ self._generate_name_for_cluster(context) cluster_dict['name'] = name cluster_dict['coe_version'] = None cluster_dict['container_version'] = None node_count = cluster_dict.pop('node_count') master_count = cluster_dict.pop('master_count') new_cluster = objects.Cluster(context, **cluster_dict) new_cluster.uuid = uuid.uuid4() pecan.request.rpcapi.cluster_create_async(new_cluster, master_count, node_count, cluster.create_timeout) return ClusterID(new_cluster.uuid) @base.Controller.api_version("1.1", "1.2") @wsme.validate(types.uuid, [ClusterPatchType]) @expose.expose(ClusterID, types.uuid_or_name, body=[ClusterPatchType], status_code=202) def patch(self, cluster_ident, patch): """Update an existing Cluster. :param cluster_ident: UUID or logical name of a cluster. :param patch: a json PATCH document to apply to this cluster. """ cluster, node_count = self._patch(cluster_ident, patch) pecan.request.rpcapi.cluster_update_async(cluster, node_count) return ClusterID(cluster.uuid) @base.Controller.api_version("1.3") # noqa @wsme.validate(types.uuid, bool, [ClusterPatchType]) @expose.expose(ClusterID, types.uuid_or_name, types.boolean, body=[ClusterPatchType], status_code=202) def patch(self, cluster_ident, rollback=False, patch=None): """Update an existing Cluster. :param cluster_ident: UUID or logical name of a cluster. :param rollback: whether to rollback cluster on update failure. :param patch: a json PATCH document to apply to this cluster. """ cluster, node_count = self._patch(cluster_ident, patch) pecan.request.rpcapi.cluster_update_async(cluster, node_count, rollback) return ClusterID(cluster.uuid) def _patch(self, cluster_ident, patch): context = pecan.request.context cluster = api_utils.get_resource('Cluster', cluster_ident) policy.enforce(context, 'cluster:update', cluster.as_dict(), action='cluster:update') try: cluster_dict = cluster.as_dict() new_cluster = Cluster( **api_utils.apply_jsonpatch(cluster_dict, patch)) except api_utils.JSONPATCH_EXCEPTIONS as e: raise exception.PatchError(patch=patch, reason=e) # NOTE(ttsiouts): magnum.objects.Cluster.node_count will be a # property so we won't be able to store it in the object. So # instead of object_what_changed compare the new and the old # clusters. delta = set() for field in new_cluster.fields: if getattr(cluster, field) != getattr(new_cluster, field): delta.add(field) validation.validate_cluster_properties(delta) return cluster, new_cluster.node_count @expose.expose(None, types.uuid_or_name, status_code=204) def delete(self, cluster_ident): """Delete a cluster. :param cluster_ident: UUID of cluster or logical name of the cluster. """ context = pecan.request.context if context.is_admin: policy.enforce(context, 'cluster:delete_all_projects', action='cluster:delete_all_projects') context.all_tenants = True cluster = api_utils.get_resource('Cluster', cluster_ident) policy.enforce(context, 'cluster:delete', cluster.as_dict(), action='cluster:delete') pecan.request.rpcapi.cluster_delete_async(cluster.uuid)
class ClustersController(base.Controller): """REST controller for Clusters.""" def __init__(self): super(ClustersController, self).__init__() _custom_actions = { 'detail': ['GET'], } _in_tree_cinder_volume_driver_deprecation_note = ( "The in-tree Cinder volume driver is deprecated and will be removed " "in X cycle in favour of out-of-tree Cinder CSI driver which requires " "the label cinder_csi_enabled set to True (default behaviour from " "V cycle) when volume_driver is cinder.") actions = cluster_actions.ActionsController() def _generate_name_for_cluster(self, context): """Generate a random name like: zeta-22-cluster.""" name_gen = name_generator.NameGenerator() name = name_gen.generate() return name + '-cluster' def _get_clusters_collection(self, marker, limit, sort_key, sort_dir, expand=False, resource_url=None): context = pecan.request.context if context.is_admin: if expand: policy.enforce(context, "cluster:detail_all_projects", action="cluster:detail_all_projects") else: policy.enforce(context, "cluster:get_all_all_projects", action="cluster:get_all_all_projects") # TODO(flwang): Instead of asking an extra 'all_project's # parameter, currently the design is allowing admin user to list # all clusters from all projects. But the all_tenants is one of # the condition to do project filter in DB API. And it's also used # by periodic tasks. So the could be removed in the future and # a new parameter 'project_id' would be added so that admin user # can list clusters for a particular project. context.all_tenants = True limit = api_utils.validate_limit(limit) sort_dir = api_utils.validate_sort_dir(sort_dir) marker_obj = None if marker: marker_obj = objects.Cluster.get_by_uuid(pecan.request.context, marker) clusters = objects.Cluster.list(pecan.request.context, limit, marker_obj, sort_key=sort_key, sort_dir=sort_dir) return ClusterCollection.convert_with_links(clusters, limit, url=resource_url, expand=expand, sort_key=sort_key, sort_dir=sort_dir) nodegroups = nodegroup.NodeGroupController() @expose.expose(ClusterCollection, types.uuid, int, wtypes.text, wtypes.text) def get_all(self, marker=None, limit=None, sort_key='id', sort_dir='asc'): """Retrieve a list of clusters. :param marker: pagination marker for large data sets. :param limit: maximum number of resources to return in a single result. :param sort_key: column to sort results by. Default: id. :param sort_dir: direction to sort. "asc" or "desc". Default: asc. """ context = pecan.request.context policy.enforce(context, 'cluster:get_all', action='cluster:get_all') return self._get_clusters_collection(marker, limit, sort_key, sort_dir) @expose.expose(ClusterCollection, types.uuid, int, wtypes.text, wtypes.text) def detail(self, marker=None, limit=None, sort_key='id', sort_dir='asc'): """Retrieve a list of clusters with detail. :param marker: pagination marker for large data sets. :param limit: maximum number of resources to return in a single result. :param sort_key: column to sort results by. Default: id. :param sort_dir: direction to sort. "asc" or "desc". Default: asc. """ context = pecan.request.context policy.enforce(context, 'cluster:detail', action='cluster:detail') # NOTE(lucasagomes): /detail should only work against collections parent = pecan.request.path.split('/')[:-1][-1] if parent != "clusters": raise exception.HTTPNotFound expand = True resource_url = '/'.join(['clusters', 'detail']) return self._get_clusters_collection(marker, limit, sort_key, sort_dir, expand, resource_url) def _collect_fault_info(self, context, cluster): """Collect fault info from heat resources of given cluster and store them into cluster.faults. """ # Gather fault info from the cluster nodegroups. return { ng.name: ng.status_reason for ng in cluster.nodegroups if ng.status.endswith('FAILED') } @expose.expose(Cluster, types.uuid_or_name) def get_one(self, cluster_ident): """Retrieve information about the given Cluster. :param cluster_ident: UUID or logical name of the Cluster. """ context = pecan.request.context if context.is_admin: policy.enforce(context, "cluster:get_one_all_projects", action="cluster:get_one_all_projects") # TODO(flwang): Instead of asking an extra 'all_project's # parameter, currently the design is allowing admin user to list # all clusters from all projects. But the all_tenants is one of # the condition to do project filter in DB API. And it's also used # by periodic tasks. So this could be removed in the future and # a new parameter 'project_id' would be added so that admin user # can list clusters for a particular project. context.all_tenants = True cluster = api_utils.get_resource('Cluster', cluster_ident) policy.enforce(context, 'cluster:get', cluster.as_dict(), action='cluster:get') api_cluster = Cluster.convert_with_links(cluster) if api_cluster.status in fields.ClusterStatus.STATUS_FAILED: api_cluster.faults = self._collect_fault_info(context, cluster) return api_cluster def _check_cluster_quota_limit(self, context): try: # Check if there is any explicit quota limit set in Quotas table quota = objects.Quota.get_quota_by_project_id_resource( context, context.project_id, 'Cluster') cluster_limit = quota.hard_limit except exception.QuotaNotFound: # If explicit quota was not set for the project, use default limit cluster_limit = CONF.quotas.max_clusters_per_project if objects.Cluster.get_count_all(context) >= cluster_limit: msg = _("You have reached the maximum clusters per project, " "%d. You may delete a cluster to make room for a new " "one.") % cluster_limit raise exception.ResourceLimitExceeded(msg=msg) @expose.expose(ClusterID, body=Cluster, status_code=202) @validation.enforce_cluster_type_supported() @validation.enforce_cluster_volume_storage_size() def post(self, cluster): """Create a new cluster. :param cluster: a cluster within the request body. """ context = pecan.request.context policy.enforce(context, 'cluster:create', action='cluster:create') self._check_cluster_quota_limit(context) temp_id = cluster.cluster_template_id cluster_template = objects.ClusterTemplate.get_by_uuid(context, temp_id) # If keypair not present, use cluster_template value if cluster.keypair is None: cluster.keypair = cluster_template.keypair_id # If labels is not present, use cluster_template value if cluster.labels == wtypes.Unset or not cluster.labels: cluster.labels = cluster_template.labels else: # If labels are provided check if the user wishes to merge # them with the values from the cluster template. if cluster.merge_labels: labels = cluster_template.labels labels.update(cluster.labels) cluster.labels = labels cinder_csi_enabled = cluster.labels.get('cinder_csi_enabled', True) if (cluster_template.volume_driver == 'cinder' and not strutils.bool_from_string(cinder_csi_enabled)): warnings.warn(self._in_tree_cinder_volume_driver_deprecation_note, DeprecationWarning) LOG.warning(self._in_tree_cinder_volume_driver_deprecation_note) # If floating_ip_enabled is not present, use cluster_template value if cluster.floating_ip_enabled == wtypes.Unset: cluster.floating_ip_enabled = cluster_template.floating_ip_enabled # If master_lb_enabled is not present, use cluster_template value if cluster.master_lb_enabled == wtypes.Unset: cluster.master_lb_enabled = cluster_template.master_lb_enabled attributes = ["docker_volume_size", "master_flavor_id", "flavor_id", "fixed_network", "fixed_subnet"] for attr in attributes: if (getattr(cluster, attr) == wtypes.Unset or not getattr(cluster, attr)): setattr(cluster, attr, getattr(cluster_template, attr)) cluster_dict = cluster.as_dict() attr_validator.validate_os_resources(context, cluster_template.as_dict(), cluster_dict) attr_validator.validate_master_count(cluster_dict, cluster_template.as_dict()) cluster_dict['project_id'] = context.project_id cluster_dict['user_id'] = context.user_id # NOTE(yuywz): We will generate a random human-readable name for # cluster if the name is not specified by user. name = cluster_dict.get('name') or \ self._generate_name_for_cluster(context) cluster_dict['name'] = name cluster_dict['coe_version'] = None cluster_dict['container_version'] = None node_count = cluster_dict.pop('node_count') master_count = cluster_dict.pop('master_count') new_cluster = objects.Cluster(context, **cluster_dict) new_cluster.uuid = uuid.uuid4() pecan.request.rpcapi.cluster_create_async(new_cluster, master_count, node_count, cluster.create_timeout) return ClusterID(new_cluster.uuid) @base.Controller.api_version("1.1", "1.2") @wsme.validate(types.uuid, [ClusterPatchType]) @expose.expose(ClusterID, types.uuid_or_name, body=[ClusterPatchType], status_code=202) def patch(self, cluster_ident, patch): """Update an existing Cluster. :param cluster_ident: UUID or logical name of a cluster. :param patch: a json PATCH document to apply to this cluster. """ (cluster, node_count, health_status, health_status_reason) = self._patch(cluster_ident, patch) pecan.request.rpcapi.cluster_update_async(cluster, node_count, health_status, health_status_reason) return ClusterID(cluster.uuid) @base.Controller.api_version("1.3") # noqa @wsme.validate(types.uuid, bool, [ClusterPatchType]) @expose.expose(ClusterID, types.uuid_or_name, types.boolean, body=[ClusterPatchType], status_code=202) def patch(self, cluster_ident, rollback=False, patch=None): # noqa """Update an existing Cluster. :param cluster_ident: UUID or logical name of a cluster. :param rollback: whether to rollback cluster on update failure. :param patch: a json PATCH document to apply to this cluster. """ (cluster, node_count, health_status, health_status_reason) = self._patch(cluster_ident, patch) pecan.request.rpcapi.cluster_update_async(cluster, node_count, health_status, health_status_reason, rollback) return ClusterID(cluster.uuid) def _patch(self, cluster_ident, patch): context = pecan.request.context if context.is_admin: policy.enforce(context, "cluster:update_all_projects", action="cluster:update_all_projects") context.all_tenants = True cluster = api_utils.get_resource('Cluster', cluster_ident) policy.enforce(context, 'cluster:update', cluster.as_dict(), action='cluster:update') policy.enforce(context, "cluster:update_health_status", action="cluster:update_health_status") try: cluster_dict = cluster.as_dict() new_cluster = Cluster(**api_utils.apply_jsonpatch(cluster_dict, patch)) except api_utils.JSONPATCH_EXCEPTIONS as e: raise exception.PatchError(patch=patch, reason=e) # NOTE(ttsiouts): magnum.objects.Cluster.node_count will be a # property so we won't be able to store it in the object. So # instead of object_what_changed compare the new and the old # clusters. delta = set() for field in new_cluster.fields: if getattr(cluster, field) != getattr(new_cluster, field): delta.add(field) validation.validate_cluster_properties(delta) # NOTE(brtknr): cluster.node_count is the size of the whole cluster # which includes non-default nodegroups. However cluster_update expects # node_count to be the size of the default_ng_worker therefore return # this value unless the patch object says otherwise. node_count = cluster.default_ng_worker.node_count for p in patch: if p['path'] == '/node_count': node_count = p.get('value') or new_cluster.node_count return (cluster, node_count, new_cluster.health_status, new_cluster.health_status_reason) @expose.expose(None, types.uuid_or_name, status_code=204) def delete(self, cluster_ident): """Delete a cluster. :param cluster_ident: UUID of cluster or logical name of the cluster. """ context = pecan.request.context if context.is_admin: policy.enforce(context, 'cluster:delete_all_projects', action='cluster:delete_all_projects') context.all_tenants = True cluster = api_utils.get_resource('Cluster', cluster_ident) policy.enforce(context, 'cluster:delete', cluster.as_dict(), action='cluster:delete') pecan.request.rpcapi.cluster_delete_async(cluster.uuid)