def bay_update(self, context, bay):
    """Apply pending changes on a bay by updating its Heat stack.

    Refuses to proceed unless the stack is in a *_COMPLETE state, emits
    update notifications, and kicks off stack polling after the update.
    """
    LOG.debug('bay_heat bay_update')

    os_clients = clients.OpenStackClients(context)
    stack = os_clients.heat().stacks.get(bay.stack_id)

    # Only a fully settled stack may be updated.
    updatable_states = (bay_status.CREATE_COMPLETE,
                        bay_status.UPDATE_COMPLETE,
                        bay_status.RESUME_COMPLETE,
                        bay_status.RESTORE_COMPLETE,
                        bay_status.ROLLBACK_COMPLETE,
                        bay_status.SNAPSHOT_COMPLETE,
                        bay_status.CHECK_COMPLETE,
                        bay_status.ADOPT_COMPLETE)
    if stack.stack_status not in updatable_states:
        conductor_utils.notify_about_bay_operation(
            context, taxonomy.ACTION_UPDATE, taxonomy.OUTCOME_FAILURE)
        operation = _('Updating a bay when stack status is '
                      '"%s"') % stack.stack_status
        raise exception.NotSupported(operation=operation)

    # Nothing changed on the bay object: nothing to push to Heat.
    if not bay.obj_what_changed():
        return bay

    scaler = scale_manager.ScaleManager(context, os_clients, bay)
    conductor_utils.notify_about_bay_operation(
        context, taxonomy.ACTION_UPDATE, taxonomy.OUTCOME_PENDING)

    _update_stack(context, os_clients, bay, scaler)
    self._poll_and_check(os_clients, bay)

    return bay
def rotate_ca_certificate(self, context, cluster):
    """Rotate the Kubernetes service-account keys via a Heat stack update.

    Only supported for the fedora-coreos cluster distro; a fresh CSR/key
    pair is generated and pushed as Heat parameters on the existing stack.
    """
    cluster_template = conductor_utils.retrieve_cluster_template(
        context, cluster)

    supported_distros = ["fedora-coreos"]
    if cluster_template.cluster_distro not in supported_distros:
        raise exception.NotSupported("Rotating the CA certificate is "
                                     "not supported for cluster with "
                                     "cluster_distro: %s." %
                                     cluster_template.cluster_distro)

    osc = clients.OpenStackClients(context)

    # New service-account key pair; newlines are escaped so the keys
    # survive being passed through as Heat parameter strings.
    sa_keys = x509.generate_csr_and_key(u"Kubernetes Service Account")
    new_params = {
        'kube_service_account_key':
            sa_keys["public_key"].replace("\n", "\\n"),
        'kube_service_account_private_key':
            sa_keys["private_key"].replace("\n", "\\n"),
    }

    # PATCH-style update of the existing stack, with rollback enabled
    # (disable_rollback=False).
    stack_fields = {
        'existing': True,
        'parameters': new_params,
        'disable_rollback': False,
    }
    osc.heat().stacks.update(cluster.stack_id, **stack_fields)
def bay_update(self, context, bay):
    """Apply a node_count change on a bay by updating its Heat stack.

    Only ``node_count`` may change; any other modified property is
    rejected with InvalidParameterValue.  The stack must be in
    CREATE_COMPLETE or UPDATE_COMPLETE state.
    """
    LOG.debug('bay_heat bay_update')

    osc = clients.OpenStackClients(context)
    stack = osc.heat().stacks.get(bay.stack_id)
    if (stack.stack_status != bay_status.CREATE_COMPLETE and
            stack.stack_status != bay_status.UPDATE_COMPLETE):
        operation = _('Updating a bay when stack status is '
                      '"%s"') % stack.stack_status
        raise exception.NotSupported(operation=operation)

    delta = set(bay.obj_what_changed())
    if 'node_count' in delta:
        delta.remove('node_count')
    # BUG FIX: validate the changed properties BEFORE touching the Heat
    # stack.  Previously the stack update and polling ran first, so an
    # invalid request could partially apply (the stack was updated and
    # then the request failed without saving the bay).
    if delta:
        raise exception.InvalidParameterValue(
            err=("cannot change bay property(ies) %s." % ", ".join(delta)))

    manager = scale_manager.ScaleManager(context, osc, bay)
    _update_stack(context, osc, bay, manager)
    self._poll_and_check(osc, bay)

    bay.save()
    return bay
def cluster_update(self, context, cluster, rollback=False):
    """Push pending cluster changes to Heat, optionally with rollback.

    The stack must be in a *_COMPLETE state; otherwise a failure
    notification is emitted and NotSupported is raised.
    """
    LOG.debug('cluster_heat cluster_update')

    osc = clients.OpenStackClients(context)
    stack = osc.heat().stacks.get(cluster.stack_id)

    # Updates are only allowed from settled (*_COMPLETE) stack states.
    settled_states = (fields.ClusterStatus.CREATE_COMPLETE,
                      fields.ClusterStatus.UPDATE_COMPLETE,
                      fields.ClusterStatus.RESUME_COMPLETE,
                      fields.ClusterStatus.RESTORE_COMPLETE,
                      fields.ClusterStatus.ROLLBACK_COMPLETE,
                      fields.ClusterStatus.SNAPSHOT_COMPLETE,
                      fields.ClusterStatus.CHECK_COMPLETE,
                      fields.ClusterStatus.ADOPT_COMPLETE)
    if stack.stack_status not in settled_states:
        conductor_utils.notify_about_cluster_operation(
            context, taxonomy.ACTION_UPDATE, taxonomy.OUTCOME_FAILURE)
        operation = _('Updating a cluster when stack status is '
                      '"%s"') % stack.stack_status
        raise exception.NotSupported(operation=operation)

    # No changed fields means there is nothing to send to Heat.
    if not cluster.obj_what_changed():
        return cluster

    scaler = scale_manager.ScaleManager(context, osc, cluster)
    conductor_utils.notify_about_cluster_operation(
        context, taxonomy.ACTION_UPDATE, taxonomy.OUTCOME_PENDING)

    _update_stack(context, osc, cluster, scaler, rollback)
    self._poll_and_check(osc, cluster)

    return cluster
def cluster_update(self, context, cluster, node_count, rollback=False):
    """Resize the default worker nodegroup of a cluster to ``node_count``.

    Rejects the request unless the cluster status is one of the
    *_COMPLETE states.  On failure the cluster is marked UPDATE_FAILED
    and the nodegroup's node_count is rolled back to its previous value.

    :param context: request context.
    :param cluster: cluster object to update.
    :param node_count: desired worker node count.
    :param rollback: whether the driver should roll back a failed update.
    :raises exception.NotSupported: when the cluster status disallows
        updates.
    :raises exception.InvalidParameterValue: when the driver fails with
        an HTTPBadRequest.
    :returns: the updated cluster (or None when node_count is unchanged).
    """
    LOG.debug('cluster_heat cluster_update')

    osc = clients.OpenStackClients(context)
    # Updates may only start from a settled (*_COMPLETE) status.
    allow_update_status = (fields.ClusterStatus.CREATE_COMPLETE,
                           fields.ClusterStatus.UPDATE_COMPLETE,
                           fields.ClusterStatus.RESUME_COMPLETE,
                           fields.ClusterStatus.RESTORE_COMPLETE,
                           fields.ClusterStatus.ROLLBACK_COMPLETE,
                           fields.ClusterStatus.SNAPSHOT_COMPLETE,
                           fields.ClusterStatus.CHECK_COMPLETE,
                           fields.ClusterStatus.ADOPT_COMPLETE)
    if cluster.status not in allow_update_status:
        conductor_utils.notify_about_cluster_operation(
            context, taxonomy.ACTION_UPDATE, taxonomy.OUTCOME_FAILURE)
        operation = _('Updating a cluster when status is '
                      '"%s"') % cluster.status
        raise exception.NotSupported(operation=operation)

    # Updates will be only reflected to the default worker
    # nodegroup.
    worker_ng = cluster.default_ng_worker
    if worker_ng.node_count == node_count:
        # Requested count already matches; nothing to do.
        return
    # Backup the old node count so that we can restore it
    # in case of an exception.
    old_node_count = worker_ng.node_count

    manager = scale_manager.get_scale_manager(context, osc, cluster)

    # Get driver
    ct = conductor_utils.retrieve_cluster_template(context, cluster)
    cluster_driver = driver.Driver.get_driver(ct.server_type,
                                              ct.cluster_distro,
                                              ct.coe)
    # Update cluster
    try:
        conductor_utils.notify_about_cluster_operation(
            context, taxonomy.ACTION_UPDATE, taxonomy.OUTCOME_PENDING)
        # The new count is persisted before the driver call so the
        # driver sees the target state.
        worker_ng.node_count = node_count
        worker_ng.save()
        cluster_driver.update_cluster(context, cluster, manager, rollback)
        cluster.status = fields.ClusterStatus.UPDATE_IN_PROGRESS
        cluster.status_reason = None
    except Exception as e:
        # Mark the cluster failed and persist before rolling back the
        # nodegroup, then re-raise (wrapping bad-request errors).
        cluster.status = fields.ClusterStatus.UPDATE_FAILED
        cluster.status_reason = six.text_type(e)
        cluster.save()
        # Restore the node_count
        worker_ng.node_count = old_node_count
        worker_ng.save()
        conductor_utils.notify_about_cluster_operation(
            context, taxonomy.ACTION_UPDATE, taxonomy.OUTCOME_FAILURE)
        if isinstance(e, exc.HTTPBadRequest):
            e = exception.InvalidParameterValue(message=six.text_type(e))
            raise e
        raise

    cluster.save()
    return cluster
def cluster_upgrade(self, context, cluster, cluster_template, max_batch_size, nodegroup, rollback=False): LOG.debug('cluster_conductor cluster_upgrade') # osc = clients.OpenStackClients(context) allow_update_status = (fields.ClusterStatus.CREATE_COMPLETE, fields.ClusterStatus.UPDATE_COMPLETE, fields.ClusterStatus.RESUME_COMPLETE, fields.ClusterStatus.RESTORE_COMPLETE, fields.ClusterStatus.ROLLBACK_COMPLETE, fields.ClusterStatus.SNAPSHOT_COMPLETE, fields.ClusterStatus.CHECK_COMPLETE, fields.ClusterStatus.ADOPT_COMPLETE) if cluster.status not in allow_update_status: conductor_utils.notify_about_cluster_operation( context, taxonomy.ACTION_UPDATE, taxonomy.OUTCOME_FAILURE, cluster) operation = _('Upgrading a cluster when status is ' '"%s"') % cluster.status raise exception.NotSupported(operation=operation) # Get driver ct = conductor_utils.retrieve_cluster_template(context, cluster) cluster_driver = driver.Driver.get_driver(ct.server_type, ct.cluster_distro, ct.coe) # Upgrade cluster try: conductor_utils.notify_about_cluster_operation( context, taxonomy.ACTION_UPDATE, taxonomy.OUTCOME_PENDING, cluster) cluster_driver.upgrade_cluster(context, cluster, cluster_template, max_batch_size, nodegroup, rollback) cluster.status = fields.ClusterStatus.UPDATE_IN_PROGRESS nodegroup.status = fields.ClusterStatus.UPDATE_IN_PROGRESS cluster.status_reason = None except Exception as e: cluster.status = fields.ClusterStatus.UPDATE_FAILED cluster.status_reason = six.text_type(e) cluster.save() nodegroup.status = fields.ClusterStatus.UPDATE_FAILED nodegroup.status_reason = six.text_type(e) nodegroup.save() conductor_utils.notify_about_cluster_operation( context, taxonomy.ACTION_UPDATE, taxonomy.OUTCOME_FAILURE, cluster) if isinstance(e, exc.HTTPBadRequest): e = exception.InvalidParameterValue(message=six.text_type(e)) raise e raise nodegroup.save() cluster.save() return cluster
def patch(self, cluster_ident):
    """API handler: rotate the CA certificate of the given cluster.

    Enforces the ``certificate:rotate_ca`` policy, rejects non-TLS
    clusters, and delegates the rotation to the conductor over RPC.
    """
    request_context = pecan.request.context
    cluster = api_utils.get_resource('Cluster', cluster_ident)
    policy.enforce(request_context, 'certificate:rotate_ca', cluster,
                   action='certificate:rotate_ca')

    # Rotation only makes sense when TLS is enabled on the cluster.
    if cluster.cluster_template.tls_disabled:
        raise exception.NotSupported("Rotating the CA certificate on a "
                                     "non-TLS cluster is not supported")

    pecan.request.rpcapi.rotate_ca_certificate(cluster)
def wrapper(self, context, cluster, nodegroup, *args, **kwargs):
    """Guard a nodegroup operation against disallowed nodegroup states.

    If the nodegroup carries a status that is not in
    ALLOWED_NODEGROUP_STATES, the wrapped operation is rejected with
    NotSupported; otherwise the wrapped function is invoked unchanged.
    """
    # Before we begin we need to check the status
    # of the cluster. If the cluster is in a status
    # that does not allow nodegroup creation we just
    # fail.
    if ('status' in nodegroup
            and nodegroup.status not in ALLOWED_NODEGROUP_STATES):
        operation = _(
            '%(fname)s when nodegroup status is "%(status)s"') % {
                'fname': func.__name__,
                # BUG FIX: report the nodegroup's status — that is what
                # was checked above.  Previously this interpolated
                # cluster.status, producing a misleading error message.
                'status': nodegroup.status
            }
        raise exception.NotSupported(operation=operation)
    return func(self, context, cluster, nodegroup, *args, **kwargs)
def rotate_ca_certificate(self, context, cluster):
    """Rotate the CA certificates of a cluster.

    Regenerates the cluster certificates, then lets the cluster's
    driver apply the rotation.  Rejects the request unless the cluster
    status is one of the *_COMPLETE states.  On failure the cluster is
    marked UPDATE_FAILED and the error is re-raised (HTTPBadRequest is
    wrapped as InvalidParameterValue).

    :param context: request context.
    :param cluster: cluster whose CA certificates are rotated.
    :returns: the cluster object.
    """
    LOG.info('start rotate_ca_certificate for cluster: %s', cluster.uuid)

    # Rotation may only start from a settled (*_COMPLETE) status.
    allow_update_status = (fields.ClusterStatus.CREATE_COMPLETE,
                           fields.ClusterStatus.UPDATE_COMPLETE,
                           fields.ClusterStatus.RESUME_COMPLETE,
                           fields.ClusterStatus.RESTORE_COMPLETE,
                           fields.ClusterStatus.ROLLBACK_COMPLETE,
                           fields.ClusterStatus.SNAPSHOT_COMPLETE,
                           fields.ClusterStatus.CHECK_COMPLETE,
                           fields.ClusterStatus.ADOPT_COMPLETE)
    if cluster.status not in allow_update_status:
        conductor_utils.notify_about_cluster_operation(
            context, taxonomy.ACTION_UPDATE, taxonomy.OUTCOME_FAILURE,
            cluster)
        operation = _('Updating a cluster when status is '
                      '"%s"') % cluster.status
        raise exception.NotSupported(operation=operation)

    try:
        # re-generate the ca certs
        cert_manager.generate_certificates_to_cluster(cluster,
                                                      context=context)
        cluster_driver = driver.Driver.get_driver_for_cluster(context,
                                                              cluster)
        # The driver performs the actual rotation (e.g. a stack update).
        cluster_driver.rotate_ca_certificate(context, cluster)
        cluster.status = fields.ClusterStatus.UPDATE_IN_PROGRESS
        cluster.status_reason = None
    except Exception as e:
        # Persist the failure, notify, and re-raise.
        cluster.status = fields.ClusterStatus.UPDATE_FAILED
        cluster.status_reason = six.text_type(e)
        cluster.save()
        conductor_utils.notify_about_cluster_operation(
            context, taxonomy.ACTION_UPDATE, taxonomy.OUTCOME_FAILURE,
            cluster)
        if isinstance(e, exc.HTTPBadRequest):
            e = exception.InvalidParameterValue(message=six.text_type(e))
            raise e
        raise

    cluster.save()
    return cluster
def bay_update(self, context, bay):
    """Validate and apply pending bay changes through Heat.

    The stack must be CREATE_COMPLETE or UPDATE_COMPLETE; changed
    properties are validated before the stack update is issued.
    """
    LOG.debug('bay_heat bay_update')

    os_clients = clients.OpenStackClients(context)
    stack = os_clients.heat().stacks.get(bay.stack_id)

    if stack.stack_status not in (bay_status.CREATE_COMPLETE,
                                  bay_status.UPDATE_COMPLETE):
        operation = _('Updating a bay when stack status is '
                      '"%s"') % stack.stack_status
        raise exception.NotSupported(operation=operation)

    changed = bay.obj_what_changed()
    if not changed:
        # Nothing to do when no bay field was modified.
        return bay
    # Reject any property that is not allowed to change.
    self._validate_properties(changed)

    scaler = scale_manager.ScaleManager(context, os_clients, bay)
    _update_stack(context, os_clients, bay, scaler)
    self._poll_and_check(os_clients, bay)

    return bay
def rotate_ca_certificate(self, context, cluster):
    """Default stub: CA rotation is unsupported by this driver.

    Drivers that support rotation override this method.
    """
    message = ("'rotate_ca_certificate' is not supported by this "
               "driver.")
    raise exception.NotSupported(message)
def cluster_resize(self, context, cluster,
                   node_count, nodes_to_remove, nodegroup):
    """Resize a nodegroup of a cluster to ``node_count`` nodes.

    Unlike cluster_update, resizing is also allowed from UPDATE_FAILED
    and UPDATE_IN_PROGRESS to support the Cluster Autoscaler use case.
    On failure the nodegroup's node count is restored and both objects
    are marked UPDATE_FAILED.

    :param context: request context.
    :param cluster: cluster being resized.
    :param node_count: desired node count for the nodegroup.
    :param nodes_to_remove: specific nodes to remove when scaling down.
    :param nodegroup: nodegroup being resized.
    :raises exception.NotSupported: when the cluster status disallows
        resizing.
    :raises exception.InvalidParameterValue: when the driver fails with
        an HTTPBadRequest.
    :returns: the cluster object.
    """
    LOG.debug('cluster_conductor cluster_resize')

    osc = clients.OpenStackClients(context)
    # NOTE(flwang): One of important user cases of /resize API is
    # supporting the auto scaling action triggered by Kubernetes Cluster
    # Autoscaler, so there are 2 cases may happen:
    # 1. API could be triggered very offen
    # 2. Scale up or down may fail and we would like to offer the ability
    #    that recover the cluster to allow it being resized when last
    #    update failed.
    allow_update_status = (
        fields.ClusterStatus.CREATE_COMPLETE,
        fields.ClusterStatus.UPDATE_COMPLETE,
        fields.ClusterStatus.RESUME_COMPLETE,
        fields.ClusterStatus.RESTORE_COMPLETE,
        fields.ClusterStatus.ROLLBACK_COMPLETE,
        fields.ClusterStatus.SNAPSHOT_COMPLETE,
        fields.ClusterStatus.CHECK_COMPLETE,
        fields.ClusterStatus.ADOPT_COMPLETE,
        fields.ClusterStatus.UPDATE_FAILED,
        fields.ClusterStatus.UPDATE_IN_PROGRESS,
    )
    if cluster.status not in allow_update_status:
        conductor_utils.notify_about_cluster_operation(
            context, taxonomy.ACTION_UPDATE, taxonomy.OUTCOME_FAILURE,
            cluster)
        operation = _('Resizing a cluster when status is '
                      '"%s"') % cluster.status
        raise exception.NotSupported(operation=operation)

    resize_manager = scale_manager.get_scale_manager(context, osc, cluster)

    # Get driver
    ct = conductor_utils.retrieve_cluster_template(context, cluster)
    cluster_driver = driver.Driver.get_driver(ct.server_type,
                                              ct.cluster_distro,
                                              ct.coe)
    # Backup the old node count so that we can restore it
    # in case of an exception.
    old_node_count = nodegroup.node_count

    # Resize cluster
    try:
        # Persist the target count and in-progress status before calling
        # the driver so it sees the intended state.
        nodegroup.node_count = node_count
        nodegroup.status = fields.ClusterStatus.UPDATE_IN_PROGRESS
        nodegroup.save()
        conductor_utils.notify_about_cluster_operation(
            context, taxonomy.ACTION_UPDATE, taxonomy.OUTCOME_PENDING,
            cluster)
        cluster_driver.resize_cluster(context, cluster, resize_manager,
                                      node_count, nodes_to_remove,
                                      nodegroup)
        cluster.status = fields.ClusterStatus.UPDATE_IN_PROGRESS
        cluster.status_reason = None
    except Exception as e:
        # Mark the cluster failed, restore the previous node count on the
        # nodegroup, notify, and re-raise (wrapping bad-request errors).
        cluster.status = fields.ClusterStatus.UPDATE_FAILED
        cluster.status_reason = six.text_type(e)
        cluster.save()
        nodegroup.node_count = old_node_count
        nodegroup.status = fields.ClusterStatus.UPDATE_FAILED
        nodegroup.status_reason = six.text_type(e)
        nodegroup.save()
        conductor_utils.notify_about_cluster_operation(
            context, taxonomy.ACTION_UPDATE, taxonomy.OUTCOME_FAILURE,
            cluster)
        if isinstance(e, exc.HTTPBadRequest):
            e = exception.InvalidParameterValue(message=six.text_type(e))
            raise e
        raise

    cluster.save()
    return cluster
def test_NotSupported(self):
    """A raised NotSupported propagates as NotSupported."""
    exc_instance = exception.NotSupported()
    # assertRaises invokes self.raise_ with the instance as argument.
    self.assertRaises(exception.NotSupported, self.raise_, exc_instance)