def bay_create(self, context, bay, bay_create_timeout):
    """Provision a new bay by creating a Heat stack.

    Creates the trustee/trust and certificates the stack templates
    consume, emits a create-pending notification, launches the stack,
    and records the stack id on the bay before polling for completion.
    On any failure the trust and certificates are rolled back and a
    create-failure notification is emitted before re-raising.
    """
    LOG.debug('bay_heat bay_create')
    os_clients = clients.OpenStackClients(context)
    try:
        # Trustee/trust and certificates must exist before the stack
        # is created, since the stack consumes them.
        trust_manager.create_trustee_and_trust(os_clients, bay)
        cert_manager.generate_certificates_to_bay(bay, context=context)
        conductor_utils.notify_about_bay_operation(
            context, taxonomy.ACTION_CREATE, taxonomy.OUTCOME_PENDING)
        stack = _create_stack(context, os_clients, bay,
                              bay_create_timeout)
    except Exception as err:
        # Roll back the resources created above, then report failure.
        cert_manager.delete_certificates_from_bay(bay, context=context)
        trust_manager.delete_trustee_and_trust(os_clients, context, bay)
        conductor_utils.notify_about_bay_operation(
            context, taxonomy.ACTION_CREATE, taxonomy.OUTCOME_FAILURE)
        if isinstance(err, exc.HTTPBadRequest):
            # Surface Heat validation errors as an API-level exception.
            raise exception.InvalidParameterValue(
                message=six.text_type(err))
        raise
    bay.stack_id = stack['stack']['id']
    bay.status = bay_status.CREATE_IN_PROGRESS
    bay.create()
    self._poll_and_check(os_clients, bay)
    return bay
def bay_delete(self, context, uuid):
    """Delete the bay identified by ``uuid`` and its Heat stack.

    Requests deletion of the backing stack and then polls for
    completion.  If Heat no longer knows the stack, the bay's trust,
    certificates, and database record are cleaned up immediately.

    :param context: request context used to build OpenStack clients.
    :param uuid: UUID of the bay to delete.
    :raises OperationInProgress: if Heat reports a conflicting
        operation on the stack (HTTPConflict).
    """
    LOG.debug('bay_heat bay_delete')
    osc = clients.OpenStackClients(context)
    bay = objects.Bay.get_by_uuid(context, uuid)
    stack_id = bay.stack_id
    # NOTE(sdake): This will execute a stack_delete operation.  This
    # will ignore HTTPNotFound exceptions (stack wasn't present).  In
    # the case that Heat couldn't find the stack representing the bay,
    # likely a user has deleted the stack outside the context of
    # Magnum.  Therefore the contents of the bay are forever lost.
    #
    # If the exception is unhandled, the original exception will be
    # raised.
    try:
        osc.heat().stacks.delete(stack_id)
    except exc.HTTPNotFound:
        # BUG FIX: message previously read "was not be found".
        LOG.info(_LI('The stack %s was not found during bay'
                     ' deletion.'), stack_id)
        try:
            trust_manager.delete_trustee_and_trust(osc, context, bay)
            cert_manager.delete_certificates_from_bay(bay)
            bay.destroy()
        except exception.BayNotFound:
            # A concurrent deletion already removed the bay record.
            LOG.info(_LI('The bay %s has been deleted by others.'),
                     uuid)
        return None
    except exc.HTTPConflict:
        raise exception.OperationInProgress(bay_name=bay.name)
    # NOTE: the previous ``except Exception: raise`` clause was a
    # no-op re-raise and has been removed; unexpected errors still
    # propagate unchanged.
    self._poll_and_check(osc, bay)
    return None
def bay_create(self, context, bay, bay_create_timeout):
    """Provision a new bay via a Heat stack.

    Assigns the bay a fresh UUID, sets up its trust and certificates,
    emits a create-pending notification, and launches the stack.  On
    failure the trust and certificates are rolled back, a
    create-failure notification is emitted, and the error re-raised.
    """
    LOG.debug('bay_heat bay_create')
    os_clients = clients.OpenStackClients(context)
    bay.uuid = uuid.uuid4()
    try:
        # The stack consumes the trust and certificates, so create
        # them first.
        trust_manager.create_trustee_and_trust(os_clients, bay)
        cert_manager.generate_certificates_to_bay(bay, context=context)
        conductor_utils.notify_about_bay_operation(
            context, taxonomy.ACTION_CREATE, taxonomy.OUTCOME_PENDING)
        stack = _create_stack(context, os_clients, bay,
                              bay_create_timeout)
    except Exception as err:
        # Undo partial setup before reporting the failure.
        cert_manager.delete_certificates_from_bay(bay, context=context)
        trust_manager.delete_trustee_and_trust(os_clients, context, bay)
        conductor_utils.notify_about_bay_operation(
            context, taxonomy.ACTION_CREATE, taxonomy.OUTCOME_FAILURE)
        if isinstance(err, exc.HTTPBadRequest):
            # Translate Heat validation errors for the API layer.
            raise exception.InvalidParameterValue(
                message=six.text_type(err))
        raise
    bay.stack_id = stack['stack']['id']
    bay.status = bay_status.CREATE_IN_PROGRESS
    bay.create()
    self._poll_and_check(os_clients, bay)
    return bay
def bay_create(self, context, bay, bay_create_timeout):
    """Provision a new bay via a Heat stack.

    Assigns the bay a fresh UUID, creates its trust and certificates,
    launches the stack, and records the stack id on the bay before
    polling for completion.  On failure the trust and certificates are
    rolled back and the error re-raised.
    """
    LOG.debug('bay_heat bay_create')
    osc = clients.OpenStackClients(context)
    bay.uuid = uuid.uuid4()
    try:
        # Create trustee/trust and set them to bay
        trust_manager.create_trustee_and_trust(osc, bay)
        # Generate certificate and set the cert reference to bay
        cert_manager.generate_certificates_to_bay(bay)
        created_stack = _create_stack(context, osc, bay,
                                      bay_create_timeout)
    except Exception as e:
        cert_manager.delete_certificates_from_bay(bay)
        trust_manager.delete_trustee_and_trust(osc, context, bay)
        if isinstance(e, exc.HTTPBadRequest):
            e = exception.InvalidParameterValue(message=six.text_type(e))
            raise e
        # BUG FIX: re-raise unexpected errors.  Previously control fell
        # through the handler and hit a NameError on ``created_stack``
        # below, masking the original exception.
        raise
    bay.stack_id = created_stack['stack']['id']
    bay.status = bay_status.CREATE_IN_PROGRESS
    bay.create()
    self._poll_and_check(osc, bay)
    return bay
def _delete_complete(self):
    """Finish bay deletion once the Heat stack is gone.

    Removes the Keystone trust/trustee, deletes the bay's
    certificates, and destroys the bay database record.  A concurrent
    deletion by another actor (BayNotFound) is logged, not raised.
    """
    # BUG FIX: pass interpolation args to the logger lazily instead of
    # pre-formatting with ``%`` — formatting then only happens when the
    # record is actually emitted.
    LOG.info(_LI('Bay has been deleted, stack_id: %s'),
             self.bay.stack_id)
    try:
        trust_manager.delete_trustee_and_trust(self.openstack_client,
                                               self.context, self.bay)
        cert_manager.delete_certificates_from_bay(self.bay)
        self.bay.destroy()
    except exception.BayNotFound:
        LOG.info(_LI('The bay %s has been deleted by others.'),
                 self.bay.uuid)
def _delete_complete(self):
    """Run post-deletion cleanup once the cluster's stack is gone.

    Drops the Keystone trust/trustee and the cluster's certificates.
    A concurrent deletion by another actor (ClusterNotFound) is
    logged rather than raised.
    """
    cluster = self.cluster
    LOG.info('Cluster has been deleted, stack_id: %s',
             cluster.stack_id)
    try:
        trust_manager.delete_trustee_and_trust(self.openstack_client,
                                               self.context, cluster)
        cert_manager.delete_certificates_from_cluster(
            cluster, context=self.context)
    except exception.ClusterNotFound:
        # Someone else already removed the cluster record; nothing to do.
        LOG.info('The cluster %s has been deleted by others.',
                 cluster.uuid)
def test_delete_trustee_and_trust_without_trustee_user_id(self):
    """The trust is still deleted when the bay has no trustee user."""
    bay = mock.MagicMock(trust_id='trust_id', trustee_user_id=None)
    keystone = mock.MagicMock()
    self.osc.keystone.return_value = keystone
    ctx = mock.MagicMock()

    trust_manager.delete_trustee_and_trust(self.osc, ctx, bay)

    keystone.delete_trust.assert_called_once_with(ctx, bay)
    self.assertEqual(0, keystone.delete_trustee.call_count)
def _delete_complete(self):
    """Finish bay deletion once the Heat stack is gone.

    Removes the Keystone trust/trustee, deletes the bay's
    certificates, and destroys the bay database record.  A concurrent
    deletion (BayNotFound) is logged, not raised.
    """
    # BUG FIX: use lazy logger arguments rather than eager ``%``
    # interpolation, so the message is only formatted when emitted.
    LOG.info(_LI('Bay has been deleted, stack_id: %s'),
             self.bay.stack_id)
    try:
        trust_manager.delete_trustee_and_trust(self.openstack_client,
                                               self.context, self.bay)
        cert_manager.delete_certificates_from_bay(self.bay)
        self.bay.destroy()
    except exception.BayNotFound:
        LOG.info(_LI('The bay %s has been deleted by others.'),
                 self.bay.uuid)
def _delete_complete(self):
    """Finish cluster deletion once the Heat stack is gone.

    Removes the Keystone trust/trustee and the cluster's certificates.
    A concurrent deletion (ClusterNotFound) is logged, not raised.
    """
    # BUG FIX: use lazy logger arguments rather than eager ``%``
    # interpolation, so the message is only formatted when emitted.
    LOG.info(_LI('Cluster has been deleted, stack_id: %s'),
             self.cluster.stack_id)
    try:
        trust_manager.delete_trustee_and_trust(self.openstack_client,
                                               self.context,
                                               self.cluster)
        cert_manager.delete_certificates_from_cluster(
            self.cluster, context=self.context)
    except exception.ClusterNotFound:
        LOG.info(_LI('The cluster %s has been deleted by others.'),
                 self.cluster.uuid)
def test_delete_trustee_and_trust_without_trust_id(self):
    """Only the trustee user is removed when the bay has no trust id."""
    bay = mock.MagicMock(trust_id=None,
                         trustee_user_id='trustee_user_id')
    keystone = mock.MagicMock()
    self.osc.keystone.return_value = keystone

    # NOTE(review): this variant of the API takes (osc, bay) with no
    # context argument — preserved as-is.
    trust_manager.delete_trustee_and_trust(self.osc, bay)

    self.assertEqual(0, keystone.delete_trust.call_count)
    keystone.delete_trustee.assert_called_once_with(
        bay.trustee_user_id)
def test_delete_trustee_and_trust_without_trustee_user_id(self):
    """The trust is still deleted when the cluster has no trustee user."""
    cluster = mock.MagicMock(trust_id='trust_id',
                             trustee_user_id=None)
    keystone = mock.MagicMock()
    self.osc.keystone.return_value = keystone
    ctx = mock.MagicMock()

    trust_manager.delete_trustee_and_trust(self.osc, ctx, cluster)

    keystone.delete_trust.assert_called_once_with(ctx, cluster)
    self.assertEqual(0, keystone.delete_trustee.call_count)
def test_delete_trustee_and_trust(self):
    """Both the trust and the trustee user are removed."""
    cluster = mock.MagicMock(trust_id='trust_id',
                             trustee_user_id='trustee_user_id')
    keystone = mock.MagicMock()
    self.osc.keystone.return_value = keystone
    ctx = mock.MagicMock()

    trust_manager.delete_trustee_and_trust(self.osc, ctx, cluster)

    keystone.delete_trust.assert_called_once_with(ctx, cluster)
    keystone.delete_trustee.assert_called_once_with(
        cluster.trustee_user_id)
def cluster_delete(self, context, uuid):
    """Delete the cluster identified by ``uuid`` via its driver.

    Emits a delete-pending notification, asks the driver to delete the
    cluster, and marks it DELETE_IN_PROGRESS.  If the driver reports
    the backing resource missing (HTTPNotFound), the trust,
    certificates, nodegroups, and DB record are cleaned up immediately
    and a success notification is sent.  Unexpected errors mark the
    cluster DELETE_FAILED before re-raising.

    :param context: request context used to build clients/notifications.
    :param uuid: UUID of the cluster to delete.
    :raises OperationInProgress: when the driver raises HTTPConflict.
    """
    LOG.debug('cluster_conductor cluster_delete')
    osc = clients.OpenStackClients(context)
    cluster = objects.Cluster.get_by_uuid(context, uuid)
    ct = conductor_utils.retrieve_cluster_template(context, cluster)
    cluster_driver = driver.Driver.get_driver(ct.server_type,
                                              ct.cluster_distro, ct.coe)
    try:
        conductor_utils.notify_about_cluster_operation(
            context, taxonomy.ACTION_DELETE, taxonomy.OUTCOME_PENDING,
            cluster)
        cluster_driver.delete_cluster(context, cluster)
        cluster.status = fields.ClusterStatus.DELETE_IN_PROGRESS
        # Clear any stale failure reason from a previous attempt.
        cluster.status_reason = None
    except exc.HTTPNotFound:
        # The backing resource is already gone (likely deleted outside
        # Magnum); finish the bookkeeping locally.
        LOG.info('The cluster %s was not found during cluster'
                 ' deletion.', cluster.id)
        try:
            trust_manager.delete_trustee_and_trust(osc, context, cluster)
            cert_manager.delete_certificates_from_cluster(cluster,
                                                          context=context)
            # delete all cluster's nodegroups
            for ng in cluster.nodegroups:
                ng.destroy()
            cluster.destroy()
        except exception.ClusterNotFound:
            # A concurrent deletion already removed the record.
            LOG.info('The cluster %s has been deleted by others.', uuid)
        conductor_utils.notify_about_cluster_operation(
            context, taxonomy.ACTION_DELETE, taxonomy.OUTCOME_SUCCESS,
            cluster)
        return None
    except exc.HTTPConflict:
        conductor_utils.notify_about_cluster_operation(
            context, taxonomy.ACTION_DELETE, taxonomy.OUTCOME_FAILURE,
            cluster)
        raise exception.OperationInProgress(cluster_name=cluster.name)
    except Exception as unexp:
        conductor_utils.notify_about_cluster_operation(
            context, taxonomy.ACTION_DELETE, taxonomy.OUTCOME_FAILURE,
            cluster)
        # Record the failure on the cluster before propagating.
        cluster.status = fields.ClusterStatus.DELETE_FAILED
        cluster.status_reason = six.text_type(unexp)
        cluster.save()
        raise
    # Persist the DELETE_IN_PROGRESS transition on the success path.
    cluster.save()
    return None
def cluster_delete(self, context, uuid):
    """Delete the cluster identified by ``uuid`` and its Heat stack.

    Emits a delete-pending notification, requests deletion of the
    backing stack, marks the cluster DELETE_IN_PROGRESS, and polls for
    completion.  If Heat no longer knows the stack, the trust,
    certificates, and DB record are cleaned up immediately.

    :param context: request context used to build OpenStack clients.
    :param uuid: UUID of the cluster to delete.
    :raises OperationInProgress: if Heat reports HTTPConflict.
    """
    LOG.debug('cluster_heat cluster_delete')
    osc = clients.OpenStackClients(context)
    cluster = objects.Cluster.get_by_uuid(context, uuid)
    stack_id = cluster.stack_id
    # NOTE(sdake): This will execute a stack_delete operation. This will
    # Ignore HTTPNotFound exceptions (stack wasn't present). In the case
    # that Heat couldn't find the stack representing the cluster, likely a
    # user has deleted the stack outside the context of Magnum. Therefore
    # the contents of the cluster are forever lost.
    #
    # If the exception is unhandled, the original exception will be raised.
    try:
        conductor_utils.notify_about_cluster_operation(
            context, taxonomy.ACTION_DELETE, taxonomy.OUTCOME_PENDING)
        osc.heat().stacks.delete(stack_id)
    except exc.HTTPNotFound:
        LOG.info(
            _LI('The stack %s was not found during cluster'
                ' deletion.'), stack_id)
        try:
            trust_manager.delete_trustee_and_trust(osc, context, cluster)
            cert_manager.delete_certificates_from_cluster(cluster,
                                                          context=context)
            cluster.destroy()
        except exception.ClusterNotFound:
            # A concurrent deletion already removed the record.
            LOG.info(_LI('The cluster %s has been deleted by others.'),
                     uuid)
        conductor_utils.notify_about_cluster_operation(
            context, taxonomy.ACTION_DELETE, taxonomy.OUTCOME_SUCCESS)
        return None
    except exc.HTTPConflict:
        conductor_utils.notify_about_cluster_operation(
            context, taxonomy.ACTION_DELETE, taxonomy.OUTCOME_FAILURE)
        raise exception.OperationInProgress(cluster_name=cluster.name)
    except Exception:
        conductor_utils.notify_about_cluster_operation(
            context, taxonomy.ACTION_DELETE, taxonomy.OUTCOME_FAILURE)
        raise
    # Stack delete accepted; record progress and start polling.
    cluster.status = fields.ClusterStatus.DELETE_IN_PROGRESS
    cluster.save()
    self._poll_and_check(osc, cluster)
    return None
def test_delete_trustee_and_trust(self):
    """Both the trust and the trustee user are removed for a bay."""
    bay = mock.MagicMock(trust_id='trust_id',
                         trustee_user_id='trustee_user_id')
    keystone = mock.MagicMock()
    self.osc.keystone.return_value = keystone
    ctx = mock.MagicMock()

    trust_manager.delete_trustee_and_trust(self.osc, ctx, bay)

    keystone.delete_trust.assert_called_once_with(ctx, bay)
    keystone.delete_trustee.assert_called_once_with(
        bay.trustee_user_id)
def cluster_delete(self, context, uuid):
    """Delete the cluster identified by ``uuid`` and its Heat stack.

    Emits a delete-pending notification, requests stack deletion,
    marks the cluster DELETE_IN_PROGRESS, and polls for completion.
    If Heat no longer knows the stack, the trust, certificates, and DB
    record are cleaned up immediately and a success notification sent.

    :param context: request context used to build OpenStack clients.
    :param uuid: UUID of the cluster to delete.
    :raises OperationInProgress: if Heat reports HTTPConflict.
    """
    LOG.debug('cluster_heat cluster_delete')
    osc = clients.OpenStackClients(context)
    cluster = objects.Cluster.get_by_uuid(context, uuid)
    stack_id = cluster.stack_id
    # NOTE(sdake): This will execute a stack_delete operation. This will
    # Ignore HTTPNotFound exceptions (stack wasn't present). In the case
    # that Heat couldn't find the stack representing the cluster, likely a
    # user has deleted the stack outside the context of Magnum. Therefore
    # the contents of the cluster are forever lost.
    #
    # If the exception is unhandled, the original exception will be raised.
    try:
        conductor_utils.notify_about_cluster_operation(
            context, taxonomy.ACTION_DELETE, taxonomy.OUTCOME_PENDING)
        osc.heat().stacks.delete(stack_id)
    except exc.HTTPNotFound:
        LOG.info(_LI('The stack %s was not found during cluster'
                     ' deletion.'), stack_id)
        try:
            trust_manager.delete_trustee_and_trust(osc, context, cluster)
            cert_manager.delete_certificates_from_cluster(cluster,
                                                          context=context)
            cluster.destroy()
        except exception.ClusterNotFound:
            # A concurrent deletion already removed the record.
            LOG.info(_LI('The cluster %s has been deleted by others.'),
                     uuid)
        conductor_utils.notify_about_cluster_operation(
            context, taxonomy.ACTION_DELETE, taxonomy.OUTCOME_SUCCESS)
        return None
    except exc.HTTPConflict:
        conductor_utils.notify_about_cluster_operation(
            context, taxonomy.ACTION_DELETE, taxonomy.OUTCOME_FAILURE)
        raise exception.OperationInProgress(cluster_name=cluster.name)
    except Exception:
        conductor_utils.notify_about_cluster_operation(
            context, taxonomy.ACTION_DELETE, taxonomy.OUTCOME_FAILURE)
        raise
    # Stack delete accepted; record progress and start polling.
    cluster.status = fields.ClusterStatus.DELETE_IN_PROGRESS
    cluster.save()
    self._poll_and_check(osc, cluster)
    return None
def cluster_delete(self, context, uuid):
    """Delete the cluster identified by ``uuid`` via its driver.

    Emits a delete-pending notification, asks the driver to delete the
    cluster, and marks it DELETE_IN_PROGRESS.  If the driver reports
    the backing resource missing (HTTPNotFound), the trust,
    certificates, nodegroups, and DB record are cleaned up immediately.
    Unexpected errors mark the cluster DELETE_FAILED before re-raising.

    :param context: request context used to build clients/notifications.
    :param uuid: UUID of the cluster to delete.
    :raises OperationInProgress: when the driver raises HTTPConflict.
    """
    LOG.debug('cluster_conductor cluster_delete')
    osc = clients.OpenStackClients(context)
    cluster = objects.Cluster.get_by_uuid(context, uuid)
    ct = conductor_utils.retrieve_cluster_template(context, cluster)
    cluster_driver = driver.Driver.get_driver(ct.server_type,
                                              ct.cluster_distro, ct.coe)
    try:
        conductor_utils.notify_about_cluster_operation(
            context, taxonomy.ACTION_DELETE, taxonomy.OUTCOME_PENDING)
        cluster_driver.delete_cluster(context, cluster)
        cluster.status = fields.ClusterStatus.DELETE_IN_PROGRESS
        # Clear any stale failure reason from a previous attempt.
        cluster.status_reason = None
    except exc.HTTPNotFound:
        # Backing resource already gone; finish bookkeeping locally.
        LOG.info('The cluster %s was not found during cluster'
                 ' deletion.', cluster.id)
        try:
            trust_manager.delete_trustee_and_trust(osc, context, cluster)
            cert_manager.delete_certificates_from_cluster(cluster,
                                                          context=context)
            # delete all cluster's nodegroups
            for ng in cluster.nodegroups:
                ng.destroy()
            cluster.destroy()
        except exception.ClusterNotFound:
            # A concurrent deletion already removed the record.
            LOG.info('The cluster %s has been deleted by others.', uuid)
        conductor_utils.notify_about_cluster_operation(
            context, taxonomy.ACTION_DELETE, taxonomy.OUTCOME_SUCCESS)
        return None
    except exc.HTTPConflict:
        conductor_utils.notify_about_cluster_operation(
            context, taxonomy.ACTION_DELETE, taxonomy.OUTCOME_FAILURE)
        raise exception.OperationInProgress(cluster_name=cluster.name)
    except Exception as unexp:
        conductor_utils.notify_about_cluster_operation(
            context, taxonomy.ACTION_DELETE, taxonomy.OUTCOME_FAILURE)
        # Record the failure on the cluster before propagating.
        cluster.status = fields.ClusterStatus.DELETE_FAILED
        cluster.status_reason = six.text_type(unexp)
        cluster.save()
        raise
    # Persist the DELETE_IN_PROGRESS transition on the success path.
    cluster.save()
    return None
def cluster_delete(self, context, uuid):
    """Delete the cluster identified by ``uuid`` via its driver's stack.

    Emits a delete-pending notification, asks the driver to delete the
    backing stack, marks the cluster DELETE_IN_PROGRESS, and polls for
    completion.  If the stack is already gone (HTTPNotFound), the
    trust, certificates, and DB record are cleaned up immediately.

    :param context: request context used to build clients/notifications.
    :param uuid: UUID of the cluster to delete.
    :raises OperationInProgress: when the driver raises HTTPConflict.
    """
    LOG.debug('cluster_heat cluster_delete')
    osc = clients.OpenStackClients(context)
    cluster = objects.Cluster.get_by_uuid(context, uuid)
    ct = conductor_utils.retrieve_cluster_template(context, cluster)
    cluster_driver = driver.Driver.get_driver(ct.server_type,
                                              ct.cluster_distro, ct.coe)
    try:
        conductor_utils.notify_about_cluster_operation(
            context, taxonomy.ACTION_DELETE, taxonomy.OUTCOME_PENDING)
        cluster_driver.delete_stack(context, osc, cluster)
    except exc.HTTPNotFound:
        # Stack deleted outside Magnum; finish bookkeeping locally.
        LOG.info(
            _LI('The stack %s was not found during cluster'
                ' deletion.'), cluster.stack_id)
        try:
            trust_manager.delete_trustee_and_trust(osc, context, cluster)
            cert_manager.delete_certificates_from_cluster(cluster,
                                                          context=context)
            cluster.destroy()
        except exception.ClusterNotFound:
            # A concurrent deletion already removed the record.
            LOG.info(_LI('The cluster %s has been deleted by others.'),
                     uuid)
        conductor_utils.notify_about_cluster_operation(
            context, taxonomy.ACTION_DELETE, taxonomy.OUTCOME_SUCCESS)
        return None
    except exc.HTTPConflict:
        conductor_utils.notify_about_cluster_operation(
            context, taxonomy.ACTION_DELETE, taxonomy.OUTCOME_FAILURE)
        raise exception.OperationInProgress(cluster_name=cluster.name)
    except Exception:
        conductor_utils.notify_about_cluster_operation(
            context, taxonomy.ACTION_DELETE, taxonomy.OUTCOME_FAILURE)
        raise
    # Stack delete accepted; record progress and start polling.
    cluster.status = fields.ClusterStatus.DELETE_IN_PROGRESS
    cluster.save()
    self._poll_and_check(osc, cluster, cluster_driver)
    return None
def bay_delete(self, context, uuid):
    """Delete the bay identified by ``uuid`` and its Heat stack.

    Emits a delete-pending notification, requests stack deletion, and
    polls for completion.  If Heat no longer knows the stack, the
    trust, certificates, and DB record are cleaned up immediately and
    a success notification sent.

    :param context: request context used to build OpenStack clients.
    :param uuid: UUID of the bay to delete.
    :raises OperationInProgress: if Heat reports HTTPConflict.
    """
    LOG.debug('bay_heat bay_delete')
    osc = clients.OpenStackClients(context)
    bay = objects.Bay.get_by_uuid(context, uuid)
    stack_id = bay.stack_id
    # NOTE(sdake): This will execute a stack_delete operation.  This
    # will ignore HTTPNotFound exceptions (stack wasn't present).  In
    # the case that Heat couldn't find the stack representing the bay,
    # likely a user has deleted the stack outside the context of
    # Magnum.  Therefore the contents of the bay are forever lost.
    #
    # If the exception is unhandled, the original exception will be
    # raised.
    try:
        conductor_utils.notify_about_bay_operation(
            context, taxonomy.ACTION_DELETE, taxonomy.OUTCOME_PENDING)
        osc.heat().stacks.delete(stack_id)
    except exc.HTTPNotFound:
        # BUG FIX: message previously read "was not be found".
        LOG.info(_LI('The stack %s was not found during bay'
                     ' deletion.'), stack_id)
        try:
            trust_manager.delete_trustee_and_trust(osc, context, bay)
            cert_manager.delete_certificates_from_bay(bay)
            bay.destroy()
        except exception.BayNotFound:
            # A concurrent deletion already removed the record.
            LOG.info(_LI('The bay %s has been deleted by others.'), uuid)
        conductor_utils.notify_about_bay_operation(
            context, taxonomy.ACTION_DELETE, taxonomy.OUTCOME_SUCCESS)
        return None
    except exc.HTTPConflict:
        conductor_utils.notify_about_bay_operation(
            context, taxonomy.ACTION_DELETE, taxonomy.OUTCOME_FAILURE)
        raise exception.OperationInProgress(bay_name=bay.name)
    except Exception:
        conductor_utils.notify_about_bay_operation(
            context, taxonomy.ACTION_DELETE, taxonomy.OUTCOME_FAILURE)
        raise
    self._poll_and_check(osc, bay)
    return None
def update_cluster_status(self, context, cluster):
    """Updates the cluster status.

    Drives two transitions: CREATE_IN_PROGRESS -> CREATE_COMPLETE once
    the Heat stack is complete, addons are installed, and workers are
    ready; and DELETE_IN_PROGRESS -> DELETE_COMPLETE once the cluster's
    namespace is gone, after which the trust, certificates, and client
    files are removed.

    This method should be finished within the periodic interval(10s).

    :param context: Admin context.
    :param cluster: Cluster object.
    """
    if cluster.status == fields.ClusterStatus.CREATE_IN_PROGRESS:
        if cluster.stack_id is None:
            # Stack not created yet; check again next period.
            return
        stack_ctx = mag_ctx.make_cluster_context(cluster)
        os_clients = clients.OpenStackClients(stack_ctx)
        # Skip resolving outputs here — only the status is needed.
        stack = os_clients.heat().stacks.get(
            cluster.stack_id,
            resolve_outputs=False
        )

        if stack.stack_status == fields.ClusterStatus.CREATE_COMPLETE:
            stack_ctx = mag_ctx.make_cluster_context(cluster)
            kubeconfig_path = self._get_kubeconfig(stack_ctx, cluster)
            # kubectl client pointed at the newly created cluster.
            cluster_kubectl = kubectl.KubeCtl(
                bin="/usr/bin/kubectl",
                global_flags="--kubeconfig %s" % kubeconfig_path
            )

            # NOTE(review): ``self.kubectl`` (not ``cluster_kubectl``)
            # is queried here — presumably the management cluster holds
            # a per-cluster namespace; confirm against the driver.
            ns = self.kubectl.get("namespace %s" % cluster.uuid)
            labels = ns['metadata'].get('labels', {})

            if not labels.get('magnum.k8s.io/status'):
                # Addons not installed yet; install and re-check later.
                self._install_addons(cluster, cluster_kubectl, context)
                return

            if self._workers_ready(cluster, cluster_kubectl):
                LOG.info(
                    'Cluster %s is created successfully', cluster.uuid
                )

                # Update the worker addresses in the cluster from the Heat
                # stack output.
                stack = os_clients.heat().stacks.get(
                    cluster.stack_id,
                    resolve_outputs=True
                )
                template_def = self.get_template_definition()
                c_template = conductor_utils.retrieve_cluster_template(
                    context,
                    cluster
                )
                template_def.update_outputs(stack, c_template, cluster)

                cluster.status = fields.ClusterStatus.CREATE_COMPLETE
                cluster.save()
        elif stack.stack_status in (
            fields.ClusterStatus.CREATE_FAILED,
            fields.ClusterStatus.DELETE_FAILED,
            fields.ClusterStatus.UPDATE_FAILED,
            fields.ClusterStatus.ROLLBACK_COMPLETE,
            fields.ClusterStatus.ROLLBACK_FAILED
        ):
            # Mirror the terminal Heat failure state onto the cluster.
            self._sync_cluster_status(cluster, stack)
            LOG.error('Failed to create cluster %s', cluster.uuid)
    elif cluster.status == fields.ClusterStatus.DELETE_IN_PROGRESS:
        # Check if the namespace is deleted.
        ns_template = self.jinja_env.get_template('namespace.yaml.j2')
        # NOTE(review): ``ns_body`` is rendered but not used below —
        # possibly left over; confirm before removing.
        ns_body = ns_template.render({"namespace": cluster.uuid})
        namespaces = self.kubectl.get('namespace')
        names = [n['metadata']['name'] for n in namespaces]

        if cluster.uuid not in names:
            LOG.debug(
                "Namespace has been deleted for cluster %s",
                cluster.uuid
            )
            stack_ctx = mag_ctx.make_cluster_context(cluster)
            os_client = clients.OpenStackClients(stack_ctx)

            try:
                # Remove the identity and certificate artifacts tied to
                # the deleted cluster.
                trust_manager.delete_trustee_and_trust(
                    os_client, context, cluster
                )
                cert_manager.delete_certificates_from_cluster(
                    cluster, context=context
                )
                cert_manager.delete_client_files(cluster, context=context)
            except exception.ClusterNotFound:
                LOG.info(
                    'The cluster %s has been deleted by others.',
                    cluster.uuid
                )

            LOG.info('Cluster %s has been deleted.', cluster.uuid)
            cluster.status = fields.ClusterStatus.DELETE_COMPLETE
            cluster.save()