def bay_delete(self, context, uuid):
    LOG.debug('bay_heat bay_delete')
    osc = clients.OpenStackClients(context)
    bay = objects.Bay.get_by_uuid(context, uuid)
    stack_id = bay.stack_id
    # NOTE(sdake): This will execute a stack_delete operation.  This will
    # ignore HTTPNotFound exceptions (stack wasn't present).  In the case
    # that Heat couldn't find the stack representing the bay, likely a
    # user has deleted the stack outside the context of Magnum.  Therefore
    # the contents of the bay are forever lost.
    #
    # If the exception is unhandled, the original exception will be
    # raised.
    try:
        osc.heat().stacks.delete(stack_id)
    except exc.HTTPNotFound:
        LOG.info(_LI('The stack %s was not found during bay'
                     ' deletion.'), stack_id)
        try:
            trust_manager.delete_trustee_and_trust(osc, context, bay)
            cert_manager.delete_certificates_from_bay(bay)
            bay.destroy()
        except exception.BayNotFound:
            LOG.info(_LI('The bay %s has been deleted by others.'), uuid)
        return None
    except exc.HTTPConflict:
        raise exception.OperationInProgress(bay_name=bay.name)
    except Exception:
        raise

    self._poll_and_check(osc, bay)

    return None
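
# The _poll_and_check() call above is what eventually moves the bay out of
# its in-progress state once Heat finishes tearing the stack down.  Below is
# a minimal sketch of that polling pattern, assuming oslo.service's
# loopingcall is available; the interval value and the bare-bones _poll body
# are illustrative stand-ins, since Magnum's real poller also syncs bay
# status and node addresses back to the database.
from heatclient import exc as heat_exc
from oslo_service import loopingcall


def _poll_and_check(osc, bay, interval=10):
    """Poll the Heat stack behind ``bay`` until it reaches a terminal
    state (sketch only).
    """
    def _poll():
        try:
            stack = osc.heat().stacks.get(bay.stack_id)
        except heat_exc.HTTPNotFound:
            # The stack is gone: the delete completed.
            raise loopingcall.LoopingCallDone()
        if stack.stack_status == 'DELETE_FAILED':
            raise loopingcall.LoopingCallDone()

    poller = loopingcall.FixedIntervalLoopingCall(f=_poll)
    poller.start(interval=interval, initial_delay=0)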
def cluster_delete(self, context, uuid):
    LOG.debug('cluster_conductor cluster_delete')
    osc = clients.OpenStackClients(context)
    cluster = objects.Cluster.get_by_uuid(context, uuid)
    ct = conductor_utils.retrieve_cluster_template(context, cluster)
    cluster_driver = driver.Driver.get_driver(ct.server_type,
                                              ct.cluster_distro,
                                              ct.coe)
    try:
        conductor_utils.notify_about_cluster_operation(
            context, taxonomy.ACTION_DELETE, taxonomy.OUTCOME_PENDING,
            cluster)
        cluster_driver.delete_cluster(context, cluster)
        cluster.status = fields.ClusterStatus.DELETE_IN_PROGRESS
        cluster.status_reason = None
    except exc.HTTPNotFound:
        LOG.info('The cluster %s was not found during cluster'
                 ' deletion.', cluster.id)
        try:
            trust_manager.delete_trustee_and_trust(osc, context, cluster)
            cert_manager.delete_certificates_from_cluster(cluster,
                                                          context=context)
            # Delete all the cluster's nodegroups before destroying the
            # cluster itself.
            for ng in cluster.nodegroups:
                ng.destroy()
            cluster.destroy()
        except exception.ClusterNotFound:
            LOG.info('The cluster %s has been deleted by others.', uuid)
        conductor_utils.notify_about_cluster_operation(
            context, taxonomy.ACTION_DELETE, taxonomy.OUTCOME_SUCCESS,
            cluster)
        return None
    except exc.HTTPConflict:
        conductor_utils.notify_about_cluster_operation(
            context, taxonomy.ACTION_DELETE, taxonomy.OUTCOME_FAILURE,
            cluster)
        raise exception.OperationInProgress(cluster_name=cluster.name)
    except Exception as unexp:
        conductor_utils.notify_about_cluster_operation(
            context, taxonomy.ACTION_DELETE, taxonomy.OUTCOME_FAILURE,
            cluster)
        cluster.status = fields.ClusterStatus.DELETE_FAILED
        cluster.status_reason = six.text_type(unexp)
        cluster.save()
        raise

    cluster.save()
    return None
def cluster_delete(self, context, uuid):
    LOG.debug('cluster_heat cluster_delete')
    osc = clients.OpenStackClients(context)
    cluster = objects.Cluster.get_by_uuid(context, uuid)
    stack_id = cluster.stack_id
    # NOTE(sdake): This will execute a stack_delete operation.  This will
    # ignore HTTPNotFound exceptions (stack wasn't present).  In the case
    # that Heat couldn't find the stack representing the cluster, likely a
    # user has deleted the stack outside the context of Magnum.  Therefore
    # the contents of the cluster are forever lost.
    #
    # If the exception is unhandled, the original exception will be
    # raised.
    try:
        conductor_utils.notify_about_cluster_operation(
            context, taxonomy.ACTION_DELETE, taxonomy.OUTCOME_PENDING)
        osc.heat().stacks.delete(stack_id)
    except exc.HTTPNotFound:
        LOG.info(_LI('The stack %s was not found during cluster'
                     ' deletion.'), stack_id)
        try:
            trust_manager.delete_trustee_and_trust(osc, context, cluster)
            cert_manager.delete_certificates_from_cluster(cluster,
                                                          context=context)
            cluster.destroy()
        except exception.ClusterNotFound:
            LOG.info(_LI('The cluster %s has been deleted by others.'),
                     uuid)
        conductor_utils.notify_about_cluster_operation(
            context, taxonomy.ACTION_DELETE, taxonomy.OUTCOME_SUCCESS)
        return None
    except exc.HTTPConflict:
        conductor_utils.notify_about_cluster_operation(
            context, taxonomy.ACTION_DELETE, taxonomy.OUTCOME_FAILURE)
        raise exception.OperationInProgress(cluster_name=cluster.name)
    except Exception:
        conductor_utils.notify_about_cluster_operation(
            context, taxonomy.ACTION_DELETE, taxonomy.OUTCOME_FAILURE)
        raise

    cluster.status = fields.ClusterStatus.DELETE_IN_PROGRESS
    cluster.save()

    self._poll_and_check(osc, cluster)

    return None
def cluster_delete(self, context, uuid):
    LOG.debug('cluster_heat cluster_delete')
    osc = clients.OpenStackClients(context)
    cluster = objects.Cluster.get_by_uuid(context, uuid)
    ct = conductor_utils.retrieve_cluster_template(context, cluster)
    cluster_driver = driver.Driver.get_driver(ct.server_type,
                                              ct.cluster_distro,
                                              ct.coe)
    try:
        conductor_utils.notify_about_cluster_operation(
            context, taxonomy.ACTION_DELETE, taxonomy.OUTCOME_PENDING)
        cluster_driver.delete_stack(context, osc, cluster)
    except exc.HTTPNotFound:
        LOG.info(_LI('The stack %s was not found during cluster'
                     ' deletion.'), cluster.stack_id)
        try:
            trust_manager.delete_trustee_and_trust(osc, context, cluster)
            cert_manager.delete_certificates_from_cluster(cluster,
                                                          context=context)
            cluster.destroy()
        except exception.ClusterNotFound:
            LOG.info(_LI('The cluster %s has been deleted by others.'),
                     uuid)
        conductor_utils.notify_about_cluster_operation(
            context, taxonomy.ACTION_DELETE, taxonomy.OUTCOME_SUCCESS)
        return None
    except exc.HTTPConflict:
        conductor_utils.notify_about_cluster_operation(
            context, taxonomy.ACTION_DELETE, taxonomy.OUTCOME_FAILURE)
        raise exception.OperationInProgress(cluster_name=cluster.name)
    except Exception:
        conductor_utils.notify_about_cluster_operation(
            context, taxonomy.ACTION_DELETE, taxonomy.OUTCOME_FAILURE)
        raise

    cluster.status = fields.ClusterStatus.DELETE_IN_PROGRESS
    cluster.save()

    self._poll_and_check(osc, cluster, cluster_driver)

    return None
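
# Driver.get_driver() above dispatches on the (server_type, cluster_distro,
# coe) triple pulled from the cluster template.  The sketch below shows one
# way such a registry could be keyed; the class, decorator, and exception
# names are hypothetical illustrations, since Magnum's real implementation
# discovers its drivers through setuptools entry points.
class DriverNotSupported(Exception):
    """Raised when no driver matches the requested cluster type."""


class Driver(object):
    # Registry keyed by the same triple the conductor passes in.
    _registry = {}

    @classmethod
    def register(cls, server_type, distro, coe):
        def wrapper(driver_cls):
            cls._registry[(server_type, distro, coe)] = driver_cls
            return driver_cls
        return wrapper

    @classmethod
    def get_driver(cls, server_type, distro, coe):
        try:
            return cls._registry[(server_type, distro, coe)]()
        except KeyError:
            raise DriverNotSupported(
                'no driver registered for (%s, %s, %s)'
                % (server_type, distro, coe))


@Driver.register('vm', 'fedora-atomic', 'kubernetes')
class FedoraK8sDriver(Driver):
    def delete_stack(self, context, osc, cluster):
        osc.heat().stacks.delete(cluster.stack_id)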
def acquire(self, retry=True):
    """Acquire a lock on the bay.

    :param retry: When True, retry if lock was released while stealing.
    """
    lock_conductor_id = objects.BayLock.create(self.bay.uuid,
                                               self.conductor_id)
    if lock_conductor_id is None:
        LOG.debug("Conductor %(conductor)s acquired lock on bay "
                  "%(bay)s",
                  {'conductor': self.conductor_id, 'bay': self.bay.uuid})
        return

    if (lock_conductor_id == self.conductor_id or
            self.conductor_alive(self.context, lock_conductor_id)):
        LOG.debug("Lock on bay %(bay)s is owned by conductor "
                  "%(conductor)s",
                  {'bay': self.bay.uuid,
                   'conductor': lock_conductor_id})
        raise exception.OperationInProgress(bay_name=self.bay.name)
    else:
        LOG.info(_LI("Stale lock detected on bay %(bay)s.  Conductor "
                     "%(conductor)s will attempt to steal the lock"),
                 {'bay': self.bay.uuid,
                  'conductor': self.conductor_id})

        result = objects.BayLock.steal(self.bay.uuid, lock_conductor_id,
                                       self.conductor_id)

        if result is None:
            LOG.info(_LI("Conductor %(conductor)s successfully stole the "
                         "lock on bay %(bay)s"),
                     {'conductor': self.conductor_id,
                      'bay': self.bay.uuid})
            return
        elif result is True:
            if retry:
                LOG.info(_LI("The lock on bay %(bay)s was released while "
                             "conductor %(conductor)s was stealing it. "
                             "Trying again"),
                         {'bay': self.bay.uuid,
                          'conductor': self.conductor_id})
                return self.acquire(retry=False)
        else:
            new_lock_conductor_id = result
            LOG.info(_LI("Failed to steal lock on bay %(bay)s. "
                         "Conductor %(conductor)s stole the lock first"),
                     {'bay': self.bay.uuid,
                      'conductor': new_lock_conductor_id})

    raise exception.OperationInProgress(bay_name=self.bay.name)
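
# The branches in acquire() imply a three-way contract for the lock
# primitives: BayLock.create() returns None when the lock is acquired and
# the holder's conductor id otherwise; BayLock.steal() returns None on a
# successful swap, True if the lock vanished mid-steal, or the id of a
# third conductor that won the race.  The in-memory model below is an
# illustrative sketch of that contract only: Magnum performs the same
# compare-and-swap atomically against the database, and this dict-based
# version is not safe across processes.
class BayLock(object):

    _locks = {}  # bay uuid -> conductor id holding the lock

    @classmethod
    def create(cls, bay_uuid, conductor_id):
        holder = cls._locks.get(bay_uuid)
        if holder is not None:
            return holder            # lock already held
        cls._locks[bay_uuid] = conductor_id
        return None                  # lock acquired

    @classmethod
    def steal(cls, bay_uuid, old_conductor_id, new_conductor_id):
        holder = cls._locks.get(bay_uuid)
        if holder is None:
            return True              # released while we were stealing
        if holder != old_conductor_id:
            return holder            # a third conductor won the race
        cls._locks[bay_uuid] = new_conductor_id
        return None                  # steal succeeded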