def _cleanup_deploy(self, context, node, instance, network_info):
    icli = client_wrapper.IronicClientWrapper()
    # TODO(mrda): It would be better to use instance.get_flavor() here
    # but right now that doesn't include extra_specs which are required
    flavor = objects.Flavor.get_by_id(context,
                                      instance['instance_type_id'])
    patch = patcher.create(node).get_cleanup_patch(instance, network_info,
                                                   flavor)

    # Unassociate the node
    patch.append({'op': 'remove', 'path': '/instance_uuid'})
    try:
        icli.call('node.update', node.uuid, patch)
    except ironic.exc.BadRequest:
        LOG.error(_LE("Failed to clean up the parameters on node %(node)s "
                      "when unprovisioning the instance %(instance)s"),
                  {'node': node.uuid, 'instance': instance['uuid']})
        reason = (_("Failed to clean up node %s parameters") % node.uuid)
        raise exception.InstanceTerminationFailure(reason=reason)

    self._unplug_vifs(node, instance, network_info)
    self._stop_firewall(instance, network_info)
def _cleanup_deploy(self, context, node, instance, network_info,
                    flavor=None):
    if flavor is None:
        flavor = instance.flavor
    patch = patcher.create(node).get_cleanup_patch(instance, network_info,
                                                   flavor)

    # Unassociate the node
    patch.append({'op': 'remove', 'path': '/instance_uuid'})
    try:
        self.ironicclient.call('node.update', node.uuid, patch)
    except ironic.exc.BadRequest:
        LOG.error(_LE("Failed to clean up the parameters on node %(node)s "
                      "when unprovisioning the instance %(instance)s"),
                  {'node': node.uuid, 'instance': instance.uuid})
        reason = (_("Failed to clean up node %s parameters") % node.uuid)
        raise exception.InstanceTerminationFailure(reason=reason)

    self._unplug_vifs(node, instance, network_info)
    self._stop_firewall(instance, network_info)
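# For reference: the list built above is a JSON patch, applied in a single
# 'node.update' call against the Ironic node. A hedged illustration of what
# it might contain; the '/instance_info/...' entries are assumptions (they
# depend on what get_cleanup_patch reverses from deploy time), and only the
# final op is literal from the method above.
example_cleanup_patch = [
    {'op': 'remove', 'path': '/instance_info/image_source'},  # assumed
    {'op': 'remove', 'path': '/instance_info/root_gb'},       # assumed
    {'op': 'remove', 'path': '/instance_uuid'},               # literal
]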
def destroy(self, context, instance, network_info, block_device_info=None,
            destroy_disks=True):
    """Destroy the specified instance from the Hypervisor.

    If the instance is not found (for example if networking failed), this
    function should still succeed. It's probably a good idea to log a
    warning in that case.

    :param context: security context
    :param instance: Instance object as returned by DB layer.
    :param network_info: instance network information
    :param block_device_info: Information about block devices that should
                              be detached from the instance.
    :param destroy_disks: Indicates if disks should be destroyed
    """
    # TODO(thorst, efried) Add resize checks for destroy
    self._log_operation('destroy', instance)

    def _setup_flow_and_run():
        # Define the flow
        flow = tf_lf.Flow("destroy")

        # Power Off the LPAR. If its disks are about to be deleted, issue
        # a hard shutdown.
        flow.add(tf_vm.PowerOff(self.adapter, instance,
                                force_immediate=destroy_disks))

        # TODO(thorst, efried) Add unplug vifs task
        # TODO(thorst, efried) Add config drive tasks
        # TODO(thorst, efried) Add volume disconnect tasks

        # Detach the disk storage adapters
        flow.add(tf_stg.DetachDisk(self.disk_dvr, instance))

        # Delete the storage disks
        if destroy_disks:
            flow.add(tf_stg.DeleteDisk(self.disk_dvr))

        # TODO(thorst, efried) Add LPAR id based scsi map clean up task
        flow.add(tf_vm.Delete(self.adapter, instance))

        # Build the engine & run!
        tf_base.run(flow, instance=instance)

    try:
        _setup_flow_and_run()
    except exc.InstanceNotFound:
        LOG.debug('VM was not found during destroy operation.',
                  instance=instance)
        return
    except pvm_exc.Error as e:
        LOG.exception("PowerVM error during destroy.", instance=instance)
        # Convert to a Nova exception
        raise exc.InstanceTerminationFailure(reason=six.text_type(e))
def _cleanup_deploy(self, context, node, instance, network_info,
                    flavor=None):
    if flavor is None:
        # TODO(mrda): It would be better to use instance.get_flavor()
        # here but right now that doesn't include extra_specs which are
        # required
        # NOTE(pmurray): Flavor may have been deleted
        ctxt = context.elevated(read_deleted="yes")
        flavor = objects.Flavor.get_by_id(ctxt, instance.instance_type_id)
    patch = patcher.create(node).get_cleanup_patch(instance, network_info,
                                                   flavor)

    # Unassociate the node
    patch.append({'op': 'remove', 'path': '/instance_uuid'})
    try:
        self.ironicclient.call('node.update', node.uuid, patch)
    except ironic.exc.BadRequest:
        LOG.error(_LE("Failed to clean up the parameters on node %(node)s "
                      "when unprovisioning the instance %(instance)s"),
                  {'node': node.uuid, 'instance': instance.uuid})
        reason = (_("Failed to clean up node %s parameters") % node.uuid)
        raise exception.InstanceTerminationFailure(reason=reason)

    self._unplug_vifs(node, instance, network_info)
    self._stop_firewall(instance, network_info)
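# Hedged sketch of the lookup above in isolation, showing why the elevated
# context matters; 'request_ctxt' and 'inst' are hypothetical stand-ins and
# this only runs inside Nova where 'objects' is importable.
def _lookup_possibly_deleted_flavor(request_ctxt, inst):
    # elevated() returns an admin-capable copy of the request context;
    # read_deleted="yes" widens DB reads to include soft-deleted rows,
    # since the flavor may have been deleted while the instance ran.
    ctxt = request_ctxt.elevated(read_deleted="yes")
    # Flavor.get_by_id carries extra_specs, which the cleanup patch needs.
    return objects.Flavor.get_by_id(ctxt, inst.instance_type_id)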
def _cleanup_deploy(self, node, instance, network_info):
    icli = client_wrapper.IronicClientWrapper()
    patch = patcher.create(node).get_cleanup_patch(instance, network_info)

    # Unassociate the node
    patch.append({'op': 'remove', 'path': '/instance_uuid'})
    try:
        icli.call('node.update', node.uuid, patch)
    except ironic_exception.BadRequest:
        msg = (_("Failed to clean up the parameters on node %(node)s "
                 "when unprovisioning the instance %(instance)s")
               % {'node': node.uuid, 'instance': instance['uuid']})
        LOG.error(msg)
        reason = _("Failed to clean up node %s parameters") % node.uuid
        raise exception.InstanceTerminationFailure(reason=reason)

    self._unplug_vifs(node, instance, network_info)
    self._stop_firewall(instance, network_info)
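# The _cleanup_deploy variants above all share one error-handling pattern:
# a client-level BadRequest is logged, then re-raised as a Nova-facing
# exception so the compute manager only ever sees one failure type. A
# minimal, self-contained sketch of that translation, using stand-in
# classes rather than the real ironic/nova ones:
class BadRequest(Exception):
    """Stand-in for the ironic client's BadRequest."""


class InstanceTerminationFailure(Exception):
    """Stand-in for nova.exception.InstanceTerminationFailure."""


def unassociate_node(call, node_uuid, patch):
    try:
        call('node.update', node_uuid, patch)
    except BadRequest:
        # Translate: callers handle the compute-level exception type,
        # not the client library's.
        raise InstanceTerminationFailure(
            "Failed to clean up node %s parameters" % node_uuid)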
def destroy(self, context, instance, network_info, block_device_info=None,
            destroy_disks=True):
    """Destroy the specified instance from the Hypervisor.

    If the instance is not found (for example if networking failed), this
    function should still succeed. It's probably a good idea to log a
    warning in that case.

    :param context: security context
    :param instance: Instance object as returned by DB layer.
    :param network_info: instance network information
    :param block_device_info: Information about block devices that should
                              be detached from the instance.
    :param destroy_disks: Indicates if disks should be destroyed
    """
    # TODO(thorst, efried) Add resize checks for destroy
    self._log_operation('destroy', instance)

    def _setup_flow_and_run():
        # Define the flow
        flow = tf_lf.Flow("destroy")

        # Power Off the LPAR. If its disks are about to be deleted, issue
        # a hard shutdown.
        flow.add(tf_vm.PowerOff(self.adapter, instance,
                                force_immediate=destroy_disks))

        # The FeedTask accumulates storage disconnection tasks to be run
        # in parallel.
        stg_ftsk = pvm_par.build_active_vio_feed_task(
            self.adapter, xag=[pvm_const.XAG.VIO_SMAP])

        # Call the unplug VIFs task. While CNAs get removed from the LPAR
        # directly on the destroy, this clears up the I/O Host side.
        flow.add(tf_net.UnplugVifs(self.adapter, instance, network_info))

        # Add the disconnect/deletion of the vOpt to the transaction
        # manager.
        if configdrive.required_by(instance):
            flow.add(tf_stg.DeleteVOpt(self.adapter, instance,
                                       stg_ftsk=stg_ftsk))

        # TODO(thorst, efried) Add volume disconnect tasks

        # Detach the disk storage adapters
        flow.add(tf_stg.DetachDisk(self.disk_dvr, instance))

        # Accumulated storage disconnection tasks next
        flow.add(stg_ftsk)

        # Delete the storage disks
        if destroy_disks:
            flow.add(tf_stg.DeleteDisk(self.disk_dvr))

        # TODO(thorst, efried) Add LPAR id based scsi map clean up task
        flow.add(tf_vm.Delete(self.adapter, instance))

        # Build the engine & run!
        tf_base.run(flow, instance=instance)

    try:
        _setup_flow_and_run()
    except exc.InstanceNotFound:
        LOG.debug('VM was not found during destroy operation.',
                  instance=instance)
        return
    except pvm_exc.Error as e:
        LOG.exception("PowerVM error during destroy.", instance=instance)
        # Convert to a Nova exception
        raise exc.InstanceTerminationFailure(reason=six.text_type(e))
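# Both destroy() variants above are built on OpenStack's taskflow library
# (tf_lf is taskflow's linear_flow pattern module): tasks are added in
# order and an engine runs them. A minimal, self-contained sketch of that
# pattern, with illustrative task names rather than the PowerVM classes;
# it runs with just taskflow installed.
from taskflow import engines
from taskflow import task
from taskflow.patterns import linear_flow


class PowerOff(task.Task):
    def execute(self, force_immediate):
        # The real task would issue the LPAR power-off here.
        print('powering off (force_immediate=%s)' % force_immediate)


class DeleteDisk(task.Task):
    def execute(self):
        print('deleting disks')


flow = linear_flow.Flow('destroy')
flow.add(PowerOff())
flow.add(DeleteDisk())
# 'store' supplies named inputs that taskflow binds to execute() arguments.
engines.run(flow, store={'force_immediate': True})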