def RemoveDisks(lu, instance, target_node_uuid=None, ignore_failures=False):
  """Remove all disks for an instance.

  This abstracts away some work from `AddInstance()` and
  `RemoveInstance()`. Note that in case some of the devices couldn't
  be removed, the removal will continue with the other ones.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should remove
  @type target_node_uuid: string
  @param target_node_uuid: used to override the node on which to remove the
          disks
  @rtype: boolean
  @return: the success of the removal

  """
  logging.info("Removing block devices for instance %s", instance.name)

  all_result = True
  ports_to_release = set()
  anno_disks = AnnotateDiskParams(instance, instance.disks, lu.cfg)
  for (idx, device) in enumerate(anno_disks):
    if target_node_uuid:
      edata = [(target_node_uuid, device)]
    else:
      edata = device.ComputeNodeTree(instance.primary_node)
    for node_uuid, disk in edata:
      result = lu.rpc.call_blockdev_remove(node_uuid, (disk, instance))
      if result.fail_msg:
        lu.LogWarning("Could not remove disk %s on node %s,"
                      " continuing anyway: %s", idx,
                      lu.cfg.GetNodeName(node_uuid), result.fail_msg)
        if not (result.offline and node_uuid != instance.primary_node):
          all_result = False

    # if this is a DRBD disk, return its port to the pool
    if device.dev_type in constants.DTS_DRBD:
      ports_to_release.add(device.logical_id[2])

  if all_result or ignore_failures:
    for port in ports_to_release:
      lu.cfg.AddTcpUdpPort(port)

  CheckDiskTemplateEnabled(lu.cfg.GetClusterInfo(), instance.disk_template)

  if instance.disk_template in [constants.DT_FILE, constants.DT_SHARED_FILE]:
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    if target_node_uuid:
      tgt = target_node_uuid
    else:
      tgt = instance.primary_node

    result = lu.rpc.call_file_storage_dir_remove(tgt, file_storage_dir)
    if result.fail_msg:
      lu.LogWarning("Could not remove directory '%s' on node %s: %s",
                    file_storage_dir, lu.cfg.GetNodeName(tgt),
                    result.fail_msg)
      all_result = False

  return all_result
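
# Illustrative sketch, not part of the original module: since RemoveDisks
# only reports failure via its boolean return, a caller such as an
# instance-removal logical unit typically has to decide whether a partial
# failure is fatal. The helper name below is hypothetical, and Ganeti's
# `errors` module is assumed to be imported alongside this module's other
# imports.
def _RemoveDisksOrFail(lu, instance, ignore_failures=False):
  """Remove an instance's disks, raising unless failures are ignored."""
  if not RemoveDisks(lu, instance, ignore_failures=ignore_failures):
    if not ignore_failures:
      raise errors.OpExecError("Not all disks could be removed")
    lu.LogWarning("Some disks could not be removed; continuing anyway")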
def _ComputeDiskStatus(self, instance, node_uuid2name_fn, dev):
  """Compute block device status.

  """
  (anno_dev,) = AnnotateDiskParams(instance, [dev], self.cfg)
  return self._ComputeDiskStatusInner(instance, None, node_uuid2name_fn,
                                      anno_dev)
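
# Illustrative sketch, not from the original module: node_uuid2name_fn can
# be any UUID-to-name callable, which decouples the status computation from
# the configuration; the config's own GetNodeName is the obvious choice.
# The method name _DescribeDisk is hypothetical.
def _DescribeDisk(self, instance, dev):
  """Return the status structure of one disk with node names resolved."""
  return self._ComputeDiskStatus(instance, self.cfg.GetNodeName, dev)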
def RemoveDisks(lu, instance, disks=None, target_node_uuid=None,
                ignore_failures=False):
  """Remove all or a subset of disks for an instance.

  This abstracts away some work from `AddInstance()` and
  `RemoveInstance()`. Note that in case some of the devices couldn't
  be removed, the removal will continue with the other ones.

  This function is also used by the disk template conversion mechanism to
  remove the old block devices of the instance. Since the instance has
  already changed its template by the time the original disks are removed,
  the disks to remove must be passed in explicitly; each disk's C{dev_type}
  still carries its old template.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should remove
  @type disks: list of L{objects.Disk}
  @param disks: the disks to remove; if not specified, all the disks of the
          instance are removed
  @type target_node_uuid: string
  @param target_node_uuid: used to override the node on which to remove the
          disks
  @rtype: boolean
  @return: the success of the removal

  """
  logging.info("Removing block devices for instance %s", instance.name)

  all_result = True
  ports_to_release = set()
  all_disks = lu.cfg.GetInstanceDisks(instance.uuid)
  if disks is None:
    disks = all_disks
  anno_disks = AnnotateDiskParams(instance, disks, lu.cfg)

  uuid_idx_map = {}
  for (idx, device) in enumerate(all_disks):
    uuid_idx_map[device.uuid] = idx

  for (idx, device) in enumerate(anno_disks):
    if target_node_uuid:
      edata = [(target_node_uuid, device)]
    else:
      edata = device.ComputeNodeTree(instance.primary_node)
    for node_uuid, disk in edata:
      result = lu.rpc.call_blockdev_remove(node_uuid, (disk, instance))
      if result.fail_msg:
        lu.LogWarning("Could not remove disk %s on node %s,"
                      " continuing anyway: %s",
                      uuid_idx_map.get(device.uuid),
                      lu.cfg.GetNodeName(node_uuid), result.fail_msg)
        if not (result.offline and node_uuid != instance.primary_node):
          all_result = False

    # if this is a DRBD disk, return its port to the pool
    if device.dev_type in constants.DTS_DRBD:
      ports_to_release.add(device.logical_id[2])

  if all_result or ignore_failures:
    for port in ports_to_release:
      lu.cfg.AddTcpUdpPort(port)

  for d in disks:
    CheckDiskTemplateEnabled(lu.cfg.GetClusterInfo(), d.dev_type)

  if target_node_uuid:
    tgt = target_node_uuid
  else:
    tgt = instance.primary_node

  obsolete_storage_paths = _StoragePathsRemoved(disks, all_disks)

  for file_storage_dir in obsolete_storage_paths:
    result = lu.rpc.call_file_storage_dir_remove(tgt, file_storage_dir)
    if result.fail_msg:
      lu.LogWarning("Could not remove directory '%s' on node %s: %s",
                    file_storage_dir, lu.cfg.GetNodeName(tgt),
                    result.fail_msg)
      all_result = False

  return all_result
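
# Illustrative sketch, not part of the original module: the `disks`
# parameter exists so the disk template conversion path can remove exactly
# the superseded block devices while the instance's configuration already
# describes the new ones. Both the helper name and the `old_disks` variable
# (the pre-conversion objects.Disk instances, saved before the template
# change) are assumptions for the example.
def _CleanupOldDisks(lu, instance, old_disks):
  """Best-effort removal of the superseded disks after a conversion."""
  if not RemoveDisks(lu, instance, disks=old_disks):
    lu.LogWarning("Could not remove all pre-conversion disks of instance %s",
                  instance.name)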