Example #1
    def CheckPrereq(self):
        """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
        (self.op.instance_uuid, self.op.instance_name) = \
          ExpandInstanceUuidAndName(self.cfg, self.op.instance_uuid,
                                    self.op.instance_name)
        instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
        assert instance is not None

        # It should actually not happen that an instance is running with a disabled
        # disk template, but in case it does, the renaming of file-based instances
        # will fail horribly. Thus, we test it before.
        for disk in self.cfg.GetInstanceDisks(instance.uuid):
            if (disk.dev_type in constants.DTS_FILEBASED
                    and self.op.new_name != instance.name):
                # TODO: when disks are separate objects, this should check for disk
                # types, not disk templates.
                CheckDiskTemplateEnabled(self.cfg.GetClusterInfo(),
                                         disk.dev_type)

        CheckNodeOnline(self, instance.primary_node)
        CheckInstanceState(self,
                           instance,
                           INSTANCE_NOT_RUNNING,
                           msg="cannot rename")
        self.instance = instance

        self._PerformChecksAndResolveNewName()

        if self.op.new_name != instance.name:
            CheckInstanceExistence(self, self.op.new_name)
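
The order of these prerequisites matters: the instance is resolved first, the disk-template guard only fires for file-based disks when the name actually changes, and the collision check against the new name is skipped for a no-op rename. Below is a minimal, self-contained sketch of that control flow; the dict-based instance representation and helper names are hypothetical stand-ins, not Ganeti's API.

# Hypothetical stand-ins for the objects and helpers used in CheckPrereq above.
FILE_BASED_TEMPLATES = frozenset(["file", "sharedfile"])


class PrereqError(Exception):
    pass


def check_rename_prereq(instance, new_name, enabled_templates, existing_names):
    # File-based disks can only be renamed while their template is still
    # enabled; otherwise moving the backing files would fail half-way through.
    if new_name != instance["name"]:
        for disk in instance["disks"]:
            if (disk["dev_type"] in FILE_BASED_TEMPLATES
                    and disk["dev_type"] not in enabled_templates):
                raise PrereqError("disk template %s is disabled" % disk["dev_type"])

    if not instance["primary_node_online"]:
        raise PrereqError("primary node is offline")
    if instance["running"]:
        raise PrereqError("cannot rename: instance is running")

    # Only guard against name collisions when the name actually changes.
    if new_name != instance["name"] and new_name in existing_names:
        raise PrereqError("instance name %s already in use" % new_name)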
Example #2
def RemoveDisks(lu, instance, target_node_uuid=None, ignore_failures=False):
  """Remove all disks for an instance.

  This abstracts away some work from `AddInstance()` and
  `RemoveInstance()`. Note that in case some of the devices couldn't
  be removed, the removal will continue with the other ones.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should remove
  @type target_node_uuid: string
  @param target_node_uuid: used to override the node on which to remove the
          disks
  @rtype: boolean
  @return: the success of the removal

  """
  logging.info("Removing block devices for instance %s", instance.name)

  all_result = True
  ports_to_release = set()
  anno_disks = AnnotateDiskParams(instance, instance.disks, lu.cfg)
  for (idx, device) in enumerate(anno_disks):
    if target_node_uuid:
      edata = [(target_node_uuid, device)]
    else:
      edata = device.ComputeNodeTree(instance.primary_node)
    for node_uuid, disk in edata:
      result = lu.rpc.call_blockdev_remove(node_uuid, (disk, instance))
      if result.fail_msg:
        lu.LogWarning("Could not remove disk %s on node %s,"
                      " continuing anyway: %s", idx,
                      lu.cfg.GetNodeName(node_uuid), result.fail_msg)
        if not (result.offline and node_uuid != instance.primary_node):
          all_result = False

    # if this is a DRBD disk, return its port to the pool
    if device.dev_type in constants.DTS_DRBD:
      ports_to_release.add(device.logical_id[2])

  if all_result or ignore_failures:
    for port in ports_to_release:
      lu.cfg.AddTcpUdpPort(port)

  CheckDiskTemplateEnabled(lu.cfg.GetClusterInfo(), instance.disk_template)

  if instance.disk_template in [constants.DT_FILE, constants.DT_SHARED_FILE]:
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    if target_node_uuid:
      tgt = target_node_uuid
    else:
      tgt = instance.primary_node
    result = lu.rpc.call_file_storage_dir_remove(tgt, file_storage_dir)
    if result.fail_msg:
      lu.LogWarning("Could not remove directory '%s' on node %s: %s",
                    file_storage_dir, lu.cfg.GetNodeName(tgt), result.fail_msg)
      all_result = False

  return all_result
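
One detail worth noting in the function above: DRBD ports are collected during the loop but only handed back to the pool once the whole removal has succeeded (or the caller explicitly ignores failures), so a half-removed DRBD device never has its port reused. A self-contained sketch of that pattern, with a plain set standing in for lu.cfg.AddTcpUdpPort():

def remove_disks_sketch(disks, remove_fn, port_pool, ignore_failures=False):
    # ``disks`` are plain dicts here and ``remove_fn`` returns True on
    # success; both are hypothetical stand-ins for the RPC machinery above.
    all_ok = True
    ports_to_release = set()

    for disk in disks:
        if not remove_fn(disk):
            all_ok = False
        if disk.get("drbd_port") is not None:
            ports_to_release.add(disk["drbd_port"])

    # Release ports only when every device was removed (or failures are
    # explicitly ignored); a partially removed device keeps its port reserved.
    if all_ok or ignore_failures:
        port_pool.update(ports_to_release)

    return all_ok


# Example: both removals succeed, so both ports return to the pool.
pool = set()
remove_disks_sketch([{"drbd_port": 11000}, {"drbd_port": 11001}],
                    lambda d: True, pool)
assert pool == {11000, 11001}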
Example #3
def RemoveDisks(lu,
                instance,
                disks=None,
                target_node_uuid=None,
                ignore_failures=False):
    """Remove all or a subset of disks for an instance.

  This abstracts away some work from `AddInstance()` and
  `RemoveInstance()`. Note that in case some of the devices couldn't
  be removed, the removal will continue with the other ones.

  This function is also used by the disk template conversion mechanism to
  remove the old block devices of the instance. Since the instance has
  changed its template at the time we remove the original disks, we must
  specify the template of the disks we are about to remove as an argument.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should remove
  @type disks: list of L{objects.Disk}
  @param disks: the disks to remove; if not specified, all the disks of the
          instance are removed
  @type target_node_uuid: string
  @param target_node_uuid: used to override the node on which to remove the
          disks
  @rtype: boolean
  @return: the success of the removal

  """
    logging.info("Removing block devices for instance %s", instance.name)

    all_result = True
    ports_to_release = set()

    all_disks = lu.cfg.GetInstanceDisks(instance.uuid)
    if disks is None:
        disks = all_disks

    anno_disks = AnnotateDiskParams(instance, disks, lu.cfg)

    uuid_idx_map = {}
    for (idx, device) in enumerate(all_disks):
        uuid_idx_map[device.uuid] = idx

    for (idx, device) in enumerate(anno_disks):
        if target_node_uuid:
            edata = [(target_node_uuid, device)]
        else:
            edata = device.ComputeNodeTree(instance.primary_node)
        for node_uuid, disk in edata:
            result = lu.rpc.call_blockdev_remove(node_uuid, (disk, instance))
            if result.fail_msg:
                lu.LogWarning(
                    "Could not remove disk %s on node %s,"
                    " continuing anyway: %s", uuid_idx_map.get(device.uuid),
                    lu.cfg.GetNodeName(node_uuid), result.fail_msg)
                if not (result.offline and node_uuid != instance.primary_node):
                    all_result = False

        # if this is a DRBD disk, return its port to the pool
        if device.dev_type in constants.DTS_DRBD:
            ports_to_release.add(device.logical_id[2])

    if all_result or ignore_failures:
        for port in ports_to_release:
            lu.cfg.AddTcpUdpPort(port)

    for d in disks:
        CheckDiskTemplateEnabled(lu.cfg.GetClusterInfo(), d.dev_type)

    if target_node_uuid:
        tgt = target_node_uuid
    else:
        tgt = instance.primary_node

    obsolete_storage_paths = _StoragePathsRemoved(disks, all_disks)

    for file_storage_dir in obsolete_storage_paths:
        result = lu.rpc.call_file_storage_dir_remove(tgt, file_storage_dir)
        if result.fail_msg:
            lu.LogWarning("Could not remove directory '%s' on node %s: %s",
                          file_storage_dir, lu.cfg.GetNodeName(tgt),
                          result.fail_msg)
            all_result = False

    return all_result
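
_StoragePathsRemoved is not shown here; from the way it is used it appears to return the file-storage directories that belong to the removed disks but are no longer referenced by any disk the instance keeps, so that only truly obsolete directories are deleted. A rough sketch of that set difference under that assumption (the dict-based disks and the helper name are hypothetical):

import os


def storage_paths_removed(removed_disks, all_disks):
    # Directories backing the removed disks, minus directories still used by
    # the disks the instance keeps; only the former are safe to delete.
    def dirs(disks):
        return set(os.path.dirname(d["path"]) for d in disks if d.get("path"))

    removed_uuids = set(d["uuid"] for d in removed_disks)
    kept = [d for d in all_disks if d["uuid"] not in removed_uuids]
    return dirs(removed_disks) - dirs(kept)


# Example: the shared directory stays because disk "b" still lives there.
all_disks = [{"uuid": "a", "path": "/srv/ganeti/inst1/disk0"},
             {"uuid": "b", "path": "/srv/ganeti/inst1/disk1"},
             {"uuid": "c", "path": "/srv/ganeti/old/disk2"}]
removed = [all_disks[0], all_disks[2]]
assert storage_paths_removed(removed, all_disks) == {"/srv/ganeti/old"}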