Example 1
    def CheckPrereq(self):
        """Check prerequisites.

        This checks that the instance is in the cluster.

        """
        self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
        assert self.instance is not None, \
          "Cannot retrieve locked instance %s" % self.op.instance_name

        disks = self.cfg.GetInstanceDisks(self.instance.uuid)
        for idx, dsk in enumerate(disks):
            if dsk.dev_type not in constants.DTS_COPYABLE:
                raise errors.OpPrereqError(
                    "Instance disk %d has disk type %s and is"
                    " not suitable for copying" % (idx, dsk.dev_type),
                    errors.ECODE_STATE)

        target_node = self.cfg.GetNodeInfo(self.op.target_node_uuid)
        assert target_node is not None, \
          "Cannot retrieve locked node %s" % self.op.target_node

        self.target_node_uuid = target_node.uuid
        if target_node.uuid == self.instance.primary_node:
            raise errors.OpPrereqError(
                "Instance %s is already on the node %s" %
                (self.instance.name, target_node.name), errors.ECODE_STATE)

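        # Backend parameters with cluster-level defaults filled in;
        # BE_MAXMEM is needed for the free-memory check below.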
        cluster = self.cfg.GetClusterInfo()
        bep = cluster.FillBE(self.instance)

        CheckNodeOnline(self, target_node.uuid)
        CheckNodeNotDrained(self, target_node.uuid)
        CheckNodeVmCapable(self, target_node.uuid)
        group_info = self.cfg.GetNodeGroup(target_node.group)
        ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(
            cluster, group_info)
        CheckTargetNodeIPolicy(self,
                               ipolicy,
                               self.instance,
                               target_node,
                               self.cfg,
                               ignore=self.op.ignore_ipolicy)

        if self.instance.admin_state == constants.ADMINST_UP:
            # check memory requirements on the target node
            CheckNodeFreeMemory(
                self, target_node.uuid,
                "failing over instance %s" % self.instance.name,
                bep[constants.BE_MAXMEM], self.instance.hypervisor,
                cluster.hvparams[self.instance.hypervisor])
        else:
            self.LogInfo("Not checking memory on the secondary node as"
                         " instance will not be started")

        # check bridge existence
        CheckInstanceBridgesExist(self,
                                  self.instance,
                                  node_uuid=target_node.uuid)
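
The memory check above relies on CheckNodeFreeMemory to reject a target node that cannot hold the instance's maximum memory (BE_MAXMEM). A minimal, self-contained sketch of that comparison pattern follows; the names and signature are illustrative, not Ganeti's API.

# Hypothetical, simplified restatement of the free-memory prerequisite check;
# it mirrors only the comparison logic, not Ganeti's RPC-based node query.
class PrereqError(Exception):
    pass

def check_free_memory(free_mib, required_mib, reason):
    if free_mib is None:
        raise PrereqError("Node did not report free memory (%s)" % reason)
    if required_mib > free_mib:
        raise PrereqError("Not enough memory: need %d MiB, %d MiB available (%s)"
                          % (required_mib, free_mib, reason))
    return free_mib

# Example: raises, since 2048 MiB are requested but only 1024 MiB are free.
# check_free_memory(1024, 2048, "failing over instance inst1")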
Example 2
    def CheckPrereq(self):
        """Check prerequisites.

        This checks that the instance and node names are valid.

        """
        self.instance = self.cfg.GetInstanceInfoByName(self.op.instance_name)
        assert self.instance is not None, \
              "Cannot retrieve locked instance %s" % self.op.instance_name
        CheckNodeOnline(self, self.instance.primary_node)

        if (self.op.remove_instance
                and self.instance.admin_state == constants.ADMINST_UP
                and not self.op.shutdown):
            raise errors.OpPrereqError(
                "Can not remove instance without shutting it"
                " down before", errors.ECODE_STATE)

        if self.op.mode == constants.EXPORT_MODE_LOCAL:
            self.dst_node = self.cfg.GetNodeInfo(self.op.target_node_uuid)
            assert self.dst_node is not None

            CheckNodeOnline(self, self.dst_node.uuid)
            CheckNodeNotDrained(self, self.dst_node.uuid)

            self._cds = None
            self.dest_disk_info = None
            self.dest_x509_ca = None

        elif self.op.mode == constants.EXPORT_MODE_REMOTE:
            self.dst_node = None

            if len(self.op.target_node) != len(self.instance.disks):
                raise errors.OpPrereqError(
                    ("Received destination information for %s"
                     " disks, but instance %s has %s disks") %
                    (len(self.op.target_node), self.op.instance_name,
                     len(self.instance.disks)), errors.ECODE_INVAL)

            cds = GetClusterDomainSecret()

            # Check X509 key name
            try:
                (key_name, hmac_digest, hmac_salt) = self.x509_key_name
            except (TypeError, ValueError) as err:
                raise errors.OpPrereqError(
                    "Invalid data for X509 key name: %s" % err,
                    errors.ECODE_INVAL)

            if not utils.VerifySha1Hmac(
                    cds, key_name, hmac_digest, salt=hmac_salt):
                raise errors.OpPrereqError("HMAC for X509 key name is wrong",
                                           errors.ECODE_INVAL)

            # Load and verify CA
            try:
                (cert,
                 _) = utils.LoadSignedX509Certificate(self.dest_x509_ca_pem,
                                                      cds)
            except OpenSSL.crypto.Error as err:
                raise errors.OpPrereqError(
                    "Unable to load destination X509 CA (%s)" % (err, ),
                    errors.ECODE_INVAL)
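
The remote-export branch validates the X509 key name with a salted SHA-1 HMAC via utils.VerifySha1Hmac. A self-contained sketch of that verification technique is shown below; it assumes the salt is simply prepended to the message, which may not match Ganeti's exact salting scheme.

# Illustrative salted SHA-1 HMAC verification, analogous in spirit to the
# utils.VerifySha1Hmac call above; not Ganeti's exact implementation.
import hashlib
import hmac

def verify_sha1_hmac(secret, message, expected_hexdigest, salt=""):
    # Assumption: the salt is prepended to the message before MACing.
    mac = hmac.new(secret.encode("utf-8"),
                   (salt + message).encode("utf-8"),
                   hashlib.sha1)
    # Constant-time comparison avoids timing side channels.
    return hmac.compare_digest(mac.hexdigest(), expected_hexdigest)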
Example 3
  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    (self.instance_uuid, self.instance_name) = \
      ExpandInstanceUuidAndName(self.lu.cfg, self.instance_uuid,
                                self.instance_name)
    self.instance = self.cfg.GetInstanceInfo(self.instance_uuid)
    assert self.instance is not None
    cluster = self.cfg.GetClusterInfo()

    if (not self.cleanup and
        self.instance.admin_state != constants.ADMINST_UP and
        not self.failover and self.fallback):
      self.lu.LogInfo("Instance is marked down or offline, fallback allowed,"
                      " switching to failover")
      self.failover = True

    disks = self.cfg.GetInstanceDisks(self.instance.uuid)

    if not utils.AllDiskOfType(disks, constants.DTS_MIRRORED):
      if self.failover:
        text = "failovers"
      else:
        text = "migrations"
      invalid_disks = set(d.dev_type for d in disks
                          if d.dev_type not in constants.DTS_MIRRORED)
      raise errors.OpPrereqError("Instance's disk layout '%s' does not allow"
                                 " %s" % (utils.CommaJoin(invalid_disks), text),
                                 errors.ECODE_STATE)

    # TODO allow heterogeneous disk types if all are mirrored in some way.
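    # Externally mirrored disks (e.g. shared storage) can move to any node,
    # so the target is chosen via an iallocator or an explicit target node.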
    if utils.AllDiskOfType(disks, constants.DTS_EXT_MIRROR):
      CheckIAllocatorOrNode(self.lu, "iallocator", "target_node")

      if self.lu.op.iallocator:
        self._RunAllocator()
      else:
        # We set self.target_node_uuid as it is required by
        # BuildHooksEnv
        self.target_node_uuid = self.lu.op.target_node_uuid

      # Check that the target node is correct in terms of instance policy
      nodeinfo = self.cfg.GetNodeInfo(self.target_node_uuid)
      group_info = self.cfg.GetNodeGroup(nodeinfo.group)
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
                                                              group_info)
      CheckTargetNodeIPolicy(self.lu, ipolicy, self.instance, nodeinfo,
                             self.cfg, ignore=self.ignore_ipolicy)

      # self.target_node is already populated, either directly or by the
      # iallocator run
      target_node_uuid = self.target_node_uuid
      if self.target_node_uuid == self.instance.primary_node:
        raise errors.OpPrereqError(
          "Cannot migrate instance %s to its primary (%s)" %
          (self.instance.name,
           self.cfg.GetNodeName(self.instance.primary_node)),
          errors.ECODE_STATE)

      if len(self.lu.tasklets) == 1:
        # It is safe to release locks only when we're the only tasklet
        # in the LU
        ReleaseLocks(self.lu, locking.LEVEL_NODE,
                     keep=[self.instance.primary_node, self.target_node_uuid])

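    # Internally mirrored disks (e.g. DRBD) can only move to the existing
    # secondary node; an iallocator or a different target node is rejected below.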
    elif utils.AllDiskOfType(disks, constants.DTS_INT_MIRROR):
      templates = [d.dev_type for d in disks]
      secondary_node_uuids = \
        self.cfg.GetInstanceSecondaryNodes(self.instance.uuid)
      if not secondary_node_uuids:
        raise errors.ConfigurationError("No secondary node but using"
                                        " %s disk types" %
                                        utils.CommaJoin(set(templates)))
      self.target_node_uuid = target_node_uuid = secondary_node_uuids[0]
      if self.lu.op.iallocator or \
        (self.lu.op.target_node_uuid and
         self.lu.op.target_node_uuid != target_node_uuid):
        if self.failover:
          text = "failed over"
        else:
          text = "migrated"
        raise errors.OpPrereqError("Instances with disk types %s cannot"
                                   " be %s to arbitrary nodes"
                                   " (neither an iallocator nor a target"
                                   " node can be passed)" %
                                   (utils.CommaJoin(set(templates)), text),
                                   errors.ECODE_INVAL)
      nodeinfo = self.cfg.GetNodeInfo(target_node_uuid)
      group_info = self.cfg.GetNodeGroup(nodeinfo.group)
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
                                                              group_info)
      CheckTargetNodeIPolicy(self.lu, ipolicy, self.instance, nodeinfo,
                             self.cfg, ignore=self.ignore_ipolicy)

    else:
      raise errors.OpPrereqError("Instance mixes internal and external "
                                 "mirroring. This is not currently supported.")

    i_be = cluster.FillBE(self.instance)

    # check memory requirements on the secondary node
    if (not self.cleanup and
         (not self.failover or
           self.instance.admin_state == constants.ADMINST_UP)):
      self.tgt_free_mem = CheckNodeFreeMemory(
          self.lu, target_node_uuid,
          "migrating instance %s" % self.instance.name,
          i_be[constants.BE_MINMEM], self.instance.hypervisor,
          self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])
    else:
      self.lu.LogInfo("Not checking memory on the secondary node as"
                      " instance will not be started")

    # check if failover must be forced instead of migration
    if (not self.cleanup and not self.failover and
        i_be[constants.BE_ALWAYS_FAILOVER]):
      self.lu.LogInfo("Instance configured to always failover; fallback"
                      " to failover")
      self.failover = True

    # check bridge existence
    CheckInstanceBridgesExist(self.lu, self.instance,
                              node_uuid=target_node_uuid)

    if not self.cleanup:
      CheckNodeNotDrained(self.lu, target_node_uuid)
      if not self.failover:
        result = self.rpc.call_instance_migratable(self.instance.primary_node,
                                                   self.instance)
        if result.fail_msg and self.fallback:
          self.lu.LogInfo("Can't migrate, instance offline, fallback to"
                          " failover")
          self.failover = True
        else:
          result.Raise("Can't migrate, please use failover",
                       prereq=True, ecode=errors.ECODE_STATE)

    assert not (self.failover and self.cleanup)

    if not self.failover:
      if self.lu.op.live is not None and self.lu.op.mode is not None:
        raise errors.OpPrereqError("Only one of the 'live' and 'mode'"
                                   " parameters are accepted",
                                   errors.ECODE_INVAL)
      if self.lu.op.live is not None:
        if self.lu.op.live:
          self.lu.op.mode = constants.HT_MIGRATION_LIVE
        else:
          self.lu.op.mode = constants.HT_MIGRATION_NONLIVE
        # reset the 'live' parameter to None so that repeated
        # invocations of CheckPrereq do not raise an exception
        self.lu.op.live = None
      elif self.lu.op.mode is None:
        # read the default value from the hypervisor
        i_hv = cluster.FillHV(self.instance, skip_globals=False)
        self.lu.op.mode = i_hv[constants.HV_MIGRATION_MODE]

      self.live = self.lu.op.mode == constants.HT_MIGRATION_LIVE
    else:
      # Failover is never live
      self.live = False

    if not (self.failover or self.cleanup):
      remote_info = self.rpc.call_instance_info(
          self.instance.primary_node, self.instance.name,
          self.instance.hypervisor, cluster.hvparams[self.instance.hypervisor])
      remote_info.Raise("Error checking instance on node %s" %
                        self.cfg.GetNodeName(self.instance.primary_node),
                        prereq=True)
      instance_running = bool(remote_info.payload)
      if instance_running:
        self.current_mem = int(remote_info.payload["memory"])
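
The live/non-live resolution near the end of this example treats the 'live' and 'mode' parameters as mutually exclusive and falls back to the hypervisor's default migration mode when neither is given. A standalone sketch of that decision logic follows; the function and constant names are illustrative, not Ganeti's API.

# Hypothetical restatement of the live/mode resolution above.
MIGRATION_LIVE = "live"
MIGRATION_NONLIVE = "non-live"

def resolve_migration_mode(live, mode, hypervisor_default):
    """Return the migration mode, mirroring the precedence used above."""
    if live is not None and mode is not None:
        raise ValueError("Only one of 'live' and 'mode' may be given")
    if live is not None:
        return MIGRATION_LIVE if live else MIGRATION_NONLIVE
    if mode is None:
        # Neither parameter given: fall back to the hypervisor default.
        return hypervisor_default
    return mode

# Example: resolve_migration_mode(None, None, MIGRATION_LIVE) == "live"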
Example 4
    def CheckPrereq(self):
        """Check prerequisites.

        This checks that the instance and node names are valid.

        """
        self.instance = self.cfg.GetInstanceInfoByName(self.op.instance_name)
        assert self.instance is not None, \
              "Cannot retrieve locked instance %s" % self.op.instance_name
        CheckNodeOnline(self, self.instance.primary_node)

        if (self.op.remove_instance
                and self.instance.admin_state == constants.ADMINST_UP
                and not self.op.shutdown):
            raise errors.OpPrereqError(
                "Can not remove instance without shutting it"
                " down before", errors.ECODE_STATE)

        if self.op.mode == constants.EXPORT_MODE_LOCAL:
            self.dst_node = self.cfg.GetNodeInfo(self.op.target_node_uuid)
            assert self.dst_node is not None

            CheckNodeOnline(self, self.dst_node.uuid)
            CheckNodeNotDrained(self, self.dst_node.uuid)

            self._cds = None
            self.dest_disk_info = None
            self.dest_x509_ca = None

        elif self.op.mode == constants.EXPORT_MODE_REMOTE:
            self.dst_node = None

            if len(self.op.target_node) != len(self.instance.disks):
                raise errors.OpPrereqError(
                    ("Received destination information for %s"
                     " disks, but instance %s has %s disks") %
                    (len(self.op.target_node), self.op.instance_name,
                     len(self.instance.disks)), errors.ECODE_INVAL)

            cds = GetClusterDomainSecret()

            # Check X509 key name
            try:
                (key_name, hmac_digest, hmac_salt) = self.x509_key_name
            except (TypeError, ValueError) as err:
                raise errors.OpPrereqError(
                    "Invalid data for X509 key name: %s" % err,
                    errors.ECODE_INVAL)

            if not utils.VerifySha1Hmac(
                    cds, key_name, hmac_digest, salt=hmac_salt):
                raise errors.OpPrereqError("HMAC for X509 key name is wrong",
                                           errors.ECODE_INVAL)

            # Load and verify CA
            try:
                (cert,
                 _) = utils.LoadSignedX509Certificate(self.dest_x509_ca_pem,
                                                      cds)
            except OpenSSL.crypto.Error as err:
                raise errors.OpPrereqError(
                    "Unable to load destination X509 CA (%s)" % (err, ),
                    errors.ECODE_INVAL)

            (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
            if errcode is not None:
                raise errors.OpPrereqError(
                    "Invalid destination X509 CA (%s)" % (msg, ),
                    errors.ECODE_INVAL)

            self.dest_x509_ca = cert

            # Verify target information
            disk_info = []
            for idx, disk_data in enumerate(self.op.target_node):
                try:
                    (host, port, magic) = \
                      masterd.instance.CheckRemoteExportDiskInfo(cds, idx, disk_data)
                except errors.GenericError as err:
                    raise errors.OpPrereqError(
                        "Target info for disk %s: %s" % (idx, err),
                        errors.ECODE_INVAL)

                disk_info.append((host, port, magic))

            assert len(disk_info) == len(self.op.target_node)
            self.dest_disk_info = disk_info

        else:
            raise errors.ProgrammerError("Unhandled export mode %r" %
                                         self.op.mode)

        # Check prerequisites for zeroing
        if self.op.zero_free_space:
            # Check that user shutdown detection has been enabled
            hvparams = self.cfg.GetClusterInfo().FillHV(self.instance)
            if self.instance.hypervisor == constants.HT_KVM and \
               not hvparams.get(constants.HV_KVM_USER_SHUTDOWN, False):
                raise errors.OpPrereqError(
                    "Instance shutdown detection must be "
                    "enabled for zeroing to work", errors.ECODE_INVAL)

            # Check that the instance is set to boot from the disk
            if constants.HV_BOOT_ORDER in hvparams and \
               hvparams[constants.HV_BOOT_ORDER] != constants.HT_BO_DISK:
                raise errors.OpPrereqError(
                    "Booting from disk must be set for zeroing "
                    "to work", errors.ECODE_INVAL)

            # Check that the zeroing image is set
            if not self.cfg.GetZeroingImage():
                raise errors.OpPrereqError(
                    "A zeroing image must be set for zeroing to"
                    " work", errors.ECODE_INVAL)

            if self.op.zeroing_timeout_fixed is None:
                self.op.zeroing_timeout_fixed = constants.HELPER_VM_STARTUP

            if self.op.zeroing_timeout_per_mib is None:
                self.op.zeroing_timeout_per_mib = constants.ZEROING_TIMEOUT_PER_MIB

        else:
            if (self.op.zeroing_timeout_fixed is not None
                    or self.op.zeroing_timeout_per_mib is not None):
                raise errors.OpPrereqError(
                    "Zeroing timeout options can only be used"
                    " only with the --zero-free-space option",
                    errors.ECODE_INVAL)

        if self.op.long_sleep and not self.op.shutdown:
            raise errors.OpPrereqError(
                "The long sleep option only makes sense when"
                " the instance can be shut down.", errors.ECODE_INVAL)

        self.secondary_nodes = \
          self.cfg.GetInstanceSecondaryNodes(self.instance.uuid)
        self.inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid)

        # Check if the compression tool is whitelisted
        CheckCompressionTool(self, self.op.compress)
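
The zeroing prerequisites above validate a fixed startup timeout and a per-MiB timeout but do not combine them. One plausible way such a pair would be turned into an overall deadline is sketched below; this is an assumption for illustration only, not taken from the code above.

# Hypothetical illustration: combining a fixed startup allowance with a
# per-MiB rate into a total zeroing timeout. The code above only validates
# that the two parameters are set consistently.
def zeroing_timeout_seconds(fixed_sec, per_mib_sec, disk_size_mib):
    return fixed_sec + per_mib_sec * disk_size_mib

# Example: a 300 s startup allowance plus 0.1 s/MiB for a 10 GiB disk.
# zeroing_timeout_seconds(300, 0.1, 10 * 1024) == 1324.0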