Example #1
0
    def CheckPrereq(self):
        """Check prerequisites.

        Verifies that the instance exists in the cluster configuration,
        that its primary node is reachable, that it has at least one
        disk, and that it is stopped before the OS reinstall proceeds.

        """
        inst = self.cfg.GetInstanceInfo(self.op.instance_uuid)
        assert inst is not None, \
          "Cannot retrieve locked instance %s" % self.op.instance_name

        CheckNodeOnline(self, inst.primary_node, "Instance primary node"
                        " offline, cannot reinstall")

        if not inst.disks:
            raise errors.OpPrereqError(
                "Instance '%s' has no disks" % self.op.instance_name,
                errors.ECODE_INVAL)

        CheckInstanceState(self, inst, INSTANCE_DOWN, msg="cannot reinstall")

        # OS parameter merging/validation is delegated to a shared helper.
        self._MergeValidateOsParams(inst)

        self.instance = inst
Example #2
0
    def CheckPrereq(self):
        """Check prerequisites.

        Resolves the instance name/UUID, verifies that any file-based
        disks use an enabled disk template, that the primary node is
        online, that the instance is stopped, and that the new name is
        not already taken.

        """
        (self.op.instance_uuid, self.op.instance_name) = \
          ExpandInstanceUuidAndName(self.cfg, self.op.instance_uuid,
                                    self.op.instance_name)
        inst = self.cfg.GetInstanceInfo(self.op.instance_uuid)
        assert inst is not None

        # An instance should never run with a disabled disk template, but
        # if it does, renaming a file-based instance would fail badly --
        # so verify up front.
        for dsk in self.cfg.GetInstanceDisks(inst.uuid):
            if (self.op.new_name != inst.name
                    and dsk.dev_type in constants.DTS_FILEBASED):
                # TODO: when disks are separate objects, this should check
                # for disk types, not disk templates.
                CheckDiskTemplateEnabled(self.cfg.GetClusterInfo(),
                                         dsk.dev_type)

        CheckNodeOnline(self, inst.primary_node)
        CheckInstanceState(self, inst, INSTANCE_NOT_RUNNING,
                           msg="cannot rename")
        self.instance = inst

        self._PerformChecksAndResolveNewName()

        # Re-compare after name resolution above.
        if self.op.new_name != inst.name:
            CheckInstanceExistence(self, self.op.new_name)
Example #3
0
    def CheckPrereq(self):
        """Check prerequisites.

        Ensures the instance and target node are valid for a move: all
        disks must be copyable; the target node must differ from the
        primary, be online, not drained and VM-capable; the group
        instance policy must allow the instance; and, for a running
        instance, the target node must have enough free memory.

        """
        self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
        assert self.instance is not None, \
          "Cannot retrieve locked instance %s" % self.op.instance_name

        inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
        for idx, dsk in enumerate(inst_disks):
            if dsk.dev_type not in constants.DTS_COPYABLE:
                raise errors.OpPrereqError(
                    "Instance disk %d has disk type %s and is"
                    " not suitable for copying" % (idx, dsk.dev_type),
                    errors.ECODE_STATE)

        tnode = self.cfg.GetNodeInfo(self.op.target_node_uuid)
        assert tnode is not None, \
          "Cannot retrieve locked node %s" % self.op.target_node

        self.target_node_uuid = tnode.uuid
        if tnode.uuid == self.instance.primary_node:
            raise errors.OpPrereqError(
                "Instance %s is already on the node %s" %
                (self.instance.name, tnode.name), errors.ECODE_STATE)

        cluster = self.cfg.GetClusterInfo()
        be_params = cluster.FillBE(self.instance)

        CheckNodeOnline(self, tnode.uuid)
        CheckNodeNotDrained(self, tnode.uuid)
        CheckNodeVmCapable(self, tnode.uuid)
        ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(
            cluster, self.cfg.GetNodeGroup(tnode.group))
        CheckTargetNodeIPolicy(self, ipolicy, self.instance, tnode,
                               self.cfg, ignore=self.op.ignore_ipolicy)

        if self.instance.admin_state == constants.ADMINST_UP:
            # Only a running instance needs memory on the target node.
            CheckNodeFreeMemory(
                self, tnode.uuid,
                "failing over instance %s" % self.instance.name,
                be_params[constants.BE_MAXMEM], self.instance.hypervisor,
                cluster.hvparams[self.instance.hypervisor])
        else:
            self.LogInfo("Not checking memory on the secondary node as"
                         " instance will not be started")

        # Verify the instance's bridges exist on the target node.
        CheckInstanceBridgesExist(self, self.instance,
                                  node_uuid=tnode.uuid)
Example #4
0
    def CheckPrereq(self):
        """Check prerequisites.

        Verifies that the instance is known to the cluster and that its
        primary node is online.

        """
        inst = self.cfg.GetInstanceInfo(self.op.instance_uuid)
        assert inst is not None, \
          "Cannot retrieve locked instance %s" % self.op.instance_name
        self.instance = inst
        CheckNodeOnline(self, inst.primary_node)
Example #5
0
    def CheckPrereq(self):
        """Check prerequisites.

        Looks up the instance by name, ensures its primary node is
        online, and caches the cluster domain secret for later use.

        """
        inst = self.cfg.GetInstanceInfoByName(self.op.instance_name)
        assert inst is not None, \
              "Cannot retrieve locked instance %s" % self.op.instance_name
        self.instance = inst
        CheckNodeOnline(self, inst.primary_node)

        self._cds = GetClusterDomainSecret()
Example #6
0
    def CheckPrereq(self):
        """Check prerequisites.

        Verifies that the instance exists, is administratively online,
        has an online primary node, and that its network bridges exist.

        """
        inst = self.cfg.GetInstanceInfo(self.op.instance_uuid)
        assert inst is not None, \
          "Cannot retrieve locked instance %s" % self.op.instance_name
        self.instance = inst

        CheckInstanceState(self, inst, INSTANCE_ONLINE)
        CheckNodeOnline(self, inst.primary_node)

        # The instance's bridges must be present on the primary node.
        CheckInstanceBridgesExist(self, inst)
Example #7
0
    def CheckPrereq(self):
        """Check prerequisites.

        Verifies that the instance exists.  The online-state check can
        be skipped with the force flag, and the primary-node online
        check can be skipped for offline nodes when explicitly
        requested.

        """
        self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
        assert self.instance is not None, \
          "Cannot retrieve locked instance %s" % self.op.instance_name

        if self.op.force:
            self.LogWarning("Ignoring offline instance check")
        else:
            CheckInstanceState(self, self.instance, INSTANCE_ONLINE)

        pnode = self.instance.primary_node
        self.primary_offline = self.cfg.GetNodeInfo(pnode).offline

        if self.primary_offline and self.op.ignore_offline_nodes:
            self.LogWarning("Ignoring offline primary node")
        else:
            CheckNodeOnline(self, pnode)
Example #8
0
    def CheckPrereq(self):
        """Check prerequisites.

        This checks that the instance is in the cluster, optionally that
        it is online (skipped with the force flag), that its primary
        node is reachable (unless offline nodes are explicitly ignored)
        and, when the admin-state change is attributed to the user, that
        the hypervisor actually reports a user-initiated shutdown.

        """
        self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
        assert self.instance is not None, \
          "Cannot retrieve locked instance %s" % self.op.instance_name

        if self.op.force:
            self.LogWarning("Ignoring offline instance check")
        else:
            CheckInstanceState(self, self.instance, INSTANCE_ONLINE)

        self.primary_offline = \
          self.cfg.GetNodeInfo(self.instance.primary_node).offline

        if self.primary_offline and self.op.ignore_offline_nodes:
            self.LogWarning("Ignoring offline primary node")
        else:
            CheckNodeOnline(self, self.instance.primary_node)

        if self.op.admin_state_source == constants.USER_SOURCE:
            cluster = self.cfg.GetClusterInfo()

            # Ask the hypervisor on the primary node for the live state.
            result = self.rpc.call_instance_info(
                self.instance.primary_node, self.instance.name,
                self.instance.hypervisor,
                cluster.hvparams[self.instance.hypervisor])
            result.Raise("Error checking instance '%s'" % self.instance.name,
                         prereq=True)

            if not _IsInstanceUserDown(cluster, self.instance, result.payload):
                # FIX: pass an explicit error code, consistent with every
                # other OpPrereqError raise in this module.
                raise errors.OpPrereqError(
                    "Instance '%s' was not shutdown by the user" %
                    self.instance.name, errors.ECODE_STATE)
    def CheckPrereq(self):
        """Check prerequisites.

        This checks that the instance is in the cluster, that its
        primary node is online, and that every requested disk snapshot
        identifies an existing disk and provides a snapshot name.

        """
        instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
        assert instance is not None, \
          "Cannot retrieve locked instance %s" % self.op.instance_name
        CheckNodeOnline(self, instance.primary_node, "Instance primary node"
                        " offline, cannot snapshot")

        disks = self.cfg.GetInstanceDisks(instance.uuid)

        # Collected as (disk index, disk object, snapshot name) tuples.
        self.snapshots = []
        for ident, params in self.op.disks:
            idx, disk = GetItemFromContainer(ident, 'disk', disks)
            snapshot_name = params.get("snapshot_name", None)
            if not snapshot_name:
                # BUGFIX: the message must be %-formatted; previously the
                # identifier was passed as OpPrereqError's positional
                # error-code argument instead of into the message, and no
                # error code was supplied.
                raise errors.OpPrereqError(
                    "No snapshot_name passed for disk %s" % ident,
                    errors.ECODE_INVAL)
            self.snapshots.append((idx, disk, snapshot_name))

        self.instance = instance
Example #10
0
    def CheckPrereq(self):
        """Check prerequisites.

        This checks that the instance is in the cluster, validates any
        hypervisor-parameter overrides, and -- when the primary node is
        reachable -- checks bridge existence, queries the hypervisor for
        the live instance state and, if the instance needs starting,
        verifies the node has enough free memory.

        """
        self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
        assert self.instance is not None, \
          "Cannot retrieve locked instance %s" % self.op.instance_name

        cluster = self.cfg.GetClusterInfo()
        # extra hvparams
        if self.op.hvparams:
            # check hypervisor parameter syntax (locally)
            utils.ForceDictType(self.op.hvparams,
                                constants.HVS_PARAMETER_TYPES)
            filled_hvp = cluster.FillHV(self.instance)
            filled_hvp.update(self.op.hvparams)
            hv_type = hypervisor.GetHypervisorClass(self.instance.hypervisor)
            hv_type.CheckParameterSyntax(filled_hvp)
            CheckHVParams(self, self.cfg.GetInstanceNodes(self.instance.uuid),
                          self.instance.hypervisor, filled_hvp)

        CheckInstanceState(self, self.instance, INSTANCE_ONLINE)

        self.primary_offline = \
          self.cfg.GetNodeInfo(self.instance.primary_node).offline

        if self.primary_offline and self.op.ignore_offline_nodes:
            self.LogWarning("Ignoring offline primary node")

            if self.op.hvparams or self.op.beparams:
                self.LogWarning("Overridden parameters are ignored")
        else:
            CheckNodeOnline(self, self.instance.primary_node)

            # CONSISTENCY FIX: reuse the 'cluster' object fetched above
            # instead of calling GetClusterInfo() again for each lookup
            # below (no configuration writes occur in between).
            bep = cluster.FillBE(self.instance)
            bep.update(self.op.beparams)

            # check bridges existence
            CheckInstanceBridgesExist(self, self.instance)

            remote_info = self.rpc.call_instance_info(
                self.instance.primary_node, self.instance.name,
                self.instance.hypervisor,
                cluster.hvparams[self.instance.hypervisor])
            remote_info.Raise("Error checking node %s" %
                              self.cfg.GetNodeName(self.instance.primary_node),
                              prereq=True,
                              ecode=errors.ECODE_ENVIRON)

            self.requires_cleanup = False

            if remote_info.payload:
                # Instance is running at the hypervisor level; a cleanup
                # is needed if the user shut it down from inside.
                if _IsInstanceUserDown(cluster, self.instance,
                                       remote_info.payload):
                    self.requires_cleanup = True
            else:  # not running already
                CheckNodeFreeMemory(
                    self, self.instance.primary_node,
                    "starting instance %s" % self.instance.name,
                    bep[constants.BE_MINMEM], self.instance.hypervisor,
                    cluster.hvparams[self.instance.hypervisor])
Example #11
0
    def CheckPrereq(self):
        """Check prerequisites.

        This checks that the instance and node names are valid.  For a
        local export, the target node must be online and not drained;
        for a remote export, the X509 key name, its HMAC, and the
        destination CA are verified.

        """
        self.instance = self.cfg.GetInstanceInfoByName(self.op.instance_name)
        assert self.instance is not None, \
              "Cannot retrieve locked instance %s" % self.op.instance_name
        CheckNodeOnline(self, self.instance.primary_node)

        # A running instance may only be removed if it is shut down first.
        if (self.op.remove_instance
                and self.instance.admin_state == constants.ADMINST_UP
                and not self.op.shutdown):
            raise errors.OpPrereqError(
                "Can not remove instance without shutting it"
                " down before", errors.ECODE_STATE)

        if self.op.mode == constants.EXPORT_MODE_LOCAL:
            self.dst_node = self.cfg.GetNodeInfo(self.op.target_node_uuid)
            assert self.dst_node is not None

            CheckNodeOnline(self, self.dst_node.uuid)
            CheckNodeNotDrained(self, self.dst_node.uuid)

            # Local exports need no domain secret or X509 material.
            self._cds = None
            self.dest_disk_info = None
            self.dest_x509_ca = None

        elif self.op.mode == constants.EXPORT_MODE_REMOTE:
            self.dst_node = None

            # One destination entry is expected per instance disk.
            if len(self.op.target_node) != len(self.instance.disks):
                raise errors.OpPrereqError(
                    ("Received destination information for %s"
                     " disks, but instance %s has %s disks") %
                    (len(self.op.target_node), self.op.instance_name,
                     len(self.instance.disks)), errors.ECODE_INVAL)

            cds = GetClusterDomainSecret()

            # Check X509 key name
            try:
                (key_name, hmac_digest, hmac_salt) = self.x509_key_name
            # BUGFIX: use the Python 3 "except ... as err" syntax; the
            # legacy "except ..., err" form is a SyntaxError on Python 3
            # (cf. the identical modernized CheckPrereq in this file).
            except (TypeError, ValueError) as err:
                raise errors.OpPrereqError(
                    "Invalid data for X509 key name: %s" % err,
                    errors.ECODE_INVAL)

            # The key name is authenticated with an HMAC keyed on the
            # cluster domain secret.
            if not utils.VerifySha1Hmac(
                    cds, key_name, hmac_digest, salt=hmac_salt):
                raise errors.OpPrereqError("HMAC for X509 key name is wrong",
                                           errors.ECODE_INVAL)

            # Load and verify CA
            try:
                (cert,
                 _) = utils.LoadSignedX509Certificate(self.dest_x509_ca_pem,
                                                      cds)
            except OpenSSL.crypto.Error as err:
                raise errors.OpPrereqError(
                    "Unable to load destination X509 CA (%s)" % (err, ),
                    errors.ECODE_INVAL)
Example #12
0
    def CheckPrereq(self):
        """Check prerequisites.

    This checks that the instance and node names are valid.

    For a local export the target node must be online and not drained;
    for a remote export the X509 key name, its HMAC and the destination
    CA are verified and the per-disk target information is decoded.
    The zeroing options, the long-sleep option and the compression tool
    are also validated.

    """
        self.instance = self.cfg.GetInstanceInfoByName(self.op.instance_name)
        assert self.instance is not None, \
              "Cannot retrieve locked instance %s" % self.op.instance_name
        CheckNodeOnline(self, self.instance.primary_node)

        # Removing a running instance is only allowed when it is shut
        # down as part of the export.
        if (self.op.remove_instance
                and self.instance.admin_state == constants.ADMINST_UP
                and not self.op.shutdown):
            raise errors.OpPrereqError(
                "Can not remove instance without shutting it"
                " down before", errors.ECODE_STATE)

        if self.op.mode == constants.EXPORT_MODE_LOCAL:
            self.dst_node = self.cfg.GetNodeInfo(self.op.target_node_uuid)
            assert self.dst_node is not None

            CheckNodeOnline(self, self.dst_node.uuid)
            CheckNodeNotDrained(self, self.dst_node.uuid)

            # Local exports need no domain secret or X509 material.
            self._cds = None
            self.dest_disk_info = None
            self.dest_x509_ca = None

        elif self.op.mode == constants.EXPORT_MODE_REMOTE:
            self.dst_node = None

            # One destination entry is expected per instance disk.
            if len(self.op.target_node) != len(self.instance.disks):
                raise errors.OpPrereqError(
                    ("Received destination information for %s"
                     " disks, but instance %s has %s disks") %
                    (len(self.op.target_node), self.op.instance_name,
                     len(self.instance.disks)), errors.ECODE_INVAL)

            cds = GetClusterDomainSecret()

            # Check X509 key name
            try:
                (key_name, hmac_digest, hmac_salt) = self.x509_key_name
            except (TypeError, ValueError) as err:
                raise errors.OpPrereqError(
                    "Invalid data for X509 key name: %s" % err,
                    errors.ECODE_INVAL)

            # The key name is authenticated with an HMAC keyed on the
            # cluster domain secret.
            if not utils.VerifySha1Hmac(
                    cds, key_name, hmac_digest, salt=hmac_salt):
                raise errors.OpPrereqError("HMAC for X509 key name is wrong",
                                           errors.ECODE_INVAL)

            # Load and verify CA
            try:
                (cert,
                 _) = utils.LoadSignedX509Certificate(self.dest_x509_ca_pem,
                                                      cds)
            except OpenSSL.crypto.Error as err:
                raise errors.OpPrereqError(
                    "Unable to load destination X509 CA (%s)" % (err, ),
                    errors.ECODE_INVAL)

            (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
            if errcode is not None:
                raise errors.OpPrereqError(
                    "Invalid destination X509 CA (%s)" % (msg, ),
                    errors.ECODE_INVAL)

            self.dest_x509_ca = cert

            # Verify target information: decode each disk's destination
            # into a (host, port, magic) tuple.
            disk_info = []
            for idx, disk_data in enumerate(self.op.target_node):
                try:
                    (host, port, magic) = \
                      masterd.instance.CheckRemoteExportDiskInfo(cds, idx, disk_data)
                except errors.GenericError as err:
                    raise errors.OpPrereqError(
                        "Target info for disk %s: %s" % (idx, err),
                        errors.ECODE_INVAL)

                disk_info.append((host, port, magic))

            assert len(disk_info) == len(self.op.target_node)
            self.dest_disk_info = disk_info

        else:
            raise errors.ProgrammerError("Unhandled export mode %r" %
                                         self.op.mode)

        # Check prerequisites for zeroing
        if self.op.zero_free_space:
            # Check that user shutdown detection has been enabled
            hvparams = self.cfg.GetClusterInfo().FillHV(self.instance)
            if self.instance.hypervisor == constants.HT_KVM and \
               not hvparams.get(constants.HV_KVM_USER_SHUTDOWN, False):
                raise errors.OpPrereqError(
                    "Instance shutdown detection must be "
                    "enabled for zeroing to work", errors.ECODE_INVAL)

            # Check that the instance is set to boot from the disk
            if constants.HV_BOOT_ORDER in hvparams and \
               hvparams[constants.HV_BOOT_ORDER] != constants.HT_BO_DISK:
                raise errors.OpPrereqError(
                    "Booting from disk must be set for zeroing "
                    "to work", errors.ECODE_INVAL)

            # Check that the zeroing image is set
            if not self.cfg.GetZeroingImage():
                raise errors.OpPrereqError(
                    "A zeroing image must be set for zeroing to"
                    " work", errors.ECODE_INVAL)

            # Apply default timeouts for any that were left unset.
            if self.op.zeroing_timeout_fixed is None:
                self.op.zeroing_timeout_fixed = constants.HELPER_VM_STARTUP

            if self.op.zeroing_timeout_per_mib is None:
                self.op.zeroing_timeout_per_mib = constants.ZEROING_TIMEOUT_PER_MIB

        else:
            # Zeroing-timeout options only make sense with zeroing.
            if (self.op.zeroing_timeout_fixed is not None
                    or self.op.zeroing_timeout_per_mib is not None):
                raise errors.OpPrereqError(
                    "Zeroing timeout options can only be used"
                    " only with the --zero-free-space option",
                    errors.ECODE_INVAL)

        if self.op.long_sleep and not self.op.shutdown:
            raise errors.OpPrereqError(
                "The long sleep option only makes sense when"
                " the instance can be shut down.", errors.ECODE_INVAL)

        self.secondary_nodes = \
          self.cfg.GetInstanceSecondaryNodes(self.instance.uuid)
        self.inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid)

        # Check if the compression tool is whitelisted
        CheckCompressionTool(self, self.op.compress)