Example #1
    def testHeterogeneousDiskless(self):
        self.assertFalse(
            utils.AllDiskOfType([Rbd(), Drbd()], [constants.DT_DISKLESS]))
Example #2
    def CheckPrereq(self):
        """Check prerequisites.

    This checks that the instance is in the cluster.

    """
        (self.instance_uuid, self.instance_name) = \
          ExpandInstanceUuidAndName(self.lu.cfg, self.instance_uuid,
                                    self.instance_name)
        self.instance = self.cfg.GetInstanceInfo(self.instance_uuid)
        assert self.instance is not None
        cluster = self.cfg.GetClusterInfo()

        if (not self.cleanup
                and not self.instance.admin_state == constants.ADMINST_UP
                and not self.failover and self.fallback):
            self.lu.LogInfo(
                "Instance is marked down or offline, fallback allowed,"
                " switching to failover")
            self.failover = True

        disks = self.cfg.GetInstanceDisks(self.instance.uuid)

        if not utils.AllDiskOfType(disks, constants.DTS_MIRRORED):
            if self.failover:
                text = "failovers"
            else:
                text = "migrations"
            invalid_disks = set(d.dev_type for d in disks
                                if d.dev_type not in constants.DTS_MIRRORED)
            raise errors.OpPrereqError(
                "Instance's disk layout '%s' does not allow"
                " %s" % (utils.CommaJoin(invalid_disks), text),
                errors.ECODE_STATE)

        # TODO allow heterogeneous disk types if all are mirrored in some way.
        if utils.AllDiskOfType(disks, constants.DTS_EXT_MIRROR):
            CheckIAllocatorOrNode(self.lu, "iallocator", "target_node")

            if self.lu.op.iallocator:
                self._RunAllocator()
            else:
                # We set self.target_node_uuid as it is required by
                # BuildHooksEnv
                self.target_node_uuid = self.lu.op.target_node_uuid

            # Check that the target node is correct in terms of instance policy
            nodeinfo = self.cfg.GetNodeInfo(self.target_node_uuid)
            group_info = self.cfg.GetNodeGroup(nodeinfo.group)
            ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(
                cluster, group_info)
            CheckTargetNodeIPolicy(self.lu,
                                   ipolicy,
                                   self.instance,
                                   nodeinfo,
                                   self.cfg,
                                   ignore=self.ignore_ipolicy)

            # self.target_node_uuid is already populated, either directly or
            # by the iallocator run
            target_node_uuid = self.target_node_uuid
            if self.target_node_uuid == self.instance.primary_node:
                raise errors.OpPrereqError(
                    "Cannot migrate instance %s to its primary (%s)" %
                    (self.instance.name,
                     self.cfg.GetNodeName(self.instance.primary_node)),
                    errors.ECODE_STATE)

            if len(self.lu.tasklets) == 1:
                # It is safe to release locks only when we're the only tasklet
                # in the LU
                ReleaseLocks(
                    self.lu,
                    locking.LEVEL_NODE,
                    keep=[self.instance.primary_node, self.target_node_uuid])

        elif utils.AllDiskOfType(disks, constants.DTS_INT_MIRROR):
            templates = [d.dev_type for d in disks]
            secondary_node_uuids = \
              self.cfg.GetInstanceSecondaryNodes(self.instance.uuid)
            if not secondary_node_uuids:
                raise errors.ConfigurationError(
                    "No secondary node but using"
                    " %s disk types" % utils.CommaJoin(set(templates)))
            self.target_node_uuid = target_node_uuid = secondary_node_uuids[0]
            if self.lu.op.iallocator or \
              (self.lu.op.target_node_uuid and
               self.lu.op.target_node_uuid != target_node_uuid):
                if self.failover:
                    text = "failed over"
                else:
                    text = "migrated"
                raise errors.OpPrereqError(
                    "Instances with disk types %s cannot"
                    " be %s to arbitrary nodes"
                    " (neither an iallocator nor a target"
                    " node can be passed)" %
                    (utils.CommaJoin(set(templates)), text),
                    errors.ECODE_INVAL)
            nodeinfo = self.cfg.GetNodeInfo(target_node_uuid)
            group_info = self.cfg.GetNodeGroup(nodeinfo.group)
            ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(
                cluster, group_info)
            CheckTargetNodeIPolicy(self.lu,
                                   ipolicy,
                                   self.instance,
                                   nodeinfo,
                                   self.cfg,
                                   ignore=self.ignore_ipolicy)

        else:
            raise errors.OpPrereqError(
                "Instance mixes internal and external "
                "mirroring. This is not currently supported.")

        i_be = cluster.FillBE(self.instance)

        # check memory requirements on the secondary node
        if (not self.cleanup
                and (not self.failover
                     or self.instance.admin_state == constants.ADMINST_UP)):
            self.tgt_free_mem = CheckNodeFreeMemory(
                self.lu, target_node_uuid,
                "migrating instance %s" % self.instance.name,
                i_be[constants.BE_MINMEM], self.instance.hypervisor,
                self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])
        else:
            self.lu.LogInfo("Not checking memory on the secondary node as"
                            " instance will not be started")

        # check if failover must be forced instead of migration
        if (not self.cleanup and not self.failover
                and i_be[constants.BE_ALWAYS_FAILOVER]):
            self.lu.LogInfo("Instance configured to always failover; fallback"
                            " to failover")
            self.failover = True

        # check bridge existence
        CheckInstanceBridgesExist(self.lu,
                                  self.instance,
                                  node_uuid=target_node_uuid)

        if not self.cleanup:
            CheckNodeNotDrained(self.lu, target_node_uuid)
            if not self.failover:
                result = self.rpc.call_instance_migratable(
                    self.instance.primary_node, self.instance)
                if result.fail_msg and self.fallback:
                    self.lu.LogInfo(
                        "Can't migrate, instance offline, fallback to"
                        " failover")
                    self.failover = True
                else:
                    result.Raise("Can't migrate, please use failover",
                                 prereq=True,
                                 ecode=errors.ECODE_STATE)

        assert not (self.failover and self.cleanup)

        if not self.failover:
            if self.lu.op.live is not None and self.lu.op.mode is not None:
                raise errors.OpPrereqError(
                    "Only one of the 'live' and 'mode'"
                    " parameters are accepted", errors.ECODE_INVAL)
            if self.lu.op.live is not None:
                if self.lu.op.live:
                    self.lu.op.mode = constants.HT_MIGRATION_LIVE
                else:
                    self.lu.op.mode = constants.HT_MIGRATION_NONLIVE
                # reset the 'live' parameter to None so that repeated
                # invocations of CheckPrereq do not raise an exception
                self.lu.op.live = None
            elif self.lu.op.mode is None:
                # read the default value from the hypervisor
                i_hv = cluster.FillHV(self.instance, skip_globals=False)
                self.lu.op.mode = i_hv[constants.HV_MIGRATION_MODE]

            self.live = self.lu.op.mode == constants.HT_MIGRATION_LIVE
        else:
            # Failover is never live
            self.live = False

        if not (self.failover or self.cleanup):
            remote_info = self.rpc.call_instance_info(
                self.instance.primary_node, self.instance.name,
                self.instance.hypervisor,
                cluster.hvparams[self.instance.hypervisor])
            remote_info.Raise("Error checking instance on node %s" %
                              self.cfg.GetNodeName(self.instance.primary_node),
                              prereq=True)
            instance_running = bool(remote_info.payload)
            if instance_running:
                self.current_mem = int(remote_info.payload["memory"])
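The CheckPrereq excerpt above hinges on three utils.AllDiskOfType checks to decide how the migration target may be chosen. The helper below is a hypothetical distillation of that branching, not Ganeti code; it only assumes the names already used above (utils.AllDiskOfType and the constants.DTS_* disk-template sets) and leaves out the surrounding iallocator, ipolicy and locking logic.

from ganeti import constants
from ganeti import utils


def ClassifyMigrationDisks(disks):
    """Hypothetical helper: classify an instance's disk layout for migration.

    Returns "unmirrored", "external", "internal" or "mixed", matching the
    branches taken in CheckPrereq above.
    """
    if not utils.AllDiskOfType(disks, constants.DTS_MIRRORED):
        # At least one disk is not mirrored: neither migration nor failover
        # is possible.
        return "unmirrored"
    if utils.AllDiskOfType(disks, constants.DTS_EXT_MIRROR):
        # All disks externally mirrored (e.g. RBD): the target node is free,
        # picked either via an iallocator or an explicit target node.
        return "external"
    if utils.AllDiskOfType(disks, constants.DTS_INT_MIRROR):
        # All disks internally mirrored (e.g. DRBD): the secondary node is
        # the only possible target.
        return "internal"
    # A mix of internal and external mirroring is rejected for now.
    return "mixed"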
Example #3
    def testNotRbdDiskless(self):
        self.assertFalse(utils.AllDiskOfType([Rbd()], [constants.DT_DISKLESS]))
Example #4
    def testHeterogeneous(self):
        self.assertFalse(
            utils.AllDiskOfType([Rbd(), Drbd()], [constants.DT_DRBD8]))
Example #5
    def testNotDiskless(self):
        self.assertFalse(utils.AllDiskOfType([], [constants.DT_DRBD8]))
Example #6
    def testNotRbd(self):
        self.assertFalse(utils.AllDiskOfType([Rbd()], [constants.DT_DRBD8]))
Example #7
    def testOrRbd(self):
        self.assertTrue(
            utils.AllDiskOfType([Rbd()],
                                [constants.DT_RBD, constants.DT_DRBD8]))
Example #8
    def testOrDrbd(self):
        self.assertTrue(
            utils.AllDiskOfType([Drbd()],
                                [constants.DT_DISKLESS, constants.DT_DRBD8]))
Example #9
    def testAllDiskless(self):
        self.assertTrue(utils.AllDiskOfType([], [constants.DT_DISKLESS]))
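Taken together, the tests pin down the semantics of utils.AllDiskOfType: every disk's dev_type must be in the accepted set, and an empty disk list counts as diskless, so it matches only when constants.DT_DISKLESS is accepted. A minimal sketch consistent with these tests (an illustration, not the actual Ganeti implementation, which may differ in validation details) looks like this:

from ganeti import constants


def AllDiskOfType(disks_info, dev_types):
    """Sketch: True iff every disk's dev_type is in dev_types.

    An empty disk list is treated as diskless and therefore matches only
    when constants.DT_DISKLESS is among the accepted types.
    """
    if not disks_info:
        return constants.DT_DISKLESS in dev_types
    return all(d.dev_type in dev_types for d in disks_info)

Under this sketch, AllDiskOfType([Drbd()], [constants.DT_DISKLESS, constants.DT_DRBD8]) is True while AllDiskOfType([Rbd(), Drbd()], [constants.DT_DRBD8]) is False, matching testOrDrbd and testHeterogeneous above.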