Code example #1
File: instance.py — Project: dimara/ganeti
    def CheckPrereq(self):
        """Check prerequisites for moving the instance.

        Verifies that the instance exists in the cluster, that every
        disk uses a copyable disk type, and that the target node is a
        distinct, online, undrained, VM-capable node that satisfies the
        group instance policy.

        """
        self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
        assert self.instance is not None, \
          "Cannot retrieve locked instance %s" % self.op.instance_name

        instance_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
        for disk_index, disk in enumerate(instance_disks):
            if disk.dev_type in constants.DTS_COPYABLE:
                continue
            raise errors.OpPrereqError(
                "Instance disk %d has disk type %s and is"
                " not suitable for copying" % (disk_index, disk.dev_type),
                errors.ECODE_STATE)

        node = self.cfg.GetNodeInfo(self.op.target_node_uuid)
        assert node is not None, \
          "Cannot retrieve locked node %s" % self.op.target_node

        self.target_node_uuid = node.uuid
        if node.uuid == self.instance.primary_node:
            raise errors.OpPrereqError(
                "Instance %s is already on the node %s" %
                (self.instance.name, node.name), errors.ECODE_STATE)

        cluster_info = self.cfg.GetClusterInfo()
        be_params = cluster_info.FillBE(self.instance)

        CheckNodeOnline(self, node.uuid)
        CheckNodeNotDrained(self, node.uuid)
        CheckNodeVmCapable(self, node.uuid)
        node_group = self.cfg.GetNodeGroup(node.group)
        group_ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(
            cluster_info, node_group)
        CheckTargetNodeIPolicy(self, group_ipolicy, self.instance, node,
                               self.cfg, ignore=self.op.ignore_ipolicy)

        if self.instance.admin_state != constants.ADMINST_UP:
            self.LogInfo("Not checking memory on the secondary node as"
                         " instance will not be started")
        else:
            # the target node must be able to hold the instance's memory
            CheckNodeFreeMemory(
                self, node.uuid,
                "failing over instance %s" % self.instance.name,
                be_params[constants.BE_MAXMEM], self.instance.hypervisor,
                cluster_info.hvparams[self.instance.hypervisor])

        # the NIC bridges must also exist on the target node
        CheckInstanceBridgesExist(self, self.instance,
                                  node_uuid=node.uuid)
Code example #2
    def CheckPrereq(self):
        """Check prerequisites.

        The instance must be registered in the cluster, be in an online
        state, and have a reachable primary node with the required
        bridges present.

        """
        self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
        assert self.instance is not None, \
          "Cannot retrieve locked instance %s" % self.op.instance_name

        primary_node = self.instance.primary_node
        CheckInstanceState(self, self.instance, INSTANCE_ONLINE)
        CheckNodeOnline(self, primary_node)

        # the instance's NIC bridges must exist on the primary node
        CheckInstanceBridgesExist(self, self.instance)
Code example #3
    def CheckPrereq(self):
        """Check prerequisites.

        This checks that the instance is in the cluster, validates any
        hypervisor parameter overrides, and — unless the primary node is
        offline and offline nodes are explicitly ignored — verifies node
        availability, bridge existence and, for a stopped instance, free
        memory on the primary node.

        """
        self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
        assert self.instance is not None, \
          "Cannot retrieve locked instance %s" % self.op.instance_name

        cluster = self.cfg.GetClusterInfo()
        # extra hvparams
        if self.op.hvparams:
            # check hypervisor parameter syntax (locally)
            utils.ForceDictType(self.op.hvparams,
                                constants.HVS_PARAMETER_TYPES)
            filled_hvp = cluster.FillHV(self.instance)
            filled_hvp.update(self.op.hvparams)
            hv_type = hypervisor.GetHypervisorClass(self.instance.hypervisor)
            hv_type.CheckParameterSyntax(filled_hvp)
            CheckHVParams(self, self.cfg.GetInstanceNodes(self.instance.uuid),
                          self.instance.hypervisor, filled_hvp)

        CheckInstanceState(self, self.instance, INSTANCE_ONLINE)

        self.primary_offline = \
          self.cfg.GetNodeInfo(self.instance.primary_node).offline

        if self.primary_offline and self.op.ignore_offline_nodes:
            self.LogWarning("Ignoring offline primary node")

            if self.op.hvparams or self.op.beparams:
                self.LogWarning("Overridden parameters are ignored")
        else:
            CheckNodeOnline(self, self.instance.primary_node)

            # reuse the cluster object fetched above instead of calling
            # self.cfg.GetClusterInfo() again for the same data
            bep = cluster.FillBE(self.instance)
            bep.update(self.op.beparams)

            # check bridges existence
            CheckInstanceBridgesExist(self, self.instance)

            remote_info = self.rpc.call_instance_info(
                self.instance.primary_node, self.instance.name,
                self.instance.hypervisor,
                cluster.hvparams[self.instance.hypervisor])
            remote_info.Raise("Error checking node %s" %
                              self.cfg.GetNodeName(self.instance.primary_node),
                              prereq=True,
                              ecode=errors.ECODE_ENVIRON)

            # set when a user-initiated shutdown left state needing cleanup
            self.requires_cleanup = False

            if remote_info.payload:
                if _IsInstanceUserDown(cluster, self.instance,
                                       remote_info.payload):
                    self.requires_cleanup = True
            else:  # not running already
                CheckNodeFreeMemory(
                    self, self.instance.primary_node,
                    "starting instance %s" % self.instance.name,
                    bep[constants.BE_MINMEM], self.instance.hypervisor,
                    cluster.hvparams[self.instance.hypervisor])
Code example #4
  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    (self.instance_uuid, self.instance_name) = \
      ExpandInstanceUuidAndName(self.lu.cfg, self.instance_uuid,
                                self.instance_name)
    self.instance = self.cfg.GetInstanceInfo(self.instance_uuid)
    assert self.instance is not None
    cluster = self.cfg.GetClusterInfo()

    if (not self.cleanup and
        not self.instance.admin_state == constants.ADMINST_UP and
        not self.failover and self.fallback):
      self.lu.LogInfo("Instance is marked down or offline, fallback allowed,"
                      " switching to failover")
      self.failover = True

    disks = self.cfg.GetInstanceDisks(self.instance.uuid)

    if not utils.AllDiskOfType(disks, constants.DTS_MIRRORED):
      if self.failover:
        text = "failovers"
      else:
        text = "migrations"
      invalid_disks = set(d.dev_type for d in disks
                             if d.dev_type not in constants.DTS_MIRRORED)
      raise errors.OpPrereqError("Instance's disk layout '%s' does not allow"
                                 " %s" % (utils.CommaJoin(invalid_disks), text),
                                 errors.ECODE_STATE)

    # TODO allow heterogeneous disk types if all are mirrored in some way.
    if utils.AllDiskOfType(disks, constants.DTS_EXT_MIRROR):
      CheckIAllocatorOrNode(self.lu, "iallocator", "target_node")

      if self.lu.op.iallocator:
        self._RunAllocator()
      else:
        # We set set self.target_node_uuid as it is required by
        # BuildHooksEnv
        self.target_node_uuid = self.lu.op.target_node_uuid

      # Check that the target node is correct in terms of instance policy
      nodeinfo = self.cfg.GetNodeInfo(self.target_node_uuid)
      group_info = self.cfg.GetNodeGroup(nodeinfo.group)
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
                                                              group_info)
      CheckTargetNodeIPolicy(self.lu, ipolicy, self.instance, nodeinfo,
                             self.cfg, ignore=self.ignore_ipolicy)

      # self.target_node is already populated, either directly or by the
      # iallocator run
      target_node_uuid = self.target_node_uuid
      if self.target_node_uuid == self.instance.primary_node:
        raise errors.OpPrereqError(
          "Cannot migrate instance %s to its primary (%s)" %
          (self.instance.name,
           self.cfg.GetNodeName(self.instance.primary_node)),
          errors.ECODE_STATE)

      if len(self.lu.tasklets) == 1:
        # It is safe to release locks only when we're the only tasklet
        # in the LU
        ReleaseLocks(self.lu, locking.LEVEL_NODE,
                     keep=[self.instance.primary_node, self.target_node_uuid])

    elif utils.AllDiskOfType(disks, constants.DTS_INT_MIRROR):
      templates = [d.dev_type for d in disks]
      secondary_node_uuids = \
        self.cfg.GetInstanceSecondaryNodes(self.instance.uuid)
      if not secondary_node_uuids:
        raise errors.ConfigurationError("No secondary node but using"
                                        " %s disk types" %
                                        utils.CommaJoin(set(templates)))
      self.target_node_uuid = target_node_uuid = secondary_node_uuids[0]
      if self.lu.op.iallocator or \
        (self.lu.op.target_node_uuid and
         self.lu.op.target_node_uuid != target_node_uuid):
        if self.failover:
          text = "failed over"
        else:
          text = "migrated"
        raise errors.OpPrereqError("Instances with disk types %s cannot"
                                   " be %s to arbitrary nodes"
                                   " (neither an iallocator nor a target"
                                   " node can be passed)" %
                                   (utils.CommaJoin(set(templates)), text),
                                   errors.ECODE_INVAL)
      nodeinfo = self.cfg.GetNodeInfo(target_node_uuid)
      group_info = self.cfg.GetNodeGroup(nodeinfo.group)
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
                                                              group_info)
      CheckTargetNodeIPolicy(self.lu, ipolicy, self.instance, nodeinfo,
                             self.cfg, ignore=self.ignore_ipolicy)

    else:
      raise errors.OpPrereqError("Instance mixes internal and external "
                                 "mirroring. This is not currently supported.")

    i_be = cluster.FillBE(self.instance)

    # check memory requirements on the secondary node
    if (not self.cleanup and
         (not self.failover or
           self.instance.admin_state == constants.ADMINST_UP)):
      self.tgt_free_mem = CheckNodeFreeMemory(
          self.lu, target_node_uuid,
          "migrating instance %s" % self.instance.name,
          i_be[constants.BE_MINMEM], self.instance.hypervisor,
          self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])
    else:
      self.lu.LogInfo("Not checking memory on the secondary node as"
                      " instance will not be started")

    # check if failover must be forced instead of migration
    if (not self.cleanup and not self.failover and
        i_be[constants.BE_ALWAYS_FAILOVER]):
      self.lu.LogInfo("Instance configured to always failover; fallback"
                      " to failover")
      self.failover = True

    # check bridge existance
    CheckInstanceBridgesExist(self.lu, self.instance,
                              node_uuid=target_node_uuid)

    if not self.cleanup:
      CheckNodeNotDrained(self.lu, target_node_uuid)
      if not self.failover:
        result = self.rpc.call_instance_migratable(self.instance.primary_node,
                                                   self.instance)
        if result.fail_msg and self.fallback:
          self.lu.LogInfo("Can't migrate, instance offline, fallback to"
                          " failover")
          self.failover = True
        else:
          result.Raise("Can't migrate, please use failover",
                       prereq=True, ecode=errors.ECODE_STATE)

    assert not (self.failover and self.cleanup)

    if not self.failover:
      if self.lu.op.live is not None and self.lu.op.mode is not None:
        raise errors.OpPrereqError("Only one of the 'live' and 'mode'"
                                   " parameters are accepted",
                                   errors.ECODE_INVAL)
      if self.lu.op.live is not None:
        if self.lu.op.live:
          self.lu.op.mode = constants.HT_MIGRATION_LIVE
        else:
          self.lu.op.mode = constants.HT_MIGRATION_NONLIVE
        # reset the 'live' parameter to None so that repeated
        # invocations of CheckPrereq do not raise an exception
        self.lu.op.live = None
      elif self.lu.op.mode is None:
        # read the default value from the hypervisor
        i_hv = cluster.FillHV(self.instance, skip_globals=False)
        self.lu.op.mode = i_hv[constants.HV_MIGRATION_MODE]

      self.live = self.lu.op.mode == constants.HT_MIGRATION_LIVE
    else:
      # Failover is never live
      self.live = False

    if not (self.failover or self.cleanup):
      remote_info = self.rpc.call_instance_info(
          self.instance.primary_node, self.instance.name,
          self.instance.hypervisor, cluster.hvparams[self.instance.hypervisor])
      remote_info.Raise("Error checking instance on node %s" %
                        self.cfg.GetNodeName(self.instance.primary_node),
                        prereq=True)
      instance_running = bool(remote_info.payload)
      if instance_running:
        self.current_mem = int(remote_info.payload["memory"])